metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "josephhuxley/PoloDB",
"score": 3
} |
#### File: PoloDB/pypolodb/test_db.py
```python
import polodb
import os.path
DB_PATH = '/tmp/test-py.db'
def test_open():
if os.path.exists(DB_PATH):
        print('database exists, removing it')
os.remove(DB_PATH)
db = polodb.Database(DB_PATH)
db.close()
def test_create_collection():
db = polodb.Database(DB_PATH)
try:
collection = db.createCollection('test')
collection.insert({
'name': '<NAME>',
'age': 14,
})
result = collection.find({
'name': '<NAME>',
'age': 14,
})
assert len(result) == 1
assert result[0]['name'] == '<NAME>'
assert result[0]['age'] == 14
finally:
db.close()
``` |
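The test above opens and closes the database by hand. A minimal sketch, assuming only the `polodb.Database`, `createCollection`, `insert`, `find`, and `close` calls already used in the test, wraps the same flow in `contextlib.closing` so the handle is released even if an assertion fails (the `people` collection name and the DB path are illustrative):

```python
# Sketch: same PoloDB calls as the test above, with close() guaranteed by contextlib.
import contextlib
import polodb

CTX_DB_PATH = '/tmp/test-py-ctx.db'  # illustrative path

def insert_and_find(doc):
    # closing() invokes db.close() on exit, even if an assertion raises inside the block
    with contextlib.closing(polodb.Database(CTX_DB_PATH)) as db:
        collection = db.createCollection('people')  # illustrative collection name
        collection.insert(doc)
        return collection.find({'name': doc['name']})
```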
{
"source": "Josephine-coding/coach_diary",
"score": 3
} |
#### File: src/utils/sql_functions.py
```python
import sqlalchemy
import mysql.connector
def mysql_connect():
    ''' Open a connection to the MySQL database coach_diary. '''
from conf.conf_connect import mysql_pseudo, mysql_pw
mysql_username = mysql_pseudo
mysql_password = mysql_pw
database_name = 'coach_diary'
database_connection = sqlalchemy.create_engine('mysql+mysqlconnector://{0}:{1}@localhost/{2}'.format(mysql_username, mysql_password, database_name), pool_recycle=1, pool_timeout=57600).connect()
return database_connection
# Creation of the tables
def create_client_table(db_connection):
''' Create the client table'''
query = ''' CREATE TABLE IF NOT EXISTS client(
id_client INT NOT NULL,
name VARCHAR(30) NOT NULL,
firstname VARCHAR(30),
information VARCHAR(250),
        PRIMARY KEY (id_client)
)'''
db_connection.execute(query)
def create_text_table(db_connection):
""" Create the text table """
query = '''CREATE TABLE IF NOT EXISTS text(
id_text INT NOT NULL,
content TEXT NOT NULL,
creation_date DATE NOT NULL,
modification_date DATE,
id_client INT NOT NULL,
PRIMARY KEY (id_text),
FOREIGN KEY (id_client) REFERENCES client(id_client)
)'''
db_connection.execute(query)
# Testing query
def test_add_client(db_connection):
query = """INSERT INTO client (id_client, name, firstname, information)
VALUES (0, Doe, John, new client
)"""
db_connection.execute(query)
``` |
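`test_add_client` splices literal values straight into the SQL string; even with quoting fixed, a safer pattern is to bind parameters. A minimal sketch, assuming SQLAlchemy 1.4+ and the same connection object returned by `mysql_connect()` above (the helper name `add_client` is hypothetical):

```python
# Sketch: parameterized insert; values are bound by the driver instead of being
# spliced into the SQL text, so quoting and escaping are handled automatically.
import sqlalchemy

def add_client(db_connection, id_client, name, firstname, information):
    query = sqlalchemy.text(
        "INSERT INTO client (id_client, name, firstname, information) "
        "VALUES (:id_client, :name, :firstname, :information)"
    )
    db_connection.execute(query, {
        "id_client": id_client,
        "name": name,
        "firstname": firstname,
        "information": information,
    })
```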
{
"source": "josephinemonica/pcn",
"score": 3
} |
#### File: josephinemonica/pcn/my_kitti_util.py
```python
import os
import numpy as np
class Analyzer():
    '''
    Analyze errors across all tracks, all cars, and all time steps.
    '''
def __init__(self, eval_path_fun):
        # eval_path_fun is a callable (e.g. a lambda) used as
        # self.get_eval_path(data_type, track_no, car_no)
self.get_eval_path = eval_path_fun
self.poses = np.empty((0,4))
self.errors = np.empty((0,3))
def get_datatype_trackno_carno(self,data_types = ["train","val"]):
        '''
        Return argument_list: a list of tuples (data_type, track_no, car_no).
        '''
# Get data_type, track_no, car_no
argument_list = []
for data_type in data_types:
path_ = self.get_eval_path(data_type)
# Get all tracks
track_number_string_list = os.listdir(path_)
track_number_list = list(map(int,track_number_string_list))
track_number_list.sort()
for track_no in track_number_list:
# Get all cars inside track
track_path = self.get_eval_path(data_type,track_no)
                # Car-number folder names may include -001, which is invalid and is filtered out below
car_number_string_list = os.listdir(track_path)
car_number_list = list(map(int,car_number_string_list))
car_number_list.sort()
car_number_list = [car_no for car_no in car_number_list if car_no >=0]
for car_no in car_number_list:
argument_list.append((data_type,track_no,car_no))
return argument_list
``` |
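`Analyzer` only needs a callable that maps `(data_type[, track_no[, car_no]])` to a directory that `os.listdir` can walk. A minimal sketch, assuming a hypothetical layout `/data/eval/<data_type>/<track_no>/<car_no>/` (the root path and zero-padded folder names are assumptions, not part of the original code):

```python
# Sketch: wiring Analyzer to an assumed directory layout.
import os
from my_kitti_util import Analyzer  # assumes this file is importable as my_kitti_util

def eval_path(data_type, track_no=None, car_no=None):
    parts = ['/data/eval', data_type]        # assumed evaluation root
    if track_no is not None:
        parts.append('%04d' % track_no)      # assumed zero-padded track folders
    if car_no is not None:
        parts.append('%04d' % car_no)        # assumed zero-padded car folders
    return os.path.join(*parts)

analyzer = Analyzer(eval_path)
arguments = analyzer.get_datatype_trackno_carno(data_types=["train", "val"])
```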
{
"source": "josephine-nl/advent-of-code-2021",
"score": 3
} |
#### File: advent-of-code-2021/day10/main.py
```python
def loadfile(name):
lines = []
f = open(name, "r")
for x in f:
if x.endswith('\n'):
x = x[:-1]
lines.append(x)
return lines
def findCorrupted(line):
closingsExpected = []
for character in line:
if character in openingCharacters.keys():
closingsExpected.insert(0,openingCharacters[character])
elif character == closingsExpected[0]:
del closingsExpected[0]
else:
return closingCharacters[character]
return 0
def completeLine(line):
closingsExpected = []
total = 0
for character in line:
if character in openingCharacters.keys():
closingsExpected.insert(0,openingCharacters[character])
elif character == closingsExpected[0]:
del closingsExpected[0]
else:
return 0
for character in closingsExpected:
total = (total*5) + closingCharactersComplete[character]
return total
openingCharacters = {
"(":")",
"<":">",
"{":"}",
"[":"]"
}
closingCharacters = {
")": 3,
"]": 57,
"}": 1197,
">": 25137
}
closingCharactersComplete = {
")": 1,
"]": 2,
"}": 3,
">": 4
}
lines = loadfile("data.txt")
print(lines)
total = 0
for line in lines:
total = total + findCorrupted(line)
print("Answer 10a: ", total)
total = 0
scores = []
for line in lines:
score = completeLine(line)
if score > 0:
scores.append(score)
scores = sorted(scores)
middlescore = scores[int(len(scores)/2-0.5)]
print("Answer 10b: ", middlescore)
```
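As a sanity check of `findCorrupted` and `completeLine`, the snippet below, if appended to the script above, reproduces the published Advent of Code 2021 day 10 example values (the expected numbers come from that example and from tracing the scoring loop by hand):

```python
# Sanity check against the published AoC 2021 day 10 example lines.
corrupted = "{([(<{}[<>[]}>{[]{[(<()>"    # first illegal character is '}', worth 1197
incomplete = "[({(<(())[]>[[{[]{<()<>>"   # completion "}}]])})]" scores 288957
assert findCorrupted(corrupted) == 1197
assert completeLine(incomplete) == 288957
```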
#### File: advent-of-code-2021/day12/main.py
```python
def loadfile(name):
lines = []
f = open(name, "r")
for x in f:
if x.endswith('\n'):
x = x[:-1]
lines.append(x.split("-"))
return lines
def pathFromPosition (position, graph, path, s, e, goingTwice, bt):
beenThere = bt.copy()
path = path + "-" + position
print(path)
paths = []
if position == e:
return [path]
else:
edges = findEdgesFromPosition(position, graph)
if len(edges) == 0:
print("Doodlopend ", path)
return []
for edge in edges:
if not position[0].isupper():
if goingTwice == False:
graph = removeNodeFromGraph(graph, position)
else:
if position == s:
graph = removeNodeFromGraph(graph, position)
elif position in beenThere:
print(beenThere)
print("hiephoooi", path)
goingTwice = False
for p in beenThere:
graph = removeNodeFromGraph(graph, p)
else:
beenThere.append(position)
print("Beenthere", position, path)
cedge = edge.copy()
cedge.remove(position)
nextNode = cedge[0]
print(goingTwice)
paths.extend(pathFromPosition(nextNode, graph, path, s, e, goingTwice, beenThere))
return paths
def removeNodeFromGraph (graph, position):
g = []
for edge in graph:
if position not in edge:
g.append(edge)
return g
def findEdgesFromPosition (position, graph):
edges = []
for edge in graph:
if position in edge:
edges.append(edge)
return edges
originalGraph = loadfile("test.txt")
print(originalGraph)
endPaths = pathFromPosition("start", originalGraph, "", "start", "end", False, [])
endPaths2 = pathFromPosition("start", originalGraph, "", "start", "end", True, [])
print(endPaths)
print(endPaths2)
endPaths2 = list(dict.fromkeys(endPaths2))
print("Opdracht 12a: ", len(endPaths))
print("Opdracht 12b: ", len(endPaths2))
```
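`loadfile` returns the cave system as a flat list of two-element edges, and the two helpers below `pathFromPosition` just filter that list. A small sketch, appended to the script above, using three edges from the published AoC 2021 day 12 example graph:

```python
# Sketch: the graph is a plain list of [node, node] edges.
graph = [['start', 'A'], ['start', 'b'], ['A', 'end']]
print(findEdgesFromPosition('A', graph))    # [['start', 'A'], ['A', 'end']]
print(removeNodeFromGraph(graph, 'start'))  # [['A', 'end']]
```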
#### File: advent-of-code-2021/day2/main.py
```python
def loadfile(name):
values = []
f = open(name, "r")
for x in f:
values.append(x)
return values
def day2():
depth = 0
position = 0
depth2 = 0
for i in range(0, len(values)):
value = values[i].split()
if value[0] == "forward":
position += int(value[1])
depth2 += int(value[1]) * depth
elif value[0] == "down":
depth += int(value[1])
elif value[0] == "up":
depth -= int(value[1])
return [position,depth, depth2]
values = loadfile("data.txt")
print(values)
solution = day2()
print("full solution: " + str(solution))
print("solution day2a: " + str(solution[0]*solution[1]))
print("solution day2b: " + str(solution[0]*solution[2]))
```
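`day2()` tracks the part 1 position and depth and the aimed depth for part 2 in a single pass over the global `values` list. Tracing the published AoC 2021 day 2 example commands through it by hand gives the expected answers; a minimal check, assuming it is appended to the script above and `values` is set to that example instead of the contents of `data.txt`:

```python
# Check against the published AoC 2021 day 2 example commands.
values = ["forward 5", "down 5", "forward 8", "up 3", "down 8", "forward 2"]
assert day2() == [15, 10, 60]   # position, depth, aimed depth
assert 15 * 10 == 150           # part 1 answer for the example
assert 15 * 60 == 900           # part 2 answer for the example
```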
#### File: advent-of-code-2021/day5/main.py
```python
def loadfile(name):
lines = []
f = open(name, "r")
for x in f:
if x.endswith('\n'):
x = x[:-1]
points = x.split(" -> ")
line = []
for point in points:
p = [int(point.split(",")[0]), int(point.split(",")[1])]
line.append(p)
lines.append(line)
return lines
def getStraightLines():
straightLines = []
for line in lines:
if line[0][0] == line[1][0] or line[0][1] == line[1][1]:
straightLines.append(line)
return straightLines
def getAllPointsHit(Lines):
pointsHit = {}
for line in Lines:
if line[0][0] == line[1][0]:
direction = 1 if line[0][1] < line[1][1] else -1
for i in range(line[0][1], line[1][1] + direction, direction):
coordinate = str(line[0][0]) + "," + str(i)
if coordinate in pointsHit:
pointsHit[coordinate] = pointsHit[coordinate] + 1
else:
pointsHit[coordinate] = 1
elif line[0][1] == line[1][1]:
direction = 1 if line[0][0] < line[1][0] else -1
for i in range(line[0][0], line[1][0] + direction, direction):
coordinate = str(i) + "," + str(line[0][1])
if coordinate in pointsHit:
pointsHit[coordinate] = pointsHit[coordinate] + 1
else:
pointsHit[coordinate] = 1
elif (line[0][0] - line[1][0] == line[0][1] - line[1][1]):
c = line[0] if line[0][0] < line [1][0] else line[1]
endpoint = line[0] if line[0][0] > line [1][0] else line[1]
notConnectedToEndPoint = True
while notConnectedToEndPoint:
coordinate = str(c[0]) + "," + str(c[1])
if coordinate in pointsHit:
pointsHit[coordinate] = pointsHit[coordinate] + 1
else:
pointsHit[coordinate] = 1
if c[0] == endpoint[0] and c[1] == endpoint[1]:
notConnectedToEndPoint = False
c[0] = c[0] + 1
c[1] = c[1] + 1
elif (line[0][0] - line[1][0] == line[1][1] - line[0][1]):
c = line[0] if line[0][0] < line [1][0] else line[1]
endpoint = line[0] if line[0][0] > line [1][0] else line[1]
notConnectedToEndPoint = True
while notConnectedToEndPoint:
coordinate = str(c[0]) + "," + str(c[1])
if coordinate in pointsHit:
pointsHit[coordinate] = pointsHit[coordinate] + 1
else:
pointsHit[coordinate] = 1
if c[0] == endpoint[0] and c[1] == endpoint[1]:
notConnectedToEndPoint = False
c[0] = c[0] + 1
c[1] = c[1] - 1
else:
print("ERROR")
print("Line: ", str(line))
return pointsHit
def getAmountPointsMoreThanOne(hitPoints):
    counter = 0
    for coordinate, hits in hitPoints.items():
        if hits > 1:
            counter += 1
    return counter
lines = loadfile("data.txt")
straightLines = getStraightLines()
hitPointsStraight = getAllPointsHit(straightLines)
amountStraight = getAmountPointsMoreThanOne(hitPointsStraight)
hitPoints = getAllPointsHit(lines)
amount = getAmountPointsMoreThanOne(hitPoints)
print("solution day5a: " + str(amountStraight))
print("solution day5b: " + str(amount))
``` |
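`getAllPointsHit` keys its counts by `"x,y"` strings, so overlaps are simply dictionary values greater than one. A minimal check, appended to the script above, with a single horizontal segment from the published example input (traced through the horizontal branch by hand):

```python
# Sketch: the horizontal segment 0,9 -> 5,9 covers six points, each hit once.
segment = [[[0, 9], [5, 9]]]   # loadfile() format: list of [start, end] point pairs
hits = getAllPointsHit(segment)
assert len(hits) == 6
assert hits["0,9"] == 1
```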
{
"source": "JosephineRabbit/cycle_depth_estimation",
"score": 2
} |
#### File: cycle_depth_estimation/models/encoder_decoder.py
```python
import torch.nn as nn
import torch
import functools
import torch.nn.functional as F
def get_norm_layer(norm_type='batch'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_nonlinearity_layer(activation_type='PReLU'):
if activation_type == 'ReLU':
nonlinearity_layer = nn.ReLU(True)
elif activation_type == 'SELU':
nonlinearity_layer = nn.SELU(True)
elif activation_type == 'LeakyReLU':
nonlinearity_layer = nn.LeakyReLU(0.1, True)
elif activation_type == 'PReLU':
nonlinearity_layer = nn.PReLU()
else:
raise NotImplementedError('activation layer [%s] is not found' % activation_type)
return nonlinearity_layer
class _EncoderBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_EncoderBlock, self).__init__()
model = [
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.Conv2d(middle_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _InceptionBlock(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), width=1, drop_rate=0, use_bias=False):
super(_InceptionBlock, self).__init__()
self.width = width
self.drop_rate = drop_rate
for i in range(width):
layer = nn.Sequential(
nn.ReflectionPad2d(i*2+1),
nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=0, dilation=i*2+1, bias=use_bias)
)
setattr(self, 'layer'+str(i), layer)
self.norm1 = norm_layer(output_nc * width)
self.norm2 = norm_layer(output_nc)
self.nonlinearity = nonlinearity
self.branch1x1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(output_nc * width, output_nc, kernel_size=3, padding=0, bias=use_bias)
)
def forward(self, x):
result = []
for i in range(self.width):
layer = getattr(self, 'layer'+str(i))
result.append(layer(x))
output = torch.cat(result, 1)
output = self.nonlinearity(self.norm1(output))
output = self.norm2(self.branch1x1(output))
if self.drop_rate > 0:
output = F.dropout(output, p=self.drop_rate, training=self.training)
return self.nonlinearity(output+x)
class _DecoderUpBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_DecoderUpBlock, self).__init__()
model = [
nn.ReflectionPad2d(1),
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=0, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.ConvTranspose2d(middle_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _OutputBlock(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size=3, use_bias=False):
super(_OutputBlock, self).__init__()
model = [
nn.ReflectionPad2d(int(kernel_size/2)),
nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size, padding=0, bias=use_bias),
nn.Tanh()
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _UNetEncoder(nn.Module):
def __init__(self, input_nc,ngf=64, layers=4, norm='batch', activation='PReLU', drop_rate=0,
weight=0.1):
super(_UNetEncoder, self).__init__()
self.layers = layers
self.weight = weight
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
# encoder part
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv1 = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nonlinearity
)
self.conv2 = _EncoderBlock(ngf, ngf*2, ngf*2, norm_layer, nonlinearity, use_bias)
self.conv3 = _EncoderBlock(ngf*2, ngf*4, ngf*4, norm_layer, nonlinearity, use_bias)
self.conv4 = _EncoderBlock(ngf*4, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
center=[]
for i in range(7-layers):
center +=[
_InceptionBlock(ngf*8, ngf*8, norm_layer, nonlinearity, 7-layers, drop_rate, use_bias)
]
self.center = nn.Sequential(*center)
def forward(self, input):
conv1 = self.pool(self.conv1(input))
conv2 = self.pool(self.conv2.forward(conv1))
conv3 = self.pool(self.conv3.forward(conv2))
center_in = self.pool(self.conv4.forward(conv3))
center_out = self.center.forward(center_in)
result = [conv1,conv2,conv3,center_in,center_out]
return result
class _UNetDecoder(nn.Module):
def __init__(self,output_nc, ngf=64, layers=4, norm='batch', activation='PReLU',weight=0.1):
super(_UNetDecoder, self).__init__()
self.layers = layers
self.weight = weight
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
        # decoder part
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
center=[]
self.deconv_center=_DecoderUpBlock(ngf * 8, ngf * 8, ngf * 4, norm_layer, nonlinearity, use_bias)
self.deconv4 = _DecoderUpBlock(ngf * (4 + 4), ngf * 8, ngf * 2, norm_layer, nonlinearity, use_bias)
self.deconv3 = _DecoderUpBlock(ngf * (2 + 2) + output_nc, ngf * 4, ngf, norm_layer, nonlinearity, use_bias)
self.deconv2 = _DecoderUpBlock(ngf * (1 + 1) + output_nc, ngf * 2, int(ngf / 2), norm_layer, nonlinearity,
use_bias)
self.output4 = _OutputBlock(ngf * (4 + 4), output_nc, 3, use_bias)
self.output3 = _OutputBlock(ngf * (2 + 2) + output_nc, output_nc, 3, use_bias)
self.output2 = _OutputBlock(ngf * (1 + 1) + output_nc, output_nc, 3, use_bias)
self.output1 = _OutputBlock(int(ngf / 2) + output_nc, output_nc, 7, use_bias)
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
def forward(self, input):
conv1, conv2, conv3, center_in, center_out=input
result=[center_in]
center_out=self.deconv_center(center_out)
deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * self.weight], 1))
output4 = self.output4.forward(torch.cat([center_out, conv3 * self.weight], 1))
result.append(output4)
deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
output3 = self.output3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
result.append(output3)
deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
output2 = self.output2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
result.append(output2)
output1 = self.output1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
result.append(output1)
return result
```
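The channel arithmetic above only lines up when the decoder receives the five feature maps the encoder returns, in order, and when the input height and width are divisible by 16 (four pooling steps). A minimal smoke test, assuming the file is importable as `models/encoder_decoder.py` and using the default `ngf=64`:

```python
# Smoke test: run a dummy batch through the encoder/decoder pair and print shapes.
import torch
from models.encoder_decoder import _UNetEncoder, _UNetDecoder  # assumed import path

encoder = _UNetEncoder(input_nc=3, ngf=64, layers=4, norm='batch')
decoder = _UNetDecoder(output_nc=1, ngf=64, layers=4, norm='batch')

x = torch.randn(2, 3, 192, 640)    # H and W must be divisible by 16
with torch.no_grad():
    features = encoder(x)           # [conv1, conv2, conv3, center_in, center_out]
    outputs = decoder(features)     # [center_in, output4, output3, output2, output1]
print([tuple(f.shape) for f in features])
print([tuple(o.shape) for o in outputs])
```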
#### File: my_seg_depth/dis_seg/train2.py
```python
import sys
import time
from options.train_options import TrainOptions
from my_seg_depth.dis_seg.my_data import dataloader
from util.visualizer import Visualizer
from tensorboardX import SummaryWriter
from torch.nn import init
import torch
#from .model import Seg_Depth
#from .networks import G_1,Discriminator,Feature_net,SEG,DEP
from my_seg_depth.dis_seg.model2 import Seg_Depth
import torch
import itertools
from util.image_pool import ImagePool
import torch.nn as nn
from util.util import scale_pyramid
import os
import util.util as util
from collections import OrderedDict
my_weights = [
1.2,#ground
0.9,#road
1.3,#sidewalk
1.3,#parking
1.3,#railtrack
0.9,#building
1.1,#wall
1.2,#fence
1.2,#guardrail
1.3,#bridge
1.3,#tunnel
1.3,#pole
1.3,#polegroup
1.4,#trafficlight
1.4,#trafficsign
1.2,#vegetation
1.3,#terrain
1.1,#sky
1.5,#person
1.6,#rider
1.1,#car
1.3,#truck
1.3,#bus
    1.3,#caravan
1.5,#trailer
1.5,#train
1.6,#motorcycle
    1.4,#bicycle
]
def create_model_segdepth(opt):
print(opt.model)
model = Seg_Depth()
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model
if __name__ == '__main__':
opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
writer_train = SummaryWriter(log_dir='./summary/my_seg_depth')
writer_test = SummaryWriter(log_dir='./summary/my_seg_depth')
opt = TrainOptions().parse()
#opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
#dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
model =create_model_segdepth(opt)
visualizer = Visualizer(opt)
total_steps = 0
global_iter=0
for epoch in range(1,30):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset_train):
print(global_iter)
global_iter += 1
iter_start_time = time.time()
#if total_steps % opt.print_freq == 0:
# t_data = iter_start_time - iter_data_time
#visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
model.optimize_parameters(train_or_test='train')
if (global_iter % 20 == 0):
errors = model.get_current_losses()
for name, error in errors.items():
print('------------------')
writer_train.add_scalar("{}train/{}".format(opt.name, name), error, global_iter)
images = model.get_current_visuals()
for name, img in images.items():
img = img / img.max()
# if len(img.shape)==3:
img = torch.from_numpy(img.transpose([2, 0, 1]))
writer_train.add_image("{}train/img_{}".format(opt.name, name), img, global_iter)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('iter_%d' % total_steps)
iter_data_time = time.time()
if (global_iter % 200 == 0 and global_iter > 200):
print("validation start")
model.eval()
for ii, data_test in enumerate(dataset_test):
if (ii == 10):
break
with torch.no_grad():
model.set_input(data_test)
model.optimize_parameters(train_or_test='test')
errors = model.get_current_losses()
for name, error in errors.items():
writer_test.add_scalar("{}test/{}".format(opt.name, name), error, global_iter + ii)
images = model.get_current_visuals()
for name, img in images.items():
im = torch.from_numpy(img.transpose([2, 0, 1])).squeeze(0)
writer_test.add_image("{}test/img_{}".format(opt.name, name), im, global_iter + ii)
print("validation done")
if epoch % 5 == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
#model.update_learning_rate()
```
#### File: cycle_depth_estimation/my_seg_depth/my_data.py
```python
import numpy as np
import torch
import os
import os.path
import random
from PIL import Image
import torchvision.transforms as transforms
import torch.utils.data as data
import torchvision.transforms.functional as F
from options.train_options import TrainOptions
ignore_label = 255
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
class MaskToTensor(object):
def __call__(self, img):
return torch.from_numpy(np.array(img, dtype=np.int32)).long()
def make_dataset(path_files):
image_paths = []
assert os.path.isdir(path_files), '%s is not a valid directory' %path_files
#print(path_files)
for root, _, fnames in os.walk(path_files):
for fname in sorted(fnames):
#print(fname)
if is_image(fname):
# print('----------------')
path = os.path.join(root,fname)
image_paths.append(path)
#print(len(image_paths))
return image_paths, len(image_paths)
class CreateDataset(data.Dataset):
def initialize(self,opt,train_or_test):
self.opt = opt
self.train_or_test = train_or_test
if(train_or_test=='train'):
img_syn_path = opt.img_source_file_train
img_real_path = opt.img_target_file_train
lab_syn_path = opt.lab_source_file_train
lab_real_path = opt.lab_target_file_train
depth_source_path = opt.depth_source_file_train
else:
img_syn_path = opt.img_source_file_test
img_real_path = opt.img_target_file_test
lab_syn_path = opt.lab_source_file_test
lab_real_path = opt.lab_target_file_test
depth_source_path = opt.depth_source_file_test
self.img_syn_paths,self.img_syn_size = make_dataset(img_syn_path)
self.img_real_paths, self.img_real_size = make_dataset(img_real_path)
self.lab_syn_paths,self.lab_syn_size = make_dataset(lab_syn_path)
        self.lab_real_paths, self.lab_real_size = make_dataset(lab_real_path)
self.depth_source_paths, self.depth_source_size = make_dataset(depth_source_path)
self.transform_augment_normalize = get_transform(opt, True, normalize=True)
self.transform_no_augment_normalize = get_transform(opt, False, normalize=True)
self.mask2tensor = MaskToTensor()
self.invalid_synthia = [0, 1, 2, 3, 4, 5]
self.invalid_cityscape = [0, 1, 2, 3, 4, 5]
self.syn_id_to_realid = {0: 0,
1: 7,
2: 8,
3: 11,
4: 12,
5: 13,
6: 17,
7: 19,
8: 20,
9: 21,
10: 22,
11: 23,
12: 24,
13: 25,
14: 26,
15: 27,
16: 28,
17: 31,
18: 32,
19: 33,
20: 7,
21: 0,
22: 0,
}
self.real_id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label,
6: 0,
7: 1,
8: 2,
9: 3,
10: 4,
11: 5,
12: 6,
13: 7,
14: 8,
15: 9,
16: 10,
17: 11,
18: 12,
19: 13,
20: 14,
21: 15,
22: 16,
23: 17,
24: 18,
25: 19,
26: 20,
27: 21,
28: 22,
29: 23,
30: 24,
31: 25,
32: 26,
33: 27
}
def __getitem__(self, item):
index = random.randint(0, self.img_real_size - 1)
img_source_path = self.img_syn_paths[item % self.img_syn_size]
img_target_path = self.img_real_paths[index]
img_source = Image.open(img_source_path).convert('RGB')
img_target = Image.open(img_target_path).convert('RGB')
img_source = img_source.resize([640, 192], Image.BILINEAR)
img_target = img_target.resize([640, 192], Image.BILINEAR)
depth_source_path = self.depth_source_paths[item % self.depth_source_size]
depth_source = Image.open(depth_source_path) # .convert('RGB')
depth_source = depth_source.resize([640, 192], Image.BILINEAR)
lab_source_path = self.lab_syn_paths[item % self.lab_syn_size]
lab_target_path = self.lab_real_paths[index]
lab_source = Image.open(lab_source_path) # .convert('RGB')
lab_target = Image.open(lab_target_path) # .convert('RGB')
lab_source = lab_source.resize([640, 192], Image.NEAREST)
lab_target = lab_target.resize([640, 192], Image.NEAREST)
lab_source = np.array(lab_source)
lab_source_copy = lab_source.copy()
for k, v in self.syn_id_to_realid.items():
lab_source_copy[lab_source == k] = v
lab_target = np.array(lab_target)
lab_target_copy = lab_target.copy()
for k, v in self.real_id_to_trainid.items():
lab_target_copy[lab_target == k] = v
lab_source_copy[lab_source_copy == k] = v
lab_target = Image.fromarray(lab_target_copy.astype(np.uint8))
lab_source = Image.fromarray(lab_source_copy.astype(np.uint8))
if (self.train_or_test == 'train'):
img_source, lab_source,depth_source, scale = paired_transform_(self.opt, img_source, lab_source,depth_source)
img_source = self.transform_augment_normalize(img_source)
depth_source = self.transform_augment_normalize(depth_source)
lab_source = self.mask2tensor(np.array(lab_source))
target_dummy = lab_target
if (self.train_or_test == 'train'):
img_target, lab_target, scale = paired_transform(self.opt, img_target, lab_target)
img_target = self.transform_augment_normalize(img_target)
lab_target = self.mask2tensor(np.array(lab_target))
lab_source = lab_source.unsqueeze(0)
lab_target = lab_target.unsqueeze(0)
depth_source = depth_source.unsqueeze(0)
del target_dummy
return {'img_syn': img_source, 'img_real': img_target,
'seg_l_syn': lab_source, 'seg_l_real': lab_target,
'dep_l_syn': depth_source,
'img_source_paths': img_source_path, 'img_target_paths': img_target_path,
'lab_source_paths': lab_source_path, 'lab_target_paths': lab_target_path,
'depth_source_path': depth_source_path
}
def __len__(self):
return max(self.img_syn_size, self.img_real_size)
def name(self):
return 'T^2Dataset'
def paired_transform(opt, image, seg):
scale_rate = 1.0
opt.flip=True
opt.rotation=True
if opt.flip:
n_flip = random.random()
if n_flip > 0.5:
image = F.hflip(image)
seg= F.hflip(seg)
if opt.rotation:
n_rotation = random.random()
if n_rotation > 0.5:
degree = random.randrange(-500, 500)/100
image = F.rotate(image, degree, Image.BILINEAR)
seg=F.rotate(seg, degree, Image.NEAREST)
return image, seg, scale_rate
def paired_transform_(opt, image, seg,dep):
scale_rate = 1.0
opt.flip=True
opt.rotation=True
if opt.flip:
n_flip = random.random()
if n_flip > 0.5:
image = F.hflip(image)
seg= F.hflip(seg)
dep = F.hflip(dep)
if opt.rotation:
n_rotation = random.random()
if n_rotation > 0.5:
degree = random.randrange(-500, 500)/100
image = F.rotate(image, degree, Image.BILINEAR)
seg=F.rotate(seg, degree, Image.NEAREST)
dep = F.rotate(dep,degree,Image.NEAREST)
return image, seg,dep, scale_rate
def get_transform(opt, augment,normalize):
transforms_list = []
# if augment:
# if opt.isTrain:
# transforms_list.append(transforms.ColorJitter(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0))
if normalize:
transforms_list += [
transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
else:
transforms_list += [
transforms.ToTensor()
]
return transforms.Compose(transforms_list)
def dataloader(opt,train_or_test):
datasets = CreateDataset()
datasets.initialize(opt,train_or_test)
dataset = data.DataLoader(datasets, batch_size=opt.batch_size, shuffle=True, num_workers=8)
return dataset
if __name__ == '__main__':
import matplotlib.pyplot as plt
opt = TrainOptions().parse()
dataset=dataloader(opt,train_or_test='train')
for i, data in enumerate(dataset):
img=data['lab_target'].data.numpy()
print(img.shape)
print(img.max())
plt.imshow(np.squeeze(img[0,:,:,:]))
plt.show()
```
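`CreateDataset.initialize` reads five directory options for the chosen split (plus `batch_size` inside `dataloader`), all discovered via `make_dataset`, so wiring it up only needs a namespace carrying those fields. A minimal sketch with placeholder paths (every path below is hypothetical):

```python
# Sketch: minimal options object for dataloader(); all paths are placeholders.
from argparse import Namespace
from my_seg_depth.my_data import dataloader  # import path used by train.py above

opt = Namespace(
    img_source_file_train='/data/synthia/rgb',        # hypothetical directories
    img_target_file_train='/data/cityscapes/rgb',
    lab_source_file_train='/data/synthia/labels',
    lab_target_file_train='/data/cityscapes/labels',
    depth_source_file_train='/data/synthia/depth',
    batch_size=4,
    flip=True, rotation=True,                          # overwritten by paired_transform()
)
train_loader = dataloader(opt, train_or_test='train')
batch = next(iter(train_loader))
print(batch['img_syn'].shape, batch['seg_l_syn'].shape, batch['dep_l_syn'].shape)
```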
#### File: cycle_depth_estimation/my_seg_depth/train.py
```python
import sys
import time
from options.train_options import TrainOptions
from my_seg_depth.my_data import dataloader
from util.visualizer import Visualizer
from tensorboardX import SummaryWriter
import my_seg_depth.networks as networks
from torch.nn import init
import torch
#from .model import Seg_Depth
#from .networks import G_1,Discriminator,Feature_net,SEG,DEP
import torch
import itertools
from util.image_pool import ImagePool
import torch.nn as nn
from util.util import scale_pyramid
import os
import util.util as util
from collections import OrderedDict
class BaseModel():
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self,opt):
self.opt = opt
# self.gpu_ids = opt.gpu_ids
self.is_Train = opt.isTrain
# self.device = torch.device('cuda:{}'.format(self.gpu_ids[0,1])) \
# if self.gpu_ids else torch.device('cpu')
self.save_dir = os.path.join(opt.checkpoints_dir,opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self,input):
self.input = input
def forward(self):
pass
    def setup(self, opt, parser=None):
        if self.is_Train:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.is_Train or opt.continue_train:
            self.load_networks(opt.epoch)
        self.print_networks(opt.verbose)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
net.eval()
def test(self):
with torch.no_grad():
self.forward()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self,train_or_test):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
    # return visualization images. train.py will display these images and save them to an HTML file
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_data=value[-1].data
else:
visual_data=value.data
segname_syn=['syn_seg_l',
'syn_seg_pre']
segname_real=['real_seg_l',
'real_seg_pre']
if (name in segname_syn):
visual_ret[name]=util.label2im(visual_data)
elif (name in segname_real):
visual_ret[name]=util.label2im(visual_data)
else:
visual_ret[name] = util.tensor2im(visual_data)
return visual_ret
    # return training losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self,'loss_'+name))
return errors_ret
# save models to the disk
def save_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_' + name)
torch.save(net.state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
# load models from the disk
def load_networks(self, epoch):
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net_' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
# print network information
def print_networks(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
    # set requires_grad=False to avoid computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
# if len(gpu_ids) > 0:
# assert(torch.cuda.is_available())
# net.to(gpu_ids[0])
# net = torch.nn.DataParallel(net, gpu_ids)
net = net.cuda()
net = nn.DataParallel(net)
init_weights(net, init_type, gain=init_gain)
return net
class Seg_Depth(BaseModel):
def name(self):
return 'Seg_Depth_Model'
def initialize(self,opt):
BaseModel.initialize(self,opt)
self.loss_names=['G1_dis','G1_seg','D_G1',
'G2_dis','G2_seg','D_G2',
'seg_syn','seg_real','dep_syn'
]
self.visual_names = ['syn_img','real_img','syn_seg_l','real_seg_l',
'syn_seg_pre','real_seg_pre',
'syn_dep_l','syn_dep_pre']
if self.is_Train:
self.model_names = ['G_1', 'G_2', 'Dis_en',
'Feature','Seg_de','Dep_de'
]
else: # during test time, only load Gs
self.model_names = ['G_1', 'G_2',
'Feature','Seg_de','Dep_de']
self.net_G_1 = networks.define_G(opt.input_nc, opt.output_nc, opt.netG, opt.init_type, opt.init_gain)
self.net_G_2 = networks.define_G(opt.input_nc, opt.output_nc, opt.netG,
opt.init_type, opt.init_gain)
self.net_Dis_en = networks.define_D(input_nc=128)
# self.net_Feature = networks.Feature_net(input_nc=128,mid_nc =1024)
self.net_Feature = init_net(networks.Feature_net(input_nc=128,mid_nc =1024))
self.net_Seg_de = init_net(networks.SEG(n_cls=28))
self.net_Dep_de = init_net(networks.DEP())
self.optimizer_G_1 = torch.optim.Adam(self.net_G_1.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_G_2 = torch.optim.Adam(self.net_G_2.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_F = torch.optim.Adam(self.net_Feature.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_Seg = torch.optim.Adam(self.net_Seg_de.parameters(),
lr=opt.lr,betas=(opt.beta1,0.999))
self.optimizer_Dep = torch.optim.Adam(self.net_Dep_de.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.net_Dis_en.parameters()),
lr=opt.lr_D, betas=(opt.beta1, 0.999))
self.syn_imgpool = ImagePool(opt.pool_size)
self.real_imgpool = ImagePool(opt.pool_size)
self.criterionGAN = networks.GANLoss(use_lsgan =not opt.no_lsgan).cuda()
self.criterionSeg = torch.nn.CrossEntropyLoss(size_average=True, ignore_index=255).cuda()
self.criterionDep = torch.nn.L1Loss()
def set_input(self,input):
self.real_img = input['img_real'].cuda()
self.syn_img = input['img_syn'].cuda()
self.real_seg_l = input['seg_l_real'].squeeze(1).cuda()
self.syn_seg_l = input['seg_l_syn'].squeeze(1).cuda()
#print(self.syn_seg_l.shape)
self.syn_dep_l = input['dep_l_syn'].squeeze(1).cuda()
def forward(self):
self.syn_features1 = self.net_G_1(self.syn_img)
self.real_features1 = self.net_G_2(self.real_img)
def forward_features2(self,features1):
self.features2 = self.net_Feature(features1.detach())
return self.features2
def backward_D(self):
pre_s = self.net_Dis_en(self.syn_features1.detach())
self.loss_D_G1 = self.criterionGAN(pre_s,False)
pre_r = self.net_Dis_en(self.real_features1.detach())
self.loss_D_G2 = self.criterionGAN(pre_r,True)
return self.loss_D_G1+self.loss_D_G2
def detach_list(self,list):
for i in list:
i = i.detach()
return list
def backward_Seg(self):
syn_f2,syn_inf = self.net_Feature(self.syn_features1.detach())
real_f2, real_inf = self.net_Feature(self.real_features1.detach())
self.syn_seg_pre = self.net_Seg_de(self.detach_list(syn_f2),syn_inf.detach())
self.real_seg_pre = self.net_Seg_de(self.detach_list(real_f2), real_inf.detach())
self.loss_seg_syn = self.criterionSeg(self.syn_seg_pre,self.syn_seg_l)
self.loss_seg_real = self.criterionSeg(self.real_seg_pre,self.real_seg_l)
return self.loss_seg_real+self.loss_seg_syn
def backward_Dep(self):
syn_f2, syn_inf = self.net_Feature(self.syn_features1.detach())
self.syn_dep_pre = self.net_Dep_de(self.detach_list(syn_f2),syn_inf.detach())
self.loss_dep_syn = self.criterionDep(self.syn_dep_pre,self.syn_dep_l)
return self.loss_dep_syn
def backward_G(self):
#print(self.syn_features1.shape)
pre_s = self.net_Dis_en(self.syn_features1)
self.loss_G1_dis = self.criterionGAN(pre_s, True)
pre_r = self.net_Dis_en(self.real_features1)
self.loss_G2_dis = self.criterionGAN(pre_r, False)
syn_f2, syn_inf = self.net_Feature(self.syn_features1)
real_f2, real_inf = self.net_Feature(self.real_features1)
seg_syn_pre = self.net_Seg_de(syn_f2, syn_inf)
seg_real_pre = self.net_Seg_de(real_f2, real_inf)
#print(seg_real_pre.shape,self.real_seg_l.shape)
self.loss_G1_seg = self.criterionSeg(seg_syn_pre, self.syn_seg_l)
self.loss_G2_seg = self.criterionSeg(seg_real_pre, self.real_seg_l)
# self.loss_G_1 = self.+loss_seg_real
# self.loss_G_2 = loss_G_syn+loss_seg_syn
return self.loss_G1_dis+self.loss_G1_seg,self.loss_G2_dis+self.loss_G2_seg
def optimize_parameters(self,train_or_test):
self.set_requires_grad(self.net_Dis_en, False)
self.set_requires_grad([self.net_G_1, self.net_G_2,
self.net_Seg_de, self.net_Feature, self.net_Dep_de], False)
if (train_or_test=='train'):
print('train')
self.set_requires_grad(self.net_Dis_en,False)
self.set_requires_grad([self.net_G_1,self.net_G_2,
self.net_Seg_de,self.net_Feature,self.net_Dep_de],True)
self.forward()
self.optimizer_G_1.zero_grad()
self.optimizer_G_2.zero_grad()
self.loss_G1,self.loss_G2 =self.backward_G()
if (train_or_test == 'train'):
print('g_update')
self.loss_G1.backward()
self.loss_G2.backward()
self.optimizer_G_1.step()
self.optimizer_G_2.step()
self.optimizer_F.zero_grad()
self.loss_s = self.backward_Seg()
self.loss_d = self.backward_Dep()
self.loss_ff = 5*self.loss_s+self.loss_d
if (train_or_test == 'train'):
print('F udate')
self.loss_ff.backward()
self.optimizer_F.step()
#self.forward()
self.optimizer_Seg.zero_grad()
self.loss_s = self.backward_Seg()
if (train_or_test == 'train'):
self.loss_s.backward()
print('seg update')
self.optimizer_Seg.step()
#self.forward()
self.optimizer_Dep.zero_grad()
self.loss_d = self.backward_Dep()
if (train_or_test == 'train'):
self.loss_d.backward()
print('dep update')
self.optimizer_Dep.step()
if (train_or_test == 'train'):
self.set_requires_grad(self.net_Dis_en, True)
self.set_requires_grad([self.net_G_1, self.net_G_2,
self.net_Seg_de, self.net_Feature, self.net_Dep_de], False)
self.optimizer_D.zero_grad()
self.loss_D = self.backward_D()
self.loss_D.backward()
print('dis update')
self.optimizer_D.step()
def create_model_segdepth(opt):
print(opt.model)
model = Seg_Depth()
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model
if __name__ == '__main__':
opt = TrainOptions().parse()
opt.name = 'synthia_segCycle'
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
writer_train = SummaryWriter(log_dir='./summary/my_seg_depth')
writer_test = SummaryWriter(log_dir='./summary/my_seg_depth')
opt = TrainOptions().parse()
#opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
#dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
model =create_model_segdepth(opt)
visualizer = Visualizer(opt)
total_steps = 0
global_iter=0
for epoch in range(1,30):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset_train):
print(global_iter)
if (global_iter % 200 == 0 and global_iter > 200):
print("validation start")
model.eval()
for ii, data_test in enumerate(dataset_test):
if (ii == 50):
break
with torch.no_grad():
model.set_input(data_test)
model.optimize_parameters(train_or_test='test')
errors = model.get_current_losses()
for name, error in errors.items():
writer_test.add_scalar("{}test/{}".format(opt.name, name), error, global_iter + ii)
images = model.get_current_visuals()
for name, img in images.items():
im = torch.from_numpy(img.transpose([2, 0, 1])).squeeze(0)
writer_test.add_image("{}test/img_{}".format(opt.name, name), im, global_iter + ii)
print("validation done")
global_iter += 1
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
visualizer.reset()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data)
model.optimize_parameters(train_or_test='train')
if (global_iter % 50 == 0):
errors = model.get_current_losses()
for name, error in errors.items():
print('------------------')
writer_train.add_scalar("{}train/{}".format(opt.name, name), error, global_iter)
images = model.get_current_visuals()
for name, img in images.items():
print(name,img.shape)
img = img / img.max()
# if len(img.shape)==3:
img = torch.from_numpy(img.transpose([2, 0, 1]))
writer_train.add_image("{}train/img_{}".format(opt.name, name), img, global_iter)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('iter_%d' % total_steps)
iter_data_time = time.time()
if epoch % 5 == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
```
#### File: cycle_depth_estimation/new_model/train7.py
```python
import sys
import time
from options.train_options import TrainOptions
from try_data import dataloader
from util.visualizer import Visualizer
from tensorboardX import SummaryWriter
import cv2
import shutil
import numpy as np
import os
from torch.nn import init
import torch
#from .model import Seg_Depth
#from .networks import G_1,Discriminator,Feature_net,SEG,DEP
from model7 import Seg_Depth
import torch
import itertools
from util.image_pool import ImagePool
import torch.nn as nn
from my_eval import eval_metric
from collections import OrderedDict
def create_model_segdepth(opt):
print(opt.model)
model = Seg_Depth()
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model
if __name__ == '__main__':
opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
writer_train = SummaryWriter(log_dir='./summary/215_vt_t')
writer_test = SummaryWriter(log_dir='./summary/215_vt_t')
opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
model =create_model_segdepth(opt)
visualizer = Visualizer(opt)
total_steps = 0
global_iter=0
for epoch in range(1,600):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset_train):
model.train()
print(global_iter)
global_iter += 1
iter_start_time = time.time()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data,train_or_test='train')
model.optimize_parameters(train_or_test='train')
model.visual(train_or_test='train')
if (global_iter % 20 == 0):
errors = model.get_current_losses()
for name, error in errors.items():
writer_train.add_scalar("{}train/{}".format(opt.name, name), error, global_iter)
images = model.get_current_visuals()
for name, img in images.items():
img = img / img.max()
# if len(img.shape)==3:
# if (name in segname_syn):
print('show_shape', img.shape,'show_name',name)
img = torch.from_numpy(img.transpose([2, 0, 1]))
writer_train.add_image("{}train/img_{}".format(opt.name, name), img, global_iter)
if global_iter % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('iter_%d' % total_steps)
iter_data_time = time.time()
if global_iter % 500 == 0:
print("validation start")
model.eval()
model.visual(train_or_test='test')
shutil.rmtree('/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/data/test_re')
os.mkdir('/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/data/test_re')
for ii, data_test in enumerate(dataset_test):
model.set_input(data_test,train_or_test='test')
model.optimize_parameters(train_or_test='test')
images = model.get_eval_visuals()
for name, imgs in images.items():
#writer_train.add_image("{}test/img_{}".format(opt.name, name), img, global_iter + ii)
if name=='real_dep_pre':
for nn in range(len(imgs)):
img = imgs[nn]
print(img.max(),img.min())
#img = img / 255
# print('show_shape',img.shape,name)
# if len(img.shape)==3:
img = torch.from_numpy(img.transpose([2, 0, 1]))
print(str(model.return_name()[nn]),'++++++++++++++')
cv2.imwrite(
# '/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/save_kitti/2011_09_26/2011_09_26_drive_0001_sync/image_02/dep_ref/'
'/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/data/test_re/'+
str(model.return_name()[nn]),
np.array(img[0,:, :]))
abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3=eval_metric()
print('{:>10},{:>10},{:>10},{:>10},{:>10},{:>10},{:>10}'.format('abs_rel', 'sq_rel', 'rmse', 'rmse_log','a1', 'a2', 'a3'))
print('{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'.format(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3))
with open('records.txt', 'a+') as f:
f.write(str(epoch) + "-"+str(global_iter) + '{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'.format(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3) + "\n")
print("validation done")
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'abs_rel'), abs_rel, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'sq_rel'), sq_rel, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'rmse'), rmse, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'rmse_log'), rmse_log, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'a1'), a1, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'a2'), a2, global_iter)
writer_train.add_scalar("{}val_dep/{}".format(opt.name, 'a3'), a3, global_iter)
#if epoch % 2 == 0:
# print('saving the model at the end of epoch %d, iters %d' %
# (epoch, total_steps))
# model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
```
#### File: cycle_depth_estimation/new_model/train_new.py
```python
import sys
import time
from options.train_options import TrainOptions
from my_seg_depth.trymulti.semantic_trans.try_data import dataloader
from util.visualizer import Visualizer
from tensorboardX import SummaryWriter
import cv2
import shutil
import numpy as np
import os
from torch.nn import init
import torch
#from .model import Seg_Depth
#from .networks import G_1,Discriminator,Feature_net,SEG,DEP
from mdoel6 import Seg_Depth
import torch
import itertools
from util.image_pool import ImagePool
import torch.nn as nn
from my_eval import eval_metric
from collections import OrderedDict
def create_model_segdepth(opt):
print(opt.model)
model = Seg_Depth()
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model
if __name__ == '__main__':
opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
writer_train = SummaryWriter(log_dir='./summary/1_30_vt_t')
writer_test = SummaryWriter(log_dir='./summary/1_30_vt_t')
opt = TrainOptions().parse()
dataset_train = dataloader(opt, train_or_test='train')
dataset_test = dataloader(opt, train_or_test='test')
model =create_model_segdepth(opt)
visualizer = Visualizer(opt)
total_steps = 0
global_iter=0
for epoch in range(1,30):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset_train):
print(global_iter)
global_iter += 1
iter_start_time = time.time()
total_steps += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data,train_or_test='train')
model.optimize_parameters(train_or_test='train')
model.visual(train_or_test='train')
if (global_iter % 20 == 0):
errors = model.get_current_losses()
for name, error in errors.items():
print('------------------')
writer_train.add_scalar("{}train/{}".format(opt.name, name), error, global_iter)
images = model.get_current_visuals()
for name, img in images.items():
img = img / img.max()
# if len(img.shape)==3:
# if (name in segname_syn):
print('show_shape', img.shape,'show_name',name)
img = torch.from_numpy(img.transpose([2, 0, 1]))
writer_train.add_image("{}train/img_{}".format(opt.name, name), img, global_iter)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_networks('iter_%d' % total_steps)
iter_data_time = time.time()
if global_iter % 1000 == 0:
print("validation start")
model.eval()
model.visual(train_or_test='test')
shutil.rmtree('/home/dut-ai/Documents/depth_selection/val_selection_cropped/pred')
os.mkdir('/home/dut-ai/Documents/depth_selection/val_selection_cropped/pred')
for ii, data_test in enumerate(dataset_test):
with torch.no_grad():
model.set_input(data_test,train_or_test='test')
model.optimize_parameters(train_or_test='test')
images = model.get_eval_visuals()
for name, img in images.items():
img = img / img.max()
print('show_shape',img.shape)
# if len(img.shape)==3:
img = torch.from_numpy(img.transpose([2, 0, 1]))
#writer_train.add_image("{}test/img_{}".format(opt.name, name), img, global_iter + ii)
if name=='real_dep_ref':
print(str(model.return_name()[0][0])+'groundtruth_depth'+str(model.return_name()[1][0]))
cv2.imwrite(
# '/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/save_kitti/2011_09_26/2011_09_26_drive_0001_sync/image_02/dep_ref/'
'/home/dut-ai/Documents/depth_selection/val_selection_cropped/pred/'+
str(model.return_name()[0][0])+'groundtruth_depth'+str(model.return_name()[1][0]), np.array(img[0,:, :])*255)
abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3=eval_metric()
print('{:>10},{:>10},{:>10},{:>10},{:>10},{:>10},{:>10}'.format('abs_rel', 'sq_rel', 'rmse', 'rmse_log','a1', 'a2', 'a3'))
print('{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'.format(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3))
with open('records.txt', 'a+') as f:
f.write(str(epoch) + "-"+str(global_iter) + '{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'.format(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3) + "\n")
print("validation done")
#if epoch % 2 == 0:
# print('saving the model at the end of epoch %d, iters %d' %
# (epoch, total_steps))
# model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
```
#### File: cycle_depth_estimation/new_multi/model5.py
```python
import torch
import itertools
from util.image_pool import ImagePool
#from .base_model import BaseModel
from new_multi import networks5_ds
from new_multi.networks5_ds import init_net, init_weights,get_masks
import torch.nn as nn
import os
import util.util as util
from collections import OrderedDict
import functools
import torch.nn.functional as F
import torch.autograd as autograd
import numpy as np
class BaseModel():
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def name(self):
return 'BaseModel'
def initialize(self,opt):
self.opt = opt
self.is_Train = opt.isTrain
self.save_dir = os.path.join(opt.checkpoints_dir,opt.name)
if opt.resize_or_crop != 'scale_width':
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.dep_ref_name = []
self.image_paths = []
def set_input(self,input):
self.input = input
def forward(self):
pass
def setup(self, opt, parser=None):
if self.is_Train:
self.schedulers = [networks5_ds.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.is_Train or opt.continue_train:
self.load_networks5_ds(opt.epoch)
self.print_networks5_ds(opt.verbose)
# make models eval mode during test time
def eval(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
net.eval()
def test(self):
with torch.no_grad():
self.forward()
# get image paths
def get_image_paths(self):
return self.image_paths
def optimize_parameters(self,train_or_test):
pass
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
    # return visualization images. train.py will display these images and save them to an HTML file
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_data=value[-1].data
else:
visual_data=value.data
#print(name,visual_data.shape)
segname_syn=['syn_seg_l',
'syn_seg_pre']
segname_real=['real_seg_l',
'real_seg_pre']
if (name in segname_syn):
visual_ret[name]=util.label2im(visual_data)
elif (name in segname_real):
visual_ret[name]=util.label2im(visual_data)
else:
visual_ret[name] = util.tensor2im(visual_data)
return visual_ret
def get_eval_visuals(self):
visual_ret = OrderedDict()
for name in self.dep_ref_name:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_data=value[-1].data
else:
visual_data=value.data
visual_ret[name] = util.tensor2im(visual_data)
return visual_ret
def save_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_data=value[-1].data
else:
visual_data=value.data
segname_syn=['syn_seg_l',
'syn_seg_pre']
segname_real=['real_seg_l',
'real_seg_pre']
if (name in segname_syn):
visual_ret[name]=util.label2im(visual_data)
elif (name in segname_real):
visual_ret[name]=util.label2im(visual_data)
else:
visual_ret[name] = util.tensor2im(visual_data)
return visual_ret
    # return training losses/errors. train.py will print out these errors as debugging information
def get_current_losses(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
# float(...) works for both scalar tensor and float number
errors_ret[name] = float(getattr(self,'loss_'+name))
return errors_ret
# save models to the disk
def save_networks(self, epoch):
for name in self.model_names:
save_dir = './checkpoints/1_21'
print(name)
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(save_dir, save_filename)
net = getattr(self, 'net_' + name)
torch.save(net.state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
# load models from the disk
# print network information
def print_networks5_ds(self, verbose):
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net_' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
    # set requires_grad=False to avoid unnecessary computation
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
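# Editor's note (not in the original file): set_requires_grad is the usual
# CycleGAN/pix2pix helper for freezing networks while another one is optimized, e.g.
#   self.set_requires_grad([self.net_FD1, self.net_FD2], False)  # freeze discriminators
#   ...optimize the generators...
#   self.set_requires_grad([self.net_FD1, self.net_FD2], True)   # unfreeze for their own update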
class Seg_Depth(BaseModel):
def name(self):
return 'Seg_Depth_Model'
def initialize(self,opt):
BaseModel.initialize(self,opt)
self.net_FD1 = networks5_ds._Discriminator(input_nc=512).cuda()
self.net_FD2 = networks5_ds._Discriminator(input_nc=256).cuda()
self.net_FD3 = networks5_ds._Discriminator(input_nc=128).cuda()
self.net_FD1 = init_net(self.net_FD1)
self.net_FD2 = init_net(self.net_FD2)
self.net_FD3 = init_net(self.net_FD3)
#self.net_FD0 = networks5_ds._Discriminator(input_nc=64).cuda()
self.net_G_1 = networks5_ds.G_1().cuda()
self.net_G_1 = nn.DataParallel(self.net_G_1)
self.net_G_1.load_state_dict(torch.load(
'/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/checkpoints/1_14/iter_8000_net_G_1.pth'))
print('1')
self.net_G_2 = networks5_ds.General_net().cuda()
self.net_G_2 = nn.DataParallel(self.net_G_2)
self.net_G_2.load_state_dict(torch.load(
'/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/checkpoints/1_14/iter_8000_net_G_2.pth'))
print('2')
# self.net_Seg_de = networks5_ds.SEG(n_cls=28).cuda()
# # self.net_Seg_de = init_net(self.net_Seg_de)
# self.net_Seg_de = nn.DataParallel(self.net_Seg_de)
# self.net_Seg_de.load_state_dict(torch.load(
# './checkpoints/1_14/iter_8000_net_Seg_de.pth'))
#
# self.net_Dep_de = networks5_ds.DEP().cuda()
# # self.net_Dep_de = init_net(self.net_Dep_de)
# self.net_Dep_de = nn.DataParallel(self.net_Dep_de)
# self.net_Dep_de.load_state_dict(torch.load(
# './checkpoints/1_14/iter_8000_net_Dep_de.pth'))
# './checkpoints/1_3_0_vt_t/iter_70000_net_Dep_de.pth'))
self.net_R_D = networks5_ds.R_dep().cuda()
# self.net_R_D = nn.DataParallel(self.net_R_D).cuda()
self.net_R_D = init_net(self.net_R_D)
# self.net_R_D.load_state_dict(torch.load(
# '/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/checkpoints/1_14/iter_8000_net_R_D.pth'
# '/home/dut-ai/Documents/temp/code/pytorch-CycleGAN-and-pix2pix/my_seg_depth/trymulti/semantic_trans/checkpoints/1_14/iter_8000_net_R_D.pth'
# './checkpoints/1_1_4/iter_34000_net_R_D.pth'
# './checkpoints/1_3_0_vt_t/iter_70000_net_R_D.pth'
#))
self.optimizer_G_1 = torch.optim.Adam(self.net_G_1.parameters(),
lr=opt.lr/5, betas=(opt.beta1, 0.999))
self.optimizer_G_2 = torch.optim.Adam(self.net_G_2.parameters(),
lr=opt.lr/3,betas=(opt.beta1,0.999))
# self.optimizer_Seg = torch.optim.Adam(self.net_Seg_de.parameters(),
# lr=opt.lr/2,betas=(opt.beta1,0.999))
# self.optimizer_Dep = torch.optim.Adam(self.net_Dep_de.parameters(),
# lr=opt.lr/2, betas=(opt.beta1, 0.999))
self.optimizer_R_D = torch.optim.Adam(self.net_R_D.parameters(),
lr=opt.lr/2, betas=(opt.beta1, 0.999)
)
# self.optimizer_FD0 = torch.optim.Adam(self.net_FD0.parameters(),
# lr=opt.lr/4, betas=(opt.beta1, 0.999)
# )
self.optimizer_FD1 = torch.optim.Adam(self.net_FD1.parameters(),
lr=opt.lr/4, betas=(opt.beta1, 0.999)
)
self.optimizer_FD2 = torch.optim.Adam(self.net_FD2.parameters(),
lr=opt.lr/4, betas=(opt.beta1, 0.999)
)
self.optimizer_FD3 = torch.optim.Adam(self.net_FD3.parameters(),
lr=opt.lr/4, betas=(opt.beta1, 0.999)
)
self.syn_imgpool = ImagePool(opt.pool_size)
self.real_imgpool = ImagePool(opt.pool_size)
self.criterionGAN = networks5_ds.GANLoss(use_lsgan =True)
self.criterionSeg = torch.nn.CrossEntropyLoss(size_average=True, ignore_index=255)
self.criterionDep =torch.nn.L1Loss()
self.criterionStyle = torch.nn.MSELoss()
# self.criterionEdge = nn.BCELoss()
self.criterionDep_bce = networks5_ds.BCEDepLoss()
def visual(self,train_or_test):
if train_or_test=='train':
self.loss_names = [
]
self.visual_names = ['syn_img', 'real_img', 'syn_seg_l', # 'real_seg_l',
#'syn_seg_pre', 'real_seg_pre',
'syn_dep_l',
#'syn_dep_pre', 'real_dep_pre',
'syn_dep_ref', 'real_dep_ref',
]
self.dep_ref_name = ['real_dep_ref']
self.model_names = ['G_1', 'G_2',# 'Dis0_en', # ,'Dis1_en',#'Dis2_en','Dis3_en',
#'Dep_de',
#'Seg_de',
'R_D' # ,'DIS'
]
else: # during test time, only load Gs
self.loss_names = [ # 'G2_dis',
]
self.visual_names = ['syn_img', 'real_img', 'syn_seg_l', # 'real_seg_l',
#'syn_seg_pre', 'real_seg_pre',
'syn_dep_l',
#'syn_dep_pre', 'real_dep_pre',
'syn_dep_ref', 'real_dep_ref',
]
self.dep_ref_name = ['real_dep_ref']
self.model_names = ['G_1', 'G_2', #'Dep_de',
#'Seg_de',
'R_D']
def detach_list(self,list):
for i in list:
i = i.detach()
return list
def set_input(self,input,train_or_test):
self.real_img = input['img_real'].cuda()
self.syn_img = input['img_syn'].cuda()
if train_or_test =='train':
self.is_Train=True
self.real_seg_l = input['seg_l_real'].squeeze(1).cuda()
self.syn_seg_l = input['seg_l_syn'].squeeze(1).cuda()
self.syn_dep_l = input['dep_l_syn'].squeeze(1).cuda()
self.real_img_paths = input['img_source_paths']
self.syn_img_paths = input['img_target_paths']
self.syn_dep_ls = input['depth_l_s'].float().cuda()
self.syn_seg_le = input['seg_e_syn'].float().cuda()
self.real_seg_le = input['seg_e_real'].float().cuda()
else:
self.f_name = input['f_name']
self.l_name = input['l_name']
self.is_Train=False
# self.real_seg_l = input['seg_l_real'].squeeze(1).cuda()
self.syn_seg_l = input['seg_l_syn'].squeeze(1).cuda()
self.syn_dep_l = input['dep_l_syn'].squeeze(1).cuda()
self.real_img_paths = input['img_source_paths']
self.syn_img_paths = input['img_target_paths']
self.syn_dep_ls = input['depth_l_s'].float().cuda()
self.syn_seg_le = input['seg_e_syn'].float().cuda()
#self.real_seg_le = input['seg_e_real'].float().cuda()
def return_name(self):
return self.f_name,self.l_name
def calc_gradient_penalty(self,netD, real_data, fake_data):
interpolates = real_data
#for i in range(12):
alpha = torch.rand(1).cuda()
interpolates[0,:,:,:] = alpha*real_data[0,:,:,:]+ ((1 - alpha) * fake_data[0,:,:,:])
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
LAMBDA = 10
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
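    # Editor's note (not in the original file): this is the WGAN-GP style penalty
    # lambda * E[(||grad_x D(x_hat)||_2 - 1)^2], where x_hat interpolates between a
    # real and a fake sample. As written, only batch element 0 is actually
    # interpolated (the commented-out loop above suggests the whole batch was
    # intended), so the remaining elements stay purely real.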
def backward_D(self):
self.set_requires_grad([ # self.net_Dis3_en, self.net_Dis2_en,
#self.net_Dis1_en,
self.net_FD0], True)
self.optimizer_FD0.zero_grad()
#self.optimizer_D1.zero_grad()
# self.optimizer_D2.zero_grad()
# self.optimizer_D3.zero_grad()
s_f = self.net_G_1(self.syn_img)
#syn_features1,syn_Features = self.net_G_2(s_f,'S')
#del syn_Features
pre_s= self.net_FD0(s_f)
loss_D_syn0 = self.criterionGAN(pre_s, False)
pre_r0 = self.net_FD0(self.real_Features[0])
# pre_r2 = self.net_Dis2_en(real_features1[:, 512:768, :, :])
# pre_r3 = self.net_Dis3_en(real_features1[:, 768:1024, :, :])
loss_D_real0 = self.criterionGAN(pre_r0, True)
self.loss_D0 = loss_D_real0+ loss_D_syn0
self.loss_D0.backward()
print('DF0 update')
self.optimizer_FD0.step()
# self.loss_D_real2 = self.criterionGAN(pre_r2, False)
# self.loss_D_real3 = self.criterionGAN(pre_r3, False)
self.loss_D_real = (loss_D_real0).detach()
self.loss_D_syn = (loss_D_syn0).detach()
#+self.loss_D_real2+self.loss_D_real3).detach()
def backward_DISDEP(self):
# self.set_requires_grad([self.net_FD1,self.net_FD2,self.net_FD3],True)
# feats, seg, (dep_4, dep_o) = self.net_R_D(self.real_Features, self.real_features1)
# real_feats = self.detach_list(feats)
# del feats,seg,(dep_4,dep_o)
# s_feats, seg, (dep_4, dep_o) = self.net_R_D(self.syn_Features, self.syn_features1)
# syn_feats = self.detach_list(s_feats)
# del s_feats, seg, (dep_4, dep_o)
# self.optimizer_R_D.zero_grad()
# self.optimizer_R_D.step()
#self.optimizer_.zero_grad()
self.optimizer_FD1.zero_grad()
D_real = self.net_FD1(self.real_feats[0].detach())
D_fake = self.net_FD1(self.syn_feats[0].detach())
loss = self.criterionGAN(D_real, True) + self.criterionGAN(D_fake, False)
loss.backward()
self.optimizer_FD1.step()
self.loss_DEP_syn = self.criterionGAN(D_fake, False).detach()
self.loss_DEP_real = self.criterionGAN(D_real, True).detach()
print('FD1 update')
del D_real,D_fake
self.optimizer_FD2.zero_grad()
D_real = self.net_FD2(self.real_feats[1].detach())
self.loss_DEP_real += self.criterionGAN(D_real, True).detach()
D_fake = self.net_FD2(self.syn_feats[1].detach())
self.loss_DEP_syn += self.criterionGAN(D_fake, False).detach()
(self.criterionGAN(D_real, True) + self.criterionGAN(D_fake, False)).backward()
self.optimizer_FD2.step()
self.loss_DEP_syn += self.criterionGAN(D_fake, False).detach()
self.loss_DEP_real += self.criterionGAN(D_real, True).detach()
del D_fake,D_real
print('FD2 update')
self.optimizer_FD3.zero_grad()
D_real = self.net_FD3(self.real_feats[2].detach())
self.loss_DEP_real = self.criterionGAN(D_real, True).detach()
D_fake = self.net_FD3(self.syn_feats[2].detach())
self.loss_DEP_syn = self.criterionGAN(D_fake, False).detach()
(self.criterionGAN(D_real, True) + self.criterionGAN(D_fake, False)).backward()
self.optimizer_FD3.step()
self.loss_DEP_syn += self.criterionGAN(D_fake, False).detach()
self.loss_DEP_real += self.criterionGAN(D_real, True).detach()
del D_fake, D_real
self.set_requires_grad([self.net_FD1,self.net_FD2,self.net_FD3], False)
def test_return(self):
return self.real_img,self.real_dep_ref
def backward_R_D(self,train_or_test):
self.optimizer_R_D.zero_grad()
A = True
if A:
feats,seg,(dep_4,dep_o) = self.net_R_D(self.real_Features,self.real_features1)
self.real_feats = self.detach_list(feats)
seg_loss_real = 0
if self.is_Train:
# for sed in r_Seds:
# se_loss_real =se_loss_real+ se_loss_real+self.criterionEdge(sed[:,0,:,:],self.real_seg_le)
seg_loss_real = self.criterionSeg(seg,self.real_seg_l)
pred1 = self.net_FD1(feats[0])
pred2 = self.net_FD2(feats[1])
pred3 = self.net_FD3(feats[2])
D_real_loss =seg_loss_real+0.2*self.criterionGAN(pred1,False) \
+0.2*self.criterionGAN(pred2,False)+0.2*self.criterionGAN(pred3,False)#+ seg_loss_real#+se_loss_real
self.real_dep_ref = dep_o.squeeze(1).detach()
if train_or_test=='train':
D_real_loss.backward()
self.optimizer_R_D.step()
self.real_feats = self.detach_list(feats)
self.optimizer_R_D.zero_grad()
del feats,seg,(dep_4,dep_o)
feats, seg, (dep_4, dep_o) = self.net_R_D(self.syn_Features,self.syn_features1)
s_se_loss = 0
s_seg_loss = 0
dep_loss=0
#o_m, z_m = get_masks(self.syn_dep_l.clone())
B = True
if B:
sky_m= self.syn_seg_l.clone()
sky_m[sky_m!=17]=1
sky_m[sky_m==17]=0
print(sky_m.shape,'sky_m')
oms, zms = get_masks(torch.cat([sky_m.unsqueeze(1),sky_m.unsqueeze(1),
sky_m.unsqueeze(1),sky_m.unsqueeze(1)],1).float()*self.syn_dep_ls.clone())
dep_loss = self.criterionDep(dep_o,sky_m.float()*self.syn_dep_l)
s_seg_loss = self.criterionSeg(seg,self.syn_seg_l)
for s_Dep in dep_4:
dep_loss = dep_loss+self.criterionDep_bce(sky_m.unsqueeze(1).float()*s_Dep,torch.cat([sky_m.unsqueeze(1),sky_m.unsqueeze(1),
sky_m.unsqueeze(1),sky_m.unsqueeze(1)],1).float()*self.syn_dep_ls.clone(),oms,zms)
# D_real_160=self.net_Dis_160(real_dep_160)
# D_real_320 = self.net_Dis_320(real_dep_320)
syn_dep_ref = dep_o.squeeze(1)
D_syn_loss = dep_loss+s_se_loss+s_seg_loss#+dep_loss
if train_or_test == 'train':
D_syn_loss.backward()
self.optimizer_R_D.step()
self.loss_dep_ref = dep_loss.detach()
# print('loss_dep',self.loss_dep_ref)
self.syn_dep_ref = syn_dep_ref.detach()
self.syn_feats = self.detach_list(feats)
return D_syn_loss#+#D_real_loss+D_syn_loss#+D_syn_loss#+10**-30*(loss_style_0+loss_style_1+loss_style_2)
def backward_G_1(self):
self.set_requires_grad([self.net_R_D,self.net_G_2], False)
self.set_requires_grad([self.net_G_1],True)
ss = self.net_G_1(self.syn_img)
syn_features1, syn_Features = self.net_G_2(ss, 'S')
s_feats,s_seg,(s_dep_4,s_dep_o) = self.net_R_D(syn_Features,syn_features1)
loss_dep = self.criterionDep(s_dep_o, self.syn_dep_l)
loss_seg_syn = self.criterionSeg(s_seg, self.syn_seg_l)
loss_G_1 = loss_seg_syn+loss_dep
return loss_G_1
def backward_G_2(self):
self.set_requires_grad([self.net_R_D, self.net_G_1], False)
self.set_requires_grad([#self.net_Dis3_en,self.net_Dis2_en,
# self.net_Dis1_en,
#self.net_FD0,
self.net_FD1,self.net_FD2], False)
ss = self.net_G_1(self.syn_img)
syn_features1, syn_Features = self.net_G_2(ss.detach(), 'S')
feats, seg, (dep_4, dep_o) = self.net_R_D(syn_Features,syn_features1)
self.syn_feats = self.detach_list(feats)
s_seg_loss = 0
dep_loss = 0
B = True
if B:
sky_m = self.syn_seg_l.clone()
sky_m[sky_m != 17] = 1
sky_m[sky_m == 17] = 0
print(sky_m.shape, 'sky_m')
dep_loss = self.criterionDep(dep_o, sky_m.float() * self.syn_dep_l)
s_seg_loss = self.criterionSeg(seg, self.syn_seg_l)
D_syn_loss = dep_loss + s_seg_loss
self.syn_features1 = syn_features1.detach()
self.syn_Features = self.detach_list(syn_Features)
del syn_features1,syn_Features,feats
real_features1,real_Features = self.net_G_2(self.real_img,'R')
feats, seg, (dep_4, dep_o) = self.net_R_D(real_Features, real_features1)
self.real_features1 = real_features1.detach()
self.real_Features = self.detach_list(real_Features)
del (dep_4,dep_o),feats
seg_loss_real = 0
if self.is_Train:
# for sed in r_Seds:
# se_loss_real =se_loss_real+ se_loss_real+self.criterionEdge(sed[:,0,:,:],self.real_seg_le)
seg_loss_real = self.criterionSeg(seg, self.real_seg_l)
D_real_loss = seg_loss_real
return D_syn_loss+2*D_real_loss
def optimize_parameters(self,train_or_test):
self.set_requires_grad(self.net_G_2, True)
self.optimizer_G_2.zero_grad()
self.loss_G2 = self.backward_G_2()
if (train_or_test == 'train'):
print('g2_update')
self.loss_G2.backward()
self.optimizer_G_2.step()
self.set_requires_grad([self.net_G_1, #self.net_G_2,
#self.net_Seg_de
], True)
self.set_requires_grad([#self.net_Seg_de,
self.net_G_2,
#self.net_Dis3_en, self.net_Dis2_en,
# self.net_Dis1_en,
], False)
self.optimizer_G_1.zero_grad()
self.loss_G1 =self.backward_G_1()
if (train_or_test == 'train'):
print('g1_update')
self.loss_G1.backward()
self.optimizer_G_1.step()
# self.set_requires_grad([self.net_Seg_de, self.net_Dep_de],True)
self.set_requires_grad([self.net_G_1, self.net_G_2],False)#,self.net_DIS], False)
self.set_requires_grad([self.net_G_1, self.net_G_2], False)
self.set_requires_grad(self.net_R_D,True)
loss_R_D = self.backward_R_D(train_or_test)
if (train_or_test=='train'):
print('R_D update')
if (train_or_test == 'train'):
self.set_requires_grad([self.net_G_1, self.net_G_2,
self.net_R_D], False)
self.set_requires_grad([
self.net_FD1,self.net_FD2,self.net_FD3], True)
#self.backward_D()
self.backward_DISDEP()
```
#### File: cycle_depth_estimation/util/util.py
```python
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import torch.nn.functional as F
synthia_txtpath='/home/dut-ai/Documents/temp/synthia_encoding.txt'
cat2color_synthia={}
with open(synthia_txtpath,'r') as file:
for line in file.readlines():
templist = line.strip().split('\t')
label = templist.pop(0)
templist=[int(element) for element in templist]
cat2color_synthia[int(label)] = templist
cityscape_txtpath='/home/dut-ai/Documents/temp/cityscape_encoding.txt'
cat2color_cityscape={}
with open(cityscape_txtpath,'r') as file:
for line in file.readlines():
templist = line.strip().split('\t')
label = templist.pop(0)
templist=[int(element) for element in templist]
cat2color_cityscape[int(label)] = templist
def label2im(image_tensor):
cat2color=cat2color_cityscape
if len(image_tensor.shape)==3:
print(image_tensor.shape)
image_tensor=image_tensor.cpu().numpy()[0,:,:]
else:
print('++++++++++',image_tensor.shape)
image_tensor=np.argmax(image_tensor[0,:,:,:].cpu().numpy(),0)
print('------------',image_tensor.shape)
h=image_tensor.shape[0]
w=image_tensor.shape[1]
image_show=np.zeros(shape=[h,w,3])
for category in list(cat2color.keys()):
try:
x, y = np.where(image_tensor == category)
image_show[x, y] = np.array(cat2color[category])
except:
continue
return image_show
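# Editor's note (not in the original file): label2im accepts either an integer label
# map of shape (N, H, W) or per-class scores of shape (N, C, H, W); in the latter
# case an argmax over the class axis recovers the per-pixel category before the
# colors from the encoding file are applied (only the first item of the batch is drawn).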
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
if len(image_tensor.shape) == 3:
image_tensor = image_tensor.unsqueeze(1)
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
print(image_numpy.shape)
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
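# Editor's note (not in the original file): the (x + 1) / 2.0 * 255.0 step assumes the
# tensor was normalized to [-1, 1] (the usual CycleGAN/pix2pix convention) and maps it
# back to the displayable [0, 255] range.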
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def scale_pyramid(img, num_scales):
scaled_imgs = [img]
s = img.size()
h = s[2]
w = s[3]
for i in range(1, num_scales):
ratio = 2**i
nh = h // ratio
nw = w // ratio
scaled_img = F.upsample(img, size=(nh, nw), mode='nearest')
scaled_imgs.append(scaled_img)
scaled_imgs.reverse()
return scaled_imgs
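# Usage sketch (editor's addition, shapes are hypothetical): scale_pyramid builds
# num_scales progressively downsampled copies of an (N, C, H, W) tensor and returns
# them coarsest-first because of the final reverse(), e.g.
#   img = torch.randn(1, 3, 256, 512)
#   pyramid = scale_pyramid(img, 4)   # spatial sizes 32x64, 64x128, 128x256, 256x512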
``` |
{
"source": "JosephineRabbit/MLMSNet",
"score": 2
} |
#### File: JosephineRabbit/MLMSNet/pr_curve.py
```python
import numpy as np
import os
import PIL.Image as Image
import pdb
import matplotlib.pyplot as plt
def main():
#algs = ['sb_crf']
datasets = ['ECSSD']
for dataset in datasets:
print(dataset)
#dir = '/home/rabbit/Datasets/%s'%dataset
input_dir = '/home/rabbit/Desktop/DUT_train/PRE/ECSSD/test2/mask2/'
output_dir = '/home/rabbit/Desktop/DUT_train/PRE/ECSSD/test2/'
#gt_dir = '%s/masks'%dir
gt_dir = '/home/rabbit/Desktop/DUT_train/PRE/ECSSD/test2/gt/'
#input_dirs = ['%s/%s'%(dir, alg) for alg in algs]
fig = plt.figure(figsize=(9, 3))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
name = 'result'
#for input_dir, alg in zip(input_dirs, algs):
evaluate(input_dir,gt_dir, output_dir,name="result")
sb = np.load('%s/%s.npz'%(output_dir,name))
ax1.plot(sb['m_recs'], sb['m_pres'], linewidth=1, )
ax2.plot(np.linspace(0, 1, 21), sb['m_fms'], linewidth=1)
print(' fm: %.4f, mea: %.4f'%( sb['m_thfm'], sb['m_mea']))
ax1.grid(True)
ax1.set_xlabel('Recall', fontsize=14)
ax1.set_ylabel('Precision', fontsize=14)
ax2.grid(True)
ax2.set_xlabel('Threshold', fontsize=14)
ax2.set_ylabel('F-measure', fontsize=14)
handles, labels = ax1.get_legend_handles_labels()
lgd = ax1.legend(handles, labels, loc='center left', bbox_to_anchor=(0.5, -0.5), ncol=8, fontsize=14)
fig.savefig('%s.pdf'%dataset, bbox_extra_artists=(lgd,), bbox_inches='tight')
def evaluate(input_dir, gt_dir, output_dir=None, name=None):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
filelist = os.listdir(input_dir)
eps = np.finfo(float).eps
m_pres = np.zeros(21)
m_recs = np.zeros(21)
m_fms = np.zeros(21)
m_thfm = 0
m_mea = 0
it = 1
for filename in filelist:
if not filename.endswith('.png'):
continue
# print('evaluating image %d'%it)
mask = Image.open('%s/%s' % (input_dir, filename))
mask = np.array(mask, dtype=np.float)
if len(mask.shape) != 2:
mask = mask[:, :, 0]
mask = (mask - mask.min()) / (mask.max()-mask.min()+eps)
gt = Image.open('%s/%s' % (gt_dir, filename))
gt = np.array(gt, dtype=np.uint8)
gt[gt != 0] = 1
pres = []
recs = []
fms = []
mea = np.abs(gt-mask).mean()
# threshold fm
binary = np.zeros(mask.shape)
th = 2*mask.mean()
if th > 1:
th = 1
binary[mask >= th] = 1
sb = (binary * gt).sum()
pre = sb / (binary.sum()+eps)
rec = sb / (gt.sum()+eps)
thfm = 1.3 * pre * rec / (0.3 * pre + rec + eps)
for th in np.linspace(0, 1, 21):
binary = np.zeros(mask.shape)
binary[ mask >= th] = 1
pre = (binary * gt).sum() / (binary.sum()+eps)
rec = (binary * gt).sum() / (gt.sum()+ eps)
fm = 1.3 * pre * rec / (0.3*pre + rec + eps)
pres.append(pre)
recs.append(rec)
fms.append(fm)
fms = np.array(fms)
pres = np.array(pres)
recs = np.array(recs)
m_mea = m_mea * (it-1) / it + mea / it
m_fms = m_fms * (it - 1) / it + fms / it
m_recs = m_recs * (it - 1) / it + recs / it
m_pres = m_pres * (it - 1) / it + pres / it
m_thfm = m_thfm * (it - 1) / it + thfm / it
it += 1
if not (output_dir is None or name is None):
np.savez('%s/%s.npz'%(output_dir, name), m_mea=m_mea, m_thfm=m_thfm, m_recs=m_recs, m_pres=m_pres, m_fms=m_fms)
return m_thfm, m_mea
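# Editor's note (not in the original file): the constants 1.3 and 0.3 above come from
# the weighted F-measure F_beta = (1 + beta^2) * P * R / (beta^2 * P + R) with
# beta^2 = 0.3, the value conventionally used in saliency-detection benchmarks.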
if __name__ == '__main__':
main()
```
#### File: JosephineRabbit/MLMSNet/test.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
#import config
import numpy as np
from data_new import DataFolder
#from NN import *
import time
# from gan import *from torch.optim.lr_scheduler import StepLR, MultiStepLR
import os
from torch.autograd import Variable
import cv2
#from e_m_transfer import *
from D_E_U import *
# test_dirs = [("/home/neverupdate/Downloads/SalGAN-master/Dataset/TEST-IMAGE", "/home/neverupdate/Downloads/SalGAN-master/Dataset/TEST-MASK")]
import numpy as np
import os
import PIL.Image as Image
import pdb
import matplotlib.pyplot as plt
#D2 = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']), connect['dss'], 1).cuda()
#D2.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/checkpoint/DSS/with_e/D3epoch21.pkl'))
#D2.load_state_dict(torch.load('D15epoch11.pkl'))
#G = Generator(input_dim=4,num_filter=64,output_dim=1)
#G.cuda()
#G.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/Gepoch6_2.pkl'))
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),config.BATCH_SIZE).cuda()
U = D_U().cuda()
D_E.load_state_dict(torch.load('/home/rabbit/Desktop/D_E4epoch2.pkl'))
U.load_state_dict(torch.load('/home/rabbit/Desktop/U_4epoch2.pkl'))
p= './PRE/ECSSD/test2/'
test_dirs = [
#("/home/rabbit/Datasets/DUTS/DUT-test/DUT-test-Image",
#"/home/rabbit/Datasets/DUTS/DUT-test/DUT-test-Mask"),
#( "/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Image",
#"/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Mask")
("/home/rabbit/Datasets/ECSSD/ECSSD-Image",
"/home/rabbit/Datasets/ECSSD/ECSSD-Mask"),
#("/home/rabbit/Datasets/THUR-Image",
#"/home/rabbit/Datasets/THUR-Mask"),
#("/home/www/Desktop/DUT_train/Sal_Datasets/THUR-Image",
# "/home/www/Desktop/DUT_train/Sal_Datasets/THUR-Mask"),
#("/home/rabbit/Datasets/SOD/SOD-Image",
#"/home/rabbit/Datasets/SOD/SOD-Mask")
#("/home/rabbit/Datasets/SED1/SED1-Image",
#"/home/rabbit/Datasets/SED1/SED1-Mask")
#("/home/neverupdate/Downloads/SalGAN-master/SED2/SED2-Image",
# "/home/neverupdate/Downloads/SalGAN-master/SED2/SED2-Mask")
#("/home/rabbit/Datasets/PASCALS/PASCALS-Image",
#"/home/rabbit/Datasets/PASCALS/PASCALS-Mask")
#("/home/neverupdate/Downloads/SalGAN-master/MSRA5000/MSRA5000-Image",
#"/home/neverupdate/Downloads/SalGAN-master/MSRA5000/MSRA5000-Mask")
#("/home/neverupdate/Downloads/SalGAN-master/HKU-IS/HKU-IS_Image",
#"/home/neverupdate/Downloads/SalGAN-master/HKU-IS/HKU-IS-Mask")
#("/home/rabbit/Datasets/OMRON/OMRON-Image",
#"/home/rabbit/Datasets/OMRON/OMRON-Mask")
]
def process_data_dir(data_dir):
files = os.listdir(data_dir)
files = map(lambda x: os.path.join(data_dir, x), files)
return sorted(files)
batch_size = 1
DATA_DICT = {}
IMG_FILES = []
GT_FILES = []
IMG_FILES_TEST = []
GT_FILES_TEST = []
for dir_pair in test_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES.extend(X)
GT_FILES.extend(y)
IMGS_train, GT_train = IMG_FILES, GT_FILES
test_folder = DataFolder(IMGS_train, GT_train, False)
test_data = DataLoader(test_folder, batch_size=1, num_workers=2, shuffle=False,
)
sum_eval_mae = 0
sum_eval_loss = 0
num_eval = 0
mae = 0
evaluation = nn.L1Loss()
mean = (0.485,0.456,0.406)
std = (0.229,0.224,0.225)
best_eval = None
sum_train_mae = 0
sum_train_loss = 0
sum_train_gan = 0
sum_fm=0
eps = np.finfo(float).eps
##train
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(test_data):
#D2.eval()
D_E.eval()
print(iter_cnt)
label_batch = label_batch.numpy()[0, :, :]
img_batch = Variable(img_batch).cuda() # ,Variable(z_.cuda())
binary = np.zeros(label_batch.shape)
f,m,e = D_E(img_batch)
masks, es, DIC = U(f)
#ut2 = out2.numpy()
mask = masks[2].data[0].cpu()
edges=edges.cpu().numpy()[0,:,:]
print(np.shape(edges))
#mask1 =out[1].data[0].cpu()
#mask2 =out[2].data[0].cpu()
#mask2 =out[2].data[0].cpu()
mask=mask.numpy()
#p_edge = out[7].data[0].cpu().numpy()
#img_batch = img_batch.cpu().numpy()[0,:,:,:]
#print(np.shape(img_batch))
#img = np.transpose(img_batch, [1, 2, 0])
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#g_t=label_batch
#print(np.shape(g_t))
#print(np.shape(mask))
#pr = np.transpose(mask, [1, 2, 0])
#save_img = p +'gt/'+str(name)[2:-3]
save_gt = p+ '/gt/'+str(name)[2:-3]
#save_pre = p+ str(name)[2:-7]+'_p.png'
save_m = p+'/mask/'+str(name)[2:-7]+'.png'
#save_m2 = p+ '/mask2/'+str(name)[2:-7]+'.png'
save_edge = p+str(name)[2:-7]+'_e.png'
save_ed_p = p+str(name)[2:-7]+'_pe.png'
#print(save_pre)
cv2.imwrite(save_m, mask[0, :, :] * 255)
#cv2.imwrite(save_m2, out2[0,:,:]*255)
cv2.imwrite(save_gt, label_batch[0,:,:] * 255)
#cv2.imwrite(save_edge, edges[0,:,:]* 255)
#cv2.imwrite(save_ed_p,p_edge[0,:,:]*255)
#mask = (mask-mask.min())/(mask.max()-mask.min())
```
#### File: JosephineRabbit/MLMSNet/train_DEU.py
```python
from D_E_U import *
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),config.BATCH_SIZE).cuda()
U = D_U().cuda()
U.cuda()
data_dirs = [
("/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Image",
"/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Mask"),
]
test_dirs = [("/home/rabbit/Datasets/SED1/SED1-Image",
"/home/rabbit/Datasets/SED1/SED1-Mask")]
D_E.base.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/weights/vgg16_feat.pth'))
initialize_weights(U)
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE, betas=(0.5, 0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
BCE_loss = torch.nn.BCELoss().cuda()
def process_data_dir(data_dir):
files = os.listdir(data_dir)
files = map(lambda x: os.path.join(data_dir, x), files)
return sorted(files)
batch_size =BATCH_SIZE
DATA_DICT = {}
IMG_FILES = []
GT_FILES = []
IMG_FILES_TEST = []
GT_FILES_TEST = []
for dir_pair in data_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES.extend(X)
GT_FILES.extend(y)
for dir_pair in test_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES_TEST.extend(X)
GT_FILES_TEST.extend(y)
IMGS_train, GT_train = IMG_FILES, GT_FILES
train_folder = DataFolder(IMGS_train, GT_train, True)
train_data = DataLoader(train_folder, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True,
drop_last=True)
test_folder = DataFolder(IMG_FILES_TEST, GT_FILES_TEST, trainable=False)
test_data = DataLoader(test_folder, batch_size=1, num_workers=NUM_WORKERS, shuffle=False)
def cal_DLoss(out_m,out_e, mask, edge):
# if l == 0:
# 0 f 1 t
# ll = Variable(torch.ones(mask.shape()))
D_masks_loss = 0
D_edges_loss = 0
for i in range(6):
#print(out_m[i].size())
#print(mask.size())
D_masks_loss += F.binary_cross_entropy(out_m[i], mask)
for i in range(6):
D_edges_loss += F.binary_cross_entropy(out_e[i], edge)
return ( D_masks_loss, D_edges_loss)
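# Editor's note (not in the original file): both loops apply an unweighted BCE to each
# of the six DSS side outputs (deep supervision) and return separate totals for the
# saliency masks and the edge maps, so the caller can weight the two terms independently.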
best_eval = None
x = 0
ma = 1
for epoch in range(1, config.NUM_EPOCHS + 1):
sum_train_mae = 0
sum_train_loss = 0
sum_train_gan = 0
##train
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(train_data):
D_E.train()
x = x + 1
# print(img_batch.size())
label_batch = Variable(label_batch).cuda()
# print(torch.typename(label_batch))
print('training start!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
edges = Variable(edges).cuda()
##########DSS#########################
######train dis
##fake
f,y1,y2 = D_E(img_batch)
m_l_1,e_l_1 = cal_DLoss(y1,y2,label_batch,edges)
DE_optimizer.zero_grad()
DE_l_1 = m_l_1 +e_l_1
DE_l_1.backward()
DE_optimizer.step()
w = [2,2,3,3]
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4],label_batch)
for i in range(4):
pre_ms_l +=w[i] * F.binary_cross_entropy(masks[i],label_batch)
DE_optimizer.zero_grad()
DE_l_1 = pre_ms_l/20+30*pre_m_l
DE_l_1.backward()
DE_optimizer.step()
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4], label_batch)
for i in range(4):
pre_ms_l += w[i] * F.binary_cross_entropy(masks[i], label_batch)
U_optimizer.zero_grad()
U_l_1 = pre_ms_l/20+30*pre_m_l
U_l_1.backward()
U_optimizer.step()
sum_train_mae += ma.data.cpu()
        print("Epoch:{}\t {}/{} \t mae:{}".format(epoch, iter_cnt + 1,
len(train_folder) / config.BATCH_SIZE,
sum_train_mae / (iter_cnt + 1)))
##########save model
# torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
torch.save(D_E.state_dict(), './checkpoint/DSS/with_e_2/D_Eepoch%d.pkl' % epoch)
torch.save(U.state_dict(), './checkpoint/DSS/with_e_2/Uis.pkl')
print('model saved')
###############test
eval1 = 0
eval2 = 0
t_mae = 0
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(test_data):
D_E.eval()
U.eval()
label_batch = Variable(label_batch).cuda()
print('val!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
f,y1,y2 = D_E(img_batch)
masks, DIC = U(f)
mae_v2 = torch.abs(label_batch - masks[4]).mean().data[0]
# eval1 += mae_v1
eval2 += mae_v2
# m_eval1 = eval1 / (iter_cnt + 1)
m_eval2 = eval2 / (iter_cnt + 1)
print("test mae", m_eval2)
with open('results1.txt', 'a+') as f:
f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
```
#### File: JosephineRabbit/MLMSNet/train.py
```python
from model import *
from config import *
import torch.optim as optim
from collections import OrderedDict
def load(path):
state_dict = torch.load(path)
state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
state_dict_rename[name] = v
#print(state_dict_rename)
#model.load_state_dict(state_dict_rename)
return state_dict_rename
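# Usage sketch (editor's addition): load() strips the 'module.' prefix that
# nn.DataParallel prepends to parameter names, so a checkpoint saved from a
# DataParallel-wrapped model can be loaded into a bare module, e.g. (illustrative key)
#   'module.base.0.weight' -> 'base.0.weight'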
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),e_extract_layer(),nums =BATCH_SIZE).cuda()
#initialize_weights(D_E)
#D_E.base.load_state_dict(torch.load('../vgg16_feat.pth'))
#print(D_E)
D_E.load_state_dict(load('D:\WRm/checkpoints/D_Eepoch3.pkl'))
D_E =nn.DataParallel(D_E).cuda()
U = D_U().cuda()
#initialize_weights(U)
U.load_state_dict(load('D:\WRm/checkpoints/Uepoch3.pkl'))
U =nn.DataParallel(U)
#D_E.base.load_state_dict(torch.load('/home/neverupdate/Downloads/SalGAN-master/weights/vgg16_feat.pth'))
#D_E.load_state_dict(torch.load('./checkpoints/D_Eepoch3.pkl'))
#U.load_state_dict(torch.load('./checkpoints/Uepoch3.pkl'))
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE,betas=(0.5,0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
TR_sal_dirs = [ ("D:\WRM/DUTS/DUTS-TR/DUTS-TR-Image",
"D:\WRM/DUTS/DUTS-TR/DUTS-TR-Mask")]
TR_ed_dir = [("./images/train",
"./bon/train")]
TE_sal_dirs = [("D:\WRM/ECSSD (2)/ECSSD-Image",
"D:\WRM/ECSSD (2)/ECSSD-Mask")]
TE_ed_dir = [("./images/test",
"./bon/test")]
def DATA(sal_dirs,ed_dir,trainable):
S_IMG_FILES = []
S_GT_FILES = []
E_IMG_FILES = []
E_GT_FILES = []
for dir_pair in sal_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
S_IMG_FILES.extend(X)
S_GT_FILES.extend(y)
for dir_pair in ed_dir:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
E_IMG_FILES.extend(X)
E_GT_FILES.extend(y)
S_IMGS_train, S_GT_train = S_IMG_FILES, S_GT_FILES
E_IMGS_train, E_GT_train = E_IMG_FILES, E_GT_FILES
folder = DataFolder(S_IMGS_train, S_GT_train, E_IMGS_train, E_GT_train, trainable)
if trainable:
data = DataLoader(folder, batch_size=BATCH_SIZE, num_workers=2, shuffle=trainable)
else:
data = DataLoader(folder, batch_size=1, num_workers=2, shuffle=trainable)
return data
train_data = DATA(TR_sal_dirs,TR_ed_dir,trainable=True)
test_data = DATA(TE_sal_dirs,TE_ed_dir,trainable=False)
def cal_eLoss(edges,label):
loss = 0
w =[1,1,1,1,1,5]
for i in range(6):
#print(label[i].shape)
#print(edges[i].shape)
loss += w[i]*F.binary_cross_entropy(edges[i],label)/10
return loss
def cal_s_mLoss(maps,label):
loss = 0
w = [1, 1, 1, 1, 1, 1]
for i in range(6):
loss =loss+ w[i]*F.binary_cross_entropy( maps[i],label) / 6
return loss
def cal_s_eLoss(es,label):
loss = 0
w =[1,1,1,1,1]
for i in range(5):
loss =loss+w[i]* F.binary_cross_entropy(es[i],label)/5
return loss
def cal_e_mLoss(e_m,label):
loss=0
w = [1, 1, 1, 1, 1, 1]
for i in range(5):
loss =loss+ w[i] * F.binary_cross_entropy(e_m[i],label) / 5
return loss
def cal_s_e2mLoss(e_m,maps):
loss = 0
w = [1, 1, 1, 1, 1, 1]
for i in range(5):
loss = loss+ w[i] * F.binary_cross_entropy( e_m[i],maps[i]) / 5
return loss
best_eval = None
ma = 0
def main(train_data,test_data):
best_eval = None
ma = 0
for epoch in range(1, NUM_EPOCHS + 1):
sum_train_mae = 0
sum_train_loss = 0
x = 0
##train
for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(train_data):
D_E.train()
U.train()
x = x + 1
print('training start!!')
# for iter, (x_, _) in enumerate(train_data):
img = Variable(img.cuda()) # ,Variable(z_.cuda())
img_e = Variable(img_e.cuda())
sal_l = Variable(sal_l.cuda(), requires_grad=False)
sal_e = Variable(sal_e.cuda(), requires_grad=False)
ed_l = Variable(ed_l, requires_grad=False).cuda()
##########DSS#########################
######train dis
dd = True
if dd == True:
##fake
f, edges, e_s, e = D_E(img,img_e)
ff = list()
for i in range(5):
ff.append(f[i].detach())
edges_L = cal_eLoss(edges,ed_l)
e_s_L = cal_e_mLoss(e_s, sal_l)
e_L = cal_s_eLoss(e, sal_e)
#s_m_L = cal_s_mLoss(s, sal_l)
# masks, es = U(f)
# pre_ms_l = 0
# pre_es_l = 0
# ma = torch.abs(sal_l - masks[1]).mean()
# pre_m_l = F.binary_cross_entropy(masks[1], sal_l)
# for i in range(2):
# pre_ms_l += F.binary_cross_entropy(masks[1], sal_l)
# pre_es_l += F.binary_cross_entropy(es[1], sal_e)
DE_optimizer.zero_grad()
DE_l_1 = 5 * e_s_L + 10*e_L + 5*edges_L
DE_l_1.backward()
DE_optimizer.step()
uu = True
if uu == True:
masks, es = U(ff)
# mmm = masks[2].detach().cpu().numpy()
# print(mmm.shape)
# mmmmm = Image.fromarray(mmm[0,0,:,:])
# mmmmm.save('1.png')
# cv2.imshow('1.png',mmm[0,0,:,:]*255)
# cv2.waitKey()
pre_ms_l = 0
pre_es_l = 0
ma = torch.abs(sal_l - masks[2]).mean()
# print(ma)
pre_m_l = F.binary_cross_entropy(masks[2], sal_l)
for i in range(2):
pre_ms_l += F.binary_cross_entropy(masks[i], sal_l)
pre_es_l += F.binary_cross_entropy(es[i], sal_e)
U_l_1 = 50 * pre_m_l + 10 * pre_es_l + pre_ms_l
U_optimizer.zero_grad()
U_l_1.backward()
U_optimizer.step()
sum_train_mae += float(ma)
print(
"Epoch:{}\t iter:{} sum:{} \t mae:{}".format(epoch, x, len(train_data), sum_train_mae / (iter_cnt + 1)))
##########save model
# torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
torch.save(D_E.state_dict(), 'D:\WRM/checkpoints/D_Eepoch%d.pkl' % epoch)
torch.save(U.state_dict(), 'D:\WRM/checkpoints/Uepoch%d.pkl' % epoch)
print('model saved')
###############test
eval1 = 0
eval2 = 0
t_mae = 0
for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(test_data):
D_E.eval()
U.eval()
label_batch = Variable(sal_l).cuda()
img_eb = Variable(img_e).cuda()
print('val!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img.cuda()) # ,Variable(z_.cuda())
f, edges, e_s, e = D_E(img_batch,img_eb)
masks, es = U(f)
mae_v2 = torch.abs(label_batch - masks[2]).mean().data[0]
# eval1 += mae_v1
eval2 += mae_v2
# m_eval1 = eval1 / (iter_cnt + 1)
m_eval2 = eval2 / (iter_cnt + 1)
print("test mae", m_eval2)
with open('results1.txt', 'a+') as f:
f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
if __name__ == '__main__':
main(train_data,test_data)
``` |
{
"source": "Josephine-Tsai/sc-projects",
"score": 4
} |
#### File: stanCode_Projects/my_photoshop/blur.py
```python
from simpleimage import SimpleImage
def blur(old_img):
"""
This function can blur the picture.
:param old_img: image, the original image "images/smiley-face.png"
:return new_img: image, the blurred 'old_img'
"""
# Create a new blank image which is as big as the original image.
new_img = SimpleImage.blank(old_img.width, old_img.height)
for x in range(old_img.width):
for y in range(old_img.height):
# Pick up a pixel which we want to record new value.
new_pixel = new_img.get_pixel(x, y)
count = 0
pixel_red = 0
pixel_green = 0
pixel_blue = 0
            # Sum the red/green/blue values of up to 9 pixels (the central one plus its nearest 8 neighbors).
for i in range(-1, 2):
for j in range(-1, 2):
                    # Make sure the neighboring pixel lies inside the image bounds.
if old_img.width > (x+i) >= 0 and old_img.height > (y+j) >= 0:
avg_pixel = old_img.get_pixel(x+i, y+j)
pixel_red += avg_pixel.red
pixel_green += avg_pixel.green
pixel_blue += avg_pixel.blue
count += 1
            # Average the collected values and store them in the pixel we picked (new_pixel).
new_pixel.red = pixel_red//count
new_pixel.green = pixel_green//count
new_pixel.blue = pixel_blue//count
return new_img
def main():
"""
    The user provides a picture, and the program first shows the original image.
    Then the program blurs the picture once with the blur() function,
    runs blur() several more times in a for loop,
    and finally shows the blurred picture.
"""
old_img = SimpleImage("images/smiley-face.png")
old_img.show()
blurred_img = blur(old_img)
for i in range(5):
blurred_img = blur(blurred_img)
blurred_img.show()
if __name__ == '__main__':
main()
```
#### File: stanCode_Projects/recursion_application/anagram.py
```python
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
# Global Variables
dictionary = [] # This is a list of dictionary.
searching_list = [] # This is a list of searching results.
def main():
"""
    This program uses the 'find_anagrams' function to find all the anagram(s) of the word input by the user.
    Then it prints all the results and the total number of results.
"""
global searching_list
read_dictionary()
print("Welcome to stanCode \"Anagram Generator\" (or -1 to quit)")
word = input('Find anagrams for: ')
    # Keep running until the user types the EXIT value ('-1').
    while word != EXIT:
current_str = []
print('Searching...')
find_anagrams(word, current_str)
print(f'{len(searching_list)} anagrams: {searching_list}')
# Clear the list 'searching_list' and then restart.
searching_list = []
word = input('Find anagrams for: ')
return
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list.
"""
global dictionary
with open(FILE, 'r') as file:
for line in file:
dictionary += [line.strip()]
def find_anagrams(s, current_index):
"""
    This function uses recursion to find all the possible permutations of s (the user input).
    :param s: string, the user input word
    :param current_index: list, an empty list used to record the indices of the chosen characters
    :return: nothing / this function only adds words into the global variable 'searching_list'
"""
global searching_list
# Base-Case
if len(current_index) == len(s):
current_str = ''
        # Turn the index list into a string, then check whether that string is in the dictionary.
for digit in current_index:
current_str += s[digit]
if current_str in dictionary:
if current_str not in searching_list:
searching_list += [current_str]
print('Found: ' + current_str)
print('Searching...')
else:
if has_prefix(s, current_index):
for i in range(len(s)):
if i in current_index:
pass
else:
current_index.append(i)
# Recursion
find_anagrams(s, current_index)
current_index.pop()
else:
return
def has_prefix(s, sub_index):
"""
    This function checks whether any dictionary word starts with the current string (recorded as indices);
    if not, it returns False so the search along this branch can stop early.
    :param s: string, the user input word
    :param sub_index: list, the current list of character indices of s
    :return: (bool) whether any dictionary word starts with the prefix built from sub_index
"""
current_str = ''
for digit in sub_index:
current_str += s[digit]
for word in dictionary:
if word.startswith(current_str):
return True
return False
if __name__ == '__main__':
main()
``` |
{
"source": "joseph-ismailyan/nand",
"score": 4
} |
#### File: joseph-ismailyan/nand/nand.py
```python
import numpy as np
import matplotlib.pyplot as plt
#sigmoid function
def nonlin(x, deriv=False):
if(deriv==True):
return x*(1-x)
else:
return 1/(1+np.exp(-x))
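#editor's note (not in the original file): the deriv=True branch assumes x is already a
#sigmoid *output*, so it returns s*(1-s) rather than differentiating a raw input.
#Quick sanity check (values are illustrative):
#  s = nonlin(0.0)          # 0.5
#  nonlin(s, deriv=True)    # 0.25, the sigmoid's slope at 0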
#input data
X = np.array([[0,0],
[0,1],
[1,0],
[1,1]])
#correct output data
y = np.array([[0],
[1],
[1],
[0]])
#assign seed so we always start with the same data
np.random.seed(1)
#generate random weights
w1 = 2*np.random.random((2,4)) - 1
w2 = 2*np.random.random((4,1)) - 1
#print('\nWeights 1: \n', w1,'\n\n')
#print('Weights 2:\n', w2,'\n')
#optimize weights
Er_list = []
optFound = False
runs = 80000
#split into segments to test cost vs benefit of runs
#choose n within 2 orders of magnitude of 'runs'
num_segments = int(runs/100)
rate = 3 #the % difference between segments below which a segment counts as the 'optimum'
for i in range(runs):
#layer 1 is the input layer X
l1 = X
#layer 2 is X multiplied by the randomly generated weight matrix, w1
l2 = nonlin(np.dot(l1, w1))
#layer 3 is layer 2 mulitplied by random weight matrix w2
#layer 3 is also the output layer
l3 = nonlin(np.dot(l2, w2))
Er_l3 = y - l3
Er_list.append(np.mean(abs(Er_l3)))
    #aka error of output multiplied by the derivative of the output
    #this finds how far off our weights are
l3_delta = Er_l3*nonlin(l3, deriv=True)
Er_l2 = l3_delta.dot(w2.T)
l2_delta = Er_l2*nonlin(l2, deriv=True)
#keep within 1 order of magnitude of runs
if(i % 10000 == 0):
print('Error at run #{}: '.format(i), np.mean(np.abs(Er_l3)))
#print(w2,'\n')
#update weights
w2 += l2.T.dot(l3_delta)
w1 += l1.T.dot(l2_delta)
if(len(Er_list) > (num_segments*2) and (optFound == False)):
        #if the relative difference between segments is less
# than 'rate' then we've found a (relatively) optimal number of runs
if(100*np.abs((Er_list[i-num_segments]- Er_list[i])/Er_list[i-num_segments]) < rate):
optPoint = i
optFound = True
#print('Input: ',X)
#print('Output: ',l3)
#print('Actual: ',y)
#print('\n\n\n',Er_list[2000] - Er_list[4000])
fig, ax = plt.subplots()
#ax.annotate('figure pixels',
#xy=(optPoint, Er_list[optPoint]), xycoords='figure pixels')
if(optFound):
print('Optimum stopping point at', optPoint)
ax.annotate('optimum',
xy=(optPoint, Er_list[optPoint]), xycoords='data',
xytext=(-15, 25), textcoords='offset points',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right', verticalalignment='bottom')
plt.plot(Er_list)
plt.show()
``` |
{
"source": "josephites/pyethapp",
"score": 2
} |
#### File: pyethapp/examples/urlfetcher.py
```python
import json, re
import random
import sys
import ethereum.blocks
import ethereum.utils
import ethereum.abi
import rlp
try:
from urllib.request import build_opener
except:
from urllib2 import build_opener
my_privkey = ethereum.utils.sha3('<KEY>')
my_address = ethereum.utils.privtoaddr(my_privkey).encode('hex')
print 'My address', my_address
# Address of the main proxy contract
my_contract_address = ethereum.utils.normalize_address('0xd53096b3cf64d4739bb774e0f055653e7f2cd710')
# Makes a request to a given URL (first arg) and optional params (second arg)
def make_request(*args):
opener = build_opener()
opener.addheaders = [('User-agent',
'Mozilla/5.0'+str(random.randrange(1000000)))]
try:
return opener.open(*args).read().strip()
except Exception as e:
try:
p = e.read().strip()
except:
p = e
raise Exception(p)
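# Usage sketch (editor's addition; the URL is only an example):
#   body = make_request('https://example.com/data.json')
# Note that the User-agent header gets a fresh random suffix on every call.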
true, false = True, False
# ContractTranslator object for the main proxy contract
ct = ethereum.abi.ContractTranslator([{"constant": false, "type": "function", "name": "get(string)", "outputs": [{"type": "int256", "name": "out"}], "inputs": [{"type": "string", "name": "url"}]}, {"inputs": [{"indexed": false, "type": "string", "name": "url"}, {"indexed": false, "type": "address", "name": "callback"}, {"indexed": false, "type": "uint256", "name": "responseId"}, {"indexed": false, "type": "uint256", "name": "fee"}], "type": "event", "name": "GetRequest(string,address,uint256,uint256)"}])
# ContractTranslator object for the contract that is used for testing the main contract
ct2 = ethereum.abi.ContractTranslator([{"constant": false, "type": "function", "name": "callback(bytes,uint256)", "outputs": [], "inputs": [{"type": "bytes", "name": "response"}, {"type": "uint256", "name": "responseId"}]}])
app, my_nonce, chainservice = None, None, None
# Called once on startup
def on_start(_app):
print 'Starting URL translator service'
global app, my_nonce, chainservice
app = _app
chainservice = app.services.chain
my_nonce = chainservice.chain.head.get_nonce(my_address)
# Called every block
def on_block(blk):
global my_nonce, chainservice
for receipt in blk.get_receipts():
for _log in receipt.logs:
# Get all logs to the proxy contract address of the right type
if _log.address == my_contract_address:
log = ct.listen(_log)
if log and log["_event_type"] == "GetRequest":
print 'fetching: ', log["url"]
# Fetch the response
try:
response = make_request(log["url"])
except:
response = ''
print 'response: ', response
# Create the response transaction
txdata = ct2.encode('callback', [response, log["responseId"]])
tx = ethereum.transactions.Transaction(my_nonce, 60 * 10**9, min(100000 + log["fee"] / (60 * 10**9), 2500000), log["callback"], 0, txdata).sign(my_privkey)
print 'txhash: ', tx.hash.encode('hex')
print 'tx: ', rlp.encode(tx).encode('hex')
# Increment the nonce so the next transaction is also valid
my_nonce += 1
# Send it
success = chainservice.add_transaction(tx, broadcast_only=True)
assert success
print 'sent tx'
```
#### File: pyethapp/pyethapp/dao.py
```python
from ethereum.block import BlockHeader
from ethereum.utils import decode_hex, int256, big_endian_to_int
def is_dao_challenge(config, number, amount, skip):
return number == config['DAO_FORK_BLKNUM'] and amount == 1 and skip == 0
def build_dao_header(config):
return BlockHeader(
prevhash=decode_hex('a218e2c611f21232d857e3c8cecdcdf1f65f25a4477f98f6f47e4063807f2308'),
uncles_hash=decode_hex('1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347'),
coinbase=decode_hex('bcdfc35b86bedf72f0cda046a3c16829a2ef41d1'),
state_root=decode_hex('c5e389416116e3696cce82ec4533cce33efccb24ce245ae9546a4b8f0d5e9a75'),
tx_list_root=decode_hex('7701df8e07169452554d14aadd7bfa256d4a1d0355c1d174ab373e3e2d0a3743'),
receipts_root=decode_hex('26cf9d9422e9dd95aedc7914db690b92bab6902f5221d62694a2fa5d065f534b'),
bloom=int256.deserialize(
decode_hex('00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'),
),
difficulty=big_endian_to_int(decode_hex('38c3bf2616aa')),
number=config['DAO_FORK_BLKNUM'],
gas_limit=big_endian_to_int(decode_hex('47e7c0')),
gas_used=big_endian_to_int(decode_hex('014820')),
timestamp=big_endian_to_int(decode_hex('578f7aa8')),
extra_data=config['DAO_FORK_BLKEXTRA'],
mixhash=decode_hex('5b5acbf4bf305f948bd7be176047b20623e1417f75597341a059729165b92397'),
nonce=decode_hex('bede87201de42426')
)
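# Usage sketch (editor's addition, names are illustrative): during the DAO-fork header
# challenge a peer asks for exactly one header at DAO_FORK_BLKNUM, so the pair of
# helpers is typically used as
#   if is_dao_challenge(chain_config, number, amount, skip):
#       header = build_dao_header(chain_config)
#       # reply with [header] so the remote peer can see which side of the fork we follow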
```
#### File: pyethapp/pyethapp/leveldb_service.py
```python
import os
import sys
from devp2p.service import BaseService
from ethereum.db import BaseDB
from gevent.event import Event
from gevent.hub import getcurrent
import leveldb
from ethereum import slogging
from ethereum.utils import encode_hex
import random
slogging.set_level('db', 'debug')
log = slogging.get_logger('db')
compress = decompress = lambda x: x
PY3 = sys.version_info >= (3,)
"""
memleak in py-leveldb
25140 ralf 20 0 3360m 1.3g 53m S 3 4.2 4:12.71 pyethapp
26167 ralf 20 0 2943m 1.0g 44m S 1 3.3 3:19.51 pyethapp
25140 ralf 20 0 3531m 1.5g 61m S 1 4.7 5:07.49 pyethapp
26167 ralf 20 0 3115m 1.1g 47m S 1 3.6 4:03.54 pyethapp
with reload_db()
4096 ralf 20 0 1048m 362m 14m S 2 1.1 1:21.97 pyethapp
4109 ralf 20 0 975m 307m 14m S 2 1.0 1:16.03 pyethapp
4096 ralf 20 0 903m 431m 9484 S 2 1.3 1:54.29 pyethapp
4109 ralf 20 0 807m 367m 8852 S 1 1.1 1:47.01 pyethapp
4109 ralf 20 0 2609m 640m 60m S 3 2.0 2:41.05 pyethapp
4096 ralf 20 0 1232m 521m 14m S 6 1.6 2:28.68 pyethapp
deserializing all blocks + pow + add to list:
1GB after 300k blocks
reading all entries == 400MB
+ check pow every 1000 32 caches = 580MB
+ check pow every 1000 1 cache = 590MB
"""
class LevelDB(BaseDB):
"""
filename the database directory
block_cache_size (default: 8 * (2 << 20)) maximum allowed size for the block cache in bytes
write_buffer_size (default 2 * (2 << 20))
block_size (default: 4096) unit of transfer for the block cache in bytes
max_open_files: (default: 1000)
create_if_missing (default: True) if True, creates a new database if none exists
    error_if_exists (default: False) if True, raises an error if the database exists
paranoid_checks (default: False) if True, raises an error as soon as an internal
corruption is detected
"""
max_open_files = 32000
block_cache_size = 8 * 1024**2
write_buffer_size = 4 * 1024**2
def __init__(self, dbfile):
self.uncommitted = dict()
log.info('opening LevelDB',
path=dbfile,
block_cache_size=self.block_cache_size,
write_buffer_size=self.write_buffer_size,
max_open_files=self.max_open_files)
self.dbfile = dbfile
self.db = leveldb.LevelDB(dbfile, max_open_files=self.max_open_files)
self.commit_counter = 0
def reopen(self):
del self.db
self.db = leveldb.LevelDB(self.dbfile)
def get(self, key):
log.trace('getting entry', key=encode_hex(key)[:8])
if key in self.uncommitted:
if self.uncommitted[key] is None:
raise KeyError("key not in db")
log.trace('from uncommitted')
return self.uncommitted[key]
log.trace('from db')
if PY3:
if isinstance(key, str):
key = key.encode()
o = bytes(self.db.Get(key))
else:
o = decompress(self.db.Get(key))
self.uncommitted[key] = o
return o
def put(self, key, value):
log.trace('putting entry', key=encode_hex(key)[:8], len=len(value))
self.uncommitted[key] = value
def commit(self):
log.debug('committing', db=self)
batch = leveldb.WriteBatch()
for k, v in list(self.uncommitted.items()):
if v is None:
batch.Delete(k)
else:
compress_v = compress(v)
if PY3:
if isinstance(k, str):
k = k.encode()
if isinstance(compress_v, str):
compress_v = compress_v.encode()
batch.Put(k, compress_v)
self.db.Write(batch, sync=False)
        log.debug('committed', db=self, num=len(self.uncommitted))
        self.uncommitted.clear()
# self.commit_counter += 1
# if self.commit_counter % 100 == 0:
# self.reopen()
def delete(self, key):
log.trace('deleting entry', key=key)
self.uncommitted[key] = None
def _has_key(self, key):
try:
self.get(key)
return True
except KeyError:
return False
except Exception as e:
log.info('key: {}, type(key):{}'.format(key, type(key)))
raise
def __contains__(self, key):
return self._has_key(key)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.db == other.db
def __repr__(self):
return '<DB at %d uncommitted=%d>' % (id(self.db), len(self.uncommitted))
def inc_refcount(self, key, value):
self.put(key, value)
def dec_refcount(self, key):
pass
def revert_refcount_changes(self, epoch):
pass
def commit_refcount_changes(self, epoch):
pass
def cleanup(self, epoch):
pass
def put_temporarily(self, key, value):
self.inc_refcount(key, value)
self.dec_refcount(key)
class LevelDBService(LevelDB, BaseService):
"""A service providing an interface to a level db."""
name = 'db'
default_config = dict(data_dir='')
def __init__(self, app):
BaseService.__init__(self, app)
assert self.app.config['data_dir']
self.uncommitted = dict()
self.stop_event = Event()
dbfile = os.path.join(self.app.config['data_dir'], 'leveldb')
LevelDB.__init__(self, dbfile)
self.h = random.randrange(10**50)
def _run(self):
self.stop_event.wait()
def stop(self):
self.stop_event.set()
# commit?
log.debug('closing db')
def __hash__(self):
return self.h
```
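A minimal usage sketch of the `LevelDB` wrapper above. The database path and keys are illustrative, the `leveldb` package must be installed, and this snippet is not taken from the original project:
```python
from pyethapp.leveldb_service import LevelDB

db = LevelDB('/tmp/example-leveldb')     # hypothetical database directory
db.put(b'head', b'\x00' * 32)            # staged in the in-memory `uncommitted` dict
assert db.get(b'head') == b'\x00' * 32   # served from the uncommitted cache
db.commit()                              # flushed to disk as a single WriteBatch
assert b'head' in db                     # __contains__ falls back to get()
```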
#### File: pyethapp/pyethapp/lmdb_service.py
```python
import os
import lmdb
from devp2p.service import BaseService
from ethereum.db import BaseDB
from ethereum.slogging import get_logger
from gevent.event import Event
log = get_logger('db')
# unique objects to represent state in the transient store, the delete
# operation will store the DELETE constant in the transient store, and NULL is
# used to avoid conflicts with None, effectivelly allowing the user to store it
NULL = object()
DELETE = object()
TB = (2 ** 10) ** 4
class LmDBService(BaseDB, BaseService):
"""A service providing an interface to a lmdb."""
name = 'db'
default_config = dict() # the defaults are defined in pyethapp.db_service
def __init__(self, app):
assert app.config['data_dir']
BaseService.__init__(self, app)
db_directory = os.path.join(app.config['data_dir'], 'lmdb')
self.env = lmdb.Environment(db_directory, map_size=TB)
self.db_directory = db_directory
self.uncommitted = dict()
self.stop_event = Event()
def _run(self):
self.stop_event.wait()
def stop(self):
self.stop_event.set()
def put(self, key, value):
self.uncommitted[key] = value
def delete(self, key):
self.uncommitted[key] = DELETE
def inc_refcount(self, key, value):
self.put(key, value)
def dec_refcount(self, key):
pass
def put_temporarily(self, key, value):
self.inc_refcount(key, value)
self.dec_refcount(key)
def reopen(self):
self.env.close()
del self.env
# the map_size is stored in the database itself after it's first created
self.env = lmdb.Environment(self.db_directory)
def get(self, key):
value = self.uncommitted.get(key, NULL)
if value is DELETE:
raise KeyError('key not in db')
if value is NULL:
with self.env.begin(write=False) as transaction:
value = transaction.get(key, NULL)
if value is NULL:
raise KeyError('key not in db')
self.uncommitted[key] = value
return value
def commit(self):
keys_to_delete = (
key
for key, value in list(self.uncommitted.items())
if value is DELETE
)
items_to_insert = (
(key, value)
for key, value in list(self.uncommitted.items())
if value not in (DELETE, NULL) # NULL shouldn't happen
)
with self.env.begin(write=True) as transaction:
for key in keys_to_delete:
transaction.delete(key)
cursor = transaction.cursor()
cursor.putmulti(items_to_insert, overwrite=True)
def revert_refcount_changes(self, epoch):
pass
def commit_refcount_changes(self, epoch):
pass
def cleanup(self, epoch):
pass
def __contains__(self, key):
try:
self.get(key)
except KeyError:
return False
return True
def __eq__(self, other):
return isinstance(other, self.__class__) and self.db == other.db
def __repr__(self):
return '<DB at %d uncommitted=%d>' % (id(self.env), len(self.uncommitted))
```
#### File: pyethapp/pyethapp/pow_service.py
```python
from __future__ import division
from builtins import object
from past.utils import old_div
import time
import gevent
import gipc
import random
from devp2p.service import BaseService
from ethereum.pow.ethpow import mine, TT64M1
from ethereum.slogging import get_logger
from ethereum.utils import encode_hex
log = get_logger('pow')
log_sub = get_logger('pow.subprocess')
class Miner(gevent.Greenlet):
rounds = 100
max_elapsed = 1.
def __init__(self, mining_hash, block_number, difficulty, nonce_callback,
hashrate_callback, cpu_pct=100):
self.mining_hash = mining_hash
self.block_number = block_number
self.difficulty = difficulty
self.nonce_callback = nonce_callback
self.hashrate_callback = hashrate_callback
self.cpu_pct = cpu_pct
self.is_stopped = False
super(Miner, self).__init__()
def _run(self):
nonce = random.randint(0, TT64M1)
while not self.is_stopped:
log_sub.trace('starting mining round')
st = time.time()
bin_nonce, mixhash = mine(self.block_number, self.difficulty, self.mining_hash,
start_nonce=nonce, rounds=self.rounds)
elapsed = time.time() - st
if bin_nonce:
log_sub.info('nonce found')
self.nonce_callback(bin_nonce, mixhash, self.mining_hash)
break
delay = elapsed * (1 - old_div(self.cpu_pct, 100.))
hashrate = int(self.rounds // (elapsed + delay))
self.hashrate_callback(hashrate)
log_sub.trace('sleeping', delay=delay, elapsed=elapsed, rounds=self.rounds)
gevent.sleep(delay + 0.001)
nonce += self.rounds
# adjust
adjust = old_div(elapsed, self.max_elapsed)
self.rounds = int(old_div(self.rounds, adjust))
log_sub.debug('mining task finished', is_stopped=self.is_stopped)
def stop(self):
self.is_stopped = True
self.join()
class PoWWorker(object):
"""
communicates with the parent process using: tuple(str_cmd, dict_kargs)
"""
def __init__(self, cpipe, cpu_pct):
self.cpipe = cpipe
self.miner = None
self.cpu_pct = cpu_pct
def send_found_nonce(self, bin_nonce, mixhash, mining_hash):
log_sub.info('sending nonce')
self.cpipe.put(('found_nonce', dict(bin_nonce=bin_nonce, mixhash=mixhash,
mining_hash=mining_hash)))
def send_hashrate(self, hashrate):
log_sub.trace('sending hashrate')
self.cpipe.put(('hashrate', dict(hashrate=hashrate)))
def recv_set_cpu_pct(self, cpu_pct):
self.cpu_pct = max(0, min(100, cpu_pct))
if self.miner:
self.miner.cpu_pct = self.cpu_pct
def recv_mine(self, mining_hash, block_number, difficulty):
"restarts the miner"
log_sub.debug('received new mining task', difficulty=difficulty)
assert isinstance(block_number, int)
if self.miner:
self.miner.stop()
self.miner = Miner(mining_hash, block_number, difficulty, self.send_found_nonce,
self.send_hashrate, self.cpu_pct)
self.miner.start()
def run(self):
while True:
cmd, kargs = self.cpipe.get()
assert isinstance(kargs, dict)
getattr(self, 'recv_' + cmd)(**kargs)
def powworker_process(cpipe, cpu_pct):
"entry point in forked sub processes, setup env"
gevent.get_hub().SYSTEM_ERROR = BaseException # stop on any exception
PoWWorker(cpipe, cpu_pct).run()
# parent process defined below ##############################################
class PoWService(BaseService):
name = 'pow'
default_config = dict(pow=dict(
activated=False,
cpu_pct=100,
coinbase_hex=None,
mine_empty_blocks=True
))
def __init__(self, app):
super(PoWService, self).__init__(app)
cpu_pct = self.app.config['pow']['cpu_pct']
self.cpipe, self.ppipe = gipc.pipe(duplex=True)
self.worker_process = gipc.start_process(
target=powworker_process, args=(self.cpipe, cpu_pct))
self.chain = app.services.chain
self.chain.on_new_head_cbs.append(self.mine_head_candidate)
self.hashrate = 0
@property
def active(self):
return self.app.config['pow']['activated']
def mine_head_candidate(self, _=None):
hc = self.chain.head_candidate
if not self.active or self.chain.is_syncing:
return
elif (hc.transaction_count == 0 and
not self.app.config['pow']['mine_empty_blocks']):
return
log.debug('mining', difficulty=hc.difficulty)
self.ppipe.put(('mine', dict(mining_hash=hc.mining_hash,
block_number=hc.number,
difficulty=hc.difficulty)))
def recv_hashrate(self, hashrate):
log.trace('hashrate updated', hashrate=hashrate)
self.hashrate = hashrate
def recv_found_nonce(self, bin_nonce, mixhash, mining_hash):
log.info('nonce found: {}'.format(encode_hex(mining_hash)))
block = self.chain.head_candidate
if block.mining_hash != mining_hash:
log.warn('mining_hash does not match')
gevent.spawn_later(0.5, self.mine_head_candidate)
return False
block.header.mixhash = mixhash
block.header.nonce = bin_nonce
if self.chain.add_mined_block(block):
log.debug('mined block %d (%s) added to chain' % (
block.number, encode_hex(block.hash[:8])))
return True
else:
log.debug('failed to add mined block %d (%s) to chain' % (
block.number, encode_hex(block.hash[:8])))
return False
def _run(self):
self.mine_head_candidate()
while True:
cmd, kargs = self.ppipe.get()
assert isinstance(kargs, dict)
getattr(self, 'recv_' + cmd)(**kargs)
def stop(self):
self.worker_process.terminate()
self.worker_process.join()
super(PoWService, self).stop()
```
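Both ends of the gipc pipe above use the same small convention: every message is a `(command, kwargs)` tuple that is routed to a `recv_<command>` method via `getattr`. A standalone sketch of that dispatch pattern (class and values are illustrative, no gipc required):
```python
class Dispatcher:
    """Routes (cmd, kwargs) messages to recv_<cmd> handler methods."""

    def recv_mine(self, mining_hash, block_number, difficulty):
        print('mine request', block_number, difficulty)

    def recv_hashrate(self, hashrate):
        print('hashrate update', hashrate)

    def handle(self, message):
        cmd, kwargs = message
        assert isinstance(kwargs, dict)
        getattr(self, 'recv_' + cmd)(**kwargs)


d = Dispatcher()
d.handle(('hashrate', dict(hashrate=12345)))
d.handle(('mine', dict(mining_hash=b'\x00' * 32, block_number=1, difficulty=1000)))
```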
#### File: pyethapp/tests/test_app.py
```python
from builtins import str
import os
import pytest
from pyethapp import app
from pyethapp import config
from click.testing import CliRunner
genesis_json = {
"nonce": "0x00000000000000ff",
"difficulty": "0xff0000000",
"mixhash": "0xff00000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0xff00000000000000000000000000000000000000",
"timestamp": "0xff",
"parentHash": "0xff00000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
"gasLimit": "0xffff",
"alloc": {
"ffffffffffffffffffffffffffffffffffffffff": {"balance": "9876543210"},
"0000000000000000000000000000000000000000": {"balance": "1234567890"}
}
}
genesis_yaml = """
eth:
genesis: {}
""".format(genesis_json)
def test_show_usage():
runner = CliRunner()
result = runner.invoke(app.app, [])
assert "Usage: app " in result.output, result.output
def test_no_such_option():
runner = CliRunner()
result = runner.invoke(app.app, ['--WTF'])
assert 'no such option: --WTF' in result.output, result.output
def test_no_such_command():
runner = CliRunner()
result = runner.invoke(app.app, ['eat'])
assert 'Error: No such command "eat"' in result.output, result.output
@pytest.mark.parametrize('content', ['', '<html/>', 'print "hello world"'])
def test_non_dict_yaml_as_config_file(content):
runner = CliRunner()
with runner.isolated_filesystem():
with open('config.yaml', 'w') as text_file:
text_file.write(content)
result = runner.invoke(app.app, ['-C', 'config.yaml'])
assert 'content of config should be an yaml dictionary' in result.output, result.output
@pytest.mark.parametrize('param', [('--Config', 'myconfig.yaml'),
('-C', 'myconfig.yaml'),
('-c', 'mygenesis.json'),
('-c', 'dict')])
def test_custom_config_file(param):
runner = CliRunner()
with runner.isolated_filesystem():
opt, arg = param
if arg.endswith('.yaml'):
with open(arg, 'w') as text_file:
text_file.write(genesis_yaml)
elif arg.endswith('.json'):
with open(arg, 'w') as text_file:
text_file.write(str(genesis_json))
else:
arg = str(genesis_json).replace('\n', '').replace(' ', '')
if opt == '-c':
arg = 'eth.genesis={}'.format(arg)
result = runner.invoke(app.app, [opt, arg, 'config'])
if arg.endswith('.json'):
patterns = ['genesis: {}'.format(param[1])]
else:
patterns = ["{}: '{}'".format(k, v) for k, v in list(genesis_json.items()) if k != 'alloc']
for pat in patterns:
assert pat in result.output, '`{}` not found'.format(pat)
for k, v in list(genesis_json['alloc'].items()):
assert k in result.output
assert v['balance'] in result.output
def test_config_from_datadir(tmpdir):
"""Test, that when given a `--data-dir`, the app
reads the config from the '`--data-dir`/config.yaml'.
"""
DIR = "datadir"
runner = CliRunner()
with runner.isolated_filesystem():
os.mkdir(DIR)
runner.invoke(app.app, ["--data-dir", DIR, "config"])
with open(os.path.join(DIR, config.CONFIG_FILE_NAME), "w") as configfile:
configfile.write("p2p:\n max_peers: 9000")
result = runner.invoke(app.app, ["--data-dir", DIR, "config"])
assert "max_peers: 9000" in result.output
if __name__ == '__main__':
test_show_usage()
test_no_such_option()
test_no_such_command()
test_non_dict_yaml_as_config_file('')
test_non_dict_yaml_as_config_file('<html/>')
test_non_dict_yaml_as_config_file('print "hello world"')
test_custom_config_file(('--Config', 'myconfig.yaml'))
test_custom_config_file(('-C', 'myconfig.yaml'))
test_custom_config_file(('-c', 'mygenesis.json'))
test_custom_config_file(('-c', 'dict'))
```
#### File: pyethapp/tests/test_genesis.py
```python
from pprint import pprint
import pytest
from ethereum.db import DB
from ethereum.config import Env, default_config
from ethereum.genesis_helpers import mk_genesis_block
from ethereum.state import State
from ethereum.utils import encode_hex
from pyethapp.utils import merge_dict
from pyethapp.config import update_config_from_genesis_json
import pyethapp.config as konfig
from pyethapp.profiles import PROFILES
def test_genesis_config():
"test setting genesis alloc using the config"
alloc = {'1' * 40: {'wei': 1}, # wei
'2' * 40: {'balance': 2}, # balance
'3' * 20: {'balance': 3}, # 20 bytes
}
config = dict(eth=dict(genesis=dict(alloc=alloc)))
konfig.update_config_with_defaults(config, {'eth': {'block': default_config}})
# Load genesis config
update_config_from_genesis_json(config, config['eth']['genesis'])
bc = config['eth']['block']
pprint(bc)
env = Env(DB(), bc)
genesis = mk_genesis_block(env)
state = State(genesis.state_root, env)
for address, value_dict in list(alloc.items()):
value = list(value_dict.values())[0]
assert state.get_balance(address) == value
@pytest.mark.parametrize('profile', list(PROFILES.keys()))
def test_profile(profile):
config = dict(eth=dict())
konfig.update_config_with_defaults(config, {'eth': {'block': default_config}})
# Set config values based on profile selection
merge_dict(config, PROFILES[profile])
# Load genesis config
update_config_from_genesis_json(config, config['eth']['genesis'])
bc = config['eth']['block']
pprint(bc)
env = Env(DB(), bc)
genesis = mk_genesis_block(env)
assert encode_hex(genesis.hash) == config['eth']['genesis_hash']
```
#### File: pyethapp/pyethapp/validator_service.py
```python
from __future__ import print_function
import time
import random
import gevent
from devp2p.service import BaseService
from ethereum.slogging import get_logger
from ethereum.utils import privtoaddr, encode_hex, sha3
from ethereum.casper_utils import generate_validation_code, call_casper, check_skips, \
get_timestamp, \
get_casper_ct, get_dunkle_candidates, sign_block, \
make_withdrawal_signature, RandaoManager
from ethereum.transactions import Transaction  # used by withdraw()/deposit() below
log = get_logger('validator')
BLOCK_TIME = 3
global_block_counter = 0
casper_ct = get_casper_ct()
class ValidatorService(BaseService):
name = 'validator'
default_config = dict(validator=dict(
activated=False,
privkey='',
deposit_size=0,
seed=''
))
def __init__(self, app):
super(ValidatorService, self).__init__(app)
self.config = app.config
self.chainservice = app.services.chain
self.chain = self.chainservice.chain
self.chain.time = lambda: int(time.time())
self.key = self.config['validator']['privkey']
print("*"*100)
print(repr(self.key))
print(len(self.key))
self.address = privtoaddr(self.key)
self.validation_code = generate_validation_code(self.address)
self.validation_code_hash = sha3(self.validation_code)
# TODO: allow configure seed?
seed = sha3(self.key)
self.randao = RandaoManager(seed)
self.received_objects = {}
self.used_parents = {}
self.next_skip_count = 0
self.next_skip_timestamp = 0
self.epoch_length = self.call_casper('getEpochLength')
self.active = False
self.activated = self.app.config['validator']['activated']
app.services.chain.on_new_head_cbs.append(self.on_new_head)
self.update_activity_status()
self.cached_head = self.chain.head_hash
def on_new_head(self, block):
if not self.activated:
return
if self.app.services.chain.is_syncing:
return
self.update()
def update_activity_status(self):
start_epoch = self.call_casper('getStartEpoch', [self.validation_code_hash])
now_epoch = self.call_casper('getEpoch')
end_epoch = self.call_casper('getEndEpoch', [self.validation_code_hash])
if start_epoch <= now_epoch < end_epoch:
self.active = True
self.next_skip_count = 0
self.next_skip_timestamp = get_timestamp(self.chain, self.next_skip_count)
else:
self.active = False
def tick(self):
delay = 0
# Try to create a block
# Conditions:
# (i) you are an active validator,
# (ii) you have not yet made a block with this parent
if self.active and self.chain.head_hash not in self.used_parents:
t = time.time()
# Is it early enough to create the block?
if t >= self.next_skip_timestamp and (not self.chain.head or t > self.chain.head.header.timestamp):
# Wrong validator; in this case, just wait for the next skip count
if not check_skips(self.chain, self.validation_code_hash, self.next_skip_count):
self.next_skip_count += 1
self.next_skip_timestamp = get_timestamp(self.chain, self.next_skip_count)
log.debug('Not my turn, wait',
next_skip_count=self.next_skip_count,
next_skip_timestamp=self.next_skip_timestamp,
now=int(time.time()))
return
self.used_parents[self.chain.head_hash] = True
blk = self.make_block()
assert blk.timestamp >= self.next_skip_timestamp
if self.chainservice.add_mined_block(blk):
self.received_objects[blk.hash] = True
log.debug('0x%s made and added block %d (%s) to chain' % (encode_hex(self.address[:8]), blk.header.number, encode_hex(blk.header.hash[:8])))
else:
log.debug('0x%s failed to make and add block %d (%s) to chain' % (encode_hex(self.address[:8]), blk.header.number, encode_hex(blk.header.hash[:8])))
self.update()
else:
delay = max(self.next_skip_timestamp - t, 0)
# Sometimes we received blocks too early or out of order;
# run an occasional loop that processes these
if random.random() < 0.02:
self.chain.process_time_queue()
self.chain.process_parent_queue()
self.update()
return delay
def make_block(self):
pre_dunkle_count = self.call_casper('getTotalDunklesIncluded')
dunkle_txs = get_dunkle_candidates(self.chain, self.chain.state)
blk = self.chainservice.head_candidate
randao = self.randao.get_parent(self.call_casper('getRandao', [self.validation_code_hash]))
blk = sign_block(blk, self.key, randao, self.validation_code_hash, self.next_skip_count)
# Make sure it's valid
global global_block_counter
global_block_counter += 1
for dtx in dunkle_txs:
assert dtx in blk.transactions, (dtx, blk.transactions)
log.debug('made block with timestamp %d and %d dunkles' % (blk.timestamp, len(dunkle_txs)))
return blk
def update(self):
if self.cached_head == self.chain.head_hash:
return
self.cached_head = self.chain.head_hash
if self.chain.state.block_number % self.epoch_length == 0:
self.update_activity_status()
if self.active:
self.next_skip_count = 0
self.next_skip_timestamp = get_timestamp(self.chain, self.next_skip_count)
log.debug('Head changed: %s, will attempt creating a block at %d' % (encode_hex(self.chain.head_hash), self.next_skip_timestamp))
def withdraw(self, gasprice=20 * 10**9):
sigdata = make_withdrawal_signature(self.key)
txdata = casper_ct.encode('startWithdrawal', [self.validation_code_hash, sigdata])
tx = Transaction(self.chain.state.get_nonce(self.address), gasprice, 650000, self.chain.config['CASPER_ADDR'], 0, txdata).sign(self.key)
self.chainservice.add_transaction(tx, force=True)
def deposit(self, value, gasprice=20 * 10**9):
# `value` is the deposit size in ether; the account must cover it plus the worst-case gas cost
assert self.chain.state.get_balance(self.address) >= value * 10**18 + gasprice * 1000000
txdata = casper_ct.encode('deposit', [self.validation_code, self.randao.get(9999)])
# sign and submit, mirroring withdraw() above
tx = Transaction(self.chain.state.get_nonce(self.address), gasprice, 1000000,
self.chain.config['CASPER_ADDR'], value * 10**18, txdata).sign(self.key)
self.chainservice.add_transaction(tx, force=True)
def call_casper(self, fun, args=[]):
return call_casper(self.chain.state, fun, args)
def _run(self):
while True:
if self.activated:
delay = self.tick()
gevent.sleep(delay)
def stop(self):
super(ValidatorService, self).stop()
```
{
"source": "joseph-iussa/dupfinder",
"score": 3
} |
#### File: tests/integration/core_test.py
```python
import pytest
from pathlib import Path
from dupfinder.core import get_absolute_root_paths, get_absolute_file_paths


def test__get_absolute_root_paths__throws_on_bad_path(tmpdir):
    bad_root_paths = [str(Path(str(tmpdir), 'not/a/path'))]
    with pytest.raises(FileNotFoundError):
        get_absolute_root_paths(bad_root_paths)


def test__get_absolute_file_paths__gets_all_paths(tmpdir):
    test_paths = (
        Path(str(tmpdir), 'foo.txt'),
        Path(str(tmpdir), 'sub/foo.txt'),
        Path(str(tmpdir), 'sub/bar.txt'),
        Path(str(tmpdir), 'sub/sub/baz.txt'),
        Path(str(tmpdir), 'sub/sub/box.txt'),
    )
    for path in test_paths:
        if not path.parent.exists():
            path.parent.mkdir(parents=True)
        path.touch()
    test_root_paths = (
        str(Path(str(tmpdir), '.').resolve()),
        str(Path(str(tmpdir), 'sub').resolve())
    )
    returned_paths = get_absolute_file_paths(test_root_paths)
    assert all(str(test_path.resolve()) in returned_paths for test_path in test_paths)
```
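The `dupfinder.core` functions exercised by these tests are not included in this snippet. A minimal sketch that would satisfy the assertions above (an assumption for illustration, not the project's actual implementation) could look like:
```python
import os
from pathlib import Path


def get_absolute_root_paths(root_paths):
    # resolve every root, failing fast on paths that do not exist
    resolved = []
    for root in root_paths:
        resolved.append(str(Path(root).resolve(strict=True)))  # raises FileNotFoundError
    return resolved


def get_absolute_file_paths(root_paths):
    # walk each root and collect the absolute path of every file found
    file_paths = set()
    for root in get_absolute_root_paths(root_paths):
        for dirpath, _dirnames, filenames in os.walk(root):
            for name in filenames:
                file_paths.add(str(Path(dirpath, name).resolve()))
    return file_paths
```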
{
"source": "josephizatt/furucombo-contract",
"score": 2
} |
#### File: furucombo-contract/scripts/certora_ci.py
```python
import os
import time
import subprocess
import argparse
RULE_DIR = './specs/scripts'
CONTRACT_RULES_MAP = {
# Proxy, Registry and libPrams
"LibParam.sol": ["libParam.sh", "naiveLibParam.sh", "naiveLibParamB63.sh"],
"Registry.sol": ["privRegistry.sh", "registry.sh"],
"Proxy.sol": ["privProxy.sh", "proxy.sh proxy"],
# Handler
"HAaveProtocol.sol": ["runHAave.sh"],
"HAaveProtocolV2.sol": ["runHAave2.sh"],
"HBalancer.sol": ["runHBalancer.sh", "runHBalancerSpecific.sh"],
"HBProtocol.sol": ["runHBProtocol.sh", "runHBProtocolSpecific.sh"],
"HCEther.sol": ["runHCEther.sh"],
"HCToken.sol": ["runHCToken.sh"],
"HComptroller.sol": ["runHComptroller.sh"],
"HSCompound.sol": ["runHSCompound.sh", "runHSCompoundSpecific.sh"],
"HCurve.sol": ["runHCurve.sh"],
"HCurveDao.sol": ["runHCurveDao.sh"],
"HFunds.sol": ["runHFunds.sh"],
"HFurucomboStaking.sol": ["runHFurucomboStaking.sh"],
"HGasTokens.sol": ["runHGasTokens.sh"],
"HMaker.sol": ["runHMaker.sh", "runHMakerSpecific.sh"],
"HStakingRewardsAdapter.sol": ["runHStakingRewardsAdapter.sh"],
"HSushiSwap.sol": ["runHSushiSwap.sh"],
"HUniswapV2.sol": ["runHUniswapV2.sh"],
"HYVault.sol": ["runHYVault.sh"],
"HWeth.sol": ["runHWeth.sh"],
"HUniswapV3.sol": ["runHUniswapV3.sh"],
"HPolygon.sol": ["runHPolygon.sh"],
"HOneInchV3.sol": ["runHOneInchV3.sh"],
"HGelatoV2LimitOrder.sol": ["runHGelatoV2LimitOrder.sh"]
}
def run_cmd(cmd):
process = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
status = process.wait()
out, err = process.communicate()
if status > 0:
raise Exception("execute cmd(%s): %r" % (cmd, err))
return out
def get_changed_contracts():
out = run_cmd("git log -m -1 --name-only --pretty=\"format: \"")
files = str(out, 'utf-8').split('\n')
sols = []
for f in files:
if str.endswith(f, '.sol') and str.startswith(f, 'contracts'):
sols.append(f.split('/')[-1])
print("changed contract: \n %r" % sols)
return sols
def _exec_certora_rule(rule):
_cmd = "%s/%s" % (RULE_DIR, rule)
print(_cmd)
report = {
"job_id": "Not found, please review certora prover home",
"job_status": '',
"job_output": ''
}
process = subprocess.Popen(
[_cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
_prefix = 'You can follow up on the status: '
while True:
output = process.stdout.readline().decode('utf-8')
if _prefix in output:
output = output.replace(_prefix, '')
report["job_id"] = output.split('?')[0].split('/')[-1]
report["job_status"] = output
report["job_output"] = output.replace("jobStatus", "output")
break
if 'Fatal error' in output:
report["job_id"] = output
break
if process.poll() is not None:
report["job_id"] = "Not found url"
break
process.terminate()
return report
def run_certora_rules(sols):
reports = dict()
for sol in sols:
if sol not in CONTRACT_RULES_MAP.keys():
print("Can't find %s rules" % sol)
continue
reports[sol] = dict()
rules = CONTRACT_RULES_MAP.get(sol, [])
for rule in rules:
report = _exec_certora_rule(rule)
reports[sol][rule] = report
return reports
def output_reports(reports):
print("--- Report Output ---")
for sol, rule in reports.items():
for rule_name, rule_report in rule.items():
print("%s(%s) job_id:%s" %
(sol, rule_name, rule_report['job_id']))
print(rule_report['job_status'].replace('\n', ''))
print(rule_report['job_output'].replace('\n', ''))
print('-------\n')
if __name__ == "__main__":
# parse command
parser = argparse.ArgumentParser(
description='Process certora prover rules.')
parser.add_argument('--range', "-r", type=str, nargs='?',
default='diff', choices=['diff', 'all'])
args = parser.parse_args()
print(args)
# execute certora rule
if args.range == 'diff':
sols = get_changed_contracts()
else:
sols = CONTRACT_RULES_MAP.keys()
print("checked contract: \n %r" % sols)
# Run certora rules
reports = run_certora_rules(sols)
output_reports(reports)
```
{
"source": "JosephJamesDoyle87/software",
"score": 3
} |
#### File: software/tests/failure_test.py
```python
import unittest
-class TestSimple(unittest.TestCase):
-
-    def test_failure(self):
-        self.assertTrue(False)
+def test_success():
+    assert True
```
{
"source": "JosephJContreras/dbreport",
"score": 3
} |
#### File: tests/data/db_setup.py
```python
import os
import sqlite3 as sq3
import sys
from text_unidecode import unidecode
# set up paths for where to find original database
BASE_PATH = os.path.split(os.path.abspath(__file__))[0]
BASE_DATABASE_FILENAME = "chinook.db"
BASE_DATABASE_PATH = os.path.join(BASE_PATH, BASE_DATABASE_FILENAME)
# Or where to find dump file and create test database
DUMP_PATH = os.path.join(BASE_PATH, f"{BASE_DATABASE_FILENAME[:-3]}_dump.sql")
TEST_PATH = os.path.join(BASE_PATH, f"test_{BASE_DATABASE_FILENAME}")
VIEW_DIR = os.path.join(BASE_PATH, "views")
def extract_dump(db_path, dump_path):
"""
Create dump of database given the path to the database file
PARAMETERS
db_path: str: path to database file
dump_path: str: path to the dump file to be saved
"""
with sq3.connect(db_path) as conn:
with open(dump_path, "w", newline="") as f:
# noinspection PyTypeChecker
for line in conn.iterdump():
f.write(unidecode(line))
def load_dump(db_path, dump_path):
"""
Create test database given path to dump file
PARAMETERS
db_path: str: path to database file to be created
dump_path: str: path to the dump file to be used to create database
"""
# start by removing the existing database if one exists
try:
os.remove(db_path)
except PermissionError:
print(f"could not delete {db_path}, continuing anyway")
except FileNotFoundError:
# if the file was not found, nothing needs to be done.
pass
with open(dump_path, "r") as f:
sql = f.read()
with sq3.connect(db_path) as cursor:
try:
cursor.executescript(sql)
except sq3.OperationalError:
# most likely cause is the table already exists
pass
def add_views(db_path, view_dir):
"""
add views to database
PARAMETERS
db_path: str: path to database to have views added
view_dir: str: path the directory with view files. The contents of the
files are not the sql to create the view, but rather the
query the view should have. The view name uses the filename
of the query
"""
conn = sq3.connect(db_path)
cursor = conn.cursor()
for file in os.listdir(view_dir):
if not file.lower().endswith(".sql"):
# skip all non .sql files
continue
path = os.path.join(view_dir, file)
sql = f"CREATE VIEW IF NOT EXISTS [{file[:-4]}] AS\n"
with open(path, "r") as f:
sql += f.read()
try:
cursor.execute(sql)
conn.commit()
except sq3.OperationalError:
print(f"failed to create view {file}")
continue
cursor.close()
conn.close()
if __name__ == "__main__":
args = sys.argv[1]
if args == "create-dump":
# to extract database and create dump file run...
extract_dump(BASE_DATABASE_PATH, DUMP_PATH)
elif args == "load-dump":
# to load dump file and create test db run...
load_dump(TEST_PATH, DUMP_PATH)
add_views(TEST_PATH, VIEW_DIR)
else:
print("did not create anything")
```
{
"source": "josephjcontreras/FEMethods",
"score": 3
} |
#### File: femethods/core/_base_elements.py
```python
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
# Importing loads is only used for checking the type. Find a better way to do
# this without needing to import loads
from femethods.loads import Load, PointLoad
from femethods.mesh import Mesh
from femethods.reactions import Reaction
BOUNDARY_CONDITIONS = List[Tuple[Optional[int], Optional[int]]]
# Allow upper case letters for variable names to match engineering conventions
# for variables, such as E for Young's modulus and I for the polar moment of
# inertia
# noinspection PyPep8Naming
class Base(ABC):
"""base object to be used as base for both FEM analysis"""
def __init__(self, length: float, E: float = 1, Ixx: float = 1) -> None:
self.length = length
self.E = E # Young's modulus
self.Ixx = Ixx # area moment of inertia
@property
def length(self) -> float:
return self._length
@length.setter
def length(self, length: float) -> None:
if length <= 0:
# length must be a positive number
raise ValueError("length must be positive!")
self._length = length
@property
def E(self) -> float:
return self._E
@E.setter
def E(self, E: float) -> None:
if E <= 0:
raise ValueError("Young's modulus must be positive!")
self._E = E
@property
def Ixx(self) -> float:
return self._Ixx
@Ixx.setter
def Ixx(self, Ixx: float) -> None:
if Ixx <= 0:
raise ValueError("Area moment of inertia must be positive!")
self._Ixx = Ixx
# Allow upper case letters for variable names to match engineering conventions
# for variables, such as E for Young's modulus and I for the polar moment of
# inertia
# noinspection PyPep8Naming
class Element(Base, ABC):
"""General element that will be inherited from for specific elements"""
def __init__(self, length: float, E: float = 1, Ixx: float = 1) -> None:
super().__init__(length, E, Ixx)
self._node_deflections = None
self._K = None # global stiffness matrix
self._reactions: Optional[List[Reaction]] = None
self._loads: Optional[List[Load]] = None
@property
def loads(self) -> Optional[List[Load]]:
return self._loads
@loads.setter
def loads(self, loads: List[Load]) -> None:
# validate that loads is a list of valid Loads
for load in loads:
if not isinstance(load, Load):
raise TypeError(f"type {type(load)} is not of type Load")
self.invalidate()
self._loads = loads
self.__validate_load_locations()
def __validate_load_locations(self) -> bool:
"""All loads and reactions must have unique locations
This function will validate that all loads do not line up with any
reactions. If a load is aligned with a reaction, it is adjusted by a
slight amount so it can be solved.
:returns True if successful, False otherwise
"""
assert self.reactions is not None
assert self.loads is not None
for reaction in self.reactions:
for load in self.loads:
if load.location == reaction.location:
# the load is directly on the reaction. Offset the load
# location a tiny amount so that it is very close, but not
# exactly on the reaction.
# This is done so that the global stiffness matrix
# is calculated properly to give accurate results
# offset the load towards the inside of the beam to be sure
# the new load position is located on the beam.
if reaction.location == 0:
load.location += 1e-8
warn(
f"load location moved by 1e-8 to avoid reaction "
f"at {reaction.location}"
)
else:
load.location -= 1e-8
warn(
f"load location moved by -1e-8 to avoid reaction"
f" at {reaction.location}"
)
return True
@property
def reactions(self) -> Optional[List[Reaction]]:
return self._reactions
@reactions.setter
def reactions(self, reactions: List[Reaction]) -> None:
for reaction in reactions:
if not isinstance(reaction, Reaction):
msg = f"type {type(reaction)} is not of type Reaction"
raise TypeError(msg)
self.invalidate()
self._reactions = reactions
@abstractmethod
def remesh(self) -> None:
"""force a remesh calculation and invalidate any calculation results"""
raise NotImplementedError("method must be overloaded")
def invalidate(self) -> None:
"""invalidate the element to force resolving"""
self._node_deflections = None
self._K = None
if self.reactions is not None:
for reaction in self.reactions:
reaction.invalidate()
@property
def K(self) -> np.array:
"""global stiffness matrix"""
if self._K is None:
self._K = self.stiffness_global()
return self._K
def solve(self) -> None:
"""solve the system the FEM system to define the nodal displacements
and reaction forces.
"""
self.__validate_load_locations()
self.remesh()
self._calc_node_deflections()
self._get_reaction_values()
@abstractmethod
def _calc_node_deflections(self) -> None:
raise NotImplementedError("must be overloaded!")
@abstractmethod
def _get_reaction_values(self) -> None:
raise NotImplementedError("must be overloaded!")
@abstractmethod
def stiffness(self, L: float) -> None:
"""return local stiffness matrix, k, as numpy array evaluated with beam
element length L, where L defaults to the length of the beam
"""
raise NotImplementedError("Method must be overloaded!")
@abstractmethod
def stiffness_global(self) -> None:
# Initialize the global stiffness matrix, then iterate over the
# elements, calculate a local stiffness matrix, and add it to the
# global stiffness matrix.
raise NotImplementedError("Method must be overloaded!")
@staticmethod
def apply_boundary_conditions(
k: np.array, bcs: BOUNDARY_CONDITIONS
) -> np.array:
"""
Given the stiffness matrix 'k', and the boundary conditions as a list
of tuples, apply the boundary conditions to the stiffness matrix by
setting the rows and columns that correspond to the boundary conditions
to zeros, with ones on the diagonal.
The boundary conditions (bcs) are in the form
bcs = [(displacement1, rotation1), (displacement2, rotation2)]
For the boundary condition, if the conditional evaluates to None, then
movement is allowed, otherwise no displacement is allowed.
The boundary condition coordinates must match the stiffness matrix.
That is, if the stiffness matrix is a local matrix, the boundary
conditions must also be local.
returns the adjusted stiffness matrix after the boundary
conditions are applied
"""
def apply(k_local: np.array, i: int) -> np.array:
"""sub function to apply the boundary condition at row/col i to
stiffness matrix k_local
return the stiffness matrix k_local with boundary conditions applied
"""
k_local[i] = 0 # set entire row to zeros
k_local[:, i] = 0 # set entire column to zeros
k_local[i][i] = 1 # set diagonal to 1
return k_local
# TODO: Check the sizes of the boundary conditions and stiffness matrix
for node, bc in enumerate(bcs):
v, q = bc
if v is not None:
k = apply(k, node * 2)
if q is not None:
k = apply(k, node * 2 + 1)
return k
# Allow upper case letters for variable names to match engineering conventions
# for variables, such as E for Young's modulus and I for the polar moment of
# inertia
# noinspection PyPep8Naming
class BeamElement(Element):
"""base beam element"""
def __init__(
self,
length: float,
loads: List[Load],
reactions: List[Reaction],
E: float = 1,
Ixx: float = 1,
):
super().__init__(length, E, Ixx)
self.reactions = reactions
self.loads = loads # note loads are set after reactions
self.mesh = Mesh(length, loads, reactions, 2)
def remesh(self) -> None:
assert self.loads is not None
assert self.reactions is not None
self.mesh = Mesh(self.length, self.loads, self.reactions, 2)
self.invalidate()
@property
def node_deflections(self) -> np.ndarray:
if self._node_deflections is None:
self._node_deflections = self._calc_node_deflections()
return self._node_deflections
def __get_boundary_conditions(self) -> BOUNDARY_CONDITIONS:
# Initialize the boundary conditions to None for each node, then
# iterate over reactions and apply them to the boundary conditions
# based on the reaction type.
assert self.reactions is not None
bc: BOUNDARY_CONDITIONS = [
(None, None) for _ in range(len(self.mesh.nodes))
]
for r in self.reactions:
assert r is not None
i = self.mesh.nodes.index(r.location)
bc[i] = r.boundary
return bc
def _calc_node_deflections(self) -> np.ndarray:
"""solve for vertical and angular displacement at each node"""
assert self.loads is not None
# Get the boundary conditions from the reactions
bc = self.__get_boundary_conditions()
# Apply boundary conditions to global stiffness matrix. Note that the
# boundary conditions are applied to a copy of the stiffness matrix to
# avoid changing the property K, so it can still be used with further
# calculations (ie, for calculating reaction values)
kg = self.K.copy()
kg = self.apply_boundary_conditions(kg, bc)
# Use the same method of adding the input loads as the boundary
# conditions. Start by initializing a numpy array to zero loads, then
# iterate over the loads and add them to the appropriate index based on
# the load type (force or moment)
# noinspection PyUnresolvedReferences
p = np.zeros((self.mesh.dof, 1))
for ld in self.loads:
i = self.mesh.nodes.index(ld.location)
if isinstance(ld, PointLoad):
p[i * 2][0] = ld.magnitude # input force
else:
p[i * 2 + 1][0] = ld.magnitude # input moment
# Solve the global system of equations {p} = [K]*{d} for {d}
# save the deflection vector for the beam, so the analysis can be
# reused without recalculating the stiffness matrix.
# This vector should be cleared anytime any of the beam parameters
# gets changed.
self._node_deflections = np.linalg.solve(kg, p)
return self._node_deflections
def _get_reaction_values(self) -> np.ndarray:
"""Calculate the nodal forces acting on the beam. Note that the forces
will also include the input forces.
reactions are calculated by solving the matrix equation
{r} = [K] * {d}
where
- {r} is the vector of forces acting on the beam
- [K] is the global stiffness matrix (without BCs applied)
- {d} displacements of nodes
"""
K = self.K # global stiffness matrix
d = self.node_deflections # force displacement vector
# noinspection PyUnresolvedReferences
r = np.matmul(K, d)
assert self.reactions is not None
for ri in self.reactions:
i = self.mesh.nodes.index(ri.location)
force, moment = r[i * 2 : i * 2 + 2]
# set the values in the reaction objects
ri.force = force[0]
ri.moment = moment[0]
return r
def shape(self, x: float, L: Optional[float] = None) -> np.ndarray:
"""return an array of the shape functions evaluated at x the local
x-value
"""
if L is None:
L = self.length
N1 = 1 / L ** 3 * (L ** 3 - 3 * L * x ** 2 + 2 * x ** 3)
N2 = 1 / L ** 2 * (L ** 2 * x - 2 * L * x ** 2 + x ** 3)
N3 = 1 / L ** 3 * (3 * L * x ** 2 - 2 * x ** 3)
N4 = 1 / L ** 2 * (-L * x ** 2 + x ** 3)
return np.array([N1, N2, N3, N4])
def plot_shapes(self, n: int = 25) -> None: # pragma: no cover
"""plot shape functions for the with n data points"""
x = np.linspace(0, self.length, n)
# set up list of axes with a grid where the two figures in each column
# share an x axis
axes = []
fig = plt.figure()
axes.append(fig.add_subplot(221))
axes.append(fig.add_subplot(222))
axes.append(fig.add_subplot(223, sharex=axes[0]))
axes.append(fig.add_subplot(224, sharex=axes[1]))
N: List[List[int]] = [[], [], [], []]
for xi in x:
n_local = self.shape(xi)
for i in range(4):
N[i].append(n_local[i])
for k in range(4):
ax = axes[k]
ax.grid(True)
ax.plot(x, N[k], label=f"$N_{k + 1}$")
ax.legend()
fig.subplots_adjust(wspace=0.25, hspace=0)
plt.show()
def stiffness(self, L: float) -> np.ndarray:
"""return local stiffness matrix, k, as numpy array evaluated with beam
element length L
"""
E = self.E
Ixx = self.Ixx
k = np.array(
[
[12, 6 * L, -12, 6 * L],
[6 * L, 4 * L ** 2, -6 * L, 2 * L ** 2],
[-12, -6 * L, 12, -6 * L],
[6 * L, 2 * L ** 2, -6 * L, 4 * L ** 2],
]
)
return E * Ixx / L ** 3 * k
def stiffness_global(self) -> np.array:
# Initialize the global stiffness matrix, then iterate over the
# elements, calculate a local stiffness matrix, and add it to the
# global stiffness matrix.
# noinspection PyUnresolvedReferences
kg = np.zeros((self.mesh.dof, self.mesh.dof))
for e in range(self.mesh.num_elements):
# iterate over all the elements and add the local stiffness matrix
# to the global stiffness matrix at the proper index
k = self.stiffness(self.mesh.lengths[e]) # local stiffness matrix
i1, i2 = (e * 2, e * 2 + 4) # global slicing index
kg[i1:i2, i1:i2] = kg[i1:i2, i1:i2] + k # current element
self._K = kg
return self._K
```
#### File: josephjcontreras/FEMethods/run_examples.py
```python
from femethods.elements import Beam
from femethods.loads import PointLoad
from femethods.reactions import FixedReaction, PinnedReaction
def example_1():
"""
Cantilevered Beam with Fixed Support and End Loading
"""
print("=" * 79)
print("Example 1")
print(
"Show an example with a cantilevered beam with a fixed support and "
"point load at the end\n"
)
beam_len = 10
# Note that both the reaction and load are both lists. They must always be
# given to Beam as a list,
r = [FixedReaction(0)] # define reactions as list
p = [PointLoad(magnitude=-2, location=beam_len)] # define loads as list
b = Beam(beam_len, loads=p, reactions=r, E=29e6, Ixx=125)
# an explicit solve is required to calculate the reaction values
b.solve()
print(b)
def example_2():
"""
Cantilevered Beam with 3 Pinned Supports and End Loading
"""
print("=" * 79)
print("Example 2")
print("Show an example with 3 Pinned Supports and End Loading\n")
beam_len = 10
# Note that both the reaction and load are both lists. They must always be
# given to Beam as a list,
r = [PinnedReaction(0), PinnedReaction(2), PinnedReaction(6)] # define reactions
p = [PointLoad(magnitude=-2, location=beam_len)] # define loads
b = Beam(beam_len, loads=p, reactions=r, E=29e6, Ixx=125)
# an explicit solve is required to calculate the reaction values
b.solve()
print(b)
if __name__ == "__main__":
example_1()
example_2()
```
{
"source": "josephjcontreras/FormatSearch",
"score": 3
} |
#### File: josephjcontreras/FormatSearch/FormatSearch.py
```python
import sublime
import sublime_plugin
from sublime import get_clipboard, set_clipboard
from Default.paragraph import expand_to_paragraph
class Base(object):
def process(self, func):
t = self.getText()
set_clipboard(func(t))
self.view.window().status_message('data copied to clipboard')
def getText(self):
view = self.view
text = []
if view.sel():
for region in view.sel():
if region.empty():
# text is not selected, expand to the current paragraph
text.append(view.substr(expand_to_paragraph(view, region.b)))
else:
# there is some text already selected, use that only
text.append(view.substr(region))
text = text[0].split('\n')
return text
def parseText(self, paragraphs, quote=False, suffix=''):
parsed_text = []
for par in paragraphs:
if len(par) > 0:
parsed_text.append(str(par) + suffix)
if quote:
# parsed text should should be enclosed in double quotes, and
# separated by commas and space
string = ', '.join(['"' + item + '"' for item in parsed_text])
else:
# create a space delimited list of the parsed text
string = ' '.join(parsed_text)
return string
class PlainCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.plain)
def plain(self, text):
return self.parseText(text, quote=False, suffix='')
class DrawingCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.drawing)
def drawing(self, text):
return self.parseText(text, quote=False, suffix='.SLDDRW')
class PartCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.part)
def part(self, text):
return self.parseText(text, quote=False, suffix='.SLDPRT')
class AssemblyCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.assembly)
def assembly(self, text):
return self.parseText(text, quote=False, suffix='.SLDASM')
class TcmCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.TCM)
def TCM(self, text):
return self.parseText(text, quote=True, suffix='')
class CommaCommand(sublime_plugin.TextCommand, Base):
def run(self, edit):
self.process(self.comma)
def comma(self, text):
return self.parseText(text)
def parseText(self, paragraphs):
parsed_text = []
for par in paragraphs:
if len(par) > 0:
parsed_text.append(str(par))
string = ', '.join(parsed_text)
return string
```
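To illustrate what the formatting commands above produce, here is the core `parseText` logic extracted as a plain function, so it can run without the Sublime Text API (function name and values are illustrative):
```python
def parse_text(paragraphs, quote=False, suffix=''):
    # mirror Base.parseText: drop empty lines, append the suffix,
    # then either quote-and-comma-join or space-join
    parsed_text = [str(par) + suffix for par in paragraphs if len(par) > 0]
    if quote:
        return ', '.join('"' + item + '"' for item in parsed_text)
    return ' '.join(parsed_text)


print(parse_text(['101', '102'], suffix='.SLDPRT'))   # 101.SLDPRT 102.SLDPRT
print(parse_text(['101', '102'], quote=True))         # "101", "102"
```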
{
"source": "josephjcontreras/revisions",
"score": 4
} |
#### File: josephjcontreras/revisions/revisions.py
```python
from itertools import cycle


class MaximumRevisionError(Exception):
    """
    Exception for when the sequence runs out and revisions start repeating
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


def revisions(sequence="full"):
    """
    Generates infinite series of revisions

    Creates a generator that will iterate over an infinite sequence of
    revisions in the form
    0, A, B, C, ..., X, Y, Z, AA, AB, AC, ..., AX, AY, AZ, BA, BB, BC, ...

    yield: revision
    yield type: str
    """
    if sequence == "full":
        sequence = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    elif sequence == "partial":
        # missing i, o, and s
        sequence = "ABCDEFGHJKLMNPQRTUVWXYZ"
    else:
        # for simplicity here, do not allow custom sequences
        raise ValueError(
            f"sequence parameter must be either "
            f"'full', or 'partial', not {sequence}"
        )

    # yield the initial release as revision 0
    yield "0"

    # create a revision cycle, and a suffix cycle. The suffix cycle is the same
    # as the revision cycle, but will be incremented at a different rate. The
    # suffix will be used when the revision cycles through and starts repeating
    rev_cycle = cycle(sequence)
    suffix_cycle = cycle(sequence)

    # length of revision cycle. This will be used to determine when a suffix
    # is required.
    rev_length = len(sequence)
    rev_limit = rev_length + rev_length ** 2

    suffix = ""  # initially, no suffix (A, B, C, ...)
    for k, rev in enumerate(rev_cycle):
        if rev_limit < k:
            # limit of sequence with a single suffix has been reached
            msg = f"maximum revision level ({rev_limit}) has been reached!"
            raise MaximumRevisionError(msg)
        if k % rev_length == 0 and k > 0:
            # increment the suffix every time the revision list rolls over
            suffix = next(suffix_cycle)
        yield suffix + rev
```
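A quick usage sketch of the generator above. The expected values follow directly from the sequence logic, assuming the file above is importable as the `revisions` module:
```python
from itertools import islice

from revisions import revisions

# the generator starts at "0", walks A..Z, then rolls over to AA, AB, ...
print(list(islice(revisions(), 6)))                    # ['0', 'A', 'B', 'C', 'D', 'E']
print(list(islice(revisions(), 30))[-4:])              # ['Z', 'AA', 'AB', 'AC']
print(list(islice(revisions(sequence='partial'), 3)))  # ['0', 'A', 'B']
```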
{
"source": "josephjcontreras/vectormath",
"score": 3
} |
#### File: vectormath/tests/test_vector3.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from vectormath import Vector2, Vector2Array, Vector3, Vector3Array
class TestVMathVector3(unittest.TestCase):
def test_init_exceptions(self):
self.assertRaises(TypeError, Vector3Array, np.r_[1], np.r_[1], 3)
self.assertRaises(ValueError,
Vector3Array, np.r_[1, 2], np.r_[1], np.r_[1])
self.assertRaises(ValueError, Vector3Array, np.array([0, 0]))
self.assertRaises(ValueError,
Vector3Array, 'Make', ' me a ', 'vector!')
self.assertRaises(ValueError, Vector3Array, ([0, 0], [0, 0], [0, 0]))
def test_init(self):
v1 = Vector3Array()
v2 = Vector3Array(0, 0, 0)
self.assertTrue(np.array_equal(v1, v2))
v3 = Vector3Array(v1)
self.assertTrue(np.array_equal(v1, v3))
self.assertTrue(v1 is not v3)
v4 = Vector3Array(np.r_[0, 0, 0])
self.assertTrue(np.array_equal(v1, v4))
v5 = Vector3Array(np.c_[np.r_[1, 0, 0],
np.r_[0, 1, 0],
np.r_[0, 0, 1]])
self.assertTrue(np.array_equal(v5.length, np.r_[1, 1, 1]))
v6 = Vector3Array(np.r_[1, 0, 0], np.r_[0, 1, 0], np.r_[0, 0, 1])
self.assertTrue(np.array_equal(v6.length, np.r_[1, 1, 1]))
v7 = Vector3Array([0, 0, 0])
self.assertTrue(np.array_equal(v1, v7))
v8 = Vector3Array(x=0, y=0, z=0)
self.assertTrue(np.array_equal(v1, v8))
v9 = Vector3Array(
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
)
v10 = Vector3Array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
])
self.assertTrue(np.array_equal(v9, v10))
v11 = Vector3Array([[[[[0]], [[0]], [[0]]]]])
self.assertTrue(np.array_equal(v1, v11))
v12 = Vector3Array([0]*5, [0]*5, [0]*5)
self.assertTrue(np.array_equal(v10, v12))
v13 = Vector3Array((0, 0, 0))
self.assertTrue(np.array_equal(v1, v13))
v14 = Vector3Array(([0, 0, 0], [0, 0, 0]))
self.assertTrue(np.array_equal(v14, Vector3Array([0]*2, [0]*2, [0]*2)))
def test_indexing(self):
v2 = Vector3Array(1, 2, 3)
self.assertTrue(v2[0, 0] == 1)
self.assertTrue(v2[0, 1] == 2)
self.assertTrue(v2[0, 2] == 3)
self.assertTrue(len(v2[0]) == 3)
self.assertRaises(IndexError, lambda: v2[3])
self.assertRaises(IndexError, lambda: v2[0, 3])
l = []
for x in v2[0]:
l.append(x)
self.assertTrue(np.array_equal(np.array(l), np.r_[1, 2, 3]))
self.assertTrue(np.array_equal(v2, Vector3Array(l)))
l = []
v3 = Vector3Array([[1, 2, 3],
[2, 3, 4]])
for v in v3:
l.append(v)
self.assertTrue(np.array_equal(
np.array(l),
np.array([[1, 2, 3], [2, 3, 4]]))
)
self.assertTrue(np.array_equal(Vector3Array(l), v3))
v4 = Vector3Array()
v4[0, 0] = 1
v4[0, 1] = 2
v4[0, 2] = 3
self.assertTrue(np.array_equal(v2, v4))
def test_copy(self):
vOrig = Vector3Array()
vCopy = vOrig.copy()
self.assertTrue(np.array_equal(vOrig, vCopy))
self.assertTrue(vOrig is not vCopy)
def test_size(self):
v1 = Vector3Array()
self.assertTrue(v1.nV == 1)
v2 = Vector3Array(np.c_[np.r_[1, 0, 0],
np.r_[0, 1, 0],
np.r_[0, 0, 1]])
self.assertTrue(v2.nV == 3)
v3 = Vector3Array(
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
)
self.assertTrue(v3.nV == 5)
v4 = Vector3Array(0, 0, 0)
self.assertTrue(v4.nV == 1)
def test_setget(self):
v1 = Vector3Array(1, 1, 1)
self.assertTrue(v1.x == 1)
v1.x = 2
self.assertTrue(v1.x == 2)
self.assertTrue(v1.y == 1)
v1.y = 2
self.assertTrue(v1.y == 2)
self.assertTrue(v1.z == 1)
v1.z = 2
self.assertTrue(v1.z == 2)
v2 = Vector3Array([[0, 1, 2],
[1, 2, 3]])
self.assertTrue(np.array_equal(v2.x, [0, 1]))
v2.x = [0, -1]
self.assertTrue(np.array_equal(v2.x, [0, -1]))
self.assertTrue(np.array_equal(v2.y, [1, 2]))
v2.y = [-1, -2]
self.assertTrue(np.array_equal(v2.y, [-1, -2]))
self.assertTrue(np.array_equal(v2.z, [2, 3]))
v2.z = [0, 0]
self.assertTrue(np.array_equal(v2.z, [0, 0]))
def test_length(self):
v1 = Vector3Array(1, 1, 1)
self.assertTrue(v1.length == np.sqrt(3))
v2 = Vector3Array(np.r_[1, 2], np.r_[1, 2], np.r_[1, 2])
self.assertTrue(np.array_equal(v2.length, np.sqrt(np.r_[3, 12])))
v3 = Vector3Array(1, 0, 0)
v3.length = 5
self.assertTrue(v3.length == 5)
v4 = Vector3Array(np.r_[1, 1], np.r_[0, 0], np.r_[1, 2])
self.assertRaises(ValueError, lambda: setattr(v4, 'length', 5))
v5 = Vector3Array(np.r_[1, 0], np.r_[0, 0], np.r_[0, 1])
self.assertTrue(np.array_equal(v5.length, [1, 1]))
v5.length = [-1, 3]
self.assertTrue(np.array_equal(v5, [[-1., -0., -0.], [0., 0., 3.]]))
self.assertTrue(np.array_equal(v5.length, [1, 3]))
v6 = Vector3Array()
self.assertTrue(v6.length == 0)
self.assertRaises(ZeroDivisionError, lambda: setattr(v6, 'length', 5))
v6.length = 0
self.assertTrue(v6.length == 0)
v7 = Vector3Array(
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]
)
length = [5, 5, 5, 5, 5]
self.assertRaises(ZeroDivisionError,
lambda: setattr(v7, 'length', length))
length = [5, 5, 5, 0, 0]
v7.length = length
self.assertTrue(np.array_equal(length, v7.length))
def test_ops(self):
v1 = Vector3Array(1, 1, 1)
v2 = Vector3Array(2, 2, 2)
self.assertTrue(np.array_equal(v2-v1, v1))
self.assertTrue(np.array_equal(v1-v2, -v1))
self.assertTrue(np.array_equal(v1+v1, v2))
self.assertTrue(np.array_equal(v1*v2, v2))
self.assertTrue(np.array_equal(v2/v1, v2))
self.assertTrue(np.array_equal(2*v1, v2))
self.assertTrue(np.array_equal(v2/2, v1))
self.assertTrue(np.array_equal(v1+1, v2))
self.assertTrue(np.array_equal(v2-1, v1))
v1 = Vector3Array(np.r_[1, 1.], np.r_[1, 1.], np.r_[1, 1.])
v2 = Vector3Array(np.r_[2, 2.], np.r_[2, 2.], np.r_[2, 2.])
self.assertTrue(np.array_equal(v2-v1, v1))
self.assertTrue(np.array_equal(v1-v2, -v1))
self.assertTrue(np.array_equal(v1+v1, v2))
self.assertTrue(np.array_equal(v1*v2, v2))
self.assertTrue(np.array_equal(v2/v1, v2))
self.assertTrue(np.array_equal(2*v1, v2))
self.assertTrue(np.array_equal(v2/2, v1))
self.assertTrue(np.array_equal(v1+1, v2))
self.assertTrue(np.array_equal(v2-1, v1))
def test_dot(self):
v1 = Vector3Array(1, 1, 1)
v2 = Vector3Array(2, 2, 2)
self.assertTrue(v1.dot(v2) == 6)
v1l = Vector3Array(np.r_[1, 1.], np.r_[1, 1.], np.r_[1, 1.])
v2l = Vector3Array(np.r_[2, 2.], np.r_[2, 2.], np.r_[2, 2.])
self.assertTrue(np.array_equal(v1l.dot(v2l), np.r_[6, 6]))
self.assertTrue(np.array_equal(v1.dot(v2l), np.r_[6, 6]))
self.assertTrue(np.array_equal(v1l.dot(v2), np.r_[6, 6]))
v3 = Vector3Array([3]*4, [3]*4, [3]*4)
self.assertRaises(ValueError, lambda: v3.dot(v2l))
self.assertRaises(TypeError, lambda: v3.dot(5))
def test_cross(self):
v1 = Vector3Array(1, 0, 0)
v2 = Vector3Array(0, 1, 0)
vC = Vector3Array(0, 0, 1)
self.assertTrue(np.array_equal(v1.cross(v2), vC))
v1 = Vector3Array(np.r_[1, 1], np.r_[0, 0], np.r_[0, 0])
v2 = Vector3Array(np.r_[0, 0], np.r_[1, 1], np.r_[0, 0])
vC = Vector3Array(np.r_[0, 0], np.r_[0, 0], np.r_[1, 1])
self.assertTrue(np.array_equal(v1.cross(v2), vC))
v3 = Vector3Array([3]*4, [3]*4, [3]*4)
def f(): v3.cross(v2)
self.assertRaises(ValueError, f)
def f(): v3.cross(5)
self.assertRaises(TypeError, f)
def test_as_percent(self):
v1 = Vector3Array(10, 0, 0)
v2 = Vector3Array(20, 0, 0)
self.assertTrue(np.array_equal(v1.as_percent(2), v2))
self.assertTrue(np.array_equal(v1, Vector3Array(10, 0, 0)))# not copied
v3 = Vector3Array(
[0, 0, 2, 0, 0],
[0, 2, 0, 0, 0],
[2, 0, 0, 0, 0]
)
v4 = v3 * .5
self.assertTrue(np.array_equal(v3.as_percent(.5), v4))
v5 = Vector3Array()
self.assertTrue(np.array_equal(v5.as_percent(100), v5))
v6 = Vector3Array(5, 5, 5)
self.assertTrue(np.array_equal(v6.as_percent(0), v5))
self.assertRaises(TypeError,
lambda: v6.as_percent('One Hundred Percent'))
def test_normalize(self):
v1 = Vector3Array(5, 0, 0)
self.assertTrue(v1.length == 5)
self.assertTrue(v1.normalize() is v1)
self.assertTrue(v1.length == 1)
v2 = Vector3Array()
self.assertRaises(ZeroDivisionError, lambda: v2.normalize())
v3 = Vector3Array(
[0, 0, 2],
[0, 2, 0],
[2, 0, 0]
)
self.assertTrue(np.array_equal(v3.length, [2, 2, 2]))
self.assertTrue(v3.normalize() is v3)
self.assertTrue(np.array_equal(v3.length, [1, 1, 1]))
def test_as_length(self):
v1 = Vector3Array(1, 1, 1)
v2 = v1.as_length(1)
self.assertTrue(v1 is not v2)
self.assertTrue(v1.length == np.sqrt(3))
self.assertTrue(v2.length == 1)
v3 = Vector3Array(np.r_[1, 2], np.r_[1, 2], np.r_[1, 2])
v4 = v3.as_length([1, 2])
self.assertTrue(np.array_equal(v4.length, [1, 2]))
self.assertRaises(ValueError, lambda: v3.as_length(5))
v5 = Vector3Array(np.r_[1, 0], np.r_[0, 0], np.r_[0, 1])
self.assertTrue(np.array_equal(v5.length, [1, 1]))
v6 = v5.as_length([-1, 3])
self.assertTrue(np.array_equal(v6, [[-1., -0., -0.], [0., 0., 3.]]))
self.assertTrue(np.array_equal(v6.length, [1, 3]))
v7 = Vector3Array()
self.assertRaises(ZeroDivisionError, lambda: v7.as_length(5))
v8 = v7.as_length(0)
self.assertTrue(v8.length == 0)
v9 = Vector3Array(
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]
)
length = [5, 5, 5, 5, 5]
self.assertRaises(ZeroDivisionError, lambda: v9.as_length(length))
length = [5, 5, 5, 0, 0]
v10 = v9.as_length(length)
self.assertTrue(np.array_equal(length, v10.length))
def test_as_unit(self):
v1 = Vector3Array(1, 0, 0)
v2 = v1.as_unit()
self.assertTrue(v1 is not v2)
self.assertTrue(np.array_equal(v1, v2))
self.assertTrue(v2.length == 1)
v3 = Vector3Array(np.r_[1, 2], np.r_[1, 2], np.r_[1, 2])
v4 = v3.as_unit()
self.assertTrue(np.array_equal(v4.length, [1, 1]))
v5 = Vector3Array(1, 1, 1)
v6 = v5.as_unit()
self.assertTrue(v6.length == 1)
self.assertTrue(v6.x == v6.y)
self.assertTrue(v6.z == v6.y)
v7 = Vector3Array()
self.assertRaises(ZeroDivisionError, v7.as_unit)
v9 = Vector3Array(
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]
)
self.assertRaises(ZeroDivisionError, v9.as_unit)
def test_view_types(self):
v1 = Vector3Array(np.random.rand(100, 3))
self.assertTrue(isinstance(v1, Vector3Array))
self.assertTrue(isinstance(v1[1:2], Vector3Array))
self.assertTrue(isinstance(v1[1:50:2], Vector3Array))
self.assertTrue(isinstance(v1[4], Vector3))
self.assertTrue(isinstance(v1[4, :], np.ndarray))
self.assertTrue(isinstance(v1.x, np.ndarray))
self.assertTrue(isinstance(v1[1:30, :], np.ndarray))
a1 = np.array([1., 2.])
with self.assertRaises(ValueError):
a1.view(Vector3)
with self.assertRaises(ValueError):
a1.view(Vector3Array)
a1 = np.array([1., 2., 3.])
self.assertTrue(isinstance(a1.view(Vector3), Vector3))
with self.assertRaises(ValueError):
a1.view(Vector3Array)
a1 = np.array([[1., 2., 3.]])
with self.assertRaises(ValueError):
a1.view(Vector3)
self.assertTrue(isinstance(a1.view(Vector3Array), Vector3Array))
self.assertTrue(isinstance(v1.view(Vector3Array), Vector3Array))
with self.assertRaises(ValueError):
v1.view(Vector2Array)
with self.assertRaises(ValueError):
v1.view(Vector3)
with self.assertRaises(ValueError):
v1.view(Vector2)
v1 = Vector3([1., 2., 3.])
with self.assertRaises(ValueError):
v1.view(Vector3Array)
with self.assertRaises(ValueError):
v1.view(Vector2Array)
with self.assertRaises(ValueError):
v1.view(Vector2)
self.assertTrue(isinstance(v1.view(Vector3), Vector3))
v1 = np.kron(Vector3([1., 0., 0.]), np.atleast_2d(np.ones(10)).T)
self.assertFalse(isinstance(v1, Vector3))
def test_angle(self):
# test a unit vector along each coordinate
v1 = Vector3(1, 0, 0) # x-axis, use this as datum
v = [Vector3(1, 0, 0), Vector3(0, 1, 0), Vector3(0, 0, 1),
Vector3(-1, 0, 0), Vector3(0, -1, 0), Vector3(0, 0, -1)]
angles_deg = [0, 90, 90, 180, 90, 90]
angles_rad = [0, np.pi / 2, np.pi / 2, np.pi, np.pi / 2, np.pi / 2]
for k in range(6):
a_deg = v1.angle(v[k], unit='deg')
a_rad0 = v1.angle(v[k], unit='rad')
a_rad1 = v1.angle(v[k])
self.assertEqual(a_deg, angles_deg[k])
self.assertEqual(a_rad0, angles_rad[k])
self.assertEqual(a_rad1, angles_rad[k])
# verify the associative property
self.assertEqual(v1.angle(v[k]), v[k].angle(v1))
with self.assertRaises(TypeError):
angleResult = v1.angle('anything but Vector3')
with self.assertRaises(ValueError):
angleResult = v1.angle(v[0], unit='invalid entry')
with self.assertRaises(ZeroDivisionError):
angleResult = v1.angle(Vector3(0, 0, 0))
# def test_mult_warning(self):
# with warnings.catch_warnings(record=True) as w:
# v1 = Vector3Array()
# v2 = v1 * 3
# self.assertTrue(len(w) == 0)
# M = Matrix3()
# v3 = v2 * M
# self.assertTrue(len(w) == 1)
if __name__ == '__main__':
unittest.main()
```
#### File: vectormath/vectormath/vector.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
class BaseVector(np.ndarray):
"""Class to contain basic operations used by all Vector classes"""
def __new__(cls, *args, **kwargs):
"""BaseVector cannot be created"""
raise NotImplementedError('Please specify Vector2 or Vector3')
@property
def x(self):
"""x-component of vector"""
return self[0]
@x.setter
def x(self, value):
self[0] = value
@property
def y(self):
"""y-component of vector"""
return self[1]
@y.setter
def y(self, value):
self[1] = value
@property
def length(self):
"""Length of vector"""
return float(np.sqrt(np.sum(self**2)))
@length.setter
def length(self, value):
if not np.isscalar(value):
raise ValueError('Length must be a scalar')
value = float(value)
if self.length != 0:
new_length = value/self.length
self *= new_length
return
if value != 0:
raise ZeroDivisionError('Cannot resize vector of length 0 to '
'nonzero length')
@property
def rho(self):
"""Radial coordinate of this vector (equal to the length of the vector)"""
return self.length
@rho.setter
def rho(self, value):
self.length = value
@property
def theta(self):
"""Angular coordinate / azimuthal angle of this vector in radians
        Based on polar coordinate space (or spherical coordinate space for `Vector3`)
returns angle between this vector and the positive x-axis
range: (-pi <= theta <= pi)
"""
return float(np.arctan2(self.y, self.x))
# pylint: disable=fixme
# TODO: Add `theta` and `theta_deg` setters
# @theta.setter
# def theta(self, value):
# ...
@property
def theta_deg(self):
"""Angular coordinate / azimuthal angle of this vector in degrees
        Based on polar coordinate space (or spherical coordinate space for `Vector3`)
returns angle between this vector and the positive x-axis
range: (-180 <= theta_deg <= 180)
"""
return self.theta * 180 / np.pi
def as_length(self, value):
"""Return a new vector scaled to given length"""
new_vec = self.copy()
new_vec.length = value
return new_vec
def as_percent(self, value):
"""Return a new vector scaled by given decimal percent"""
new_vec = self.copy()
new_vec.length = value * self.length
return new_vec
def as_unit(self):
"""Return a new vector scaled to length 1"""
new_vec = self.copy()
new_vec.normalize()
return new_vec
def normalize(self):
"""Scale the length of a vector to 1 in place"""
self.length = 1
return self
def dot(self, vec):
"""Dot product with another vector"""
if not isinstance(vec, self.__class__):
raise TypeError('Dot product operand must be a vector')
return np.dot(self, vec)
def cross(self, vec):
"""Cross product with another vector"""
if not isinstance(vec, self.__class__):
raise TypeError('Cross product operand must be a vector')
return self.__class__(np.cross(self, vec))
def angle(self, vec, unit='rad'):
"""Calculate the angle between two Vectors
unit: unit for returned angle, either 'rad' or 'deg'. Defaults to 'rad'
"""
if not isinstance(vec, self.__class__):
raise TypeError('Angle operand must be of class {}'
.format(self.__class__.__name__))
if unit not in ['deg', 'rad']:
raise ValueError('Only units of rad or deg are supported')
denom = self.length * vec.length
if denom == 0:
raise ZeroDivisionError('Cannot calculate angle between '
'zero-length vector(s)')
ang = np.arccos(self.dot(vec) / denom)
if unit == 'deg':
ang = ang * 180 / np.pi
return ang
def __mul__(self, multiplier):
return self.__class__(self.view(np.ndarray) * multiplier)
class Vector3(BaseVector):
"""Primitive 3D vector defined from the origin
New Vector3 can be created with:
- another Vector3
- length-3 array
    - x, y, and z values
- no input (returns [0., 0., 0.])
"""
# pylint: disable=fixme
# TODO: add support for instantiating Vector3 with `polar`=True
def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ
def read_array(X, Y, Z):
"""Build Vector3 from another Vector3, [x, y, z], or x/y/z"""
if isinstance(X, cls) and Y is None and Z is None:
return cls(X.x, X.y, X.z)
if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 3 and
Y is None and Z is None):
return cls(X[0], X[1], X[2])
if X is None and Y is None and Z is None:
return cls(0, 0, 0)
if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):
xyz = np.r_[X, Y, Z]
xyz = xyz.astype(float)
return xyz.view(cls)
raise ValueError('Invalid input for Vector3 - must be an instance '
'of a Vector3, a length-3 array, 3 scalars, or '
'nothing for [0., 0., 0.]')
return read_array(x, y, z)
def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument
"""This is called at the end of ufuncs
If the output is the wrong shape, return the ndarray view
instead of vector view
"""
if out_arr.shape != (3,):
out_arr = out_arr.view(np.ndarray)
return out_arr
def __array_finalize__(self, obj):
"""This is called when initializing the vector
If the constructor is used, obj is None. If slicing is
used, obj has the same class as self. In both these cases,
we let things pass.
If we are viewing another array class as a vector, then obj has
a different class than self. In this case, if the array has
an invalid shape a ValueError is raised
"""
if obj is None or obj.__class__ is Vector3:
return
if self.shape != (3,):
raise ValueError(
'Invalid array to view as Vector3 - must be length-3 array.'
)
@property
def z(self):
"""z-component of vector"""
return self[2]
@z.setter
def z(self, value):
self[2] = value
@property
def phi(self):
"""Polar angle / inclination of this vector in radians
        Based on spherical coordinate space
        returns angle between this vector and the positive z-axis
range: (0 <= phi <= pi)
"""
return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)
# pylint: disable=fixme
# TODO: Add `phi` and `phi_deg` setters
# @phi.setter
# def phi(self, value):
# ...
@property
def phi_deg(self):
"""Polar angle / inclination of this vector in degrees
        Based on spherical coordinate space
        returns angle between this vector and the positive z-axis
        range: (0 <= phi_deg <= 180)
"""
return self.phi * 180 / np.pi
class Vector2(BaseVector):
"""Primitive 2D vector defined from the origin
New Vector2 can be created with:
- another Vector2
- length-2 array
- x and y values
- rho and theta, if polar=True; specify unit as 'rad' (default) or 'deg'
- no input (returns [0., 0.])
"""
def __new__(cls, x=None, y=None, polar=False, unit='rad'): #pylint: disable=arguments-differ
def read_array(X, Y):
"""Build Vector2 from another Vector2, [x, y], or x/y"""
if isinstance(X, cls) and Y is None:
if polar:
raise ValueError(
'When copying a Vector2, polar=True is not supported'
)
return cls(X.x, X.y)
if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 2 and
Y is None):
return cls(X[0], X[1], polar, unit)
if X is None and Y is None:
return cls(0, 0, polar, unit)
if np.isscalar(X) and np.isscalar(Y):
if polar:
if unit not in ['deg', 'rad']:
raise ValueError(
'Only units of rad or deg are supported'
)
if unit == 'deg':
Y = Y / 180 * np.pi
X, Y = X * np.cos(Y), X * np.sin(Y)
xyz = np.r_[X, Y]
xyz = xyz.astype(float)
return xyz.view(cls)
raise ValueError('Invalid input for Vector2 - must be an instance '
'of a Vector2, a length-2 array, 2 scalars, or '
'nothing for [0., 0.]')
return read_array(x, y)
def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument
if out_arr.shape != (2,):
out_arr = out_arr.view(np.ndarray)
return out_arr
def __array_finalize__(self, obj):
if obj is None or obj.__class__ is Vector2:
return
if self.shape != (2,):
raise ValueError(
'Invalid array to view as Vector2 - must be length-2 array.'
)
def cross(self, vec):
"""Cross product with another vector"""
if not isinstance(vec, self.__class__):
raise TypeError('Cross product operand must be a vector')
        return Vector3(0, 0, np.cross(self, vec).item())  # collapse the 0-d cross product to a Python scalar
class BaseVectorArray(BaseVector):
"""Class to contain basic operations used by all VectorArray classes"""
@property
def x(self):
"""Array of x-component of vectors"""
return self[:, 0]
@x.setter
def x(self, value):
self[:, 0] = value
@property
def y(self):
"""Array of y-component of vectors"""
return self[:, 1]
@y.setter
def y(self, value):
self[:, 1] = value
@property
def nV(self):
"""Number of vectors"""
return self.shape[0]
def normalize(self):
"""Scale the length of all vectors to 1 in place"""
self.length = np.ones(self.nV)
return self
@property
def dims(self):
"""Tuple of different dimension names for Vector type"""
raise NotImplementedError('Please use Vector2Array or Vector3Array')
@property
def length(self):
"""Array of vector lengths"""
return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)
@length.setter
def length(self, l):
l = np.array(l)
if self.nV != l.size:
raise ValueError('Length vector must be the same number of '
'elements as vector.')
# This case resizes all vectors with nonzero length
if np.all(self.length != 0):
new_length = l/self.length
for dim in self.dims:
setattr(self, dim, new_length*getattr(self, dim))
return
# This case only applies to single vectors
if self.nV == 1 and l == 0:
assert self.length == 0, \
'Nonzero length should be resized in the first case'
for dim in self.dims:
setattr(self, dim, 0.)
return
# This case only applies if vectors with length == 0
# in an array are getting resized to 0
if self.nV > 1 and np.array_equal(self.length.nonzero(), l.nonzero()): #pylint: disable=no-member
new_length = l/[x if x != 0 else 1 for x in self.length]
for dim in self.dims:
setattr(self, dim, new_length*getattr(self, dim))
return
# Error if length zero array is resized to nonzero value
raise ZeroDivisionError('Cannot resize vector of length 0 to '
'nonzero length')
def dot(self, vec):
"""Dot product with another vector"""
if not isinstance(vec, self.__class__):
raise TypeError('Dot product operand must be a VectorArray')
if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
raise ValueError('Dot product operands must have the same '
'number of elements.')
return np.sum((getattr(self, d)*getattr(vec, d) for d in self.dims), 1)
def angle(self, vec, unit='rad'):
"""Angle method is only for Vectors, not VectorArrays"""
raise NotImplementedError('angle not implemented for VectorArrays')
class Vector3Array(BaseVectorArray):
"""List of Vector3
A new Vector3Array can be created with:
- another Vector3Array
- x/y/z lists of equal length
- n x 3 array
- nothing (returns [[0., 0., 0.]])
"""
def __new__(cls, x=None, y=None, z=None): #pylint: disable=arguments-differ
def read_array(X, Y, Z):
"""Build Vector3Array from various inputs"""
if isinstance(X, cls) and Y is None and Z is None:
X = np.atleast_2d(X)
return cls(X.x.copy(), X.y.copy(), X.z.copy())
if isinstance(X, (list, tuple)):
X = np.array(X)
if isinstance(Y, (list, tuple)):
Y = np.array(Y)
if isinstance(Z, (list, tuple)):
Z = np.array(Z)
if isinstance(X, np.ndarray) and Y is None and Z is None:
X = np.squeeze(X)
if X.size == 3:
X = X.flatten()
return cls(X[0], X[1], X[2])
if len(X.shape) == 2 and X.shape[1] == 3:
return cls(
X[:, 0].copy(), X[:, 1].copy(), X[:, 2].copy()
)
raise ValueError(
'Unexpected shape for vector init: {shp}'.format(
shp=X.shape
)
)
if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):
X, Y, Z = float(X), float(Y), float(Z)
elif not (isinstance(X, type(Y)) and isinstance(X, type(Z))):
raise TypeError('Must be the same types for x, y, and '
'z for vector init')
if isinstance(X, np.ndarray):
if not (X.shape == Y.shape and X.shape == Z.shape):
raise ValueError('Must be the same shapes for x, y, '
'and z in vector init')
vec_ndarray = np.c_[X, Y, Z]
vec_ndarray = vec_ndarray.astype(float)
return vec_ndarray.view(cls)
if X is None:
X, Y, Z = 0.0, 0.0, 0.0
vec_ndarray = np.r_[X, Y, Z].reshape((1, 3))
return np.asarray(vec_ndarray).view(cls)
return read_array(x, y, z)
def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument
if len(out_arr.shape) != 2 or out_arr.shape[1] != 3:
out_arr = out_arr.view(np.ndarray)
return out_arr
def __array_finalize__(self, obj):
if obj is None or obj.__class__ is Vector3Array:
return
if len(self.shape) != 2 or self.shape[1] != 3: #pylint: disable=unsubscriptable-object
raise ValueError(
'Invalid array to view as Vector3Array - must be '
'array of shape (*, 3).'
)
def __getitem__(self, i):
"""Overriding _getitem__ allows coersion to Vector3 or ndarray"""
item_out = super(Vector3Array, self).__getitem__(i)
if np.isscalar(i):
return item_out.view(Vector3)
if isinstance(i, slice):
return item_out
return item_out.view(np.ndarray)
@property
def z(self):
"""Array of z-component of vectors"""
return self[:, 2]
@z.setter
def z(self, value):
self[:, 2] = value
@property
def dims(self):
return ('x', 'y', 'z')
def cross(self, vec):
"""Cross product with another Vector3Array"""
if not isinstance(vec, Vector3Array):
raise TypeError('Cross product operand must be a Vector3Array')
if self.nV != 1 and vec.nV != 1 and self.nV != vec.nV:
raise ValueError('Cross product operands must have the same '
'number of elements.')
return Vector3Array(np.cross(self, vec))
class Vector2Array(BaseVectorArray):
"""List of Vector2
A new Vector2Array can be created with:
- another Vector2Array
- x/y lists of equal length
- n x 2 array
- nothing (returns [[0., 0.]])
"""
def __new__(cls, x=None, y=None): #pylint: disable=arguments-differ
def read_array(X, Y):
"""Build Vector2Array from various inputs"""
if isinstance(X, cls) and Y is None:
X = np.atleast_2d(X)
return cls(X.x.copy(), X.y.copy())
if isinstance(X, (list, tuple)):
X = np.array(X)
if isinstance(Y, (list, tuple)):
Y = np.array(Y)
if isinstance(X, np.ndarray) and Y is None:
X = np.squeeze(X)
if X.size == 2:
X = X.flatten()
return cls(X[0], X[1])
if len(X.shape) == 2 and X.shape[1] == 2:
return cls(
X[:, 0].copy(), X[:, 1].copy()
)
raise ValueError(
'Unexpected shape for vector init: {shp}'.format(
shp=X.shape
)
)
if np.isscalar(X) and np.isscalar(Y):
X, Y = float(X), float(Y)
elif not isinstance(X, type(Y)):
raise TypeError('Must be the same types for x and y '
'for vector init')
if isinstance(X, np.ndarray):
if X.shape != Y.shape:
raise ValueError('Must be the same shapes for x and y '
'in vector init')
vec_ndarray = np.c_[X, Y]
vec_ndarray = vec_ndarray.astype(float)
return vec_ndarray.view(cls)
if X is None:
X, Y = 0.0, 0.0
vec_ndarray = np.r_[X, Y].reshape((1, 2))
return np.asarray(vec_ndarray).view(cls)
return read_array(x, y)
def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument
if len(out_arr.shape) != 2 or out_arr.shape[1] != 2:
out_arr = out_arr.view(np.ndarray)
return out_arr
def __array_finalize__(self, obj):
if obj is None or obj.__class__ is Vector2Array:
return
if len(self.shape) != 2 or self.shape[1] != 2: #pylint: disable=unsubscriptable-object
raise ValueError(
'Invalid array to view as Vector2Array - must be '
'array of shape (*, 2).'
)
def __getitem__(self, i):
"""Overriding _getitem__ allows coercion to Vector2 or ndarray"""
item_out = super(Vector2Array, self).__getitem__(i)
if np.isscalar(i):
return item_out.view(Vector2)
if isinstance(i, slice):
return item_out
return item_out.view(np.ndarray)
@property
def dims(self):
return ('x', 'y')
``` |
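For reference, a minimal usage sketch of the `Vector3`, `Vector2`, and `Vector3Array` classes defined above (it assumes the package is importable as `vectormath`; the values noted in comments follow from the definitions):
```python
# Minimal usage sketch of the classes above; expected values are noted in comments.
import numpy as np
from vectormath.vector import Vector2, Vector3, Vector3Array

v = Vector3(3, 4, 0)
print(v.length)                 # 5.0
u = v.as_unit()                 # unit-length copy; v itself is unchanged
print(u.length)                 # 1.0

w = Vector3(0, 0, 1)
print(v.dot(w))                 # 0.0
print(v.cross(w))               # Vector3 perpendicular to v and w
print(v.angle(w, unit='deg'))   # 90.0

p = Vector2(1, 45, polar=True, unit='deg')
print(p.x, p.y)                 # ~0.7071 ~0.7071

arr = Vector3Array(np.random.rand(5, 3))
print(arr.length.shape)         # (5,) -- one length per row vector
```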
{
"source": "joseph-jnl/rlbook",
"score": 2
} |
#### File: rlbook/rlbook/bandits.py
```python
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count
from typing import Callable, Type, Dict
from rlbook.testbeds import Testbed
import warnings
from collections import namedtuple
import logging
from itertools import repeat
from copy import deepcopy
from math import sqrt, log
def init_constant(testbed, q_val=0):
"""Set initial action value estimate as a given constant, defaults to 0"""
return {a: q_val for a in testbed.expected_values}
class Bandit(metaclass=ABCMeta):
"""Base Bandit class
Attributes:
testbed (TestBed class object):
Testbed object that returns a Reward value for a given Action
columns (list of strings):
List of numpy column names to use when outputting results
as a pandas dataframe.
action_values (numpy array):
Stores results of the actions values method.
Contains Run, Step, Action, and Reward
Initialized as None, and created with the run method.
n (int):
Current step in a run
Q_init (initialization function):
Function to use for initializing Q values, defaults to zero init
Q (dict):
Action-value estimates in format {action: reward_estimate (float), ...}
Na (dict):
Count of how many times an action has been chosen
{action X: action X count, ...}
At (int):
Action that corresponds to the index of the selected testbed arm
"""
def __init__(self, Q_init: Dict):
self.columns = [
"run",
"step",
"action",
"reward",
"optimal_action",
]
self.action_values = None
self.n = 1
self.Q_init = Q_init
self.Q = deepcopy(Q_init)
self.Na = {a: 0 for a in self.Q}
self.At = self.argmax(self.Q)
def initialization(self, testbed):
"""Reinitialize bandit for a new run when running in serial or parallel"""
testbed.reset_ev()
self.n = 1
self.Q = deepcopy(self.Q_init)
self.Na = {a: 0 for a in self.Q}
self.At = self.argmax(self.Q)
def argmax(self, Q):
"""Return max estimate Q, if tie between actions, choose at random between tied actions"""
        Q_array = np.array(list(Q.values()))  # use the passed-in estimates (UCL passes self.U here)
At = np.argwhere(Q_array == np.max(Q_array)).flatten().tolist()
if len(At) > 1:
At = np.random.choice(At)
else:
At = At[0]
return list(Q.keys())[At]
@abstractmethod
def select_action(self, testbed):
"""Select action logic"""
pass
def run(self, testbed, steps, n_runs=1, n_jobs=4, serial=False):
"""Run bandit for specified number of steps and optionally multiple runs"""
if serial:
self.action_values = self._serialrun(testbed, steps, n_runs)
elif n_runs >= 4:
if n_jobs > cpu_count():
warnings.warn(
f"Warning: running n_jobs: {n_jobs}, with only {cpu_count()} cpu's detected",
RuntimeWarning,
)
self.action_values = self._multirun(testbed, steps, n_runs, n_jobs=n_jobs)
else:
self.action_values = self._serialrun(testbed, steps, n_runs)
def _serialrun(self, testbed, steps, n_runs):
action_values = np.empty((steps, len(self.columns), n_runs))
for k in range(n_runs):
action_values[:, 0, k] = k
for n in range(steps):
action_values[n, 1, k] = n
action_values[n, 2:, k] = self.select_action(testbed)
# Reset Q for next run
self.initialization(testbed)
return action_values
def _singlerun(self, testbed, steps, idx_run):
# Generate different random states for parallel workers
np.random.seed()
action_values = np.empty((steps, len(self.columns), 1))
action_values[:, 0, 0] = idx_run
for n in range(steps):
action_values[n, 1, 0] = n
action_values[n, 2:, 0] = self.select_action(testbed)
# Reset Q for next run
self.initialization(testbed)
return action_values
def _multirun(self, testbed, steps, n_runs, n_jobs=4):
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
action_values = executor.map(
self._singlerun,
repeat(testbed, n_runs),
[steps for n in range(n_runs)],
list(range(n_runs)),
)
return np.squeeze(np.stack(list(action_values), axis=2))
def output_df(self):
"""Reshape action_values numpy array and output as pandas dataframe"""
n_rows = self.action_values.shape[2] * self.action_values.shape[0]
df = pd.DataFrame(
data=self.action_values.transpose(2, 0, 1).reshape(-1, len(self.columns)),
columns=self.columns,
)
return df
class EpsilonGreedy(Bandit):
"""Epsilon greedy bandit
Choose the 'greedy' option that maximizes reward but 'explore' a random action
for a certain percentage of steps according to the epsilon value
Attributes:
epsilon (float):
epsilon coefficient configuring the probability to explore non-optimal actions,
ranging from 0.0 to 1.0
alpha (float or "sample_average"):
Constant step size ranging from 0.0 to 1.0, resulting in Q being the weighted average
of past rewards and initial estimate of Q
Note on varying step sizes such as using 1/n "sample_average":
self.Q[self.At] = self.Q[self.At] + 1/self.Na[self.At]*(R-self.Q[self.At])
Theoretically guaranteed to converge, however in practice, slow to converge compared to constant alpha
"""
def __init__(self, Q_init: Dict, epsilon=0.1, alpha=0.1):
super().__init__(Q_init)
self.epsilon = epsilon
self.alpha = alpha
def select_action(self, testbed):
if np.random.binomial(1, self.epsilon) == 1:
self.At = list(self.Q.keys())[np.random.randint(len(self.Q))]
else:
self.At = self.argmax(self.Q)
A_best = testbed.best_action()
R = testbed.action_value(self.At)
self.Na[self.At] += 1
if self.alpha == "sample_average":
self.Q[self.At] = self.Q[self.At] + 1 / self.Na[self.At] * (
R - self.Q[self.At]
)
else:
logging.debug(f"alpha: {self.alpha}, At: {self.At}, R: {R}")
self.Q[self.At] = self.Q[self.At] + self.alpha * (R - self.Q[self.At])
self.n += 1
return (self.At, R, A_best)
def output_df(self):
"""Reshape action_values numpy array and output as pandas dataframe
Add epsilon coefficient used for greedy bandit
"""
df = super().output_df()
df["epsilon"] = self.epsilon
return df
class UCL(Bandit):
"""Upper Confidence Limit bandit
Estimate an upper bound for a given action that includes a measure of uncertainty
based on how often the action has been chosen in the past
At = argmax( Qt(a) + c * sqrt(ln(t)/Nt(a)))
Sqrt term is a measure of variance of an action's Upper Bound
The more often an action is selected, the uncertainty decreases (denominator increases)
When another action is selected,
the uncertainty increases (the numerator since time increase, but in smaller increments due to the ln)
Attributes:
c (float):
c > 0 controls the degree of exploration, specifically the confidence level of a UCL for a given action
U (dict):
Action-value uncertainty estimate in format {action: uncertainty (float), ...}
alpha (float or "sample_average"):
Constant step size ranging from 0.0 to 1.0, resulting in Q being the weighted average
of past rewards and initial estimate of Q
Note on varying step sizes such as using 1/n "sample_average":
self.Q[self.At] = self.Q[self.At] + 1/self.Na[self.At]*(R-self.Q[self.At])
Theoretically guaranteed to converge, however in practice, slow to converge compared to constant alpha
"""
def __init__(self, Q_init: Dict, c=0.1, alpha=0.1):
"""
"""
super().__init__(Q_init)
self.c = c
# Initialize self.Na as 1e-100 number instead of 0
self.Na = {a: 1e-100 for a in self.Na}
self.alpha = alpha
def initialization(self, testbed):
"""Reinitialize bandit attributes for a new run"""
testbed.reset_ev()
self.n = 1
self.Q = deepcopy(self.Q_init)
self.Na = {a: 1e-100 for a in self.Na}
def select_action(self, testbed):
logging.debug(f"Na: {self.Na}")
self.U = {a: Q + self.c * sqrt(log(self.n)/self.Na[a]) for a, Q in self.Q.items()}
self.At = self.argmax(self.U)
A_best = testbed.best_action()
R = testbed.action_value(self.At)
self.Na[self.At] += 1
if self.alpha == "sample_average":
self.Q[self.At] = self.Q[self.At] + 1 / self.Na[self.At] * (
R - self.Q[self.At]
)
else:
logging.debug(f"alpha: {self.alpha}, At: {self.At}, R: {R}")
self.Q[self.At] = self.Q[self.At] + self.alpha * (R - self.Q[self.At])
self.n += 1
return (self.At, R, A_best)
def output_df(self):
"""Reshape action_values numpy array and output as pandas dataframe
Add c coefficient used for UCL
"""
df = super().output_df()
df["c"] = self.c
return df
class Gradient(Bandit):
"""Gradient bandit
Learn a set of numerical preferences "H" rather than estimate a set of action values "Q"
H preferences are all relative to each other, no correlation to a potential reward
Update H using:
Ht+1(At) = Ht(At) + lr * (Rt - Q[At]) * (1 - softmax(At)) for At
Ht+1(a) = Ht(a) + lr * (Rt - Q[At]) * softmax(a) for all a != At
where At is action chosen
Attributes:
H (dict):
Action-value uncertainty estimate in format {action: uncertainty (float), ...}
lr (float between 0.0-1.0):
learning rate, step size to update H
alpha (float or "sample_average"):
Constant step size ranging from 0.0 to 1.0, resulting in Q being the weighted average
of past rewards and initial estimate of Q
Note on varying step sizes such as using 1/n "sample_average":
self.Q[self.At] = self.Q[self.At] + 1/self.Na[self.At]*(R-self.Q[self.At])
Theoretically guaranteed to converge, however in practice, slow to converge compared to constant alpha
"""
def __init__(self, Q_init: Dict, lr=0.1, alpha=0.1):
"""
"""
super().__init__(Q_init)
self.lr = lr
self.alpha = alpha
self.H = deepcopy(self.Q_init)
def initialization(self, testbed):
"""Reinitialize bandit attributes for a new run"""
testbed.reset_ev()
self.n = 1
self.H = deepcopy(self.Q_init)
self.Q = deepcopy(self.Q_init)
self.Na = {a: 0 for a in self.Q}
def softmax(self, H):
h = np.array([val for val in H.values()])
probs = np.exp(h)/sum(np.exp(h))
return dict(zip(H.keys(), probs))
def select_action(self, testbed):
"""
Select At based on H prob
Then update H via:
Ht+1(At) = Ht(At) + lr * (Rt - Q[At]) * (1 - softmax(At)) for At
Ht+1(a) = Ht(a) + lr * (Rt - Q[At]) * softmax(a) for all a != At
where At is action chosen
"""
probs = self.softmax(self.H)
logging.debug(f"probs: {probs}")
self.At = int(np.random.choice(list(self.H.keys()), 1, p=list(probs.values())))
A_best = testbed.best_action()
R = testbed.action_value(self.At)
self.Na[self.At] += 1
logging.debug(f"H: {self.H}")
logging.debug(f"Q: {self.Q}")
for a in self.H:
if a == self.At:
self.H[a] = self.H[a] + self.lr * (R - self.Q[a]) * (1 - probs[a])
else:
self.H[a] = self.H[a] - self.lr * (R - self.Q[a]) * (probs[a])
if self.alpha == "sample_average":
self.Q[self.At] = self.Q[self.At] + 1 / self.Na[self.At] * (
R - self.Q[self.At]
)
else:
logging.debug(f"alpha: {self.alpha}, At: {self.At}, R: {R}")
self.Q[self.At] = self.Q[self.At] + self.alpha * (R - self.Q[self.At])
self.n += 1
return (self.At, R, A_best)
def output_df(self):
"""Reshape action_values numpy array and output as pandas dataframe
Add learning rate
"""
df = super().output_df()
df["lr"] = self.lr
return df
``` |
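As a rough illustration of how these classes are driven, the sketch below runs `EpsilonGreedy` against a stub testbed. The real `rlbook.testbeds.Testbed` is not shown here, so `StubTestbed` is a hypothetical stand-in that only provides what the bandit actually uses (`expected_values`, `reset_ev`, `best_action`, `action_value`):
```python
# Hypothetical stand-in for rlbook.testbeds.Testbed -- not the real class.
# It only implements the attributes and methods Bandit/EpsilonGreedy call.
import numpy as np
from rlbook.bandits import EpsilonGreedy, init_constant

class StubTestbed:
    def __init__(self, means):
        # expected_values only needs to be keyed by the available actions
        self.expected_values = dict(means)
    def reset_ev(self):
        pass  # stationary stub: nothing to re-randomize between runs
    def best_action(self):
        return max(self.expected_values, key=self.expected_values.get)
    def action_value(self, action):
        return np.random.normal(self.expected_values[action], 1.0)

testbed = StubTestbed({0: 0.1, 1: 0.5, 2: 0.9})
bandit = EpsilonGreedy(init_constant(testbed), epsilon=0.1, alpha=0.1)
bandit.run(testbed, steps=1000, n_runs=2, serial=True)
df = bandit.output_df()  # columns: run, step, action, reward, optimal_action, epsilon
print(df.groupby("run")["reward"].mean())
```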
{
"source": "JosephJvB/terminal",
"score": 3
} |
#### File: JosephJvB/terminal/im-bill-gates.py
```python
import os
import sys
#print(os.getcwd())
class Main:
def __init__(self, ext):
self.ext = ext
def main_loop(self, d):
for f in os.listdir(d):
if not f.startswith("."):
rel_path = os.path.join(d, f)
if os.path.isdir(rel_path):
self.main_loop(rel_path)
                elif os.path.isfile(rel_path) and f != "im-bill-gates.py":  # skip this script itself
self.rename(rel_path)
else: print("not file or dir?", rel_path)
return
def rename(self, f):
arr = f.split(".")
        del arr[-1]
arr.append(self.ext)
renamed = ".".join(arr)
os.rename(f, renamed)
return
if not len(sys.argv) > 1 or not sys.argv[1]:
print("but what language tho?")
else:
m = Main(sys.argv[1])
m.main_loop(os.getcwd())
print("DONE!")
``` |
{
"source": "josephkane/zoo-testing",
"score": 3
} |
#### File: zoo-testing/zoo/animal.py
```python
from species import *
from transport import *
class Animal:
def __init__(self, name, species):
self.name = name
self.species = species
class Betta(Animal, Swimming):
def __init__(self, color, name):
super().__init__(name, BettaTrifasciata(color))
```
#### File: zoo-testing/zoo/test_habitat.py
```python
import unittest
from zoo import *
class TestHabitat(unittest.TestCase):
def test_name_empty_string_by_default(self):
habitat = Habitat()
self.assertEqual(habitat.name, '')
def test_members_empty_set_by_default(self):
habitat = Habitat()
self.assertIsInstance(habitat.members, set)
def test_add_animal_to_habitat(self):
aquarium = Aquarium('freshwater')
bob = Betta('orange', 'Bob')
james = Betta('orange', 'James')
aquarium.add_member(bob)
self.assertIn(bob, aquarium.members)
aquarium.add_member(james)
self.assertIn(bob, aquarium.members)
self.assertIn(james, aquarium.members)
def test_remove_members(self):
aquarium = Aquarium('freshwater')
james = Betta('orange', 'James')
aquarium.add_member(james)
aquarium.remove_member(james)
self.assertNotIn(james, aquarium.members)
if __name__ == '__main__':
unittest.main()
```
#### File: zoo-testing/zoo/test_species.py
```python
import unittest
from zoo import *
class TestSpecies(unittest.TestCase):
def test_common_name_empty_string_by_default(self):
species = Species()
self.assertEqual(species.common_name, '')
def test_georegion_empty_string_by_default(self):
species = Species()
self.assertEqual(species.geo_region, '')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JosephKBS/util",
"score": 3
} |
#### File: util/chatbot/app.py
```python
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from chat import get_response
app = Flask(__name__)
# CORS(app)
# If you enable CORS(app) to serve the frontend separately, the app.get route below can be removed.
@app.get("/")
def index_get():
    return render_template("base.html")  # landing page
@app.post("/predict")
def predict():
    text = request.get_json().get("message")
    # TODO: check if text is valid
    response = get_response(text)
    message = {"answer": response}
    return jsonify(message)
if __name__=="__main__":
app.run(debug=True)
``` |
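With the app above running locally, the `/predict` route can be exercised from a separate process; a small sketch using `requests` (the host and port are Flask defaults, not configured anywhere above):
```python
# Quick manual test of the /predict endpoint; assumes app.run() is serving
# on Flask's default local port.
import requests

resp = requests.post("http://127.0.0.1:5000/predict", json={"message": "hello"})
print(resp.status_code)  # 200 if chat.get_response handled the message
print(resp.json())       # {"answer": "..."}
```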
{
"source": "Josephkhland/MUFA-MMO",
"score": 3
} |
#### File: Josephkhland/MUFA-MMO/mufadb.py
```python
import pymongo
import math
import datetime
from enum import Enum
from mongoengine import *
#Connects with the Database
connect('MUFAdatabase', host='localhost', port=27017)
#Creates an array size 15 filled with zeros. Used later for initializations.
array_zero_15 = []
array_zero_24 = []
for i in range(15):
array_zero_15.append(0)
for i in range(24):
array_zero_24.append(0)
#Useful function:
def nextLevelEXP(level):
exponent = 1.5
baseXP = 100
return math.floor(baseXP*(level**exponent))
#Classes Declarations
class Battler(Document): meta = {'allow_inheritance': True}
class Monster(Battler): pass
class Player(Battler): pass
class Node(Document): meta = {'allow_inheritance': True}
class WorldNode(Node): pass
class GuildHub(Document): pass
class ArmorSet(Document): pass
class Spell(Document): pass
class Item(Document): meta = {'allow_inheritance': True}
class Armor(Item): pass
class Weapon(Item): pass
class Spellbook(Item): pass
class Artifact(Item): pass
class Battle(Node): pass
class Dungeon(Node): pass
class activeCondition(EmbeddedDocument): pass
class descendant(EmbeddedDocument): pass
class character(EmbeddedDocument): pass
#ENUMERATIONS
class Conditions(Enum):
POISONED = 0 #Take 1 Damage per Action, until condition expires.
BURNING = 1 #Take 5 Damage per Action, until condition expires. If FROZEN applies, both conditions are removed
FROZEN = 2 #Can't move. If attacked with Crash weapon you take double damage but the condition is removed. If BURNING applies both conditions are removed.
PARALYZED = 3 #Agility counts as 0 for combat calculations.
TERRIFIED = 4 #Can't use Slash, Pierce or Crash Weapons.
BLINDED = 5 #Your Precision is actively 10%
DEAFENED = 6 #Your Evasion is 0%
SILENCED = 7 #You can't use Spells
CURSED = 8 #Your Health doesn't heal up
BLEEDING = 9 #Take 1 Damage per Action, until condition expires. Prevents healing. Healing cures the condition.
SLOWED = 10 #Agility is halved for any calculations
WEAKENED = 11 #Strength is halved for any calculations
ASLEEP = 12 #Can't move. Wakes up on Damage or After 1 Hour.
PETRIFIED = 13 #Can't move. Double Crash Damage.
DEAD = 14 #Can't move. Can't be healed. Can't be selected as active character.
class Buffs(Enum):
ARMOR_UP = 0 #Increases Physical DR_p by 10% and Physical DR_f by 10.
EVASION_UP = 1 #Increases Evasion by 5%
MARMOR_UP = 2 #Increases Magical DR_p by 10% and Magical DR_f by 10.
PRECISION_UP = 3 #Increases Precision by 5%
DAMAGE_UP = 4 #Increases total DAMAGE you deal by 1%
KARMA_UP =5 #Increases Karma by 1%
SPELL_RES_UP = 6 #Increases Spell Resist by 10 %
CRAFT_UP = 7 #Increases Crafting chances / Dismantling chances by 10%
F_EXHAUSTION_RES_UP =8 #Increases Forced Exhaustion damage resistance by 10%
CONDITION_RES_POISONED = 9 #Increases corresponding condition resistance by 10%
CONDITION_RES_BURNING = 10
CONDITION_RES_FROZEN = 11
CONDITION_RES_PARALYZED = 12
CONDITION_RES_TERRIFIED = 13
CONDITION_RES_BLINDED = 14
CONDITION_RES_DEAFENED = 15
CONDITION_RES_SILENCED = 16
CONDITION_RES_CURSED = 17
CONDITION_RES_BLEEDING = 18
CONDITION_RES_SLOWED = 19
CONDITION_RES_WEAKENED = 20
CONDITION_RES_ASLEEP = 21
CONDITION_RES_PETRIFIED = 22
CONDITION_RES_DEAD = 23
class ItemType(Enum):
HELMET = 0
CHESTPIECE = 1
BOOTS = 2
SLASH = 3
PIERCE = 4
CRASH = 5
RANGED = 6
ARTIFACT = 7
SPELLBOOK = 8
class GuildPrivacy(Enum):
CLOSED = 0 #The Guild can't be entered by those that discover its node.
ALLIANCE = 1 #The Guild can only be entered by Players with a Base within an Allied Guild.
OPEN = 2 #The Guild is open and anyone that finds it can enter.
class PrimaryStat(Enum):
WIL = 0
VIT = 1
AGI = 2
STR = 3
#Classes definitions
class Item(Document):
item_id = IntField(primary_key = True)
name = StringField(max_length = 50)
#item_types: 0 -> helmet (armor),
# 1 -> chestpiece (armor),
# 2 -> boots (armor),
# 3 -> slash (weapon)
# 4 -> pierce (weapon)
# 5 -> crash (weapon)
# 6 -> ranged (weapon)
# 7 -> artifact (use from inventory)
# 8 -> spellbook
item_type = IntField( default = 9)
#Special Stats:
spell_resistance = IntField(default = 0) #When targeted by a spell, reduce the success chance of it's effect on you by X%
condition_resistances = ListField(IntField(), default = array_zero_15 ) #When someone attempts to inflict a condition on you, use these to resist.
forced_exhaustion_resistance = IntField(default = 0) #When someone attempts to deal damage to your actions (Exhaustion Damage) use this to resist it.
#Looting
drop_chance = IntField(default = 0) #Chance of dropping when a character wielding it is killed.
#Crafting Stats:
crafting_recipe = ListField(ListField(IntField()), default = []) #List of Resources required in format List([resource_id, quantity])
dismantling_difficulty = IntField() #Reduces Chance of acquiring each of its resources upon dismantling.
value = IntField(default = 0)
meta = {'allow_inheritance': True}
class activeCondition(EmbeddedDocument):
name = StringField()
date_added = DateTimeField()
duration = IntField()
class descendant(EmbeddedDocument):
will_bonus = IntField( default =1)
vitality_bonus = IntField( default =1)
agility_bonus = IntField( default =1)
strength_bonus = IntField(default =1)
starting_karma = IntField( default =0)
character_name = StringField(max_length = 20)
class ArmorSet(Document):
name = StringField(max_length = 20)
two_items_set_bonus = ListField(IntField(), default = [0,0,0,0]) #Armor Set Bonus increase [Will, Vitality, Agility, Strength] while equipped.
full_set_bonus = ListField(IntField(), default = [0,0,0,0])
class character(EmbeddedDocument):
willpower = IntField(default = 1)
vitality = IntField(default = 1)
agility = IntField(default = 1)
strength = IntField(default = 1)
money_carried = IntField( default = 0)
inventory = ListField(ReferenceField(Item), default =[])
precision_base = IntField( default = 10)
evasion_base = IntField( default = 10)
coordinates = ListField(IntField())
instance_stack = ListField(ReferenceField(Node), default =[])
conditions = EmbeddedDocumentListField(activeCondition, default =[])
buffs = EmbeddedDocumentListField(activeCondition, default = [])
#Three values: armor_equiped[0] -> helmet,
# armor_equiped[1] -> chestpiece,
# armor_equiped[2] -> boots
armor_equiped = ListField(ReferenceField(Item), default=[])
armor_set = ReferenceField(ArmorSet)
set_bonus_specification= IntField(default =0) # 0 for none, 1 for half, 2 for full
#Four values: weapons_equiped[0] -> slash,
# weapons_equiped[1] -> pierce ,
# weapons_equiped[2] -> crash ,
# weapons_equiped[3] ->ranged
weapons_equiped = ListField(ReferenceField(Item), default=[])
max_actions = IntField(default = 100)
actions_left = IntField(default = 100)
karma = IntField(default = 0)
current_health = IntField( default = 10)
current_sanity = IntField( default = 10)
level = IntField(default = 1)
experience = IntField(default =0)
exp_to_next_level = IntField(default = nextLevelEXP(1))
unused_points = IntField(default =0)
is_dead = BooleanField(default = False)
imageURL = StringField(default = "https://cdn.discordapp.com/embed/avatars/0.png")
name = StringField()
spell_resistance = IntField(default = 0) #When targeted by a spell, reduce the success chance of it's effect on you by X%
condition_resistances = ListField(IntField(), default = array_zero_15 ) #When someone attempts to inflict a condition on you, use these to resist.
forced_exhaustion_resistance = IntField(default = 0) #When someone attempts to deal damage to your actions (Exhaustion Damage) use this to resist it.
def getInstance(self):
return self.instance_stack[-1]
def replaceInstance(self,new_instance):
self.instance_stack[-1] = new_instance
return 0
def enterInstance(self, new_instance):
self.instance_stack.append(new_instance)
return 0
def exitInstance(self):
return self.instance_stack.pop()
def checkLevel(self):
if self.experience > self.exp_to_next_level:
self.levelUp()
def levelUp(self):
self.level+= 1
self.experience = self.experience % self.exp_to_next_level
self.exp_to_next_level = nextLevelEXP(self.level)
self.unused_points += 1
self.checkLevel()
def addCondition(self, condition):
for con in self.conditions:
if con.name == condition.name:
if con.duration == -1: return "**"+self.name + "** *is already* `" + con.name +"`"
if con.duration != -1 and con.duration < condition.duration:
con.duration = condition.duration
con.date_added = condition.date_added
return "**"+self.name + "** *is now* `" + con.name +"`!"
self.conditions.append(condition)
return "**"+self.name + "** *is now* `" + condition.name +"`!"
def kill(self):
self.current_health = 0
self.current_sanity = 0
self.actions_left = 0
self.is_dead = True
n_con = activeCondition(name = 'DEAD',
date_added = datetime.datetime.now(),
duration = -1)
return self.addCondition(n_con)
class Node(Document):
node_id = StringField(primary_key = True)
sub_nodes_ids = ListField(StringField())
north_exit = StringField()
east_exit = StringField()
south_exit = StringField()
west_exit = StringField()
members = ListField(ReferenceField(Battler))
entrance_message = StringField(default = "No description Available.")
resources = ListField(IntField())
meta = {'allow_inheritance': True}
class WorldNode(Node):
coordinates = ListField(IntField())
guild_id = StringField(max_length = 20)
class GuildHub(Document):
guild_id = StringField(primary_key = True)
name = StringField()
coordinates = ListField(IntField())
invites_channel = StringField()
privacy_setting = IntField(default = GuildPrivacy.CLOSED.value)
alliances = ListField(StringField(max_length = 20), default =[]) #List of Guild_ids that this guild is friendly with.
shop = ListField(ReferenceField(Item), default =[])
class Battler(Document):
battler_id = StringField(primary_key = True)
name = StringField()
faction = IntField()
creation_date = DateTimeField()
meta = {'allow_inheritance': True}
def getCharacter(self):
pass
def updateCurrentCharacter(self, c):
pass
class Player(Battler):
characters_list = EmbeddedDocumentListField(character, default = [])
active_character = IntField(default =0)
money_stored = IntField(default =0)
items_stored = ListField(ReferenceField(Item), default =[])
descendant_options = EmbeddedDocumentListField(descendant, default = [])
guild_id = StringField(max_length = 20)
last_action_date = DateTimeField()
faction = IntField(default = 0)
pve_player_limit = IntField(default = 1)
pve_join_password = StringField()
def getCharacter(self):
return self.characters_list[self.active_character]
def updateCurrentCharacter(self, c):
self.characters_list[self.active_character] = c
return
def getCharacterByName(self, name):
for ch in self.characters_list:
if ch.name == name:
return ch
return None
def updateCharacterByName(self, c):
counter = 0
for ch in self.characters_list:
if ch.name == c.name:
self.characters_list[counter] = c
return
counter +=1
def getCharacterInNode(self, node_id):
for c in self.characters_list:
if c.getInstance().node_id == node_id:
return c
return None
def getCurrentNode(self):
return self.getCharacter().getInstance()
def maxCharacters(self):
return 6
class Monster(Battler):
character_stats = EmbeddedDocumentField(character)
behaviour = IntField() #Monster Behaviors will be figured out later.
faction = IntField(default = 1)
def getCharacter(self):
return self.character_stats
def getCharacterInNode(self, node):
return self.character_stats
class GhostBattler(Battler):
previous_id = StringField()
def getCharacter(self):
return None
class MonsterEntry(Document):
name = StringField()
character_stats = EmbeddedDocumentField(character)
class Spell(Document):
name = StringField(max_length = 50)
sanity_cost = IntField(default = 0)
spell_success_rate = IntField(default = 100) #The success rate of the spell taking effect on a target.
instance_type = IntField() #The instance this spell can be activated Free Roam: 0, Dungeon: 1, Battle: 2
targets = IntField(default = 1) # Special Values: Self -> 0 , ONE_SIDE -> -1 , ALL -> -2
damage = IntField(default = 0) #Negative for Healing.
on_success_buff_chance = ListField(IntField(), default = array_zero_24)
on_success_buff_duration = ListField (IntField(), default = array_zero_24)
on_success_condition_inflict_chance = ListField(IntField(), default = array_zero_15)
on_success_condition_duration = ListField(IntField(), default = array_zero_15)
on_success_force_exhaustion_damage = IntField(default = 0) #Deals damage directly to someone's actions left. (PvP-Only)
actions_required = IntField(default = 1) #The number of actions required to use this spell.
ingredients = ListField(ListField(IntField()), default = []) #List of Resources required in format List([resource_id, quantity])
class Armor(Item):
armor_set = ReferenceField(ArmorSet)
evasion_chance_reduction = IntField(default = 0) #Armor Stat | Decrease evasion by X%
physical_damage_reduction_f = IntField (default = 0) #Armor Stat | Flat damage reduction from Physical Sources
magic_damage_reduction_f = IntField(default = 0) #Armor Stat | Flat damage reduction from Magical Sources
physical_damage_reduction_p = IntField(default = 0) #Armor Stat | Percentage damage reduction from Physical Sources
magic_damage_reduction_p = IntField(default = 0) #Armor Stat | Percentage damage reduction from Magical Sources
thorn_condition_inflict_chance = ListField(IntField(), default = array_zero_15) #Upon getting hit, chance of inflicting conditions to attacker.
thorn_condition_duration = ListField(IntField(), default = array_zero_15) #Upon getting hit, duration of any condition that gets inflicted to attacker from Thorn effect.
thorn_force_exhaustion_damage = IntField(default = 0) #Upon getting hit, deals damage to the Attacker's Actions (Exhaustion Damage) (vs Players only)
class Weapon(Item):
precision_scale = IntField(default = 0) #Weapon Stat | For each point in agility this is added to the Precision%
damage_amp_scale = IntField(default = 0) #Weapon Stat | For each point in strength this is added to damage_amplification%
damage_per_amp = IntField(default = 0) #Weapon Stat | Amount of Bonus Damage, for each time that the damage is amplified.
damage_base = IntField(default = 0) #Weapon Stat | Base damage that is dealt per Hit.
    on_hit_condition_inflict_chance = ListField(IntField(), default = array_zero_15) #Upon hitting someone, chance of inflicting conditions on the target.
    on_hit_condition_duration = ListField(IntField(), default = array_zero_15) #Upon hitting someone, duration of any inflicted conditions.
on_hit_force_exhaustion_damage = IntField() #Deals damage directly to someone's actions left. (vs Player only)
class Spellbook(Item):
spells = ListField(ReferenceField(Spell), default = []) #Spellbook | Includes the available spells.
class Artifact(Item):
study_requirement = IntField()
consumable = BooleanField()
spell = ReferenceField(Spell)
key_item = BooleanField()
class battlelog(EmbeddedDocument):
battler = ReferenceField(Battler)
action_description = StringField()
timestamp = DateTimeField()
class Battle(Node):
loot = ListField(ReferenceField(Item))
money_loot = IntField(default = 0)
actions_log = EmbeddedDocumentListField(battlelog)
player_limit = IntField(default = 1)
join_password = StringField()
meta = {'allow_inheritance': True}
def getMember_not_in_faction(self, mid, faction):
counter = 0
for member in self.members:
if member.faction != faction:
if counter == mid:
return member
counter += 1
return None
def getEnemies_of_faction(self,faction):
enemies = []
for member in self.members:
if member.faction != faction:
enemies.append(member)
return enemies
class lock(EmbeddedDocument):
is_active = BooleanField(default = False)
key_tag = StringField() #Starts with "SWITCH_" if it's a switch.
hack_difficulty = IntField(default = 100)
demolish_difficulty = IntField(default = 100)
inspection_description = StringField()
tag = StringField()
class trap(EmbeddedDocument):
is_active = BooleanField(default = False)
trap_obscurity = IntField(default = 100)
trap_lethality = IntField(default = 10) #Damage taken when triggered
hack_difficulty = IntField(default = 100)
inspection_description = StringField()
class path(EmbeddedDocument):
lock = EmbeddedDocumentField(lock)
trap = EmbeddedDocumentField(trap)
obscurity = IntField(default = 0)
inspection_description = StringField()
tag = StringField()
class interactable(EmbeddedDocument):
lock = EmbeddedDocumentField(lock)
trap = EmbeddedDocumentField(trap)
obscurity = IntField(default = 0)
is_dial = BooleanField(default = False) #When True, the dial can take more than 1 value
correct_dial_value = IntField(default = 1)
current_dial_value = IntField(default = 0)
inspection_description = StringField()
this_location_string = StringField()
tag = StringField()
location_of_lock = StringField()
class Dungeon(Node):
name = StringField()
d_name = StringField()
dungeon_instance = IntField()
treasure = ListField(ReferenceField(Item), default = [])
gold_loot = IntField()
entrance_side = StringField()
north = EmbeddedDocumentField(path)
east = EmbeddedDocumentField(path)
south = EmbeddedDocumentField(path)
west = EmbeddedDocumentField(path)
interactables = EmbeddedDocumentListField(interactable, default = [])
class DungeonEntry(Document):
name = StringField()
max_monsters = IntField(default = 1)
current_monsters = IntField(default = 0)
monsters_list = ListField(StringField(), default = [])
average_number_of_rooms = IntField(default = 1)
id_prefix = StringField()
descriptor_tags = ListField(StringField(),default =[])
deadends_tags = ListField(StringField(), default = [])
pathways_tags = ListField(StringField(), default =[])
existing_instances = IntField(default = 0)
max_instances_of_dungeon = IntField(default = 1)
class PackageNames(Document):
name = StringField()
class Tags(Document):
name = StringField()
collection = ListField(StringField(), default =[])
```
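As a rough sketch of how these documents fit together (it assumes a local MongoDB instance, as in the `connect()` call above; the field values are arbitrary examples):
```python
# Sketch only: creates one Player with an embedded starting character and reads it back.
import datetime
import mufadb as db

starter = db.character(name="Aria", willpower=2, vitality=3, agility=2, strength=3,
                       current_health=30, current_sanity=20)
player = db.Player(battler_id="123456789012345678", name="ExamplePlayer",
                   creation_date=datetime.datetime.now(),
                   characters_list=[starter])
player.save()

fetched = db.Player.objects.get(battler_id="123456789012345678")
print(fetched.getCharacter().name)                    # Aria
print(db.nextLevelEXP(fetched.getCharacter().level))  # 100 EXP needed for level 2
```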
#### File: Josephkhland/MUFA-MMO/mufaintervals.py
```python
import time, threading
import math
import datetime
import mufadb as db
import mufabattle as mb
import mufagenerator as mg
StartTime=time.time()
def hourly_content_generation():
mg.generate_random_dungeons()
mg.dungeon_monsters_generate()
mg.global_monsters_generate()
def action() :
print('action ! -> time : {:.1f}s'.format(time.time()-StartTime))
def solve_conditions(hourly= False, tworly = False):
battlers = db.Battler.objects.no_dereference()
for b in battlers:
if isinstance(b, db.Player):
for pCharac in b.characters_list:
to_remove = []
for con in pCharac.conditions:
if con.duration == -1:
continue
else :
end_time = con.date_added + datetime.timedelta(hours =con.duration)
if (datetime.datetime.now() >= end_time):
to_remove.append(con)
for i in to_remove:
pCharac.conditions.remove(i)
if hourly:
if mb.has_condition(pCharac,"CURSED") or mb.has_condition(pCharac,"DEAD") or mb.has_condition(pCharac, "BLEEDING"):
pass
else:
pCharac.current_health = min(pCharac.vitality*10, math.ceil(pCharac.current_health+ (pCharac.vitality*0.1)))
pCharac.actions_left = pCharac.max_actions
if tworly:
pCharac.current_sanity = min(pCharac.willpower*10, math.ceil(pCharac.current_sanity+ (pCharac.willpower*0.1)))
b.updateCharacterByName(pCharac)
b.save()
elif isinstance (b, db.Monster):
pCharac = b.getCharacter()
to_remove = []
for con in pCharac.conditions:
if con.duration == -1:
continue
else :
end_time = con.date_added + datetime.timedelta(hours =con.duration)
if (datetime.datetime.now() >= end_time):
to_remove.append(con)
for i in to_remove:
pCharac.conditions.remove(i)
if hourly:
if mb.has_condition(pCharac,"CURSED") or mb.has_condition(pCharac,"DEAD") or mb.has_condition(pCharac, "BLEEDING"):
pass
else:
pCharac.current_health = min(pCharac.vitality*10, math.ceil(pCharac.current_health+ (pCharac.vitality*0.1)))
pCharac.actions_left = pCharac.max_actions
if tworly:
pCharac.current_sanity = min(pCharac.willpower*10, math.ceil(pCharac.current_sanity+ (pCharac.willpower*0.1)))
b.character_stats = pCharac
b.save()
log_message = datetime.datetime.now().ctime() + " : Completed Interval Update"
if hourly:
hourly_content_generation()
log_message += " | hourly == TRUE"
if tworly:
hourly_content_generation()
log_message += " | tworly == TRUE"
print(log_message)
class setInterval :
def __init__(self,interval,action) :
self.interval=interval
self.action=action
self.stopEvent=threading.Event()
self.seconds_passed = datetime.datetime.now().minute*60
if datetime.datetime.now().hour % 2 == 0:
self.seconds_passed += 0
else:
self.seconds_passed += 3600
thread=threading.Thread(target=self.__setInterval)
thread.start()
def __setInterval(self) :
nextTime=time.time()+self.interval
while not self.stopEvent.wait(nextTime-time.time()) :
self.seconds_passed = (self.seconds_passed+self.interval)%7200
if self.seconds_passed == 3600:
nextTime+=self.interval
self.action(True,False)
elif self.seconds_passed == 0:
nextTime+=self.interval
self.action(False,True)
else:
nextTime+=self.interval
self.action(False,False)
def cancel(self) :
self.stopEvent.set()
def update():
inter = setInterval(30,solve_conditions)
# start action every 60s
#inter=setInterval(30,solve_conditions)
#print('just after setInterval -> time : {:.1f}s'.format(time.time()-StartTime))
# will stop interval in 5s
#t=threading.Timer(5,inter.cancel)
#t.start()
```
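Starting the scheduler above only takes a call to `update()`; note it keeps the `setInterval` handle local, so the loop cannot be cancelled by the caller:
```python
# Starts solve_conditions on a 30-second interval in a background thread.
import mufaintervals

mufaintervals.update()  # the hourly / two-hourly branches fire when
                        # seconds_passed wraps to 3600 / 0 inside setInterval
```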
#### File: MUFA-MMO/Players/profile.py
```python
import mufadb as db
import mufa_constants as mconst
import mufa_world as mw
import mufadisplay as mdisplay
import mufa_item_management as mim
import character
import datetime
import discord
import asyncio
from discord.ext import commands
class Profile(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='myprofile')
async def show(self,ctx, user: discord.User = None):
"""Shows your Profile, or the profile of a mentioned user."""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
userID = str(ctx.author.id)
if user != None :
userID = str(user.id)
await ctx.channel.send(embed=character.show(userID, self.bot))
@commands.command(name='myimage')
async def image(self,ctx, *args):
"""Shows your active character's image
+myimage set imageURL
In order to set a new picture for your character.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
pCharac = battler.getCharacter()
if len(args)==0:
await ctx.send(pCharac.imageURL)
else:
if len(args) == 2 and args[0] == "set":
pCharac.imageURL = args[1]
battler.updateCurrentCharacter(pCharac)
battler.save()
@commands.command(name='mycharacter')
async def my_character(self, ctx, *args):
"""Shows the details of your currently Active Character"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
pCharac = battler.getCharacter()
status = ":bust_in_silhouette: "
if pCharac.is_dead:
status = ":skull: "
healthString = mdisplay.digits_panel(pCharac.current_health,pCharac.vitality*10, 8) + ":heart: "
sanityString = mdisplay.digits_panel(pCharac.current_sanity,pCharac.willpower*10, 8) + ":brain: "
actionsString = mdisplay.digits_panel(pCharac.actions_left, pCharac.max_actions, 8) + ":zap: "
embed = discord.Embed(
title = status+pCharac.name + " ("+playerID+")",
description = healthString + sanityString + actionsString,
colour = discord.Colour.red(),
timestamp = datetime.datetime.now()
)
embed.set_footer(text="Instance: "+str(pCharac.getInstance().node_id) +" ("+str(pCharac.coordinates[0])+","+str(pCharac.coordinates[1])+")")
#Setting up the field for the Primary Stats.
primaryStatsString = mdisplay.line("Willpower",pCharac.willpower,12)
primaryStatsString += mdisplay.line("Vitality",pCharac.vitality,12)
primaryStatsString += mdisplay.line("Agility",pCharac.agility,12)
primaryStatsString += mdisplay.line("Strength",pCharac.strength,12)
primaryStatsString += mdisplay.line("Karma",pCharac.karma,12)
embed.add_field(name="Primary Stats", value=primaryStatsString, inline=True)
#Setting up the field for the Progression Stats
progressionStatsString = mdisplay.line("Level", pCharac.level,12)
progressionStatsString += "Experience: "+ mdisplay.digits_panel(pCharac.experience, pCharac.exp_to_next_level,12) + "\n"
progressionStatsString += mdisplay.line("Unused Points", pCharac.unused_points,12)
embed.add_field(name="Progression", value=progressionStatsString, inline=True)
#Setting up the field for the Armor
armorString = mdisplay.equipment(0,pCharac.armor_equiped[0])
armorString += mdisplay.equipment(1,pCharac.armor_equiped[1])
armorString += mdisplay.equipment(2,pCharac.armor_equiped[2])
if pCharac.set_bonus_specification == 1:
armorString += " :small_blue_diamond:*Set Bonus*: `"+pCharac.armor_set.name
if pCharac.armor_set.two_items_set_bonus[0] != 0:
armorString += " "+ str(pCharac.armor_set.two_items_set_bonus[0]) + " WILL"
if pCharac.armor_set.two_items_set_bonus[1] != 0:
armorString += " "+ str(pCharac.armor_set.two_items_set_bonus[1]) + " VIT"
if pCharac.armor_set.two_items_set_bonus[2] != 0:
armorString += " "+ str(pCharac.armor_set.two_items_set_bonus[2]) + " AGI"
if pCharac.armor_set.two_items_set_bonus[3] != 0:
armorString += " "+ str(pCharac.armor_set.two_items_set_bonus[3]) + " STR"
armorString += "`\n"
if pCharac.set_bonus_specification == 2:
armorString += " :small_orange_diamond:*Set Bonus*: `"+pCharac.armor_set.name
if pCharac.armor_set.full_set_bonus[0] != 0:
armorString += " "+ str(pCharac.armor_set.full_set_bonus[0]) + " WILL"
if pCharac.armor_set.full_set_bonus[1] != 0:
armorString += " "+ str(pCharac.armor_set.full_set_bonus[1]) + " VIT"
if pCharac.armor_set.full_set_bonus[2] != 0:
armorString += " "+ str(pCharac.armor_set.full_set_bonus[2]) + " AGI"
if pCharac.armor_set.full_set_bonus[3] != 0:
armorString += " "+ str(pCharac.armor_set.full_set_bonus[3]) + " STR"
armorString += "`\n"
embed.add_field(name="Armor", value=armorString, inline=False)
#Setting up the field for Weapons
weaponString = mdisplay.equipment(3,pCharac.weapons_equiped[0])
weaponString += mdisplay.equipment(4,pCharac.weapons_equiped[1])
weaponString += mdisplay.equipment(5,pCharac.weapons_equiped[2])
weaponString += mdisplay.equipment(6,pCharac.weapons_equiped[3])
embed.add_field(name=":crossed_swords:Weapons:crossed_swords:", value=weaponString, inline=True)
#Setting up the Spells Field
spellString = ":diamond_shape_with_a_dot_inside:`Fireball`\n:diamond_shape_with_a_dot_inside:`Diamond Spray`"
embed.add_field(name=":book:Spells:book:", value=spellString, inline=True)
#Setting up Inventory Field
inventoryString = "Use `+myinventory` to access your inventory\n\n"
inventoryString+= ":scales:Carrying `"+str(len(pCharac.inventory))+"/"+str(pCharac.strength*6)+"` items "
inventoryString+= ":coin: Gold Carried `"+str(pCharac.money_carried)+"`"
embed.add_field(name=":handbag:Inventory:handbag:", value=inventoryString, inline=False)
embed.set_thumbnail(url= pCharac.imageURL)
await ctx.send(embed = embed)
@commands.command(name='mycharacters')
async def my_characters(self, ctx):
"""Shows an index of your characters"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
embed = discord.Embed(
title = battler.name + " ("+playerID+")",
description = "Index of your characters. Use the indexes here when attempting to select another of your characters",
colour = discord.Colour.red(),
timestamp = datetime.datetime.now()
)
counter = 0
for chara in battler.characters_list:
disabling_conditions = [False,False,False]
this_value = "Conditions: "
for con in chara.conditions:
if con.name == 'DEAD':
disabling_conditions[0] = True
if con.name == 'PETRIFIED':
disabling_conditions[1] = True
if con.name == 'ASLEEP':
disabling_conditions[2] = True
this_value += "`"+con.name+"` "
name_plugin = " "
if disabling_conditions[2]:
name_plugin += ":zzz: "
if disabling_conditions[1]:
name_plugin += ":rock: "
if disabling_conditions[0]:
name_plugin += ":skull: "
embed.add_field(name=str(counter) +": " + chara.name +name_plugin, value= this_value, inline = False)
counter += 1
await ctx.send(embed = embed)
@commands.command(name='suicide')
async def suicide(self, ctx):
"""Kills your currently active character"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
pCharac = battler.getCharacter()
message_to_send = pCharac.kill()
battler.updateCurrentCharacter(pCharac)
battler.save()
await ctx.send(message_to_send)
@commands.command(name='switch_to_character')
async def switch_to_character(self, ctx, *args):
"""Switch your Active character to the one you define with an index.
Use the index as seen in your character's list. ( !mycharacters )
Correct usage: !switch_to_character index
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
if int(args[0]) <0 or int(args[0]) >= len(battler.characters_list):
message_to_send = "Invalid argument"
else:
battler.active_character = int(args[0])
battler.save()
message_to_send = "Successfully changed active character to: **"+battler.getCharacter().name+"**."
await ctx.send(message_to_send)
@commands.command(name='abandon_character')
async def abandon_character(self, ctx, *args):
"""Permamently deletes a character.
Use it to clear up space for more characters.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
if int(args[0]) <0 or int(args[0]) >= len(battler.characters_list):
message_to_send = "Invalid argument"
else:
temp = battler.characters_list[int(args[0])].name
del battler.characters_list[int(args[0])]
if battler.active_character >= int(args[0]):
battler.active_character = max(0, battler.active_character -1)
battler.save()
message_to_send = "Goodbye forever: **"+temp+"**."
await ctx.send(message_to_send)
@commands.command(name='mydescendants', aliases = ['descendants', 'character_options'])
async def show_descendants(self, ctx, *args):
"""
Shows your available descendants.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
embedList = mdisplay.displayDescendants(battler)
totalTabs = len(embedList)
c_t = 0
if len(embedList) == 0 :
return await ctx.send("You have no items in your inventory!")
msg = await ctx.send(embed = embedList[0])
if totalTabs > 1:
loop = True
previous_tab = '◀️'
next_tab = '▶️'
await msg.add_reaction(previous_tab)
await msg.add_reaction(next_tab)
def reaction_filter(reaction, user):
return str(user.id) == str(ctx.author.id) and str(reaction.emoji) in [previous_tab,next_tab]
while loop:
try:
pending_collectors =[self.bot.wait_for('reaction_add', timeout=5, check = reaction_filter),
self.bot.wait_for('reaction_remove', timeout=5, check = reaction_filter)]
done_collectors, pending_collectors = await asyncio.wait(pending_collectors, return_when=asyncio.FIRST_COMPLETED)
for collector in pending_collectors:
collector.cancel()
for collector in done_collectors:
reaction, user = await collector
if reaction.emoji == next_tab:
c_t = (c_t+1) % totalTabs
elif reaction.emoji == previous_tab:
c_t = (c_t-1)
if c_t <0:
c_t = totalTabs -1
                    await msg.edit(embed=embedList[c_t])
except asyncio.TimeoutError:
await msg.add_reaction('💤')
loop = False
@commands.command(name='myinventory', aliases = ['inventory', 'inv', 'i'])
async def show_inventory(self, ctx, *args):
"""
Shows your inventory
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.get(battler_id = playerID)
if len(battler.characters_list) == 0:
return await ctx.send("You don't have an active character")
pCharac = battler.getCharacter()
embedList = mdisplay.displayInventoryList(pCharac)
totalTabs = len(embedList)
c_t = 0
if len(embedList) == 0 :
return await ctx.send("You have no items in your inventory!")
msg = await ctx.send(embed = embedList[0])
if totalTabs > 1:
loop = True
previous_tab = '◀️'
next_tab = '▶️'
await msg.add_reaction(previous_tab)
await msg.add_reaction(next_tab)
def reaction_filter(reaction, user):
return str(user.id) == str(ctx.author.id) and str(reaction.emoji) in [previous_tab,next_tab]
while loop:
try:
pending_collectors =[self.bot.wait_for('reaction_add', timeout=5, check = reaction_filter),
self.bot.wait_for('reaction_remove', timeout=5, check = reaction_filter)]
done_collectors, pending_collectors = await asyncio.wait(pending_collectors, return_when=asyncio.FIRST_COMPLETED)
for collector in pending_collectors:
collector.cancel()
for collector in done_collectors:
reaction, user = await collector
if reaction.emoji == next_tab:
c_t = (c_t+1) % totalTabs
elif reaction.emoji == previous_tab:
c_t = (c_t-1)
if c_t <0:
c_t = totalTabs -1
                    await msg.edit(embed=embedList[c_t])
except asyncio.TimeoutError:
await msg.add_reaction('💤')
loop = False
@commands.command(name='upgrade', aliases = ['level_up'])
async def upgrade_character(self, ctx, *args):
"""
Use this command in order to use any unspent points you've earned from leveling up.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.no_dereference().get(battler_id = playerID)
if len(battler.characters_list) == 0:
return await ctx.send("You don't have an active character")
pCharac = battler.getCharacter()
embed = mdisplay.display_level_up_details(pCharac)
msg = await ctx.send(embed= embed)
willpower_emoji = "🧠"
vitality_emoji = "💟"
agility_emoji = "👟"
strength_emoji = "💪"
success_emoji = "✅"
cancel_emoji = "❎"
loop = True
used_emojis = [success_emoji,cancel_emoji]
if pCharac.willpower <= pCharac.unused_points:
await msg.add_reaction(willpower_emoji)
used_emojis.append(willpower_emoji)
if pCharac.vitality <= pCharac.unused_points:
await msg.add_reaction(vitality_emoji)
used_emojis.append(vitality_emoji)
if pCharac.agility <= pCharac.unused_points:
await msg.add_reaction(agility_emoji)
used_emojis.append(agility_emoji)
if pCharac.strength <= pCharac.unused_points:
await msg.add_reaction(strength_emoji)
used_emojis.append(strength_emoji)
await msg.add_reaction(success_emoji)
await msg.add_reaction(cancel_emoji)
def reaction_filter(reaction, user):
return str(user.id) == str(ctx.author.id) and str(reaction.emoji) in used_emojis
while loop:
try:
pending_collectors =[self.bot.wait_for('reaction_add', timeout=5, check = reaction_filter),
self.bot.wait_for('reaction_remove', timeout=5, check = reaction_filter)]
done_collectors, pending_collectors = await asyncio.wait(pending_collectors, return_when=asyncio.FIRST_COMPLETED)
for collector in pending_collectors:
collector.cancel()
for collector in done_collectors:
reaction, user = await collector
if reaction.emoji == willpower_emoji:
if pCharac.willpower <= pCharac.unused_points:
pCharac.unused_points -= pCharac.willpower
pCharac.willpower += 1
elif reaction.emoji == vitality_emoji:
if pCharac.vitality <= pCharac.unused_points:
pCharac.unused_points -= pCharac.vitality
pCharac.vitality += 1
elif reaction.emoji == agility_emoji:
if pCharac.agility <= pCharac.unused_points:
pCharac.unused_points -= pCharac.agility
pCharac.agility += 1
elif reaction.emoji == strength_emoji:
if pCharac.strength <= pCharac.unused_points:
pCharac.unused_points -= pCharac.strength
pCharac.strength += 1
elif reaction.emoji == success_emoji:
battler.updateCharacterByName(pCharac.name)
battler.save()
await msg.add_reaction('💤')
loop = False
elif reaction.emoji == cancel_emoji:
await msg.add_reaction('💤')
loop = False
                await msg.edit(embed=mdisplay.display_level_up_details(pCharac))
except asyncio.TimeoutError:
await msg.add_reaction('💤')
loop = False
@commands.command(name='migrate')
@commands.guild_only()
async def migrate(self, ctx):
"""Change the Serve/Guild you are registered to."""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
guildID = str(ctx.guild.id)
battler = db.Player.objects.get(battler_id = playerID)
battler.guild_id = guildID
battler.save()
await ctx.send("Migrated to **"+ ctx.guild.name + "** succesfully!")
@commands.command(name='compare')
async def compare_items(self, ctx, *args):
"""Compare an item, with the one you have equipped in the according slot, use with"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if not await character.playabilityCheck(ctx, str(ctx.author.id)):
return
battler = db.Player.objects.get(battler_id = str(ctx.author.id))
if len(args) <1 or len(args) >2:
return await ctx.send("Invalid number of arguments")
storage_aliases = ["-s", "s", "storage", "vault", "house"]
        if len(args) == 2 and args[1] in storage_aliases:
return await ctx.send(embed =mim.compare_item(int(args[0]),battler,True))
else:
return await ctx.send(embed =mim.compare_item(int(args[0]),battler,False))
@commands.command(name='equip')
async def equip_item(self, ctx, *args):
"""Equip an item from your inventory"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if not await character.playabilityCheck(ctx, str(ctx.author.id)):
return
battler = db.Player.objects.get(battler_id = str(ctx.author.id))
pCharac = battler.getCharacter()
node = pCharac.getInstance()
if isinstance(node, db.Battle):
return await ctx.send("You can't change your equipment during a battle.")
if len(args) <1 or len(args) >2:
return await ctx.send("Invalid number of arguments")
item_index = int(args[0])
try:
slot_index = int(args[1])
except:
slot_index = 0
return await ctx.send(mim.equipItem(item_index,battler,slot_index))
@commands.command(name='unequip')
async def unequip_item(self, ctx, *args):
"""Place an equipped item in your inventory"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if not await character.playabilityCheck(ctx, str(ctx.author.id)):
return
battler = db.Player.objects.get(battler_id = str(ctx.author.id))
pCharac = battler.getCharacter()
node = pCharac.getInstance()
if isinstance(node, db.Battle):
return await ctx.send("You can't change your equipment during a battle.")
if len(args) !=1 :
return await ctx.send("Invalid number of arguments")
slot_index = int(args[0])
return await ctx.send(mim.unequipItem(battler,slot_index))
@commands.command(name='storeitem')
@commands.guild_only()
async def store_item(self, ctx, *args):
"""Place an item from inventory to Storage"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if not await character.playabilityCheck(ctx, str(ctx.author.id)):
return
battler = db.Player.objects.get(battler_id = str(ctx.author.id))
pCharac = battler.getCharacter()
node = pCharac.getInstance()
if node.guild_id != battler.guild_id:
return await ctx.send("Your currently active character is not near your guild.")
if len(args) !=1 :
return await ctx.send("Invalid number of arguments")
slot_index = int(args[0])
await ctx.send(mim.storeItem(pCharac.inventory, slot_index, battler))
inventory = pCharac.inventory
del inventory[slot_index]
pCharac.inventory = inventory
battler.updateCurrentCharacter(pCharac)
battler.save()
return
@commands.command(name='set_pve_password')
async def set_pve_password(self,ctx, *args):
"""If you wish for your PvE Sessions to be password protected, use this
Command in order to create a password.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if len(args) == 0:
return await ctx.send("This command requires at least one argument")
if len(args[0]) < 6:
return await ctx.send("If you wish to set a new password it needs to be at least 6 Characters.")
playerID = str(ctx.author.id)
battler = db.Player.objects.no_dereference().get(battler_id = playerID)
battler.pve_join_password = args[0]
battler.save()
await ctx.send("You have successfully created a password for your PvE Sessions")
return
@commands.command(name='remove_pve_password')
async def remove_pve_password(self,ctx, *args):
"""Use this if you have created a password for your PvE sessions, and you wish to make them public once again.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
playerID = str(ctx.author.id)
battler = db.Player.objects.no_dereference().get(battler_id = playerID)
battler.pve_join_password = None
battler.save()
await ctx.send("You have successfully turned your PvE Sessions Public.")
return
@commands.command(name='set_party_limit')
async def set_party(self,ctx, *args):
"""Use this to determine the maximum number of players in your party for Battle instances and Dungeons.
"""
if not character.checkRegistration(str(ctx.author.id)):
return await ctx.send("You are not registered. Please register by using the command `!register`")
if len(args) == 0:
return await ctx.send("This command requires at least one argument")
if int(args[0]) < 1:
return await ctx.send("Party size limit can't be less than 1")
playerID = str(ctx.author.id)
battler = db.Player.objects.no_dereference().get(battler_id = playerID)
battler.pve_player_limit = int(args[0])
battler.save()
await ctx.send("You have successfully changed your Party Limit.")
return
# The setup function below is necessary. Remember we give bot.add_cog() an instance of the cog class, in this case Profile.
# When we load the cog, we use the name of the file.
def setup(bot):
bot.add_cog(Profile(bot))
``` |
{
"source": "josephkirk/MeshBaker",
"score": 2
} |
#### File: bakermod/marmoset/marmoset.py
```python
import mset
import os
import json
import inspect
import sys
from enum import Enum
# from CrossBaker import BakeMap, Import, ImportType, Padding
from pprint import pprint
from re import compile
# Enums
Import = Enum("Import", ["UseMeshName", "UseFileName"])
ImportTypes = {
Import.UseMeshName: compile(r"(\w*)_(low|high)_?\w*.(obj|fbx)"),
Import.UseFileName: compile(r"(\w*).(obj|fbx)\Z")
}
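# Illustrative note: under these patterns a hypothetical "head_low_01.obj" or
# "head_high.fbx" resolves to the group name "head" with Import.UseMeshName,
# while Import.UseFileName derives "head" from a file named "head.obj".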
Padding = Enum("Padding",[
"None", "Moderate", "Extreme"])
BakeMap = Enum("BakeMap",[
"Normals",
"ObjectNormals",
"Height",
"Position",
"Curvature",
"Convexity",
"Concavity",
"Thickness",
"BentNormals",
"ObjectBentNormals",
"AO",
"Albedo",
"Gloss",
"Specular",
"Diffuse",
"Roughness",
"Metalness",
"Emissive",
"Transparency",
"VertexColor",
"DiffuseLighting",
"SpecularLighting",
"Lighting",
"MaterialID",
"ObjectID",
"GroupID",
"UVIsland",
"Wireframe",
"AlphaMask"])
# Variable
ImportType = Import.UseFileName
MeshPath = "D:/test"
SavePath = "D:/test"
ExportPath = ""
BakeMaps = ['Normals', 'ObjectNormal', 'Curvature', 'Ambient Occlusion', 'Material ID', 'Position', 'Thickness', 'Height']
OutputWidth = 2048
OutputHeight = 2048
OutputSamples = 8
OutputBits = 8
if not MeshPath:
print("Mesh Path not found")
raise RuntimeError
# Method
class BakeGroup:
def __init__(self, transform):
self.transform = transform
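    # Note: the high/low properties below assume the wrapped bake-group transform
    # has the low-poly sub-group as its first child ([0]) and the high-poly
    # sub-group as its second child ([1]).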
@property
def high(self):
return self.transform.getChildren()[1]
@property
def low(self):
return self.transform.getChildren()[0]
def addChild(self, childItem, gc=None):
if "low" in childItem.name.lower():
for item in childItem.getChildren():
item.parent = self.low
elif "high" in childItem.name.lower():
for item in childItem.getChildren():
item.parent = self.high
else:
for item in childItem.getChildren():
if "low" in item.name.lower():
item.parent = self.low
elif "high" in item.name.lower():
item.parent = self.high
else:
if isinstance(gc, mset.TransformObject):
item.parent = gc
childItem.destroy()
class Baker:
def __init__(self):
self.baker = mset.BakerObject()
self.bakeMaps = {name:bakemapItem for name,bakemapItem in zip(BakeMap, self.baker.getAllMaps())}
pprint(self.bakeMaps)
self.groups = {}
def toggleMapState(self, mapName, state):
self.getMap(mapName).enabled = state
def isMapEnabled(self, mapName):
return self.getMap(mapName).enabled
    def getMap(self, mapName):
        return self.bakeMaps.get(mapName)
def setMapSuffix(self, mapName, suffix):
self.bakeMaps.get(mapName).suffix = suffix
def resetMapSuffix(self, mapName):
self.bakeMaps.get(mapName).resetSuffix()
def hasGroup(self, grpName):
return grpName in self.groups
def getGroup(self, grpName):
return self.groups.get(grpName)
def addGroup(self, name):
grp = BakeGroup(self.baker.addGroup(name))
self.groups[name] = grp
return grp
def setOutputSize(self, width , height):
self.baker.outputWidth = width
self.baker.outputHeight = height
    def setOuputPath(self, path):
        self.baker.outputPath = path
    def setOuputPSD(self, state):
        self.baker.outputSinglePsd = state
    def setOuputBits(self, value):
        self.baker.outputBits = value
    def setOuputSamples(self, value):
        self.baker.outputSamples = value
def setIgnoreGroups(self, state):
for map in self.bakeMaps.values():
if hasattr(map, "ignoreGroups"):
map.ignoreGroups = state
def setDither(self, state):
for map in self.bakeMaps.values():
if hasattr(map, "dither"):
map.dither = state
def resetSuffixAll(self):
for map in self.bakeMaps.values():
map.resetSuffix()
def useMultipleTextureSet(self, state):
self.baker.multipleTextureSets = state
    def setPadding(self, paddingType):
        """None, Moderate, Extreme"""
        self.baker.edgePadding = paddingType.name
    def setPaddingSize(self, paddingSize):
        self.baker.edgePaddingSize = paddingSize
def getRegexGrp(regex, grpid, default=""):
result = default
try:
result = regex.group(grpid)
except IndexError:
pass
return result
if __name__ == "__main__":
regex = ImportTypes[ImportType]
mfiles = dict()
files = (dict(
name=getRegexGrp(regex.search(f), 1),
path=os.path.join(MeshPath,f))
for f in os.listdir(MeshPath) if regex.match(f))
# files = (f for f in dirContents if os.path.isfile(f))
baker = Baker()
wrongNameGrp = mset.TransformObject("Wrong Namming")
for mfile in files:
if not baker.hasGroup(mfile["name"]):
baker.addGroup(mfile["name"])
obj = mset.importModel(mfile["path"])
baker.getGroup(mfile["name"]).addChild(obj, wrongNameGrp)
mset.saveScene(SavePath)
```
#### File: crossbaker/libs/classes.py
```python
try:
from PySide2.QtCore import Property, QObject, Signal, Slot
except:
from PySide.QtCore import Property, QObject, Signal, Slot
from collections import namedtuple
from crossbaker import logger
import subprocess
import os
Size = namedtuple("Size", ["w","h"])
class App(QObject):
def __init__(self, name, path, execmod=[]):
super(App, self).__init__()
#
self._name = ""
self._path = ""
self._mod = []
#
self.name = name
self.path = path
self.mod = execmod
def __str__(self):
return self.path
def __repr__(self):
return "{}:{}:{}".format(self.name, self.path, self.mod)
@property
def name(self):
return self._name
@name.setter
def name(self, newName):
self._name = str(newName)
@property
def path(self):
return self._path
@path.setter
def path(self, newpath):
self._path = str(newpath)
@property
def mod(self):
return self._mod
@mod.setter
def mod(self, newmod):
if hasattr(newmod, "__iter__"):
self._mod = [str(mod) for mod in newmod]
def getname(self):
return self.name.capitalize()
def setname(self, newName):
self.name = newName
logger.debug(newName)
def getpath(self):
return self.path
def setpath(self, newpath):
self.path = newpath
logger.debug(newpath)
    nameChanged = Signal()
    pathChanged = Signal()
def run(self):
logger.debug("exec {} with args: {}".format(self.path, self.mod))
if os.path.exists(self.path):
com = subprocess.Popen(
["{}".format(self.path)]+self.mod,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return com.communicate()
appname = Property(str, getname, setname, notify=nameChanged)
apppath = Property(str, getpath, setpath, notify=pathChanged)
class BakerApp(App):
def __init__(self, name, path, execmod=None):
super(BakerApp, self).__init__(name, path)
self.mod = execmod
@property
def mod(self):
return self._mod
@mod.setter
def mod(self, newmod):
if hasattr(newmod, "get"):
self._mod = newmod
def run(self):
if hasattr(self.mod, "get"):
logger.debug("exec {} with args: {}".format(self.name, self.mod.get()))
com = subprocess.Popen(
["{}".format(self.path)]+self.mod.get(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
com.communicate()
class ImageApp(App):
def __init__(self, name, path, execmod=[]):
super(ImageApp, self).__init__(name, path, execmod)
def run(self):
super(ImageApp, self).run()
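# Minimal usage sketch (the executable path below is a hypothetical placeholder):
#   app = App("imagetool", r"C:/path/to/ImageTool.exe", ["-batch"])
#   stdout, stderr = app.run()  # spawns the process and returns its output if the path exists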
```
#### File: samples/editabletreemodel/ui_mainwindow.py
```python
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(573, 468)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
self.vboxlayout.setContentsMargins(0, 0, 0, 0)
self.vboxlayout.setSpacing(0)
self.vboxlayout.setObjectName("vboxlayout")
self.view = QtGui.QTreeView(self.centralwidget)
self.view.setAlternatingRowColors(True)
self.view.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.view.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.view.setAnimated(False)
self.view.setAllColumnsShowFocus(True)
self.view.setObjectName("view")
self.vboxlayout.addWidget(self.view)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 573, 31))
self.menubar.setObjectName("menubar")
self.fileMenu = QtGui.QMenu(self.menubar)
self.fileMenu.setObjectName("fileMenu")
self.actionsMenu = QtGui.QMenu(self.menubar)
self.actionsMenu.setObjectName("actionsMenu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.exitAction = QtGui.QAction(MainWindow)
self.exitAction.setObjectName("exitAction")
self.insertRowAction = QtGui.QAction(MainWindow)
self.insertRowAction.setObjectName("insertRowAction")
self.removeRowAction = QtGui.QAction(MainWindow)
self.removeRowAction.setObjectName("removeRowAction")
self.insertColumnAction = QtGui.QAction(MainWindow)
self.insertColumnAction.setObjectName("insertColumnAction")
self.removeColumnAction = QtGui.QAction(MainWindow)
self.removeColumnAction.setObjectName("removeColumnAction")
self.insertChildAction = QtGui.QAction(MainWindow)
self.insertChildAction.setObjectName("insertChildAction")
self.fileMenu.addAction(self.exitAction)
self.actionsMenu.addAction(self.insertRowAction)
self.actionsMenu.addAction(self.insertColumnAction)
self.actionsMenu.addSeparator()
self.actionsMenu.addAction(self.removeRowAction)
self.actionsMenu.addAction(self.removeColumnAction)
self.actionsMenu.addSeparator()
self.actionsMenu.addAction(self.insertChildAction)
self.menubar.addAction(self.fileMenu.menuAction())
self.menubar.addAction(self.actionsMenu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Editable Tree Model", None, QtGui.QApplication.UnicodeUTF8))
self.fileMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.actionsMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&Actions", None, QtGui.QApplication.UnicodeUTF8))
self.exitAction.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.exitAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.insertRowAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Row", None, QtGui.QApplication.UnicodeUTF8))
self.insertRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, R", None, QtGui.QApplication.UnicodeUTF8))
self.removeRowAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Row", None, QtGui.QApplication.UnicodeUTF8))
self.removeRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, R", None, QtGui.QApplication.UnicodeUTF8))
self.insertColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Column", None, QtGui.QApplication.UnicodeUTF8))
self.insertColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, C", None, QtGui.QApplication.UnicodeUTF8))
self.removeColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Column", None, QtGui.QApplication.UnicodeUTF8))
self.removeColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, C", None, QtGui.QApplication.UnicodeUTF8))
self.insertChildAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Child", None, QtGui.QApplication.UnicodeUTF8))
self.insertChildAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+N", None, QtGui.QApplication.UnicodeUTF8))
import editabletreemodel_rc
```
#### File: crossbaker/test/test_baker.py
```python
import pytest
def test_run_current_baker():
import crossbaker
print("exec {} baker using arguments: {}".format(
crossbaker.bakers.current().path, crossbaker.bakers.current().mod.get()))
crossbaker.bakers.current().run()
assert False
``` |
{
"source": "josephkirk/PipelineTools",
"score": 2
} |
#### File: PipelineTools/main/ControlsMakerUI.py
```python
import os
from maya import OpenMayaUI, cmds, mel
import pymel.core as pm
import uiStyle
import logging
from ..core.objectClass import controlShape as cs
from functools import partial
try:
from PySide2 import QtWidgets, QtCore, QtGui
except ImportError:
from PySide import QtCore, QtGui
QtWidgets = QtGui
reload(cs)
reload(uiStyle)
# ------------------------------------------------------------------------------
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# ------------------------------------------------------------------------------
SIGNAL = QtCore.SIGNAL
SLOT = QtCore.SLOT
# ------------------------------------------------------------------------------
def mayaWindow():
"""
Get Maya's main window.
:rtype: QMainWindow
"""
# window = OpenMayaUI.MQtUtil.mainWindow()
# window = shiboken.wrapInstance(long(window), QMainWindow)
window = pm.ui.Window('MayaWindow').asQtObject()
return window
# ------------------------------------------------------------------------------
def connect_transform(ob, target, **kws):
attrdict = {
'translate': ['tx', 'ty', 'tz'],
'rotate': ['rx', 'ry', 'rz'],
'scale': ['sx', 'sy', 'sz']
}
for atr in attrdict:
if atr not in kws:
kws[atr] = False
if 'all' in kws:
if kws['all']:
kws[atr] = True
for atr, value in attrdict.items():
if atr in kws:
if kws[atr] is False:
continue
if 'disconnect' in kws:
if kws['disconnect']:
ob.attr(atr) // target.attr(atr)
for attr in value:
ob.attr(attr) // target.attr(attr)
log.info('{} disconnect to {}'.format(
ob.attr(attr), target.attr(attr)))
continue
ob.attr(atr) >> target.attr(atr)
for attr in value:
ob.attr(attr) >> target.attr(attr)
log.info('{} connect to {}'.format(
ob.attr(attr), target.attr(attr)))
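# Example: connect_transform(ctl, jnt, translate=True, rotate=True) drives the joint's
# translate/rotate compound attributes (and their children) from the control;
# passing disconnect=True with the same flags breaks those connections instead.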
def xformTo(ob, target):
const = pm.parentConstraint(target, ob)
pm.delete(const)
log.info('{} match to {}'.format(ob,target))
def create_group(ob):
obname = ob.name().split('|')[-1]
if ob.nodeType() == 'joint':
parent = pm.nt.Joint(name=obname + '_offset')
else:
parent = pm.nt.Transform(name=obname + 'Gp')
oldParent = ob.getParent()
# parent.setTranslation(ob.getTranslation('world'), 'world')
# parent.setRotation(ob.getRotation('world'), 'world')
xformTo(parent, ob)
parent.setParent(oldParent)
ob.setParent(parent)
log.info('create Parent Transform %s'%parent)
return parent
def create_loc(ob):
obname = ob.name().split('|')[-1]
loc = pm.spaceLocator(name=obname + '_loc')
loc.setTranslation(ob.getTranslation('world'), 'world')
loc.setRotation(ob.getRotation('world'), 'world')
loc_Gp = create_group(loc)
return loc
def getUIValue(valueName, defaultValue=0):
if valueName in pm.optionVar:
log.debug('{}:{}'.format(valueName, pm.optionVar[valueName]))
return pm.optionVar[valueName]
else:
pm.optionVar[valueName] = defaultValue
log.debug('{}:{}'.format(valueName, pm.optionVar[valueName]))
return defaultValue
def setUIValue(valueName, value):
pm.optionVar[valueName] = value
log.debug('{}:{}'.format(valueName, pm.optionVar[valueName]))
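# Example: these helpers persist UI options across Maya sessions via optionVar,
# e.g. setUIValue('controlmaker_createChain', 1) stores the flag and
# getUIValue('controlmaker_createChain') returns 1 on the next launch.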
def null():
pass
# ------------------------------------------------------------------------------
class main(QtWidgets.QMainWindow):
'''
Qt UI to rename Object in Scene
'''
def __init__(self, parent=None):
super(main, self).__init__()
self._name = 'ControlCreatorWindow'
try:
pm.deleteUI(self._name)
except:
pass
if parent:
assert isinstance(parent, QtWidgets.QMainWindow), \
'Parent is not of type QMainWindow'
self.setParent(parent)
else:
self.setParent(mayaWindow())
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle('Control Maker')
self.setObjectName(self._name)
self.setStyleSheet(uiStyle.styleSheet)
self.createMenuBar()
self.createStatusBar()
def _initUIValue(self):
self.nameSuffix = 'ctl'
self.name = "controlObject"
self.controlObject = cs.main(self.name, color=(255,255,0,255))
self.controlColor = tuple(self.controlObject.color)
self.connectType_dict = {
'translate': partial(connect_transform, translate=True),
'rotate': partial(connect_transform, rotate=True),
'scale': partial(connect_transform, scale=True),
'all': partial(connect_transform, all=True),
'parent': partial(pm.parentConstraint, mo=True),
'point': partial(pm.pointConstraint, mo=True),
'orient': partial(pm.orientConstraint, mo=True),
}
getUIValue('controlmaker_connectUseLoc', 0)
getUIValue('controlmaker_connectType',0)
getUIValue('controlmaker_createSelOffset', 1)
getUIValue('controlmaker_createCtlOffset', 1)
getUIValue('controlmaker_createChain', 0)
getUIValue('controlmaker_useObjectName', 0)
self._storeshape = None
def _initMainUI(self):
self._initUIValue()
# create Widget
self.topFiller = QtWidgets.QWidget(self)
self.topFiller.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.bottomFiller = QtWidgets.QWidget(self)
self.bottomFiller.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.mainCtner = QtWidgets.QWidget(self)
# Create Layout
self.mainLayout = QtWidgets.QVBoxLayout(self.mainCtner)
# Add widget
self.mainLayout.addWidget(self.topFiller)
self.createMainWidgets()
self.mainLayout.addWidget(self.bottomFiller)
# Set Layout
self.mainCtner.setLayout(self.mainLayout)
self.setCentralWidget(self.mainCtner)
self.setStyleSheet(uiStyle.styleSheet)
self.disableSetAxis = False
self._connectFunction()
def createMainWidgets(self):
self.mainLayout.addWidget(self.controlShapeBox())
def controlShapeBox(self):
uiGrp = QtWidgets.QGroupBox('Control Shape Maker')
layout = QtWidgets.QVBoxLayout()
# - Create Sub Layout --------------------------
controlAttributeGroupLayout = QtWidgets.QGridLayout()
controlNameAttributeLayout = QtWidgets.QVBoxLayout()
controlAttributeGroupLayout.setAlignment(QtCore.Qt.AlignTop)
controlNameAttributeLayout.setAlignment(QtCore.Qt.AlignTop)
controlNameAttributeLayout.setSpacing(2)
controlNameLayout = QtWidgets.QHBoxLayout()
controlAttributeLayout = QtWidgets.QHBoxLayout()
controlShapeLayout = QtWidgets.QVBoxLayout()
controlOptionGroup = QtWidgets.QGroupBox('Option')
controlOptionGroupLayout = QtWidgets.QHBoxLayout()
controlOptionGroup.setLayout(controlOptionGroupLayout)
createButtonLayout = QtWidgets.QHBoxLayout()
# - Create Widget ------------------------------
self.controlName_text = uiStyle.labelGroup(
"Name: ", QtWidgets.QLineEdit, controlNameLayout)
self.controlNameSuffix_comboBox = uiStyle.labelGroup(
"Suffix: ", QtWidgets.QComboBox, controlNameLayout)
controlNameAttributeLayout.addLayout(controlNameLayout)
self.controlHeight_label, self.controlHeight_floatspinbox = uiStyle.labelGroup(
"Height: ", QtWidgets.QDoubleSpinBox, controlAttributeLayout, returnLabel=True)
self.controlLength_label, self.controlLength_floatspinbox = uiStyle.labelGroup(
"Length: ", QtWidgets.QDoubleSpinBox, controlAttributeLayout, returnLabel=True)
self.controlRadius_label, self.controlRadius_floatspinbox = uiStyle.labelGroup(
"Radius: ", QtWidgets.QDoubleSpinBox, controlAttributeLayout, returnLabel=True)
self.controlColor_button = uiStyle.labelGroup(
"Color: ", QtWidgets.QPushButton, controlAttributeLayout)
controlNameAttributeLayout.addLayout(controlAttributeLayout)
self.controlType_combobox = uiStyle.labelGroup(
"Type: ", QtWidgets.QComboBox, controlShapeLayout)
self.controlSmoothness_combobox = uiStyle.labelGroup(
"Step: ", QtWidgets.QComboBox, controlShapeLayout)
self.controlAxis_combobox = uiStyle.labelGroup(
"Axis: ", QtWidgets.QComboBox, controlShapeLayout)
controlAttributeGroupLayout.addLayout(controlNameAttributeLayout,0,0)
controlAttributeGroupLayout.addLayout(controlShapeLayout,0,1)
controlAttributeGroupLayout.setColumnStretch(0,2)
layout.addLayout(controlAttributeGroupLayout)
# self.groupControl_checkbox, = uiStyle.multiOptionsLayout(
# ['group',],
# parent=controlOptionGroupLayout
# )
self.offsetX_floatspinbox,\
self.offsetY_floatspinbox,\
self.offsetZ_floatspinbox = uiStyle.multiLabelLayout(
['x', 'y', 'z'],
QtWidgets.QDoubleSpinBox,
groupLabel='Offset: ',
parent=controlOptionGroupLayout
)
self.addControlShape_button = QtWidgets.QPushButton('Add Shape')
self.deleteControlShape_button = QtWidgets.QPushButton('Delete Shape')
self.changeControlShape_button = QtWidgets.QPushButton('Change Shape')
self.copyControlShape_button = QtWidgets.QPushButton('Copy Shape')
self.pasteControlShape_button = QtWidgets.QPushButton('Paste Shape')
self.createControl_button = QtWidgets.QPushButton('Create Control')
self.setColor_button = QtWidgets.QPushButton('Set Color')
createButtonLayout.addWidget(self.addControlShape_button)
createButtonLayout.addWidget(self.deleteControlShape_button)
createButtonLayout.addWidget(self.changeControlShape_button)
createButtonLayout.addWidget(self.copyControlShape_button)
createButtonLayout.addWidget(self.pasteControlShape_button)
createButtonLayout.addWidget(self.setColor_button)
createButtonLayout.addWidget(self.createControl_button)
layout.addWidget(controlOptionGroup)
layout.addLayout(createButtonLayout)
# ----------------------------------------------
self.controlName_text.setPlaceholderText(self.name)
self.controlNameSuffix_comboBox.addItems(['ctl','cnt','control'])
self.controlLength_floatspinbox.setValue(self.controlObject.length)
self.controlRadius_floatspinbox.setValue(self.controlObject.radius)
self.controlHeight_floatspinbox.setValue(self.controlObject.height)
self.controlHeight_label.hide()
self.controlHeight_floatspinbox.hide()
self.controlType_combobox.addItems(self.controlObject._controlType.keys())
self.controlType_combobox.setCurrentText('Pin')
self.controlSmoothness_combobox.addItems(self.controlObject._resolutions.keys())
self.controlAxis_combobox.addItems(self.controlObject._axisList)
self.controlColor_button.setStyleSheet(".QPushButton { background-color: rgba(%d,%d,%d,%d) } "%tuple(self.controlObject.color))
# ----------------------------------------------
uiGrp.setLayout(layout)
return uiGrp
def createMenuBar(self):
# create Action
def menuItem(name, func , parent, **kws):
newAction = QtWidgets.QAction(name, self)
if 'checkable' in kws:
newAction.setCheckable(kws['checkable'])
if 'checked' in kws:
# if kws['checked'].isdigit():
# newAction.setChecked(bool(int(kws['checked'])))
if 'value' in kws and kws['value'] != 'isCheck':
newAction.setChecked(kws['checked'] == kws['value'])
else:
newAction.setChecked(kws['checked'])
if 'value' in kws:
def emitValue():
if kws['value'] == 'isCheck':
# print newAction.isChecked()
func(newAction.isChecked())
return
func(kws['value'])
newAction.triggered.connect(emitValue)
else:
newAction.triggered.connect(func)
if parent:
parent.addAction(newAction)
return newAction
self.reset_action = QtWidgets.QAction('Reset', self)
self.reset_action.setToolTip('Reset UI To Default Value')
self.reset_action.setStatusTip('Reset UI To Default Value')
self.reset_action.triggered.connect(self.resetUI)
# create Menu
self.menubar = self.menuBar()
self.optionmenu = self.menubar.addMenu('Option')
self.optionmenu.addAction(self.reset_action)
self.optionmenu.addSeparator().setText('Connect Action')
# self.toggleConnectThroughLocAction = menuItem(
# 'Create Control for Childrens', partial(setUIValue, 'controlmaker_createForHie'), self.optionmenu, value ='isCheck',
# checkable=True, checked=getUIValue('controlmaker_createForHie'))
self.toggleConnectThroughLocAction = menuItem(
'Connect Through Loc', partial(setUIValue, 'controlmaker_connectUseLoc'), self.optionmenu, value ='isCheck',
checkable=True, checked=getUIValue('controlmaker_connectUseLoc'))
self.connectTypeGroup = QtWidgets.QActionGroup(self)
self.toggleConnectTranslateAction = menuItem(
'Connect Translate', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='translate',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.toggleConnectRotateAction = menuItem(
'Connect Rotate', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='rotate',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.toggleConnectAllAction = menuItem(
'Connect All', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='all',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.togglePoConstraintAction = menuItem(
'Point Constraint', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='point',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.toggleOConstraintAction = menuItem(
'Orient Constraint', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='orient',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.togglePConstraintAction = menuItem(
'Parent Constraint', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value='parent',
checkable=True, checked=getUIValue('controlmaker_connectType'))
self.toggleConnectAction = menuItem(
'None', partial(setUIValue, 'controlmaker_connectType'), self.connectTypeGroup, value=0,
checkable=True, checked=getUIValue('controlmaker_connectType'))
# self.toggleConnectAction.setChecked(True)
self.optionmenu.addAction(self.toggleConnectTranslateAction)
self.optionmenu.addAction(self.toggleConnectRotateAction)
self.optionmenu.addAction(self.toggleConnectAllAction)
self.optionmenu.addAction(self.togglePoConstraintAction)
self.optionmenu.addAction(self.toggleOConstraintAction)
self.optionmenu.addAction(self.togglePConstraintAction)
self.optionmenu.addAction(self.toggleConnectAction)
self.toggleCreateSelectOffset = menuItem(
'Create Select Offset', partial(setUIValue, 'controlmaker_createSelOffset') , self.optionmenu, value ='isCheck',
checkable=True, checked=getUIValue('controlmaker_createSelOffset'))
self.toggleTakeObjectName = menuItem(
'Use Object Name', partial(setUIValue, 'controlmaker_useObjectName') , self.optionmenu, value ='isCheck',
checkable=True, checked=getUIValue('controlmaker_useObjectName'))
self.optionmenu.addSeparator().setText('Other Action')
self.toggleCreateOffset = menuItem(
'Create Control Offset', partial(setUIValue, 'controlmaker_createCtlOffset') , self.optionmenu, value ='isCheck',
checkable=True, checked=getUIValue('controlmaker_createCtlOffset'))
self.toggleParent = menuItem(
'Create Parent Chain', partial(setUIValue, 'controlmaker_createChain') , self.optionmenu, value ='isCheck',
checkable=True, checked=getUIValue('controlmaker_createChain'))
def createStatusBar(self):
self.statusbar = self.statusBar()
self.statusbar.showMessage('Tool to create rig Controls')
# UI Changed Action
def showEvent(self, event):
self._initMainUI()
# self.updateUIJob = pm.scriptJob(
# e= ["SelectionChanged",self.autoUpdateUI],
# parent = self._name,
# rp=True)
self.show()
def closeEvent(self, event):
# if hasattr(self, 'updateUIJob'):
# if pm.scriptJob(exists=self.updateUIJob):
# pm.scriptJob(k=self.updateUIJob)
self.close()
def autoUpdateUI(self):
pass
def resetUI(self):
self.resetOptionVar()
self._initMainUI()
self.show()
def _connectFunction(self):
self.controlName_text.textEdited.connect(self.onChangeName)
self.controlNameSuffix_comboBox.currentTextChanged.connect(self.onChangeNameSuffix)
self.controlRadius_floatspinbox.valueChanged.connect(self.onChangeRadius)
self.controlLength_floatspinbox.valueChanged.connect(self.onChangeLength)
self.controlHeight_floatspinbox.valueChanged.connect(self.onChangeHeight)
self.controlColor_button.clicked.connect(self.onChangeColor)
self.controlType_combobox.currentTextChanged.connect(self.onChangeType)
self.controlSmoothness_combobox.currentTextChanged.connect(self.onChangeResolution)
self.controlAxis_combobox.currentTextChanged.connect(self.onChangeAxis)
# self.groupControl_checkbox.stateChanged.connect(self.setGroupOptionState)
# self.setAxis_checkbox.stateChanged.connect(self.setAxisOptionState)
# self.mirror_checkbox.stateChanged.connect(self.setMirrorOptionState)
self.offsetX_floatspinbox.valueChanged.connect(self.onChangeOffsetX)
self.offsetY_floatspinbox.valueChanged.connect(self.onChangeOffsetY)
self.offsetZ_floatspinbox.valueChanged.connect(self.onChangeOffsetZ)
self.addControlShape_button.clicked.connect(self.onAddShape)
self.deleteControlShape_button.clicked.connect(self.onDeleteShape)
self.changeControlShape_button.clicked.connect(self.onChangeShape)
self.copyControlShape_button.clicked.connect(self.onCopyShape)
self.pasteControlShape_button.clicked.connect(self.onPasteShape)
self.createControl_button.clicked.connect(self.onCreateShape)
self.setColor_button.clicked.connect(self.onSetColor)
# def onToggleConnectType(self):
# if self.toggleConnectAction.isChecked():
# self.toggleConstraintAction.setChecked(False)
# return
# if self.toggleConstraintAction.isChecked():
# self.toggleConnectAction.setChecked(False)
# return
def onChangeName(self, value):
self.controlObject.name = value
log.debug("name: %s"%self.controlObject.name)
def onChangeNameSuffix(self, value):
self.controlObject._suffix = value
log.debug("Name: %s"%self.controlObject.name)
def onChangeRadius(self, value):
self.controlObject.radius = value
log.debug("Radius: %s"%self.controlObject.radius)
def onChangeLength(self, value):
self.controlObject.length = value
log.debug("Length: %s"%self.controlObject.length)
def onChangeHeight(self, value):
self.controlObject.height = value
log.debug("Height: %s"%self.controlObject.height)
def onChangeColor(self):
colorDialog = QtWidgets.QColorDialog.getColor(QtCore.Qt.yellow)
color = colorDialog.getRgb()
self.controlColor_button.setStyleSheet(
".QPushButton { background-color: rgba(%d,%d,%d,%d) } "%tuple(color))
self.controlObject.color = [color[0]/255.0, color[1]/255.0, color[2]/255.0, color[3]/255.0]
log.debug("Color: %s"%self.controlObject.color)
def onChangeType(self, value):
self.controlObject.currentType = value
currentIndex = self.controlAxis_combobox.currentIndex()
self.controlAxis_combobox.setEnabled(True)
self.controlSmoothness_combobox.setEnabled(True)
self.disableSetAxis = True
self.controlObject.forceSetAxis = False
self.controlLength_label.show()
self.controlLength_floatspinbox.show()
self.controlHeight_label.hide()
self.controlHeight_floatspinbox.hide()
self.controlRadius_label.show()
self.controlRadius_floatspinbox.show()
self.controlRadius_label.setText('Radius')
if value.endswith('Pin'):
self.controlAxis_combobox.clear()
if value.startswith('Double'):
self.controlAxis_combobox.addItems(
self.controlObject._axisList[:6])
if value.startswith('Sphere'):
axisList = self.controlObject._axisList[:2]
axisList.append(self.controlObject._axisList[3])
axisList.extend(['-'+a for a in axisList])
self.controlAxis_combobox.addItems(axisList)
else:
self.controlAxis_combobox.addItems(
self.controlObject._axisList)
elif any([value == typ for typ in ['Arrow','Cylinder', 'CircleArrow','HalfCylinder','Circle','Hemisphere']]):
self.controlObject.forceSetAxis = True
self.controlAxis_combobox.clear()
self.controlAxis_combobox.addItems(
self.controlObject._axisList[:6])
elif any([value == typ for typ in ['DoubleArrow','Rectangle', 'Cross', 'Triangle', 'ThinCross', 'RoundSquare']]):
self.controlSmoothness_combobox.setEnabled(False)
self.controlObject.forceSetAxis = True
self.controlAxis_combobox.clear()
self.controlAxis_combobox.addItems(
self.controlObject._axisList[:3])
self.controlRadius_label.hide()
self.controlRadius_floatspinbox.hide()
else:
self.controlAxis_combobox.setEnabled(False)
self.controlAxis_combobox.setCurrentIndex(currentIndex)
self.disableSetAxis = False
if currentIndex > (self.controlAxis_combobox.count()-1):
self.controlAxis_combobox.setCurrentIndex(
self.controlAxis_combobox.count()-1)
if any([value == typ for typ in ['Octa','NSphere','Hemisphere','Sphere','Circle']]):
self.controlLength_label.hide()
self.controlLength_floatspinbox.hide()
self.controlHeight_label.hide()
self.controlHeight_floatspinbox.hide()
self.controlSmoothness_combobox.setEnabled(False)
if any([value == typ for typ in ['Cube','Arrow','DoubleArrow']]):
self.controlRadius_label.show()
self.controlRadius_floatspinbox.show()
self.controlRadius_label.setText('Width')
self.controlHeight_label.show()
self.controlHeight_floatspinbox.show()
self.controlLength_floatspinbox.setEnabled(True)
log.debug("CurrentType: %s"%self.controlObject.currentType.__name__)
def onChangeResolution(self, value):
self.controlObject.step = value
log.debug("Resolution: %s"%self.controlObject.step)
def onChangeAxis(self, value):
if not self.disableSetAxis:
self.controlObject.axis = value
log.debug("Axis: %s"%self.controlObject.axis)
def onChangeOffsetX(self, value):
offset = self.controlObject.offset
offset[0] = value
self.controlObject.offset = offset
log.debug("Offset: %s"%self.controlObject.offset)
def onChangeOffsetY(self, value):
offset = self.controlObject.offset
offset[1] = value
self.controlObject.offset = offset
log.debug("Offset: %s"%self.controlObject.offset)
def onChangeOffsetZ(self, value):
offset = self.controlObject.offset
offset[2] = value
self.controlObject.offset = offset
log.debug("Offset: %s"%self.controlObject.offset)
def onCreateShape(self):
if pm.selected():
controls = []
# with pm.UndoChunk():
pm.undoInfo( state=False )
sel = pm.selected()
for ob in sel:
control = self.controlObject.currentType()
control.setMatrix(ob.getMatrix(ws=True), ws=True)
if getUIValue('controlmaker_useObjectName'):
control.rename(ob+'_ctl')
if getUIValue('controlmaker_createCtlOffset'):
create_group(control)
if getUIValue('controlmaker_createSelOffset'):
if ob.nodeType() == "joint":
create_group(ob)
if getUIValue('controlmaker_connectType'):
if getUIValue('controlmaker_connectUseLoc'):
loc = create_loc(ob)
pm.parentConstraint(control, loc)
loc.getParent().setParent(control.getParent())
self.connectType_dict[getUIValue('controlmaker_connectType')](loc, ob)
else:
self.connectType_dict[getUIValue('controlmaker_connectType')](control, ob)
if controls and getUIValue('controlmaker_createChain'):
if control.getParent():
control.getParent().setParent(controls[-1])
else:
control.setParent(controls[-1])
controls.append(control)
pm.undoInfo( state=True )
return controls
else:
control = self.controlObject.currentType
return control()
def onSetColor(self):
for control in pm.selected():
try:
controlShape = control.getShape()
if controlShape:
controlShape.overrideEnabled.set(True)
controlShape.overrideRGBColors.set(True)
controlShape.overrideColorRGB.set(self.controlObject.color)
sg = control.shadingGroups()[0] if control.shadingGroups() else None
if sg:
shdr = sg.inputs()[0]
shdr.outColor.set(self.controlObject.color.rgb)
shdr.outTransparency.set([self.controlObject.color.a for i in range(3)])
except AttributeError as why:
log.error(why)
def onChangeShape(self):
for control in pm.selected():
temp = self.controlObject.currentType()
pm.delete(control.getShape(), shape=True)
pm.parent(temp.getShape(), control, r=True, s=True)
pm.delete(temp)
# return control
def onCopyShape(self):
if not pm.selected():
return
sel = pm.selected()[0]
sel_shape = sel.getShape()
if not isinstance(sel_shape, pm.nt.NurbsCurve):
return
self._storeshape = sel_shape.getCVs()
def onPasteShape(self):
if not self._storeshape:
return
axisData = {}
axisData['XY'] = (0, 0, 0)
axisData['XZ'] = (90, 0, 0)
axisData['YZ'] = (0, 0, 90)
axisData['YX'] = (180, 0, 0)
axisData['ZX'] = (-90, 0, 0)
axisData['ZY'] = (0, 0, -90)
for control in pm.selected():
            if not hasattr(control, "getShape") or not control.getShape():
                continue
temp = pm.curve(p=self._storeshape)
try:
                temp.setRotation(axisData[self.controlObject.axis])
pm.makeIdentity(temp, apply=True)
pm.delete(control.getShape(), shape=True)
pm.parent(temp.getShape(), control, r=True, s=True)
finally:
pm.delete(temp)
def onAddShape(self):
for control in pm.selected():
temp = self.controlObject.currentType()
pm.parent(temp.getShape(), control, r=True, s=True)
pm.delete(temp)
def onDeleteShape(self):
for control in pm.selected():
try:
pm.delete(control.getShapes()[-1])
            except (AttributeError, IndexError):
pass
@staticmethod
def resetOptionVar():
for var in pm.optionVar:
try:
if var.startswith('controlmaker'):
del pm.optionVar[var]
except KeyError:
pass
def show():
win = main()
win.show()
return win
if __name__ =='__main__':
try:
app = QtWidgets.QApplication([])
except:
raise
show()
    if 'app' in globals():
app.exec_()
``` |
{
"source": "josephkirk/qt_crossApps",
"score": 2
} |
#### File: qt_crossApps/Libs/ui.py
```python
import os
try:
from PySide2 import QtWidgets, QtCore, QtGui
except ImportError:
from PySide import QtCore, QtGui
QtWidgets = QtGui
styleSheet = """
QFrame {
font: italic 12px;
border: 2px solid rgb(20,20,20);
border-radius: 4px;
border-width: 0px;
padding: 2px;
background-color: rgb(70,70,70);
}
QMenu {
margin: 2px; /* some spacing around the menu */
}
QMenuBar {
font: bold 12px;
border-color: lightgray;
border-width: 2px;
background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,
stop:0 rgb(30,30,30), stop:1 rgb(40,40,40));
}
QPushButton {
background-color: rgb(100,100,100);
}
QGroupBox {
font: bold 12px;
color: rgb(200,200,200);
padding-top: 10px;
background-color: rgb(80,80,80);
border: 1px solid gray;
border-radius: 4px;
margin-top: 5px;
}
QGroupBox::title {
subcontrol-origin: margin;
subcontrol-position: top center; /* position at the top center */
padding: 0px 5px;
}
QSlider::groove:horizontal {
border: 1px solid #999999;
height: 8px; /* the groove expands to the size of the slider by default. by giving it a height, it has a fixed size */
background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #B1B1B1, stop:1 #c4c4c4);
margin: 2px 0;
}
QSlider::handle:horizontal {
background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f);
border: 1px solid #5c5c5c;
width: 18px;
margin: -2px 0; /* handle is placed by default on the contents rect of the groove. Expand outside the groove */
border-radius: 3px;
}
QListWidget {
color: lightgray;
font: bold;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #222222, stop: 1 #333333);
show-decoration-selected: 1; /* make the selection span the entire width of the view */
}
QListWidget::item {
bottom: 5px;
}
QListWidget::item:selected {
padding-left: 5px;
}
QListWidget::item:selected:!active {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 darkgray, stop: 1 gray);
}
QListWidget::item:selected:active {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 darkgray, stop: 1 gray);
}
QListWidget::item:hover {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #333333, stop: 1 #444444);
}
QStatusBar {
border-color: lightgray;
border-width: 2px;
background: qlineargradient(x1:0, y1:0, x2:0, y2:1,
stop:0 rgb(30,30,30), stop:1 rgb(40,40,40));
}
QStatusBar::item {
border: 1px solid red;
border-radius: 3px;
}
"""
def addDivider(widget, layout=None):
line = QtWidgets.QFrame(widget)
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Sunken)
if layout:
layout.addWidget(line)
else:
return widget
def labelGroup(name, widget, parent=None, returnLabel=False, *args, **kws):
layout = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(name)
# print args, kws
createWidget = widget(*args, **kws)
layout.addWidget(label)
layout.addWidget(createWidget)
if parent:
parent.addLayout(layout)
result = (label, createWidget) if returnLabel else createWidget
return result
else:
result = (label, createWidget, layout) if returnLabel else (createWidget, layout)
return result
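# Usage sketch (widget choices are illustrative):
#   name_edit = labelGroup("Name: ", QtWidgets.QLineEdit, someLayout)  # parented form returns the widget
#   label, spin, row = labelGroup("Radius: ", QtWidgets.QDoubleSpinBox, returnLabel=True)  # unparented form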
def multiLabelLayout(names, widget, groupLabel='', dir='horizontal', parent=None, *args, **kws):
dirDict = {
'horizontal': QtWidgets.QBoxLayout.LeftToRight,
'vertical': QtWidgets.QBoxLayout.TopToBottom
}
layout = QtWidgets.QBoxLayout(dirDict[dir])
if groupLabel:
label = QtWidgets.QLabel(groupLabel)
layout.addWidget(label)
widgets = []
for name in names:
sublayout = QtWidgets.QHBoxLayout()
sublabel = QtWidgets.QLabel(name)
createWidget = widget(*args, **kws)
sublayout.addWidget(sublabel)
sublayout.addWidget(createWidget)
layout.addLayout(sublayout)
widgets.append(createWidget)
if parent:
parent.setSpacing(2)
# parent.setStretch(0,1)
parent.addLayout(layout)
return tuple(widgets)
else:
return (tuple(widgets), layout)
def multiOptionsLayout(names, groupname='', parent=None, updateActions=[]):
layout = QtWidgets.QHBoxLayout()
if groupname:
label = QtWidgets.QLabel(groupname)
layout.addWidget(label)
createWidgets = []
for name in names:
createWidget = QtWidgets.QCheckBox(name)
layout.addWidget(createWidget)
createWidgets.append(createWidget)
if updateActions:
for id, cc in enumerate(updateActions):
try:
createWidgets[id].stateChanged.connect(cc)
except IndexError:
pass
if parent:
parent.addLayout(layout)
return tuple(createWidgets)
else:
return (tuple(createWidgets), layout)
def multiButtonsLayout(names, parent=None, actions=[]):
layout = QtWidgets.QHBoxLayout()
createWidgets = []
for name in names:
# print name
createWidget = QtWidgets.QPushButton(name)
layout.addWidget(createWidget)
createWidgets.append(createWidget)
if actions:
for id, cc in enumerate(actions):
try:
createWidgets[id].clicked.connect(cc)
except IndexError:
pass
if parent:
parent.addLayout(layout)
# print tuple(createWidgets)
return tuple(createWidgets)
else:
return (tuple(createWidgets), layout)
def buttonsGroup(groupnames, names, collums=2, parent=None, iconsPath=[], actions=[]):
group = QtWidgets.QGroupBox(groupnames)
layout = QtWidgets.QGridLayout()
buttons = [QtWidgets.QPushButton(name) for name in names]
for id, iconPath in enumerate(iconsPath):
icon = QtGui.QIcon(iconPath)
buttons[id].setIcon(icon)
buttons[id].setText('')
buttons[id].setIconSize(QtCore.QSize(28,28))
buttons[id].setFixedSize(QtCore.QSize(28,28))
buttons[id].setStyleSheet('QPushButton {border-style: none; border-width: 0px; background-color: rgba(0,0,0,0) } ')
stack = buttons[:]
row=0
while stack:
for collum in range(collums):
if stack:
button = stack.pop()
layout.addWidget(button, row, collum)
row += 1
for id, action in enumerate(actions):
buttons[id].clicked.connect(action)
group.setLayout(layout)
return group
def findIcon(icon):
"""
Loop over all icon paths registered in the XBMLANGPATH environment
variable ( appending the tools icon path to that list ). If the
icon exist a full path will be returned.
    :param str icon: icon name including extension
:return: icon path
:rtype: str or None
"""
paths = []
# get maya icon paths
if os.environ.get("XBMLANGPATH"):
paths = os.environ.get("XBMLANGPATH").split(os.pathsep)
# append tool icon path
paths.insert(
0,
os.path.join(
os.path.split(__file__)[0],
"icons"
)
    )
    # completion sketch (the original snippet is truncated here): loop the
    # collected paths and return the full path of the first icon that exists,
    # as described in the docstring above
    for path in paths:
        iconPath = os.path.normpath(os.path.join(path, icon))
        if os.path.exists(iconPath):
            return iconPath
```
#### File: josephkirk/qt_crossApps/main.py
```python
_name = "Universal_Tool_Checker"
_version = "001.00"
_author = "<NAME>"
try:
from PySide2 import QtCore, QtGui, QtWidgets, QtNetwork
except ImportError:
from PySide import QtCore, QtGui
QtWidgets = QtGui
import logging
from .Lib.ui import styleSheet as uiStyle  # aliased so that uiStyle.styleSheet below resolves (assumes the ui module exposes a styleSheet string)
# Pyside Refactor
Signal = QtCore.Signal
Slot = QtCore.Slot
QW = QtWidgets
QC = QtCore
QG = QtGui
# Logging initialize #
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# Utils Function
# UIClass
class main(QtWidgets.QMainWindow):
'''
Universal Tool Finder
'''
def __init__(self):
super().__init__()
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(_name)
self.setObjectName(_name)
# init UI
self._initMainUI()
self.createMenuBar()
self.statusBar()
# Util Function
# Initial Value Definition
def _initUIValue(self):
pass
def _getUIValue(self):
pass
def _initMainUI(self):
self._initUIValue()
self._getUIValue()
# create Widget
self.topFiller = QtWidgets.QWidget(self)
self.topFiller.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.bottomFiller = QtWidgets.QWidget(self)
self.bottomFiller.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.mainCtner = QtWidgets.QWidget(self)
# Create Layout
self.mainLayout = QtWidgets.QVBoxLayout(self.mainCtner)
# Add widget
self.addWidgets()
# Set Layout
self.mainCtner.setLayout(self.mainLayout)
self.setCentralWidget(self.mainCtner)
self.setStyleSheet(uiStyle.styleSheet)
self._connectFunction()
def addWidgets(self):
pass
def _connectFunction(self):
def connect(button,func):
button.clicked.connect(func)
pass
def createMenuBar(self):
# create Action
self.reset_action = QtWidgets.QAction('Reset', self)
self.reset_action.setToolTip('Reset UI To Default Value')
self.reset_action.setStatusTip('Reset UI To Default Value')
self.reset_action.triggered.connect(self.resetUI)
# create Menu
self.menubar = self.menuBar()
self.optionmenu = self.menubar.addMenu('Option')
self.optionmenu.addAction(self.reset_action)
# self.me
def resetUI(self):
self._initMainUI()
self.show()
@classmethod
def showUI(cls):
cls().show()
def show():
win = main()
win.show()
return win
if __name__ =='__main__':
try:
app = QtWidgets.QApplication([])
except:
raise
show()
    if 'app' in globals():
app.exec_()
```
#### File: thonside/viewimplementation/widget_example.py
```python
from PySide2.QtWidgets import QApplication
from viewimplementation import terminal
from pyinterpreter import interpreter
class TerminalPython(terminal.Terminal):
def __init__(self, parent=None):
super(TerminalPython, self).__init__(parent=parent)
# Init interpreter and add globals to context that give access from it.
self.interpreter = interpreter.Interpreter(extra_context=globals().copy())
# connect write and input interpreter to the view implementation.
self.interpreter.write_slot = self.write
self.interpreter.input_slot = self.raw_input
# define prompt
self.prompt = self.interpreter.prompt
# connect enter signal to interpreter
self.press_enter.connect(self.interpreter.run)
# rename interpreter
self.interpreter.inter_name = "ThonSide Interpreter"
# start interpreter
self.interpreter.interact()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
console = TerminalPython()
console.show()
sys.exit(app.exec_())
``` |
{
"source": "JosephKithome/recipeApi",
"score": 4
} |
#### File: app/app/calc.py
```python
def add(x, y):
"""Sum two numbers"""
return x + y
def subtract(x, y):
"""Subtract two numbers"""
return x - y
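# Illustrative usage (hypothetical values):
# assert add(2, 3) == 5
# assert subtract(5, 3) == 2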
``` |
{
"source": "JosephKJ/ARM",
"score": 2
} |
#### File: code/scripts/print_results.py
```python
import argparse
import os
import pickle
import torch
import numpy as np
args = argparse.ArgumentParser(allow_abbrev=False)
args.add_argument("--root", type=str, required=True)
args.add_argument("--start", type=int, required=True)
args.add_argument("--load_model", default=False, action="store_true")
args.add_argument("--num_runs", type=int, default=5)
args = args.parse_args()
def treat_underscores(x):
res = []
for c in x:
if c == "_":
res.append("\\_")
else:
res.append(c)
return "".join(res)
def print_results(args):
ms_avg = {"val": {"acc": [], "forgetting": []},
"test": {"acc": [], "forgetting": []}}
for m in range(args.start, args.start + args.num_runs):
out_dir = os.path.join(args.root, str(m))
config_p = os.path.join(out_dir, "config.pickle")
config = None
tries = 0
while tries < 1000:
try:
with open(config_p, "rb") as config_f:
config = pickle.load(config_f)
break
except:
tries += 1
if config is None:
continue
if args.load_model:
torch.load(os.path.join(config.out_dir, "latest_models.pytorch"))
actual_t = config.max_t
for prefix in ["val", "test"]:
if not config.stationary:
accs_dict = getattr(config, "%s_accs" % prefix)
ms_avg[prefix]["acc"].append(accs_dict[actual_t])
forgetting_dict = getattr(config, "%s_forgetting" % prefix)
if actual_t in forgetting_dict:
ms_avg[prefix]["forgetting"].append(forgetting_dict[actual_t])
print("model %d, %s: acc %.4f, forgetting %.4f" % (
config.model_ind, prefix, accs_dict[actual_t], forgetting_dict[actual_t]))
else:
accs_dict = getattr(config, "%s_accs_data" % prefix)
ms_avg[prefix]["acc"].append(accs_dict[actual_t])
print("model %d, %s: acc %.4f" % (config.model_ind, prefix, accs_dict[actual_t]))
print("---")
for prefix in ["val", "test"]:
for metric in ["acc", "forgetting"]:
if len(ms_avg[prefix][metric]) == 0:
ms_avg[prefix][metric] = (-1, -1)
else:
avg = np.array(ms_avg[prefix][metric]).mean()
std = np.array(ms_avg[prefix][metric]).std()
ms_avg[prefix][metric] = (avg, std)
print("average %s: acc %.4f +- %.4f, forgetting %.4f +- %.4f" % (
prefix, ms_avg[prefix]["acc"][0], ms_avg[prefix]["acc"][1],
ms_avg[prefix]["forgetting"][0], ms_avg[prefix]["forgetting"][1]))
if __name__ == "__main__":
print_results(args)
``` |
{
"source": "JosephKJ/class-incremental-learning",
"score": 2
} |
#### File: adaptive-aggregation-networks/trainer/zeroth_phase.py
```python
import torch
import tqdm
import numpy as np
import torch.nn as nn
import torchvision
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
from utils.misc import *
from utils.process_fp import process_inputs_fp
import torch.nn.functional as F
def incremental_train_and_eval_zeroth_phase(the_args, epochs, b1_model, ref_model, \
tg_optimizer, tg_lr_scheduler, trainloader, testloader, iteration, start_iteration, \
lamda, dist, K, lw_mr, fix_bn=False, weight_per_class=None, device=None):
# Setting up the CUDA device
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for epoch in range(epochs):
# Set the 1st branch model to the training mode
b1_model.train()
# Fix the batch norm parameters according to the config
if fix_bn:
for m in b1_model.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
# Set all the losses to zeros
train_loss = 0
train_loss1 = 0
train_loss2 = 0
# Set the counters to zeros
correct = 0
total = 0
# Learning rate decay
tg_lr_scheduler.step()
# Print the information
print('\nEpoch: %d, learning rate: ' % epoch, end='')
print(tg_lr_scheduler.get_lr()[0])
for batch_idx, (inputs, targets) in enumerate(trainloader):
# Get a batch of training samples, transfer them to the device
inputs, targets = inputs.to(device), targets.to(device)
# Clear the gradient of the paramaters for the tg_optimizer
tg_optimizer.zero_grad()
# Forward the samples in the deep networks
outputs = b1_model(inputs)
# Compute classification loss
loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
# Backward and update the parameters
loss.backward()
tg_optimizer.step()
# Record the losses and the number of samples to compute the accuracy
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
# Print the training losses and accuracies
print('Train set: {}, train loss: {:.4f} accuracy: {:.4f}'.format(len(trainloader), train_loss/(batch_idx+1), 100.*correct/total))
# Running the test for this epoch
b1_model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = b1_model(inputs)
loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
print('Test set: {} test loss: {:.4f} accuracy: {:.4f}'.format(len(testloader), test_loss/(batch_idx+1), 100.*correct/total))
if len(the_args.ckpt_dir_fg) > 0:
torch.save(b1_model, the_args.ckpt_dir_fg)
return b1_model
``` |
{
"source": "JosephKJ/iOD",
"score": 3
} |
#### File: data/datasets/finetune_dataset.py
```python
import os
import torch
import torch.utils.data as data
from fvcore.common.file_io import PathManager
class ImageStoreDataset(data.Dataset):
def __init__(self, cfg):
path = os.path.join(cfg.WG.IMAGE_STORE_LOC, 'image_store.pth')
with PathManager.open(path, 'rb') as f:
image_store = torch.load(f)
self.images = image_store.retrieve()
def __getitem__(self, index):
return self.images[index]
def __len__(self):
return len(self.images)
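# Illustrative usage (hypothetical cfg whose WG.IMAGE_STORE_LOC points at a
# directory containing image_store.pth):
# dataset = ImageStoreDataset(cfg)
# loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)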
```
#### File: iOD/tests/test_rpn.py
```python
import logging
import unittest
import torch
from detectron2.config import get_cfg
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.proposal_generator.build import build_proposal_generator
from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
from detectron2.utils.events import EventStorage
logger = logging.getLogger(__name__)
class RPNTest(unittest.TestCase):
def test_rpn(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1)
backbone = build_backbone(cfg)
proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = Boxes(gt_boxes)
with EventStorage(): # capture events in a iOD storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.0804563984),
"loss_rpn_loc": torch.tensor(0.0990132466),
}
for name in expected_losses.keys():
assert torch.allclose(proposal_losses[name], expected_losses[name])
expected_proposal_boxes = [
Boxes(torch.tensor([[0, 0, 10, 10], [7.3365392685, 0, 10, 10]])),
Boxes(
torch.tensor(
[
[0, 0, 30, 20],
[0, 0, 16.7862777710, 13.1362524033],
[0, 0, 30, 13.3173446655],
[0, 0, 10.8602609634, 20],
[7.7165775299, 0, 27.3875980377, 20],
]
)
),
]
expected_objectness_logits = [
torch.tensor([0.1225359365, -0.0133192837]),
torch.tensor([0.1415634006, 0.0989848152, 0.0565387346, -0.0072308783, -0.0428492837]),
]
for i in range(len(image_sizes)):
assert len(proposals[i]) == len(expected_proposal_boxes[i])
assert proposals[i].image_size == (image_sizes[i][0], image_sizes[i][1])
assert torch.allclose(
proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor
)
assert torch.allclose(proposals[i].objectness_logits, expected_objectness_logits[i])
def test_rrpn(self):
torch.manual_seed(121)
cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
backbone = build_backbone(cfg)
proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
num_images = 2
images_tensor = torch.rand(num_images, 20, 30)
image_sizes = [(10, 10), (20, 30)]
images = ImageList(images_tensor, image_sizes)
image_shape = (15, 15)
num_channels = 1024
features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
gt_instances = Instances(image_shape)
gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
with EventStorage(): # capture events in a iOD storage to discard them
proposals, proposal_losses = proposal_generator(
images, features, [gt_instances[0], gt_instances[1]]
)
expected_losses = {
"loss_rpn_cls": torch.tensor(0.0432923734),
"loss_rpn_loc": torch.tensor(0.1552739739),
}
for name in expected_losses.keys():
assert torch.allclose(proposal_losses[name], expected_losses[name])
expected_proposal_boxes = [
RotatedBoxes(
torch.tensor(
[
[0.60189795, 1.24095452, 61.98131943, 18.03621292, -4.07244873],
[15.64940453, 1.69624567, 59.59749603, 16.34339333, 2.62692475],
[-3.02982378, -2.69752932, 67.90952301, 59.62455750, 59.97010040],
[16.71863365, 1.98309708, 35.61507797, 32.81484985, 62.92267227],
[0.49432933, -7.92979717, 67.77606201, 62.93098450, -1.85656738],
[8.00880814, 1.36017394, 121.81007385, 32.74150467, 50.44297409],
[16.44299889, -4.82221127, 63.39775848, 61.22503662, 54.12270737],
[5.00000000, 5.00000000, 10.00000000, 10.00000000, -0.76943970],
[17.64130402, -0.98095351, 61.40377808, 16.28918839, 55.53118134],
[0.13016054, 4.60568953, 35.80157471, 32.30180359, 62.52872086],
[-4.26460743, 0.39604485, 124.30079651, 31.84611320, -1.58203125],
[7.52815342, -0.91636634, 62.39784622, 15.45565224, 60.79549789],
]
)
),
RotatedBoxes(
torch.tensor(
[
[0.07734215, 0.81635046, 65.33510590, 17.34688377, -1.51821899],
[-3.41833067, -3.11320257, 64.17595673, 60.55617905, 58.27033234],
[20.67383385, -6.16561556, 63.60531998, 62.52315903, 54.85546494],
[15.00000000, 10.00000000, 30.00000000, 20.00000000, -0.18218994],
[9.22646523, -6.84775209, 62.09895706, 65.46472931, -2.74307251],
[15.00000000, 4.93451595, 30.00000000, 9.86903191, -0.60272217],
[8.88342094, 2.65560246, 120.95362854, 32.45022202, 55.75970078],
[16.39088631, 2.33887148, 34.78761292, 35.61492920, 60.81977463],
[9.78298569, 10.00000000, 19.56597137, 20.00000000, -0.86660767],
[1.28576660, 5.49873352, 34.93610382, 33.22600174, 60.51599884],
[17.58912468, -1.63270092, 62.96052551, 16.45713997, 52.91245270],
[5.64749718, -1.90428460, 62.37649155, 16.19474792, 61.09543991],
[0.82255805, 2.34931135, 118.83985901, 32.83671188, 56.50753784],
[-5.33874989, 1.64404404, 125.28501892, 33.35424042, -2.80731201],
]
)
),
]
expected_objectness_logits = [
torch.tensor(
[
0.10111768,
0.09112845,
0.08466332,
0.07589971,
0.06650183,
0.06350251,
0.04299347,
0.01864817,
0.00986163,
0.00078543,
-0.04573630,
-0.04799230,
]
),
torch.tensor(
[
0.11373727,
0.09377633,
0.05281663,
0.05143715,
0.04040275,
0.03250912,
0.01307789,
0.01177734,
0.00038105,
-0.00540255,
-0.01194804,
-0.01461012,
-0.03061717,
-0.03599222,
]
),
]
torch.set_printoptions(precision=8, sci_mode=False)
for i in range(len(image_sizes)):
assert len(proposals[i]) == len(expected_proposal_boxes[i])
assert proposals[i].image_size == (image_sizes[i][0], image_sizes[i][1])
# It seems that there's some randomness in the result across different machines:
# This test can be run on a local machine for 100 times with exactly the same result,
# However, a different machine might produce slightly different results,
# thus the atol here.
err_msg = "computed proposal boxes = {}, expected {}".format(
proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor
)
assert torch.allclose(
proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor, atol=1e-5
), err_msg
err_msg = "computed objectness logits = {}, expected {}".format(
proposals[i].objectness_logits, expected_objectness_logits[i]
)
assert torch.allclose(
proposals[i].objectness_logits, expected_objectness_logits[i], atol=1e-5
), err_msg
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JosephKJ/NCL",
"score": 2
} |
#### File: JosephKJ/NCL/selfsupervised_learning.py
```python
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms
import pickle
import os
import os.path
import datetime
import numpy as np
from data.rotationloader import DataLoader, GenericDataset
from utils.util import AverageMeter, accuracy
from models.resnet import BasicBlock
from tqdm import tqdm
import shutil
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
if is_adapters:
self.parallel_conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, bias=False)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
if is_adapters:
out = F.relu(self.bn1(self.conv1(x)+self.parallel_conv1(x)))
else:
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def train(epoch, model, device, dataloader, optimizer, exp_lr_scheduler, criterion, args):
loss_record = AverageMeter()
acc_record = AverageMeter()
exp_lr_scheduler.step()
model.train()
for batch_idx, (data, label) in enumerate(tqdm(dataloader(epoch))):
data, label = data.to(device), label.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, label)
# measure accuracy and record loss
acc = accuracy(output, label)
acc_record.update(acc[0].item(), data.size(0))
loss_record.update(loss.item(), data.size(0))
# compute gradient and do optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(epoch, loss_record.avg, acc_record.avg))
return loss_record
def test(model, device, dataloader, args):
acc_record = AverageMeter()
model.eval()
for batch_idx, (data, label) in enumerate(tqdm(dataloader())):
data, label = data.to(device), label.to(device)
output = model(data)
# measure accuracy and record loss
acc = accuracy(output, label)
acc_record.update(acc[0].item(), data.size(0))
print('Test Acc: {:.4f}'.format(acc_record.avg))
return acc_record
def main():
# Training settings
parser = argparse.ArgumentParser(description='Rot_resNet')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--num_workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--dataset_name', type=str, default='cifar10', help='options: cifar10, cifar100, svhn')
parser.add_argument('--dataset_root', type=str, default='./data/datasets/CIFAR/')
parser.add_argument('--exp_root', type=str, default='./data/experiments/')
parser.add_argument('--model_name', type=str, default='rotnet')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
torch.manual_seed(args.seed)
runner_name = os.path.basename(__file__).split(".")[0]
model_dir= os.path.join(args.exp_root, runner_name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
args.model_dir = model_dir+'/'+'{}.pth'.format(args.model_name)
dataset_train = GenericDataset(
dataset_name=args.dataset_name,
split='train',
dataset_root=args.dataset_root
)
dataset_test = GenericDataset(
dataset_name=args.dataset_name,
split='test',
dataset_root=args.dataset_root
)
dloader_train = DataLoader(
dataset=dataset_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=True)
dloader_test = DataLoader(
dataset=dataset_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False)
global is_adapters
is_adapters = 0
model = ResNet(BasicBlock, [2,2,2,2], num_classes=4)
model = model.to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=5e-4, nesterov=True)
exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160, 200], gamma=0.2)
criterion = nn.CrossEntropyLoss()
best_acc = 0
for epoch in range(args.epochs +1):
loss_record = train(epoch, model, device, dloader_train, optimizer, exp_lr_scheduler, criterion, args)
acc_record = test(model, device, dloader_test, args)
is_best = acc_record.avg > best_acc
best_acc = max(acc_record.avg, best_acc)
if is_best:
torch.save(model.state_dict(), args.model_dir)
if __name__ == '__main__':
main()
``` |
{
"source": "JosephKJ/PyTorch-MAML-and-Reptile",
"score": 3
} |
#### File: JosephKJ/PyTorch-MAML-and-Reptile/main.py
```python
import argparse
import numpy as np
import torch
from torch import nn, autograd as ag
import matplotlib.pyplot as plt
from copy import deepcopy
def experiment(run, plot=True):
seed = 0
inner_step_size = 0.02 # stepsize in inner SGD
inner_epochs = 1 # number of epochs of each inner SGD
outer_stepsize_reptile = 0.1 # stepsize of outer optimization, i.e., meta-optimization
outer_stepsize_maml = 0.01
n_iterations = 30000 # number of outer updates; each iteration we sample one task and update on it
rng = np.random.RandomState(seed)
torch.manual_seed(seed)
# Define task distribution
x_all = np.linspace(-5, 5, 50)[:, None] # All of the x points
n_train = 10 # Size of training minibatches
def gen_task():
"Generate classification problem"
phase = rng.uniform(low=0, high=2 * np.pi)
ampl = rng.uniform(0.1, 5)
f_randomsine = lambda x: np.sin(x + phase) * ampl
return f_randomsine
# Define model. Reptile paper uses ReLU, but Tanh gives slightly better results
model = nn.Sequential(
nn.Linear(1, 64),
nn.Tanh(),
nn.Linear(64, 64),
nn.Tanh(),
nn.Linear(64, 1),
)
def to_torch(x):
return ag.Variable(torch.Tensor(x))
def train_on_batch(x, y):
x = to_torch(x)
y = to_torch(y)
model.zero_grad()
ypred = model(x)
loss = (ypred - y).pow(2).mean()
loss.backward()
for param in model.parameters():
param.data -= inner_step_size * param.grad.data
def predict(x):
x = to_torch(x)
return model(x).data.numpy()
# Choose a fixed task and minibatch for visualization
f_plot = gen_task()
xtrain_plot = x_all[rng.choice(len(x_all), size=n_train)]
# Training loop
for iteration in range(n_iterations):
weights_before = deepcopy(model.state_dict())
# Generate task
f = gen_task()
y_all = f(x_all)
# Do SGD on this task
inds = rng.permutation(len(x_all))
train_ind = inds[:-1 * n_train]
val_ind = inds[-1 * n_train:] # Val contains 1/5th of the sine wave
for _ in range(inner_epochs):
for start in range(0, len(train_ind), n_train):
mbinds = train_ind[start:start + n_train]
train_on_batch(x_all[mbinds], y_all[mbinds])
if run == 'MAML':
outer_step_size = outer_stepsize_maml * (1 - iteration / n_iterations) # linear schedule
for start in range(0, len(val_ind), n_train):
dpinds = val_ind[start:start + n_train]
x = to_torch(x_all[dpinds])
y = to_torch(y_all[dpinds])
# Compute the grads
model.zero_grad()
y_pred = model(x)
loss = (y_pred - y).pow(2).mean()
loss.backward()
# Reload the model
model.load_state_dict(weights_before)
# SGD on the params
for param in model.parameters():
param.data -= outer_step_size * param.grad.data
else:
# Interpolate between current weights and trained weights from this task
# I.e. (weights_before - weights_after) is the meta-gradient
weights_after = model.state_dict()
outerstepsize = outer_stepsize_reptile * (1 - iteration / n_iterations) # linear schedule
model.load_state_dict({name: weights_before[name] + (weights_after[name] - weights_before[name]) * outerstepsize
for name in weights_before})
# Periodically plot the results on a particular task and minibatch
        if plot and (iteration == 0 or (iteration + 1) % 1000 == 0):
plt.cla()
f = f_plot
weights_before = deepcopy(model.state_dict()) # save snapshot before evaluation
plt.plot(x_all, predict(x_all), label="pred after 0", color=(0, 0, 1))
for inneriter in range(32):
train_on_batch(xtrain_plot, f(xtrain_plot))
if (inneriter + 1) % 8 == 0:
frac = (inneriter + 1) / 32
plt.plot(x_all, predict(x_all), label="pred after %i" % (inneriter + 1), color=(frac, 0, 1 - frac))
plt.plot(x_all, f(x_all), label="true", color=(0, 1, 0))
lossval = np.square(predict(x_all) - f(x_all)).mean()
plt.plot(xtrain_plot, f(xtrain_plot), "x", label="train", color="k")
plt.ylim(-4, 4)
plt.legend(loc="lower right")
plt.pause(0.01)
model.load_state_dict(weights_before) # restore from snapshot
print(f"-----------------------------")
print(f"iteration {iteration + 1}")
print(f"loss on plotted curve {lossval:.3f}") # would be better to average loss over a set of examples, but this is optimized for brevity
def main():
parser = argparse.ArgumentParser(description='MAML and Reptile Sine wave regression example.')
parser.add_argument('--run', dest='run', default='Reptile') # MAML, Reptile
args = parser.parse_args()
experiment(args.run)
if __name__ == '__main__':
main()
``` |
{
"source": "JosephKJ/RegionProposer",
"score": 3
} |
#### File: RegionProposer/lib/_init_path.py
```python
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add caffe to PYTHONPATH
caffe_path = osp.join('/home/joseph/workspace/SDD-RFCN-python/caffe/python')
# caffe_path = osp.join(this_dir, '..', '..', 'caffe', 'python')
add_path(caffe_path)
``` |
{
"source": "JosephKJ/SDD-RFCN-python",
"score": 3
} |
#### File: lib/objectness/utils.py
```python
import scipy
import os
import cv2
import numpy as np
from map import HeatMap
from sklearn.metrics import jaccard_similarity_score
from timer import Timer
from gc_executor import GC_executor
def generate_objectness_map(heatMapObj, image, hr_method='interpolation', use_gradcam=True):
"""
    Generates the objectness confidence score for a given image.
    :param heatMapObj: An object of the HeatMap class
    :param image: The image which should be processed
    :param hr_method: optional, whether to apply super-resolution (SR) or not
:return: binary_map: which contains the objectness info; filtered_image: which is the map applied to the image.
"""
# 1. Create a Higher Resolution Image
img_gc = None
img = scipy.misc.imresize(image, 8.0, interp='bicubic')
if hr_method == 'super_resolution':
# TODO: Super Resolution
pass
# 2. Get objectness
timer = Timer()
timer.tic()
heat_map = heatMapObj.get_map(img)
# Adding for GC
if use_gradcam:
heat_map_for_gc = heat_map.data * ~heat_map.mask
gc = GC_executor()
heat_map_for_gc = scipy.misc.imresize(heat_map_for_gc, image.shape[0:2], interp='bicubic')
# img_gc, binary_map = gc.grab_cut_with_patch(np.copy(image), np.copy(heat_map_for_gc))
img_gc, binary_map = gc.grab_cut_without_patch(np.copy(image))
negative_binary_map = 1 - binary_map
else:
timer.toc()
# print 'Heatmap genetation took {:.3f}s '.format(timer.total_time)
# print timer.total_time
min_pixel_intensity = heat_map.min()
binary_map = np.where(heat_map > min_pixel_intensity, 1, 0)
negative_binary_map = np.where(heat_map > min_pixel_intensity, 0, 1)
# Trim off any extra rows in the map
map_h, map_w = binary_map.shape
img_h, img_w, _ = image.shape
if map_h > img_h:
diff = map_h - img_h
binary_map = np.delete(binary_map, diff, axis=0) # remove 'diff' rows
negative_binary_map = np.delete(negative_binary_map, diff, axis=0) # remove 'diff' rows
if map_w > img_w:
diff = map_w - img_w
binary_map = np.delete(binary_map, diff, axis=1) # remove 'diff' columns
negative_binary_map = np.delete(negative_binary_map, diff, axis=1) # remove 'diff' columns
# Remove the border in the detections
border = 2
temp = np.zeros_like(binary_map)
temp[border:-border, border:-border] = binary_map[border:-border, border:-border]
binary_map = temp
temp = np.ones_like(negative_binary_map)
temp[border:-border, border:-border] = negative_binary_map[border:-border, border:-border]
negative_binary_map = temp
# Adding for GC ends
# Calculate the IoU
iou = findIoU(image, binary_map)
# Calculate objectness score
# It is the percentage of pixels that are not black
h, w = binary_map.shape
obj_score = np.count_nonzero(binary_map) / (w * h * 1.)
# Expand the map to three channels
three_channel_map = np.stack((binary_map, binary_map, binary_map), axis=2)
# Applying map on the image
filtered_image = image * three_channel_map
filtered_image = filtered_image.astype(np.uint8)
return binary_map, negative_binary_map, filtered_image, iou, obj_score, img_gc
def findIoU(image, prediction):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Otsu's Threshholding
ret, thresh = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Noise removal
# kernel = np.ones((2, 2), np.uint8)
# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
gt = thresh.flatten()
    mask = prediction.flatten()
iou = jaccard_similarity_score(gt, mask)
return iou
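# Illustrative usage of findIoU (hypothetical arrays): compare an Otsu-thresholded
# image against a predicted binary map of the same height and width.
# img = cv2.imread('patch.png', cv2.IMREAD_COLOR)
# pred = np.ones(img.shape[:2], dtype=np.uint8)
# iou = findIoU(img, pred)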
def semantic_segment_image(heatMapObj, image, color='red'):
# Getting the objectness
binary_map, negative_binary_map, filtered_image, iou, obj_score, img_gc = generate_objectness_map(heatMapObj, image)
# Calculating the background
three_channel_map = np.stack((negative_binary_map, negative_binary_map, negative_binary_map), axis=2)
background = (image * three_channel_map).astype(np.uint8)
# Segmentation Foreground
r,g,b = get_rgb_from_color(color)
foreground = np.stack((binary_map*r, binary_map*g, binary_map*b), axis=2).astype(np.uint8)
# Combined Image
full_image = background + foreground
return full_image, iou, obj_score
def get_rgb_from_color(color):
colors = {'red': (255, 83, 26), 'green': (26, 255, 83), 'blue': (26, 140, 255),
'black': (77, 0, 77), 'white': (230, 230, 230), 'violet': (255, 26, 255)}
    return colors[color]
if __name__ == '__main__':
print('Inside Main.')
hm = HeatMap()
image_path = os.path.join(
'/home/cs17mtech01001/workspace/SDD-RFCN-python/data/detections/bookstore_video0_9500_pedestrian_2.png')
img = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
semantic_segment_image(hm, img, 'red')
``` |
{
"source": "josephklaw/99-CapstoneProject-201920",
"score": 3
} |
#### File: 99-CapstoneProject-201920/src/m1_extra.py
```python
import shared_gui_delegate_on_robot
import time
import rosebot
import ev3dev.ev3 as ev3
#Sprint 2 Functions
def increasing_rate_of_beep(rate_of_beep,rate_of_beep_increase,robot):
""":type robot: rosebot.RoseBot"""
robot.drive_system.go(20,20)
while True:
distance = robot.sensor_system.ir_proximity_sensor.get_distance_in_inches()
# # time.sleep(abs(int(og_rate_of_beep) - (int(rate_of_beep_increase)/int(distance))))
robot.sound_system.beeper.beep().wait()
if int(distance) <= 20:
for k in range(20):
if int(distance) == k:
delay = (k * int(rate_of_beep_increase) + int(rate_of_beep))*(1/100)
time.sleep(delay)
else:
time.sleep(20)
if distance <= 1:
break
robot.drive_system.stop()
robot.arm_and_claw.raise_arm()
def spin_to_find_object(direction,speed,rate_of_beep,rate_of_beep_increase,robot):
""":type robot: rosebot.RoseBot"""
pixy = ev3.Sensor(driver_name="pixy-lego")
pixy.mode = "SIG1"
if direction == "CCW":
robot.drive_system.spin_counterclockwise_until_sees_object(int(speed),pixy.value(3)*pixy.value(4))
if direction == "CW":
robot.drive_system.spin_clockwise_until_sees_object(int(speed),pixy.value(3)*pixy.value(4))
increasing_rate_of_beep(rate_of_beep,rate_of_beep_increase,robot)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Sprint 3 Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def spin_to_find_package(speed,robot):
""":type robot: rosebot.RoseBot"""
pixy = ev3.Sensor(driver_name="pixy-lego")
pixy.mode = "SIG1"
robot.drive_system.spin_clockwise_until_sees_object(20, pixy.value(3) * pixy.value(4))
robot.drive_system.stop()
robot.drive_system.go(speed, speed)
while True:
if robot.sensor_system.ir_proximity_sensor.get_distance_in_inches() <=0.5:
break
robot.drive_system.stop()
robot.arm_and_claw.raise_arm()
def find_road(robot):
""":type robot: rosebot.RoseBot"""
robot.drive_system.go(-30,30)
while True:
if robot.sensor_system.color_sensor.get_color() == 1:
break
time.sleep(0.01)
robot.drive_system.stop()
def find_house(color,robot):
""":type robot: rosebot.RoseBot"""
robot.drive_system.go(50,50)
while True:
#If the color sensor detects the color corresponding to the house
if robot.sensor_system.color_sensor.get_color() == int(color):
# If the color is green, the robot turns left
if int(color) == 3:
robot.drive_system.stop()
robot.drive_system.go(0, 50)
break
# If the color is yellow, the robot turns right
if int(color) == 4:
robot.drive_system.stop()
robot.drive_system.go(50,0)
break
# If the color is red, the robot turns left
if int(color) == 5:
robot.drive_system.stop()
robot.drive_system.go(0, 50)
break
# If the color is blue,the robot turns right
if int(color) == 2:
robot.drive_system.stop()
robot.drive_system.go(50,0)
break
#Allows for a 90 degree turn
time.sleep(2)
robot.drive_system.stop()
def deliver_package(greeting,goodbye,robot):
""":type robot: rosebot.RoseBot"""
robot.drive_system.go(50, 50)
time.sleep(2.3)
robot.drive_system.stop()
    robot.sound_system.speech_maker(greeting).wait()
    robot.arm_and_claw.lower_arm()
    robot.sound_system.speech_maker(goodbye).wait()
robot.drive_system.go(-50,-50)
def full_delivery(color,greeting,goodbye,robot):
find_road(robot)
find_house(color,robot)
deliver_package(greeting,goodbye,robot)
def theft(robot):
""":type robot: rosebot.RoseBot"""
robot.drive_system.go(50,50)
while True:
if robot.sensor_system.ir_proximity_sensor.get_distance_in_inches() <=0.5:
break
robot.drive_system.stop()
robot.arm_and_claw.raise_arm()
robot.drive_system.go(-50,50)
time.sleep(2)
robot.drive_system.stop()
robot.drive_system.go(-50,50)
#Allows for a turn
time.sleep(2.3)
def getaway(laugh,robot):
robot.drive_system.go(100, 100)
    robot.sound_system.speech_maker(laugh).wait()
time.sleep(5)
robot.drive_system.stop()
def steal_package(color,laugh,robot):
find_house(color,robot)
theft(robot)
getaway(laugh,robot)
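# Illustrative usage (assumes a connected EV3 running rosebot; values are hypothetical):
# robot = rosebot.RoseBot()
# full_delivery(color=3, greeting='Hello', goodbye='Goodbye', robot=robot)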
``` |
{
"source": "josephko91/2d-path-finder",
"score": 3
} |
#### File: josephko91/2d-path-finder/Node.py
```python
import numpy as np
class Node:
def __init__(self, xy, h):
self.coord = xy # coordinate of node
self.height = h # height of node
self.cost = 0 # cumulated cost of node
self.parent = 0 # parent of node
self.id = 0 # unique ID of node
self.heuristic = 0 # heuristic cost of node
self.state_id = np.array2string(xy, precision=0, separator=',')
```
#### File: josephko91/2d-path-finder/search_algorithms.py
```python
from collections import deque
from Node import Node
from utility import find_height, bound_check, height_check, simple_loop_check, loop_check, get_path, calc_unit_path_cost, sort_by_path_cost, find_mud_cost, calc_heuristic, astar_sort
import numpy as np
import time
# Breadth First Search (BFS)
def bfs(grid, start, target, max_height_delta):
""" This function performs BFS on input data """
# create start node
w = grid.shape[1] # width of grid
h = grid.shape[0] # height of grid
neighbor_matrix = np.array([[1, -1], [1, 0], [1, 1],
[0, -1], [0, 1],
[-1, -1], [-1, 0], [-1, 1]])
start_height = find_height(grid, start)
start_node = Node(start, start_height)
node_id = 1 # initialize unique node id
start_node.id = node_id
# add start_node to queue
open_queue = deque()
open_queue.append(start_node)
closed = {} # initialize closed dictionary
# loop until queue empty or target found
loop_count = 1 # for testing
start_loop_time = time.time()
#while True:
while loop_count < 1001:
#start_loop_time = time.time()
if len(open_queue) == 0: # return fail if queue is empty
result = 'FAIL'
print('result: ', result) # test print
print('cost: ', 2147483647) # for testing
return result
active_node = open_queue.popleft()
#print('ACTIVE NODE: ', active_node.coord) #FOR TESTING
if np.array_equal(active_node.coord, target): # return path when target is reached
result = get_path(active_node, closed) # calls function to print full solution path
print('result: ', result) # test print
print('cost: ', len(result) - 1)
return result
for j in range(8): # loop through all 8 potential neighbors
neighbor_coord = active_node.coord + neighbor_matrix[j]
if bound_check(neighbor_coord, w, h): # boundary check
neighbor_height = find_height(grid, neighbor_coord)
if height_check(neighbor_height, active_node.height, max_height_delta): # height check
node_id += 1
neighbor = Node(neighbor_coord, neighbor_height)
neighbor.id = node_id
neighbor.parent = active_node.id
open_queue, closed = loop_check(neighbor, closed, open_queue) # performs loop check and modifies open/closed as needed
closed[active_node.id] = active_node # add node to closed dictionary
#print('closed coord: ', closed[active_node.id].coord) #FOR TESTING
end_loop_time = time.time()
# if loop_count % 1000 == 0: # for testing
# print('still looping...just passed loop #', loop_count)
# print('time spent in loop #', loop_count, ': ', end_loop_time - start_loop_time, 'seconds')
# loop_count += 1
if loop_count % 10 == 0: # for testing
print('======================= loop #', loop_count, "=======================")
print('Popped node ID: ', active_node.id)
print('Popped node coord: ', active_node.coord)
print('Parent of popped node: ', active_node.parent)
print('elapsed time: ', end_loop_time - start_loop_time, 'seconds')
loop_count += 1
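# Illustrative call (hypothetical grid): heights as a 2D numpy array, start/target
# as numpy coordinate pairs, and a maximum allowed height change per step.
# grid = np.array([[0, 1, 2], [0, 1, 2], [0, 1, 2]])
# path = bfs(grid, np.array([0, 0]), np.array([2, 2]), max_height_delta=1)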
# Uniform Cost Search (UCS)
def ucs(grid, start, target, max_height_delta):
""" This function performs UCS on input data """
# create start node
w = grid.shape[1] # width of grid
h = grid.shape[0] # height of grid
neighbor_matrix = np.array([[1, -1], [1, 0], [1, 1],
[0, -1], [0, 1],
[-1, -1], [-1, 0], [-1, 1]])
start_height = find_height(grid, start)
start_node = Node(start, start_height)
node_id = 1 # initialize unique node id
start_node.id = node_id
# add start_node to queue
open_queue = deque()
open_queue.append(start_node)
closed = {} # initialize closed dictionary
# loop until queue empty or target found
loop_count = 1 # for testing
start_loop_time = time.time()
#while True:
while loop_count < 1001:
#start_loop_time = time.time()
if len(open_queue) == 0: # return fail if queue is empty
result = 'FAIL'
print('result: ', result) # test print
print('cost: ', 2147483647) # for testing
return result
active_node = open_queue.popleft()
#print('ACTIVE NODE: ', active_node.coord) #FOR TESTING
if np.array_equal(active_node.coord, target): # return path when target is reached
result = get_path(active_node, closed) # calls function to print full solution path
print('result: ', result) # test print
print('cost: ', active_node.cost) # for testing
return result
for j in range(8): # loop through all 8 potential neighbors
neighbor_coord = active_node.coord + neighbor_matrix[j]
unit_path_cost = calc_unit_path_cost(neighbor_matrix[j])
if bound_check(neighbor_coord, w, h): # boundary check
neighbor_height = find_height(grid, neighbor_coord)
if height_check(neighbor_height, active_node.height, max_height_delta): # height check
node_id += 1
neighbor = Node(neighbor_coord, neighbor_height)
neighbor.id = node_id
neighbor.parent = active_node.id
neighbor.cost = active_node.cost + unit_path_cost # cumulative path cost
open_queue, closed = loop_check(neighbor, closed, open_queue) # performs loop check and modifies open/closed as needed
closed[active_node.id] = active_node # add node to closed dictionary
open_queue = sort_by_path_cost(open_queue) # sort open queue
#print('closed coord: ', closed[active_node.id].coord) #FOR TESTING
end_loop_time = time.time()
if loop_count % 10 == 0: # for testing
print('======================= loop #', loop_count, "=======================")
print('Popped node ID: ', active_node.id)
print('Popped node coord: ', active_node.coord)
print('Parent of popped node: ', active_node.parent)
print('elapsed time: ', end_loop_time - start_loop_time, 'seconds')
loop_count += 1
# A* search (astar)
def astar(grid, start, target, max_height_delta):
""" This function performs A* on input data """
# create start node
w = grid.shape[1] # width of grid
h = grid.shape[0] # height of grid
neighbor_matrix = np.array([[1, -1], [1, 0], [1, 1],
[0, -1], [0, 1],
[-1, -1], [-1, 0], [-1, 1]])
start_height = find_height(grid, start)
start_node = Node(start, start_height)
node_id = 1 # initialize unique node id
start_node.id = node_id
# add start_node to queue
open_queue = deque()
open_queue.append(start_node)
closed = {} # initialize closed dictionary
loop_count = 1 # for testing
# loop until queue empty or target found
start_loop_time = time.time()
#while True:
while loop_count < 1001:
#for i in range(2): # testing
#start_loop_time = time.time()
if len(open_queue) == 0: # return fail if queue is empty
result = 'FAIL'
print('result: ', result) # test print
print('cost: ', 2147483647) # for testing
return result
active_node = open_queue.popleft()
#print('ACTIVE NODE: ', active_node.coord) #FOR TESTING
if np.array_equal(active_node.coord, target): # return path when target is reached
result = get_path(active_node, closed) # calls function to print full solution path
print('result: ', result) # test print
print('cost: ', active_node.cost) # for testing
return result
for j in range(8): # loop through all 8 potential neighbors
neighbor_coord = active_node.coord + neighbor_matrix[j]
unit_path_cost = calc_unit_path_cost(neighbor_matrix[j])
if bound_check(neighbor_coord, w, h): # boundary check
neighbor_height = find_height(grid, neighbor_coord)
mud_cost = find_mud_cost(grid, neighbor_coord, neighbor_height)
#print('mud cost: ', neighbor_coord, mud_cost) #testing
if height_check(neighbor_height, active_node.height, max_height_delta): # height check
height_change = abs(neighbor_height - active_node.height)
node_id += 1
neighbor = Node(neighbor_coord, neighbor_height)
neighbor.id = node_id
neighbor.parent = active_node.id
neighbor.cost = active_node.cost + unit_path_cost + mud_cost + height_change # cumulative path cost
neighbor.heuristic = calc_heuristic(neighbor_coord, target)
open_queue, closed = loop_check(neighbor, closed, open_queue) # performs loop check and modifies open/closed as needed
closed[active_node.id] = active_node # add node to closed dictionary
open_queue = astar_sort(open_queue) # sort open queue
end_loop_time = time.time()
# if loop_count % 1000 == 0: # for testing
# print('still looping...just passed loop #', loop_count)
# print('time spent in loop #', loop_count, ': ', end_loop_time - start_loop_time, 'seconds')
# loop_count += 1
if loop_count % 10 == 0: # for testing
print('======================= loop #', loop_count, "=======================")
print('Popped node ID: ', active_node.id)
print('Popped node coord: ', active_node.coord)
print('Parent of popped node: ', active_node.parent)
print('elapsed time: ', end_loop_time - start_loop_time, 'seconds')
loop_count += 1
#print('closed coord: ', closed[active_node.id].coord) #FOR TESTING
``` |
{
"source": "josephko91/checkers-ai",
"score": 4
} |
#### File: josephko91/checkers-ai/algorithm.py
```python
from board import Board
import copy
import time
import random
def minimax(board, depth, max_player):
"""
The basic minimax algorithm with no modifications
input: board (Board), depth (int), max_player (Bool)
output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
"""
minimax.count += 1
# If you reached a terminal node or game is over
if depth == 0 or board.game_over():
return board.evaluate(max_player), ()
if max_player: # maximizing player
value = float('-inf')
for move, child_board in board.possible_moves().items():
child_board = copy.deepcopy(child_board)
minimax_value = minimax(child_board, depth - 1, False)[0]
if minimax_value > value:
value = minimax_value
best_move = move
return value, best_move
else: # minimizing player
value = float('inf')
board.active_player = not board.active_player # switch the player
for move, child_board in board.possible_moves().items():
child_board = copy.deepcopy(child_board)
child_board.active_player = not child_board.active_player
value = min(value, minimax(child_board, depth - 1, True)[0])
return value, ()
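# Illustrative call (hypothetical Board instance). Note that minimax.count is
# used as a node counter, so it must be initialised before the first call:
# minimax.count = 0
# value, best_move = minimax(board, depth=4, max_player=True)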
# def minimax_alpha_beta(board, depth, alpha, beta, max_player):
# """
# Minimax with alpha-beta pruning
# input: board (Board), depth (int), max_player (Bool)
# output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
# """
# minimax_alpha_beta.count += 1
# # If you reached a terminal node or game is over
# if depth == 0 or board.game_over():
# return board.evaluate(max_player), ()
# if max_player: # maximizing player
# value = float('-inf')
# for move, child_board in board.possible_moves().items():
# child_board = copy.deepcopy(child_board)
# child_board.active_player = not child_board.active_player
# minimax_value = minimax_alpha_beta(child_board, depth - 1, alpha, beta, False)[0]
# if minimax_value > value:
# value = minimax_value
# best_move = move
# # in case of equal value, update based on random binary generator
# if minimax_value == value:
# random_num = random.randint(1, 101)
# if random_num > 50:
# #if bool(random.getrandbits(1)): # random T or F
# best_move = move # update best move with random probability
# alpha = max(alpha, value)
# if alpha > beta:
# break
# elif alpha == beta:
# random_num = random.randint(1, 101)
# if random_num > 50:
# #if bool(random.getrandbits(1)): # random T or F
# break
# return value, best_move, child_board
# else: # minimizing player
# value = float('inf')
# #board_copy = copy.deepcopy(board)
# #board.active_player = not board.active_player # switch the player
# for move, child_board in board.possible_moves().items():
# child_board = copy.deepcopy(child_board)
# child_board.active_player = not child_board.active_player
# value = min(value, minimax_alpha_beta(child_board, depth - 1, alpha, beta, True)[0])
# beta = min(beta, value)
# if beta < alpha:
# break
# elif beta == alpha:
# random_num = random.randint(1, 101)
# if random_num > 50:
# #if bool(random.getrandbits(1)): # random T or F
# break
# return value, ()
def minimax_alpha_beta(board, color, root, depth, alpha, beta, max_player, moves_list, board_list):
"""
Minimax with alpha-beta pruning
input: board (Board), depth (int), max_player (Bool)
output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
"""
minimax_alpha_beta.count += 1
# If you reached a terminal node or game is over
if depth == 0 or board.game_over():
return board.evaluate(color), ()
if max_player: # maximizing player
value = float('-inf')
for move, max_child in board.possible_moves().items():
max_child_copy = copy.deepcopy(max_child)
max_child_copy.active_player = not max_child_copy.active_player
minimax_value = minimax_alpha_beta(max_child_copy, color, root, depth - 1, alpha, beta, False, moves_list, board_list)[0]
if minimax_value >= value:
value = minimax_value
if depth == root:
moves_list.append(move)
board_list.append(max_child)
# # TESTING
# if move[0] == "J": # JUMP heuristic
# value += 10*(len(move)-2)
alpha = max(alpha, value)
if alpha >= beta:
break
# pick best move
if depth == root:
if len(moves_list) == 0:
return value, None, None
else:
random_num = random.randint(0,len(moves_list)-1)
#random_num = 0
best_move = moves_list[random_num]
best_board = board_list[random_num]
return value, best_move, best_board
else:
return value, ()
else: # minimizing player
value = float('inf')
#board_copy = copy.deepcopy(board)
#board.active_player = not board.active_player # switch the player
for move, min_child in board.possible_moves().items():
min_child_copy = copy.deepcopy(min_child)
min_child_copy.active_player = not min_child_copy.active_player
value = min(value, minimax_alpha_beta(min_child_copy, color, root, depth - 1, alpha, beta, True, moves_list, board_list)[0])
beta = min(beta, value)
# # TESTING
# if move[0] == "J": # JUMP heuristic
# value -= 5*(len(move)-2)
if beta <= alpha:
break
return value, ()
def minimax_alpha_beta_final(static_board, board, color, root, depth, alpha, beta, max_player, best_move, best_board):
"""
Minimax with alpha-beta pruning
input: board (Board), depth (int), max_player (Bool)
output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
"""
minimax_alpha_beta_final.count += 1
# If you reached a terminal node or game is over
if depth == 0 or board.game_over():
return board.evaluate(color, static_board), None, None
# maximizing player
if max_player:
value = float('-inf')
moves_dict = board.possible_moves()
for move, max_child in moves_dict.items():
max_child_copy = copy.deepcopy(max_child)
max_child_copy.active_player = not max_child_copy.active_player
minimax_value = minimax_alpha_beta_final(static_board, max_child_copy, color, root, depth - 1, alpha, beta, False, best_move, best_board)[0]
if minimax_value > value:
value = minimax_value
best_move = move
best_board = max_child
if depth == root and minimax_value == value:
if bool(random.getrandbits(1)): # random T/F
value = minimax_value
best_move = move
best_board = max_child
alpha = max(alpha, value)
if alpha >= beta:
break
#return float('inf'), None, None
return value, best_move, best_board
# minimizing player
else:
value = float('inf')
moves_dict = board.possible_moves()
for move, min_child in moves_dict.items():
min_child_copy = copy.deepcopy(min_child)
min_child_copy.active_player = not min_child_copy.active_player
value = min(value, minimax_alpha_beta_final(static_board, min_child_copy, color, root, depth - 1, alpha, beta, True, best_move, best_board)[0])
beta = min(beta, value)
if beta <= alpha:
break
#return float('-inf'), None, None
return value, None, None
def minimax_alpha_beta_rand(static_board, board, color, root, depth, alpha, beta, max_player, best_move, best_board):
"""
Minimax with alpha-beta pruning
input: board (Board), depth (int), max_player (Bool)
output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
"""
minimax_alpha_beta_rand.count += 1
# If you reached a terminal node or game is over
if depth == 0 or board.game_over():
return board.evaluate(color, static_board), None, None
# maximizing player
if max_player:
value = float('-inf')
for move, max_child in board.possible_moves().items():
max_child_copy = copy.deepcopy(max_child)
max_child_copy.active_player = not max_child_copy.active_player
minimax_value = minimax_alpha_beta_rand(static_board, max_child_copy, color, root, depth - 1, alpha, beta, False, best_move, best_board)[0]
if minimax_value > value:
value = minimax_value
best_move = move
best_board = max_child
if depth == root and minimax_value == value:
if bool(random.getrandbits(1)): # random T/F
value = minimax_value
best_move = move
best_board = max_child
alpha = max(alpha, value)
if alpha >= beta:
break
return value, best_move, best_board
# minimizing player
else:
value = float('inf')
#board_copy = copy.deepcopy(board)
#board.active_player = not board.active_player # switch the player
for move, min_child in board.possible_moves().items():
min_child_copy = copy.deepcopy(min_child)
min_child_copy.active_player = not min_child_copy.active_player
value = min(value, minimax_alpha_beta_rand(static_board, min_child_copy, color, root, depth - 1, alpha, beta, True, best_move, best_board)[0])
beta = min(beta, value)
if beta <= alpha:
break
return value, None, None
# ================ UNFINISHED AND UNUSED CODE ====================== #
# def iterative_deepening(board, max_player, time_limit):
# """
# Minimax with alpha-beta pruning and iterative deepening
# input: board (Board), depth (int), max_player (Bool)
# output: tuple of best move sequence e.g. ('E', (1,2), (2,3))
# """
# minimax_alpha_beta.count = 0
# start_time = time.time()
# depth = 0
# condition = True
# result_matrix = []
# hash_table = {}
# while condition == True:
# try:
# result = minimax_alpha_beta(board, depth, float("-inf"), float("inf"), True, start_time, time_limit, hash_table)
# except:
# break
# depth += 1
# result_matrix[depth] = result
# return result_matrix[len(result_matrix)], minimax_alpha_beta.count, depth # return last available result
# # define minimax_alpha_beta here
# def minimax_alpha_beta(board, depth, alpha, beta, max_player, start_time, time_limit, hash_table):
# ### throw timeout exception ###
# if (time_limit - (current_time - start_time)) < 0.01*time_limit or (current_time - start_time) > time_limit:
# raise Exception("timeout!")
# ### retrieve stored bounds from transposition table ###
# if hash(board) in hash_table:
# minimax_alpha_beta.count += 1 # keep count of all recursive function calls (i.e. # of nodes)
# # If you reached a terminal node or game is over
# if depth == 0 or board.game_over():
# return board.evaluate(max_player), ()
# if max_player: # maximizing player
# value = float('-inf')
# for move, child_board in board.possible_moves().items():
# child_board = copy.deepcopy(child_board)
# minimax_value = minimax_alpha_beta(child_board, depth - 1, alpha, beta, False, start_time, time_limit, hash_table)[0]
# if minimax_value > value:
# value = minimax_value
# best_move = move
# alpha = max(alpha, value)
# if alpha >= beta:
# break
# return value, best_move
# else: # minimizing player
# value = float('inf')
# board.active_player = not board.active_player # switch the player
# for move, child_board in board.possible_moves().items():
# child_board = copy.deepcopy(child_board)
# child_board.active_player = not child_board.active_player
# value = min(value, minimax_alpha_beta(child_board, depth - 1, alpha, beta, True, start_time, time_limit, hash_table)[0])
# beta = min(beta, value)
# if beta <= alpha:
# break
# return value, ()
# ### Store bounds in transposition table ###
# # fail low result -> upper bound
# if value <= alpha:
# if hash(board) in hash_table:
# updated_entry = {"upper" = value}
# hash_table[hash(board)].update(updated_entry) # update upper bound
# else: # create new entry
# hash_table[hash(board)] = {"upper" = value, "lower" = float("-inf")}
# # exact minimax value
# if value > alpha and value < beta:
# hash_table[hash(board)] = {"upper" = value, "lower" = value} # overwrite
# # fail high result -> lower bound
# if value >= beta:
# if hash(board) in hash_table:
# updated_entry = {"lower" = value}
# hash_table[hash(board)].update(updated_entry) # update upper bound
# else: # create new entry
# hash_table[hash(board)] = {"upper" = float("inf"), "lower" = value}
```
#### File: josephko91/checkers-ai/board.py
```python
import copy
import itertools
from collections import deque
from utility import create_jumps_sequences, create_jumps_dict
from statistics import mean
from math import sqrt, floor, ceil
import random
class Board:
def __init__(self, board_list, is_black):
self.positions_black = {} # {(row, col):pawn_or_king}
self.positions_white = {}
self.positions_empty = set()
self.num_pieces_black = 0
self.num_pieces_white = 0
self.num_kings_black = 0
self.num_kings_white = 0
self.nrows = len(board_list)
self.ncols = len(board_list[0])
self.active_player = is_black
# iterate through board and fill out attributes
for i in range(self.nrows):
for j in range((i+1)%2, self.ncols + (i+1)%2, 2):
if board_list[i][j] == ".": # empty square
self.positions_empty.add((i, j))
elif board_list[i][j] == "b": # black pawn
self.positions_black[(i, j)] = "pawn"
self.num_pieces_black += 1
elif board_list[i][j] == "B": # black king
self.positions_black[(i, j)] = "king"
self.num_kings_black += 1
self.num_pieces_black += 1
elif board_list[i][j] == "w": # white pawn
self.positions_white[(i, j)] = "pawn"
self.num_pieces_white += 1
elif board_list[i][j] == "W": # white king
self.positions_white[(i, j)] = "king"
self.num_kings_white += 1
self.num_pieces_white += 1
# ============================== UNUSED CODE ============================== #
# def __eq__(self, other):
# if len(self.positions_black) != len(other.positions_black) or len(self.positions_white) != len(other.positions_white):
# return False
# else:
# return self.positions_black == other.positions_black and self.positions_white == other.positions_white
# def __hash__(self):
# hash_value = ""
# black_positions = self.positions_black.keys()
# sorted_black_positions = sorted(black_positions, key = lambda k: (k[0], k[1]))
# white_positions = self.positions_white.keys()
# sorted_white_positions = sorted(white_positions, key = lambda k: (k[0], k[1]))
# for key in sorted_black_positions:
# coord = str(key[0]) + str(key[1])
# if self.positions_black[key] == "pawn":
# letter = str(ord("b"))
# else:
# letter = str(ord("B"))
# hash_value = hash_value + coord + letter
# for key in sorted_white_positions:
# coord = str(key[0]) + str(key[1])
# if self.positions_white[key] == "pawn":
# letter = str(ord("w"))
# else:
# letter = str(ord("W"))
# hash_value = hash_value + coord + letter
# if self.active_player == True:
# color_switch = 1
# else:
# color_switch = 0
# hash_value = hash_value + str(color_switch)
# return int(hash_value)
# ==================================================================================== #
def change_to_king(self, new_position):
"""
Change pawn to a king
"""
if self.active_player: # active player is BLACK
self.positions_black[new_position] = "king"
self.num_kings_black += 1
else: # active player is WHITE
self.positions_white[new_position] = "king"
self.num_kings_white += 1
def simple_move(self, old_position, new_position):
"""
Move a piece from one coordinate to another
"""
self.positions_empty.add(old_position) # add an empty space where old position was
self.positions_empty.remove(new_position) # remove new position from empty
if self.active_player: # active player is BLACK
piece_type = self.positions_black[old_position]
del self.positions_black[old_position] # remove old entry from dictionary
self.positions_black[new_position] = piece_type # add new position to dictionary
# if land on last row, change to king
if piece_type == "pawn" and new_position[0] == 7:
self.change_to_king(new_position)
else: # active player is WHITE
piece_type = self.positions_white[old_position]
del self.positions_white[old_position] # remove old entry from dictionary
self.positions_white[new_position] = piece_type # add new position to dictionary
# if land on last row, change to king
if piece_type == "pawn" and new_position[0] == 0:
self.change_to_king(new_position)
def jump_move(self, old_position, new_position):
"""
Jump active piece and remove the jumped piece
"""
jumped_position = (int((old_position[0]+new_position[0])/2), int((old_position[1]+new_position[1])/2))
self.positions_empty.add(old_position) # add an empty space where old position was
self.positions_empty.add(jumped_position) # add an empty space where jumped piece was
self.positions_empty.remove(new_position) # remove new position from empty
if self.active_player: # active player is BLACK
# exit if there is a failed jump
if jumped_position not in self.positions_white:
return "fail"
piece_type = self.positions_black[old_position]
del self.positions_black[old_position] # remove old entry from dictionary
self.positions_black[new_position] = piece_type # add new position to dictionary
if self.positions_white[jumped_position] == "king":
self.num_pieces_white -= 1
self.num_kings_white -= 1
else:
self.num_pieces_white -= 1
del self.positions_white[jumped_position] # remove jumped opp piece
# if land on last row, change to king
if piece_type == "pawn" and new_position[0] == 7:
self.change_to_king(new_position)
else: # active player is WHITE
# exit if there is a failed jump
if jumped_position not in self.positions_black:
return "fail"
piece_type = self.positions_white[old_position]
del self.positions_white[old_position] # remove old entry from dictionary
self.positions_white[new_position] = piece_type # add new position to dictionary
if self.positions_black[jumped_position] == "king":
self.num_pieces_black -= 1
self.num_kings_black -= 1
else:
self.num_pieces_black -= 1
del self.positions_black[jumped_position] # remove jumped opp piece
# if land on last row, change to king
if piece_type == "pawn" and new_position[0] == 0:
self.change_to_king(new_position)
def evaluate(self, color, static_board):
"""
Evaluation heuristic function of the board
- most recent moves were made by opposite of self.active_player
"""
if color: # active player is BLACK
# value = 0 # initialize
# # if player has won
# if self.num_pieces_white == 0 and self.num_pieces_black > 0:
# return 1000000000
# elif self.num_pieces_black == 0 and self.num_pieces_white > 0:
# return -1000000000
# else: # if player has not won yet
# num_in_king_row = 0
# num_in_middle_center = 0
# num_in_middle_sides = 0
# # iterate through pieces on board
# n = 0
# sum_x = 0
# sum_y = 0
# for position in self.positions_black:
# # calculations for mean
# n += 1
# sum_x += position[0]
# sum_y += position[1]
# # assign value to pieces
# if self.positions_black[position] == "pawn":
# value += 6000
# else:
# value += 9000
# # count number of pieces in king row (i.e. home row)
# if position[0] == 0:
# num_in_king_row += 1
# # count board positioning
# if position[0] > 2 and position[0] < 5: # middle two rows
# if position[1] > 1 and position[1] < 6:
# num_in_middle_center += 1
# else:
# num_in_middle_sides += 1
# # calculate mean
# mean_x = sum_x/n
# mean_y = sum_y/n
# # calculate RMS
# running_sum_x = 0
# running_sum_y = 0
# for position in self.positions_black:
# running_sum_x += (position[0] - mean_x)**2
# running_sum_y += (position[1] - mean_y)**2
# rms = ((running_sum_x + running_sum_y)/n)**(1/2)
# if rms == 0:
# rms_value = 1
# else:
# rms_value = ceil(1/rms)
# # early to mid game
# if static_board.num_pieces_black > 10 and static_board.num_pieces_white > 10:
# value += (2*num_in_middle_center + num_in_middle_sides + num_in_king_row)*100 + (1/rms_value)*20
# else: # later game
# value += 10*((static_board.num_pieces_white - self.num_pieces_white) - (static_board.num_pieces_black - self.num_pieces_black)) + (1/rms_value)*10
# # add random value for last digit of value
# #value += random.randint(0, 9)
# return value
if self.num_pieces_white == 0 and self.num_pieces_black > 0:
return 1000000000
elif self.num_pieces_black == 0 and self.num_pieces_white > 0:
return -1000000000
elif self.num_pieces_black > 8 and self.num_pieces_white > 8:
num_in_king_row = 0
num_in_center = 0
for position in self.positions_black:
# heuristic for king row pawns
if self.positions_black[position] == "pawn":
if position[0] == 0:
num_in_king_row += 1
# heuristic for being in center of board
if position[1] > 1 and position[1] < 6:
num_in_center += 1
if static_board.num_pieces_black > 10:
return 10*(self.num_pieces_black - self.num_kings_black) + 15*self.num_kings_black + 2*num_in_king_row
else:
return 10*(self.num_pieces_black - self.num_kings_black) + 15*self.num_kings_black + num_in_king_row
else:
if static_board.num_pieces_black >= static_board.num_pieces_white:
num_lost_black = static_board.num_pieces_black - self.num_pieces_black
num_lost_white = static_board.num_pieces_white - self.num_pieces_white
if num_lost_black <= num_lost_white:
trades = min(num_lost_white, num_lost_black) + 1
return 10*(self.num_pieces_black - self.num_kings_black) + 15*self.num_kings_black + 10*trades
else:
return 10*(self.num_pieces_black - self.num_kings_black) + 15*self.num_kings_black
else:
return 10*(self.num_pieces_black - self.num_kings_black) + 15*self.num_kings_black
#return 10*self.num_pieces_black
#return 20*(self.num_pieces_black-self.num_kings_black) + 30*self.num_kings_black + 10*(self.num_pieces_black-self.num_pieces_white)
#return 100*(self.num_pieces_black - self.num_kings_black) + 150*(self.num_kings_black) + 10*(self.num_pieces_black - self.num_pieces_white)
# if self.num_pieces_black < 10:
# #========================= hard-coded assignment based on location ======================#
# piece_value = {(0,1):4, (0,3):4, (0,5):4, (0,7):4,
# (1,0):4, (1,2):3, (1,4):3, (1,6):3,
# (2,1):3, (2,3):2, (2,5):2, (2,7):4,
# (3,0):4, (3,2):2, (3,4):1, (3,6):3,
# (4,1):3, (4,3):1, (4,5):2, (4,7):4,
# (5,0):4, (5,2):2, (5,4):1, (5,6):3,
# (6,1):3, (6,3):2, (6,5):2, (6,7):4,
# (7,0):4, (7,2):4, (7,4):4, (7,6):4}
# h_value = 0
# for key in self.positions_black:
# h_value += piece_value[key]
# return h_value + self.num_kings_black + 5*(self.num_pieces_black-self.num_kings_white)
# else:
# return 4*(self.num_pieces_black - self.num_kings_black) + 8*self.num_kings_black
#=======================================================================================#
#if game over for any player
# if self.num_pieces_white == 0 or self.num_pieces_black == 0:
# heuristic = (self.num_pieces_black - self.num_kings_black) + 3*(self.num_kings_black) + 20*self.num_pieces_black
# return heuristic
# # calculate heuristics
# num_in_king_row = 0
# num_in_center = 0
# x_white = []
# y_white = []
# x_black = []
# y_black = []
# for position in self.positions_black:
# # heuristic for king row pawns
# if self.positions_black[position] == "pawn":
# if position[0] == 7:
# num_in_king_row += 1
# # heuristic for being in center of board
# if position[1] > 1 and position[1] < 6:
# num_in_center += 1
# # store x and y coords in list
# x_black.append(position[0])
# y_black.append(position[1])
# for position in self.positions_white:
# # store x and y coords in list
# x_white.append(position[0])
# y_white.append(position[1])
# # calculate centroids
# centroid_white = (mean(x_white), mean(y_white))
# centroid_black = (mean(x_black), mean(y_black))
# # calculate distance between centroids
# dist_bw_centroids = sqrt(pow(centroid_white[0]-centroid_black[0], 2) + pow(centroid_white[1]-centroid_black[1], 2))
# if self.num_pieces_black > 10: # early in the game
# heuristic = 10*(self.num_pieces_black - self.num_kings_black) + 20*num_in_king_row + 5*num_in_center
# elif self.num_pieces_white < 3:
# idling_kings = 0
# for position in self.positions_black:
# # heuristic to deter kings away from idling at kings row
# if self.positions_black[position] == "king":
# if position[0] == 0:
# idling_kings += 2
# if position[0] == 1 or position[0] == 2:
# idling_kings += 1
# heuristic = 10*(self.num_pieces_black) - 5*idling_kings - floor(5*dist_bw_centroids)
# else:
# heuristic = 10*(self.num_pieces_black - self.num_kings_black) + 30*(self.num_kings_black) + 10*(self.num_pieces_black - self.num_pieces_white)
# return heuristic
else: # active player is WHITE
#return 10*self.num_pieces_white
if self.num_pieces_black == 0 and self.num_pieces_white > 0:
return 1000000000
elif self.num_pieces_white == 0 and self.num_pieces_black > 0:
return -1000000000
elif self.num_pieces_black > 8 and self.num_pieces_white > 8:
num_in_king_row = 0
num_in_center = 0
for position in self.positions_white:
# heuristic for king row pawns
if self.positions_white[position] == "pawn":
if position[0] == 0:
num_in_king_row += 1
# heuristic for being in center of board
if position[1] > 1 and position[1] < 6:
num_in_center += 1
if static_board.num_pieces_white > 10:
return 10*(self.num_pieces_white - self.num_kings_white) + 15*self.num_kings_white + 2*num_in_king_row
else:
return 10*(self.num_pieces_white - self.num_kings_white) + 15*self.num_kings_white + num_in_king_row
else:
if static_board.num_pieces_white >= static_board.num_pieces_black:
num_lost_black = static_board.num_pieces_black - self.num_pieces_black
num_lost_white = static_board.num_pieces_white - self.num_pieces_white
if num_lost_white <= num_lost_black:
trades = min(num_lost_white, num_lost_black) + 1
return 10*(self.num_pieces_white - self.num_kings_white) + 15*self.num_kings_white + 10*trades
else:
return 10*(self.num_pieces_white - self.num_kings_white) + 15*self.num_kings_white
else:
return 10*(self.num_pieces_white - self.num_kings_white) + 15*self.num_kings_white
# #if game over for any player
# if self.num_pieces_white == 0 or self.num_pieces_black == 0:
# heuristic = (self.num_pieces_white - self.num_kings_white) + 3*(self.num_kings_white) + 20*self.num_pieces_white
# return heuristic
# # calculate heuristics
# num_in_king_row = 0
# num_in_center = 0
# x_white = []
# y_white = []
# x_black = []
# y_black = []
# for position in self.positions_white:
# # heuristic for king row pawns
# if self.positions_white[position] == "pawn":
# if position[0] == 7:
# num_in_king_row += 1
# # heuristic for being in center of board
# if position[1] > 1 and position[1] < 6:
# num_in_center += 1
# # store x and y coords in list
# x_white.append(position[0])
# y_white.append(position[1])
# for position in self.positions_black:
# # store x and y coords in list
# x_black.append(position[0])
# y_black.append(position[1])
# # calculate centroids
# centroid_white = (mean(x_white), mean(y_white))
# centroid_black = (mean(x_black), mean(y_black))
# # calculate distance between centroids
# dist_bw_centroids = sqrt(pow(centroid_white[0]-centroid_black[0], 2) + pow(centroid_white[1]-centroid_black[1], 2))
# if self.num_pieces_white > 10: # early in the game
# heuristic = 10*(self.num_pieces_white - self.num_kings_white) + 20*num_in_king_row + 5*num_in_center
# elif self.num_pieces_black < 3:
# idling_kings = 0
# for position in self.positions_white:
# # heuristic to deter kings away from idling at kings row
# if self.positions_white[position] == "king":
# if position[0] == 0:
# idling_kings += 2
# if position[0] == 1 or position[0] == 2:
# idling_kings += 1
# heuristic = 10*(self.num_pieces_white) - 5*idling_kings - floor(5*dist_bw_centroids)
# else:
# heuristic = 10*(self.num_pieces_white - self.num_kings_white) + 30*(self.num_kings_white) + 10*(self.num_pieces_white - self.num_pieces_black)
# return heuristic
def game_over(self):
"""
Check if board is in a game over state
"""
# if self.num_pieces_black < 3 or self.num_pieces_white < 3:
# if self.num_pieces_black == 0 or self.num_pieces_white == 0 or len(self.possible_moves()) == 0:
# return True
# else:
# return False
# else:
# if self.num_pieces_black == 0 or self.num_pieces_white == 0: #or len(self.possible_moves()) == 0:
# return True
# else:
# return False
if self.num_pieces_black == 0 or self.num_pieces_white == 0: #or len(self.possible_moves()) == 0:
return True
else:
return False
def empty_here(self, coord):
"""
Returns true if the coordinate is empty (and on the board)
"""
if coord in self.positions_empty:
return True
else:
return False
def opp_here(self, coord):
"""
Returns true if there is an opponent's piece in the coordinate
"""
if self.active_player: # active player is BLACK
if coord in self.positions_white:
return True
else:
return False
else: # active player is WHITE
if coord in self.positions_black:
return True
else:
return False
def possible_moves(self):
"""
Finds all possible next moves (i.e., all possible next boards)
input: self (Board)
output: moves (Dictionary) e.g. {(J, (coord_old), (coord_jump_1), (coord_jump_2)) : new_board}
"""
moves = {}
jumps_available = False
if self.active_player: # active player is BLACK
for piece in self.positions_black: # loop through all available pieces
piece_type = self.positions_black[piece]
# check if jump available (MUST jump if available)
jumps_queue = self.jumps_available(piece, piece_type, deque())
if jumps_queue != None:
jumps_dict = create_jumps_dict(jumps_queue)
jumps_sequences = create_jumps_sequences(jumps_dict, piece)
for sequence in jumps_sequences:
jumps_available = True
board_copy = copy.deepcopy(self)
origin = piece
for i in range(1, len(sequence)):
landing_spot = sequence[i]
board_copy = copy.deepcopy(board_copy)
if board_copy.jump_move(origin, landing_spot) == "fail":
break
origin = landing_spot
else:
sequence.insert(0, "J")
sequence = tuple(sequence)
moves[sequence] = board_copy
if jumps_available == False: # if there are no jumps available to any piece
for piece in self.positions_black:
piece_type = self.positions_black[piece]
# check if simple move available
for move in self.simple_moves_available(piece, piece_type):
board_copy = copy.deepcopy(self)
board_copy.simple_move(piece, move)
move_sequence = ('E', piece, move)
moves[move_sequence] = board_copy
else: # active player is WHITE
for piece in self.positions_white: # loop through all available pieces
piece_type = self.positions_white[piece]
# check if jump available (MUST jump if available)
jumps_queue = self.jumps_available(piece, piece_type, deque())
if jumps_queue != None:
jumps_dict = create_jumps_dict(jumps_queue)
jumps_sequences = create_jumps_sequences(jumps_dict, piece)
for sequence in jumps_sequences:
jumps_available = True
board_copy = copy.deepcopy(self)
origin = piece
for i in range(1, len(sequence)):
landing_spot = sequence[i]
board_copy = copy.deepcopy(board_copy)
if board_copy.jump_move(origin, landing_spot) == "fail":
break
origin = landing_spot
else:
sequence.insert(0, "J")
sequence = tuple(sequence)
moves[sequence] = board_copy
if jumps_available == False: # if there are no jumps available to any piece
for piece in self.positions_white:
piece_type = self.positions_white[piece]
# check if simple move available
for move in self.simple_moves_available(piece, piece_type):
board_copy = copy.deepcopy(self)
board_copy.simple_move(piece, move)
move_sequence = ('E', piece, move)
moves[move_sequence] = board_copy
return moves
def simple_moves_available(self, piece, piece_type):
"""
Checks which simple moves can be made and returns a list
"""
moves_available = []
if piece_type == "king": # check all four diagonals
for i in range(-1, 2, 2):
for j in range(-1, 2, 2):
if self.empty_here((piece[0]+i, piece[1]+j)):
moves_available.append((piece[0]+i, piece[1]+j))
else: # piece is a pawn
if self.active_player: # active player is BLACK
for j in range(-1, 2, 2): # only moves DOWN (i.e., index goes UP)
if self.empty_here((piece[0]+1, piece[1]+j)):
moves_available.append((piece[0]+1, piece[1]+j))
else: # active player is WHITE
for j in range(-1, 2, 2): # only moves UP (i.e., index goes DOWN)
if self.empty_here((piece[0]-1, piece[1]+j)):
moves_available.append((piece[0]-1, piece[1]+j))
return moves_available
def jumps_available(self, piece, piece_type, jumps):
"""
Checks to see if there is a jump available from this piece's coordinate
and returns available jumps in list of sequences
"""
no_jumps = True
if piece_type == "king": # check all four diagonals
for i in range(-1, 2, 2):
for j in range(-1, 2, 2):
if self.opp_here((piece[0]+i, piece[1]+j)) and self.empty_here((piece[0]+2*i, piece[1]+2*j)):
jumps.append(piece)
no_jumps = False
landing_coord = (piece[0]+2*i, piece[1]+2*j)
jumps.append(landing_coord)
board_copy = copy.deepcopy(self)
board_copy.jump_move(piece, landing_coord)
board_copy.jumps_available(landing_coord, piece_type, jumps) # recursive call here
if no_jumps:
return
else: # it's a pawn piece
if self.active_player: # active player is BLACK
for j in range(-1, 2, 2): # only moves DOWN (i.e., index goes UP)
if self.opp_here((piece[0]+1, piece[1]+j)) and self.empty_here((piece[0]+2, piece[1]+2*j)):
jumps.append(piece)
no_jumps = False
landing_coord = (piece[0]+2, piece[1]+2*j)
jumps.append(landing_coord)
board_copy = copy.deepcopy(self)
board_copy.jump_move(piece, landing_coord)
board_copy.jumps_available(landing_coord, piece_type, jumps) # recursive call here
if no_jumps:
return
else: # active player is WHITE
for j in range(-1, 2, 2): # only moves UP (i.e., index goes DOWN)
if self.opp_here((piece[0]-1, piece[1]+j)) and self.empty_here((piece[0]-2, piece[1]+2*j)): # jump available
jumps.append(piece)
no_jumps = False
landing_coord = (piece[0]-2, piece[1]+2*j)
jumps.append(landing_coord)
board_copy = copy.deepcopy(self)
board_copy.jump_move(piece, landing_coord)
board_copy.jumps_available(landing_coord, piece_type, jumps) # recursive call here
if no_jumps:
return
return jumps
# jumps_dict = create_jumps_dict(jumps)
# jumps_sequences = create_jumps_sequences(jumps_dict, piece)
# return jumps_sequences
```
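For orientation, a small usage sketch of `Board` follows. The 8x8 layout strings mirror the format `gamemaster.py` reads from `input.txt`; the starting position shown here is an illustrative assumption, not data taken from the repository.
```python
# Illustrative Board usage: a conventional-looking starting position with black to move.
board_list = [
    list(".b.b.b.b"),
    list("b.b.b.b."),
    list(".b.b.b.b"),
    list("........"),
    list("........"),
    list("w.w.w.w."),
    list(".w.w.w.w"),
    list("w.w.w.w."),
]
board = Board(board_list, is_black=True)   # True -> black is the active player
moves = board.possible_moves()             # {('E', (from_row, from_col), (to_row, to_col)): Board, ...}
for move_sequence in moves:
    print(move_sequence)
print("black:", board.num_pieces_black, "white:", board.num_pieces_white)
```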
#### File: josephko91/checkers-ai/gamemaster.py
```python
import os
from utility import write_to_output, print_board, color_is_black, board_to_list, print_results
from board import Board
import time
from algorithm import minimax, minimax_alpha_beta, minimax_alpha_beta_final, minimax_alpha_beta_rand
from math import sqrt, floor
start = time.time()
# parse input file
with open("input.txt", "r") as input_file:
game_mode = input_file.readline().rstrip() # 1st line: game mode
color = input_file.readline().rstrip() # 2nd line: player color
time_left = float(input_file.readline().rstrip()) # 3rd line: remaining time
board_list = []
for i in range(8): # next 8 lines: 2-d list representing the board
board_list.append(list(input_file.readline().rstrip()))
# create initial board object
is_black = color_is_black(color)
start = time.time()
board = Board(board_list, is_black)
end = time.time()
print("time to make board object =", end - start)
# write game results to test.txt
with open('test.txt', 'w') as output:
# print description of game
print("d_b = 4; d_w = 4; simple heuristic for both b/w", file = output)
#print("v3 changes: changed king weight from 30 to 20, added delta weight to small opp piece case", file = output)
# play 10 games and record the results in test.txt
black_wins = 0
white_wins = 0
timeouts = 0
for i in range(10):
start = time.time()
# parse input file
with open("input.txt", "r") as input_file:
game_mode = input_file.readline().rstrip() # 1st line: game mode
color = input_file.readline().rstrip() # 2nd line: player color
time_left = float(input_file.readline().rstrip()) # 3rd line: remaining time
board_list = []
for _ in range(8): # next 8 lines: 2-d list representing the board
board_list.append(list(input_file.readline().rstrip()))
# create initial board object
is_black = color_is_black(color)
start = time.time()
board = Board(board_list, is_black)
end = time.time()
print("time to make board object =", end - start)
max_iterations = 100
iteration_count = 1
total_time_black = 0
total_time_white = 0
# loop until someone wins or maximum iterations exceeded
while True:
start = time.time()
minimax_alpha_beta_rand.count = 0
minimax_alpha_beta_final.count = 0
move_count = floor(iteration_count/2)
if board.active_player: # black's turn
# if iteration_count > 50:
# if move_count % 2 == 0:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 1, float("-inf"), float("inf"), True, (), board)
# # elif move_count % 9 == 0:
# # value, result, new_board = minimax_alpha_beta(board, 8, float("-inf"), float("inf"), True)
# else:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 6, float("-inf"), float("inf"), True, (), board)
if move_count%2 == 0:
value, result, new_board = minimax_alpha_beta_final(board, board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
else:
value, result, new_board = minimax_alpha_beta_final(board, board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
# if move_count < 5:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
# elif board.num_pieces_black < 4:
# if move_count%2 == 0:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
# else:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
# else:
# if move_count%2 == 0:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
# else:
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
else: # white's turn
# value, result, new_board = minimax_alpha_beta(board, board.active_player, 4, 4, float("-inf"), float("inf"), True, (), board)
if move_count%2 == 0:
value, result, new_board = minimax_alpha_beta_rand(board, board, board.active_player, 2, 2, float("-inf"), float("inf"), True, (), board)
else:
value, result, new_board = minimax_alpha_beta_rand(board, board, board.active_player, 2, 2, float("-inf"), float("inf"), True, (), board)
end = time.time()
runtime = end - start
# if we run into a blocked board with lots of pieces left (i.e. it wasn't caught in game_over method):
if result == None:
print("total time black =", total_time_black)
print("total time white =", total_time_white)
if board.num_pieces_black == 0:
white_wins += 1
elif board.num_pieces_white == 0:
black_wins += 1
else:
timeouts += 1
break
# set up new board
board = new_board
# create new board_list (for printing later)
board_list = board_to_list(board)
# print result to game_output.txt
print_results(board, result, board_list, iteration_count, runtime)
# accumulate total runtime
if board.active_player: # black's total time
total_time_black += runtime
else: # white's total time
total_time_white += runtime
# switch player
board.active_player = not board.active_player
# break loop if someone won or exceeded max iterations
if board.game_over() or iteration_count >= max_iterations:
print("total time black =", total_time_black)
print("total time white =", total_time_white)
if board.num_pieces_black == 0:
white_wins += 1
elif board.num_pieces_white == 0:
black_wins += 1
else:
timeouts += 1
break
iteration_count += 1
# print final results to file
print("black wins =", black_wins, file = output)
print("white wins =", white_wins, file = output)
print("timeouts =", timeouts, file = output)
# def print_results(board, result, board_list, iteration_count, runtime):
# if board.active_player == True:
# player = "black"
# else:
# player = "white"
# print("iteration:", iteration_count)
# print("runtime:", runtime)
# print("player:", player)
# print("move:", result)
# for row in board_list:
# print(row)
``` |
{
"source": "josephko91/first-order-logic-engine",
"score": 3
} |
#### File: josephko91/first-order-logic-engine/resolution.py
```python
from Literal import *
from Sentence import *
from KnowledgeBase import *
import copy
import time
def resolution(query, kb):
"""
The resolution algorithm.
input: query (string), kb (KnowledgeBase)
output: returns "TRUE" or "FALSE" (kb is updated in place, not returned)
"""
#new = set()
history = set()
# negate query and add to KB
query_literal = Literal(query) # convert to Literal
query_literal.negate() # negate
negated_query_sentence = Sentence(str(query_literal)) # convert to Sentence
kb.add_sentence(negated_query_sentence) # add to KB
# loop until all possible sentences have been inferred or contradiction is found
loop_count = 1
# #with open("test_output.txt", "w") as test_output:
# print("==================== original KB ====================")
# print("KB size:", kb.size)
# for sentence in kb.set:
# print(sentence)
sentence1 = negated_query_sentence # initial root of resolution
while True:
#new = set()
# with open("test_output.txt", "a") as test_output:
# print("==================== loop", loop_count, "====================", file=test_output)
start = time.time()
# start with resolvent from previous iteration (initialize with negated query)
possibly_resolvable_sentences = sentence1.get_possibly_resolvable(kb)
resolvents = [] # stays empty if nothing resolves, so the check below can fall through safely
for sentence2_tuple in possibly_resolvable_sentences:
# sentence2_tuple = (predicate, Sentence)
predicate = sentence2_tuple[0]
sentence2 = sentence2_tuple[1]
# skip to next iteration if sentences are the same
if sentence1 == sentence2:
continue
# skip to next iteration if sentences already been resolved
sentence_pair_id_1 = hash((sentence1, predicate, sentence2)) # unique id
sentence_pair_id_2 = hash((sentence2, predicate, sentence1))
if sentence_pair_id_1 in history:
continue
elif sentence_pair_id_2 in history:
continue
else:
history.add(sentence_pair_id_1) # add to history
history.add(sentence_pair_id_2)
# resolve two sentences
resolvents = sentence1.resolve(predicate, sentence2)
if len(resolvents) == 0:
continue
else:
break # if reached this point -> break loop with current sentence 2
# if new KB is a subset of old KB
if len(resolvents) == 0:
# print("final kb size =", kb.size)
return "FALSE"
# if there is a contradiction encountered
if resolvents == "CONTRADICTION":
# print("sentence1:", sentence1)
# print("sentence2:", sentence2)
# print("final kb size =", kb.size)
return "TRUE"
# if it was resolved, add to new
else:
# TESTING
# with open("test_output.txt", "a") as test_output:
# print(" -> sentence 1:", sentence1, file=test_output)
# print(" -> sentence 2:", sentence2, file=test_output)
# add new resolvent to KB
resolvent = resolvents[0] # set new resolvent
kb.add_sentence(resolvent) # update kb
end = time.time()
# with open("test_output.txt", "a") as test_output:
# print("!!! NEW SENTENCES !!!", file=test_output)
# print("( size of new =", len(new), ")", file=test_output)
# for sentence in new:
# print(" ", sentence, file=test_output)
# print("============================================", file=test_output)
# print("KB size, loop", loop_count, ":", kb.size, file=test_output)
# print("time in loop", loop_count, "=", end-start, "seconds", file=test_output)
# if loop_count == 5:
# test_output.close()
# return "False"
loop_count += 1
# update sentence1
sentence1 = resolvent
``` |
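The driver above depends on the repository's `Literal`, `Sentence`, and `KnowledgeBase` classes, whose internals are not shown here. As a generic illustration of the resolution rule itself (deliberately not using the project's API), the sketch below resolves propositional clauses represented as frozensets of signed literals.
```python
# Generic propositional resolution step, independent of this repository's classes.
# A clause is a frozenset of literals; a literal is a string, prefixed with "~" when negated.

def negate(literal):
    return literal[1:] if literal.startswith("~") else "~" + literal

def resolve_clauses(c1, c2):
    """Return every resolvent of clauses c1 and c2 (an empty frozenset means contradiction)."""
    resolvents = []
    for lit in c1:
        if negate(lit) in c2:
            resolvents.append(frozenset((c1 - {lit}) | (c2 - {negate(lit)})))
    return resolvents

print(resolve_clauses(frozenset({"~P", "Q"}), frozenset({"P"})))   # [frozenset({'Q'})]
print(resolve_clauses(frozenset({"Q"}), frozenset({"~Q"})))        # [frozenset()] -> contradiction
```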
{
"source": "josephko91/hmm",
"score": 3
} |
#### File: josephko91/hmm/tagger.py
```python
import numpy as np
from util import accuracy
from hmm import HMM
def model_training(train_data, tags):
"""
Train HMM based on training data
Inputs:
- train_data: (1*num_sentence) a list of sentences, each sentence is an object of line class
- tags: (1*num_tags) a list of POS tags
Returns:
- model: an object of HMM class initialized with parameters(pi, A, B, obs_dict, state_dict) you calculated based on train_data
"""
model = None
L = len(train_data)
S = len(tags)
###################################################
###### Create obs_dict ######
obs_dict = {}
obs_indx = 0
for i in range(L):
for j in range(train_data[i].length):
if train_data[i].words[j] not in obs_dict.keys():
obs_dict[train_data[i].words[j]] = obs_indx
obs_indx += 1
###### Create state_dict ######
state_dict = {tag: i for i, tag in enumerate(tags)}
###### Calculate pi ######
pi = np.zeros(S, dtype = 'float')
state_counts = {i:0 for i in tags} # dictionary storing counts for each tag (state)
for i in range(len(train_data)):
first_tag = train_data[i].tags[0] # first tag of each Line object
state_counts[first_tag] += 1 # increase count in dictionary
# add counts to pi array in same order as tags array
for i in range(len(tags)):
pi[i] = state_counts[tags[i]]
# normalize by total number of lines to get probabilities
pi = pi / len(train_data)
###### Calculate A ######
A = np.zeros([S, S], dtype = 'float')
# loop through each line
for i in range(len(train_data)):
# loop through each element (except the last one)
for j in range(train_data[i].length - 1):
from_indx = state_dict[train_data[i].tags[j]]
to_indx = state_dict[train_data[i].tags[j+1]]
A[from_indx, to_indx] += 1
# normalize each row of A by sum to get transition matrix
sum_rows = A.sum(axis = 1)
A = A / sum_rows[:, np.newaxis]
###### Calculate B ######
B = np.zeros([S, len(obs_dict)])
for i in range(len(train_data)):
# loop through each element
for j in range(train_data[i].length):
s_indx = state_dict[train_data[i].tags[j]]
o_indx = obs_dict[train_data[i].words[j]]
B[s_indx, o_indx] += 1
# normalize each row of B to get emissions matrix
sum_rows = B.sum(axis = 1)
B = B / sum_rows[:, np.newaxis]
# initialize HMM object
model = HMM(pi, A, B, obs_dict, state_dict)
###################################################
return model
def sentence_tagging(test_data, model, tags):
"""
Inputs:
- test_data: (1*num_sentence) a list of sentences, each sentence is an object of line class
- model: an object of HMM class
- tags: list of unique tags from pos_tags.txt (optional use)
Returns:
- tagging: (num_sentence*num_tagging) a 2D list of output tagging for each sentences on test_data
"""
tagging = []
S = len(model.pi)
###################################################
# loop through each line in data
for i in range(len(test_data)):
# loop through each element
for j in range(test_data[i].length):
# update obs_dict and emissions if there is an unseen observation
if test_data[i].words[j] not in model.obs_dict.keys():
o_indx = len(model.obs_dict)
model.obs_dict[test_data[i].words[j]] = o_indx
new_column = np.full((S, 1), 10**-6)
model.B = np.append(model.B, new_column, axis = 1)
# run viterbi on each line
Osequence = np.asarray(test_data[i].words)
state_path = model.viterbi(Osequence)
tagging.append(state_path)
###################################################
return tagging
``` |
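To make the counting logic in `model_training` concrete, here is a self-contained toy recreation of the initial-state (`pi`) and transition (`A`) estimates on a two-sentence tagged corpus. It uses plain tuples and numpy instead of the repository's `Line` objects, and the corpus below is made up purely for illustration.
```python
import numpy as np

# Toy tagged corpus: each sentence is a list of (word, tag) pairs.
corpus = [
    [("the", "DET"), ("dog", "NOUN"), ("barks", "VERB")],
    [("a", "DET"), ("cat", "NOUN"), ("sleeps", "VERB")],
]
tags = ["DET", "NOUN", "VERB"]
state_dict = {tag: i for i, tag in enumerate(tags)}
S = len(tags)

# pi: fraction of sentences that start with each tag.
pi = np.zeros(S)
for sentence in corpus:
    pi[state_dict[sentence[0][1]]] += 1
pi /= len(corpus)                            # -> [1. 0. 0.]

# A: row-normalized counts of tag -> next-tag transitions.
A = np.zeros((S, S))
for sentence in corpus:
    for (_, t1), (_, t2) in zip(sentence, sentence[1:]):
        A[state_dict[t1], state_dict[t2]] += 1
row_sums = A.sum(axis=1, keepdims=True)
row_sums[row_sums == 0] = 1                  # guard for tags that never transition (VERB here)
A = A / row_sums                             # DET -> NOUN and NOUN -> VERB with probability 1
print(pi)
print(A)
```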
{
"source": "josephkokchin/MY-Syok-Bot",
"score": 3
} |
#### File: MY-Syok-Bot/bot/petrol.py
```python
from requests import get
from parsel import Selector as sel
def PetrolPrice():
"""DaMaCaiDMCJackPot Methods!"""
# Connect to Source
url='https://hargapetrol.my/malaysia-petrol-prices-list.html'
data=get(url,headers={'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3','Accept-Encoding': 'gzip, deflate, br','Accept-Language': 'en-GB,en;q=0.9,en-US;q=0.8,zh-TW;q=0.7,zh;q=0.6','Cache-Control': 'max-age=0','Connection': 'keep-alive','DNT': '1','Upgrade-Insecure-Requests': '1','User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
# Find the valid range
date_from=sel(text=data.text).xpath('.//div[@class="daterange active"][1]/text()').get()
date_till=sel(text=data.text).xpath('.//div[@class="daterange active"][2]/text()').get()
# Price
ron95=sel(text=data.text).xpath('//div[@class="ron95 active"][1]/text()').get()
ron97=sel(text=data.text).xpath('//div[@class="ron97 active"][1]/text()').get()
diesel=sel(text=data.text).xpath('//div[@class="diesel active"][1]/text()').get()
ron95_lw=sel(text=data.text).xpath('.//span[2]//span[@itemprop="priceComponent"]//div[@class="ron95"]/text()').get()
ron97_lw=sel(text=data.text).xpath('.//span[2]//span[@itemprop="priceComponent"]//div[@class="ron97"]/text()').get()
diesel_lw=sel(text=data.text).xpath('.//span[2]//span[@itemprop="priceComponent"]//div[@class="diesel"]/text()').get()
ron95_diff=round(float(ron95)-float(ron95_lw), 2)
ron95_diff='+'+'{:.2f}'.format(ron95_diff) if ron95_diff>=0 else '{:.2f}'.format(ron95_diff)
ron97_diff=round(float(ron97)-float(ron97_lw), 2)
ron97_diff='+'+'{:.2f}'.format(ron97_diff) if ron97_diff>=0 else '{:.2f}'.format(ron97_diff)
diesel_diff=round(float(diesel)-float(diesel_lw), 2)
diesel_diff='+'+'{:.2f}'.format(diesel_diff) if diesel_diff>=0 else '{:.2f}'.format(diesel_diff)
prices = "RON95 - " + "RM"+str(ron95)+"({})".format(ron95_diff) + "\nRON97 - " + "RM"+str(ron97)+"({})".format(ron97_diff) + "\nDiesel - " + "RM"+str(diesel)+"({})".format(diesel_diff)
# Create Reply
chat_reply = "<b>Petrol prices from " + str(date_from) + " until " + str(date_till) + "</b> \n\n"
chat_reply += prices
return chat_reply
``` |
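The week-over-week difference formatting is the fiddly part of `PetrolPrice()`, so it is worth sanity-checking in isolation. The helper below is hypothetical (not part of the bot) and simply mirrors that formatting:
```python
# Hypothetical helper mirroring the price-difference formatting in PetrolPrice().
def format_diff(current, last_week):
    diff = round(float(current) - float(last_week), 2)
    return "+{:.2f}".format(diff) if diff >= 0 else "{:.2f}".format(diff)

print(format_diff("2.05", "2.00"))   # +0.05
print(format_diff("2.05", "2.08"))   # -0.03
```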
{
"source": "JosephKowalski2/Py-BasicStats",
"score": 3
} |
#### File: Py-BasicStats/statzcw/stats.py
```python
from typing import List
import math
def zcount(data: List[float]) -> float:
return float(len(data))
def zmean(data: List[float]) -> float:
return sum(data) / zcount(data)
def zmode(data: List[float]) -> float:
return max(set(data), key=data.count)
def zmedian(data: List[float]) -> float:
sorted_list = sorted(data)
index = (len(data) - 1) // 2
if len(data) % 2:
return sorted_list[index]
else:
return (sorted_list[index] + sorted_list[index + 1]) / 2.0
def zvariance(data: List[float]) -> float:
n = zcount(data)
mean = sum(data) / n
deviations = []
for x in data:
deviations.append((x - mean) ** 2)
return sum(deviations) / (n - 1)
def zstddev(data: List[float]) -> float:
return math.sqrt(zvariance(data))
def zstderr(data: List[float]) -> float:
return zstddev(data) / math.sqrt(zcount(data))
def zcorr(datax: List[float], datay: List[float]) -> float:
return cov(datax, datay) / (zstddev(datax) * zstddev(datay))
def cov(datax: List[float], datay: List[float]) -> float:
sum = 0
if zcount(datax) == zcount(datay):
for i in range(len(datax)):
sum += ((datax[i] - zmean(datax)) * (datay[i] - zmean(datay)))
return sum / (zcount(datax) - 1)
def readDataFile(file):
setA, setB = [], []
with open(file) as f:
next_line = f.readline()
for line in f:
row = line.split(',')
setA.append(float(row[0]))
setB.append(float(row[1]))
return setA, setB
def readDataSets(files):
data = {}
for file in files:
csvs = readDataFile(file)
data[file] = csvs
return data
``` |
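A quick usage sketch of the helpers above on hand-made samples (values chosen only for illustration; the expected results follow the sample-variance convention, i.e. division by n - 1):
```python
# Illustrative use of the statzcw helpers on small, made-up samples.
xs = [1.0, 2.0, 3.0, 4.0, 5.0]
ys = [2.0, 4.0, 6.0, 8.0, 10.0]   # perfectly linearly related to xs

print(zcount(xs))      # 5.0
print(zmean(xs))       # 3.0
print(zmedian(xs))     # 3.0
print(zvariance(xs))   # 2.5  (sample variance)
print(zstddev(xs))     # ~1.5811
print(zstderr(xs))     # ~0.7071  (stddev / sqrt(n))
print(zcorr(xs, ys))   # 1.0
```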
{
"source": "JosephLai241/AAG",
"score": 4
} |
#### File: AAG/aag/AAG.py
```python
import random
from pyfiglet import Figlet
from utils.Cli import CheckArgs, Parser
from utils.Examples import Examples
from utils.Fonts import FONTS
from utils.List import List
from utils.Make import Make
from utils.Titles import Titles
class Main():
"""
Putting it all together.
"""
@staticmethod
def main():
args, parser = Parser().parse_args()
### List all fonts with their corresponding number.
if args.list:
Titles.title()
List.print_fonts()
### Generate examples for all fonts.
elif args.examples:
Titles.title()
Examples.generate_examples()
### Generate ASCII art based on the selected font and entered string.
elif args.make:
CheckArgs.check_make(args, parser)
for args in args.make:
Make.make(FONTS[int(args[0])], args[1])
### Generate ASCII art from a random font and entered string.
elif args.randomize:
Titles.random_title()
for text in args.randomize:
Make.make(FONTS[random.randint(1,426)], text)
if __name__ == "__main__":
Main.main()
```
#### File: aag/utils/Make.py
```python
from pyfiglet import Figlet
class Make():
"""
Method for making ASCII art.
"""
@staticmethod
def make(font, text):
banner = Figlet(font = font)
print("\n%s" % banner.renderText(text))
``` |
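`Make.make` is a thin wrapper over pyfiglet; for reference, the equivalent direct call (using `slant`, one of the fonts bundled with pyfiglet) looks like this:
```python
from pyfiglet import Figlet

# Roughly equivalent to Make.make("slant", "AAG"): render the text in the "slant" figlet font.
print(Figlet(font="slant").renderText("AAG"))
```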
{
"source": "JosephLai241/Reddit-Scraper",
"score": 3
} |
#### File: tests/test_utils/test_Utilities.py
```python
import os
from rich.tree import Tree
from urs.utils.Utilities import DateTree
class TestDateTreeCheckDateFormatMethod():
"""
Testing DateTree class _check_date_format() method.
"""
def test_check_date_format_dash_format(self):
test_date = "06-28-2021"
test_search_date = DateTree._check_date_format(test_date)
assert test_search_date == test_date
def test_check_date_format_slash_format(self):
test_date = "06/28/2021"
test_search_date = DateTree._check_date_format(test_date)
assert test_search_date == "06-28-2021"
def test_check_date_wrong_format(self):
test_date = "06.28.2021"
try:
_ = DateTree._check_date_format(test_date)
assert False
except TypeError:
assert True
def test_check_date_short_date_wrong_format(self):
test_date = "06-28-21"
try:
_ = DateTree._check_date_format(test_date)
assert False
except TypeError:
assert True
class TestDateTreeFindDateDirectoryMethod():
"""
Testing DateTree class _find_date_directory() method.
"""
def test_find_date_directory_directory_exists(self):
os.mkdir("../scrapes/06-28-2021")
dir_exists = DateTree._find_date_directory("06-28-2021")
assert dir_exists == True
def test_find_date_directory_directory_does_not_exist(self):
os.rmdir("../scrapes/06-28-2021")
dir_exists = DateTree._find_date_directory("06-28-2021")
assert dir_exists == False
class TestDateTreeCreateDirectoryTreeMethod():
"""
Testing DateTree class _create_directory_tree() method.
"""
def test_create_directory_tree(self):
os.makedirs("../scrapes/06-28-2021/testing/nested/directories/tree")
test_tree = Tree("test")
try:
DateTree._create_directory_tree("../scrapes/06-28-2021", test_tree)
assert True
except Exception as e:
print(f"An exception was thrown when testing DateTree._create_directory_tree(): {e}")
assert False
class TestDateTreeDisplayTreeMethod():
"""
Testing DateTree class display_tree() method.
"""
def test_display_tree_method_valid_search_date(self):
try:
DateTree.display_tree("06-28-2021")
assert True
except Exception as e:
print(f"An exception was thrown when testing DateTree.display_tree(): {e}")
assert False
def test_display_tree_method_search_date_not_found(self):
try:
DateTree.display_tree("00-00-0000")
assert False
except SystemExit:
assert True
def test_display_tree_method_invalid_search_date(self):
try:
DateTree.display_tree("00.00.0000")
assert False
except SystemExit:
assert True
```
#### File: analytics/utils/PrepData.py
```python
import json
from pathlib import Path
from urs.utils.DirInit import InitializeDirectory
from urs.utils.Global import Status
from urs.utils.Logger import LogAnalyticsErrors
class GetPath():
"""
Methods for determining file paths.
"""
@staticmethod
@LogAnalyticsErrors.log_invalid_top_dir
def get_scrape_type(scrape_file, tool):
"""
Get the name of the scrape-specific directory in which the data is stored
and create the directories within the `analytics` folder.
Parameters
----------
scrape_file: str
String denoting the filepath
tool: str
String denoting the tool type
Exceptions
----------
TypeError:
Raised if the file is not JSON or if the file resides in the `analytics`
directory
Returns
-------
analytics_dir: str
String denoting the path to the directory in which the analytical
data will be written
scrape_dir: str
String denoting the scrape-specific directory
"""
file_path = Path(scrape_file)
scrape_dir = list(file_path.parts)[file_path.parts.index("scrapes") + 2]
if file_path.name.split(".")[1] != "json" or scrape_dir == "analytics":
raise TypeError
split_analytics_dir = \
list(file_path.parts)[:file_path.parts.index("scrapes") + 2] + \
["analytics", tool] + \
list(file_path.parts)[file_path.parts.index("scrapes") + 2:-1]
analytics_dir = "/".join(split_analytics_dir)
InitializeDirectory.create_dirs(analytics_dir)
return analytics_dir, scrape_dir
@staticmethod
def name_file(analytics_dir, path):
"""
Name the frequencies data or wordcloud when saving to file.
Parameters
----------
analytics_dir: str
String denoting the path to the directory in which the analytical
data will be written
path: str
String denoting the full filepath
Returns
-------
filename: str
String denoting the new filepath to save file
"""
return f"{Path(analytics_dir)}/{Path(path).name}"
class Extract():
"""
Methods for extracting the data from scrape files.
"""
@staticmethod
def extract(scrape_file):
"""
Extract data from the file.
Parameters
----------
scrape_file: str
String denoting the filepath
Returns
-------
data: dict
Dictionary containing extracted scrape data
"""
with open(str(scrape_file), "r", encoding = "utf-8") as raw_data:
return json.load(raw_data)
class CleanData():
"""
Methods for cleaning words found in "title", "body" or "text" fields.
"""
@staticmethod
def _remove_extras(word):
"""
Removing unnecessary characters from words.
Parameters
----------
word: str
String denoting the word to clean
Returns
-------
cleaned_word: str
String denoting the cleaned word
"""
illegal_chars = [char for char in "[(),:;.}{<>`]"]
fixed = [
" "
if char in illegal_chars
else char for char in word
]
return "".join(fixed).strip()
@staticmethod
def count_words(field, obj, plt_dict):
"""
Count words that are present in a field, then update the plt_dict dictionary.
Calls previously defined private method:
CleanData._remove_extras()
Parameters
----------
field: str
String denoting the dictionary key to extract data from
obj: dict
Dictionary containing scrape data
plt_dict: dict
Dictionary containing frequency data
Returns
-------
None
"""
words = obj[field].split(" ")
for word in words:
word = CleanData._remove_extras(word)
if not word:
continue
if word not in plt_dict.keys():
plt_dict[word] = 1
else:
plt_dict[word] += 1
class PrepSubreddit():
"""
Methods for preparing Subreddit data.
"""
@staticmethod
def prep_subreddit(data):
"""
Prepare Subreddit data.
Calls previously defined public method:
CleanData.count_words()
Parameters
----------
data: list
List containing extracted scrape data
Returns
-------
frequencies: dict
Dictionary containing finalized word frequencies
"""
status = Status(
"Finished Subreddit analysis.",
"Analyzing Subreddit scrape.",
"white"
)
plt_dict = dict()
status.start()
for submission in data:
CleanData.count_words("selftext", submission, plt_dict)
CleanData.count_words("title", submission, plt_dict)
status.succeed()
return plt_dict
class PrepMutts():
"""
Methods for preparing data that may contain a mix of Reddit objects.
"""
@staticmethod
def prep_mutts(data, plt_dict):
"""
Prepare data that may contain a mix of Reddit objects.
Parameters
----------
data: list
List containing Reddit objects
plt_dict: dict
Dictionary containing frequency data
Returns
-------
None
"""
for obj in data:
### Indicates there is valid data in this field.
if isinstance(obj, dict):
try:
if obj["type"] == "submission":
CleanData.count_words("selftext", obj, plt_dict)
CleanData.count_words("title", obj, plt_dict)
elif obj["type"] == "comment":
CleanData.count_words("body", obj, plt_dict)
except KeyError:
continue
### Indicates this field is forbidden when analyzing Redditor scrapes.
elif isinstance(obj, str):
continue
class PrepRedditor():
"""
Methods for preparing Redditor data.
"""
@staticmethod
def prep_redditor(data):
"""
Prepare Redditor data.
Calls previously defined public methods:
CleanData.count_words()
PrepMutts.prep_mutts()
Parameters
----------
data: dict
Dictionary containing extracted scrape data
Returns
-------
frequencies: dict
Dictionary containing finalized word frequencies
"""
status = Status(
"Finished Redditor analysis.",
"Analyzing Redditor scrape.",
"white"
)
plt_dict = dict()
status.start()
for interactions in data["interactions"].values():
PrepMutts.prep_mutts(interactions, plt_dict)
status.succeed()
return plt_dict
class PrepComments():
"""
Methods for preparing submission comments data.
"""
@staticmethod
def _prep_raw(data, plt_dict):
"""
Prepare raw submission comments.
Calls previously defined public method:
CleanData.count_words()
Parameters
----------
data: list
List containing extracted scrape data
plt_dict: dict
Dictionary containing frequency data
Returns
-------
None
"""
status = Status(
"Finished raw submission comments analysis.",
"Analyzing raw submission comments scrape.",
"white"
)
status.start()
for comment in data:
CleanData.count_words("body", comment, plt_dict)
status.succeed()
@staticmethod
def _prep_structured(data, plt_dict):
"""
An iterative implementation of depth-first search to prepare structured
comments.
Parameters
----------
data: list
List containing extracted scrape data
plt_dict: dict
Dictionary containing frequency data
Returns
-------
None
"""
status = Status(
"Finished structured submission comments analysis.",
"Analyzing structured submission comments scrape.",
"white"
)
status.start()
for comment in data:
CleanData.count_words("body", comment, plt_dict)
stack = []
stack.append(comment)
visited = []
visited.append(comment)
while stack:
current_comment = stack.pop(0)
for reply in current_comment["replies"]:
CleanData.count_words("body", reply, plt_dict)
if reply not in visited:
stack.insert(0, reply)
visited.append(reply)
status.succeed()
@staticmethod
def prep_comments(data):
"""
Prepare submission comments data.
Calls previously defined private methods:
PrepComments._prep_raw()
PrepComments._prep_structured()
Parameters
----------
data: dict
Dictionary containing extracted scrape data
Returns
-------
frequencies: dict
Dictionary containing finalized word frequencies
"""
plt_dict = dict()
PrepComments._prep_raw(data["data"]["comments"], plt_dict) \
if data["scrape_settings"]["style"] == "raw" \
else PrepComments._prep_structured(data["data"]["comments"], plt_dict)
return plt_dict
class PrepLivestream():
"""
Methods for preparing livestream data.
"""
@staticmethod
def prep_livestream(data):
"""
Prepare livestream data.
Parameters
----------
data: list
List containing extracted scrape data
"""
status = Status(
"Finished livestream analysis.",
"Analyzing livestream scrape.",
"white"
)
plt_dict = {}
status.start()
PrepMutts.prep_mutts(data, plt_dict)
status.succeed()
return plt_dict
class PrepData():
"""
Calling all methods for preparing scraped data.
"""
@staticmethod
def prep(scrape_file, scrape_type):
"""
Combine all prep methods into one public method.
Calls previously defined public methods:
PrepSubreddit.prep_subreddit()
PrepSubreddit.prep_redditor()
PrepSubreddit.prep_comments()
Parameters
----------
scrape_file: str
String denoting the filepath
scrape_type: str
String denoting the scrape type
Returns
-------
frequency_data: dict
Dictionary containing extracted scrape data
"""
data = Extract.extract(scrape_file)
if scrape_type == "subreddits":
plt_dict = PrepSubreddit.prep_subreddit(data["data"])
elif scrape_type == "redditors":
plt_dict = PrepRedditor.prep_redditor(data["data"])
elif scrape_type == "comments":
plt_dict = PrepComments.prep_comments(data)
elif scrape_type == "livestream":
plt_dict = PrepLivestream.prep_livestream(data["data"])
return dict(sorted(plt_dict.items(), key = lambda item: item[1], reverse = True))
```
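The word-frequency helpers above can be exercised directly; a small sketch with a made-up submission-like dict:
```python
# Illustrative call of CleanData.count_words on a made-up submission-like dict.
plt_dict = {}
fake_submission = {"title": "URS demo (word frequencies)", "selftext": "frequencies, frequencies"}
CleanData.count_words("title", fake_submission, plt_dict)
CleanData.count_words("selftext", fake_submission, plt_dict)
print(plt_dict)   # {'URS': 1, 'demo': 1, 'word': 1, 'frequencies': 3}
```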
#### File: urs/utils/Logger.py
```python
import logging
import time
from colorama import (
Fore,
Style
)
from urs.utils.DirInit import InitializeDirectory
from urs.utils.Global import (
categories,
convert_time,
date,
short_cat
)
from urs.utils.Titles import Errors
class LogMain():
"""
Decorator for logging URS runtime. Also handles KeyboardInterrupt and adds the
event to the log if applicable.
"""
### Set directory path and log format.
DIR_PATH = f"../scrapes/{date}"
LOG_FORMAT = "[%(asctime)s] [%(levelname)s]: %(message)s"
### Makes the `scrapes/[DATE]` directory in which the log and scraped files
### will be stored.
InitializeDirectory.create_dirs(DIR_PATH)
### Configure logging settings.
logging.basicConfig(
filename = DIR_PATH + "/urs.log",
format = LOG_FORMAT,
level = logging.INFO
)
@staticmethod
def master_timer(function):
"""
Wrapper for logging the amount of time it took to execute main(). Handle
KeyboardInterrupt if user cancels URS.
Parameters
----------
function: function()
Run method within the wrapper
Exceptions
----------
KeyboardInterrupt:
Raised if user cancels URS
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(*args):
logging.info("INITIALIZING URS.")
logging.info("")
start = time.time()
try:
function(*args)
except KeyboardInterrupt:
print(Style.BRIGHT + Fore.RED + "\n\nURS ABORTED BY USER.\n")
logging.warning("")
logging.warning("URS ABORTED BY USER.\n")
quit()
logging.info(f"URS COMPLETED IN {time.time() - start:.2f} SECONDS.\n")
return wrapper
class LogError():
"""
Decorator for logging args, PRAW, or rate limit errors.
"""
@staticmethod
def log_no_args(function):
"""
Wrapper for logging if the help message was printed/if no arguments were
given.
Parameters
----------
function: function()
Run method within the wrapper
Exceptions
----------
SystemExit:
Raised if no, invalid, or example args were entered
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(self):
try:
args, parser = function(self)
return args, parser
except SystemExit:
logging.info("HELP OR VERSION WAS DISPLAYED.\n")
quit()
return wrapper
@staticmethod
def log_args(error):
"""
Wrapper for logging individual (specific) arg errors.
Parameters
----------
error: str
String denoting the specific error that was raised when processing args
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def decorator(function):
def wrapper(*args):
try:
function(*args)
except ValueError:
Errors.e_title(f"INVALID {error}.")
logging.critical(f"RECEIVED INVALID {error}.")
logging.critical("ABORTING URS.\n")
quit()
return wrapper
return decorator
@staticmethod
def log_rate_limit(function):
"""
Wrapper for logging rate limit and errors.
Parameters
----------
function: function()
Run method within the wrapper
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(reddit):
user_limits = function(reddit)
logging.info("RATE LIMIT DISPLAYED.")
logging.info(f"Remaining requests: {user_limits['remaining']}")
logging.info(f"Used requests: {user_limits['used']}")
logging.info("")
if int(user_limits["remaining"]) == 0:
Errors.l_title(convert_time(user_limits["reset_timestamp"]))
logging.critical(f"RATE LIMIT REACHED. RATE LIMIT WILL RESET AT {convert_time(user_limits['reset_timestamp'])}.")
logging.critical("ABORTING URS.\n")
quit()
return user_limits
return wrapper
class LogPRAWScraper():
"""
Decorator for logging scraper runtimes and events.
"""
@staticmethod
def _format_subreddit_log(settings_dict):
"""
Format Subreddit log message.
Parameters
----------
settings_dict: dict
Dictionary containing Subreddit scraping settings
Returns
-------
None
"""
time_filters = [
"day",
"hour",
"month",
"week",
"year"
]
for subreddit_name, settings in settings_dict.items():
for each_setting in settings:
if each_setting[2] in time_filters:
logging.info(f"Getting posts from the past {each_setting[2]} for {categories[short_cat.index(each_setting[0].upper())]} results.")
if each_setting[0].lower() != "s":
logging.info(f"Scraping r/{subreddit_name} for {each_setting[1]} {categories[short_cat.index(each_setting[0].upper())]} results...")
elif each_setting[0].lower() == "s":
logging.info(f"Searching and scraping r/{subreddit_name} for posts containing '{each_setting[1]}'...")
logging.info("")
@staticmethod
def _format_two_arg_log(scraper_type, settings_dict):
"""
Format Redditor or submission comments log message. Both only take two
arguments, which is why only one method is needed to format the messages.
Parameters
----------
scraper_type: str
String denoting the scraper type (Redditors or submission comments)
settings_dict: dict
Dictionary containing Redditor scraping settings
Returns
-------
None
"""
for reddit_object, n_results in settings_dict.items():
plurality = "results" \
if int(n_results) > 1 \
else "result"
if scraper_type == "redditor":
logging.info(f"Scraping {n_results} {plurality} for u/{reddit_object}...")
elif scraper_type == "comments":
logging.info(f"Processing all comments from Reddit post {reddit_object}...") \
if int(n_results) == 0 \
else logging.info(f"Processing {n_results} {plurality} from Reddit post {reddit_object}...")
logging.info("")
@staticmethod
def _format_scraper_log(scraper, settings_dict):
"""
Format log depending on raw or structured export. Calls previously
defined private methods:
LogPRAWScraper._format_subreddit_log()
LogPRAWScraper._format_two_arg_log()
Parameters
----------
scraper: str
String denoting the scraper that was run
settings_dict: dict
Dictionary containing scrape settings
Returns
-------
None
"""
if scraper == "subreddit":
LogPRAWScraper._format_subreddit_log(settings_dict)
elif scraper == "redditor":
LogPRAWScraper._format_two_arg_log("redditor", settings_dict)
elif scraper == "comments":
LogPRAWScraper._format_two_arg_log("comments", settings_dict)
@staticmethod
def scraper_timer(scraper):
"""
Wrapper for logging the amount of time it took to execute a scraper.
Parameters
----------
scraper: str
String denoting the scraper that is run
Returns
-------
decorator: function()
Return the decorator function that runs the method passed into this
method
"""
def decorator(function):
def wrapper(*args):
start = time.time()
logging.info(f"RUNNING {scraper.upper()} SCRAPER.")
logging.info("")
settings_dict = function(*args)
LogPRAWScraper._format_scraper_log(scraper, settings_dict)
logging.info(f"{scraper.upper()} SCRAPER FINISHED IN {time.time() - start:.2f} SECONDS.")
logging.info("")
return wrapper
return decorator
@staticmethod
def log_cancel(function):
"""
Wrapper for logging if the user cancelled Subreddit scraping at the
confirmation page.
Parameters
----------
function: function()
Run method within the wrapper
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(*args):
try:
function(*args)
except KeyboardInterrupt:
print(Fore.RED + Style.BRIGHT + "\n\nCancelling.\n")
logging.info("")
logging.info("SUBREDDIT SCRAPING CANCELLED BY USER.\n")
quit()
return wrapper
class LogAnalyticsErrors():
"""
Decorator for logging errors while exporting analytical data.
"""
@staticmethod
def log_invalid_top_dir(function):
"""
Log invalid top directory when running analytical tools.
Parameters
----------
function: function()
Run method within the wrapper
Exceptions
----------
ValueError:
Raised if the file is not located within the scrapes directory
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(*args):
try:
return function(*args)
except ValueError:
Errors.i_title("Scrape data is not located within the `scrapes` directory.")
logging.critical("AN ERROR HAS OCCURRED WHILE PROCESSING SCRAPE DATA.")
logging.critical("Scrape data is not located within the `scrapes` directory.")
logging.critical("ABORTING URS.\n")
quit()
except TypeError:
Errors.i_title("Invalid file. Try again with a valid JSON file.")
logging.critical("AN ERROR HAS OCCURRED WHILE PROCESSING SCRAPE DATA.")
logging.critical("Invalid file.")
logging.critical("ABORTING URS.\n")
quit()
return wrapper
class LogAnalytics():
"""
Decorator for logging analytical tools.
"""
@staticmethod
def _get_args_switch(args, tool):
"""
Get tool type for logging.
Parameters
----------
args: Namespace
Namespace object containing all arguments used in the CLI
tool: str
Tool type which denotes a key in the dictionary
Returns
-------
scraper_args: list
List of arguments returned from args
"""
tools = {
"frequencies": [arg_set for arg_set in args.frequencies] \
if args.frequencies \
else None,
"wordcloud": [arg_set for arg_set in args.wordcloud] \
if args.wordcloud \
else None
}
return tools.get(tool)
@staticmethod
def log_save(tool):
"""
Wrapper for logging if the result was saved.
Parameters
----------
tool: str
String denoting the tool that is run
Returns
-------
decorator: function()
Return the decorator function that runs the method passed into this
method
"""
def decorator(function):
def wrapper(*args):
filename = function(*args)
logging.info(f"Saved {tool} to {filename}.")
logging.info("")
return wrapper
return decorator
@staticmethod
def log_show(tool):
"""
Wrapper for logging if the result was displayed.
Parameters
----------
tool: str
String denoting the tool that is run
Returns
-------
decorator: function()
Return the decorator method that runs the method passed into this
method
"""
def decorator(function):
def wrapper(*args):
function(*args)
logging.info(f"Displayed {tool}.")
logging.info("")
return wrapper
return decorator
@staticmethod
def _get_export_switch(f_type):
"""
Get export type for logging.
Parameters
----------
f_type: str
String denoting the file type
Returns
-------
export_message: str
String denoting export option
"""
export_options = {
0: "Exporting to JSON.",
1: "Exporting to CSV."
}
if f_type == "csv":
return export_options.get(1)
return export_options.get(0)
@staticmethod
def log_export(function):
"""
Log the export format for the frequencies generator.
Parameters
----------
function: function()
Run method within the wrapper
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(*args):
try:
function(*args)
logging.info(LogAnalytics._get_export_switch(args[1]))
logging.info("")
except Exception as e:
Errors.ex_title(e)
logging.critical("AN ERROR HAS OCCURRED WHILE EXPORTING SCRAPED DATA.")
logging.critical(f"{e}")
logging.critical("ABORTING URS.\n")
quit()
return wrapper
@staticmethod
def _log_tool(args, tool):
"""
Log the analytical tool that was used.
Parameters
----------
args: Namespace
Namespace object containing all arguments used in the CLI
tool: str
String denoting the analytical tool
Returns
-------
None
"""
args_list = LogAnalytics._get_args_switch(args, tool)
for filename in args_list:
logging.info(f"Generating {tool} for file {filename[0]}...")
logging.info("")
@staticmethod
def generator_timer(tool):
"""
Wrapper for logging the amount of time it took to execute a tool.
Parameters
----------
tool: str
String denoting the tool that is run
Returns
-------
decorator: function()
Return the decorator method that runs the method passed into this
method
"""
def decorator(function):
def wrapper(*args):
start = time.time()
logging.info(f"RUNNING {tool.upper()} GENERATOR.")
logging.info("")
LogAnalytics._log_tool(args[0], tool)
function(*args)
logging.info(f"{tool.upper()} GENERATOR FINISHED IN {time.time() - start:.2f} SECONDS.")
logging.info("")
return wrapper
return decorator
class LogExport():
"""
Decorator for logging exporting files.
"""
@staticmethod
def _get_export_switch(args):
"""
Get export type for logging.
Parameters
----------
args: Namespace
Namespace object containing all arguments used in the CLI
Returns
-------
export_message: str
String denoting export option
"""
export_options = {
0: "Exporting to JSON.",
1: "Exporting to CSV."
}
if args.csv:
return export_options.get(1)
return export_options.get(0)
@staticmethod
def log_export(function):
"""
Wrapper for logging the export option.
Parameters
----------
function: function()
Run method within the wrapper
Returns
-------
wrapper: function()
Return the wrapper method that runs the method passed into the
decorator
"""
def wrapper(*args):
try:
function(*args)
logging.info(LogExport._get_export_switch(args[0]))
logging.info("")
except Exception as e:
Errors.ex_title(e)
logging.critical("AN ERROR HAS OCCURRED WHILE EXPORTING SCRAPED DATA.")
logging.critical(f"{e}")
logging.critical("ABORTING URS.\n")
quit()
return wrapper
``` |
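The decorator classes above all share one shape: a static method takes a label, returns a decorator, and the innermost wrapper logs and/or times the wrapped call before handing back its result. A minimal self-contained sketch of that pattern (editor's illustration, not URS's actual API; `scrape_subreddit` and its return value are hypothetical stand-ins) looks like this:
```python
import logging
import time

def timed_logger(label):
    """Sketch of the timing/logging decorator pattern used above."""
    def decorator(function):
        def wrapper(*args, **kwargs):
            start = time.time()
            logging.info(f"RUNNING {label.upper()}.")
            result = function(*args, **kwargs)   # run the wrapped scraper
            logging.info(f"Returned settings: {result}")
            logging.info(f"{label.upper()} FINISHED IN {time.time() - start:.2f} SECONDS.")
            return result
        return wrapper
    return decorator

@timed_logger("subreddit scraper")
def scrape_subreddit(name, n_results):
    # Hypothetical stand-in for a real scrape; just echoes its settings.
    return {name: n_results}

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    scrape_subreddit("askscience", 10)
```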
{
"source": "JosephLalli/ORCHARDS",
"score": 2
} |
#### File: secondary_analysis/figures/figure_functions.py
```python
import os
import numpy as np
import pandas as pd
import seaborn as sns
import subprocess
import re
import pysam
from itertools import product
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import MultipleLocator
import math
import scipy.stats as stats
from scipy.stats import truncnorm
from figure_constants import palettes, genelengths, hongkongContigs, SNPs, SNP_frequency_cutoff, transmissionSNPs, figures
def set_fig_settings(displayContext, rcParams=dict()):
sns.set()
sns.set_style("white")
assert (displayContext in ['paper','talk','poster'])
sns.set_context(displayContext)
if displayContext == 'paper':
sns.set_context('paper', font_scale=1)
elif displayContext == 'talk':
sns.set_context("talk")
elif displayContext == 'poster':
sns.set_context("poster")
rcParams['figure.figsize'] = [8.0, 6.0]
rcParams['figure.titleweight'] = 'bold'
rcParams['axes.labelweight'] = 'bold'
rcParams['font.weight'] = 'bold'
rcParams['font.size'] = 36
rcParams['font.stretch'] = 'condensed'
rcParams['axes.labelsize'] = 26
rcParams['axes.labelweight'] = 'bold'
rcParams['xtick.labelsize'] = 18
rcParams['ytick.labelsize'] = 18
rcParams['figure.dpi'] = 300
rcParams['font.family'] = 'Roboto'
return rcParams
def addStatsLines(ax, x, y, data,
method='mean', hue=None, order=None, hue_order=None, dodge=True,
linewidth=2, meanwidth=0.35, err_ratio=0.5, meancolor='black', color='black'):
hw = meanwidth
er = err_ratio
xticks = [text.get_text() for text in ax.get_xticklabels()]
sns_box_plotter = sns.categorical._BoxPlotter(x, y, hue, data, order, hue_order, orient=None, width=.8, color=None, palette=None, saturation=.75, dodge=dodge, fliersize=5, linewidth=None)
if hue:
hueOffsets = {hue: sns_box_plotter.hue_offsets[sns_box_plotter.hue_names.index(hue)] for hue in sns_box_plotter.hue_names}
xlocs = {group: xticks.index(str(group[0])) + hueOffsets[group[1]] for group in product(sns_box_plotter.group_names, hueOffsets.keys())}
groups = [x, hue]
hw = hw / len(sns_box_plotter.hue_names)
else:
groups = [x]
xlocs = {group: xcat for group, xcat in zip(sns_box_plotter.group_names, ax.xaxis.get_ticklocs())}
for xcat, df in data.groupby(groups):
xloc = xlocs[xcat]
if method == 'median':
middle = df[y].median()
uppererror = np.percentile(df[y].dropna(), 75) - middle
lowererror = middle - np.percentile(df[y].dropna(), 25)
print (f'{middle}, 95th upper-lower {np.percentile(df[y].dropna(), 95)}, {np.percentile(df[y].dropna(), 5)}, 25th upper-lower {np.percentile(df[y].dropna(), 75)}, {np.percentile(df[y].dropna(), 25)}')
else:
middle = df[y].mean()
uppererror = lowererror = df[y].sem() * 1.96
ax.hlines(middle, xloc - hw, xloc + hw, zorder=10, linewidth=linewidth, color=meancolor)
ax.hlines((middle - lowererror, middle + uppererror),
xloc - (hw * er),
xloc + (hw * er),
zorder=10, linewidth=linewidth, color=color)
ax.vlines(xloc, middle - lowererror, middle + uppererror, zorder=10, linewidth=linewidth, color=color)
return ax
def addRegStats(ax, x, y, data):
def form(x):
if x < 0.01:
return f'{x:.2e}'
elif x > 10:
            return f'{x:.3e}'
else:
return f'{x:.3}'
m, b, r_value, p_value, std_err = stats.linregress(data[[x, y]].dropna().to_numpy())
textstr = f'y = {form(m)}x + {form(b)}\n$r^2$ = {form(r_value**2)}\nci = {form(std_err*1.96)}\np = {form(p_value)}'
ax.text(0.05, .78, textstr, transform=ax.transAxes, ha="left", fontsize=rcParams['font.size'] * .66)
def errorbar(x, y, low, high, order, color, ax):
ynum = [order.index(y_i) for y_i in y]
lowerrbar = [x - low for x, low in zip(x, low)]
uppererrbar = [high - x for x, high in zip(x, high)]
return ax.errorbar(ynum, x, yerr=(lowerrbar, uppererrbar), fmt="none", color=color, elinewidth=1, capsize=5)
def calc_ci(array, z=1.96):
s = np.std(array) # std of vector
n = len(array) # number of obs
return (z * (s / math.sqrt(n)))
def bootstrap(array, num_of_bootstraps, function, *args, **kwargs):
x_bar = function(array, *args, **kwargs)
sampled_results = np.zeros(num_of_bootstraps)
for i in range(num_of_bootstraps):
sample = np.random.choice(array, len(array), replace=True)
sampled_results[i] = function(sample, *args, **kwargs)
deltastar = sampled_results - x_bar
ci = np.percentile(deltastar, 2.5)
return ci
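# Illustrative usage of the bootstrap helper above (editor's sketch, not part of the
# original analysis; the data below are synthetic):
#     rng = np.random.default_rng(0)
#     synthetic = rng.normal(loc=1.0, scale=0.5, size=200)
#     delta_ci = bootstrap(synthetic, num_of_bootstraps=1000, function=np.mean)
#     # delta_ci is the 2.5th percentile of (resampled mean - observed mean)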
def bootstrap_df(df, num_of_bootstraps, function, *args, **kwargs):
x_bar = function(df, *args, **kwargs)
sampled_results = np.zeros(num_of_bootstraps)
for i in range(num_of_bootstraps):
sample = df.sample(n=len(df), replace=True)
sampled_results[i] = function(sample, *args, **kwargs)
deltastar = sampled_results - x_bar
ci = np.nanpercentile(deltastar, 2.5)
    if np.isnan(ci):
        print (sampled_results)
return ci
def convertListofClassicH3N2SitestoZeroIndexedMStart(listOfSites):
return [site + 15 for site in listOfSites]
def parseGTF(gtffile, segmentLocations):
'''given file location of gtf, and dictionary of starting locations
of each chrom in a concatenated sequence, return dictionary of
{gene product : numpy filter for concatenated sequence'''
with open(gtffile, 'r') as g:
gtf = g.readlines()
coding_regions = {}
for line in gtf:
line = line.replace("/", "_")
lineitems = line.split("\t")
segment_name = lineitems[0]
annotation_type = lineitems[2]
start = int(lineitems[3]) - 1 # adding the -1 here for 0 indexing
stop = int(lineitems[4]) - 1 # adding the -1 here for 0 indexing
gene_name = lineitems[8]
gene_name = gene_name.split(";")[0]
gene_name = gene_name.replace("gene_id ", "")
gene_name = gene_name.replace("\"", "")
if annotation_type.lower() == "cds":
if segment_name not in coding_regions:
coding_regions[segment_name] = {}
coding_regions[segment_name][gene_name] = [[start, stop]]
elif segment_name in coding_regions and gene_name not in coding_regions[segment_name]:
coding_regions[segment_name][gene_name] = [[start, stop]]
elif gene_name in coding_regions[segment_name]:
coding_regions[segment_name][gene_name].append([start, stop])
return coding_regions
def makeManhattanPlot(ax, y, data, nrows=2, subtype=None, geneorder=None, antigenic=True, hue=None, hue_order=['Nonsynonymous', 'Synonymous'], palette_type='synon', color=None, dotsize=40, linewidth=0, alpha=1, negativeDataSet=False, y_label='Minor Allele\nFrequency', no_zero_for_ha=True):
mother_ax = ax
del ax
mother_ax.get_yaxis().set_visible(False)
mother_ax.get_xaxis().set_visible(False)
mother_ax.spines['right'].set_visible(False)
mother_ax.spines['top'].set_visible(False)
mother_ax.spines['left'].set_visible(False)
mother_ax.spines['bottom'].set_visible(False)
if subtype:
if subtype == 'H1N1pdm': # Early on while coding this I called all H1N1 samples "H1N1pdm"; this is incorrect.
subtype = 'H1N1'
data = data.replace('H1N1pdm', 'H1N1')
data = data.loc[data.subtype == subtype]
else:
        subtype = data.subtype.iloc[0]
if geneorder:
pass
elif antigenic:
geneorder = ["PB2", "PB1", "PA", "HA", "HA_antigenic", "HA_nonantigenic", "NP", "NA", "M1", "M2", "NS1", "NEP", 'PB1-F2', 'PA-X']
else:
geneorder = ["PB2", "PB1", "PA", "HA", "NP", "NA", "M1", "M2", "NS1", "NEP", 'PB1-F2', 'PA-X']
args = {'y': y, 'hue': hue, 'color': color, 'hue_order': hue_order, 'palette': palettes[palette_type], 'alpha': alpha}
if type(dotsize) == int:
args['s'] = dotsize
elif type(dotsize) == str:
args['size'] = dotsize
args['s'] = 40
ymax = data[y].max()
ymin = data[y].min()
ordered_gene_lengths = [(gene, length) for gene, length in genelengths[subtype].items() if gene in geneorder]
ordered_gene_lengths.sort(key=lambda x: geneorder.index(x[0]))
lengths = [x[1] for x in ordered_gene_lengths]
current_length = 0
end_of_row = sum([length for gene, length in ordered_gene_lengths])/nrows
reset_points = [0]
for i, (gene, length) in enumerate(ordered_gene_lengths):
if current_length > end_of_row:
reset_points.append(i)
current_length = length
else:
current_length += length
ncolumns = reset_points[1]
minorrowadjust = int(abs(sum(lengths[0:ncolumns]) - sum(lengths[ncolumns:])) / 2)
# make gene subaxes
ax_x_positions = list()
row_x_positions = list()
current_x_pos = 0
max_x_pos = 0
for i, (gene, length) in enumerate(ordered_gene_lengths):
if (i not in reset_points) or i == 0:
row_x_positions.append((current_x_pos, length))
current_x_pos += length
else:
current_x_pos = minorrowadjust
ax_x_positions.append(row_x_positions)
row_x_positions = list()
row_x_positions.append((current_x_pos, length))
current_x_pos += length
if current_x_pos > max_x_pos:
max_x_pos = current_x_pos
ax_x_positions.append(row_x_positions)
# convert from data to axis positions
text_offset = 0.15
ax_x_positions = [(start / max_x_pos, ((nrows - (i + 1)) / nrows) + text_offset, length / max_x_pos, (1 / nrows) - text_offset)
for i, row in enumerate(ax_x_positions) for start, length in row]
axes = [mother_ax.inset_axes(bounds) for bounds in ax_x_positions]
properGeneName = {gene: gene for gene in geneorder}
properGeneName['HA_antigenic'] = 'Antigenic\nHA'
properGeneName['HA_nonantigenic'] = 'Nonantigenic\nHA'
properGeneName['PB1-F2'] = 'PB1-\nF2'
for i, ((gene, length), ax) in enumerate(zip(ordered_gene_lengths, axes)):
if i in reset_points:
ax.set_ylabel(y_label, labelpad=12)
else:
ax.get_yaxis().set_visible(False)
ax = sns.scatterplot(x='inGenePos', data=data.loc[(data['product'] == gene)], legend=False, ax=ax, linewidth=linewidth, **args)
ax.set_xlim(left=0, right=length)
ax.set_xlabel(properGeneName[gene], labelpad=8)
ax.xaxis.set_major_locator(MultipleLocator(500))
ax.xaxis.set_minor_locator(MultipleLocator(250))
ax.set_ylim((ymin - ymax*0.04), (ymax + ymax*0.04))
ax.tick_params(reset=True, which='both', axis='x', bottom=True, top=False)
if no_zero_for_ha and gene == 'HA' and subtype == 'H3N2':
ax.xaxis.get_major_ticks()[0].draw = lambda *args:None
ax.xaxis.get_major_ticks()[1].draw = lambda *args:None
return mother_ax
# Functions related to calculating bottleneck size
def getReadDepth(sample, segment, pos, alt):
reffile = SNPs.loc[SNPs['sampleID'] == sample, 'referenceFile'].iloc[0]
ref = reffile.split('/')[5]
if 'Hong_Kong' in reffile:
chrom = hongkongContigs[segment]
elif 'Michigan' in reffile:
chrom = ref[:-7] + segment
elif ref[-2:] in ['17', '18', '19']:
chrom = ref[:-2] + segment
else:
chrom = ref + '_' + segment
bamfile = '/'.join(reffile.split('/')[0:6]) + '/map_to_consensus/' + sample + '.bam'
pos = int(pos)
sam = pysam.AlignmentFile(bamfile, "rb")
pileup = sam.pileup(contig=chrom, start=pos-1, end=pos, truncate=True, stepper="nofilter")
column = next(pileup)
column.set_min_base_quality(30)
try:
bases = column.get_query_sequences(mark_matches=True)
altreads = bases.count(alt.lower()) + bases.count(alt.upper())
except:
altreads = 0
frequency = round((altreads / column.get_num_aligned()), 4)
depth = column.get_num_aligned()
return frequency, altreads, depth
def makeBottleneckInputFile(pairings, category):
pairings = list(pairings)
indexes = [pairing[0] for pairing in pairings]
contacts = [pairing[1] for pairing in pairings]
export = transmissionSNPs.loc[(transmissionSNPs['index'].isin(indexes)) & (transmissionSNPs.contact.isin(contacts)), ['index','contact','segment', 'pos', 'ref_nuc','alt_nuc', 'SNP_frequency_index', 'AD_index', 'depth_index','SNP_frequency_contact', 'AD_contact', 'depth_contact']]
    for ix, row in export.iterrows():
        if pd.isna(row.depth_contact):
            export.loc[ix, ['SNP_frequency_contact', 'AD_contact', 'depth_contact']] = getReadDepth(row.contact, row.segment, row.pos, row.alt_nuc)
    export = export.fillna(0)
filename = figures + '/bottleneck_figures/' + category.replace(' ', '_') + '.txt'
export.to_csv(filename[:-4] + '.tsv', sep='\t')
export = export.loc[(0.99 > export.SNP_frequency_index) & (export.SNP_frequency_index > 0.01)]
export.loc[export['depth_contact'] == 0, ['SNP_frequency_contact', 'depth_contact', 'AD_contact']] = export.loc[export["depth_contact"]==0].apply(lambda x:getReadDepth(x['contact'], x['segment'], x['pos'], x['alt_nuc']), axis=1)
export = export.loc[~export.duplicated()]
export = export[['SNP_frequency_index', 'SNP_frequency_contact', 'depth_contact', 'AD_contact']].round(5)
export.to_csv(filename, sep='\t', header=False, index=False)
return filename
def koelleBottleneckCategorical(data, category):
categories = list(data[category].unique())
if np.nan in categories:
categories.remove(np.nan)
returnlist = []
for group in categories:
subdata = data.loc[data[category] == group]
indexes = subdata['index']
contacts = subdata['contact']
assert (len(indexes) == len(contacts))
pairings = (zip(indexes, contacts))
filename = makeBottleneckInputFile(pairings, group)
bottleneckregex = r"(?:size\n)(\d*)"
lowerboundregex = r"(?:left bound\n)(\d*)"
upperboundregex = r"(?:right bound\n)(\d*)"
with open(f"{figures}/betabinomialResults_exact.log", 'a+') as outputFile:
cmd = f'Rscript /d/orchards/betaBinomial/Bottleneck_size_estimation_exact.r --file {filename} --plot_bool TRUE --var_calling_threshold {SNP_frequency_cutoff} --Nb_min 1 --Nb_max 200 --confidence_level .95'
outputFile.write(f"\n\n--------------------\n\n{group}\n\n")
print (cmd)
results = subprocess.run(cmd.split(" "), text=True, stdout=subprocess.PIPE)
print (results.stdout)
bottleneck = int(re.search(bottleneckregex, results.stdout).group(1))
lowerbound = int(re.search(lowerboundregex, results.stdout).group(1))
upperbound = int(re.search(upperboundregex, results.stdout).group(1))
outputFile.write(f"{group}: {lowerbound}|--- {bottleneck} ---|{upperbound}")
print (f"{group}: {lowerbound}|--- {bottleneck} ---|{upperbound}")
try:
os.rename('/mnt/d/orchards/betaBinomial/exact_plot.svg', f'{figures}/{group}_bottleneckplot_exact.svg')
except:
print (f'{category} doesn\'t have an exact plot svg')
returnlist.append({'Pairing Category': group, 'Lower CI': lowerbound, 'Avg Bottleneck': bottleneck, 'Upper CI': upperbound})
return returnlist
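# Illustrative call of the bottleneck wrapper above (editor's sketch; `pair_df` and its
# 'pair_category' column are hypothetical -- the dataframe needs 'index', 'contact' and the
# grouping column, matching the transmission-pair tables built in figure_constants):
#     bottleneck_summary = pd.DataFrame(koelleBottleneckCategorical(pair_df, 'pair_category'))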
# Functions related to adding zeros to a log-scaled chart
def extract_exponent(x):
return np.floor(np.log10(x))
def round_to_exponent(x, high=False):
if high:
        return 10**(extract_exponent(x) + 1)
else:
return 10**extract_exponent(x)
def get_truncated_normal_distribution(mean=0, sd=1, low=0, upp=10, n=1):
dist = truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
return dist.rvs(n)
def set_log_ax_ytick_range(ax, r=1):
t = np.log10(ax.get_yticks())
new_t = 10**np.arange(t[0], t[-1], step=r)
ax.set_yticks(new_t)
return ax
def swarmplot_with_zeros(figargs, fig, spacing=.2, gap_between_data_and_zero=0, dotsize=5, jitter_zeros=False):
y = figargs['y']
figargs['data'] = data = figargs['data'].copy()
log_new_zero = determine_log_zero(figargs, spacing) - gap_between_data_and_zero
line_placement = 10**(log_new_zero + spacing)
ax_bottom = 10**(log_new_zero - spacing)
if not jitter_zeros:
figargs['data'][y] = data[y].replace(0, 10**log_new_zero)
else:
data.loc[data[y] == 0, y] = jitter(data.loc[data[y] == 0, y], log_new_zero, (np.log10(ax_bottom * 1.05), np.log10(line_placement * .95)))
figargs['data'] = data
ax = sns.swarmplot(**figargs, size=dotsize)
ax.axhline(line_placement, color='black', linestyle='--')
old_ylim = ax.get_ylim()
fig.canvas.draw_idle()
add_zero_y_axis(ax, log_new_zero)
ax.set_ylim(ax_bottom, old_ylim[1])
return ax
def jitter(points, mid, ranges):
low, high = ranges
sd = (high - low) * .34
return np.power(10, np.array(get_truncated_normal_distribution(mid, sd, low, high, len(points))))
def render_ax(ax):
renderer = plt.gcf().canvas.renderer
ax.draw(renderer)
def determine_log_zero(figargs, spacing):
y = figargs['y']
figargs['data'] = data = figargs['data'].copy()
min_nonzero_y = data.loc[data[y] > 0, y].min()
log_min_y = np.log10(round_to_exponent(min_nonzero_y)) - spacing
return log_min_y
def add_zero_y_axis(ax, log_new_zero):
yticks = ax.get_yticks()
yticks[1] = 10**log_new_zero
    labels = [tick.get_text() for tick in ax.get_yticklabels()]
labels[1] = '0'
ax.set_yticks(yticks)
ax.set_yticklabels(labels)
return ax
```
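`swarmplot_with_zeros` above works around the fact that a true zero cannot sit on a log-scaled axis: it picks a pseudo-zero roughly one decade below the smallest nonzero value, moves the zeros there, draws a dashed separator, and relabels that tick as 0. A standalone sketch of the same idea (editor's illustration on synthetic data with plain seaborn/matplotlib, not a call into the module above):
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(7)
values = np.concatenate([10 ** rng.uniform(-4, -1, 80), np.zeros(20)])
groups = rng.choice(["A", "B"], size=values.size)

min_nonzero = values[values > 0].min()
exp_lo = int(np.floor(np.log10(min_nonzero)))
log_zero = exp_lo - 1                                   # pseudo-zero one decade below the data
plotted = np.where(values == 0, 10.0 ** log_zero, values)

fig, ax = plt.subplots()
ax.set_yscale("log")
sns.swarmplot(x=groups, y=plotted, ax=ax, size=4)
ax.axhline(10.0 ** (log_zero + 0.5), color="black", linestyle="--")  # separator above the zeros
ax.set_ylim(10.0 ** (log_zero - 0.5), None)
ticks = [10.0 ** log_zero] + [10.0 ** e for e in range(exp_lo, 0)]
ax.set_yticks(ticks)
ax.set_yticklabels(["0"] + [f"$10^{{{e}}}$" for e in range(exp_lo, 0)])
plt.show()
```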
#### File: secondary_analysis/pre_processing_scripts/figure_constants.py
```python
import os
import numpy as np
import pandas as pd
import seaborn as sns
import glob
SNP_frequency_cutoff = 0.01
min_coverage = 100
calcBottlenecks = False
#Set Constants
potentialmixed = ['18VR001531', '19VR004455', '19VR003675', '19VR003920', '19VR003675']
#Samples which, based off visual inspection of mutations, seem to be mixed infections.
#This is determined by looking at 10 samples with the most mutations and seeing if those
#mutations, when close together, tend to be linked on the same read.
installDir = '/'.join(os.getcwd().split('/')[:-3])
metadataDir = installDir + '/data/sample_metadata/'
metadatafile = metadataDir + 'sample_metadata.csv'
completemetadatakey = metadataDir + 'subject_metadata_key.csv'
expandedMetadata = metadataDir + 'subject_metadata.csv'
figures = os.path.join(installDir, 'results', 'figures')
bottleneck_output = os.path.join(installDir, 'results', 'bottleneck_output')
secondaryDataFolders = [installDir + '/data/secondary_analysis/H3N2/18-19',
installDir + '/data/secondary_analysis/H3N2/17-18',
installDir + '/data/secondary_analysis/H1N1/18-19',
installDir + '/data/secondary_analysis/H1N1/17-18',
installDir + '/data/secondary_analysis/FluB/16-17',
installDir + '/data/secondary_analysis/FluB/17-18']
referenceDir = installDir + '/references'
vcfdirs = secondaryDataFolders
vcffiles = [f + '/all_snps_filtered.vcf' for f in vcfdirs]
references = ['A_Singapore_INFIMH-16-0019_2016',
'A_Hong_Kong_4801_2014_EPI834581',
'A_Michigan_45_2015_H1N1_18',
'A_Michigan_45_2015_H1N1_19',
'B_Phuket_3073_2013_17',
'B_Phuket_3073_2013_18']
consensusReferences = [mainSampleFolder + '/consensus/' + reference + '_consensus_noambig.fasta' for mainSampleFolder, reference in zip(secondaryDataFolders, references)]
gtfFiles = [referenceDir + '/' + reference + '_antigenic.gtf' for reference in references]
SnpGenieSegFolders = []
for f in secondaryDataFolders:
SnpGenieSegFolders.extend(glob.glob(f + '/SNPGenie_output/*'))
treefiles = [installDir + '/data/secondary_analysis/FluB/FluB.tree',
installDir + '/data/secondary_analysis/H3N2/H3N2.tree',
installDir + '/data/secondary_analysis/H1N1/H1N1.tree']
clade_references = installDir + '/data/references/subclade_definitions/Clade_reference_sequence_names.txt'
hongkongContigs = {'NP': 'A_Hong_Kong_4801_2014_834574_NP', 'NS': 'A_Hong_Kong_4801_2014_834575_NS',
'MP': 'A_Hong_Kong_4801_2014_834576_MP', 'PA': 'A_Hong_Kong_4801_2014_834577_PA',
'PB2': 'A_Hong_Kong_4801_2014_834578_PB2', 'PB1': 'A_Hong_Kong_4801_2014_834579_PB1',
'NA': 'A_Hong_Kong_4801_2014_834580_NA', 'HA': 'A_Hong_Kong_4801_2014_834581_HA'}
# location of all statistics tsvs:
dataFolder = installDir + '/results/dataframes'
subtypesToAnalyze = ['H1N1', 'H3N2', 'Influenza B']
# exclude 'NA' as a reserved term for nan when importing pandas dataframes
naValues = ['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan', '', '*']
read_tsv_args = {'sep': '\t', 'keep_default_na': False, 'na_values': naValues}
read_csv_args = {'keep_default_na': False, 'na_values': naValues}
gene_to_seg_dict = {'HA': 'HA', 'NA': 'NA', 'PB1': 'PB1', 'PB2': 'PB2', 'PA': 'PA', 'NP': 'NP',
'NEP': 'NS', 'NS1': 'NS', 'M1': 'MP', 'M2': 'MP', 'PB1-F2': 'PB1', 'PA-X': 'PA',
'NB': 'NA', 'BM2': 'MP'}
referenceDict = {"A_Singapore_INFIMH-16-0019_2016": "H3N2",
"A_Hong_Kong_4801_2014_EPI834581": "H3N2",
"A_Michigan_45_2015_H1N1": "H1N1",
"A_Michigan_45_2015_H1N1_18": "H1N1",
"A_Michigan_45_2015_H1N1_19": "H1N1",
"B_Brisbane_60_2008": "Influenza B",
"B_Phuket_3073_2013_17": "Influenza B",
"B_Phuket_3073_2013_18": "Influenza B",
"Influenza A H3N2, Influenza B (Yamagata)": "Mixed"}
referenceSeasonDict = {"A_Singapore_INFIMH-16-0019_2016": "2018-2019 H3N2",
"A_Hong_Kong_4801_2014_EPI834581": "2017-2018 H3N2",
"A_Michigan_45_2015_H1N1_18": "2017-2018 H1N1",
"A_Michigan_45_2015_H1N1_19": "2018-2019 H1N1",
"B_Phuket_3073_2013_17": "2017-2018 Influenza B",
"B_Phuket_3073_2013_18": "2018-2019 Influenza B",
"B_Phuket_3073_2013_16": "2016-2017 Influenza B"}
sampleFolderDict = {'H3N2': {'17-18': installDir + '/data/secondary_analysis/H3N2/17-18',
'18-19': installDir + '/data/secondary_analysis/H3N2/18-19'},
'H1N1': {'17-18': installDir + '/data/secondary_analysis/H1N1/17-18',
'18-19': installDir + '/data/secondary_analysis/H1N1/18-19'},
'H1N1pdm': {'17-18': installDir + '/data/secondary_analysis/H1N1/17-18',
'18-19': installDir + '/data/secondary_analysis/H1N1/18-19'},
'Influenza B': {'16-17': installDir + '/data/secondary_analysis/FluB/16-17',
'17-18': installDir + '/data/secondary_analysis/FluB/17-18'}}
# Dictionary to convert myriad different subtypes in metadata file into consistent set of four subtypes
subtypeDict = {'Influenza A H3N2': 'H3N2', 'Flu A (H3)': 'H3N2',
'Flu A (Unable to Subtype)': 'H3N2', 'Flu B (Yamagata)': 'Influenza B', 'Flu A 09H1': 'H1N1',
'Influenza A H1N1': 'H1N1', 'Influenza A, Influenza B': 'Mixed', 'Influenza B': 'Influenza B',
'Influenza A H3N2, Influenza B (Yamagata)': 'Mixed', 'Influenza A H3N2, Influenza A H1N1': 'Mixed',
'Influenza A, Influenza B (Yamagata)': 'Mixed', 'Influenza B (Yamagata)': 'Influenza B', 'Influenza A': 'H3N2',
'Influenza B (Victoria)': 'Influenza B', 'H3N2': 'H3N2', 'H1N1': 'H1N1', 'Influenza B': 'Influenza B'}
H1N1_antigenic_sites = [87, 88, 90, 91, 92, 132, 141, 142,
143, 147, 171, 172, 174, 177, 180,
170, 173, 202, 206, 210, 211, 212,
151, 154, 156, 157, 158, 159, 200, 238]
H1N1_antigenic_sites = [site - 1 for site in H1N1_antigenic_sites] # convert to zero-index
antigenic_sites = {59, 60, 61, 62, 63, 65, 66, 68, 69, 72, 74, 77, 78, 82, 90, 93, 95, 96, 97, 98, 101, 102, 103, 106, 107, 109, 111, 117, 118, 124, 132, 136, 137, 139, 141, 143, 144, 145, 146, 147, 148, 150, 152, 153, 155, 157, 158, 159, 160, 161, 165, 167, 170, 171, 172, 173, 174, 178, 180, 182, 183, 185, 186, 187, 188, 189, 190, 191, 192, 194, 197, 201, 202, 203, 204, 205, 207, 208, 209, 211, 212, 213, 216, 218, 222, 223, 224, 227, 228, 229, 230, 231, 232, 233, 234, 241, 242, 243, 244, 245, 253, 255, 257, 259, 261, 262, 263, 275, 276, 277, 280, 288, 290, 291, 293, 294, 295, 309, 312, 314, 315, 319, 320, 322, 323, 324, 325, 326, 327}
def convertListofClassicH3N2SitestoZeroIndexedMStart(listOfSites):
return [site + 15 for site in listOfSites]
glycosylation_sites = [8, 22, 38, 45, 63, 81, 133, 126, 159, 160, 165, 246, 285]
glycosylation_sites = set(convertListofClassicH3N2SitestoZeroIndexedMStart(glycosylation_sites))
antigenic_sites = antigenic_sites.union(glycosylation_sites)
genelengths = {'H3N2': {'NEP': 366,
'HA': 1701,
'HA_antigenic': len(antigenic_sites) * 3,
'HA_nonantigenic': 1701 - len(antigenic_sites) * 3,
'M1': 759,
'M2': 294,
'NA': 1410,
'NP': 1497,
'NS1': 693,
'PA': 2151,
'PA-X': 759,
'PB1': 2274,
'PB1-F2': 273,
'PB2': 2280},
'H1N1': {'HA_antigenic': len(H1N1_antigenic_sites) * 3,
'HA_nonantigenic': 1701 - len(H1N1_antigenic_sites) * 3,
'HA': 1701,
'M1': 759,
'M2': 294,
'NA': 1410,
'NP': 1497,
'NEP': 366,
'NS1': 660,
'PA': 2151,
'PA-X': 699,
'PB1': 2274,
'PB1-F2': 273,
'PB2': 2280},
'Influenza B': {'HA': 1755,
'M1': 747,
'NA': 1401,
'NP': 1683,
'NEP': 369,
'NS1': 846,
'PA': 2181,
'PB1': 2259,
'PB2': 2313,
'BM2': 330,
'NB': 303}}
# Display constants
displayContext = 'poster'
palettes = dict()
snsblue, snsorange, snsgreen, snsred, snspurple, snsbrown, snspink, snsgrey, snsyellow, snssky = sns.color_palette('muted')
palettes['kind'] = sns.color_palette(('#eedc5b', '#d3494e'), 2)
palettes['subtype'] = sns.color_palette('deep')
palettes['AAtype'] = sns.color_palette((snsblue, snsorange, snsgreen), 3)
palettes['synon'] = sns.color_palette((snsblue, snsorange), 2)
palettes['vax'] = sns.color_palette('Reds', 2)[::-1]
palettes['age_category'] = sns.color_palette('Paired')
palettes['age_category_only'] = sns.color_palette('tab20')[8:10]
geneOrder = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", "NP", "NA", "M1", "M2", "NS1", "NEP"]
antigenicGeneOrder = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", 'HA_antigenic', 'HA_nonantigenic',"NP", "NA", "M1", "M2", "NS1", "NEP"]
segOrder = ['PB2', 'PB1', 'NP', 'HA', 'NA', 'PA', 'MP', 'NS']
subtypeOrder = ['H3N2', 'H1N1', 'Influenza B']
vaxOrder = [0, 1]
named_vaxOrder = ['Unvaccinated', 'Vaccinated']
ageOrder = ['18 or Under', 'Over 18']
NS_order = ['Nonsynon', 'Synon']
antigenicGeneNames = ["PB2", "PB1", "PA", "HA", 'Anti.\nHA', 'Nonanti.\nHA', "NP", "NA", "M1", "M2", "NS1", "NEP"]
antigenicGeneNames_withMinor = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", 'Anti.\nHA', 'Nonanti.\nHA', "NP", "NA", "M1", "M2", "NS1", "NEP"]
errorBarArgs = {"capsize": .1, "errwidth": 2}
# Load data
print ('loading subjects...')
subjects = pd.read_csv(dataFolder + '/subjects.tsv', **read_tsv_args)
print ('loading samples...')
samples = pd.read_csv(dataFolder + '/samples.tsv', **read_tsv_args)
# For downstream analysis, it can be nice to have a few figure-specific variables
samples['age_category'] = '18 or Under'
samples.loc[samples.age > 18, 'age_category'] = 'Over 18'
meltedPiSamples = samples.melt(id_vars=['sampleID', 'subtype', 'recieved_flu_vaccine', 'age_category', 'symptom_severity'], value_vars=['piN_sample', 'piS_sample']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
print ('loading segments...')
segments = pd.read_csv(dataFolder + '/segments.tsv', **read_tsv_args)
# I'll go ahead and make a melted version of all dataframes with piN/piS measurements
meltedPiSegments = segments.melt(id_vars=['sampleID', 'subtype', 'segment', 'recieved_flu_vaccine', 'symptom_severity'], value_vars=['piN_segment', 'piS_segment']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
print ('loading genes...')
genes = pd.read_csv(dataFolder + '/genes.tsv', **read_tsv_args)
try:
meltedPiGenes = genes.melt(id_vars=['sampleID', 'subtype', 'segment', 'product', 'age_category', 'recieved_flu_vaccine', 'symptom_severity'], value_vars=['piN_gene', 'piS_gene']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
except:
print (genes.columns)
raise
print ('loading SNPs...')
SNPs = pd.read_csv(dataFolder + '/SNPs_lenient_filter.gz', **read_tsv_args)
print ('loading transmission pairs...')
transmissionPairs = pd.read_csv(dataFolder + '/transmissionPairs.tsv', **read_tsv_args)
print ('loading transmission segments...')
transmissionSegments = pd.read_csv(dataFolder + '/transmissionSegments.tsv', **read_tsv_args)
print ('loading transmission SNPs...')
transmissionSNPs = pd.read_csv(dataFolder + '/transmissionSNPs_lenient_filter.gz', **read_tsv_args)
# make all vs all distance DF for distance comparisons
allvsall = pd.read_csv('/mnt/d/orchards/H1N1/figures/allvsall.tsv', **read_tsv_args)
allvsall = allvsall.merge(samples, left_on='index', right_on='sampleID', how='left')
allvsall = allvsall.merge(samples, left_on='contact', right_on='sampleID', how='left', suffixes=('_index', '_contact'))
# limit comparisons to those where the contact was infected after the index case, and onsets of symptoms are separated by no more than 10 days
allvsall = allvsall.loc[(pd.to_datetime(allvsall['time_of_symptom_onset_contact']) - pd.to_datetime(allvsall['time_of_symptom_onset_index'])) >= pd.Timedelta(0)]
allvsall = allvsall.loc[pd.to_datetime(allvsall['time_of_symptom_onset_contact']) - pd.to_datetime(allvsall['time_of_symptom_onset_index']) <= pd.Timedelta('10 days')]
allvsall = allvsall.loc[allvsall.subtype_index == allvsall.subtype_contact]
allvsall['school_match'] = 'Does not attend'
allvsall.loc[allvsall.school_index == allvsall.school_contact, 'school_match'] = 'Within school'
allvsall.loc[allvsall.school_index != allvsall.school_contact, 'school_match'] = 'Between schools'
allvsall['household_match'] = 'Other'
allvsall.loc[allvsall.household_index != allvsall.household_contact, 'household_match'] = 'No'
allvsall.loc[allvsall.household_index == allvsall.household_contact, 'household_match'] = 'Yes'
allvsall = allvsall.reset_index(drop=True)
allvsall['Relatedness'] = 'Random'
allvsall.loc[allvsall.clade_index == allvsall.clade_contact, 'Relatedness'] = 'Same Clade'
allvsall.loc[allvsall.subclade_index == allvsall.subclade_contact, 'Relatedness'] = 'Same Subclade'
allvsall.loc[allvsall.household_index == allvsall.household_contact, 'Relatedness'] = 'Same Household'
allvsall.loc[allvsall.school_index == allvsall.school_contact, 'Relatedness'] = 'Same School'
id_columns = ['sampleID', 'subtype', 'season', 'age', 'age_category', 'recieved_flu_vaccine', 'clade', 'subclade']
sample_N_stats = ['nonsynon_snps_per_day_samp', 'Xue_nonsynon_divergence', 'num_of_nonsynon_muts', 'nonsynon_mutation_rate_samp', 'Xue_nonsynon_divergence_per_day', 'nonsynon_divergence_rate']
sample_S_stats = ['synon_snps_per_day_samp', 'Xue_synon_divergence', 'num_of_synon_muts', 'synon_mutation_rate_samp', 'Xue_synon_divergence_per_day', 'synon_divergence_rate']
segment_N_stats = ['nonsynon_snps_per_day_seg', 'Xue_nonsynon_divergence_segment', 'num_of_nonsynon_muts_segment', 'nonsynon_mutation_rate_seg', 'nonsynon_divergence_per_day_seg', 'nonsynon_divergence_rate_seg']
segment_S_stats = [col.replace('nonsynon_', 'synon_') for col in segment_N_stats]
gene_N_stats = [col.replace('_segment', '').replace('_seg', '')+'_gene' for col in segment_N_stats]
gene_S_stats = [col.replace('_segment', '').replace('_seg', '')+'_gene' for col in segment_S_stats]
sample_N_stats.append('piN_sample')
sample_S_stats.append('piS_sample')
segment_N_stats.append('piN_segment')
segment_S_stats.append('piS_segment')
gene_N_stats.append('piN_gene')
gene_S_stats.append('piS_gene')
N_sample_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in sample_N_stats}
S_sample_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in sample_S_stats}
N_segment_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in segment_N_stats}
S_segment_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in segment_S_stats}
N_gene_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in gene_N_stats}
S_gene_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in gene_S_stats}
N_samples = samples[id_columns + sample_N_stats].rename(columns=N_sample_renameDict)
S_samples = samples[id_columns + sample_S_stats].rename(columns=S_sample_renameDict)
N_segments = segments[['segment'] + id_columns + segment_N_stats].rename(columns=N_segment_renameDict)
S_segments = segments[['segment'] + id_columns + segment_S_stats].rename(columns=S_segment_renameDict)
N_genes = genes[['segment', 'product'] + id_columns + gene_N_stats].rename(columns=N_gene_renameDict)
S_genes = genes[['segment', 'product'] + id_columns + gene_S_stats].rename(columns=S_gene_renameDict)
N_samples['Synon_Nonsynon'] = N_segments['Synon_Nonsynon'] = N_genes['Synon_Nonsynon'] = 'Nonsynon'
S_samples['Synon_Nonsynon'] = S_segments['Synon_Nonsynon'] = S_genes['Synon_Nonsynon'] = 'Synon'
NS_samples = N_samples.append(S_samples)
NS_segments = N_segments.append(S_segments)
NS_genes = N_genes.append(S_genes)
samples['recieved_flu_vaccine'] = samples['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
NS_samples['recieved_flu_vaccine'] = NS_samples['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
genes['recieved_flu_vaccine'] = genes['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
```
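The `meltedPi*` frames above all come from the same wide-to-long reshape: the piN and piS columns are melted into a single `Pi` value column keyed by a `PiN_PiS` label, so seaborn can hue or facet on synonymous versus nonsynonymous diversity. A toy version of that reshape (editor's sketch with synthetic values and hypothetical sample IDs):
```python
import pandas as pd

toy = pd.DataFrame({
    "sampleID": ["s1", "s2"],
    "subtype": ["H3N2", "H1N1"],
    "piN_sample": [0.0012, 0.0009],
    "piS_sample": [0.0031, 0.0040],
})

melted = toy.melt(
    id_vars=["sampleID", "subtype"],
    value_vars=["piN_sample", "piS_sample"],
).rename(columns={"variable": "PiN_PiS", "value": "Pi"})

print(melted)
#   sampleID subtype     PiN_PiS      Pi
# 0       s1    H3N2  piN_sample  0.0012
# 1       s2    H1N1  piN_sample  0.0009
# 2       s1    H3N2  piS_sample  0.0031
# 3       s2    H1N1  piS_sample  0.0040
```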
#### File: secondary_analysis/pre_processing_scripts/vcfClass.py
```python
import pandas as pd
import os
import gzip
from Bio import SeqIO
from Bio.Seq import Seq
import pysam
import numpy as np
from collections import defaultdict
def importVCF(location):
return VCF(location)
class VCF:
def __init__ (self, location, refFile=None, gtfFile=None, bamfiles={}):
self.vcfFileName = location
# handle gz files:
try:
            with open(location, 'r') as vcffile:
self._vcflines = vcffile.readlines()
except:
try:
                with gzip.open(location, 'rt') as vcffile:
self._vcflines = vcffile.readlines()
except:
raise
self._rowstoskip = self._getVCFStart()
self.header = Header(self._vcflines[0:self._rowstoskip - 1])
self.reference = {}
if refFile:
self.addReferenceFile(refFile)
self.gtffile = gtfFile
self.refFile = refFile
self.bamfiles = {}
self.samples = []
self.samples = self._vcflines[self._rowstoskip - 1].strip().split('\t')[9:]
self.mutations = [MutCall(row, [sample for sample in self.samples]) for row in self._vcflines[self._rowstoskip:]]
self.SNPs = [mut for mut in self.mutations if mut.type == 'SNP']
self.indels = [mut for mut in self.mutations if mut.type == 'insertion' or mut.type == 'deletion']
self._hashedmuts = {mut.chrom: {int(mut1.pos): mut1 for mut1 in self.mutations if mut1.chrom == mut.chrom} for mut in self.mutations}
self.read_tsv_args = {'sep': '\t', 'keep_default_na': False, 'na_values': ['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan']}
self.add_bamfile_locations(bamfiles)
def _getVCFStart(self):
rowstoskip = 0
# Opens resentative vcf as a text file and identifies the row in which our data begins
for index, line in enumerate(self._vcflines):
if "#CHROM" in line:
rowstoskip = index + 1
break
return rowstoskip
def addReferenceFile(self, ref):
self.refFile = ref
with open(ref, 'r') as r:
ref = SeqIO.parse(r, 'fasta')
self.reference = {seq.id: str(seq.seq) for seq in ref}
def averageWithVCF(self, otherVCF, newSampleName=None):
'''
*assumes one sample per vcf*
take intersection of mutations and average their coverages/frequencies etc.
'''
print (f'Averaging {self} with {otherVCF}')
mutsnotinotherVCF = {(mut.chrom, mut.pos) for mut in self.mutations}
for mut in otherVCF:
try:
self._hashedmuts[mut.chrom][mut.pos] = self._hashedmuts[mut.chrom][mut.pos].average(mut)
mutsnotinotherVCF.discard((mut.chrom, mut.pos))
if self._hashedmuts[mut.chrom][mut.pos] == None:
mutsnotinotherVCF.add((mut.chrom, mut.pos))
except KeyError:
pass
for chrom, pos in mutsnotinotherVCF:
self.removemut(chrom, pos)
self.renameSample(self.samples[0],newSampleName)
#self.header = self.header.combineHeaders(otherVCF.header)
return self
def averageSamples(self,sampleA,sampleB,newSampleName=None):
'''take intersection of mutations and average their coverages/frequencies etc.'''
intersection = [mut for mut in self.mutations if mut.hasSample(sampleA) and mut.hasSample(sampleB)]
#iterate through all mutations
for mut in intersection:
mut = mut.averageSamples(sampleA,sampleB)
self.renameSample(sampleA, newSampleName)
self.deleteSample(sampleB)
return self
def renameSample(self, origname, newName):
'''update name of sample origname in both list of samples (self.samples)
and in all mutations'''
if newName == None:
return 1
for mut in self.mutations:
mut.renameSample(origname, newName)
self.samples = [sample if sample != origname else newName for sample in self.samples]
def deleteSample(self, samplename):
for mut in self.mutations:
mut.deleteSample(samplename)
self.samples.remove(samplename)
def mergeVCFs(self,otherVCF):
for mut in otherVCF:
try:
self._hashedmuts[mut.chrom][mut.pos].addSamples(mut.samples, mut)
self.samples.extend(mut.samples)
except KeyError:
self.addMut(mut)
return self
def addMut(self,newMut):
raise Exception('addMut not implemented yet')
def removemut(self, chrom, pos):
'''muts are stored in self.mutations and self._hashedmuts.
this removes all muts w/ chrom and pos from both lists.'''
try:
print('deleting ' + chrom + ' '+str(pos))
self.mutations = [mut for mut in self.mutations if ((mut.chrom != chrom) or (mut.pos != pos))]
del self._hashedmuts[chrom][pos]
except KeyError:
pass
def annotate(self, gtffile=None, reference=None):
if not self.gtffile:
self.gtffile = gtffile
if not self.refFile:
self.addReferenceFile(reference)
coding_regions = extractCodingRegions(gtffile)
# use gene coordinates to create coding sequences from reference sequences
transcripts = createTranscripts(coding_regions, self.reference)
AAtranscripts = {gene:str(Seq(transcript).translate()) for gene, transcript in transcripts.items()}
for segment in coding_regions.keys():
for gene in coding_regions[segment].keys():
priorExonLength = 0
for start, stop in coding_regions[segment][gene]:
for mut in self.fetchSNPs(segment, start, stop+1):
offset = start - priorExonLength
mut.annotate(gene, offset, transcripts[gene], AAtranscripts[gene])
priorExonLength += (stop+1-start)
def add_bamfile_locations(self, bamfiles):
if len(bamfiles) == 0:
self.bamfiles = {}
elif type(bamfiles) == str and len(self.samples) == 1:
bamfile = os.path.abspath(bamfiles)
self.bamfiles = {self.samples[0]:bamfile}
elif type(bamfiles) == list and len(bamfiles) == len(self.samples):
self.bamfiles = {sample:os.path.abspath(bamfile) for sample, bamfile in zip(self.samples, bamfiles)}
elif type(bamfiles == dict):
for sample, bamfile in bamfiles.items():
if sample not in self.samples:
raise Exception(f'Sample {sample} not in VCF.')
self.bamfiles.update(bamfiles)
else:
raise Exception ('Unable to associate bamfiles with samples.')
def fetchSNPs(self, chrom, start=0, end=float('inf')):
return [mutation for mutation in self.mutations \
if (mutation.chrom == chrom and mutation.pos >= start and mutation.pos < end)]
def apply_position_filter(self, removeFails = True, signifigance_at = 0.05, removeFailsMethod = 'Bonferroni', in_read_cutoff=0.1, freq_cutoff=0.01):
#count mutations deleted for fun
if removeFailsMethod == 'Bonferroni':
#Number of comparisons that will be made is number of mutations * number of samples
df = self.to_dataframe()
self.pval_cutoff = signifigance_at/len(df.loc[df.FREQ > 0.01])
else:
self.pval_cutoff = signifigance_at
failcount = 0
for sample in self.samples:
try:
bamfile = self.bamfiles[sample]
except KeyError:
                print (f'Position filter requires the original bamfile from which SNPs were called.\nBamfile for sample {sample} is missing.')
                print ("You can add the bamfile by using vcf.add_bamfile_locations(format: {sample:bamfileLocation})")
                continue
bam = pysam.AlignmentFile(bamfile, 'rb')
print (f'Processing {sample}')
origMuts = len(self.mutations)
for mut in self.mutations:
if mut.hasSample(sample):
samp = mut.get(sample)
RD = samp.RD
AD = samp.AD
if .5-abs(.5-samp.freq) > freq_cutoff:
pos_filter_result = mut.apply_pos_filter(bam, sample, self.pval_cutoff, removeFails, in_read_cutoff)
else:
continue
if pos_filter_result != 'PASS':
print(pos_filter_result)
if pos_filter_result == 'FAIL':
failcount += 1
print (failcount)
if not mut.stillHasSNPs():
self.removemut(mut.chrom, mut.pos)
print (f'Finished.\n{failcount} SNVs failed position filter.')
return self
def get_synonymous(self):
        return [mutation for mutation in self.mutations \
                if 'Synonymous' in mutation.AAtypes]
def get_nonsynonymous(self):
        return [mutation for mutation in self.mutations \
                if 'Nonsynonymous' in mutation.AAtypes]
def to_dict(self):
'''returns list of dictionaries. Each dictionary is 'property':value for each snp in each sample.'''
return [entry for mut in self.mutations for entry in mut.to_dict()]
def to_numpy(self, referenceFile=None, cutoff_freq=0):
# def generate_ref_numpy(self, referenceFile):
if len(self.reference) < 1:
if referenceFile:
self.addReferenceFile(referenceFile)
else:
print('This function requires VCF to have a reference sequence.')
return None
concatrefseq = ""
segStarts = dict()
segCoords = dict()
runningtally = 0
for chrom, seq in self.reference.items():
segStarts[chrom.split('_')[-1]] = runningtally
segCoords[chrom.split('_')[-1]] = (runningtally, runningtally+len(seq))
runningtally += len(seq)
concatrefseq += seq
totalConcatLength = len(concatrefseq)
df = self.to_dataframe()
# only report muts that are within cutoff_freq; adjust all other read counts to 0/1
# first adjust SNPs under cutoff_freq (all ref)
df.loc[(df.FREQ > 0) & (df.FREQ < cutoff_freq), 'AD'] = 0
df.loc[(df.FREQ > 0) & (df.FREQ < cutoff_freq), 'DP'] = df.loc[(df.FREQ > 0) & (df.FREQ < cutoff_freq), 'RD']
df.loc[(df.FREQ > 0) & (df.FREQ < cutoff_freq), 'FREQ'] = 0
# then adjust SNPs above 1-cutoff_freq (all alt)
df.loc[(df.FREQ < 1) & (df.FREQ > 1-cutoff_freq), 'RD'] = 0
df.loc[(df.FREQ < 1) & (df.FREQ > 1-cutoff_freq), 'DP'] = df.loc[(df.FREQ < 1) & (df.FREQ > 1-cutoff_freq), 'AD']
df.loc[(df.FREQ < 1) & (df.FREQ > 1-cutoff_freq), 'FREQ'] = 1
df.chrom = df.chrom.str.split('_').str[-1]
df['inConcatPos'] = df.pos
for seg, offset in segStarts.items():
df.loc[df.chrom == seg,'inConcatPos'] += offset
nucDict={'A':0,'C':1,'G':2,'T':3}
df['ref_nuc'] = df.ref.map(nucDict)
df['alt_nuc'] = df.alt.map(nucDict)
RDdf = df[['sampleID','inConcatPos','ref_nuc','RD', 'chrom']]
RDdf = RDdf.sort_values('RD',ascending=False).reset_index(drop=True)
RDdf = RDdf.loc[~RDdf[['sampleID','inConcatPos','chrom','ref_nuc']].duplicated()]
readCtsDF = pd.pivot_table(RDdf[['sampleID','inConcatPos','ref_nuc','RD']], columns='ref_nuc', values = 'RD', index=('sampleID', 'inConcatPos'))
ADdf = df[['sampleID','inConcatPos','alt_nuc','AD', 'chrom']]
ADdf = ADdf.sort_values('AD',ascending=False).reset_index(drop=True)
ADdf = ADdf.loc[~ADdf[['sampleID','inConcatPos','chrom','alt_nuc']].duplicated()]
        readCtsDF.update(pd.pivot_table(ADdf[['sampleID','inConcatPos','alt_nuc','AD']], columns='alt_nuc', values = 'AD', index=('sampleID', 'inConcatPos')))
readCtsDF = readCtsDF.unstack().fillna(0)
positions = readCtsDF[0].columns
readCts = np.zeros((readCtsDF.shape[0],totalConcatLength,4))
readCts[:,positions,0] = readCtsDF[0].to_numpy()
readCts[:,positions,1] = readCtsDF[1].to_numpy()
readCts[:,positions,2] = readCtsDF[2].to_numpy()
readCts[:,positions,3] = readCtsDF[3].to_numpy()
samplelist = list(readCtsDF.index.get_level_values(0))
return readCts, samplelist, list(positions)
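    # Shape note (editor's illustration; the numbers are hypothetical): for two samples and a
    # concatenated reference of 13,158 nt, to_numpy() returns readCts with shape (2, 13158, 4),
    # i.e. per-sample, per-concatenated-position read counts for A, C, G, T. Only positions that
    # appear in the VCF are filled in; every other position stays zero. The matching sample list
    # and the list of filled positions are returned alongside the array.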
def to_dataframe(self):
'''export VCF as tidy dataframe'''
return pd.DataFrame(self.to_dict()).rename(columns={'sample':'sampleID'}) #sample is a reserved term in Pandas
def to_vcf(self, location):
with open (location, 'w') as outfile:
outfile.write(str(self.header))
columnheader = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+'\t'.join(self.samples)+'\n'
outfile.write(columnheader)
outfile.writelines([str(mutation)+'\n' for mutation in self.mutations])
def __len__(self):
return len(self.mutations)
def __iter__(self):
for mutation in self.mutations:
yield mutation
def __str__(self):
return f"VCF containing {len (self.samples)} samples and {len(self.mutations)} mutation calls"
class MutCall:
def __init__(self, row, samples):
self._rawlist = row.split('\t')
self.samples = samples
self.chrom = self._rawlist[0]
#internal representation will be 0-indexed. Will export VCFs as 1-indexed, all other export formats as 0-indexed.
self.pos = int(self._rawlist[1])-1
self.id = self._rawlist[2]
self.ref = self._rawlist[3]
self.alt = self._rawlist[4]
self.qual = self._rawlist[5]
self.filter = self._rawlist[6]
self.info = self._rawlist[7]
self.format = self._rawlist[8]
self.type = self._determinetype()
self._sampledata = {sample:SampleMut(sample, data, self.format) for data, sample in zip(self._rawlist[9:], self.samples)}
del self._rawlist
self.genes = []
self.genePOS = []
self.AArefs = []
self.AAalts = []
self.AApos = []
self.AAstrs = []
self.AAtypes = []
def _determinetype(self):
if len(self.ref) > 1:
return "deletion"
elif len(self.alt) > 1:
return "insertion"
elif len(self.alt) == 1:
return "SNP"
else:
return "translocation"
def get(self, sample):
return self._sampledata[sample]
def stillHasSNPs(self):
return np.array([self.hasSample(sample) for sample in self.samples]).any()
def apply_pos_filter(self, bam, sample, cutoff=0.05, removeFails=True, in_read_cutoff=0.1, min_base_quality=30, min_mapping_quality=10, log_loc='snp_filter_log.log'):
        '''given pysam alignment object and relevant sample, determine whether average
in-read positions for ref and alt alleles come from different distributions.
If so, it sets that sample's minority allele read ct to 0 (since these are presumed to be alignment errors).
If that means the ref allele is 100%, the sample is removed from the mutation.
If that means the alt allele is 100%, the sample is retained with updated statistics.'''
chrom = self.chrom
pos = self.pos
pileup = bam.pileup(contig=chrom, start=pos, end=pos+1, truncate=True, stepper="nofilter")
column = next(pileup)
sampmut = self.get(sample)
column.set_min_base_quality(min_base_quality)
bases = [base.upper() for base in column.get_query_sequences()]
map_quals = column.get_mapping_qualities()
soft_clipped_positions = [read.alignment.get_reference_positions().index(pos) if pos in read.alignment.get_reference_positions() else 0 for read in column.pileups]
read_lengths = [read.alignment.query_alignment_length for read in column.pileups]
positions = defaultdict(list)
for position, read_length, base, mQ in zip(soft_clipped_positions, read_lengths, bases, map_quals):
if mQ < min_mapping_quality:
continue
try:
inReadPos = position/read_length
positions[base].append(.50-abs(inReadPos-.50)) #distance of SNP from end of read
except:
print (base)
print (bases)
avg_ref_pos = np.mean(positions[self.ref.upper()])
avg_alt_pos = np.mean(positions[self.alt.upper()])
if (self.ref.upper() not in positions.keys()) or (self.alt.upper() not in positions.keys()):
p_value = 1 #Definitely not an error. p-value = 1.
elif (len(positions[self.ref.upper()]) <= 1) or (len(positions[self.alt.upper()]) <= 1):
p_value = 1 #Can't do pvalue calc on one position
else:
ref_positions = bootstrap(np.array(positions[self.ref.upper()]), int(1/cutoff)+1)
alt_positions = bootstrap(np.array(positions[self.alt.upper()]), int(1/cutoff)+1)
bigger_than_pvalue = np.mean(ref_positions>alt_positions)
less_than_pvalue = np.mean(ref_positions<alt_positions)
p_value = min(bigger_than_pvalue, less_than_pvalue)
sampmut.avg_ref_pos = avg_ref_pos
sampmut.avg_alt_pos = avg_alt_pos
sampmut.read_pos_p_value = p_value
# regardless of "ref" and "alt", we want to examine the in-read position of the allele w/
# fewer mapped entries.
if len(positions[self.ref.upper()]) < len(positions[self.alt.upper()]):
dominant_allele_positions = positions[self.alt.upper()]
minor_allele_positions = positions[self.ref.upper()]
else:
minor_allele_positions = positions[self.alt.upper()]
dominant_allele_positions = positions[self.ref.upper()]
location_cutoff = in_read_cutoff
# Read position logic:
# If the positions are significantly different and are separated by a distinct amount, the mutation is not valid.
# If the positions are not significantly different but they are on average
# within last 10% of end of read, the mutation is not valid.
close_to_end_of_read = np.mean(minor_allele_positions) < location_cutoff
sig_diff_pos_from_major_allele = (p_value < cutoff)
if sig_diff_pos_from_major_allele:
sampmut.position_filter = 'FAIL'
with open(log_loc, 'a') as log:
print (f'For sample {sample}, mutation {self.chrom.split("_")[-1]} {self.pos} failed read position filter.',file=log)
print (f'Avg ref position: {np.round(avg_ref_pos,3)}(n={len(positions[self.ref.upper()])}). Avg alt position: {np.round(avg_alt_pos,3)}(n={len(positions[self.alt.upper()])}).',file=log)
print (f'Read location cutoff was {np.round(location_cutoff, 4)}. p-value {p_value} is less than cutoff {cutoff}.', file = log)
#Write to screen
freq= len(positions[self.ref.upper()])/(len(positions[self.ref.upper()])+len(positions[self.alt.upper()]))
if (freq > 0.01) and (freq < 0.99):
print (f'For sample {sample}, mutation {self.chrom.split("_")[-1]} {self.pos} read position filter: {sampmut.position_filter}.')
print (f'Avg ref position: {np.round(avg_ref_pos,3)}(n={len(positions[self.ref.upper()])}). Avg alt position: {np.round(avg_alt_pos,3)}(n={len(positions[self.alt.upper()])}).')
print (f'Read location cutoff was {np.round(location_cutoff, 4)}. p-value {p_value} is less than cutoff {cutoff}.')
if removeFails:
if sampmut.RD>sampmut.AD:
sampmut.zeroOut()
elif sampmut.RD<=sampmut.AD:
sampmut.position_filter = 'PASS'
sampmut.DP -= sampmut.RD
sampmut.RD = 0
sampmut.freq = 1.0
sampmut.freqstring='100%'
sampmut.PVAL = np.round(p_value,4)
sampmut.RBQ = 0
sampmut.RDF = 0
sampmut.RDR = 0
sampmut.update()
print (sampmut.position_filter, sampmut.freqstring)
else:
sampmut.position_filter = 'PASS'
return sampmut.position_filter
def averageSamples(self, sampleA, sampleB):
self._sampledata[sampleA] = self._sampledata[sampleA].average(self._sampledata[sampleB])
return self
def renameSample(self,oldsamplename,newName):
self._sampledata[newName] = self._sampledata.pop(oldsamplename)
self.samples = [sample if sample != oldsamplename else newName for sample in self.samples]
def deleteSample(self, sampletoDelete):
self._sampledata.pop(sampletoDelete)
self.samples.remove(sampletoDelete)
def average(self, otherMut):
sample = self.get(self.samples[0])
sample2 = otherMut.get(otherMut.samples[0])
if sample2.SDP == 0 or sample.SDP==0:
return None
else:
sample = sample.average(sample2)
tempinfo = self.info.split(';')
for ADPloc, item in enumerate(tempinfo):
if "ADP" in item:
ADP = int(item.split('=')[-1])
for otheritem in otherMut.info.split(';'):
if "ADP" in otheritem:
otherADP = int(otheritem.split('=')[-1])
break
break
tempinfo[ADPloc] = str(int((ADP+otherADP)/2))
self.info = "ADP="+";".join(tempinfo)
return self
def hasSample(self, samplename):
return self._sampledata[samplename].exists()
def addSamples(self,samplelist,newMut):
pass
def annotate(self, gene, adjust, dnaseq, AAseq):
inGenePos = self.pos-adjust #pos is 0-indexed, so this is 0-indexed.
self.genes.append(gene)
self.genePOS.append(inGenePos)
if self.ref != dnaseq[inGenePos]:
print (self.pos)
print (self.ref)
print (inGenePos)
print (adjust)
print (gene)
print (dnaseq)
print (AAseq)
print (dnaseq[inGenePos])
raise Exception
#Calc assuming type is SNP:
AApos = int((inGenePos)/3)
refAA = AAseq[AApos]
inAApos = (inGenePos)%3
refcodon = dnaseq[inGenePos-inAApos:inGenePos-inAApos+3]
altcodon = refcodon[:inAApos]+self.alt+refcodon[inAApos+1:]
altAA = str(Seq(altcodon).translate())
# currently not calling indels, this is incomplete (cannot handle partial indels, multiple alts)
if self.type == 'deletion':
refAA = '-'*int(len(self.ref)/3)
if len(refAA) == 0:
refAA = 'd'
if self.type == 'insertion':
altAA = '-'*int(len(self.ref)/3)
if len(refAA) == 0:
altAA = 'd'
self.AApos.append(AApos)
self.AArefs.append(refAA)
self.AAalts.append(altAA)
self.AAstrs.append(refAA+str(AApos)+altAA)
if refAA == altAA:
self.AAtypes.append('Synonymous')
else:
self.AAtypes.append('Nonsynonymous')
def to_dict(self):
selfDict = []
for samplename in self.samples:
sample = self.get(samplename)
result = {'sample':samplename,'chrom':self.chrom, 'pos':self.pos, 'id':self.id, 'ref':self.ref,'alt':self.alt,'qual':self.qual}
result.update(sample.to_dict())
if len(self.AApos) > 0: #if annotated
for gene, inGenePos, AAref, AApos, AAalt, AAstr, AAtype in zip(self.genes, self.genePOS, self.AArefs, self.AApos,self.AAalts,self.AAstrs, self.AAtypes):
geneSpecificResult = result.copy()
geneSpecificResult.update({'gene':gene,'inGenePos':inGenePos,'refAA':AAref, 'AApos':AApos, 'altAA':AAalt, 'AAstr':AAstr, 'AAtype':AAtype})
selfDict.append(geneSpecificResult)
else:
selfDict.append(result)
return selfDict
def __str__(self):
#Add 1 to pos to account for 1-indexing of VCF files
return '\t'.join([self.chrom, str(self.pos+1), self.id, self.ref,self.alt,self.qual,self.filter,self.info,self.format])+'\t'+'\t'.join(str(self.get(sample)) for sample in self.samples)
def __iter__(self):
for sample in self.samples:
yield self._sampledata[sample]
class SampleMut:
def __init__(self, samplename, data, formatinfo):
self.name = samplename
self.format = formatinfo
self.other=[]
for label, item in zip(self.format.split(":"), data.split(":")):
item = item.strip('\n').strip('\"')
if item == '.':
item = 0
elif item.isnumeric():
item = int(item)
if label == "GT":
self.GT = item
elif label == "GQ":
self.GQ = item
elif label == "SDP":
self.SDP = item
elif label == "DP":
self.DP = item
elif label == "RD":
self.RD = item
elif label == "AD":
self.AD = item
elif label == "FREQ":
self.freqstring = item
try:
self.freq = round(float(item.rstrip('%'))/100,4)
except (AttributeError, ValueError):
self.freq = round(float(self.freqstring)/100,4)
elif label == "PVAL":
self.PVAL = item
elif label == "RBQ":
self.RBQ = item
elif label == "ABQ":
self.ABQ = item
elif label == "RDF":
self.RDF = item
elif label == "RDR":
self.RDR = item
elif label == "ADF":
self.ADF = item
elif label == "ADR":
self.ADR = item
else:
self.other.append((label,item))
# When calculating freq and depth, Varscan removes AD reads due to quality filter, but not RD reads.
# But it reports only quality RD reads.
# That's a problem when you've got 7 crummy RD reads and a 100 good AD reads!
# I'll recalc depth and frequency here.
if self.SDP != 0:
self.DP = self.RD + self.AD
self.freq = round(self.AD/self.DP, 4)
self.freqstring = str(round((self.freq*100),4))+'%'
self._properties = [self.GT,self.GQ,self.SDP,self.DP,self.RD,self.AD,self.freqstring,self.PVAL,self.RBQ,self.ABQ,self.RDF,self.RDR,self.ADF,self.ADR]
def update(self):
self._properties = [self.GT,self.GQ,self.SDP,self.DP,self.RD,self.AD,self.freqstring,self.PVAL,self.RBQ,self.ABQ,self.RDF,self.RDR,self.ADF,self.ADR]
def average(self, otherSample):
self.freq = round((self.freq+otherSample.freq)/2, 4)
self.freqstring = str(self.freq*100)+"%"
self.GQ = int(round((self.GQ+otherSample.GQ)/2, 0))
self.SDP += otherSample.SDP
self.DP += otherSample.DP
oldAD = self.AD
self.AD = int(round(self.DP*self.freq,0))
oldRD = self.RD
self.RD = self.DP-self.AD
self.RBQ = int(round((self.RBQ+otherSample.RBQ)/2, 0))
self.ABQ = int(round((self.ABQ+otherSample.ABQ)/2, 0))
try:
RDFtoRD = ((self.RDF/oldRD)+(otherSample.RDF/otherSample.RD))/2
self.RDF = int(round(self.RD*RDFtoRD,0))
self.RDR = self.RD-self.RDF
except ZeroDivisionError:
self.RDF=0
self.RDR=0
try:
ADFtoRD = ((self.ADF/oldAD)+(otherSample.ADF/otherSample.AD))/2
self.ADF = int(round(self.AD*ADFtoRD,0))
self.ADR = self.AD-self.ADF
except ZeroDivisionError:
self.ADF=0
self.ADR=0
self.update()
return self
def exists(self):
if self.SDP == 0:
return False
else:
return True
def zeroOut(self):
'''If a sample fails a filter or otherwise needs to be deleted in only one sample,
it's zero-ed out so that it's blank. Importantly, deleting a sample from one mutation
is different than deleting a sample from the whole vcf.'''
self.GT='./.'
self.GQ=0
self.SDP=0
self.DP=0
self.RD=0
self.AD=0
self.freqstring=0
self.PVAL=0
self.RBQ=0
self.ABQ=0
self.RDF=0
self.RDR=0
self.ADF=0
self.ADR=0
self.update()
def to_dict(self):
self.update()
return {"GT":self.GT,"GQ":self.GQ,"SDP":self.SDP,"DP":self.DP,"RD":self.RD,"AD":self.AD,"FREQ":self.freq,"PVAL":self.PVAL,"RBQ":self.RBQ,"ABQ":self.ABQ,"RDF":self.RDF,"RDR":self.RDR,"ADF":self.ADF,"ADR":self.ADR}
def __str__(self):
self.update()
return ":".join([str(item) for item in self._properties])
class Header:
def __init__(self, headertext):
self.text = headertext
def combineHeaders(self,otherheader):
for line in otherheader.text:
if line not in self.text:
self.text.append(line)
def __str__(self):
return "".join(self.text)
def extractCodingRegions(gtffile):
with open(gtffile, 'r') as g:
gtf = g.readlines()
coding_regions = {}
for line in gtf:
line = line.replace("/", "_")
lineitems = line.split("\t")
segment_name = lineitems[0]
annotation_type = lineitems[2]
start = int(lineitems[3]) - 1 # adding the -1 here for 0 indexing
stop = int(lineitems[4]) - 1 # adding the -1 here for 0 indexing
gene_name = lineitems[8]
gene_name = gene_name.split(";")[0]
gene_name = gene_name.replace("gene_id ","")
gene_name = gene_name.replace("\"","")
if annotation_type.lower() == "cds":
if segment_name not in coding_regions:
coding_regions[segment_name] = {}
coding_regions[segment_name][gene_name] = [[start, stop]]
elif segment_name in coding_regions and gene_name not in coding_regions[segment_name]:
coding_regions[segment_name][gene_name] = [[start, stop]]
elif gene_name in coding_regions[segment_name]:
coding_regions[segment_name][gene_name].append([start, stop])
return coding_regions
def createTranscripts(coding_regions, ref_segments):
transcripts = {}
for segment in coding_regions:
for gene in coding_regions[segment]:
transcripts[gene] = ""
coordinates = coding_regions[segment][gene] # define the coding regions for each gene
for start, stop in coordinates: # loop through start/stop sites in coding regions
sequence_chunk = ref_segments[segment][start:stop+1]
transcripts[gene] = transcripts[gene] + sequence_chunk # append each piece of the transcript together
return transcripts
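# Illustrative sketch (toy data) of how the two helpers above fit together:
# extractCodingRegions() builds {segment: {gene: [[start, stop], ...]}} with 0-based
# inclusive coordinates, and createTranscripts() stitches those chunks into one CDS
# string per gene. The file name, segment, gene and sequence below are invented.
def _gtf_example():
    gtf_line = 'segA\tsrc\tCDS\t4\t12\t.\t+\t0\tgene_id "HA";\n'
    with open('toy.gtf', 'w') as fh:
        fh.write(gtf_line)
    ref_segments = {'segA': 'NNNATGAAATGATAA'}
    coding_regions = extractCodingRegions('toy.gtf')               # {'segA': {'HA': [[3, 11]]}}
    transcripts = createTranscripts(coding_regions, ref_segments)  # {'HA': 'ATGAAATGA'}
    return coding_regions, transcripts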
# Return `bootsize` bootstrap replicate means, each computed from len(array) draws with replacement.
def bootstrap(array, bootsize):
return np.random.choice(array, len(array)*bootsize, replace=True).reshape(-1, bootsize).mean(axis=0)
``` |
{
"source": "JosephLalli/statannotations",
"score": 2
} |
#### File: statannotations/statannotations/format_annotations.py
```python
from statannotations.stats.StatResult import StatResult
from typing import Union, List
import numpy as np
import pandas as pd
def pval_annotation_text(result: Union[List[StatResult], StatResult], pvalue_thresholds):
single_value = False
if isinstance(result, list):
x1_pval = np.array([res.pval for res in result])
x1_signif_suff = [res.significance_suffix for res in result]
else:
x1_pval = np.array([result.pval])
x1_signif_suff = [result.significance_suffix]
single_value = True
# Sort the threshold array
pvalue_thresholds = pd.DataFrame(pvalue_thresholds).sort_values(by=0, ascending=False).values
x_annot = pd.Series(["" for _ in range(len(x1_pval))])
for i in range(0, len(pvalue_thresholds)):
if i < len(pvalue_thresholds) - 1:
condition = (x1_pval <= pvalue_thresholds[i][0]) & (pvalue_thresholds[i + 1][0] < x1_pval)
x_annot[condition] = pvalue_thresholds[i][1]
else:
condition = x1_pval < pvalue_thresholds[i][0]
x_annot[condition] = pvalue_thresholds[i][1]
x_annot = pd.Series([f"{star}{signif}" for star, signif in zip(x_annot, x1_signif_suff)])
return x_annot if not single_value else x_annot.iloc[0]
def simple_text(result: StatResult, pvalue_format, pvalue_thresholds, test_short_name=None):
"""
Generates simple text for test name and pvalue
:param result: StatResult instance
:param pvalue_format: format string for pvalue
:param test_short_name: Short name of test to show
:param pvalue_thresholds: String to display per pvalue range
:return: simple annotation
"""
# Sort thresholds
thresholds = sorted(pvalue_thresholds, key=lambda x: x[0])
# Test name if passed
text = test_short_name and test_short_name + " " or ""
for threshold in thresholds:
if result.pval < threshold[0]:
pval_text = "p ≤ {}".format(threshold[1])
break
else:
pval_text = "p = {}".format(pvalue_format).format(result.pval)
return text + pval_text + result.significance_suffix
```
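For orientation, a short usage sketch of the two formatters above. The `StatResult` construction mirrors the call used in the test module below; the threshold lists follow the (value, label) layout these functions iterate over, and the specific cutoffs and expected outputs are illustrative only.
```python
from statannotations.stats.StatResult import StatResult
from statannotations.format_annotations import pval_annotation_text, simple_text

res = StatResult("Mann-Whitney", "M.W.W.", "U_stat", 3.0, 0.0009, alpha=0.05)

# Star-style annotation: thresholds are (p-value, label) pairs.
star_thresholds = [[1e-4, "****"], [1e-3, "***"], [1e-2, "**"], [0.05, "*"], [1, "ns"]]
print(pval_annotation_text(res, star_thresholds))   # e.g. '***' (plus any correction suffix)

# Simple-style annotation: the label is what gets printed after 'p ≤'.
simple_thresholds = [[1e-5, "1e-5"], [1e-4, "1e-4"], [1e-3, "0.001"], [1e-2, "0.01"]]
print(simple_text(res, "{:.2e}", simple_thresholds, test_short_name="M.W.W."))
# e.g. 'M.W.W. p ≤ 0.001'
```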
#### File: statannotations/tests/test_stat_result.py
```python
import unittest
import warnings
from functools import partial
import numpy.testing as npt
from statannotations.stats.ComparisonsCorrection import ComparisonsCorrection
from statannotations.stats.StatResult import StatResult
class TestStatResult(unittest.TestCase):
"""Test correction implementation."""
def setUp(self) -> None:
self.benjamini_hochberg = ComparisonsCorrection("Benjamini-Hochberg")
self.stat_result = StatResult("Test X", "X", "Stat", 1, 0.02, alpha=0.05)
self.stat_result.correction_method = self.benjamini_hochberg.name
def test_ns_if_ns(self):
self.stat_result.corrected_significance = False
assert self.stat_result.formatted_output == (
"Test X with Benjamini-Hochberg correction, P_val:2.000e-02 (ns) "
"Stat=1.000e+00")
def test_nothing_if_s(self):
self.stat_result.corrected_significance = True
assert self.stat_result.formatted_output == (
"Test X with Benjamini-Hochberg correction, P_val:2.000e-02 "
"Stat=1.000e+00")
``` |
{
"source": "JosephLalli/statannot",
"score": 3
} |
#### File: statannot/tests/test_statannot.py
```python
import unittest
import warnings
import numpy.testing as npt
from statannot import statannot
class TestBonferroni(unittest.TestCase):
"""Test Bonferroni correction function."""
def test_returns_scalar_with_scalar_input(self):
corrected = statannot.bonferroni(0.5)
with self.assertRaisesRegex(TypeError, 'has no len'):
# If `corrected` is a scalar, calling `len` should raise an error.
len(corrected)
def test_returns_correct_values_with_auto_num_comparisons(self):
raw_p_values = [0.1, 0.05, 0.5]
expected = [0.3, 0.15, 1.0]
observed = statannot.bonferroni(raw_p_values)
npt.assert_allclose(observed, expected)
def test_returns_correct_values_with_manual_num_comparisons_int(self):
raw_p_values = [0.1, 0.05, 0.5]
expected = [0.5, 0.25, 1.0]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
observed = statannot.bonferroni(raw_p_values, 5)
npt.assert_allclose(observed, expected)
def test_returns_correct_values_with_manual_num_comparisons_float(self):
raw_p_values = [0.1, 0.05, 0.5]
expected = [0.5, 0.25, 1.0]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
observed = statannot.bonferroni(raw_p_values, 5.0)
npt.assert_allclose(observed, expected)
``` |
{
"source": "JosephLalli/tensorqtl",
"score": 2
} |
#### File: tensorqtl/tensorqtl/cis.py
```python
import torch
import numpy as np
import pandas as pd
import scipy.stats as stats
import sys
import os
import time
from collections import OrderedDict
sys.path.insert(1, os.path.dirname(__file__))
import genotypeio, eigenmt
from core import *
import importlib
import core
importlib.reload(core)
from core import *
importlib.reload(eigenmt)
def calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=None, return_af=True):
"""
Calculate nominal associations
genotypes_t: genotypes x samples
phenotype_t: single phenotype
residualizer: Residualizer object (see core.py)
"""
p = phenotype_t.reshape(1,-1)
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(genotypes_t, p, residualizer=residualizer, return_var=True)
std_ratio_t = torch.sqrt(phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1))
r_nominal_t = r_nominal_t.squeeze()
r2_nominal_t = r_nominal_t.double().pow(2)
if residualizer is not None:
dof = residualizer.dof
else:
dof = p.shape[1] - 2
slope_t = r_nominal_t * std_ratio_t.squeeze()
tstat_t = r_nominal_t * torch.sqrt(dof / (1 - r2_nominal_t))
slope_se_t = (slope_t.double() / tstat_t).float()
# tdist = tfp.distributions.StudentT(np.float64(dof), loc=np.float64(0.0), scale=np.float64(1.0))
# pval_t = tf.scalar_mul(2, tdist.cdf(-tf.abs(tstat)))
if return_af:
af_t, ma_samples_t, ma_count_t = get_allele_stats(genotypes_t)
return tstat_t, slope_t, slope_se_t, af_t, ma_samples_t, ma_count_t
else:
return tstat_t, slope_t, slope_se_t
def calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=None, random_tiebreak=False):
"""Calculate nominal and empirical correlations"""
permutations_t = phenotype_t[permutation_ix_t]
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(genotypes_t, phenotype_t.reshape(1,-1),
residualizer=residualizer, return_var=True)
std_ratio_t = torch.sqrt(phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1))
r_nominal_t = r_nominal_t.squeeze(dim=-1)
std_ratio_t = std_ratio_t.squeeze(dim=-1)
corr_t = calculate_corr(genotypes_t, permutations_t, residualizer=residualizer).pow(2) # genotypes x permutations
corr_t = corr_t[~torch.isnan(corr_t).any(1),:]
if corr_t.shape[0] == 0:
raise ValueError('All correlations resulted in NaN. Please check phenotype values.')
r2_perm_t,_ = corr_t.max(0) # maximum correlation across permutations
r2_nominal_t = r_nominal_t.pow(2)
r2_nominal_t[torch.isnan(r2_nominal_t)] = -1 # workaround for nanargmax()
if not random_tiebreak:
ix = r2_nominal_t.argmax()
else:
ix = torch.nonzero(r2_nominal_t == r2_nominal_t.max(), as_tuple=True)[0]
ix = ix[torch.randint(0, len(ix), [1])[0]]
return r_nominal_t[ix], std_ratio_t[ix], ix, r2_perm_t, genotypes_t[ix]
def calculate_association(genotype_df, phenotype_s, covariates_df=None,
interaction_s=None, maf_threshold_interaction=0.05,
window=1000000, verbose=True):
"""
Standalone helper function for computing the association between
a set of genotypes and a single phenotype.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert genotype_df.columns.equals(phenotype_s.index)
# copy to GPU
phenotype_t = torch.tensor(phenotype_s.values, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotype_df.values, dtype=torch.float).to(device)
impute_mean(genotypes_t)
dof = phenotype_s.shape[0] - 2
if covariates_df is not None:
assert phenotype_s.index.equals(covariates_df.index)
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof -= covariates_df.shape[1]
else:
residualizer = None
if interaction_s is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
df = pd.DataFrame({
'pval_nominal':2*stats.t.cdf(-np.abs(tstat), dof),
'slope':slope, 'slope_se':slope_se,
'tstat':tstat, 'af':af, 'ma_samples':ma_samples, 'ma_count':ma_count,
}, index=genotype_df.index)
else:
interaction_t = torch.tensor(interaction_s.values.reshape(1,-1), dtype=torch.float32).to(device)
if maf_threshold_interaction > 0:
mask_s = pd.Series(True, index=interaction_s.index)
mask_s[interaction_s.sort_values(kind='mergesort').index[:interaction_s.shape[0]//2]] = False
interaction_mask_t = torch.BoolTensor(mask_s).to(device)
else:
interaction_mask_t = None
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t, residualizer,
return_sparse=False)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
mask = mask_t.cpu().numpy()
dof -= 2
df = pd.DataFrame({
'pval_g':2*stats.t.cdf(-np.abs(tstat[:,0]), dof), 'b_g':b[:,0], 'b_g_se':b_se[:,0],
'pval_i':2*stats.t.cdf(-np.abs(tstat[:,1]), dof), 'b_i':b[:,1], 'b_i_se':b_se[:,1],
'pval_gi':2*stats.t.cdf(-np.abs(tstat[:,2]), dof), 'b_gi':b[:,2], 'b_gi_se':b_se[:,2],
'af':af, 'ma_samples':ma_samples, 'ma_count':ma_count,
}, index=genotype_df.index[mask])
if df.index.str.startswith('chr').all(): # assume chr_pos_ref_alt_build format
df['position'] = df.index.map(lambda x: int(x.split('_')[1]))
return df
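# Illustrative sketch (hypothetical toy data) of calling calculate_association() directly:
# genotype_df holds 0/1/2 dosages (variants x samples), phenotype_s is a per-sample
# pd.Series, and the optional covariates_df is samples x covariates. Names and values
# below are invented; nothing here runs on import.
def _calculate_association_example():
    rng = np.random.default_rng(0)
    samples = [f's{i}' for i in range(50)]
    genotype_df = pd.DataFrame(rng.integers(0, 3, size=(10, 50)),
                               index=[f'chr1_{100+i}_A_G_b38' for i in range(10)],
                               columns=samples)
    phenotype_s = pd.Series(rng.normal(size=50), index=samples)
    covariates_df = pd.DataFrame(rng.normal(size=(50, 3)), index=samples,
                                 columns=['PC1', 'PC2', 'PC3'])
    return calculate_association(genotype_df, phenotype_s, covariates_df=covariates_df)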
def map_nominal(genotype_df, variant_df, phenotype_df, phenotype_pos_df, prefix,
covariates_df=None, maf_threshold=0, interaction_df=None, maf_threshold_interaction=0.05,
group_s=None, window=1000000, run_eigenmt=False,
output_dir='.', write_top=True, write_stats=True, logger=None, verbose=True):
"""
cis-QTL mapping: nominal associations for all variant-phenotype pairs
Association results for each chromosome are written to parquet files
in the format <output_dir>/<prefix>.cis_qtl_pairs.<chr>.parquet
If interaction_df is provided, the top association per phenotype is
written to <output_dir>/<prefix>.cis_qtl_top_assoc.txt.gz unless
write_top is set to False, in which case it is returned as a DataFrame
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
if group_s is not None:
group_dict = group_s.to_dict()
logger.write('cis-QTL mapping: nominal associations for all variant-phenotype pairs')
logger.write(f' * {phenotype_df.shape[1]} samples')
logger.write(f' * {phenotype_df.shape[0]} phenotypes')
if covariates_df is not None:
assert np.all(phenotype_df.columns==covariates_df.index)
logger.write(f' * {covariates_df.shape[1]} covariates')
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof = phenotype_df.shape[1] - 2 - covariates_df.shape[1]
else:
residualizer = None
dof = phenotype_df.shape[1] - 2
logger.write(f' * {variant_df.shape[0]} variants')
if interaction_df is not None:
assert interaction_df.index.equals(phenotype_df.columns)
logger.write(f" * including {interaction_df.shape[1]} interaction term(s)")
if maf_threshold_interaction > 0:
logger.write(f' * using {maf_threshold_interaction:.2f} MAF threshold')
elif maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter')
genotype_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype_df.columns])
genotype_ix_t = torch.from_numpy(genotype_ix).to(device)
if interaction_df is not None:
ni = interaction_df.shape[1]
dof -= 2 * ni
interaction_t = torch.tensor(interaction_df.values, dtype=torch.float32).to(device)
if maf_threshold_interaction > 0 and ni == 1:
mask_s = pd.Series(True, index=interaction_df.index)
mask_s[interaction_df[interaction_df.columns[0]].sort_values(kind='mergesort').index[:interaction_df.shape[0]//2]] = False
interaction_mask_t = torch.BoolTensor(mask_s).to(device)
else:
# TODO: implement filtering for multiple interactions?
interaction_mask_t = None
if ni == 1:
col_order = ['phenotype_id', 'variant_id', 'tss_distance', 'af', 'ma_samples', 'ma_count', 'pval_g', 'b_g', 'b_g_se',
'pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']
else:
col_order = (['phenotype_id', 'variant_id', 'tss_distance', 'af', 'ma_samples', 'ma_count', 'pval_g', 'b_g', 'b_g_se'] +
[k.replace('i', f"i{i+1}") for i in range(0,ni) for k in ['pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']])
# use column names instead of numbered interaction variables in output files
var_dict = []
for i,v in enumerate(interaction_df.columns, 1):
for c in ['pval_i', 'b_i', 'b_i_se']:
var_dict.append((c.replace('_i', f'_i{i}'), c.replace('_i', f'_{v}')))
for c in ['pval_gi', 'b_gi', 'b_gi_se']:
var_dict.append((c.replace('_gi', f'_gi{i}'), c.replace('_gi', f'_g-{v}')))
var_dict = dict(var_dict)
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, group_s=group_s, window=window)
# iterate over chromosomes
best_assoc = []
start_time = time.time()
k = 0
logger.write(' * Computing associations')
for chrom in igc.chrs:
logger.write(f' Mapping chromosome {chrom}')
# allocate arrays
n = 0 # number of pairs
if group_s is None:
for i in igc.phenotype_pos_df[igc.phenotype_pos_df['chr'] == chrom].index:
j = igc.cis_ranges[i]
n += j[1] - j[0] + 1
else:
for i in igc.group_s[igc.phenotype_pos_df['chr'] == chrom].drop_duplicates().index:
j = igc.cis_ranges[i]
n += j[1] - j[0] + 1
chr_res = OrderedDict()
chr_res['phenotype_id'] = []
chr_res['variant_id'] = []
chr_res['tss_distance'] = np.empty(n, dtype=np.int32)
chr_res['af'] = np.empty(n, dtype=np.float32)
chr_res['ma_samples'] = np.empty(n, dtype=np.int32)
chr_res['ma_count'] = np.empty(n, dtype=np.int32)
if interaction_df is None:
chr_res['pval_nominal'] = np.empty(n, dtype=np.float64)
chr_res['slope'] = np.empty(n, dtype=np.float32)
chr_res['slope_se'] = np.empty(n, dtype=np.float32)
else:
chr_res['pval_g'] = np.empty(n, dtype=np.float64)
chr_res['b_g'] = np.empty(n, dtype=np.float32)
chr_res['b_g_se'] = np.empty(n, dtype=np.float32)
chr_res['pval_i'] = np.empty([n, ni], dtype=np.float64)
chr_res['b_i'] = np.empty([n, ni], dtype=np.float32)
chr_res['b_i_se'] = np.empty([n, ni], dtype=np.float32)
chr_res['pval_gi'] = np.empty([n, ni], dtype=np.float64)
chr_res['b_gi'] = np.empty([n, ni], dtype=np.float32)
chr_res['b_gi_se'] = np.empty([n, ni], dtype=np.float32)
start = 0
if group_s is None:
for k, (phenotype, genotypes, genotype_range, phenotype_id) in enumerate(igc.generate_data(chrom=chrom, verbose=verbose), k+1):
# copy genotypes to GPU
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
variant_ids = variant_df.index[genotype_range[0]:genotype_range[-1]+1]
tss_distance = np.int32(variant_df['pos'].values[genotype_range[0]:genotype_range[-1]+1] - igc.phenotype_tss[phenotype_id])
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
n = len(variant_ids)
else:
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
if genotypes_t.shape[0] > 0:
mask = mask_t.cpu().numpy()
variant_ids = variant_ids[mask]
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
tss_distance = tss_distance[mask]
n = len(variant_ids)
# top association
ix = np.nanargmax(np.abs(tstat[:,1+ni:]).max(1)) # top association among all interactions tested
# index order: 0, 1, 1+ni, 2, 2+ni, 3, 3+ni, ...
order = [0] + [i if j % 2 == 0 else i+ni for i in range(1,ni+1) for j in range(2)]
top_s = [phenotype_id, variant_ids[ix], tss_distance[ix], af[ix], ma_samples[ix], ma_count[ix]]
for i in order:
top_s += [tstat[ix,i], b[ix,i], b_se[ix,i]]
top_s = pd.Series(top_s, index=col_order)
if run_eigenmt: # compute eigenMT correction
top_s['tests_emt'] = eigenmt.compute_tests(genotypes_t, var_thresh=0.99, variant_window=200)
best_assoc.append(top_s)
else: # all genotypes in window were filtered out
n = 0
if n > 0:
chr_res['phenotype_id'].extend([phenotype_id]*n)
chr_res['variant_id'].extend(variant_ids)
chr_res['tss_distance'][start:start+n] = tss_distance
chr_res['af'][start:start+n] = af
chr_res['ma_samples'][start:start+n] = ma_samples
chr_res['ma_count'][start:start+n] = ma_count
if interaction_df is None:
chr_res['pval_nominal'][start:start+n] = tstat
chr_res['slope'][start:start+n] = slope
chr_res['slope_se'][start:start+n] = slope_se
else:
# columns: [g, i_1 ... i_n, gi_1, ... gi_n] --> 0, 1:1+ni, 1+ni:1+2*ni
chr_res['pval_g'][start:start+n] = tstat[:,0]
chr_res['b_g'][start:start+n] = b[:,0]
chr_res['b_g_se'][start:start+n] = b_se[:,0]
chr_res['pval_i'][start:start+n] = tstat[:,1:1+ni]
chr_res['b_i'][start:start+n] = b[:,1:1+ni]
chr_res['b_i_se'][start:start+n] = b_se[:,1:1+ni]
chr_res['pval_gi'][start:start+n] = tstat[:,1+ni:]
chr_res['b_gi'][start:start+n] = b[:,1+ni:]
chr_res['b_gi_se'][start:start+n] = b_se[:,1+ni:]
start += n # update pointer
else: # groups
for k, (phenotypes, genotypes, genotype_range, phenotype_ids, group_id) in enumerate(igc.generate_data(chrom=chrom, verbose=verbose), k+1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
variant_ids = variant_df.index[genotype_range[0]:genotype_range[-1]+1]
# assuming that the TSS for all grouped phenotypes is the same
tss_distance = np.int32(variant_df['pos'].values[genotype_range[0]:genotype_range[-1]+1] - igc.phenotype_tss[phenotype_ids[0]])
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
if interaction_df is not None:
genotypes_t, mask_t = filter_maf_interaction(genotypes_t, interaction_mask_t=interaction_mask_t,
maf_threshold_interaction=maf_threshold_interaction)
mask = mask_t.cpu().numpy()
variant_ids = variant_ids[mask]
tss_distance = tss_distance[mask]
n = len(variant_ids)
if genotypes_t.shape[0] > 0:
# process first phenotype in group
phenotype_id = phenotype_ids[0]
phenotype_t = torch.tensor(phenotypes[0], dtype=torch.float).to(device)
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat, slope, slope_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
else:
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat, b, b_se, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
px = [phenotype_id]*n
# iterate over remaining phenotypes in group
for phenotype, phenotype_id in zip(phenotypes[1:], phenotype_ids[1:]):
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
if interaction_df is None:
res = calculate_cis_nominal(genotypes_t, phenotype_t, residualizer=residualizer)
tstat0, slope0, slope_se0, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
else:
res = calculate_interaction_nominal(genotypes_t, phenotype_t.unsqueeze(0), interaction_t,
residualizer=residualizer, return_sparse=False,
variant_ids=variant_ids)
tstat0, b0, b_se0, af, ma_samples, ma_count = [i.cpu().numpy() for i in res]
# find associations that are stronger for current phenotype
if interaction_df is None:
ix = np.where(np.abs(tstat0) > np.abs(tstat))[0]
else:
ix = np.where(np.abs(tstat0[:,2]) > np.abs(tstat[:,2]))[0]
# update relevant positions
for j in ix:
px[j] = phenotype_id
if interaction_df is None:
tstat[ix] = tstat0[ix]
slope[ix] = slope0[ix]
slope_se[ix] = slope_se0[ix]
else:
tstat[ix] = tstat0[ix]
b[ix] = b0[ix]
b_se[ix] = b_se0[ix]
chr_res['phenotype_id'].extend(px)
chr_res['variant_id'].extend(variant_ids)
chr_res['tss_distance'][start:start+n] = tss_distance
chr_res['af'][start:start+n] = af
chr_res['ma_samples'][start:start+n] = ma_samples
chr_res['ma_count'][start:start+n] = ma_count
if interaction_df is None:
chr_res['pval_nominal'][start:start+n] = tstat
chr_res['slope'][start:start+n] = slope
chr_res['slope_se'][start:start+n] = slope_se
else:
chr_res['pval_g'][start:start+n] = tstat[:,0]
chr_res['b_g'][start:start+n] = b[:,0]
chr_res['b_g_se'][start:start+n] = b_se[:,0]
chr_res['pval_i'][start:start+n] = tstat[:,1:1+ni]
chr_res['b_i'][start:start+n] = b[:,1:1+ni]
chr_res['b_i_se'][start:start+n] = b_se[:,1:1+ni]
chr_res['pval_gi'][start:start+n] = tstat[:,1+ni:]
chr_res['b_gi'][start:start+n] = b[:,1+ni:]
chr_res['b_gi_se'][start:start+n] = b_se[:,1+ni:]
# top association for the group
if interaction_df is not None:
ix = np.nanargmax(np.abs(tstat[:,1+ni:]).max(1)) # top association among all interactions tested
# index order: 0, 1, 1+ni, 2, 2+ni, 3, 3+ni, ...
order = [0] + [i if j % 2 == 0 else i+ni for i in range(1,ni+1) for j in range(2)]
top_s = [chr_res['phenotype_id'][start:start+n][ix], variant_ids[ix],
tss_distance[ix], af[ix], ma_samples[ix], ma_count[ix]]
for i in order:
top_s += [tstat[ix,i], b[ix,i], b_se[ix,i]]
top_s = pd.Series(top_s, index=col_order)
top_s['num_phenotypes'] = len(phenotype_ids)
if run_eigenmt: # compute eigenMT correction
top_s['tests_emt'] = eigenmt.compute_tests(genotypes_t, var_thresh=0.99, variant_window=200)
best_assoc.append(top_s)
start += n # update pointer
logger.write(f' time elapsed: {(time.time()-start_time)/60:.2f} min')
# convert to dataframe, compute p-values and write current chromosome
if start < len(chr_res['af']):
for x in chr_res:
chr_res[x] = chr_res[x][:start]
if write_stats:
if interaction_df is not None:
cols = ['pval_i', 'b_i', 'b_i_se', 'pval_gi', 'b_gi', 'b_gi_se']
if ni == 1: # squeeze columns
for k in cols:
chr_res[k] = chr_res[k][:,0]
else: # split interactions
for i in range(0, ni): # fix order
for k in cols:
chr_res[k.replace('i', f"i{i+1}")] = None
for k in cols:
for i in range(0, ni):
chr_res[k.replace('i', f"i{i+1}")] = chr_res[k][:,i]
del chr_res[k]
chr_res_df = pd.DataFrame(chr_res)
if interaction_df is None:
m = chr_res_df['pval_nominal'].notnull()
chr_res_df.loc[m, 'pval_nominal'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_nominal'].abs(), dof)
else:
if ni == 1:
m = chr_res_df['pval_gi'].notnull()
chr_res_df.loc[m, 'pval_g'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_g'].abs(), dof)
chr_res_df.loc[m, 'pval_i'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_i'].abs(), dof)
chr_res_df.loc[m, 'pval_gi'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_gi'].abs(), dof)
else:
m = chr_res_df['pval_gi1'].notnull()
chr_res_df.loc[m, 'pval_g'] = 2*stats.t.cdf(-chr_res_df.loc[m, 'pval_g'].abs(), dof)
for i in range(1, ni+1):
chr_res_df.loc[m, f'pval_i{i}'] = 2*stats.t.cdf(-chr_res_df.loc[m, f'pval_i{i}'].abs(), dof)
chr_res_df.loc[m, f'pval_gi{i}'] = 2*stats.t.cdf(-chr_res_df.loc[m, f'pval_gi{i}'].abs(), dof)
# substitute column headers
chr_res_df.rename(columns=var_dict, inplace=True)
print(' * writing output')
chr_res_df.to_parquet(os.path.join(output_dir, f'{prefix}.cis_qtl_pairs.{chrom}.parquet'))
if interaction_df is not None and len(best_assoc) > 0:
best_assoc = pd.concat(best_assoc, axis=1, sort=False).T.set_index('phenotype_id').infer_objects()
m = best_assoc['pval_g'].notnull()
best_assoc.loc[m, 'pval_g'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_g'].abs(), dof)
if ni == 1:
best_assoc.loc[m, 'pval_i'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_i'].abs(), dof)
best_assoc.loc[m, 'pval_gi'] = 2*stats.t.cdf(-best_assoc.loc[m, 'pval_gi'].abs(), dof)
else:
for i in range(1, ni+1):
best_assoc.loc[m, f'pval_i{i}'] = 2*stats.t.cdf(-best_assoc.loc[m, f'pval_i{i}'].abs(), dof)
best_assoc.loc[m, f'pval_gi{i}'] = 2*stats.t.cdf(-best_assoc.loc[m, f'pval_gi{i}'].abs(), dof)
if run_eigenmt and ni == 1: # leave correction of specific p-values up to user for now (TODO)
if group_s is None:
best_assoc['pval_emt'] = np.minimum(best_assoc['tests_emt']*best_assoc['pval_gi'], 1)
else:
best_assoc['pval_emt'] = np.minimum(best_assoc['num_phenotypes']*best_assoc['tests_emt']*best_assoc['pval_gi'], 1)
best_assoc['pval_adj_bh'] = eigenmt.padjust_bh(best_assoc['pval_emt'])
if ni > 1: # substitute column headers
best_assoc.rename(columns=var_dict, inplace=True)
if write_top:
best_assoc.to_csv(os.path.join(output_dir, f'{prefix}.cis_qtl_top_assoc.txt.gz'),
sep='\t', float_format='%.6g')
else:
return best_assoc
logger.write('done.')
def prepare_cis_output(r_nominal, r2_perm, std_ratio, g, num_var, dof, variant_id, tss_distance, phenotype_id, nperm=10000):
"""Return nominal p-value, allele frequencies, etc. as pd.Series"""
r2_nominal = r_nominal*r_nominal
pval_perm = (np.sum(r2_perm>=r2_nominal)+1) / (nperm+1)
slope = r_nominal * std_ratio
tstat2 = dof * r2_nominal / (1 - r2_nominal)
slope_se = np.abs(slope) / np.sqrt(tstat2)
n2 = 2*len(g)
af = np.sum(g) / n2
if af <= 0.5:
ma_samples = np.sum(g>0.5)
ma_count = np.sum(g[g>0.5])
else:
ma_samples = np.sum(g<1.5)
ma_count = n2 - np.sum(g[g>0.5])
res_s = pd.Series(OrderedDict([
('num_var', num_var),
('beta_shape1', np.NaN),
('beta_shape2', np.NaN),
('true_df', np.NaN),
('pval_true_df', np.NaN),
('variant_id', variant_id),
('tss_distance', tss_distance),
('ma_samples', ma_samples),
('ma_count', ma_count),
('af', af),
('pval_nominal', pval_from_corr(r2_nominal, dof)),
('slope', slope),
('slope_se', slope_se),
('pval_perm', pval_perm),
('pval_beta', np.NaN),
]), name=phenotype_id)
return res_s
def _process_group_permutations(buf, variant_df, tss, dof, group_id, nperm=10000, beta_approx=True):
"""
Merge results for grouped phenotypes
buf: [r_nominal, std_ratio, var_ix, r2_perm, g, num_var, phenotype_id]
"""
# select phenotype with strongest nominal association
max_ix = np.argmax(np.abs([b[0] for b in buf]))
r_nominal, std_ratio, var_ix = buf[max_ix][:3]
g, num_var, phenotype_id = buf[max_ix][4:]
# select best phenotype correlation for each permutation
r2_perm = np.max([b[3] for b in buf], 0)
# return r_nominal, std_ratio, var_ix, r2_perm, g, num_var, phenotype_id
variant_id = variant_df.index[var_ix]
tss_distance = variant_df['pos'].values[var_ix] - tss
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, num_var, dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
if beta_approx:
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof*0.25)
res_s['group_id'] = group_id
res_s['group_size'] = len(buf)
return res_s
def map_cis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, covariates_df=None,
group_s=None, maf_threshold=0, beta_approx=True, nperm=10000,
window=1000000, random_tiebreak=False, logger=None, seed=None,
verbose=True, warn_monomorphic=True):
"""Run cis-QTL mapping"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
logger.write('cis-QTL mapping: empirical p-values for phenotypes')
logger.write(f' * {phenotype_df.shape[1]} samples')
logger.write(f' * {phenotype_df.shape[0]} phenotypes')
if group_s is not None:
logger.write(f' * {len(group_s.unique())} phenotype groups')
group_dict = group_s.to_dict()
if covariates_df is not None:
assert np.all(phenotype_df.columns==covariates_df.index), 'Sample names in phenotype matrix columns and covariate matrix rows do not match!'
assert ~(covariates_df.isnull().any().any()), f'Missing or null values in covariates matrix, in columns {",".join(covariates_df.columns[covariates_df.isnull().any(axis=0)].astype(str))}'
logger.write(f' * {covariates_df.shape[1]} covariates')
residualizer = Residualizer(torch.tensor(covariates_df.values, dtype=torch.float32).to(device))
dof = phenotype_df.shape[1] - 2 - covariates_df.shape[1]
else:
residualizer = None
dof = phenotype_df.shape[1] - 2
logger.write(f' * {genotype_df.shape[0]} variants')
if maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter')
if random_tiebreak:
logger.write(f' * randomly selecting top variant in case of ties')
genotype_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype_df.columns])
genotype_ix_t = torch.from_numpy(genotype_ix).to(device)
# permutation indices
n_samples = phenotype_df.shape[1]
ix = np.arange(n_samples)
if seed is not None:
logger.write(f' * using seed {seed}')
np.random.seed(seed)
permutation_ix_t = torch.LongTensor(np.array([np.random.permutation(ix) for i in range(nperm)])).to(device)
res_df = []
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, group_s=group_s, window=window)
if igc.n_phenotypes == 0:
raise ValueError('No valid phenotypes found.')
start_time = time.time()
logger.write(' * computing permutations')
if group_s is None:
for k, (phenotype, genotypes, genotype_range, phenotype_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# filter monomorphic variants
mono_t = (genotypes_t == genotypes_t[:, [0]]).all(1)
if mono_t.any():
genotypes_t = genotypes_t[~mono_t]
genotype_range = genotype_range[~mono_t.cpu()]
if warn_monomorphic:
logger.write(f' * WARNING: excluding {mono_t.sum()} monomorphic variants')
if genotypes_t.shape[0] == 0:
logger.write(f'WARNING: skipping {phenotype_id} (no valid variants)')
continue
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
r_nominal, std_ratio, var_ix, r2_perm, g = [i.cpu().numpy() for i in res]
var_ix = genotype_range[var_ix]
variant_id = variant_df.index[var_ix]
tss_distance = variant_df['pos'].values[var_ix] - igc.phenotype_tss[phenotype_id]
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, genotypes_t.shape[0], dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
if beta_approx:
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof)
res_df.append(res_s)
else: # grouped mode
for k, (phenotypes, genotypes, genotype_range, phenotype_ids, group_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# filter monomorphic variants
mono_t = (genotypes_t == genotypes_t[:, [0]]).all(1)
if mono_t.any():
genotypes_t = genotypes_t[~mono_t]
genotype_range = genotype_range[~mono_t.cpu()]
if warn_monomorphic:
logger.write(f' * WARNING: excluding {mono_t.sum()} monomorphic variants')
if genotypes_t.shape[0] == 0:
logger.write(f'WARNING: skipping group {group_id} (no valid variants)')
continue
# iterate over phenotypes
buf = []
for phenotype, phenotype_id in zip(phenotypes, phenotype_ids):
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
res = [i.cpu().numpy() for i in res] # r_nominal, std_ratio, var_ix, r2_perm, g
res[2] = genotype_range[res[2]]
buf.append(res + [genotypes_t.shape[0], phenotype_id])
res_s = _process_group_permutations(buf, variant_df, igc.phenotype_tss[phenotype_ids[0]], dof,
group_id, nperm=nperm, beta_approx=beta_approx)
res_df.append(res_s)
res_df = pd.concat(res_df, axis=1, sort=False).T
res_df.index.name = 'phenotype_id'
logger.write(f' Time elapsed: {(time.time()-start_time)/60:.2f} min')
logger.write('done.')
return res_df.astype(output_dtype_dict).infer_objects()
def map_independent(genotype_df, variant_df, cis_df, phenotype_df, phenotype_pos_df, covariates_df,
group_s=None, maf_threshold=0, fdr=0.05, fdr_col='qval', nperm=10000,
window=1000000, random_tiebreak=False, logger=None, seed=None, verbose=True):
"""
Run independent cis-QTL mapping (forward-backward regression)
cis_df: output from map_cis, annotated with q-values (calculate_qvalues)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert np.all(phenotype_df.index==phenotype_pos_df.index)
assert np.all(covariates_df.index==phenotype_df.columns)
if logger is None:
logger = SimpleLogger()
signif_df = cis_df[cis_df[fdr_col]<=fdr].copy()
cols = [
'num_var', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df',
'variant_id', 'tss_distance', 'ma_samples', 'ma_count', 'af',
'pval_nominal', 'slope', 'slope_se', 'pval_perm', 'pval_beta',
]
if group_s is not None:
cols += ['group_id', 'group_size']
signif_df = signif_df[cols]
signif_threshold = signif_df['pval_beta'].max()
# subset significant phenotypes
if group_s is None:
ix = phenotype_df.index[phenotype_df.index.isin(signif_df.index)]
else:
ix = group_s[phenotype_df.index].loc[group_s[phenotype_df.index].isin(signif_df['group_id'])].index
logger.write('cis-QTL mapping: conditionally independent variants')
logger.write(f' * {phenotype_df.shape[1]} samples')
if group_s is None:
logger.write(f' * {signif_df.shape[0]}/{cis_df.shape[0]} significant phenotypes')
else:
logger.write(f' * {signif_df.shape[0]}/{cis_df.shape[0]} significant groups')
logger.write(f' {len(ix)}/{phenotype_df.shape[0]} phenotypes')
group_dict = group_s.to_dict()
logger.write(f' * {covariates_df.shape[1]} covariates')
logger.write(f' * {genotype_df.shape[0]} variants')
if maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter')
if random_tiebreak:
logger.write(f' * randomly selecting top variant in case of ties')
phenotype_df = phenotype_df.loc[ix]
phenotype_pos_df = phenotype_pos_df.loc[ix]
genotype_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype_df.columns])
genotype_ix_t = torch.from_numpy(genotype_ix).to(device)
dof = phenotype_df.shape[1] - 2 - covariates_df.shape[1]
ix_dict = {i:k for k,i in enumerate(genotype_df.index)}
# permutation indices
n_samples = phenotype_df.shape[1]
ix = np.arange(n_samples)
if seed is not None:
logger.write(f' * using seed {seed}')
np.random.seed(seed)
permutation_ix_t = torch.LongTensor(np.array([np.random.permutation(ix) for i in range(nperm)])).to(device)
res_df = []
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype_df, phenotype_pos_df, group_s=group_s, window=window)
if igc.n_phenotypes == 0:
raise ValueError('No valid phenotypes found.')
logger.write(' * computing independent QTLs')
start_time = time.time()
if group_s is None:
for k, (phenotype, genotypes, genotype_range, phenotype_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# 1) forward pass
forward_df = [signif_df.loc[phenotype_id]] # initialize results with top variant
covariates = covariates_df.values.copy() # initialize covariates
dosage_dict = {}
while True:
# add variant to covariates
variant_id = forward_df[-1]['variant_id']
ig = genotype_df.values[ix_dict[variant_id], genotype_ix].copy()
m = ig == -1
ig[m] = ig[~m].mean()
dosage_dict[variant_id] = ig
covariates = np.hstack([covariates, ig.reshape(-1,1)]).astype(np.float32)
dof = phenotype_df.shape[1] - 2 - covariates.shape[1]
residualizer = Residualizer(torch.tensor(covariates, dtype=torch.float32).to(device))
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
r_nominal, std_ratio, var_ix, r2_perm, g = [i.cpu().numpy() for i in res]
x = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof)
# add to list if empirical p-value passes significance threshold
if x[0] <= signif_threshold:
var_ix = genotype_range[var_ix]
variant_id = variant_df.index[var_ix]
tss_distance = variant_df['pos'].values[var_ix] - igc.phenotype_tss[phenotype_id]
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, genotypes.shape[0], dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = x
forward_df.append(res_s)
else:
break
forward_df = pd.concat(forward_df, axis=1, sort=False).T
dosage_df = pd.DataFrame(dosage_dict)
# 2) backward pass
if forward_df.shape[0]>1:
back_df = []
variant_set = set()
for k,i in enumerate(forward_df['variant_id'], 1):
covariates = np.hstack([
covariates_df.values,
dosage_df[np.setdiff1d(forward_df['variant_id'], i)].values,
])
dof = phenotype_df.shape[1] - 2 - covariates.shape[1]
residualizer = Residualizer(torch.tensor(covariates, dtype=torch.float32).to(device))
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
r_nominal, std_ratio, var_ix, r2_perm, g = [i.cpu().numpy() for i in res]
var_ix = genotype_range[var_ix]
variant_id = variant_df.index[var_ix]
x = calculate_beta_approx_pval(r2_perm, r_nominal*r_nominal, dof)
if x[0] <= signif_threshold and variant_id not in variant_set:
tss_distance = variant_df['pos'].values[var_ix] - igc.phenotype_tss[phenotype_id]
res_s = prepare_cis_output(r_nominal, r2_perm, std_ratio, g, genotypes.shape[0], dof, variant_id, tss_distance, phenotype_id, nperm=nperm)
res_s[['pval_beta', 'beta_shape1', 'beta_shape2', 'true_df', 'pval_true_df']] = x
res_s['rank'] = k
back_df.append(res_s)
variant_set.add(variant_id)
if len(back_df)>0:
res_df.append(pd.concat(back_df, axis=1, sort=False).T)
else: # single independent variant
forward_df['rank'] = 1
res_df.append(forward_df)
else: # grouped phenotypes
for k, (phenotypes, genotypes, genotype_range, phenotype_ids, group_id) in enumerate(igc.generate_data(verbose=verbose), 1):
# copy genotypes to GPU
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes_t = genotypes_t[:,genotype_ix_t]
impute_mean(genotypes_t)
if maf_threshold > 0:
maf_t = calculate_maf(genotypes_t)
mask_t = maf_t >= maf_threshold
genotypes_t = genotypes_t[mask_t]
mask = mask_t.cpu().numpy().astype(bool)
genotype_range = genotype_range[mask]
# 1) forward pass
forward_df = [signif_df[signif_df['group_id']==group_id].iloc[0]] # initialize results with top variant
covariates = covariates_df.values.copy() # initialize covariates
dosage_dict = {}
while True:
# add variant to covariates
variant_id = forward_df[-1]['variant_id']
ig = genotype_df.values[ix_dict[variant_id], genotype_ix].copy()
m = ig == -1
ig[m] = ig[~m].mean()
dosage_dict[variant_id] = ig
covariates = np.hstack([covariates, ig.reshape(-1,1)]).astype(np.float32)
dof = phenotype_df.shape[1] - 2 - covariates.shape[1]
residualizer = Residualizer(torch.tensor(covariates, dtype=torch.float32).to(device))
# iterate over phenotypes
buf = []
for phenotype, phenotype_id in zip(phenotypes, phenotype_ids):
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
res = [i.cpu().numpy() for i in res] # r_nominal, std_ratio, var_ix, r2_perm, g
res[2] = genotype_range[res[2]]
buf.append(res + [genotypes.shape[0], phenotype_id])
res_s = _process_group_permutations(buf, variant_df, igc.phenotype_tss[phenotype_ids[0]], dof, group_id, nperm=nperm)
# add to list if significant
if res_s['pval_beta'] <= signif_threshold:
forward_df.append(res_s)
else:
break
forward_df = pd.concat(forward_df, axis=1, sort=False).T
dosage_df = pd.DataFrame(dosage_dict)
# 2) backward pass
if forward_df.shape[0]>1:
back_df = []
variant_set = set()
for k,variant_id in enumerate(forward_df['variant_id'], 1):
covariates = np.hstack([
covariates_df.values,
dosage_df[np.setdiff1d(forward_df['variant_id'], variant_id)].values,
])
dof = phenotype_df.shape[1] - 2 - covariates.shape[1]
residualizer = Residualizer(torch.tensor(covariates, dtype=torch.float32).to(device))
# iterate over phenotypes
buf = []
for phenotype, phenotype_id in zip(phenotypes, phenotype_ids):
phenotype_t = torch.tensor(phenotype, dtype=torch.float).to(device)
res = calculate_cis_permutations(genotypes_t, phenotype_t, permutation_ix_t,
residualizer=residualizer, random_tiebreak=random_tiebreak)
res = [i.cpu().numpy() for i in res] # r_nominal, std_ratio, var_ix, r2_perm, g
res[2] = genotype_range[res[2]]
buf.append(res + [genotypes.shape[0], phenotype_id])
res_s = _process_group_permutations(buf, variant_df, igc.phenotype_tss[phenotype_ids[0]], dof, group_id, nperm=nperm)
if res_s['pval_beta'] <= signif_threshold and variant_id not in variant_set:
res_s['rank'] = k
back_df.append(res_s)
variant_set.add(variant_id)
if len(back_df)>0:
res_df.append(pd.concat(back_df, axis=1, sort=False).T)
else: # single independent variant
forward_df['rank'] = 1
res_df.append(forward_df)
res_df = pd.concat(res_df, axis=0, sort=False)
res_df.index.name = 'phenotype_id'
logger.write(f' Time elapsed: {(time.time()-start_time)/60:.2f} min')
logger.write('done.')
return res_df.reset_index().astype(output_dtype_dict)
```
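A hedged sketch of how the two main entry points above are typically driven end to end, assuming the package is installed so that `tensorqtl.cis` is importable. All inputs are built inline with made-up toy values; in real use they come from the package's loaders, and `phenotype_pos_df` is assumed to follow the chr/tss layout that `InputGeneratorCis` expects.
```python
import numpy as np
import pandas as pd
from tensorqtl import cis

rng = np.random.default_rng(1)
samples = [f"s{i}" for i in range(60)]

# genotypes: variants x samples, dosages in {0,1,2}, -1 for missing
genotype_df = pd.DataFrame(rng.integers(0, 3, size=(200, 60)),
                           index=[f"chr1_{1000+i}_A_G_b38" for i in range(200)],
                           columns=samples)
variant_df = pd.DataFrame({"chrom": "chr1", "pos": np.arange(1000, 1200)},
                          index=genotype_df.index)

# phenotypes: phenotypes x samples; positions give chromosome and TSS per phenotype
phenotype_df = pd.DataFrame(rng.normal(size=(5, 60)),
                            index=[f"gene{i}" for i in range(5)], columns=samples)
phenotype_pos_df = pd.DataFrame({"chr": "chr1",
                                 "tss": np.linspace(1010, 1190, 5).astype(int)},
                                index=phenotype_df.index)

covariates_df = pd.DataFrame(rng.normal(size=(60, 3)), index=samples,
                             columns=["PC1", "PC2", "PC3"])

# nominal pass: writes <output_dir>/<prefix>.cis_qtl_pairs.<chr>.parquet
cis.map_nominal(genotype_df, variant_df, phenotype_df, phenotype_pos_df,
                prefix="toy", covariates_df=covariates_df, output_dir=".")

# permutation pass: one row per phenotype with empirical and beta-approximated p-values
cis_df = cis.map_cis(genotype_df, variant_df, phenotype_df, phenotype_pos_df,
                     covariates_df=covariates_df, nperm=1000, seed=1)
print(cis_df[["variant_id", "pval_nominal", "pval_beta"]].head())
```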
#### File: tensorqtl/tensorqtl/coloc.py
```python
import numpy as np
import pandas as pd
import scipy.stats as stats
import torch
import os
import time
import sys
sys.path.insert(1, os.path.dirname(__file__))
import genotypeio, eigenmt
from core import *
def logsumexp(x, dim=0):
mmax,_ = torch.max(x, dim=dim, keepdim=True)
return mmax + (x-mmax).exp().sum(dim, keepdim=True).log()
def logdiff(x, y, dim=0):
xmax,_ = torch.max(x, dim=dim, keepdim=True)
ymax,_ = torch.max(y, dim=dim, keepdim=True)
mmax = torch.max(xmax, ymax)
return mmax + ((x - mmax).exp() - (y - mmax).exp()).log()
def coloc(genotypes1_t, genotypes2_t, phenotype1_t, phenotype2_t,
residualizer1=None, residualizer2=None, mode='beta',
p1=1e-4, p2=1e-4, p12=1e-5):
"""COLOC from summary statistics (either beta/sds or p-values and MAF)"""
assert phenotype1_t.dim() == 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# phenotype 1
if mode == 'beta':
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(
genotypes1_t, phenotype1_t.reshape(1,-1), residualizer1, return_var=True)
r_nominal_t = r_nominal_t.squeeze()
var_ratio_t = phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1)
else:
r_nominal_t = calculate_corr(genotypes1_t, phenotype1_t.reshape(1,-1),
residualizer1, return_var=False).squeeze()
r2_nominal_t = r_nominal_t.double().pow(2)
if residualizer1 is not None:
dof = residualizer1.dof
else:
dof = phenotype1_t.shape[0] - 2
if mode == 'beta':
tstat2_t = r2_nominal_t * dof / (1 - r2_nominal_t)
beta2_t = r2_nominal_t * var_ratio_t.squeeze()
beta_var_t = beta2_t / tstat2_t
var_prior = 0.0225 * phenotype_var_t
r = var_prior / (var_prior + beta_var_t)
l1 = 0.5 * ((1 - r).log() + r*tstat2_t)
else:
# compute p-values and z-score to match COLOC results exactly
# (instead of directly using t-statistic)
tstat_t = r_nominal_t * torch.sqrt(dof / (1 - r2_nominal_t))
p = stats.t.cdf(-np.abs(tstat_t.cpu().numpy()), dof) # 2 dropped since canceled in isf
maf_t = calculate_maf(genotypes1_t)
N = phenotype1_t.shape[0]
v = 1 / (2 * N * maf_t * (1 - maf_t))
z2_t = torch.Tensor(stats.norm.isf(p)**2).to(device)
r = 0.0225 / (0.0225 + v)
l1 = 0.5 * ((1 - r).log() + r*z2_t)
# phenotype 2
if phenotype2_t.dim() == 1:
num_phenotypes = 1
num_samples = phenotype2_t.shape[0]
phenotype2_t = phenotype2_t.reshape(1,-1)
else:
num_phenotypes, num_samples = phenotype2_t.shape
if mode == 'beta':
r_nominal_t, genotype_var_t, phenotype_var_t = calculate_corr(
genotypes2_t, phenotype2_t, residualizer2, return_var=True)
r_nominal_t = r_nominal_t.squeeze()
var_ratio_t = phenotype_var_t.reshape(1,-1) / genotype_var_t.reshape(-1,1)
else:
r_nominal_t = calculate_corr(genotypes2_t, phenotype2_t, residualizer2, return_var=False).squeeze()
r2_nominal_t = r_nominal_t.double().pow(2)
if residualizer2 is not None:
dof = residualizer2.dof
else:
dof = num_samples - 2
if mode == 'beta':
tstat2_t = r2_nominal_t * dof / (1 - r2_nominal_t)
beta2_t = r2_nominal_t * var_ratio_t.squeeze()
beta_var_t = beta2_t / tstat2_t
var_prior = 0.0225 * phenotype_var_t
r = var_prior / (var_prior + beta_var_t)
l2 = 0.5 * ((1 - r).log() + r*tstat2_t)
else:
tstat_t = r_nominal_t * torch.sqrt(dof / (1 - r2_nominal_t))
p = stats.t.cdf(-np.abs(tstat_t.cpu().numpy()), dof)
maf_t = calculate_maf(genotypes2_t)
v = 1 / (2 * num_samples * maf_t * (1 - maf_t))
z2_t = torch.Tensor(stats.norm.isf(p)**2).to(device)
r = 0.0225 / (0.0225 + v)
if num_phenotypes > 1:
r = r.reshape(-1,1)
l2 = 0.5 * ((1 - r).log() + r*z2_t)
if num_phenotypes > 1:
lsum = l1.reshape(-1,1) + l2
lh0_abf = torch.zeros([1, num_phenotypes]).to(device)
lh1_abf = np.log(p1) + logsumexp(l1).repeat([1, num_phenotypes])
else:
lsum = l1 + l2
lh0_abf = torch.zeros([1]).to(device)
lh1_abf = np.log(p1) + logsumexp(l1)
lh2_abf = np.log(p2) + logsumexp(l2)
lh3_abf = np.log(p1) + np.log(p2) + logdiff(logsumexp(l1) + logsumexp(l2), logsumexp(lsum))
lh4_abf = np.log(p12) + logsumexp(lsum)
all_abf = torch.cat([lh0_abf, lh1_abf, lh2_abf, lh3_abf, lh4_abf])
return (all_abf - logsumexp(all_abf, dim=0)).exp().squeeze()
def run_pairs(genotype_df, variant_df, phenotype1_df, phenotype2_df, phenotype_pos_df,
covariates1_df=None, covariates2_df=None, p1=1e-4, p2=1e-4, p12=1e-5, mode='beta',
maf_threshold=0, window=1000000, batch_size=10000, logger=None, verbose=True):
"""Compute COLOC for all phenotype pairs"""
assert np.all(phenotype1_df.index == phenotype2_df.index)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
logger.write('Computing COLOC for all pairs of phenotypes')
logger.write(f' * {phenotype1_df.shape[0]} phenotypes')
logger.write(f' * phenotype group 1: {phenotype1_df.shape[1]} samples')
logger.write(f' * phenotype group 2: {phenotype2_df.shape[1]} samples')
if covariates1_df is not None:
assert np.all(phenotype1_df.columns == covariates1_df.index)
logger.write(f' * phenotype group 1: {covariates1_df.shape[1]} covariates')
residualizer1 = Residualizer(torch.tensor(covariates1_df.values, dtype=torch.float32).to(device))
else:
residualizer1 = None
if covariates2_df is not None:
assert np.all(phenotype2_df.columns == covariates2_df.index)
logger.write(f' * phenotype group 2: {covariates2_df.shape[1]} covariates')
residualizer2 = Residualizer(torch.tensor(covariates2_df.values, dtype=torch.float32).to(device))
else:
residualizer2 = None
if maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter (in at least one cohort)')
genotype1_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype1_df.columns])
genotype1_ix_t = torch.from_numpy(genotype1_ix).to(device)
genotype2_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype2_df.columns])
genotype2_ix_t = torch.from_numpy(genotype2_ix).to(device)
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype1_df, phenotype_pos_df, window=window)
coloc_df = []
start_time = time.time()
logger.write(' * Computing pairwise colocalization')
for phenotype1, genotypes, genotype_range, phenotype_id in igc.generate_data(verbose=verbose):
phenotype2 = phenotype2_df.loc[phenotype_id]
# copy to GPU
phenotype1_t = torch.tensor(phenotype1, dtype=torch.float).to(device)
phenotype2_t = torch.tensor(phenotype2, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes1_t = genotypes_t[:,genotype1_ix_t]
genotypes2_t = genotypes_t[:,genotype2_ix_t]
del genotypes_t
# filter monomorphic sites
m = ((genotypes1_t==0).all(1) | (genotypes1_t==1).all(1) | (genotypes1_t==2).all(1) |
(genotypes2_t==0).all(1) | (genotypes2_t==1).all(1) | (genotypes2_t==2).all(1))
genotypes1_t = genotypes1_t[~m]
genotypes2_t = genotypes2_t[~m]
impute_mean(genotypes1_t)
impute_mean(genotypes2_t)
if maf_threshold > 0:
maf1_t = calculate_maf(genotypes1_t)
maf2_t = calculate_maf(genotypes2_t)
mask_t = (maf1_t >= maf_threshold) | (maf2_t >= maf_threshold)
genotypes1_t = genotypes1_t[mask_t]
genotypes2_t = genotypes2_t[mask_t]
coloc_t = coloc(genotypes1_t, genotypes2_t, phenotype1_t, phenotype2_t,
residualizer1=residualizer1, residualizer2=residualizer2,
p1=p1, p2=p2, p12=p12, mode=mode)
coloc_df.append(coloc_t.cpu().numpy())
logger.write(' time elapsed: {:.2f} min'.format((time.time()-start_time)/60))
coloc_df = pd.DataFrame(coloc_df, columns=[f'pp_h{i}_abf' for i in range(5)], index=phenotype1_df.index)
logger.write('done.')
return coloc_df
```
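A minimal sketch of calling `run_pairs` above, assuming `tensorqtl.coloc` is importable. The two phenotype tables must share a phenotype index while their sample sets (and covariates) may differ; every name and size below is invented for illustration.
```python
import numpy as np
import pandas as pd
from tensorqtl import coloc

rng = np.random.default_rng(2)
samples1 = [f"a{i}" for i in range(40)]
samples2 = [f"b{i}" for i in range(30)]

# genotype matrix must contain the samples of both cohorts
genotype_df = pd.DataFrame(rng.integers(0, 3, size=(100, 70)),
                           index=[f"chr1_{500+i}_A_G_b38" for i in range(100)],
                           columns=samples1 + samples2)
variant_df = pd.DataFrame({"chrom": "chr1", "pos": np.arange(500, 600)},
                          index=genotype_df.index)

phenotypes = [f"gene{i}" for i in range(3)]
phenotype1_df = pd.DataFrame(rng.normal(size=(3, 40)), index=phenotypes, columns=samples1)
phenotype2_df = pd.DataFrame(rng.normal(size=(3, 30)), index=phenotypes, columns=samples2)
phenotype_pos_df = pd.DataFrame({"chr": "chr1", "tss": [520, 550, 580]}, index=phenotypes)

coloc_df = coloc.run_pairs(genotype_df, variant_df, phenotype1_df, phenotype2_df,
                           phenotype_pos_df, mode="beta")
print(coloc_df)   # columns pp_h0_abf ... pp_h4_abf, one row per phenotype
```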
#### File: tensorqtl/tensorqtl/mixqtl.py
```python
import numpy as np
import pandas as pd
import os
import sys
sys.path.insert(1, os.path.dirname(__file__))
import cis
from core import *
def trc(genotypes_t, counts_t, covariates_t=None, select_covariates=True,
count_threshold=0, imputation='offset', mode='standard', return_af=False):
"""
Inputs
genotypes_t: dosages (variants x samples)
counts_t: DESeq size factor-normalized read counts
covariates_t: covariates matrix, first column must be intercept
mode: if 'standard', parallel regression for each variant in genotypes_t
if 'multi', multiple regression for all variants in genotypes_t
Outputs:
t-statistic, beta, beta_se {af, ma_samples, ma_counts} (mode='standard')
beta, beta_se (mode='multi')
"""
nonzero_t = counts_t != 0
if imputation == 'offset':
log_counts_t = counts_t.log1p()
elif imputation == 'half_min':
log_counts_t = counts_t.clone()
log_counts_t[~nonzero_t] = log_counts_t[nonzero_t].min() / 2
log_counts_t = log_counts_t.log()
if covariates_t is not None:
if select_covariates:
# select significant covariates
b_t, b_se_t = linreg(covariates_t[nonzero_t, :], log_counts_t[nonzero_t], dtype=torch.float32)
tstat_t = b_t / b_se_t
m = tstat_t.abs() > 2
m[0] = True # keep intercept
sel_covariates_t = covariates_t[:, m]
else:
sel_covariates_t = covariates_t
# Regress out covariates from non-zero counts, and keep zeros.
# This follows the original mixQTL implementation, but may be
# problematic when count_threshold is 0.
residualizer = Residualizer(sel_covariates_t[nonzero_t, 1:]) # exclude intercept
y_t = counts_t.clone()
y_t[nonzero_t] = residualizer.transform(log_counts_t[nonzero_t].reshape(1,-1), center=True)
else:
y_t = log_counts_t
m_t = counts_t >= count_threshold
if mode == 'standard':
res = cis.calculate_cis_nominal(genotypes_t[:, m_t] / 2, y_t[m_t], return_af=False)
if return_af:
af, ma_samples, ma_counts = get_allele_stats(genotypes_t)
return *res, af, ma_samples, ma_counts
else:
return res
elif mode.startswith('multi'):
X_t = torch.cat([torch.ones([m_t.sum(), 1], dtype=bool).to(genotypes_t.device), genotypes_t[:, m_t].T / 2], axis=1)
b_t, b_se_t = linreg(X_t, y_t[m_t], dtype=torch.float32)
return b_t[1:], b_se_t[1:]
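# --- Hedged usage sketch (added, not part of the original module) ---
# Toy call of trc() in 'standard' mode: genotypes are (variants x samples)
# dosages and counts are per-sample normalized read counts, per the docstring.
# Assumes torch and the tensorqtl modules (cis, core) are importable; data are
# illustrative only.
if __name__ == '__main__':
    toy_genotypes_t = torch.tensor([[0., 1., 2., 1., 0., 2.],
                                    [2., 1., 0., 0., 1., 2.]])
    toy_counts_t = torch.tensor([12., 0., 30., 7., 4., 22.])
    res = trc(toy_genotypes_t, toy_counts_t, covariates_t=None)
    print(res)  # (t-statistic, beta, beta_se) per variant, per the docstring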
``` |
{
"source": "josephlbarnett/sgqlc",
"score": 2
} |
#### File: sgqlc/types/relay.py
```python
__docformat__ = 'reStructuredText en'
__all__ = ('Node', 'PageInfo', 'Connection', 'connection_args')
from . import Type, Interface, non_null, ArgDict, String, Int
class Node(Interface):
'''Global Object Identification based on Relay specification.
https://facebook.github.io/relay/graphql/objectidentification.htm
'''
id = non_null(id) # noqa: A003
class PageInfo(Type):
''':class:`Connection` page information.
https://facebook.github.io/relay/graphql/connections.htm
'''
end_cursor = str
start_cursor = str
has_next_page = non_null(bool)
has_previous_page = non_null(bool)
class Connection(Type):
'''Cursor Connections based on Relay specification.
https://facebook.github.io/relay/graphql/connections.htm
.. note::
This class exposes ``+=`` (in-place addition) operator to append
information from another connection into this. The usage is as
follow, if ``obj.connection.page_info.has_next_page``, then you
should query the next page using
``after=obj.connection.page_info.end_cursor``. The resulting
object should be ``obj.connection += obj2.connection``, this
will add the contents of ``obj2.connection`` to
``obj.connection``, resetting
``obj.connection.page_info.has_next_page``,
``obj.connection.page_info.end_cursor`` and
the JSON backing store, if any.
'''
__auto_register = False # do not expose this in Schema, just subclasses
page_info = non_null(PageInfo)
def __iadd__(self, other):
# NOTE: assign to list, not '+=', so ContainerType.__setattr__()
# is called to apply to backing store
has_self_nodes = hasattr(self, 'nodes') and self.nodes is not None
has_other_nodes = hasattr(other, 'nodes') and other.nodes is not None
if has_self_nodes and has_other_nodes:
self.nodes = self.nodes + other.nodes
elif has_other_nodes:
self.nodes = other.nodes
has_self_edges = hasattr(self, 'edges') and self.edges is not None
has_other_edges = hasattr(other, 'edges') and other.edges is not None
if has_self_edges and has_other_edges:
self.edges = self.edges + other.edges
elif has_other_edges:
self.edges = other.edges
has_self_page_info = hasattr(self, 'page_info') and \
self.page_info is not None
has_other_page_info = hasattr(other, 'page_info') and \
other.page_info is not None
if has_self_page_info and has_other_page_info:
self.page_info.end_cursor = other.page_info.end_cursor
self.page_info.has_next_page = other.page_info.has_next_page
elif has_other_page_info:
self.page_info = other.page_info
return self
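# Illustrative paging sketch (added, not from the original module) showing the
# '+=' pattern described in the Connection docstring; fetch_page() is a
# hypothetical helper that returns an object exposing a Connection field:
#     conn = fetch_page(after=None).connection
#     while conn.page_info.has_next_page:
#         conn += fetch_page(after=conn.page_info.end_cursor).connection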
def connection_args(*lst, **mapping):
'''Returns the default parameters for connection.
Extra parameters may be given as argument, both as iterable,
positional tuples or mapping.
By default, provides:
- ``after: String``
- ``before: String``
- ``first: Int``
- ``last: Int``
'''
pd = ArgDict(*lst, **mapping)
pd.setdefault('after', String)
pd.setdefault('before', String)
pd.setdefault('first', Int)
pd.setdefault('last', Int)
return pd
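# Illustrative (added): callers can layer extra arguments on the defaults,
# e.g. connection_args(order_by=String) yields an ArgDict containing
# 'after', 'before', 'first', 'last' and 'order_by'.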
``` |
{
"source": "josephlee222/py-project",
"score": 2
} |
#### File: josephlee222/py-project/ASP_Project_testcase.py
```python
import unittest
import grp1_ASP_Project as prog
class testMyProgram(unittest.TestCase):
def test_topTotals(self):
self.assertEqual(5934194, prog.findCountries.top3_total)
def test_topMean(self):
self.assertEqual(1978066, prog.findCountries.top3_mean)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "josephlee3454/web-scraping",
"score": 3
} |
#### File: josephlee3454/web-scraping/scraping.py
```python
import requests # requests library
# print(requests)
from bs4 import BeautifulSoup as b_soup
# print(b_soup)
URL = 'https://en.wikipedia.org/wiki/Washington_(state)'
response = requests.get(URL)
# print(response)
content = response.content # bringing in unparsed data
# print(content)
soup = b_soup(content, 'html.parser') # parsed data but still kinda krazy with a k
# print(soup.prettify())
# div id = "bodyContent" class = "mw-body-content"
# citation_content = soup.find_all(class_="noprint Inline-Template Template-Fact")
# print(citation_content)
new_list = []
def get_citations_needed_count():
citation_content = soup.find_all(class_="noprint Inline-Template Template-Fact")
new_list.append(citation_content)
number_of_occurences = len(citation_content)
# print(citation_content)
return new_list
def get_citations_needed_report():
citation_content_1 = new_list[0][0]
print("first cite" + citation_content_1.text.strip())
citation_content_2 = new_list[0][1]
print("second cite" + citation_content_2.text.strip())
citation_content_3 = new_list[0][2]
print("third cite" + citation_content_3.text.strip())
citation_content_4 = new_list[0][3]
print("fourth cite" + citation_content_4.text.strip())
citation_content_5 = new_list[0][4]
print("fith cite" + citation_content_5.text.strip())
citation_content_6 = new_list[0][5]
print("sixth cite" + citation_content_6.text.strip())
citation_content_7 = new_list[0][6]
print("seventh cite" + citation_content_7.text.strip())
if __name__ == "__main__":
get_citations_needed_count()
get_citations_needed_report()
``` |
{
"source": "josephlewis42/magpie",
"score": 2
} |
#### File: plugins/hypertext2/__init__.py
```python
import magpie
from flask import Flask, render_template, request, url_for, redirect
from magpie.plugins.abstract_plugin import AbstractPlugin
import threading
import pprint
import json
frontend_instance = None # used by flask to access methods of HTTPFrontend2
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
try:
if request.method == 'GET':
tests = frontend_instance._magpie.test_configurations.keys()
return render_template('upload.html', tests=tests, **frontend_instance._config)
else: # POST
doc = magpie.comm.Document("No User", frontend_instance.get_name())
file = request.files['upfile']
doc.add_file(file.filename,file)
test = request.form['test']
frontend_instance._magpie.submit_document(doc, test)
return render_template('results.html', document=doc, **frontend_instance._config)
except Exception as e:
frontend_instance._logger.exception(e)
print("Exception")
return (str(e))
@app.route('/config', methods=['GET'])
def configure_app():
contents = "<h3>Tests<h3>"
for testname, testconfig in frontend_instance._magpie.test_configurations.items():
contents += "<h4>{}</h4><pre>{}</pre>".format(testname, pprint.pformat(testconfig))
contents += "<br><a href='{}'>Delete</a>".format(url_for('delete_test', test_name=testname))
contents += "<br><a href='{}'>Edit</a>".format(url_for('edit_test', test_name=testname))
return render_template('configure.html', config = contents, **frontend_instance._config)
@app.route('/edit/<test_name>', methods=['GET', 'POST'])
def edit_test(test_name):
if request.method == 'GET':
# check to see if we're making a new config or editing an existing one
testval = frontend_instance._magpie.test_configurations.get(test_name, None)
if testval == None:
testval = frontend_instance._magpie.make_new_test_configuration(test_name)
testval = json.dumps(testval, sort_keys=True, indent=4, separators=(',', ': '))
return render_template('edit_test.html', test_name=test_name, test_value=testval, msg="", **frontend_instance._config)
else: # POST
testval = request.form['test']
print(testval)
newtests = json.loads(testval)
frontend_instance._magpie.test_configurations[test_name] = newtests
return redirect(url_for('configure_app'))
@app.route('/delete/<test_name>')
def delete_test(test_name):
if test_name in frontend_instance._magpie.test_configurations:
del frontend_instance._magpie.test_configurations[test_name]
return redirect(url_for('configure_app'))
@app.route('/newtest', methods=['POST'])
def new_test():
try:
name = request.form['name']
except KeyError:
name = 'New Test'
return redirect(url_for('edit_test', test_name=name))
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
DEFAULT_CONFIG = {
'port':8080,
'host':'',
'upload_instructions':"""Welcome to the Magpie submission tool, to begin upload the file you would like processed.""",
'title':"Magpie",
'results_header':"",
'results_tail':"",
'message_of_the_day':''
}
class HTTPFrontend2(AbstractPlugin):
def __init__(self):
global frontend_instance
AbstractPlugin.__init__(self, "HTTP2", "<NAME> <<EMAIL>>", 0.1, "BSD 3 Clause", DEFAULT_CONFIG, {})
frontend_instance = self
def teardown(self):
'''Shuts down the plugin, should exit all threads and do all cleanup
needed before closing.'''
def update_config(self, *args):
'''Called when the configuration for the plugin has been updated from
another source.
'''
AbstractPlugin.update_config(self, *args)
print("http2 updating config {}".format(self._config))
try:
shutdown_server()
except RuntimeError:
pass # not running yet.
self._logger.info("Starting HTTP2 Server on Port: http://{host}:{port}".format(**self._config))
background_thread = threading.Thread(target=app.run, kwargs={'host':self._config['host'], 'port':self._config['port'], 'threaded':True})
background_thread.daemon = True
background_thread.start()
``` |
{
"source": "josephlewis42/personal_codebase",
"score": 3
} |
#### File: personal_codebase/blog/dns_message.py
```python
import subprocess
DNS_SERVER = '8.8.8.8'
DOMAIN_NAME = "www.testdnsflagsetting{}.com"
NORECURSE_OPT = "+norecurse"
msg = raw_input("Enter a message, or blank to receive: ")
def read_byte(byteno):
byte = "0b"
for i in range(byteno * 8, (byteno + 1) * 8):
output = subprocess.check_output(['dig','@{}'.format(DNS_SERVER), DOMAIN_NAME.format(i), NORECURSE_OPT])
if ";; AUTHORITY SECTION:" in output:
byte += '1'
else:
byte += '0'
return int(byte, 2) # converts binary to an int
def write_byte(byteno, byte):
to_write = bin(byte)[2:].zfill(8) # gets binary representation of a byte
for loc, b in enumerate(to_write):
if b == '1':
i = (byteno * 8) + loc
subprocess.check_output(['dig','@{}'.format(DNS_SERVER), DOMAIN_NAME.format(i)])
print "Wrote 1 at: {}".format(i)
if len(msg) == 0:
message = ""
for byte in range(1,read_byte(0) + 1): # first byte is length of message
message += chr(read_byte(byte))
if len(message) > 0:
print message
else:
print "[No Message]"
else:
total = len(msg)
write_byte(0, total)
for loc, char in enumerate(msg):
write_byte(loc + 1, ord(char))
print "Message written"
```
#### File: python/auto_summary/AutoSummary.py
```python
import sys
import re
import argparse
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, <NAME>"
__license__ = "BSD"
TOP_HUNDRED_SCORE = 0
EN_TOP = '''the be to of and a in that have i it for not on with he as you do at
this but his by from they we say her she or an will my one all would
there their what so up out if about who get which go me when make can
like time no just him know take person into year your good some could
them see other than then now look only come its over think also back
after use two how our work first well way even new want because any
these give day most us'''
def chars_per_word(sentence):
'''Returns the average characters per word.'''
return float(len(sentence)) / float(len(sentence.split(' ')))
def clean_text(text,leavepunct=False):
# Strip to a-z A-Z (TODO: I18N).
text = text.replace("\n", " ")
if leavepunct:
return re.sub("[^a-z.!\? ]", " ", text.strip().lower())
return re.sub("[^a-z ]", " ", text.strip().lower())
def word_frequency(text):
'''Counts the frequenc of words in the piece of text, and returns
a dict for each word (a-z) lowercased where the key represents the
word and the number of times the word is found the value.
'''
words = {}
tmp = clean_text(text)
# Cut to words.
for word in tmp.split():
if word in words:
words[word] += 1
else:
words[word] = 1
return words
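# Illustrative (added): word_frequency("The cat, the hat!") -> {'the': 2, 'cat': 1, 'hat': 1}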
def set_words_to_value(word_dict, top=EN_TOP, value=TOP_HUNDRED_SCORE):
'''Sets the given words to the given value in the given word_dict.
The default is to set the top hundred words in english to the
TOP_HUNDRED_SCORE.
'''
j = word_frequency(top).keys() # get the words in the top hundred text.
# remove the top 100 words
for w in j:
word_dict[w] = value
def sentences_in(text):
'''Returns a list of sentences in the text.
'''
text = text.replace("\n", " ")
return re.split('[\?.!]', text)
def score_sentence(sentence, words):
'''The scoring function, given a dictoinary of word:value pairs,
creates a score for each sentence.
'''
# Score value based upon words and frequencies of those words.
tmp = clean_text(sentence)
total = 0
for word in tmp.split():
if word in words:
total += words[word]
# Make the total in to a percentage.
try:
total /= float(len(tmp.split()))
# Secret ingredient, higher characters per word generally means
# more important sentence.
total *= chars_per_word(tmp)
return total
except ZeroDivisionError:
return -100
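# Illustrative (added): with words = {'cats': 1, 'are': 0, 'great': 1},
# score_sentence("Cats are great", words) = (1+0+1)/3 * (14/3) ~= 3.11,
# i.e. the mean word score scaled by the average characters per word.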
def top_sentences(text, word_freq_dict):
'''Returns a sorted list with the top rated sentences first, that
contains the tuples (score, sentence_location, sentence_text)
For example, the sentence "Call me Ishmael" would come back:
(1.8304283, 0, "Call me Ishmael.")
0 would mean it was the first sentence in the text, and it had a
score of 1.83...
'''
sentences = [] # array of tuples (total score, sentence num, sentence text)
known_sentences = set()
currs = 0
for s in sentences_in(text):
currs += 1 # Increment the current sentence.
total = score_sentence(s, word_freq_dict)  # score the sentence; duplicate sentences are skipped below
s = s.strip()
if s not in known_sentences:
sentences.append((total, currs, s+"."))
known_sentences.add(s)
# Sort highest rated sentences to lowest.
sentences = sorted(sentences)
sentences.reverse()
return sentences
def __combine_summary(summary):
'''Combines a summary in to a meaningful paragraph. A summary is an
array of tuples with values of (position of sentence, text). This
creates better summary paragraphs by ordering important sentences
in the order they were originally.
'''
summary = sorted(summary)
paragraph = ""
for l, s in summary:
paragraph += s + " "
return paragraph
def percentage_top(percentage, sorted_sentences):
'''Returns the top rated percentage of the given sentences, in
paragraph form.
i.e. to get the top 25 percent of a text, call with:
percentage_top(25, <list from top_sentences>)
'''
percentage = percentage / 100.0
num_sentences = int(len(sorted_sentences)*percentage) + 1
if num_sentences >= len(sorted_sentences):
num_sentences = len(sorted_sentences)
# Create summary (top x sentences)
summary = []
for j in range(num_sentences):
t,l,s = sorted_sentences[j]
summary.append((l,s))
return __combine_summary(summary)
def top_words(num_words, sorted_sentences):
'''Returns x number of words from the top sentences.'''
summary = []
words = 0
try:
for t,l,s in sorted_sentences:
if words >= num_words:
break
words += len(s.split())
summary.append((l,s))
except IndexError:
pass
return __combine_summary(summary)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Creates summaries from documents by magic.')
parser.add_argument('-p','--percent', type=int, default=None,
help='the percent of the original document to give as a summary')
parser.add_argument('-w', '--words', type=int, default=None,
help='the number of words to output as a summary')
parser.add_argument('PATH', help='the path of the textfile to read from')
args = parser.parse_args()
try:
if args.PATH:
text = open(args.PATH).read()
words = word_frequency(text)
set_words_to_value(words)
sentences = top_sentences(text, words)
if args.words:
print top_words(args.words, sentences)
if args.percent:
if args.words:
print "\n\n"
print percentage_top(args.percent, sentences)
except IOError:
print "File given can't be found."
```
#### File: personal_codebase/python/distributed_hash_table.py
```python
import SocketServer, socket, subprocess, sys  # socket is needed by pipe_command()
from threading import Thread
HASH_LENGTH = 32
NUM_HASHES = 2**HASH_LENGTH
START_HASH = 0
END_HASH = 0
port = 8000
next_host = ""
next_port = ""
prev_host = ""
prev_port = ""
hashed_items = {}
def recv_all(socket):
total_data=[]
while True:
data = socket.recv(1024)
if not data: break
total_data.append(data)
return "".join(total_data)
def pipe_command(request, host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
sock.send(request)
response = recv_all(sock)
sock.close()
return response
class SingleTCPHandler(SocketServer.BaseRequestHandler):
"One instance per connection. Override handle(self) to customize action."
def handle(self):
# self.request is the client connection
cmd = self.read_token() # read the command
cmd = cmd.upper()
if cmd == "SEARCH":
self.search()
elif cmd == "SET_LEFT":
self.set_left()
elif cmd == "SET_RIGHT":
self.set_right()
elif cmd == "STORE":
self.store()
elif cmd == "DELETE":
self.delete()
self.request.close()
def read_token(self):
"""Reads a token from the input, discarding beginning whitespace and
consuming a single whitepsace character after the token
"""
WHITESPACE = " \t\r\n"
data = ""
char = self.request.recv(1)
while char in WHITESPACE:
char = self.request.recv(1)
while char not in WHITESPACE and char is not None:
data += char
char = self.request.recv(1)
return data
def hash_in_range(self, hashvalue):
return hashvalue >= START_HASH and hashvalue < END_HASH
def search(self):
hashvalue = int(self.read_token())
if self.hash_in_range(hashvalue):
if hashvalue in hashed_items:
self.request.send(hashed_items[hashvalue])
else:
self.request.send(pipe_command("SEARCH " + str(hashvalue), next_host, next_port))
def set_left(self):
# bind to the module-level neighbour variables instead of creating locals
global prev_host, prev_port
prev_host = self.read_token()
prev_port = int(self.read_token())
def set_right(self):
global next_host, next_port
next_host = self.read_token()
next_port = int(self.read_token())
def store(self):
hashvalue = int(self.read_token())
data = recv_all(self.request)
if self.hash_in_range(hashvalue):
hashed_items[hashvalue] = data
print("stored: {}".format(hashvalue))
else:
self.request.send(pipe_command("STORE " + str(hashvalue) + " " + data, next_host, next_port))
def delete(self):
hashvalue = int(self.read_token())
if self.hash_in_range(hashvalue):
hashed_items[hashvalue] = None
print("deleted: {}".format(hashvalue))
else:
self.request.send(pipe_command("DELETE " + str(hashvalue) + " ", next_host, next_port))
class SimpleServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# Ctrl-C will cleanly kill all spawned threads
daemon_threads = True
# much faster rebinding
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
if __name__ == "__main__":
nodeid = int(sys.argv[1])
maxnodes = int(sys.argv[2])
port = int(sys.argv[3])
keyspace = NUM_HASHES / maxnodes
START_HASH = nodeid * keyspace
END_HASH = (nodeid + 1) * keyspace
print("taking hashes between: {} and {} on port {}".format(START_HASH, END_HASH, port))
server = SimpleServer(("", port), SingleTCPHandler)
server.serve_forever()
```
#### File: personal_codebase/python/dns.py
```python
import socket
import time
UDP_IP = 'localhost'
UDP_PORT = 53 #53 is the standard for DNS.
BUFFER_SIZE = 1024
LOG_FILENAME = "DNS.log"
qdb = {'google.p2p':'172.16.17.32','home.p2p':'192.168.127.12','localhost.p2p':'127.0.0.1'}
class DNS():
''' This class is for creating a simple DNS server.'''
def start(self):
'''Starts the DNS and gets it listening.'''
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind( (UDP_IP, UDP_PORT) )
print("Server starts at: %s:%d" % (UDP_IP, UDP_PORT))
self.log_time()
except socket.error,e:
print("Error connecting to socket: %s" % (e))
exit()
while True: #Accept Unlimited Connections
try: #If the connection closes before ready it will cause an error.
#Receive data
data, addr = s.recvfrom(BUFFER_SIZE)
#If there is garbage notify the admin.
if not data: break
#Get a response.
query, ip, packet = self.proc_data(data)
#Send the packet back.
self.log(str(addr) + " : " + query + " <--> " + ip)
s.sendto(packet, addr)
except KeyboardInterrupt:
print("Control + c pressed exit() called...")
self.log_time()
exit()
except:
self.log("Connection error, possibly a portscan.")
self.log_time()
def log_time(self):
'''Logs the current time.'''
self.log('Time UTC: ' + str(time.asctime(time.gmtime())) + "\n")
self.log('Time Local: ' + str(time.asctime(time.localtime())) + "\n")
def log(self, data):
'''Logs any data sent to the file specified by LOG_FILENAME.'''
print( str( data.replace("\n", "") ) ) #Give visual feedback.
log_file = open(LOG_FILENAME, 'a')
log_file.write( str(data) )
log_file.close()
def proc_data(self, data):
''' Processes the data. Returns a tuple of (query name, resolved IP,
response packet as a string).
This is what the original packet looks like (taken from the rfc).
http://www.faqs.org/rfcs/rfc1035.html
1 1 1 1 1 1
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ID | char 0,1
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
|QR| Opcode |AA|TC|RD|RA| Z | RCODE | char 2,3
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| QDCOUNT | char 4,5
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ANCOUNT | char 6,7
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| NSCOUNT | char 8,9
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
| ARCOUNT | char 10,11
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
'''
#First 16 bits are query ID
qid = data[0:2]
#Next sixteen bits are query info (changes to strings of bits)
first = bin(ord(data[2]))[2:]
second = bin(ord(data[3]))[2:]
#Query or response?
qr = bool(int(first[0]))
#Opcode (0,1 or 2) remove all but first four bits
#opcode = eval('0b'+first[1:5])
opcode = (ord(data[2]) >> 3) & 15
#QDCOUNT an unsigned 16 bit integer specifying the number of
# entries in the question section.
qdcount = data[4:6]
#ANCOUNT an unsigned 16 bit integer specifying the number of
# resource records in the answer section.
ancount = data[6:8]
#NSCOUNT an unsigned 16 bit integer specifying the number of name
# server resource records in the authority records
# section.
nscount = data[8:10]
#ARCOUNT an unsigned 16 bit integer specifying the number of
# resource records in the additional records section.
arcount = data[10:12]
#Query (for now assume that there is only one)
#Query starts with a number of characters, then prints that
#number of chars, then another number, then prints that, etc
#until we get a 0
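# Worked example (added for clarity): a query for "home.p2p" arrives with the
# name encoded as length-prefixed labels, "\x04home\x03p2p\x00", starting at
# byte 12; the loop below rebuilds the dotted name "home.p2p" from those labels.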
query = ''
pos=12
length=ord(data[pos])
while length:
query += data[pos+1 : pos+length+1] + '.'
pos += length + 1
length = ord(data[pos])
#Remove trailing dot.
query = query[:-1]
#Only look up our domains
if query.endswith('.p2p') and not query.endswith('.icann.p2p'):
try:
if query.startswith('www.'): #Save db space by not storing wwws
query = query[4:]
ip = qdb[query]
except:
self.log("Query not in DB: %s" % (query))
ip = '0.0.0.0'
else:
try:
ip = socket.gethostbyname(query)
except: #Can't reach dns, just send back nothing.
ip = '0.0.0.0'
#CONSTRUCT RESPONSE:
response = ''
#Add the query id
response += qid
#Add response header.
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
#|QR| Opcode |AA|TC|RD|RA| Z | RCODE |
response += chr(0b10000000) + chr(0b0000000)
#Add qd count
response += qdcount
#Add answer count (same as qd count) FIXME Will fail with more than one msg.
response += qdcount
#Add aanswer coundt
response += chr(0b0) + chr(0b0) + chr(0b0) + chr(0b0)
#Add original question
response += data[12:]
#Add pointer for message compression:
#See RFC Section 4.1.4. Message compression
response += chr(0b11000000) + chr(0b1100)
#TYPE two octets containing one of the RR type codes. This
# field specifies the meaning of the data in the RDATA
# field.
response += chr(0b00000000) + chr(0b00000001)
#CLASS two octets which specify the class of the data in the
# RDATA field.
response += chr(0b00000000) + chr(0b00000001)
#TTL a 32 bit unsigned integer that specifies the time
# interval (in seconds) that the resource record may be
# cached before it should be discarded. Zero values are
# interpreted to mean that the RR can only be used for the
# transaction in progress, and should not be cached.
#
#This should be the same length of time until next DNS cache update, for
#now don't cache.
response += chr(0b00000000) + chr(0b00000000) + chr(0b00000000) + chr(0b00000000)
#RDLENGTH an unsigned 16 bit integer that specifies the length in
# octets of the RDATA field.
#
#For now this is 4 bytes (size of an ip address)
response += chr(0b00000000) + chr(0b00000100)
#RDATA a variable length string of octets that describes the
# resource. The format of this information varies
# according to the TYPE and CLASS of the resource record.
# For example, the if the TYPE is A and the CLASS is IN,
# the RDATA field is a 4 octet ARPA Internet address.
response += socket.inet_aton(ip)
return (query, ip, response)
if __name__ == "__main__":
print("Simple DNS server written by <NAME> <<EMAIL>>")
value = ""
while value != 'y' and value != 'n':
value = raw_input("Do you want to bind externally y/n?")
if value == 'y':
#Find our external ip
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('google.com', 80))
UDP_IP = s.getsockname()[0]
else:
UDP_IP = 'localhost'
DNS().start()
```
#### File: python/gamebot/games_db.py
```python
import sqlite3
import time
import re
conn = None # Connection to the games db.
cursor = None #The cursor for games
def setup():
'''Sets up the games database (connects, etc.)'''
global conn
global cursor
conn = sqlite3.connect('games_db.sqlite')
conn.row_factory = sqlite3.Row #Produce dicts of data rather than tuples
cursor = conn.cursor()
#If the database isn't set up, create the new tables.
try:
#Is this database set up?
cursor.execute("select * from Games")
except sqlite3.OperationalError:
'''
==Database Layout==
TABLE: Games
name (name of the game)
date (date last accessed)
file (blob of this file)
'''
cursor.execute('''create table Games (name text, date numeric, file blob)''')
setup()
def cleanup(t):
'''Removes games older than the given time (UNIX time).'''
#Remove all deleted nodes
cursor.execute("DELETE FROM Games WHERE date<?",(t,))
# Clean up the database.
cursor.execute("VACUUM")
conn.commit()
def empty():
'''Completely cleans the database.'''
cleanup(time.time())
def __clean_name(name):
return re.sub('[^a-z1-9]', '', name.lower().strip())
def add(name, blob):
'''Adds a game to the database.'''
b = sqlite3.Binary(blob) #Convert the input to a blob.
#Clean up the name to be very simple (for easier searching)
name = __clean_name(name)
cursor.execute("INSERT INTO Games (name, date, file) VALUES (?, ?, ?)", (name, int(time.time()), b))
conn.commit()
def find(name):
'''Finds the game with the given name, returns the binary if
possible, if not returns None.
'''
name = __clean_name(name)
cursor.execute("SELECT * FROM Games WHERE Name=?",(name,))
row = cursor.fetchone()
if row != None:
return row['file']
return None
def random_names():
'''Returns a list of the names of 10 random games.'''
cursor.execute("SELECT name FROM Games ORDER BY RANDOM() LIMIT 10")
names = []
for n in cursor.fetchall():
names.append(n['name'])
return names
```
#### File: python/gamebot/game_searcher.py
```python
import codescraper
DEBUGGING = True
class NoResultsException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class GameSearcher:
'''The game searcher class provides a framework for searching Google for
swfs.'''
_page_list = []
_list_loc = 0
_next_page = 0
_game_query = ""
_current_url = "" #The current swf's url.
max_recursion = 30 #Number of pages to search until saying there is no game
current_recursion = 0
def _clear_current_search(self):
'''Clears the current game search.'''
self._page_list = []
self._list_loc = 0
self._next_page = 0
self._game_query = ""
def _get_more_games(self):
#Get google page, and get potential paths
query = self._game_query + "&start=" + str(self._next_page)
page_text = codescraper.fetch_page(query)
#There are 10 results per page, so the next page should be 10 results further along.
self._next_page += 10
#This gets all the text between the tags given on the page.
url_list = codescraper.return_between("<cite>", "</cite>", page_text)
if url_list == []:
raise NoResultsException, "No results found!"
for a in url_list:
#Google sometimes puts html tags in their cite tags like <b>
#since these will become messy when you try to create urls from it
#we need to remove them.
a = codescraper.remove_HTML_tags(a)
self._page_list.append(a)
def _get_next_game_url(self):
'''Creates a url for the next game on the list.'''
try:
url = 'http://' + self._page_list[self._list_loc]
except IndexError: #Index out of bounds.
self._get_more_games()
return self._get_next_game_url()
self._list_loc += 1
return url
def get_next_game(self):
self.current_recursion += 1
#Get the next game url
url = self._get_next_game_url()
if url == None:
return None
#Get the content type as told by the webserver.
ct = codescraper.url_content_type(url)
if ct in ["application/x-shockwave-flash", "text/html; charset=iso-8859-1"]:
self._current_url = url #Remember the current url.
return url
return self.get_next_game()
def get_current_game(self):
return self._current_url
def search_for_games(self, query):
'''Searches for games with the current query'''
#Clean the current search
self._clear_current_search()
#Build google query
query = query.replace("%20", "+")
self._game_query = "http://www.google.com/search?q=" + query + "+filetype%3Aswf&hl=en&num=10"
#Populate the list so the first request will be faster.
self._get_more_games()
if __name__ == "__main__":
print("Running breaking tests.")
g = GameSearcher()
g.search_for_games("blah blah all good things happen to great people hannah")
while(g._list_loc < 1):
print g.get_next_game()
print("Running bubble tanks tests.")
g.search_for_games("bubble%20tanks")
while(g._list_loc < 1):
print g.get_next_game()
```
#### File: python/gamebot/games_server.py
```python
import games_db
import game_searcher
import codescraper
import re
import email_fetch
gs = game_searcher.GameSearcher()
ef = None
sofar = 0 #served so far
inst = '''
<span style="text-align: center;"><h1>Game Bot</h1></span>
<p>Thank you for using GAME BOT, the best (and probably only) automated
game fetching service in the world. The game you requested has been
attached, if nothing attached, GAME BOT couldn't find the game you requested.</p>
<p><i>Sharing is caring!</i> Please forward these games to your allies, rather than
having them download from GAME BOT so we can keep bandwidth costs down.</p>
<p>Better yet, just start an email account (we suggest Gmail) which you
and your friends share the password to, then use it to fetch and store
games you all like, creating a private, secure, game collection.</p>
<hr>
<h3> FAQs </h3>
<p>
Q: This isn't the game I asked for, what happened?<br>
A: GAME BOT is after all a computer, and sometimes it gets fooled.<br>
<br>
Q: How do I use GAME BOT?<br>
A: Send GAME BOT an email message where the subject is the game you
want to play, GAME BOT will find it on the Internet and send it
back to you soon.<br>
<br>
Q: Who actually owns these games?<br>
A: All games are Copyright their respective creators, GAME BOT simply
passes what is already available on the Internet on to you.<br>
<br>
Q: How do I contribute?<br>
A: In the future GAME BOT may ask you to complete a survey, that will
pay the maker of GAME BOT to keep it running and updated, but for
now, just enjoy it :)<br>
<br>
Q: How do I play these games?<br>
A: Download the file attached, then open it up in Firefox, Internet Explorer,
Chrome, or any other Flash enabled web browser.<br>
<br>
Q: The game attached does not work, why?<br>
A: Some game authors only allow their games to be played on the sites
they put them on, there is nothing GAME BOT can do about this.<br>
</p>
'''
def __clean_name(name):
return re.sub('[^a-z1-9\w+]', '', name.lower().strip())
def fetch_game(name):
print "Looking for: %s" % (name)
bin = games_db.find(name)
if bin:
print " > Found in database."
return bin
else:
try:
name = __clean_name(name)
gs.search_for_games(name)
game_loc = gs.get_next_game()
print " > Finding online at: %s" % (game_loc)
games_db.add(name, codescraper.fetch_page(game_loc))
return games_db.find(name)
except game_searcher.NoResultsException:
return None
def build_body():
'''Builds the body of the message.'''
body = inst + '''
<hr>
<h3> Random Games You Might Like </h3>
<ul>
'''
for i in games_db.random_names():
body += " <li> "+ i + "</li>"
body += "</ul>"
return body
def mail_caller(msg):
global sofar
j = email_fetch.SimpleMessage(msg)
sofar += 1
print ("%s : %s is searching for: %s" % (sofar, j.personfrom, j.subject))
game = fetch_game(j.subject)
attach = {}
if game != None:
attach['%s.swf' % (__clean_name(j.subject))] = game
ef.send_mail(j.personfrom, j.subject, build_body(), full_attachments=attach)
if __name__ == "__main__":
#while 1:
# fetch_game(raw_input("What game do you want? "))
email_fetch.DEBUG = True #turn on debugging for email fetch (verbose mode)
print "If this is your first time running the server, please configure"
print "it using config_file.ini, config_file.ini is already set up for"
print "gmail (just change your email and password!)"
ef = email_fetch.EmailInterface(time=3, callback=mail_caller)
#Have the email fetcher set up by the ini file.
import ini_reader
ini_reader.setup_fetcher(ef)
try:
ef.run()
except KeyboardInterrupt:
print "Server terminated, bye."
```
#### File: Genetic Programming/Examples/pyevolve_ex17_gtree.py
```python
from pyevolve import GSimpleGA
from pyevolve import GTree
from pyevolve import Crossovers
from pyevolve import Mutators
import time
import random
def eval_func(chromosome):
score = 0.0
# If you want to add score values based
# in the height of the Tree, the extra
# code is commented.
#height = chromosome.getHeight()
for node in chromosome:
score += (100 - node.getData())*0.1
#if height <= chromosome.getParam("max_depth"):
# score += (score*0.8)
return score
def run_main():
genome = GTree.GTree()
root = GTree.GTreeNode(2)
genome.setRoot(root)
genome.processNodes()
genome.setParams(max_depth=3, max_siblings=2, method="grow")
genome.evaluator.set(eval_func)
genome.crossover.set(Crossovers.GTreeCrossoverSinglePointStrict)
ga = GSimpleGA.GSimpleGA(genome)
ga.setGenerations(100)
ga.setMutationRate(0.05)
ga.evolve(freq_stats=10)
print ga.bestIndividual()
if __name__ == "__main__":
run_main()
```
#### File: Genetic Programming/Finished Programs/2010-08-25 GP Test One.py
```python
from pyevolve import *
import math
error_accum = Util.ErrorAccumulator()
def gp_add(a, b):
return a+b
def gp_sub(a,b):
return a-b
def gp_mul(a, b):
return a*b
def gp_div(a,b):
'''
"Safe" division, if divide by 0, return 1.
'''
if b == 0:
return 1.0
else:
return a/(b*1.0)
def rangef(min, max, step):
result = []
while 1:
result.append(min)
min = min+step
if min>=max:
break
return result
def eval_func(chromosome):
global error_accum
error_accum.reset()
code_comp = chromosome.getCompiledCode()
for x in rangef(-1, 1, .1):
evaluated = eval(code_comp)
target = x**2 + x + 1
error_accum += (target, evaluated)
return error_accum.getRMSE()
def main_run():
genome = GTree.GTreeGP()
genome.setParams(max_depth=5, method="ramped")
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.setParams(gp_terminals = ['x', '1'], gp_function_prefix = "gp")
ga.setMinimax(Consts.minimaxType["minimize"])
ga.setGenerations(100)
ga.setMutationRate(0.08)
ga.setCrossoverRate(1.0)
ga.setPopulationSize(100)
ga.evolve(freq_stats=5)
print ga.bestIndividual()
if __name__ == "__main__":
main_run()
```
#### File: personal_codebase/python/image_generator.py
```python
import os
import random
def write_pgm(filename, content, width):
'''Writes a pgm image file from the given content, with width being
given. The contnet written is the value at the content at the position,
if contnet was [1,0,1,0] and width was 2 the pixels would end up being
10
10
1 being on and 0 being off (black and white).
content can be any iteratable object.
'''
#Create folders if necessary
if os.sep in filename:
filepath = filename[:filename.rfind("/")]
if not os.path.exists(filepath):
os.makedirs(filepath)
#Set up variables
height = int(len(content) / width)
j = 0
line = ""
#Open up the new file.
with open(filename, 'w') as f:
#File header: "P1" is the plain PBM bitmap magic number (despite the .pgm name)
f.write("P1\n%i %i\n" % (width, height))
for i in content:
if j < width:
line += str(i) + " "
j += 1
else:
f.write(line + "\n")
line = ""
j = 0
#Append the last line.
f.write(line)
def add_one(array):
add = True
for i in range(len(array)):
if add:
if array[i] == 1:
array[i] = 0
else:
array[i] = 1
add = False
def main_straight_through():
a = [0]*100
i = 0
while a[99] != 1:
add_one(a)
i += 1
print i
write_pgm("%i/%i/%i.pgm"%(i/1000, i/100, i), a, 10)
def main_random(height=10, width=10, num_to_gen=10000):
length = height * width
num_so_far = 0
try:
while num_so_far < num_to_gen:
my_rand = random.randint(0, 2**length)
a = bin(my_rand)[2:]
to_add = "0" * (length - len(a)) #Padding front with zeros.
a = to_add + a
write_pgm("randout_%s.pgm" % (hex(my_rand)[2:-1]), a, width)
num_so_far += 1
except KeyboardInterrupt:
print("Control + C pressed.")
print("Generated %i random images." % (i))
if __name__ == "__main__":
a = raw_input("[R]andom or [S]equential Image Generation?")
if a.lower() == 'r':
h = input("Height in pixels? ")
w = input("Width in pixels? ")
n = input("How many random images do you want to generate? ")
print("The images will be generated in the present working directory")
print("Their respective ids will be the hex value of their binary represenation.")
main_random(h,w,n)
else:
print("Generating sequential images, warning, this happens very fast.")
print("Press Control + C to stop generation...")
main_straight_through()
```
#### File: Code/Functions/Standard.py
```python
import math
import OpenCalc
#Define Constants
#Define Functions
def abs(num):
return math.fabs(num)
```
#### File: personal_codebase/python/pretty_diff.py
```python
import difflib
import sys
import webbrowser
import sys
import tempfile
def generate_diff(lines1, lines2):
'''Generates a pretty diff and opens the system web browser to show it.
lines1 - a list of strings for the first file's lines.
lines2 - a list of strings for the second file's lines.
'''
diff = difflib.HtmlDiff().make_file(fromlines, tolines, "Original", "New")
with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as tmp:
tmp.writelines(diff)
webbrowser.open(tmp.name)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("usage: {} original_file new_file".format(sys.argv[0]))
exit(2)
fromlines = open(sys.argv[1], 'U').readlines()
tolines = open(sys.argv[2], 'U').readlines()
generate_diff(fromlines, tolines)
```
#### File: python/py_basic_ide/MAIN.pyw
```python
import wx
#YYYYMMDD.HHMMSS of this release.
__version__ = 20100528
human_version = "2010-05-28"
# begin wxGlade: extracode
import pyBASIC
import threading
import time
thread_is_on = False
kill_thread = False
file_location = ""
debug_mode = False
input_queue = ""
OUTPUT_EVENT_ID = wx.NewId()
OPEN_ID = wx.NewId()
SAVE_AS_ID = wx.NewId()
SAVE_ID = wx.NewId()
RUN_ID = wx.NewId()
DEBUG_ID = wx.NewId()
ABOUT_ID = wx.NewId()
UPDATES_ID = wx.NewId()
class OutputEvent ( wx.PyEvent ):
'''An event that handles output from the parser.'''
def __init__(self, standard_output, error_output):
wx.PyEvent.__init__(self)
self.SetEventType(OUTPUT_EVENT_ID)
self.stdout = standard_output
self.stderr = error_output
def OUTPUT_EVENT(win, func):
"""Define Result Event."""
win.Connect(-1, -1, OUTPUT_EVENT_ID, func)
class BASICThread ( threading.Thread ):
'''This thread runs the BASIC program and manages messages to and
from it.'''
def __init__(self, program_text, notify_window):
self.program_text = program_text
threading.Thread.__init__( self )
self._notify_window = notify_window
def stdout(self, text):
'''Handles the stdout questioning for the thread.'''
wx.PostEvent(self._notify_window, OutputEvent(text, ""))
def stderr(self, text):
'''Handles the stderr for the thread.'''
wx.PostEvent(self._notify_window, OutputEvent("", text))
def input(self, text):
'''Handles input for the thread.'''
global input_queue
self.stdout(text)
while input_queue == "":
time.sleep(.1)
iq = input_queue
input_queue = ""
return iq
def kill( self ):
'''Gives the thread a suicide mission.'''
return kill_thread
def run ( self ):
import pyBASIC
global program_text
global thread_is_on, kill_thread
thread_is_on = True
#Replace handlers with our own.
pyBASIC.parser.error_fx = self.stderr
pyBASIC.runner.error_fx = self.stderr
pyBASIC.runner.input_fx = self.input
pyBASIC.runner.output_fx = self.stdout
pyBASIC.runner.check_killed = self.kill
pyBASIC.set_debug( debug_mode )
print "Compileing"
doc = pyBASIC.tokenize_document(self.program_text)
print "Running"
try:
pyBASIC.run(doc)
except:
self.stderr("FATAL ERROR, Quitting")
self.stdout("-------------\nABNORMAL EXIT")
print "Quitting"
kill_thread = False
thread_is_on = False
# end wxGlade
class main_frame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: main_frame.__init__
kwds["style"] = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.MAXIMIZE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN
wx.Frame.__init__(self, *args, **kwds)
self.window_1 = wx.SplitterWindow(self, -1, style=wx.SP_3D|wx.SP_BORDER)
self.window_1_pane_2 = wx.Panel(self.window_1, -1)
self.window_1_pane_1 = wx.Panel(self.window_1, -1)
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
self.file_menu = wx.Menu()
self.open_document_item = wx.MenuItem(self.file_menu, OPEN_ID, "Open\tCtrl+o", "Opens an existing document.", wx.ITEM_NORMAL)
self.file_menu.AppendItem(self.open_document_item)
self.file_menu.AppendSeparator()
self.save_document_item = wx.MenuItem(self.file_menu, SAVE_ID, "Save\tCtrl+s", "Saves the current document you are working on.", wx.ITEM_NORMAL)
self.file_menu.AppendItem(self.save_document_item)
self.save_document_as_item = wx.MenuItem(self.file_menu, SAVE_AS_ID, "Save As\tCtrl+Shift+s", "Saves the document you are working with in a new location.", wx.ITEM_NORMAL)
self.file_menu.AppendItem(self.save_document_as_item)
self.frame_1_menubar.Append(self.file_menu, "File")
wxglade_tmp_menu = wx.Menu()
self.run_document_item = wx.MenuItem(wxglade_tmp_menu, RUN_ID, "Run\tCtrl+r", "Runs the currently open document.", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendItem(self.run_document_item)
self.debug_button = wx.MenuItem(wxglade_tmp_menu, DEBUG_ID, "Debug\tCtrl+d", "Shows debug statments to help you figure out whats going wrong.", wx.ITEM_CHECK)
wxglade_tmp_menu.AppendItem(self.debug_button)
self.frame_1_menubar.Append(wxglade_tmp_menu, "Program")
wxglade_tmp_menu = wx.Menu()
self.about_button = wx.MenuItem(wxglade_tmp_menu, ABOUT_ID, "About", "About pyBASIC IDE", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendItem(self.about_button)
self.check_updates_menuitem = wx.MenuItem(wxglade_tmp_menu, UPDATES_ID, "Check For Updates", "Checks for updates.", wx.ITEM_NORMAL)
wxglade_tmp_menu.AppendItem(self.check_updates_menuitem)
self.frame_1_menubar.Append(wxglade_tmp_menu, "Help")
self.SetMenuBar(self.frame_1_menubar)
# Menu Bar end
self.editor_text_ctrl = wx.TextCtrl(self.window_1_pane_1, -1, "PRINT \"Hello World\"", style=wx.TE_MULTILINE)
self.output_text_ctrl = wx.TextCtrl(self.window_1_pane_2, -1, "", style=wx.TE_MULTILINE)
self.input_text_ctrl = wx.TextCtrl(self.window_1_pane_2, -1, "")
self.submit_button = wx.Button(self.window_1_pane_2, -1, "Submit")
self.error_text_ctrl = wx.TextCtrl(self.window_1_pane_2, -1, "", style=wx.TE_MULTILINE)
self.__set_properties()
self.__do_layout()
wx.EVT_MENU(self, OPEN_ID, self.open_document)
wx.EVT_MENU(self, SAVE_ID, self.save_document)
wx.EVT_MENU(self, SAVE_AS_ID, self.save_document_as)
wx.EVT_MENU(self, RUN_ID, self.run_basic)
wx.EVT_MENU(self, DEBUG_ID, self.debug_activate)
wx.EVT_MENU(self, ABOUT_ID, self.about_program)
wx.EVT_MENU(self, UPDATES_ID, self.check_updates)
wx.EVT_BUTTON(self, self.submit_button.GetId(), self.submit_text)
# end wxGlade
# Set up event handler for any worker thread results
OUTPUT_EVENT(self,self.OnOutput)
#Bind the input control with the enter key
self.input_text_ctrl.Bind(wx.EVT_KEY_DOWN, self.input_key_press)
def input_key_press(self, event):
'''
Checks for the enter key, if it has been pressed then the program will
submit the value that is in the pad input box
'''
keycode = event.GetKeyCode()
#If user pressed enter or return spawn the submit input event
if keycode == wx.WXK_RETURN or keycode == wx.WXK_NUMPAD_ENTER:
self.submit_text(event)
event.Skip()
def OnOutput(self, event):
'''Handles changing the display when an event is post.'''
if event.stderr != "":
self.error_text_ctrl.AppendText( str(event.stderr) + "\n")
if event.stdout != "":
if event.stdout.endswith("\n"):
self.output_text_ctrl.AppendText( str(event.stdout))
else:
self.output_text_ctrl.AppendText( str(event.stdout) + "\n")
def __set_properties(self):
# begin wxGlade: main_frame.__set_properties
self.SetTitle("pyBASIC - Integrated Development Enviornment")
self.SetSize((700, 500))
self.editor_text_ctrl.SetToolTipString("Write your code here.")
self.output_text_ctrl.SetBackgroundColour(wx.Colour(0, 0, 0))
self.output_text_ctrl.SetForegroundColour(wx.Colour(255, 255, 255))
self.output_text_ctrl.SetToolTipString("Output will appear here")
self.input_text_ctrl.SetToolTipString("Input your text here.")
self.error_text_ctrl.SetForegroundColour(wx.Colour(255, 0, 0))
self.error_text_ctrl.SetToolTipString("Errors will appear here")
# end wxGlade
def __do_layout(self):
# begin wxGlade: main_frame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2.Add(self.editor_text_ctrl, 1, wx.ALL|wx.EXPAND, 4)
self.window_1_pane_1.SetSizer(sizer_2)
sizer_3.Add(self.output_text_ctrl, 2, wx.ALL|wx.EXPAND, 4)
sizer_4.Add(self.input_text_ctrl, 1, wx.ALL, 4)
sizer_4.Add(self.submit_button, 0, wx.ALL, 3)
sizer_3.Add(sizer_4, 0, wx.EXPAND, 0)
sizer_3.Add(self.error_text_ctrl, 1, wx.ALL|wx.EXPAND, 4)
self.window_1_pane_2.SetSizer(sizer_3)
self.window_1.SplitVertically(self.window_1_pane_1, self.window_1_pane_2)
sizer_1.Add(self.window_1, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
self.Centre()
self.SetSize((700, 500))
# end wxGlade
def submit_text(self, event): # wxGlade: main_frame.<event_handler>
global input_queue
input_queue = self.input_text_ctrl.GetValue() #Set even if 0
if input_queue != "":
self.output_text_ctrl.AppendText(">" + str(input_queue) + "\n")
else:
self.error_text_ctrl.AppendText( "INPUT ERROR: Please input some real text or a number.\n")
self.input_text_ctrl.Clear()
event.Skip()
def open_document(self, event): # wxGlade: main_frame.<event_handler>
import os
global file_location
dlg = wx.FileDialog(self, "Open a file", os.getcwd(), "", "BASIC Files (*.bas)|*.bas|All Files|*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
print "Opening at: %s" % (path)
file = open(path, 'r')
self.editor_text_ctrl.Clear()
self.editor_text_ctrl.AppendText( file.read() )
file.close()
file_location = path
dlg.Destroy()
def save_document(self, event, quiet=False): # wxGlade: main_frame.<event_handler>
global file_location
if file_location == "":
import os
dlg = wx.FileDialog(self, "Save a file", os.getcwd(), "", "BASIC Files (*.bas)|*.bas|All Files|*.*", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
file_location = dlg.GetPath()
if dlg.GetFilterIndex() == 0 and not file_location.endswith(".bas"):
file_location = file_location+".bas"
dlg.Destroy()
print "Saving at: %s" % (file_location)
file = open(file_location, 'w')
file.write(self.editor_text_ctrl.GetValue())
file.close()
event.Skip()
def run_basic(self, event): # wxGlade: main_frame.<event_handler>
'''Run the BASIC program the user has.'''
global kill_thread
global thread_is_on
if thread_is_on:
'''If thread is running'''
print "Thread is on"
kill_thread = True
time.sleep(1)
print "Starting another thread..."
#Clear the inputs
self.input_text_ctrl.Clear()
self.error_text_ctrl.Clear()
self.output_text_ctrl.Clear()
program_text = self.editor_text_ctrl.GetValue()
compiler_thread = BASICThread( program_text, self )
compiler_thread.start()
#event.skip()
def about_program(self, event): # wxGlade: main_frame.<event_handler>
description = """pyBASIC IDE is a cross between TIBASIC, and CLASSIC BASIC, allowing new programmers to experience the excitement of the classical home programming language.\n\n Special thanks to <NAME>."""
licence = """pyBASIC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free Software Foundation;
either version 3 of the License, or (at your option) any later version.
Ubuntu Remote is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of
the GNU General Public License along with Ubuntu Remote; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA"""
info = wx.AboutDialogInfo()
info.SetName( 'pyBASIC IDE' )
info.SetVersion( human_version )
info.SetDescription( description )
info.SetCopyright( '© 2010 <NAME> <<EMAIL>>' )
info.SetWebSite( 'http://code.google.com/p/pybasic/' )
info.SetLicence( licence )
info.AddDeveloper( '<NAME> <<EMAIL>>' )
wx.AboutBox(info)
def save_document_as(self, event): # wxGlade: main_frame.<event_handler>
global file_location
file_location = ""
self.save_document(event)
def debug_activate(self, event): # wxGlade: main_frame.<event_handler>
global debug_mode
debug_mode = not debug_mode
print debug_mode
def check_updates(self, event, silent=False): # wxGlade: main_frame.<event_handler>
print "Checking for updates please wait..."
import urllib
import webbrowser
try:
version_on_site = urllib.urlopen("http://pybasic.googlecode.com/svn/trunk/current_version.txt").read()
print "Version On Site: " + str(float(version_on_site)) + " This Version " + str(__version__)
if float(version_on_site) > __version__:
dial = wx.MessageDialog(None, 'Updates Avalable!\nhttp://code.google.com/p/pybasic/', 'Info', wx.OK)
dial.ShowModal()
webbrowser.open("http://code.google.com/p/pybasic")
elif silent == False:
dial = wx.MessageDialog(None, 'You are up to date!', 'Info', wx.OK)
dial.ShowModal()
elif float(version_on_site) < __version__:
dial = wx.MessageDialog(None, 'You are using BETA, Thanks!', 'Info', wx.OK)
dial.ShowModal()
except:
if silent == False:
dial = wx.MessageDialog(None, 'Unable to reach server.', 'Info', wx.OK)
dial.ShowModal()
# end of class main_frame
def startup():
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = main_frame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
frame_1.check_updates("", silent=True)
app.MainLoop()
if __name__ == "__main__":
startup()
```
#### File: py_basic_ide/pyBASIC/parser.py
```python
SHOW_ERRORS = True
import sys
def error_fx(text):
'''The default error handling, print the text to the console.
replace with your own function if you want, have it print to your
wx application or whatever.'''
sys.stderr.write(text)
def show_error(text):
'''
Send an error if SHOW_ERRORS = True
'''
if SHOW_ERRORS:
error_fx(text)
def split_text(text, seperator=" "):
return get_word(text, seperator)
def get_word(text, seperator=" "):
'''
Returns the beginning and end of text seperated around seperator.
If seperator is not found, the tail will be a blank string.
'''
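# Illustrative (added): get_word("GOTO START") -> ("GOTO", "START"),
# while get_word("END") -> ("END", "") because no separator is found.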
try:
head = text[0:text.index(seperator)]
tail = text[text.index(seperator) + len(seperator) : len(text)]
except ValueError:
return text, ""
return head.strip(), tail.strip()
def remove_between(text, char="\""):
'''
Returns a string from between the next two characters from the
input string, returns the head, thorax, and tail.
Example:
remove_between("TEST \"Hello Jane!\" said Dick.")
("TEST ", "Hello Jane!", "said Dick.")
'''
head, tail = get_word(text, char)
thorax, abdomen = get_word(tail,char)
return head.strip(), thorax.strip(), abdomen.strip()
def has_another(text, substring):
'''
Tests if the text has another substring, if it does returns true,
if else it returns false.
'''
try:
text.index(substring)
return True
except:
return False
def tokenize(line, linenumber):
'''
Tokenize so the runner can work and check for errors in the syntax.
'''
word_list = [] #Is returned with each token in a proper area.
#Get the keyword
first_word, rest_line = split_text(line)
first_word = first_word.upper()
#Add the first word to the list for identification in runner.
word_list.append(first_word)
#Check for first keyword
acceptable_words_list = ["PRINT", "CLS", "IF", "GOTO", \
"LABEL", "INPUT", "LET", "REM", \
"END", "STOP", "", "CLEAR", "LBL"]
if first_word not in acceptable_words_list:
show_error("Token error line %d, %s is not a valid token."
%(linenumber, first_word))
#Tokenize the rest of the line based off of first keyword.
"""
If statment:
["IF", "EXPRESSION", "THEN STATMENT", "ELSE STATMENT"]
Example
IF y=='' THEN PRINT 'Hello'
Is formatted as.
["IF", "%(y)s == ''", "PRINT 'Hello'", "PRINT 'Goodbye'"]
The else is optional.
"""
if first_word in ["IF"]:
#Check for syntax errors
if not has_another(rest_line, "THEN"):
show_error("IF error line %d, no THEN statment."%(linenumber))
expression, tail = get_word(rest_line, "THEN")
word_list.append(expression)
if not has_another(rest_line, "ELSE"):
#if no else
word_list.append( tokenize(tail, linenumber) )
word_list.append( tokenize("REM Nothing", linenumber) )
else:
#If there is an else still.
then, rest = get_word(tail, "ELSE")
word_list.append( tokenize(then, linenumber) )
word_list.append( tokenize(rest, linenumber) )
#Let
if first_word in ["LET"]:
if not has_another(rest_line, "="):
show_error("LET error line %d, no assignment operator after variable." %(linenumber))
else:
head, tail = get_word(rest_line, "=")
word_list.append(head)
word_list.append(tail)
#Input
if first_word in ["INPUT"]:
a,b,c = remove_between(rest_line, "\"")
if a != "":
show_error("INPUT error line %d, too many tokens before String." %(linenumber))
if has_another(c, " "):
show_error("INPUT error line %d, extra tokens found after variable." %(linenumber))
if c == "":
show_error("INPUT error line %d, no assignment variable." %(linenumber))
word_list.append(b) #User Display Text
word_list.append(c) #Variable
#Rem
if first_word in ["REM"]:
word_list.append(rest_line)
#End
if first_word in ["END"]:
if rest_line != "":
show_error("END error line %d, too many tokens after END." %(linenumber))
#Stop
if first_word in ["STOP"]:
if rest_line != "":
show_error("STOP error line %d, too many tokens after STOP." %(linenumber))
#gosub
#Goto Statment
if first_word in ["GOTO"]:
if has_another(rest_line, " "):
show_error("GOTO error line %d, too many tokens after GOTO" %(linenumber))
else:
word_list.append(rest_line)
#PRINT Statment
if first_word in ["PRINT"]:
word_list.append(rest_line)
#Clear statment
if first_word in ["CLS", "CLEAR"]:
if rest_line != "":
show_error("CLEAR/CLS error line %d, too many tokens after CLEAR/CLS." %(linenumber))
#LABEL statment
if first_word in ["LABEL", "LBL"]:
if has_another(rest_line, " "):
show_error("LABEL/LBL error line %d, too many tokens after LABEL/LBL." %(linenumber))
else:
word_list.append(rest_line)
#Return the list of tokenized words
return word_list
def tokenize_document(text):
'''
Create a token list of a document with newline characters.
'''
tokens = []
tokenlines = text.split("\n")
index = 1
for line in tokenlines:
t = tokenize(line, index)
if t != [""]:
tokens.append(t)
index += 1
return tokens
def tokenize_from_file(path):
'''
Create a basic token list from a document.
'''
text = ""
a = file(path)
for line in a:
text += line
return tokenize_document(text)
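# --- Added demo (not part of the original module): tokenize a tiny program ---
if __name__ == "__main__":
    demo_program = 'LET x = 1\nPRINT "x is $x"'
    for token_line in tokenize_document(demo_program):
        print token_line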
```
#### File: py_basic_ide/pyBASIC/runner.py
```python
import sys  # needed by the default error_fx handler
import time
debug = False
def error_fx(text):
'''The default error handling, print the text to the console.
replace with your own function if you want, have it print to your
wx application or whatever.'''
sys.stderr.write(text)
def output_fx(text):
'''The default output handling, print text to the console.
replace with your own function if you want, like have it print to
a text control in your wx application.'''
print text
def input_fx(text):
'''The default user input handler, use raw_input, if you like you
can replace this with your own function, like have it read from a
text control.'''
return raw_input(text)
def check_killed():
'''Checks if the program was killed during execution implemented
by pyBASIC ide to kill runaway threads.'''
return False
def var_replace(string, var_dict):
'''
Replaces variables the user is using ($asdf) with python
understood ones ( %(asdf)s )
'''
terminators = [" ", ",", "\'", "\"", ".", ";", ":", "!", "?"]
#string = string.replace("\\$", "|DOLLAR SIGN|")
newstring = ""
in_var = False
curr_var = ""
for char in string:
#If we are in a var add the current char to the curr var
if in_var and char not in terminators:
curr_var += char
#The start of a variable
if char == '$':
in_var = True
newstring += "%("
#The end of a var
elif in_var == True and char in terminators:
#Give the appropriate ending based on type
if type(var_dict[curr_var.strip()]) == type(0.0):
newstring+=")d"
if type(var_dict[curr_var.strip()]) == type(0):
newstring += ")i"
if type(var_dict[curr_var.strip()]) == type(""):
newstring += ")s"
newstring += char
curr_var = ""
in_var = False
else:
newstring += char
#if closed without finishing variable
if in_var == True:
#Give the appropriate ending based on type
if type(var_dict[curr_var.strip()]) == type(0.0):
newstring+=")d"
if type(var_dict[curr_var.strip()]) == type(0):
newstring += ")i"
if type(var_dict[curr_var.strip()]) == type(""):
newstring += ")s"
return newstring.replace("|DOLLAR SIGN|", "$")
def get_labels(td):
labeldict = {"START": 0}
index = 0;
for line in td:
if line[0] in ["LBL", "LABEL"]:
labeldict[line[1]] = index
index += 1
return labeldict
def error(str,line):
error_fx("Error Line %d: %s" % (line, str))
def debug_msg(str):
if debug:
output_fx(str)
def process_line(index, line, label_list, var_dict):
'''
Processes a line of basic to run. Returns the new index along with
the new variable list.
'''
if line[0] in ["STOP"]:
#Force out of bounds = program stops
index = -100
#Print statment
if line[0] in ["PRINT"]:
try:
output_fx( eval(var_replace(line[1], var_dict)%(var_dict)) )
except KeyError:
error("No such variable", index)
except ValueError:
error("Value Error",index)
except TypeError:
error("Type Error", index)
#Clear Statment
if line[0] in ["CLEAR", "CLS"]:
for i in range(0,100):
output_fx("")
#If statment
if line[0] in ["IF"]:
#debug_msg(var_replace(line[1], var_dict) %(var_dict))
#debug_msg(eval(var_replace(line[1], var_dict)%(var_dict))))
if eval(var_replace(line[1], var_dict)%(var_dict)):
index, var_dict = process_line(index, line[2], label_list, var_dict)
else:
index, var_dict = process_line(index, line[3], label_list, var_dict)
index -= 1
#Goto Statment
if line[0] in ["GOTO"]:
index = label_list[line[1]] -1
#Define Let Statment
if line[0] in ["LET"]:
try:
mystr = var_replace(line[2], var_dict)
x = eval(mystr %(var_dict))
var_dict[line[1]] = x
except ValueError:
error("ValueError", index)
except TypeError:
error("Type Error", index)
#Define Input Statment
if line[0] in ["INPUT"]:
x = input_fx(line[1] + "\n")
try:
x = float(x)
except ValueError:
x = str(x)
var_dict[line[2]] = x
debug_msg(var_dict)
index += 1
return index, var_dict
def run(td):
'''
Runs a BASIC program given a token document.
'''
debug_msg("Lines List:\n"+str(td)+"\n")
start_time=time.time()
index = 0 #Current line in file.
running = True
label_list = get_labels(td)
var_dict = {}
while running:
try:
line = td[index]
index, var_dict = process_line(index, line, label_list,
var_dict)
if check_killed():
#Stop by making a long line
print "Killed"
index = len(td)
except IndexError:
running = False
end_time=time.time()
output_fx("\n\n")
output_fx("--------------------------------")
output_fx("Program exited normally.")
debug_msg("Debug Mode ON:")
debug_msg("Variables: " + str(var_dict))
debug_msg("Labels: " + str(label_list))
debug_msg("Uptime: " + str(end_time - start_time) + " seconds")
```
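A small illustration of the `$variable` substitution that `var_replace` performs before `%`-formatting; this is an assumed usage sketch, not part of the repository:
```python
# Hypothetical sketch of var_replace() from runner.py above (not in the original repo).
var_dict = {"name": "Ada"}
template = var_replace("Hello $name ", var_dict)  # -> "Hello %(name)s "
print template % var_dict                         # -> "Hello Ada "
```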
#### File: python/pyWebserver/webserver.py
```python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep, path
import mimetypes
import time
import sys
import os
import socket
#Data
port_number = 8000
indexpath = "/index.html"
_404 = "/404.html"
webdir = curdir + "/siteroot"
errordir = "/errors"
host_name = ""
class RequestHandler(BaseHTTPRequestHandler):
global webdir
global indexpath
def do_HEAD(self):
'''
        Sends the headers for the content, by guessing the MIME type
        based on the extension
'''
        mime = mimetypes.guess_type(self.path)[0] #Guess MIME type
self.send_response(200) #Send Client OK Response
self.send_header('Content-type', mime)
self.end_headers()
def do_GET(self):
'''
Sends the client the web page/file that they requested.
This also takes care of the index file.
'''
        #Show The Header
        mime = mimetypes.guess_type(self.path)[0] #Guess MIME type
self.send_response(200) #Send Client OK Response
self.send_header('Content-type', mime)
self.end_headers()
try:
#If the path is blank show the index
if self.path == "/":
self.path = indexpath
#Send the user the web file
f = open(webdir + self.path)
self.wfile.write(f.read())
f.close()
except(IOError):
'''
Show the 404 error for pages that are unknown.
'''
f = open(webdir + errordir + _404)
self.wfile.write(f.read())
f.close()
def start():
'''
Sets up and starts the webserver.
'''
#Imports
global host_name
global port_number
#Tell the admin when the server started
print "Started HTTPServer, press Control + C to quit."
print time.asctime(), "Server Starts - Host:%s Port:%s" % (host_name, port_number)
#Start the server
server = HTTPServer((host_name,port_number), RequestHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
print time.asctime(), "Server Stops"
server.server_close()
def read_config():
import ConfigParser
global port_number
global indexpath
global _404
global webdir
global errordir
global host_name
global password
config = ConfigParser.ConfigParser()
config.read(curdir + '/Configuration/configure.cfg')
#Set all vars from the file
_404 = config.get('Pages', '_404')
webdir = curdir + config.get('Pages', 'siteroot')
errordir = config.get('Pages', 'error_dir')
indexpath = config.get('Pages', 'index')
    host_name = config.get('Server', 'host_name')  # alternatively: host_name = socket.gethostname()
port_number = int(config.get('Server', 'port'))
def main():
read_config()
start()
if __name__ == '__main__':
main()
```
#### File: randallai/AI/pavlov.py
```python
import threading
import time
import sys
#Turn the variable DEBUGGING to True if you want to see debugging
#messages for this module.
DEBUGGING = False
__version__ = 1.1
__author__ = "<NAME> <<EMAIL>>"
ASSOCIATION_TO_EVENT = 1.0 #The amount of association neededed before one event triggers another
ASSOCIATION_PER_SECOND = 0.2 #The amount to raise association per second that is left in the amount of time since the last neuron call to this one.
DISASSOCIATION_PER_CALL = 0.05 #The amount of association to remove per disassoc call.
MAX_ASSOCIATION_TIME = 3.0 #The number of seconds until two events get no association added when they are fired.
class Neuron:
_evt_a = None
_evt_b = None
_evt_a_time = 0
_evt_b_time = 0
assoc = 0.0
def __init__(self, event_a, event_b):
'''Create a new Neuron that "remembers" how often two events
are registered in proximity to one another.
        Parameters:
event_a -- The first event.
event_b -- The second event.
WARNING: Do not use this class directly unless you are
        absolutely sure you know what you are doing, it is better to
instantiate a ResponseNetwork, which will build and manage
Neurons for you.
'''
self._evt_a = event_a
self._evt_b = event_b
if DEBUGGING:
print("Neuron created between %s, %s" % (event_a, event_b))
def _evt_time_diff(self):
'''The difference in the times each event was last called.
Always positive.
'''
return abs(self._evt_a_time - self._evt_b_time)
def _closeness_to_perfect(self):
'''The measure of how close the events were to being in sync,
closer events mean more related events.
'''
return (MAX_ASSOCIATION_TIME - self._evt_time_diff()) / MAX_ASSOCIATION_TIME
def send_event(self, e):
'''If the association is above the threshold return the event
that was not put in as a paramater, else return None.
'''
if self.assoc >= ASSOCIATION_TO_EVENT:
if DEBUGGING:
print("Response between %s, %s " % (self._evt_a, self._evt_b))
if e == self._evt_b:
return self._evt_a
return self._evt_b
return None
def register_event(self, e):
'''If the event given is one of those that this Neuron registers
and it is in a close enough proximity to the other event this
Neuron registers, add association between the two relative to
how close they are (closer = more association). If the
association is higher than the threshold return the event that
was not called.
        Parameters:
e -- The event that might be in this Neuron that is needed to be
updated.
'''
if e in (self._evt_a, self._evt_b):
#Update time for appropriate event
if e == self._evt_a:
self._evt_a_time = time.time()
else:
self._evt_b_time = time.time()
#If times close enough together add association.
if self._evt_time_diff() < MAX_ASSOCIATION_TIME:
self.assoc += self._closeness_to_perfect() * ASSOCIATION_PER_SECOND
#Notify of updated association
if DEBUGGING:
print("%s, %s Association: %.2f%%" % (self._evt_a, self._evt_b, self.percent_associated()))
#If assoc high enough, return the event not yet sent.
return self.send_event(e)
def decrement_assoc(self, multiplier=1):
'''Decrements the association, based off the DISASSOCIATION_PER_CALL
variable. The optional paramater multiplier can make up for
_very_ large neural nets by multiplying the disassoc time.
        Parameters:
multiplier -- DEFAULT:1 Multiplies the disassoc call by a
this number. (int/float)
'''
self.assoc -= DISASSOCIATION_PER_CALL * multiplier
if self.assoc < 0:
self.assoc = 0
def percent_associated(self):
'''Returns the association of the two events as a percent,
useful for user output, but should not be used anywhere else.
(float)
'''
return (float(self.assoc)/float(ASSOCIATION_TO_EVENT)) * 100
class ResponseNetwork(threading.Thread):
'''A rudimentary condition response "brain". Conditions are
registered, then the brain is started. Any time the class is
notified with a condition it judges the closeness of that condition
with others, if they commonly occur together then an association is
    built. When an association meets a supplied threshold both events
are triggered, in this fashion rudimentary behavior may be observed.
WARNING:
The response network is not meant to be shut down and turned back
on at will, once off returning to an on state may cause bizarre
crashes. Instead use a function that halts updates for a variable
amount of time.
'''
_neurons = [] #A list of created neurons.
_cr = {} #A dictionary of condition > response pairs.
_awake = True #Set to false to have the thread kill itself.
def __init__(self, update_time=1, autostart=False):
'''Sets up the response-network.
        Parameters:
update_time -- A function that returns a float as the number of
seconds until the organs update and fire events,
or just a float. DEFAULT: 1 (function, float)
autostart -- Should pavlov bootstrap it's own thread (true) or are
you going to call start() manually (false)? (bool)
Default: False
Note:
Why use a function for update_time? Well, what if your device
wanted to go to sleep but still retain all of it's conditions
and responses? What about suspending condition response
memory loss while in a state of panic? Maybe the charging
station is a long way away for your robot, and you don't want it
to forget while it is making the trip.
'''
self._update_time = update_time
threading.Thread.__init__ ( self )
if autostart:
self.start()
def __getitem__(self, key):
'''Emulates a list or dictionary, called with a tuple of the
emotion cross, or an index. Raises IndexError if not found.
Example:
>>> ResponseNetwork[('one','two')]
<class Neuron at 0x0000007b>
>>> ResponseNetwork[0]
<class Neuron at 0x0000002a>
>>> ResponseNetwork[None]
IndexError
'''
#Check for strings
try:
for n in self._neurons:
if n._evt_a in key and n._evt_b in key:
return n
except TypeError:
pass
#Check for indexes
try:
return self._neurons[key]
except TypeError:
pass
#else raise error
raise IndexError
def __len__(self):
'''Returns the number of neurons.'''
return len(self._neurons)
def register_con_res(self, condition, response=None):
'''Registers a condition with a response.
Paramaters:
condition -- Any object, usually a string, used to identify
a condition. (any)
response -- A function, or None. The response will be run any
time the condition is created. DEFAULT:None
(__call__ or None)
Note:
If no response is created then a lambda is generated that
performs the operation "1+1"; in very large systems this could
be a problem with speed or memory storage.
Warning:
If the response does not have the __call__ attribute (is a
function) then the default lambda is generated, no warning will
be given.
It is assumed you know what you are doing!
'''
#If thread alive, we must add them to the active list right now.
if self.is_alive():
for cond2 in self._cr.keys():
self._neurons.append(Neuron(cond2, condition))
#Add to the dictionary now.
if hasattr(response, '__call__'):
self._cr[condition] = response
else:
self._cr[condition] = (lambda : 1+1)
def change_response(self, condition, response):
        '''Changes the response for a given condition that is already
registered.
'''
self._cr[condition] = response
def _setup_net(self):
'''Sets up the neural net with the current conditions and
responses in the dictionary.
'''
self._neurons = []
#Get a list of all the conditions
tmp = self._cr.keys()
#Add a neuron bridging all possible keys
for i in range(0, len(tmp)):
for j in range(i + 1, len(tmp)):
self._neurons.append(Neuron(tmp[i], tmp[j]))
if DEBUGGING:
print('-' * 30)
print("Total Neurons: %i" % (len(self._neurons)))
def run(self):
'''A pretty simple run method, decrements all of the
associations in the neurons on a schedule.
'''
#Set up neurons
self._setup_net()
#Allow starting
self._awake = True
#Decrement from here on in until the network is killed.
while self._awake:
#Get the time until the next decrement.
if hasattr(self._update_time, '__call__'):
ut = float(self._update_time())
else:
ut = self._update_time
time.sleep(ut)
for n in self._neurons:
n.decrement_assoc()
def condition(self, c, autoappend=True):
'''Sends a condition through the neural network creating
responses and association between the neurons.
        Parameters:
c -- The condition to begin registering events from.
autoappend -- If the condition is not known about yet, should
it be added to the neural net? Yes makes the
                      creature more resilient and able to learn, but
harder to debug. (boolean) Default: True
'''
if autoappend:
if c not in self._cr.keys():
self.register_con_res(c)
#Holds conditions already fired so there are no infinite loops.
done_conditions = set()
fired = set()
#Holds the conditions that have yet to be processed
new_conditions = [c]
if DEBUGGING:
print("=" * 80)
while new_conditions:
c = new_conditions.pop()
done_conditions.add(c)
if DEBUGGING:
print("Condition raised: %s" % (str(c)))
for n in self._neurons:
#Only fire neurons once
if n not in fired:
resp = n.register_event(c)
#Not null and not called before
if resp and resp not in done_conditions:
if resp not in new_conditions:
new_conditions.append(resp)
fired.add(n)
#Call all of the responses now.
for c in done_conditions:
r = self._cr[c]
if hasattr(r, '__call__'):
r()
def lookup_association(self, c):
'''Returns a list of all the things associated with the given
condition. A complete reverse-lookup.
If a and b are associated, and b and c, and c and d, and c
and f, then a reverse lookup for a would return [b,c,d,f].
Note: This does not raise association between Neurons.
        Parameters:
c -- The condition to lookup events from.
'''
#Hold conditions already fired so there are no infinite loops.
done_conditions = set() #Conditions already checked.
fired = set() #Neurons already fired.
#Holds the conditions that have yet to be processed, but have
#been fired by a neuron.
new_conditions = [c]
if DEBUGGING:
print("=" * 80)
print("Reverse lookup on condition: %s" % (str(c)))
while new_conditions:
c = new_conditions.pop()
done_conditions.add(c)
if DEBUGGING:
print("Condition checked: %s" % (str(c)))
for n in self._neurons:
#Only fire neurons once
if n not in fired:
resp = n.send_event(c)
#If the Neuron was fired, and the condition hasn't
                    #already been sent.
if resp and resp not in done_conditions:
if resp not in new_conditions:
new_conditions.append(resp)
fired.add(n)
return list(done_conditions)
def get_neuron(self, evt1, evt2):
'''Returns the Neuron that bridges the two events, returns None
if the Neuron doesn't exist.
'''
try:
return self.__getitem__((evt1, evt2))
except IndexError:
return None
def get_association(self, evt1, evt2):
'''Returns a tuple (association, assoc_to_event) for the Neuron
bridging the given two events, Returns (None, None) if Neuron
doesn't exist.
'''
n = self.get_neuron(evt1, evt2)
        if n is not None:
            return (n.assoc, ASSOCIATION_TO_EVENT)
return (None, None)
def sleep(self):
'''Kills the thread.'''
self._awake = False
def export_csv(self):
'''Returns a string representation of a csv file. This can be
written to a file later.
The columns and rows are the neuron names, and in the cells
between are the percentages of association between the two.
The csv is comma delimited and single quotes designate cell
contents.
Neurons that don't exist will be blank, neurons with 0
association will be 0.0000.
'''
output = ""
tmp = self._cr.keys() #Get all the conditions now so it doesn't change.
tmp.sort()
#Get a list of every neuron and it's value for quick lookup
#later on.
neuron_snapshot = {}
for n in self._neurons:
neuron_snapshot[(n._evt_a, n._evt_b)] = n.percent_associated()
#Write column header.
output += "''," #Blank for first col (row headers).
for k in tmp:
output += "'%s'," % (str(k))
output += "\n"
#Write rows
for i in tmp:
#Row header
output += "'%s'," % (i)
#Row contents
for j in tmp:
#Try fetching the percent forward and backward, if not
#found that means we are comparing a key to itself,
#which has no value, for sanity purposes.
try:
percent = neuron_snapshot[(i,j)]
except:
try:
percent = neuron_snapshot[(j,i)]
except:
percent = ""
output += "'%s'," % (str(percent))
#End this line.
output += "\n"
return output
def export_HTML_table(self, hidezeros=True):
'''Returns a string representation of an HTML table. This can
be written to a file later.
The columns and rows are the neuron names, and in the cells
between are the percentages of association between the two.
Neurons that don't exist will be blank.
        Parameters:
hidezeros - If the neuron is zero hide the value. (bool)
DEFAULT: True
'''
output = ""
tmp = self._cr.keys() #Get all the conditions now so it doesn't change.
tmp.sort()
#Get a list of every neuron and it's value for quick lookup
#later on.
neuron_snapshot = {}
for n in self._neurons:
neuron_snapshot[(n._evt_a, n._evt_b)] = n.percent_associated()
#Write column header.
output += "<table>\n<tr><th></th>" #Blank for first col (row headers).
for k in tmp:
output += "<th>%s</th>" % (str(k))
output += "</tr>\n"
#Write rows
for i in tmp:
#Row header
output += "<tr><th>%s</th>" % (i)
#Row contents
for j in tmp:
#Try fetching the percent forward and backward, if not
#found that means we are comparing a key to itself,
#which has no value, for sanity purposes.
try:
percent = neuron_snapshot[(i,j)]
except:
try:
percent = neuron_snapshot[(j,i)]
except:
percent = None
if not percent or hidezeros and int(percent) == 0:
output += "<td></td>"
else:
output += "<td>%.2f</td>" % (percent)
#End this line.
output += "</tr>\n"
output += "</table>"
return output
if __name__ == "__main__":
DEBUGGING = True
r = ResponseNetwork()
r.register_con_res('a')
r.register_con_res('b')
r.register_con_res('c')
r.register_con_res('d')
r.start()
time.sleep(1)
print r[1]
r.condition('a')
r.condition('b')
r.condition('a')
r.condition('b')
r.condition('a')
r.condition('b')
r.condition('a')
r.condition('b')
r.condition('a')
r.condition('b')
r.condition('a')
time.sleep(5)
r.condition('a')
r.condition('c')
r.condition('a')
r.condition('c')
r.condition('a')
r.condition('c')
r.condition('a')
print r.lookup_association('a')
r.sleep()
```
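A back-of-the-envelope check of the constants defined at the top of pavlov.py; this snippet is illustrative only and not part of the module:
```python
# With closeness ~= 1.0 for near-simultaneous events, each paired firing adds about
# ASSOCIATION_PER_SECOND (0.2), so roughly five tight pairings cross the 1.0 trigger
# threshold -- ignoring the periodic DISASSOCIATION_PER_CALL (0.05) decrement in run().
ASSOCIATION_TO_EVENT = 1.0
ASSOCIATION_PER_SECOND = 0.2
print(ASSOCIATION_TO_EVENT / ASSOCIATION_PER_SECOND)  # 5.0, matching the repeated a/b firings in the demo
```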
#### File: personal_codebase/wheel_of_fortune_ai/wordgraph.py
```python
import collections
class NgramGraph:
graph = None
def __init__(self):
self.graph = collections.defaultdict(list)
    def add_edge(self, from_node, to_node, score):
        self.graph[from_node].append((score, to_node))
        self.graph[from_node] = sorted(self.graph[from_node], reverse=True)
def dfs(self, start_node, depth, match_function):
``` |
{
"source": "josephlijo/PythonBoot",
"score": 3
} |
#### File: types-statements-def-classes/SampleCode/01.Helloworld.py
```python
print 'Hello, world'
# A function definition
def greet():
"""A function which prints a string""" # These are docstrings :: https://docs.python.org/3/tutorial/controlflow.html#documentation-strings
print 'Hello from Python'
# Invoke the function
greet()
# Prints __main__ if it is executed as a program
print __name__
```
#### File: types-statements-def-classes/SampleCode/03.Functions.py
```python
def func_without_args():
"""Function without arguments"""
print 'This is function without arguments'
func_without_args()
# Function with arguments
def func_with_args(arg1, arg2):
"""Function which accepts two arguments"""
print 'This function has arguments: ', str(arg1), ' and ', str(arg2)
func_with_args(1, 3)
def get_square_root(num):
"""Function which calculates square root of the passed in argument"""
return num * num
def get_cube_root(arg1):
"""Function which calculates cube root of the passed in argument"""
print 'The cube root is: ', str(arg1 * arg1 * arg1)
print get_square_root(3)
get_cube_root(2)
print get_cube_root(3) # Since there is no return value, it returns `None`
# Function with default parameters
def func_with_def_values(num1, num2=2):
"""Function which has default parameters"""
print 'Value of argument 1 is ', num1, '; argument 2 is ', num2
func_with_def_values(1, 10)
func_with_def_values(2)
func_with_def_values(num1=20, num2=40)
func_with_def_values(num1=443)
#funWithDefaults(num2=99993) # This will give error as we haven't passed the required argument
# Function with variable number of arguments
def func_with_var_args(*args):
"""Function with variable number of arguments"""
result = 0
for item in args: # looping items in the argument list
result = result + item
return result
ADD_RESULT = func_with_var_args(1, 30, 2, 43)
print ADD_RESULT
```
#### File: types-statements-def-classes/SampleCode/04.Conditional.Structures.py
```python
def main():
"""Function demonstration the usage of `if,elif,else"""
x, y = 10, 20
if x < y:
print 'x is less than y'
elif x > y:
print 'x is greater than y'
else:
print 'x and y are both equal'
# Using conditional statements in Python
# a if C else b
def con_state(x, y):
"""Conditional statement - a if C else b"""
print 'x is less than y' if (x < y) else 'x is greater than or equal to y'
if __name__ == '__main__':
main()
con_state(1, 2)
con_state(2, 1)
con_state(2, 2)
```
#### File: types-statements-def-classes/SampleCode/05.Loops.py
```python
def func_with_while():
"""Function demonstratin the usage of `while` loop"""
x, y = 1, 10
print 'This is a while loop'
while x < y:
print x
x = x + 1
def func_with_for():
"""Function demoing the usage of `for` loop"""
x, y = 4, 8
print 'This is a for loop'
# This will print from 4 through 7
for item in range(x, y):
print item
def func_with_collection():
"""Function demoing collection"""
collection = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for day in collection:
print day
def func_with_break_continue():
"""Function demoing the usage of `break` and `continue`"""
collection = [7, 1, 2, 4, 3, 9]
for item in collection:
if item % 2 == 0:
continue # Skips the rest of the loop
if (item == 1 or item == 9):
break # Breaks the loop
print item
def enum_for_index_value():
"""Function demoing enumeration"""
collection = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for x, y in enumerate(collection):
print x, y
if __name__ == "__main__":
func_with_while()
func_with_for()
func_with_collection()
func_with_break_continue()
enum_for_index_value()
``` |
{
"source": "josephlim94/rapa",
"score": 3
} |
#### File: josephlim94/rapa/application_side_routing.py
```python
import asyncio
import websockets
import pyaudio
import wave
import sys
import time
#CHUNK = 1024*512
CHUNK=1920
wf = wave.open('test.wav', 'rb')
async def hello(uri):
from opus import encoder
async with websockets.connect(uri) as websocket:
CHANNELS = 2
RATE = 48000
encoder = encoder.Encoder(RATE,CHANNELS,'voip')
#await websocket.send("Hello world!")
data = wf.readframes(CHUNK)
#await websocket.send(data)
#text = await websocket.recv()
#print("Should wait: ", text)
##time.sleep(CHUNK/44100)
#time.sleep(int(text))
#data = wf.readframes(CHUNK)
#await websocket.send(data)
#text = await websocket.recv()
#print("Should wait: ", text)
#time.sleep(int(text))
#time.sleep(2)
total = 0
while data:
#stream.write(data)
encoded_data = encoder.encode(data, CHUNK)
#await websocket.send(data)
await websocket.send(encoded_data)
data = wf.readframes(CHUNK)
#response = await websocket.recv()
#time.sleep(int(response))
#if len(data) < CHUNK:
# print(data)
total = total + len(data)
# wait a bit more for the buffered data to finish
time.sleep(2)
print("Read complete")
print(total)
asyncio.get_event_loop().run_until_complete(
hello('ws://192.168.0.104:8000/ws/speaker/audioplayback/'))
```
#### File: rapa/speaker/pyaudio_asynchronous.py
```python
import pyaudio
import logging
import threading
import multiprocessing
from collections import deque
import time
import signal
class PyAudioAsync(multiprocessing.Process):
chunk_frame_length = 1920
number_of_output_channel = 2
number_of_input_channel = 2
channel_width = 2
sample_rate = 44100
audio_input = False
audio_output = False
def __init__(self, *args, **kwargs):
super().__init__(target=self, args=args, kwargs=kwargs)
self.daemon = True
self.save_kwargs(kwargs)
self.process_terminated = multiprocessing.Event()
self.process_terminated.clear()
def save_kwargs(self, kwargs):
if "audio_packet_queue" in kwargs:
self.audio_packet_queue = kwargs["audio_packet_queue"]
if "period_sync_event" in kwargs:
self.period_sync_event = kwargs["period_sync_event"]
if "audio_input" in kwargs:
self.audio_input = kwargs["audio_input"]
if "audio_output" in kwargs:
self.audio_output = kwargs["audio_output"]
def terminate_process(self, signum, frame):
self.process_terminated.set()
def run_audio_output(self):
bytes_per_frame = self.channel_width * self.number_of_output_channel
silent_chunk = b'\x00' * self.chunk_frame_length * bytes_per_frame
output_device_info = self.p.get_default_output_device_info()
'''
output_device_name = "Speakers "
output_device_info = None
for i in range(p.get_device_count()):
device_info = p.get_device_info_by_index(i)
if output_device_name in device_info["name"]:
output_device_info = device_info
if not output_device_info:
print("Output device not found")
return
'''
self.logger.info(output_device_info)
stream = self.p.open(format=pyaudio.paInt16,
channels=self.number_of_output_channel,
rate=self.sample_rate,
output=True,
output_device_index=output_device_info["index"])
while not self.process_terminated.is_set():
self.logger.info("Process: iterating")
audio_packet = None
if not self.audio_packet_queue.empty():
self.logger.info("Playing audio")
audio_packet = self.audio_packet_queue.get()
else:
self.logger.info("Playing silent")
audio_packet = silent_chunk
self.period_sync_event.set()
stream.write(audio_packet)
def run_audio_input(self):
input_device_info = self.p.get_default_input_device_info()
self.logger.info(input_device_info)
stream = self.p.open(format=pyaudio.paInt16,
channels=self.number_of_input_channel,
rate=self.sample_rate,
frames_per_buffer=self.chunk_frame_length,
input=True,
input_device_index=input_device_info["index"])
while not self.process_terminated.is_set():
self.logger.info("Process Audio Input: iterating")
audio_packet = stream.read(self.chunk_frame_length)
self.audio_packet_queue.put(audio_packet)
def run(self):
self.logger = multiprocessing.get_logger()
self.logger.setLevel(logging.INFO)
signal.signal(signal.SIGTERM, self.terminate_process)
self.p = pyaudio.PyAudio()
if self.audio_output:
self.run_audio_output()
elif self.audio_input:
self.run_audio_input()
else:
self.logger.info("Neither output or input enabled")
# start
#process_object = None
logger_created = False
def init_logger():
#format = "%(asctime)s: %(message)s"
#logging.basicConfig(format=format, level=logging.INFO,
# datefmt="%H:%M:%S")
global logger_created
if not logger_created:
multiprocessing.log_to_stderr()
logger_created = True
'''
def wait_data_play_start(period_sync_event):
tic = time.perf_counter()
period_sync_event.wait()
toc = time.perf_counter()
logging.info("Waited for: %f s", toc-tic)
period_sync_event.clear()
'''
```
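A hedged usage sketch for `PyAudioAsync` in output mode; the keyword names match those read by `save_kwargs`, but this is an assumption-laden example and not part of the repository:
```python
# Hypothetical driver for PyAudioAsync above (not in the original repo).
# Requires a working PyAudio output device; on POSIX, terminate() sends SIGTERM,
# which the terminate_process() handler turns into a clean shutdown.
import multiprocessing

if __name__ == "__main__":
    q = multiprocessing.Queue()
    sync = multiprocessing.Event()
    player = PyAudioAsync(audio_packet_queue=q, period_sync_event=sync, audio_output=True)
    player.start()
    silent = b"\x00" * 1920 * 2 * 2   # chunk_frame_length * channels * bytes per sample
    for _ in range(100):
        q.put(silent)                 # raw 16-bit stereo frames to play
    player.terminate()
    player.join()
```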
#### File: rapa/speaker/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import loader
import pyaudio
# Create your views here.
p = pyaudio.PyAudio()
def index(request):
template = loader.get_template('speaker/index.html')
return HttpResponse(template.render(None, request))
def get_input_device_list(request):
global p
input_device_list = {}
for device_index in range(p.get_device_count()):
device_info = p.get_device_info_by_index(device_index)
        if device_info['maxInputChannels'] != 0:
input_device_list[device_index] = device_info
response = JsonResponse(input_device_list)
return response
def get_output_device_list(request):
global p
output_device_list = {}
for device_index in range(p.get_device_count()):
device_info = p.get_device_info_by_index(device_index)
if device_info['maxOutputChannels'] is not 0:
output_device_list[device_index] = device_info
response = JsonResponse(output_device_list)
return response
``` |
{
"source": "josephlin55555/dask",
"score": 2
} |
#### File: dask/bytes/_pyarrow.py
```python
from __future__ import absolute_import
import pyarrow as pa
class HDFS3Wrapper(pa.filesystem.DaskFileSystem):
"""Pyarrow compatibility wrapper class"""
def isdir(self, path):
return self.fs.isdir(path)
def isfile(self, path):
return self.fs.isfile(path)
``` |
{
"source": "JosephLipinski/DnD-5e-Player-Readability",
"score": 3
} |
#### File: JosephLipinski/DnD-5e-Player-Readability/pdf_converter.py
```python
from PIL import Image
import PIL
from pdf2image import convert_from_path, convert_from_bytes
from pdf2image.exceptions import PDFInfoNotInstalledError, PDFPageCountError, PDFSyntaxError
import os
def convert_file_to_images(path_to_file):
return convert_from_path(path_to_file)
def save_pil_images(save_path, images):
print(len(images))
for i in range(0, len(images)):
image = images[i]
image_save_path = os.path.join(save_path, str(i) + ".jpg")
print(image_save_path)
image.save(image_save_path)
if __name__ == "__main__":
cwd = os.getcwd()
path_to_pdf = os.path.join(cwd, 'SRD-OGL_V5.1.pdf')
images = convert_file_to_images(path_to_pdf)
output_path = os.path.join(cwd, 'output')
save_pil_images(output_path, images)
``` |
{
"source": "josephl/lastmod",
"score": 3
} |
#### File: src/lastmod/manager.py
```python
from configparser import ConfigParser, SectionProxy
from contextlib import contextmanager
import json
import logging
import os
import sqlite3
import typing
import urllib.request
import uuid
NAMESPACE = "lastmod"
class CacheManager:
"""
Use as the interface for URL requests
:param cache_path: parent directory for storing/reading cached payloads
:param db: path to SQLite3 database for response records
"""
@classmethod
def from_config(
cls,
config_ini: typing.Union[ConfigParser, SectionProxy, os.PathLike, None] = None,
):
"""
Factory function for using config file instantiation
"""
if isinstance(config_ini, ConfigParser):
section = config_ini[NAMESPACE]
elif isinstance(config_ini, SectionProxy):
section = config_ini
elif isinstance(config_ini, str):
config = ConfigParser()
config.read(config_ini)
section = config[NAMESPACE]
return cls(cache_path=section.get("cache_path"), db=section.get("db"))
def __init__(
self,
cache_path: typing.Optional[os.PathLike],
db: typing.Optional[os.PathLike] = None,
):
if not cache_path:
raise ValueError("Must specify cache_path in argument or config")
self.cache_path = cache_path
self.db = db
def get_cached_response(self, request: urllib.request.Request) -> dict:
"""
Serialize request URL to a subpath relative to self.cache_path
"""
url = request.get_full_url()
cur = self._cx.cursor()
cur.execute("SELECT * FROM response WHERE url = ?", (url,))
return cur.fetchone()
@contextmanager
def database_connection(self):
try:
self._cx = sqlite3.connect(self.db)
self.init_db(self._cx)
self._cx.row_factory = self.response_dict_factory
yield self._cx
except Exception:
raise
finally:
self._cx.close()
self._cx = None
@staticmethod
def init_db(cx):
cur = cx.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS response (
url TEXT PRIMARY KEY,
headers TEXT,
location TEXT
)
"""
)
@staticmethod
def response_dict_factory(cursor, row):
"""
Convert fetched rows to dict
Set connection.row_factory = <this method>
https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.row_factory
"""
data = {}
for i, col in enumerate(cursor.description):
data[col[0]] = row[i]
return data
def generate_cache_location(self, url: str) -> os.PathLike:
"""
Create a unique cache location for a URL (absolute path)
"""
basename = str(uuid.uuid5(uuid.NAMESPACE_URL, url))
return os.path.abspath(os.path.join(self.cache_path, basename))
@staticmethod
def normalize_headers(headers: typing.Dict) -> str:
"""
Normalization is lower-casing keys, then serialize the result to JSON
"""
norm = {}
for k, v in headers.items():
norm[k.lower()] = v
return json.dumps(norm)
def insert_response(self, url: str, headers: typing.Dict, location: os.PathLike):
"""
Create record for response
"""
headers = self.normalize_headers(headers)
self._cx.execute(
"INSERT OR REPLACE INTO response VALUES (?, ?, ?)",
(url, headers, location),
)
self._cx.commit()
@contextmanager
def urlopen(
self,
url: typing.Union[str, urllib.request.Request],
data: typing.Any = None,
timeout: typing.Union[int, float, None] = None,
**kwargs,
):
"""
Primary method to make URL request
Accepts arguments matching urllib.request.urlopen(...)
:param url: resource to fetch
:param cache_dest: destination to cache payload
if not specified, automatically determines location within self.cache_path
"""
if isinstance(url, urllib.request.Request):
request = url
else:
request = urllib.request.Request(url)
with self.database_connection():
cached_resp = self.get_cached_response(request)
if cached_resp:
cache_dest = cached_resp.get("location")
if cache_dest:
cache_dest = os.path.abspath(cache_dest)
if cached_resp and os.path.exists(cache_dest):
cached_headers = json.loads(cached_resp["headers"])
last_modified = cached_headers.get("last-modified")
if last_modified:
request.add_header("If-Modified-Since", last_modified)
try:
resp = urllib.request.urlopen(request, data, timeout, **kwargs)
logging.info(f"Request {resp.status}: {resp.url}")
if resp.status == 200:
body = resp.read()
try:
yield body
finally:
cache_dest = cache_dest or self.generate_cache_location(
request.full_url
)
self.insert_response(request.full_url, resp.headers, cache_dest)
with open(cache_dest, "wb") as f:
f.write(body)
except urllib.error.HTTPError as err:
logging.warning(f"Error {err.status}: {err.url}")
if err.status == 304 and cache_dest:
with open(cache_dest, "rb") as f:
body = f.read()
yield body
else:
raise
if __name__ == "__main__":
from argparse import ArgumentParser
logging.basicConfig(level=logging.INFO)
parser = ArgumentParser()
parser.add_argument("-c", "--config", help="Path to INI config")
parser.add_argument(
"-p", "--cache-path", help="Parent directory for caching payload files"
)
parser.add_argument("-d", "--db", help="SQLite3 database for response data")
parser.add_argument("url", help="Fetch url")
parsed_args = parser.parse_args()
if parsed_args.cache_path and parsed_args.db:
mgr = CacheManager(parsed_args.cache_path, parsed_args.db)
elif parsed_args.config:
mgr = CacheManager.from_config(parsed_args.config)
else:
raise ValueError(
"Specify cache path and database path by config or command-line args"
)
with mgr.urlopen(parsed_args.url) as f:
print(f.decode())
``` |
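An illustrative walk-through of the conditional-GET behaviour in `CacheManager.urlopen`; the URL and paths are placeholders, and the 304 path only applies when the server returns a `Last-Modified` header:
```python
# Sketch only, not part of the module. cache_path must already exist on disk.
mgr = CacheManager(cache_path="/tmp/lastmod-cache", db="/tmp/lastmod.db")

# First call: 200 OK, payload cached under cache_path and recorded in SQLite.
with mgr.urlopen("https://example.com/data.json") as body:
    print(len(body))

# Repeat call: If-Modified-Since is sent; a 304 answer is served from the cached file.
with mgr.urlopen("https://example.com/data.json") as body:
    print(len(body))
```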
{
"source": "j-osephlong/j-LC3",
"score": 3
} |
#### File: j-osephlong/j-LC3/lc3.py
```python
import numpy as np
import lc3asm
#2^16 16bit memory address's
memory = np.uint16([0]*0xFFFF)
#registers
reg = np.uint16([0]*8)
pc = np.int16(0x0200)
psr = 0xFFFC
halt = True
#special memory ptrs
kbsr_ptr = 0xFE00
kbdr_ptr = 0xFE02
dsr_ptr = 0xFE04
ddr_ptr = 0xFE06
mcr_ptr = 0xFFFE
#from stackoverflow
# https://stackoverflow.com/questions/32030412/twos-complement-sign-extension-python/32031543
def sign_extend(value, bits):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
def logSign(num):
n = sign_extend(num, 16)
memory[psr]&=0b1111111111111000
if n == 0:
memory[psr]|=0b10
elif n < 0:
memory[psr]|=0b100
elif n > 0:
memory[psr]|=0b1
def getSign():
if (memory[psr]&0b100)>>2 == 0b1:
return -1
elif (memory[psr]&0b10)>>1 == 0b1:
return 0
elif memory[psr]&0b1 == 0b1:
return 1
def addOp(instruct):
ans = reg[(instruct>>6)&0b111]
if (instruct>>5)&0b1 == 0b0:
ans+=reg[instruct&0b111]
else:
ans+=sign_extend(instruct&0b11111, 5)
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def andOp(instruct):
ans = reg[(instruct>>6)&0b111]
if (instruct>>5)&0b1 == 0b0:
ans&=reg[instruct&0b111]
else:
ans&=sign_extend(instruct&0b11111, 5)
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def brOp(instruct):
global pc
if (instruct >> 11) & 0b1 == 0b1 and getSign() == -1:
pc+=sign_extend(instruct&0b111111111, 9)
return
elif (instruct >> 10) & 0b1 == 0b1 and getSign() == 0:
pc+=sign_extend(instruct&0b111111111, 9)
return
elif (instruct >> 9) & 0b1 == 0b1 and getSign() == 1:
pc+=sign_extend(instruct&0b111111111, 9)
def jmpOp(instruct):
    global pc
    pc = reg[(instruct>>6)&0b111]
def jsrOp(instruct):
    global pc
    reg[7] = pc
if (instruct>>11)&0b1 == 0b1:
pc = sign_extend(instruct&0b11111111111, 11)+pc
else:
pc = reg[(instruct>>6)&0b111]
def ldOp(instruct):
ans = memory[sign_extend(instruct&0b111111111, 9) + pc]
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def ldiOp(instruct):
ad = memory[sign_extend(instruct&0b111111111, 9) + pc]
ans = memory[ad]
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def ldrOp(instruct):
ans = memory[sign_extend(instruct&0b111111, 6) + reg[(instruct>>6)&0b111]]
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def leaOp(instruct):
ans = pc+sign_extend(instruct&0b111111111, 9)
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def notOp(instruct):
ans = ~reg[(instruct>>6)&0b111]
logSign(ans)
reg[(instruct>>9)&0b111] = ans
def retOp(instruct):
    global pc
    pc = reg[7]
def rtiOp(instruct):
global pc, reg
reg[6]+=2
memory[psr] = memory[reg[6]-1]
pc = memory[reg[6]-2]
# print("PSR popped: " + hex(memory[reg[6]-1]))
# print("PC popped: " + hex(memory[reg[6]-2]))
# print("SysS:"+hex(reg[6]))
def stOp(instruct):
ans = reg[(instruct>>9)&0b111]
memory[sign_extend(instruct&0b111111111, 9) + pc] = ans
def stiOp(instruct):
ad = memory[sign_extend(instruct&0b111111111, 9) + pc]
memory[ad] = reg[(instruct>>9)&0b111]
def strOp(instruct):
memory[sign_extend(instruct&0b111111, 6) + reg[(instruct>>6)&0b111]] = reg[(instruct>>9)&0b111]
def trapOp(instruct):
global halt
if instruct&0b11111111 == 0x21:
print(chr(reg[0]), end='')
if instruct&0b11111111 == 0x25:
halt = True
if instruct&0b11111111 == 0x22:
ptr = reg[0]
while memory[ptr] != 0:
print(chr(memory[ptr]), end='')
ptr+=1
def rTrap(instruct):
global pc, reg
# print("rTrap " + hex(instruct))
if memory[psr]&0x8000 == 0x8000:
#set OS_SP
reg[6] = memory[lc3asm.symTable['OS_SP']]
#push PSR and PC
#if in user mode, set OS_SP
#change to super mode
reg[6]-=2
memory[reg[6]+1] = memory[psr]
memory[reg[6]] = pc
pc = np.int16(memory[instruct&0b11111111])
if memory[psr]&0x8000 == 0x8000:
#goto super mode
memory[psr]&=0x7FFF
# print("PSR pushed: " + hex(memory[reg[6]-1]))
# print("PC pushed: " + hex(memory[reg[6]]))
# print("SysS:"+hex(reg[6]))
def display():
if memory[ddr_ptr] != 0:
memory[dsr_ptr]&=0x7FFF
print(chr(memory[ddr_ptr]&0xFF), end='')
memory[ddr_ptr] = 0
memory[dsr_ptr]|=0x8000
else:
memory[dsr_ptr]|=0x8000
op_codes = {
0b0001: addOp,
0b0101: andOp,
0b0000: brOp,
0b1100: jmpOp,
0b0100: jsrOp,
0b0010: ldOp,
0b1010: ldiOp,
0b0110: ldrOp,
0b1110: leaOp,
0b1001: notOp,
    # 0b1100 (RET) shares the JMP opcode; jmpOp handles RET when the base register is R7
0b1000: rtiOp,
0b0011: stOp,
0b1011: stiOp,
0b0111: strOp,
0b1111: trapOp
}
def parse(instruct, debug):
op_code = (instruct >> 12) & 0b1111
if op_code in op_codes:
if debug:
print(op_codes[op_code])
if instruct == 0xF025 or instruct == 0xF021 or instruct == 0xF022:
rTrap(instruct)
else:
op_codes[op_code](instruct)
else:
print("NOP")
def loadIn():
for ad, bina in lc3asm.out.items():
memory[ad] = bina
def run(debug = True):
global pc, halt, memory
halt = False
memory[mcr_ptr] = 0b1000000000000000
while memory[mcr_ptr] == 0b1000000000000000:
pc+=1
if debug:
print(hex(pc))
parse(memory[pc-1], debug)
display()
c = input("\n>") if debug else ''
if c == "r":
for i in range(0, 8):
print("R" + str(i) + ": \t" + hex(reg[i]))
print('PSR:\t' + bin(memory[psr]))
print('MCR:\t' + bin(memory[mcr_ptr]))
print('PC: \t' + hex(pc))
elif c == "p":
return
elif c == "ss":
print("----------------")
for i in range(reg[6], 0x3000):
print(hex(i) + ":\t"+hex(memory[i]))
print("----------------")
print("[LC3.py] Assembling LC3 OS")
lc3asm.loadFile("OS_vector_tables.asm")
lc3asm.asm()
loadIn()
pc = np.int16(0x200)
memory[psr] &=0x7FFF #set supervisor mode
print("[LC3.py] Starting LC3 OS")
run(debug = False)
# pc = np.int16(0x3000)
``` |
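A few worked values for the `sign_extend` helper used throughout lc3.py (illustrative only, not part of the module):
```python
print(sign_extend(0b01111, 5))   # 15  -- sign bit clear, value unchanged
print(sign_extend(0b10000, 5))   # -16 -- sign bit set, extended as negative
print(sign_extend(0b11111, 5))   # -1
print(sign_extend(0xFFFF, 16))   # -1  -- a full 16-bit word of ones
```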
{
"source": "Josephlouislg/aiohttpRedisPubSubChat",
"score": 2
} |
#### File: aiohttpRedisPubSubChat/chat_frontend/__main__.py
```python
import argparse
import aioredis
import logging.handlers
import asyncio
import uvloop
from aiohttp import web
from motor.motor_asyncio import AsyncIOMotorClient
from prometheus_client import start_http_server
from chat_frontend.app import websocket_handler, healthcheck
from chat_frontend.room_service import RoomService
log = logging.getLogger(__name__)
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("--port", type=int, default=2000)
ap.add_argument("--host", default='0.0.0.0')
ap.add_argument("--redis_host", default='redis')
ap.add_argument("--redis_port", type=int, default=6432)
ap.add_argument("--mongo_host")
ap.add_argument("--mongo_username", default='')
ap.add_argument("--mongo_password", default='')
ap.add_argument("--mongo_db", default='')
ap.add_argument("--debug", default=False)
return ap.parse_args()
async def create_redis_pool(redis_host='redis', redis_port='', pool_size=1):
return await aioredis.create_redis_pool(
address=f'redis://{redis_host}:{redis_port}',
maxsize=pool_size
)
def create_mongo_client(debug, mongo_host, mongo_username, mongo_password):
auth = f"{mongo_username}:{mongo_password}@" if not debug else ''
return AsyncIOMotorClient(
f'mongodb://{auth}{mongo_host}'
)
async def shutdown(app):
for task in app['tasks']:
if not task.cancelled():
task.cancel()
redis_pool = app['redis_pool']
redis_pool.close()
await redis_pool.wait_closed()
async def create_frontend(args):
redis_pool = await create_redis_pool(redis_host=args.redis_host, redis_port=args.redis_port)
mongo_client = create_mongo_client(
debug=args.debug,
mongo_host=args.mongo_host,
mongo_password=args.mongo_password,
mongo_username=args.mongo_username
)
db = mongo_client[args.mongo_db]
room_service = RoomService(redis=redis_pool, db=db)
loop = asyncio.get_event_loop()
tasks = (
loop.create_task(room_service.listen_new_messages()),
loop.create_task(room_service.ping_pong_task())
)
app = web.Application()
app['db'] = db
app['mongo_client'] = mongo_client
app['tasks'] = tasks
app['mongo_client'] = mongo_client
app['room_service'] = room_service
app['redis_pool'] = redis_pool
app.router.add_route(
'GET', '/ws', websocket_handler
)
app.router.add_route(
'GET', '/health', healthcheck
)
app.on_shutdown.append(shutdown)
return app
def main():
args = parse_args()
host = args.host
port = args.port
try:
start_http_server(9100)
except Exception as e:
log.error(f'Prometheus client error, {e}')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
log.info(f"Listening {host}:{port}")
web.run_app(create_frontend(args), host=host, port=port)
if __name__ == '__main__':
main()
``` |
{
"source": "Josephlouislg/TagSearcher",
"score": 2
} |
#### File: TagSearcher/tag_searcher/app.py
```python
import logging.handlers
from aiohttp import web, WSMsgType
from tag_searcher.tag_tree import TagTreeService
log = logging.getLogger(__name__)
async def request_handler(request):
try:
data = await request.json()
text = data['text']
except Exception as e:
return web.Response(status=400)
tag_tree_service: TagTreeService = request.app['tag_tree_service']
return web.json_response(
status=200,
data={"tags": tag_tree_service.get_tags_by_text(text)}
)
```
#### File: TagSearcher/tag_searcher/__main__.py
```python
import argparse
import csv
import logging.handlers
import asyncio
import uvloop
from aiohttp import web
from tag_searcher.app import request_handler
from tag_searcher.tag_tree import TagTreeService
log = logging.getLogger(__name__)
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("--port", type=int, default=2000)
ap.add_argument("--host", default='0.0.0.0')
ap.add_argument("--tag_file_path", default='tags.csv')
return ap.parse_args()
async def create_app(args):
app = web.Application()
with open(args.tag_file_path) as file:
csv_reader = csv.reader(file, delimiter='\n')
tags = (title[0] for title in csv_reader)
app['tag_tree_service'] = TagTreeService(tags=tags)
app.router.add_route(
'POST', '/text', request_handler
)
return app
def main():
args = parse_args()
host = args.host
port = args.port
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
log.info(f"Listening {host}:{port}")
web.run_app(create_app(args), host=host, port=port)
if __name__ == '__main__':
main()
```
#### File: tests/test_api/test_api.py
```python
from tests.conftest import create_app
async def test_200(test_client):
client = await test_client(create_app)
resp = await client.post(
'/text',
json={"text": "test1"},
headers={'Content-Type': 'application/json'}
)
assert resp.status == 200
data = await resp.json()
assert 'tags' in data
assert 'test1' in data['tags']
async def test_400(test_client):
client = await test_client(create_app)
resp = await client.post(
'/text',
data={"text": "test1"},
headers={'Content-Type': 'application/json'}
)
assert resp.status == 400
async def test_invalid_payload(test_client):
client = await test_client(create_app)
resp = await client.post(
'/text',
json={"test": "test1"},
headers={'Content-Type': 'application/json'}
)
assert resp.status == 400
async def test_empty_text(test_client):
client = await test_client(create_app)
resp = await client.post(
'/text',
json={"text": ""},
headers={'Content-Type': 'application/json'}
)
assert resp.status == 200
data = await resp.json()
assert 'tags' in data
assert data['tags'] == []
```
#### File: tests/test_tags_tree/test_tag_tags_tree.py
```python
import pytest
from tag_searcher.tag_tree import TagTreeService
@pytest.fixture
def tags_tree_service():
return TagTreeService(
tags=[
'tag1',
'tag1 tag2',
'tag2 tag3',
'tag2 tag3 tag4',
'tag5 tag6'
]
)
def test_tags_tree_build():
tags_tree_service = TagTreeService(
tags=[
'tag1',
'tag1 tag2',
'tag2 tag3',
'tag2 tag4',
'tag2 tag3 tag4',
'tag5 tag6'
]
)
assert tags_tree_service.tags_tree == {
'tag1': {
'is_leaf': True,
'childs': {'tag2': {'is_leaf': True, 'childs': {}}}
},
'tag2': {
'is_leaf': False,
'childs': {
'tag3': {
'is_leaf': True,
'childs': {
'tag4': {
'is_leaf': True,
'childs': {}
}
}
},
'tag4': {
'is_leaf': True,
'childs': {}
}
}
},
'tag5': {
'is_leaf': False,
'childs': {
'tag6': {
'is_leaf': True,
'childs': {}
}
}
}
}
def test_tag_search(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='test test tag1 tag2 test test'
)
assert set(tags) == {'tag1', 'tag1 tag2'}
def test_tag_search_with_one_word_beetween_tags(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='test tag1 test tag2'
)
assert set(tags) == {'tag1', 'tag1 tag2'}
def test_tag_search_with_two_word_beetween_tags(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='test tag1 test test tag2'
)
assert set(tags) == {'tag1', 'tag1 tag2'}
def test_finding_multiple_tags_with(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='tag2 tag3 tag4'
)
assert set(tags) == {'tag2 tag3 tag4', 'tag2 tag3'}
def test_no_tags_text_search(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='test test test test'
)
assert set(tags) == set()
def test_all_tags_search(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='tag1 tag2 tag3 tag4 tag2'
)
assert set(tags) == {
'tag1',
'tag1 tag2',
'tag2 tag3',
'tag2 tag3 tag4'
}
def test_nested_tags_search(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='tag1 tag5 tag6 tag2'
)
assert set(tags) == {
'tag1',
'tag1 tag2',
'tag5 tag6'
}
def test_upper_case(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='Tag1 tAg5 taG6 TAG2'
)
assert set(tags) == {
'Tag1',
'Tag1 TAG2',
'tAg5 taG6'
}
def test_special_chars_removing(tags_tree_service: TagTreeService):
tags = tags_tree_service.get_tags_by_text(
text='tag1 \n tag5, tag6. \r\t\n tag2'
)
assert set(tags) == {
'tag1',
'tag1 tag2',
'tag5 tag6'
}
def test_special_chars_removing_from_tag_tree():
tags_tree_service = TagTreeService(
tags=[
'tAg1',
'tAg1,\n\r\ttag2',
'tag2.\ntag3',
]
)
text = 'tag1 tag2 tag3'
assert set(tags_tree_service.get_tags_by_text(text=text)) == {
'tag1',
'tag1 tag2',
'tag2 tag3'
}
``` |
{
"source": "josephl/summit-python",
"score": 3
} |
#### File: samples/twilio-sms/run_twilio.py
```python
import ConfigParser
from flask import Flask, jsonify, request
import json
from twilio.rest import TwilioRestClient
app = Flask(__name__)
config = ConfigParser.ConfigParser()
config.read('config.ini')
@app.route('/sms/<recipient>', methods=['POST'])
def sms_send(recipient):
auth = (
config.get('Twilio', 'account_sid'),
config.get('Twilio', 'auth_token'))
from_number = config.get('Twilio', 'from_number')
body = request.get_data() or 'OH HI THERE'
client = TwilioRestClient(*auth)
message = client.messages.create(to=recipient, from_=from_number,
body=body)
return jsonify(status=message.status)
if __name__ == '__main__':
app.run(debug=True)
``` |
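A hypothetical client call against the `/sms/<recipient>` route above; it assumes the Flask dev server on its default port and uses the third-party `requests` library, neither of which is part of the sample:
```python
import requests

# Placeholder number and body; the response echoes the Twilio message status.
resp = requests.post("http://127.0.0.1:5000/sms/+15005550006", data="Hello from the sample")
print(resp.json())  # e.g. {"status": "queued"}
```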
{
"source": "JosephLteif/Smart-Assistant",
"score": 3
} |
#### File: Smart-Assistant/Features/chatbot.py
```python
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
import numpy
import tflearn
from tensorflow.python.framework import ops
import random
import json
import pickle
with open("Data\\ChatBot_Data\\Data\\intents.JSON") as file:
data = json.load(file)
try:
with open("Data\\ChatBot_Data\\Model\\data.pickle", "rb") as f:
words, labels, training, output = pickle.load(f)
except:
words = []
labels = []
docs_x = []
docs_y = []
for intent in data['intents']:
for pattern in intent['patterns']:
wrds = nltk.word_tokenize(pattern)
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(intent['tag'])
if intent['tag'] not in labels:
labels.append(intent['tag'])
words = [stemmer.stem(w.lower()) for w in words if w not in "?"]
words = sorted(list(set(words)))
labels = sorted(labels)
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
for x, doc in enumerate(docs_x):
bag=[]
wrds = [stemmer.stem(w) for w in doc]
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag)
output.append(output_row)
training = numpy.array(training)
output = numpy.array(output)
with open("Data\\ChatBot_Data\\Model\\data.pickle", "wb") as f:
pickle.dump((words, labels, training, output), f)
ops.reset_default_graph()
# creating a neural network
# defining an input shape for the model
net = tflearn.input_data(shape=[None, len(training[0])])
# adding this fully connected layer to the neural network and it will have 8 neurons for the hidden layer
net = tflearn.fully_connected(net, int(((len(training[0])+len(output[0]))*2)/3))
print(int(((len(training[0])+len(output[0]))*2)/3))
net = tflearn.fully_connected(net, int(((len(training[0])+len(output[0]))*2)/3))
# this allow us to get probabilities for each output
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
# type of neural network DNN
model = tflearn.DNN(net)
try:
model.load("Data\\ChatBot_Data\\Model\\model.tflearn")
except:
# epoch is the number of times the model will see the data
model.fit(training,output,n_epoch=1000, batch_size=8, show_metric=True)
model.save("Data\\ChatBot_Data\\Model\\model.tflearn")
# model.fit(training,output,n_epoch=1000, batch_size=8, show_metric=False)
# model.save("Data\\ChatBot_Data\\Model\\model.tflearn")
def bag_of_words(s, words):
bag = [0 for _ in range(len(words))]
s_words = nltk.word_tokenize(s)
s_words = [stemmer.stem(word.lower()) for word in s_words]
for se in s_words:
for i, w in enumerate(words):
if w == se:
bag[i] = 1
return numpy.array(bag)
def chat(command):
result = model.predict([bag_of_words(command, words)])
result_index = numpy.argmax(result)
tag = labels[result_index]
for tg in data['intents']:
if tg['tag'] == tag:
responses = tg['responses']
context = tg["context"][0]
response=random.choice(responses)
bot_response = [response,context]
return bot_response
```
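An assumed usage of `chat()`; the actual reply depends on the patterns and responses defined in intents.JSON, which is not shown here:
```python
# Illustrative only -- chat() returns a [response, context] pair.
reply, context = chat("hello there")
print(reply)    # a random entry from the matched intent's "responses"
print(context)  # the first entry of that intent's "context" list
```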
#### File: Smart-Assistant/Features/OCR.py
```python
import os
import time
import cv2
import imutils
import numpy as np
import pytesseract
from autocorrect import Speller
from PIL import Image
def VideoOn():
video = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while True:
# check returns true if python can actually read and frame is ndim numpy array
check, frame = video.read()
cv2.imshow('Capturing...', frame)
key = cv2.waitKey(1)
if key == ord('q'):
check, frame = video.read()
a = cv2.imwrite("Data\\OCR_Data\\CaptureImage.jpg", frame)
break
video.release()
cv2.destroyAllWindows()
def TesseractSetup():
# Mention the installed location of Tesseract-OCR in your system
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'
def CropBorder():
# return cropped image from which text needs to be extracted
im = Image.open("Data\OCR_Data\SteveJobsQuote.jpg")
# im = Image.open("./Assets/quote-luck-is-when-skill-meets-opportunity-vinnie-paz-80-71-88.jpg")
if im.mode != 'RGB':
im = im.convert('RGB')
im.save("Data\OCR_Data\SteveJobsQuote.jpg", dpi=(300, 300))
# return border_crop("CaptureImage.jpg")
return cv2.imread("Data\OCR_Data\SteveJobsQuote.jpg")
def ExtractImageData(img):
# cv2.imshow("img", img)
# cv2.waitKey(0)
img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
# cv2.imshow("img", img)
# cv2.waitKey(0)
# image data
try:
data = pytesseract.image_to_osd(img).split()
except:
return "",0
# Detect language
language = data[-4]
# Detect angle
rotation = data[-9]
print(rotation)
print(data)
# return Image Data
return language, rotation
def PreprocessingImage(img, rotation):
cv2.imshow("img", img)
cv2.waitKey(0)
# apply rotation
rotated = imutils.rotate(img, angle=-(int(rotation)))
cv2.imshow("img", rotated)
cv2.waitKey(0)
# Resize the image to a given scale
img = cv2.resize(rotated, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow("img", img)
cv2.waitKey(0)
# Blur using GaussianBlur method
img = cv2.GaussianBlur(img, (5, 5), 0)
cv2.imshow("img", img)
cv2.waitKey(0)
# Convert the image to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# cv2.imshow("img", gray)
# cv2.waitKey(0)
# Apply threshhold
thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_TOZERO)[1]
cv2.imshow("img", thresh1)
cv2.waitKey(0)
# Specify structure shape and kernel size.
# Kernel size increases or decreases the area
# of the rectangle to be detected.
# A smaller value like (10, 10) will detect
# each word instead of a sentence.
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
    # Applying dilation on the threshold image
dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)
cv2.imshow("img", dilation)
cv2.waitKey(0)
# Finding contours
contours, hierarchy = cv2.findContours(
dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Creating a copy of image
im2 = img.copy()
return im2, contours
def CreateFileToPrintTo():
# A text file is created and flushed
file = open("Data\\OCR_Data\\recognized.txt", "w+")
file.write("")
file.close()
def FindContour(im2, contours, language):
# Looping through the identified contours
# Then rectangular part is cropped and passed on
# to pytesseract for extracting text from it
# Extracted text is then written into the text file
result = ""
file = open("Data\\OCR_Data\\recognized.txt", "a", encoding='utf-8')
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
# Drawing a rectangle on copied image
rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Cropping the text block for giving input to OCR
cropped = im2[y:y + h, x:x + w]
try:
# Apply OCR on the cropped image
if language.lower() == 'latin':
text = pytesseract.image_to_string(cropped, lang="eng")
else:
text = pytesseract.image_to_string(cropped)
except:
return "",""
# Storing the text
result += (text + "\n")
return result, file
def AppendResultToFile(result, file):
spell = Speller(only_replacements=True)
result = result.replace(" ", "")
var = spell(result)
file.write(var)
# Close the file
    file.close()
def launch():
# VideoOn()
TesseractSetup()
img = CropBorder()
language, rotation = ExtractImageData(img)
im2, contours = PreprocessingImage(img, rotation)
CreateFileToPrintTo()
result, file = FindContour(im2, contours, language)
AppendResultToFile(result, file)
os.remove('Data\\OCR_Data\\CaptureImage.jpg')
```
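`ExtractImageData` above pulls the script and rotation out of `pytesseract.image_to_osd()` by fixed list positions (`data[-4]`, `data[-9]`), which silently breaks if Tesseract changes its OSD layout. A label-based parse is a sketch of a more robust alternative; the `"Rotate:"`/`"Script:"` prefixes and the placeholder path are assumptions about the OSD report, not taken from the repository.
```python
# Label-based OSD parsing sketch; "page.png" is a placeholder path and the
# "Rotate:"/"Script:" prefixes are assumptions about Tesseract's OSD report.
import cv2
import pytesseract

def osd_language_and_rotation(image_path):
    img = cv2.imread(image_path)
    osd = pytesseract.image_to_osd(img)
    language, rotation = "", 0
    for line in osd.splitlines():
        if line.startswith("Script:"):
            language = line.split(":", 1)[1].strip()
        elif line.startswith("Rotate:"):
            rotation = int(line.split(":", 1)[1].strip())
    return language, rotation

# language, rotation = osd_language_and_rotation("page.png")
```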
{
"source": "JosephLutz/serialCommTest",
"score": 2
} |
#### File: serialCommTest/example_progs/commBoardTests.py
```python
from traceback import format_tb
import threading
from threading import Lock
import termios
import hashlib
import random
import select
import serial
import Queue
import time
import sys
import os
if (sys.hexversion < 0x020100f0):
import TERMIOS
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL
else:
import fcntl
FCNTL = fcntl
TEST_TIMEOUT = 10
NUM_TIMES_PER_BAUD = 1
TEST_BREAK_TIME = 12.0
# ammount of time to allow the serial module to read as many bytes as requested: non-blocking reads
RX_BUFFER_FILL_TIME = 0.02
READ_SIZE = 512
WARMUP_TIME = 0.001
COOLDOWN_TIME =1.0
baudrates = (115200, 57600, 38400, 28800, 19200, 14400, 9600, 4800, 2400, 1200)
def get_state(ser_ports, msg_queue):
out = ''
print_msg_queue(msg_queue)
try:
out += 'event: TX={txEvent}\n'.format(txEvent=ser_ports[0].TX_thread.TX_event.is_set())
except:
out += 'event: TX=[NONE]\n'
for ser in ser_ports:
try:
port = ser.port
except:
port = '[NONE]'
try:
opened = ser.isOpen()
except:
opened = '[NONE]'
try:
alive = ser.TX_thread.is_alive()
except:
alive = '[NONE]'
try:
txEvent = ser.TX_thread.TX_event.is_set()
except:
txEvent = '[NONE]'
try:
open_event = ser.TX_thread.port_open_event.is_set()
except:
open_event = '[NONE]'
try:
            ser.TX_thread.running_lock.acquire()
            running = ser.TX_thread.running
            ser.TX_thread.running_lock.release()
except:
running = '[NONE]'
out += ' port({port:<12}) opened:{opened:<6} Thread:(alive={alive:<6} TX_event={txEvent:<6} open_event={open_event:<6} running={running:<6})\n'.format(
port=port, opened=opened, alive=alive, txEvent=txEvent, open_event=open_event, running=running)
return out
def get_rand_data(bytes, printableChars=True):
rand = random.SystemRandom()
if printableChars:
returnVal = ''.join([chr(rand.randint(33,126)) for i in range(bytes)])
else:
returnVal = ''.join([chr(rand.randint(1,255)) for i in range(bytes)])
return hashlib.sha1(returnVal).hexdigest() + returnVal
def print_msg_queue(msg_queue):
while not msg_queue.empty():
(threadID, thread_name, msg) = msg_queue.get_nowait()
if threadID is None:
print msg
else:
print '{0:<4} {1:<25} {2}'.format(threadID, ('"'+thread_name+'"'), msg)
def setup_serial_port(port, msg_queue, read_timeout=RX_BUFFER_FILL_TIME, write_timeout=None):
# create generic serial port object
if not os.path.exists( port ):
msg_queue.put((None, None, ('!!!!!!!!!! Port "{port}" does not exist. !!!!!!!!!!'.format(port=port))))
raise BaseException
ser = serial.Serial(port=None)
ser.port = port
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS
ser.parity = serial.PARITY_NONE
ser.stopbits = serial.STOPBITS_ONE
ser.xonxoff = False
ser.rtscts = False
ser.timeout = read_timeout
ser.writeTimeout = write_timeout
ser.close()
print 'Generate data for {port}'.format(port=ser.port)
ser.data = get_rand_data(10240, printableChars=False)
ser.read_buffer = [] # ['', 0.0]
ser.port_lock = Lock() # lock exclusive use of hardware
msg_queue.put((None, None, ('Port "{port}" created'.format(port=port))))
return ser
def wait_ports_opened(ser_ports):
# wait for all serial ports to be opened
events = []
for ser in ser_ports:
if not ser.TX_thread.port_open_event.is_set():
events.append(ser.TX_thread.port_open_event)
start_time = time.time()
while (len(events) is not 0):
if not ser.TX_thread.msg_queue.empty():
print_msg_queue(ser.TX_thread.msg_queue)
        # wait 100 mSec before checking again
events[0].wait(0.1)
events = [port_event for port_event in events if not port_event.is_set()]
if ((time.time() - start_time) > TEST_TIMEOUT):
            ser_ports[0].TX_thread.msg_queue.put((None, None,
                ('!!!!!!!!!! Timed out waiting for serial ports to open. !!!!!!!!!!')))
            ser_threads = [ser.TX_thread for ser in ser_ports if not ser.TX_thread.port_open_event.is_set()]
            ser_ports[0].TX_thread.msg_queue.put((None, None,
                ('ports that did not open: {0}'.format('"' + '", "'.join([thread.name for thread in ser_threads]) + '"'))))
raise BaseException
'''
def check_recieved_data(data, ser_thread):
shaHash = hashlib.sha1()
digestLen = shaHash.digest_size * 2
digest = ''
for index in range(len(data)):
for data_index in range(len(data[index][0])):
digest += data[index][0][data_index]
if len(digest) is digestLen:
break
if len(digest) is digestLen:
break
if len(digest) < digestLen:
self.msg_queue.put((ser_thread.threadID, ser_thread.name,
'Did not even recieve the checksum'))
return False
shaHash.update(data[index][0][(data_index + 1):])
for index in range(index + 1, len(data)):
shaHash.update(data[index][0])
return digest == shaHash.hexdigest()
'''
def _open_serial_port(ser_thread):
ser_thread.ser.port_lock.acquire()
if not ser_thread.ser.isOpen():
try:
ser_thread.ser.open()
except serial.SerialException:
ser_thread.msg_queue.put((ser_thread.threadID, ser_thread.name,
('!!!!!!!!!! serial exception when opening port {port}. !!!!!!!!!!'.format(port=ser_thread.ser.port))))
if not os.path.exists(ser_thread.ser.port):
ser_thread.msg_queue.put((ser_thread.threadID, ser_thread.name,
('!!!!!!!!!! serial port no longer exists {port}. !!!!!!!!!!'.format(port=ser_thread.ser.port))))
ser_thread.ser.port_lock.release()
raise BaseException
if not ser_thread.ser.isOpen():
ser_thread.msg_queue.put((ser_thread.threadID, ser_thread.name,
('!!!!!!!!!! Port "{port}" is not open. !!!!!!!!!!'.format(port=ser_thread.ser.port))))
ser_thread.ser.port_lock.release()
raise BaseException
# DEBUG: Disabling tcdrain
#iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(ser_thread.ser.fd)
#iflag |= (TERMIOS.IGNBRK | TERMIOS.IGNPAR)
#termios.tcsetattr(ser_thread.ser.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
ser_thread.ser.setRTS(0)
ser_thread.ser.port_lock.release()
'''
class rxThread(threading.Thread):
def __init__(self, name, threadID, msg_queue, timeout):
threading.Thread.__init__(self)
self.running_lock = Lock()
self.port_open_event = threading.Event() # Blocks untill the port is opened
self.running = False
self.threadID = threadID
self.name = name
self.msg_queue = msg_queue
self.timeout = timeout
self.msg_queue.put((self.threadID, self.name, 'RX THREAD CREATED'))
# sync RX and TX
self.event = threading.Event()
def run(self):
self.running_lock.acquire()
self.running = True
_open_serial_port(self)
# notify this port is opened
if not self.port_open_event.is_set():
self.port_open_event.set()
self.msg_queue.put((self.threadID, self.name, 'RX STARTING'))
# start recieving
try:
while self.running:
self.running_lock.release()
data = self._read_data()
if len(data) is 0:
self.running_lock.acquire()
# no new characters recieved within self.timeout period
self.running = False
else:
self.ser.read_buffer += data
self.running_lock.acquire()
except serial.SerialTimeoutException:
pass
else:
self.running_lock.release()
def _read_data(self):
# Wait untill no data has been recieved for a period of time not less than self.timeout.
# Return all data read.
# The value of self.timeout should be less than self.timeout for this to work properly
start_time = time.time()
read_buffer = []
data = ''
while data is not None:
(rlist, _, _) = select.select([self.ser.fileno()], [], [], self.timeout)
if (len(rlist) is 1) and rlist[0] is self.ser.fileno():
data = self.ser.read(READ_SIZE)
read_buffer.append((data, (time.time() - start_time)),)
else:
data = None
return read_buffer
'''
class txThread (threading.Thread):
def __init__(self, name, threadID, msg_queue, TX_event):
threading.Thread.__init__(self)
self.running_lock = Lock()
self.port_open_event = threading.Event() # Blocks untill the port is opened
self.TX_event = TX_event # Releases all the TX threads at the same time (when they are all opened)
self.running = False
self.threadID = threadID
self.name = name
self.msg_queue = msg_queue
self.msg_queue.put((self.threadID, self.name, 'TX THREAD CREATED'))
def run(self):
self.running_lock.acquire()
self.running = True
_open_serial_port(self)
self.msg_queue.put((self.threadID, self.name, 'TX WAITING'))
# notify this port is opened
if not self.port_open_event.is_set():
self.port_open_event.set()
# wait for ALL serial ports to be opened
self.TX_event.wait()
self.msg_queue.put((self.threadID, self.name, 'TX STARTING'))
# start transmit
self.ser.port_lock.acquire()
self.ser.setRTS(1)
self.ser.port_lock.release()
time.sleep(WARMUP_TIME)
try:
while self.running:
self.running_lock.release()
self.work_funct(self.ser)
self.running_lock.acquire()
except serial.SerialTimeoutException:
pass
else:
self.running_lock.release()
# stop transmit
self.ser.port_lock.acquire()
if (self.ser.fd > 0):
#termios.tcdrain(self.ser.fd) # DEBUG: Disabling tcdrain
time.sleep(COOLDOWN_TIME)
self.ser.setRTS(0)
if self.ser.isOpen():
self.ser.close()
self.ser.port_lock.release()
self.msg_queue.put((self.threadID, self.name, 'TX STOPPING'))
def write_work_function(ser):
start_time = time.time()
try:
ser.write(ser.data)
ser.close()
ser.TX_thread.msg_queue.put((ser.TX_thread.threadID, ser.TX_thread.name,
('-- {time:<6.4} sec. ({port:<12} @ {baud:<6}).'.format(
time=(time.time() - start_time), baud=ser.baudrate, port=ser.port))))
except serial.SerialTimeoutException:
        if ser.isOpen():
ser.close()
ser.TX_thread.msg_queue.put((ser.TX_thread.threadID, ser.TX_thread.name,
('-- {time:<6.4} sec. +++++ SerialTimeoutException +++++ ({port:<12} @ {baud:<6})'.format(
time=(time.time() - start_time), baud=ser.baudrate, port=ser.port))))
def test1(funct, ser_ports, msg_queue):
    msg_queue.put((None, None, '\n\n{line:{fill}^40}\n{fill}{msg:^38}{fill}\n{line:{fill}^40}\n'.format(line='', fill='*', msg='SETUP FOR TEST')))
# Create the threads
id_count = 1
TX_event = threading.Event()
for ser in ser_ports:
# TX Thread
ser.TX_thread = txThread(
'{port}_TX@{baud}'.format(port=os.path.basename(ser.port), baud=ser.baudrate),
id_count, msg_queue, TX_event)
        ser.TX_thread.ser = ser  # Need to access the serial port inside the thread
ser.TX_thread.work_funct = funct
# RX Thread
'''
ser.RX_thread = rxThread(
'{port}_RX@{baud}'.format(port=os.path.basename(ser.port), baud=ser.baudrate),
(id_count + 1), msg_queue, (TEST_TIMEOUT / 2))
ser.RX_thread.ser = ser # Need to acces the serial port inside the thread
'''
id_count += 2
# start the threads runnung
running_txThreads = []
for ser in ser_ports:
'''ser.RX_thread.start()'''
ser.TX_thread.start()
running_txThreads.append(ser.TX_thread.name)
num_running_txThreads = len(running_txThreads) + 1
# wait for all serial ports to be opened
wait_ports_opened(ser_ports)
    # release all transmit threads to start sending
if not TX_event.is_set():
TX_event.set()
    msg_queue.put((None, None, '\n\n{line:{fill}^40}\n{fill}{msg:^38}{fill}\n{line:{fill}^40}\n'.format(line='', fill='*', msg='START TEST')))
# loop while the threads run
start_time = time.time()
while (len(running_txThreads) is not 0):
running_txThreads = []
# display what is in the message queue
print_msg_queue(msg_queue)
# have the threads been running long enough
if ((time.time() - start_time) >= TEST_TIMEOUT):
# Stop all threads from running due to timeout
for ser in ser_ports:
ser.TX_thread.running_lock.acquire()
ser.TX_thread.running = False
ser.TX_thread.running_lock.release()
# collect which threads are running
for ser in ser_ports:
ser.TX_thread.running_lock.acquire()
if ser.TX_thread.running:
running_txThreads.append('{padding}"{name}"'.format(padding=('.'*10), name=ser.TX_thread.name))
ser.TX_thread.running_lock.release()
if (len(running_txThreads) is not num_running_txThreads):
num_running_txThreads = len(running_txThreads)
            # create the message to display which threads are running
if len(running_txThreads) is 0:
msg = ' ***** No threads left running. *****'
else:
msg = 'Running threads:\n' + '\n'.join(running_txThreads)
# queue up the message
msg_queue.put((None, None, ('\n{msg}'.format(line=('-'*30), msg=msg))))
msg_queue.put((None, None, ' ***** Wait for all TX threads to terminate. *****'))
# display what is in the message queue
print_msg_queue(msg_queue)
# wait for all threads to finish
non_terminated_len = len([ser.TX_thread for ser in ser_ports if ser.TX_thread.is_alive()] + [None])
while non_terminated_len is not 0:
print_msg_queue(msg_queue)
non_terminated = [ser.TX_thread for ser in ser_ports if ser.TX_thread.is_alive()]
if len(non_terminated) is not non_terminated_len:
non_terminated_len = len(non_terminated)
msg_queue.put((None, None, get_state(ser_ports, msg_queue)))
for ser in ser_ports:
ser.TX_thread.join()
'''
msg_queue.put((None, None, 'Time of TX threads: {time:<6.4} sec.'.format(time=(time.time() - start_time))))
msg_queue.put((None, None, ' ***** Wait for all RX threads to terminate. *****'))
# display what is in the message queue
print_msg_queue(msg_queue)
# wait for all threads to finish
for ser in ser_ports:
ser.RX_thread.join()
'''
msg_queue.put((None, None, 'All threads stopped. Total test time: {time:<6.4} sec.'.format(time=(time.time() - start_time))))
    msg_queue.put((None, None, '\n\n{line:{fill}^40}\n{fill}{msg:^38}{fill}\n{line:{fill}^40}\n'.format(line='', fill='*', msg='STOP TEST')))
msg_queue.put((None, None, '\nSleep: {time:<6.4} sec.\n'.format(time=TEST_BREAK_TIME)))
print_msg_queue(msg_queue)
time.sleep(TEST_BREAK_TIME)
def main():
msg_queue = Queue.Queue()
ser_ports = (
setup_serial_port('/dev/ttyACM0', msg_queue),
setup_serial_port('/dev/ttyACM1', msg_queue),
setup_serial_port('/dev/ttyACM2', msg_queue),
setup_serial_port('/dev/ttyACM3', msg_queue),
setup_serial_port('/dev/ttyACM4', msg_queue),
setup_serial_port('/dev/ttyACM5', msg_queue),
setup_serial_port('/dev/ttyACM6', msg_queue),
setup_serial_port('/dev/ttyACM7', msg_queue),
)
# choose which baudrates to test at
selected_baudrates = baudrates
# run the test NUM_TIMES_PER_BAUD times at the
# baud rate or forever if NUM_TIMES_PER_BAUD is 0
count = NUM_TIMES_PER_BAUD
try:
while ((count is not 0) or (NUM_TIMES_PER_BAUD is 0)):
# run the test for each baudrate
for baud in selected_baudrates:
                msg_queue.put((None, None, '\n\n{line:{fill}^40}\n{fill}{msg:^38}{fill}\n{line:{fill}^40}\n'.format(line='', fill='*', msg='BAUDRATE = {0}'.format(baud))))
# set the baud rate for each serial port
for ser in ser_ports:
ser.baudrate = baud
test1(write_work_function, ser_ports, msg_queue)
msg_queue.put((None, None, '\n\n'))
            msg_queue.put((None, None, '{line:*^40}\n\n\n'.format(line='')))
if (NUM_TIMES_PER_BAUD is not 0):
count -= 1
except:
error_out = get_state(ser_ports, msg_queue)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_out += '{0}\n'.format('='*80)
error_out += 'lineno:{lineno}, fname:{fname}'.format(fname=fname, lineno=exc_tb.tb_lineno)
#for line in format_tb(exc_tb):
# error_out += '{0}\n'.format(line)
        print('\n{line:{fill}^80}\n{out}\n{line:{fill}^80}'.format(line='', fill='#', out=error_out))
for ser in ser_ports:
# ignore locking
ser.TX_thread.running = False
if not ser.TX_thread.port_open_event.is_set():
ser.TX_thread.port_open_event.set()
if not ser.TX_thread.TX_event.is_set():
ser.TX_thread.TX_event.set()
if __name__ == "__main__":
main()
```
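The harness above multiplexes many ports with TX threads, RTS toggling and banner logging; stripped of that machinery, the per-port pattern it exercises is a plain write followed by `select`-gated reads. A minimal single-port sketch follows; the device path is a placeholder and the sketch targets Python 3 with current pyserial rather than the Python 2 code above.
```python
# Single-port sketch of the write / select / read cycle used by the harness.
# '/dev/ttyUSB0' is a placeholder device; pyserial must be installed.
import select
import serial

def loopback_once(port='/dev/ttyUSB0', payload=b'hello', fill_time=0.02):
    ser = serial.Serial(port, baudrate=115200, timeout=fill_time)
    try:
        ser.write(payload)
        received = b''
        while True:
            rlist, _, _ = select.select([ser.fileno()], [], [], fill_time)
            if not rlist:
                break                   # nothing new within the fill timeout
            received += ser.read(512)   # same READ_SIZE idea as above
        return received
    finally:
        ser.close()
```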
#### File: JosephLutz/serialCommTest/serialData.py
```python
from OrionPythonModules import serial_settings
from msgMonitor import CREATE_SERIAL_PORT
from msgMonitor import PORT_OPENED
from msgMonitor import PORT_CLOSED
from msgMonitor import REPORT_DATA_RECIEVED
import threading
import serial
import select
import termios
import Queue
import time
import sys
import os
if sys.hexversion < 0x020100f0:
import TERMIOS
else:
TERMIOS = termios
from config import *
class SerialData(serial.Serial):
#Used by the txThread and rxThread as the DataSendObj and dataGetObj.
LXM_MODE_VALUES = [
u'RS-232', u'RS-485 2-wire',
u'RS-485/422 4-wire', u'Loopback'
]
LXM_SERIAL_TYPES = {
u'RS232': LXM_MODE_VALUES[0],
u'RS485': LXM_MODE_VALUES[1],
u'RS422': LXM_MODE_VALUES[2],
None: LXM_MODE_VALUES[3],
}
def __init__(self, port, packetSource, msgQueue=None, readTimeout=SERIAL_PORT_READ_TIMEOUT, writeTimeout=None,
interCharTimeout=None):
serial.Serial.__init__(self,
port = None, #number of device, numbering starts at
#zero. if everything fails, the user
#can specify a device string, note
#that this isn't portable anymore
#port will be opened if one is specified
baudrate=115200, #baudrate
bytesize=serial.EIGHTBITS, #number of databits
parity=serial.PARITY_NONE, #enable parity checking
stopbits=serial.STOPBITS_ONE, #number of stopbits
timeout=readTimeout, #set a timeout value, None to wait forever
xonxoff=0, #enable software flow control
rtscts=0, #enable RTS/CTS flow control
writeTimeout=writeTimeout, #set a timeout for writes
dsrdtr=None, #None: use rtscts setting, dsrdtr override if true or false
interCharTimeout=interCharTimeout #Inter-character timeout, None to disable
)
if isinstance(port, str) or isinstance(port, unicode):
self.port = os.path.normpath(port)
else:
            # Using an integer is not as reliable (a guess is made).
self.port = port
# Queue for sending state back to messaging thread
self.msgQueue = msgQueue
# lock for when a thread needs exclusive access to the serial port
self.portLock = threading.Lock() # lock exclusive use of hardware
# list of sent packet information
self.sentPackets = [] #[(packetID, packetLength, hash), ...]
# place holder populated when the txThread is created
self.txThread = None
# data recieved (list of tuples, each containing data read and time since last read)
self.readBuffer = [] # [(data, time), ...]
# place holder populated when the rxThread is created
self.rxThread = None
# Queue that holds data packets to be sent
self.packetSource = packetSource
if self.msgQueue is not None:
self.msgQueue.put((None, {'port': self.port}, CREATE_SERIAL_PORT))
def set_serial_mode(self, mode=None):
def mode_in(mode):
if ((isinstance(mode, str) or isinstance(mode, unicode)) and
(unicode(mode.upper()) in SerialData.LXM_SERIAL_TYPES.keys())):
return SerialData.LXM_SERIAL_TYPES[mode]
elif ((isinstance(mode, str) or isinstance(mode, unicode)) and
(unicode(mode) in SerialData.LXM_SERIAL_TYPES.values())):
return unicode(mode)
elif isinstance(mode, int) and (mode >= 0) and (mode < len(SerialData.LXM_MODE_VALUES)):
return SerialData.LXM_MODE_VALUES[mode]
else:
return u'Loopback'
settings = serial_settings.SerialSettings()
settings.cards = [{
'type': '124',
'ports': [{}, {}, {}, {}, ]
}, {
'type': '124',
'ports': [{}, {}, {}, {}, ]
}]
if isinstance(mode, tuple) and len(mode) is 8:
for mode_index in range(0, 4):
settings.cards[0]['ports'][mode_index]['type'] = mode_in(mode[mode_index])
for mode_index in range(0, 4):
settings.cards[1]['ports'][mode_index]['type'] = mode_in(mode[mode_index])
elif isinstance(mode, str) or isinstance(mode, unicode) or isinstance(mode, int):
mode = mode_in(mode)
for mode_index in range(0, 4):
settings.cards[0]['ports'][mode_index]['type'] = mode
for mode_index in range(0, 4):
settings.cards[1]['ports'][mode_index]['type'] = mode
else:
mode = 'Loopback'
for mode_index in range(0, 4):
settings.cards[0]['ports'][mode_index]['type'] = mode
for mode_index in range(0, 4):
settings.cards[1]['ports'][mode_index]['type'] = mode
settings.apply()
def open_serial_port(self):
self.portLock.acquire()
if not self.isOpen():
if not os.path.exists(self.port):
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, None,
'Serial port {port} does not exist.'.format(port=self.port)))
self.portLock.release()
return False
try:
self.open()
except serial.SerialException:
if not os.path.exists(self.port):
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
                            ('SerialException while opening port {port}, '
                             'and the port disappeared after the open attempt.'.format(port=self.port))))
else:
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'SerialException while opening port {port}.'.format(port=self.port)))
self.portLock.release()
return False
if not self.isOpen():
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'Serial port {port} would not open with specified port configuration.'.format(port=self.port)))
self.portLock.release()
return False
if ENABLE_RTS_LINE:
# NOTE: Set RTS back to False as soon as possible after open.
# open resets RTS True when RTS/CTS flow control disabled
# (re)set RTS to off
self.setRTS(False)
if ENABLE_DTR_LINE:
# set DTR to on
self.setDTR(True)
if ENABLE_TCDRAIN:
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.fd)
iflag |= (TERMIOS.IGNBRK | TERMIOS.IGNPAR)
termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, {'port': self.port}, PORT_OPENED))
self.portLock.release()
return True
def close_serial_port(self):
self.portLock.acquire()
if self.isOpen():
if ENABLE_RTS_LINE:
# set RTS to off
self.setRTS(False)
if ENABLE_DTR_LINE:
# set DTR to off
self.setDTR(False)
# close the port
self.close()
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, {'port': self.port}, PORT_CLOSED))
self.portLock.release()
#
# These methods determine how the port is used
#
def thread_send_startup(self):
self.sentPackets = []
# opent the port
if not self.open_serial_port():
raise BaseException
def thread_send_start(self):
if ENABLE_RTS_LINE:
self.portLock.acquire()
# set RTS to on
self.setRTS(True)
self.portLock.release()
time.sleep(SERIAL_PORT_WARMUP_TIME)
def send_data(self):
start_time = time.time()
if self.packetSource.queue.empty():
return False
# get the dataTuple from the Queue
dataTuple = None
try:
while dataTuple is None:
dataTuple = self.packetSource.queue.get_nowait()
except Queue.Empty:
return False
# notify we are using a packet
self.packetSource.packetUsed.set()
# write the data
try:
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'Started TX on {packetLength} byte packet {packetID} @ {time}'.format(
packetID=dataTuple[1], time=(time.time() - start_time), packetLength=dataTuple[2])))
self.write(dataTuple[0])
if self.msgQueue is not None:
                self.msgQueue.put((self.txThread.threadID, None,
                    'Finished TX on {packetLength} byte packet {packetID} @ {time}'.format(
                        packetID=dataTuple[1], time=(time.time() - start_time), packetLength=dataTuple[2])))
except serial.SerialTimeoutException:
if self.msgQueue is not None:
                self.msgQueue.put((self.txThread.threadID, None, 'SerialTimeoutException during packet write'))
return False
# store tuple of packet info: (packetID, packetLength, hash)
self.sentPackets.append(dataTuple[1:])
return True
def thread_send_stop(self):
if (self.fd > 0):
if ENABLE_RTS_LINE:
self.portLock.acquire()
if ENABLE_TCDRAIN:
termios.tcdrain(self.fd)
time.sleep(SERIAL_PORT_COOLDOWN_TIME)
# set RTS to off
self.setRTS(False)
self.portLock.release()
# use the message queue to send self.sentPackets
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, self.sentPackets, REPORT_DATA_RECIEVED))
def thread_get_startup(self):
# reset the readBuffer
self.readBuffer = []
# open the port
if not self.open_serial_port():
raise BaseException
def thread_get_start(self):
pass
def get_data(self):
reading = True
bytes_read = 0
start_time = time.time()
while reading:
(rlist, _, _) = select.select([self.fileno()], [], [], self.timeout)
if (len(rlist) is 1) and rlist[0] is self.fileno():
data = self.read(NUM_BYTES_TO_READ)
bytes_read += len(data)
self.readBuffer.append((data, (time.time() - start_time)),)
else:
reading = False
if bytes_read is 0:
return False
return True
def thread_get_stop(self):
# send the readBuffer in the message queue
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, self.readBuffer, 'Data read before timeout.'))
if __name__ == '__main__':
import tests.serialData_test
tests.serialData_test.runtests
```
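`SerialData.send_data()` pops tuples shaped `(payload, packetID, packetLength, hash)` from `packetSource.queue` and signals `packetSource.packetUsed` after each one. The sketch below is a stand-in producer that satisfies that contract; the `PacketSource` class itself is an assumption, not part of the repository.
```python
# Stand-in producer for the packet-source contract consumed by send_data():
# each queue item is (payload, packetID, packetLength, hash).
import hashlib
import threading
try:
    import Queue  # Python 2, as in the module above
except ImportError:
    import queue as Queue  # Python 3 fallback


class PacketSource(object):
    def __init__(self):
        self.queue = Queue.Queue()
        self.packetUsed = threading.Event()

    def add_packet(self, packet_id, payload):
        digest = hashlib.sha1(payload).hexdigest()
        self.queue.put((payload, packet_id, len(payload), digest))


source = PacketSource()
source.add_packet(0, b'x' * 1024)  # one 1 KiB packet ready for send_data()
```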
{
"source": "Joseph-Lux/go",
"score": 4
} |
#### File: Joseph-Lux/go/Board.py
```python
from Piece import Piece
from Group import Group
class Board:
def __init__(self):
self.pieces = [[Piece(row, column) for column in range(19)] for row in range(19)]
self.groups = []
self.turn = 'X'
self.ko = None
def setPiecesBoard(self):
for row in self.pieces:
for piece in row:
piece.board = self
def checkMove(self, row, column):
# Check to see if there is already a piece there
if self.pieces[row][column].player != '-':
return False
# Check to see if move is killing itself
# Return the reverse of the ko property
# This will return true, we can go there, if there is no ko,
# but false, we cannot go there if there is a ko
return True
def addPiece(self, row, column):
# Change the player of the piece at the position of the move
self.pieces[row][column].player = self.turn
# Calculate the liberties of the new move
self.pieces[row][column].calculateLiberties()
# Update groups
self.addToGroups(row, column)
# Update liberties of opposing pieces around the move
self.updateLibsAround(row, column)
# Remove groups
self.removeGroups()
# Switch the player
self.turn = self.oppositeTurn()
def oppositeTurn(self):
if self.turn == 'X':
return 'O'
else:
return 'X'
def addToGroups(self, row, column):
# Make a new group for the new piece
newGroup = Group(self.turn, self)
newGroup.appendPiece(self.pieces[row][column])
# Add the upper neighbor if it matches player
if row != 0 and self.pieces[row - 1][column].player == self.turn:
newGroup.appendGroup(self.pieces[row - 1][column].group)
# Add the right neighbor if it matches player
if column != 18 and self.pieces[row][column + 1].player == self.turn:
newGroup.appendGroup(self.pieces[row][column + 1].group)
# Add the lower neighbor if it matches player
if row != 18 and self.pieces[row + 1][column].player == self.turn:
newGroup.appendGroup(self.pieces[row + 1][column].group)
# Add the left neighbor if it matches player
if column != 0 and self.pieces[row][column - 1].player == self.turn:
newGroup.appendGroup(self.pieces[row][column - 1].group)
newGroup.calculateLiberties()
self.groups.append(newGroup)
def removeGroups(self):
for group in self.groups:
if not group.calculateLiberties():
for piece in group.pieces:
piece.player = '-'
self.groups.remove(group)
def updateLibsAround(self, row, column):
# Update upper neighbor
if row != 0 and self.pieces[row - 1][column].player == self.oppositeTurn():
self.pieces[row - 1][column].liberties -= 1
self.pieces[row - 1][column].group.liberties -= 1
# Update right neighbor
if column != 18 and self.pieces[row][column + 1].player == self.oppositeTurn():
self.pieces[row][column + 1].liberties -= 1
self.pieces[row][column + 1].group.liberties -= 1
# Update lower neighbor
if row != 18 and self.pieces[row + 1][column].player == self.oppositeTurn():
self.pieces[row + 1][column].liberties -= 1
self.pieces[row + 1][column].group.liberties -= 1
# Update left neighbor
if column != 0 and self.pieces[row][column - 1].player == self.oppositeTurn():
self.pieces[row][column - 1].liberties -= 1
self.pieces[row][column - 1].group.liberties -= 1
def outputLibs(self):
string = ''
for row in self.pieces:
for item in row:
string += str(item.liberties) + ' '
string += "\n"
print(string)
def __str__(self):
# Iterate over each row and then iterate over the items in each row,
# keeping spaces between each item and newlines between each row.
string = ''
for row in self.pieces:
for item in row:
string += str(item) + ' '
string += "\n"
return string
```
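A quick console-level exercise of the `Board` class above; `Piece` and `Group` come from the same repository and are assumed to be importable alongside it.
```python
# Console exercise of Board: place a few alternating stones and inspect state.
from Board import Board

board = Board()
board.setPiecesBoard()                   # give each Piece a back-reference to the board
for row, column in [(3, 3), (3, 4), (15, 15)]:
    if board.checkMove(row, column):
        board.addPiece(row, column)      # Board alternates 'X' and 'O' internally
print(board)                             # ASCII rendering of the 19x19 position
board.outputLibs()                       # per-point liberty counts
```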
#### File: Joseph-Lux/go/GoGUI.py
```python
import pygame, sys
from pygame.locals import *
pygame.init() # Initialize pygame
FPS = 50 # Frame rate for the game
fpsClock = pygame.time.Clock() # Clock
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0, 40)
YELLOW = (255, 255, 0, 30)
BLUE = (0, 0, 255)
# Make display surface
DISPLAYSURF = pygame.display.set_mode((800, 800), 0, 32)
pygame.display.set_caption('Go')
# Load images
BACKGROUNDIMAGE = pygame.image.load('ash.jpg')
BLACKPIECE = pygame.transform.scale(pygame.image.load('1024px-Realistic_Go_Stone.svg.png'), (40, 40))
WHITEPIECE = pygame.transform.scale(pygame.image.load('1024px-Realistic_White_Go_Stone.svg.png'), (40, 40))
HL_BLACKPIECE = BLACKPIECE.copy()
HL_WHITEPIECE = WHITEPIECE.copy()
HL_BLACKPIECE.fill((255, 255, 255, 128), None, pygame.BLEND_RGBA_MULT)
HL_WHITEPIECE.fill((255, 255, 255, 128), None, pygame.BLEND_RGBA_MULT)
##################################
# Event Handling #
##################################
# Check if move can be made and make it if it can
def handleClick(box, board):
if not box[0] or not box[1]:
return
column = (box[0] - 20) // 40
row = (box[1] - 20) // 40
if board.checkMove(row, column):
board.addPiece(row, column)
# Returns a tuple with the x and y position of
# the box where the mouse is hovering
def getBoxAtMousePos(mousex, mousey):
for boxx in range(20, 780, 40):
for boxy in range(20, 780, 40):
boxRect = pygame.Rect(boxx, boxy, 40, 40)
if boxRect.collidepoint(mousex, mousey):
return (boxx, boxy)
return (None, None)
##################################
# Drawing methods #
##################################
# Draws the board onto DISPLAYSURF
def drawBoard(box, board):
# Background image
DISPLAYSURF.blit(BACKGROUNDIMAGE, (0, 0))
# Lines
for x in range(1, 20):
pygame.draw.line(DISPLAYSURF, BLACK, (40 * x, 40), (40 * x, 760))
for y in range(1, 20):
pygame.draw.line(DISPLAYSURF, BLACK, (40, 40 * y), (760, 40 * y))
# Circles for star points
for x in range(3):
for y in range(3):
pygame.draw.circle(DISPLAYSURF, BLACK, (160 + x * 240, 160 + y * 240), 3)
drawPieces(board)
# Draw cursor on the board
if box[0] and box[1] and board.checkMove((box[1] - 20) // 40, (box[0] - 20) // 40):
if board.turn == 'O':
DISPLAYSURF.blit(HL_WHITEPIECE, box)
else:
DISPLAYSURF.blit(HL_BLACKPIECE, box)
# Draw pieces
def drawPieces(board):
for row in board.pieces:
for piece in row:
if piece.player == 'X':
DISPLAYSURF.blit(BLACKPIECE, (20 + 40 * piece.column, 20 + 40 * piece.row))
elif piece.player == 'O':
DISPLAYSURF.blit(WHITEPIECE, (20 + 40 * piece.column, 20 + 40 * piece.row))
##################################
# Main game loop #
##################################
def runGame(board):
    mousex, mousey = 0, 0  # default until the first MOUSEMOTION event arrives
    while True:
mouseClicked = False
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
elif event.type == KEYUP:
if event.key == K_k:
print(board)
board.outputLibs()
mousedBox = getBoxAtMousePos(mousex, mousey)
if mouseClicked:
handleClick(mousedBox, board)
drawBoard(mousedBox, board)
pygame.display.update()
fpsClock.tick(FPS)
```
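This module defines the event loop but no entry point in the excerpt above; a likely wiring is sketched below and is hypothetical, since the repository presumably connects `Board` and `runGame` elsewhere.
```python
# Hypothetical entry point; the repository presumably wires this up elsewhere.
from Board import Board

if __name__ == '__main__':
    board = Board()
    board.setPiecesBoard()
    runGame(board)
```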
{
"source": "josephm28/math-nerd-py",
"score": 4
} |
#### File: math-nerd-py/number-types/number_type.py
```python
import divisors
class NumberType:
def __init__(self, num):
self.num = num
self.is_prime = False
self.divisors = divisors.Find(self.num)
self.determine_type()
def show_rundown(self):
print("Looking at number:\t", self.num)
print("\tDivisors:\t", self.divisors.divisors)
print("\tProper Divisors:", self.divisors.proper_divisors)
print("\tType:\t\t", self.type)
def determine_type(self):
if self.num == 0:
self.type = "Zero"
elif self.divisors.proper_sum == self.num:
self.type = "Perfect"
elif self.divisors.proper_sum > self.num:
self.type = "Abundant"
elif self.divisors.proper_sum < self.num:
if self.divisors.proper_sum == 1:
self.is_prime = True
self.type = "Deficient"
```
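Usage sketch for `NumberType`; the sibling `divisors` module (whose `Find` exposes `.divisors`, `.proper_divisors` and `.proper_sum`, as used above) is assumed to be importable.
```python
# Classify a few numbers; 6 and 28 are perfect, 12 abundant, 13 deficient (prime).
from number_type import NumberType

for n in (6, 12, 13, 28):
    NumberType(n).show_rundown()
```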
{
"source": "josephM96/PGM_project",
"score": 2
} |
#### File: PGM_project/model/modules.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from functools import partial
from model.model_utils import concat_elu
import numpy as np
from model.model_utils import *
from scipy import linalg as la
logabs = lambda x: torch.log(torch.abs(x))
# dictionary for indicating kernel size, kernel valid regrion
# "masking convolution" or "normal convolution with shift and crop"
# what is mixture indicator?
# is horizontal stream reference more than 1 row?
# differnece between conditional dependence and corresponding (r, g, b) selection method
# and mixture logistic and conditioning on the whole image in the pixelCNN++
# where to deploy 3 NIN modules?
# apply dropout
# network in network module
class nin(nn.Module):
def __init__(self, in_channels, out_channels):
super(nin, self).__init__()
self.conv = weight_norm(nn.Conv2d(in_channels, out_channels, 1))
def forward(self, x):
return self.conv(x)
class gated_residual_conv(nn.Module):
def __init__(self, in_channels, is_horizontal=False, is_decoder=False):
super(gated_residual_conv, self).__init__()
self.is_horizontal = is_horizontal
self.is_decoder = is_decoder
conv_op = down_rightward_conv if is_horizontal else downward_conv
self.conv_1 = conv_op(2 * in_channels, in_channels)
self.conv_2 = conv_op(2 * in_channels, 2 * in_channels)
"""
Encoder
horizontal stream input : previous layer's horizontal output, current layer's vertical output(dim=C)
vertical stream input : previous layer's vertical output
Decoder
horizontal stream input : previous layer's horizontal output, CONCAT(current layer's vertical output(C),
symmetric horizontal output from Encoder)(dim=2C)
vertical stream input : previous layer's vertical output, symmetric vertical output from Encoder(dim=C)
"""
if self.is_decoder:
if self.is_horizontal:
self.nin = nin(2 * 2 * in_channels, in_channels)
else:
self.nin = nin(2 * in_channels, in_channels)
else:
if self.is_horizontal:
self.nin = nin(2 * in_channels, in_channels)
self.dropout = nn.Dropout2d(0.5)
def forward(self, x, shortcut_or_vertical_output=None):
original_x = x
x = self.conv_1(concat_elu(x))
if shortcut_or_vertical_output is not None:
x += self.nin(concat_elu(shortcut_or_vertical_output))
x = concat_elu(x)
x = self.dropout(x)
x = self.conv_2(x)
x, gate = x.chunk(2, dim=1) # split across the channel dimension
x *= torch.sigmoid(gate) # gating x
return original_x + x
# "down" means "vertical" stream
# "downright" means "horizontal" stream
class downward_conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(2, 3), stride=(1, 1), top_pad_output=False):
super(downward_conv, self).__init__()
# for vertical, (left, right, top) have to be padded
self.top_pad_output = top_pad_output
self.pad = nn.ZeroPad2d((int((kernel_size[1] - 1) / 2), (int((kernel_size[1] - 1) / 2)), kernel_size[0] - 1, 0))
self.conv = weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))
if self.top_pad_output:
# down shift means removing the last row of output and add padding at the first index of row
# so that it prevents prediction operation of the last row
self.down_shift = down_shift
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
x = self.down_shift(x) if self.top_pad_output else x
return x
class down_rightward_conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(2, 2), stride=(1, 1), left_pad_output=False):
super(down_rightward_conv, self).__init__()
# for horiontal, (left, top) have to be padded
self.left_pad_output = left_pad_output
self.pad = nn.ZeroPad2d((kernel_size[1] - 1, 0, kernel_size[0] - 1, 0))
self.conv = weight_norm(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))
if self.left_pad_output:
# right shift means removing the last column of output and add padding at the first index of column
# so that it prevents prediction operation of the last column
self.right_shift = right_shift
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
x = self.right_shift(x) if self.left_pad_output else x
return x
class downward_deconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(2, 3), stride=(2, 2)):
super(downward_deconv, self).__init__()
# output_padding=1 -> add padding to bottom and right
self.deconv = weight_norm(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, output_padding=1))
self.kernel_size = kernel_size
def forward(self, x):
x = self.deconv(x)
kernel_H, kernel_W = self.kernel_size
# cropping spatial dimension => removing null regions
return x[Ellipsis, :-(kernel_H - 1), int(np.floor(kernel_W / 2)):-int(np.floor(kernel_W / 2))]
class down_rightward_deconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(2, 2), stride=(2, 2)):
super(down_rightward_deconv, self).__init__()
self.deconv = weight_norm(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, output_padding=1))
self.kernel_size = kernel_size
def forward(self, x):
x = self.deconv(x)
kernel_H, kernel_W = self.kernel_size
# cropping spatial dimension => removing null regions
return x[Ellipsis, :-(kernel_H - 1), :-(kernel_W - 1)]
#################### From this line, modules for Glow ####################
class ActNorm(nn.Module):
def __init__(self, in_channel, logdet=True):
super().__init__()
self.shift = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
self.logdet = logdet
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
mean = (flatten.mean(1).unsqueeze(1).unsqueeze(2).unsqueeze(3).permute(1, 0, 2, 3))
std = (flatten.std(1).unsqueeze(1).unsqueeze(2).unsqueeze(3).permute(1, 0, 2, 3))
self.shift.data.copy_(-mean)
self.scale.data.copy_(1/(std+1e-6))
def forward(self, input):
""" x(data) -> z(latent) """
_, _, H, W = input.shape
if self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
log_abs = logabs(self.scale)
logdet = H * W * torch.sum(log_abs)
if self.logdet:
return self.scale*(input+self.shift), logdet
else:
return self.scale * (input + self.shift)
def reverse(self, output):
""" z(latent) -> x(data) """
return output / self.scale - self.shift
class InvConv2d(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = torch.randn(in_channel, in_channel)
q, _ = torch.qr(weight)
weight = q.unsqueeze(2).unsqueeze(3)
self.weight = nn.Parameter(weight)
def forward(self, input):
_, _, H, W = input.shape
out = F.conv2d(input, self.weight)
logdet = (
H * W * torch.slogdet(self.weight.squeeze().double())[1].float()
)
return out, logdet
def reverse(self, output):
        return F.conv2d(output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
class InvConv2dLU(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = torch.randn(in_channel, in_channel)
q, _ = la.qr(weight)
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)
w_u = np.triu(w_u, 1)
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p) # rotation matrix
w_l = torch.from_numpy(w_l) # lower triangular matrix with 1 as diagonal entries
w_s = torch.from_numpy(w_s) # diagonal entries of w_u
w_u = torch.from_numpy(w_u) # zero diagonal entries of w_u(upper triangular matrix)
self.register_buffer('w_p', w_p) # rotation matrix, fixed
self.register_buffer('u_mask', torch.from_numpy(u_mask)) # upper triangular matrix as mask 1 without diagonal entries.
self.register_buffer('l_mask', torch.from_numpy(l_mask)) # lower triangular matrix as mask 1 without diagonal entries.
self.register_buffer('s_sign', torch.sign(w_s)) # sign of diagonal entries of s
self.register_buffer('l_eye', torch.eye(l_mask.shape[0])) # identity matrix as size c x c
self.w_l = nn.Parameter(w_l)
self.w_s = nn.Parameter(logabs(w_s))
self.w_u = nn.Parameter(w_u)
def forward(self, input):
_, _, H, W = input.shape
weight = self.calc_weight()
out = F.conv2d(input, weight)
logdet = H*W*torch.sum(self.w_s)
return out, logdet
def calc_weight(self):
weight =(self.w_p
@(self.w_l*self.l_mask + self.l_eye)
@((self.w_u*self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s))))
return weight.unsqueeze(2).unsqueeze(3)
def reverse(self, output):
weight = self.calc_weight()
return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
class ZeroConv2d(nn.Module):
def __init__(self, in_channel, out_channel, padding=1):
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
self.conv.weight.data.zero_()
self.conv.bias.data.zero_()
self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input):
out = F.pad(input, [1, 1, 1, 1], value=1)
out = self.conv(out)
out = out * torch.exp(self.scale*3)
return out
class CouplingLayer(nn.Module):
def __init__(self, in_channel, filter_size=512, affine=True):
super().__init__()
self.affine = affine
self.net = nn.Sequential(
nn.Conv2d(in_channel//2, filter_size, 3, padding=1),
nn.ReLU(True),
nn.Conv2d(filter_size, filter_size, 1),
nn.ReLU(True),
ZeroConv2d(filter_size, in_channel if self.affine else in_channel//2),
)
self.net[0].weight.data.normal_(0, 0.05)
self.net[0].bias.data.zero_()
self.net[2].weight.data.normal_(0, 0.05)
self.net[2].bias.data.zero_()
def forward(self, input):
in_a, in_b = input.chunk(2, 1)
if self.affine:
log_s, t = self.net(in_a).chunk(2, 1)
# s = torch.exp(log_s + 2)
s = torch.sigmoid(log_s + 2)
# out_b = s * in_b + t
out_b = (in_b + t) * s
# logdet = torch.sum(torch.log(s).view(input.shape[0], -1), 1)
logdet = torch.sum(torch.log(s).view(input.shape[0], -1), 1)
else:
net_out = self.net(in_a)
out_b = in_b + net_out
logdet = None
return torch.cat([in_a, out_b], dim=1), logdet
def reverse(self, output):
out_a, out_b = output.chunk(2, 1)
if self.affine:
log_s, t = self.net(out_a).chunk(2, 1)
# s = torch.exp(log_s + 2)
s = torch.sigmoid(log_s + 2)
# in_b = (out_b - t)/s
in_b = out_b/s - t
else:
net_out = self.net(out_a)
in_b = out_b - net_out
return torch.cat([out_a, in_b], dim=1)
class Flow(nn.Module):
def __init__(self, in_channel, affine=True, conv_lu=True):
super().__init__()
self.actnorm = ActNorm(in_channel)
if conv_lu:
self.invconv = InvConv2dLU(in_channel)
else:
self.invconv = InvConv2d(in_channel)
self.coupling = CouplingLayer(in_channel, affine=affine)
def forward(self, input):
out, logdet = self.actnorm(input)
out, logdet1 = self.invconv(out)
out, logdet2 = self.coupling(out)
logdet = logdet + logdet1
if logdet2 is not None:
logdet = logdet + logdet2
return out, logdet
def reverse(self, output):
input = self.coupling.reverse(output)
input = self.invconv.reverse(input)
input = self.actnorm.reverse(input)
return input
class Block(nn.Module):
def __init__(self, in_channel, n_flow, split=True, affine=True, conv_lu=True, learned_prior=True):
super().__init__()
squeeze_dim = in_channel * 4
self.flows = nn.ModuleList([])
for i in range(n_flow):
self.flows.append(Flow(squeeze_dim, affine=affine, conv_lu=conv_lu))
self.split = split
self.learned_prior = learned_prior
if learned_prior:
if split:
self.prior = ZeroConv2d(in_channel*2, in_channel*4)
else:
self.prior = ZeroConv2d(in_channel*4, in_channel*8)
else:
if split:
self.prior = torch.zeros((1, in_channel*4, 1, 1))
else:
self.prior = torch.zeros((1, in_channel*8, 1, 1))
def forward(self, input):
B, C, H, W = input.size()
out = self.squeeze(input)
logdet = 0
for flow in self.flows:
out, det = flow(out)
logdet = logdet + det
if self.split:
out, z_new = out.chunk(2, 1)
if self.learned_prior:
mean, log_sd = self.prior(out).chunk(2, 1)
else:
mean, log_sd = self.prior.chunk(2, 1)
mean = mean.repeat(B, 1, *out.shape[2:]).to(out.device)
log_sd = log_sd.repeat(B, 1, *out.shape[2:]).to(out.device)
log_p = gaussian_log_p(z_new, mean, log_sd)
log_p = log_p.view(B, -1).sum(1)
else:
zero = torch.zeros_like(out)
if self.learned_prior:
mean, log_sd = self.prior(zero).chunk(2, 1)
else:
mean, log_sd = self.prior.chunk(2, 1)
mean = mean.repeat(B, 1, *out.shape[2:]).to(out.device)
log_sd = log_sd.repeat(B, 1, *out.shape[2:]).to(out.device)
log_p = gaussian_log_p(out, mean, log_sd)
log_p = log_p.view(B, -1).sum(1)
z_new = out
return out, logdet, log_p, z_new
def reverse(self, output, eps=None, reconstruct=False):
""" z(latent) -> x(data) """
input = output
if reconstruct:
if self.split:
input = torch.cat([output, eps], 1)
else:
input = eps
else:
if self.split:
if self.learned_prior:
mean, log_sd = self.prior(input).chunk(2, 1)
else:
mean, log_sd = self.prior.chunk(2, 1)
mean = mean.repeat(eps.shape[0], 1, *eps.shape[2:]).to(eps.device)
log_sd = log_sd.repeat(eps.shape[0], 1, *eps.shape[2:]).to(eps.device)
z = gaussian_sample(eps, mean, log_sd)
input = torch.cat([output, z], 1)
else:
zero = torch.zeros_like(input)
if self.learned_prior:
mean, log_sd = self.prior(zero).chunk(2, 1)
else:
mean, log_sd = self.prior.chunk(2, 1)
mean = mean.repeat(eps.shape[0], 1, *eps.shape[2:]).to(eps.device)
log_sd = log_sd.repeat(eps.shape[0], 1, *eps.shape[2:]).to(eps.device)
z = gaussian_sample(eps, mean, log_sd)
input = z
for flow in self.flows[::-1]:
input = flow.reverse(input)
input = self.undo_squeeze(input)
return input
def squeeze(self, input):
B, C, H, W = input.size()
squeezed = input.view(B, C, H//2, 2, W//2, 2)
squeezed = squeezed.permute(0, 1, 3, 5, 2, 4)
out = squeezed.contiguous().view((B, C*4, H//2, W//2))
return out
def undo_squeeze(self, output):
B, C, H, W = output.size()
unsqueezed = output.view(B, C//4, 2, 2, H, W)
unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3)
out = unsqueezed.contiguous().view((B, C//4, H*2, W*2))
return out
```
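Shape-level sanity checks for the building blocks above; PyTorch is assumed installed and the import path assumes the sketch runs from the repository root.
```python
# Shape checks: the shifted convolutions preserve H and W, and Block.squeeze /
# undo_squeeze trade space for channels and invert each other exactly.
import torch
from model.modules import Block, down_rightward_conv, downward_conv

x = torch.randn(2, 3, 32, 32)

v = downward_conv(3, 16)(x)          # vertical stream: (2, 16, 32, 32)
h = down_rightward_conv(3, 16)(x)    # horizontal stream: (2, 16, 32, 32)
print(v.shape, h.shape)

block = Block(in_channel=3, n_flow=2)
squeezed = block.squeeze(x)
print(squeezed.shape)                                # torch.Size([2, 12, 16, 16])
print(torch.equal(block.undo_squeeze(squeezed), x))  # True
```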
#### File: josephM96/PGM_project/test.py
```python
import argparse
import torch
from tqdm import tqdm
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.metric as module_metric
import model.model as module_arch
from parse_config import ConfigParser
def main(config):
logger = config.get_logger('test')
# setup data_loader instances #TODO Be careful of 'mode' parameter
data_loader = getattr(module_data, config['data_loader']['type'])(
config['data_loader']['args']['data_dir'],
img_size = config['data_loader']['args']['img_size'],
batch_size=64,
shuffle=False,
validation_split=0.0,
training=False,
num_workers=1
)
# build model architecture
model_background = config.init_obj('arch_background', module_arch)
model_semantic = config.init_obj('arch_semantic', module_arch)
logger.info(model_background)
logger.info(model_semantic)
# get function handles of loss and metrics
loss_fn = getattr(module_loss, config['loss'])
# One Logger, Two different model(background model & semantic model)
logger.info('Loading background model checkpoint: {} ...'.format(config['resume']['background']))
checkpoint_bg = torch.load(config['resume']['background'])
state_bg_dict = checkpoint_bg['state_dict']
if config['n_gpu'] > 1:
model_background = torch.nn.DataParallel(model_background)
model_background.load_state_dict(state_bg_dict)
logger.info('Loading semantic model checkpoint: {} ...'.format(config['resume']['semantic']))
checkpoint_sem = torch.load(config['resume']['semantic'])
state_sem_dict = checkpoint_sem['state_dict']
    if config['n_gpu'] > 1:
        model_semantic = torch.nn.DataParallel(model_semantic)
model_semantic.load_state_dict(state_sem_dict)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_b = model_background.to(device)
model_s = model_semantic.to(device)
model_b.eval()
model_s.eval()
total_loss = 0.0
with torch.no_grad():
for i, (data, target) in enumerate(tqdm(data_loader)):
data, target = data.to(device), target.to(device)
output_b, output_s = model_b(data), model_s(data)
# computing loss, metrics on test set
loss_b = loss_fn(data, output_b, input_channels=data.shape[1])
loss_s = loss_fn(data, output_s, input_channels=data.shape[1])
batch_size = data.shape[0]
total_loss += (loss_s.item() - loss_b.item()) * batch_size
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
logger.info(log)
if __name__ == '__main__':
args = argparse.ArgumentParser(description='Test code for evaluation.')
# args.add_argument('-c', '--config', default=None, type=str,
# help='config file path (default: None)')
args.add_argument('-c', '--config', default='./configs/fmnist_glow_config_test.json', type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
config_test = ConfigParser.from_args(args)
main(config_test)
```
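The evaluation above accumulates `loss_semantic - loss_background`, i.e. a likelihood-ratio statistic between the two generative models. The sketch below restates the same score per batch; it assumes the `loss_fn`, models, loader and device are set up exactly as in `main()` above.
```python
# Per-batch likelihood-ratio scores: NLL under the semantic model minus NLL
# under the background model (lower = better explained by the semantic model).
import torch

def likelihood_ratio_scores(model_b, model_s, loss_fn, data_loader, device):
    scores = []
    with torch.no_grad():
        for data, _ in data_loader:
            data = data.to(device)
            nll_b = loss_fn(data, model_b(data), input_channels=data.shape[1])
            nll_s = loss_fn(data, model_s(data), input_channels=data.shape[1])
            scores.append((nll_s - nll_b).item())
    return scores
```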
{
"source": "josephmaa/pyuoi",
"score": 3
} |
#### File: pyuoi/linear_model/elasticnet.py
```python
import numpy as np
from .base import AbstractUoILinearRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model._coordinate_descent import _alpha_grid
from sklearn.linear_model import ElasticNet
class UoI_ElasticNet(AbstractUoILinearRegressor, LinearRegression):
r"""UoI\ :sub:`ElasticNet` solver.
Parameters
----------
n_boots_sel : int
The number of data bootstraps to use in the selection module.
Increasing this number will make selection more strict.
n_boots_est : int
The number of data bootstraps to use in the estimation module.
Increasing this number will relax selection and decrease variance.
selection_frac : float
The fraction of the dataset to use for training in each resampled
bootstrap, during the selection module. Small values of this parameter
imply larger "perturbations" to the dataset.
estimation_frac : float
The fraction of the dataset to use for training in each resampled
bootstrap, during the estimation module. The remaining data is used
to obtain validation scores. Small values of this parameters imply
larger "perturbations" to the dataset. IGNORED - Leaving this here
to double check later
n_lambdas : int
The number of regularization values to use for selection.
alphas : list or ndarray
The parameter that trades off L1 versus L2 regularization for a given
lambda.
stability_selection : int, float, or array-like
If int, treated as the number of bootstraps that a feature must
appear in to guarantee placement in selection profile. If float,
must be between 0 and 1, and is instead the proportion of
bootstraps. If array-like, must consist of either ints or floats
between 0 and 1. In this case, each entry in the array-like object
will act as a separate threshold for placement in the selection
profile.
estimation_score : string, "r2" | "AIC" | "AICc" | "BIC"
Objective used to choose the best estimates per bootstrap.
estimation_target : string, "train" | "test"
Decide whether to assess the estimation_score on the train
or test data across each bootstrap. By default, a sensible
choice is made based on the chosen estimation_score.
warm_start : bool
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution
eps : float
Length of the lasso path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
copy_X : bool
If ``True``, X will be copied; else, it may be overwritten.
fit_intercept : bool
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(e.g. data is expected to be already centered).
standardize : bool
If True, the regressors X will be standardized before regression by
subtracting the mean and dividing by their standard deviations.
max_iter : int
Maximum number of iterations for iterative fitting methods.
random_state : int, RandomState instance, or None
The seed of the pseudo random number generator that selects a random
feature to update. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
comm : MPI communicator
If passed, the selection and estimation steps are parallelized.
logger : Logger
The logger to use for messages when ``verbose=True`` in ``fit``.
If *None* is passed, a logger that writes to ``sys.stdout`` will be
used.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
intercept_ : float
Independent term in the linear model.
supports_ : ndarray, shape (n_supports, n_features)
Boolean array indicating whether a given regressor (column) is selected
for estimation for a given regularization parameter value (row).
"""
def __init__(self, n_boots_sel=24, n_boots_est=24, selection_frac=0.9,
estimation_frac=0.9, n_lambdas=48,
alphas=np.array([0.5]), stability_selection=1.,
estimation_score='r2', estimation_target=None,
warm_start=True, eps=1e-3, copy_X=True,
fit_intercept=True, standardize=True,
max_iter=1000, random_state=None, comm=None, logger=None):
super(UoI_ElasticNet, self).__init__(
n_boots_sel=n_boots_sel,
n_boots_est=n_boots_est,
selection_frac=selection_frac,
estimation_frac=estimation_frac,
stability_selection=stability_selection,
estimation_score=estimation_score,
estimation_target=estimation_target,
copy_X=copy_X,
fit_intercept=fit_intercept,
standardize=standardize,
random_state=random_state,
comm=comm,
max_iter=max_iter,
logger=logger)
self.n_lambdas = n_lambdas
self.alphas = alphas
self.n_alphas = len(alphas)
self.warm_start = warm_start
self.eps = eps
self.lambdas = None
self._selection_lm = ElasticNet(
fit_intercept=fit_intercept,
max_iter=max_iter,
copy_X=copy_X,
warm_start=warm_start,
random_state=random_state)
self._estimation_lm = LinearRegression(fit_intercept=fit_intercept)
def get_reg_params(self, X, y):
r"""Calculates the regularization parameters (alpha and lambda) to be
used for the provided data.
Note that the Elastic Net penalty is given by
.. math::
\frac{1}{2\ \text{n_samples}} ||y - Xb||^2_2
+ \lambda (\alpha |b|_1 + 0.5 (1 - \alpha) |b|^2_2)
where lambda and alpha are regularization parameters.
``scikit-learn`` does not use these names. Instead, ``scitkit-learn``
denotes alpha by 'l1_ratio' and lambda by 'alpha'.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The design matrix.
y : array-like, shape (n_samples)
The response vector.
Returns
-------
reg_params : a list of dictionaries
A list containing dictionaries with the value of each
(lambda, alpha) describing the type of regularization to impose.
The keys adhere to scikit-learn's terminology (lambda->alpha,
alpha->l1_ratio). This allows easy passing into the ElasticNet
object.
"""
if self.lambdas is None:
self.lambdas = np.zeros((self.n_alphas, self.n_lambdas))
# a set of lambdas are generated for each alpha value (l1_ratio in
# sci-kit learn parlance)
for alpha_idx, alpha in enumerate(self.alphas):
self.lambdas[alpha_idx, :] = _alpha_grid(
X=X, y=y,
l1_ratio=alpha,
fit_intercept=self.fit_intercept,
eps=self.eps,
n_alphas=self.n_lambdas)
# place the regularization parameters into a list of dictionaries
reg_params = list()
for alpha_idx, alpha in enumerate(self.alphas):
for lamb_idx, lamb in enumerate(self.lambdas[alpha_idx]):
# reset the regularization parameter
reg_params.append(dict(alpha=lamb, l1_ratio=alpha))
return reg_params
```
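A fitting sketch for `UoI_ElasticNet` on synthetic sparse data; the `fit()`/`coef_` interface follows the scikit-learn conventions described in the docstring, and the import assumes `pyuoi.linear_model` re-exports the class as in released versions of the package.
```python
# Fit UoI_ElasticNet on synthetic data with 5 informative out of 20 features.
import numpy as np
from pyuoi.linear_model import UoI_ElasticNet

rng = np.random.default_rng(0)
n_samples, n_features = 200, 20
beta = np.zeros(n_features)
beta[:5] = rng.uniform(1.0, 3.0, size=5)
X = rng.normal(size=(n_samples, n_features))
y = X @ beta + 0.5 * rng.normal(size=n_samples)

model = UoI_ElasticNet(n_boots_sel=10, n_boots_est=10,
                       alphas=np.array([0.5, 0.75]), random_state=0)
model.fit(X, y)
print(np.nonzero(model.coef_)[0])  # indices of the features kept by the union step
```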
#### File: pyuoi/pyuoi/utils.py
```python
import numpy as np
import sys
import logging
import os
import time
import json
import base64
def softmax(y, axis=-1):
"""Calculates the softmax distribution.
Parameters
----------
y : ndarray
Log-probabilities.
"""
yp = y - y.max(axis=axis, keepdims=True)
epy = np.exp(yp)
return epy / np.sum(epy, axis=axis, keepdims=True)
def sigmoid(x):
"""Calculates the bernoulli distribution.
Parameters
----------
x : ndarray
Log-probabilities.
"""
return np.exp(-np.logaddexp(0, -x))
def log_likelihood_glm(model, y_true, y_pred):
"""Calculates the log-likelihood of a generalized linear model given the
true response variables and the "predicted" response variables. The
"predicted" response variable varies by the specific generalized linear
model under consideration.
Parameters
----------
model : string
The generalized linear model to calculate the log-likelihood for.
y_true : nd-array, shape (n_samples,)
Array of true response values.
y_pred : nd-array, shape (n_samples,)
Array of predicted response values (conditional mean).
Returns
-------
ll : float
The log-likelihood.
"""
if model == 'normal':
# this log-likelihood is calculated under the assumption that the
# variance is the value that maximizes the log-likelihood
rss = (y_true - y_pred)**2
n_samples = y_true.size
ll = -n_samples / 2 * (1 + np.log(np.mean(rss)))
elif model == 'poisson':
if not np.any(y_pred):
if np.any(y_true):
ll = -np.inf
else:
ll = 0.
else:
ll = np.mean(y_true * np.log(y_pred) - y_pred)
else:
raise ValueError('Model is not available.')
return ll
def BIC(ll, n_features, n_samples):
"""Calculates the Bayesian Information Criterion.
Parameters
----------
ll : float
The log-likelihood of the model.
n_features : int
The number of features used in the model.
n_samples : int
The number of samples in the dataset being tested.
Returns
-------
BIC : float
Bayesian Information Criterion
"""
BIC = n_features * np.log(n_samples) - 2 * ll
return BIC
def AIC(ll, n_features):
"""Calculates the Akaike Information Criterion.
Parameters
----------
ll : float
The log-likelihood of the model.
n_features : int
The number of features used in the model.
Returns
-------
AIC : float
Akaike Information Criterion
"""
AIC = 2 * n_features - 2 * ll
return AIC
def AICc(ll, n_features, n_samples):
"""Calculate the corrected Akaike Information Criterion. This criterion is
useful in cases when the number of samples is small.
    If the number of samples is less than or equal to the number of features
    plus one, the AIC is returned (the AICc correction is undefined in this
    case).
Parameters
----------
ll : float
The log-likelihood of the model.
n_features : int
The number of features used in the model.
n_samples : int
The number of samples in the dataset being tested.
Returns
-------
    AICc : float
        Corrected Akaike Information Criterion
"""
AICc = AIC(ll, n_features)
if n_samples > (n_features + 1):
AICc += 2 * (n_features**2 + n_features) / (n_samples - n_features - 1)
return AICc
def check_logger(logger, name='uoi', comm=None):
ret = logger
if ret is None:
if comm is not None and comm.Get_size() > 1:
r, s = comm.Get_rank(), comm.Get_size()
name += " " + str(r).rjust(int(np.log10(s)) + 1)
ret = logging.getLogger(name=name)
handler = logging.StreamHandler(sys.stdout)
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
handler.setFormatter(logging.Formatter(fmt))
ret.addHandler(handler)
return ret
def is_json_serializable(obj: object) -> bool:
    """Return True if ``obj`` can be serialized by ``json.dumps``."""
    try:
        json.dumps(obj)
        return True
    except (TypeError, OverflowError):
        return False
def dump_json(model: "AbstractUoILinearModel", filename: str, results: dict) -> None:
    """
    Writes out all model attributes and results to a timestamped JSON file.
    """
    dirname, basename = os.path.dirname(filename), os.path.basename(
        filename)
    # Build the output path once so the file written and the path reported
    # below are guaranteed to match.
    json_filename = generate_timestamp_filename(
        dirname=dirname, basename=basename, file_format=".json")
    with open(json_filename, "w") as file:
        json_dump = {}
        for dump in (model.__dict__, results):
            for key, val in dump.items():
                if is_json_serializable(val):
                    json_dump[key] = val
                elif isinstance(val, np.ndarray):
                    # Encode arrays as (shape, base64 string) pairs.
                    json_dump[key] = (val.shape, str(
                        base64.b64encode(val), 'utf-8'))
        json.dump(json_dump, file, sort_keys=True, indent=4)
    print(f"JSON attributes written to {json_filename}.")
def generate_timestamp_filename(dirname: str, basename: str, file_format: str) -> str:
"""
Generate a timestamped filename for use in saving files.
"""
timestr = time.strftime("%Y%m%d-%H%M%S")
return os.path.join(dirname, f"{timestr}.{basename}{file_format}")
def write_timestamped_numpy_binary(filename: str, **data: np.array) -> None:
"""
Writes a numpy binary file with a timestamped prefix to a 'saved_runs' directory in the same directory.
"""
basename, dirname = os.path.basename(filename), os.path.dirname(filename)
saved_runs_directory = os.path.join(dirname, 'saved_runs')
if not os.path.exists(saved_runs_directory):
os.makedirs(saved_runs_directory)
# Only process one keyword argument array.
if len(data) == 1:
saved_filename = generate_timestamp_filename(
dirname=saved_runs_directory, basename=basename, file_format='.npy')
np.save(saved_filename, data)
    # Process multiple keyword argument arrays.
    else:
        saved_filename = generate_timestamp_filename(
            dirname=saved_runs_directory, basename=basename, file_format='.npz')
np.savez(saved_filename, **data)
print('File saved to: ', saved_filename)
```
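A small self-contained illustration of how the helpers above combine: fit ordinary least squares, score it with the Gaussian log-likelihood, and convert the result into the information criteria. The synthetic data are for demonstration only.
```python
import numpy as np
from pyuoi.utils import log_likelihood_glm, AIC, AICc, BIC

rng = np.random.RandomState(0)
n_samples, n_features = 100, 5
X = rng.randn(n_samples, n_features)
y = X @ rng.randn(n_features) + 0.1 * rng.randn(n_samples)

# Least-squares fit; y_pred is the conditional mean expected by the helper.
w_hat = np.linalg.lstsq(X, y, rcond=None)[0]
y_pred = X @ w_hat

ll = log_likelihood_glm('normal', y, y_pred)
print(AIC(ll, n_features), AICc(ll, n_features, n_samples), BIC(ll, n_features, n_samples))
```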
#### File: pyuoi/tests/test_rat7m.py
```python
from pyuoi.linear_model import UoI_L1Logistic
from pyuoi.datasets import make_classification
from matplotlib.widgets import Button
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import time
import xarray as xr
from sklearn.model_selection import train_test_split
# filename = "/Users/josephgmaa/pyuoi/pyuoi/data/nolj_Recording_day7_overnight_636674151185633714_1_nolj.c3d.243.features.netcdf"
filename = "/Users/josephgmaa/pyuoi/pyuoi/data/features/nolj_Recording_day7_overnight_636674151185633714_35_nolj.c3d.916.features.netcdf"
df = xr.load_dataset(filename, engine='h5netcdf').to_dataframe()
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.2)
class Index(object):
    global df  # give the callback methods below access to the module-level dataframe
global ax
def __init__(self):
self.ind = 0
self.line = None
def next(self, event):
if self.line:
self.line.pop(0).remove()
ax.clear()
ax.plot(df['behavior_name'].to_numpy(), alpha=0.5)
self.ind += 1
y = df.iloc[:, self.ind].to_numpy()
name = df.columns[self.ind]
self.line = ax.plot(y, alpha=0.5) # set y value data
ax.title.set_text(name) # set title of graph
plt.draw()
def prev(self, event):
if self.line:
self.line.pop(0).remove()
ax.clear()
ax.plot(df['behavior_name'].to_numpy(), alpha=0.5)
        self.ind -= 1
y = df.iloc[:, self.ind].to_numpy()
name = df.columns[self.ind]
self.line = ax.plot(y, alpha=0.5) # set y value data
ax.title.set_text(name) # set title of graph
plt.draw()
def main():
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
start = time.time()
callback = Index()
axprev = plt.axes([0.7, 0.05, 0.1, 0.075])
axnext = plt.axes([0.81, 0.05, 0.1, 0.075])
bnext = Button(axnext, 'Next')
bnext.on_clicked(callback.next)
bprev = Button(axprev, 'Previous')
bprev.on_clicked(callback.prev)
ax.plot(df['behavior_name'].to_numpy())
plt.show()
# for feature in df.columns:
# map_behavior_to_values = {'Walk': 100, 'WetDogShake': 200, 'FaceGroom': 300, 'RScratch': 400, 'BadTracking': 500, 'RGroom': 600,
# 'ProneStill': 700, 'AdjustPosture': 800}
# df['behavior_values'] = df['behavior_name'].map(map_behavior_to_values)
# feature = ax.plot(df[feature])
# ax.scatter(df.index, df.behavior_values)
# ax.text(x=0, y=-100, s=[f"{key}: {value}" for key,
# value in map_behavior_to_values.items()], fontsize="small")
# plt.show()
# del feature
# # Run the classifier.
# print('Running the classifier.')
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# l1log = UoI_L1Logistic().fit(X_train, y_train)
# y_hat = l1log.predict(X_test)
# print(y_hat)
# # Save the results to a numpy binary.
# with open('test.npy', 'wb') as file:
# np.save(file, X)
# np.save(file, y)
# np.save(file, y_hat)
# np.save(file, y_test)
# print(np.load('test.npy').shape)
# ax.clear()
# ax.plot(y_hat)
# plt.show()
end = time.time()
print('Time elapsed: ', end - start)
if __name__ == "__main__":
main()
```
#### File: pyuoi/tests/test_uoi_l1logistic.py
```python
import numbers
import warnings
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from scipy.sparse import rand as sprand
from scipy import optimize
from pyuoi import UoI_L1Logistic
from pyuoi.linear_model.logistic import (fit_intercept_fixed_coef,
MaskedCoefLogisticRegression,
LogisticInterceptFitterNoFeatures,
_logistic_regression_path,
_multinomial_loss_grad,
_logistic_loss_and_grad)
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.utils import (compute_class_weight,
check_consistent_length, check_array)
from sklearn.exceptions import ConvergenceWarning
from pyuoi.datasets import make_classification
from pyuoi.lbfgs import fmin_lbfgs, AllZeroLBFGSError
def _logistic_regression_path_old(X, y, Cs=48, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0, coef=None,
class_weight=None, penalty='l2',
multi_class='auto',
check_input=True,
sample_weight=None,
l1_ratio=None, coef_mask=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is the original function used to check the new indexing-based
version rather than the masking version implemented here.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
multi_class : str, {'multinomial', 'auto'}, default: 'auto'
For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'auto' selects binary if the data is binary
and otherwise selects 'multinomial'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
coef_mask : array-like, shape (n_features), (n_classes, n_features) optional
Masking array for coef.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=True)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
if multi_class == 'auto':
if len(classes) > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
coef_size = n_features
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == 1)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight,
classes=mask_classes,
y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
coef_size = classes.size * n_features
lbin = OneHotEncoder(categories=[range(classes.size)], sparse=False)
Y_multi = lbin.fit_transform(y[:, np.newaxis])
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
dtype=X.dtype)
w0[:, -1] = LogisticInterceptFitterNoFeatures(y,
classes.size).intercept_
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
w0[:, :coef.shape[1]] = coef
# Mask initial array
if coef_mask is not None:
if multi_class == 'ovr':
w0[:n_features] *= coef_mask
else:
w0[:, :n_features] *= coef_mask
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
target = Y_multi
if penalty == 'l2':
w0 = w0.ravel()
def func(x, *args):
return _multinomial_loss_grad(x, *args)[0:2]
else:
w0 = w0.T.ravel().copy()
def inner_func(x, *args):
return _multinomial_loss_grad(x, *args)[0:2]
def func(x, g, *args):
x = x.reshape(-1, classes.size).T.ravel()
loss, grad = inner_func(x, *args)
grad = grad.reshape(classes.size, -1).T.ravel()
g[:] = grad
return loss
else:
target = y_bin
if penalty == 'l2':
func = _logistic_loss_and_grad
else:
def func(x, g, *args):
loss, grad = _logistic_loss_and_grad(x, *args)
g[:] = grad
return loss
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
if penalty == 'l2':
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, coef_mask, sample_weight),
iprint=iprint, pgtol=tol, maxiter=max_iter)
else:
zeros_seen = [0]
def zero_coef(x, *args):
if multi_class == 'multinomial':
x = x.reshape(-1, classes.size)[:-1]
else:
x = x[:-1]
now_zeros = np.array_equiv(x, 0.)
if now_zeros:
zeros_seen[0] += 1
else:
zeros_seen[0] = 0
if zeros_seen[0] > 1:
return -2048
try:
w0 = fmin_lbfgs(func, w0, orthantwise_c=1. / C,
args=(X, target, 0., coef_mask, sample_weight),
max_iterations=max_iter,
epsilon=tol,
orthantwise_end=coef_size,
progress=zero_coef)
except AllZeroLBFGSError:
w0 *= 0.
info = None
if info is not None and info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
if info is None:
n_iter_i = -1
else:
n_iter_i = min(info['nit'], max_iter)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
if penalty == 'l2':
multi_w0 = np.reshape(w0, (n_classes, -1))
else:
multi_w0 = np.reshape(w0, (-1, n_classes)).T
if coef_mask is not None:
multi_w0[:, :n_features] *= coef_mask
coefs.append(multi_w0.copy())
else:
if coef_mask is not None:
w0[:n_features] *= coef_mask
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
def test_fit_intercept_fixed_coef():
    """Test that the intercept is fit correctly for fixed coefficients."""
X = np.zeros((6, 5))
coef = np.ones((1, 5))
y = np.ones(6, dtype=int)
y[:3] = 0
b = fit_intercept_fixed_coef(X, coef, y, 2)
assert_allclose(b, 0.)
X = np.zeros((7, 5))
y = np.ones(7, dtype=int)
y[:3] = 0
b = fit_intercept_fixed_coef(X, coef, y, 3)
assert_allclose(b.argmax(), 1)
assert_allclose(b.argmin(), 2)
def test_fit_intercept_no_features():
    """Test that the intercept is fit correctly when there are no features."""
X = np.zeros((5, 1))
y = np.ones(6, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 1)
b = LR.intercept_
assert_allclose(b, 0.)
y = np.ones(7, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 1)
yhat = LR.predict(X)
assert_allclose(yhat, 1)
py = LR.predict_proba(X)
assert np.all(py > .5)
y = np.ones(7, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 3)
yhat = LR.predict(X)
assert_allclose(yhat, 1)
py = LR.predict_proba(X)
assert_allclose(py.argmax(axis=1), 1)
assert_allclose(py.argmin(axis=1), 2)
def test_l1logistic_intercept():
"""Test that binary L1 Logistic fits an intercept when run."""
for fi in [True, False]:
X, y, w, b = make_classification(n_samples=100,
random_state=11,
n_features=4,
w_scale=4.,
include_intercept=fi)
l1log = UoI_L1Logistic(fit_intercept=fi,
n_boots_sel=3,
n_boots_est=3).fit(X, y)
if not fi:
assert_array_equal(l1log.intercept_, 0.)
else:
l1log.intercept_
def test_l1logistic_binary():
"""Test that binary L1 Logistic runs in the UoI framework."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
l1log = UoI_L1Logistic(random_state=10, fit_intercept=False).fit(X, y)
l1log.predict_proba(X)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8
def test_l1logistic_binary_multinomial():
"""Test that binary L1 Logistic runs in the UoI framework
using multi_class='multinomial'."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
UoI_L1Logistic(random_state=10, multi_class='multinomial').fit(X, y)
UoI_L1Logistic(random_state=10, fit_intercept=False,
multi_class='multinomial').fit(X, y)
def test_l1logistic_no_ovr():
"""Test that binary L1 Logistic model raises an error for
multiclass='ovr'."""
with pytest.raises(ValueError):
UoI_L1Logistic(multi_class='ovr')
def test_l1logistic_multiclass():
"""Test that multiclass L1 Logistic runs in the UoI framework when all
classes share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=True,
w_scale=4.)
l1log = UoI_L1Logistic().fit(X, y)
l1log.predict_proba(X)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8
def test_l1logistic_multiclass_not_shared():
    """Test that multiclass L1 Logistic runs in the UoI framework when the
    classes do not share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=400,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=False,
w_scale=4.)
l1log = UoI_L1Logistic(shared_support=False).fit(X, y)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .7
def test_masked_logistic():
"""Test the masked logistic regression class."""
n_features = 20
n_inf = 10
for shared_support in [True, False]:
for n_classes in [2, 3]:
for intercept in [True, False]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_inf,
n_features=n_features,
shared_support=shared_support,
include_intercept=intercept,
w_scale=4.)
mask = np.squeeze(np.logical_not(np.equal(w, 0)))
for penalty in ['l1', 'l2']:
lr = MaskedCoefLogisticRegression(penalty=penalty, C=10.,
warm_start=True,
fit_intercept=intercept)
lr.fit(X, y, coef_mask=mask)
coef_idxs = np.flatnonzero(np.equal(lr.coef_, 0.))
coef_idxs = set(coef_idxs.tolist())
mask_idxs = np.flatnonzero(np.equal(mask, 0))
mask_idxs = set(mask_idxs.tolist())
assert mask_idxs.issubset(coef_idxs)
lr.fit(X, y, coef_mask=mask)
def test_masked_logistic_standardize():
"""Test the masked logistic regression class with `standardize=True`."""
n_features = 20
n_inf = 10
for shared_support in [True, False]:
for n_classes in [2, 3]:
for intercept in [True, False]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_inf,
n_features=n_features,
shared_support=shared_support,
include_intercept=intercept,
w_scale=4.)
mask = np.squeeze(np.logical_not(np.equal(w, 0)))
for penalty in ['l1', 'l2']:
lr = MaskedCoefLogisticRegression(penalty=penalty, C=10.,
warm_start=True,
fit_intercept=intercept,
standardize=True)
lr.fit(X, y, coef_mask=mask)
coef_idxs = np.flatnonzero(np.equal(lr.coef_, 0.))
coef_idxs = set(coef_idxs.tolist())
mask_idxs = np.flatnonzero(np.equal(mask, 0))
mask_idxs = set(mask_idxs.tolist())
assert mask_idxs.issubset(coef_idxs)
lr.fit(X, y, coef_mask=mask)
@pytest.mark.parametrize("n_classes,penalty,fit_intercept", [(3, "l2", True),
(3, "l2", False),
(3, "l1", True),
(3, "l1", False),
(2, "l2", True),
(2, "l2", False),
(2, "l1", True),
(2, "l1", False)])
def test_masking_with_indexing(n_classes, penalty, fit_intercept):
"""Check that indexing the masks gives the same results as masking with
logistic regression.
"""
X, y, w, intercept = make_classification(n_samples=1000,
n_classes=n_classes,
n_features=20,
n_informative=10,
random_state=0)
mask = w != 0.
if n_classes == 2:
mask = mask.ravel()
coefs, _, _ = _logistic_regression_path(X, y, [10.], coef_mask=mask,
penalty=penalty,
fit_intercept=fit_intercept)
coefs_old, _, _ = _logistic_regression_path_old(X, y, [10.], coef_mask=mask,
penalty=penalty,
fit_intercept=fit_intercept)
assert_allclose(coefs, coefs_old)
coefs, _, _ = _logistic_regression_path(X, y, [10.],
penalty=penalty,
fit_intercept=fit_intercept)
coefs_old, _, _ = _logistic_regression_path_old(X, y, [10.],
penalty=penalty,
fit_intercept=fit_intercept)
assert_allclose(coefs, coefs_old)
@pytest.mark.parametrize("n_classes,penalty,fit_intercept", [(3, "l2", True),
(3, "l2", False),
(3, "l1", True),
(3, "l1", False),
(2, "l2", True),
(2, "l2", False),
(2, "l1", True),
(2, "l1", False)])
def test_all_masked_with_indexing(n_classes, penalty, fit_intercept):
"""Check masking all of the coef either works with intercept or raises an error.
"""
X, y, w, intercept = make_classification(n_samples=1000,
n_classes=n_classes,
n_features=20,
n_informative=10,
random_state=0)
mask = np.zeros_like(w)
if n_classes == 2:
mask = mask.ravel()
coefs, _, _ = _logistic_regression_path(X, y, [10.], coef_mask=mask,
fit_intercept=fit_intercept)
if fit_intercept:
if n_classes == 2:
assert_equal(coefs[0][:-1], 0.)
else:
assert_equal(coefs[0][:, :-1], 0.)
else:
assert_equal(coefs[0], 0.)
def test_estimation_score_usage():
"""Test the ability to change the estimation score in UoI L1Logistic"""
methods = ('acc', 'log', 'BIC', 'AIC', 'AICc')
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=5,
n_features=10)
scores = []
for method in methods:
l1log = UoI_L1Logistic(random_state=12, estimation_score=method,
tol=1e-2, n_boots_sel=24, n_boots_est=24)
assert_equal(l1log.estimation_score, method)
l1log.fit(X, y)
scores.append(l1log.scores_)
scores = np.stack(scores)
assert_equal(len(np.unique(scores, axis=0)), len(methods))
def test_set_random_state():
"""Tests whether random states are handled correctly."""
X, y, w, b = make_classification(n_samples=100,
random_state=60,
n_informative=4,
n_features=5,
w_scale=4.)
# same state
l1log_0 = UoI_L1Logistic(random_state=13)
l1log_1 = UoI_L1Logistic(random_state=13)
l1log_0.fit(X, y)
l1log_1.fit(X, y)
assert_array_equal(l1log_0.coef_, l1log_1.coef_)
# different state
l1log_1 = UoI_L1Logistic(random_state=14)
l1log_1.fit(X, y)
assert not np.array_equal(l1log_0.coef_, l1log_1.coef_)
# different state, not set
l1log_0 = UoI_L1Logistic()
l1log_1 = UoI_L1Logistic()
l1log_0.fit(X, y)
l1log_1.fit(X, y)
assert not np.array_equal(l1log_0.coef_, l1log_1.coef_)
def test_normalization_by_samples():
"""Test that coef_ does not depend directly on the number of samples."""
n_features = 20
for n_classes in [2, 3]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_features,
n_features=n_features,
w_scale=4.)
for penalty in ['l1', 'l2']:
lr1 = MaskedCoefLogisticRegression(penalty=penalty, C=1e2)
lr1.fit(X, y)
lr3 = MaskedCoefLogisticRegression(penalty=penalty, C=1e2)
lr3.fit(np.tile(X, (3, 1)), np.tile(y, 3))
assert_allclose(lr1.coef_, lr3.coef_)
def test_l1logistic_binary_strings():
"""Test that binary L1 Logistic runs in the UoI framework."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
classes = ['a', 'b']
lb = LabelEncoder()
lb.fit(classes)
y = lb.inverse_transform(y)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_multiclass_strings():
"""Test that multiclass L1 Logistic runs in the UoI framework when all
classes share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=True,
w_scale=4.)
classes = ['a', 'b', 'c', 'd', 'e']
lb = LabelEncoder()
lb.fit(classes)
y = lb.inverse_transform(y)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_sparse_input():
"""Test that multiclass L1 Logistic works when using sparse matrix
inputs"""
rs = np.random.RandomState(17)
X = sprand(100, 100, random_state=rs)
classes = ['abc', 'de', 'fgh']
y = np.array(classes)[rs.randint(3, size=100)]
kwargs = dict(
fit_intercept=False,
random_state=rs,
n_boots_sel=4,
n_boots_est=4,
n_C=7,
)
l1log = UoI_L1Logistic(**kwargs).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_sparse_input_no_center():
"""Test that multiclass L1 Logistic raises an error when asked to center
sparse data.
"""
rs = np.random.RandomState(17)
X = sprand(10, 10, random_state=rs)
classes = ['abc', 'de', 'fgh']
y = np.array(classes)[rs.randint(3, size=10)]
with pytest.raises(ValueError):
UoI_L1Logistic(fit_intercept=True).fit(X, y)
def test_l1logistic_bad_est_score():
"""Test that multiclass L1 Logistic raises an error when given a bad
estimation_score value.
"""
X = np.random.randn(20, 5)
y = np.ones(20)
with pytest.raises(ValueError):
UoI_L1Logistic(estimation_score='z',
n_boots_sel=10, n_boots_est=10).fit(X, y)
def test_reg_params():
"""Test whether the upper bound on the regularization parameters correctly
zero out the coefficients."""
n_features = 20
n_inf = 10
n_classes = 5
X, y, w, b = make_classification(n_samples=200,
random_state=101,
n_classes=n_classes,
n_informative=n_inf,
n_features=n_features,
shared_support=True)
uoi_log = UoI_L1Logistic()
uoi_log.output_dim = n_classes
reg_params = uoi_log.get_reg_params(X, y)
C = reg_params[0]['C']
# check that coefficients get set to zero
lr = MaskedCoefLogisticRegression(penalty='l1',
C=0.99 * C,
standardize=False,
fit_intercept=True)
lr.fit(X, y)
assert_equal(lr.coef_, 0.)
# check that coefficients above the bound are not set to zero
lr = MaskedCoefLogisticRegression(penalty='l1',
C=1.01 * C,
standardize=False,
fit_intercept=True)
lr.fit(X, y)
assert np.count_nonzero(lr.coef_) > 0
def test_fit_intercept():
    """Tests whether `fit_intercept` is passed through to the linear models.
    """
lr = UoI_L1Logistic(fit_intercept=True)
assert lr._selection_lm.fit_intercept
assert lr._estimation_lm.fit_intercept
lr = UoI_L1Logistic(fit_intercept=False)
assert not lr._selection_lm.fit_intercept
assert not lr._estimation_lm.fit_intercept
``` |
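The tests above exercise `coef_mask` through the full path functions; the sketch below shows the same idea on its own — fitting a logistic model while a chosen set of coefficients is held at zero. The data and mask are synthetic and only for illustration.
```python
import numpy as np
from pyuoi.datasets import make_classification
from pyuoi.linear_model.logistic import MaskedCoefLogisticRegression

X, y, w, b = make_classification(n_samples=200, n_features=10,
                                 n_informative=5, random_state=0)
mask = np.squeeze(w != 0)  # only features with nonzero true weight may enter

lr = MaskedCoefLogisticRegression(penalty='l1', C=10., fit_intercept=True)
lr.fit(X, y, coef_mask=mask)

# Every masked-out coefficient stays exactly at zero.
assert np.all(lr.coef_.ravel()[~mask] == 0.)
```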
{
"source": "josephmachado/online_store",
"score": 2
} |
#### File: app/ops/extract_load.py
```python
from typing import Dict, List
import boto3
import psycopg2.extras as p
import requests
from dagster import op
from utils.config import (
get_aws_creds,
get_customer_db_creds,
get_warehouse_creds,
)
from utils.db import WarehouseConnection
@op(config_schema={"risk_endpoint": str})
def extract_customer_risk_score(context) -> List[Dict[str, int]]:
resp = requests.get(context.op_config["risk_endpoint"])
return resp.json()
@op
def load_customer_risk_score(customer_risk_score: List[Dict[str, int]]):
ins_qry = """
INSERT INTO store.customer_risk_score(
customer_id,
risk_score
)
VALUES (
%(customer_id)s,
%(risk_score)s
)
"""
with WarehouseConnection(get_warehouse_creds()).managed_cursor() as curr:
p.execute_batch(curr, ins_qry, customer_risk_score)
@op(config_schema={"orders_bucket_name": str})
def extract_orders_data(context) -> List[Dict[str, str]]:
s3 = boto3.client("s3", **get_aws_creds())
objs = s3.list_objects_v2(Bucket=context.op_config["orders_bucket_name"])[
"Contents"
]
def get_last_modified(obj) -> int:
return int(obj["LastModified"].strftime("%s"))
last_added = [
obj["Key"] for obj in sorted(objs, key=get_last_modified, reverse=True)
][0]
obj = s3.get_object(
Bucket=context.op_config["orders_bucket_name"], Key=last_added
)
data = obj["Body"].read().decode("utf-8")
orders = []
for line in data.split("\n")[:-1]:
order_id, customer_id, item_id, item_name, delivered_on = str(
line
).split(",")
orders.append(
{
"order_id": order_id,
"customer_id": customer_id,
"item_id": item_id,
"item_name": item_name,
"delivered_on": delivered_on,
}
)
return orders
@op
def load_orders_data(orders_data: List[Dict[str, str]]):
ins_qry = """
INSERT INTO store.orders(
order_id,
customer_id,
item_id,
item_name,
delivered_on
)
VALUES (
%(order_id)s,
%(customer_id)s,
%(item_id)s,
%(item_name)s,
%(delivered_on)s
)
"""
with WarehouseConnection(get_warehouse_creds()).managed_cursor() as curr:
p.execute_batch(curr, ins_qry, orders_data)
@op
def extract_customer_data() -> List[Dict[str, str]]:
with WarehouseConnection(get_customer_db_creds()).managed_cursor() as curr:
curr.execute(
'''
select customer_id,
first_name,
last_name,
state_code,
datetime_created,
datetime_updated
from customers
where
TO_TIMESTAMP(datetime_created, 'YY-MM-DD HH24:MI:ss')
>= current_timestamp - interval '5 minutes'
or TO_TIMESTAMP(datetime_updated, 'YY-MM-DD HH24:MI:ss')
>= current_timestamp - interval '5 minutes'
'''
)
cust_data = curr.fetchall()
return [
{
"customer_id": str(d[0]),
"first_name": str(d[1]),
"last_name": str(d[2]),
"state_code": str(d[3]),
"datetime_created": str(d[4]),
"datetime_updated": str(d[5]),
}
for d in cust_data
]
@op
def load_customer_data(customer_data: List[Dict[str, str]]):
ins_qry = """
INSERT INTO store.customers(
customer_id,
first_name,
last_name,
state_code,
datetime_created,
datetime_updated
)
VALUES (
%(customer_id)s,
%(first_name)s,
%(last_name)s,
%(state_code)s,
%(datetime_created)s,
%(datetime_updated)s
)
"""
with WarehouseConnection(get_warehouse_creds()).managed_cursor() as curr:
p.execute_batch(curr, ins_qry, customer_data)
``` |
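The module above only defines individual ops; a minimal sketch of how they could be composed into Dagster jobs is shown below. The job names, module path, and run-config values (endpoint URL) are assumptions for illustration — the repository's own job or graph definitions may differ.
```python
from dagster import job
from app.ops.extract_load import (  # module path assumed from the file layout
    extract_customer_risk_score, load_customer_risk_score,
    extract_orders_data, load_orders_data,
    extract_customer_data, load_customer_data,
)

@job
def customer_risk_score_etl():
    load_customer_risk_score(extract_customer_risk_score())

@job
def orders_etl():
    load_orders_data(extract_orders_data())

@job
def customers_etl():
    load_customer_data(extract_customer_data())

# Ops with a config_schema need run configuration at launch time, e.g.:
# customer_risk_score_etl.execute_in_process(run_config={
#     "ops": {"extract_customer_risk_score": {
#         "config": {"risk_endpoint": "http://localhost:5000/risk"}}}})
```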
{
"source": "Josephmaclean/flask-easy",
"score": 3
} |
#### File: src/flask_easy/auth.py
```python
import os
import inspect
from functools import wraps
import jwt
from flask import request
from jwt.exceptions import ExpiredSignatureError, InvalidTokenError, PyJWTError
from .exc import Unauthorized, ExpiredTokenException, OperationError
def auth_required(other_roles=None):
"""auth required decorator"""
def authorize_user(func):
"""
        A wrapper that authorizes an action using the JWT bearer token from
        the request's Authorization header.
        :param func: the view function to wrap
        :return: the wrapped view function
"""
@wraps(func)
def view_wrapper(*args, **kwargs):
authorization_header = request.headers.get("Authorization")
if not authorization_header:
raise Unauthorized("Missing authentication token")
token = authorization_header.split()[1]
try:
key = os.getenv("JWT_SECRET") # noqa E501
payload = jwt.decode(
token, key=key, algorithms=["HS256", "RS256"]
) # noqa E501
# Get realm roles from payload
available_roles = payload.get("realm_access").get("roles")
# Append service name to function name to form role
# generated_role = service_name + "_" + func.__name__
generated_role = "s"
authorized_roles = []
if other_roles:
authorized_roles = other_roles.split("|")
authorized_roles.append(generated_role)
if is_authorized(authorized_roles, available_roles):
if "user_id" in inspect.getfullargspec(func).args:
kwargs["user_id"] = payload.get(
"preferred_username"
) # noqa E501
return func(*args, **kwargs)
except ExpiredSignatureError as error:
raise ExpiredTokenException("Token Expired") from error
except InvalidTokenError as error:
raise OperationError("Invalid Token") from error
except PyJWTError as error:
raise OperationError("Error decoding token") from error
raise Unauthorized(status_code=403)
return view_wrapper
return authorize_user
def is_authorized(access_roles, available_roles):
"""Check if access roles is in available roles"""
for role in access_roles:
if role in available_roles:
return True
return False
```
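A hedged sketch of how the decorator above might be applied to a Flask view. The route, the extra role string, the import path, and the JWT_SECRET value are assumptions for illustration; `user_id` is injected only because the view declares it as a parameter.
```python
import os
from flask import Flask, jsonify
from flask_easy.auth import auth_required  # import path assumed from the file layout

os.environ.setdefault("JWT_SECRET", "change-me")  # placeholder secret for the sketch

app = Flask(__name__)

@app.route("/reports")
@auth_required("admin|auditor")  # extra roles accepted alongside the generated one
def list_reports(user_id=None):
    # user_id receives the token's preferred_username claim via the decorator.
    return jsonify({"requested_by": user_id, "reports": []})
```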
#### File: flask_easy/repository/mongo_repository.py
```python
import typing as t
import mongoengine as me
from ..exc import OperationError, NotFoundException
from .repository_interface import RepositoryInterface
class MongoRepository(RepositoryInterface):
"""
MongoRepository to be inherited
"""
model: t.Type[me.Document]
@classmethod
def index(cls) -> t.List[me.Document]:
"""
gets all documents in a mongodb collection
:return: list of mongodb documents
"""
try:
return cls.model.objects()
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def create(cls, data: dict) -> t.Type[me.Document]:
"""
creates a mongodb document with the data passed to it
:param data: data to persist in the database
:return: mongodb document
"""
try:
db_obj = cls.model(**data)
db_obj.save()
return db_obj
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def create_all(cls, data: t.List[dict]) -> t.List[t.Type[me.Document]]:
try:
obj_data = [cls.model(**item) for item in data]
return cls.model.objects.insert(obj_data)
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def update_by_id(cls, obj_id: t.Union[int, str], data: dict) -> t.Type[me.Document]:
"""
:param obj_id:
:param data:
:return:
"""
try:
db_obj = cls.find_by_id(obj_id)
db_obj.modify(**data)
return db_obj
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def find(cls, query_params: dict) -> t.Type[me.Document]:
"""
returns an item that satisfies the data passed to it if it exists in
the database
:param query_params: {dict}
:return: model_object - Returns an instance object of the model passed
"""
try:
db_obj = cls.model.objects.get(**query_params)
return db_obj
except me.DoesNotExist as error:
raise NotFoundException({"error": "Resource does not exist"}) from error
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def find_all(cls, query_params: dict) -> t.List[t.Type[me.Document]]:
"""
returns all items that satisfy the filter query_params passed to it
:param query_params: query parameters to filter by
:return: model_object - Returns an instance object of the model passed
"""
try:
db_obj = cls.model.objects(**query_params)
return db_obj
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def find_by_id(cls, obj_id: t.Union[int, str]) -> t.Type[me.Document]:
try:
db_obj = cls.model.objects.get(pk=obj_id)
return db_obj
except me.DoesNotExist as error:
raise NotFoundException(
{"error": f"Resource of id {obj_id} does not exist"}
) from error
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
@classmethod
def delete(cls, obj_id: t.Union[int, str]) -> bool:
"""
delete an object matching the id
:param obj_id: id of object to be deleted
:return:
"""
try:
db_obj = cls.model.objects.get(pk=obj_id)
db_obj.delete()
return True
except me.DoesNotExist as error:
raise NotFoundException(
{"error": f"Resource of id {obj_id} does not exist"}
) from error
except me.OperationError as error:
raise OperationError([error.args[0]]) from error
```
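A minimal sketch of how a concrete repository might be declared on top of the class above; the `User` document, its fields, and the import path are assumptions for illustration.
```python
import mongoengine as me
from flask_easy.repository.mongo_repository import MongoRepository  # path assumed

class User(me.Document):
    name = me.StringField(required=True)
    email = me.StringField(required=True)

class UserRepository(MongoRepository):
    model = User

# All operations are classmethods, so no instantiation is needed:
# UserRepository.create({"name": "Ada", "email": "ada@example.com"})
# UserRepository.find({"email": "ada@example.com"})
# UserRepository.delete(some_id)
```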
#### File: flask_easy/scripts/easy_scripts.py
```python
import click
from cookiecutter.main import cookiecutter
@click.group()
def cli():
"""
base cli command
:return: None
"""
@cli.command("scaffold")
@click.argument("output_dir", required=False)
def scaffold(output_dir: str):
"""
Spawn a new project
:param output_dir:
:return:
"""
if output_dir is None:
val = click.prompt("Project name")
extra_context = {"_project_name": val}
else:
extra_context = {"_project_name": "My Awesome App", "_remove_parent": True}
cookiecutter(
"https://github.com/Josephmaclean/easy-scaffold.git",
output_dir=output_dir,
extra_context=extra_context,
)
if __name__ == "__main__":
cli()
```
#### File: scripts/resources/model.py
```python
import os
import click
from jinja2 import Template
from .utils import add_to_init, convert_to_camelcase
def create_model(root_path, name, is_sql=True):
"""
This function creates a model with the name specified. The model
is created in the rootdir/models directory and its auto imported
in the models __init__.py file.
"""
name = name.lower()
file_dir = os.path.join(root_path, "models")
if not os.path.exists(file_dir):
click.echo(click.style(f"cannot find models in {root_path}", fg="red"))
file_name = f"{name}.py"
model_name = convert_to_camelcase(name)
template_string = get_template_string(is_sql)
template = Template(template_string)
data = template.render(model_name=model_name)
file_path = os.path.join(file_dir, file_name)
if not os.path.exists(file_path):
with open(file_path, "w", encoding="UTF-8") as file:
file.write(data)
add_to_init(file_dir, name, model_name)
else:
        click.echo(f"{name}.py exists")
def get_template_string(sql):
"""generate template string"""
if sql:
template_string = """from flask_easy import db, fields
class {{model_name}}(db.Model):
pass
"""
else:
template_string = """import mongoengine as me
class {{model_name}}(me.Document):
id = me.IntField(primary_key=True)
"""
return template_string
``` |
{
"source": "Josephmaclean/great_expectations",
"score": 2
} |
#### File: core/usage_statistics/package_dependencies.py
```python
import os
import re
from typing import List, Set
from great_expectations.data_context.util import file_relative_path
class GEDependencies:
"""Store and provide dependencies when requested.
Also acts as a utility to check stored dependencies match our
library requirements.
Attributes: None
"""
"""This list should be kept in sync with our requirements.txt file."""
GE_REQUIRED_DEPENDENCIES: List[str] = sorted(
[
"altair",
"Click",
"colorama",
"cryptography",
"dataclasses",
"importlib-metadata",
"Ipython",
"jinja2",
"jsonpatch",
"jsonschema",
"mistune",
"nbformat",
"numpy",
"packaging",
"pandas",
"pyparsing",
"python-dateutil",
"pytz",
"requests",
"ruamel.yaml",
"scipy",
"termcolor",
"tqdm",
"typing-extensions",
"urllib3",
"tzlocal",
]
)
"""This list should be kept in sync with our requirements-dev*.txt files."""
ALL_GE_DEV_DEPENDENCIES: List[str] = sorted(
[
"PyMySQL",
"azure-identity",
"azure-keyvault-secrets",
"azure-storage-blob",
"black",
"boto3",
"feather-format",
"flake8",
"flask",
"freezegun",
"gcsfs",
"google-cloud-secret-manager",
"google-cloud-storage",
"ipywidgets",
"isort",
"mistune",
"moto",
"nbconvert",
"openpyxl",
"pre-commit",
"psycopg2-binary",
"pyarrow",
"pyathena",
"pyfakefs",
"pyodbc",
"pypd",
"pyspark",
"pytest",
"pytest-benchmark",
"pytest-cov",
"pytest-order",
"pyupgrade",
"requirements-parser",
"s3fs",
"snapshottest",
"snowflake-connector-python",
"snowflake-sqlalchemy",
"sqlalchemy",
"sqlalchemy-bigquery",
"sqlalchemy-dremio",
"sqlalchemy-redshift",
"teradatasqlalchemy",
"xlrd",
]
)
GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING: List[str] = [
# requirements-dev-contrib.txt:
"black",
"flake8",
"isort",
"pre-commit",
"pytest-cov",
"pytest-order",
"pyupgrade",
# requirements-dev-lite.txt:
"flask",
"freezegun",
"ipywidgets",
"mistune",
"moto",
"nbconvert",
"pyfakefs",
"pytest",
"pytest-benchmark",
"requirements-parser",
"s3fs",
"snapshottest",
# "sqlalchemy", # Not excluded from tracking
]
    GE_DEV_DEPENDENCIES: List[str] = sorted(
        set(ALL_GE_DEV_DEPENDENCIES)
        - set(GE_DEV_DEPENDENCIES_EXCLUDED_FROM_TRACKING)
    )
def __init__(self, requirements_relative_base_dir: str = "../../../"):
self._requirements_relative_base_dir = file_relative_path(
__file__, requirements_relative_base_dir
)
self._dev_requirements_prefix: str = "requirements-dev"
def get_required_dependency_names(self) -> List[str]:
"""Sorted list of required GE dependencies"""
return self.GE_REQUIRED_DEPENDENCIES
def get_dev_dependency_names(self) -> List[str]:
"""Sorted list of dev GE dependencies"""
return self.GE_DEV_DEPENDENCIES
def get_required_dependency_names_from_requirements_file(self) -> List[str]:
"""Get unique names of required dependencies.
Returns:
List of string names of required dependencies.
"""
return sorted(
set(
self._get_dependency_names_from_requirements_file(
self.required_requirements_path
)
)
)
def get_dev_dependency_names_from_requirements_file(self) -> List[str]:
"""Get unique names of dependencies from all dev requirements files.
Returns:
List of string names of dev dependencies.
"""
dev_dependency_names: Set[str] = set()
dev_dependency_filename: str
for dev_dependency_filename in self.dev_requirements_paths:
dependency_names: List[
str
] = self._get_dependency_names_from_requirements_file(
os.path.join(
self._requirements_relative_base_dir, dev_dependency_filename
)
)
dev_dependency_names.update(dependency_names)
return sorted(dev_dependency_names)
@property
def required_requirements_path(self) -> str:
"""Get path for requirements.txt
Returns:
String path of requirements.txt
"""
return os.path.join(self._requirements_relative_base_dir, "requirements.txt")
@property
def dev_requirements_paths(self) -> List[str]:
"""Get all paths for requirements-dev files with dependencies in them.
Returns:
List of string filenames for dev requirements files
"""
return [
filename
for filename in os.listdir(self._requirements_relative_base_dir)
if filename.startswith(self._dev_requirements_prefix)
]
def _get_dependency_names_from_requirements_file(self, filepath: str) -> List[str]:
"""Load requirements file and parse to retrieve dependency names.
Args:
filepath: String relative filepath of requirements file to parse.
Returns:
List of string names of dependencies.
"""
with open(filepath) as f:
dependencies_with_versions = f.read().splitlines()
return self._get_dependency_names(dependencies_with_versions)
def _get_dependency_names(self, dependencies: List[str]) -> List[str]:
"""Parse dependency names from a list of strings.
List of strings typically from a requirements*.txt file.
Args:
dependencies: List of strings of requirements.
Returns:
List of dependency names. E.g. 'pandas' from 'pandas>=0.23.0'.
"""
dependency_matches = [
re.search(r"^(?!--requirement)([\w\-.]+)", s) for s in dependencies
]
dependency_names: List[str] = []
for match in dependency_matches:
if match is not None:
dependency_names.append(match.group(0))
return dependency_names
def main():
"""Run this module to generate a list of packages from requirements files to update our static lists"""
ge_dependencies: GEDependencies = GEDependencies()
print("\n\nRequired Dependencies:\n\n")
print(ge_dependencies.get_required_dependency_names_from_requirements_file())
print("\n\nDev Dependencies:\n\n")
print(ge_dependencies.get_dev_dependency_names_from_requirements_file())
assert (
ge_dependencies.get_required_dependency_names()
== ge_dependencies.get_required_dependency_names_from_requirements_file()
), "Mismatch between required dependencies in requirements files and in GEDependencies"
assert (
ge_dependencies.get_dev_dependency_names()
== ge_dependencies.get_dev_dependency_names_from_requirements_file()
), "Mismatch between dev dependencies in requirements files and in GEDependencies"
print(
"\n\nRequired and Dev dependencies in requirements files match those in GEDependencies"
)
if __name__ == "__main__":
main()
``` |
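A short illustration of what the regex in `_get_dependency_names` extracts from typical requirements lines; the sample lines and the expected output in the comment are illustrative, not taken from the repository's requirements files.
```python
from great_expectations.core.usage_statistics.package_dependencies import GEDependencies

deps = GEDependencies()
lines = [
    "pandas>=0.23.0",
    "ruamel.yaml>=0.16,<0.17.18",
    "--requirement requirements.txt",  # skipped by the negative lookahead
    "sqlalchemy-bigquery>=1.3.0",
]
print(deps._get_dependency_names(lines))
# -> ['pandas', 'ruamel.yaml', 'sqlalchemy-bigquery']
```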
{
"source": "josephmancuso/college",
"score": 3
} |
#### File: college/bootstrap/start.py
```python
from pydoc import locate
from dotenv import find_dotenv, load_dotenv
'''
|--------------------------------------------------------------------------
| Load Environment Variables
|--------------------------------------------------------------------------
|
| Take environment variables from the .env file and load them in.
|
'''
load_dotenv(find_dotenv())
def app(environ, start_response):
''' The WSGI Application Server '''
from wsgi import container
'''
|--------------------------------------------------------------------------
| Add Environ To Service Container
|--------------------------------------------------------------------------
|
| Add the environ to the service container. The environ is generated by the
| the WSGI server above and used by a service provider to manipulate the
| incoming requests
|
'''
container.bind('Environ', environ)
'''
|--------------------------------------------------------------------------
| Execute All Service Providers That Require The WSGI Server
|--------------------------------------------------------------------------
|
| Run all service provider boot methods if the wsgi attribute is true.
|
'''
try:
for provider in container.make('Application').PROVIDERS:
located_provider = locate(provider)().load_app(container)
if located_provider.wsgi is True:
container.resolve(located_provider.boot)
except Exception as e:
container.make('ExceptionHandler').load_exception(e)
'''
|--------------------------------------------------------------------------
| We Are Ready For Launch
|--------------------------------------------------------------------------
|
    | If we have a solid response and are not redirecting, then we need to
    | return a 200 status code along with the data. If we don't, we'll have
    | to return a 302 redirect to wherever the user would like to go next.
|
'''
start_response(container.make('StatusCode'), container.make('Headers'))
'''
|--------------------------------------------------------------------------
| Final Step
|--------------------------------------------------------------------------
|
| This will take the data variable from the Service Container and return
| it to the WSGI server.
|
'''
return iter([bytes(container.make('Response'), 'utf-8')])
``` |
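The `app` callable above is a standard WSGI entry point, so any WSGI server can host it. A minimal local-serving sketch is shown below, assuming the `waitress` package is installed and the module path matches the file layout; the Masonite project itself would normally be served through its own tooling.
```python
# serve.py -- illustrative only
from waitress import serve
from bootstrap.start import app  # module path assumed from the file layout

if __name__ == "__main__":
    serve(app, host="127.0.0.1", port=8000)
```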
{
"source": "josephmancuso/gbaleague-masonite2",
"score": 3
} |
#### File: app/events/UserSignedUp.py
```python
from events import Event
from app.notifications import WelcomeNotification
class UserSignedUp(Event):
""" UserSignedUp Event Class """
subscribe = [
'user.signedup'
]
def __init__(self, Notify):
""" Event Class Constructor """
self.notify = Notify
def handle(self):
""" Event Handle Method """
self.notify.mail(WelcomeNotification, to='<EMAIL>')
```
#### File: http/controllers/DraftController.py
```python
from masonite.request import Request
from app.DraftedPokemon import DraftedPokemon
from app.League import League
from app.Pokemon import Pokemon
from app.Team import Team
class DraftController:
''' Class Docstring Description '''
def __init__(self, request: Request):
self.request = request
self.league = League.find(request.param('id'))
def show(self):
if self.request.has('tier'):
tier = self.request.input('tier')
else:
tier = 1
return view('leagues/draft', {'league': self.league, 'tier': tier})
def draft(self):
if self.request.has('draft'):
DraftedPokemon.create(
team_id=self.league.current.team(self.league).id,
pokemon_id=self.request.input('pokemon'),
league_id=self.league.id
)
DraftedPokemon.where('queue_id', self.request.input(
'pokemon')).where('league_id', self.league.id).delete()
# Get Pokemon
pokemon = Pokemon.find(self.request.input('pokemon'))
team = Team.find(self.league.current.team(self.league).id)
team.points -= pokemon.points
team.save()
self.league.broadcast('{} was drafted by {} for {} points'.format(pokemon.name, team.name, pokemon.points))
self.league.next_drafter()
self.league.broadcast("It is currently {}'s turn to draft.".format(team.owner.name))
self.request.session.flash(
'success', 'Successfully Drafted {0}'.format(pokemon.name))
            return self.request.redirect_to('league.draft', {'id': self.league.id})
elif self.request.has('unqueue'):
DraftedPokemon \
.where('queue_id', self.request.input('pokemon')) \
.where('team_id', self.request.user()
.team(self.league).id).where('league_id', self.league.id) \
.first().delete()
self.request.session.flash('success', 'Successfully Unqueued')
return self.request.redirect_to('league.draft', {'id': self.league.id})
elif self.request.has('queue'):
DraftedPokemon.create(
team_id=auth().team(self.league).id,
queue_id=self.request.input('pokemon'),
league_id=self.league.id
)
self.request.session.flash('success', 'Queue Successful')
return self.request.redirect_to('league.draft', {'id': self.league.id})
self.request.session.flash('warning', 'Could not draft at this time')
return self.request.redirect_to('league.draft', {'id': self.league.id})
def status(self):
league = League.find(request().param('id'))
if request().has('draft-open'):
league.start_draft()
league.broadcast('The draft has started!')
elif request().has('draft-close'):
league.close_draft()
league.broadcast('The draft is closed!')
return request().redirect('/league/{0}/draft'.format(league.id))
```
#### File: http/controllers/WelcomeController.py
```python
from masonite.request import Request
from masonite.view import View
from events import Event
from app.League import League
from masonite import Broadcast
class WelcomeController:
""" Controller For Welcoming The User """
def __init__(self, view: View, request: Request):
self.view = view
self.request = request
def show(self, event: Event, broadcast: Broadcast) -> View.render:
''' Show Welcome Template '''
return self.view.render('index')
def discover(self) -> View.render:
"""Shows the discover page
Returns:
View.render
"""
if self.request.input('search'):
leagues = League.order_by('id', 'desc').get().filter(lambda league: self.request.input(
'search').lower() in league.name.lower())
else:
leagues = League.order_by('id', 'desc').get().take(100)
return self.view.render('discover', {'leagues': leagues})
def slack(self):
return ''
# response = IntegrationManager.driver('discord').user()
# requests.post(
# 'https://discordapp.com/api/webhooks/{0}/{1}'.format(
# response['webhook']['id'],
# response['webhook']['token']
# ),
# json={
# 'content': 'Masonite was successfully integrated!',
# 'username': 'Masonite'
# }
# )
# return response['access_token']
```
#### File: http/middleware/HtmlMinifyMiddleware.py
```python
import htmlmin
from masonite.request import Request
from masonite.response import Response
class HtmlMinifyMiddleware:
def __init__(self, request: Request, response: Response):
self.request = request
self.response = response
def after(self):
if 'text/html' in self.request.header('Content-Type') and not self.request.header('Location'):
self.response.view(
htmlmin.minify(self.response.data())
)
self.request.header('Cache-Control', 'max-age=3600, must-revalidate', http_prefix=False)
```
#### File: http/middleware/LeagueOwner.py
```python
from app.League import League
from masonite.request import Request
class LeagueOwner:
""" Middleware to check if the user is the owner of the league """
def __init__(self, request: Request):
""" Inject Any Dependencies From The Service Container """
self.request = request
def before(self):
""" Run This Middleware Before The Route Executes """
if self.request.user():
if not League.where('owner_id', self.request.user().id).where('id', self.request.param('id')).first():
raise Exception('You are not the league owner.')
def after(self):
""" Run This Middleware After The Route Executes """
pass
```
#### File: app/jobs/ResetEmail.py
```python
from masonite.queues import Queueable
from notifications import Notify
from app.notifications import ResetEmailNotification
class ResetEmail(Queueable):
"""A ResetEmail Job
"""
def __init__(self, notify: Notify):
"""A ResetEmail Constructor
"""
self.notify = notify
def handle(self, user):
"""Logic to handle the job
"""
return self.notify.mail(ResetEmailNotification, user=user, to=user.email)
```
#### File: app/providers/UserModelProvider.py
```python
import os
from events import Event
from masonite.provider import ServiceProvider
from masonite.request import Request
from app.commands.ShareCommand import ShareCommand
# from app.commands.BroadcastWorkCommand import BroadcastWorkCommand
from app.commands.SeedTableCommand import SeedTableCommand
# from app.drivers.BroadcastLocalDriver import BroadcastLocalDriver
from app.events import UserSignedUp
from app.User import User
from config import application
from masonite.view import View
class UserModelProvider(ServiceProvider):
''' Binds the User model into the Service Container '''
wsgi = False
def register(self):
''' Registers The User Into The Service Container '''
self.app.bind('ShareCommand', ShareCommand())
# self.app.bind('BroadcastWorkCommand', BroadcastWorkCommand())
self.app.bind('SeedTableCommand', SeedTableCommand())
# self.app.bind('BroadcastLocalDriver', BroadcastLocalDriver)
def boot(self, view: View, event: Event):
view.share({
'show_if': self._show_if,
'env': os.getenv,
'DEBUG': application.DEBUG
})
event.subscribe(UserSignedUp)
@staticmethod
def _show_if(output, check1, check2=False):
if check2:
if check1 == check2:
return output
else:
if check1:
return output
return ''
```
#### File: gbaleague-masonite2/app/Requests.py
```python
from config.database import Model
from orator.orm import belongs_to
class Requests(Model):
__fillable__ = ['team_id', 'league_id']
__table__ = 'requests'
@belongs_to('team_id', 'id')
def team(self):
from app.Team import Team
return Team
```
#### File: gbaleague-masonite2/app/Team.py
```python
from config.database import Model
from orator.orm import belongs_to
from app.DraftedPokemon import DraftedPokemon
class Team(Model):
__fillable__ = ['name', 'league_id', 'owner_id', 'picture']
def get_team_pokemon(self, league=False):
if league:
return DraftedPokemon \
.where('team_id', self.id) \
.where('league_id', league.id) \
.where_not_null('pokemon_id') \
.get()
else:
return DraftedPokemon \
.where('team_id', self.id) \
.where('league_id', self.league_id) \
.where_not_null('pokemon_id') \
.get()
@belongs_to('owner_id', 'id')
def owner(self):
from app.User import User
return User
```
#### File: databases/migrations/2018_02_18_032808_create_requests_table.py
```python
from orator.migrations import Migration
class CreateRequestsTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create('requests') as table:
table.increments('id')
table.integer('team_id').unsigned()
table.foreign('team_id').references('id').on('teams')
table.integer('league_id').unsigned()
table.foreign('league_id').references('id').on('leagues')
table.timestamps()
def down(self):
"""
Revert the migrations.
"""
self.schema.drop('requests')
```
#### File: databases/migrations/2018_07_24_012319_fix_request_league_foreign_key.py
```python
from orator.migrations import Migration
class FixRequestLeagueForeignKey(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.table('requests') as table:
table.drop_foreign('requests_league_id_foreign')
table.foreign('league_id').references('id').on('leagues') \
.on_delete('cascade')
def down(self):
"""
Revert the migrations.
"""
with self.schema.table('requests') as table:
pass
```
#### File: databases/migrations/2018_07_24_012836_fix_team_league_foreign_key.py
```python
from orator.migrations import Migration
class FixTeamLeagueForeignKey(Migration):

    def up(self):
        """
        Run the migrations.
        """
        with self.schema.table('teams') as table:
            table.drop_foreign('teams_league_id_foreign')
            table.foreign('league_id').references('id').on('leagues') \
                .on_delete('SET NULL')

    def down(self):
        """
        Revert the migrations.
        """
        with self.schema.table('teams') as table:
            pass
``` |
{
"source": "josephmancuso/masonite-azure-driver",
"score": 2
} |
#### File: azure/providers/AzureProvider.py
```python
from config import storage
from masonite.provider import ServiceProvider
from ..drivers import UploadAzureDriver
class AzureProvider(ServiceProvider):

    wsgi = False

    def register(self):
        self.app.bind('UploadAzureDriver', UploadAzureDriver)

    def boot(self):
        pass
``` |
{
"source": "josephmancuso/masonite-forum",
"score": 3
} |
#### File: http/controllers/RegisterController.py
```python
from validator import Required, Not, Blank, validate, Length
from masonite.facades.Auth import Auth
from config import auth
import bcrypt
class RegisterController(object):
    ''' Class Docstring Description '''

    def __init__(self):
        pass

    def show(self, Request):
        ''' Show the registration page '''
        return view('auth/register')

    def store(self, Request, Session):
        ''' Register a new user '''
        ok, errors = self.validate_input(Request.all())

        if not ok:
            display = ''
            for error in errors:
                display += '{0} {1} \n\n\n'.format(error.title(), errors[error][0])
            Session.flash('danger', display)
            return Request.redirect('/register')

        # register the user
        password = bytes(bcrypt.hashpw(
            bytes(Request.input('password'), 'utf-8'), bcrypt.gensalt()
        )).decode('utf-8')

        auth.AUTH['model'].create(
            name=Request.input('name'),
            password=password,
            email=Request.input('email'),
        )

        # login the user
        # redirect to the homepage
        if Auth(Request).login(Request.input(auth.AUTH['model'].__auth__), Request.input('password')):
            Session.flash('success', 'Logged successfuly!')

        return Request.redirect('/')

    def validate_input(self, data):
        rules = {
            'name': [Required, Not(Blank())],
            'email': [Required, Not(Blank())],
            'password': [Required, Not(Blank()), Length(6)],
        }

        return validate(rules, data)
```
#### File: http/middleware/LoadUserMiddleware.py
```python
from masonite.facades.Auth import Auth
class LoadUserMiddleware:
    ''' Middleware class which loads the current user into the request '''

    def __init__(self, Request):
        ''' Inject Any Dependencies From The Service Container '''
        self.request = Request

    def before(self):
        ''' Run This Middleware Before The Route Executes '''
        self.load_user(self.request)
        return self.request

    def after(self):
        ''' Run This Middleware After The Route Executes '''
        pass

    def load_user(self, request):
        ''' Load user into the request '''
        request.set_user(Auth(request).user())
``` |
{
"source": "josephmancuso/masonite-inertia",
"score": 3
} |
#### File: inertia/commands/DemoCommand.py
```python
import os
from cleo import Command
from masonite.packages import append_web_routes
package_directory = os.path.dirname(os.path.realpath(__file__))
class DemoCommand(Command):
    """
    Create a Inertia.js demo and add it to your project.

    command:name
        {argument : description}
    """

    def handle(self):
        demo_path = os.path.join(package_directory, "../snippets/demo")
        append_web_routes(os.path.join(demo_path, "routes.py"))
        # install controller
        # scaffold app ?
```
#### File: inertia/commands/InstallCommand.py
```python
from cleo import Command
import os
from masonite.packages import create_or_append_config
package_directory = os.path.dirname(os.path.realpath(__file__))
class InstallCommand(Command):
    """
    Install Masonite adapter for Inertia.js

    install:inertia
    """

    def handle(self):
        create_or_append_config(
            os.path.join(package_directory, "../snippets/config/inertia.py")
        )
```
#### File: masonite/inertia/helpers.py
```python
from jinja2 import Markup
def inertia(page_data, app_id="app"):
    """Inertia view helper to render a div with page data required by client-side
    Inertia.js adapter."""
    return Markup("<div id='{0}' data-page='{1}'></div>".format(app_id, page_data))
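

# --- Usage sketch (added for illustration; the page payload below is made up) ---
def _example_inertia_div():
    page = '{"component": "Home", "props": {"user": "joe"}, "url": "/", "version": "1"}'
    return inertia(page, app_id="app")
    # -> <div id='app' data-page='{"component": "Home", ...}'></div>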
```
#### File: inertia/providers/InertiaProvider.py
```python
from masonite.provider import ServiceProvider
from masonite.view import View
from masonite.inertia.core.InertiaResponse import InertiaResponse
from masonite.inertia.commands.InstallCommand import InstallCommand
from masonite.inertia.commands.DemoCommand import DemoCommand
from masonite.inertia.helpers import inertia
class InertiaProvider(ServiceProvider):
    """Masonite adapter for Inertia.js Service Provider."""

    wsgi = False

    def register(self):
        self.app.bind("Inertia", InertiaResponse(self.app))
        self.app.bind("InstallCommand", InstallCommand())
        self.app.bind("DemoCommand", DemoCommand())

    def boot(self, view: View):
        self.register_view_helper(view)

    def register_view_helper(self, view):
        view.share({"inertia": inertia})
``` |
{
"source": "josephmancuso/masonite",
"score": 2
} |
#### File: drivers/queue/AMQPDriver.py
```python
import pickle
import pendulum
import inspect
from urllib import parse
from ...utils.console import HasColoredOutput
class AMQPDriver(HasColoredOutput):
def __init__(self, application):
self.application = application
self.connection = None
self.publishing_channel = None
def set_options(self, options):
self.options = options
return self
def push(self, *jobs, args=(), **kwargs):
for job in jobs:
payload = {
"obj": job,
"args": args,
"callback": self.options.get("callback", "handle"),
"created": pendulum.now(tz=self.options.get("tz", "UTC")),
}
try:
self.connect().publish(payload)
except (self.get_connection_exceptions()):
self.connect().publish(payload)
def get_connection_exceptions(self):
pika = self.get_package_library()
return (
pika.exceptions.ConnectionClosed,
pika.exceptions.ChannelClosed,
pika.exceptions.ConnectionWrongStateError,
pika.exceptions.ChannelWrongStateError,
)
def publish(self, payload):
pika = self.get_package_library()
self.publishing_channel.basic_publish(
exchange=self.options.get("exchange"),
routing_key=self.options.get("queue"),
body=pickle.dumps(payload),
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
),
)
self.publishing_channel.close()
self.connection.close()
def get_package_library(self):
try:
import pika
except ImportError:
raise ModuleNotFoundError(
"Could not find the 'pika' library. Run 'pip install pika' to fix this."
)
return pika
def connect(self):
try:
import pika
except ImportError:
raise ModuleNotFoundError(
"Could not find the 'pika' library. Run 'pip install pika' to fix this."
)
connection_url = "amqp://{}:{}@{}{}/{}".format(
self.options.get("username"),
self.options.get("password"),
self.options.get("host"),
":" + str(self.options.get("port")) if self.options.get("port") else "",
self.options.get("vhost", "%2F"),
)
if self.options.get("connection_options"):
connection_url += "?" + parse.urlencode(
self.options.get("connection_options")
)
self.connection = pika.BlockingConnection(pika.URLParameters(connection_url))
self.publishing_channel = self.connection.channel()
self.publishing_channel.queue_declare(self.options.get("queue"), durable=True)
return self
def consume(self):
self.success(
'[*] Waiting to process jobs on the "{}" queue. To exit press CTRL+C'.format(
self.options.get("queue")
)
)
self.connect()
self.publishing_channel.basic_qos(prefetch_count=1)
self.publishing_channel.basic_consume(self.options.get("queue"), self.work)
try:
self.publishing_channel.start_consuming()
finally:
self.publishing_channel.stop_consuming()
self.publishing_channel.close()
self.connection.close()
def retry(self):
builder = (
self.application.make("builder")
.new()
.on(self.options.get("connection"))
.table(self.options.get("failed_table", "failed_jobs"))
)
jobs = builder.get()
if len(jobs) == 0:
self.success("No failed jobs found.")
return
for job in jobs:
try:
self.connect().publish(pickle.loads(job["payload"]))
except (self.get_connection_exceptions()):
self.connect().publish(pickle.loads(job["payload"]))
self.success(f"Added {len(jobs)} failed jobs back to the queue")
builder.table(self.options.get("failed_table", "failed_jobs")).where_in(
"id", [x["id"] for x in jobs]
).delete()
def work(self, ch, method, _, body):
job = pickle.loads(body)
obj = job["obj"]
args = job["args"]
callback = job["callback"]
try:
try:
if inspect.isclass(obj):
obj = self.application.resolve(obj)
getattr(obj, callback)(*args)
except AttributeError:
obj(*args)
self.success(
f"[{method.delivery_tag}][{pendulum.now(tz=self.options.get('tz', 'UTC')).to_datetime_string()}] Job Successfully Processed"
)
except Exception as e:
self.danger(
f"[{method.delivery_tag}][{pendulum.now(tz=self.options.get('tz', 'UTC')).to_datetime_string()}] Job Failed"
)
getattr(obj, "failed")(job, str(e))
self.add_to_failed_queue_table(
self.application.make("builder").new(), str(job["obj"]), body, str(e)
)
ch.basic_ack(delivery_tag=method.delivery_tag)
def add_to_failed_queue_table(self, builder, name, payload, exception):
builder.table(self.options.get("failed_table", "failed_jobs")).create(
{
"driver": "amqp",
"queue": self.options.get("queue", "default"),
"name": name,
"connection": self.options.get("connection"),
"created_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
"exception": exception,
"payload": payload,
"failed_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
}
)
```
#### File: drivers/queue/DatabaseDriver.py
```python
import pickle
import pendulum
from ...utils.console import HasColoredOutput
from ...utils.time import parse_human_time
import time
class DatabaseDriver(HasColoredOutput):
def __init__(self, application):
self.application = application
def set_options(self, options):
self.options = options
return self
def push(self, *jobs, args=(), **kwargs):
builder = self.get_builder()
available_at = parse_human_time(kwargs.get("delay", "now"))
for job in jobs:
payload = pickle.dumps(
{
"obj": job,
"args": args,
"kwargs": kwargs,
"callback": self.options.get("callback", "handle"),
}
)
builder.create(
{
"name": str(job),
"payload": payload,
"available_at": available_at.to_datetime_string(),
"attempts": 0,
"queue": self.options.get("queue", "default"),
}
)
def consume(self):
print("Listening for jobs on queue: " + self.options.get("queue", "default"))
builder = self.get_builder()
while True:
time.sleep(int(self.options.get("poll", 1)))
if self.options.get("verbosity") == "vv":
print("Checking for available jobs .. ")
builder = builder.new().table(self.options.get("table"))
jobs = (
builder.where("queue", self.options.get("queue", "default"))
.where(
"available_at",
"<=",
pendulum.now(tz=self.options.get("tz", "UTC")).to_datetime_string(),
)
.limit(10)
.order_by("id")
.get()
)
if self.options.get("verbosity") == "vv":
print(f"Found {len(jobs)} job(s) ")
builder.where_in("id", [x["id"] for x in jobs]).update(
{
"reserved_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string()
}
)
for job in jobs:
builder.where("id", job["id"]).table(self.options.get("table")).update(
{
"ran_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
}
)
payload = job["payload"]
unserialized = pickle.loads(job["payload"])
obj = unserialized["obj"]
args = unserialized["args"]
callback = unserialized["callback"]
try:
try:
getattr(obj, callback)(*args)
except AttributeError:
obj(*args)
self.success(
f"[{job['id']}][{pendulum.now(tz=self.options.get('tz', 'UTC')).to_datetime_string()}] Job Successfully Processed"
)
if self.options.get("verbosity") == "vv":
print(f"Successful. Deleting Job ID: {job['id']}")
builder.where("id", job["id"]).delete()
except Exception as e: # skipcq
self.danger(
f"[{job['id']}][{pendulum.now(tz=self.options.get('tz', 'UTC')).to_datetime_string()}] Job Failed"
)
job["attempts"] = int(job["attempts"])
if job["attempts"] + 1 < int(self.options.get("attempts", 1)):
builder.where("id", job["id"]).table(
self.options.get("table")
).update(
{
"attempts": int(job["attempts"] + 1),
}
)
elif job["attempts"] + 1 >= int(
self.options.get("attempts", 1)
) and not self.options.get("failed_table"):
# Delete the jobs
builder.where("id", job["id"]).table(
self.options.get("table")
).update(
{
"attempts": job["attempts"] + 1,
}
)
if hasattr(obj, "failed"):
getattr(obj, "failed")(unserialized, str(e))
builder.where("id", job["id"]).table(
self.options.get("table")
).delete()
elif self.options.get("failed_table"):
self.add_to_failed_queue_table(
builder, job["name"], payload, str(e)
)
if hasattr(obj, "failed"):
getattr(obj, "failed")(unserialized, str(e))
builder.where("id", job["id"]).table(
self.options.get("table")
).delete()
else:
builder.where("id", job["id"]).table(
self.options.get("table")
).update(
{
"attempts": job["attempts"] + 1,
}
)
def retry(self):
builder = self.get_builder()
jobs = (
builder.table(self.options.get("failed_table"))
.where("queue", self.options.get("queue", "default"))
.get()
)
if len(jobs) == 0:
self.success("No failed jobs found.")
return
for job in jobs:
builder.table("jobs").create(
{
"name": str(job["name"]),
"payload": job["payload"],
"attempts": 0,
"available_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
"queue": job["queue"],
}
)
self.success(f"Added {len(jobs)} failed job(s) back to the queue")
builder.table(self.options.get("failed_table", "failed_jobs")).where_in(
"id", [x["id"] for x in jobs]
).delete()
def get_builder(self):
return (
self.application.make("builder")
.new()
.on(self.options.get("connection"))
.table(self.options.get("table"))
)
def add_to_failed_queue_table(self, builder, name, payload, exception):
builder.table(self.options.get("failed_table", "failed_jobs")).create(
{
"driver": "database",
"queue": self.options.get("queue", "default"),
"name": name,
"connection": self.options.get("connection"),
"created_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
"exception": exception,
"payload": payload,
"failed_at": pendulum.now(
tz=self.options.get("tz", "UTC")
).to_datetime_string(),
}
)
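

# --- Illustrative wiring (added; not part of the original driver) ---
# `application` is a stand-in for the Masonite app container and `job` for a job
# instance; the option keys mirror the ones the driver reads above.
def _example_queue_push(application, job, user_id):
    driver = DatabaseDriver(application)
    driver.set_options({
        "connection": "sqlite",
        "table": "jobs",
        "queue": "default",
        "attempts": 3,
        "failed_table": "failed_jobs",
    })
    # serializes the job with pickle and inserts a row that consume() will pick up
    driver.push(job, args=(user_id,), delay="10 minutes")
    # in a worker process, this call blocks and polls the table:
    # driver.consume()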
```
#### File: exceptions/exceptionite/solutions.py
```python
class TableNotFound:
    def title(self):
        return "Table Not Found"

    def description(self):
        return "You are trying to make a query on a table that cannot be found. Check that :table migration exists and that migrations have been ran with 'python craft migrate' command."

    def regex(self):
        return r"no such table: (?P<table>(\w+))"


class MissingCSRFToken:
    def title(self):
        return "Missing CSRF Token"

    def description(self):
        return "You are trying to make a sensitive request without providing a CSRF token. Your request might be vulnerable to Cross Site Request Forgery. To resolve this issue you should use {{ csrf_field }} in HTML forms or add X-CSRF-TOKEN header in AJAX requests."

    def regex(self):
        return r"Missing CSRF Token"


class InvalidCSRFToken:
    def title(self):
        return "The session does not match the CSRF token"

    def description(self):
        return "Try clearing your cookies for the localhost domain in your browsers developer tools."

    def regex(self):
        return r"Invalid CSRF Token"


class TemplateNotFound:
    def title(self):
        return "Template Not Found"

    def description(self):
        return """':template.html' view file has not been found in registered view locations. Please verify the spelling of the template and that it exists in locations declared in Kernel file. You can check
        available view locations with app.make('view.locations')."""

    def regex(self):
        return r"Template '(?P<template>(\w+))' not found"


class NoneResponse:
    def title(self):
        return "Response cannot be None"

    def description(self):
        return """Ensure that the controller method used in this request returned something. A controller method cannot return None or nothing.
        If you don't want to return a value you can return an empty string ''."""

    def regex(self):
        return r"Responses cannot be of type: None."


class RouteMiddlewareNotFound:
    def title(self):
        return "Did you register the middleware key in your Kernel.py file?"

    def description(self):
        return "Check your Kernel.py file inside your 'route_middleware' attribute and look for a :middleware key"

    def regex(self):
        return r"Could not find the \'(?P<middleware>(\w+))\' middleware key"
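

# --- Illustrative matcher (added; not the actual exceptionite dispatch code) ---
# shows how a solution's regex() can be matched against an exception message and
# its named groups substituted into the description.
def _example_find_solution(message):
    import re
    for solution in (TableNotFound(), TemplateNotFound(), NoneResponse()):
        match = re.search(solution.regex(), message)
        if match:
            description = solution.description()
            # substitute named groups such as :table / :template into the description
            for name, value in match.groupdict().items():
                description = description.replace(f":{name}", value)
            return solution.title(), description
    return None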
```
#### File: masonite/filesystem/Storage.py
```python
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
    from ..foundation import Application


class Storage:
    """File storage manager for Masonite handling managing files with different drivers."""

    def __init__(self, application: "Application", store_config: dict = None):
        self.application = application
        self.drivers = {}
        self.store_config = store_config or {}
        self.options = {}

    def add_driver(self, name: str, driver: str):
        self.drivers.update({name: driver})

    def set_configuration(self, config: dict) -> "Storage":
        self.store_config = config
        return self

    def get_driver(self, name: str = None) -> Any:
        if name is None:
            return self.drivers[self.store_config.get("default")]
        return self.drivers[name]

    def get_config_options(self, name: str = None) -> dict:
        if name is None or name == "default":
            return self.store_config.get(self.store_config.get("default"))
        return self.store_config.get(name)

    def disk(self, name: str = "default") -> Any:
        """Get the file manager instance for the given disk name."""
        store_config = self.get_config_options(name)
        driver = self.get_driver(self.get_config_options(name).get("driver"))
        return driver.set_options(store_config)
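

# --- Illustrative usage (added; disk and driver names below are assumptions,
# not Masonite's actual defaults). `application` and `file_driver` are stand-ins.
def _example_storage_usage(application, file_driver):
    storage = Storage(application, store_config={
        "default": "local",
        "local": {"driver": "file", "path": "storage/framework/files"},
    })
    storage.add_driver("file", file_driver)
    # disk() looks up the disk config, picks the driver by its "driver" key,
    # and returns driver.set_options(<disk config>)
    return storage.disk("local")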
```
#### File: masonite/foundation/Kernel.py
```python
import os
from cleo import Application as CommandApplication
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .Application import Application
from .response_handler import response_handler
from .. import __version__
from ..commands import (
TinkerCommand,
CommandCapsule,
KeyCommand,
ServeCommand,
QueueWorkCommand,
QueueRetryCommand,
QueueTableCommand,
QueueFailedCommand,
AuthCommand,
MakePolicyCommand,
MakeControllerCommand,
MakeJobCommand,
MakeMailableCommand,
MakeProviderCommand,
PublishPackageCommand,
MakeTestCommand,
DownCommand,
UpCommand,
MakeCommandCommand,
MakeViewCommand,
MakeMiddlewareCommand,
PresetCommand,
)
from ..environment import LoadEnvironment
from ..middleware import MiddlewareCapsule
from ..routes import Router
from ..loader import Loader
from ..tests.HttpTestResponse import HttpTestResponse
from ..tests.TestResponseCapsule import TestResponseCapsule
class Kernel:
def __init__(self, app: "Application"):
self.application = app
def register(self) -> None:
"""Register core Masonite features in the project."""
self.load_environment()
self.register_framework()
self.register_commands()
self.register_testing()
def load_environment(self) -> None:
"""Load environment variables into the application."""
LoadEnvironment()
def register_framework(self) -> None:
self.application.set_response_handler(response_handler)
self.application.use_storage_path(
os.path.join(self.application.base_path, "storage")
)
self.application.bind("middleware", MiddlewareCapsule())
self.application.bind(
"router",
Router(),
)
self.application.bind("loader", Loader())
def register_commands(self) -> None:
self.application.bind(
"commands",
CommandCapsule(CommandApplication("Masonite", __version__)).add(
TinkerCommand(),
KeyCommand(),
ServeCommand(self.application),
QueueWorkCommand(self.application),
QueueRetryCommand(self.application),
QueueFailedCommand(),
QueueTableCommand(),
AuthCommand(self.application),
MakePolicyCommand(self.application),
MakeControllerCommand(self.application),
MakeJobCommand(self.application),
MakeMailableCommand(self.application),
MakeProviderCommand(self.application),
PublishPackageCommand(self.application),
MakeTestCommand(self.application),
DownCommand(),
UpCommand(),
MakeCommandCommand(self.application),
MakeViewCommand(self.application),
MakeMiddlewareCommand(self.application),
PresetCommand(self.application),
),
)
def register_testing(self) -> None:
test_response = TestResponseCapsule(HttpTestResponse)
self.application.bind("tests.response", test_response)
```
#### File: middleware/route/ShareErrorsInSessionMiddleware.py
```python
from .. import Middleware
from ...facades import Session
from ...validation import MessageBag
class ShareErrorsInSessionMiddleware(Middleware):
    """Share errors as a Message bag if there are any errors flashed to session. If not the message
    bag will be empty."""

    def before(self, request, _):
        request.app.make("view").share(
            {
                "errors": MessageBag(Session.pull("errors") or {}),
            }
        )
        return request

    def after(self, request, _):
        return request
```
#### File: masonite/providers/Provider.py
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..foundation import Application


class Provider:
    def __init__(self, application: "Application") -> None:
        self.application = application

    def register(self) -> None:
        pass

    def boot(self) -> None:
        pass
```
#### File: masonite/rates/RateLimiter.py
```python
import pendulum
from typing import TYPE_CHECKING, Any, Callable
if TYPE_CHECKING:
    from ..foundation import Application
    from .limiters import Limiter


class RateLimiter:
    def __init__(self, application: "Application"):
        self.application = application
        self.limiters: dict = {}

    def register(self, name, callback: "Limiter") -> "RateLimiter":
        self.limiters[name] = callback
        return self

    @property
    def cache(self) -> Any:
        """Get default cache driver"""
        return self.application.make("cache").store()

    def attempts(self, key: str) -> int:
        key = self.clean_key(key)
        return int(self.cache.get(key, default=0))

    def clean_key(self, key: str) -> str:
        """Clean the rate limiter key from unicode characters."""
        if isinstance(key, bytes):
            return key.decode("utf-8")
        return key

    def get_limiter(self, name: str) -> "Limiter":
        return self.limiters[name]

    def attempt(self, key: str, callback: Callable, max_attempts: int, delay: int = 60):
        # don't execute callback if key limited
        if self.too_many_attempts(key, max_attempts):
            return False
        result = callback()
        self.hit(key, delay)
        return result

    def too_many_attempts(self, key: str, max_attempts: int) -> bool:
        key = self.clean_key(key)
        if self.attempts(key) >= max_attempts:
            # trigger remove of cache value if needed
            self.cache.get(f"{key}:timer")
            if self.cache.has(f"{key}:timer"):
                return True
            self.reset_attempts(key)
        return False

    def hit(self, key: str, delay: int) -> int:
        key = self.clean_key(key)
        # store timestamp when key limit be available again
        available_at = pendulum.now().add(seconds=delay).int_timestamp
        self.cache.add(f"{key}:timer", available_at, delay)
        # ensure key exists
        self.cache.add(key, 0, delay)
        hits = self.cache.increment(key)
        return hits

    def reset_attempts(self, key: str) -> bool:
        key = self.clean_key(key)
        return self.cache.put(key, 0)

    def clear(self, key: str):
        key = self.clean_key(key)
        self.cache.forget(key)
        self.cache.forget(f"{key}:timer")

    def available_at(self, key: str) -> int:
        """Get UNIX integer timestamp at which key will be available again."""
        key = self.clean_key(key)
        timestamp = int(self.cache.get(f"{key}:timer", 0))
        return timestamp

    def available_in(self, key: str) -> int:
        """Get seconds in which key will be available again."""
        timestamp = self.available_at(key)
        if not timestamp:
            return 0
        else:
            return max(0, timestamp - pendulum.now().int_timestamp)

    def remaining(self, key: str, max_attempts: int) -> int:
        """Get remaining attempts before limitation."""
        key = self.clean_key(key)
        return max_attempts - self.attempts(key)
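

# --- Usage sketch (added; `application` is assumed to be a booted app with a cache store) ---
def _example_rate_limited_call(application, send_reset_email):
    limiter = RateLimiter(application)
    # allow at most 3 attempts per 60 seconds for this key
    result = limiter.attempt("password-reset:joe", send_reset_email, max_attempts=3, delay=60)
    if result is False:
        # limited: report how long until the key frees up again
        return f"Too many attempts, retry in {limiter.available_in('password-reset:joe')}s"
    return result, limiter.remaining("password-reset:joe", 3)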
```
#### File: masonite/routes/Router.py
```python
from urllib import parse
from ..utils.collections import flatten
from ..exceptions import RouteNotFoundException, MethodNotAllowedException
class Router:
http_methods = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"]
def __init__(self, *routes, module_location=None):
self.routes = flatten(routes)
def find(self, path, request_method, subdomain=None):
from .HTTPRoute import HTTPRoute
for route in self.routes:
if route.match(path, request_method, subdomain=subdomain):
return route
# we did not find a route matching the given path and method.
# we will try to find a route matching other methods
other_methods = [
method for method in self.http_methods if method != request_method
]
matched_methods = []
for other_method in other_methods:
for route in self.routes:
if route.match(path, other_method, subdomain=subdomain):
matched_methods.append(other_method)
break
# we really did not find a route
if not matched_methods:
return None
# if alternative methods have been found, check if current request method is OPTIONS
# to build a proper reponse else build a method not allowed response
if request_method == "OPTIONS":
def preflight_response(app):
return (
app.make("response")
.with_headers({"Allow": ", ".join(matched_methods)})
.status(204)
)
preflight_route = HTTPRoute(path, request_method=["options"])
preflight_route.get_response = preflight_response
return preflight_route
else:
raise MethodNotAllowedException(matched_methods, request_method)
def matches(self, path):
for route in self.routes:
if route.matches(path):
return route
def find_by_name(self, name):
for route in self.routes:
if route.match_name(name):
return route
def route(self, name: str, parameters: dict = {}, query_params: dict = {}) -> str:
"""Return URL string from given route name and parameters."""
route = self.find_by_name(name)
if route:
return route.to_url(parameters, query_params)
raise RouteNotFoundException(f"Could not find route with the name '{name}'")
def set_controller_locations(self, location):
self.controller_locations = location
return self
def add(self, *routes):
self.routes.append(*routes)
self.routes = flatten(self.routes)
def set(self, *routes):
self.routes = []
self.routes.append(*routes)
self.routes = flatten(self.routes)
@classmethod
def compile_to_url(cls, uncompiled_route, params={}, query_params={}):
"""Compile the route url into a usable url: converts /url/@id into /url/1.
Used for redirection
Arguments:
route {string} -- An uncompiled route like (/dashboard/@user:string/@id:int)
Keyword Arguments:
params {dict} -- Dictionary of parameters to pass to the route (default: {{}})
query_params {dict} -- Dictionary of query parameters to pass to the route (default: {{}})
Returns:
string -- Returns a compiled string (/dashboard/joseph/1)
"""
if "http" in uncompiled_route:
return uncompiled_route
# Split the url into a list
split_url = uncompiled_route.split("/")
# Start beginning of the new compiled url
compiled_url = "/"
# Iterate over the list
for url in split_url:
if url:
# if the url contains a parameter variable like @id:int
if "@" in url:
url = url.replace("@", "").split(":")[0]
if isinstance(params, dict):
compiled_url += str(params[url]) + "/"
elif isinstance(params, list):
compiled_url += str(params.pop(0)) + "/"
elif "?" in url:
url = url.replace("?", "").split(":")[0]
if isinstance(params, dict):
compiled_url += str(params.get(url, "/")) + "/"
elif isinstance(params, list):
compiled_url += str(params.pop(0)) + "/"
else:
compiled_url += url + "/"
compiled_url = compiled_url.replace("//", "")
# The loop isn't perfect and may have an unwanted trailing slash
if compiled_url.endswith("/") and not uncompiled_route.endswith("/"):
compiled_url = compiled_url[:-1]
# The loop isn't perfect and may have 2 slashes next to eachother
if "//" in compiled_url:
compiled_url = compiled_url.replace("//", "/")
# Add eventual query parameters
if query_params:
compiled_url += "?" + parse.urlencode(query_params)
return compiled_url
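

# --- Usage sketch (added; the route pattern below is just an example) ---
def _example_compile_to_url():
    url = Router.compile_to_url(
        "/dashboard/@user:string/@id:int",
        params={"user": "joseph", "id": 1},
        query_params={"tab": "settings"},
    )
    return url  # -> "/dashboard/joseph/1?tab=settings"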
```
#### File: core/helpers/test_optional.py
```python
from tests import TestCase
from src.masonite.helpers import optional
class SomeClass:
    my_attr = 3

    def my_method(self):
        return 4


class TestOptionalHelper(TestCase):
    def test_optional_with_existing(self):
        obj = SomeClass()
        assert optional(obj).my_attr == 3
        assert optional(obj).my_method() == 4

    def test_optional_with_undefined(self):
        obj = SomeClass()
        assert optional(obj).non_existing_attr is None
        # not beautiful but we can do this to handle calling methods
        assert optional(optional(obj).non_existing_method)() is None

    def test_optional_with_undefined_on_none(self):
        obj = None
        assert optional(obj).non_existing_attr is None

    def test_optional_with_default(self):
        obj = SomeClass()
        assert optional(obj, 0).non_existing_attr == 0

    def test_optional_with_callable_default(self):
        obj = SomeClass()
        assert optional(obj, lambda the_obj: "a").non_existing_attr == "a"
```
#### File: core/utils/test_filesystem.py
```python
from tests import TestCase
from src.masonite.utils.filesystem import get_extension
class TestFileUtils(TestCase):
    def test_get_extension(self):
        self.assertEqual(get_extension("log.txt"), ".txt")
        self.assertEqual(get_extension("archive.tar.gz"), ".tar.gz")
        self.assertEqual(get_extension("path/to/log.txt"), ".txt")
        self.assertEqual(get_extension("log.txt", without_dot=True), "txt")
        self.assertEqual(get_extension("archive.tar.gz", without_dot=True), "tar.gz")
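

# --- Hypothetical reference implementation (added) consistent with the expectations
# above; the real helper lives in src.masonite.utils.filesystem and may differ.
def _reference_get_extension(filename, without_dot=False):
    from pathlib import Path
    extension = "".join(Path(filename).suffixes)  # keeps multi-part suffixes like .tar.gz
    return extension.lstrip(".") if without_dot else extension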
```
#### File: features/dumps/test_dumper.py
```python
from tests import TestCase
from src.masonite.facades import Dump
from src.masonite.exceptions import DumpException
class TestDumper(TestCase):
def setUp(self):
super().setUp()
self.dumper = self.application.make("dumper")
def tearDown(self):
super().tearDown()
self.dumper.clear()
def test_get_dumps(self):
dumps = self.dumper.get_dumps()
assert dumps == []
def test_get_serialized_dumps(self):
dumps = self.dumper.get_serialized_dumps()
assert dumps == []
def test_dump(self):
self.dumper.dump(1, {"test": "value"})
dumps = self.dumper.get_dumps()
assert len(dumps) == 1
first_dump = self.dumper.last()
assert first_dump.line == 25
assert first_dump.filename.endswith("test_dumper.py")
assert first_dump.method == "test_dump"
assert len(first_dump.objects.keys()) == 2
assert first_dump.objects.get("<class 'int'>") == 1
assert first_dump.objects.get("<class 'dict'>") == {"test": "value"}
def test_dump_can_get_variables_name(self):
test = 1
other_test = "a"
self.dumper.dump(test, other_test)
test_dump = self.dumper.last()
assert test_dump.objects.get("test") == 1
assert test_dump.objects.get("other_test") == "a"
def test_serialize_dump(self):
self.dumper.dump(1, {"test": "value"})
data = self.dumper.last().serialize()
assert data.get("line") == 46
assert data.get("filename")
assert data.get("timestamp")
assert data.get("method")
first_object = data.get("objects").get("<class 'int'>")
assert first_object.get("value") == "1"
def test_serialize_dump_properties(self):
class TestObject:
key = "value"
_other_key = 1
my_obj = TestObject()
self.dumper.dump(my_obj)
data = self.dumper.last().serialize()
assert "my_obj" in data.get("objects")
obj_props = data.get("objects").get("my_obj").get("properties")
assert obj_props.get("private").get("_other_key") == "1"
assert obj_props.get("public").get("key") == "value"
def test_can_add_several_dumps(self):
self.dumper.dump(1)
self.dumper.dump(2)
self.dumper.dump(3)
assert len(self.dumper.get_dumps()) == 3
def test_can_clear_dumps(self):
self.dumper.dump(1)
self.dumper.dump(2)
self.dumper.clear()
assert len(self.dumper.get_dumps()) == 0
def test_dump_and_die(self):
self.dumper.dump(1)
with self.assertRaises(DumpException):
self.dumper.dd(2)
assert len(self.dumper.get_dumps()) == 2
def test_dumps_are_ordered_by_most_recent(self):
var = 1
var_latest = 2
self.dumper.dump(var)
self.dumper.dump(var_latest)
assert self.dumper.last().objects.get("var_latest") == 2
assert self.dumper.get_dumps()[0].objects.get("var_latest") == 2
def test_can_revert_dump_order(self):
var = 1
var_latest = 2
self.dumper.dump(var)
self.dumper.dump(var_latest)
assert self.dumper.get_dumps(ascending=True)[0].objects.get("var") == 1
def test_dump_facade(self):
var = "test"
Dump.dump(var)
assert Dump.last().objects.get("var") == "test"
def test_dump_builtins(self):
var = "test"
dump(var)
assert self.dumper.last().objects.get("var") == "test"
def test_dump_output_data_in_console(self):
var = "test"
dump(var)
self.assertConsoleOutputContains(">>> DUMP")
self.assertConsoleOutputContains("test")
```
#### File: features/rates/test_limits.py
```python
import sys
from tests import TestCase
from src.masonite.rates import UnlimitedLimiter, GlobalLimiter, Limit, GuestsOnlyLimiter
from tests.integrations.app.User import User
class TestLimits(TestCase):
def test_limit_from_str(self):
limit = Limit.from_str("10/hour")
assert limit.max_attempts == 10
assert limit.delay == 60
def test_limit_per_minute(self):
limit = Limit.per_minute(5)
assert limit.max_attempts == 5
assert limit.delay == 1
def test_limit_per_hour(self):
limit = Limit.per_hour(10)
assert limit.max_attempts == 10
assert limit.delay == 60
def test_limit_per_day(self):
limit = Limit.per_day(10)
assert limit.max_attempts == 10
assert limit.delay == 60 * 24
def test_limit_custom(self):
limit = Limit(500, 40)
assert limit.max_attempts == 500
assert limit.delay == 40
def test_limit_unlimited(self):
limit = Limit.unlimited()
assert limit.max_attempts == sys.maxsize
assert limit.is_unlimited()
class TestLimiters(TestCase):
def test_unlimited(self):
request = "fake"
limiter = UnlimitedLimiter()
limit = limiter.allow(request)
assert limit.is_unlimited()
assert limit.max_attempts == sys.maxsize
def test_global(self):
request = "fake"
limiter = GlobalLimiter("3/minute")
limit = limiter.allow(request)
assert not limit.is_unlimited()
assert limit.max_attempts == 3
assert limit.delay == 1
limiter = GlobalLimiter("100/day")
limit = limiter.allow(request)
assert not limit.is_unlimited()
assert limit.max_attempts == 100
assert limit.delay == 24 * 60
def test_guests_only(self):
# request as guest
request = self.make_request()
request._ip = "127.0.0.1"
limiter = GuestsOnlyLimiter("2/hour")
limit = limiter.allow(request)
assert not limit.is_unlimited()
assert limit.max_attempts == 2
assert limit.delay == 60
assert limit.key == "127.0.0.1"
request._ip = "192.168.0.1"
limit = limiter.allow(request)
assert limit.key == "192.168.0.1"
def test_guests_only_with_authenticated_user(self):
limiter = GuestsOnlyLimiter("2/hour")
user = User.find(1)
# request as authenticated user
request = self.make_request()
request.set_user(user)
limit = limiter.allow(request)
assert limit.is_unlimited()
```
#### File: features/scheduling/test_scheduling.py
```python
import pendulum
from src.masonite.tests import TestCase
from src.masonite.scheduling import Task
class MockTask(Task):
run_every = "5 minutes"
timezone = "America/New_York"
class TestScheduler(TestCase):
def setUp(self):
super().setUp()
self.task = MockTask()
def test_scheduler_should_run(self):
assert self.task.run_every == "5 minutes"
time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
self.task._date = time
assert self.task.should_run(time) == True
time = pendulum.now().on(2018, 5, 21).at(22, 6, 5)
self.task._date = time
assert self.task.should_run(time) == False
def test_scheduler_should_run_every_minute(self):
self.task.run_every = "1 minute"
time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
self.task._date = time
assert self.task.should_run(time) == True
time = pendulum.now().on(2018, 5, 21).at(22, 6, 5)
self.task._date = time
assert self.task.should_run(time) == True
def test_scheduler_should_run_every_2_minutes(self):
self.task.run_every = "2 minutes"
time = pendulum.now().on(2018, 5, 21).at(14, 56, 5)
self.task._date = time
assert self.task.should_run(time) == True
time = pendulum.now().on(2018, 5, 21).at(14, 58, 5)
self.task._date = time
assert self.task.should_run(time) == True
def test_scheduler_should_run_every_hour(self):
self.task.run_every = "1 hour"
time = pendulum.now().on(2018, 5, 21).at(2, 0, 1)
self.task._date = time
assert self.task.should_run(time) == True
time = pendulum.now().on(2018, 5, 21).at(3, 0, 1)
self.task._date = time
assert self.task.should_run(time) == True
self.task.run_every = "2 hours"
time = pendulum.now().on(2018, 5, 21).at(2, 0, 1)
self.task._date = time
assert self.task.should_run(time) == True
self.task.run_every = "2 hours"
time = pendulum.now().on(2018, 5, 21).at(3, 0, 1)
self.task._date = time
assert self.task.should_run(time) == False
time = pendulum.now().on(2018, 5, 21).at(4, 0, 1)
self.task._date = time
assert self.task.should_run(time) == True
def test_scheduler_should_run_every_days(self):
self.task.run_every = "2 days"
time = pendulum.now().on(2018, 5, 21).at(0, 0, 1)
self.task._date = time
assert self.task.should_run(time) == False
time = pendulum.now().on(2018, 5, 23).at(0, 0, 1)
self.task._date = time
assert self.task.should_run(time) == False
self.task.run_at = "5:30"
time = pendulum.now().on(2018, 5, 22).at(5, 30, 0)
self.task._date = time
assert self.task.should_run(time) == True
self.task.run_at = "5:35"
time = pendulum.now().on(2018, 5, 22).at(5, 30, 0)
self.task._date = time
assert self.task.should_run(time) == False
def test_scheduler_should_run_every_months(self):
self.task.run_every = "2 months"
time = pendulum.now().on(2018, 1, 1).at(0, 0, 1)
self.task._date = time
assert self.task.should_run(time) == False
time = pendulum.now().on(2018, 2, 1).at(0, 0, 1)
self.task._date = time
assert self.task.should_run(time) == True
time = pendulum.now().on(2018, 2, 1).at(10, 0, 1)
self.task._date = time
assert self.task.should_run(time) == False
self.task.run_at = "5:30"
time = pendulum.now().on(2018, 2, 1).at(5, 30, 0)
self.task._date = time
assert self.task.should_run(time) == False
def test_twice_daily_at_correct_time(self):
time = pendulum.now().on(2018, 1, 1).at(1, 20, 5)
self.task.run_every = ""
self.task.twice_daily = (1, 13)
self.task._date = time
assert self.task.should_run()
time = pendulum.now().on(2018, 1, 1).at(13, 20, 5)
self.task._date = time
assert self.task.should_run()
def test_twice_daily_at_incorrect_time(self):
time = pendulum.now().on(2018, 1, 1).at(12, 20, 5)
self.task.run_every = ""
self.task.twice_daily = (1, 13)
self.task._date = time
assert self.task.should_run() is False
def test_run_at(self):
self.task.run_every = ""
self.task.run_at = "13:00"
time = pendulum.now().on(2018, 1, 1).at(13, 0, 5)
self.task._date = time
self.task.run_at = "13:05"
time = pendulum.now().on(2018, 1, 1).at(13, 5, 5)
self.task._date = time
assert self.task.should_run() is True
time = pendulum.now().on(2018, 1, 1).at(13, 6, 5)
self.task._date = time
assert self.task.should_run() is False
def test_method_calls(self):
task = MockTask()
task.at("13:00")
time = pendulum.now().on(2018, 1, 1).at(13, 0, 5)
task._date = time
assert task.should_run(time) == True
task = MockTask()
task.every_minute()
time = pendulum.now().on(2018, 5, 21).at(22, 5, 5)
task._date = time
assert task.should_run(time) == True
def test_should_run_task_immediately_by_class(self):
self.craft("schedule:run", "--task TaskTest --force").assertSuccess()
def test_should_run_task_immediately_by_name(self):
self.craft("schedule:run", "--task task_test --force").assertSuccess()
```
#### File: features/session/test_cookie_session.py
```python
from tests import TestCase
class TestCookieSession(TestCase):
def test_can_start_session(self):
request = self.make_request()
session = self.application.make("session")
request.cookie("s_hello", "test")
session.start("cookie")
self.assertEqual(session.get("hello"), "test")
def test_can_get_session_dict(self):
request = self.make_request()
session = self.application.make("session")
request.cookie("s_hello", '{"hello": "test"}')
session.start("cookie")
self.assertEqual(type(session.get("hello")), dict)
def test_can_set_and_get_session_dict(self):
request = self.make_request()
session = self.application.make("session")
session.start("cookie")
session.set("key1", {"hello": "test"})
self.assertEqual(type(session.get("key1")), dict)
self.assertEqual(session.get("key1")["hello"], "test")
def test_can_set_and_get_session(self):
self.make_request()
session = self.application.make("session")
session.start("cookie")
session.set("key1", "test1")
self.assertEqual(session.get("key1"), "test1")
def test_can_increment_and_decrement_session(self):
self.make_request()
session = self.application.make("session")
session.start("cookie")
session.set("key1", "1")
session.set("key5", "5")
session.increment("key1")
session.decrement("key5")
self.assertEqual(session.get("key1"), "2")
self.assertEqual(session.get("key5"), "4")
def test_can_save_session(self):
self.make_request()
response = self.make_response()
session = self.application.make("session")
session.start("cookie")
session.set("key1", "test1")
self.assertEqual(response.cookie("s_key1"), "test1")
def test_can_delete_session(self):
request = self.make_request()
response = self.make_response()
session = self.application.make("session")
request.cookie("s_key", "test")
session.start("cookie")
self.assertEqual(session.get("key"), "test")
session.delete("key")
self.assertEqual(session.get("key"), None)
self.assertEqual(response.cookie("s_key"), None)
self.assertTrue("s_key" in response.cookie_jar.deleted_cookies)
def test_can_pull_session(self):
request = self.make_request()
response = self.make_response()
session = self.application.make("session")
request.cookie("s_key", "test")
session.start("cookie")
self.assertEqual(session.get("key"), "test")
key = session.pull("key")
self.assertEqual(key, "test")
self.assertEqual(session.get("key"), None)
self.assertEqual(response.cookie("s_key"), None)
self.assertTrue("s_key" in response.cookie_jar.deleted_cookies)
def test_can_flush_session(self):
request = self.make_request()
response = self.make_response()
session = self.application.make("session")
request.cookie("s_key", "test")
session.start("cookie")
self.assertEqual(session.get("key"), "test")
session.flush()
self.assertEqual(session.get("key"), None)
self.assertEqual(response.cookie("s_key"), None)
self.assertTrue("s_key" in response.cookie_jar.deleted_cookies)
def test_can_flash(self):
request = self.make_request()
response = self.make_response()
session = self.application.make("session")
session.start("cookie")
session.flash("key", "test")
self.assertEqual(session.get("key"), "test")
self.assertEqual(session.get("key"), None)
self.assertEqual(response.cookie("f_key"), None)
def test_flash_two_keys_does_not_duplicate_data(self):
request = self.make_request()
response = self.make_response()
session = self.application.make("session")
session.start("cookie")
session.flash("key", "test")
session.flash("key2", "test2")
self.assertTrue(session.has("key"))
self.assertTrue(session.has("key2"))
self.assertTrue(response.cookie_jar.exists("f_key"))
self.assertTrue(response.cookie_jar.exists("f_key2"))
self.assertEqual(session.get("key"), "test")
self.assertFalse(session.has("key"))
self.assertTrue(session.has("key2"))
self.assertFalse(response.cookie_jar.exists("f_key"))
self.assertTrue(response.cookie_jar.exists("f_key2"))
``` |
{
"source": "josephmancuso/wire",
"score": 2
} |
#### File: http/controllers/AlertController.py
```python
from jinja2 import Markup
from masonite.controllers import Controller
from masonite.request import Request
from masonite.view import View
from .Component import Component
class AlertController(Component):
    """Count Component"""

    attrs = ['message']

    def __init__(self):
        from wsgi import container
        self.message = 'This is a message'
        container.resolve(super().__init__)

    def show(self):
        return self.render('livewire.error')
```
#### File: http/controllers/NameController.py
```python
from masonite.controllers import Controller
from masonite.request import Request
from masonite.view import View
from masonite.auth import Auth
from .Component import Component
class NameController(Component):
    """NameController Controller Class."""

    attrs = ['name', 'count', 'see', 'username', 'failed', 'password', 'success', 'loggedin']

    def __init__(self, auth: Auth):
        self.name = "Joe"
        self.count = 10
        self.see = False
        self.loggedin = False
        self.failed = False
        # resolve the parent initializer through the container,
        # mirroring the pattern used in AlertController above
        from wsgi import container
        container.resolve(super().__init__)

    def show(self):
        return self.render('livewire.name')

    def increment(self):
        self.count += 1

    def visible(self):
        self.see = True

    def login(self):
        from wsgi import container
        auth = container.make(Auth)
        if (auth.login(self.username, self.password or '')):
            self.failed = False
            self.loggedin = True
            return
        self.failed = True
        self.loggedin = False
```
#### File: app/providers/LiveWire.py
```python
from masonite.provider import ServiceProvider
class LiveWire(ServiceProvider):
    """Provides Services To The Service Container."""

    wsgi = False

    def register(self):
        """Register objects into the Service Container."""
        pass

    def boot(self):
        """Boots services required by the container."""
        pass
``` |
{
"source": "josephmarcus9/futurecoder",
"score": 2
} |
#### File: backend/main/tests.py
```python
import json
import os
import re
from pathlib import Path
from django.test import Client, TestCase
from main.text import pages
from main.workers import master
client = Client()
master.TESTING = True
def api(method, **kwargs):
response = client.post(f"/api/{method}/", data=kwargs, content_type="application/json")
return response.json()
class StepsTestCase(TestCase):
def setUp(self):
from main.models import User
self.user = User.objects.create_user("admin", "<EMAIL>", "<PASSWORD>")
client.post("/accounts/login/", dict(login="<EMAIL>", password="<PASSWORD>"))
def test_steps(self):
transcript = []
for page_index, page in enumerate(pages.values()):
for step_index, step_name in enumerate(page.step_names[:-1]):
step = getattr(page, step_name)
for substep in [*step.messages, step]:
program = substep.program
if "\n" in program:
code_source = step.expected_code_source or "editor"
else:
code_source = "shell"
response = api(
"run_code",
code=program,
source=code_source,
page_index=page_index,
step_index=step_index,
)
if "state" not in response:
self.fail(response)
state = response.pop("state")
for line in response["result"]:
line["text"] = normalise_output(line["text"])
del response["birdseye_url"]
transcript_item = dict(
program=program.splitlines(),
page=page.title,
step=step_name,
response=response,
)
transcript.append(transcript_item)
is_message = substep in step.messages
if is_message:
self.assertEqual(response["message"], substep.text, transcript_item)
elif step.get_solution:
get_solution = "".join(step.get_solution["tokens"])
assert "def solution(" not in get_solution
assert "returns_stdout" not in get_solution
assert get_solution.strip() in program
transcript_item["get_solution"] = get_solution.splitlines()
if step.parsons_solution:
is_function = transcript_item["get_solution"][0].startswith("def ")
assert len(step.get_solution["lines"]) >= 4 + is_function
self.assertEqual(
response["passed"],
not is_message,
transcript_item,
)
self.assertEqual(
step_index + response["passed"],
state["pages_progress"][page_index],
transcript_item,
)
path = Path(__file__).parent / "test_transcript.json"
if os.environ.get("FIX_TESTS", 0):
dump = json.dumps(transcript, indent=4, sort_keys=True)
path.write_text(dump)
else:
self.assertEqual(transcript, json.loads(path.read_text()))
def normalise_output(s):
s = re.sub(r" at 0x\w+>", " at 0xABC>", s)
return s
```
#### File: main/workers/birdseye.py
```python
import ast
import inspect
from datetime import datetime
import birdseye.bird
from birdseye.bird import BirdsEye
from main.utils import rows_to_dicts
from .worker import console, execute
birdseye.bird.get_unfrozen_datetime = datetime.now
# Import necessary files before limit is set
str(BirdsEye("sqlite://").db)
def exec_birdseye(filename, code):
    # Create database in memory
    eye = BirdsEye("sqlite://")
    traced_file = eye.compile(code, filename)
    eye._trace('<module>', filename, traced_file, traced_file.code, 'module', code)
    console.locals.update(eye._trace_methods_dict(traced_file))

    nodes_by_lineno = {
        node.lineno: node
        for node in traced_file.nodes
        if isinstance(node, ast.FunctionDef)
    }

    def find_code(root_code):
        """
        Trace all functions recursively, like trace_module_deep.
        """
        for code_obj in root_code.co_consts:
            if not inspect.iscode(code_obj) or code_obj.co_name.startswith('<'):
                continue
            find_code(code_obj)
            lineno = code_obj.co_firstlineno
            node = nodes_by_lineno.get(lineno)
            if not node:
                continue
            eye._trace(
                code_obj.co_name, filename, traced_file, code_obj,
                typ='function',
                source=code,
                start_lineno=lineno,
                end_lineno=node.last_token.end[0] + 1,
            )

    find_code(traced_file.code)
    execute(traced_file.code)

    with eye.db.session_scope() as session:
        objects = session.query(eye.db.Call, eye.db.Function).all()
        calls, functions = [rows_to_dicts(set(column)) for column in zip(*objects)]

    return dict(calls=calls, functions=functions)
``` |
{
"source": "josephmartin09/packet_util",
"score": 3
} |
#### File: packet_util/packet_util/bit_field.py
```python
import struct
from . import PacketField
class BitField(PacketField):
    def __init__(self, dtype, bitfields, big_endian=True):
        """Initialize the BitField.

        :param dtype: The datatype to pack and unpack the individual bitfields as. This must be an unsigned type.
        :param bitfields: A dict of field_name to integers that maps the name of the bitfields to their bit lengths.

        .. note:: The sum of these bitfields must be equal to the bitlength of the dtype parameter.
        """
        # Verify dtype works for bitfields
        if not dtype in self._unsigned_types:
            raise ValueError(f"{dtype} is not a valid field type. Must be an unsigned type.")
        endian_key = ">" if big_endian else "<"
        self._struct_key = endian_key + self._unsigned_types[dtype].type_spec
        self._len = self._field_types[dtype].size
        self._bitlen = self._len * 8

        # Verify bitfield lengths add up to exactly the type length
        requested_bitlen = sum(bitfields.values())
        if not requested_bitlen == self._len * 8:
            raise ValueError(f"Length of bitfields input is {requested_bitlen} bits, but MUST be {self._bitlen} bits.")
        self._bitfields = bitfields

    def __len__(self):
        """Return the length of bitfield in bytes."""
        return self._len

    def __pack_pyint(self, value):
        """Pack a pyint using the struct library.

        :param value: A pyint that must be in the range of this BitFields dtype.
        :returns: A bytearray of the value in binary.
        """
        return struct.pack(self._struct_key, value)

    def pack(self, bitfield):
        """Convert a dict of bitfield values to a bytearray."""
        # First check if an int was passed. If so, we can skip everything and just pack it
        if isinstance(bitfield, int):
            return self.__pack_pyint(bitfield)
        if not isinstance(bitfield, dict):
            raise TypeError(f"input to pack must be an int or dict. Got {type(bitfield)}")

        # Check missing keys. Note the type cast to list is necessary to consider ordering when the keys are compared
        req_keys = list(self._bitfields.keys())
        provided_keys = list(bitfield.keys())
        if req_keys != provided_keys:
            raise ValueError(f"bitfield must be supplied the following keys in this order {self._bitfields.keys()}")

        # Convert dict to pyint
        final_pyint = 0
        bitcount = self._bitlen
        for name, field_val in bitfield.items():
            field_bitlen = self._bitfields[name]
            if field_val >= 2**field_bitlen:
                raise ValueError(f"Value of {field_val} is too large for a bitfield of {field_bitlen} bits.")
            final_pyint |= field_val << (bitcount - field_bitlen)
            bitcount -= field_bitlen

        # Pack the pyint as bytes
        return self.__pack_pyint(final_pyint)

    def unpack(self, value, as_pyint=False):
        """Unpack a bytes-like into a dict of bitfield values.

        :param value: A bytes-like to unpack.
        :param bool as_pyint: Return an int if True, otherwise a dict of each field.
        :returns: A pyint of the unpacked value, or a dict of field_name -> pyints for each bitfield.
        """
        if not isinstance(value, (bytes, bytearray)):
            raise TypeError(f"{value} is not a bytes or bytearray")
        if len(value) != self._len:
            raise ValueError(f"Bytes-like to unpack must be {self._len} bytes, but got {len(value)} bytes")

        # Unpack as pyint
        unpacked_pyint = struct.unpack(self._struct_key, value)[0]
        if as_pyint:
            return unpacked_pyint

        # Unpack bitfields
        bitcount = self._bitlen
        unpacked_fields = dict()
        for name, field_bitlen in self._bitfields.items():
            unpacked_fields[name] = unpacked_pyint >> (bitcount - field_bitlen)
            bitcount -= field_bitlen
            unpacked_pyint &= (2**bitcount - 1)
        return unpacked_fields
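

# --- Usage sketch (added; field names and the 'uint16' dtype are arbitrary examples) ---
def _example_bitfield_roundtrip():
    # bit widths must sum to the width of the chosen unsigned type (16 bits here)
    flags = BitField('uint16', {"version": 4, "type": 4, "length": 8})
    raw = flags.pack({"version": 2, "type": 5, "length": 64})   # 2 bytes, big-endian
    fields = flags.unpack(raw)            # -> {"version": 2, "type": 5, "length": 64}
    as_int = flags.unpack(raw, as_pyint=True)
    return raw, fields, as_int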
```
#### File: packet_util/test/test_type_field.py
```python
import pytest
from packet_util import TypeField
# For these endian tests, all byte logic is actually handled by the struct library.
# Therefore, it's sufficient to ensure that TypeField is just passing the correct endian key to struct


def test_big_endian():
    f = TypeField('uint32')
    assert f._struct_key.startswith(">")


def test_little_endian():
    f = TypeField('uint32', big_endian=False)
    assert f._struct_key.startswith("<")


def test_pack_invalid_type():
    f = TypeField('uint32')
    with pytest.raises(TypeError):
        f.pack("not an int or float")


def test_unpack_invalid_type():
    f = TypeField('uint32')
    with pytest.raises(TypeError):
        f.unpack("not a bytes-like")


def test_pack():
    # signed type
    f = TypeField('int16')
    assert f.pack(-300) == bytearray([0xFE, 0xD4])

    # unsigned type
    f = TypeField('uint16')
    assert f.pack(65236) == bytearray([0xFE, 0xD4])

    # float type
    f = TypeField('float')
    assert f.pack(1.234) == bytearray([0x3F, 0x9D, 0xF3, 0xB6])


def test_unpack():
    # signed type
    f = TypeField('int16')
    assert f.unpack(bytearray([0xFE, 0xD4])) == -300

    # unsigned type
    f = TypeField('uint16')
    assert f.unpack(bytearray([0xFE, 0xD4])) == 65236

    # float type
    f = TypeField('float')
    assert f.unpack(bytearray([0x3F, 0x9D, 0xF3, 0xB6])) == pytest.approx(1.234)


def test_len():
    f = TypeField('int8')
    assert len(f) == 1
    f = TypeField('int16')
    assert len(f) == 2
    f = TypeField('int32')
    assert len(f) == 4
    f = TypeField('int64')
    assert len(f) == 8
    f = TypeField('float')
    assert len(f) == 4
    f = TypeField('double')
    assert len(f) == 8
``` |
{
"source": "JosephMart/reddit-recomender",
"score": 3
} |
#### File: reddit-recomender/src/main.py
```python
from flask import Flask, request, jsonify
from flask_cors import CORS
from typing import List
from response import Subreddit
import solr
import utils
import traceback
app = Flask(__name__)
CORS(app)
@app.route("/search", methods=["POST"])
def search():
    body = request.get_json()

    query_subs: List[Subreddit] = None
    try:
        query_subs = solr.validate_query(body["subreddits"])
    except Exception as e:
        traceback.print_exc()
        return jsonify({"message": str(e)}), 400

    query_words, query_words_lmtzd = utils.analyze_queries(query_subs)

    doc_subs: List[Subreddit] = None
    doc_subs = solr.get_docs(query_words)
    docs_lmtzd = utils.analyze_documents(doc_subs)

    result = utils.order(query_words_lmtzd, docs_lmtzd, doc_subs)
    return jsonify({"result": list(result)})


@app.route("/autocomplete", methods=["GET"])
def autocomplete():
    q = request.args.get("q")
    over18 = request.args.get("over18")
    results = solr.autocomplete(q, over18)
    return jsonify({"results": results})


app.run(port=8080)
# query1 = "JavaScript is for losers who like web development"
# query2 = "JavaScript is good for front end web development"
# docs = [
# "JavaScript is a great language",
# "I really enjoy writing JavaScript on the front end",
# "Modern web development is done using JavaScript"
# ]
# doc_ids = [1, 2, 3]
# analyzed_query = utils.analyze_queries([query1, query2])
# analyzed_docs = utils.analyze_documents(docs)
# result = utils.order(analyzed_query, analyzed_docs, doc_ids)
# print(result)
# solr.search(set(["basketball", "baseball"]))
```
#### File: reddit-recomender/src/response.py
```python
import json
from typing import Dict, Union, Type
class Subreddit:
    _id: int = None
    subscribers: int = None
    title: str = None
    description: str = None
    over_18: bool = None
    display_name: str = None

    @staticmethod
    def from_json(j: Dict[str, Union[str, int]]):
        s = Subreddit()
        s._id = j["id"]
        s.display_name = j["display_name"][0]
        try:
            s.description = j["public_description"][0]
        except:
            s.description = ""
        s.title = j["title"][0]
        s.subscribers = j["subscribers"][0]
        s.over_18 = j["over18"][0]
        return s

    def to_json(self):
        return {
            "id": self._id,
            "display_name": self.display_name,
            "subscribers": self.subscribers,
            "title": self.title,
            "description": self.description,
            "over18": self.over_18
        }
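

# --- Round-trip sketch (added; the values below are made up, but the JSON shape
# mirrors the Solr-style single-element lists that from_json() reads above) ---
def _example_subreddit_roundtrip():
    doc = {
        "id": 42,
        "display_name": ["python"],
        "public_description": ["News about the Python programming language."],
        "title": ["Python"],
        "subscribers": [1000000],
        "over18": [False],
    }
    sub = Subreddit.from_json(doc)
    return json.dumps(sub.to_json())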
``` |
{
"source": "josephmc5/sentry",
"score": 2
} |
#### File: sentry/filters/builtins.py
```python
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from sentry.models import GroupStatus
from .base import Filter
__all__ = ('StatusFilter',)
STATUS_LEVELS = (
    (GroupStatus.UNRESOLVED, _('Unresolved')),
    (GroupStatus.RESOLVED, _('Resolved')),
    (GroupStatus.MUTED, _('Muted')),
)


class StatusFilter(Filter):
    label = _('Status')
    column = 'status'
    default = '0'
    choices = SortedDict(STATUS_LEVELS)

    def get_choices(self):
        return self.choices
```
#### File: web/frontend/access_group_migration.py
```python
from __future__ import absolute_import
from collections import defaultdict
from sentry.models import (
AccessGroup, OrganizationMember, OrganizationMemberType, Team
)
from sentry.web.frontend.base import OrganizationView
class AccessGroupMigrationView(OrganizationView):
required_access = OrganizationMemberType.ADMIN
def process_posted_member(self, request, organization, member):
global_access = request.POST.get('user[%s][global_access]' % member.user_id)
teams = request.POST.getlist('user[%s][team]' % member.user_id)
remove = request.POST.get('user[%s][remove]' % member.user_id)
access_type = request.POST.get('user[%s][type]' % member.user_id)
if not access_type:
return
if remove != '1':
if access_type == 'member':
access_type = OrganizationMemberType.MEMBER
elif access_type == 'admin':
access_type = OrganizationMemberType.ADMIN
else:
return
global_access = global_access == '1'
om, created = OrganizationMember.objects.get_or_create(
organization=organization,
user=member.user,
defaults={
'has_global_access': global_access,
'type': access_type,
}
)
if created and not global_access:
for team in teams:
om.teams.add(Team.objects.get_from_cache(slug=team))
member.delete()
def handle(self, request, organization):
member_list = list(AccessGroup.members.through.objects.filter(
accessgroup__team__organization=organization,
).select_related('user', 'accessgroup', 'accessgroup__team'))
if request.method == 'POST':
for member in member_list:
self.process_posted_member(request, organization, member)
for ag in AccessGroup.objects.filter(team__organization=organization):
if not ag.members.exists():
ag.delete()
return self.redirect(request.path)
group_list = set(m.accessgroup for m in member_list)
team_list = organization.team_set.all()
project_list = list(AccessGroup.projects.through.objects.filter(
accessgroup__in=group_list,
).select_related('project', 'project__team'))
# sort projects by group
projects_by_group = defaultdict(list)
for obj in project_list:
projects_by_group[obj.accessgroup_id].append(obj.project)
projects_by_user = defaultdict(list)
for member in member_list:
projects_by_user[member.user_id].extend(projects_by_group[member.accessgroup_id])
results = []
for member in member_list:
results.append((
member.user,
projects_by_user[member.user_id],
))
context = {
'member_list': results,
'group_list': group_list,
'team_list': team_list,
}
return self.respond('sentry/access-group-migration.html', context)
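# For reference, process_posted_member() above expects form fields keyed by user id;
# the values below are illustrative only:
#   user[42][type]          -> "member" or "admin"
#   user[42][global_access] -> "1" for organization-wide access
#   user[42][team]          -> repeated field, one entry per team slug
#   user[42][remove]        -> "1" to drop the member instead of migrating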
``` |
{
"source": "joseph-mccarthy/night-sky-pi",
"score": 3
} |
#### File: night-sky-pi/test/test_time_functions.py
```python
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch
import src.time_functions as time
class TestTimeFunctions(TestCase):
@patch('src.time_functions._get_now', return_value=datetime(2022,2,1))
def test_get_today(self,mock_datetime):
today = time.get_today()
self.assertEqual(today,datetime(2022,2,1))
@patch('src.time_functions._get_now', return_value=datetime(2022,2,1))
def test_get_yesterday(self,mock_datetime):
yesterday = time.get_yesterday()
self.assertEqual(yesterday,datetime(2022,1,31))
@patch('src.time_functions._get_now', return_value=datetime(2022,2,1))
def test_get_tomorrow(self,mock_datetime):
tomorrow = time.get_tomorrow()
self.assertEqual(tomorrow,datetime(2022,2,2))
@patch('src.time_functions._get_now', return_value=datetime(2022,2,1))
def test_get_dates(self,mock_datetime):
yesterday,today,tomorrow = time.get_dates()
self.assertEqual(yesterday,datetime(2022,1,31))
self.assertEqual(today,datetime(2022,2,1))
self.assertEqual(tomorrow,datetime(2022,2,2))
def test_get_now(self):
self.assertEqual(time._get_now().day,datetime.now().day)
``` |
{
"source": "joseph-mccombs/puente-serverless",
"score": 3
} |
#### File: puente-serverless/puente-etl/load_from_s3.py
```python
import json
import io
import pickle
import pandas as pd
from utils.clients import Clients
#
# Load JSON
#
def load_json_from_s3(s3_client, file_name: str):
print('load_json_from_s3')
response = s3_client.get_object(
Bucket=Clients.S3_BUCKET_NAME,
Key=f'store_json/{file_name}.json',
)
return json.loads(
response['Body'].read().decode('utf-8')
)
#
# Load Pandas DataFrame
#
def load_dataframe_from_s3(s3_client, file_name: str):
print('load_dataframe_from_s3')
response = s3_client.get_object(
Bucket=Clients.S3_BUCKET_NAME,
Key=f'store_dataframes/{file_name}.df',
)
pickled_df_buffer = io.BytesIO(response['Body'].read())
df = pd.read_pickle(pickled_df_buffer)
return df
#
# Load Python Dictionary
#
def load_pickle_dict_from_s3(s3_client, file_name: str):
print('load_pickle_dict_from_s3')
response = s3_client.get_object(
Bucket=Clients.S3_BUCKET_NAME,
Key=f'store_pickle_dicts/{file_name}.pickle',
)
raw = response['Body'].read()
data = pickle.loads(raw)
return data
#
# Load Python List
#
def load_pickle_list_from_s3(s3_client, file_name: str):
print('load_pickle_list_from_s3')
response = s3_client.get_object(
Bucket=Clients.S3_BUCKET_NAME,
Key=f'store_pickle_lists/{file_name}.pickle',
)
raw = response['Body'].read()
data = pickle.loads(raw)
return data
if __name__ == '__main__':
# load_json_from_s3(Clients.S3, 'form_specifications_v2')
# load_pickle_dict_from_s3(Clients.S3, 'form_specifications_v2')
load_pickle_list_from_s3(Clients.S3, 'form_specifications_v2')
```
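A short usage sketch for the loaders above, assuming the same `Clients.S3` client and bucket layout, with the object name taken from the `__main__` block:
```python
from utils.clients import Clients
from load_from_s3 import load_dataframe_from_s3, load_json_from_s3

# "form_specifications_v2" mirrors the key used in the __main__ block above;
# swap in whichever dataset name your bucket actually holds.
spec_json = load_json_from_s3(Clients.S3, "form_specifications_v2")
spec_df = load_dataframe_from_s3(Clients.S3, "form_specifications_v2")
print(type(spec_json), spec_df.shape)
```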
#### File: puente-serverless/puente-etl/transform_form_specifications.py
```python
import pandas as pd
from dotenv import load_dotenv; load_dotenv()
from load_from_s3 import load_pickle_list_from_s3
from utils.clients import Clients
from utils.constants import ColumnOrder
from utils.helpers import \
get_column_order_by_least_null, \
get_fields_from_list_of_dicts, \
get_unique_fields_from_list, \
shortuuid_random, \
to_snake_case
def get_custom_form_schema_df():
full_table_data: list = load_pickle_list_from_s3(Clients.S3, 'form_specifications_v2')
# Denormalize Custom Form JSON
form_df = denormalize_custom_forms(full_table_data)
questions_df = denormalize_questions(full_table_data)
answers_df = denormalize_answers(questions_df=questions_df)
form_schema_df = form_df \
.merge(questions_df, on='custom_form_id') \
.drop(columns='question_options') \
.merge(answers_df, on='question_id')
form_schema_df = form_schema_df[ColumnOrder.FORM_SPECIFICATIONS]
return form_schema_df
def denormalize_custom_forms(forms_data: list):
# Remove nested "fields" which are denormalized elsewhere and "location" which we do not need.
all_fields = get_fields_from_list_of_dicts(forms_data)
cols = [
c
for c in all_fields
if c not in ('fields', 'location')
]
# Order Row Elements by Column
data = []
for form_data in forms_data:
row = []
for col in cols:
row.append(form_data.get(col))
data.append(row)
# Clean Column Names: Remove leading underscore and apply snake_case
column_names = [
to_snake_case(col.lstrip('_'))
for col in cols
]
# Create DataFrame
df = pd.DataFrame(data, columns=column_names) \
.add_prefix('custom_form_')
return df
def denormalize_questions(forms_data: list):
# Get unique nested fields across all data set
fields_list = []
for fields_data in forms_data:
for cols_data in fields_data.get('fields'):
fields_list.extend(list(cols_data.keys()))
cols = get_unique_fields_from_list(fields_list)
if 'id' in cols:
cols.remove('id')
rows = []
for form_data in forms_data:
fields = form_data.get('fields')
custom_form_id = form_data.get('_id')
header = get_section_header(fields)
for item in fields:
row = [custom_form_id, header, shortuuid_random()]
for col in cols:
row.append(item.get(col))
rows.append(row)
# Prepend column for IDs and section Header
# Format column names and add columns for Question ID and Answer ID
cols_renamed = [f'question_{i}' for i in cols]
cols_renamed.insert(0, 'custom_form_id')
cols_renamed.insert(1, 'custom_form_header')
cols_renamed.insert(2, 'question_id')
cols_formatted = to_snake_case(cols_renamed)
df = pd.DataFrame(rows, columns=cols_formatted)
cols_ordered = get_column_order_by_least_null(df)
df = df[cols_ordered]
return df
def denormalize_answers(questions_df: pd.DataFrame):
tmp_df = questions_df[['question_id', 'question_options']]
# Exclude fieldTypes "header" and "numberInput"
tmp_df = tmp_df[tmp_df['question_options'].notnull()]
cols = []
for options_list in tmp_df['question_options']:
if options_list is not None:
for option in options_list:
# Get cols
for fk in list(option.keys()):
if fk not in cols:
cols.append(fk)
if 'id' in cols:
cols.remove('id')
rows = []
for q_id, options_list in zip(tmp_df['question_id'], tmp_df['question_options']):
if options_list is not None:
for option in options_list:
row = [q_id, shortuuid_random()]
for col in cols:
row.append(option.get(col))
rows.append(row)
# Format column names and add columns for Question ID and Answer ID
cols_renamed = [f'answer_{i}' for i in cols]
cols_renamed.insert(0, 'question_id')
cols_renamed.insert(1, 'answer_id')
cols_formatted = to_snake_case(cols_renamed)
df = pd.DataFrame(rows, columns=cols_formatted)
return df
def get_section_header(fields_dict):
for item in fields_dict:
if item.get('fieldType') == 'header':
return item.get('label')
if __name__ == '__main__':
get_custom_form_schema_df()
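# For orientation, the denormalize_* functions above assume each record in the
# "form_specifications_v2" list looks roughly like this (illustrative values only):
#   {
#       "_id": "abc123",
#       "name": "...",                      # other top-level attributes vary
#       "fields": [
#           {"fieldType": "header", "label": "Household survey"},
#           {"fieldType": "select", "label": "Roof type",
#            "options": [{"label": "Zinc", "value": "zinc"}]},
#       ],
#   }
# denormalize_custom_forms() keeps the top-level attributes, denormalize_questions()
# flattens "fields", and denormalize_answers() flattens each question's "options".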
``` |
{
"source": "joseph-mcdaniel/load-testing",
"score": 3
} |
#### File: joseph-mcdaniel/load-testing/cli.py
```python
import click
@click.command()
@click.option("--requests", "-r", default=500, help="Number of requests")
@click.option("--concurrency", "-c", default=1, help="Number of concurrent requests")
@click.option("--json-file", "-j", default=None, help="Path to output JSON file")
@click.argument("url")
def cli(requests, concurrency, json_file, url):
print(f"Requests: {requests}")
print(f"Concurrency: {concurrency}")
print(f"JSON File: {json_file}")
print(f"URL: {url}")
pass
if __name__ == "__main__":
cli()
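# Example invocations (the URL below is a placeholder):
#   python cli.py https://example.com
#   python cli.py -r 1000 -c 10 -j results.json https://example.com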
``` |
{
"source": "JosephMcGrath/markdown_notebook",
"score": 3
} |
#### File: src/tidynotes/mardown_document.py
```python
import copy
import os
import re
from typing import Any, Dict, List, Optional
import jinja2
import markdown
import yaml
class MarkdownPart:
"""
A part of a markdown document.
Has several mandatory attributes:
* title - the name of the document / section.
* body - the text of the section.
* parts - sub-sections (also MarkdownParts) for any sub-headings.
* metadata - any metadata for the object (stored in a YAML header).
* level - the markdown level of the section (with 0 being used for Pandoc PDFs).
And one optional attribute:
* file - The path that the document came from (if applicable).
"""
renderer = markdown.Markdown(
extensions=["fenced_code", "tables", "sane_lists", "admonition"]
)
env = jinja2.Environment(loader=jinja2.PackageLoader("tidynotes"))
def __init__(self, text: str) -> None:
self.raw = text
self.level = 0
self.file: Optional[str] = None
self.title: Optional[str] = None
self.body: str = ""
self.parts: List[MarkdownPart] = []
self.meta: Dict[str, Any] = {}
self._parse_raw(text)
@classmethod
def from_file(cls, path: str, encoding: str = "utf-8") -> "MarkdownPart":
"""
Load a markdown document from a text file at the specified path.
"""
with open(path, encoding=encoding) as file:
text = file.read()
doc = cls(text)
file_name = os.path.split(path)[1]
file_name = os.path.splitext(file_name)[0]
doc.file = file_name
doc.meta[".file"] = {
"path": path,
"mtime": os.stat(path).st_mtime,
"name": file_name,
}
return doc
def to_file(self, path: str, encoding: str = "utf-8") -> None:
"""
Writes the document to a text file at the specified path.
"""
output = self.combine()
if not output:
return
if os.path.exists(path):
with open(path, "r", encoding=encoding) as file:
existing = file.read()
if existing == output:
return
with open(path, "w", encoding=encoding) as file:
file.write(output)
def combine(self, metadata: bool = True) -> str:
"""
Recombine the document and its parts into a markdown string.
"""
parts = []
if metadata and self.meta:
useable_meta = {x: y for x, y in self.meta.items() if x not in [".file"]}
useable_meta["title"] = self.title
meta_block = "\n".join(["---", yaml.dump(useable_meta).strip(), "---", ""])
parts.append(meta_block)
if self.level > 0 and self.title is not None:
title = "#" * self.level + " " + self.title + "\n"
else:
title = ""
body = "\n".join([title, self.body.strip("\n"), ""]).strip("\n") + "\n"
if self.level == 2:
body = "\n" * 2 + body
elif self.level == 3:
body = "\n" + body
parts.append(body)
if self.parts:
parts.append("\n".join([x.combine(metadata=False) for x in self.parts]))
if not self.body.strip():
parts[-1] = parts[-1].lstrip("\n")
return "\n".join(parts)
def drop_parts(self, pattern: str) -> None:
"""
Drop any parts that have a title matching the provided regex.
Parts without a title can't be dropped this way.
"""
self.parts = [
x
for x in self.parts
if x.title is not None and not re.match(pattern, x.title)
]
def copy(self) -> "MarkdownPart":
"""
Create a copy of the document and all its parts.
"""
return copy.deepcopy(self)
def extract_parts(self, pattern: str) -> List["MarkdownPart"]:
"""
Extract any parts of the document that have a title matching the provided regex.
Parts without a title can't be extracted this way.
"""
output = []
for part in self.parts:
if part.title is not None and re.match(pattern, part.title):
output.append(part.copy())
output.extend(part.extract_parts(pattern))
return output
def is_stub(self) -> bool:
"""
Checks if the note is a stub (no body text and no parts).
"""
return not self.body.strip() and len(self.parts) == 0
def make_replacement(
self, pattern: str, replacement: str, regex: bool = True
) -> None:
"""
Replace any text in the body patching `pattern` with `replacement`.
If `regex` is true then re.sub is used.
"""
if regex:
self.body = re.sub(pattern, replacement, self.body)
else:
self.body = self.body.replace(pattern, replacement)
for part in self.parts:
part.make_replacement(pattern, replacement, regex=regex)
def replace_title(
self, replacements: Dict[str, str], level: Optional[int] = None
) -> None:
"""
Replace the title of the document or its children using a dictionary map.
"""
if self.title is None:
return
if level is None or level == self.level:
self.title = replacements.get(self.title, self.title)
if level == self.level:
return
for part in self.parts:
part.replace_title(replacements=replacements, level=level)
def get_links(self) -> List[str]:
"""
Get any wikilink-style links from the document or its children.
"""
links = re.findall(r"\[\[([^\]]*)\]\]", self.body)
for part in self.parts:
links.extend(part.get_links())
return links
def get_images(self) -> List[str]:
"""
Get any wikilink-style links from the document or its children.
"""
images = re.findall(r"\!\[([^\]]*)\]\(([^\)]*)\)", self.body)
images = [x[1] for x in images]
for part in self.parts:
images.extend(part.get_images())
return images
def add_part(self, new_part: "MarkdownPart") -> None:
"""
Add a copy of the provided part as a sub-heading.
"""
new_part = new_part.copy()
new_part.set_level(self.level + 1)
self.parts.append(new_part)
def html(self) -> str:
"""
Render this document as a a chunk of HTML.
"""
return self.env.get_template("document.html").render(document=self)
def _body_html(self) -> str:
return self.renderer.convert(self.combine(metadata=False))
def _parse_raw(self, text: str) -> None:
"""
Parse raw markdown into its component parts.
"""
lines = text.split("\n")
line_no = -1
lines = self._parse_metadata(lines)
lines = self._parse_title(lines)
child_pattern = re.compile(f"^({'#' * (self.level + 1)}) (.*)$")
# Parse main text
body: List[str] = []
for line_no, line in enumerate(lines):
if re.match(child_pattern, line):
break
body.append(line)
lines = lines[line_no:]
# Parse children
current_child: List[str] = []
children: List[MarkdownPart] = []
for line_no, line in enumerate(lines):
if not re.match(child_pattern, line):
current_child.append(line)
else:
temp = "\n".join(current_child)
if temp.strip():
children.append(MarkdownPart(temp))
current_child = [line]
if current_child and re.match(child_pattern, current_child[0]):
temp = "\n".join(current_child)
if temp.strip():
children.append(MarkdownPart(temp))
self.body = "\n".join(body).strip("\n") + "\n"
self.parts = children
self.set_level(self.level)
def _parse_metadata(self, lines: List[str]) -> List[str]:
"""Parse any metadata from the file."""
line_no = -1
if lines[0].startswith("---"):
meta_block = []
for line_no, line in enumerate(lines):
if line.startswith("---"):
if line_no > 0:
break
else:
meta_block.append(line)
self.meta = {**self.meta, **yaml.safe_load("\n".join(meta_block))}
if "title" in self.meta:
self.title = self.meta["title"]
self.level = 0
lines = lines[line_no + 1 :]
return lines
def _parse_title(self, lines: List[str]) -> List[str]:
"""Parse the document to get the title of the document."""
heading_pattern = re.compile(r"^(#+) (.*)$")
for line_no, line in enumerate(lines):
if not line.strip():
continue
if not re.match(heading_pattern, line):
break
temp, first_heading = re.findall(heading_pattern, line)[0]
if not self.title or first_heading == self.title:
self.title = first_heading
self.level = len(temp)
lines = lines[line_no + 1 :]
break
return lines
def set_level(self, level: int) -> None:
"""
Set the level of a document part.
Its children parts are set to one level higher (recursively).
"""
self.level = level
for part in self.parts:
part.set_level(level + 1)
def __repr__(self) -> str:
return f"<MarkdownPart, title = {self.title}, level = {self.level}>"
__str__ = __repr__
```
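A usage sketch for `MarkdownPart`, assuming the `tidynotes` package is importable (as in the tests below); the note text itself is made up:
```python
from tidynotes.mardown_document import MarkdownPart

text = "\n".join(
    [
        "# Daily notes",
        "",
        "Top-level remarks.",
        "",
        "## Project A",
        "",
        "Worked on [[Project A]] parsing.",
        "",
        "## Project B",
        "",
        "Nothing to report.",
    ]
)

note = MarkdownPart(text)
print(note.title)                      # "Daily notes"
print([p.title for p in note.parts])   # ["Project A", "Project B"]
print(note.get_links())                # ["Project A"]

# Pull out matching sub-sections and re-serialise the whole document.
project_a_parts = note.extract_parts(r"Project A")
print(note.combine())
```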
#### File: markdown_notebook/tests/test_markdown_document.py
```python
from tidynotes.mardown_document import MarkdownPart
def test_simple_note() -> None:
"""Test for simple note creation."""
title = "Test Title"
body = "This is the note body\n"
test_note = MarkdownPart(f"# {title}\n\n{body}")
assert test_note.title == title
assert test_note.body == body
assert len(test_note.parts) == 0
def test_stub() -> None:
"""Tests for stub note check."""
title = "Test Title"
body = ""
test_note = MarkdownPart(f"# {title}\n\n{body}")
assert test_note.is_stub()
title = "Test Title"
body = "."
test_note = MarkdownPart(f"# {title}\n\n{body}")
assert not test_note.is_stub()
def test_replacement() -> None:
"""Check text replacement without regex."""
title = "Test Title"
body = "Hello world, this isn't a test!\n"
test_note = MarkdownPart(f"# {title}\n\n{body}")
test_note.make_replacement("isn't", "is", regex=False)
assert test_note.body == "Hello world, this is a test!\n"
def test_replacement_regex() -> None:
"""Check text replacement with regex."""
title = "Test Title"
body = "Hello world, this isn't a test!\n"
test_note = MarkdownPart(f"# {title}\n\n{body}")
test_note.make_replacement("isn't", "is", regex=True)
assert test_note.body == "Hello world, this is a test!\n"
def test_no_title_note() -> None:
"""Test for note creation without a title."""
body = "This is the note body"
test_note = MarkdownPart(f"{body}")
assert test_note.title is None
assert test_note.body.strip() == body
assert len(test_note.parts) == 0
``` |
{
"source": "JosephMcGrath/tidyclipper",
"score": 3
} |
#### File: src/tidyclipper/feed_entry.py
```python
import datetime
import re
from urllib import parse
import feedparser
from bs4 import BeautifulSoup
from .templates import ENTRY
def sanitise_html(html: str) -> str:
"""
Removes a set of tags from a HTML string.
Intended to return HTML that can be embedded in a larger document.
"""
if html is None:
return ""
soup = BeautifulSoup(html, "lxml")
# Don't want these tags:
for tag_label in ["img", "script", "embed", "iframe", "hr"]:
for entry in soup.findAll(tag_label):
entry.extract()
# Don't want most attributes
for tag in soup.recursiveChildGenerator():
if hasattr(tag, "attrs"):
tag.attrs = {
key: value for key, value in tag.attrs.items() if key == "href"
}
# Remove tags without text
for target_tag in soup.find_all():
if len(target_tag.get_text(strip=True)) == 0:
target_tag.extract()
output = soup.prettify()
# Wipe out unwanted tags entirely
output = re.sub(r"<\/?html>", "", output)
output = re.sub(r"<\/?body>", "", output)
output = re.sub(r"<\/?div>", "", output)
output = re.sub(r"<\/?span>", "", output)
output = re.sub(r"(\s)+", r"\1", output)
output = re.sub(r"<(\/?)h1>", r"<\1h3>", output)
output = re.sub(r"<(\/?)h2>", r"<\1h3>", output)
output = output.strip()
return output
def sanitise_url(url: str) -> str:
"""
Cleans up a URL by removing its query string.
"""
temp = list(parse.urlparse(url))
temp[4] = ""
return parse.urlunparse(temp)
class FeedEntry:
"""
A single entry from an RSS feed.
"""
@classmethod
def from_rss(
cls, entry: feedparser.FeedParserDict, feed: feedparser.FeedParserDict
) -> "FeedEntry":
"""
Converts a feedparser entry / feed into a FeedEntry.
"""
try:
time = datetime.datetime(*entry.published_parsed[:6]).isoformat()
except (AttributeError, TypeError):
time = datetime.datetime.now().isoformat()
return cls(
title=entry.get("title"),
summary=entry.get("summary"),
link=sanitise_url(entry.get("link")),
time=time,
feed=feed.feed.get("title"),
source=feed.get("href"),
)
def __init__(
self, title: str, summary: str, link: str, time: str, feed: str, source: str
):
if title is None:
self.title: str = "-"
else:
self.title = title.strip()
self.summary = summary
self.link = link
self.time = time
self.feed = feed
self.source = source
try:
self.summary = sanitise_html(self.summary)
except TypeError:
pass
def __hash__(self) -> int:
return hash(self.link)
def __repr__(self) -> str:
return f"<Feed Entry : {self.title[:50]}>"
def as_markdown(self) -> str:
"""
Convert the feed entry to a simple markdown output format.
"""
output = f"## {self.title}\n\n"
output += f"* {self.time}\n* {self.feed}\n* {self.link}\n\n"
output += f"{self.summary}\n\n---"
return output
def as_html(self) -> str:
"""
Formats the feed entry to a snippet of HTML.
"""
self.summary = sanitise_html(self.summary)
return ENTRY.render(entry=self)
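# A small end-to-end sketch (the feed URL is a placeholder; any RSS/Atom feed works):
if __name__ == "__main__":
    parsed = feedparser.parse("https://example.com/feed.xml")
    entries = [FeedEntry.from_rss(entry, parsed) for entry in parsed.entries]
    for item in entries:
        print(item.as_markdown())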
``` |
{
"source": "josephmckenna/2021_June_IOP_CAPS_2021",
"score": 3
} |
#### File: 2021_June_IOP_CAPS_2021/examples/print_type_info.py
```python
import sys
def PrintTypeInfoTitle():
print("Name\tBytes\tmin\tmax\n")
def PrintTypeInfo(T):
print((type(T).__name__) + "\t" + str(sys.getsizeof(T)))
PrintTypeInfo(True)
PrintTypeInfo('a')
PrintTypeInfo(123)
PrintTypeInfo(12345)
PrintTypeInfo(2* 10**307)
``` |
{
"source": "josephmckinsey/movievoting",
"score": 3
} |
#### File: josephmckinsey/movievoting/movievoting.py
```python
import inspect
from textwrap import wrap
import gspread
import numpy
import terminaltables
from google_auth_oauthlib.flow import Flow
class Credentials:
"This is a complete hack to work with gspread and google-auth."
def __init__(self, token, refresh):
self.access_token = token
self.refresh = refresh
def debug_print(self):
"Aid in debugging this mess"
print("------ Access Token --------")
print(self.access_token)
print("------ Refresh --------")
print(inspect.getsource(self.refresh))
@classmethod
def from_flowcred(cls, flow_cred):
"""Create flowcred using deprecated flowcred object"""
return cls(flow_cred.token, flow_cred.refresh)
def get_credentials():
"""Generates new credentials for google api.
TODO: google_auth has yet to implement storage, so that may have wait for
another day.
TODO: This also doesn't automatically open in your browser.
"""
# We only need read permissions.
scopes = ['https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/spreadsheets.readonly']
flow = Flow.from_client_secrets_file(
'client_secret.json', scopes,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_url, _ = flow.authorization_url(prompt='consent')
print('Go to')
print(auth_url)
print('and copy Auth Code')
code = input('Enter auth code: ')
print("Fetching Token.")
flow.fetch_token(code=code)
return flow.credentials
def get_column(wks, row, col, number):
"""Get column with starting row and column, and then going down `number`
rows."""
return list(map(lambda x: x.value,
wks.range(row, col, number+row-1, col)))
class Movie:
"""Define a movie class. Should probably be replaced with dictionary."""
def __init__(self, name, streaming, notes, veto):
self.name = name
self.streaming = streaming
self.notes = notes
self.veto = veto
class Movies:
"""Handles movie information as well as weighted rankings"""
def __init__(self, movies, people, rankings):
"""Define all the things!"""
self.movies = movies
self.people = people
self.rankings = rankings
self.votes = []
self.weighted_ranking = rankings
self.sorting = rankings
@classmethod
def from_worksheet(cls, wks):
"""Grab all the necessary information if it is laid out as expected. See
template for actual understanding behind this."""
print("Getting movie information based on template")
# See the movie spreadsheet for values it is creating. The +2 terms came from
# mostly trial and error.
row_start = 3
movie_names = wks.col_values(1)[2:]
num_movies = len(movie_names)
streaming = get_column(wks, row_start, 2, num_movies)
notes = get_column(wks, row_start, 3, num_movies)
veto = get_column(wks, row_start, 4, num_movies)
movies = []
for i in range(num_movies):
movies.append(
Movie(movie_names[i], streaming[i],
notes[i], veto[i])
)
# This should allow for any number of people voting
num_col = len(wks.row_values(2))
people = list(map(lambda x: x.value, wks.range(2, 5, 2, num_col)))
print("Getting people's scores")
# Gets the whole matrix of values
unprocessed_rankings = wks.range(3, 5, num_movies+2, num_col)
# Move out of cells. Convert to float. And turn it into a matrix.
rankings = numpy.reshape(list(map(lambda x: float(x.value),
unprocessed_rankings)),
(num_movies, num_col - 5 + 1))
return cls(movies, people, rankings)
def input_votes(self):
"""Reset inputs from user input"""
# Request the votings. Use positives for people who are here, and negatives for
# people who aren't.
print("\nPlace equal positive values for people present\
and negative for those absent.")
self.votes.clear()
for i in self.people:
self.votes.append(float(input(str(i) + ': ')))
def calculate_weights(self):
"""Use matrix multiplication to apply weights to rankings. Stores sorted
values to self.sorting."""
self.weighted_ranking = self.rankings.dot(self.votes)
self.sorting = list(reversed(self.weighted_ranking.argsort()))
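# Worked example of the weighting (numbers are made up): with two movies and
# three voters, rankings = [[5, 3, 1], [2, 4, 4]] and votes = [1, 1, -1]
# (third voter absent) gives rankings.dot(votes) = [5+3-1, 2+4-4] = [7, 2],
# so the first movie is ranked ahead of the second.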
def pretty_table(self):
"""Formats nice table with `terminaltables` package. Returns unicode string
that you can then print."""
data = []
for i in self.sorting:
entry = self.movies[i]
data.append([entry.name, entry.streaming, '', # notes is blank for wrapping
entry.veto, self.weighted_ranking[i]])
# Should use fancy terminal characters for seamless lines.
table = terminaltables.SingleTable(data)
table.inner_heading_row_border = False # We don't care about titles here.
# We need the max width available for notes, so we can wrap it correctly.
max_width = table.column_max_width(2) # 2 is the index for notes field.
# We want to wrap all of the notes fields (see textwrap). Then we use it to
# replace the table_data.
for i, sorted_i in enumerate(self.sorting):
wrapped_notes = '\n'.join(
wrap(self.movies[sorted_i].notes, max_width)
)
table.table_data[i][2] = wrapped_notes
return table.table # need to return the string
print("Authorizing spreadsheet access.")
credentials = Credentials.from_flowcred(get_credentials())
gc = gspread.authorize(credentials)
# Open a worksheet from spreadsheet with one shot
print('Opening "Movies" spreadsheet and "Movies" sheet')
worksheet = gc.open("Movies").worksheet('Movies')
what_to_watch = Movies.from_worksheet(worksheet)
what_to_watch.input_votes()
what_to_watch.calculate_weights()
print()
print(what_to_watch.pretty_table())
``` |
{
"source": "JosephMeghanath/apptuit-py",
"score": 2
} |
#### File: apptuit-py/apptuit/apptuit_client.py
```python
import json
import os
import sys
import time
import warnings
import zlib
from collections import defaultdict
import requests
from apptuit import APPTUIT_PY_TOKEN, APPTUIT_PY_TAGS, DEPRECATED_APPTUIT_PY_TOKEN, __version__
from apptuit.utils import _contains_valid_chars, _get_tags_from_environment, \
_validate_tags, sanitize_name_prometheus, sanitize_name_apptuit
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
MAX_TAGS_LIMIT = 25
SANITIZERS = {
"apptuit": sanitize_name_apptuit,
"prometheus": sanitize_name_prometheus
}
def _get_user_agent():
py_version = sys.version.split()[0]
return "apptuit-py-" + __version__ + ", requests-" + requests.__version__ + ", Py-" + py_version
def _generate_query_string(query_string, start, end):
ret = "?start=" + str(start)
if end:
ret += "&end=" + str(end)
ret += "&q=" + quote(query_string, safe='')
return ret
def _parse_response(resp, start, end=None):
json_resp = json.loads(resp)
outputs = json_resp["outputs"]
if not outputs: # Pythonic way of checking if list is empty
return None
qresult = QueryResult(start, end)
for output in outputs:
results = output["result"]
if not results:
continue
output_id = output["id"]
qresult[output_id] = Output()
for result in results:
dps = result["dps"]
index = []
values = []
for point in dps:
if point[0] < start:
continue
if end is not None and point[0] >= end:
continue
index.append(point[0])
values.append(point[1])
series = TimeSeries(result["metric"], result["tags"], index, values)
qresult[output_id].series.append(series)
return qresult
class Apptuit(object):
"""
Apptuit client - providing APIs to send and query data from Apptuit
"""
def __init__(self, token=None, api_endpoint="https://api.apptuit.ai",
global_tags=None, ignore_environ_tags=False,
sanitize_mode="prometheus"):
"""
Create an apptuit client object
Params:
token: Apptuit token for your tenant
api_endpoint: Apptuit API End point (including the protocol and port)
global_tags: Tags for all datapoints (should be a dict). If you pass
value for global_tags, the APPTUIT_PY_TAGS environment variable
will not be used, even if ignore_environ_tags is false.
ignore_environ_tags: True/False - whether to use environment variable for
global tags (APPTUIT_PY_TAGS)
sanitize_mode: will enable sanitizer, which will automatically change your
metric names to be compatible with apptuit or prometheus. Set it to
None of not needed.
"""
self.sanitizer = None
if sanitize_mode:
self.sanitizer = SANITIZERS.get(sanitize_mode.lower(), None)
if not self.sanitizer:
raise ValueError("sanitizer_mode can only be set to apptuit"
",prometheus or None.")
if not token:
token = os.environ.get(APPTUIT_PY_TOKEN)
if not token:
token = os.environ.get(DEPRECATED_APPTUIT_PY_TOKEN)
if token:
warnings.warn("The environment variable %s is deprecated,"
"please use %s instead" %
(DEPRECATED_APPTUIT_PY_TOKEN, APPTUIT_PY_TOKEN),
DeprecationWarning)
if not token:
raise ValueError("Missing Apptuit API token, "
"either pass it as a parameter or "
"set as value of the environment variable '"
+ APPTUIT_PY_TOKEN + "'.")
self.token = token
if not api_endpoint:
raise ValueError("Invalid value for the 'api_endpoint' parameter")
self.endpoint = api_endpoint
if self.endpoint[-1] == '/':
self.endpoint = self.endpoint[:-1]
self._global_tags = global_tags
if not self._global_tags and not ignore_environ_tags:
self._global_tags = _get_tags_from_environment()
@property
def put_apiurl(self):
"""
Apptuit PUT API URL
"""
return self.endpoint + "/api/put?details"
def _combine_tags_with_globaltags(self, tags):
if tags:
if self._global_tags:
combined_tags = self._global_tags.copy()
combined_tags.update(tags)
else:
combined_tags = tags
return combined_tags
elif self._global_tags:
return self._global_tags
return None
def _create_payload_from_datapoints(self, datapoints):
data = []
for point in datapoints:
if self.sanitizer:
sanitized_metric = self.sanitizer(point.metric)
else:
if not _contains_valid_chars(point.metric):
raise ValueError("Metric Name %s contains an invalid character, "
"allowed characters are unicode letter, "
"a-z, A-Z, 0-9, -, _, ., and /" % point.metric)
sanitized_metric = point.metric
tags = self._combine_tags_with_globaltags(point.tags)
if not tags:
raise ValueError("Missing tags for the metric "
+ point.metric +
". Either pass it as value of the tags"
" parameter to DataPoint or"
" set environment variable '"
+ APPTUIT_PY_TAGS +
"' for global tags")
if len(tags) > MAX_TAGS_LIMIT:
raise ValueError("Too many tags for datapoint %s, maximum allowed number of tags "
"is %d, found %d tags" % (point, MAX_TAGS_LIMIT, len(tags)))
if self.sanitizer:
sanitized_tags = {}
for key, val in tags.items():
sanitized_tags[self.sanitizer(key)] = val
tags = sanitized_tags
else:
_validate_tags(tags)
row = dict()
row["metric"] = sanitized_metric
row["timestamp"] = point.timestamp
row["value"] = point.value
row["tags"] = tags
data.append(row)
return data
def _create_payload_from_timeseries(self, timeseries_list):
data = []
points_count = 0
for timeseries in timeseries_list:
tags = self._combine_tags_with_globaltags(timeseries.tags)
if not tags:
raise ValueError("Missing tags for the metric '%s'. Either pass it as value "
"of the tags parameter to TimeSeriesName, or set environment "
"variable '%s' for global tags, or pass 'global_tags' parameter "
"to the apptuit_client" % (timeseries.metric, APPTUIT_PY_TAGS))
if len(tags) > MAX_TAGS_LIMIT:
raise ValueError("Too many tags for timeseries %s, maximum allowed number of tags "
"is %d, found %d tags" % (timeseries, MAX_TAGS_LIMIT, len(tags)))
for timestamp, value in zip(timeseries.timestamps, timeseries.values):
row = {"metric": timeseries.metric,
"tags": tags,
"timestamp": timestamp,
"value": value}
data.append(row)
points_count += 1
return data, points_count
def send(self, datapoints, timeout=60):
"""
Send the given set of datapoints to Apptuit
Params:
datapoints: A list of DataPoint objects
timeout: Timeout (in seconds) for the HTTP request
It raises an ApptuitSendException in case the backend API responds with an error
"""
if not datapoints:
return
payload = self._create_payload_from_datapoints(datapoints)
self.__send(payload, len(datapoints), timeout)
def send_timeseries(self, timeseries_list, timeout=60):
"""
Send a list of timeseries to Apptuit
Parameters
----------
timeseries_list: A list of TimeSeries objects
timeout: Timeout (in seconds) for the HTTP request
"""
if not timeseries_list:
return
data, points_count = self._create_payload_from_timeseries(timeseries_list)
if points_count != 0:
self.__send(data, points_count, timeout)
@staticmethod
def __get_size_in_mb(buf):
return sys.getsizeof(buf) * 1.0 / (1024 ** 2)
def __send(self, payload, points_count, timeout):
body = json.dumps(payload)
body = zlib.compress(body.encode("utf-8"))
headers = dict()
headers["Authorization"] = "Bearer " + self.token
headers["Content-Type"] = "application/json"
headers["Content-Encoding"] = "deflate"
headers["User-Agent"] = _get_user_agent()
response = requests.post(self.put_apiurl, data=body, headers=headers, timeout=timeout)
if response.status_code != 200 and response.status_code != 204:
status_code = response.status_code
if status_code == 400:
resp_json = response.json()
raise ApptuitSendException(
"Apptuit.send() failed, Due to %d" % status_code,
status_code, resp_json["success"],
resp_json["failed"], resp_json["errors"]
)
if status_code == 413:
raise ApptuitSendException("Too big payload for Apptuit.send(). Trying to send"
" %f mb of data with %d points, please try sending "
"again with fewer points" %
(self.__get_size_in_mb(body), points_count),
status_code, 0, points_count)
if status_code == 401:
error = "Apptuit API token is invalid"
else:
error = "Server Error"
raise ApptuitSendException("Apptuit.send() failed, due to %d: %s" %
(status_code, error),
status_code, 0, points_count, [])
def query(self, query_str, start, end=None, retry_count=0, timeout=180):
"""
Execute the given query on Query service
Params:
query_str - The query string
start - the start timestamp (unix epoch in seconds)
end - the end timestamp (unix epoch in seconds)
timeout - timeout (in seconds) for the HTTP request
Returns a QueryResult object
Individual queried items can be accessed by indexing the result object using either
the integer index of the metric in the query or the metric name.
Example:
apptuit = Apptuit(token=token, api_endpoint='http://api.apptuit.ai')
res = apptuit.query("cpu=fetch('node.cpu').downsample('1h', 'avg');\n \
load=fetch('node.load1').downsample('1h', 'avg');\n \
output(cpu, load)",start=start_time)
# The resulting data can be accessed in two ways
# 1. using the output name used in the query:
cpu_df = res['cpu'].to_df()
load_df = res['load'].to_df()
# 2. using integer index based on the ordering of the metric in the query
cpu_df = res[0].to_df()
load_df = res[1].to_df()
"""
try:
url = self.__generate_request_url(query_str, start, end)
return self._execute_query(url, start, end, timeout)
except (requests.exceptions.HTTPError, requests.exceptions.SSLError) as e:
if retry_count > 0:
time.sleep(1)
return self.query(query_str, start, end, retry_count=retry_count - 1)
else:
raise ApptuitException("Failed to get response from Apptuit"
"query service due to exception: %s" % str(e))
def _execute_query(self, query_string, start, end, timeout):
headers = dict()
headers["User-Agent"] = _get_user_agent()
if self.token:
headers["Authorization"] = "Bearer " + self.token
hresp = requests.get(query_string, headers=headers, timeout=timeout)
body = hresp.content
return _parse_response(body, start, end)
def __generate_request_url(self, query_string, start, end):
query_string = self.endpoint + "/api/query" + \
_generate_query_string(query_string, start, end)
return query_string
class TimeSeries(object):
"""
Represents a timeseries consisting of metadata, such as tags and metric name, as well as
the data (the index and the values)
"""
def __init__(self, metric, tags, index=None, values=None):
self.name = TimeSeriesName(metric, tags)
if not index and values:
raise ValueError("index cannot be None if values is not None")
if index and not values:
raise ValueError("values cannot be None if index is not None")
if index and values:
if len(index) != len(values):
raise ValueError("Length of index and values must be equal")
self.timestamps = index or []
self.values = values or []
@property
def tags(self):
return self.name.tags
@property
def metric(self):
return self.name.metric
def __repr__(self):
repr_str = '%s{' % self.name.metric
for tagk in sorted(self.name.tags):
tagv = self.name.tags[tagk]
repr_str = repr_str + '%s:%s, ' % (tagk, tagv)
repr_str = repr_str[:-2] + '}'
return repr_str
def __str__(self):
return self.__repr__()
def __len__(self):
return len(self.timestamps)
def add_point(self, timestamp, value):
"""
Add a new point to the timeseries object
"""
self.timestamps.append(timestamp)
self.values.append(float(value))
class TimeSeriesName(object):
"""
Encapsulates a timeseries name representation by using the metric name and tags
"""
def __init__(self, metric, tags):
"""
Parameters
----------
metric: name of the metric
tags: tags for the metric (expected a dict type)
"""
self.metric = metric
self.tags = tags
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, tags):
if tags:
for key in tags:
if not key:
raise ValueError("Tag key can't be '%s'" % key)
self._tags = tags
@property
def metric(self):
return self._metric
@metric.setter
def metric(self, metric):
if not metric:
raise ValueError("metric name cannot be None or empty")
self._metric = metric
def __str__(self):
return self.metric + json.dumps(self.tags, sort_keys=True)
@staticmethod
def encode_metric(metric_name, metric_tags):
"""
Generate an encoded metric name by combining metric_name and metric_tags
Params:
metric_name: name of the metric
metric_tags: tags (expected a dictionary of tag keys vs values)
Returns: An string encoding the metric name and the tags which can be used when
creating metric objects, such as counters, timers etc.
Example:
s = reporter.encode_metric_name('node.cpu', {"type": "idle"})
print(s) # 'node.cpu {"type": "idle"}'
"""
if not isinstance(metric_name, str):
raise ValueError("metric_name should be a string")
if metric_name == "":
raise ValueError("metric_name cannot be empty")
if not isinstance(metric_tags, dict):
raise ValueError("metric_tags must be a dictionary")
encoded_metric_name = metric_name + json.dumps(metric_tags, sort_keys=True)
return encoded_metric_name
@staticmethod
@lru_cache(maxsize=2000)
def decode_metric(encoded_metric_name):
"""
Decode the metric name as encoded by encode_metric_name
Params:
encoded_metric_name: a string encoded in a format as returned by encode_metric_name()
example: 'metricName {"metricTagKey1":"metricValue1","metricTagKey2":"metricValue2"}'
Returns:
The metric name and the dictionary of tags
"""
if encoded_metric_name is None or encoded_metric_name == "":
raise ValueError("Invalid value for encoded_metric_name")
metric_tags = {}
metric_name = encoded_metric_name.strip()
brace_index = encoded_metric_name.find('{')
if brace_index > -1:
try:
metric_tags = json.loads(encoded_metric_name[brace_index:])
metric_name = encoded_metric_name[:brace_index].strip()
except Exception as err:
raise ValueError("Failed to parse the encoded_metric_name %s, invalid format"
% encoded_metric_name, err)
return metric_name, metric_tags
class Output(object):
"""
Represents the output of a query, consisting of a list of TimeSeries
objects representing each time series returned for the query.
"""
def __init__(self):
self.series = []
self.__dataframe = None
def to_df(self, tz=None):
"""
Create a Pandas DataFrame from this data
"""
import pandas as pd
series_names = []
series_list = []
for s in self.series:
series_name = str(s)
series_names.append(series_name)
series_index = pd.to_datetime(s.timestamps, unit='s').tz_localize(tz)
pseries = pd.Series(data=s.values, index=series_index)
series_list.append(pseries)
dataframe = pd.concat(series_list, axis=1)
dataframe.columns = series_names
self.__dataframe = dataframe
return dataframe
class QueryResult(object):
"""
The object returned by Apptuit.query method. Represents the combined
results of the query being executed. If the query which was executed consisted
of multiple lines and multiple outputs were expected it will contain multiple Output
objects for each of those.
"""
def __init__(self, start, end=None):
self.__outputs = defaultdict(Output)
self.start = start
self.end = end
self.__output_keys = {}
self.__output_index = 0
def __repr__(self):
return '{start: %d, end: %s, outputs: %s}' % \
(self.start, str(self.end) if self.end is not None else '',
', '.join(self.__outputs.keys()))
def __setitem__(self, key, value):
self.__outputs[key] = value
self.__output_keys[self.__output_index] = key
self.__output_index += 1
def __getitem__(self, key):
output_id = key
if isinstance(key, int):
output_id = self.__output_keys[key]
return self.__outputs[output_id]
class DataPoint(object):
"""
A single datapoint, representing value of a metric at a specific timestamp
"""
def __init__(self, metric, tags, timestamp, value):
"""
Params:
metric: The name of the metric
tags: A dict representing the tag keys and values of this metric
timestamp: Number of seconds since Unix epoch
value: value of the metric at this timestamp (int or float)
"""
self.timestamp = timestamp
self.timeseries_name = TimeSeriesName(metric, tags)
try:
self.value = float(value)
except TypeError:
raise ValueError("Expected a numeric value got %s" % value)
@property
def metric(self):
return self.timeseries_name.metric
@property
def tags(self):
return self.timeseries_name.tags
def __repr__(self):
_repr = self.metric + "{"
for tagk in sorted(self.tags):
_repr = _repr + "%s:%s, " % (tagk, self.tags[tagk])
_repr = _repr[:-2] + ", timestamp: %d, value: %f}" % (self.timestamp, self.value)
return _repr
def __str__(self):
return self.__repr__()
class ApptuitException(Exception):
def __init__(self, msg):
super(ApptuitException, self).__init__(msg)
self.msg = msg
def __repr__(self):
return self.msg
def __str__(self):
return self.msg
class ApptuitSendException(ApptuitException):
"""
An exception raised by Apptuit.send()
"""
def __init__(self, msg, status_code=None, success=None, failed=None, errors=None):
super(ApptuitSendException, self).__init__(msg)
self.msg = msg
self.status_code = status_code
self.errors = errors or []
self.success = success
self.failed = failed
def __repr__(self):
return self.__str__()
def __str__(self):
msg = str(self.failed) + " points failed"
if self.status_code:
msg += " with status: %d\n" % self.status_code
else:
msg += "\n"
for error in self.errors:
dp = error["datapoint"]
error_msg = error["error"]
msg += "%s error occurred in the datapoint %s\n" % (str(error_msg), str(dp))
return msg
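# A minimal send sketch (the token and tag values are placeholders — supply your own
# tenant token, e.g. via the APPTUIT_PY_TOKEN environment variable):
if __name__ == "__main__":
    client = Apptuit(token="YOUR_API_TOKEN", global_tags={"host": "example-host"})
    now = int(time.time())
    points = [DataPoint(metric="demo.metric", tags={"env": "test"}, timestamp=now, value=42)]
    try:
        client.send(points)
    except ApptuitSendException as exc:
        print(exc)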
``` |
{
"source": "josephmisiti/django-c10k-demo",
"score": 3
} |
#### File: django-c10k-demo/c10ktools/websockets.py
```python
import collections
import io
import random
import struct
import tulip
class WebSocket:
"""
Basic WebSocket protocol implementation on top of Tulip.
"""
server = True
def __init__(self, reader, writer):
"""
Create a WebSocket handler.
reader must be a tulip.StreamReader, writer a tulip.WriteTransport.
"""
self.reader = reader
self.writer = writer
self.local_closed = False
self.remote_closed = False
@tulip.coroutine
def read_message(self):
"""
Read the next message from the client.
A text frame is returned as str, a binary frame as bytes, None if the
end of the message stream was reached.
"""
# Handle fragmentation
frame = yield from self.read_data_frame()
if frame is None:
return
text = (frame.opcode == 1)
data = [frame.data]
while not frame.fin:
frame = yield from self.read_data_frame()
assert frame.opcode == 0
data.append(frame.data)
data = b''.join(data)
return data.decode('utf-8') if text else data
def write_message(self, data, opcode=None):
"""
Write a message to the client.
By default, str is sent as a text frame, bytes as a binary frame.
"""
self.write_frame(data, opcode)
def close(self, data=b''):
"""
Close the connection with the client.
"""
if not self.local_closed:
self.write_frame(data, opcode=8)
self.local_closed = True
self.writer.close()
def ping(self, data=b''):
"""
Send a Ping.
"""
self.write_frame(data, opcode=9)
def pong(self, data=b''):
"""
Send a Pong.
"""
self.write_frame(data, opcode=10)
@tulip.coroutine
def read_data_frame(self):
while not self.remote_closed:
frame = yield from self.read_frame()
if frame.opcode & 0b1000: # control frame
assert 8 <= frame.opcode <= 10
if frame.opcode == 8:
self.remote_closed = True
self.close()
raise StopIteration # could use a specific exception
elif frame.opcode == 9:
self.pong(frame.data)
elif frame.opcode == 10:
pass # unsolicited Pong
else: # data frame
assert 0 <= frame.opcode <= 2
return frame
@tulip.coroutine
def read_frame(self):
if self.remote_closed:
raise IOError("Cannot read from a closed WebSocket")
# Read the header
data = yield from self.reader.readexactly(2)
head1, head2 = struct.unpack('!BB', data)
fin = bool(head1 & 0b10000000)
assert not head1 & 0b01110000, "reserved bits must be 0"
opcode = head1 & 0b00001111
assert bool(head2 & 0b10000000) == self.server, "invalid masking"
length = head2 & 0b01111111
if length == 126:
data = yield from self.reader.readexactly(2)
length, = struct.unpack('!H', data)
elif length == 127:
data = yield from self.reader.readexactly(8)
length, = struct.unpack('!Q', data)
if self.server:
mask = yield from self.reader.readexactly(4)
# Read the data
data = yield from self.reader.readexactly(length)
if self.server:
data = bytes(b ^ mask[i % 4] for i, b in enumerate(data))
return Frame(fin, opcode, data)
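# Worked example of the header parsing above: an unmasked server-to-client text
# frame carrying "Hi" is the four bytes 0x81 0x02 0x48 0x69 — 0x81 = FIN set +
# opcode 1 (text), 0x02 = no mask bit + payload length 2, followed by the payload.
# Client-to-server frames additionally set the mask bit and prepend a 4-byte mask.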
def write_frame(self, data=b'', opcode=None):
if self.local_closed:
raise IOError("Cannot write to a closed WebSocket")
# Encode text and set default opcodes
if isinstance(data, str):
if opcode is None:
opcode = 1
data = data.encode('utf-8')
elif isinstance(data, bytes):
if opcode is None:
opcode = 2
else:
raise TypeError("data must be bytes or str")
# Write the header
header = io.BytesIO()
header.write(struct.pack('!B', 0b10000000 | opcode))
if self.server:
mask_bit = 0b00000000
else:
mask_bit = 0b10000000
mask = struct.pack('!I', random.getrandbits(32))
length = len(data)
if length < 0x7e:
header.write(struct.pack('!B', mask_bit | length))
elif length < 0x7fff:
header.write(struct.pack('!BH', mask_bit | 126, length))
else:
header.write(struct.pack('!BQ', mask_bit | 127, length))
if not self.server:
header.write(mask)
self.writer.write(header.getvalue())
# Write the data
if not self.server:
data = bytes(b ^ mask[i % 4] for i, b in enumerate(data))
self.writer.write(data)
class ClientWebSocket(WebSocket):
"""Client-side WebSocket implementation, for testing."""
server = False
Frame = collections.namedtuple('Frame', ('fin', 'opcode', 'data'))
``` |
{
"source": "josephmisiti/django_gmail",
"score": 2
} |
#### File: django_gmail/django_gmail/views.py
```python
import httplib2
import logging
import pytz
import random
import re
import time
import urllib
import base64
from datetime import datetime, date, timedelta
from email.mime.text import MIMEText
from apiclient.discovery import build
from apiclient.errors import HttpError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.utils.timezone import utc, make_aware, get_current_timezone
from oauth2client import xsrfutil
from oauth2client.client import OAuth2WebServerFlow, FlowExchangeError
from oauth2client.django_orm import Storage
# Assumed location of the GoogleCredentials model (a CredentialsField-backed model in this app).
from django_gmail.models import GoogleCredentials
FLOW = OAuth2WebServerFlow(client_id=settings.GA_CLIENT_ID,
client_secret=settings.GA_CLIENT_SECRET,
scope=settings.GA_SCOPE,
redirect_uri=settings.GOOGLE_REDIRECT_URI)
@login_required
def gmail_oauth_callback(request):
""" Google API oauth2 callback route
https://developers.google.com/api-client-library/python/guide/django
"""
internal_contact = None
code = request.REQUEST.get('code')
state = request.REQUEST.get('state')
user = request.user
#if not xsrfutil.validate_token(settings.SECRET_KEY, state, user):
#print "token not valid"
#return HttpResponseBadRequest()
# if user denies access redirect to activate
if request.GET.get('error') != 'access_denied':
try:
credentials = FLOW.step2_exchange(request.REQUEST)
except FlowExchangeError, exc:
raise Http404
http = httplib2.Http()
http = credentials.authorize(http)
service = build('gmail', 'v1', http=http)
profile = service.users().getProfile(userId="me").execute()
creds,_ = GoogleCredentials.objects.get_or_create(user=request.user)
creds.credentials = credentials
creds.save()
return redirect(settings.GMAIL_POST_AUTH_REDIRECT)
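# Companion view sketch: kicks off step 1 of the OAuth dance by sending the user to
# Google's consent screen. The XSRF state handling mirrors Google's Django sample;
# the view name and URL routing are assumptions, not part of the original module.
@login_required
def gmail_oauth_start(request):
    FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY,
                                                   request.user)
    authorize_url = FLOW.step1_get_authorize_url()
    return HttpResponseRedirect(authorize_url)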
``` |
{
"source": "josephmisiti/django-s3direct",
"score": 2
} |
#### File: django-s3direct/s3direct/views.py
```python
import json
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.core.exceptions import PermissionDenied
from django.conf import settings
from .utils import create_upload_data
S3DIRECT_AUTH_TEST = getattr(settings, 'S3DIRECT_AUTH_TEST', lambda u: True)
@require_POST
def get_upload_params(request):
if not S3DIRECT_AUTH_TEST(request.user):
raise PermissionDenied()
content_type = request.POST['type']
source_filename = request.POST['name']
upload_to = request.POST['upload_to']
data = create_upload_data(content_type, source_filename, upload_to)
return HttpResponse(json.dumps(data), content_type="application/json")
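# For reference, the browser-side uploader is expected to POST three form fields
# (illustrative values): type="image/jpeg", name="photo.jpg", upload_to="uploads/images".
# The JSON response carries the signed upload data produced by utils.create_upload_data().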
``` |
{
"source": "josephmisiti/goodreads_etl_pipeline",
"score": 2
} |
#### File: goodreads_etl_pipeline/src/goodreads_driver.py
```python
from pyspark.sql import SparkSession
from goodreads_transform import GoodreadsTransform
from s3_module import GoodReadsS3Module
from pathlib import Path
import logging
import logging.config
import configparser
from warehouse.goodreads_warehouse_driver import GoodReadsWarehouseDriver
import time
config = configparser.ConfigParser()
config.read_file(open(f"{Path(__file__).parents[0]}/config.cfg"))
# Setting up logger, Logger properties are defined in logging.ini file
logging.config.fileConfig(f"{Path(__file__).parents[0]}/logging.ini")
logger = logging.getLogger(__name__)
def create_sparksession():
return SparkSession.builder.master('yarn').appName("goodreads") \
.config("spark.jars.packages","saurfang:spark-sas7bdat:2.0.0-s_2.11") \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.2") \
.enableHiveSupport().getOrCreate()
def main():
logging.debug("\n\nSetting up Spark Session...")
spark = create_sparksession()
grt = GoodreadsTransform(spark)
# Modules in the project
modules = {
"author.csv": grt.transform_author_dataset,
"book.csv" : grt.transform_books_dataset,
"reviews.csv" : grt.transform_reviews_dataset,
"user.csv" : grt.tranform_users_dataset
}
logging.debug("\n\nCopying data from s3 landing zone to ...")
gds3 = GoodReadsS3Module()
gds3.s3_move_data(source_bucket= config.get('BUCKET','LANDING_ZONE'), target_bucket= config.get('BUCKET', 'WORKING_ZONE'))
files_in_working_zone = gds3.get_files(config.get('BUCKET', 'WORKING_ZONE'))
# Cleanup processed zone if files available in working zone
if len(set(modules.keys()) & set(files_in_working_zone)) > 0:
logging.info("Cleaning up processed zone.")
gds3.clean_bucket(config.get('BUCKET', 'PROCESSED_ZONE'))
for file in files_in_working_zone:
if file in modules.keys():
modules[file]()
logging.debug("Waiting before setting up Warehouse")
time.sleep(5)
# Starting warehouse functionality
grwarehouse = GoodReadsWarehouseDriver()
logging.debug("Setting up staging tables")
grwarehouse.setup_staging_tables()
logging.debug("Populating staging tables")
grwarehouse.load_staging_tables()
logging.debug("Setting up Warehouse tables")
grwarehouse.setup_warehouse_tables()
logging.debug("Performing UPSERT")
grwarehouse.perform_upsert()
if __name__ == "__main__":
main()
``` |
{
"source": "josephmisiti/gunicorn",
"score": 2
} |
#### File: gunicorn/tests/test_007-ssl.py
```python
import inspect
import ssl
from unittest import TestCase
# gunicorn
from gunicorn.config import KeyFile, CertFile, SSLVersion, CACerts, \
SuppressRaggedEOFs, DoHandshakeOnConnect, Ciphers, Setting, validate_bool, validate_string, \
validate_pos_int
class SSLTestCase(TestCase):
def test_settings_classes(self):
""" Tests all settings options and their defaults.
"""
self.assertTrue(issubclass(KeyFile, Setting))
self.assertEquals(KeyFile.name, 'keyfile')
self.assertEquals(KeyFile.section, 'Ssl')
self.assertEquals(KeyFile.cli, ['--keyfile'])
self.assertEquals(KeyFile.meta, 'FILE')
self.assertEquals(KeyFile.default, None)
self.assertTrue(issubclass(CertFile, Setting))
self.assertEquals(CertFile.name, 'certfile')
self.assertEquals(CertFile.section, 'Ssl')
self.assertEquals(CertFile.cli, ['--certfile'])
self.assertEquals(CertFile.default, None)
self.assertTrue(issubclass(SSLVersion, Setting))
self.assertEquals(SSLVersion.name, 'ssl_version')
self.assertEquals(SSLVersion.section, 'Ssl')
self.assertEquals(SSLVersion.cli, ['--ssl-version'])
self.assertEquals(SSLVersion.default, ssl.PROTOCOL_TLSv1)
self.assertTrue(issubclass(CACerts, Setting))
self.assertEquals(CACerts.name, 'ca_certs')
self.assertEquals(CACerts.section, 'Ssl')
self.assertEquals(CACerts.cli, ['--ca-certs'])
self.assertEquals(CACerts.meta, 'FILE')
self.assertEquals(CACerts.default, None)
self.assertTrue(issubclass(SuppressRaggedEOFs, Setting))
self.assertEquals(SuppressRaggedEOFs.name, 'suppress_ragged_eofs')
self.assertEquals(SuppressRaggedEOFs.section, 'Ssl')
self.assertEquals(SuppressRaggedEOFs.cli, ['--suppress-ragged-eofs'])
self.assertEquals(SuppressRaggedEOFs.action, 'store_true')
self.assertEquals(SuppressRaggedEOFs.default, True)
self.assertTrue(issubclass(DoHandshakeOnConnect, Setting))
self.assertEquals(DoHandshakeOnConnect.name, 'do_handshake_on_connect')
self.assertEquals(DoHandshakeOnConnect.section, 'Ssl')
self.assertEquals(DoHandshakeOnConnect.cli, ['--do-handshake-on-connect'])
self.assertEquals(DoHandshakeOnConnect.action, 'store_true')
self.assertEquals(DoHandshakeOnConnect.default, False)
self.assertTrue(issubclass(Ciphers, Setting))
self.assertEquals(Ciphers.name, 'ciphers')
self.assertEquals(Ciphers.section, 'Ssl')
self.assertEquals(Ciphers.cli, ['--ciphers'])
self.assertEquals(Ciphers.default, 'TLSv1')
``` |
{
"source": "josephmisiti/jobtastic",
"score": 2
} |
#### File: jobtastic/jobtastic/task.py
```python
from __future__ import division
import logging
import time
import os
import sys
import warnings
from contextlib import contextmanager
from hashlib import md5
import psutil
from celery.datastructures import ExceptionInfo
from celery.states import PENDING, SUCCESS
from celery.task import Task
from celery.utils import gen_unique_id
get_task_logger = None
try:
from celery.utils.log import get_task_logger
except ImportError:
pass # get_task_logger is new in Celery 3.X
cache = None
try:
# For now, let's just say that if Django exists, we should use it.
# Otherwise, try Flask. This definitely needs an actual configuration
# variable so folks can make an explicit decision.
from django.core.cache import cache
HAS_DJANGO = True
except ImportError:
try:
# We should really have an explicitly-defined way of doing this, but
# for now, let's just use werkzeug Memcached if it exists
from werkzeug.contrib.cache import MemcachedCache
from celery import conf
if conf.CELERY_RESULT_BACKEND == 'cache':
# str.strip() removes characters, not a prefix; drop the scheme with replace() instead.
uri_str = conf.CELERY_CACHE_BACKEND.replace('memcached://', '', 1)
uris = uri_str.split(';')
cache = MemcachedCache(uris)
HAS_WERKZEUG = True
except ImportError:
pass
if cache is None:
raise Exception(
"Jobtastic requires either Django or Flask + Memcached result backend")
from jobtastic.states import PROGRESS
@contextmanager
def acquire_lock(lock_name):
"""
A contextmanager to wait until an exclusive lock is available,
hold the lock and then release it when the code under context
is complete.
TODO: This code doesn't work like it should. It doesn't
wait indefinitely for the lock and in fact cycles through
very quickly.
"""
for _ in range(10):
try:
value = cache.incr(lock_name)
except ValueError:
cache.set(lock_name, 0)
value = cache.incr(lock_name)
if value == 1:
break
else:
cache.decr(lock_name)
else:
yield
cache.set(lock_name, 0)
return
yield
cache.decr(lock_name)
class JobtasticTask(Task):
"""
A base ``Celery.Task`` class that provides some common niceties for running
tasks that return some kind of result for which you need to wait.
To create a task that uses these helpers, use ``JobtasticTask`` as a
subclass and define a ``calculate_result`` method which returns a
    dictionary to be turned into JSON. You will also need to define the
following class variables:
* ``significant_kwargs`` The kwarg values that will be converted to strings
and hashed to determine if two versions of the same task are equivalent.
This is a list of 2-tuples with the first item being the kwarg string and
the second being a callable that converts the value to a hashable string.
If no second item is given, it's assumed that calling ``str()`` on the
value works just fine.
* ``herd_avoidance_timeout`` Number of seconds to hold a lock on this task
for other equivalent runs. Generally, this should be set to the longest
estimated amount of time the task could consume.
The following class members are optional:
* ``cache_prefix`` A unique string representing this task. Eg.
``foo.bar.tasks.BazzTask``
* ``cache_duration`` The number of seconds for which the result of this
task should be cached, meaning subsequent equivalent runs will skip
computation. The default is to do no result caching.
    * ``memleak_threshold`` When a single run of a Task increases the resident
process memory usage by more than this number of MegaBytes, a warning is
logged to the logger. This is useful for finding tasks that are behaving
badly under certain conditions. By default, no logging is performed.
Set this value to 0 to log all RAM changes and -1 to disable logging.
Provided are helpers for:
1. Handling failures to connect the task broker by either directly
running the task (`delay_or_run`) or by returning a task that
contains the connection error (`delay_or_fail`). This minimizes
the user-facing impact of a dead task broker.
    2. Defeating any thundering herd issues by ensuring only one instance of a task with
specific arguments can be running at a time by directing subsequent calls
to latch on to the appropriate result.
3. Caching the final result for a designated time period so that subsequent
equivalent calls return quickly.
4. Returning the results as JSON, so that they can be processed easily by
client-side javascript.
5. Returning time-based, continually updating progress estimates to
front-end code so that users know what to expect.
"""
abstract = True
@classmethod
def delay_or_eager(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails because of a problem with
the broker, run the task eagerly and return an EagerResult.
"""
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args=args, kwargs=kwargs)
except possible_broker_errors:
return self.apply(args=args, kwargs=kwargs)
@classmethod
def delay_or_run(self, *args, **kwargs):
"""
Attempt to call self.delay, or if that fails, call self.run.
Returns a tuple, (result, required_fallback). ``result`` is the result
of calling delay or run. ``required_fallback`` is True if the broker
failed we had to resort to `self.run`.
"""
warnings.warn(
"delay_or_run is deprecated. Please use delay_or_eager",
DeprecationWarning,
)
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
result = self.apply_async(args=args, kwargs=kwargs)
required_fallback = False
except possible_broker_errors:
result = self().run(*args, **kwargs)
required_fallback = True
return result, required_fallback
@classmethod
def delay_or_fail(self, *args, **kwargs):
"""
Attempt to call self.delay, but if that fails with an exception, we
fake the task completion using the exception as the result. This allows
us to seamlessly handle errors on task creation the same way we handle
errors when a task runs, simplifying the user interface.
"""
possible_broker_errors = self._get_possible_broker_errors_tuple()
try:
return self.apply_async(args=args, kwargs=kwargs)
except possible_broker_errors as e:
return self.simulate_async_error(e)
@classmethod
def _get_possible_broker_errors_tuple(self):
if hasattr(self.app, 'connection'):
dummy_conn = self.app.connection()
else:
# Celery 2.5 uses `broker_connection` instead
dummy_conn = self.app.broker_connection()
return dummy_conn.connection_errors + dummy_conn.channel_errors
@classmethod
def simulate_async_error(self, exception):
"""
Take this exception and store it as an error in the result backend.
This unifies the handling of broker-connection errors with any other
type of error that might occur when running the task. So the same
error-handling that might retry a task or display a useful message to
the user can also handle this error.
"""
task_id = gen_unique_id()
async_result = self.AsyncResult(task_id)
einfo = ExceptionInfo(sys.exc_info())
async_result.backend.mark_as_failure(
task_id,
exception,
traceback=einfo.traceback,
)
return async_result
@classmethod
def apply_async(self, args, kwargs, **options):
"""
Put this task on the Celery queue as a singleton. Only one of this type
of task with its distinguishing args/kwargs will be allowed on the
queue at a time. Subsequent duplicate tasks called while this task is
still running will just latch on to the results of the running task by
synchronizing the task uuid. Additionally, identical task calls will
return those results for the next ``cache_duration`` seconds.
"""
self._validate_required_class_vars()
cache_key = self._get_cache_key(**kwargs)
# Check for an already-computed and cached result
task_id = cache.get(cache_key) # Check for the cached result
if task_id:
# We've already built this result, just latch on to the task that
# did the work
logging.info(
'Found existing cached and completed task: %s', task_id)
return self.AsyncResult(task_id)
# Check for an in-progress equivalent task to avoid duplicating work
task_id = cache.get('herd:%s' % cache_key)
if task_id:
logging.info('Found existing in-progress task: %s', task_id)
return self.AsyncResult(task_id)
# It's not cached and it's not already running. Use an atomic lock to
# start the task, ensuring there isn't a race condition that could
# result in multiple identical tasks being fired at once.
with acquire_lock('lock:%s' % cache_key):
task_meta = super(JobtasticTask, self).apply_async(
args,
kwargs,
**options
)
logging.info('Current status: %s', task_meta.status)
if task_meta.status in (PROGRESS, PENDING):
cache.set(
'herd:%s' % cache_key,
task_meta.task_id,
timeout=self.herd_avoidance_timeout)
logging.info(
'Setting herd-avoidance cache for task: %s', cache_key)
return task_meta
def calc_progress(self, completed_count, total_count):
"""
Calculate the percentage progress and estimated remaining time based on
the current number of items completed of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``.
"""
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s", time_spent)
if total_count == 0:
return 100, 1
completion_fraction = completed_count / total_count
if completion_fraction == 0:
completion_fraction = 1
total_time = 0
total_time = time_spent / completion_fraction
time_remaining = total_time - time_spent
completion_display = completion_fraction * 100
if completion_display == 100:
return 100, 1 # 1 second to finish up
return completion_display, time_remaining
def update_progress(
self,
completed_count,
total_count,
update_frequency=1,
):
"""
Update the task backend with both an estimated percentage complete and
number of seconds remaining until completion.
``completed_count`` Number of task "units" that have been completed out
of ``total_count`` total "units."
``update_frequency`` Only actually store the updated progress in the
background at most every ``N`` ``completed_count``.
"""
if completed_count - self._last_update_count < update_frequency:
# We've updated the progress too recently. Don't stress out the
# result backend
return
# Store progress for display
progress_percent, time_remaining = self.calc_progress(
completed_count, total_count)
self.logger.debug(
"Updating progress: %s percent, %s remaining",
progress_percent,
time_remaining)
if self.request.id:
self._last_update_count = completed_count
self.update_state(None, PROGRESS, {
"progress_percent": progress_percent,
"time_remaining": time_remaining,
})
def run(self, *args, **kwargs):
if get_task_logger:
self.logger = get_task_logger(self.__class__.__name__)
else:
# Celery 2.X fallback
self.logger = self.get_logger(**kwargs)
self.logger.info("Starting %s", self.__class__.__name__)
self.cache_key = self._get_cache_key(**kwargs)
# Record start time to give estimated time remaining estimates
self.start_time = time.time()
# Keep track of progress updates for update_frequency tracking
self._last_update_count = 0
# Report to the backend that work has been started.
if self.request.id:
self.update_state(None, PROGRESS, {
"progress_percent": 0,
"time_remaining": -1,
})
memleak_threshold = int(getattr(self, 'memleak_threshold', -1))
if memleak_threshold >= 0:
begining_memory_usage = self._get_memory_usage()
self.logger.info("Calculating result")
try:
task_result = self.calculate_result(*args, **kwargs)
except Exception:
# Don't want other tasks waiting for this task to finish, since it
# won't
self._break_thundering_herd_cache()
raise # We can use normal celery exception handling for this
if hasattr(self, 'cache_duration'):
cache_duration = self.cache_duration
else:
cache_duration = -1 # By default, don't cache
if cache_duration >= 0:
# If we're configured to cache this result, do so.
cache.set(self.cache_key, self.request.id, cache_duration)
# Now that the task is finished, we can stop all of the thundering herd
# avoidance
self._break_thundering_herd_cache()
if memleak_threshold >= 0:
self._warn_if_leaking_memory(
begining_memory_usage,
self._get_memory_usage(),
memleak_threshold,
task_kwargs=kwargs,
)
return task_result
def calculate_result(self, *args, **kwargs):
raise NotImplementedError((
"Tasks using JobtasticTask must implement "
"their own calculate_result"
))
@classmethod
def _validate_required_class_vars(self):
"""
Ensure that this subclass has defined all of the required class
variables.
"""
required_members = (
'significant_kwargs',
'herd_avoidance_timeout',
)
for required_member in required_members:
if not hasattr(self, required_member):
raise Exception(
"JobtasticTask's must define a %s" % required_member)
def on_success(self, retval, task_id, args, kwargs):
"""
Store results in the backend even if we're always eager. This ensures
the `delay_or_run` calls always at least have results.
"""
if self.request.is_eager:
# Store the result because celery wouldn't otherwise
self.update_state(task_id, SUCCESS, retval)
def _break_thundering_herd_cache(self):
cache.delete('herd:%s' % self.cache_key)
@classmethod
def _get_cache_key(self, **kwargs):
"""
Take this task's configured ``significant_kwargs`` and build a hash
that all equivalent task calls will match.
Takes in kwargs and returns a string.
To change the way the cache key is generated or do more in-depth
processing, override this method.
"""
m = md5()
for significant_kwarg in self.significant_kwargs:
key, to_str = significant_kwarg
m.update(to_str(kwargs[key]))
if hasattr(self, 'cache_prefix'):
cache_prefix = self.cache_prefix
else:
cache_prefix = '%s.%s' % (self.__module__, self.__name__)
return '%s:%s' % (cache_prefix, m.hexdigest())
def _get_memory_usage(self):
current_process = psutil.Process(os.getpid())
usage = current_process.get_memory_info()
return usage.rss
def _warn_if_leaking_memory(
self, begining_usage, ending_usage, threshold, task_kwargs,
):
growth = ending_usage - begining_usage
threshold_in_bytes = threshold * 1000000
if growth > threshold_in_bytes:
self.warn_of_memory_leak(
growth,
begining_usage,
ending_usage,
task_kwargs,
)
def warn_of_memory_leak(
self, growth, begining_usage, ending_usage, task_kwargs,
):
self.logger.warning(
"Jobtastic:memleak memleak_detected. memory_increase=%05d unit=MB",
growth / 1000000,
)
self.logger.info(
"Jobtastic:memleak memory_usage_start=%05d unit=MB",
begining_usage / 1000000,
)
self.logger.info(
"Jobtastic:memleak memory_usage_end=%05d unit=MB",
ending_usage / 1000000,
)
self.logger.info(
"Jobtastic:memleak task_kwargs=%s",
repr(task_kwargs),
)
``` |
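A minimal usage sketch of the class above. The task name, kwargs, and numbers are illustrative (not part of jobtastic) and assume a Django project with a working cache and Celery broker:

```python
# Hypothetical JobtasticTask subclass; only attributes documented above are used.
from jobtastic.task import JobtasticTask


class ExportReportTask(JobtasticTask):
    significant_kwargs = [
        ('report_id', str),   # equivalent calls are deduplicated on this value
    ]
    herd_avoidance_timeout = 300  # seconds to lock out duplicate runs
    cache_duration = 600          # serve the finished result from cache for 10 min

    def calculate_result(self, report_id, **kwargs):
        rows = range(100)
        for i, _row in enumerate(rows):
            self.update_progress(i, len(rows), update_frequency=10)
        return {'report_id': report_id, 'row_count': len(rows)}


# Enqueue without letting a broker outage raise inside the web request:
result = ExportReportTask.delay_or_fail(report_id=42)
```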
{
"source": "josephmisiti/merchant",
"score": 3
} |
#### File: merchant/billing/gateway.py
```python
from django.utils.importlib import import_module
from django.conf import settings
gateway_cache = {}
class GatewayModuleNotFound(Exception):
pass
class GatewayNotConfigured(Exception):
pass
class CardNotSupported(Exception):
pass
class InvalidData(Exception):
pass
class Gateway(object):
"""Sub-classes to inherit from this and implement the below methods"""
# To indicate if the gateway is in test mode or not
test_mode = getattr(settings, "MERCHANT_TEST_MODE", True)
# The below are optional attributes to be implemented and used by subclases.
#
# Set to indicate the default currency for the gateway.
default_currency = ""
# Sequence of countries supported by the gateway in ISO 3166 alpha-2 format.
# http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
supported_countries = []
# Sequence of supported card types by the gateway. Members should be valid
# subclasses of the Credit Card object.
supported_cardtypes = []
# Home page URL for the gateway. Used for information purposes only.
homepage_url = ""
# Name of the gateway.
display_name = ""
# Application name or some unique identifier for the gateway.
application_id = ""
def validate_card(self, credit_card):
"""Checks if the credit card is supported by the gateway
and calls the `is_valid` method on it. Responsibility
of the gateway author to use this method before every
card transaction."""
card_supported = None
for card in self.supported_cardtypes:
card_supported = card.regexp.match(credit_card.number)
if card_supported:
credit_card.card_type = card
break
if not card_supported:
raise CardNotSupported("This credit card is not "
"supported by the gateway.")
# Gateways might provide some random number which
# might not pass Luhn's test.
if self.test_mode:
return True
return credit_card.is_valid()
def purchase(self, money, credit_card, options = None):
"""One go authorize and capture transaction"""
raise NotImplementedError
def authorize(self, money, credit_card, options = None):
"""Authorization for a future capture transaction"""
raise NotImplementedError
def capture(self, money, authorization, options = None):
"""Capture funds from a previously authorized transaction"""
raise NotImplementedError
def void(self, identification, options = None):
"""Null/Blank/Delete a previous transaction"""
raise NotImplementedError
def credit(self, money, identification, options = None):
"""Refund a previously 'settled' transaction"""
raise NotImplementedError
def recurring(self, money, creditcard, options = None):
"""Setup a recurring transaction"""
raise NotImplementedError
def store(self, creditcard, options = None):
"""Store the credit card and user profile information
on the gateway for future use"""
raise NotImplementedError
def unstore(self, identification, options = None):
"""Delete the previously stored credit card and user
profile information on the gateway"""
raise NotImplementedError
def get_gateway(gateway, *args, **kwargs):
"""
Return a gateway instance specified by `gateway` name.
    This caches gateway classes in a module-level dictionary to avoid hitting
the filesystem every time we require a gateway.
Should the list of available gateways change at runtime, one should then
invalidate the cache, the simplest of ways would be to:
>>> gateway_cache = {}
"""
# Is the class in the cache?
clazz = gateway_cache.get(gateway, None)
if not clazz:
# Let's actually load it (it's not in the cache)
gateway_filename = "%s_gateway" %gateway
gateway_module = None
for app in settings.INSTALLED_APPS:
try:
gateway_module = import_module(".gateways.%s" %gateway_filename, package=app)
except ImportError:
pass
if not gateway_module:
raise GatewayModuleNotFound("Missing gateway")
gateway_class_name = "".join(gateway_filename.title().split("_"))
try:
clazz = getattr(gateway_module, gateway_class_name)
except AttributeError:
raise GatewayNotConfigured("Missing %s class in the gateway module." %gateway_class_name)
gateway_cache[gateway] = clazz
# We either hit the cache or load our class object, let's return an instance
# of it.
return clazz(*args, **kwargs)
```
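A hedged usage sketch of the gateway interface above. The gateway name and card fields follow the example app later in this repo, the amount is arbitrary, and the corresponding gateway credentials are assumed to be configured in Django settings; the exact fields `CreditCard` accepts are defined elsewhere in the library:

```python
from billing import CreditCard, get_gateway
from billing.gateway import CardNotSupported

merchant = get_gateway("authorize_net")  # resolves and caches AuthorizeNetGateway

# Additional fields (e.g. the cardholder name) may be required by CreditCard.
card_data = {'number': '4111111111111111', 'verification_value': '123',
             'month': 12, 'year': 2020}
credit_card = CreditCard(**card_data)

try:
    merchant.validate_card(credit_card)        # sets card_type or raises
    response = merchant.purchase(100, credit_card)
except CardNotSupported:
    response = None
```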
#### File: billing/integrations/google_checkout_integration.py
```python
from billing import Integration
from billing.models import GCNewOrderNotification
from django.conf import settings
from xml.dom.minidom import Document
import hmac, hashlib, base64
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
from django.conf.urls.defaults import patterns
from django.utils.decorators import method_decorator
SANDBOX_URL = 'https://sandbox.google.com/checkout/api/checkout/v2/checkout/Merchant/%s'
PROD_URL = 'https://checkout.google.com/api/checkout/v2/checkout/Merchant/%s'
BUTTON_SANDBOX_URL = 'http://sandbox.google.com/checkout/buttons/checkout.gif?merchant_id=%(merchant_id)s&w=%(width)s&h=%(height)s&style=white&variant=text&loc=en_US'
BUTTON_URL = 'http://checkout.google.com/checkout/buttons/checkout.gif?merchant_id=%(merchant_id)s&w=%(width)s&h=%(height)s&style=white&variant=text&loc=en_US'
csrf_exempt_m = method_decorator(csrf_exempt)
require_POST_m = method_decorator(require_POST)
class NotConfiguredError(Exception):
pass
class GoogleCheckoutIntegration(Integration):
def __init__(self, options=None):
if not options:
options = {}
super(GoogleCheckoutIntegration, self).__init__(options=options)
if not getattr(settings, "GOOGLE_CHECKOUT_MERCHANT_ID", None):
raise NotConfiguredError("Could not locate the 'GOOGLE_CHECKOUT_MERCHANT_ID' setting")
self.merchant_id = settings.GOOGLE_CHECKOUT_MERCHANT_ID
self._signature = None
@property
def service_url(self):
if self.test_mode:
return SANDBOX_URL % self.merchant_id
return PROD_URL % self.merchant_id
def button_image_url(self):
params = {"merchant_id": self.merchant_id,
"width": self.button_width,
"height": self.button_height}
if self.test_mode:
return BUTTON_SANDBOX_URL % params
return BUTTON_URL % params
@property
def button_width(self):
return self.fields.get("button_width", 180)
@property
def button_height(self):
return self.fields.get("button_height", 46)
def generate_cart_xml(self):
doc = Document()
root = doc.createElement('checkout-shopping-cart')
root.setAttribute('xmlns', 'http://checkout.google.com/schema/2')
doc.appendChild(root)
cart = doc.createElement('shopping-cart')
root.appendChild(cart)
items = doc.createElement('items')
cart.appendChild(items)
ip_items = self.fields.get("items", [])
for item in ip_items:
it = doc.createElement("item")
items.appendChild(it)
it_name = doc.createElement("item-name")
it_name.appendChild(doc.createTextNode(unicode(item["name"])))
it.appendChild(it_name)
it_descr = doc.createElement('item-description')
it_descr.appendChild(doc.createTextNode(unicode(item["description"])))
it.appendChild(it_descr)
it_price = doc.createElement("unit-price")
it_price.setAttribute("currency", unicode(item["currency"]))
it_price.appendChild(doc.createTextNode(unicode(item["amount"])))
it.appendChild(it_price)
it_qty = doc.createElement("quantity")
it_qty.appendChild(doc.createTextNode(unicode(item["quantity"])))
it.appendChild(it_qty)
it_unique_id = doc.createElement("merchant-item-id")
it_unique_id.appendChild(doc.createTextNode(unicode(item["id"])))
it.appendChild(it_unique_id)
checkout_flow = doc.createElement('checkout-flow-support')
root.appendChild(checkout_flow)
merchant_checkout_flow = doc.createElement('merchant-checkout-flow-support')
        checkout_flow.appendChild(merchant_checkout_flow)
return_url = doc.createElement('continue-shopping-url')
return_url.appendChild(doc.createTextNode(self.fields["return_url"]))
merchant_checkout_flow.appendChild(return_url)
cart_xml = doc.toxml(encoding="utf-8")
hmac_signature = hmac.new(settings.GOOGLE_CHECKOUT_MERCHANT_KEY,
cart_xml,
hashlib.sha1).digest()
self._signature = base64.b64encode(hmac_signature)
return base64.b64encode(cart_xml)
def signature(self):
if not self._signature:
self.generate_cart_xml()
return self._signature
@csrf_exempt_m
@require_POST_m
def gc_notify_handler(self, request):
if request.POST['_type'] == 'new-order-notification':
self.gc_new_order_notification(request)
elif request.POST['_type'] == 'order-state-change-notification':
self.gc_order_state_change_notification(request)
return HttpResponse(request.POST['serial-number'])
def gc_cart_items_blob(self, post_data):
items = post_data.getlist('shopping-cart.items')
cart_blob = ''
for item in items:
item_id = post_data.get('%s.merchant-item-id' % (item), '')
item_name = post_data.get('%s.item-name' % (item), '')
item_desc = post_data.get('%s.item-description' % (item), '')
item_price = post_data.get('%s.unit-price' % (item), '')
item_price_currency = post_data.get('%s.unit-price.currency' % (item), '')
item_quantity = post_data.get('%s.quantity' % (item), '')
cart_blob += '%(item_id)s\t%(item_name)s\t%(item_desc)s\t%(item_price)s\t%(item_price_currency)s\t%(item_quantity)s\n\n' % ({"item_id": item_id,
"item_name": item_name,
"item_desc": item_desc,
"item_price": item_price,
"item_price_currency": item_price_currency,
"item_quantity": item_quantity,})
return cart_blob
def gc_new_order_notification(self, request):
post_data = request.POST.copy()
data = {}
resp_fields = {
"_type": "notify_type",
"serial-number" : "serial_number",
"google-order-number" : "google_order_number",
"buyer-id" : "buyer_id",
"buyer-shipping-address.contact-name" : "shipping_contact_name",
"buyer-shipping-address.address1" : "shipping_address1",
"buyer-shipping-address.address2" : "shipping_address2",
"buyer-shipping-address.city" : "shipping_city",
"buyer-shipping-address.postal-code" : "shipping_postal_code",
"buyer-shipping-address.region" : "shipping_region",
"buyer-shipping-address.country-code" : "shipping_country_code",
"buyer-shipping-address.email" : "shipping_email",
"buyer-shipping-address.company-name" : "shipping_company_name",
"buyer-shipping-address.fax" : "shipping_fax",
"buyer-shipping-address.phone" : "shipping_phone",
"buyer-billing-address.contact-name" : "billing_contact_name",
"buyer-billing-address.address1" : "billing_address1",
"buyer-billing-address.address2" : "billing_address2",
"buyer-billing-address.city" : "billing_city",
"buyer-billing-address.postal-code" : "billing_postal_code",
"buyer-billing-address.region" : "billing_region",
"buyer-billing-address.country-code" : "billing_country_code",
"buyer-billing-address.email" : "billing_email",
"buyer-billing-address.company-name" : "billing_company_name",
"buyer-billing-address.fax" : "billing_fax",
"buyer-billing-address.phone" : "billing_phone",
"buyer-marketing-preferences.email-allowed" : "marketing_email_allowed",
"order-adjustment.total-tax" : "total_tax",
"order-adjustment.total-tax.currency" : "total_tax_currency",
"order-adjustment.adjustment-total" : "adjustment_total",
"order-adjustment.adjustment-total.currency" : "adjustment_total_currency",
"order-total" : "order_total",
"order-total.currency" : "order_total_currency",
"financial-order-state" : "financial_order_state",
"fulfillment-order-state" : "fulfillment_order_state",
"timestamp" : "timestamp",
}
for (key, val) in resp_fields.iteritems():
data[val] = post_data.get(key, '')
data['num_cart_items'] = len(post_data.getlist('shopping-cart.items'))
data['cart_items'] = self.gc_cart_items_blob(post_data)
try:
resp = GCNewOrderNotification.objects.create(**data)
# TODO: Make the type more generic
# TODO: The person might have got charged and yet transaction
# might have failed here. Need a better way to communicate it
transaction_was_successful.send(sender=self.__class__, type="purchase", response=resp)
status = "SUCCESS"
except:
transaction_was_unsuccessful.send(sender=self.__class__, type="purchase", response=post_data)
status = "FAILURE"
return HttpResponse(status)
def gc_order_state_change_notification(self, request):
post_data = request.POST.copy()
order = GCNewOrderNotification.objects.get(google_order_number=post_data['google-order-number'])
order.financial_order_state = post_data['new-financial-order-state']
order.fulfillment_order_state = post_data['new-fulfillment-order-state']
order.save()
def get_urls(self):
urlpatterns = patterns('',
(r'^gc-notify-handler/$', self.gc_notify_handler),
)
return urlpatterns
```
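A hedged wiring sketch for the integration above. The URL prefix and variable name are illustrative, and `GOOGLE_CHECKOUT_MERCHANT_ID` / `GOOGLE_CHECKOUT_MERCHANT_KEY` are assumed to be present in settings; the example app later in this repo shows the template-side checkout button usage:

```python
# Hypothetical urls.py of a host project: mounts gc-notify-handler/ so that
# Google Checkout can POST new-order and order-state-change notifications back.
from django.conf.urls.defaults import patterns, include

from billing.integrations.google_checkout_integration import GoogleCheckoutIntegration

google_checkout_obj = GoogleCheckoutIntegration()

urlpatterns = patterns('',
    (r'^google-checkout/', include(google_checkout_obj.get_urls())),
)
```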
#### File: example/app/views.py
```python
import datetime
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from billing import CreditCard, get_gateway
from billing.gateway import CardNotSupported
from app.forms import CreditCardForm
from app.urls import (google_checkout_obj, world_pay_obj,
pay_pal_obj, amazon_fps_obj,
fps_recur_obj, braintree_obj)
from django.conf import settings
from django.contrib.sites.models import RequestSite
def render(request, template, template_vars={}):
return render_to_response(template, template_vars, RequestContext(request))
def index(request, gateway=None):
return authorize(request)
def authorize(request):
amount = 1
response = None
if request.method == 'POST':
form = CreditCardForm(request.POST)
if form.is_valid():
data = form.cleaned_data
credit_card = CreditCard(**data)
merchant = get_gateway("authorize_net")
try:
merchant.validate_card(credit_card)
except CardNotSupported:
response = "Credit Card Not Supported"
response = merchant.purchase(amount, credit_card)
#response = merchant.recurring(amount, credit_card)
else:
form = CreditCardForm(initial={'number':'4222222222222'})
return render(request, 'app/index.html', {'form': form,
'amount': amount,
'response': response,
'title': 'Authorize'})
def paypal(request):
amount = 1
response = None
if request.method == 'POST':
form = CreditCardForm(request.POST)
if form.is_valid():
data = form.cleaned_data
credit_card = CreditCard(**data)
merchant = get_gateway("pay_pal")
try:
merchant.validate_card(credit_card)
except CardNotSupported:
response = "Credit Card Not Supported"
# response = merchant.purchase(amount, credit_card, options={'request': request})
response = merchant.recurring(amount, credit_card, options={'request': request})
else:
form = CreditCardForm(initial={'number':'4797503429879309',
'verification_value': '037',
'month': 1,
'year': 2019,
'card_type': 'visa'})
return render(request, 'app/index.html', {'form': form,
'amount': amount,
'response': response,
'title': 'Paypal'})
def eway(request):
amount = 1
response = None
if request.method == 'POST':
form = CreditCardForm(request.POST)
if form.is_valid():
data = form.cleaned_data
credit_card = CreditCard(**data)
merchant = get_gateway("eway")
try:
merchant.validate_card(credit_card)
except CardNotSupported:
response = "Credit Card Not Supported"
billing_address = {'salutation': 'Mr.',
'address1': 'test',
'address2': ' street',
'city': 'Sydney',
'state': 'NSW',
'company': 'Test Company',
'zip': '2000',
'country': 'au',
'email': '<EMAIL>',
'fax': '0267720000',
'phone': '0267720000',
'mobile': '0404085992',
'customer_ref': 'REF100',
'job_desc': 'test',
'comments': 'any',
'url': 'http://www.google.com.au',
}
response = merchant.purchase(amount, credit_card, options={'request': request, 'billing_address': billing_address})
else:
form = CreditCardForm(initial={'number':'4444333322221111',
'verification_value': '000',
'month': 7,
'year': 2012})
return render(request, 'app/index.html', {'form': form,
'amount': amount,
'response': response,
'title': 'Eway'})
def braintree(request):
amount = 1
response = None
if request.method == 'POST':
form = CreditCardForm(request.POST)
if form.is_valid():
data = form.cleaned_data
credit_card = CreditCard(**data)
merchant = get_gateway("braintree_payments")
try:
merchant.validate_card(credit_card)
except CardNotSupported:
response = "Credit Card Not Supported"
response = merchant.purchase(amount, credit_card)
else:
form = CreditCardForm(initial={'number':'4111111111111111'})
return render(request, 'app/index.html', {'form': form,
'amount': amount,
'response': response,
'title': 'Braintree Payments (S2S)'})
def offsite_paypal(request):
invoice_id = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
return_url = request.build_absolute_uri(reverse('app_offsite_paypal_done'))
cancel_return = request.build_absolute_uri(request.META['PATH_INFO'])
notify_url = request.build_absolute_uri(reverse('paypal-ipn'))
paypal_params = {'amount': 1,
'item_name': "name of the item",
'invoice': invoice_id,
'notify_url': notify_url,
'return_url': return_url,
'cancel_return': cancel_return,
}
pay_pal_obj.add_fields(paypal_params)
template_vars = {"obj": pay_pal_obj, 'title': 'PayPal Offsite'}
return render(request, 'app/offsite_paypal.html', template_vars)
def offsite_google_checkout(request):
return_url = request.build_absolute_uri(reverse('app_offsite_google_checkout_done'))
fields = {'items': [{'amount': 1,
'name': 'name of the item',
'description': 'Item description',
'id': '999AXZ',
'currency': 'USD',
'quantity': 1,
}],
'return_url': return_url,}
google_checkout_obj.add_fields(fields)
template_vars = {'title': 'Google Checkout', "gc_obj": google_checkout_obj}
return render(request, 'app/google_checkout.html', template_vars)
def offsite_world_pay(request):
fields = {"instId": settings.WORLDPAY_INSTALLATION_ID_TEST,
"cartId": "TEST123",
"currency": "USD",
"amount": 1,
"desc": "Test Item",}
world_pay_obj.add_fields(fields)
template_vars = {'title': 'WorldPay', "wp_obj": world_pay_obj}
return render(request, 'app/world_pay.html', template_vars)
def offsite_amazon_fps(request):
fields = {"transactionAmount": "100",
"pipelineName": "SingleUse",
"paymentReason": "Merchant Test",
"paymentPage": request.build_absolute_uri(),
"returnURLPrefix": RequestSite(request),
}
# Save the fps.fields["callerReference"] in the db along with
# the amount to be charged or use the user's unique id as
# the callerReference so that the amount to be charged is known
# Or save the callerReference in the session and send the user
# to FPS and then use the session value when the user is back.
amazon_fps_obj.add_fields(fields)
fields.update({"transactionAmount": "100",
"pipelineName": "Recurring",
"recurringPeriod": "1 Hour",
})
fps_recur_obj.add_fields(fields)
template_vars = {'title': 'Amazon Flexible Payment Service',
"fps_recur_obj": fps_recur_obj,
"fps_obj": amazon_fps_obj}
return render(request, 'app/amazon_fps.html', template_vars)
def offsite_braintree(request):
fields = {"transaction": {
"order_id": datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
"type": "sale",
"options": {
"submit_for_settlement": True
},
},
"site": "%s://%s" %("https" if request.is_secure() else "http",
RequestSite(request).domain)
}
braintree_obj.add_fields(fields)
template_vars = {'title': 'Braintree Payments Transparent Redirect',
"bp_obj": braintree_obj}
return render(request, "app/braintree_tr.html", template_vars)
``` |
{
"source": "josephmisiti/NewsBlur",
"score": 2
} |
#### File: apps/rss_feeds/models.py
```python
import difflib
import datetime
import time
import random
import re
import math
import mongoengine as mongo
import zlib
import hashlib
import redis
import pymongo
from collections import defaultdict
from operator import itemgetter
from bson.objectid import ObjectId
# from nltk.collocations import TrigramCollocationFinder, BigramCollocationFinder, TrigramAssocMeasures, BigramAssocMeasures
from django.db import models
from django.db import IntegrityError
from django.conf import settings
from django.db.models.query import QuerySet
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from mongoengine.queryset import OperationError, Q
from mongoengine.base import ValidationError
from vendor.timezones.utilities import localtime_for_timezone
from apps.rss_feeds.tasks import UpdateFeeds, PushFeeds
from apps.rss_feeds.text_importer import TextImporter
from apps.search.models import SearchStarredStory, SearchFeed
from apps.statistics.rstats import RStats
from utils import json_functions as json
from utils import feedfinder, feedparser
from utils import urlnorm
from utils import log as logging
from utils.fields import AutoOneToOneField
from utils.feed_functions import levenshtein_distance
from utils.feed_functions import timelimit, TimeoutError
from utils.feed_functions import relative_timesince
from utils.feed_functions import seconds_timesince
from utils.story_functions import strip_tags, htmldiff, strip_comments, strip_comments__lxml
from vendor.haystack.query import SearchQuerySet
ENTRY_NEW, ENTRY_UPDATED, ENTRY_SAME, ENTRY_ERR = range(4)
class Feed(models.Model):
feed_address = models.URLField(max_length=764, db_index=True)
feed_address_locked = models.NullBooleanField(default=False, blank=True, null=True)
feed_link = models.URLField(max_length=1000, default="", blank=True, null=True)
feed_link_locked = models.BooleanField(default=False)
hash_address_and_link = models.CharField(max_length=64, unique=True)
feed_title = models.CharField(max_length=255, default="[Untitled]", blank=True, null=True)
is_push = models.NullBooleanField(default=False, blank=True, null=True)
active = models.BooleanField(default=True, db_index=True)
num_subscribers = models.IntegerField(default=-1)
active_subscribers = models.IntegerField(default=-1, db_index=True)
premium_subscribers = models.IntegerField(default=-1)
active_premium_subscribers = models.IntegerField(default=-1)
branch_from_feed = models.ForeignKey('Feed', blank=True, null=True, db_index=True)
last_update = models.DateTimeField(db_index=True)
next_scheduled_update = models.DateTimeField()
last_story_date = models.DateTimeField(null=True, blank=True)
fetched_once = models.BooleanField(default=False)
known_good = models.BooleanField(default=False)
has_feed_exception = models.BooleanField(default=False, db_index=True)
has_page_exception = models.BooleanField(default=False, db_index=True)
has_page = models.BooleanField(default=True)
exception_code = models.IntegerField(default=0)
errors_since_good = models.IntegerField(default=0)
min_to_decay = models.IntegerField(default=0)
days_to_trim = models.IntegerField(default=90)
creation = models.DateField(auto_now_add=True)
etag = models.CharField(max_length=255, blank=True, null=True)
last_modified = models.DateTimeField(null=True, blank=True)
stories_last_month = models.IntegerField(default=0)
average_stories_per_month = models.IntegerField(default=0)
last_load_time = models.IntegerField(default=0)
favicon_color = models.CharField(max_length=6, null=True, blank=True)
favicon_not_found = models.BooleanField(default=False)
s3_page = models.NullBooleanField(default=False, blank=True, null=True)
s3_icon = models.NullBooleanField(default=False, blank=True, null=True)
class Meta:
db_table="feeds"
ordering=["feed_title"]
# unique_together=[('feed_address', 'feed_link')]
def __unicode__(self):
if not self.feed_title:
self.feed_title = "[Untitled]"
self.save()
return "%s (%s - %s/%s/%s)%s" % (
self.feed_title,
self.pk,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
(" [B: %s]" % self.branch_from_feed.pk if self.branch_from_feed else ""))
@property
def title(self):
return self.feed_title or "[Untitled]"
@property
def favicon_url(self):
if settings.BACKED_BY_AWS['icons_on_s3'] and self.s3_icon:
return "http://%s/%s.png" % (settings.S3_ICONS_BUCKET_NAME, self.pk)
return reverse('feed-favicon', kwargs={'feed_id': self.pk})
@property
def favicon_url_fqdn(self):
if settings.BACKED_BY_AWS['icons_on_s3'] and self.s3_icon:
return self.favicon_url
return "http://%s%s" % (
Site.objects.get_current().domain,
self.favicon_url
)
@property
def s3_pages_key(self):
return "%s.gz.html" % self.pk
@property
def s3_icons_key(self):
return "%s.png" % self.pk
def canonical(self, full=False, include_favicon=True):
feed = {
'id': self.pk,
'feed_title': self.feed_title,
'feed_address': self.feed_address,
'feed_link': self.feed_link,
'num_subscribers': self.num_subscribers,
'updated': relative_timesince(self.last_update),
'updated_seconds_ago': seconds_timesince(self.last_update),
'subs': self.num_subscribers,
'is_push': self.is_push,
'fetched_once': self.fetched_once,
'not_yet_fetched': not self.fetched_once, # Legacy. Doh.
'favicon_color': self.favicon_color,
'favicon_fade': self.favicon_fade(),
'favicon_border': self.favicon_border(),
'favicon_text_color': self.favicon_text_color(),
'favicon_fetching': self.favicon_fetching,
'favicon_url': self.favicon_url,
's3_page': self.s3_page,
's3_icon': self.s3_icon,
}
if include_favicon:
try:
feed_icon = MFeedIcon.objects.get(feed_id=self.pk)
feed['favicon'] = feed_icon.data
except MFeedIcon.DoesNotExist:
pass
if self.has_page_exception or self.has_feed_exception:
feed['has_exception'] = True
feed['exception_type'] = 'feed' if self.has_feed_exception else 'page'
feed['exception_code'] = self.exception_code
elif full:
feed['has_exception'] = False
feed['exception_type'] = None
feed['exception_code'] = self.exception_code
if not self.has_page:
feed['disabled_page'] = True
if full:
feed['feed_tags'] = json.decode(self.data.popular_tags) if self.data.popular_tags else []
feed['feed_authors'] = json.decode(self.data.popular_authors) if self.data.popular_authors else []
return feed
def save(self, *args, **kwargs):
if not self.last_update:
self.last_update = datetime.datetime.utcnow()
if not self.next_scheduled_update:
self.next_scheduled_update = datetime.datetime.utcnow()
self.fix_google_alerts_urls()
feed_address = self.feed_address or ""
feed_link = self.feed_link or ""
self.hash_address_and_link = hashlib.sha1(feed_address+feed_link).hexdigest()
max_feed_title = Feed._meta.get_field('feed_title').max_length
if len(self.feed_title) > max_feed_title:
self.feed_title = self.feed_title[:max_feed_title]
max_feed_address = Feed._meta.get_field('feed_address').max_length
if len(feed_address) > max_feed_address:
self.feed_address = feed_address[:max_feed_address]
max_feed_link = Feed._meta.get_field('feed_link').max_length
if len(feed_link) > max_feed_link:
self.feed_link = feed_link[:max_feed_link]
try:
super(Feed, self).save(*args, **kwargs)
except IntegrityError, e:
logging.debug(" ---> ~FRFeed save collision (%s), checking dupe..." % e)
duplicate_feeds = Feed.objects.filter(feed_address=self.feed_address,
feed_link=self.feed_link)
if not duplicate_feeds:
feed_address = self.feed_address or ""
feed_link = self.feed_link or ""
hash_address_and_link = hashlib.sha1(feed_address+feed_link).hexdigest()
duplicate_feeds = Feed.objects.filter(hash_address_and_link=hash_address_and_link)
if not duplicate_feeds:
# Feed has been deleted. Just ignore it.
logging.debug(" ***> Changed to: %s - %s: %s" % (self.feed_address, self.feed_link, duplicate_feeds))
logging.debug(' ***> [%-30s] Feed deleted (%s).' % (unicode(self)[:30], self.pk))
return
if self.pk != duplicate_feeds[0].pk:
logging.debug(" ---> ~FRFound different feed (%s), merging..." % duplicate_feeds[0])
feed = Feed.get_by_id(merge_feeds(duplicate_feeds[0].pk, self.pk))
return feed
return self
def index_for_search(self):
if self.num_subscribers > 1 and not self.branch_from_feed:
SearchFeed.index(feed_id=self.pk,
title=self.feed_title,
address=self.feed_address,
link=self.feed_link,
num_subscribers=self.num_subscribers)
def sync_redis(self):
return MStory.sync_feed_redis(self.pk)
@classmethod
def autocomplete(self, prefix, limit=5):
results = SearchQuerySet().autocomplete(address=prefix).order_by('-num_subscribers')[:limit]
if len(results) < limit:
results += SearchQuerySet().autocomplete(title=prefix).order_by('-num_subscribers')[:limit-len(results)]
return list(set([int(f.pk) for f in results]))
@classmethod
def find_or_create(cls, feed_address, feed_link, *args, **kwargs):
feeds = cls.objects.filter(feed_address=feed_address, feed_link=feed_link)
if feeds:
return feeds[0], False
if feed_link and feed_link.endswith('/'):
feeds = cls.objects.filter(feed_address=feed_address, feed_link=feed_link[:-1])
if feeds:
return feeds[0], False
return cls.objects.get_or_create(feed_address=feed_address, feed_link=feed_link, *args, **kwargs)
@classmethod
def merge_feeds(cls, *args, **kwargs):
return merge_feeds(*args, **kwargs)
def fix_google_alerts_urls(self):
if (self.feed_address.startswith('http://user/') and
'/state/com.google/alerts/' in self.feed_address):
match = re.match(r"http://user/(\d+)/state/com.google/alerts/(\d+)", self.feed_address)
if match:
user_id, alert_id = match.groups()
self.feed_address = "http://www.google.com/alerts/feeds/%s/%s" % (user_id, alert_id)
@classmethod
def schedule_feed_fetches_immediately(cls, feed_ids):
logging.info(" ---> ~SN~FMScheduling immediate fetch of ~SB%s~SN feeds..." %
len(feed_ids))
feeds = Feed.objects.filter(pk__in=feed_ids)
for feed in feeds:
feed.count_subscribers()
feed.schedule_feed_fetch_immediately(verbose=False)
@property
def favicon_fetching(self):
return bool(not (self.favicon_not_found or self.favicon_color))
@classmethod
def get_feed_from_url(cls, url, create=True, aggressive=False, fetch=True, offset=0):
feed = None
def criteria(key, value):
if aggressive:
return {'%s__icontains' % key: value}
else:
return {'%s' % key: value}
def by_url(address):
feed = cls.objects.filter(
branch_from_feed=None
).filter(**criteria('feed_address', address)).order_by('-num_subscribers')
if not feed:
duplicate_feed = DuplicateFeed.objects.filter(**criteria('duplicate_address', address))
if duplicate_feed and len(duplicate_feed) > offset:
feed = [duplicate_feed[offset].feed]
if not feed and aggressive:
feed = cls.objects.filter(
branch_from_feed=None
).filter(**criteria('feed_link', address)).order_by('-num_subscribers')
return feed
# Normalize and check for feed_address, dupes, and feed_link
url = urlnorm.normalize(url)
feed = by_url(url)
# Create if it looks good
if feed and len(feed) > offset:
feed = feed[offset]
elif create:
create_okay = False
if feedfinder.isFeed(url):
create_okay = True
elif aggressive:
# Could still be a feed. Just check if there are entries
fp = feedparser.parse(url)
if len(fp.entries):
create_okay = True
if create_okay:
feed = cls.objects.create(feed_address=url)
feed = feed.update()
# Still nothing? Maybe the URL has some clues.
if not feed and fetch:
feed_finder_url = feedfinder.feed(url)
if feed_finder_url and 'comments' not in feed_finder_url:
feed = by_url(feed_finder_url)
if not feed and create:
feed = cls.objects.create(feed_address=feed_finder_url)
feed = feed.update()
elif feed and len(feed) > offset:
feed = feed[offset]
# Not created and not within bounds, so toss results.
if isinstance(feed, QuerySet):
return
return feed
@classmethod
def task_feeds(cls, feeds, queue_size=12, verbose=True):
if not feeds: return
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
if isinstance(feeds, Feed):
if verbose:
logging.debug(" ---> ~SN~FBTasking feed: ~SB%s" % feeds)
feeds = [feeds.pk]
elif verbose:
logging.debug(" ---> ~SN~FBTasking ~SB%s~SN feeds..." % len(feeds))
if isinstance(feeds, QuerySet):
feeds = [f.pk for f in feeds]
r.srem('queued_feeds', *feeds)
now = datetime.datetime.now().strftime("%s")
p = r.pipeline()
for feed_id in feeds:
p.zadd('tasked_feeds', feed_id, now)
p.execute()
# for feed_ids in (feeds[pos:pos + queue_size] for pos in xrange(0, len(feeds), queue_size)):
for feed_id in feeds:
UpdateFeeds.apply_async(args=(feed_id,), queue='update_feeds')
@classmethod
def drain_task_feeds(cls, empty=False):
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
if not empty:
tasked_feeds = r.zrange('tasked_feeds', 0, -1)
r.sadd('queued_feeds', *tasked_feeds)
r.zremrangebyrank('tasked_feeds', 0, -1)
def update_all_statistics(self, full=True, force=False):
self.count_subscribers()
if not self.last_story_date:
self.calculate_last_story_date()
count_extra = False
if random.random() > .99 or not self.data.popular_tags or not self.data.popular_authors:
count_extra = True
if force or full:
self.save_feed_stories_last_month()
if force or (full and count_extra):
self.save_popular_authors()
self.save_popular_tags()
self.save_feed_story_history_statistics()
def calculate_last_story_date(self):
last_story_date = None
try:
latest_story = MStory.objects(
story_feed_id=self.pk
).limit(1).order_by('-story_date').only('story_date').first()
if latest_story:
last_story_date = latest_story.story_date
except MStory.DoesNotExist:
pass
if not last_story_date or seconds_timesince(last_story_date) < 0:
last_story_date = datetime.datetime.now()
self.last_story_date = last_story_date
self.save()
@classmethod
def setup_feeds_for_premium_subscribers(cls, feed_ids):
logging.info(" ---> ~SN~FMScheduling immediate premium setup of ~SB%s~SN feeds..." %
len(feed_ids))
feeds = Feed.objects.filter(pk__in=feed_ids)
for feed in feeds:
feed.setup_feed_for_premium_subscribers()
def setup_feed_for_premium_subscribers(self):
self.count_subscribers()
self.set_next_scheduled_update()
def check_feed_link_for_feed_address(self):
@timelimit(10)
def _1():
feed_address = None
feed = self
try:
is_feed = feedfinder.isFeed(self.feed_address)
except KeyError:
is_feed = False
if not is_feed:
feed_address = feedfinder.feed(self.feed_address)
if not feed_address and self.feed_link:
feed_address = feedfinder.feed(self.feed_link)
else:
feed_address_from_link = feedfinder.feed(self.feed_link)
if feed_address_from_link != self.feed_address:
feed_address = feed_address_from_link
if feed_address:
if (feed_address.endswith('feedburner.com/atom.xml') or
feed_address.endswith('feedburner.com/feed/')):
logging.debug(" ---> Feed points to 'Wierdo', ignoring.")
return False, self
try:
self.feed_address = feed_address
feed = self.save()
feed.schedule_feed_fetch_immediately()
feed.has_feed_exception = False
feed.active = True
feed = feed.save()
except IntegrityError:
original_feed = Feed.objects.get(feed_address=feed_address, feed_link=self.feed_link)
original_feed.has_feed_exception = False
original_feed.active = True
original_feed.save()
merge_feeds(original_feed.pk, self.pk)
return feed_address, feed
if self.feed_address_locked:
return False, self
try:
feed_address, feed = _1()
except TimeoutError, e:
logging.debug(' ---> [%-30s] Feed address check timed out...' % (unicode(self)[:30]))
self.save_feed_history(505, 'Timeout', e)
feed = self
feed_address = None
return bool(feed_address), feed
def save_feed_history(self, status_code, message, exception=None):
MFetchHistory.add(feed_id=self.pk,
fetch_type='feed',
code=int(status_code),
message=message,
exception=exception)
if status_code not in (200, 304):
self.errors_since_good += 1
self.count_errors_in_history('feed', status_code)
self.set_next_scheduled_update()
elif self.has_feed_exception or self.errors_since_good:
self.errors_since_good = 0
self.has_feed_exception = False
self.active = True
self.save()
def save_page_history(self, status_code, message, exception=None):
MFetchHistory.add(feed_id=self.pk,
fetch_type='page',
code=int(status_code),
message=message,
exception=exception)
if status_code not in (200, 304):
self.count_errors_in_history('page', status_code)
elif self.has_page_exception:
self.has_page_exception = False
self.has_page = True
self.active = True
self.save()
def count_errors_in_history(self, exception_type='feed', status_code=None):
logging.debug(' ---> [%-30s] Counting errors in history...' % (unicode(self)[:30]))
fetch_history = MFetchHistory.feed(self.pk)
fh = fetch_history[exception_type + '_fetch_history']
non_errors = [h for h in fh if h['status_code'] and int(h['status_code']) in (200, 304)]
errors = [h for h in fh if h['status_code'] and int(h['status_code']) not in (200, 304)]
if len(non_errors) == 0 and len(errors) > 1:
self.active = True
if exception_type == 'feed':
self.has_feed_exception = True
# self.active = False # No longer, just geometrically fetch
elif exception_type == 'page':
self.has_page_exception = True
self.exception_code = status_code or int(errors[0])
self.save()
elif self.exception_code > 0:
self.active = True
self.exception_code = 0
if exception_type == 'feed':
self.has_feed_exception = False
elif exception_type == 'page':
self.has_page_exception = False
self.save()
return errors, non_errors
def count_subscribers(self, verbose=False):
SUBSCRIBER_EXPIRE = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
from apps.reader.models import UserSubscription
if self.branch_from_feed:
original_feed_id = self.branch_from_feed.pk
else:
original_feed_id = self.pk
feed_ids = [f['id'] for f in Feed.objects.filter(branch_from_feed=original_feed_id).values('id')]
feed_ids.append(original_feed_id)
feed_ids = list(set(feed_ids))
subs = UserSubscription.objects.filter(feed__in=feed_ids)
self.num_subscribers = subs.count()
active_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__last_seen_on__gte=SUBSCRIBER_EXPIRE
)
self.active_subscribers = active_subs.count()
premium_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_premium=True
)
self.premium_subscribers = premium_subs.count()
active_premium_subscribers = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_premium=True,
user__profile__last_seen_on__gte=SUBSCRIBER_EXPIRE
)
self.active_premium_subscribers = active_premium_subscribers.count()
self.save()
if verbose:
if self.num_subscribers <= 1:
print '.',
else:
print "\n %s> %s subscriber%s: %s" % (
'-' * min(self.num_subscribers, 20),
self.num_subscribers,
'' if self.num_subscribers == 1 else 's',
self.feed_title,
),
def _split_favicon_color(self):
color = self.favicon_color
if color:
splitter = lambda s, p: [s[i:i+p] for i in range(0, len(s), p)]
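            # e.g. 'FF8800' -> ['FF', '88', '00']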
red, green, blue = splitter(color[:6], 2)
return red, green, blue
return None, None, None
def favicon_fade(self):
red, green, blue = self._split_favicon_color()
if red and green and blue:
fade_red = hex(min(int(red, 16) + 35, 255))[2:].zfill(2)
fade_green = hex(min(int(green, 16) + 35, 255))[2:].zfill(2)
fade_blue = hex(min(int(blue, 16) + 35, 255))[2:].zfill(2)
return "%s%s%s" % (fade_red, fade_green, fade_blue)
def favicon_border(self):
red, green, blue = self._split_favicon_color()
if red and green and blue:
fade_red = hex(min(int(int(red, 16) * .75), 255))[2:].zfill(2)
fade_green = hex(min(int(int(green, 16) * .75), 255))[2:].zfill(2)
fade_blue = hex(min(int(int(blue, 16) * .75), 255))[2:].zfill(2)
return "%s%s%s" % (fade_red, fade_green, fade_blue)
def favicon_text_color(self):
# Color format: {r: 1, g: .5, b: 0}
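        # Chooses white or black favicon text by comparing WCAG-style contrast
        # ratios, (L1 + 0.05) / (L2 + 0.05) over linearized sRGB luminance,
        # against white and mid-grey reference colors.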
def contrast(color1, color2):
lum1 = luminosity(color1)
lum2 = luminosity(color2)
if lum1 > lum2:
return (lum1 + 0.05) / (lum2 + 0.05)
else:
return (lum2 + 0.05) / (lum1 + 0.05)
def luminosity(color):
r = color['red']
g = color['green']
b = color['blue']
val = lambda c: c/12.92 if c <= 0.02928 else math.pow(((c + 0.055)/1.055), 2.4)
red = val(r)
green = val(g)
blue = val(b)
return 0.2126 * red + 0.7152 * green + 0.0722 * blue
red, green, blue = self._split_favicon_color()
if red and green and blue:
color = {
'red': int(red, 16) / 256.0,
'green': int(green, 16) / 256.0,
'blue': int(blue, 16) / 256.0,
}
white = {
'red': 1,
'green': 1,
'blue': 1,
}
grey = {
'red': 0.5,
'green': 0.5,
'blue': 0.5,
}
if contrast(color, white) > contrast(color, grey):
return 'white'
else:
return 'black'
def save_feed_stories_last_month(self, verbose=False):
month_ago = datetime.datetime.utcnow() - datetime.timedelta(days=30)
stories_last_month = MStory.objects(story_feed_id=self.pk,
story_date__gte=month_ago).count()
self.stories_last_month = stories_last_month
self.save()
if verbose:
print " ---> %s [%s]: %s stories last month" % (self.feed_title, self.pk,
self.stories_last_month)
def save_feed_story_history_statistics(self, current_counts=None):
"""
Fills in missing months between earlier occurances and now.
        Save format: [('YYYY-MM', #), ...]
Example output: [(2010-12, 123), (2011-01, 146)]
"""
now = datetime.datetime.utcnow()
min_year = now.year
total = 0
month_count = 0
if not current_counts:
current_counts = self.data.story_count_history and json.decode(self.data.story_count_history)
if not current_counts:
current_counts = []
# Count stories, aggregate by year and month. Map Reduce!
map_f = """
function() {
var date = (this.story_date.getFullYear()) + "-" + (this.story_date.getMonth()+1);
emit(date, 1);
}
"""
reduce_f = """
function(key, values) {
var total = 0;
for (var i=0; i < values.length; i++) {
total += values[i];
}
return total;
}
"""
dates = {}
res = MStory.objects(story_feed_id=self.pk).map_reduce(map_f, reduce_f, output='inline')
for r in res:
dates[r.key] = r.value
year = int(re.findall(r"(\d{4})-\d{1,2}", r.key)[0])
if year < min_year and year > 2000:
min_year = year
# Add on to existing months, always amending up, never down. (Current month
# is guaranteed to be accurate, since trim_feeds won't delete it until after
# a month. Hacker News can have 1,000+ and still be counted.)
for current_month, current_count in current_counts:
year = int(re.findall(r"(\d{4})-\d{1,2}", current_month)[0])
if current_month not in dates or dates[current_month] < current_count:
dates[current_month] = current_count
if year < min_year and year > 2000:
min_year = year
# Assemble a list with 0's filled in for missing months,
# trimming left and right 0's.
months = []
start = False
for year in range(min_year, now.year+1):
for month in range(1, 12+1):
if datetime.datetime(year, month, 1) < now:
key = u'%s-%s' % (year, month)
if dates.get(key) or start:
start = True
months.append((key, dates.get(key, 0)))
total += dates.get(key, 0)
month_count += 1
self.data.story_count_history = json.encode(months)
self.data.save()
if not total or not month_count:
self.average_stories_per_month = 0
else:
self.average_stories_per_month = int(round(total / float(month_count)))
self.save()
def save_classifier_counts(self):
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
def calculate_scores(cls, facet):
map_f = """
function() {
emit(this["%s"], {
pos: this.score>0 ? this.score : 0,
neg: this.score<0 ? Math.abs(this.score) : 0
});
}
""" % (facet)
reduce_f = """
function(key, values) {
var result = {pos: 0, neg: 0};
values.forEach(function(value) {
result.pos += value.pos;
result.neg += value.neg;
});
return result;
}
"""
scores = []
res = cls.objects(feed_id=self.pk).map_reduce(map_f, reduce_f, output='inline')
for r in res:
facet_values = dict([(k, int(v)) for k,v in r.value.iteritems()])
facet_values[facet] = r.key
if facet_values['pos'] + facet_values['neg'] > 1:
scores.append(facet_values)
scores = sorted(scores, key=lambda v: v['neg'] - v['pos'])
return scores
scores = {}
for cls, facet in [(MClassifierTitle, 'title'),
(MClassifierAuthor, 'author'),
(MClassifierTag, 'tag'),
(MClassifierFeed, 'feed_id')]:
scores[facet] = calculate_scores(cls, facet)
if facet == 'feed_id' and scores[facet]:
scores['feed'] = scores[facet]
del scores['feed_id']
elif not scores[facet]:
del scores[facet]
if scores:
self.data.feed_classifier_counts = json.encode(scores)
self.data.save()
def update(self, **kwargs):
from utils import feed_fetcher
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
original_feed_id = int(self.pk)
if getattr(settings, 'TEST_DEBUG', False):
self.feed_address = self.feed_address % {'NEWSBLUR_DIR': settings.NEWSBLUR_DIR}
self.feed_link = self.feed_link % {'NEWSBLUR_DIR': settings.NEWSBLUR_DIR}
self.save()
options = {
'verbose': kwargs.get('verbose'),
'timeout': 10,
'single_threaded': kwargs.get('single_threaded', True),
'force': kwargs.get('force'),
'compute_scores': kwargs.get('compute_scores', True),
'mongodb_replication_lag': kwargs.get('mongodb_replication_lag', None),
'fake': kwargs.get('fake'),
'quick': kwargs.get('quick'),
'debug': kwargs.get('debug'),
'fpf': kwargs.get('fpf'),
'feed_xml': kwargs.get('feed_xml'),
}
disp = feed_fetcher.Dispatcher(options, 1)
disp.add_jobs([[self.pk]])
feed = disp.run_jobs()
if feed:
feed = Feed.get_by_id(feed.pk)
if feed:
feed.last_update = datetime.datetime.utcnow()
feed.set_next_scheduled_update()
r.zadd('fetched_feeds_last_hour', feed.pk, int(datetime.datetime.now().strftime('%s')))
if not feed or original_feed_id != feed.pk:
logging.info(" ---> ~FRFeed changed id, removing %s from tasked_feeds queue..." % original_feed_id)
r.zrem('tasked_feeds', original_feed_id)
r.zrem('error_feeds', original_feed_id)
if feed:
r.zrem('tasked_feeds', feed.pk)
r.zrem('error_feeds', feed.pk)
return feed
@classmethod
def get_by_id(cls, feed_id, feed_address=None):
try:
feed = Feed.objects.get(pk=feed_id)
return feed
except Feed.DoesNotExist:
# Feed has been merged after updating. Find the right feed.
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feeds:
return duplicate_feeds[0].feed
if feed_address:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_address=feed_address)
if duplicate_feeds:
return duplicate_feeds[0].feed
@classmethod
def get_by_name(cls, query, limit=1):
results = SearchFeed.query(query)
feed_ids = [result.feed_id for result in results]
if limit == 1:
return Feed.get_by_id(feed_ids[0])
else:
return [Feed.get_by_id(f) for f in feed_ids][:limit]
def add_update_stories(self, stories, existing_stories, verbose=False):
ret_values = dict(new=0, updated=0, same=0, error=0)
error_count = self.error_count
if settings.DEBUG or verbose:
logging.debug(" ---> [%-30s] ~FBChecking ~SB%s~SN new/updated against ~SB%s~SN stories" % (
self.title[:30],
len(stories),
len(existing_stories.keys())))
for story in stories:
if not story.get('title'):
continue
story_content = story.get('story_content')
if error_count:
story_content = strip_comments__lxml(story_content)
else:
story_content = strip_comments(story_content)
story_tags = self.get_tags(story)
story_link = self.get_permalink(story)
existing_story, story_has_changed = self._exists_story(story, story_content, existing_stories)
if existing_story is None:
if settings.DEBUG and False:
logging.debug(' ---> New story in feed (%s - %s): %s' % (self.feed_title, story.get('title'), len(story_content)))
s = MStory(story_feed_id = self.pk,
story_date = story.get('published'),
story_title = story.get('title'),
story_content = story_content,
story_author_name = story.get('author'),
story_permalink = story_link,
story_guid = story.get('guid'),
story_tags = story_tags
)
try:
s.save()
ret_values['new'] += 1
except (IntegrityError, OperationError), e:
ret_values['error'] += 1
if settings.DEBUG:
logging.info(' ---> [%-30s] ~SN~FRIntegrityError on new story: %s - %s' % (self.feed_title[:30], story.get('guid'), e))
elif existing_story and story_has_changed:
# update story
original_content = None
try:
if existing_story and existing_story.id:
try:
existing_story = MStory.objects.get(id=existing_story.id)
except ValidationError:
existing_story, _ = MStory.find_story(existing_story.story_feed_id,
existing_story.id,
original_only=True)
elif existing_story and existing_story.story_guid:
existing_story, _ = MStory.find_story(existing_story.story_feed_id,
existing_story.story_guid,
original_only=True)
else:
raise MStory.DoesNotExist
except (MStory.DoesNotExist, OperationError), e:
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FROperation on existing story: %s - %s' % (self.feed_title[:30], story.get('guid'), e))
continue
if existing_story.story_original_content_z:
original_content = zlib.decompress(existing_story.story_original_content_z)
elif existing_story.story_content_z:
original_content = zlib.decompress(existing_story.story_content_z)
# print 'Type: %s %s' % (type(original_content), type(story_content))
if story_content and len(story_content) > 10:
story_content_diff = htmldiff(unicode(original_content), unicode(story_content))
else:
story_content_diff = original_content
# logging.debug("\t\tDiff: %s %s %s" % diff.getStats())
# logging.debug("\t\tDiff content: %s" % diff.getDiff())
# if existing_story.story_title != story.get('title'):
# logging.debug('\tExisting title / New: : \n\t\t- %s\n\t\t- %s' % (existing_story.story_title, story.get('title')))
if existing_story.story_guid != story.get('guid'):
self.update_read_stories_with_new_guid(existing_story.story_guid, story.get('guid'))
if settings.DEBUG and False:
logging.debug('- Updated story in feed (%s - %s): %s / %s' % (self.feed_title, story.get('title'), len(story_content_diff), len(story_content)))
existing_story.story_feed = self.pk
existing_story.story_title = story.get('title')
existing_story.story_content = story_content_diff
existing_story.story_latest_content = story_content
existing_story.story_original_content = original_content
existing_story.story_author_name = story.get('author')
existing_story.story_permalink = story_link
existing_story.story_guid = story.get('guid')
existing_story.story_tags = story_tags
# Do not allow publishers to change the story date once a story is published.
# Leads to incorrect unread story counts.
# existing_story.story_date = story.get('published') # No, don't
try:
existing_story.save()
ret_values['updated'] += 1
except (IntegrityError, OperationError):
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FRIntegrityError on updated story: %s' % (self.feed_title[:30], story.get('title')[:30]))
except ValidationError:
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FRValidationError on updated story: %s' % (self.feed_title[:30], story.get('title')[:30]))
else:
ret_values['same'] += 1
# logging.debug("Unchanged story: %s " % story.get('title'))
return ret_values
def update_read_stories_with_new_guid(self, old_story_guid, new_story_guid):
from apps.reader.models import RUserStory
from apps.social.models import MSharedStory
old_hash = RUserStory.story_hash(old_story_guid, self.pk)
new_hash = RUserStory.story_hash(new_story_guid, self.pk)
# RUserStory.switch_hash(feed_id=self.pk, old_hash=old_hash, new_hash=new_hash)
shared_stories = MSharedStory.objects.filter(story_feed_id=self.pk,
story_guid=old_story_guid)
for story in shared_stories:
story.story_guid = new_story_guid
story.save()
def save_popular_tags(self, feed_tags=None, verbose=False):
if not feed_tags:
all_tags = MStory.objects(story_feed_id=self.pk,
story_tags__exists=True).item_frequencies('story_tags')
feed_tags = sorted([(k, v) for k, v in all_tags.items() if int(v) > 0],
key=itemgetter(1),
reverse=True)[:25]
popular_tags = json.encode(feed_tags)
if verbose:
print "Found %s tags: %s" % (len(feed_tags), popular_tags)
# TODO: This len() bullshit will be gone when feeds move to mongo
# On second thought, it might stay, because we don't want
# popular tags the size of a small planet. I'm looking at you
# Tumblr writers.
if len(popular_tags) < 1024:
self.data.popular_tags = popular_tags
self.data.save()
return
tags_list = []
if feed_tags and isinstance(feed_tags, unicode):
tags_list = json.decode(feed_tags)
if len(tags_list) >= 1:
self.save_popular_tags(tags_list[:-1])
def save_popular_authors(self, feed_authors=None):
if not feed_authors:
authors = defaultdict(int)
for story in MStory.objects(story_feed_id=self.pk).only('story_author_name'):
authors[story.story_author_name] += 1
feed_authors = sorted([(k, v) for k, v in authors.items() if k],
key=itemgetter(1),
reverse=True)[:20]
popular_authors = json.encode(feed_authors)
if len(popular_authors) < 1023:
self.data.popular_authors = popular_authors
self.data.save()
return
if len(feed_authors) > 1:
self.save_popular_authors(feed_authors=feed_authors[:-1])
def trim_feed(self, verbose=False):
trim_cutoff = 500
if self.active_subscribers <= 0:
trim_cutoff = 25
elif self.num_subscribers <= 10 or self.active_premium_subscribers <= 1:
trim_cutoff = 100
elif self.num_subscribers <= 30 or self.active_premium_subscribers <= 3:
trim_cutoff = 200
elif self.num_subscribers <= 50 or self.active_premium_subscribers <= 5:
trim_cutoff = 300
elif self.num_subscribers <= 100 or self.active_premium_subscribers <= 10:
trim_cutoff = 350
elif self.num_subscribers <= 150 or self.active_premium_subscribers <= 15:
trim_cutoff = 400
elif self.num_subscribers <= 200 or self.active_premium_subscribers <= 20:
trim_cutoff = 450
stories = MStory.objects(
story_feed_id=self.pk,
).order_by('-story_date')
if stories.count() > trim_cutoff:
logging.debug(' ---> [%-30s] ~FBFound %s stories. Trimming to ~SB%s~SN...' %
(unicode(self)[:30], stories.count(), trim_cutoff))
try:
story_trim_date = stories[trim_cutoff].story_date
except IndexError, e:
logging.debug(' ***> [%-30s] ~BRError trimming feed: %s' % (unicode(self)[:30], e))
return
extra_stories = MStory.objects(story_feed_id=self.pk,
story_date__lte=story_trim_date)
extra_stories_count = extra_stories.count()
for story in extra_stories:
story.delete()
if verbose:
existing_story_count = MStory.objects(story_feed_id=self.pk).count()
print "Deleted %s stories, %s left." % (extra_stories_count,
existing_story_count)
# @staticmethod
# def clean_invalid_ids():
# history = MFeedFetchHistory.objects(status_code=500, exception__contains='InvalidId:')
# urls = set()
# for h in history:
# u = re.split('InvalidId: (.*?) is not a valid ObjectId\\n$', h.exception)[1]
# urls.add((h.feed_id, u))
#
# for f, u in urls:
# print "db.stories.remove({\"story_feed_id\": %s, \"_id\": \"%s\"})" % (f, u)
def get_stories(self, offset=0, limit=25, force=False):
stories_db = MStory.objects(story_feed_id=self.pk)[offset:offset+limit]
stories = self.format_stories(stories_db, self.pk)
return stories
def find_stories(self, query, offset=0, limit=25):
stories_db = MStory.objects(
Q(story_feed_id=self.pk) &
(Q(story_title__icontains=query) |
Q(story_content__icontains=query) |
Q(story_author_name__icontains=query))
).order_by('-starred_date')[offset:offset+limit]
stories = self.format_stories(stories_db, self.pk)
return stories
@classmethod
def format_stories(cls, stories_db, feed_id=None, include_permalinks=False):
stories = []
for story_db in stories_db:
story = cls.format_story(story_db, feed_id, include_permalinks=include_permalinks)
stories.append(story)
return stories
@classmethod
def format_story(cls, story_db, feed_id=None, text=False, include_permalinks=False):
if isinstance(story_db.story_content_z, unicode):
story_db.story_content_z = story_db.story_content_z.decode('base64')
story_content = story_db.story_content_z and zlib.decompress(story_db.story_content_z) or ''
story = {}
story['story_hash'] = getattr(story_db, 'story_hash', None)
story['story_tags'] = story_db.story_tags or []
story['story_date'] = story_db.story_date.replace(tzinfo=None)
story['story_authors'] = story_db.story_author_name
story['story_title'] = story_db.story_title
story['story_content'] = story_content
story['story_permalink'] = story_db.story_permalink
story['story_feed_id'] = feed_id or story_db.story_feed_id
story['comment_count'] = story_db.comment_count if hasattr(story_db, 'comment_count') else 0
story['comment_user_ids'] = story_db.comment_user_ids if hasattr(story_db, 'comment_user_ids') else []
story['share_count'] = story_db.share_count if hasattr(story_db, 'share_count') else 0
story['share_user_ids'] = story_db.share_user_ids if hasattr(story_db, 'share_user_ids') else []
story['guid_hash'] = story_db.guid_hash if hasattr(story_db, 'guid_hash') else None
if hasattr(story_db, 'source_user_id'):
story['source_user_id'] = story_db.source_user_id
story['id'] = story_db.story_guid or story_db.story_date
if hasattr(story_db, 'starred_date'):
story['starred_date'] = story_db.starred_date
if hasattr(story_db, 'shared_date'):
story['shared_date'] = story_db.shared_date
if include_permalinks and hasattr(story_db, 'blurblog_permalink'):
story['blurblog_permalink'] = story_db.blurblog_permalink()
if text:
from BeautifulSoup import BeautifulSoup
soup = BeautifulSoup(story['story_content'])
text = ''.join(soup.findAll(text=True))
text = re.sub(r'\n+', '\n\n', text)
text = re.sub(r'\t+', '\t', text)
story['text'] = text
if '<ins' in story['story_content'] or '<del' in story['story_content']:
story['has_modifications'] = True
return story
def get_tags(self, entry):
fcat = []
if entry.has_key('tags'):
for tcat in entry.tags:
term = None
if hasattr(tcat, 'label') and tcat.label:
term = tcat.label
elif hasattr(tcat, 'term') and tcat.term:
term = tcat.term
if not term:
continue
qcat = term.strip()
if ',' in qcat or '/' in qcat:
qcat = qcat.replace(',', '/').split('/')
else:
qcat = [qcat]
for zcat in qcat:
tagname = zcat.lower()
while ' ' in tagname:
tagname = tagname.replace(' ', ' ')
tagname = tagname.strip()
if not tagname or tagname == ' ':
continue
fcat.append(tagname)
fcat = [strip_tags(t)[:250] for t in fcat[:12]]
return fcat
def get_permalink(self, entry):
link = entry.get('link')
if not link:
links = entry.get('links')
if links:
link = links[0].get('href')
if not link:
link = entry.get('id')
return link
def _exists_story(self, story=None, story_content=None, existing_stories=None):
story_in_system = None
story_has_changed = False
story_link = self.get_permalink(story)
existing_stories_guids = existing_stories.keys()
# story_pub_date = story.get('published')
# story_published_now = story.get('published_now', False)
# start_date = story_pub_date - datetime.timedelta(hours=8)
# end_date = story_pub_date + datetime.timedelta(hours=8)
for existing_story in existing_stories.values():
content_ratio = 0
# existing_story_pub_date = existing_story.story_date
# print 'Story pub date: %s %s' % (story_published_now, story_pub_date)
if 'story_latest_content_z' in existing_story:
existing_story_content = unicode(zlib.decompress(existing_story.story_latest_content_z))
elif 'story_latest_content' in existing_story:
existing_story_content = existing_story.story_latest_content
elif 'story_content_z' in existing_story:
existing_story_content = unicode(zlib.decompress(existing_story.story_content_z))
elif 'story_content' in existing_story:
existing_story_content = existing_story.story_content
else:
existing_story_content = u''
if isinstance(existing_story.id, unicode):
existing_story.story_guid = existing_story.id
if (story.get('guid') in existing_stories_guids and
story.get('guid') != existing_story.story_guid):
continue
elif story.get('guid') == existing_story.story_guid:
story_in_system = existing_story
# Title distance + content distance, checking if story changed
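                # (Editor's note) The cheap real_quick_ratio()/quick_ratio() checks
                # below gate the expensive SequenceMatcher.ratio() call; the story is
                # only flagged as changed when the title differs or the content ratio
                # drops below 1.0.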
story_title_difference = abs(levenshtein_distance(story.get('title'),
existing_story.story_title))
seq = difflib.SequenceMatcher(None, story_content, existing_story_content)
if (seq
and story_content
and existing_story_content
and seq.real_quick_ratio() > .9
and seq.quick_ratio() > .95):
content_ratio = seq.ratio()
if story_title_difference > 0 and content_ratio > .98:
story_in_system = existing_story
if story_title_difference > 0 or content_ratio < 1.0:
# print "Title difference - %s/%s (%s): %s" % (story.get('title'), existing_story.story_title, story_title_difference, content_ratio)
story_has_changed = True
break
# More restrictive content distance, still no story match
if not story_in_system and content_ratio > .98:
# print "Content difference - %s/%s (%s): %s" % (story.get('title'), existing_story.story_title, story_title_difference, content_ratio)
story_in_system = existing_story
story_has_changed = True
break
if story_in_system and not story_has_changed:
if story_content != existing_story_content:
# print "Content difference - %s/%s" % (story_content, existing_story_content)
story_has_changed = True
if story_link != existing_story.story_permalink:
# print "Permalink difference - %s/%s" % (story_link, existing_story.story_permalink)
story_has_changed = True
# if story_pub_date != existing_story.story_date:
# story_has_changed = True
break
# if story_has_changed or not story_in_system:
# print 'New/updated story: %s' % (story),
return story_in_system, story_has_changed
def get_next_scheduled_update(self, force=False, verbose=True):
if self.min_to_decay and not force:
return self.min_to_decay
upd = self.stories_last_month / 30.0
subs = (self.active_premium_subscribers +
((self.active_subscribers - self.active_premium_subscribers) / 10.0))
# UPD = 1 Subs > 1: t = 5 # 11625 * 1440/5 = 3348000
# UPD = 1 Subs = 1: t = 60 # 17231 * 1440/60 = 413544
# UPD < 1 Subs > 1: t = 60 # 37904 * 1440/60 = 909696
# UPD < 1 Subs = 1: t = 60 * 12 # 143012 * 1440/(60*12) = 286024
# UPD = 0 Subs > 1: t = 60 * 3 # 28351 * 1440/(60*3) = 226808
# UPD = 0 Subs = 1: t = 60 * 24 # 807690 * 1440/(60*24) = 807690
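        # Illustrative walk-through (editor's note, not in the original source):
        # a feed with 45 stories last month (upd = 1.5) whose 3 active premium and
        # 5 active subscribers give subs = 3 + (5 - 3) / 10.0 = 3.2 lands in the
        # first branch below, so total = 10 minutes before the last-story, push
        # and 3-day-cap adjustments are applied.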
if upd >= 1:
if subs > 1:
total = 10
else:
total = 60
elif upd > 0:
if subs > 1:
total = 60 - (upd * 60)
else:
total = 60*12 - (upd * 60*12)
elif upd == 0:
if subs > 1:
total = 60 * 6
else:
total = 60 * 24
        months_since_last_story = seconds_timesince(self.last_story_date) / (60*60*24*30)
total *= max(1, months_since_last_story)
# updates_per_day_delay = 3 * 60 / max(.25, ((max(0, self.active_subscribers)**.2)
# * (self.stories_last_month**0.25)))
# if self.active_premium_subscribers > 0:
# updates_per_day_delay /= min(self.active_subscribers+self.active_premium_subscribers, 4)
# updates_per_day_delay = int(updates_per_day_delay)
# Lots of subscribers = lots of updates
# 24 hours for 0 subscribers.
# 4 hours for 1 subscriber.
# .5 hours for 2 subscribers.
# .25 hours for 3 subscribers.
# 1 min for 10 subscribers.
# subscriber_bonus = 6 * 60 / max(.167, max(0, self.active_subscribers)**3)
# if self.premium_subscribers > 0:
# subscriber_bonus /= min(self.active_subscribers+self.premium_subscribers, 5)
# subscriber_bonus = int(subscriber_bonus)
if self.is_push:
total = total * 12
# 3 day max
if total > 60*24*3:
total = 60*24*3
if verbose:
logging.debug(" ---> [%-30s] Fetched every %s min - Subs: %s/%s/%s Stories: %s" % (
unicode(self)[:30], total,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
upd))
return total
def set_next_scheduled_update(self, verbose=False, skip_scheduling=False):
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
total = self.get_next_scheduled_update(force=True, verbose=verbose)
error_count = self.error_count
if error_count:
total = total * error_count
if verbose:
logging.debug(' ---> [%-30s] ~FBScheduling feed fetch geometrically: '
'~SB%s errors. Time: %s min' % (
unicode(self)[:30], self.errors_since_good, total))
random_factor = random.randint(0, total) / 4
next_scheduled_update = datetime.datetime.utcnow() + datetime.timedelta(
minutes = total + random_factor)
self.min_to_decay = total
delta = self.next_scheduled_update - datetime.datetime.now()
minutes_to_next_fetch = delta.total_seconds() / 60
if minutes_to_next_fetch > self.min_to_decay or not skip_scheduling:
self.next_scheduled_update = next_scheduled_update
if self.active_subscribers >= 1:
r.zadd('scheduled_updates', self.pk, self.next_scheduled_update.strftime('%s'))
r.zrem('tasked_feeds', self.pk)
r.srem('queued_feeds', self.pk)
self.save()
@property
def error_count(self):
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
fetch_errors = int(r.zscore('error_feeds', self.pk) or 0)
return fetch_errors + self.errors_since_good
def schedule_feed_fetch_immediately(self, verbose=True):
r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
if verbose:
logging.debug(' ---> [%-30s] Scheduling feed fetch immediately...' % (unicode(self)[:30]))
self.next_scheduled_update = datetime.datetime.utcnow()
r.zadd('scheduled_updates', self.pk, self.next_scheduled_update.strftime('%s'))
return self.save()
def setup_push(self):
from apps.push.models import PushSubscription
try:
push = self.push
except PushSubscription.DoesNotExist:
self.is_push = False
else:
self.is_push = push.verified
self.save()
def queue_pushed_feed_xml(self, xml):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
queue_size = r.llen("push_feeds")
if queue_size > 1000:
self.schedule_feed_fetch_immediately()
else:
logging.debug(' ---> [%-30s] [%s] ~FBQueuing pushed stories...' % (unicode(self)[:30], self.pk))
self.set_next_scheduled_update()
PushFeeds.apply_async(args=(self.pk, xml), queue='push_feeds')
# def calculate_collocations_story_content(self,
# collocation_measures=TrigramAssocMeasures,
# collocation_finder=TrigramCollocationFinder):
# stories = MStory.objects.filter(story_feed_id=self.pk)
# story_content = ' '.join([s.story_content for s in stories if s.story_content])
# return self.calculate_collocations(story_content, collocation_measures, collocation_finder)
#
# def calculate_collocations_story_title(self,
# collocation_measures=BigramAssocMeasures,
# collocation_finder=BigramCollocationFinder):
# stories = MStory.objects.filter(story_feed_id=self.pk)
# story_titles = ' '.join([s.story_title for s in stories if s.story_title])
# return self.calculate_collocations(story_titles, collocation_measures, collocation_finder)
#
# def calculate_collocations(self, content,
# collocation_measures=TrigramAssocMeasures,
# collocation_finder=TrigramCollocationFinder):
# content = re.sub(r'’', '\'', content)
# content = re.sub(r'&', '&', content)
# try:
# content = unicode(BeautifulStoneSoup(content,
# convertEntities=BeautifulStoneSoup.HTML_ENTITIES))
# except ValueError, e:
# print "ValueError, ignoring: %s" % e
# content = re.sub(r'</?\w+\s+[^>]*>', '', content)
# content = re.split(r"[^A-Za-z-'&]+", content)
#
# finder = collocation_finder.from_words(content)
# finder.apply_freq_filter(3)
# best = finder.nbest(collocation_measures.pmi, 10)
# phrases = [' '.join(phrase) for phrase in best]
#
# return phrases
# class FeedCollocations(models.Model):
# feed = models.ForeignKey(Feed)
# phrase = models.CharField(max_length=500)
class FeedData(models.Model):
feed = AutoOneToOneField(Feed, related_name='data')
feed_tagline = models.CharField(max_length=1024, blank=True, null=True)
story_count_history = models.TextField(blank=True, null=True)
feed_classifier_counts = models.TextField(blank=True, null=True)
popular_tags = models.CharField(max_length=1024, blank=True, null=True)
popular_authors = models.CharField(max_length=2048, blank=True, null=True)
def save(self, *args, **kwargs):
if self.feed_tagline and len(self.feed_tagline) >= 1000:
self.feed_tagline = self.feed_tagline[:1000]
try:
super(FeedData, self).save(*args, **kwargs)
except (IntegrityError, OperationError):
if hasattr(self, 'id') and self.id: self.delete()
class MFeedIcon(mongo.Document):
feed_id = mongo.IntField(primary_key=True)
color = mongo.StringField(max_length=6)
data = mongo.StringField()
icon_url = mongo.StringField()
not_found = mongo.BooleanField(default=False)
meta = {
'collection' : 'feed_icons',
'allow_inheritance' : False,
}
def save(self, *args, **kwargs):
if self.icon_url:
self.icon_url = unicode(self.icon_url)
try:
super(MFeedIcon, self).save(*args, **kwargs)
except (IntegrityError, OperationError):
# print "Error on Icon: %s" % e
if hasattr(self, '_id'): self.delete()
class MFeedPage(mongo.Document):
feed_id = mongo.IntField(primary_key=True)
page_data = mongo.BinaryField()
meta = {
'collection': 'feed_pages',
'allow_inheritance': False,
}
def save(self, *args, **kwargs):
if self.page_data:
self.page_data = zlib.compress(self.page_data)
super(MFeedPage, self).save(*args, **kwargs)
@classmethod
def get_data(cls, feed_id):
data = None
feed_page = cls.objects(feed_id=feed_id)
if feed_page:
page_data_z = feed_page[0].page_data
if page_data_z:
data = zlib.decompress(page_data_z)
if not data:
dupe_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if dupe_feed:
feed = dupe_feed[0].feed
feed_page = MFeedPage.objects.filter(feed_id=feed.pk)
if feed_page:
page_data_z = feed_page[0].page_data
if page_data_z:
data = zlib.decompress(feed_page[0].page_data)
return data
class MStory(mongo.Document):
'''A feed item'''
story_feed_id = mongo.IntField()
story_date = mongo.DateTimeField()
story_title = mongo.StringField(max_length=1024)
story_content = mongo.StringField()
story_content_z = mongo.BinaryField()
story_original_content = mongo.StringField()
story_original_content_z = mongo.BinaryField()
story_latest_content = mongo.StringField()
story_latest_content_z = mongo.BinaryField()
original_text_z = mongo.BinaryField()
story_content_type = mongo.StringField(max_length=255)
story_author_name = mongo.StringField()
story_permalink = mongo.StringField()
story_guid = mongo.StringField()
story_hash = mongo.StringField()
story_tags = mongo.ListField(mongo.StringField(max_length=250))
comment_count = mongo.IntField()
comment_user_ids = mongo.ListField(mongo.IntField())
share_count = mongo.IntField()
share_user_ids = mongo.ListField(mongo.IntField())
meta = {
'collection': 'stories',
'indexes': [('story_feed_id', '-story_date'),
{'fields': ['story_hash'],
'unique': True,
'types': False,
'drop_dups': True }],
'index_drop_dups': True,
'ordering': ['-story_date'],
'allow_inheritance': False,
'cascade': False,
}
@property
def guid_hash(self):
return hashlib.sha1(self.story_guid).hexdigest()[:6]
@property
def feed_guid_hash(self):
return "%s:%s" % (self.story_feed_id, self.guid_hash)
def save(self, *args, **kwargs):
story_title_max = MStory._fields['story_title'].max_length
story_content_type_max = MStory._fields['story_content_type'].max_length
self.story_hash = self.feed_guid_hash
if self.story_content:
self.story_content_z = zlib.compress(self.story_content)
self.story_content = None
if self.story_original_content:
self.story_original_content_z = zlib.compress(self.story_original_content)
self.story_original_content = None
if self.story_latest_content:
self.story_latest_content_z = zlib.compress(self.story_latest_content)
self.story_latest_content = None
if self.story_title and len(self.story_title) > story_title_max:
self.story_title = self.story_title[:story_title_max]
if self.story_content_type and len(self.story_content_type) > story_content_type_max:
self.story_content_type = self.story_content_type[:story_content_type_max]
super(MStory, self).save(*args, **kwargs)
self.sync_redis()
def delete(self, *args, **kwargs):
self.remove_from_redis()
super(MStory, self).delete(*args, **kwargs)
@classmethod
def find_story(cls, story_feed_id, story_id, original_only=False):
from apps.social.models import MSharedStory
original_found = True
if isinstance(story_id, ObjectId):
story = cls.objects(id=story_id).limit(1).first()
else:
guid_hash = hashlib.sha1(story_id).hexdigest()[:6]
story_hash = "%s:%s" % (story_feed_id, guid_hash)
story = cls.objects(story_hash=story_hash).limit(1).first()
if not story:
original_found = False
if not story and not original_only:
story = MSharedStory.objects.filter(story_feed_id=story_feed_id,
story_guid=story_id).limit(1).first()
if not story and not original_only:
story = MStarredStory.objects.filter(story_feed_id=story_feed_id,
story_guid=story_id).limit(1).first()
return story, original_found
@classmethod
def find_by_id(cls, story_ids):
from apps.social.models import MSharedStory
count = len(story_ids)
multiple = isinstance(story_ids, list) or isinstance(story_ids, tuple)
stories = list(cls.objects(id__in=story_ids))
if len(stories) < count:
shared_stories = list(MSharedStory.objects(id__in=story_ids))
stories.extend(shared_stories)
if not multiple:
stories = stories[0]
return stories
@classmethod
def find_by_story_hashes(cls, story_hashes):
from apps.social.models import MSharedStory
count = len(story_hashes)
multiple = isinstance(story_hashes, list) or isinstance(story_hashes, tuple)
stories = list(cls.objects(story_hash__in=story_hashes))
if len(stories) < count:
hashes_found = [s.story_hash for s in stories]
remaining_hashes = list(set(story_hashes) - set(hashes_found))
story_feed_ids = [h.split(':')[0] for h in remaining_hashes]
shared_stories = list(MSharedStory.objects(story_feed_id__in=story_feed_ids,
story_hash__in=remaining_hashes))
stories.extend(shared_stories)
if not multiple:
stories = stories[0]
return stories
def sync_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
if self.id and self.story_date > UNREAD_CUTOFF:
r.sadd('F:%s' % self.story_feed_id, self.story_hash)
r.zadd('zF:%s' % self.story_feed_id, self.story_hash, time.mktime(self.story_date.timetuple()))
r.expire('F:%s' % self.story_feed_id, settings.DAYS_OF_UNREAD*24*60*60)
r.expire('zF:%s' % self.story_feed_id, settings.DAYS_OF_UNREAD*24*60*60)
def remove_from_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if self.id:
r.srem('F:%s' % self.story_feed_id, self.story_hash)
r.zrem('zF:%s' % self.story_feed_id, self.story_hash)
@classmethod
def sync_feed_redis(cls, story_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
feed = Feed.get_by_id(story_feed_id)
stories = cls.objects.filter(story_feed_id=story_feed_id, story_date__gte=UNREAD_CUTOFF)
r.delete('F:%s' % story_feed_id)
r.delete('zF:%s' % story_feed_id)
logging.info(" ---> [%-30s] ~FMSyncing ~SB%s~SN stories to redis" % (feed and feed.title[:30] or story_feed_id, stories.count()))
p = r.pipeline()
for story in stories:
story.sync_redis(r=p)
p.execute()
def count_comments(self):
from apps.social.models import MSharedStory
params = {
'story_guid': self.story_guid,
'story_feed_id': self.story_feed_id,
}
comments = MSharedStory.objects.filter(has_comments=True, **params).only('user_id')
shares = MSharedStory.objects.filter(**params).only('user_id')
self.comment_count = comments.count()
self.comment_user_ids = [c['user_id'] for c in comments]
self.share_count = shares.count()
self.share_user_ids = [s['user_id'] for s in shares]
self.save()
def fetch_original_text(self, force=False, request=None):
original_text_z = self.original_text_z
if not original_text_z or force:
ti = TextImporter(self, request=request)
original_text = ti.fetch()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
class MStarredStory(mongo.Document):
"""Like MStory, but not inherited due to large overhead of _cls and _type in
mongoengine's inheritance model on every single row."""
user_id = mongo.IntField(unique_with=('story_guid',))
starred_date = mongo.DateTimeField()
story_feed_id = mongo.IntField()
story_date = mongo.DateTimeField()
story_title = mongo.StringField(max_length=1024)
story_content = mongo.StringField()
story_content_z = mongo.BinaryField()
story_original_content = mongo.StringField()
story_original_content_z = mongo.BinaryField()
original_text_z = mongo.BinaryField()
story_content_type = mongo.StringField(max_length=255)
story_author_name = mongo.StringField()
story_permalink = mongo.StringField()
story_guid = mongo.StringField()
story_hash = mongo.StringField()
story_tags = mongo.ListField(mongo.StringField(max_length=250))
meta = {
'collection': 'starred_stories',
'indexes': [('user_id', '-starred_date'), ('user_id', 'story_feed_id'), 'story_feed_id'],
'index_drop_dups': True,
'ordering': ['-starred_date'],
'allow_inheritance': False,
}
def save(self, *args, **kwargs):
if self.story_content:
self.story_content_z = zlib.compress(self.story_content)
self.story_content = None
if self.story_original_content:
self.story_original_content_z = zlib.compress(self.story_original_content)
self.story_original_content = None
self.story_hash = self.feed_guid_hash
super(MStarredStory, self).save(*args, **kwargs)
# self.index_for_search()
def index_for_search(self):
story_content = zlib.decompress(self.story_content_z)
SearchStarredStory.index(user_id=self.user_id,
story_id=self.story_guid,
story_title=self.story_title,
story_content=story_content,
story_author=self.story_author_name,
story_date=self.story_date,
db_id=str(self.id))
@property
def guid_hash(self):
return hashlib.sha1(self.story_guid).hexdigest()[:6]
@property
def feed_guid_hash(self):
return "%s:%s" % (self.story_feed_id or "0", self.guid_hash)
def fetch_original_text(self, force=False, request=None):
original_text_z = self.original_text_z
if not original_text_z or force:
ti = TextImporter(self, request=request)
original_text = ti.fetch()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
class MFetchHistory(mongo.Document):
feed_id = mongo.IntField(unique=True)
feed_fetch_history = mongo.DynamicField()
page_fetch_history = mongo.DynamicField()
push_history = mongo.DynamicField()
meta = {
'collection': 'fetch_history',
'allow_inheritance': False,
}
@classmethod
def feed(cls, feed_id, timezone=None):
fetch_history, _ = cls.objects.get_or_create(
feed_id=feed_id,
read_preference=pymongo.ReadPreference.PRIMARY
)
history = {}
for fetch_type in ['feed_fetch_history', 'page_fetch_history', 'push_history']:
history[fetch_type] = getattr(fetch_history, fetch_type)
if not history[fetch_type]:
history[fetch_type] = []
for f, fetch in enumerate(history[fetch_type]):
date_key = 'push_date' if fetch_type == 'push_history' else 'fetch_date'
history[fetch_type][f] = {
date_key: localtime_for_timezone(fetch[0],
timezone).strftime("%Y-%m-%d %H:%M:%S"),
'status_code': fetch[1],
'message': fetch[2]
}
return history
@classmethod
def add(cls, feed_id, fetch_type, date=None, message=None, code=None, exception=None):
if not date:
date = datetime.datetime.now()
fetch_history, _ = cls.objects.get_or_create(
feed_id=feed_id,
read_preference=pymongo.ReadPreference.PRIMARY
)
if fetch_type == 'feed':
history = fetch_history.feed_fetch_history or []
elif fetch_type == 'page':
history = fetch_history.page_fetch_history or []
elif fetch_type == 'push':
history = fetch_history.push_history or []
history.insert(0, (date, code, message))
history = history[:5]
if fetch_type == 'feed':
fetch_history.feed_fetch_history = history
elif fetch_type == 'page':
fetch_history.page_fetch_history = history
elif fetch_type == 'push':
fetch_history.push_history = history
fetch_history.save()
if exception:
MFetchExceptionHistory.add(feed_id, date=date, code=code,
message=message, exception=exception)
if fetch_type == 'feed':
RStats.add('feed_fetch')
class MFetchExceptionHistory(mongo.Document):
feed_id = mongo.IntField(unique=True)
date = mongo.DateTimeField()
code = mongo.IntField()
message = mongo.StringField()
exception = mongo.StringField()
meta = {
'collection': 'fetch_exception_history',
'allow_inheritance': False,
}
@classmethod
def add(cls, feed_id, date=None, code=None, message="", exception=""):
if not date:
date = datetime.datetime.now()
if not isinstance(exception, basestring):
exception = unicode(exception)
fetch_exception, _ = cls.objects.get_or_create(
feed_id=feed_id,
read_preference=pymongo.ReadPreference.PRIMARY
)
fetch_exception.date = date
fetch_exception.code = code
fetch_exception.message = message
fetch_exception.exception = exception
fetch_exception.save()
class DuplicateFeed(models.Model):
duplicate_address = models.CharField(max_length=764, db_index=True)
duplicate_link = models.CharField(max_length=764, null=True, db_index=True)
duplicate_feed_id = models.CharField(max_length=255, null=True, db_index=True)
feed = models.ForeignKey(Feed, related_name='duplicate_addresses')
def __unicode__(self):
return "%s: %s / %s" % (self.feed, self.duplicate_address, self.duplicate_link)
def to_json(self):
return {
'duplicate_address': self.duplicate_address,
'duplicate_link': self.duplicate_link,
'duplicate_feed_id': self.duplicate_feed_id,
'feed_id': self.feed_id
}
def save(self, *args, **kwargs):
max_address = DuplicateFeed._meta.get_field('duplicate_address').max_length
if len(self.duplicate_address) > max_address:
self.duplicate_address = self.duplicate_address[:max_address]
max_link = DuplicateFeed._meta.get_field('duplicate_link').max_length
if self.duplicate_link and len(self.duplicate_link) > max_link:
self.duplicate_link = self.duplicate_link[:max_link]
super(DuplicateFeed, self).save(*args, **kwargs)
def merge_feeds(original_feed_id, duplicate_feed_id, force=False):
from apps.reader.models import UserSubscription
from apps.social.models import MSharedStory
if original_feed_id == duplicate_feed_id:
logging.info(" ***> Merging the same feed. Ignoring...")
return original_feed_id
try:
original_feed = Feed.objects.get(pk=original_feed_id)
duplicate_feed = Feed.objects.get(pk=duplicate_feed_id)
except Feed.DoesNotExist:
logging.info(" ***> Already deleted feed: %s" % duplicate_feed_id)
return original_feed_id
heavier_dupe = original_feed.num_subscribers < duplicate_feed.num_subscribers
branched_original = original_feed.branch_from_feed
if (heavier_dupe or branched_original) and not force:
original_feed, duplicate_feed = duplicate_feed, original_feed
original_feed_id, duplicate_feed_id = duplicate_feed_id, original_feed_id
if branched_original:
original_feed.feed_address = duplicate_feed.feed_address
logging.info(" ---> Feed: [%s - %s] %s - %s" % (original_feed_id, duplicate_feed_id,
original_feed, original_feed.feed_link))
logging.info(" Orig ++> %s: (%s subs) %s / %s %s" % (original_feed.pk,
original_feed.num_subscribers,
original_feed.feed_address,
original_feed.feed_link,
" [B: %s]" % original_feed.branch_from_feed.pk if original_feed.branch_from_feed else ""))
logging.info(" Dupe --> %s: (%s subs) %s / %s %s" % (duplicate_feed.pk,
duplicate_feed.num_subscribers,
duplicate_feed.feed_address,
duplicate_feed.feed_link,
" [B: %s]" % duplicate_feed.branch_from_feed.pk if duplicate_feed.branch_from_feed else ""))
original_feed.branch_from_feed = None
user_subs = UserSubscription.objects.filter(feed=duplicate_feed).order_by('-pk')
for user_sub in user_subs:
user_sub.switch_feed(original_feed, duplicate_feed)
def delete_story_feed(model, feed_field='feed_id'):
duplicate_stories = model.objects(**{feed_field: duplicate_feed.pk})
# if duplicate_stories.count():
# logging.info(" ---> Deleting %s %s" % (duplicate_stories.count(), model))
duplicate_stories.delete()
delete_story_feed(MStory, 'story_feed_id')
delete_story_feed(MFeedPage, 'feed_id')
try:
DuplicateFeed.objects.create(
duplicate_address=duplicate_feed.feed_address,
duplicate_link=duplicate_feed.feed_link,
duplicate_feed_id=duplicate_feed.pk,
feed=original_feed
)
except (IntegrityError, OperationError), e:
logging.info(" ***> Could not save DuplicateFeed: %s" % e)
# Switch this dupe feed's dupe feeds over to the new original.
duplicate_feeds_duplicate_feeds = DuplicateFeed.objects.filter(feed=duplicate_feed)
for dupe_feed in duplicate_feeds_duplicate_feeds:
dupe_feed.feed = original_feed
dupe_feed.duplicate_feed_id = duplicate_feed.pk
dupe_feed.save()
logging.debug(' ---> Dupe subscribers (%s): %s, Original subscribers (%s): %s' %
(duplicate_feed.pk, duplicate_feed.num_subscribers,
original_feed.pk, original_feed.num_subscribers))
if duplicate_feed.pk != original_feed.pk:
duplicate_feed.delete()
else:
logging.debug(" ***> Duplicate feed is the same as original feed. Panic!")
logging.debug(' ---> Deleted duplicate feed: %s/%s' % (duplicate_feed, duplicate_feed_id))
original_feed.branch_from_feed = None
original_feed.count_subscribers()
original_feed.save()
logging.debug(' ---> Now original subscribers: %s' %
(original_feed.num_subscribers))
MSharedStory.switch_feed(original_feed_id, duplicate_feed_id)
return original_feed_id
def rewrite_folders(folders, original_feed, duplicate_feed):
new_folders = []
for k, folder in enumerate(folders):
if isinstance(folder, int):
if folder == duplicate_feed.pk:
# logging.info(" ===> Rewrote %s'th item: %s" % (k+1, folders))
new_folders.append(original_feed.pk)
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
new_folders.append({f_k: rewrite_folders(f_v, original_feed, duplicate_feed)})
return new_folders
``` |
{
"source": "josephmisiti/sympy",
"score": 3
} |
#### File: combinatorial/tests/test_comb_numbers.py
```python
from sympy import bernoulli, Symbol, Sum, harmonic, Rational, oo, zoo, pi, I, bell, \
fibonacci, lucas, euler, catalan, binomial, gamma, sqrt, hyper, log, polygamma, diff
x = Symbol('x')
def test_bernoulli():
assert bernoulli(0) == 1
assert bernoulli(1) == Rational(-1,2)
assert bernoulli(2) == Rational(1,6)
assert bernoulli(3) == 0
assert bernoulli(4) == Rational(-1,30)
assert bernoulli(5) == 0
assert bernoulli(6) == Rational(1,42)
assert bernoulli(7) == 0
assert bernoulli(8) == Rational(-1,30)
assert bernoulli(10) == Rational(5,66)
assert bernoulli(1000001) == 0
assert bernoulli(0, x) == 1
assert bernoulli(1, x) == x-Rational(1,2)
assert bernoulli(2, x) == x**2-x+Rational(1,6)
assert bernoulli(3, x) == x**3 - (3*x**2)/2 + x/2
# Should be fast; computed with mpmath
b = bernoulli(1000)
assert b.p % 10**10 == 7950421099
assert b.q == 342999030
b = bernoulli(10**6, evaluate=False).evalf()
assert str(b) == '-2.23799235765713e+4767529'
def test_fibonacci():
assert [fibonacci(n) for n in range(-3, 5)] == [2, -1, 1, 0, 1, 1, 2, 3]
assert fibonacci(100) == 354224848179261915075
assert [lucas(n) for n in range(-3, 5)] == [-4, 3, -1, 2, 1, 3, 4, 7]
assert lucas(100) == 792070839848372253127
assert fibonacci(1, x) == 1
assert fibonacci(2, x) == x
assert fibonacci(3, x) == x**2 + 1
assert fibonacci(4, x) == x**3 + 2*x
def test_bell():
assert [bell(n) for n in range(8)] == [1, 1, 2, 5, 15, 52, 203, 877]
assert bell(0, x) == 1
assert bell(1, x) == x
assert bell(2, x) == x**2 + x
assert bell(5, x) == x**5 + 10*x**4 + 25*x**3 + 15*x**2 + x
def test_harmonic():
assert harmonic(1,1) == 1
assert harmonic(2,1) == Rational(3,2)
assert harmonic(3,1) == Rational(11,6)
assert harmonic(4,1) == Rational(25,12)
# assert harmonic(3,1) == harmonic(3)
assert harmonic(3,5) == 1 + Rational(1,2**5) + Rational(1,3**5)
assert harmonic(10,0) == 10
assert harmonic(oo,1) == zoo
assert harmonic(oo,2) == (pi**2)/6
def test_euler():
assert euler(0) == 1
assert euler(1) == 0
assert euler(2) == -1
assert euler(3) == 0
assert euler(4) == 5
assert euler(6) == -61
assert euler(8) == 1385
assert euler(20, evaluate=False) != 370371188237525
n = Symbol('n', integer=True)
assert euler(n) != -1
assert euler(n).subs(n, 2) == -1
assert euler(20).evalf() == 370371188237525.0
assert euler(20, evaluate=False).evalf() == 370371188237525.0
assert euler(n).rewrite(Sum) == euler(n)
#assert euler(2*n).rewrite(Sum) == I*Sum(Sum((-1)**_j*2**(-_k)*I**(-_k)*(-2*_j + _k)**(2*n + 1)*binomial(_k, _j)/_k, (_j, 0, _k)), (_k, 1, 2*n + 1))
assert euler(2*n+1).rewrite(Sum) == 0
def test_catalan():
assert catalan(1) == 1
assert catalan(2) == 2
assert catalan(3) == 5
assert catalan(4) == 14
# assert catalan(x) == catalan(x)
assert catalan(2*x).rewrite(binomial) == binomial(4*x, 2*x)/(2*x + 1)
assert catalan(Rational(1,2)).rewrite(gamma) == 8/(3*pi)
assert catalan(3*x).rewrite(gamma) == 4**(3*x)*gamma(3*x + Rational(1,2))/(sqrt(pi)*gamma(3*x + 2))
assert catalan(x).rewrite(hyper) == hyper((-x + 1, -x), (2,), 1)
assert diff(catalan(x),x) == (polygamma(0, x + Rational(1,2)) - polygamma(0, x + 2) + 2*log(2))*catalan(x)
c = catalan(0.5).evalf()
assert str(c) == '0.848826363156775'
``` |
{
"source": "josephmisiti/taskmaster",
"score": 2
} |
#### File: taskmaster/cli/master.py
```python
from taskmaster.util import parse_options
from taskmaster.constants import DEFAULT_LOG_LEVEL, DEFAULT_ADDRESS, \
DEFAULT_BUFFER_SIZE
def run(target, kwargs=None, reset=False, size=DEFAULT_BUFFER_SIZE, address=DEFAULT_ADDRESS, log_level=DEFAULT_LOG_LEVEL):
from taskmaster.server import Server, Controller
server = Server(address, size=size, log_level=log_level)
controller = Controller(server, target, kwargs=kwargs, log_level=log_level)
if reset:
controller.reset()
controller.start()
def main():
import optparse
import sys
parser = optparse.OptionParser()
parser.add_option("--address", dest="address", default=DEFAULT_ADDRESS)
parser.add_option("--size", dest="size", default=DEFAULT_BUFFER_SIZE, type=int)
parser.add_option("--reset", dest="reset", default=False, action='store_true')
parser.add_option("--log-level", dest="log_level", default=DEFAULT_LOG_LEVEL)
(options, args) = parser.parse_args()
if len(args) < 1:
print 'Usage: tm-master <callback> [key=value, key2=value2]'
sys.exit(1)
sys.exit(run(args[0], parse_options(args[1:]), **options.__dict__))
if __name__ == '__main__':
main()
``` |
{
"source": "josephmje/datman",
"score": 3
} |
#### File: datman/bin/archive_manifest.py
```python
from docopt import docopt
import pandas as pd
import datman
import datman.utils
default_headers = [
'StudyDescription',
'StudyID',
'PatientName',
'SeriesNumber',
'SeriesDescription']
def main():
arguments = docopt(__doc__)
if arguments['--showheaders']:
for archive in arguments['<archive>']:
manifest = datman.utils.get_archive_headers(archive,
stop_after_first=False)
filepath, headers = list(manifest.items())[0]
print(",".join([archive, filepath]))
print("\t" + "\n\t".join(headers.dir()))
return
headers = (arguments['--headers'] and arguments['--headers'].split(',')) \
or default_headers[:]
headers.insert(0, "Path")
rows = []
for archive in arguments["<archive>"]:
manifest = datman.utils.get_archive_headers(archive)
sortedseries = sorted(manifest.items(),
key=lambda x: x[1].get('SeriesNumber'))
for path, dataset in sortedseries:
row = {header: dataset.get(header, "") for header in headers}
row['Path'] = path
rows.append(row)
if arguments['--oneseries']:
break
data = pd.DataFrame(rows)
print(data.to_csv(index=False))
if __name__ == "__main__":
main()
```
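A minimal editor's sketch (not part of datman) of the row-building pattern archive_manifest.py uses above: sort series by SeriesNumber, turn each into a row dict, and print the frame as CSV. The paths and header values here are made up; the real script reads them with datman.utils.get_archive_headers.
```python
import pandas as pd

# Fabricated stand-in for the manifest returned by get_archive_headers().
fake_manifest = {
    "exam/002.dcm": {"SeriesNumber": 2, "SeriesDescription": "T1"},
    "exam/001.dcm": {"SeriesNumber": 1, "SeriesDescription": "Localizer"},
}
rows = []
for path, headers in sorted(fake_manifest.items(),
                            key=lambda x: x[1]["SeriesNumber"]):
    row = dict(headers)   # the real script pulls dataset.get(header, "") per requested header
    row["Path"] = path
    rows.append(row)
print(pd.DataFrame(rows).to_csv(index=False))
```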
#### File: datman/bin/dm_symlink_scans.py
```python
import os
import sys
import fnmatch
import logging
from docopt import docopt
import datman.config
import datman.utils
import datman.scanid
# set up logging
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
def find_files(directory):
for root, dirs, files in os.walk(directory):
for extension in ['*.nii.gz', '*.bvec', '*.bval']:
for basename in files:
if fnmatch.fnmatch(basename, extension):
filename = os.path.join(root, basename)
yield filename
def create_symlink(src, target_name, dest):
datman.utils.define_folder(dest)
target_path = os.path.join(dest, target_name)
if os.path.isfile(target_path):
logger.warn('{} already exists. Not linking.'.format(target_path))
return
with datman.utils.cd(dest):
rel_path = os.path.relpath(src, dest)
logger.info('Linking {} -> {}'.format(rel_path, target_path))
try:
os.symlink(rel_path, target_path)
except OSError:
logger.error('Unable to link to {}'.format(rel_path))
def force_json_name(json_filename, sub_dir):
'''
dcm2niix adds a suffix if a nifti file already exists even though you
just want the .json sidecar. Force name to match what is expected
'''
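    # Illustrative example (editor's note): if the expected sidecar is
    # 'SPN01_CMH_0001_01_01_T1_03_SagT1.json' but dcm2niix wrote
    # 'SPN01_CMH_0001_01_01_T1_03_SagT1a.json' because a nifti already existed,
    # the file is renamed back to the expected name below. The session and
    # series names here are made up.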
json_base = json_filename.split('.')[0]
candidate = [f for f in os.listdir(sub_dir)
if (json_base in f) and ('.json' in f)][0]
    if candidate != json_filename:
logger.warning('dcm2niix added suffix!\nShould be {}\n'
'Found {}'.format(json_filename, candidate))
src = os.path.join(sub_dir, candidate)
dst = os.path.join(sub_dir, json_filename)
os.rename(src, dst)
def create_json_sidecar(scan_filename, session_nii_dir, session_dcm_dir):
json_filename = os.path.splitext(scan_filename)[0] + '.json'
if os.path.isfile(os.path.join(session_nii_dir, json_filename)):
logger.warn('JSON sidecar {} already exists. '
'Not creating.'.format(json_filename))
return
logger.info('Creating JSON sidecar {}'.format(json_filename))
try:
# dcm2niix creates json using single dicom in dcm directory
datman.utils.run('dcm2niix -b o -s y -f {} -o {} {}'
.format(os.path.splitext(scan_filename)[0],
session_nii_dir,
os.path.join(session_dcm_dir, scan_filename)))
force_json_name(json_filename, session_nii_dir)
except Exception:
logger.error('Unable to create JSON sidecar {}'.format(json_filename))
def get_series(file_name):
# need better way to get series number from nifti
return int(os.path.basename(file_name).split("_")[1][1:])
def is_blacklisted(resource_file, session):
blacklist = datman.utils.read_blacklist(subject=session)
if not blacklist:
return False
series = get_series(resource_file)
for entry in blacklist:
bl_series = int(datman.scanid.parse_filename(entry)[2])
if series == bl_series:
return True
return False
def main():
arguments = docopt(__doc__)
study = arguments['<study>']
site = arguments['--site']
session = arguments['--session']
create_json = arguments['--json']
quiet = arguments['--quiet']
verbose = arguments['--verbose']
debug = arguments['--debug']
# setup log levels
log_level = logging.WARN
if quiet:
log_level = logging.ERROR
if verbose:
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
logger.setLevel(log_level)
log_handler.setLevel(log_level)
# setup the config object
cfg = datman.config.config(study=study)
# get paths
dir_nii = cfg.get_path('nii')
dir_res = cfg.get_path('resources')
dir_dcm = cfg.get_path('dcm')
# get sessions depending on which command line argument was specified
if site:
sessions = [subject for subject in os.listdir(dir_res)
if datman.scanid.parse(subject).site == site]
elif session:
sessions = session
else:
sessions = os.listdir(dir_res)
logger.info('Processing {} sessions'.format(len(sessions)))
for session in sessions:
try:
ident = datman.scanid.parse(session)
except datman.scanid.ParseException:
logger.error('Invalid session: {}'.format(session))
continue
# get all files of interest stored in the session directory within
# RESOURCES
session_res_dir = os.path.join(dir_res, session)
# extensions = ('**/*.nii.gz', '**/*.bvec', '**/*.bval')
session_res_files = []
# temporarily commment out since glob in python 2 can't recurse
# for extension in extensions:
# session_res_files.extend(
# glob(os.path.join(session_res_dir, extension),
# recursive=True)
# )
for filename in find_files(session_res_dir):
session_res_files.append(filename)
session_name = ident.get_full_subjectid_with_timepoint()
session_nii_dir = os.path.join(dir_nii, session_name)
session_dcm_dir = os.path.join(dir_dcm, session_name)
if session_res_files:
# check whether nifti directory exists, otherwise create it
datman.utils.define_folder(session_nii_dir)
# create dictionary with DICOM series numbers as keys and
# filenames as values
session_dcm_files = os.listdir(session_dcm_dir)
dcm_dict = {int(datman.scanid.parse_filename(dcm)[2]):
dcm for dcm in session_dcm_files}
for f in session_res_files:
series_num = get_series(f)
# try to get new nifti filename by matching series number
# in dictionary
try:
scan_filename = os.path.splitext(dcm_dict[series_num])[0]
except (IndexError, KeyError):
if is_blacklisted(f, session_name):
logger.info('Ignored blacklisted series {}'.format(f))
continue
logger.error('Corresponding dcm file not found for {}'
.format(f))
continue
ext = datman.utils.get_extension(f)
nii_name = scan_filename + ext
if create_json and nii_name.endswith('.nii.gz'):
create_json_sidecar(dcm_dict[series_num],
session_nii_dir,
session_dcm_dir)
create_symlink(f, nii_name, session_nii_dir)
if __name__ == '__main__':
main()
```
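An editor's sketch, assuming a POSIX filesystem, of the relative-symlink pattern create_symlink() in dm_symlink_scans.py relies on; the target filename is a made-up datman-style name and the directories are throwaway temp dirs.
```python
import os
import tempfile

src_dir = tempfile.mkdtemp()
dest_dir = tempfile.mkdtemp()
src = os.path.join(src_dir, "scan.nii.gz")
open(src, "w").close()

# Hypothetical datman-style name; the real one comes from the matching dcm file.
target = os.path.join(dest_dir, "SPN01_CMH_0001_01_01_T1_02_SagT1.nii.gz")
rel_path = os.path.relpath(src, dest_dir)   # link relative to the nii directory
os.symlink(rel_path, target)
print(os.readlink(target))
```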
#### File: datman/bin/dm_update_standards.py
```python
import os
from docopt import docopt
import datman.scanid
import datman.scan
import datman.dashboard
import datman.config
from dashboard.exceptions import InvalidDataException
import logging
logging.basicConfig(level=logging.INFO, format="[%(name)s] %(levelname)s: \
%(message)s")
logger = logging.getLogger(os.path.basename(__file__))
def main():
arguments = docopt(__doc__)
study = arguments["<study>"]
debug = arguments["--debug"]
if debug:
logger.info("Logging in DEBUG mode")
logger.setLevel(logging.DEBUG)
# Get standards and database study
cfg = datman.config.config(study=study)
standards_path = cfg.get_path("std")
standards = [f for f in os.listdir(standards_path) if ".json" in f]
db_study = datman.dashboard.get_project(name=study)
# Add standards to database
for s in standards:
try:
db_study.add_gold_standard(os.path.join(standards_path, s))
except InvalidDataException as e:
logger.error("Standard {} already exists in the Dashboard!"
.format(s))
logger.debug("Returned error: {}".format(e))
continue
else:
logger.info("Successfully added {} to gold_standards".format(s))
if __name__ == "__main__":
main()
```
#### File: datman/bin/purge_session.py
```python
import os
import subprocess
import logging
from docopt import docopt
import datman.config
import datman.dashboard
logger = logging.getLogger(__file__)
CFG = datman.config.config()
def main():
arguments = docopt(__doc__)
study = arguments['<study>']
session = arguments['<session>']
backupdir = arguments['--backupdir']
purgedb = arguments['--purgedb']
CFG.set_study(study)
base_dir = CFG.get_study_base()
logger.info('Searching folders:{}'.format(base_dir))
# Create the backup folder
outdir = os.path.realpath(os.path.join(backupdir, session))
try:
os.makedirs(outdir)
except OSError:
logger.error('Failed making backup directory:{}'.format(outdir))
return
if not purge_filesystem(session, base_dir, outdir):
# somethings gone wrong. End processing here.
return
if purgedb:
try:
db = datman.dashboard.dashboard(study)
db.delete_session(session)
except Exception:
return
def purge_filesystem(session, base_dir, out_dir):
"""
session - session name to remove
base_dir - root of the filesystem to search from
out_dir - directory to create the backup
"""
cmd_rsync = ['rsync',
'-rmz',
'--remove-source-files',
'--include={}*/**'.format(session),
'--include=*/',
'--exclude=*',
base_dir,
out_dir]
if not run_cmd(cmd_rsync):
logger.error('Backup of session:{} failed'.format(session))
return
cmd_find = ['find',
base_dir,
'-depth',
'-type',
'd',
'-empty',
'-name',
'{}*'.format(session),
'-delete']
if not run_cmd(cmd_find):
logger.error('Cleanup of session:{} failed'.format(session))
return
return True
def run_cmd(cmd):
"""Runs a command, logs stuff if fails,
returns True on success"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
logger.info('Cmd:{}'.format(e.cmd))
logger.info('Status:{}'.format(e.returncode))
if(e.output):
logger.info('Output:{}'.format(e.output))
return
return True
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
```
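An editor's sketch of the rsync command purge_session.py assembles; the session name and paths are placeholders. The include/exclude filters move only files under directories named after the session (--remove-source-files deletes them from the source), and the follow-up find command in the script removes the emptied directories.
```python
session = "SPN01_CMH_0001_01"            # placeholder session name
base_dir = "/archive/data/STUDY/"        # placeholder study root
out_dir = "/backups/SPN01_CMH_0001_01"   # placeholder backup target

cmd_rsync = ["rsync", "-rmz", "--remove-source-files",
             "--include={}*/**".format(session),
             "--include=*/",
             "--exclude=*",
             base_dir, out_dir]
print(" ".join(cmd_rsync))
```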
#### File: datman/tests/test_datman_scanid.py
```python
import datman.scanid as scanid
import pytest
def test_parse_empty():
with pytest.raises(scanid.ParseException):
scanid.parse("")
def test_parse_None():
with pytest.raises(scanid.ParseException):
scanid.parse(None)
def test_parse_garbage():
with pytest.raises(scanid.ParseException):
scanid.parse("lkjlksjdf")
def test_parse_good_datman_scanid():
ident = scanid.parse("DTI_CMH_H001_01_02")
assert ident.study == "DTI"
assert ident.site == "CMH"
assert ident.subject == "H001"
assert ident.timepoint == "01"
assert ident.session == "02"
def test_parse_good_datman_PHA_scanid():
ident = scanid.parse("DTI_CMH_PHA_ADN0001")
assert ident.study == "DTI"
assert ident.site == "CMH"
assert ident.subject == "PHA_ADN0001"
assert ident.timepoint == ""
assert ident.session == ""
assert str(ident) == "DTI_CMH_PHA_ADN0001"
def test_parse_good_date_based_datman_pha_scanid():
ident = scanid.parse("OPT01_UTO_PHA_FBN190603")
assert ident.study == "OPT01"
assert ident.site == "UTO"
assert ident.subject == "PHA_FBN190603"
assert ident.timepoint == ""
assert str(ident) == "OPT01_UTO_PHA_FBN190603"
def test_parse_good_kcni_scanid():
ident = scanid.parse("ABC01_CMH_12345678_01_SE02_MR")
assert ident.study == 'ABC01'
assert ident.site == 'CMH'
assert ident.subject == '12345678'
assert ident.timepoint == '01'
assert ident.session == '02'
def test_parse_good_kcni_PHA_scanid():
ident = scanid.parse("ABC01_CMH_LEGPHA_0001_MR")
assert ident.study == 'ABC01'
assert ident.site == 'CMH'
assert ident.subject == 'PHA_LEG0001'
def test_parses_datman_subject_id_as_datman_identifier():
dm_subject = "DTI01_CMH_H001_01_02"
ident = scanid.parse(dm_subject)
assert isinstance(ident, scanid.DatmanIdentifier)
def test_parses_datman_pha_id_as_datman_identifier():
dm_pha = "DTI01_CMH_PHA_FBN0001"
ident = scanid.parse(dm_pha)
assert isinstance(ident, scanid.DatmanIdentifier)
def test_parses_kcni_subject_id_as_kcni_identifier():
kcni_subject = "DTI01_CMH_H001_01_SE02_MR"
ident = scanid.parse(kcni_subject)
assert isinstance(ident, scanid.KCNIIdentifier)
def test_parses_kcni_pha_id_as_kcni_identifier():
kcni_pha = "DTI01_CMH_ABCPHA_0001_MR"
ident = scanid.parse(kcni_pha)
assert isinstance(ident, scanid.KCNIIdentifier)
def test_parse_exception_when_kcni_subject_id_modality_missing():
with pytest.raises(scanid.ParseException):
scanid.parse("DTI01_CMH_H001_01_SE02")
def test_parse_exception_when_kcni_pha_id_modality_missing():
with pytest.raises(scanid.ParseException):
scanid.parse("DTI01_CMH_ABCPHA_0001")
def test_parse_exception_when_kcni_session_malformed():
with pytest.raises(scanid.ParseException):
scanid.parse("DTI01_CMH_H001_01_02_MR")
def test_user_settings_id_type_respected():
# Datman IDs should be rejected if user says to parse only KCNI IDs
with pytest.raises(scanid.ParseException):
scanid.parse("DTI01_CMH_H001_01_02", settings={'ID_TYPE': 'KCNI'})
# KCNI IDs should be rejected if the user says to parse only Datman IDs
with pytest.raises(scanid.ParseException):
scanid.parse("DTI01_CMH_H001_01_SE02_MR",
settings={'ID_TYPE': 'DATMAN'})
def test_kcni_study_field_is_modified_when_settings_given():
settings = {
'STUDY': {
'DTI01': 'DTI'
}
}
kcni_id = "DTI01_CMH_H001_01_SE02_MR"
ident = scanid.parse(kcni_id, settings=settings)
assert ident.study == 'DTI'
assert str(ident) == "DTI_CMH_H001_01_02"
def test_kcni_site_field_is_modified_when_settings_given():
settings = {
'SITE': {
'UTO': 'UT2'
}
}
kcni_id = 'ABC01_UTO_12345678_01_SE02_MR'
ident = scanid.parse(kcni_id, settings=settings)
assert ident.site == 'UT2'
assert str(ident) == 'ABC01_UT2_12345678_01_02'
def test_kcni_subid_field_is_modified_when_settings_given():
settings = {
'SUBJECT': {
'^(100001|100002)->H\\1': '^H([0-9]+)->\\1'
}
}
kcni_id = 'PAC01_CMH_100001_01_SE01_MR'
ident = scanid.parse(kcni_id, settings=settings)
assert ident.subject == 'H100001'
kcni_id = 'PAC01_CMH_100004_01_SE03_MR'
ident = scanid.parse(kcni_id, settings=settings)
assert ident.subject == '100004'
def test_get_kcni_identifier_from_datman_str():
kcni_ident = scanid.get_kcni_identifier("ABC01_UTO_12345678_01_02")
assert isinstance(kcni_ident, scanid.KCNIIdentifier)
assert kcni_ident.orig_id == "ABC01_UTO_12345678_01_SE02_MR"
def test_get_kcni_identifier_from_datman_pha_str():
kcni_ident = scanid.get_kcni_identifier("ABC01_CMH_PHA_FBN0001")
assert isinstance(kcni_ident, scanid.KCNIIdentifier)
assert kcni_ident.orig_id == "ABC01_CMH_FBNPHA_0001_MR"
def test_get_kcni_identifier_from_datman_date_based_pha_str():
kcni_ident = scanid.get_kcni_identifier("OPT01_UTO_PHA_FBN190603")
assert isinstance(kcni_ident, scanid.KCNIIdentifier)
assert kcni_ident.orig_id == "OPT01_UTO_FBNPHA_190603_MR"
def test_get_kcni_identifier_from_datman_ident():
ident = scanid.parse("SPN01_CMH_0001_01_01")
kcni_ident = scanid.get_kcni_identifier(ident)
assert isinstance(kcni_ident, scanid.KCNIIdentifier)
assert kcni_ident.orig_id == "SPN01_CMH_0001_01_SE01_MR"
def test_get_kcni_identifier_from_datman_pha_ident():
dm_ident = scanid.parse("OPT01_UTO_PHA_ADN0001")
kcni_ident = scanid.get_kcni_identifier(dm_ident)
assert isinstance(kcni_ident, scanid.KCNIIdentifier)
assert kcni_ident.orig_id == "OPT01_UTO_ADNPHA_0001_MR"
def test_get_kcni_identifier_from_datman_with_field_changes():
settings = {
"STUDY": {
"AND01": "ANDT"
},
"SITE": {
"UTO": "CMH"
}
}
kcni = scanid.get_kcni_identifier("ANDT_CMH_0001_01_01", settings)
assert kcni.study == "ANDT"
assert kcni.site == "CMH"
assert kcni.orig_id == "AND01_UTO_0001_01_SE01_MR"
kcni_pha = scanid.get_kcni_identifier("ANDT_CMH_PHA_FBN0023", settings)
assert kcni_pha.study == "ANDT"
assert kcni_pha.site == "CMH"
assert kcni_pha.orig_id == "AND01_UTO_FBNPHA_0023_MR"
def test_get_kcni_identifier_correctly_reverses_subject_field_changes():
settings = {
'SUBJECT': {
'(100001|100002)->H\\1': '^H([0-9]+)->\\1'
}
}
kcni = scanid.get_kcni_identifier("ABC01_CMH_H100001_01_01", settings)
assert kcni.subject == "H100001"
assert kcni.orig_id == "ABC01_CMH_100001_01_SE01_MR"
kcni = scanid.get_kcni_identifier("ABC01_CMH_PHA_FBN201013", settings)
assert kcni.subject == "PHA_FBN201013"
assert kcni.orig_id == "ABC01_CMH_FBNPHA_201013_MR"
def test_get_kcni_identifier_handles_already_kcni():
kcni = "ABC01_UTO_12345678_01_SE02_MR"
kcni_ident = scanid.parse(kcni)
kcni1 = scanid.get_kcni_identifier(kcni)
assert isinstance(kcni1, scanid.KCNIIdentifier)
assert kcni1.orig_id == kcni
kcni2 = scanid.get_kcni_identifier(kcni_ident)
assert isinstance(kcni2, scanid.KCNIIdentifier)
assert kcni2.orig_id == kcni
def test_datman_converted_to_kcni_and_back_is_unmodified():
orig_datman = 'SPN01_CMH_0001_01_01'
dm_ident = scanid.parse(orig_datman)
kcni = scanid.get_kcni_identifier(dm_ident)
assert isinstance(kcni, scanid.KCNIIdentifier)
new_datman = scanid.parse(str(kcni))
assert str(new_datman) == orig_datman
def test_kcni_converted_to_datman_and_back_is_unmodified():
orig_kcni = 'SPN01_CMH_0001_01_SE01_MR'
kcni_ident = scanid.parse(orig_kcni)
datman = scanid.parse(str(kcni_ident))
assert isinstance(datman, scanid.DatmanIdentifier)
new_kcni = scanid.get_kcni_identifier(datman)
assert new_kcni.orig_id == orig_kcni
def test_id_field_changes_correct_for_repeat_conversions():
settings = {
'STUDY': {
'AND01': 'ANDT'
},
'SITE': {
'UTO': 'CMH'
},
'SUBJECT': {
'^(0001|0002)->H\\1': '^H([0-9]+)->\\1'
}
}
correct_kcni = "AND01_UTO_0001_01_SE01_MR"
correct_datman = "ANDT_CMH_H0001_01_01"
# KCNI to datman and back
kcni_ident = scanid.parse(correct_kcni, settings)
dm_ident = scanid.parse(str(kcni_ident), settings)
assert str(dm_ident) == correct_datman
new_kcni = scanid.get_kcni_identifier(dm_ident, settings)
assert new_kcni.orig_id == correct_kcni
# Datman to KCNI and back
dm_ident = scanid.parse(correct_datman, settings)
kcni_ident = scanid.get_kcni_identifier(dm_ident, settings)
assert kcni_ident.orig_id == correct_kcni
new_dm = scanid.parse(str(kcni_ident), settings)
assert str(new_dm) == correct_datman
def test_kcni_get_xnat_subject_id_not_affected_by_field_translation():
settings = {
"STUDY": {
"ABC01": "ABCD"
}
}
pha = "ABC01_CMH_LEGPHA_0001_MR"
pha_ident = scanid.parse(pha, settings)
assert pha_ident.get_xnat_subject_id() == "ABC01_CMH_LEGPHA"
sub = "ABC01_CMH_12345678_01_SE02_MR"
sub_ident = scanid.parse(sub, settings)
assert sub_ident.get_xnat_subject_id() == "ABC01_CMH_12345678"
def test_kcni_get_xnat_experiment_id_not_affected_by_field_translations():
settings = {
"STUDY": {
"ABC01": "ABCD"
}
}
pha = "ABC01_CMH_LEGPHA_0001_MR"
pha_ident = scanid.parse(pha, settings)
assert pha_ident.get_xnat_experiment_id() == pha
sub = "ABC01_CMH_12345678_01_SE02_MR"
sub_ident = scanid.parse(sub, settings)
assert sub_ident.get_xnat_experiment_id() == sub
def test_is_scanid_garbage():
assert not scanid.is_scanid("garbage")
def test_is_scanid_subjectid_only():
assert not scanid.is_scanid("DTI_CMH_H001")
def test_is_scanid_extra_fields():
assert scanid.is_scanid("DTI_CMH_H001_01_01_01_01_01_01") is False
def test_is_datman_scanid_good():
assert scanid.is_scanid("SPN01_CMH_0002_01_01")
def test_is_kcni_scanid_good():
assert scanid.is_scanid("SPN01_CMH_0001_01_SE01_MR")
def test_is_scanid_good_when_already_parsed():
parsed = scanid.parse("DTI_CMH_H001_01_01")
assert scanid.is_scanid(parsed)
def test_is_scanid_with_session_when_already_parsed():
parsed = scanid.parse("OPT01_UT2_UT10001_01_01")
assert scanid.is_scanid_with_session(parsed)
def test_get_full_subjectid():
ident = scanid.parse("DTI_CMH_H001_01_02")
assert ident.get_full_subjectid() == "DTI_CMH_H001"
def test_subject_id_with_timepoint():
ident = scanid.parse("DTI_CMH_H001_01_02")
assert ident.get_full_subjectid_with_timepoint() == 'DTI_CMH_H001_01'
def test_PHA_timepoint():
ident = scanid.parse("DTI_CMH_PHA_ADN0001")
assert ident.get_full_subjectid_with_timepoint() == 'DTI_CMH_PHA_ADN0001'
def test_parse_filename():
ident, tag, series, description = scanid.parse_filename(
'DTI_CMH_H001_01_01_T1_03_description.nii.gz')
assert str(ident) == 'DTI_CMH_H001_01_01'
assert tag == 'T1'
assert series == '03'
assert description == 'description'
def test_parse_filename_parses_when_tag_contains_pha():
ident, tag, series, description = scanid.parse_filename(
"CLZ_CMP_0000_01_01_PHABCD_11_FieldMap-2mm")
assert str(ident) == "CLZ_CMP_0000_01_01"
assert tag == "PHABCD"
assert series == "11"
assert description == "FieldMap-2mm"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCPHA_11_FieldMap-2mm")
assert tag == "ABCPHA"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCPHADEF_11_FieldMap-2mm")
assert tag == "ABCPHADEF"
def test_parse_filename_parses_when_tag_contains_kcniish_MR_substring():
ident, tag, series, description = scanid.parse_filename(
"CLZ_CMP_0000_01_01_MRABC_11_FieldMap-2mm.nii.gz")
assert str(ident) == "CLZ_CMP_0000_01_01"
assert tag == "MRABC"
assert series == "11"
assert description == "FieldMap-2mm"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCMR_11_FieldMap-2mm")
assert tag == "ABCMR"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCMRDEF_11_FieldMap-2mm")
assert tag == "ABCMRDEF"
def test_parse_filename_parses_when_tag_contains_kcniish_SE_substring():
ident, tag, series, description = scanid.parse_filename(
"CLZ_CMP_0000_01_01_SEABC_11_FieldMap-2mm.nii.gz")
assert str(ident) == "CLZ_CMP_0000_01_01"
assert tag == "SEABC"
assert series == "11"
assert description == "FieldMap-2mm"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCSE_11_FieldMap-2mm")
assert tag == "ABCSE"
_, tag, _, _ = scanid.parse_filename(
"CLZ_CMP_0000_01_01_ABCSEDEF_11_FieldMap-2mm")
assert tag == "ABCSEDEF"
def test_parse_filename_PHA():
ident, tag, series, description = scanid.parse_filename(
'DTI_CMH_PHA_ADN0001_T1_02_description.nii.gz')
assert str(ident) == 'DTI_CMH_PHA_ADN0001'
assert tag == 'T1'
assert series == '02'
assert description == 'description'
def test_parse_filename_PHA_2():
ident, tag, series, description = scanid.parse_filename(
'SPN01_MRC_PHA_FBN0013_RST_04_EPI-3x3x4xTR2.nii.gz')
assert ident.study == 'SPN01'
assert ident.site == 'MRC'
assert ident.subject == 'PHA_FBN0013'
assert ident.timepoint == ''
assert ident.session == ''
assert str(ident) == 'SPN01_MRC_PHA_FBN0013'
assert tag == 'RST'
assert series == '04'
assert description == 'EPI-3x3x4xTR2'
def test_kcni_id_with_non_mr_modality_is_valid():
ident = scanid.parse("ABC01_CMH_0001_01_SE01_EEG")
assert ident.modality == "EEG"
def test_datman_ids_assigned_mr_modality():
ident = scanid.parse("ABC01_CMH_0001_01_01")
assert ident.modality == "MR"
ident = scanid.parse("ABC01_CMH_PHA_FBN0001")
assert ident.modality == "MR"
def test_parse_filename_with_path():
ident, tag, series, description = scanid.parse_filename(
'/data/DTI_CMH_H001_01_01_T1_02_description.nii.gz')
assert str(ident) == 'DTI_CMH_H001_01_01'
assert tag == 'T1'
assert series == '02'
assert description == 'description'
def test_parse_bids_filename():
ident = scanid.parse_bids_filename("sub-CMH0001_ses-01_run-1_T1w.nii.gz")
assert ident.subject == 'CMH0001'
assert ident.session == '01'
assert ident.run == '1'
assert ident.suffix == 'T1w'
def test_parse_bids_filename_with_full_path():
ident = scanid.parse_bids_filename(
"/some/folder/sub-CMH0001_ses-01_run-1_T1w.nii.gz")
assert ident.subject == 'CMH0001'
assert ident.session == '01'
assert ident.run == '1'
assert ident.suffix == 'T1w'
def test_parse_bids_filename_without_ext():
ident = scanid.parse_bids_filename(
"/some/folder/sub-CMH0001_ses-02_run-3_T1w")
assert ident.subject == 'CMH0001'
assert ident.session == '02'
assert ident.run == '3'
assert ident.suffix == 'T1w'
def test_parse_bids_filename_without_run():
scanid.parse_bids_filename("sub-CMH0001_ses-01_T1w.nii.gz")
def test_parse_bids_filename_missing_subject():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("ses-01_run-1_T1w")
def test_parse_bids_filename_malformed_subject():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("CMH0001_ses-01_run-1_T1w")
def test_parse_bids_filename_missing_session():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_run-1_T1w")
def test_parse_bids_filename_malformed_session():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_ses-_run-1_T1w")
def test_parse_bids_filename_missing_suffix():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_ses-01_run-1.nii.gz")
def test_parse_bids_filename_missing_suffix_and_run():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_ses-01.nii.gz")
def test_unknown_entity_does_not_get_set_as_suffix():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH_ses-01_new-FIELD_T1w.nii.gz")
def test_empty_entity_name_does_not_get_set_as_suffix():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH_ses-01_-FIELD_T1w.nii.gz")
def test_empty_entity_name_and_label_does_not_get_set_as_suffix():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH_ses-01_-_T1w.nii.gz")
def test_bids_file_raises_exception_when_wrong_entities_used_for_anat():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename(
"sub-CMH0001_ses-01_ce-somefield_dir-somedir"
"_run-1_T1w.nii.gz")
def test_bids_file_raises_exception_when_wrong_entities_used_for_task():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_ses-01_task-sometask_"
"ce-somefield_run-1_T1w.nii.gz")
def test_bids_file_raises_exception_when_wrong_entities_used_for_fmap():
with pytest.raises(scanid.ParseException):
scanid.parse_bids_filename("sub-CMH0001_ses-01_dir-somedir_"
"rec-somefield_run-1_T1w.nii.gz")
def test_optional_entities_dont_get_parsed_as_suffix():
optional_entities = "sub-CMH0001_ses-01_{}_T1w.nii.gz"
for entity in ['run', 'acq', 'ce', 'rec', 'echo', 'ce', 'mod', 'task']:
optional_field = '{}-11'.format(entity)
bids_name = optional_entities.format(optional_field)
parsed = scanid.parse_bids_filename(bids_name)
assert optional_field not in parsed.suffix
def test_bids_file_equals_string_of_itself():
bids_name = "sub-CMH0001_ses-01_run-1_T1w"
ident = scanid.parse_bids_filename(bids_name)
assert ident == bids_name
def test_bids_file_equals_string_of_itself_minus_run():
bids_name = "sub-CMH0001_ses-01_run-1_T1w"
ident = scanid.parse_bids_filename(bids_name)
assert ident == bids_name.replace("run-1_", "")
def test_bids_file_equals_itself_with_path_and_ext():
bids_name = "sub-CMH0001_ses-01_run-1_T1w"
bids_full_path = "/some/folder/somewhere/{}.nii.gz".format(bids_name)
ident = scanid.parse_bids_filename(bids_name)
assert ident == bids_full_path
def test_bids_file_correctly_parses_when_all_anat_entities_given():
anat_bids = "sub-CMH0001_ses-01_acq-abcd_ce-efgh_rec-ijkl_" + \
"run-1_mod-mnop_somesuffix"
parsed = scanid.parse_bids_filename(anat_bids)
assert str(parsed) == anat_bids
def test_bids_file_correctly_parses_when_all_task_entities_given():
task_bids = "sub-CMH0001_ses-01_task-abcd_acq-efgh_" + \
"rec-ijkl_run-1_echo-11_imi"
parsed = scanid.parse_bids_filename(task_bids)
assert str(parsed) == task_bids
def test_bids_file_correctly_parses_when_all_fmap_entities_given():
fmap_bids = "sub-CMH0001_ses-01_acq-abcd_dir-efgh_run-1_fmap"
parsed = scanid.parse_bids_filename(fmap_bids)
assert str(parsed) == fmap_bids
def test_bids_file_handles_prelapse_session_strings():
prelapse_file = "sub-BRG33006_ses-01R_run-1_something"
parsed = scanid.parse_bids_filename(prelapse_file)
assert str(parsed) == prelapse_file
```
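The ID handling exercised above boils down to a short round trip between the Datman and KCNI naming conventions. The sketch below is distilled from the assertions in these tests and is illustrative only, not part of the test suite.
```python
# Illustrative round trip between Datman- and KCNI-style IDs (based on the tests above).
import datman.scanid as scanid

dm_ident = scanid.parse("SPN01_CMH_0001_01_01")        # Datman-style session ID
kcni = scanid.get_kcni_identifier(dm_ident)            # KCNI-style equivalent
assert kcni.orig_id == "SPN01_CMH_0001_01_SE01_MR"
assert str(scanid.parse(kcni.orig_id)) == "SPN01_CMH_0001_01_01"  # and back again
```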
#### File: datman/tests/test_dm_link_project_scans.py
```python
import unittest
import logging
import importlib
from mock import patch
import datman.scanid
# Disable logging for tests
logging.disable(logging.CRITICAL)
link_scans = importlib.import_module('bin.dm_link_project_scans')
class CopyChecklistEntry(unittest.TestCase):
source = datman.scanid.parse("STUDY_CMH_ID1_01_01")
target = datman.scanid.parse("STUDY2_CMH_ID2_01_01")
path = "./target_checklist.csv"
@patch('datman.utils.update_checklist')
@patch('datman.utils.read_checklist')
def test_does_nothing_when_target_checklist_has_entry(self,
mock_read,
mock_update):
mock_read.side_effect = lambda subject=None: \
{self.target: "signed_off",
self.source: "signed_off"}[subject]
link_scans.copy_checklist_entry(self.source, self.target, self.path)
assert mock_update.call_count == 0
@patch('datman.utils.update_checklist')
@patch('datman.utils.read_checklist')
def test_does_nothing_when_no_relevant_entries_in_source(self,
mock_read,
mock_update):
mock_read.return_value = None
link_scans.copy_checklist_entry(self.source, self.target, self.path)
assert mock_update.call_count == 0
@patch('datman.utils.update_checklist')
@patch('datman.utils.read_checklist')
def test_updates_with_correct_entry(self, mock_read, mock_update):
comment = "signed_off"
mock_read.side_effect = lambda subject=None: \
{self.source: comment,
self.target: None}[subject]
link_scans.copy_checklist_entry(self.source, self.target, self.path)
expected_entry = {self.target: comment}
assert mock_update.call_count == 1
mock_update.assert_called_once_with(expected_entry, path=self.path)
class TagsMatch(unittest.TestCase):
tags = ["T1", "PDT2", "DTI60"]
def test_doesnt_crash_with_empty_line(self):
entry = ""
tags_match = link_scans.tags_match(entry, self.tags)
assert tags_match is False
def test_returns_false_with_unparseable_entry(self):
entry = "BAD_ID_01_DTI60_15_Ax-DTI-60plus5 --corrupted-data"
tags_match = link_scans.tags_match(entry, self.tags)
assert tags_match is False
def test_returns_false_with_excluded_tag(self):
entry = "STUDY_SITE_ID_01_01_T2_num_description --corrupted-data"
tags_match = link_scans.tags_match(entry, self.tags)
assert tags_match is False
def test_returns_true_with_matching_tag(self):
entry = "STUDY_SITE_ID_01_01_DTI60_15_Ax-DTI-60plus5 --corrupted-data"
tags_match = link_scans.tags_match(entry, self.tags)
assert tags_match is True
class CopyBlacklistData(unittest.TestCase):
source = 'STUDY_SITE_ID1_01_01'
source_list = './fake_dir/blacklist1.csv'
target = 'STUDY2_SITE_ID2_01_01'
target_list = './fake_dir/blacklist2.csv'
tags = ['T1', 'DTI60']
@patch('datman.utils.update_blacklist')
@patch('datman.utils.read_blacklist')
def test_does_nothing_without_source_blacklist_entries_to_copy(
self,
mock_read,
mock_update):
mock_read.return_value = {}
link_scans.copy_blacklist_data(self.source, self.source_list,
self.target, self.target_list,
self.tags)
assert mock_update.call_count == 0
@patch('datman.utils.update_blacklist')
@patch('datman.utils.read_blacklist')
def test_does_nothing_if_all_entries_present_in_target_blacklist(
self,
mock_read,
mock_update):
post_fix = "_DTI60_05_Ax-DTI-60plus5"
def mock_blacklist(subject, path):
if path == self.target_list and subject == self.target:
return {self.target + post_fix: '--corrupted'}
if path == self.source_list and subject == self.source:
return {self.source + post_fix: '--corrupted'}
return {}
mock_read.side_effect = mock_blacklist
link_scans.copy_blacklist_data(self.source, self.source_list,
self.target, self.target_list,
self.tags)
assert mock_update.call_count == 0
@patch('datman.utils.update_blacklist')
@patch('datman.utils.read_blacklist')
def test_adds_missing_entries_with_matched_tags(self,
mock_read,
mock_update):
fname1 = "_DTI60_05_Ax-DTI-60plus5"
fname2 = "_T1_06_SagT1Bravo"
fname3 = "_PDT2_07_OblAx-T2DEfseXL"
comments = ["--corrupted", "", "--corrupted"]
def mock_blacklist(subject, path):
if path == self.target_list and subject == self.target:
return self._make_entries(self.target,
[fname1], comments)
if path == self.source_list and subject == self.source:
return self._make_entries(self.source,
[fname1, fname2, fname3],
comments)
return {}
mock_read.side_effect = mock_blacklist
link_scans.copy_blacklist_data(self.source, self.source_list,
self.target, self.target_list,
self.tags)
missing_entries = self._make_entries(self.target,
[fname2],
[""])
assert mock_update.call_count == 1
mock_update.assert_called_once_with(missing_entries,
path=self.target_list)
def _make_entries(self, subject, fnames, comments):
entries = {}
for num, fname in enumerate(fnames):
entries[subject + fname] = comments[num]
return entries
```
#### File: datman/tests/test_dm_link_shared_ids.py
```python
import importlib
import unittest
import copy
import logging
# Disable all logging output for tests
logging.disable(logging.CRITICAL)
link_shared = importlib.import_module('bin.dm_link_shared_ids')
class TestRecord(unittest.TestCase):
mock_redcap_record = {'par_id': 'STUDY_SITE_0001_01_01',
'record_id': 0,
'shared_parid_1': 'STUDY_SITE_0002_01_01',
'shared_parid_2': 'STUDY2_CMH_9999_01_01',
'shared_parid_8': 'OTHER_CMH_1234_01_01',
'cmts': 'No comment.'}
mock_kcni_record = {'par_id': 'STU01_ABC_0001_01_SE01_MR',
'record_id': 1,
'shared_parid_1': 'STU02_ABC_0002_01_SE01_MR',
'shared_parid_2': 'STUDY3_ABC_0003_01_SE01_MR',
'cmts': 'Test comment.'}
def test_ignores_records_with_bad_subject_id(self):
bad_redcap_record = {'par_id': 'STUDY_0001_01',
'record_id': 0,
'shared_parid_1': '',
'cmts': ''}
record = link_shared.Record(bad_redcap_record)
assert record.id is None
assert record.study is None
assert not record.matches_study('STUDY')
def test_ignores_badly_named_shared_ids(self):
bad_shared_id = copy.copy(self.mock_redcap_record)
bad_id = 'STUDY_0001_01'
bad_shared_id['shared_parid_4'] = bad_id
record = link_shared.Record(bad_shared_id)
assert bad_id not in record.shared_ids
def test_finds_all_shared_ids_in_record(self):
record = link_shared.Record(self.mock_redcap_record)
expected = [self.mock_redcap_record['shared_parid_1'],
self.mock_redcap_record['shared_parid_2'],
self.mock_redcap_record['shared_parid_8']]
assert sorted(record.shared_ids) == sorted(expected)
def test_correctly_handles_kcni_main_id(self):
id_map = {
'STUDY': {
'STU01': 'STUDY',
},
'SITE': {
'ABC': 'SITE'
}
}
record = link_shared.Record(self.mock_kcni_record, id_map)
assert str(record.id) == 'STUDY_SITE_0001_01_01'
def test_correctly_handles_kcni_shared_ids(self):
id_map = {
'STUDY': {
'STU02': 'STUDY2',
},
'SITE': {
'ABC': 'SITE'
}
}
record = link_shared.Record(self.mock_kcni_record, id_map)
assert 'STUDY2_SITE_0002_01_01' in record.shared_ids
``` |
{
"source": "josephmje/niworkflows",
"score": 2
} |
#### File: niworkflows/interfaces/nibabel.py
```python
import numpy as np
import nibabel as nb
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
traits,
TraitedSpec,
BaseInterfaceInputSpec,
File,
SimpleInterface,
OutputMultiObject,
InputMultiObject,
)
IFLOGGER = logging.getLogger("nipype.interface")
class _ApplyMaskInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="an image")
in_mask = File(exists=True, mandatory=True, desc="a mask")
threshold = traits.Float(
0.5, usedefault=True, desc="a threshold to the mask, if it is nonbinary"
)
class _ApplyMaskOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="masked file")
class ApplyMask(SimpleInterface):
"""Mask the input given a mask."""
input_spec = _ApplyMaskInputSpec
output_spec = _ApplyMaskOutputSpec
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file)
msknii = nb.load(self.inputs.in_mask)
msk = msknii.get_fdata() > self.inputs.threshold
self._results["out_file"] = fname_presuffix(
self.inputs.in_file, suffix="_masked", newpath=runtime.cwd
)
if img.dataobj.shape[:3] != msk.shape:
raise ValueError("Image and mask sizes do not match.")
if not np.allclose(img.affine, msknii.affine):
raise ValueError("Image and mask affines are not similar enough.")
if img.dataobj.ndim == msk.ndim + 1:
msk = msk[..., np.newaxis]
masked = img.__class__(img.dataobj * msk, None, img.header)
masked.to_filename(self._results["out_file"])
return runtime
class _BinarizeInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="input image")
thresh_low = traits.Float(mandatory=True, desc="non-inclusive lower threshold")
class _BinarizeOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="masked file")
out_mask = File(exists=True, desc="output mask")
class Binarize(SimpleInterface):
"""Binarizes the input image applying the given thresholds."""
input_spec = _BinarizeInputSpec
output_spec = _BinarizeOutputSpec
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file)
self._results["out_file"] = fname_presuffix(
self.inputs.in_file, suffix="_masked", newpath=runtime.cwd
)
self._results["out_mask"] = fname_presuffix(
self.inputs.in_file, suffix="_mask", newpath=runtime.cwd
)
data = img.get_fdata()
mask = data > self.inputs.thresh_low
data[~mask] = 0.0
masked = img.__class__(data, img.affine, img.header)
masked.to_filename(self._results["out_file"])
img.header.set_data_dtype("uint8")
maskimg = img.__class__(mask.astype("uint8"), img.affine, img.header)
maskimg.to_filename(self._results["out_mask"])
return runtime
class _SplitSeriesInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="input 4d image")
class _SplitSeriesOutputSpec(TraitedSpec):
out_files = OutputMultiObject(File(exists=True), desc="output list of 3d images")
class SplitSeries(SimpleInterface):
"""Split a 4D dataset along the last dimension into a series of 3D volumes."""
input_spec = _SplitSeriesInputSpec
output_spec = _SplitSeriesOutputSpec
def _run_interface(self, runtime):
in_file = self.inputs.in_file
img = nb.load(in_file)
extra_dims = tuple(dim for dim in img.shape[3:] if dim > 1) or (1,)
if len(extra_dims) != 1:
raise ValueError(f"Invalid shape {'x'.join(str(s) for s in img.shape)}")
img = img.__class__(
img.dataobj.reshape(img.shape[:3] + extra_dims), img.affine, img.header
)
self._results["out_files"] = []
for i, img_3d in enumerate(nb.four_to_three(img)):
out_file = fname_presuffix(
in_file, suffix=f"_idx-{i:03}", newpath=runtime.cwd
)
img_3d.to_filename(out_file)
self._results["out_files"].append(out_file)
return runtime
class _MergeSeriesInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiObject(
File(exists=True, mandatory=True, desc="input list of 3d images")
)
allow_4D = traits.Bool(
True, usedefault=True, desc="whether 4D images are allowed to be concatenated"
)
class _MergeSeriesOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output 4d image")
class MergeSeries(SimpleInterface):
"""Merge a series of 3D volumes along the last dimension into a single 4D image."""
input_spec = _MergeSeriesInputSpec
output_spec = _MergeSeriesOutputSpec
def _run_interface(self, runtime):
nii_list = []
for f in self.inputs.in_files:
filenii = nb.squeeze_image(nb.load(f))
ndim = filenii.dataobj.ndim
if ndim == 3:
nii_list.append(filenii)
continue
elif self.inputs.allow_4D and ndim == 4:
nii_list += nb.four_to_three(filenii)
continue
else:
raise ValueError(
"Input image has an incorrect number of dimensions" f" ({ndim})."
)
img_4d = nb.concat_images(nii_list)
out_file = fname_presuffix(
self.inputs.in_files[0], suffix="_merged", newpath=runtime.cwd
)
img_4d.to_filename(out_file)
self._results["out_file"] = out_file
return runtime
class _RegridToZoomsInputSpec(BaseInterfaceInputSpec):
in_file = File(
exists=True, mandatory=True, desc="a file whose resolution is to change"
)
zooms = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
mandatory=True,
desc="the new resolution",
)
order = traits.Int(3, usedefault=True, desc="order of interpolator")
clip = traits.Bool(
True,
usedefault=True,
desc="clip the data array within the original image's range",
)
smooth = traits.Either(
traits.Bool(),
traits.Float(),
default=False,
usedefault=True,
desc="apply gaussian smoothing before resampling",
)
class _RegridToZoomsOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="the regridded file")
class RegridToZooms(SimpleInterface):
"""Change the resolution of an image (regrid)."""
input_spec = _RegridToZoomsInputSpec
output_spec = _RegridToZoomsOutputSpec
def _run_interface(self, runtime):
from ..utils.images import resample_by_spacing
self._results["out_file"] = fname_presuffix(
self.inputs.in_file, suffix="_regrid", newpath=runtime.cwd
)
resample_by_spacing(
self.inputs.in_file,
self.inputs.zooms,
order=self.inputs.order,
clip=self.inputs.clip,
smooth=self.inputs.smooth,
).to_filename(self._results["out_file"])
return runtime
class _DemeanImageInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="image to be demeaned")
in_mask = File(
exists=True, mandatory=True, desc="mask where median will be calculated"
)
only_mask = traits.Bool(False, usedefault=True, desc="demean only within mask")
class _DemeanImageOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="demeaned image")
class DemeanImage(SimpleInterface):
input_spec = _DemeanImageInputSpec
output_spec = _DemeanImageOutputSpec
def _run_interface(self, runtime):
from ..utils.images import demean
self._results["out_file"] = demean(
self.inputs.in_file,
self.inputs.in_mask,
only_mask=self.inputs.only_mask,
newpath=runtime.cwd,
)
return runtime
class _FilledImageLikeInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="image to be demeaned")
fill_value = traits.Float(1.0, usedefault=True, desc="value to fill")
dtype = traits.Enum(
"float32", "uint8", usedefault=True, desc="force output data type"
)
class _FilledImageLikeOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="demeaned image")
class FilledImageLike(SimpleInterface):
input_spec = _FilledImageLikeInputSpec
output_spec = _FilledImageLikeOutputSpec
def _run_interface(self, runtime):
from ..utils.images import nii_ones_like
self._results["out_file"] = nii_ones_like(
self.inputs.in_file,
self.inputs.fill_value,
self.inputs.dtype,
newpath=runtime.cwd,
)
return runtime
class _GenerateSamplingReferenceInputSpec(BaseInterfaceInputSpec):
fixed_image = File(
exists=True, mandatory=True, desc="the reference file, defines the FoV"
)
moving_image = File(exists=True, mandatory=True, desc="the pixel size reference")
xform_code = traits.Enum(None, 2, 4, usedefault=True, desc="force xform code")
fov_mask = traits.Either(
None,
File(exists=True),
usedefault=True,
desc="mask to clip field of view (in fixed_image space)",
)
keep_native = traits.Bool(
True,
usedefault=True,
desc="calculate a grid with native resolution covering "
"the volume extent given by fixed_image, fast forward "
"fixed_image otherwise.",
)
class _GenerateSamplingReferenceOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="one file with all inputs flattened")
class GenerateSamplingReference(SimpleInterface):
"""
Generates a reference grid for resampling one image keeping original resolution,
but moving data to a different space (e.g. MNI).
If the `fov_mask` optional input is provided, then the :abbr:`FoV (field-of-view)`
is cropped to a bounding box containing the brain mask plus an offset of two
voxels along all dimensions. The `fov_mask` should be the brain mask calculated
from the T1w, and should not contain the brain stem. The mask is resampled into
target space, and then the bounding box is calculated. Finally, the FoV is adjusted
to that bounding box.
"""
input_spec = _GenerateSamplingReferenceInputSpec
output_spec = _GenerateSamplingReferenceOutputSpec
def _run_interface(self, runtime):
if not self.inputs.keep_native:
self._results["out_file"] = self.inputs.fixed_image
return runtime
from .. import __version__
self._results["out_file"] = _gen_reference(
self.inputs.fixed_image,
self.inputs.moving_image,
fov_mask=self.inputs.fov_mask,
force_xform_code=self.inputs.xform_code,
message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__),
newpath=runtime.cwd,
)
return runtime
class _IntensityClipInputSpec(BaseInterfaceInputSpec):
in_file = File(
exists=True, mandatory=True, desc="3D file which intensity will be clipped"
)
p_min = traits.Float(35.0, usedefault=True, desc="percentile for the lower bound")
p_max = traits.Float(99.98, usedefault=True, desc="percentile for the upper bound")
nonnegative = traits.Bool(
True, usedefault=True, desc="whether input intensities must be positive"
)
dtype = traits.Enum(
"int16", "float32", "uint8", usedefault=True, desc="output datatype"
)
invert = traits.Bool(False, usedefault=True, desc="finalize by inverting contrast")
class _IntensityClipOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="file after clipping")
class IntensityClip(SimpleInterface):
"""Clip the intensity range as prescribed by the percentiles."""
input_spec = _IntensityClipInputSpec
output_spec = _IntensityClipOutputSpec
def _run_interface(self, runtime):
self._results["out_file"] = _advanced_clip(
self.inputs.in_file,
p_min=self.inputs.p_min,
p_max=self.inputs.p_max,
nonnegative=self.inputs.nonnegative,
dtype=self.inputs.dtype,
invert=self.inputs.invert,
newpath=runtime.cwd,
)
return runtime
def _gen_reference(
fixed_image,
moving_image,
fov_mask=None,
out_file=None,
message=None,
force_xform_code=None,
newpath=None,
):
"""Generate a sampling reference, and makes sure xform matrices/codes are correct."""
import nilearn.image as nli
if out_file is None:
out_file = fname_presuffix(
fixed_image, suffix="_reference", newpath=newpath
)
# Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial)
reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image))
new_zooms = reoriented_moving_img.header.get_zooms()[:3]
# Avoid letting small differences in reported resolution cause changes to
# the FOV. See https://github.com/nipreps/fmriprep/issues/512
# A positive diagonal affine is RAS, hence the need to reorient above.
new_affine = np.diag(np.round(new_zooms, 3))
resampled = nli.resample_img(
fixed_image, target_affine=new_affine, interpolation="nearest"
)
if fov_mask is not None:
# If we have a mask, resample again dropping (empty) samples
# out of the FoV.
fixednii = nb.load(fixed_image)
masknii = nb.load(fov_mask)
if np.all(masknii.shape[:3] != fixednii.shape[:3]):
raise RuntimeError("Fixed image and mask do not have the same dimensions.")
if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5):
raise RuntimeError("Fixed image and mask have different affines")
# Get mask into reference space
masknii = nli.resample_img(
masknii, target_affine=new_affine, interpolation="nearest"
)
res_shape = np.array(masknii.shape[:3])
# Calculate a bounding box for the input mask
# with an offset of 2 voxels per face
bbox = np.argwhere(np.asanyarray(masknii.dataobj) > 0)
new_origin = np.clip(bbox.min(0) - 2, a_min=0, a_max=None)
new_end = np.clip(bbox.max(0) + 2, a_min=0, a_max=res_shape - 1)
# Find new origin, and set into new affine
new_affine_4 = resampled.affine.copy()
new_affine_4[:3, 3] = new_affine_4[:3, :3].dot(new_origin) + new_affine_4[:3, 3]
# Calculate new shapes
new_shape = new_end - new_origin + 1
resampled = nli.resample_img(
fixed_image,
target_affine=new_affine_4,
target_shape=new_shape.tolist(),
interpolation="nearest",
)
xform = resampled.affine # nibabel will pick the best affine
_, qform_code = resampled.header.get_qform(coded=True)
_, sform_code = resampled.header.get_sform(coded=True)
xform_code = sform_code if sform_code > 0 else qform_code
if xform_code == 1:
xform_code = 2
if force_xform_code is not None:
xform_code = force_xform_code
# Keep 0, 2, 3, 4 unchanged
resampled.header.set_qform(xform, int(xform_code))
resampled.header.set_sform(xform, int(xform_code))
resampled.header["descrip"] = "reference image generated by %s." % (
message or "(unknown software)"
)
resampled.to_filename(out_file)
return out_file
def _advanced_clip(
in_file, p_min=35, p_max=99.98, nonnegative=True, dtype="int16", invert=False, newpath=None,
):
"""
Remove outliers at both ends of the intensity distribution and fit into a given dtype.
This interface tries to emulate ANTs workflows' massaging, which truncates images into
the 0-255 range and applies percentiles for clipping images.
For image registration, normalizing the intensity into a compact range (e.g., uint8)
is generally advised.
To determine the clipping thresholds more robustly, spikes are first removed from the
data with a median filter.
Once the thresholds are calculated, the denoised data are discarded and the thresholds
are applied to the original image.
"""
from pathlib import Path
import nibabel as nb
import numpy as np
from scipy import ndimage
from skimage.morphology import ball
out_file = (Path(newpath or "") / "clipped.nii.gz").absolute()
# Load data
img = nb.squeeze_image(nb.load(in_file))
if len(img.shape) != 3:
raise RuntimeError(f"<{in_file}> is not a 3D file.")
data = img.get_fdata(dtype="float32")
# Calculate stats on denoised version, to preempt outliers from biasing
denoised = ndimage.median_filter(data, footprint=ball(3))
a_min = np.percentile(
denoised[denoised > 0] if nonnegative else denoised,
p_min
)
a_max = np.percentile(
denoised[denoised > 0] if nonnegative else denoised,
p_max
)
# Clip and cast
data = np.clip(data, a_min=a_min, a_max=a_max)
data -= data.min()
data /= data.max()
if invert:
data = 1.0 - data
if dtype in ("uint8", "int16"):
data = np.round(255 * data).astype(dtype)
hdr = img.header.copy()
hdr.set_data_dtype(dtype)
img.__class__(data, img.affine, hdr).to_filename(out_file)
return str(out_file)
``` |
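The interfaces above all follow nipype's `SimpleInterface` pattern, so they can be run standalone outside a workflow. The sketch below is an assumed usage example: the module path is inferred from the file path above and the input file names are hypothetical placeholders.
```python
# Illustrative only: run ApplyMask as a standalone nipype interface.
from niworkflows.interfaces.nibabel import ApplyMask  # module path inferred from the file above

apply_mask = ApplyMask()
apply_mask.inputs.in_file = "mean_bold.nii.gz"    # hypothetical input image
apply_mask.inputs.in_mask = "brain_mask.nii.gz"   # hypothetical mask on the same grid
apply_mask.inputs.threshold = 0.5                 # binarize a non-binary mask at 0.5
result = apply_mask.run()
print(result.outputs.out_file)                    # path to the "*_masked" output image
```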