{
"source": "jeremiahbaclig/A-Star_FinalProj",
"score": 3
}
#### File: jeremiahbaclig/A-Star_FinalProj/buttons.py
```python
import pygame
import constants
# draws buttons
def draw_rect(surface):
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button110)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button120)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button130)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button210)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button220)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button230)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button310)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button320)
pygame.draw.rect(surface, constants.SKY_BLUE, constants.button330)
pygame.draw.rect(surface, constants.VIOLET_RED, constants.button_quit)
# defines text and blits to screen
def text():
from visualization import surface
mono_font = pygame.font.SysFont("monospace", 25)
dimension_font = pygame.font.SysFont("monospace", 14)
label_per = mono_font.render("|10%|20%|30%|", 1, constants.WHITE)
label100 = dimension_font.render("-----100x100-----", 1, constants.WHITE)
label200 = dimension_font.render("-----200x200-----", 1, constants.WHITE)
label300 = dimension_font.render("-----300x300-----", 1, constants.WHITE)
label_quit = mono_font.render("QUIT", 1, constants.WHITE)
surface.blit(label_per, (800, 50))
surface.blit(label100, (825, 160))
surface.blit(label200, (825, 360))
surface.blit(label300, (825, 560))
surface.blit(label_quit, (860, 760))
# handles all button clicks and has inner function to reset constants needed for ARA*
def button_press(event, surface):
from visualization import choice
def clear_constants():
constants.SUM = 0
constants.PATH = []
constants.PATH_DIST = []
constants.OBSTACLES = []
constants.W0 = 9
constants.INFLATION = 10
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
if constants.button110.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button110)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W, constants.ENDING, constants.P10, constants.GRID)
clear_constants()
elif constants.button120.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button120)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W, constants.ENDING, constants.P20, constants.GRID)
clear_constants()
elif constants.button130.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button130)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W, constants.ENDING, constants.P30, constants.GRID)
clear_constants()
elif constants.button210.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button210)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W2, constants.ENDING2, constants.P10, constants.GRID2)
clear_constants()
elif constants.button220.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button220)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W2, constants.ENDING2, constants.P20, constants.GRID2)
clear_constants()
elif constants.button230.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button230)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W2, constants.ENDING2, constants.P30, constants.GRID2)
clear_constants()
elif constants.button310.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button310)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W3, constants.ENDING3, constants.P10, constants.GRID3)
clear_constants()
elif constants.button320.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button320)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W3, constants.ENDING3, constants.P20, constants.GRID3)
clear_constants()
elif constants.button330.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.GREEN, constants.button330)
pygame.draw.rect(surface, constants.BLACK, (0, 0, constants.WIDTH + 1, constants.HEIGHT + 1))
pygame.display.update()
choice(constants.W3, constants.ENDING3, constants.P30, constants.GRID3)
clear_constants()
elif constants.button_quit.collidepoint(mouse_pos):
constants.END = True
else:
draw_rect(surface)
pygame.display.update()
# handles color change on mouse hover over buttons
def button_hover(surface):
mouse_pos = pygame.mouse.get_pos()
if constants.button110.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button110)
pygame.display.update()
elif constants.button120.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button120)
pygame.display.update()
elif constants.button130.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button130)
pygame.display.update()
elif constants.button210.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button210)
pygame.display.update()
elif constants.button220.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button220)
pygame.display.update()
elif constants.button230.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button230)
pygame.display.update()
elif constants.button310.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button310)
pygame.display.update()
elif constants.button320.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button320)
pygame.display.update()
elif constants.button330.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.POWDER_BLUE, constants.button330)
pygame.display.update()
elif constants.button_quit.collidepoint(mouse_pos):
pygame.draw.rect(surface, constants.PINK, constants.button_quit)
pygame.display.update()
else:
draw_rect(surface)
pygame.display.update()
```
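The two drawing helpers above repeat one `pygame.draw.rect` call per button constant. A minimal, hypothetical sketch of a data-driven alternative (the rect list and colour below are stand-ins for the project's `constants` module, not part of it):
```python
# Sketch only: collapse per-button draw calls into one loop over a rect list.
import pygame

pygame.init()
surface = pygame.display.set_mode((200, 200))

SKY_BLUE = (135, 206, 235)                      # illustrative colour value
buttons = [pygame.Rect(10, 10 + 30 * i, 80, 20) for i in range(3)]

def draw_buttons(surface, rects, color):
    """Draw every button rect in a single pass."""
    for rect in rects:
        pygame.draw.rect(surface, color, rect)

draw_buttons(surface, buttons, SKY_BLUE)
pygame.display.update()
```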
#### File: jeremiahbaclig/A-Star_FinalProj/visualization.py
```python
import pygame, sys, random, math, time
import constants
from buttons import draw_rect
from buttons import button_hover
from buttons import button_press
from buttons import text
# Randomly generates obstacles, draws them red, and returns their coordinates
def random_fill(x, y, w, p):
obstacle = (x, y)
rand = random.randint(0, 50)
if rand < p:
pygame.draw.rect(surface, constants.RED, (x, y, w, w))
return obstacle
# draws in the correctly sized grid and calls random_fill() for obstacles
def draw(w, p, grid):
obst_list = []
x, y = 0, 0
for row in grid:
for col in row:
pygame.draw.rect(surface, constants.BLUE, (x, y, w, w), 1)
if x == 0 and y == 0:
pygame.draw.rect(surface, constants.GREEN, (x, y, w, w))
pass
elif x == 792 and y == 792 or x == 796 and y == 796 or x == constants.END_3X and y == constants.END_3Y:
continue
else:
val = random_fill(x, y, w, p)
if val is not None:
obst_list.append(val)
pygame.display.update()
x = x + w
y = y + w
x = 0
return obst_list
# straight line distance used for g
def distance(nx, ny, gx, gy):
g = math.sqrt((abs(gx - nx) ** 2) + (abs(gy - ny) ** 2))
return g # + h
# square root of the Manhattan distance, used for h
def manhattan(nx, ny, gx, gy):
h = math.sqrt(abs(nx - gx) + abs(ny - gy))
return h
# Generates all neighbors of the current node and removes those that are obstacles or have already been
# visited. Applies the heuristic to the remaining neighbors and moves to the one with the minimum f score.
# Recursively calls itself and stores the path taken for the repairing method.
def astar(x, y, blocked, end, w):
current = (x, y)
all_neighbors = [(x + w, y), (x, y + w), (x + w, y + w),
(x - w, y - w), (x - w, y), (x - w, y + w),
(x, y - w), (x + w, y - w)]
for i in blocked:
if i in all_neighbors:
all_neighbors.remove(i)
for i in constants.PATH:
if i in all_neighbors:
all_neighbors.remove(i)
neighbor_list1 = heuristic(all_neighbors, end)
try:
shortest = min(neighbor_list1, key=neighbor_list1.get)
constants.SUM += neighbor_list1.get(shortest)
for val, key in neighbor_list1.items():
if 0 <= val[0] < 800 and 0 <= val[1] < 800:
if val == shortest:
current = val
pygame.draw.rect(surface, constants.GREEN, (*current, w, w))
pygame.time.wait(1)
pygame.display.update()
constants.PATH_DIST.append(key)
try:
current_index = constants.PATH_DIST.index(key)
if constants.PATH_DIST[current_index] > constants.PATH_DIST[current_index - 3]:
if (constants.PATH_DIST[current_index] - constants.PATH_DIST[current_index - 3]) < 100:
blocked.append(current)
except IndexError:
continue
except ValueError:
pass
constants.PATH.append(current)
try:
if current != end:
astar(*current, blocked, end, w)
except RecursionError:
current_id = constants.PATH.index(current)
if current != constants.START and constants.PATH[current_id - 1] != constants.START:
blocked.append(current)
blocked.append(constants.PATH[current_id - 1])
# print("(R)")
return constants.SUM, constants.PATH
# Takes the neighbor list and stores each coordinate with its calculated f score in a dictionary, which it returns.
def heuristic(neighbors, end):
neighbor_list = {}
counter = 0
if counter != len(neighbors):
for i in neighbors:
dist = distance(*i, *end) + (constants.INFLATION * manhattan(*i, *end)) # CONSTANT ENDING
neighbor_list[i] = dist
counter += 1
return neighbor_list
# Visually clears the path that was taken, ready for the next iteration.
def clear(path, w):
for i in path:
pygame.draw.rect(surface, constants.SEA_GREEN, (*i, w, w))
# Iterates while W0 decrements; the reduced inflation e is applied to the heuristic on each pass
def repairing(path_sum, blocked, path, end, w):
start_time = time.time()
while constants.W0 > 0:
clear(path, w)
pygame.draw.rect(surface, constants.GREEN, (*end, w, w))
pygame.display.update()
constants.PATH.clear()
sum_next = astar(*constants.START, blocked, end, w)
half_val = math.floor(sum_next[0] / 2)
if sum_next[0] < path_sum:
clear(path, w)
pygame.display.update()
elif half_val == math.floor(path_sum):
break
if constants.INFLATION >= 1:
constants.INFLATION -= 1
constants.W0 -= constants.W1
print("RUN TIME: %s seconds" % (time.time() - start_time))
# called based on button press
def choice(w, end, p, grid):
start_time = time.time()
constants.OBSTACLES = draw(w, p, grid)
print("GRID GENERATION: %s seconds" % (time.time() - start_time))
traveled = astar(*constants.START, constants.OBSTACLES, end, w)
repairing(traveled[0], constants.OBSTACLES, traveled[1], end, w)
pygame.display.update()
# main function
def main():
surface.fill(constants.BLACK)
text()
while constants.END is False:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
button_press(event, surface)
if event.type == pygame.QUIT:
sys.exit()
draw_rect(surface)
button_hover(surface)
pygame.init()
surface = pygame.display.set_mode((constants.WIDTH + 200, constants.HEIGHT))
main()
```
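The `heuristic()`/`repairing()` pair above implements an ARA*-style scheme: candidates are scored with an inflated heuristic, f = g + e*h, and the inflation (`constants.INFLATION`) is decremented on every repair pass so the search approaches plain A*. A standalone sketch of that weighted scoring (note the project's `manhattan()` actually returns the square root of the Manhattan distance; a conventional Manhattan distance is used here):
```python
# Illustration only: how the inflated f-score shrinks as the inflation decays.
import math

def euclidean(a, b):
    return math.dist(a, b)

def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

node, goal = (0, 0), (3, 4)
for epsilon in range(10, 0, -1):        # mirrors INFLATION counting down to 1
    f = euclidean(node, goal) + epsilon * manhattan(node, goal)
    print(f"epsilon={epsilon:2d}  f={f:.2f}")
```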
{
"source": "Jeremiah-Bergkvist/ProjectEuler",
"score": 3
}
#### File: Jeremiah-Bergkvist/ProjectEuler/e30.py
```python
def main():
# Maximum value needed to calculate
# 9**5 == 59049
# 59049 * 5 == 295245
power = 5
start = 2
stop = power * 9**5
matches = []
for x in xrange(start, stop+1):
xstr = str(x)
xsum = 0
for y in xrange(len(xstr)):
xsum += int(xstr[y]) ** power
if xsum > x:
break
if xsum == x:
matches.append(xsum)
xsum = 0
for x in xrange(len(matches)):
print "[!]", matches[x]
xsum += matches[x]
print "sum", xsum
if __name__ == "__main__":
main()
```
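`e30.py` targets Python 2 (`xrange`, `print` statements). A hedged Python 3 sketch of the same digit fifth-power search, keeping the original's `5 * 9**5` upper bound:
```python
# Python 3 port sketch of the search above; names are illustrative.
def digit_power_sum_matches(power: int = 5) -> list[int]:
    upper = power * 9 ** power              # 5 * 59049 = 295245, as in e30.py
    return [n for n in range(2, upper + 1)
            if n == sum(int(d) ** power for d in str(n))]

if __name__ == "__main__":
    matches = digit_power_sum_matches()
    print(matches, sum(matches))            # the six matches sum to 443839
```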
#### File: Jeremiah-Bergkvist/ProjectEuler/e36.py
```python
import timeit
def e36():
s = 0
n = 1
while n < 1000000:
binary = format(n, 'b')
decimal = str(n)
if decimal == decimal[::-1] and binary == binary[::-1]:
s += n
print "Palindrome: %s[10] - %s[2]" %(decimal, binary)
n += 2
print "Sum:", s
def main():
print "---------- e36() ----------"
e36()
print "---------------------------"
if __name__ == "__main__":
main()
```
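Likewise, a compact Python 3 sketch of the double-base palindrome sum computed by `e36()` (only odd numbers need checking, since a binary palindrome cannot end in 0):
```python
# Python 3 sketch of e36(): sum numbers below one million that are palindromic
# in both base 10 and base 2.
total = sum(
    n for n in range(1, 1_000_000, 2)
    if str(n) == str(n)[::-1] and format(n, "b") == format(n, "b")[::-1]
)
print("Sum:", total)
```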
#### File: Jeremiah-Bergkvist/ProjectEuler/helper.py
```python
import math
import sys
'''from decimal import Decimal
from decimal import getcontext
getcontext().prec = 1000'''
# Move up to previous line and clear it
def up_clear():
sys.stdout.write("\033[F\033[2K")
def find_recurring_sequences(s):
length = len(s)
max_width = length // 2
matches = []
width = 1
while width <= max_width:
index = 0
while index + width < length:
if s[index:index+width] == s[index+width: index+width+width]:
#print "[%d] (%d, %d) (%s, %s) %s == %s" % (width, index, index+width-1, index+width, index+width+width-1, s[index:index+width], s[index+width: index+width+width])
matches.append(s[index:index+width])
break
index += 1
width += 1
return matches
def shortest_common_string(s):
seqs = find_recurring_sequences(s)
if len(seqs) < 2:
return None
while len(seqs) > 1:
rep = len(seqs[1]) / len(seqs[0])
if seqs[0] * rep == seqs[1]:
seqs.pop(1)
else:
seqs.pop(0)
return seqs[0]
def rwh_primes1(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
""" Returns a list of primes < n """
sieve = [True] * (n/2)
for i in xrange(3,int(n**0.5)+1,2):
if sieve[i/2]:
sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)
return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]]
def appendEs2Sequences(sequences,es):
result=[]
if not sequences:
for e in es:
result.append([e])
else:
for e in es:
result+=[seq+[e] for seq in sequences]
return result
def cartesianproduct(lists):
"""
given a list of lists,
returns all the possible combinations taking one element from each list
The list does not have to be of equal length
"""
return reduce(appendEs2Sequences,lists,[])
def primefactors(n):
'''lists prime factors, from greatest to smallest'''
i = 2
while i<=math.sqrt(n):
if n%i==0:
l = primefactors(n/i)
l.append(i)
return l
i+=1
return [n]
def factorGenerator(n):
p = primefactors(n)
factors={}
for p1 in p:
try:
factors[p1]+=1
except KeyError:
factors[p1]=1
return factors
def divisors(n):
factors = factorGenerator(n)
divisors=[]
listexponents=[map(lambda x:k**x,range(0,factors[k]+1)) for k in factors.keys()]
listfactors=cartesianproduct(listexponents)
for f in listfactors:
divisors.append(reduce(lambda x, y: x*y, f, 1))
divisors.sort()
return divisors
```
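`helper.py` is also Python 2 code (`xrange`, `print` statements, integer `/`). A Python 3 sketch of the same divisor-listing idea, with illustrative names:
```python
# Sketch: list all divisors of n by expanding its prime factorisation,
# mirroring primefactors()/factorGenerator()/divisors() above.
from itertools import product
from math import isqrt

def prime_factor_counts(n: int) -> dict[int, int]:
    counts, i = {}, 2
    while i <= isqrt(n):
        while n % i == 0:
            counts[i] = counts.get(i, 0) + 1
            n //= i
        i += 1
    if n > 1:
        counts[n] = counts.get(n, 0) + 1
    return counts

def list_divisors(n: int) -> list[int]:
    exponents = [[p ** e for e in range(k + 1)]
                 for p, k in prime_factor_counts(n).items()]
    result = []
    for combo in product(*exponents):
        d = 1
        for factor in combo:
            d *= factor
        result.append(d)
    return sorted(result)

print(list_divisors(28))   # [1, 2, 4, 7, 14, 28]
```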
{
"source": "jeremiahbrem/courtbot-python",
"score": 2
}
#### File: management/commands/send_alerts.py
```python
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from ...models import Alert
from twilio.rest import Client
from decouple import config
TWILIO_ACCOUNT_SID = config('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = config('TWILIO_AUTH_TOKEN')
TWILIO_FROM_NUMBER = config('TWILIO_FROM_NUMBER')
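# Note: TWILIO_FROM_NUMBER is read from settings here, but the send loop below
# passes a hard-coded from_ number instead of this value.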
class Command(BaseCommand):
help = 'Sends un-sent alerts for the current hour'
def handle(self, *args, **options):
unsent_alerts = Alert.objects.filter(sent=False, when=datetime.today())
client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
for unsent_alert in unsent_alerts:
message = client.messages.create(
to=str(unsent_alert.to),
from_="+19189134069",
body=unsent_alert.what)
unsent_alert.sent = True
unsent_alert.save()
print('Sent message "' + unsent_alert.what + '" to ' + str(unsent_alert.to))
```
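A hedged note on invocation: a management command in `management/commands/send_alerts.py` is run as `python manage.py send_alerts` (for example from cron), or programmatically via Django's `call_command`, assuming the project settings and Twilio credentials are configured:
```python
# Requires a configured Django project; shown only to illustrate invocation.
from django.core.management import call_command

call_command("send_alerts")
```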
{
"source": "jeremiahdc23/ReCirq",
"score": 3
}
#### File: recirq/quantum_chess/move.py
```python
import recirq.quantum_chess.enums as enums
_ORD_A = ord('a')
def to_rank(x: int) -> str:
"""Returns the algebraic notation rank from the x coordinate."""
return chr(_ORD_A + x)
def to_square(x: int, y: int) -> str:
"""Returns the algebraic notation of a square."""
return chr(_ORD_A + x) + str(y + 1)
def x_of(square: str) -> int:
"""Returns x coordinate of an algebraic notation square (e.g. 'f4')."""
return ord(square[0]) - _ORD_A
def y_of(square: str) -> int:
"""Returns y coordinate of an algebraic notation square (e.g. 'f4')."""
return int(square[1]) - 1
class Move:
"""Container class that has the source and target of a quantum chess move.
If the move is a split move, it will have a target2. If a merge move,
it will have a source2 attribute.
For moves that are input from the quantum chess board API, this will
have a move type and variant that determines what kind of move this is
(capture, exclusion, etc).
"""
def __init__(self,
source: str,
target: str,
*,
source2: str = None,
target2: str = None,
move_type: enums.MoveType = None,
move_variant: enums.MoveVariant = None,
measurement: int = None):
self.source = source
self.source2 = source2
self.target = target
self.target2 = target2
self.move_type = move_type
self.move_variant = move_variant
self.measurement = measurement
def __eq__(self, other):
if isinstance(other, Move):
return (self.source == other.source and self.target == other.target
and self.target2 == other.target2
and self.move_type == other.move_type
and self.move_variant == other.move_variant
and self.measurement == other.measurement)
return False
@classmethod
def from_string(cls, str_to_parse: str):
"""Creates a move from a string shorthand for tests.
Format=source,target,target2,source2[.measurement]:type:variant
with commas omitted.
if target2 is specified, then source2 should
be '--'
Examples:
'a1a2:JUMP:BASIC'
'b1a3c3:SPLIT_JUMP:BASIC'
'b1a3c3.m0:SPLIT_JUMP:BASIC'
'b1a3c3.m1:SPLIT_JUMP:BASIC'
'a3b1--c3:MERGE_JUMP:BASIC'
"""
fields = str_to_parse.split(':')
if len(fields) != 3:
raise ValueError(f'Invalid move string {str_to_parse}')
source = fields[0][0:2]
target = fields[0][2:4]
move_and_measurement = fields[0].split('.', maxsplit=1)
measurement = None
if len(move_and_measurement) == 2:
_, m_str = move_and_measurement
if m_str[0] != 'm':
raise ValueError(f'Invalid measurement string {m_str}')
measurement = int(m_str[1:])
move_type = enums.MoveType[fields[1]]
move_variant = enums.MoveVariant[fields[2]]
if len(fields[0]) <= 4:
return cls(source,
target,
move_type=move_type,
move_variant=move_variant,
measurement=measurement)
if len(fields[0]) <= 6:
return cls(source,
target,
target2=fields[0][4:6],
move_type=move_type,
move_variant=move_variant,
measurement=measurement)
return cls(source,
target,
source2=fields[0][6:8],
move_type=move_type,
move_variant=move_variant,
measurement=measurement)
def is_split_move(self) -> bool:
return self.target2 is not None
def is_merge_move(self) -> bool:
return self.source2 is not None
def has_measurement(self) -> bool:
return self.measurement is not None
def __str__(self):
movestr = self.source + self.target
if self.is_split_move():
movestr = self.source + '^' + self.target + self.target2
if self.is_merge_move():
movestr = self.source + self.source2 + '^' + self.target
if self.has_measurement():
return movestr + '.m' + str(self.measurement)
else:
return movestr
```
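A hedged usage sketch for `Move.from_string`, assuming the `recirq.quantum_chess` package is importable; the shorthand strings follow the docstring format above:
```python
# Usage sketch only; not part of the module.
from recirq.quantum_chess.move import Move

jump = Move.from_string("a1a2:JUMP:BASIC")
split = Move.from_string("b1a3c3:SPLIT_JUMP:BASIC")

print(str(jump))               # a1a2
print(split.is_split_move())   # True
print(str(split))              # b1^a3c3
```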
{
"source": "jeremiah-dibble/caltech-ee148-spring2020-hw01",
"score": 3
}
#### File: jeremiah-dibble/caltech-ee148-spring2020-hw01/run_predictions3.py
```python
"""
Created on Tue Apr 6 14:00:58 2021
@author: Jerem
"""
import os
import numpy as np
import json
from PIL import Image
from matplotlib import patches
from matplotlib import pyplot
import matplotlib.image as mpimg
def detect_red_light(I):
'''
This function takes a numpy array <I> and returns a list <bounding_boxes>.
The list <bounding_boxes> should have one element for each red light in the
image. Each element of <bounding_boxes> should itself be a list, containing
four integers that specify a bounding box: the row and column index of the
top left corner and the row and column index of the bottom right corner (in
that order). See the code below for an example.
Note that PIL loads images in RGB order, so:
I[:,:,0] is the red channel
I[:,:,1] is the green channel
I[:,:,2] is the blue channel
'''
bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below.
scores = []
top_boxes = []
'''
BEGIN YOUR CODE
'''
'''
As an example, here's code that generates between 1 and 5 random boxes
of fixed size and returns the results in the proper format.
'''
file_num = -1
for file in anotated_files:
file_num += 1
red_light = np.asarray(Image.open(os.path.join(anotated_path, file)))
box_height, box_width, box_channels = np.shape(red_light)
top_boxes = []
# num_boxes = np.random.randint(1,5)
(n_rows,n_cols,n_channels) = np.shape(I)
num_boxes = int((n_rows*n_cols)/(box_height*box_width))
for i in range(num_boxes*10):
tl_row = np.random.randint(n_rows - box_height)
tl_col = np.random.randint(n_cols - box_width)
br_row = tl_row + box_height
br_col = tl_col + box_width
top_boxes.append([tl_row,tl_col,br_row,br_col])
light_vector = red_light.reshape((-1,))
#print(np.shape(light_vector))
for box in top_boxes:
sub_image = I[box[0]:box[2],box[1]:box[3]]
score = (np.dot(sub_image.reshape((-1,)), light_vector))
scores.append(score)
bounding_boxes += top_boxes
'''
END YOUR CODE
'''
for i in range(len(bounding_boxes)):
assert len(bounding_boxes[i]) == 4
return bounding_boxes, scores
##################################
# This code is used to generate the examples for Q5
# Continued at the end
###################################################
def show_box(key, boxs):
index = file_names.index(key)
I = Image.open(os.path.join(data_path,file_names[index]))
I = np.asarray(I)
img = mpimg.imread(data_path+'/'+key)
for box in boxs:
box_height = box[2] - box[0]
box_width = box[3] - box[1]
figure, ax = pyplot.subplots(1)
rect = patches.Rectangle((box[0],box[1]),box_width,box_height, edgecolor='r', facecolor="none")
ax.imshow(img)
ax.add_patch(rect)
#img = Image.fromarray(I[box[0]:box[2],box[1]:box[3]])
#img.show()
####################################### #
##########################################
# set the path to the downloaded data:
data_path = "C:/Users/Jerem/OneDrive - California Institute of Technology/Caltech-/Classes/Spring 2021/EE 148/HW1/RedLights2011_Medium/RedLights2011_Medium"
anotated_path = "C:/Users/Jerem/OneDrive - California Institute of Technology/Caltech-/Classes/Spring 2021/EE 148/HW1/RedLights2011_Medium/Anotated"
# set a path for saving predictions:
preds_path = 'C:/Users/Jerem/OneDrive - California Institute of Technology/Caltech-/Classes/Spring 2021/EE 148/HW1/predictions/'
os.makedirs(preds_path,exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
anotated_files = sorted(os.listdir(anotated_path))
anotated_files = [f for f in anotated_files if '.jpg' in f]
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
preds = {}
scores = {}
all_scores = []
mean = 0
for i in range(len(file_names)):
# read image using PIL:
I = Image.open(os.path.join(data_path,file_names[i]))
# convert to numpy array:
I = np.asarray(I)
mean += np.mean(I)
print(i)
mean/i
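# Note: the result of mean/i above is discarded, and the accumulated mean is
# not used later in this script.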
for i in range(len(file_names)):
# read image using PIL:
I = Image.open(os.path.join(data_path,file_names[i]))
# convert to numpy array:
I = np.asarray(I)
preds[file_names[i]], scores[file_names[i]] = detect_red_light(I)
all_scores.append(scores[file_names[i]])
#red_light = np.asarray(Image.open(os.path.join(anotated_path, anotated_files[2])))
#box_height, box_width, box_channels = np.shape(red_light)
cutoff = np.percentile(np.reshape(all_scores,(-1,)), 99.2)
final_preds= {}
high_scores = []
for key in preds:
#for i in range(len(preds[key])):
best_guess = scores[key].index(np.max(scores[key]))
#best_guess = np.sort(scores[keys)[-1]]
if scores[key][best_guess] > cutoff:
high_scores.append(preds[key][best_guess])
final_preds[key] = high_scores
high_scores = []
# preds[file_names[i]] = detect_red_light(I)
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds.json'),'w') as f:
json.dump(preds,f)
red_light = np.asarray(Image.open(os.path.join(anotated_path, anotated_files[0])))
img = Image.fromarray(red_light)
img.show()
######################################
# Here is the rest of the code to generate examples for Q5
######################################
i = 0
for p in final_preds:
i+=1
show_box(p, final_preds[p])
if i > 100:
break
```
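The scoring step inside `detect_red_light()` flattens a candidate patch and the traffic-light template and takes their raw dot product. A hedged sketch of that step on synthetic arrays (normalising both vectors gives a cosine similarity, which is less biased toward bright patches):
```python
# Synthetic data only; illustrates the patch-vs-template scoring idea above.
import numpy as np

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(60, 80, 3)).astype(float)
template = rng.integers(0, 256, size=(20, 10, 3)).astype(float)

tl_row, tl_col = 15, 30
h, w, _ = template.shape
patch = image[tl_row:tl_row + h, tl_col:tl_col + w]

raw_score = patch.reshape(-1) @ template.reshape(-1)
cosine = raw_score / (np.linalg.norm(patch) * np.linalg.norm(template))
print(raw_score, cosine)
```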
{
"source": "Jeremiah-England/Shapely",
"score": 3
}
#### File: Shapely/shapely/strtree.py
```python
import ctypes
import logging
from typing import Any, ItemsView, Iterable, Iterator, Optional, Sequence, Tuple, Union
import sys
from shapely.geometry.base import BaseGeometry
from shapely.geos import lgeos
log = logging.getLogger(__name__)
class STRtree:
"""An STR-packed R-tree spatial index.
An index is initialized from a sequence of geometry objects and
optionally a sequence of items. The items, if provided, are stored
in nodes of the tree. If items are not provided, the indices of the
geometry sequence will be used instead.
Stored items and corresponding geometry objects can be spatially
queried using another geometric object.
The tree is immutable and query-only, meaning that once created
nodes cannot be added or removed.
Parameters
----------
geoms : sequence
A sequence of geometry objects.
items : sequence, optional
A sequence of objects which typically serve as identifiers in an
application. This sequence must have the same length as geoms.
Attributes
----------
node_capacity : int
The maximum number of items per node. Default: 10.
Examples
--------
Creating an index of polygons:
>>> from shapely.strtree import STRtree
>>> from shapely.geometry import Polygon
>>>
>>> polys = [Polygon(((0, 0), (1, 0), (1, 1))),
... Polygon(((0, 1), (0, 0), (1, 0))),
... Polygon(((100, 100), (101, 100), (101, 101)))]
>>> tree = STRtree(polys)
>>> query_geom = Polygon(((-1, -1), (2, 0), (2, 2), (-1, 2)))
>>> result = tree.query(query_geom)
>>> polys[0] in result
True
>>> polys[1] in result
True
>>> polys[2] in result
False
Notes
-----
The class maintains a reverse mapping of items to geometries. These
items must therefore be hashable. The tree is filled using the
Sort-Tile-Recursive [1]_ algorithm.
References
----------
.. [1] Leutenegger, <NAME>.; Edgington, <NAME>.; Lopez, <NAME>.
(February 1997). "STR: A Simple and Efficient Algorithm for
R-Tree Packing".
https://ia600900.us.archive.org/27/items/nasa_techdoc_19970016975/19970016975.pdf
"""
def __init__(
self,
geoms: Iterable[BaseGeometry],
items: Iterable[Any] = None,
node_capacity: int = 10,
):
self.node_capacity = node_capacity
# Keep references to geoms
self._geoms = list(geoms)
# Default enumeration index to store in the tree
self._idxs = list(range(len(self._geoms)))
# handle items
self._has_custom_items = items is not None
if not self._has_custom_items:
items = self._idxs
self._items = items
# initialize GEOS STRtree
self._tree = lgeos.GEOSSTRtree_create(self.node_capacity)
i = 0
for idx, geom in zip(self._idxs, self._geoms):
# filter empty geometries out of the input
if geom is not None and not geom.is_empty:
lgeos.GEOSSTRtree_insert(self._tree, geom._geom, ctypes.py_object(idx))
i += 1
self._n_geoms = i
def __reduce__(self):
if self._has_custom_items:
return STRtree, (self._geoms, self._items)
else:
return STRtree, (self._geoms, )
def __del__(self):
if self._tree is not None:
try:
lgeos.GEOSSTRtree_destroy(self._tree)
except AttributeError:
pass # lgeos might be empty on shutdown.
self._tree = None
def _query(self, geom):
if self._n_geoms == 0:
return []
result = []
def callback(item, userdata):
idx = ctypes.cast(item, ctypes.py_object).value
result.append(idx)
lgeos.GEOSSTRtree_query(self._tree, geom._geom, lgeos.GEOSQueryCallback(callback), None)
return result
def query_items(self, geom: BaseGeometry) -> Sequence[Any]:
"""Query for nodes which intersect the geom's envelope to get
stored items.
Items are integers serving as identifiers for an application.
Parameters
----------
geom : geometry object
The query geometry.
Returns
-------
An array or list of items stored in the tree.
Note
----
A geometry object's "envelope" is its minimum xy bounding
rectangle.
Examples
--------
A buffer around a point can be used to control the extent
of the query.
>>> from shapely.strtree import STRtree
>>> from shapely.geometry import Point
>>> points = [Point(i, i) for i in range(10)]
>>> tree = STRtree(points)
>>> query_geom = Point(2,2).buffer(0.99)
>>> [o.wkt for o in tree.query(query_geom)]
['POINT (2 2)']
>>> query_geom = Point(2, 2).buffer(1.0)
>>> [o.wkt for o in tree.query(query_geom)]
['POINT (1 1)', 'POINT (2 2)', 'POINT (3 3)']
A subsequent search through the returned subset using the
desired binary predicate (eg. intersects, crosses, contains,
overlaps) may be necessary to further filter the results
according to their specific spatial relationships.
>>> [o.wkt for o in tree.query(query_geom) if o.intersects(query_geom)]
['POINT (2 2)']
"""
result = self._query(geom)
if self._has_custom_items:
return [self._items[i] for i in result]
else:
return result
def query_geoms(self, geom: BaseGeometry) -> Sequence[BaseGeometry]:
"""Query for nodes which intersect the geom's envelope to get
geometries corresponding to the items stored in the nodes.
Parameters
----------
geom : geometry object
The query geometry.
Returns
-------
An array or list of geometry objects.
"""
result = self._query(geom)
return [self._geoms[i] for i in result]
def query(self, geom: BaseGeometry) -> Sequence[BaseGeometry]:
"""Query for nodes which intersect the geom's envelope to get
geometries corresponding to the items stored in the nodes.
This method is an alias for query_geoms. It may be removed in
version 2.0.
Parameters
----------
geom : geometry object
The query geometry.
Returns
-------
An array or list of geometry objects.
"""
return self.query_geoms(geom)
def _nearest(self, geom, exclusive):
envelope = geom.envelope
def callback(item1, item2, distance, userdata):
try:
callback_userdata = ctypes.cast(userdata, ctypes.py_object).value
idx = ctypes.cast(item1, ctypes.py_object).value
geom2 = ctypes.cast(item2, ctypes.py_object).value
dist = ctypes.cast(distance, ctypes.POINTER(ctypes.c_double))
if callback_userdata["exclusive"] and self._geoms[idx].equals(geom2):
dist[0] = sys.float_info.max
else:
lgeos.GEOSDistance(self._geoms[idx]._geom, geom2._geom, dist)
return 1
except Exception:
log.exception("Caught exception")
return 0
item = lgeos.GEOSSTRtree_nearest_generic(
self._tree,
ctypes.py_object(geom),
envelope._geom,
lgeos.GEOSDistanceCallback(callback),
ctypes.py_object({"exclusive": exclusive}),
)
return ctypes.cast(item, ctypes.py_object).value
def nearest_item(
self, geom: BaseGeometry, exclusive: bool = False
) -> Union[Any, None]:
"""Query the tree for the node nearest to geom and get the item
stored in the node.
Items are integers serving as identifiers for an application.
Parameters
----------
geom : geometry object
The query geometry.
exclusive : bool, optional
Whether to exclude the item corresponding to the given geom
from results or not. Default: False.
Returns
-------
Stored item or None.
None is returned if this index is empty. This may change in
version 2.0.
Examples
--------
>>> from shapely.strtree import STRtree
>>> from shapely.geometry import Point
>>> tree = STRtree([Point(i, i) for i in range(10)])
>>> tree.nearest(Point(2.2, 2.2)).wkt
'POINT (2 2)'
Will only return one object:
>>> tree = STRtree ([Point(0, 0), Point(0, 0)])
>>> tree.nearest(Point(0, 0)).wkt
'POINT (0 0)'
"""
if self._n_geoms == 0:
return None
result = self._nearest(geom, exclusive)
if self._has_custom_items:
return self._items[result]
else:
return result
def nearest_geom(
self, geom: BaseGeometry, exclusive: bool = False
) -> Union[BaseGeometry, None]:
"""Query the tree for the node nearest to geom and get the
geometry corresponding to the item stored in the node.
Parameters
----------
geom : geometry object
The query geometry.
exclusive : bool, optional
Whether to exclude the given geom from results or not.
Default: False.
Returns
-------
BaseGeometry or None.
None is returned if this index is empty. This may change in
version 2.0.
"""
result = self._nearest(geom, exclusive)
return self._geoms[result]
def nearest(
self, geom: BaseGeometry, exclusive: bool = False
) -> Union[BaseGeometry, None]:
"""Query the tree for the node nearest to geom and get the
geometry corresponding to the item stored in the node.
This method is an alias for nearest_geom. It may be removed in
version 2.0.
Parameters
----------
geom : geometry object
The query geometry.
exclusive : bool, optional
Whether to exclude the given geom from results or not.
Default: False.
Returns
-------
BaseGeometry or None.
None is returned if this index is empty. This may change in
version 2.0.
"""
return self.nearest_geom(geom, exclusive=exclusive)
```
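A hedged usage sketch of the items-aware methods above (`query_items` / `nearest_item`), assuming a Shapely build that ships this STRtree API (it changed in Shapely 2.0):
```python
# Usage sketch only; query result order is not guaranteed.
from shapely.geometry import Point
from shapely.strtree import STRtree

points = [Point(i, i) for i in range(5)]
names = [f"pt-{i}" for i in range(5)]
tree = STRtree(points, items=names)

print(tree.query_items(Point(1, 1).buffer(1.1)))  # e.g. ['pt-0', 'pt-1', 'pt-2']
print(tree.nearest_item(Point(2.2, 2.2)))         # 'pt-2'
```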
#### File: Shapely/tests/test_doctests.py
```python
import os
import doctest
from . import unittest
from glob import glob
optionflags = (doctest.REPORT_ONLY_FIRST_FAILURE |
doctest.NORMALIZE_WHITESPACE |
doctest.ELLIPSIS)
def list_doctests():
print(__file__)
source_files = glob(os.path.join(os.path.dirname(__file__), '*.txt'))
return [filename for filename in source_files]
def open_file(filename, mode='r'):
"""Helper function to open files from within the tests package."""
return open(os.path.join(os.path.dirname(__file__), filename), mode)
def setUp(test):
test.globs.update(dict(open_file=open_file,))
def test_suite():
return unittest.TestSuite(
[doctest.DocFileSuite(os.path.basename(filename),
optionflags=optionflags,
setUp=setUp)
for filename
in list_doctests()])
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=1)
runner.run(test_suite())
```
#### File: Shapely/tests/test_mapping.py
```python
from . import unittest
from shapely.geometry import Point, mapping, Polygon
class MappingTestCase(unittest.TestCase):
def test_point(self):
m = mapping(Point(0, 0))
self.assertEqual(m['type'], 'Point')
self.assertEqual(m['coordinates'], (0.0, 0.0))
def test_empty_polygon(self):
"""Empty polygons will round trip without error"""
self.assertIsNotNone(mapping(Polygon()))
```
#### File: Shapely/tests/test_singularity.py
```python
from . import unittest
from shapely.geometry import Polygon
class PolygonTestCase(unittest.TestCase):
def test_polygon_3(self):
p = (1.0, 1.0)
poly = Polygon([p, p, p])
self.assertEqual(poly.bounds, (1.0, 1.0, 1.0, 1.0))
def test_polygon_5(self):
p = (1.0, 1.0)
poly = Polygon([p, p, p, p, p])
self.assertEqual(poly.bounds, (1.0, 1.0, 1.0, 1.0))
```
#### File: Shapely/tests/test_snap.py
```python
from . import unittest
from shapely.geometry import LineString, Polygon
from shapely.ops import snap
class Snap(unittest.TestCase):
def test_snap(self):
# input geometries
square = Polygon([(1,1), (2, 1), (2, 2), (1, 2), (1, 1)])
line = LineString([(0,0), (0.8, 0.8), (1.8, 0.95), (2.6, 0.5)])
square_coords = square.exterior.coords[:]
line_coords = line.coords[:]
result = snap(line, square, 0.5)
# test result is correct
self.assertTrue(isinstance(result, LineString))
self.assertEqual(result.coords[:], [(0.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.6, 0.5)])
# test inputs have not been modified
self.assertEqual(square.exterior.coords[:], square_coords)
self.assertEqual(line.coords[:], line_coords)
```
{
"source": "jeremiahfallin/roleML",
"score": 2
}
#### File: roleml/exceptions/exceptions.py
```python
class MatchTooShort(Exception):
def __init__(self):
Exception.__init__(self, "This package only works with games over 15 minutes.")
class IncorrectMap(Exception):
def __init__(self):
Exception.__init__(self, "This package only handles Summoner’s Rift games.")
class WrongLabel(Exception):
def __init__(self):
Exception.__init__(self, 'Label needs to be in ["clean", "rgapi", "full", "LolGame"]')
class NoOpponentFoundException(Exception):
pass
```
#### File: roleML/tests/test_fix_frame.py
```python
from roleml.roleml import _fix_frame_keys
def key_equals_participant_id(frame):
return all(k == str(v["participantId"]) for k, v in frame["participantFrames"].items())
def test_fix_frame_unordered(clean_game_na):
for frame in clean_game_na["game"]["timeline"]["frames"]:
assert not key_equals_participant_id(frame)
for frame in clean_game_na["game"]["timeline"]["frames"]:
assert key_equals_participant_id(_fix_frame_keys(frame))
def test_fix_frame_ordered(clean_game_euw):
for frame in clean_game_euw["game"]["timeline"]["frames"]:
assert key_equals_participant_id(frame)
for frame in clean_game_euw["game"]["timeline"]["frames"]:
assert key_equals_participant_id(_fix_frame_keys(frame))
```
#### File: roleML/tests/test_old_game.py
```python
import logging
import pytest
import roleml
def test_old_game_pro(pro_game, caplog):
with caplog.at_level(logging.WARNING):
roleml.predict(pro_game["game"], pro_game["game"]["timeline"])
assert caplog.text
def test_recent_game(clean_game_na):
with pytest.warns(None) as record:
roleml.predict(clean_game_na["game"], clean_game_na["game"]["timeline"])
assert not record
```
#### File: roleML/tests/test_positions.py
```python
from roleml.features import _get_positions, _get_lane_frequencies, _get_most_frequent_lane
def test_get_positions_na(clean_game_na):
assert _get_positions(clean_game_na["game"]["timeline"]) == clean_game_na["participants_positions"]
def test_get_lane_frequencies_1(clean_game_na):
assert _get_lane_frequencies(_get_positions(clean_game_na["game"]["timeline"])) == clean_game_na["lane_frequency"]
def test_get_most_frequent_lane_1(clean_game_na):
assert (
_get_most_frequent_lane(_get_positions(clean_game_na["game"]["timeline"]))
== clean_game_na["most_frequent_lane"]
)
```
#### File: roleML/tests/test_predict.py
```python
import pytest
import roleml
from roleml import exceptions
def test_predict_1(clean_game_na):
assert clean_game_na["expected_roles"] == roleml.predict(clean_game_na["game"], clean_game_na["game"]["timeline"])
def test_predict_2(clean_game_euw):
assert clean_game_euw["expected_roles"] == roleml.predict(clean_game_euw["game"], clean_game_euw["game"]["timeline"])
def test_predict_match_too_short(short_game):
with pytest.raises(exceptions.MatchTooShort):
roleml.predict(short_game["game"], short_game["game"]["timeline"])
def test_predict_match_aram(aram_game):
with pytest.raises(exceptions.IncorrectMap):
roleml.predict(aram_game["game"], aram_game["game"]["timeline"])
def test_predict_empty_lane_frequency(empty_lane_frequency_game):
roleml.predict(empty_lane_frequency_game["game"], empty_lane_frequency_game["game"]["timeline"])
assert True
```
{
"source": "Jeremiahjacinth13/tradex",
"score": 2
}
#### File: tradex/buyAndSell/views.py
```python
import json
from django.shortcuts import render, reverse, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404
from django.contrib.auth import login, logout
from .models import User, Post, Store, Product, Cart, Account, Like
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
import os
# Create your views here.
@login_required
def index(request):
return render(request, 'buyAndSell/index.html', context={'allPosts':Post.objects.all().order_by('-dateCreated'), 'len': len(request.user.getProducts())})
@csrf_exempt
def new_post(request):
if request.method == 'POST':
content = request.POST['content']
image = request.FILES['imageUrl']
id = request.POST['user_id']
try:
poster = get_object_or_404(User, id = id)
post = Post.objects.create(content = content, poster = poster, image = image)
post.save()
except Http404:
return JsonResponse({'message': 'You have to login', 'errors': ['You need to login to create posts'], 'status': 403})
return JsonResponse({'message': 'Your post has been successfully uploaded', 'status': 200, 'post_details': post.serialize()})
return JsonResponse({'message': "Post request required", 'status': 403})
@login_required
def new_product(request):
if request.method == 'POST':
if request.user.userType == 'buyer':
return JsonResponse({'message': "Operation DisAllowed, User is not a seller", 'status': 403})
else:
data_sent = json.loads(request.body)
print(data_sent)
name = data_sent['name']
description = data_sent['description']
price = data_sent['price']
imageUrl = data_sent['imageUrl']
store = request.user.getCart()
Product.objects.create(name = name, description = description ,price = price, imageUrl = imageUrl, store = store)
return JsonResponse({'message': 'Product has been added to your store', 'status': 200})
return JsonResponse({'message': "Post Request Required", 'status': 403})
def get_user(request, user_id):
try:
return JsonResponse({'user': get_object_or_404(User, id = user_id).serialize(), 'status': 200})
except Http404 as e:
return JsonResponse({'message': e.__str__(), 'status': 404})
def get_all_users(request):
return JsonResponse({'users': [user.serialize() for user in User.objects.exclude(username = 'admin')], 'status': 200})
def get_all_posts(request):
print(int(request.GET.get('start')))
print(int(request.GET.get('end')))
start = Post.objects.count() - int(request.GET.get('start'))
end = Post.objects.count() - int(request.GET.get('end'))
valid_posts = []
for post in Post.objects.order_by('-dateCreated'):
if post.test(start, end):
valid_posts.append(post)
return JsonResponse({'posts': [post.serialize() for post in valid_posts]})
def get_post(request, post_id):
try:
return JsonResponse(get_object_or_404(Post, id = post_id).serialize())
except Http404 as e:
return JsonResponse({'message': e.__str__(), 'status': 404})
def get_store(request, owner_id):
user = User.objects.get(id = owner_id)
store = user.store.get()
start = store.products.count() - int(request.GET.get('start'))
end = store.products.count() - int(request.GET.get('end'))
valid_posts = []
for product in store.products.order_by('-dateCreated'):
if product.test(start, end):
valid_posts.append(product)
return JsonResponse({'products': [product.serialize() for product in valid_posts], 'status': 200})
@csrf_exempt
def post_operation(request, operation, post_id):
if request.method == "PUT":
try:
post = get_object_or_404(Post, id = post_id)
if operation == 'like':
information_sent = json.loads(request.body)
liker = User.objects.get(id = information_sent['user_id'])
if Like.objects.filter(post = post, liker = liker).count() == 0:
Like.objects.create(post = post, liker = liker)
else:
like = Like.objects.get(post = post, liker = liker)
like.delete()
return JsonResponse({'message': 'Operation has been carried out', 'newLikeCount': Like.objects.filter(post = post).count(), 'status': 200})
elif operation == 'remove':
# do another thing
post.delete()
return JsonResponse({'message': "Post has been deleted", 'status': 200})
elif operation == 'edit':
newText = information_sent['new_text']
post.content = newText
post.save()
else:
return JsonResponse({'message': "Invalid operation", 'status': 403})
except Http404:
return JsonResponse({'message': 'Post with that id not found', 'status': 404})
# if there is a get request
return JsonResponse({'message': "POST or PUT request is required",'status': 403})
@csrf_exempt
def edit_user_profile(request, user_id, operation):
user = User.objects.get(id = user_id)
if request.method == 'POST' and operation == 'edit':
bio = request.POST['bio']
status = request.POST['status']
userProfile = user.profile
userProfile.bio=bio
userProfile.status = status
userProfile.save()
try:
new_profile_image = request.FILES['profile_image']
user.profile_picture = new_profile_image
user.save()
except Exception as e:
print(e)
return JsonResponse({'message': "User profile has been updated", "status": 200})
return JsonResponse({'message': "Post or PUT request required", "status": 400})
```
{
"source": "jeremiah-k/algo",
"score": 2
}
#### File: algo/library/linode_v4.py
```python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.linode import get_user_agent
LINODE_IMP_ERR = None
try:
from linode_api4 import Instance, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
LINODE_IMP_ERR = traceback.format_exc()
HAS_LINODE_DEPENDENCY = False
def create_linode(module, client, **kwargs):
"""Creates a Linode instance and handles return format."""
if kwargs['root_pass'] is None:
kwargs.pop('root_pass')
try:
response = client.linode.instance_create(**kwargs)
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
try:
if isinstance(response, tuple):
instance, root_pass = response
instance_json = instance._raw_json
instance_json.update({'root_pass': root_pass})
return instance_json
else:
return response._raw_json
except TypeError:
module.fail_json(msg='Unable to parse Linode instance creation'
' response. Please raise a bug against this'
' module on https://github.com/ansible/ansible/issues'
)
def maybe_instance_from_label(module, client):
"""Try to retrieve an instance based on a label."""
try:
label = module.params['label']
result = client.linode.instances(Instance.label == label)
return result[0]
except IndexError:
return None
except Exception as exception:
module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def initialise_module():
"""Initialise the module parameter specification."""
return AnsibleModule(
argument_spec=dict(
label=dict(type='str', required=True),
state=dict(
type='str',
required=True,
choices=['present', 'absent']
),
access_token=dict(
type='str',
required=True,
no_log=True,
fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
),
authorized_keys=dict(type='list', required=False),
group=dict(type='str', required=False),
image=dict(type='str', required=False),
region=dict(type='str', required=False),
root_pass=dict(type='str', required=False, no_log=True),
tags=dict(type='list', required=False),
type=dict(type='str', required=False),
stackscript_id=dict(type='int', required=False),
),
supports_check_mode=False,
required_one_of=(
['state', 'label'],
),
required_together=(
['region', 'image', 'type'],
)
)
def build_client(module):
"""Build a LinodeClient."""
return LinodeClient(
module.params['access_token'],
user_agent=get_user_agent('linode_v4_module')
)
def main():
"""Module entrypoint."""
module = initialise_module()
if not HAS_LINODE_DEPENDENCY:
module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
client = build_client(module)
instance = maybe_instance_from_label(module, client)
if module.params['state'] == 'present' and instance is not None:
module.exit_json(changed=False, instance=instance._raw_json)
elif module.params['state'] == 'present' and instance is None:
instance_json = create_linode(
module, client,
authorized_keys=module.params['authorized_keys'],
group=module.params['group'],
image=module.params['image'],
label=module.params['label'],
region=module.params['region'],
root_pass=module.params['root_pass'],
tags=module.params['tags'],
ltype=module.params['type'],
stackscript_id=module.params['stackscript_id'],
)
module.exit_json(changed=True, instance=instance_json)
elif module.params['state'] == 'absent' and instance is not None:
instance.delete()
module.exit_json(changed=True, instance=instance._raw_json)
elif module.params['state'] == 'absent' and instance is None:
module.exit_json(changed=False, instance={})
if __name__ == "__main__":
main()
```
{
"source": "jeremiahlewis-vw/snakefmt",
"score": 3
}
#### File: snakefmt/snakefmt/types.py
```python
import tokenize
from typing import Iterator, NamedTuple, Tuple
from snakefmt.exceptions import InvalidParameterSyntax
class Token(NamedTuple):
type: int
string: str = ""
start: Tuple[int, int] = (-1, -1)
end: Tuple[int, int] = (-1, -1)
TokenIterator = Iterator[Token]
class Parameter:
"""
Holds the value of a parameter-accepting keyword
"""
def __init__(self, line_nb: str):
self.line_nb = line_nb
self.key = ""
self.value = ""
self.comments = list()
self.len = 0
def __repr__(self):
if self.has_a_key():
return f"{self.key}={self.value}"
else:
return self.value
def has_a_key(self) -> bool:
return len(self.key) > 0
def has_value(self) -> bool:
return len(self.value) > 0
def add_elem(self, token: Token):
if len(self.value) > 0 and token.type == tokenize.NAME:
self.value += " "
self.value += token.string
def to_key_val_mode(self, token: Token):
if not self.has_value():
raise InvalidParameterSyntax(
f"L{token.start[0]}:Operator = used with no preceding key"
)
try:
exec(f"{self.value} = 0")
except SyntaxError:
raise InvalidParameterSyntax(
f"L{token.start[0]}:Invalid key {self.value}"
) from None
self.key = self.value
self.value = ""
```
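A hedged sketch of how `Parameter` accumulates tokens, assuming `snakefmt` is importable; the token values are illustrative:
```python
# Usage sketch only: build a key=value parameter the way the parser would.
import tokenize
from snakefmt.types import Parameter, Token

param = Parameter(line_nb="L1")
param.add_elem(Token(tokenize.NAME, "threads"))
param.to_key_val_mode(Token(tokenize.OP, "=", (1, 0), (1, 1)))
param.add_elem(Token(tokenize.NUMBER, "4"))
print(param)   # threads=4
```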
#### File: snakefmt/tests/__init__.py
```python
from io import StringIO
from snakefmt import DEFAULT_LINE_LENGTH
from snakefmt.formatter import Formatter
from snakefmt.parser.parser import Snakefile
def setup_formatter(snake: str, line_length: int = DEFAULT_LINE_LENGTH):
stream = StringIO(snake)
smk = Snakefile(stream)
return Formatter(smk, line_length=line_length)
```
#### File: snakefmt/tests/test_config.py
```python
from pathlib import Path
from unittest import mock
import black
import click
import pytest
from snakefmt.exceptions import MalformattedToml
from snakefmt.formatter import TAB
from snakefmt.snakefmt import main, read_snakefmt_defaults_from_pyproject_toml
from tests import setup_formatter
class TestConfigAdherence:
def test_config_adherence_for_python_outside_rules(self, cli_runner, tmp_path):
stdin = "include: 'a'\nlist_of_lots_of_things = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
line_length = 30
config = tmp_path / "pyproject.toml"
config.write_text(f"[tool.snakefmt]\nline_length = {line_length}\n")
params = ["--config", str(config), "-"]
actual = cli_runner.invoke(main, params, input=stdin)
assert actual.exit_code == 0
expected_output = """include: \"a\"
list_of_lots_of_things = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
]
"""
assert actual.output == expected_output
def test_config_adherence_for_code_inside_rules(self, cli_runner, tmp_path):
stdin = (
f"rule a:\n"
f"{TAB}input:\n"
f"{TAB*2}list_of_lots_of_things = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
)
line_length = 30
config = tmp_path / "pyproject.toml"
config.write_text(f"[tool.snakefmt]\nline_length = {line_length}\n")
params = ["--config", str(config), "-"]
actual = cli_runner.invoke(main, params, input=stdin)
assert actual.exit_code == 0
expected_output = (
"rule a:\n"
f"{TAB*1}input:\n"
f"{TAB*2}list_of_lots_of_things=[\n"
f"{TAB*3}1,\n{TAB*3}2,\n{TAB*3}3,\n{TAB*3}4,\n{TAB*3}5,\n"
f"{TAB*3}6,\n{TAB*3}7,\n{TAB*3}8,\n{TAB*3}9,\n{TAB*3}10,\n"
f"{TAB*2}],\n"
)
assert actual.output == expected_output
class TestReadSnakefmtDefaultsFromPyprojectToml:
def test_no_value_passed_and_no_pyproject_changes_nothing(self, testdir):
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = None
return_val = read_snakefmt_defaults_from_pyproject_toml(ctx, param, value)
assert return_val is None
actual_default_map = ctx.default_map
expected_default_map = dict()
assert actual_default_map == expected_default_map
def test_pyproject_present_but_empty_changes_nothing_returns_pyproject_path(
self, testdir
):
pyproject = Path("pyproject.toml")
pyproject.touch()
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = None
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value
)
expected_config_path = str(pyproject)
assert actual_config_path == expected_config_path
assert ctx.default_map == dict()
def test_no_value_passed_and_pyproject_present_changes_default_line_length(
self, testdir
):
pyproject = Path("pyproject.toml")
pyproject.write_text("[tool.snakefmt]\nline_length = 4")
default_map = dict(line_length=88)
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = None
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value
)
expected_config_path = str(pyproject)
assert actual_config_path == expected_config_path
actual_default_map = ctx.default_map
expected_default_map = dict(line_length=4)
assert actual_default_map == expected_default_map
def test_no_value_passed_and_pyproject_present_unknown_param_adds_to_default_map(
self, testdir
):
pyproject = Path("pyproject.toml")
pyproject.write_text("[tool.snakefmt]\nfoo = true")
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = None
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value
)
expected_config_path = str(pyproject)
assert actual_config_path == expected_config_path
actual_default_map = ctx.default_map
expected_default_map = dict(foo=True)
assert actual_default_map == expected_default_map
def test_value_passed_reads_from_path(self, testdir):
pyproject = Path("snakefmt.toml")
pyproject.write_text("[tool.snakefmt]\nfoo = true")
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value=str(pyproject)
)
expected_config_path = str(pyproject)
assert actual_config_path == expected_config_path
actual_default_map = ctx.default_map
expected_default_map = dict(foo=True)
assert actual_default_map == expected_default_map
def test_value_passed_but_default_map_is_None_still_updates_defaults(self, testdir):
pyproject = Path("snakefmt.toml")
pyproject.write_text("[tool.snakefmt]\nfoo = true")
default_map = None
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = str(pyproject)
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value
)
expected_config_path = str(pyproject)
assert actual_config_path == expected_config_path
actual_default_map = ctx.default_map
expected_default_map = dict(foo=True)
assert actual_default_map == expected_default_map
def test_value_passed_in_overrides_pyproject(self, testdir):
snakefmt_config = Path("snakefmt.toml")
snakefmt_config.write_text("[tool.snakefmt]\nfoo = true")
pyproject = Path("pyproject.toml")
pyproject.write_text("[tool.snakefmt]\n\nfoo = false\nline_length = 90")
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = str(snakefmt_config)
actual_config_path = read_snakefmt_defaults_from_pyproject_toml(
ctx, param, value
)
expected_config_path = str(snakefmt_config)
assert actual_config_path == expected_config_path
actual_default_map = ctx.default_map
expected_default_map = dict(foo=True)
assert actual_default_map == expected_default_map
def test_malformatted_toml_raises_error(self, testdir):
pyproject = Path("pyproject.toml")
pyproject.write_text("foo:bar,baz\n{dict}&&&&")
default_map = dict()
ctx = click.Context(click.Command("snakefmt"), default_map=default_map)
param = mock.MagicMock()
value = None
with pytest.raises(click.FileError):
read_snakefmt_defaults_from_pyproject_toml(ctx, param, value)
class TestReadBlackConfig:
def test_config_doesnt_exist_raises_error(self, tmp_path):
formatter = setup_formatter("")
path = tmp_path / "config.toml"
with pytest.raises(FileNotFoundError):
formatter.read_black_config(path)
def test_config_exists_but_no_black_settings(self, tmp_path):
formatter = setup_formatter("")
path = tmp_path / "config.toml"
path.write_text("[tool.snakefmt]\nline_length = 99")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=formatter.line_length)
assert actual == expected
def test_config_exists_with_black_settings(self, tmp_path):
formatter = setup_formatter("")
path = tmp_path / "config.toml"
black_line_length = 9
path.write_text(f"[tool.black]\nline_length = {black_line_length}")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=black_line_length)
assert actual == expected
def test_config_exists_with_no_line_length_uses_snakefmt_line_length(
self, tmp_path
):
line_length = 9
formatter = setup_formatter("", line_length=line_length)
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nstring_normalization = false")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=line_length, string_normalization=False)
assert actual == expected
def test_config_exists_with_invalid_black_options_ignores_it(self, tmp_path):
formatter = setup_formatter("")
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nfoo = false")
actual = formatter.read_black_config(path)
expected = black.FileMode()
assert actual == expected
def test_malformatted_toml_raises_error(self, tmp_path):
formatter = setup_formatter("")
path = tmp_path / "config.toml"
path.write_text("[tool.black]\n{key}: I am not json:\n or yaml = false")
with pytest.raises(MalformattedToml) as error:
formatter.read_black_config(path)
assert error.match("invalid character")
def test_skip_string_normalisation_handled_with_snakecase(self, tmp_path):
line_length = 88
formatter = setup_formatter("", line_length=line_length)
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nskip_string_normalization = false")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=line_length, string_normalization=True)
assert actual == expected
def test_skip_string_normalisation_handled_with_kebabcase(self, tmp_path):
line_length = 88
formatter = setup_formatter("", line_length=line_length)
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nskip-string-normalization = 0")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=line_length, string_normalization=True)
assert actual == expected
def test_string_normalisation_handled(self, tmp_path):
line_length = 88
formatter = setup_formatter("", line_length=line_length)
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nstring-normalization = false")
actual = formatter.read_black_config(path)
expected = black.FileMode(line_length=line_length, string_normalization=False)
assert actual == expected
```
#### File: snakefmt/tests/test_snakefmt.py
```python
import re
import tempfile
from collections import Counter
from pathlib import Path
from typing import Optional
from unittest import mock
import pytest
from black import get_gitignore
from snakefmt.diff import ExitCode
from snakefmt.formatter import TAB
from snakefmt.snakefmt import construct_regex, get_snakefiles_in_dir, main
class TestCLIBasic:
def test_noArgsPassed_printsNothingToDo(self, cli_runner):
params = []
actual = cli_runner.invoke(main, params)
assert actual.exit_code == 0
assert "Nothing to do" in actual.output
def test_nonExistentParam_nonZeroExit(self, cli_runner):
params = ["--fake"]
actual = cli_runner.invoke(main, params)
assert actual.exit_code != 0
assert "no such option" in actual.output
def test_invalidPath_nonZeroExit(self, cli_runner):
params = ["fake.txt"]
actual = cli_runner.invoke(main, params)
assert actual.exit_code != 0
expected_pattern = re.compile(
r"Path [\'\"]{}[\'\"] does not exist".format(params[0])
)
assert expected_pattern.search(actual.output)
def test_dashMixedWithFiles_nonZeroExit(self, cli_runner):
params = ["-", str(Path().resolve())]
actual = cli_runner.invoke(main, params)
assert actual.exit_code != 0
assert "Cannot mix stdin (-) with other files" in actual.output
def test_stdinAsSrc_WritesToStdout(self, cli_runner):
stdin = f"rule all:\n{TAB}input: 'c'"
params = ["--verbose", "-"]
actual = cli_runner.invoke(main, params, input=stdin)
print(actual.exception)
assert actual.exit_code == 0
expected_output = f'rule all:\n{TAB}input:\n{TAB*2}"c",\n'
assert actual.output == expected_output
def test_src_dir_arg_files_modified_inplace(self, cli_runner):
with tempfile.TemporaryDirectory() as tmpdir:
content = 'include: "a"'
abs_tmpdir = Path(tmpdir).resolve()
snakedir = abs_tmpdir / "workflows"
snakedir.mkdir()
snakefile = snakedir / "Snakefile"
snakefile.write_text(content)
params = [str(tmpdir)]
cli_runner.invoke(main, params)
expected_contents = content + "\n"
actual_contents = snakefile.read_text()
assert actual_contents == expected_contents
def test_file_arg_write_back_happens(self, cli_runner, tmp_path):
content = 'include: "a"'
file = tmp_path / "Snakefile"
file.write_text(content)
params = [str(file)]
original_stat = file.stat()
cli_runner.invoke(main, params)
actual_stat = file.stat()
assert actual_stat != original_stat
actual_content = file.read_text()
expected_content = content + "\n"
assert actual_content == expected_content
def test_file_arg_file_requires_no_changes_no_write_back_happens(
self, cli_runner, tmp_path
):
content = 'include: "a"\n'
file = tmp_path / "Snakefile"
file.write_text(content)
params = [str(file)]
expected_stat = file.stat()
cli_runner.invoke(main, params)
actual_stat = file.stat()
assert actual_stat == expected_stat
class TestCLICheck:
def test_check_file_needs_no_changes_correct_exit_code(self, cli_runner):
stdin = 'include: "a"\n'
params = ["--check", "-"]
actual = cli_runner.invoke(main, params, input=stdin)
assert ExitCode(actual.exit_code) is ExitCode.NO_CHANGE
def test_check_file_needs_changes_correct_exit_code(self, cli_runner):
stdin = 'include: "a"\n'
params = ["--check", "-"]
actual = cli_runner.invoke(main, params, input=stdin)
assert ExitCode(actual.exit_code) is ExitCode.WOULD_CHANGE
def test_check_file_syntax_correct_exit_code(self, cli_runner):
stdin = "foo: \n"
params = ["--check", "-"]
actual = cli_runner.invoke(main, params, input=stdin)
assert ExitCode(actual.exit_code) is ExitCode.ERROR
def test_check_does_not_format_file(self, cli_runner, tmp_path):
content = "include: 'a'\nlist_of_lots_of_things = [1, 2, 3, 4, 5, 6, 7, 8]"
snakefile = tmp_path / "Snakefile"
snakefile.write_text(content)
params = ["--check", str(snakefile)]
result = cli_runner.invoke(main, params)
assert ExitCode(result.exit_code) is ExitCode.WOULD_CHANGE
expected_contents = content
actual_contents = snakefile.read_text()
assert actual_contents == expected_contents
def test_check_two_files_both_unchanged(self, cli_runner, tmp_path):
content = 'include: "a"\n'
file1 = tmp_path / "Snakefile"
file1.write_text(content)
file2 = tmp_path / "Snakefile2"
file2.write_text(content)
params = ["--check", str(file1), str(file2)]
result = cli_runner.invoke(main, params)
assert ExitCode(result.exit_code) is ExitCode.NO_CHANGE
def test_check_two_files_one_will_change(self, cli_runner, tmp_path):
content = 'include: "a"\n'
file1 = tmp_path / "Snakefile"
file1.write_text(content)
file2 = tmp_path / "Snakefile2"
content += "x='foo'"
file2.write_text(content)
params = ["--check", str(file1), str(file2)]
result = cli_runner.invoke(main, params)
assert ExitCode(result.exit_code) is ExitCode.WOULD_CHANGE
def test_check_two_files_one_has_errors(self, cli_runner, tmp_path):
content = 'include: "a"\n'
file1 = tmp_path / "Snakefile"
file1.write_text(content)
file2 = tmp_path / "Snakefile2"
content += "if:"
file2.write_text(content)
params = ["--check", str(file1), str(file2)]
result = cli_runner.invoke(main, params)
assert ExitCode(result.exit_code) is ExitCode.ERROR
def test_check_and_diff_runs_both_with_check_exit_code(self, cli_runner):
stdin = "x='foo'\n"
params = ["--check", "--diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
assert ExitCode(result.exit_code) is ExitCode.WOULD_CHANGE
expected_output = "=====> Diff for stdin <=====\n\n- x='foo'\n+ x = \"foo\"\n\n"
assert result.output == expected_output
def test_check_and_diff_doesnt_output_diff_if_error(self, cli_runner):
stdin = "rule:rule:\n"
params = ["--check", "--diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
assert ExitCode(result.exit_code) is ExitCode.ERROR
assert result.output == ""
class TestCLIDiff:
def test_diff_works_as_expected(self, cli_runner):
stdin = "include: 'a'\n"
params = ["--diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
expected_exit_code = 0
assert result.exit_code == expected_exit_code
expected_output = (
"=====> Diff for stdin <=====\n"
"\n"
"- include: 'a'\n"
"? ^ ^\n"
'+ include: "a"\n'
"? ^ ^\n\n"
)
assert result.output == expected_output
def test_compact_diff_works_as_expected(self, cli_runner):
stdin = "include: 'a'\n"
params = ["--compact-diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
expected_exit_code = 0
assert result.exit_code == expected_exit_code
expected_output = (
"=====> Diff for stdin <=====\n"
"\n"
"--- original\n"
"+++ new\n"
"@@ -1 +1 @@\n"
"-include: 'a'\n"
'+include: "a"\n\n'
)
assert result.output == expected_output
def test_compact_diff_and_diff_given_runs_compact_diff(self, cli_runner):
stdin = "include: 'a'\n"
params = ["--compact-diff", "--diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
expected_exit_code = 0
assert result.exit_code == expected_exit_code
expected_output = (
"=====> Diff for stdin <=====\n"
"\n"
"--- original\n"
"+++ new\n"
"@@ -1 +1 @@\n"
"-include: 'a'\n"
'+include: "a"\n\n'
)
assert result.output == expected_output
def test_diff_does_not_format_file(self, cli_runner, tmp_path):
content = "include: 'a'\nlist_of_lots_of_things = [1, 2, 3, 4, 5, 6, 7, 8]"
snakefile = tmp_path / "Snakefile"
snakefile.write_text(content)
params = ["--diff", str(snakefile)]
result = cli_runner.invoke(main, params)
expected_exit_code = 0
assert result.exit_code == expected_exit_code
expected_contents = content
actual_contents = snakefile.read_text()
assert actual_contents == expected_contents
def test_diff_doesnt_output_diff_if_error(self, cli_runner):
stdin = "rule:rule:\n"
params = ["--diff", "-"]
result = cli_runner.invoke(main, params, input=stdin)
assert type(result.exception) == SyntaxError
assert result.exit_code != 0
assert result.output == ""
class TestConstructRegex:
def test_noNewline_returnsCompiledRegex(self):
regex = r"\.smk$"
actual = construct_regex(regex)
expected = re.compile(regex)
assert actual == expected
def test_containsNewline_returnsCompiledRegexWithMultilineSetting(self):
regex = r"""
(
/(
\.eggs # exclude a few common directories in the
| \.git # root of the project
| \.snakemake
)/
)
"""
actual = construct_regex(regex)
expected = re.compile(regex, re.MULTILINE | re.VERBOSE)
assert actual == expected
def test_invalid_regex_raises_error(self):
regex = r"?"
with pytest.raises(re.error):
construct_regex(regex)
class TestCLIInvalidRegex:
def test_invalidIncludeRegex_nonZeroExit(self, cli_runner):
params = ["--include", "?", str(Path().resolve())]
actual = cli_runner.invoke(main, params)
assert actual.exit_code != 0
assert "Invalid regular expression" in str(actual.exception)
def test_invalidExcludeRegex_nonZeroExit(self, cli_runner):
params = ["--exclude", "?", str(Path().resolve())]
actual = cli_runner.invoke(main, params)
assert actual.exit_code != 0
assert "Invalid regular expression" in str(actual.exception)
class TestCLIValidRegex:
filesystem = [
"Snakefile",
"Snakefile-dev",
"scripts/run.py",
"rules/map.smk",
"rules/test/test.smk",
"data/file.txt",
"config.yml",
"a/b/c/d/e/Snakefil",
"a/b/c/d/foo.bar",
]
def create_temp_filesystem_in(self, tmpdir: Path):
for p in self.filesystem:
path = tmpdir / p
parent = path.parent
parent.mkdir(exist_ok=True, parents=True)
path.touch()
def find_files_to_format(
self, include, exclude, gitignore, gitignored: Optional[str] = None
):
with tempfile.TemporaryDirectory() as tmpdir:
abs_tmpdir = Path(tmpdir).resolve()
self.create_temp_filesystem_in(abs_tmpdir)
if gitignored is not None:
with (abs_tmpdir / ".gitignore").open("w") as fout:
fout.write(gitignored)
gitignore = get_gitignore(abs_tmpdir)
snakefiles = get_snakefiles_in_dir(
path=abs_tmpdir, include=include, exclude=exclude, gitignore=gitignore,
)
snakefiles = list(map(lambda p: str(p.relative_to(abs_tmpdir)), snakefiles))
return Counter(snakefiles)
@mock.patch("pathspec.PathSpec")
def test_excludeAllFiles_returnsEmpty(self, mock_gitignore: mock.MagicMock):
mock_gitignore.match_file.return_value = False
include = re.compile(r"\.meow$")
exclude = re.compile(r".*")
actual = self.find_files_to_format(include, exclude, mock_gitignore)
expected = Counter()
assert actual == expected
@mock.patch("pathspec.PathSpec")
def test_includeAllFiles_returnAll(self, mock_gitignore: mock.MagicMock):
mock_gitignore.match_file.return_value = False
include = re.compile(r".*")
exclude = re.compile(r"")
actual = self.find_files_to_format(include, exclude, mock_gitignore)
expected = Counter(self.filesystem)
assert actual == expected
@mock.patch("pathspec.PathSpec")
def test_includeOnlySnakefiles_returnsOnlySnakefiles(
self, mock_gitignore: mock.MagicMock
):
mock_gitignore.match_file.return_value = False
include = re.compile(r"(\.smk$|^Snakefile)")
exclude = re.compile(r"")
actual = self.find_files_to_format(include, exclude, mock_gitignore)
expected = Counter(
["Snakefile", "Snakefile-dev", "rules/map.smk", "rules/test/test.smk"]
)
assert actual == expected
def test_gitignore_paths_excluded(self,):
include = re.compile(r"(\.smk$|^Snakefile)")
exclude = re.compile(r"")
ignored_gitignore = get_gitignore(Path())
actual_gitignored = "Snakefile*"
actual = self.find_files_to_format(
include, exclude, ignored_gitignore, gitignored=actual_gitignored
)
expected = Counter(["rules/map.smk", "rules/test/test.smk"])
assert actual == expected
class TestCliConfig:
def test_black_skip_string_norm_is_obeyed(self, tmp_path, cli_runner):
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nskip-string-normalization = 1")
stdin = "x = 'foo'\n\n\nconfigfile: 'a'\n"
params = ["--config", str(path), "--check", "-"]
result = cli_runner.invoke(main, params, input=stdin)
expected_exit_code = 0
assert result.exit_code == expected_exit_code
def test_black_string_norm_is_obeyed(self, tmp_path, cli_runner):
path = tmp_path / "config.toml"
path.write_text("[tool.black]\nskip-string-normalization = false")
stdin = "x = 'foo'\n\n\nconfigfile: 'a'\n"
params = ["--config", str(path), "--check", "-"]
result = cli_runner.invoke(main, params, input=stdin)
assert result.exit_code != 0
```
|
{
"source": "jeremiahmarks/dangerzone",
"score": 4
}
|
#### File: python/anagrams/grams.py
```python
import string
import random
def getWordList():
words=set()
wordFile=open('mypy/words.txt','r')
for word in wordFile:
words.add(word[:-1])
wordFile.close()
return words
def checkLetters(mainstring, teststring):
#print "checkLetters is checking "+mainstring + " against "+teststring
isThere=True
for letter in teststring:
if (teststring.count(letter)>mainstring.count(letter)):return False
return isThere
def getwords(aString):
stringasset=set(aString)
allWords=getWordList()
potentialWords=[]
phrases=[]
for word in allWords:
if (set(word).issubset(stringasset)&checkLetters(aString,word)):
potentialWords.append(word)
"""for words in potentialWords:
tempstring=aString
for letter in words:
tempstring=tempstring.replace(letter,'',1)
for theword in potentialWords:
if (set(theword).issubset(set(tempstring))&checkLetters(tempstring,theword)):
phrases.append(words+" "+theword)"""
return potentialWords
def removeletters(oldstring, wordtoremove):
for letter in wordtoremove:
oldstring=oldstring.replace(letter,'',1)
return oldstring
def getoneword(astring):
return getwords(astring)[0]
def getlongestword(astring):
words=getwords(astring)
leadingword=''
for word in words:
if (len(word)>len(leadingword)):
leadingword=word
return leadingword
def getrandomword(astring):
return random.choice(getwords(astring))
def getrandomwordlong(astring):
words=getwords(astring)
wordlength=len(getlongestword(astring))
wordtoreturn=random.choice(words)
if ((len(wordtoreturn)>1) or wordlength==1):
return wordtoreturn
else:
return getrandomwordlong(astring)
def phrasemaker(astring):
words=getwords(astring)
phrases=[]
for word in words:
phrases.append([word, removeletters(astring,word)])
newlist=[]
for phrase in phrases:
while (len(phrase[1])>0):
wordtoadd=getrandomwordlong(phrase[1])
phrase[0]=phrase[0]+' '+wordtoadd
phrase[1]= removeletters(phrase[1],wordtoadd)
newlist.append(phrase)
print phrase
return newlist
```
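`checkLetters` is the heart of the anagram filter above: a candidate word survives only if none of its letters occurs more often than in the source string. A minimal sketch of that behaviour in isolation (assuming a Python 2 interpreter, since `phrasemaker` uses print statements, and that this file is importable as `grams`):
```python
from grams import checkLetters

# "silent" uses every letter of "listen" no more often than it appears there.
assert checkLetters("listen", "silent")

# "lessen" needs two 'e's and two 's's, but "listen" only has one of each,
# so the count check fails.
assert not checkLetters("listen", "lessen")
```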
#### File: python/anagrams/moregrams.py
```python
from grams import getwords, checkLetters
def grams(originalstring, listofwords, starterphrase=''):
listofphrases = []
for word in listofwords:
thisphrase=starterphrase+word+' '
templist=[]
tempstring=originalstring
for letter in word:
tempstring=tempstring.replace(letter, '',1)
for eachword in listofwords:
if checkLetters(tempstring, eachword):
templist.append(eachword)
if len(templist)==0:
listofphrases.append(thisphrase)
continue
else:
listofphrases.extend(grams(tempstring, templist, thisphrase))
return listofphrases
class deruzzle(object):
    def __init__(self, ruzzlestring):
""" The ruzzlestring is the 16 letters that are on a ruzzleboard
with color modifiers in the style of a_red_. and example ruzzleboard
board would be entered as such: pn_yellow_reei_blue_ama_red_e_green_i_yellow_seslt
p n_y_ r e
e i_b_ a m
a_r_ e_g_ i_y_ s
e s l t
"""
        while not(len(ruzzlestring)==0):
            pass  # TODO: consume the next letter and its optional colour modifier
```
#### File: python/calculus/ch2.py
```python
from mypy.physics import constants
def averageVelocity(positionEquation, startTime, endTime):
"""
The position equation is in the form of a one variable lambda and the
averagevelocity=(changeinposition)/(timeelapsed)
"""
startTime=float(startTime)
endTime=float(endTime)
vAvg=(positionEquation(startTime)-positionEquation(endTime))/(startTime-endTime)
return vAvg
```
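The docstring above defines average velocity as the change in position divided by the elapsed time. A small worked instance of that formula, kept independent of the `mypy.physics` import (the quadratic position function below is just an illustrative choice, not part of the original module):
```python
# Position s(t) = 5 * t**2, an arbitrary example trajectory.
position = lambda t: 5.0 * t ** 2

# Average velocity between t = 1 and t = 3:
# (s(3) - s(1)) / (3 - 1) = (45 - 5) / 2 = 20
v_avg = (position(3.0) - position(1.0)) / (3.0 - 1.0)
assert v_avg == 20.0
```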
#### File: python/challengesCollectionsEtc/fibonacci.py
```python
def isPerfSquare(x):
return(int(x**(0.5))**2==x)
def isFib(x):
return (isPerfSquare(5*x*x+4) or isPerfSquare(5*x*x-4))
```
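`isFib` uses the classical identity that n is a Fibonacci number exactly when 5n² + 4 or 5n² − 4 is a perfect square. A quick sanity check over the first twenty integers (a sketch that assumes this file is importable as `fibonacci`):
```python
from fibonacci import isFib

# Fibonacci numbers up to 21 should pass the test, everything else should not.
fibs = {1, 2, 3, 5, 8, 13, 21}
for n in range(1, 22):
    assert isFib(n) == (n in fibs)
```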
#### File: python/clock/clockrings.py
```python
from mypy.clock import romans
import datetime
from fvh import MyTurtle
from time import sleep
"""
Sample input:
from mypy.clock import clockrings
a=clockrings.clockface()
a.maketurtles([40,30,20])
a.setuptheclock([150,300,400])
a.run()
To Do:
add arabic number support
resolve spacing issues with ones and fiftys
rewrote all roman numerals so that all units will now start in the
top left hand corner, rather than the top right hand corner.
resolve height issue with fives
see resolution for spacing issue of ones and fiftys
set 0 to twentyfour for hours and figure out something to do for minutes and seconds
add support for partial unit completion and smooth out transition(perhaps sleep for 1/10 of a second rather than one second)
build it into a square shape rather than a circle
make numbers that actually signify the time stand out.
"""
def convtoroman(data):
"""
This module is designed to accept a numeral and then convert it to its
value in roman numerals. It should accept values between 1 and 3999
"""
I=(1,'I')
V=(5,'V')
X=(10,'X')
L=(50,'L')
C=(100,'C')
D=(500,'D')
M=(1000,'M')
allvals=[I,V,X,L,C,D,M]
if data==0:
data=1
romanstring=''
thousands=data/1000
hundreds=(data/100)%10
tens=(data/10)%10
ones=data%10
for m in range(thousands):
romanstring=romanstring+M[1]
if hundreds==4:
romanstring=romanstring+"CD"
elif hundreds==9:
romanstring=romanstring+"CM"
else:
for d in range(hundreds/5):
romanstring=romanstring+D[1]
for c in range(hundreds%5):
romanstring=romanstring+C[1]
if tens==4:
romanstring=romanstring+"XL"
elif tens==9:
romanstring=romanstring+"XC"
else:
for l in range(tens/5):
romanstring=romanstring+L[1]
for x in range(tens%5):
romanstring=romanstring+X[1]
if ones==4:
romanstring=romanstring+"IV"
elif ones==9:
romanstring=romanstring+"IX"
else:
for v in range(ones/5):
romanstring=romanstring+V[1]
for i in range(ones%5):
romanstring=romanstring+I[1]
return romanstring
class timebox(object):
def __init__(self, numbertodisplay, numbersize):
self.number=numbertodisplay
self.size=numbersize
class clockface(object):
def __init__(self):
self.starttime=datetime.datetime.now()
self.hoursring=clockring('hour', self.starttime)
self.minutesring=clockring('minute',self.starttime)
self.secondsring=clockring('second',self.starttime)
self.rings=[self.hoursring, self.minutesring, self.secondsring]
def maketurtles(self, listofnumbers):
ringsizes=zip(self.rings, listofnumbers)
for eachring in ringsizes:
eachring[0].createnumbers(eachring[1])
def setuptheclock(self, listofdiameters):
clockdias=zip(self.rings, listofdiameters)
for eachring in clockdias:
eachring[0].createClock(eachring[1])
def run(self):
while True:
newtime=datetime.datetime.now()
for eachring in self.rings:
eachring.update(newtime)
#print newtime
sleep(0.1)
class clockring(object):
def __init__(self, unit, starttime):
self.unit=unit
self.starttime=starttime
if self.unit=='hour':
self.numberofunits=24
self.current=self.starttime.hour
self.percentpassed=self.starttime.minute/60.0
elif self.unit=='minute':
self.numberofunits=60
self.current=self.starttime.minute
self.percentpassed=self.starttime.second/60.0
elif self.unit=='second':
self.numberofunits=60
self.current=self.starttime.second
self.percentpassed=self.starttime.microsecond/1000000.0
def createnumbers(self, size):
self.size=size
self.numbers=[]
for eachnumber in range(self.numberofunits):
#numbers.append(timebox(eachnumber, self.size))
romannumbers=convtoroman(eachnumber)
lm=MyTurtle()
lm.tracer(False)
pos=0
lm.begin_poly()
for letter in romannumbers:
if letter=='I':
romans.one(startpos=(pos,0), lm=lm, cube=self.size)
pos=pos+(self.size/2.0)
elif letter=='V':
romans.five(startpos=(pos,0), lm=lm, cube=self.size)
pos=pos+self.size
elif letter=='X':
romans.ten(startpos=(pos,0), lm=lm, cube=self.size)
pos=pos+self.size
elif letter=='L':
romans.fifty(startpos=(pos,0), lm=lm, cube=self.size)
pos=pos+self.size/2.0
lm.end_poly()
lm.tracer(False)
lms=lm.getscreen()
lms.addshape(self.unit+str(eachnumber), lm.get_poly())
lm.shape(self.unit+str(eachnumber))
lm.st()
lm.clear()
lm.pu()
lm.seth(0)
# lm.goto(self.size*len(romannumbers),25*len(self.unit))
lm.getscreen().update()
lm.settiltangle(90)
self.numbers.append(lm)
def createClock(self, diameter):
self.diameter=diameter
self.offset=360.0/self.numberofunits
x=0
for eachnumber in range(self.current, self.current+self.numberofunits):
self.numbers[eachnumber%self.numberofunits].goto(0,0)
self.numbers[eachnumber%self.numberofunits].seth(x*self.offset)
self.numbers[eachnumber%self.numberofunits].fd(diameter)
x+=1
self.numbers[0].getscreen().update()
def update(self,time):
if self.unit=='hour':
self.current=time.hour
self.percentpassed=self.starttime.minute/60.0
elif self.unit=='minute':
self.current=time.minute
self.percentpassed=self.starttime.second/60.0
elif self.unit=='second':
self.current=time.second
self.percentpassed=self.starttime.microsecond/1000000.0
#print self.percentpassed
x=0
for eachnumber in range(self.current, self.current+self.numberofunits):
self.numbers[eachnumber%self.numberofunits].goto(0,0)
self.numbers[eachnumber%self.numberofunits].seth((x*self.offset)+(self.offset*self.percentpassed))
print self.unit + str((self.offset*self.percentpassed))
self.numbers[eachnumber%self.numberofunits].fd(self.diameter)
if x==0:
self.numbers[eachnumber%self.numberofunits].color('red')
else:
self.numbers[eachnumber%self.numberofunits].color('black')
x+=1
self.numbers[0].getscreen().update()
```
#### File: python/clock/cubeclock.py
```python
from mypy.clock import arabic
import datetime
from fvh import MyTurtle
from time import sleep
class timebox(object):
"""
The time box is used to house a numerical representation of an element of the time
It will also provide an easy to reference indication of how much space the
digits will need in order to be displayed properly, and, should the need
arise, provide a way to keep two seperate turtle objects evenly spaced
"""
def __init__(self, numbertodisplay, numbersize):
self.number=numbertodisplay
self.size=float(numbersize)
lm=MyTurtle()
lm.tracer(False)
pos=0.0
#on size: for a '1', the height=numbersize, width=numbersize/8
# for everything else: h=numbersize, w=numbersize/2
lm.begin_poly()
for digit in self.number:
if digit=='0':
arabic.zero(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='1':
arabic.one(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/8.0)
elif digit=='2':
arabic.two(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='3':
arabic.three(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='4':
arabic.four(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='5':
arabic.five(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='6':
arabic.six(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='7':
arabic.seven(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='8':
arabic.eight(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
elif digit=='9':
arabic.nine(startpos=(pos,0), lm=lm, height=self.size)
pos=pos+(self.size/2.0)
pos=pos+(self.size/5.0)
lm.end_poly()
lms=lm.getscreen()
lms.addshape(str(self.size)+self.number, lm.get_poly())
lm.shape(str(self.size)+self.number)
lm.st()
self.lm=lm
self.width=pos
#return lm
class clockface(object):
def __init__(self):
self.starttime=datetime.datetime.now()
self.hourcube=clockcube('hour', self.starttime)
self.minutecube=clockcube('minute', self.starttime)
self.secondcube=clockcube('second', self.starttime)
self.cubes=[self.hourcube, self.minutecube, self.secondcube]
def createNumbers(self,listOfSizes):
"""
This method accepts a list of sizes and then creates the cubes for each
unit at the desired size
"""
numbersizes=zip(self.cubes, listOfSizes)
for cube in numbersizes:
cube[0].createnumbercubes(cube[1])
class clockcube(object):
def __init__(self, unit, starttime):
self.unit=unit
self.starttime=starttime
if self.unit=='hour':
self.numberofunits=24
self.current=self.starttime.hour
self.percentpassed=self.starttime.minute/60.0
elif self.unit=='minute':
self.numberofunits=60
self.current=self.starttime.minute
self.percentpassed=self.starttime.second/60.0
elif self.unit=='second':
self.numberofunits=60
self.current=self.starttime.second
self.percentpassed=self.starttime.microsecond/1000000.0
def createnumbercubes(self,size):
self.size=size
self.numbers=[]
self.widest=0
for eachnumber in range(self.numberofunits):
abox=timebox('%02d' % eachnumber, self.size)
if abox.width>self.widest: self.widest=abox.width
self.numbers.append(abox)
def arrangetocube(self):
self.numbersinhor=(len(self.numbers)-1)/3
self.numbersinvert=(len(self.numbers)-1)-(2*self.numbersinhor)
self.boxwidth=self.numbersinhor*self.widest
self.boxheight=self.numbersinvert*self.size
self.innerbox=[(self.boxwidth/2.0, self.boxheight/2.0), (-self.boxwidth/2.0, self.boxheight/2.0),(-self.boxwidth/2.0, -self.boxheight/2.0),(self.boxwidth/2.0, -self.boxheight/2.0)]
nexposition=[self.innerbox[0][0]-self.widest, self.innerbox[0][1]]
for value in range(self.current+1, self.current+self.numberofunits):
if (nexposition[0]>=(-self.boxwidth/2.0) and nexposition[1]==self.innerbox[0][1]):
self.numbers[value%self.numberofunits].lm.goto(nexposition[0],nexposition[1])
if ((nexposition[0]-self.widest)>-self.boxwidth/2.0):
nexposition[0]=nexposition[0]-self.widest
else:
nexposition[0]=nexposition[0]-self.widest
nexposition[1]=nexposition[1]-self.size
elif nexposition[1]>-self.boxheight/2.0:
self.numbers[value%self.numberofunits].lm.goto(nexposition[0],nexposition[1])
if ((nexposition[1]-self.size)>(-self.boxheight/2.0)):
nexposition[1]=nexposition[1]-self.size
else:
nexposition[1]=nexposition[1]-self.size
nexposition[0]=nexposition[0]+self.widest
else:
self.numbers[value%self.numberofunits].lm.goto(nexposition[0],nexposition[1])
nexposition[0]=nexposition[0]+self.widest
```
#### File: python/clock/roman.py
```python
from fvh import MyTurtle
import math
def one(starth=270, startpos=(0,0), lm=None, cube=[60,60]):
if not lm:
lm=MyTurtle()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
lm.ht()
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(30)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(30)
lm.tracer(True)
def two(starth=270, startpos=(0,0), lm=None):
if not lm:
lm=MyTurtle()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
lm.ht()
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.pu()
lm.rt(180)
lm.fd(20)
lm.left(90)
lm.fd(5)
lm.pd()
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.right(90)
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.tracer(True)
def three(starth=270, startpos=(0,0), lm=None):
if not lm:
lm=MyTurtle()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
lm.ht()
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(90)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.left(90)
lm.fd(40)
lm.left(90)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(90)
lm.pu()
lm.rt(180)
lm.fd(20)
lm.left(90)
lm.fd(5)
lm.pd()
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.right(90)
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.pu()
lm.rt(180)
lm.fd(30)
lm.left(90)
lm.pd()
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.right(90)
lm.fd(40)
lm.right(90)
lm.fd(20)
lm.tracer(True)
def five(starth=270, startpos=(0,0), lm=None):
if not lm:
lm=MyTurtle()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
lm.ht()
lm.fd(5)
lm.right(90)
lm.fd(10)
top0=lm.pos()
topheading=lm.heading()
theta=math.degrees(math.asin(40.0/((15.0**2+40.0**2)**0.5)))
lm.seth(topheading+theta)
lm.fd((15.0**2+40.0**2)**0.5)
lm.seth(topheading-180)
lm.fd(25)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(25)
lm.seth(topheading-theta)
lm.fd((15.0**2+40.0**2)**0.5)
lm.seth(topheading)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.pu()
lm.right(180)
lm.fd(20)
lm.left(90)
lm.fd(5)
lm.pd()
innertheta=math.degrees(math.asin(30/((10.0**2+30.0**2)**0.5)))
lm.seth(topheading+innertheta)
lm.fd((10.0**2+30.0**2)**0.5)
lm.seth(topheading-innertheta)
lm.fd((10.0**2+30.0**2)**0.5)
lm.seth(topheading-180.0)
lm.fd(20)
lm.tracer(True)
def ten(starth=270, startpos=(0,0), lm=None):
if not lm:
lm=MyTurtle()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
lm.ht()
lm.fd(5)
lm.right(90)
lm.fd(10)
topheading=lm.heading()
outtertheta=math.degrees(math.asin(25.0/((15.0**2+25.0**2)**0.5)))
lm.seth(topheading+outtertheta) #top right
lm.fd((15.0**2+25.0**2)**0.5)
lm.seth(topheading-(180+outtertheta)) # middle right
lm.fd((15.0**2+25.0**2)**0.5)
lm.seth(topheading-180)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(10)
lm.seth(topheading+(180+outtertheta)) # bottom left
lm.fd((15.0**2+25.0**2)**0.5)
lm.seth(topheading-outtertheta) # middle left
lm.fd((15.0**2+25.0**2)**0.5)
lm.seth(topheading)
lm.fd(10)
lm.right(90)
lm.fd(5)
lm.right(90)
lm.fd(60)
lm.pu()
lm.right(180)
lm.fd(20)
lm.left(90)
lm.fd(5)
lm.right(90)
lm.pd()
lm.fd(20)
lm.seth(180+(topheading-outtertheta))
lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
lm.seth(topheading+(180+outtertheta))
lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
lm.pu()
lm.seth(90+topheading)
lm.fd(50)
lm.pd()
lm.seth(topheading)
lm.fd(20)
lm.seth(topheading+(180+outtertheta))
lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
lm.seth(180+(topheading-outtertheta))
lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
lm.tracer(True)
def fifty(starth=270, startpos=(0,0), lm=None):
if not lm:
lm=MyTurtle()
lm.ht()
lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.fd(35)
lm.pd()
lm.fd(15)
lm.right(90)
lm.fd(30)
lm.right(90)
lm.fd(50)
lm.right(90)
lm.fd(10)
lm.right(90)
lm.fd(40)
lm.left(90)
lm.fd(15)
lm.left(45)
lm.fd(50**0.5)
lm.tracer(True)
```
#### File: scripts/python/euler302.py
```python
import fractions
import math
primesAndNums={}
primesAndNums["primes"]=[]
primesAndNums["powerful"]=[]
primesAndNums["perfectPowers"]=set()
primesAndNums["achillesNums"]=[]
primesAndNums['strongAchillesNums']=[]
primesAndNums["totients"]={}
primesAndNums['factors']={}
def newDD():
primesAndNums={}
primesAndNums["primes"]=[]
primesAndNums["powerful"]=[]
primesAndNums["perfectPowers"]=[]
primesAndNums["achillesNums"]=[]
primesAndNums['strongAchillesNums']=[]
primesAndNums["totients"]={}
primesAndNums['factors']={}
def isPrime(number):
isprime = False
if number==1:
return True
elif number==2:
return True
elif number%2==0:
return False
for x in range(3, int(number**0.5) + 1, 2):
if number%x==0:
return False
return True
def breakUpToPrimes(number):
primes={}
counter=0
myPrimes=iter(primesAndNums["primes"])
thisPrime=myPrimes.next()
while (number>1):
if (thisPrime<2):
thisPrime=myPrimes.next()
continue
if (number%thisPrime == 0 ):
counter+=1
primes[thisPrime] = counter
number = number/thisPrime
else:
if not(counter==0):
primes[thisPrime] = counter
counter = 0
thisPrime=myPrimes.next()
return primes
def countTotient(number):
totients = 0
for potentialTotient in range(1, number + 1):
if fractions.gcd(number, potentialTotient) == 1:
totients += 1
return totients
def findAllPerfectRoots(maxVal):
maxRoot = int(maxVal**(0.5))+1
for x in range(2,maxRoot):
thisRoot=2
while(x**thisRoot<maxVal):
thisVal = x**thisRoot
primesAndNums['perfectPowers'].add(thisVal)
thisRoot+=1
def test(anum):
newDD()
x = 0
findAllPerfectRoots(anum)
# primesAndNums = newDD()
while(x<anum-1):
x+=1
if isPrime(x):
primesAndNums['primes'].append(x)
else:
primesAndNums['factors'][x]= breakUpToPrimes(x)
            # Determine if x is powerful
powers=primesAndNums['factors'][x].values()
isPowerful = not(1 in powers)
if isPowerful:
primesAndNums['powerful'].append(x)
            # Determine if x is a perfect power
isPerfect= x in primesAndNums['perfectPowers']
isAchilles = (isPowerful and not(isPerfect))
if isAchilles:
if len(primesAndNums['factors'][x].keys())>1:
print str(x) + " has been found to be an Achilles number"
primesAndNums['achillesNums'].append(x)
primesAndNums['totients'][x]=countTotient(x)
if (primesAndNums['totients'][x] in primesAndNums['achillesNums']):
print str(x) + " has been found to be a strongAchilles number"
primesAndNums['strongAchillesNums'].append(x)
return primesAndNums
if __name__ == '__main__':
results = test(10**8)
print len(results['strongAchillesNums'])
```
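The classification above treats a number as an Achilles number when it is powerful (every exponent in its prime factorisation is at least 2) but is not a perfect power. A self-contained spot check on 72 = 2³ · 3², the smallest Achilles number; this is an independent sketch and does not call into the script above:
```python
from collections import Counter

def prime_exponents(n):
    """Exponents of the prime factorisation of n."""
    exps = Counter()
    d = 2
    while d * d <= n:
        while n % d == 0:
            exps[d] += 1
            n //= d
        d += 1
    if n > 1:
        exps[n] += 1
    return exps

exps = prime_exponents(72)  # {2: 3, 3: 2}
powerful = all(e >= 2 for e in exps.values())
# 72 is not m**k for any k >= 2, so it is not a perfect power.
perfect_power = any(round(72 ** (1.0 / k)) ** k == 72 for k in range(2, 7))
assert powerful and not perfect_power  # 72 is therefore an Achilles number
```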
#### File: python/hackerrank/circleCity.py
```python
t = int(raw_input()) # number of test cases
for x in range(t):
placesNeeded=0
radiusSquared,numberOfStations = map(int,raw_input().split())
radius = int(radiusSquared**0.5)
if not(radius**2==radiusSquared):
radius+=1
for x in range(radius):
if ((int((radiusSquared-(x**2))**0.5))**2==(radiusSquared-(x**2))):
placesNeeded+=4
if (placesNeeded<=numberOfStations):
print "possible"
else:
print "impossible"
##Testing Function:
def testingFunction(radiusSquared, numberOfStations):
placesNeeded=0
#radiusSquared,numberOfStations = map(int,raw_input().split())
radius = int(radiusSquared**0.5)
for x in range(radius+1):
if ((int((radiusSquared-(x**2))**0.5))**2==(radiusSquared-(x**2))):
placesNeeded+=4
if (placesNeeded<=numberOfStations):
print "possible"
else:
print "impossible"
```
#### File: python/hackerrank/findMedian.py
```python
def swapPositions(ar, pos1, pos2):
val1=ar[pos1]
ar[pos1]=ar[pos2]
ar[pos2]=val1
print str(ar).replace('[','').replace(']','').replace(',','')
def quicksortInPlace(ar,startLoc=0, endLoc=None):
if (endLoc==None):
endLoc=len(ar)-1
pivotValue=ar[endLoc]
for location in range(startLoc,endLoc+1):
locVal=ar[location]
if (locVal<pivotValue):
pass
elif (locVal==pivotValue):
pivoted=False
for newLocation in range(startLoc,endLoc+1):
if ((ar[newLocation]>=pivotValue) and not(pivoted)):
swapPositions(ar,newLocation,endLoc)
pivoted=True
if ((newLocation - startLoc)>1):
quicksortInPlace(ar,startLoc=startLoc, endLoc=newLocation-1)
if ((endLoc - newLocation)>1):
quicksortInPlace(ar,startLoc=newLocation+1, endLoc=endLoc)
else:
currentLocation = location + 1
placed = False
while ((not placed) and (currentLocation<endLoc)):
if (ar[currentLocation]<pivotValue):
currentLocationValue=ar[currentLocation]
ar[currentLocation] = ar[location]
ar[location] = currentLocationValue
placed = True
currentLocation+=1
return ar
m = input()
ar = [int(i) for i in raw_input().strip().split()]
quicksortInPlace(ar)
```
#### File: python/hackerrank/quicksort1.py
```python
def partition(ar):
m=ar[0]
p1=[]
p2=[]
for value in ar:
if (value<m):
p1.append(value)
else:
p2.append(value)
print str(p1+p2).replace('[','').replace(']','').replace(',','')+"\n"
m = int(input())
ar = [int(i) for i in raw_input().strip().split()]
partition(ar)
```
#### File: scripts/python/markdownMaker.py
```python
import os
class Crawler(object):
def __init__(self):
self.mywd = os.getcwd()
self.openreadme()
self.findTOC()
self.updateToc()
self.closeUp()
def openreadme(self):
if not os.path.isfile('README.md'):
open('README.md', 'a').close()
self.readme = open('README.md', 'r+')
def findTOC(self):
tocStartString="""
####################################################
#TOC START
####################################################
"""
self.fileLines=[]
for eachLine in self.readme:
if not(eachLine.find("#TOC START")==-1):
if len(self.fileLines)>2:
precedingLine=self.fileLines.pop()
break
else:
self.fileLines.append(eachLine)
self.fileLines.append(tocStartString)
self.readme.close()
open ('README.md','w').close()
self.readme = open('README.md', 'r+')
def updateToc(self):
self.subCrawlers=[]
for root, dirs, files in os.walk(self.mywd):
level = root.replace(self.mywd, '/').count(os.sep)
indent = ' ' * 2 * (level +1)
if level == 2:
if not (root.replace(self.mywd,'')=='.git'):
os.chdir(root)
# print os.getcwd()
self.subCrawlers.append(Crawler())
os.chdir(self.mywd)
self.fileLines.append('* [{}]({}/README.md)\n'.format(root.replace(self.mywd, ''),root.replace(self.mywd, './')))
for f in files:
self.fileLines.append('{}* [{}]({}/{})'.format(indent, f, root.replace(self.mywd, '.'),f)+'\n')
for eachLine in self.fileLines:
self.readme.write(eachLine)
def closeUp(self):
self.readme.close()
if __name__ == '__main__':
a=Crawler()
```
#### File: python/oneTimeScripts/life.py
```python
from numpy import *
import sys
import math
# compute next generation of conway's game of life
def next_generation( current, next ) :
next[:,:] = 0 # zero out next board
# bound examination area
bend0 = (current.shape[0]-3)+1
bend1 = (current.shape[1]-3)+1
for i in xrange( bend0 ) :
for j in xrange( bend1 ) :
neighbours = sum( current[i:i+3, j:j+3] )
if current[i+1, j+1] == 1 :
neighbours -= 1 # do not count yourself
if 2 <= neighbours <= 3 :
next[i+1, j+1] = 1
else:
if neighbours == 3 :
next[i+1,j+1] = 1
if len(sys.argv) != 3 :
print "usage:", sys.argv[0], "init-board generations"
sys.exit( 1 )
init = sys.argv[1]
generations = int( sys.argv[2] )
board_sz = math.ceil( math.sqrt(len(init)) )
# board_sz+2 includes the border of zeros
board = zeros( (board_sz+2,board_sz+2), uint8)
next_board = zeros_like( board ) # same shape
# fill the board
i = 0
j = 0
for index,ch in enumerate(init) :
if ch == '1' :
board[i+1,j+1] = 1
j = (j+1) % board_sz
if ((index+1) % board_sz) == 0 : i += 1
for gen in xrange(generations) :
next_generation( board, next_board )
board, next_board = next_board, board # swap boards
print board[1:-1,1:-1] # do not print the border
```
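As a concrete check of the neighbour-counting rule above, a vertical blinker should flip to a horizontal one after a single generation. The sketch below replays that rule on a padded 5×5 board with NumPy (written for Python 3; the original script is Python 2 and is driven from the command line instead):
```python
import numpy as np

# Vertical blinker in the middle column, surrounded by a border of zeros.
board = np.zeros((5, 5), dtype=np.uint8)
board[1:4, 2] = 1

nxt = np.zeros_like(board)
for i in range(board.shape[0] - 2):
    for j in range(board.shape[1] - 2):
        neighbours = board[i:i + 3, j:j + 3].sum()
        if board[i + 1, j + 1] == 1:
            neighbours -= 1  # do not count the cell itself
            if 2 <= neighbours <= 3:
                nxt[i + 1, j + 1] = 1
        elif neighbours == 3:
            nxt[i + 1, j + 1] = 1

# The blinker flips from vertical to horizontal.
assert (nxt[1:4, 1:4] == np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])).all()
```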
#### File: python/remoteAccessScripts/crawl.py
```python
import sftptoserver
#ssh=sftptoserver.getSSH()
print 'ssh got'
def printAll(tupleOfChannelFiles):
for eachFile in tupleOfChannelFiles:
try:
print(eachFile.readlines())
print"============================"
except IOError:
print "That didn't work"
#results=ssh.exec_command('ls -l')
print 'results got'
#clearAll(results)
```
#### File: python/remoteAccessScripts/dirTree2.py
```python
import pw
import os
host=pw.hn
username=pw.un
#folder=pw.fo
remote_folder=pw.rf
local_folder=pw.lf
output=pw.fi
connectionString='sshfs '+username+'@'+host+':'+remote_folder+' '+local_folder
#os.system(connectionString)
def list_files(startpath):
os.system(connectionString)
k=open(output, 'w')
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
k.write('{}{}'.format(indent,root.replace(startpath, 'http://jlmarks.org/')+'\n'))
#print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
k.write('{}{}/{}'.format(subindent,root.replace(startpath, 'http://jlmarks.org/'),f)+'\n')
#print('{}{}'.format(subindent, f))
k.close()
print""" Suggested use:\n\tdirTree.list_files(dirTree.local_folder) """
```
#### File: python/remoteAccessScripts/requesthandler.py
```python
from twisted.web import http
class MyRequestHandler(http.Request):
pages = {
'/': '<h1>Home</h1>Home page',
'/test': '<h1>Test</h1>Test page',
}
def process(self):
self.setHeader('Content-Type', 'text/html')
if self.pages.has_key(self.path):
self.write(self.pages[self.path])
else:
self.setResponseCode(http.NOT_FOUND)
self.write("<h1>Not Found</h1>Sorry, no such page.")
self.finish()
class MyHttp(http.HTTPChannel):
requestFactory = MyRequestHandler
class MyHttpFactory(http.HTTPFactory):
protocol = MyHttp
if __name__ == "__main__":
from twisted.internet import reactor
reactor.listenTCP(8000, MyHttpFactory())
reactor.run()
```
#### File: scripts/python/scratchpad_dp6Mar15_hard_numberChains.py
```python
class numberChain(object):
def __init__(self, goalValue, numberOfSteps):
self.valuegoal=goalValue
self.linksgoal=numberOfSteps
self.links=[]
self.links.append(1)
def calculate(self):
self.potentialNextValues=[]
self.currentValue=sum(self.links)
self.currentLinks=len(self.links)
currentDifference=self.valuegoal-self.currentValue
largestWithoutGoingOver={}
if currentDifference==0:
if (self.linksgoal-self.currentLinks == 0):
print "YAY! that was it!"
print self.links
else:
print "At least the value is right with this: "
print self.links
if((self.currentValue>self.valuegoal) or (self.currentLinks>self.linksgoal)):
self.links.pop()
self.calculate()
for eachlinkLocation in range(len(self.links)):
dval=self.links[eachlinkLocation]*2
if (dval<=self.valuegoal):
self.potentialNextValues.append(dval)
if largestWithoutGoingOver.has_key(dval):
largestWithoutGoingOver[dval].append([self.links[eachlinkLocation], self.links[eachlinkLocation]])
else:
largestWithoutGoingOver[dval]=[[self.links[eachlinkLocation], self.links[eachlinkLocation]],]
if (dval==self.valuegoal):
self.links.append(dval)
self.calculate()
for eachSecondValue in range(eachlinkLocation+1, len(self.links)):
aval=self.links[eachlinkLocation] + self.links[eachSecondValue]
self.potentialNextValues.append(aval)
if (aval<=self.valuegoal):
if largestWithoutGoingOver.has_key(aval):
largestWithoutGoingOver[aval].append([self.links[eachlinkLocation], self.links[eachSecondValue]])
else:
largestWithoutGoingOver[aval]=[[self.links[eachlinkLocation], self.links[eachSecondValue]],]
if (aval==self.valuegoal):
self.links.append(self.links[eachlinkLocation] + self.links[eachSecondValue])
self.calculate()
if True:
"hey, you made it this far!"
nval=max(largestWithoutGoingOver.keys())
self.links.append(nval)
            self.calculate()
class NumChain2(object):
def __init__(self,targetValue, chainLength):
self.valuegoal=targetValue
self.linksgoal=chainLength
self.links=[]
self.potentialLinks=[]
self.links.append(1)
self.potentialLinks.append(set([1]))
self.fulfilled=False
def createNextPotentialLinks(self):
self.potentialNextValues=set()
self.currentValue=sum(self.links)
self.currentLinks=len(self.links)
for eachFirstLocation in range(len(self.links)):
for eachSecondValue in range(eachFirstLocation,len(self.links)):
self.potentialNextValues.add(self.links[eachFirstLocation]+self.links[eachSecondValue])
# toremove=[]
# for eachPotential in self.potentialNextValues:
# if (eachPotential>self.valuegoal):
# toremove.append(eachPotential)
# for eachbad in toremove:
# self.potentialNextValues.remove(eachbad)
def iterate(self):
self.createNextPotentialLinks()
self.potentialLinks.append(self.potentialNextValues)
# print "potentialNextValues:"
# for eachnum in sorted(self.potentialNextValues):
# print eachnum
if (self.currentLinks>=self.linksgoal-1):
self.fulfilled=True
elif (self.valuegoal in self.potentialNextValues):
self.fulfilled=True
self.setNextValue(self.valuegoal)
# elif (max(self.potentialNextValues)==self.links[-1]):
# """That means we have did this last time"""
# if (max(self.potentialNextValues)==self.valuegoal):
# self.fulfilled=True
self.setNextValue(max(self.potentialNextValues))
def setNextValue(self,value):
self.links.append(value)
def theLogicController(self):
while not self.fulfilled:
self.iterate()
print self.links
import random
import itertools
class spad(object):
def __init__(self, numberOfLinks, desiredValue):
self.numberOfLinks = numberOfLinks
self.desiredValue = desiredValue
self.links=[]
self.links.append(1)
self.stringGroup=[]
self.allPotentialValues=set()
for x in range(numberOfLinks+1):
self.stringGroup.append([x,])
print self.calculateNextLinks()
self.cleanLists()
def calculateNextLinks(self):
potentialNextValues = set()
currentNumberOfLinks=len(self.links)
if (self.links[-1]==self.desiredValue):
return self.links
elif (currentNumberOfLinks>self.numberOfLinks):
return False
else:
self.stringGroup[currentNumberOfLinks].append(self.links[:])
for outterLinkLocation in range(currentNumberOfLinks):
for innerLinkLocation in range(outterLinkLocation, currentNumberOfLinks):
self.allPotentialValues.add(self.links[outterLinkLocation]+self.links[innerLinkLocation])
potentialNextValues.add(self.links[outterLinkLocation]+self.links[innerLinkLocation])
while (len(potentialNextValues)>0):
eachLink = random.choice(list(potentialNextValues))
potentialNextValues.discard(eachLink)
self.links.append(eachLink)
done=self.calculateNextLinks()
if (done):
return done
else:
self.links.pop()
return False
def cleanLists(self):
self.finalLists=[]
for x in self.stringGroup:
x.pop(0)
for y in x:
y.sort()
x.sort()
self.finalLists.append([key for key,_ in itertools.groupby(x)])
#Description
# An "addition chain" is a sequence of numbers that starts with 1 and where each number is the sum of two previous numbers (or the same number taken twice), and that ends at some predetermined value.
# An example will make this clearer: the sequence [1, 2, 3, 5, 10, 11, 21, 42, 84] is an addition chain for the number 84. This is because it starts with 1 and ends with 84, and each number is the sum of two previous numbers. To demonstrate:
# (chain starts as [1])
# 1 + 1 = 2 (chain is now [1, 2])
# 1 + 2 = 3 (chain is now [1, 2, 3])
# 2 + 3 = 5 (chain is now [1, 2, 3, 5])
# 5 + 5 = 10 (chain is now [1, 2, 3, 5, 10])
# 1 + 10 = 11 (chain is now [1, 2, 3, 5, 10, 11])
# 10 + 11 = 21 (chain is now [1, 2, 3, 5, 10, 11, 21])
# 21 + 21 = 42 (chain is now [1, 2, 3, 5, 10, 11, 21, 42])
# 42 + 42 = 84 (chain is now [1, 2, 3, 5, 10, 11, 21, 42, 84])
# Notice that the right hand side of the equations make up the chain, and left hand side of all the equations is a sum of two numbers that occur earlier in the chain (sometimes the same number twice).
# We say that this chain is of length 8, because it took 8 additions to generate it (this is one less than the total amount of numbers in the chain).
# There are a several different addition chains of length 8 for the number 84 (another one is [1, 2, 4, 8, 16, 32, 64, 68, 84], for instance), but there are no shorter ones. This is as short as we can get.
# Your task today is to try and generate addition chains of a given length and last number.
# (by the way, you may think this looks similar to the Fibonacci sequence, but it's not, there's a crucial difference: you don't just add the last two numbers of the chain to get the next number, you can add *any* two previous numbers to get the next number. The challenge is figuring out, for each step, which two numbers to add)
# #Formal inputs & outputs
# ##Input description
# You will be given one line with two numbers on it. The first number will be the length of the addition chain you are to generate, and the second the final number.
# Just to remind you: the length of the addition chain is equal to the number of additions it took to generate it, which is the same as **one less** than the total amount of numbers in it.
# ##Output description
# You will output the entire addition chain, one number per line. There will be several different addition chains of the given length, but you only need to output one of them.
# Note that going by the strict definition of addition chains, they don't necessarily have to be strictly increasing. However, any addition chain that is not strictly increasing can be reordered into one that is, so you can safely assume that all addition chains are increasing. In fact, making this assumption is probably a very good idea!
# #Examples
# ##Input 1
# 7 43
# ##Output 1
# (one of several possible outputs)
# 1
# 2
# 3
# 5
# 10
# 20
# 40
# 43
# ##Input 2
# 9 95
# ##Output 2
# (one of several possible outputs)
# 1
# 2
# 3
# 5
# 7
# 14
# 19
# 38
# 57
# 95
# #Challenge inputs
# ##Input 1
# 10 127
# ##Input 2
# 13 743
# #Bonus
# 19 123456
# If you want *even more* of a challenge than that input, consider this: when I, your humble moderator, was developing this challenge, my code would not be able to calculate the answer to this input in any reasonable time (even though solutions exist):
# 25 1234567
# If you can solve that input, you will officially have written a much better program than me!
# #Notes
# I would like to note that while this challenge looks very "mathy", you don't need any higher level training in mathematics in order to solve it (at least not any more than is needed to understand the problem). There's not some secret formula that you have to figure out. It's still not super-easy though, and a good working knowledge of programming techniques will certainly be helpful!
# In other words, in order to solve this problem (and especially the bonus), you need to be clever, but you don't need to be a mathematician.
# As always, if you have any suggestions for problems, hop on over to /r/dailyprogrammer_ideas and let us know!
```
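The problem statement embedded above defines an addition chain purely by a membership property, which makes candidate chains easy to verify independently of any of the search attempts in this scratchpad. A small validator, offered as a sketch rather than part of the original file:
```python
def is_addition_chain(chain, target):
    """True if chain starts at 1, ends at target, and every later element
    is the sum of two (not necessarily distinct) earlier elements."""
    if not chain or chain[0] != 1 or chain[-1] != target:
        return False
    for pos in range(1, len(chain)):
        earlier = chain[:pos]
        if not any(a + b == chain[pos] for a in earlier for b in earlier):
            return False
    return True

# The worked example from the problem statement: a length-8 chain for 84.
assert is_addition_chain([1, 2, 3, 5, 10, 11, 21, 42, 84], 84)
# 7 is not the sum of two earlier terms of [1, 2, 4], so this is rejected.
assert not is_addition_chain([1, 2, 4, 7], 7)
```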
#### File: python/someMoreFVH/fvh2.py
```python
import fvh
import fvhmans
import math
import datetime
from wand.image import Image
def anothercircle(innumofpoints, outnumofpoints, innerrad, outterrad, lm=None):
if not lm:
lm=fvh.MyTurtle()
lm.speed(0)
lm.ht()
lm.tracer(False)
innercircle={}
outtercircle={}
innerdeg=360.0/innumofpoints
outterdeg=360.0/outnumofpoints
for point in range(innumofpoints):
lm.gohome()
lm.setheading(point*innerdeg)
lm.pu()
lm.fd(innerrad)
innercircle[point]=lm.position()
for apoint in range(outnumofpoints):
lm.gohome()
lm.setheading(apoint*outterdeg)
lm.pu()
lm.fd(outterrad)
outtercircle[apoint]=lm.position()
for start in range(len(innercircle)):
for end in range(len(outtercircle)):
lm.goto(innercircle[start])
lm.pd()
lm.goto(outtercircle[end])
#print outtercircle[end], innercircle[start]
lm.pu()
lm.tracer(True)
savetocircles(lm)
def getCirclePoints(numofPoints, radius):
lm=fvh.MyTurtle()
lm.speed(0)
lm.ht()
circlepoints={}
degperpt=360.0/numofPoints
for point in range(numofPoints):
lm.gohome()
lm.pu()
lm.setheading(point*degperpt)
lm.fd(radius)
circlepoints[point]=lm.position()
#lm.bye()
return circlepoints
def savetocircles(aturtle,afilename=None,aheight=None,awidth=None,ax=None,ay=None, togif=True, topng=False):
if not afilename:
datetime.datetime.now()
afilename='circles/'+datetime.datetime.now().strftime('%Y%b%d%H%M%S%f'+'.eps')
else:
afilename='circles/'+datetime.datetime.now().strftime('%Y%b%d%H%M%S%f'+afilename+'.eps')
aturtle.getscreen().getcanvas().postscript(file=afilename, height=aheight, width=awidth, x=ax, y=ay)
if togif:
with Image(filename=afilename) as img:
with img.convert('gif') as newimg:
newfilename=afilename[:-3]+'gif'
newimg.save(filename=newfilename)
if topng:
with Image(filename=afilename) as img:
with img.convert('png') as newimg:
newfilename=afilename[:-3]+'png'
newimg.save(filename=newfilename)
def graph():
lm=fvh.MyTurtle()
lm.ht()
lm.speed(0)
for x in range(0,10000,5):
lm.goto(x/100.0, 10*math.sin(x/100.0))
def cirstr():
lm=fvh.MyTurtle()
lm.speed(0)
lm.ht()
lm.tracer(False)
circlepoints=getCirclePoints(20,800)
strpoints={}
for x in range(0,100,8):
strpoints[x]=(300,x*20)
strpoints[x+1]=(300,-(x*20))
strpoints[x+2]=(-300,-(x*20))
strpoints[x+3]=(-300,(x*20))
strpoints[x+4]=(x*20,300)
strpoints[x+5]=(-(x*20),300)
strpoints[x+6]=(-(x*20),-300)
strpoints[x+7]=((x*20),-300)
#for y in range(8):
#print x,y,strpoints[x+y]
minScreenSize=fvhmans.getSS([circlepoints, strpoints])
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
lm.getscreen().screensize(minScreenSize[0],minScreenSize[1])
for start in range(len(circlepoints)):
for end in range(len(strpoints)):
lm.pu()
lm.goto(circlepoints[start])
lm.pd()
lm.goto(strpoints[end])
#print str(start) +'to' + str(end)
#print circlepoints[start],strpoints[end]
lm.tracer(True)
fname="circles/fvh2cirstrw_.eps"
print fname
savetocircles(lm ,afilename=fname,awidth=minScreenSize[0], aheight=minScreenSize[1],ax=-minScreenSize[0]/2.0,ay=-minScreenSize[1]/2.0 )
def acirstr(pointsincircle, circleDiameter, strstop, strtopes,lm=None):
if not lm:
lm=fvh.MyTurtle()
lm.speed(0)
lm.ht()
lm.tracer(False)
circlepoints=getCirclePoints(pointsincircle,circleDiameter)
strpoints={}
for x in range(0,strstop,8):
strpoints[x]=(strtopes,x*20)
strpoints[x+1]=(strtopes,-(x*20))
strpoints[x+2]=(-strtopes,-(x*20))
strpoints[x+3]=(-strtopes,(x*20))
strpoints[x+4]=(x*20,strtopes)
strpoints[x+5]=(-(x*20),strtopes)
strpoints[x+6]=(-(x*20),-strtopes)
strpoints[x+7]=((x*20),-strtopes)
print len(strpoints)
#for y in range(8):
#print x,y,strpoints[x+y]
minScreenSize=fvhmans.getSS([circlepoints, strpoints])
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
lm.getscreen().screensize(minScreenSize[0],minScreenSize[1])
for start in range(len(circlepoints)):
for end in range(len(strpoints)):
lm.pu()
lm.goto(circlepoints[start])
lm.pd()
lm.goto(strpoints[end])
#print str(start) +'to' + str(end)
#print circlepoints[start],strpoints[end]
lm.tracer(True)
fname="circles/fvh2cirstrw_"+str(pointsincircle)+'o'+str(circleDiameter)+'o'+str(strstop)+'o'+str(strtopes)+".eps"
print fname
savetocircles(lm ,afilename=fname,awidth=minScreenSize[0], aheight=minScreenSize[1],ax=-minScreenSize[0]/2.0,ay=-minScreenSize[1]/2.0,togif=True )
def cirstra(pointsinCircle,circlediameter,pointsinsquare,squaresize,lm):
#lm=fvh.MyTurtle()
lm.speed(0)
lm.ht()
lm.tracer(False)
circlepoints=getCirclePoints(pointsinCircle,circlediameter)
strpoints={}
squarestep=squaresize/float(pointsinsquare)
for x in range(0,(pointsinsquare*8),8):
strpoints[x]=(squaresize,(x/8)*squarestep)
strpoints[x+1]=(squaresize,-((x/8)*squarestep))
strpoints[x+2]=(-squaresize,-((x/8)*squarestep))
strpoints[x+3]=(-squaresize,((x/8)*squarestep))
strpoints[x+4]=((x/8)*squarestep,squaresize)
strpoints[x+5]=(-((x/8)*squarestep),squaresize)
strpoints[x+6]=(-((x/8)*squarestep),-squaresize)
strpoints[x+7]=(((x/8)*squarestep),-squaresize)
#for y in range(8):
#print x,y,strpoints[x+y]
minScreenSize=fvhmans.getSS([circlepoints, strpoints])
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
lm.getscreen().screensize(minScreenSize[0],minScreenSize[1])
#lm.getscreen().setup(minScreenSize[0]+200, minScreenSize[1]+200)
circlepoints=fvhmans.center(circlepoints)
strpoints=fvhmans.center(strpoints)
for start in range(len(circlepoints)):
for end in range(len(strpoints)):
lm.pu()
lm.goto(circlepoints[start])
lm.pd()
lm.goto(strpoints[end])
#print str(start) +'to' + str(end)
#print circlepoints[start],strpoints[end]
lm.tracer(True)
savetocircles(lm ,awidth=minScreenSize[0], aheight=minScreenSize[1],ax=-minScreenSize[0]/2.0,ay=-minScreenSize[1]/2.0 )
def twoshapes(firstpoints, secondpoints,lm):
lm.reset()
lm.setup()
minScreenSize=fvhmans.getSS([firstpoints, secondpoints])
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
b=lm.getscreen()
b.screensize(minScreenSize[0],minScreenSize[1])
b.setup(minScreenSize[0]*2,minScreenSize[1]*2)
afilename='circles/twoshapes/'+datetime.datetime.now().strftime('%Y-%b-%d_%H:%M:%S.%f'+'.eps')
for start in range(len(firstpoints)):
for firstend in range(len(secondpoints)):
lm.pu()
lm.goto(firstpoints[start])
lm.pd()
lm.goto(secondpoints[firstend])
lm.pu()
for secondend in range(start,len(firstpoints)):
lm.pu()
lm.goto(firstpoints[start])
lm.pd()
lm.goto(firstpoints[secondend])
lm.pu()
for secondstart in range(len(secondpoints)):
for thirdend in range(secondstart,len(secondpoints)):
lm.pu()
lm.goto(secondpoints[secondstart])
lm.pd()
lm.goto(secondpoints[thirdend])
lm.pu()
savetocircles(lm, afilename=afilename, ax=-minScreenSize[0], ay=-minScreenSize[1], awidth=2*minScreenSize[0], aheight=2*minScreenSize[1], togif=True)
def drawaxis():
lm=fvh.MyTurtle()
lm.setup()
for x in range(4):
lm.goto(0,0)
lm.seth(x*90)
for x in range(100):
lm.write(x*20)
lm.fd(20)
def circlearound(point,radius,lm):
lm.pu()
lm.goto(point[0],point[1]-radius)
lm.pd()
lm.seth(0)
lm.circle(radius)
lm.pu()
def interlappingcircles():
circles=[]
circle0=((0,0),200)
circle1=((200,0),100)
circles.append(circle0)
circles.append(circle1)
lm=fvh.MyTurtle()
lm.setup()
circlearound(circle0[0],circle0[1],lm)
circlearound(circle1[0],circle1[1],lm)
newcircles=fvhmans.circleinter(circle0[0][0],circle0[0][1], circle0[1],circle1[0][0],circle1[0][1], circle1[1])
circlearound(newcircles[0],50,lm)
circlearound(newcircles[1],50,lm)
def manyshapes(listofdictionariesofpoints,lm):
ld=listofdictionariesofpoints
lm.reset()
lm.setup()
minScreenSize=fvhmans.getSS(ld)
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
b=lm.getscreen()
b.screensize(minScreenSize[0],minScreenSize[1])
b.setup(minScreenSize[0]*2,minScreenSize[1]*2)
afilename='circles/manyshapes/'+datetime.datetime.now().strftime('%Y-%b-%d_%H:%M:%S.%f'+'.eps')
for shapepos in range(len(ld)-1):
lm.tracer(False)
first=ld[shapepos]
second=ld[shapepos+1]
for start in range(len(first)):
for firstend in range(len(second)):
lm.pu()
lm.goto(first[start])
lm.pd()
lm.goto(second[firstend])
lm.pu()
for secondend in range(start,len(first)):
lm.pu()
lm.goto(first[start])
lm.pd()
                    lm.goto(first[secondend])  # connect points within the first shape, as in twoshapes
for secondstart in range(len(second)):
for thirdend in range(secondstart,len(second)):
lm.pu()
lm.goto(second[secondstart])
lm.pd()
lm.goto(second[thirdend])
lm.tracer(True)
savetocircles(lm, afilename=afilename, ax=-minScreenSize[0], ay=-minScreenSize[1], awidth=2*minScreenSize[0], aheight=2*minScreenSize[1], togif=True)
def allconnected(listofdictionariesofpoints,lm):
ld=listofdictionariesofpoints
lm.reset()
lm.setup()
lm.tracer(False)
minScreenSize=fvhmans.getSS(ld)
minScreenSize=(minScreenSize[0]*2, minScreenSize[1]*2)
b=lm.getscreen()
b.screensize(minScreenSize[0],minScreenSize[1])
b.setup(minScreenSize[0]*2,minScreenSize[1]*2)
afilename='circles/manyshapes/'+datetime.datetime.now().strftime('%Y-%b-%d_%H:%M:%S.%f'+'.eps')
allpoints=[]
for eachdic in ld:
for eachval in eachdic.itervalues():
allpoints.append(eachval)
for firstpoint in range(len(allpoints)):
for secondpoint in range(firstpoint,len(allpoints)):
lm.pu()
lm.goto(allpoints[firstpoint])
lm.pd()
lm.goto(allpoints[secondpoint])
lm.pu()
lm.tracer(True)
savetocircles(lm, afilename=afilename, ax=-minScreenSize[0], ay=-minScreenSize[1], awidth=2*minScreenSize[0], aheight=2*minScreenSize[1], togif=True)
```
#### File: python/someMoreFVH/somemorefvh.py
```python
from dangerzone import fvh
from dangerzone import fvh2
from dangerzone import fvhmans
tur=fvh.MyTurtle()
tur.setup()
def run1():
fvh.coolercirclesaa(tur)
tur.cleanhome()
for x in range(20):
for y in range(20):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(20):
for y in range(20):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(20):
for y in range(20):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(3,30,3):
for y in range(5,75,5):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(3,30,3):
for y in range(5,75,5):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(3,30,3):
for y in range(5,75,5):
fvh.pdraw(x,y,tur)
tur.cleanhome()
for x in range(3,30):
fvh.coolcircles(x,tur)
tur.cleanhome()
for x in range(3,30):
fvh.coolcircles(x,tur)
tur.cleanhome()
```
#### File: python/turtleRelated/circleint.py
```python
import math
import fvh2, fvh
import supercircle
masterCircleSet=set()
circlecalled = 0
checkcirclescalled = 0
MINOFFSET=5
class Circle():
def __init__(self,x,y,r,lm=None, keep=True):
global circlecalled
circlecalled+=1
self.keep = keep
self.center=(x,y)
self.radius=r
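        # Quantize the center to a MINOFFSET grid so that near-duplicate circles
        # (same radius, centers within a few pixels of each other) map to the same
        # key in masterCircleSet and are only created once.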
self.checkString=(int(x)/MINOFFSET*MINOFFSET,int(y)/MINOFFSET*MINOFFSET,r)
masterCircleSet.add(self.checkString)
self.color="black"
if not lm:
self.lm=fvh2.fvh.MyTurtle()
self.lm.tracer(False)
else:
self.lm=lm
#self.draw()
def draw(self):
#self.lm=fvh2.fvh.MyTurtle()
self.lm.pencolor(self.color)
self.lm.setup()
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
if not self.keep:
self.lm.undo()
self.lm.undo()
def drawred(self):
self.lm.pencolor('red')
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
def drawwhite(self):
self.lm.pencolor('white')
self.lm.penup()
fvh2.circlearound(self.center, self.radius,self.lm)
def setcolor(self, color):
self.color=color
def realCards(self):
self.realcards=[]
self.lm.pu()
for x in range(4):
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+90*x)
self.lm.fd(self.radius)
self.realcards.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2))
def extendedCards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+180+x*angle)
self.lm.fd(self.radius)
a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
self.cardinals.append(a)
if (self.radius/2>=4):
a.extendedCards(numberOfexteriorCircles)
for card in a.cardinals:
self.cardinals.append(card)
def innerextendedCards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+x*angle)
self.lm.fd(self.radius)
a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
self.cardinals.append(a)
if (self.radius/2>=4):
a.innerextendedCards(numberOfexteriorCircles)
for card in a.cardinals:
self.cardinals.append(card)
def differentcards(self, numberOfexteriorCircles):
self.cardinals=[]
angle=360.0/numberOfexteriorCircles
for x in range(numberOfexteriorCircles):
self.lm.pu()
self.lm.goto(self.center)
self.lm.seth(self.lm.towards(0,0)+180+x*angle)
self.lm.fd(self.radius)
self.cardinals.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep))
def addCardinals(self):
self.cardinals=[]
self.cardinals.append(Circle(self.center[0]+self.radius, self.center[1], self.radius/2))
self.cardinals.append(Circle(self.center[0]-self.radius, self.center[1], self.radius/2))
self.cardinals.append(Circle(self.center[0], self.center[1]+self.radius, self.radius/2))
self.cardinals.append(Circle(self.center[0], self.center[1]-self.radius, self.radius/2))
#for eachcircle in self.cardinals:
# eachcircle.draw()
def comparetoCardinals(self):
self.primarytocardinals=[]
for eachcircle in self.cardinals:
intersectionpoints=circleinter(self.center, self.radius, eachcircle.center, eachcircle.radius)
self.primarytocardinals.append(Circle(intersectionpoints[0][0], intersectionpoints[0][1], self.radius))
self.primarytocardinals.append(Circle(intersectionpoints[1][0], intersectionpoints[1][1], self.radius))
def checkCircles(circle1, circle2):
global checkcirclescalled
checkcirclescalled+=1
points=circleinter(circle1.center, circle1.radius, circle2.center, circle2.radius)
if points:
points=((float("%.2f" % points[0][0]),float("%.2f" % points[0][1])),(float("%.2f" % points[1][0]),float("%.2f" % points[1][1])))
return points
def circleinter((x0, y0), r0, (x1, y1), r1):
"""
    This function accepts two circles and determines where they intersect.
    Each circle is given as (x, y), r, where (x, y) is the center and r is the radius.
    Returns the two intersection points, or None if the circles do not intersect.
"""
dx=float(x1-x0)
dy=float(y1-y0)
d=(dx**2+dy**2)**0.5
if (d>(r0+r1)):
return None
if (d< math.fabs(r0-r1)):
return None
if (d==0):
return None
a = ((r0*r0) - (r1*r1) + (d*d)) / (2.0 * d)
x2 = x0 + (dx * a/d)
y2 = y0 + (dy * a/d)
h = ((r0*r0) - (a*a))**0.5
rx = -dy * (h/d)
ry = dx * (h/d)
xi = x2 + rx
xi_prime = x2 - rx
yi = y2 + ry
yi_prime = y2 - ry
return (xi,yi),(xi_prime,yi_prime)
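# Illustrative check of circleinter (not part of the original file): two radius-5 circles
# centered at (0, 0) and (8, 0) give d=8, a=(25-25+64)/16=4, h=sqrt(25-16)=3, so
#   circleinter((0, 0), 5, (8, 0), 5)  ->  ((4.0, 3.0), (4.0, -3.0))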
def differentCircles(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+str(values[1])
filename='circles/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
primaryCircle.draw()
circlelist.append(primaryCircle)
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth(circle*secondaryCircleTheta)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(firstCircleplace,len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.draw()
newlist.append(temp)
ts.update()
counter=len(circlelist)
for item in newlist:
item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
fvh2.savetocircles(lm,filename)
def differentCirclesforViewing(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
"""
This is designed with something like the following in mind:
lm=circleint.fvh2.fvh.MyTurtle()
for a in range(2,100):
for b in range(3600):
circleint.differentCirclesforAnimation(200,15,a,b/10.0,lm)
lm.clear()
and then make a gif of the results
"""
global masterCircleSet
masterCircleSet=set()
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+'%03d' % values[1]
filename='circles/testa/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
primaryCircle.draw()
circlelist.append(primaryCircle)
colorcounter=0
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
ts.update()
#masterCircleSet=set()
counter=len(circlelist)
for item in newlist:
#item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
#fvh2.savetocircles(lm,filename,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius),ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
fvh2.savetocircles(lm,filename,togif=True)#,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius))#,ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
def differentCirclesforAnimation(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
"""
This is designed with something like the following in mind:
lm=circleint.fvh2.fvh.MyTurtle()
for a in range(2,100):
for b in range(3600):
circleint.differentCirclesforAnimation(200,15,a,b/10.0,lm)
lm.clear()
and then make a gif of the results
"""
filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
filenameZip=zip(filenameStrings,filenameValues)
filename=''
for values in filenameZip:
filename=filename+values[0]+str(values[1])
filename='circles/neatani/'+filename+'.eps'
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
ts=lm.getscreen()
circlelist=[]
newlist=[]
primaryCircle=Circle(0,0,primaryCircleRadius,lm)
#primaryCircle.draw()
circlelist.append(primaryCircle)
colorcounter=0
for circle in range(numberOfSecondaryCircles):
lm.pu()
lm.goto(primaryCircle.center)
lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
lm.fd(primaryCircleRadius)
temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
circlelist.append(temp)
totalbefore=len(circlelist)
totalafter=0
counter=0
while(totalbefore!=totalafter):
totalbefore=len(circlelist)
for firstCircleplace in range(len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(firstCircleplace,len(circlelist)):
secondCircle=circlelist[secondCircleplace]
thisRadius=min(firstCircle.radius, secondCircle.radius)/2
if (thisRadius<10):
continue
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
colorcounter+=1
temp.draw()
newlist.append(temp)
ts.update()
counter=len(circlelist)
for item in newlist:
#item.draw()
circlelist.append(item)
ts.update()
newlist=[]
totalafter=len(circlelist)
#fvh2.savetocircles(lm,filename)
def createDrawing(bigdiameter,diameter):
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
a=Circle(0,0,bigdiameter,lm)
b=Circle(bigdiameter,0,diameter,lm)
circlelist=[a,b]
totalbefore=len(masterCircleSet)
totalafter=0
newlist=[]
counter=0
#print totalbefore
while((totalbefore!=totalafter) and (len(masterCircleSet)<750)):
#print (circlecalled, checkcirclescalled)
#print totalbefore, totalafter
#raw_input()
print len(masterCircleSet)
totalbefore=len(masterCircleSet)
for firstCircleplace in range(counter,len(circlelist)):
firstCircle=circlelist[firstCircleplace]
for secondCircleplace in range(len(circlelist)):
secondCircle=circlelist[secondCircleplace]
newCircles=checkCircles(firstCircle, secondCircle)
#print newCircles, len(newlist)
#raw_input((totalbefore,totalafter))
if newCircles:
if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
newlist.append(Circle(newCircles[0][0], newCircles[0][1], diameter,lm))
else:
print newCircles[0]
if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
newlist.append(Circle(newCircles[1][0], newCircles[1][1], diameter,lm))
else:
print newCircles[1]
counter=len(circlelist)
for item in newlist:
item.draw()
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
lm.tracer(True)
a.lm.tracer(True)
fvh2.savetocircles(a.lm)
def createanotherdrawing(startSize):
a=Circle(0,0,startSize)
smallestsize=startSize
a.addCardinals()
a.lm.undo()
a.lm.undo()
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
circlelist.append(eachitem)
eachitem.lm.undo()
eachitem.lm.undo()
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
for secondCircle in circlelist:
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2
if (thisDiameter<=1):
#print "first break"
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter)
newCircle.draw()
circlelist.append(newCircle)
#for eachCard in newCircle.cardinals:
#circlelist.append(eachCard)
#if (thisDiameter<=1):
#print "second break"
for item in newlist:
circlelist.append(item)
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
def yetanotherdrawing(startdiameter,numberofoutsidecircles):
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
a.lm.undo()
a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.lm.undo()
eachitem.lm.undo()
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
print "new firstCircle : " + str(firstCircle.checkString)
print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (thisDiameter<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
#newCircle.realCards()
circlelist.append(newCircle)
#for eachCard in newCircle.realcards:
# circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
for acircle in circlelist:
acircle.draw()
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagain(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
#print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
item.draw()
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithmax(startdiameter,numberofoutsidecircles, recursive=False, lm=None,stepsize=2):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm,False)
# a.lm.undo()
# a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
# print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#firstCircle.drawred()
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/float(stepsize)
if (min(firstCircle.radius, secondCircle.radius)<=1):
#print "first break"
#secondCircle.draw()
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newCircle.draw()
circlelist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#secondCircle.draw()
#firstCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yadwm(startdiameter):
smallestsize=startdiameter
a=Circle(0,0,startdiameter)
a.addCardinals()
a.lm.undo()
a.lm.undo()
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.lm.undo()
eachitem.lm.undo()
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
for secondCircle in circlelist:
thisDiameter=max(firstCircle.radius, secondCircle.radius)/2.0
if (thisDiameter<=32):
#print "first break"
break
if thisDiameter<smallestsize:
smallestsize=thisDiameter
print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
#lm.tracer(False)
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter)
newCircle.addCardinals()
newCircle.draw()
circlelist.append(newCircle)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#lm.tracer(True)
#if (thisDiameter<=1):
#print "second break"
for item in newlist:
circlelist.append(item)
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
def makeart1():
for size in range(7,11):
for numberofsides in range(1,10):
for recursive in (False, True):
print 2**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(2**(size+2),2**(size+2),'grey50')
ts.setup(2**(size+3),2**(size+3),0,0)
yetanotherdrawingagain(2**size,numberofsides,recursive,lm)
tc=ts.getcanvas()
filename="circles/startSize"+str(size)+"numberofsides"+str(numberofsides)+str(recursive)+'.eps'
ts.update()
tc.postscript(file=filename, height=2**(size+2), width=2**(size+2),x=-2**(size+1),y=-2**(size+1))
ts.bye()
def makeart2():
for size in range(8,11):
for numberofsides in range(6,10):
for recursive in (False, True):
for stepsize in range(2,4):
print stepsize**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(stepsize**(size+2),stepsize**(size+2),'grey50')
ts.setup(stepsize**(size+3),stepsize**(size+3),0,0)
yetanotherdrawingagainwithmax(stepsize**size,numberofsides,recursive,lm,stepsize)
tc=ts.getcanvas()
filename="circles/max"+str(size)+str(numberofsides)+str(recursive)+'.eps'
tc.postscript(file=filename, height=stepsize**(size+2), width=stepsize**(size+2),x=-stepsize**(size+1),y=-stepsize**(size+1))
ts.bye()
def yetanotherdrawingagainwithcontinue(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
lm.tracer(False)
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
a.draw()
a.lm.undo()
a.lm.undo()
a.differentcards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
eachitem.draw()
eachitem.lm.undo()
eachitem.lm.undo()
#eachitem.draw()
eachitem.differentcards(numberofoutsidecircles)
for subitem in eachitem.cardinals:
subitem.draw()
subitem.lm.undo()
subitem.lm.undo()
circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
#print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newCircle.draw()
newlist.append(newCircle)
if recursive:
newCircle.differentcards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
eachCard.draw()
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
lm.tracer(True)
fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithcontinueandextended(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.extendedCards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
#eachitem.differentcards(numberofoutsidecircles)
#for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
#circlelist.append(subitem)
circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.extendedCards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
return circlelist
def yadei(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
global masterCircleSet
masterCircleSet=set()
if not lm:
lm=fvh2.fvh.MyTurtle()
lm.setup()
smallestsize=startdiameter
a=Circle(0,0,startdiameter,lm)
# a.lm.undo()
# a.lm.undo()
a.innerextendedCards(numberofoutsidecircles)
circlelist=[]
circlelist.append(a)
#for eachitem in a.cardinals:
#eachitem.lm.undo()
#eachitem.lm.undo()
#eachitem.differentcards(numberofoutsidecircles)
#for subitem in eachitem.cardinals:
#subitem.lm.undo()
#subitem.lm.undo()
#circlelist.append(subitem)
#circlelist.append(eachitem)
totalbefore=len(masterCircleSet)
totalafter=0
while ((totalbefore!=totalafter)):
print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
totalbefore=len(masterCircleSet)
newlist=[]
for firstCircle in circlelist:
#print "new firstCircle : " + str(firstCircle.checkString)
#print "Current number of circles in circlelist: "+str(len(circlelist))
#firstCircle.drawred()
for secondCircle in circlelist:
#secondCircle.drawred()
thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
if (min(firstCircle.radius, secondCircle.radius)<=4):
#print "first break"
#secondCircle.draw()
continue
if thisDiameter<smallestsize:
smallestsize=thisDiameter
#print "New Smallest Size: "+ str(smallestsize)
newCircles=checkCircles(firstCircle, secondCircle)
if newCircles:
for x in newCircles:
if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
newCircle=Circle(x[0], x[1],thisDiameter,lm)
newlist.append(newCircle)
if recursive:
newCircle.innerextendedCards(numberofoutsidecircles)
for eachCard in newCircle.cardinals:
circlelist.append(eachCard)
#secondCircle.draw()
#if (thisDiameter<=1):
#print "second break"
#firstCircle.draw()
for item in newlist:
circlelist.append(item)
newlist=[]
totalafter=len(masterCircleSet)
if (totalafter==totalbefore):
print "no more moves"
fvh2.savetocircles(a.lm)
return circlelist
def itsOct():
pass
```
#### File: python/turtleRelated/siteimages.py
```python
import os
from wand.image import Image
import wand
def makeindex(pictureDir, picwidth, picheight , filetypes=['jpg','gif','png']):
blacksort(pictureDir)
allfiles=os.listdir(pictureDir)
allfiles.sort()
indexname=pictureDir+'index.html'
if not os.path.exists(indexname):
f=open(indexname, 'w')
f.close()
f=open(indexname, 'rb+')
filecontents="""<html>
<head>
<script type="text/javascript" src="http://jlmarks.org/javascripts/sliderman.1.3.7.js"></script>
<link rel="stylesheet" type="text/css" href="http://jlmarks.org/css/sliderman.css" />
</head>
<body>
<div id="wrapper">
<div id="outer">
<div id="slider_container_2">
<div id="SliderName_2" class="SliderName_2">
"""
tail="""
</div>
<div id="SliderNameNavigation_2"></div>
</div>
<script type="text/javascript">
var myslider=Sliderman.slider({container: 'SliderName_2', width:"""+str(picwidth)+""", height: """+str(picheight)+""",effects: 'fade', display: {autoplay: 1500}});
</script>
</div>
</body>
</html>
"""
x=0
first=True
total=len(allfiles)
for eachfile in allfiles:
print str(x)+" of "+str(total)
#if first and eachfile[-3:] in filetypes:
#newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" usemap="#img1map" />\n <map'
if eachfile[-3:] in filetypes:
newline='\n<img src="'+eachfile+'" width="'+str(picwidth)+'" height="'+str(picheight)+'" alt="sometext" title="'+eachfile+'" />\n'
filecontents=filecontents+newline
x+=1
filecontents=filecontents+tail
f.write(filecontents)
f.close()
def wdivide(inputDir, filetypes=['gif','jpg','png'], sizediff=100):
sizediff=int(sizediff)
allfiles=os.listdir(inputDir)
for eachfile in allfiles:
if eachfile[-3:] in filetypes:
with Image(filename=inputDir+eachfile) as img:
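                # Pad the image up to the next multiple of `sizediff` pixels in each
                # dimension, splitting the extra space evenly as a transparent border.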
endwidth=((int(img.size[0])/sizediff)*sizediff)+sizediff
endheight=((int(img.size[1])/sizediff)*sizediff)+sizediff
borderw=(endwidth-int(img.size[0]))/2
borderh=(endheight-int(img.size[1]))/2
#bordercommand='convert '+inputDir+eachfile+' -matte -bordercolor none -border '+borderw+'x'+borderh+' '+inputDir+size+'/'+eachfile
size=str(endwidth)+'x'+str(endheight)
if not os.path.exists(inputDir+size):
os.mkdir(inputDir+size)
command = 'convert '+inputDir+eachfile+' -matte -bordercolor none -border '+str(borderw)+'x'+str(borderh)+' '+inputDir+size+'/'+eachfile
os.system(command)
def bringtoonedir(mainDir, someDir=None):
"""
This is designed to bring all of the files from different subdirectories into
one main directory
"""
if someDir==None:someDir=''
curDir=mainDir+someDir
print curDir, mainDir, someDir
allfiles=os.listdir(curDir)
for eachfile in allfiles:
if os.path.isdir(curDir+eachfile):
print 'isdir! '+someDir+eachfile+'/'
bringtoonedir(mainDir, someDir+eachfile+'/')
else:
command='mv '+curDir+eachfile+' '+mainDir
os.system(command)
def blacksort(dirtosort, filetypes=['gif','jpg','png']):
allfiles=os.listdir(dirtosort)
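    # letters(x) spells x as an 11-character base-26 string of lowercase letters
    # (0 -> 'aaaaaaaaaaa', 1 -> 'aaaaaaaaaab', ...), giving temporary filenames that
    # keep the original listing order before the black-pixel sort below.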
letters=lambda x: chr(97+((x/(26**10))%26))+chr(97+((x/(26**9))%26))+chr(97+((x/(26**8))%26))+chr(97+((x/(26**7))%26))+chr(97+((x/(26**6))%26))+chr(97+((x/(26**5))%26))+chr(97+((x/(26**4))%26))+chr(97+((x/(26**3))%26))+chr(97+((x/(26*26))%26))+chr(97+((x/26)%26))+chr(97+(x%26))
x=0
blacks=[]
for eachfile in allfiles:
if eachfile[-3:] in filetypes:
with Image(filename=dirtosort+eachfile) as img:
if wand.color.Color('rgb(0,0,0)') in img.histogram.keys():
blacks.append([letters(x)+'.'+eachfile[-3:], img.histogram[wand.color.Color('rgb(0,0,0)')]])
else:
blacks.append([letters(x)+'.'+eachfile[-3:], 0])
os.system('mv '+dirtosort+eachfile+' '+dirtosort+letters(x)+'.'+eachfile[-3:])
x+=1
x=0
blacks.sort(key=lambda x: x[1])
for eachfiles in blacks:
os.system('mv '+dirtosort+eachfiles[0]+' '+dirtosort+'%08d' %x + eachfiles[0][-4:])
x+=1
```
#### File: python/turtleRelated/temprom.py
```python
import math
from fvh import MyTurtle  # assumed import: MyTurtle is provided by the fvh module used elsewhere in this repo
def one(starth=0, startpos=(0,0), lm=None, cube=60):
if not lm:
lm=MyTurtle()
lm.ht()
#lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
unit=float(cube)/12
lm.pd()
lm.fd(6*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(2*unit)
lm.left(90)
lm.fd(10*unit)
lm.left(90)
lm.fd(2*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(6*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(2*unit)
lm.left(90)
lm.fd(10*unit)
lm.left(90)
lm.fd(2*unit)
lm.right(90)
lm.fd(unit)
def five(starth=0, startpos=(0,0), lm=None, cube=60):
if not lm:
lm=MyTurtle()
lm.ht()
#lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
unit=float(cube)/12
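    # Geometry of the numeral's diagonal strokes: each theta is the acute angle of a
    # right triangle measured in grid units (2x5 for the inner stroke, 10x3 for the
    # outer one), and each distance is the matching hypotenuse.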
innertheta=math.degrees(math.asin((2*unit)/(((2*unit)**2+(5*unit)**2)**0.5)))
innerdistance=(((2*unit)**2+(5*unit)**2)**0.5)
outtertheta=math.degrees(math.asin((10*unit)/(((10*unit)**2+(3*unit)**2)**0.5)))
outterdistance=(((10*unit)**2+(3*unit)**2)**0.5)
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.seth(lm.heading()+innertheta)
lm.fd(innerdistance)
lm.seth(starth+(90-innertheta))
lm.fd(innerdistance)
lm.seth(starth+180)
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.right(90)
lm.fd(8*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(2*unit)
lm.left(90-outtertheta)
lm.fd(outterdistance)
lm.seth(starth)
lm.fd(5*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(12*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(5*unit)
lm.seth(starth+90+innertheta)
lm.fd(2*innerdistance)
lm.seth(starth+180)
lm.fd(2*unit)
lm.right(90)
lm.fd(unit)
def ten(starth=0, startpos=(0,0), lm=None, cube=60):
if not lm:
lm=MyTurtle()
lm.ht()
#lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
lm.pd()
unit=float(cube)/12
theta=math.degrees(math.asin(25.0/((15.0**2+25.0**2)**0.5)))
upandright=starth+theta
downandright=starth-theta
upandleft=(180+starth)-theta
downandleft=(180+starth)+theta
outterxlengths=((3*unit)**2+(5*unit)**2)**0.5
innerxlengths=(2.0/3.0)*outterxlengths
##All the math is done, lets draw!
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.seth(downandright)
lm.fd(innerxlengths)
lm.seth(upandright)
lm.fd(innerxlengths)
lm.seth(180+starth)
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.right(90)
lm.fd(8*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(2*unit)
lm.seth(downandleft)
lm.fd(outterxlengths)
lm.seth(downandright)
lm.fd(outterxlengths)
lm.seth(starth)
lm.fd(2*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.seth(upandleft)
lm.fd(innerxlengths)
lm.seth(downandleft)
lm.fd(innerxlengths)
lm.seth(starth)
lm.fd(4*unit)
lm.pu()
lm.right(90)
lm.fd(unit)
lm.pd()
lm.right(90)
lm.fd(8*unit)
lm.right(90)
lm.fd(unit)
lm.right(90)
lm.fd(2*unit)
lm.seth(upandright)
lm.fd(outterxlengths)
lm.seth(upandleft)
lm.fd(outterxlengths)
lm.seth(180-starth)
lm.fd(2*unit)
lm.right(90)
lm.fd(unit)
def fifty(starth=0, startpos=(0,0), lm=None, cube=60):
if not lm:
lm=MyTurtle()
lm.ht()
#lm.tracer(False)
lm.pu()
lm.goto(startpos)
lm.seth(starth)
unit=float(cube)/12
lm.pd()
lm.fd(2*unit)
lm.right(90)
lm.fd(10*unit)
lm.left(90)
lm.fd(3*unit)
lm.left(45)
lm.fd((2*(unit**2))**0.5)
lm.right(135)
lm.fd(3*unit)
lm.right(90)
lm.fd(6*unit)
lm.right(90)
lm.fd(12*unit)
```
#### File: python/uploadT.py/__init__.py
```python
import my.pw
import MySQLdb
def main():
db = MySQLdb.connect(host=my.pw.DBPATH, user=my.pw.DBUSE, passwd=my.pw.DBPASSWORD, db=my.pw.DBTOUSE)
cur = db.cursor()
cur.execute("SELECT VERSION()")
    for row in cur.fetchall():
#data from rows
version = str(row[0])
#print
print "The MySQL version is " + version
# close the cursor
cur.close()
# close the connection
db.close ()
if __name__ == '__main__':
main()
# if __name__ == '__main__':
# db = MySQLdb.connect(host=my.pw.DBPATH, user=my.pw.DBUSE, passwd=<PASSWORD>, db=my.pw.DBTOUSE)
# cur = db.cursor()
# cur.execute("SELECT VERSION()")
# for row in cur.fetchall() :
# #data from rows
# version = str(row[0])
# #print
# print "The MySQL version is " + version
# # close the cursor
# cur.close()
# # close the connection
# db.close ()
```
|
{
"source": "JeremiahNgige/awwwards",
"score": 2
}
|
#### File: awwwards/awwwwards/views.py
```python
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView
from .models import Post
from django.contrib.auth.mixins import LoginRequiredMixin
import random
# Create your views here.
def index(request):
try:
posts=Post.objects.all()
posts = posts[::-1]
one_post = random.randint(0, len(posts)-1)
random_post = posts[one_post]
print(random_post)
except:
posts = None
return render(request, 'awwwwards/index.html', locals())
class PostListView(ListView):
model = Post
template_name = 'awwwwards/index.html'
context_object_name = 'posts'
ordering = ['-date_posted']
class PostDetailView(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'description', 'image_url', 'image']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
```
|
{
"source": "JeremiahNgige/News-app",
"score": 3
}
|
#### File: News-app/app/news_top_articles.py
```python
class latestTopArticles:
'''
    Class that defines latest top-article objects.
'''
def __init__(self,author,description,url,urlToImage,publishedAt):
self.author = author
self.description = description
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
```
|
{
"source": "JeremiahNgige/password_locker",
"score": 4
}
|
#### File: JeremiahNgige/password_locker/credentials.py
```python
class Credentials:
"""
    Class that models a user's account credentials (account name, username, and password).
"""
def __init__(self, account_name, account_username, account_password):
self.account_name = account_name
self.account_username = account_username
self.account_password = account_password
credentials_list = []
def save_credentials(self):
"""
        Method that saves this credentials object to the credentials list.
"""
self.credentials_list.append(self)
def delete_credentials(self):
"""
method that deletes a credential
"""
Credentials.credentials_list.remove(self)
@classmethod
def locate_by_name(cls, account_name):
"""
        Method that takes in an account name and returns the credential that matches it.
        Args:
            account_name: name of the account to look up
        Returns:
            The matching Credentials object, or None if no credential matches
"""
for credential in cls.credentials_list:
if credential.account_name == account_name:
return credential
@classmethod
def credentials_exists(cls, name):
"""
        Method that checks whether a credential with the given account name exists in credentials_list.
        Args:
            name: account name to search for
        Returns:
            Boolean: True if a matching credential exists, False otherwise
"""
for credential in cls.credentials_list:
if credential.account_name == name:
return True
return False
@classmethod
def display_credentials(cls):
"""
method that returns credentials' list
"""
return cls.credentials_list
# pyperclip.copy(credentials_located.account_password)
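# Minimal usage sketch (not part of the original file; the account values are illustrative):
#   gmail = Credentials("gmail", "jeremiah", "s3cret")
#   gmail.save_credentials()
#   Credentials.credentials_exists("gmail")                 # -> True
#   Credentials.locate_by_name("gmail").account_password    # -> "s3cret"
#   gmail.delete_credentials()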
```
|
{
"source": "jeremiahpslewis/alec",
"score": 2
}
|
#### File: model-orchestrator/src/main.py
```python
from dagster import ModeDefinition, PresetDefinition, execute_pipeline, pipeline, solid
import os
from typing import Union
import boto3
import modAL
import numpy as np
import pandas as pd
import sklearn
from modAL.models import ActiveLearner
from modAL.uncertainty import uncertainty_sampling
from sklearn import linear_model
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from yaml import safe_load
bucket_name = os.getenv("S3_BUCKET_NAME")
# NOTE: counterfactual_default is defined as default outcome had applicant been granted loan
simulation_indices = ["simulation_id", "application_id"]
simulation_metadata = [
"counterfactual_default",
"scenario_id",
"idiosyncratic_individual_risk",
"total_default_risk",
"age_var",
"application_date",
]
X_vars = ["age"]
y_var = ["default"]
full_application_col_set = [*simulation_indices, *simulation_metadata, *X_vars]
full_portfolio_col_set = [
*simulation_indices,
"portfolio",
"credit_granted",
"funding_probability",
]
full_outcome_col_set = [*simulation_indices, "default"]
def get_scenario_df():
"""
Set of scenarios which will be modeled, specified in YAML.
"""
with open("scenarios.yml", "r") as f:
scenarios = safe_load(f)
scenario_df = pd.DataFrame(scenarios["scenarios"])
return scenario_df
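# Illustrative shape of scenarios.yml (a sketch inferred from the columns this module reads:
# id, active_learning_spec, research_acceptance_rate; the real file may hold other fields):
#
#   scenarios:
#     - id: no-active-learning
#       active_learning_spec: random
#       research_acceptance_rate: 0.0
#     - id: uncertainty
#       active_learning_spec: uncertainty_sampling
#       research_acceptance_rate: 0.1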
def get_raw_data(simulation_id, scenario_id):
"""
Raw dataset drawn from synthetic data based on simulation_id and labeled with scenario_id.
"""
raw_df = pd.read_parquet(
f"s3://{bucket_name}/synthetic_data/{simulation_id}.parquet"
)
raw_df = raw_df.loc[raw_df.simulation_id == simulation_id].copy()
raw_df.reset_index(inplace=True, drop=True)
raw_df["counterfactual_default"] = raw_df["default"]
raw_df["scenario_id"] = scenario_id
return raw_df
def get_historical_data(
simulation_id, scenario_id
) -> dict[str, pd.DataFrame]:
"""
Fetch historical data. First period data is assumed to be available at start of simulation.
"""
df = get_raw_data(simulation_id, scenario_id)
df["portfolio"] = "business"
df["credit_granted"] = True
df["funding_probability"] = 1
df_hist = (
df.loc[df.application_date == df.application_date.min()]
.copy()
.reset_index(drop=True)
)
hist_application_df = (
df_hist.loc[
:,
full_application_col_set,
]
.copy()
.reset_index(drop=True)
)
hist_portfolio_df = (
df_hist.loc[:, full_portfolio_col_set].copy().reset_index(drop=True)
)
hist_outcome_df = df_hist.loc[:, full_outcome_col_set].copy().reset_index(drop=True)
return {
"applications": hist_application_df,
"portfolio": hist_portfolio_df,
"outcomes": hist_outcome_df,
}
@solid(config_schema={"simulation_id": str, "scenario_id": str})
def get_historical_application_data(context):
"""
Fetch first period application data.
"""
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
return get_historical_data(simulation_id, scenario_id)["applications"]
@solid(config_schema={"simulation_id": str, "scenario_id": str})
def get_historical_portfolio_data(context):
"""Fetch first period portfolio (granted loans) data."""
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
return get_historical_data(simulation_id, scenario_id)["portfolio"]
@solid(config_schema={"simulation_id": str, "scenario_id": str})
def get_historical_outcome_data(context):
"""
Fetch first period outcome data.
"""
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
return get_historical_data(simulation_id, scenario_id)["outcomes"]
def get_feature_pipeline():
"""
Fetch feature pipeline.
"""
column_trans = ColumnTransformer(
[
(
"age",
"passthrough",
["age"],
),
],
remainder="drop",
)
return column_trans
def get_model_pipeline_object():
"""
Fetch model pipeline artifact.
"""
column_trans = get_feature_pipeline()
model_pipeline = make_pipeline(column_trans, linear_model.LogisticRegression())
return model_pipeline
@solid(config_schema={"scenario_id": str})
def get_model_pipeline(context) -> sklearn.pipeline.Pipeline:
"""
Fetch model pipeline.
"""
scenario_id = context.solid_config["scenario_id"]
scenario_df = get_scenario_df()
column_trans = get_feature_pipeline()
model_pipeline = get_model_pipeline_object()
return model_pipeline
@solid(config_schema={"scenario_id": str})
def get_active_learning_pipeline(context):
"""
Fetch active learning pipeline.
"""
scenario_id = context.solid_config["scenario_id"]
scenario_df = get_scenario_df()
active_learning_spec = scenario_df.loc[
scenario_df.id == scenario_id, "active_learning_spec"
].iloc[0]
model_pipeline = get_model_pipeline_object()
if active_learning_spec == "random":
return None
elif active_learning_spec != "random":
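        # getattr pulls the named query strategy from modAL.uncertainty (e.g. uncertainty_sampling);
        # later in this module it is called with (classifier, X, n_instances) and its return
        # value is used as the row indices of the applications to fund for research.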
return getattr(modAL.uncertainty, active_learning_spec)
def prepare_training_data(
application_df: pd.DataFrame, portfolio_df: pd.DataFrame, outcome_df
):
"""
    Join the application, portfolio, and outcome datasets into a single training DataFrame.
"""
training_df = pd.merge(
application_df, portfolio_df, on=["application_id", "simulation_id"], how="left"
)
training_df = pd.merge(
training_df,
outcome_df,
on=["application_id", "simulation_id"],
how="left",
)
assert (
training_df.application_id.duplicated().sum() == 0
), training_df.application_date.max()
assert (
training_df.shape[0] == application_df.shape[0]
), training_df.simulation_id.value_counts()
assert training_df.portfolio.notnull().sum() == portfolio_df.shape[0]
assert training_df.default.notnull().sum() == outcome_df.shape[0]
assert training_df.shape[0] > 0
return training_df.reset_index(drop=True)
@solid
def train_model(
context,
application_df: pd.DataFrame,
portfolio_df: pd.DataFrame,
outcome_df,
model_pipeline: sklearn.pipeline.Pipeline,
) -> sklearn.pipeline.Pipeline:
"""
    application_df, portfolio_df, outcome_df: data collected on previous loans and their outcomes, as pd.DataFrames
    model_pipeline: machine learning pipeline that is fitted on the joined training data and returned
"""
training_df = prepare_training_data(application_df, portfolio_df, outcome_df)
# NOTE: Currently all cases without observed default are dropped for ML model!
training_df = training_df.loc[training_df.default.notnull()].copy()
model_pipeline.fit(training_df.loc[:, X_vars], training_df["default"].astype("int"))
return model_pipeline
@solid(
config_schema={"application_date": int, "simulation_id": str, "scenario_id": str}
)
def get_applications(context, application_df) -> pd.DataFrame:
"""
gets applications for new loans from customers
"""
application_date = context.solid_config["application_date"]
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
raw_application_df = get_raw_data(simulation_id, scenario_id)
new_application_df = raw_application_df.loc[
raw_application_df.application_date == application_date
].copy()
new_application_df.reset_index(inplace=True, drop=True)
return application_df.append(
new_application_df[full_application_col_set]
).reset_index(drop=True)
@solid(config_schema={"application_date": int, "scenario_id": str})
def choose_business_portfolio(
context,
application_df: pd.DataFrame,
portfolio_df: pd.DataFrame,
model_pipeline: sklearn.pipeline.Pipeline,
) -> pd.DataFrame:
"""
Decide whom to grant loans to (for profit)
applications: pd.DataFrame
model: machine learning model (pipeline) which can be applied to applications, based on training data
"""
application_date = context.solid_config["application_date"]
scenario_id = context.solid_config["scenario_id"]
scenario_df = get_scenario_df()
current_application_df = (
application_df.loc[application_df.application_date == application_date]
.copy()
.reset_index(drop=True)
)
# NOTE: No applications this application_date!
if current_application_df.shape[0] == 0:
return portfolio_df
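    # Column 1 of predict_proba is the probability of the positive class (default == 1).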
current_application_df["est_default_prob"] = pd.DataFrame(
model_pipeline.predict_proba(current_application_df.loc[:, X_vars])
).loc[:, 1]
assert (
current_application_df.est_default_prob.isna().sum() == 0
), "Some estimated default probabilities NaN"
# NOTE: All applicants below 10% risk threshold accepted
business_portfolio_df = (
current_application_df.loc[current_application_df["est_default_prob"] <= 0.10]
.copy()[["application_id", "simulation_id"]]
.reset_index(drop=True)
)
business_portfolio_df["portfolio"] = "business"
business_portfolio_df["funding_probability"] = 1
business_portfolio_df["credit_granted"] = True
return portfolio_df.append(business_portfolio_df[full_portfolio_col_set])
@solid(config_schema={"application_date": int, "scenario_id": str})
def choose_research_portfolio(
context,
application_df: pd.DataFrame,
portfolio_df: pd.DataFrame,
outcome_df: pd.DataFrame,
model_pipeline: sklearn.pipeline.Pipeline,
active_learning_pipeline,
) -> pd.DataFrame:
"""
Decide whom to grant loans to (for research / profit in subsequent rounds)
    portfolio_df: portfolio of already granted loans, as pd.DataFrame; selected research loans are appended to it
"""
application_date = context.solid_config["application_date"]
scenario_id = context.solid_config["scenario_id"]
scenario_df = get_scenario_df()
active_learning_spec = scenario_df.loc[
scenario_df.id == scenario_id, "active_learning_spec"
].iloc[0]
research_acceptance_rate = scenario_df.loc[
scenario_df.id == scenario_id, "research_acceptance_rate"
].iloc[0]
current_applications = application_df[
application_df.application_date == application_date
].copy()
unfunded_applications = current_applications[
        ~current_applications.application_id.isin(portfolio_df.application_id.tolist())
].copy()
# NOTE: No applications this application_date!
if unfunded_applications.shape[0] == 0:
return portfolio_df
    # NOTE: In the "no-active-learning" scenario, no research loans are made
if scenario_id == "no-active-learning":
return portfolio_df
n_research_loans = int(current_applications.shape[0] * research_acceptance_rate)
if active_learning_spec == "random":
research_portfolio_df = unfunded_applications.sample(
min(n_research_loans, unfunded_applications.shape[0])
)
else:
active_learning_df = prepare_training_data(
unfunded_applications,
portfolio_df.loc[
portfolio_df.application_id.isin(
unfunded_applications.application_id.tolist()
)
],
outcome_df.loc[
outcome_df.application_id.isin(
unfunded_applications.application_id.tolist()
)
],
)
if active_learning_df.shape[0] <= n_research_loans:
research_portfolio_df = active_learning_df.copy()
else:
research_portfolio_df = active_learning_df.copy()
research_loan_index = active_learning_pipeline(
classifier=model_pipeline,
X=active_learning_df.loc[:, X_vars],
n_instances=n_research_loans,
)
research_portfolio_df = active_learning_df.loc[research_loan_index].copy()
research_portfolio_df = (
research_portfolio_df[["application_id", "simulation_id"]]
.reset_index(drop=True)
.copy()
)
research_portfolio_df["portfolio"] = "research"
research_portfolio_df["credit_granted"] = True
research_portfolio_df["funding_probability"] = np.nan
return portfolio_df.append(research_portfolio_df[full_portfolio_col_set])
@solid(
config_schema={"application_date": int, "simulation_id": str, "scenario_id": str}
)
def observe_outcomes(
context, portfolio_df: pd.DataFrame, outcome_df: pd.DataFrame
) -> pd.DataFrame:
"""
Observe outcomes to granted credit.
"""
application_date = context.solid_config["application_date"]
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
raw_data = get_raw_data(simulation_id, scenario_id)
new_loan_outcomes = raw_data.loc[
(~raw_data.application_id.isin(outcome_df.application_id.tolist()))
& (raw_data.application_id.isin(portfolio_df.application_id.tolist()))
].copy()
return outcome_df.append(new_loan_outcomes[full_outcome_col_set])
@solid(config_schema={"simulation_id": str, "scenario_id": str})
def export_results(
context,
application_df: pd.DataFrame,
portfolio_df: pd.DataFrame,
outcome_df: pd.DataFrame,
):
"""
Export simulation results to s3 for later analysis.
"""
simulation_id = context.solid_config["simulation_id"]
scenario_id = context.solid_config["scenario_id"]
application_df.to_parquet(
f"s3://{bucket_name}/applications/{scenario_id}/{simulation_id}.parquet"
)
portfolio_df.to_parquet(
f"s3://{bucket_name}/portfolios/{scenario_id}/{simulation_id}.parquet"
)
outcome_df.to_parquet(
f"s3://{bucket_name}/outcomes/{scenario_id}/{simulation_id}.parquet"
)
get_scenario_df().to_parquet(
f"s3://{bucket_name}/scenarios/{scenario_id}/{simulation_id}.parquet"
)
def var_if_gr_1(i, var):
"""
Helper function for associating dagster tasks with config variables
"""
if i > 1:
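        # Dagster names repeated invocations of the same solid "name", "name_2", "name_3", ...
        # so the per-invocation config keys have to follow that pattern.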
return f"{var}_{i}"
else:
return var
def run_simulation(simulation_id, scenario_id):
"""
Helper function for carrying out simulation for a given scenario.
"""
solids_dict = {
var_if_gr_1(i + 1, var): {
"config": {
"application_date": range(2021, 2031)[i],
"scenario_id": scenario_id,
}
}
for i in range(9)
for var in [
"choose_business_portfolio",
"choose_research_portfolio",
]
}
solids_dict.update(
{
"get_model_pipeline": {"config": {"scenario_id": scenario_id}},
"get_active_learning_pipeline": {"config": {"scenario_id": scenario_id}},
}
)
solids_dict.update(
{
var_if_gr_1(i + 1, var): {
"config": {
"application_date": range(2021, 2031)[i],
"simulation_id": simulation_id,
"scenario_id": scenario_id,
}
}
for i in range(9)
for var in [
"get_applications",
"observe_outcomes",
]
}
)
solids_dict.update(
{
var: {
"config": {"simulation_id": simulation_id, "scenario_id": scenario_id}
}
for var in [
"get_historical_application_data",
"get_historical_portfolio_data",
"get_historical_outcome_data",
"export_results",
]
}
)
run_config = {"solids": solids_dict}
@pipeline(
mode_defs=[ModeDefinition("unittest")],
preset_defs=[
PresetDefinition(
"unittest",
run_config=run_config,
mode="unittest",
)
],
)
def active_learning_experiment_credit():
"""
Active learning 'main' function.
"""
application_df = get_historical_application_data()
portfolio_df = get_historical_portfolio_data()
outcome_df = get_historical_outcome_data()
model_pipeline = get_model_pipeline()
active_learning_pipeline = get_active_learning_pipeline()
for t in range(9):
trained_model = train_model(
application_df,
portfolio_df,
outcome_df,
model_pipeline,
)
application_df = get_applications(application_df)
portfolio_df = choose_business_portfolio(
application_df, portfolio_df, trained_model
)
portfolio_df = choose_research_portfolio(
application_df,
portfolio_df,
outcome_df,
trained_model,
active_learning_pipeline,
)
outcome_df = observe_outcomes(portfolio_df, outcome_df)
export_results(application_df, portfolio_df, outcome_df)
execute_pipeline(active_learning_experiment_credit, run_config=run_config)
if __name__ == "__main__":
s3 = boto3.resource("s3")
s3_alec = s3.Bucket(bucket_name)
# Empty bucket of alec objects
for folder in ["applications", "portfolios", "outcomes", "scenarios"]:
s3_alec.objects.filter(Prefix=f"{folder}/").delete()
simulation_ids = [f.key for f in s3_alec.objects.filter(Prefix="synthetic_data/")]
simulation_ids = [
simulation_id.split("/")[1].split(".")[0] for simulation_id in simulation_ids
]
scenario_df = get_scenario_df()
for scenario_id in scenario_df.id.tolist():
for simulation_id in simulation_ids:
run_simulation(simulation_id, scenario_id)
```
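The `var_if_gr_1` helper mirrors dagster's convention of naming repeated solid invocations `name`, `name_2`, `name_3`, and so on. A minimal, standalone sketch of how `run_simulation` derives those config keys (no dagster required; the year range is taken from the code above):

```python
# Standalone sketch of the config-key naming used by run_simulation above.
# Dagster names repeated solid invocations var, var_2, var_3, ...

def var_if_gr_1(i, var):
    return f"{var}_{i}" if i > 1 else var

years = range(2021, 2031)  # one application round per simulated year

solids_dict = {
    var_if_gr_1(i + 1, var): {"config": {"application_date": years[i]}}
    for i in range(9)
    for var in ["get_applications", "observe_outcomes"]
}

# The first invocation keeps the plain name, later ones get a numeric suffix.
assert solids_dict["observe_outcomes"]["config"]["application_date"] == 2021
assert solids_dict["observe_outcomes_2"]["config"]["application_date"] == 2022
```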
|
{
"source": "jeremiahpslewis/oxigraph",
"score": 2
}
|
#### File: oxigraph/bench/bsbm-plot.py
```python
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from collections import defaultdict
from glob import glob
from numpy import array
def plot_y_per_x_per_plot(data, xlabel, ylabel, file, log=False):
plt.figure(file)
bar_width = 1 / (len(data) + 1)
for i, (label, xys) in enumerate(sorted(data.items())):
plt.bar(array(list(xys.keys())) + bar_width * (i + 1 - len(data) / 2), array(list(xys.values())), bar_width, label=label)
plt.legend()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if log:
plt.yscale('log')
plt.savefig(file)
def plot_usecase(name: str):
aqet = defaultdict(dict)
for file in glob('bsbm.{}.*.xml'.format(name)):
run = file.replace('bsbm.{}.'.format(name), '').replace('.xml', '')
for query in ET.parse(file).getroot().find('queries').findall('query'):
val = float(query.find('aqet').text)
if val > 0:
aqet[run][int(query.attrib['nr'])] = val
plot_y_per_x_per_plot(aqet, 'query id', 'execution time (s)', 'bsbm.{}.svg'.format(name))
plot_usecase('explore')
plot_usecase('exploreAndUpdate')
plot_usecase('businessIntelligence')
plt.show()
```
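`plot_usecase` builds a `run -> {query nr: average query execution time}` mapping from `bsbm.<usecase>.<run>.xml` files before plotting. A hedged sketch of that parsing step on a fabricated XML snippet (the run label and timing values are made up, and the snippet only contains the fields the script actually reads):

```python
# Fabricated BSBM result snippet containing only the fields plot_usecase reads.
import xml.etree.ElementTree as ET
from collections import defaultdict

fake_result = """
<bsbm>
  <queries>
    <query nr="1"><aqet>0.012</aqet></query>
    <query nr="2"><aqet>0.034</aqet></query>
    <query nr="3"><aqet>-1</aqet></query>
  </queries>
</bsbm>
"""

aqet = defaultdict(dict)
run = "run-a"  # in the script this label comes from the file name bsbm.<usecase>.<run>.xml
for query in ET.fromstring(fake_result).find("queries").findall("query"):
    val = float(query.find("aqet").text)
    if val > 0:  # non-positive aqet values are skipped, as in plot_usecase
        aqet[run][int(query.attrib["nr"])] = val

print(dict(aqet))  # {'run-a': {1: 0.012, 2: 0.034}}
```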
|
{
"source": "jeremiahpslewis/qlever",
"score": 3
}
|
#### File: qlever/misc/broccoli_words_file_to_context_file.py
```python
import argparse
import sys
from broccoli_ontology_txt_to_nt import handleSubject
__author__ = 'buchholb'
parser = argparse.ArgumentParser()
parser.add_argument('--wordsfile',
type=str,
help='Broccoli Wordsfile.',
required=True)
def writeContextFileToStdout(wordsfile):
for line in open(wordsfile):
cols = line.strip('\n').split('\t')
if cols[0].startswith(':e:'):
cols[0] = handleSubject(cols[0][3:])
entityFlag = '1'
else:
entityFlag = '0'
print('\t'.join([cols[0], entityFlag, cols[1], cols[2]]))
def main():
args = vars(parser.parse_args())
wordsfile = args['wordsfile']
writeContextFileToStdout(wordsfile)
if __name__ == '__main__':
main()
```
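Each Broccoli words-file row is rewritten into a QLever context-file row: entity rows (prefixed `:e:`) get flag `1` and a normalised subject, plain word rows get flag `0`. A hedged sketch of the per-line transformation with `handleSubject` stubbed out (the real implementation is imported from `broccoli_ontology_txt_to_nt`):

```python
# handleSubject is stubbed here; the real one comes from broccoli_ontology_txt_to_nt.
def handleSubject(subject: str) -> str:
    return "<" + subject + ">"  # placeholder normalisation


def convert_line(line: str) -> str:
    cols = line.strip("\n").split("\t")
    if cols[0].startswith(":e:"):
        cols[0] = handleSubject(cols[0][3:])
        entity_flag = "1"
    else:
        entity_flag = "0"
    return "\t".join([cols[0], entity_flag, cols[1], cols[2]])


print(convert_line(":e:Albert_Einstein\t7\t1"))  # entity row -> flag 1
print(convert_line("physicist\t7\t1"))           # plain word row -> flag 0
```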
#### File: qlever/misc/compare_performance_only_own.py
```python
import argparse
import sys
import os
import subprocess
import compare_performance
from subprocess import Popen
__author__ = 'buchholb'
parser = argparse.ArgumentParser()
parser.add_argument('--queryfile',
type=str,
help='One line per (standard) SPARQL query. TS specific \
transformations are made by the python script.',
required=True)
parser.add_argument('--index',
type=str,
help='Index to use.',
required=True)
parser.add_argument('binaries', metavar='B', type=str, nargs='+',
help='binaries to use where each binary is specified as 3 \
string <binary>, <name> and <cost factor>')
def get_query_times(query_file, name, binary, costFactors, index):
with open('__tmp.myqueries', 'w') as tmpfile:
for line in open(query_file):
try:
tmpfile.write(
compare_performance.
expanded_to_my_syntax(
line.strip().split('\t')[1]) + '\n')
except IndexError:
print("Problem with tabs in : " + line)
exit(1)
coutfile = open('__tmp.cout.' + name, 'w')
myout = subprocess.check_output(
[binary, '-i', index, '-t', '--queryfile', '__tmp.myqueries', '-c',
costFactors]).decode('utf-8')
print(myout, file=coutfile)
print('\n\n\n', file=coutfile)
times = []
nof_matches_no_limit = []
nof_matches_limit = []
for line in myout.split('\n'):
i = line.find('Done. Time: ')
if i >= 0:
j = line.find('ms')
times.append(line[i + 12: j + 2])
i = line.find('Number of matches (no limit): ')
if i >= 0:
nof_matches_no_limit.append(line[i + 30:])
i = line.find('Number of matches (limit): ')
if i >= 0:
nof_matches_limit.append(line[i + 27:])
# os.remove('__tmp.myqueries')
queries = []
for line in open(query_file):
queries.append(line.strip())
if len(times) != len(queries) or len(times) != len(
nof_matches_no_limit) or len(times) != len(nof_matches_limit):
print('PROBLEM PROCESSING: ' + name + ' WITH PATH: ' + binary)
return list(zip(times, nof_matches_no_limit, nof_matches_limit))
def process_queries_and_print_stats(query_file, binaries, index):
queries = []
for line in open(query_file):
queries.append(line.strip())
th_titles = ['id', 'query']
results = []
for (name, path, costFactors) in binaries:
th_titles.append(name + "_times")
th_titles.append(name + "_nof_matches_no_limit")
# th_titles.append(name + "_nof_matches_limit")
r = get_query_times(query_file, name, path, costFactors, index)
results.append(r)
print('\t'.join(th_titles))
print('\t'.join(['----'] * len(th_titles)))
for i in range(0, len(queries)):
line_strs = [queries[i]]
for res in results:
line_strs.append(res[i][0])
line_strs.append(res[i][1])
# line_strs.append(res[i][2])
print('\t'.join(line_strs))
def main():
args = vars(parser.parse_args())
queries = args['queryfile']
index = args['index']
arg_bins = args['binaries']
assert len(arg_bins) % 3 == 0
binaries = []
for i in range(0, len(arg_bins) // 3):
binaries.append(
(arg_bins[3 * i], arg_bins[3 * i + 1], arg_bins[3 * i + 2]))
process_queries_and_print_stats(queries, binaries, index)
if __name__ == '__main__':
main()
```
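`get_query_times` scrapes the engine's stdout for the `Done. Time:` and `Number of matches` lines. A small sketch of that extraction on a fabricated output string (the numbers are invented):

```python
# Fabricated engine output; the real text comes from the benchmarked binary.
myout = "\n".join([
    "Done. Time: 42ms",
    "Number of matches (no limit): 1234",
    "Number of matches (limit): 100",
])

times, nof_matches_no_limit, nof_matches_limit = [], [], []
for line in myout.split("\n"):
    i = line.find("Done. Time: ")
    if i >= 0:
        j = line.find("ms")
        times.append(line[i + 12: j + 2])
    i = line.find("Number of matches (no limit): ")
    if i >= 0:
        nof_matches_no_limit.append(line[i + 30:])
    i = line.find("Number of matches (limit): ")
    if i >= 0:
        nof_matches_limit.append(line[i + 27:])

print(list(zip(times, nof_matches_no_limit, nof_matches_limit)))
# [('42ms', '1234', '100')]
```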
#### File: qlever/misc/compare_performance_permutate_queries.py
```python
import compare_performance
import argparse
from random import shuffle
import statistics
__author__ = 'buchholb'
parser = argparse.ArgumentParser()
parser.add_argument('--queryfile',
type=str,
help='One line per (standard) SPARQL query. TS specific transformations are made by the python script.',
required=True)
parser.add_argument('--virtuoso-pwd',
type=str,
help='Password for the (already running) virtuoso instance',
required=True)
parser.add_argument('--all', action='store_true', help='should all be executed?',
default=False)
parser.add_argument('--bifc-inc', action='store_true', help='should bifc_inc be executed?',
default=False)
parser.add_argument('--mine', action='store_true', help='should mine be executed?',
default=False)
parser.add_argument('--rdf3x', action='store_true', help='should rdf3x be executed?',
default=False)
nof_permutations = 5
def print_result_table(headers, query_to_approach_to_times):
print('\t'.join(headers))
print('\t'.join(['---'] * len(headers)))
approach_to_run_times = {}
for q, att in query_to_approach_to_times.items():
for a, t in att.items():
approach_to_run_times[a] = []
for i in range(0, nof_permutations):
approach_to_run_times[a].append([])
break
for q, att in query_to_approach_to_times.items():
row = [q]
for approach in headers[2:]:
row.append(produce_cell(att[approach]))
for perm_num, time in enumerate(att[approach]):
approach_to_run_times[approach][perm_num].append(time)
print('\t'.join(row))
row = ['average', ' ']
for approach in headers[2:]:
run_times = approach_to_run_times[approach]
avgs = []
for ts in run_times:
cleaned_ts = [t for t in ts if t != -1.0]
avgs.append(sum(cleaned_ts) / len(cleaned_ts))
row.append(produce_cell(avgs))
print('\t'.join(row))
def produce_cell(times):
avg = sum(times) / len(times)
dev = statistics.stdev(times)
cell = ' '.join(['{0:.2f}'.format(t) for t in times])
cell += ' | avg = ' + '{0:.2f}'.format(avg) + ' | stdev = ' + '{0:.2f}'.format(dev)
return cell
def main():
args = vars(parser.parse_args())
query_file = args['queryfile']
queries = []
all = args['all']
bifc_inc = args['bifc_inc']
rdf3x = args['rdf3x']
mine = args['mine']
for line in open(query_file):
queries.append(line.strip())
for permutation_num in range(0, nof_permutations):
shuffle(queries)
with open(query_file + '_perm' + str(permutation_num), 'w') as f:
for q in queries:
print(q, file=f)
headers = []
headers.append('id')
headers.append('query')
if all or bifc_inc:
headers.append('bifc_inc')
if all or rdf3x:
headers.append('rdf3x')
if all or mine:
headers.append('mine')
query_to_approach_to_times = {}
for q in queries:
query_to_approach_to_times[q] = {}
for approach in headers[2:]:
query_to_approach_to_times[q][approach] = []
for permutation_num in range(0, nof_permutations):
qfile = query_file + '_perm' + str(permutation_num)
qs = []
for line in open(qfile):
qs.append(line.strip())
if all or bifc_inc:
times, matches = compare_performance.get_virtuoso_bifc_inc_query_times(qfile, args['virtuoso_pwd'])
for i, q in enumerate(qs):
query_to_approach_to_times[q]['bifc_inc'].append(float(times[i].strip().strip('ms').strip()))
if all or rdf3x:
times, matches = compare_performance.get_rdf3X_query_times(qfile)
for i, q in enumerate(qs):
if (times[i] != '-'):
query_to_approach_to_times[q]['rdf3x'].append(float(times[i].strip().strip('ms').strip()))
else:
query_to_approach_to_times[q]['rdf3x'].append(-1.0)
if all or mine:
times, matches = compare_performance.get_my_query_times(qfile)
for i, q in enumerate(qs):
query_to_approach_to_times[q]['mine'].append(float(times[i].strip().strip('ms').strip()))
print_result_table(headers, query_to_approach_to_times)
if __name__ == '__main__':
main()
```
#### File: qlever/misc/words-and-docs-file-from-nt.py
```python
import sys
import re
def write_words_and_docs_file_from_nt(
nt_file_name, words_file_name, docs_file_name):
"""
Read triples from NT file and for each literal, create a text record
containing that literal as entity and the words from the literal as words.
The content in the docs file is not important, but we need a docs file
because QLever currently crashes without a <base>.text.docsDB file
"""
with open(nt_file_name, "r") as nt_file, \
open(words_file_name, "w") as words_file, \
open(docs_file_name, "w") as docs_file:
record_id = 0
for triple in nt_file:
# Check if object is a literal.
literal = re.search("(\"(.*)\"(\^\^<.*>|@[a-z]+|))\s*\.?\s*$", triple)
if not literal:
continue
entity = literal.group(1)
contents = literal.group(2)
# Write entity and words to words file.
print("%s\t1\t%d\t1" % (entity, record_id), file = words_file)
for word in re.split("\W+", contents):
if len(word) > 0:
print("%s\t0\t%d\t1" % (word.lower(), record_id),
file = words_file)
# Write dummy entry to docs file.
print("%d\tLiteral #%d" % (record_id, record_id), file = docs_file)
record_id += 1
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python3 words-and-docs-file-from-nt.py <base name>")
sys.exit(1)
base_name = sys.argv[1]
nt_file_name = base_name + ".nt"
words_file_name = base_name + ".wordsfile.tsv"
docs_file_name = base_name + ".docsfile.tsv"
write_words_and_docs_file_from_nt(
nt_file_name, words_file_name, docs_file_name)
```
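The regular expression captures the full literal (group 1, reused as the entity) and its lexical form (group 2, split into words). A hedged walk-through on one fabricated N-Triples line, printing the words-file and docs-file rows it would produce:

```python
# One fabricated N-Triples line run through the same regex as the script above.
import re

triple = '<http://example.org/s> <http://example.org/p> "Hello World"@en .\n'
literal = re.search(r'("(.*)"(\^\^<.*>|@[a-z]+|))\s*\.?\s*$', triple)
assert literal is not None
entity = literal.group(1)    # '"Hello World"@en'  -> used as the entity
contents = literal.group(2)  # 'Hello World'       -> split into words

record_id = 0
print("%s\t1\t%d\t1" % (entity, record_id))                # entity row (words file)
for word in re.split(r"\W+", contents):
    if len(word) > 0:
        print("%s\t0\t%d\t1" % (word.lower(), record_id))  # one row per word
print("%d\tLiteral #%d" % (record_id, record_id))          # dummy row (docs file)
```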
|
{
"source": "jeremiahsavage/cwltool",
"score": 2
}
|
#### File: cwltool/cwltool/factory.py
```python
from . import main
from . import load_tool
from . import workflow
import os
from .process import Process
from typing import Any, Dict, Text, Union
from typing import Callable as tCallable
import argparse
class Callable(object):
def __init__(self, t, factory): # type: (Process, Factory) -> None
self.t = t
self.factory = factory
def __call__(self, **kwargs):
# type: (**Any) -> Union[Text, Dict[Text, Text]]
execkwargs = self.factory.execkwargs.copy()
execkwargs["basedir"] = os.getcwd()
return self.factory.executor(self.t, kwargs, **execkwargs)
class Factory(object):
def __init__(self, makeTool=workflow.defaultMakeTool,
executor=main.single_job_executor,
**execkwargs):
# type: (tCallable[[Dict[Text, Any], Any], Process],tCallable[...,Union[Text,Dict[Text,Text]]], **Any) -> None
self.makeTool = makeTool
self.executor = executor
self.execkwargs = execkwargs
def make(self, cwl):
"""Instantiate a CWL object from a CWl document."""
load = load_tool.load_tool(cwl, self.makeTool)
if isinstance(load, int):
raise Exception("Error loading tool")
return Callable(load, self)
```
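A hedged usage sketch for `Factory`: `make` loads a CWL document and returns a `Callable`, whose keyword arguments become the job inputs. The tool path `echo.cwl` and the input name `inp` are hypothetical and must match whatever document is actually loaded:

```python
# Hedged usage sketch; "echo.cwl" and the "inp" input are hypothetical and must
# match whatever CWL document is actually loaded.
import cwltool.factory

fac = cwltool.factory.Factory()
echo = fac.make("echo.cwl")   # returns a Callable wrapping the loaded Process
result = echo(inp="foo")      # keyword arguments become the CWL job order
print(result)                 # a dict of the tool's outputs, e.g. {"out": "foo"}
```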
#### File: salad/tests/test_examples.py
```python
import unittest
import schema_salad.ref_resolver
import schema_salad.main
import schema_salad.schema
from schema_salad.jsonld_context import makerdf
import rdflib
import ruamel.yaml as yaml
import json
import os
try:
from ruamel.yaml import CSafeLoader as SafeLoader
except ImportError:
from ruamel.yaml import SafeLoader
class TestSchemas(unittest.TestCase):
def test_schemas(self):
l = schema_salad.ref_resolver.Loader({})
ra, _ = l.resolve_all({
u"$schemas": [u"tests/EDAM.owl"],
u"$namespaces": {u"edam": u"http://edamontology.org/"},
u"edam:has_format": u"edam:format_1915"
}, "")
self.assertEqual({
u"$schemas": [u"tests/EDAM.owl"],
u"$namespaces": {u"edam": u"http://edamontology.org/"},
u'http://edamontology.org/has_format': u'http://edamontology.org/format_1915'
}, ra)
# def test_domain(self):
# l = schema_salad.ref_resolver.Loader({})
# l.idx["http://example.com/stuff"] = {
# "$schemas": ["tests/EDAM.owl"],
# "$namespaces": {"edam": "http://edamontology.org/"},
# }
# ra, _ = l.resolve_all({
# "$import": "http://example.com/stuff",
# "edam:has_format": "edam:format_1915"
# }, "")
# self.assertEquals(ra, {
# "$schemas": ["tests/EDAM.owl"],
# "$namespaces": {"edam": "http://edamontology.org/"},
# 'http://edamontology.org/has_format': 'http://edamontology.org/format_1915'
# })
def test_self_validate(self):
self.assertEqual(0, schema_salad.main.main(argsl=["schema_salad/metaschema/metaschema.yml"]))
self.assertEqual(0, schema_salad.main.main(argsl=["schema_salad/metaschema/metaschema.yml",
"schema_salad/metaschema/metaschema.yml"]))
def test_avro_regression(self):
self.assertEqual(0, schema_salad.main.main(argsl=["tests/Process.yml"]))
def test_jsonld_ctx(self):
ldr, _, _, _ = schema_salad.schema.load_schema({
"$base": "Y",
"name": "X",
"$namespaces": {
"foo": "http://example.com/foo#"
},
"$graph": [{
"name": "ExampleType",
"type": "enum",
"symbols": ["asym", "bsym"]}]
})
ra, _ = ldr.resolve_all({"foo:bar": "asym"}, "X")
self.assertEqual(ra, {
'http://example.com/foo#bar': 'asym'
})
maxDiff = None
def test_idmap(self):
ldr = schema_salad.ref_resolver.Loader({})
ldr.add_context({
"inputs": {
"@id": "http://example.com/inputs",
"mapSubject": "id",
"mapPredicate": "a"
},
"outputs": {
"@type": "@id",
"identity": True,
},
"id": "@id"})
ra, _ = ldr.resolve_all({
"id": "stuff",
"inputs": {
"zip": 1,
"zing": 2
},
"outputs": ["out"],
"other": {
'n': 9
}
}, "http://example2.com/")
self.assertEqual("http://example2.com/#stuff", ra["id"])
for item in ra["inputs"]:
if item["a"] == 2:
self.assertEquals('http://example2.com/#stuff/zing', item["id"])
else:
self.assertEquals('http://example2.com/#stuff/zip', item["id"])
self.assertEquals(['http://example2.com/#stuff/out'], ra['outputs'])
self.assertEquals({'n': 9}, ra['other'])
def test_scoped_ref(self):
ldr = schema_salad.ref_resolver.Loader({})
ldr.add_context({
"scatter": {
"@type": "@id",
"refScope": 0,
},
"source": {
"@type": "@id",
"refScope": 2,
},
"in": {
"mapSubject": "id",
"mapPredicate": "source"
},
"out": {
"@type": "@id",
"identity": True
},
"inputs": {
"mapSubject": "id",
"mapPredicate": "type"
},
"outputs": {
"mapSubject": "id",
},
"steps": {
"mapSubject": "id"
},
"id": "@id"})
ra, _ = ldr.resolve_all({
"inputs": {
"inp": "string",
"inp2": "string"
},
"outputs": {
"out": {
"type": "string",
"source": "step2/out"
}
},
"steps": {
"step1": {
"in": {
"inp": "inp",
"inp2": "#inp2",
"inp3": ["inp", "inp2"]
},
"out": ["out"],
"scatter": "inp"
},
"step2": {
"in": {
"inp": "step1/out"
},
"scatter": "inp",
"out": ["out"]
}
}
}, "http://example2.com/")
self.assertEquals(
{'inputs': [{
'id': 'http://example2.com/#inp',
'type': 'string'
}, {
'id': 'http://example2.com/#inp2',
'type': 'string'
}],
'outputs': [{
'id': 'http://example2.com/#out',
'type': 'string',
'source': 'http://example2.com/#step2/out'
}],
'steps': [{
'id': 'http://example2.com/#step1',
'scatter': 'http://example2.com/#step1/inp',
'in': [{
'id': 'http://example2.com/#step1/inp',
'source': 'http://example2.com/#inp'
}, {
'id': 'http://example2.com/#step1/inp2',
'source': 'http://example2.com/#inp2'
}, {
'id': 'http://example2.com/#step1/inp3',
'source': ['http://example2.com/#inp', 'http://example2.com/#inp2']
}],
"out": ["http://example2.com/#step1/out"],
}, {
'id': 'http://example2.com/#step2',
'scatter': 'http://example2.com/#step2/inp',
'in': [{
'id': 'http://example2.com/#step2/inp',
'source': 'http://example2.com/#step1/out'
}],
"out": ["http://example2.com/#step2/out"],
}]
}, ra)
def test_examples(self):
self.maxDiff = None
for a in ["field_name", "ident_res", "link_res", "vocab_res"]:
ldr, _, _, _ = schema_salad.schema.load_schema(
"schema_salad/metaschema/%s_schema.yml" % a)
with open("schema_salad/metaschema/%s_src.yml" % a) as src_fp:
src = ldr.resolve_all(
yaml.load(src_fp, Loader=SafeLoader), "", checklinks=False)[0]
with open("schema_salad/metaschema/%s_proc.yml" % a) as src_proc:
proc = yaml.load(src_proc, Loader=SafeLoader)
self.assertEqual(proc, src)
def test_yaml_float_test(self):
self.assertEqual(yaml.load("float-test: 2e-10")["float-test"], 2e-10)
def test_typedsl_ref(self):
ldr = schema_salad.ref_resolver.Loader({})
ldr.add_context({
"File": "http://example.com/File",
"null": "http://example.com/null",
"array": "http://example.com/array",
"type": {
"@type": "@vocab",
"typeDSL": True
}
})
ra, _ = ldr.resolve_all({"type": "File"}, "")
self.assertEqual({'type': 'File'}, ra)
ra, _ = ldr.resolve_all({"type": "File?"}, "")
self.assertEqual({'type': ['null', 'File']}, ra)
ra, _ = ldr.resolve_all({"type": "File[]"}, "")
self.assertEqual({'type': {'items': 'File', 'type': 'array'}}, ra)
ra, _ = ldr.resolve_all({"type": "File[]?"}, "")
self.assertEqual({'type': ['null', {'items': 'File', 'type': 'array'}]}, ra)
def test_scoped_id(self):
ldr = schema_salad.ref_resolver.Loader({})
ctx = {
"id": "@id",
"location": {
"@id": "@id",
"@type": "@id"
},
"bar": "http://example.com/bar",
"ex": "http://example.com/"
}
ldr.add_context(ctx)
ra, _ = ldr.resolve_all({
"id": "foo",
"bar": {
"id": "baz"
}
}, "http://example.com")
self.assertEqual({'id': 'http://example.com/#foo',
'bar': {
'id': 'http://example.com/#foo/baz'},
}, ra)
g = makerdf(None, ra, ctx)
print(g.serialize(format="n3"))
ra, _ = ldr.resolve_all({
"location": "foo",
"bar": {
"location": "baz"
}
}, "http://example.com", checklinks=False)
self.assertEqual({'location': 'http://example.com/foo',
'bar': {
'location': 'http://example.com/baz'},
}, ra)
g = makerdf(None, ra, ctx)
print(g.serialize(format="n3"))
ra, _ = ldr.resolve_all({
"id": "foo",
"bar": {
"location": "baz"
}
}, "http://example.com", checklinks=False)
self.assertEqual({'id': 'http://example.com/#foo',
'bar': {
'location': 'http://example.com/baz'},
}, ra)
g = makerdf(None, ra, ctx)
print(g.serialize(format="n3"))
ra, _ = ldr.resolve_all({
"location": "foo",
"bar": {
"id": "baz"
}
}, "http://example.com", checklinks=False)
self.assertEqual({'location': 'http://example.com/foo',
'bar': {
'id': 'http://example.com/#baz'},
}, ra)
g = makerdf(None, ra, ctx)
print(g.serialize(format="n3"))
def test_mixin(self):
ldr = schema_salad.ref_resolver.Loader({})
ra = ldr.resolve_ref({"$mixin": "mixin.yml", "one": "five"},
base_url="file://"+os.getcwd()+"/tests/")
self.assertEqual({'id': 'four', 'one': 'five'}, ra[0])
ldr = schema_salad.ref_resolver.Loader({"id": "@id"})
base_url="file://"+os.getcwd()+"/tests/"
ra = ldr.resolve_all([{
"id": "a",
"m": {"$mixin": "mixin.yml"}
}, {
"id": "b",
"m": {"$mixin": "mixin.yml"}
}], base_url=base_url)
self.assertEqual([{
'id': base_url+'#a',
'm': {
'id': base_url+u'#a/four',
'one': 'two'
},
}, {
'id': base_url+'#b',
'm': {
'id': base_url+u'#b/four',
'one': 'two'}
}], ra[0])
if __name__ == '__main__':
unittest.main()
```
#### File: salad/schema_salad/jsonld_context.py
```python
import collections
import shutil
import json
import ruamel.yaml as yaml
try:
from ruamel.yaml import CSafeLoader as SafeLoader
except ImportError:
from ruamel.yaml import SafeLoader # type: ignore
import os
import subprocess
import copy
import pprint
import re
import sys
import rdflib
from rdflib import Graph, URIRef
import rdflib.namespace
from rdflib.namespace import RDF, RDFS
import urlparse
import logging
from .aslist import aslist
from typing import Any, cast, Dict, Iterable, Tuple, Union
from .ref_resolver import Loader
_logger = logging.getLogger("salad")
def pred(datatype, field, name, context, defaultBase, namespaces):
# type: (Dict[str, Union[Dict, str]], Dict, str, Loader.ContextType, str, Dict[str, rdflib.namespace.Namespace]) -> Union[Dict, str]
split = urlparse.urlsplit(name)
vee = None # type: Union[str, unicode]
if split.scheme:
vee = name
(ns, ln) = rdflib.namespace.split_uri(unicode(vee))
name = ln
if ns[0:-1] in namespaces:
vee = unicode(namespaces[ns[0:-1]][ln])
_logger.debug("name, v %s %s", name, vee)
v = None # type: Any
if field and "jsonldPredicate" in field:
if isinstance(field["jsonldPredicate"], dict):
v = {}
for k, val in field["jsonldPredicate"].items():
v[("@" + k[1:] if k.startswith("_") else k)] = val
if "@id" not in v:
v["@id"] = vee
else:
v = field["jsonldPredicate"]
elif "jsonldPredicate" in datatype:
if isinstance(datatype["jsonldPredicate"], collections.Iterable):
for d in datatype["jsonldPredicate"]:
if isinstance(d, dict):
if d["symbol"] == name:
v = d["predicate"]
else:
raise Exception(
"entries in the jsonldPredicate List must be "
"Dictionaries")
else:
raise Exception("jsonldPredicate must be a List of Dictionaries.")
# if not v:
# if field and "jsonldPrefix" in field:
# defaultBase = field["jsonldPrefix"]
# elif "jsonldPrefix" in datatype:
# defaultBase = datatype["jsonldPrefix"]
ret = v or vee
if not ret:
ret = defaultBase + name
if name in context:
if context[name] != ret:
raise Exception("Predicate collision on %s, '%s' != '%s'" %
(name, context[name], ret))
else:
_logger.debug("Adding to context '%s' %s (%s)", name, ret, type(ret))
context[name] = ret
return ret
def process_type(t, g, context, defaultBase, namespaces, defaultPrefix):
# type: (Dict[str, Any], Graph, Loader.ContextType, str, Dict[str, rdflib.namespace.Namespace], str) -> None
if t["type"] == "record":
recordname = t["name"]
_logger.debug("Processing record %s\n", t)
classnode = URIRef(recordname)
g.add((classnode, RDF.type, RDFS.Class))
split = urlparse.urlsplit(recordname)
if "jsonldPrefix" in t:
predicate = "%s:%s" % (t["jsonldPrefix"], recordname)
elif split.scheme:
(ns, ln) = rdflib.namespace.split_uri(unicode(recordname))
predicate = recordname
recordname = ln
else:
predicate = "%s:%s" % (defaultPrefix, recordname)
if context.get(recordname, predicate) != predicate:
raise Exception("Predicate collision on '%s', '%s' != '%s'" % (
recordname, context[recordname], predicate))
if not recordname:
raise Exception()
_logger.debug("Adding to context '%s' %s (%s)",
recordname, predicate, type(predicate))
context[recordname] = predicate
for i in t.get("fields", []):
fieldname = i["name"]
_logger.debug("Processing field %s", i)
v = pred(t, i, fieldname, context, defaultPrefix, namespaces)
if isinstance(v, basestring):
v = v if v[0] != "@" else None
else:
v = v["_@id"] if v.get("_@id", "@")[0] != "@" else None
if v:
(ns, ln) = rdflib.namespace.split_uri(unicode(v))
if ns[0:-1] in namespaces:
propnode = namespaces[ns[0:-1]][ln]
else:
propnode = URIRef(v)
g.add((propnode, RDF.type, RDF.Property))
g.add((propnode, RDFS.domain, classnode))
# TODO generate range from datatype.
if isinstance(i["type"], dict) and "name" in i["type"]:
process_type(i["type"], g, context, defaultBase,
namespaces, defaultPrefix)
if "extends" in t:
for e in aslist(t["extends"]):
g.add((classnode, RDFS.subClassOf, URIRef(e)))
elif t["type"] == "enum":
_logger.debug("Processing enum %s", t["name"])
for i in t["symbols"]:
pred(t, None, i, context, defaultBase, namespaces)
def salad_to_jsonld_context(j, schema_ctx):
# type: (Iterable, Dict[str, Any]) -> Tuple[Loader.ContextType, Graph]
context = {} # type: Loader.ContextType
namespaces = {}
g = Graph()
defaultPrefix = ""
for k, v in schema_ctx.items():
context[k] = v
namespaces[k] = rdflib.namespace.Namespace(v)
if "@base" in context:
defaultBase = cast(str, context["@base"])
del context["@base"]
else:
defaultBase = ""
for k, v in namespaces.items():
g.bind(k, v)
for t in j:
process_type(t, g, context, defaultBase, namespaces, defaultPrefix)
return (context, g)
def fix_jsonld_ids(obj, ids):
# type: (Union[Dict[unicode, Any], List[Dict[unicode, Any]]], List[unicode]) -> None
if isinstance(obj, dict):
for i in ids:
if i in obj:
obj["@id"] = obj[i]
for v in obj.values():
fix_jsonld_ids(v, ids)
if isinstance(obj, list):
for entry in obj:
fix_jsonld_ids(entry, ids)
def makerdf(workflow, wf, ctx, graph=None):
# type: (Union[str, unicode], Union[List[Dict[unicode, Any]], Dict[unicode, Any]], Loader.ContextType, Graph) -> Graph
prefixes = {}
idfields = []
for k, v in ctx.iteritems():
if isinstance(v, dict):
url = v["@id"]
else:
url = v
if url == "@id":
idfields.append(k)
doc_url, frg = urlparse.urldefrag(url)
if "/" in frg:
p = frg.split("/")[0]
prefixes[p] = u"%s#%s/" % (doc_url, p)
fix_jsonld_ids(wf, idfields)
if graph is None:
g = Graph()
else:
g = graph
if isinstance(wf, list):
for w in wf:
w["@context"] = ctx
g.parse(data=json.dumps(w), format='json-ld', location=workflow)
else:
wf["@context"] = ctx
g.parse(data=json.dumps(wf), format='json-ld', location=workflow)
# Bug in json-ld loader causes @id fields to be added to the graph
for sub, pred, obj in g.triples((None, URIRef("@id"), None)):
g.remove((sub, pred, obj))
for k2, v2 in prefixes.iteritems():
g.namespace_manager.bind(k2, v2)
return g
```
|
{
"source": "jeremiahsavage/gdc-petri",
"score": 2
}
|
#### File: gdc-petri/gdc-petri/main.py
```python
import argparse
import logging
import os
import sys
import sqlalchemy
from snakes import nets
def setup_logging(tool_name, args, uuid):
logging.basicConfig(
filename=os.path.join(uuid + '_' + tool_name + '.log'),
level=args.level,
filemode='w',
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d_%H:%M:%S_%Z',
)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
logger = logging.getLogger(__name__)
return logger
def make_net():
gdc_net = nets.PetriNet('gdc-net')
p_bqsr_normal_wxs_bam = nets.Place('bqsr-normal-wxs-bam') # upon placement of bam in node
p_input_normal_bam = nets.Place('input-normal-bam') # must consume bam nodes
p_input_normal_metadata = nets.Place('input-normal-metadata') # must consume metadata nodes
    t_dnaseq_normal_wxs_workflow = nets.Transition('dnaseq-normal-wxs-workflow')
gdc_net.add_place(p_input_normal_metadata)
gdc_net.add_place(p_input_normal_bam)
gdc_net.add_place(p_bqsr_normal_wxs_bam)
gdc_net.add_transition(t_dnaseq_normal_wxs_workflow)
    # SNAKES arcs carry an annotation; simple Variable arcs are assumed here
    gdc_net.add_input('input-normal-bam', 'dnaseq-normal-wxs-workflow', nets.Variable('bam'))
    gdc_net.add_input('input-normal-metadata', 'dnaseq-normal-wxs-workflow', nets.Variable('metadata'))
    gdc_net.add_output('bqsr-normal-wxs-bam', 'dnaseq-normal-wxs-workflow', nets.Variable('bam'))
def main():
parser = argparse.ArgumentParser('a petri gdc')
# Logging flags.
parser.add_argument('-d', '--debug',
action = 'store_const',
const = logging.DEBUG,
dest = 'level',
help = 'Enable debug logging.',
)
parser.set_defaults(level = logging.INFO)
parser.add_argument('--bam',
required = True
)
parser.add_argument('--input_state',
required = True
)
parser.add_argument('--metric_name',
required = True
)
parser.add_argument('--metric_path',
required = True
)
parser.add_argument('--uuid',
required = True
)
# setup required parameters
args = parser.parse_args()
    bam = args.bam
input_state = args.input_state
metric_name = args.metric_name
metric_path = args.metric_path
uuid = args.uuid
logger = setup_logging('samtools_' + metric_name, args, uuid)
sqlite_name = uuid + '.db'
engine_path = 'sqlite:///' + sqlite_name
engine = sqlalchemy.create_engine(engine_path, isolation_level='SERIALIZABLE')
make_net()
return
if __name__ == '__main__':
main()
```
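`make_net` wires two input places and an output place to a single transition but never adds tokens or fires it. A hedged, self-contained SNAKES sketch of the same topology actually firing, assuming the standard `snakes.nets` API where arcs carry an annotation such as `Variable` (token values are invented):

```python
# Hedged sketch assuming the standard snakes.nets API; token values are invented.
from snakes.nets import PetriNet, Place, Transition, Variable

net = PetriNet("mini-gdc-net")
net.add_place(Place("input-normal-bam", ["bam-uuid-1"]))
net.add_place(Place("input-normal-metadata", ["meta-uuid-1"]))
net.add_place(Place("bqsr-normal-wxs-bam"))
net.add_transition(Transition("dnaseq-normal-wxs-workflow"))

# The transition consumes one bam token and one metadata token ...
net.add_input("input-normal-bam", "dnaseq-normal-wxs-workflow", Variable("bam"))
net.add_input("input-normal-metadata", "dnaseq-normal-wxs-workflow", Variable("meta"))
# ... and emits the bam token into the output place.
net.add_output("bqsr-normal-wxs-bam", "dnaseq-normal-wxs-workflow", Variable("bam"))

modes = net.transition("dnaseq-normal-wxs-workflow").modes()  # enabled bindings
net.transition("dnaseq-normal-wxs-workflow").fire(modes[0])
print(net.place("bqsr-normal-wxs-bam").tokens)  # the bam token has moved here
```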
|
{
"source": "jeremiahwander/sample-metadata",
"score": 3
}
|
#### File: db/python/connect.py
```python
import json
import logging
import os
from typing import Optional
# import asyncio
import databases
from db.python.tables.project import ProjectPermissionsTable
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def to_db_json(val):
"""Convert val to json for DB"""
# return psycopg2.extras.Json(val)
return json.dumps(val)
class Connection:
"""Stores a DB connection, project and author"""
def __init__(
self,
connection: databases.Database,
project: Optional[int],
author: str,
):
self.connection: databases.Database = connection
self.project: Optional[int] = project
self.author: str = author
def assert_requires_project(self):
"""Assert the project is set, or return an exception"""
if self.project is None:
raise Exception(
'An internal error has occurred when passing the project context, '
'please send this stacktrace to your system administrator'
)
class NotFoundError(Exception):
"""Custom error when you can't find something"""
class DatabaseConfiguration:
"""Class to hold information about a MySqlConfiguration"""
def __init__(
self,
dbname,
host=None,
port=None,
username=None,
password=<PASSWORD>,
):
self.dbname = dbname
self.host = host
self.port = port
self.username = username
self.password = password
@staticmethod
def dev_config() -> 'DatabaseConfiguration':
"""Dev config for local database with name 'sm_dev'"""
# consider pulling from env variables
return DatabaseConfiguration(
dbname=os.environ.get('SM_DEV_DB_NAME', 'sm_dev'),
username=os.environ.get('SM_DEV_DB_USER', 'root'),
password=os.environ.get('SM_DEV_DB_PASSWORD', ''),
host=os.environ.get('SM_DEV_DB_HOST', '127.0.0.1'),
port=os.environ.get('SM_DEV_DB_PORT', '3306'),
)
class SMConnections:
"""Contains useful functions for connecting to the database"""
# _connected = False
# _connections: Dict[str, databases.Database] = {}
# _admin_db: databases.Database = None
_credentials: Optional[DatabaseConfiguration] = None
@staticmethod
def _get_config():
if SMConnections._credentials:
return SMConnections._credentials
config = DatabaseConfiguration.dev_config()
creds_from_env = os.getenv('SM_DBCREDS')
if creds_from_env is not None:
config = DatabaseConfiguration(**json.loads(creds_from_env))
logger.info(f'Using supplied SM DB CREDS: {config.host}')
SMConnections._credentials = config
return SMConnections._credentials
@staticmethod
def make_connection(config: DatabaseConfiguration):
"""Create connection from dbname"""
# the connection string will prepare pooling automatically
return databases.Database(
SMConnections.prepare_connection_string(
host=config.host,
database=config.dbname,
username=config.username,
password=<PASSWORD>,
port=config.port,
)
)
@staticmethod
def prepare_connection_string(
host,
database,
username,
password=<PASSWORD>,
port=None,
# min_pool_size=5,
# max_pool_size=20,
):
"""Prepares the connection string for mysql / mariadb"""
_host = host or 'localhost'
u_p = username
if password:
u_p += f':{password}'
if port:
_host += f':{port}'
options = {} # {'min_size': min_pool_size, 'max_size': max_pool_size}
_options = '&'.join(f'{k}={v}' for k, v in options.items())
url = f'mysql://{u_p}@{_host}/{database}?{_options}'
return url
@staticmethod
async def connect():
"""Create connections to database"""
# this will now not connect, new connection will be made on every request
return False
@staticmethod
async def disconnect():
"""Disconnect from database"""
return False
@staticmethod
async def _get_made_connection():
credentials = SMConnections._get_config()
if credentials is None:
raise Exception(
'The server has been misconfigured, please '
'contact your system administrator'
)
conn = SMConnections.make_connection(credentials)
await conn.connect()
return conn
@staticmethod
async def get_connection(*, author: str, project_name: str, readonly: bool):
"""Get a db connection from a project and user"""
# maybe it makes sense to perform permission checks here too
logger.debug(f'Authenticate connection to {project_name} with "{author}"')
conn = await SMConnections._get_made_connection()
pt = ProjectPermissionsTable(connection=conn)
project_id = await pt.get_project_id_from_name_and_user(
user=author, project_name=project_name, readonly=readonly
)
return Connection(connection=conn, author=author, project=project_id)
@staticmethod
async def get_connection_no_project(author: str):
"""Get a db connection from a project and user"""
# maybe it makes sense to perform permission checks here too
logger.debug(f'Authenticate no-project connection with "{author}"')
conn = await SMConnections._get_made_connection()
# we don't authenticate project-less connection, but rely on the
# the endpoint to validate the resources
return Connection(connection=conn, author=author, project=None)
class DbBase:
"""Base class for table subclasses"""
@classmethod
async def from_project(cls, project, author, readonly: bool):
"""Create the Db object from a project with user details"""
return cls(
connection=await SMConnections.get_connection(
project_name=project, author=author, readonly=readonly
),
)
def __init__(self, connection: Connection):
if connection is None:
raise Exception(
f'No connection was provided to the table "{self.__class__.__name__}"'
)
if not isinstance(connection, Connection):
raise Exception(
f'Expected connection type Connection, received {type(connection)}, '
f'did you mean to call self._connection?'
)
self._connection = connection
self.connection: databases.Database = connection.connection
self.author = connection.author
self.project = connection.project
if self.author is None:
raise Exception(f'Must provide author to {self.__class__.__name__}')
# piped from the connection
```
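`prepare_connection_string` only assembles a `mysql://user:password@host:port/db` URL for the `databases` library. A quick hedged example of the string it produces (the credentials are placeholders, and the import assumes the repository's `db.python` package is on the path):

```python
# Illustrative call; credentials are placeholders. Assumes the repository's
# db.python package is importable as shown in the file header above.
from db.python.connect import SMConnections

url = SMConnections.prepare_connection_string(
    host="127.0.0.1",
    database="sm_dev",
    username="root",
    password="example-password",
    port="3306",
)
print(url)  # mysql://root:example-password@127.0.0.1:3306/sm_dev?
```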
#### File: python/tables/family.py
```python
from typing import List, Optional, Set, Any, Dict
from db.python.connect import DbBase, NotFoundError
from db.python.tables.project import ProjectId
from models.models.family import Family
class FamilyTable(DbBase):
"""
    Capture Family table operations and queries
"""
table_name = 'family'
async def get_projects_by_family_ids(self, family_ids: List[int]) -> Set[ProjectId]:
"""Get project IDs for sampleIds (mostly for checking auth)"""
_query = """
SELECT project FROM family
        WHERE id in :family_ids
GROUP BY project
"""
if len(family_ids) == 0:
raise ValueError('Received no family IDs to get project ids for')
rows = await self.connection.fetch_all(_query, {'family_ids': family_ids})
projects = set(r['project'] for r in rows)
if not projects:
raise ValueError(
'No projects were found for given families, this is likely an error'
)
return projects
async def get_families(self, project: int = None) -> List[Family]:
"""Get all families for some project"""
_query = """\
SELECT id, external_id, description, coded_phenotype, project
FROM family
WHERE project = :project"""
rows = await self.connection.fetch_all(
_query, {'project': project or self.project}
)
families = [Family.from_db(dict(r)) for r in rows]
return families
async def update_family(
self,
id_: int,
external_id: str = None,
description: str = None,
coded_phenotype: str = None,
author: str = None,
) -> bool:
"""Update values for a family"""
values: Dict[str, Any] = {'author': author or self.author}
if external_id:
values['external_id'] = external_id
if description:
values['description'] = description
if coded_phenotype:
values['coded_phenotype'] = coded_phenotype
setters = ', '.join(f'{field} = :{field}' for field in values)
_query = f"""
UPDATE family
SET {setters}
WHERE id = :id
"""
await self.connection.execute(_query, {**values, 'id': id_})
return True
async def create_family(
self,
external_id: str,
description: Optional[str],
coded_phenotype: Optional[str],
author: str = None,
project: ProjectId = None,
) -> int:
"""
        Create a new family and add it to the database
"""
updater = {
'external_id': external_id,
'description': description,
'coded_phenotype': coded_phenotype,
'author': author or self.author,
'project': project or self.project,
}
keys = list(updater.keys())
str_keys = ', '.join(keys)
placeholder_keys = ', '.join(f':{k}' for k in keys)
_query = f"""
INSERT INTO family
({str_keys})
VALUES
({placeholder_keys})
RETURNING id
"""
return await self.connection.fetch_val(_query, updater)
async def insert_or_update_multiple_families(
self,
external_ids: List[str],
descriptions: List[str],
coded_phenotypes: List[Optional[str]],
project: int = None,
author: str = None,
):
"""Upsert"""
updater = [
{
'external_id': eid,
'description': descr,
'coded_phenotype': cph,
'author': author or self.author,
'project': project or self.project,
}
for eid, descr, cph in zip(external_ids, descriptions, coded_phenotypes)
]
keys = list(updater[0].keys())
str_keys = ', '.join(keys)
placeholder_keys = ', '.join(f':{k}' for k in keys)
update_only_keys = [k for k in keys if k not in ('external_id', 'project')]
str_uo_placeholder_keys = ', '.join(f'{k} = :{k}' for k in update_only_keys)
_query = f"""\
INSERT INTO family
({str_keys})
VALUES
({placeholder_keys})
ON DUPLICATE KEY UPDATE
{str_uo_placeholder_keys}
"""
await self.connection.execute_many(_query, updater)
return True
async def get_id_map_by_external_ids(
self, family_ids: List[str], allow_missing=False, project: Optional[int] = None
):
"""Get map of {external_id: internal_id} for a family"""
_query = 'SELECT external_id, id FROM family WHERE external_id in :external_ids AND project = :project'
results = await self.connection.fetch_all(
_query, {'external_ids': family_ids, 'project': project or self.project}
)
id_map = {r['external_id']: r['id'] for r in results}
if not allow_missing and len(id_map) != len(family_ids):
provided_external_ids = set(family_ids)
# do the check again, but use the set this time
# (in case we're provided a list with duplicates)
if len(id_map) != len(provided_external_ids):
# we have families missing from the map, so we'll 404 the whole thing
missing_family_ids = provided_external_ids - set(id_map.keys())
raise NotFoundError(
f"Couldn't find families with external IDS: {', '.join(missing_family_ids)}"
)
return id_map
async def get_id_map_by_internal_ids(
self, family_ids: List[int], allow_missing=False
):
"""Get map of {external_id: internal_id} for a family"""
if len(family_ids) == 0:
return {}
_query = 'SELECT id, external_id FROM family WHERE id in :ids'
results = await self.connection.fetch_all(_query, {'ids': family_ids})
id_map = {r['id']: r['external_id'] for r in results}
if not allow_missing and len(id_map) != len(family_ids):
provided_external_ids = set(family_ids)
# do the check again, but use the set this time
# (in case we're provided a list with duplicates)
if len(id_map) != len(provided_external_ids):
# we have families missing from the map, so we'll 404 the whole thing
missing_family_ids = provided_external_ids - set(id_map.keys())
raise NotFoundError(
f"Couldn't find families with internal IDS: {', '.join(str(m) for m in missing_family_ids)}"
)
return id_map
```
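`update_family` assembles its `SET` clause dynamically from whichever fields were supplied. A small sketch of just that string construction, with no database involved (the field values are invented):

```python
# Shows only the SQL text that update_family would generate; no database involved.
values = {"author": "jdoe", "description": "updated description"}
setters = ", ".join(f"{field} = :{field}" for field in values)
query = f"""
UPDATE family
SET {setters}
WHERE id = :id
"""
print(query)
# UPDATE family
# SET author = :author, description = :description
# WHERE id = :id
```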
#### File: models/models/sample.py
```python
import json
import os
from typing import Optional, Dict, Union, List, Sequence, Type
from models.base import SMBase
from models.enums.sample import SampleType
SAMPLE_PREFIX = os.getenv('SM_SAMPLEPREFIX', 'CPGLCL').upper()
CHECKSUM_OFFSET = int(os.getenv('SM_SAMPLECHECKOFFSET', '2'))
class Sample(SMBase):
"""Model for a Sample"""
id: Union[str, int]
external_id: str
participant_id: Optional[str] = None
active: Optional[bool] = None
meta: Optional[Dict] = None
type: Optional[SampleType] = None
project: Optional[int] = None
author: Optional[str] = None
@staticmethod
def from_db(d: Dict):
"""
Convert from db keys, mainly converting id to id_
"""
_id = sample_id_format(d.pop('id', None))
type_ = d.pop('type', None)
meta = d.pop('meta', None)
active = d.pop('active', None)
if active is not None:
active = bool(active)
if meta:
if isinstance(meta, bytes):
meta = meta.decode()
if isinstance(meta, str):
meta = json.loads(meta)
return Sample(id=_id, type=SampleType(type_), meta=meta, active=active, **d)
SampleIdRaw = Union[str, int]
def sample_id_transform_to_raw_list(
identifier: Sequence[SampleIdRaw], strict=True
) -> List[int]:
"""
Transform LIST of STRING sample identifier (CPGXXXH) to XXX by:
- validating prefix
- validating checksum
"""
return [sample_id_transform_to_raw(s, strict=strict) for s in identifier]
def sample_id_transform_to_raw(identifier: SampleIdRaw, strict=True) -> int:
"""
Transform STRING sample identifier (CPGXXXH) to XXX by:
- validating prefix
- validating checksum
"""
expected_type: Type[Union[str, SampleType]] = str if strict else SampleType
if not isinstance(identifier, expected_type):
raise TypeError(
f'Expected identifier type to be "{expected_type}", received "{type(identifier)}"'
)
if isinstance(identifier, int):
return identifier
if not isinstance(identifier, str):
raise ValueError('Programming error related to sample checks')
if not identifier.startswith(SAMPLE_PREFIX):
raise Exception(
f'Invalid prefix found for {SAMPLE_PREFIX} sample identifier "{identifier}"'
)
stripped_identifier = identifier.lstrip(SAMPLE_PREFIX)
if not stripped_identifier.isdigit():
raise ValueError(f'Invalid sample identifier "{identifier}"')
sample_id_with_checksum = int(stripped_identifier)
if not luhn_is_valid(sample_id_with_checksum, offset=CHECKSUM_OFFSET):
raise ValueError(f'The provided sample ID was not valid: "{identifier}"')
return int(stripped_identifier[:-1])
def sample_id_format_list(sample_ids: Sequence[Union[int, str]]) -> List[str]:
"""
Transform LIST of raw (int) sample identifier to format (CPGXXXH) where:
- CPG is the prefix
- XXX is the original identifier
- H is the Luhn checksum
"""
return [sample_id_format(s) for s in sample_ids]
def sample_id_format(sample_id: Union[int, str]) -> str:
"""
Transform raw (int) sample identifier to format (CPGXXXH) where:
- CPG is the prefix
- XXX is the original identifier
- H is the Luhn checksum
>>> sample_id_format(10)
'CPG109'
>>> sample_id_format(12345)
'CPG123455'
"""
if isinstance(sample_id, str) and not sample_id.isdigit():
if sample_id.startswith(SAMPLE_PREFIX):
return sample_id
raise ValueError(f'Unexpected format for sample identifier "{sample_id}"')
sample_id = int(sample_id)
checksum = luhn_compute(sample_id, offset=CHECKSUM_OFFSET)
return f'{SAMPLE_PREFIX}{sample_id}{checksum}'
def luhn_is_valid(n, offset=0):
"""
Based on: https://stackoverflow.com/a/21079551
>>> luhn_is_valid(4532015112830366)
True
>>> luhn_is_valid(6011514433546201)
True
>>> luhn_is_valid(6771549495586802)
True
"""
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(n)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = sum(odd_digits) + sum(sum(digits_of(d * 2)) for d in even_digits)
return checksum % 10 == offset
def luhn_compute(n, offset=0):
"""
Compute Luhn check digit of number given as string
>>> luhn_compute(453201511283036)
6
>>> luhn_compute(601151443354620)
1
>>> luhn_compute(677154949558680)
2
"""
m = [int(d) for d in reversed(str(n))]
result = sum(m) + sum(d + (d >= 5) for d in m[::2])
checksum = ((-result % 10) + offset) % 10
return checksum
```
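The two Luhn helpers are consistent with each other: appending the digit returned by `luhn_compute` to the payload yields a number that `luhn_is_valid` accepts for the same offset. A worked example using the doctest values from the module above (plain Luhn, offset 0; the import path is taken from the file header and may need adjusting):

```python
# Import path assumed from the file header (models/models/sample.py).
from models.models.sample import luhn_compute, luhn_is_valid

payload = 453201511283036
check = luhn_compute(payload)        # 6, matching the doctest above
full_id = int(f"{payload}{check}")   # 4532015112830366
assert luhn_is_valid(full_id)        # appending the check digit validates (offset 0)
```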
#### File: sample_metadata/parser/sample_file_map_parser.py
```python
from typing import List
import logging
from sample_metadata.parser.generic_metadata_parser import GenericMetadataParser
SAMPLE_ID_COL_NAME = 'Individual ID'
READS_COL_NAME = 'Filenames'
__DOC = """
The SampleFileMapParser is used for parsing files with format:
- 'Individual ID'
- 'Filenames'
EG:
Individual ID Filenames
<sample-id> <sample-id>.filename-R1.fastq.gz,<sample-id>.filename-R2.fastq.gz
# OR
<sample-id2> <sample-id2>.filename-R1.fastq.gz
<sample-id2> <sample-id2>.filename-R2.fastq.gz
This format is useful for ingesting filenames for the seqr loading pipeline
"""
logger = logging.getLogger(__file__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
class SampleFileMapParser(GenericMetadataParser):
"""Parser for SampleFileMap"""
def __init__(
self,
search_locations: List[str],
sample_metadata_project: str,
default_sequence_type='wgs',
default_sample_type='blood',
):
super().__init__(
search_locations=search_locations,
sample_metadata_project=sample_metadata_project,
sample_name_column=SAMPLE_ID_COL_NAME,
reads_column=READS_COL_NAME,
default_sequence_type=default_sequence_type,
default_sample_type=default_sample_type,
sample_meta_map={},
sequence_meta_map={},
qc_meta_map={},
)
```
#### File: sample-metadata/scripts/create_test_subset.py
```python
from typing import Dict, List, Optional, Tuple
import logging
import os
import random
import subprocess
import traceback
import typing
from collections import Counter
import click
from google.cloud import storage
from peddy import Ped
from sample_metadata import exceptions
from sample_metadata.apis import (
AnalysisApi,
SequenceApi,
SampleApi,
FamilyApi,
ParticipantApi,
)
from sample_metadata.configuration import _get_google_auth_token
from sample_metadata.models import (
AnalysisType,
NewSequence,
NewSample,
AnalysisModel,
SampleUpdateModel,
)
logger = logging.getLogger(__file__)
logging.basicConfig(format='%(levelname)s (%(name)s %(lineno)s): %(message)s')
logger.setLevel(logging.INFO)
sapi = SampleApi()
aapi = AnalysisApi()
seqapi = SequenceApi()
fapi = FamilyApi()
papi = ParticipantApi()
DEFAULT_SAMPLES_N = 10
@click.command()
@click.option(
'--project',
required=True,
help='The sample-metadata project ($DATASET)',
)
@click.option(
'-n',
'--samples',
'samples_n',
type=int,
help='Number of samples to subset',
)
@click.option(
'--families',
'families_n',
type=int,
help='Minimal number of families to include',
)
def main(
project: str,
samples_n: Optional[int],
families_n: Optional[int],
):
"""
Script creates a test subset for a given project.
A new project with a prefix -test is created, and for any files in sample/meta,
sequence/meta, or analysis/output a copy in the -test namespace is created.
"""
samples_n, families_n = _validate_opts(samples_n, families_n)
all_samples = sapi.get_samples(
body_get_samples_by_criteria_api_v1_sample_post={
'project_ids': [project],
'active': True,
}
)
logger.info(f'Found {len(all_samples)} samples')
if samples_n and samples_n >= len(all_samples):
resp = str(
input(
f'Requesting {samples_n} samples which is >= '
f'than the number of available samples ({len(all_samples)}). '
f'The test project will be a copy of the production project. '
f'Please confirm (y): '
)
)
if resp.lower() != 'y':
raise SystemExit()
random.seed(42) # for reproducibility
pid_sid = papi.get_external_participant_id_to_internal_sample_id(project)
sample_id_by_participant_id = dict(pid_sid)
ped_lines = export_ped_file(project, replace_with_participant_external_ids=True)
if families_n is not None:
ped = Ped(ped_lines)
families = list(ped.families.values())
logger.info(f'Found {len(families)} families, by size:')
_print_fam_stats(families)
families = random.sample(families, families_n)
logger.info(f'After subsetting to {len(families)} families:')
_print_fam_stats(families)
sample_ids = []
for fam in families:
for s in fam.samples:
sample_ids.append(sample_id_by_participant_id[s.sample_id])
samples = [s for s in all_samples if s['id'] in sample_ids]
else:
assert samples_n
samples = random.sample(all_samples, samples_n)
sample_ids = [s['id'] for s in samples]
logger.info(
f'Subset to {len(samples)} samples (internal ID / external ID): '
f'{_pretty_format_samples(samples)}'
)
# Populating test project
target_project = project + '-test'
logger.info('Checking any existing test samples in the target test project')
test_sample_by_external_id = _process_existing_test_samples(target_project, samples)
try:
seq_infos: List[Dict] = seqapi.get_sequences_by_sample_ids(sample_ids)
except exceptions.ApiException:
seq_info_by_s_id = {}
else:
seq_info_by_s_id = dict(zip(sample_ids, seq_infos))
analysis_by_sid_by_type: Dict[str, Dict] = {'cram': {}, 'gvcf': {}}
for a_type, analysis_by_sid in analysis_by_sid_by_type.items():
try:
analyses: List[Dict] = aapi.get_latest_analysis_for_samples_and_type(
project=project,
analysis_type=AnalysisType(a_type),
request_body=sample_ids,
)
except exceptions.ApiException:
traceback.print_exc()
else:
for a in analyses:
analysis_by_sid[a['sample_ids'][0]] = a
logger.info(f'Will copy {a_type} analysis entries: {analysis_by_sid}')
for s in samples:
logger.info(f'Processing sample {s["id"]}')
if s['external_id'] in test_sample_by_external_id:
new_s_id = test_sample_by_external_id[s['external_id']]['id']
logger.info(f'Sample already in test project, with ID {new_s_id}')
else:
logger.info('Creating test sample entry')
new_s_id = sapi.create_new_sample(
project=target_project,
new_sample=NewSample(
external_id=s['external_id'],
type=s['type'],
meta=_copy_files_in_dict(s['meta'], project),
),
)
seq_info = seq_info_by_s_id.get(s['id'])
if seq_info:
logger.info('Processing sequence entry')
new_meta = _copy_files_in_dict(seq_info.get('meta'), project)
logger.info('Creating sequence entry in test')
seqapi.create_new_sequence(
new_sequence=NewSequence(
sample_id=new_s_id,
meta=new_meta,
type=seq_info['type'],
status=seq_info['status'],
)
)
for a_type in ['cram', 'gvcf']:
analysis = analysis_by_sid_by_type[a_type].get(s['id'])
if analysis:
logger.info(f'Processing {a_type} analysis entry')
am = AnalysisModel(
type=a_type,
output=_copy_files_in_dict(analysis['output'], project),
status=analysis['status'],
sample_ids=[s['id']],
)
logger.info(f'Creating {a_type} analysis entry in test')
aapi.create_new_analysis(project=target_project, analysis_model=am)
logger.info(f'-')
def _validate_opts(samples_n, families_n) -> Tuple[Optional[int], Optional[int]]:
if samples_n is not None and families_n is not None:
raise click.BadParameter('Please specify only one of --samples or --families')
if samples_n is None and families_n is None:
samples_n = DEFAULT_SAMPLES_N
logger.info(
f'Neither --samples nor --families specified, defaulting to selecting '
f'{samples_n} samples'
)
if samples_n is not None and samples_n < 1:
raise click.BadParameter('Please specify --samples higher than 0')
if families_n is not None and families_n < 1:
raise click.BadParameter('Please specify --families higher than 0')
if families_n is not None and families_n >= 30:
resp = str(
input(
f'You requested a subset of {families_n} families. '
f'Please confirm (y): '
)
)
if resp.lower() != 'y':
raise SystemExit()
if samples_n is not None and samples_n >= 100:
resp = str(
input(
f'You requested a subset of {samples_n} samples. '
f'Please confirm (y): '
)
)
if resp.lower() != 'y':
raise SystemExit()
return samples_n, families_n
def _print_fam_stats(families: List):
fam_by_size: typing.Counter[int] = Counter()
for fam in families:
fam_by_size[len(fam.samples)] += 1
for fam_size in sorted(fam_by_size):
if fam_size == 1:
label = 'singles'
elif fam_size == 2:
label = 'duos'
elif fam_size == 3:
label = 'trios'
else:
label = f'{fam_size} members'
logger.info(f' {label}: {fam_by_size[fam_size]}')
def _copy_files_in_dict(d, dataset: str):
"""
Replaces all `gs://cpg-{project}-main*/` paths
into `gs://cpg-{project}-test*/` and creates copies if needed
If `d` is dict or list, recursively calls this function on every element
If `d` is str, replaces the path
"""
if not d:
return d
if isinstance(d, str) and d.startswith(f'gs://cpg-{dataset}-main'):
old_path = d
if not file_exists(old_path):
logger.warning(f'File {old_path} does not exist')
return d
new_path = old_path.replace(
f'gs://cpg-{dataset}-main', f'gs://cpg-{dataset}-test'
)
if not file_exists(new_path):
cmd = f'gsutil cp "{old_path}" "{new_path}"'
logger.info(f'Copying file in metadata: {cmd}')
subprocess.run(cmd, check=False, shell=True)
extra_exts = ['.md5']
if new_path.endswith('.vcf.gz'):
extra_exts.append('.tbi')
if new_path.endswith('.cram'):
extra_exts.append('.crai')
for ext in extra_exts:
if file_exists(old_path + ext) and not file_exists(new_path + ext):
cmd = f'gsutil cp "{old_path + ext}" "{new_path + ext}"'
logger.info(f'Copying extra file in metadata: {cmd}')
subprocess.run(cmd, check=False, shell=True)
return new_path
if isinstance(d, list):
return [_copy_files_in_dict(x, dataset) for x in d]
if isinstance(d, dict):
return {k: _copy_files_in_dict(v, dataset) for k, v in d.items()}
return d
def _pretty_format_samples(samples: List[Dict]) -> str:
return ', '.join(f"{s['id']}/{s['external_id']}" for s in samples)
def _process_existing_test_samples(test_project: str, samples: List) -> Dict:
"""
Removes samples that need to be removed and returns those that need to be kept
"""
test_samples = sapi.get_samples(
body_get_samples_by_criteria_api_v1_sample_post={
'project_ids': [test_project],
'active': True,
}
)
external_ids = [s['external_id'] for s in samples]
test_samples_to_remove = [
s for s in test_samples if s['external_id'] not in external_ids
]
test_samples_to_keep = [s for s in test_samples if s['external_id'] in external_ids]
if test_samples_to_remove:
logger.info(
f'Removing test samples: {_pretty_format_samples(test_samples_to_remove)}'
)
for s in test_samples_to_remove:
sapi.update_sample(s['id'], SampleUpdateModel(active=False))
if test_samples_to_keep:
logger.info(
f'Test samples already exist: {_pretty_format_samples(test_samples_to_keep)}'
)
return {s['external_id']: s for s in test_samples_to_keep}
def file_exists(path: str) -> bool:
"""
Check if the object exists, where the object can be:
* local file
* local directory
* Google Storage object
:param path: path to the file/directory/object
:return: True if the object exists
"""
if path.startswith('gs://'):
bucket = path.replace('gs://', '').split('/')[0]
path = path.replace('gs://', '').split('/', maxsplit=1)[1]
gs = storage.Client()
        return gs.get_bucket(bucket).get_blob(path) is not None
return os.path.exists(path)
def export_ped_file( # pylint: disable=invalid-name
project: str,
replace_with_participant_external_ids: bool = False,
replace_with_family_external_ids: bool = False,
) -> List[str]:
"""
    Generates a PED file for the project, returns PED file lines in a list
"""
route = f'/api/v1/family/{project}/pedigree'
opts = []
if replace_with_participant_external_ids:
opts.append('replace_with_participant_external_ids=true')
if replace_with_family_external_ids:
opts.append('replace_with_family_external_ids=true')
if opts:
route += '?' + '&'.join(opts)
cmd = f"""\
curl --location --request GET \
'https://sample-metadata.populationgenomics.org.au{route}' \
--header "Authorization: Bearer {_get_google_auth_token()}"
"""
lines = subprocess.check_output(cmd, shell=True).decode().strip().split('\n')
return lines
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
main()
```
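`_copy_files_in_dict` rewrites every `gs://cpg-<dataset>-main*` path into the `-test` namespace and recurses through nested dicts and lists, copying files as it goes. A hedged sketch of just the path-rewriting part, with the `gsutil` copies and existence checks stripped out so it runs without GCS access (the dataset name is hypothetical):

```python
# Path rewriting only; the real _copy_files_in_dict also gsutil-copies each file.
def rewrite_main_to_test(d, dataset: str):
    if isinstance(d, str) and d.startswith(f"gs://cpg-{dataset}-main"):
        return d.replace(f"gs://cpg-{dataset}-main", f"gs://cpg-{dataset}-test")
    if isinstance(d, list):
        return [rewrite_main_to_test(x, dataset) for x in d]
    if isinstance(d, dict):
        return {k: rewrite_main_to_test(v, dataset) for k, v in d.items()}
    return d


meta = {
    "reads": ["gs://cpg-myproj-main-upload/sampleA.cram"],
    "note": "non-path values are left untouched",
}
print(rewrite_main_to_test(meta, "myproj"))
# {'reads': ['gs://cpg-myproj-test-upload/sampleA.cram'], 'note': ...}
```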
#### File: sample-metadata/scripts/parse_nagim.py
```python
import logging
import subprocess
import tempfile
from dataclasses import dataclass, field
from os.path import join, exists, basename
from typing import List, Dict, Any, Optional, Tuple, Callable, Union
import json
import gcsfs
import click
import pandas as pd
from cpg_pipes.pipeline import setup_batch
from cpg_pipes.resources import DRIVER_IMAGE
from cpg_pipes.utils import can_reuse
from sample_metadata.models import (
AnalysisStatus,
AnalysisType,
AnalysisModel,
)
from sample_metadata.apis import SampleApi
from sample_metadata.parser.generic_parser import GenericParser, GroupedRow
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
NAGIM_PROJ_ID = 'nagim'
NAMESPACE = 'main'
# Mapping the KCCG project IDs to internal CPG project IDs
PROJECT_ID_MAP = {
'1KB': 'thousand-genomes',
'ALS': 'csiro-als',
'AMP-PD': 'amp-pd',
'HGDP': 'hgdp',
'MGRB': 'mgrb',
'TOB': 'tob-wgs',
'acute_care': 'acute-care',
}
# 2 columns: sample IDs used in the NAGIM run, and a project ID.
SAMPLE_TO_PROJECT_TSV_PATH = 'gs://cpg-nagim-main/metadata/nagim-terra-samples.tsv'
SRC_BUCKETS = {
'test': {
'Australia': [ # Australian Terra workspace
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/2232b739-5183-4935-bb84-452a631c31ea',
],
        'US': [ # The US Terra workspace
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/1a9237ff-2e6e-4444-b67d-bd2715b8a156',
],
},
'main': {
'Australia': [
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/8b5c4805-a08c-4b22-9521-f003e1e02153',
'gs://fc-975676a8-4e21-46af-bc02-816044ad7448/1e968324-0d1d-4061-86d5-2f2678363e5a',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/376b7e6e-3e9a-4608-899b-3ae56f42b8ae',
'gs://fc-fa51701d-03df-4ca7-8408-5c859458759d/1c6b5f64-1b83-4f98-9ba8-0cc7918677a9',
'gs://fc-10674f84-3eed-440a-b6fd-f6b0a7a3f3d0/a521fa83-0974-4b0b-8ffd-de8bb7363adc',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/95b12dea-5d83-4e19-9a9d-4616d69ec9a3',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/4ee1f6ce-8045-49c5-8fd0-6409b3bd063f',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/bc178a03-ad33-4eba-8581-a5ee441d1370',
'gs://fc-f42ce9c2-17c2-4ae9-ac49-657ad9783280/2a991598-d7bc-4aea-af81-ff376d131c3b',
'gs://fc-30c132a7-2e19-4b73-9d70-e23c405740a2/9585ddb4-fa1c-499a-b424-32cf9def33a5',
'gs://fc-79767284-d7a5-4565-9816-61c6e28e9f7f/37959029-3ed9-4415-aa0a-f4c2337b9c14',
'gs://fc-7d762f69-bb45-48df-901b-b3bcec656ee0/ceaed9aa-9e17-4b19-9926-a320ee614d6e',
'gs://fc-7312af9d-7217-4eef-a6c0-c3637ade1662/d0bbd0be-3f66-4308-9376-34844d520073',
'gs://fc-79767284-d7a5-4565-9816-61c6e28e9f7f/65bca9dc-99b5-4eac-9e29-a82ef94c542c',
'gs://fc-fa51701d-03df-4ca7-8408-5c859458759d/fe652736-53aa-4fab-bc24-8fec9f7cea8e',
'gs://fc-ddb2e6d7-319a-4dc2-aa79-f640c2f889d3/defa7f3c-b04d-4a2d-ae80-16379be145e8',
'gs://fc-79cf62c1-c8c6-4934-93cd-dcd792d905d8/e47071c6-cc81-4c77-a860-56bd5fb75fff',
'gs://fc-3a36f1b1-761b-4d24-ba78-f8f72a55daab/d57f15fb-c7ae-45e2-bf17-f305493efa4a',
],
'US': [
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/153e4788-1c48-4a51-864e-9707dbae5c59',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/b4a00407-f6c6-4fd0-b71f-820e047f792c',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/914b7deb-9156-4cc8-8eb0-b13a6d008e2b',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/bfa7f93d-06c8-40d5-b1da-de68b390d8cf',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/b9fab668-3b28-4e58-8af2-5d443d7aae2f',
'gs://fc-bda68b2d-bed3-495f-a63c-29477968feff/884b65af-adba-4cbf-a068-48ea9e948524',
],
},
}
class Source:
"""
Type of files we pull (e.g. CRAM, GVCF, QC)
"""
def __init__(
self,
name: str,
search_pattern_by_ending: Dict[str, str],
upload_bucket: Union[str, Callable],
):
self.name = name
self.id = name.lower()
self.search_pattern_by_ending = search_pattern_by_ending
self._upload_bucket = upload_bucket
def get_upload_bucket(self, ending=None):
"""
        Upload bucket can be a string, or a lambda taking the filename ending as an argument.
"""
if isinstance(self._upload_bucket, str):
return self._upload_bucket
assert ending
return self._upload_bucket(ending)
def __repr__(self):
return self.name
def transfer(self, hbatch):
"""
Search files in buckets using search patterns and copy to CPG upload buckets
"""
for region, buckets in SRC_BUCKETS[NAMESPACE].items():
for bucket in buckets:
for ending, pattern in self.search_pattern_by_ending.items():
_add_batch_job(
cmd=(
f"gsutil ls '{bucket}/{pattern}'"
f' | gsutil -m cp -I {self.get_upload_bucket(ending)}/'
),
hbatch=hbatch,
job_name=(
f'{region}: transfer {self.name} {ending} files '
f'from {bucket}'
),
)
# Instantiating file sources
SOURCES = {
s.name: s
for s in [
Source(
name='CRAM',
search_pattern_by_ending={
'cram': '**/call-ConvertToCram/**/*.cram',
'cram.crai': '**/call-ConvertToCram/**/*.cram.crai',
'cram.md5': '**/call-ConvertToCram/**/*.cram.md5',
},
upload_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/cram',
),
Source(
name='GVCF',
search_pattern_by_ending={
'hard-filtered.g.vcf.gz': '**/call-MergeVCFs/**/*.hard-filtered.g.vcf.gz',
'hard-filtered.g.vcf.gz.tbi': '**/call-MergeVCFs/**/*.hard-filtered.g.vcf.gz.tbi',
},
upload_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/gvcf',
),
Source(
name='QC',
upload_bucket=lambda ending: f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-upload/QC/{ending}',
search_pattern_by_ending={
e: f'**/*.{e}'
for e in [
'alignment_summary_metrics',
'bait_bias_detail_metrics',
'bait_bias_summary_metrics',
'detail_metrics',
'duplicate_metrics',
'insert_size_metrics',
'pre_adapter_detail_metrics',
'pre_adapter_summary_metrics',
'quality_distribution_metrics',
'raw_wgs_metrics',
'summary_metrics',
'variant_calling_detail_metrics',
'variant_calling_summary_metrics',
'wgs_metrics',
'preBqsr.selfSM',
]
},
),
]
}
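# Illustration of the two upload-bucket styles configured above (values follow from
# NAGIM_PROJ_ID and NAMESPACE; shown as comments only):
#   SOURCES['CRAM'].get_upload_bucket()             -> 'gs://cpg-nagim-main-upload/cram'
#   SOURCES['QC'].get_upload_bucket('wgs_metrics')  -> 'gs://cpg-nagim-main-upload/QC/wgs_metrics'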
# Metrics we extract from MultiQC and put into Sequence.meta and Analysis
QC_METRICS = [
# id, multiqc id
('freemix', 'FREEMIX'),
('median_coverage', 'MEDIAN_COVERAGE'),
('pct_chimeras', 'PCT_CHIMERAS'),
('pct_30x', 'PCT_30X'),
('pct_reads_aligned_in_pairs', 'PCT_READS_ALIGNED_IN_PAIRS'),
('percent_duplication', 'PERCENT_DUPLICATION'),
('median_insert_size', 'summed_median'),
]
# Only process the following sources:
SOURCES_TO_PROCESS = [
'QC',
# 'GVCF',
# 'CRAM',
]
@dataclass
class Sample:
"""
Represent a parsed sample, so we can check that all required files for
a sample exist, and also populate and fix sample IDs.
"""
nagim_id: str
cpg_id: Optional[str] = None
ext_id: Optional[str] = None
project_id: Optional[str] = None
# File paths indexed by Source and file ending
files: Dict[Tuple[str, str], str] = field(default_factory=dict)
gvcf: Optional[str] = None
tbi: Optional[str] = None
cram: Optional[str] = None
crai: Optional[str] = None
cram_md5: Optional[str] = None
# File paths indexed by ending
qc_files: Dict[str, str] = field(default_factory=dict)
# QC stats indexed by ending
qc_values: Dict[str, str] = field(default_factory=dict)
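# A minimal sketch of how a Sample is populated during parsing (the IDs and the
# path below are hypothetical):
#   s = Sample(nagim_id='SAMPLE1', project_id='tob-wgs')
#   s.files[('GVCF', 'hard-filtered.g.vcf.gz')] = 'gs://.../SAMPLE1.hard-filtered.g.vcf.gz'
#   s.gvcf = s.files[('GVCF', 'hard-filtered.g.vcf.gz')]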
@click.group()
def cli():
"""
    Click group to handle the multiple CLI commands defined below.
"""
@cli.command()
@click.option('--tmp-dir', 'tmp_dir')
@click.option('--use-batch', is_flag=True, help='Use a Batch job to transfer data')
@click.option('--dry-run', 'dry_run', is_flag=True)
def transfer(
tmp_dir,
use_batch: bool,
dry_run: bool,
):
"""
    Transfer data from the Terra workspaces to the GCP bucket. Must be run with
    a personal account, because read permissions on the Terra buckets are granted
    to the Terra user emails with whom the workspace is shared, so Hail service
    accounts won't work here.
"""
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
if use_batch:
hbatch = setup_batch(
title='Transferring NAGIM data',
keep_scratch=False,
tmp_bucket=f'cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-tmp',
analysis_project_name=NAGIM_PROJ_ID,
)
else:
hbatch = None
for source in SOURCES_TO_PROCESS:
SOURCES[source].transfer(hbatch)
if use_batch:
hbatch.run(wait=True, dry_run=dry_run)
if dry_run:
return
samples = _parse_sample_project_map(SAMPLE_TO_PROJECT_TSV_PATH)
    # Find GVCFs, CRAMs and other files after transferring, and check that all
    # of them have corresponding tbi/crai/md5 files.
_find_upload_files(
samples,
tmp_dir,
overwrite=True, # Force finding files
)
def _find_upload_files(samples: List[Sample], tmp_dir, overwrite=False):
"""
Populate fields for each sample and verify that every sample has an expected
set of files.
"""
sample_by_sid = {s.nagim_id: s for s in samples}
# Find files
for source_name in SOURCES_TO_PROCESS:
source = SOURCES[source_name]
for ending in source.search_pattern_by_ending:
paths = _cache_bucket_ls(
ending_to_search=ending,
source_bucket=source.get_upload_bucket(ending),
tmp_dir=tmp_dir,
overwrite=overwrite,
)
for path in paths:
assert path.endswith(f'.{ending}')
sid = basename(path)[: -len(f'.{ending}')]
if sid not in sample_by_sid:
continue
sample_by_sid[sid].files[(source.name, ending)] = path
# Tally found files
for source_name in SOURCES_TO_PROCESS:
source = SOURCES[source_name]
for ending in source.search_pattern_by_ending:
found_samples = len(
[s for s in sample_by_sid.values() if (source.name, ending) in s.files]
)
logger.info(
f'Found {found_samples}/{len(sample_by_sid)} '
f'{source.name}/{ending} files'
)
# For each sample, verify that the set of found files is consistent
for sample in sample_by_sid.values():
if 'GVCF' in SOURCES_TO_PROCESS:
sample.gvcf = sample.files.get(('GVCF', 'hard-filtered.g.vcf.gz'))
sample.tbi = sample.files.get(('GVCF', 'hard-filtered.g.vcf.gz.tbi'))
if sample.gvcf and not sample.tbi:
logger.warning(f'Found GVCF without TBI: {sample.nagim_id}')
elif sample.tbi and not sample.gvcf:
logger.warning(f'Found TBI without GVCF: {sample.nagim_id}')
elif not sample.gvcf:
                logger.warning(f'GVCF not found: {sample.nagim_id}')
if 'CRAM' in SOURCES_TO_PROCESS:
sample.cram = sample.files.get((SOURCES['CRAM'].name, 'cram'))
sample.crai = sample.files.get((SOURCES['CRAM'].name, 'cram.crai'))
sample.cram_md5 = sample.files.get((SOURCES['CRAM'].name, 'cram.md5'))
if sample.cram and not sample.crai:
logger.warning(f'Found CRAM without CRAI: {sample.nagim_id}')
if sample.cram and not sample.cram_md5:
logger.warning(f'Found CRAM without md5: {sample.nagim_id}')
if sample.crai and not sample.cram:
logger.warning(f'Found CRAI without CRAM: {sample.nagim_id}')
if 'QC' in SOURCES_TO_PROCESS:
for qc_ending in [
'alignment_summary_metrics',
'duplicate_metrics',
'insert_size_metrics',
'preBqsr.selfSM',
'wgs_metrics',
]:
no_qc = 0
key = (SOURCES['QC'].name, qc_ending)
if not sample.files.get(key):
if sample.gvcf:
logger.warning(
f'Found GVCF without QC {qc_ending}: {sample.nagim_id}'
)
no_qc += 1
continue
if no_qc:
                    logger.warning(f'QC {qc_ending} not found for {no_qc} samples')
sample.qc_files[qc_ending] = sample.files[key]
@cli.command()
@click.option('--tmp-dir', 'tmp_dir')
@click.option(
'--confirm', is_flag=True, help='Confirm with user input before updating server'
)
@click.option('--dry-run', 'dry_run', is_flag=True)
@click.option(
'--overwrite-multiqc',
'overwrite_multiqc',
is_flag=True,
help='Redo MultiQC even if report/json exist',
)
@click.option(
'--skip-checking-objects',
'skip_checking_objects',
is_flag=True,
help='Do not check objects on buckets (existence, size, md5)',
)
def parse(
tmp_dir,
confirm: bool,
dry_run: bool,
overwrite_multiqc: bool,
skip_checking_objects: bool,
):
"""
Assuming the data is transferred to the CPG bucket, populate the SM projects.
"""
if not tmp_dir:
tmp_dir = tempfile.gettempdir()
samples = _parse_sample_project_map(SAMPLE_TO_PROJECT_TSV_PATH)
    # Find GVCFs, CRAMs and other files after transferring, and check that all
    # of them have corresponding tbi/crai/md5 files.
_find_upload_files(samples, tmp_dir)
    # Some samples processed with Terra use CPG IDs, so check whether we already
    # have them in the SMDB and fix the external IDs.
_fix_sample_ids(samples)
multiqc_html_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-web/qc/multiqc.html'
)
multiqc_json_path = join(
f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-analysis/qc/multiqc_data.json'
)
if 'QC' in SOURCES_TO_PROCESS:
logger.info('Running MultiQC on QC files')
parsed_json_fpath = _run_multiqc(
samples, multiqc_html_path, multiqc_json_path, overwrite=overwrite_multiqc
)
gfs = gcsfs.GCSFileSystem()
with gfs.open(parsed_json_fpath) as f:
row_by_sample = json.load(f)
for s in samples:
if s.nagim_id in row_by_sample:
s.qc_values = row_by_sample[s.nagim_id]
# Creating a parser for each project separately, because `sample_metadata_project`
# is an initialization parameter, and we want to write to multiple projects.
for proj in PROJECT_ID_MAP.values():
sm_proj = _get_sm_proj_id(proj)
sample_tsv_file = join(tmp_dir, f'sm-nagim-parser-samples-{sm_proj}.csv')
rows = []
for s in samples:
if s.project_id != proj:
continue
row = dict(
cpg_id=s.cpg_id,
ext_id=s.ext_id,
gvcf=s.gvcf,
cram=s.cram,
project=s.project_id,
)
for metric, val in s.qc_values.items():
row[f'qc_value_{metric}'] = val
rows.append(row)
if len(rows) == 0:
logger.info(f'No samples for project {sm_proj} found, skipping')
continue
df = pd.DataFrame(rows)
df.to_csv(sample_tsv_file, index=False)
logger.info(
f'Processing {len(df)} samples for project {sm_proj}, '
f'sample manifest: {sample_tsv_file}'
)
parser = NagimParser(
path_prefix=None,
sample_metadata_project=sm_proj,
skip_checking_gcs_objects=skip_checking_objects,
verbose=False,
multiqc_html_path=multiqc_html_path,
multiqc_json_path=multiqc_json_path,
)
with open(sample_tsv_file) as f:
parser.parse_manifest(f, dry_run=dry_run, confirm=confirm)
def _run_multiqc(
samples: List[Sample],
html_fpath: str,
json_fpath: str,
overwrite: bool = False,
) -> str:
"""
Runs MultiQC on QC files from Picard and VerifyBAMID.
    Generates an HTML report and puts it into the nagim web bucket.
    Generates a JSON with metrics, extracts useful metrics into another JSON
    indexed by sample, and returns the path to this JSON.
"""
tmp_bucket = f'gs://cpg-{NAGIM_PROJ_ID}-{NAMESPACE}-tmp/qc'
row_by_sample_json_path = f'{tmp_bucket}/parsed-qc.json'
if can_reuse(row_by_sample_json_path, overwrite):
return row_by_sample_json_path
b = setup_batch(
title='Run MultiQC on NAGIM',
keep_scratch=False,
tmp_bucket=f'cpg-{NAGIM_PROJ_ID}-main-tmp',
analysis_project_name=NAGIM_PROJ_ID,
)
if not can_reuse([json_fpath, html_fpath], overwrite):
j = b.new_job('Run MultiQC')
j.image(DRIVER_IMAGE)
qc_endings = set()
qc_paths = []
for s in samples:
for qc_ending, qc_path in s.qc_files.items():
qc_paths.append(qc_path)
qc_endings.add(qc_ending)
file_list_path = f'{tmp_bucket}/multiqc-file-list.txt'
df = pd.DataFrame({'_': path} for path in qc_paths)
df.to_csv(file_list_path, header=None, index=None)
file_list = b.read_input(file_list_path)
j.env('GOOGLE_APPLICATION_CREDENTIALS', '/gsa-key/key.json')
j.command(f'pip install multiqc')
j.cpu(16)
j.storage('100G')
j.command(
f'gcloud -q auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS'
)
j.command(f'mkdir inputs')
j.command(f'cat {file_list} | gsutil -m cp -I inputs/')
ending_list = ', '.join(f'.{ending}' for ending in qc_endings)
mqc_conf = f'extra_fn_clean_exts: [{ending_list}]'
j.command(
f'multiqc inputs -o output -f --fn_as_s_name --cl_config "{mqc_conf}"'
)
j.command(f'cp output/multiqc_report.html {j.report_html}')
j.command(f'cp output/multiqc_data/multiqc_data.json {j.json}')
b.write_output(j.report_html, html_fpath)
b.write_output(j.json, json_fpath)
logger.info(f'Written MultiQC reports to {html_fpath}')
multiqc_json = j.json
else:
multiqc_json = b.read_input(json_fpath)
def _parse_multiqc_json(json_fpath) -> Dict:
with open(json_fpath) as f:
d = json.load(f)
row_by_sample = {}
for tool_d in d['report_general_stats_data']:
for sample, val_by_metric in tool_d.items():
if sample not in row_by_sample:
row_by_sample[sample] = dict(s=sample)
row = row_by_sample[sample]
for metric, multiqc_metric in QC_METRICS:
if multiqc_metric in val_by_metric:
row[metric] = val_by_metric[multiqc_metric]
return row_by_sample
parse_j = b.new_python_job('Parse MultiQC JSON')
row_by_sample_resource = parse_j.call(_parse_multiqc_json, multiqc_json)
b.write_output(row_by_sample_resource.as_json(), row_by_sample_json_path)
b.run(wait=True)
return row_by_sample_json_path
def _get_sm_proj_id(proj: str, namespace='main'):
"""
Matching the project ID to a sample-metadata project.
"""
if proj == 'csiro-als': # We don't have a project for ALS yet
proj = 'nagim'
if namespace != 'main':
proj = f'{proj}-test'
return proj
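# Examples implied by the mapping above (shown as comments only):
#   _get_sm_proj_id('acute-care')                    -> 'acute-care'
#   _get_sm_proj_id('acute-care', namespace='test')  -> 'acute-care-test'
#   _get_sm_proj_id('csiro-als')                     -> 'nagim'  (no ALS project yet)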
def _fix_sample_ids(samples: List[Sample], namespace: str = 'main'):
"""
    Some samples processed with Terra use CPG IDs, so check whether we already
    have them in the SMDB and fix the external IDs.
"""
sm_proj_ids = [_get_sm_proj_id(proj, namespace) for proj in PROJECT_ID_MAP.values()]
sapi = SampleApi()
sm_sample_dicts = sapi.get_samples(
body_get_samples_by_criteria_api_v1_sample_post={
'project_ids': sm_proj_ids,
'active': True,
}
)
cpgid_to_extid = {s['id']: s['external_id'] for s in sm_sample_dicts}
extid_to_cpgid = {s['external_id']: s['id'] for s in sm_sample_dicts}
# Fixing sample IDs. Some samples (tob-wgs and acute-care)
# have CPG IDs as nagim ids, some don't
for sample in samples:
if sample.nagim_id in extid_to_cpgid:
sample.ext_id = sample.nagim_id
sample.cpg_id = extid_to_cpgid[sample.nagim_id]
elif sample.nagim_id in cpgid_to_extid:
sample.ext_id = cpgid_to_extid[sample.nagim_id]
sample.cpg_id = sample.nagim_id
else:
sample.ext_id = sample.nagim_id
def _parse_sample_project_map(tsv_path: str) -> List[Sample]:
"""
    Initialize a list of Sample objects and set their project IDs.
"""
sample_by_nagim_id = {}
df = pd.read_csv(tsv_path, sep='\t', header=None, names=['nagim_id', 'proj'])
for (nagim_id, proj) in zip(df.nagim_id, df.proj):
if proj in PROJECT_ID_MAP.values():
cpg_proj = proj
elif proj in PROJECT_ID_MAP:
cpg_proj = PROJECT_ID_MAP[proj]
else:
raise ValueError(
f'Unknown project {proj}. Known project IDs: {PROJECT_ID_MAP}'
)
sample_by_nagim_id[nagim_id] = Sample(
nagim_id=nagim_id,
project_id=cpg_proj,
)
logger.info(f'Read {len(sample_by_nagim_id)} samples from {tsv_path}')
return list(sample_by_nagim_id.values())
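# The expected TSV has two tab-separated columns and no header, e.g. (sample IDs
# below are hypothetical):
#   SAMPLE-0001<TAB>TOB
#   SAMPLE-0002<TAB>acute_care
# Both KCCG project codes (keys of PROJECT_ID_MAP) and CPG project IDs (its values)
# are accepted in the second column.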
def _add_batch_job(cmd: str, hbatch, job_name: str):
"""
Add cmd as a Batch job.
"""
j = hbatch.new_job(job_name)
j.cpu(32)
j.memory('lowmem')
j.image('australia-southeast1-docker.pkg.dev/cpg-common/images/aspera:v1')
j.command('export GOOGLE_APPLICATION_CREDENTIALS=/gsa-key/key.json')
j.command(
'gcloud -q auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS'
)
j.command(cmd)
return j
def _call(cmd):
"""
Call subprocess command locally.
"""
logger.info(cmd)
subprocess.run(cmd, shell=True, check=True)
class NagimParser(GenericParser):
"""
Inherits from sample_metadata's GenericParser class and implements parsing
logic specific to the NAGIM project.
"""
def get_sample_meta(self, sample_id: str, row: GroupedRow) -> Dict[str, Any]:
return {}
def __init__(self, multiqc_html_path, multiqc_json_path, **kwargs):
super().__init__(**kwargs)
self.multiqc_html_path = multiqc_html_path
self.multiqc_json_path = multiqc_json_path
def get_sample_id(self, row: Dict[str, Any]) -> str:
return row['ext_id']
def get_analyses(
self,
sample_id: str,
row: GroupedRow,
cpg_id: Optional[str],
) -> List[AnalysisModel]:
"""
Creating "staging" analyses for uploaded GVCFs and CRAMs.
"""
assert not isinstance(row, list)
results = []
for analysis_type in ['gvcf', 'cram']:
file_path = row.get(analysis_type)
if not file_path:
continue
results.append(
AnalysisModel(
sample_ids=['<none>'],
type=AnalysisType(analysis_type),
status=AnalysisStatus('completed'),
output=file_path,
meta={
                        # To distinguish TOB samples processed on Terra as part of NAGIM
                        # from those processed at the KCCG:
'source': 'nagim',
# Indicating that files need to be renamed to use CPG IDs,
# and moved from -upload to -test/-main. (For gvcf, also
# need to reblock):
'staging': True,
'project': row.get('project'),
},
)
)
return results
def get_qc_meta(self, sample_id: str, row: GroupedRow) -> Optional[Dict[str, Any]]:
"""
Create a QC analysis entry for found QC files.
"""
assert not isinstance(row, list)
if 'QC' not in SOURCES:
return None
qc_data = {}
for metric, _ in QC_METRICS:
value = row.get(f'qc_value_{metric}')
if not value:
continue
qc_data[metric] = value
return {
'metrics': qc_data,
'html_file': self.multiqc_html_path,
'json_file': self.multiqc_json_path,
            # To distinguish TOB samples processed on Terra as part of NAGIM
            # from those processed at the KCCG:
'source': 'nagim',
'project': row.get('project'),
}
def get_sequence_meta(self, sample_id: str, row: GroupedRow) -> Dict[str, Any]:
if isinstance(row, list):
row = row[0]
result = {}
for metric, _ in QC_METRICS:
if f'qc_value_{metric}' in row:
result[metric] = row[f'qc_value_{metric}']
return result
def _cache_bucket_ls(
ending_to_search: str,
source_bucket,
tmp_dir,
overwrite,
) -> List[str]:
output_path = join(tmp_dir, f'sm-nagim-parser-gs-ls-{ending_to_search}.txt')
if overwrite or not exists(output_path):
_call(f'test ! -e {output_path} || rm {output_path}')
_call(f'touch {output_path}')
_call(f'gsutil ls "{source_bucket}/*.{ending_to_search}" >> {output_path}')
with open(output_path) as f:
return [line.strip() for line in f.readlines() if line.strip()]
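# _cache_bucket_ls caches the output of `gsutil ls` for a given file ending under
# tmp_dir, so repeated parser runs don't re-list the bucket; pass overwrite=True
# to force a fresh listing.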
cli.add_command(transfer)
cli.add_command(parse)
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
cli() # pylint: disable=unexpected-keyword-arg
```
|
{
"source": "jeremiahws/DLAE",
"score": 2
}
|
#### File: jeremiahws/DLAE/perfusion_cgan_experiment.py
```python
import tensorflow as tf
import numpy as np
import os
import argparse
import h5py
from src.utils.data_generators import FCN2DDatasetGenerator
class batch_norm(object):
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
def __call__(self, x, train=True):
return tf.contrib.layers.batch_norm(x, decay=self.momentum,
updates_collections=None,
epsilon=self.epsilon,
scale=True, scope=self.name)
def conv2d(image, output_dim, k_size=5, stride=2, stddev=0.02, name="conv2d"):
with tf.variable_scope(name):
if name[0:2] == 'g_':
w = tf.get_variable("kernel", shape=[k_size, k_size, image.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=stddev),
regularizer=orthogonal_regularizer(0.0001))
else:
w = tf.get_variable("kernel", shape=[k_size, k_size, image.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=stddev),
regularizer=None)
x = tf.nn.conv2d(input=image, filter=spectral_norm(w), strides=[1, stride, stride, 1], padding='SAME')
bias = tf.get_variable("bias", [output_dim], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
return x
def tpconv2d(image, output_shape, k_size=5, stride=2, stddev=0.02, name='tpconv2d', with_w=False):
with tf.variable_scope(name):
x_shape = image.get_shape().as_list()
output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, output_shape[-1]]
w = tf.get_variable("kernel", shape=[k_size, k_size, output_shape[-1], image.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=stddev),
regularizer=orthogonal_regularizer(0.0001))
x = tf.nn.conv2d_transpose(image, filter=spectral_norm(w),
output_shape=output_shape, strides=[1, stride, stride, 1])
bias = tf.get_variable("bias", [output_shape[-1]], initializer=tf.constant_initializer(0.0))
x = tf.nn.bias_add(x, bias)
if with_w:
return x, w, bias
else:
return x
def orthogonal_regularizer(scale):
def ortho_reg(w) :
_, _, _, c = w.get_shape().as_list()
w = tf.reshape(w, [-1, c])
identity = tf.eye(c)
w_transpose = tf.transpose(w)
w_mul = tf.matmul(w_transpose, w)
reg = tf.subtract(w_mul, identity)
ortho_loss = tf.nn.l2_loss(reg)
return scale * ortho_loss
return ortho_reg
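# The regularizer above reshapes the kernel to (k*k*c_in, c_out) and penalises
# scale * ||W^T W - I||_F^2 / 2 (tf.nn.l2_loss halves the squared norm), pushing
# the output filters towards an orthonormal set. Rough NumPy sanity check
# (comments only): for w = np.eye(2), np.sum((w.T @ w - np.eye(2)) ** 2) / 2 == 0.0.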
def leaky_relu(x, alpha=0.2):
return tf.maximum(x, alpha * x)
def linear(tensor, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
shape = tensor.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size],
initializer=tf.constant_initializer(bias_start))
if with_w:
return tf.matmul(tensor, matrix) + bias, matrix, bias
else:
return tf.matmul(tensor, matrix) + bias
def spectral_norm(w, iteration=1):
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
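# spectral_norm() runs one power-iteration step per execution to estimate the
# largest singular value sigma of the reshaped kernel and returns w / sigma,
# nudging each layer towards being approximately 1-Lipschitz. A rough NumPy
# analogue (comments only, not used by this script):
#   sigma = np.linalg.svd(w.reshape(-1, w.shape[-1]), compute_uv=False)[0]
#   w_sn = w / sigma
# The non-trainable variable `u` carries the power-iteration state between runs.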
class CascadedCGAN(object):
def __init__(self, sess, image_size=256,
batch_size=1, output_size=256,
gf_dim=64, df_dim=64, l1_lambda=100,
input_c_dim=60, output_c_dim=1,
checkpoint_dir=None,
load_checkpoint=False,
train_data_gen=None,
valid_data_gen=None):
self.sess = sess
self.batch_size = batch_size
self.image_size = image_size
self.output_size = output_size
self.gf_dim = gf_dim
self.df_dim = df_dim
self.input_c_dim = input_c_dim
self.output_c_dim = output_c_dim
self.l1_lambda = l1_lambda
        # batch normalization: deals with poor initialization and helps gradient flow
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn_e2 = batch_norm(name='g_bn_e2')
self.g_bn_e3 = batch_norm(name='g_bn_e3')
self.g_bn_e4 = batch_norm(name='g_bn_e4')
self.g_bn_e5 = batch_norm(name='g_bn_e5')
self.g_bn_e6 = batch_norm(name='g_bn_e6')
self.g_bn_e7 = batch_norm(name='g_bn_e7')
self.g_bn_e8 = batch_norm(name='g_bn_e8')
self.g_bn_d1 = batch_norm(name='g_bn_d1')
self.g_bn_d2 = batch_norm(name='g_bn_d2')
self.g_bn_d3 = batch_norm(name='g_bn_d3')
self.g_bn_d4 = batch_norm(name='g_bn_d4')
self.g_bn_d5 = batch_norm(name='g_bn_d5')
self.g_bn_d6 = batch_norm(name='g_bn_d6')
self.g_bn_d7 = batch_norm(name='g_bn_d7')
self.g_bn_d8 = batch_norm(name='g_bn_d8')
self.g_bn_e1_2 = batch_norm(name='g_bn_e1_2')
self.g_bn_e2_2 = batch_norm(name='g_bn_e2_2')
self.g_bn_e3_2 = batch_norm(name='g_bn_e3_2')
self.g_bn_e4_2 = batch_norm(name='g_bn_e4_2')
self.g_bn_e5_2 = batch_norm(name='g_bn_e5_2')
self.g_bn_e6_2 = batch_norm(name='g_bn_e6_2')
self.g_bn_e7_2 = batch_norm(name='g_bn_e7_2')
self.g_bn_e8_2 = batch_norm(name='g_bn_e8_2')
self.g_bn_d1_2 = batch_norm(name='g_bn_d1_2')
self.g_bn_d2_2 = batch_norm(name='g_bn_d2_2')
self.g_bn_d3_2 = batch_norm(name='g_bn_d3_2')
self.g_bn_d4_2 = batch_norm(name='g_bn_d4_2')
self.g_bn_d5_2 = batch_norm(name='g_bn_d5_2')
self.g_bn_d6_2 = batch_norm(name='g_bn_d6_2')
self.g_bn_d7_2 = batch_norm(name='g_bn_d7_2')
self.g_bn_d8_2 = batch_norm(name='g_bn_d8_2')
self.g_bn_e1_3 = batch_norm(name='g_bn_e1_3')
self.g_bn_e2_3 = batch_norm(name='g_bn_e2_3')
self.g_bn_e3_3 = batch_norm(name='g_bn_e3_3')
self.g_bn_e4_3 = batch_norm(name='g_bn_e4_3')
self.g_bn_e5_3 = batch_norm(name='g_bn_e5_3')
self.g_bn_e6_3 = batch_norm(name='g_bn_e6_3')
self.g_bn_e7_3 = batch_norm(name='g_bn_e7_3')
self.g_bn_e8_3 = batch_norm(name='g_bn_e8_3')
self.g_bn_d1_3 = batch_norm(name='g_bn_d1_3')
self.g_bn_d2_3 = batch_norm(name='g_bn_d2_3')
self.g_bn_d3_3 = batch_norm(name='g_bn_d3_3')
self.g_bn_d4_3 = batch_norm(name='g_bn_d4_3')
self.g_bn_d5_3 = batch_norm(name='g_bn_d5_3')
self.g_bn_d6_3 = batch_norm(name='g_bn_d6_3')
self.g_bn_d7_3 = batch_norm(name='g_bn_d7_3')
self.checkpoint_dir = checkpoint_dir
self.load_checkpoint = load_checkpoint
self.train_batches = len(train_data_gen)
self.train_data_gen = train_data_gen.generate()
self.valid_data_gen = valid_data_gen.generate()
self.build_model()
def build_model(self):
self.train_data = tf.placeholder(tf.float32,
[self.batch_size, self.image_size, self.image_size,
self.input_c_dim + self.output_c_dim],
name='real_dce_and_bv_images_train')
self.val_data = tf.placeholder(tf.float32,
[self.batch_size, self.image_size, self.image_size,
self.input_c_dim + self.output_c_dim],
name='real_dce_and_bv_images_val')
self.real_dce_t = self.train_data[:, :, :, :self.input_c_dim]
self.real_bv_t = self.train_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.real_dce_v = self.val_data[:, :, :, :self.input_c_dim]
self.real_bv_v = self.val_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.fake_bv_t = self.generator(self.real_dce_t)
self.real_dceANDbv = tf.concat([self.real_dce_t, self.real_bv_t], 3)
self.fake_dceANDbv = tf.concat([self.real_dce_t, self.fake_bv_t], 3)
self.D, self.D_logits = self.discriminator(self.real_dceANDbv, reuse=False)
self.D_, self.D_logits_ = self.discriminator(self.fake_dceANDbv, reuse=True)
self.fake_bv_t_sample = self.sampler(self.real_dce_t)
self.fake_bv_v_sample = self.sampler(self.real_dce_v)
self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
labels=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.zeros_like(self.D_)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.l1_penalty = self.l1_lambda * tf.reduce_mean(tf.abs(self.real_bv_t - self.fake_bv_t))
self.l1_penalty_v = self.l1_lambda * tf.reduce_mean(tf.abs(self.real_bv_v - self.fake_bv_v_sample))
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.ones_like(self.D_))) + self.l1_penalty
self.d_sum = tf.summary.histogram("d", self.D)
self.d__sum = tf.summary.histogram("d_", self.D_)
self.bv_t_sum = tf.summary.image('real_vs_fake_bv_train', tf.concat([self.real_bv_t, self.fake_bv_t_sample], 2))
self.dce_t_ex = tf.concat([self.real_dce_t[:, :, :, 5],
self.real_dce_t[:, :, :, 10],
self.real_dce_t[:, :, :, 25],
self.real_dce_t[:, :, :, 40]], 2)
self.dce_t_ex = tf.expand_dims(self.dce_t_ex, axis=-1)
self.dce_t_sum = tf.summary.image('dce_input_train', self.dce_t_ex)
self.bv_v_sum = tf.summary.image('real_vs_fake_bv_val', tf.concat([self.real_bv_v, self.fake_bv_v_sample], 2))
self.dce_v_ex = tf.concat([self.real_dce_v[:, :, :, 5],
self.real_dce_v[:, :, :, 10],
self.real_dce_v[:, :, :, 25],
self.real_dce_v[:, :, :, 40]], 2)
self.dce_v_ex = tf.expand_dims(self.dce_v_ex, axis=-1)
self.dce_v_sum = tf.summary.image('dce_input_val', self.dce_v_ex)
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.l1_penalty_sum = tf.summary.scalar("l1_penalty", self.l1_penalty)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
self.l1_penalty_sum_v = tf.summary.scalar("l1_penalty_v", self.l1_penalty_v)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def train_graph(self, lr=0.0002, beta1=0.5, epochs=100):
d_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(self.g_loss, var_list=self.g_vars)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.g_sum = tf.summary.merge([self.d__sum, self.bv_t_sum,
self.dce_t_sum, self.bv_v_sum,
self.dce_v_sum, self.d_loss_fake_sum,
self.g_loss_sum, self.l1_penalty_sum,
self.l1_penalty_sum_v])
self.d_sum = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
counter = 1
if self.load_checkpoint is True:
self.load_model(self.checkpoint_dir)
for epoch in range(epochs):
for idx in range(self.train_batches):
t_data = next(self.train_data_gen)
train_sample = np.concatenate((t_data[0], t_data[1]), axis=-1)
v_data = next(self.valid_data_gen)
valid_sample = np.concatenate((v_data[0], v_data[1]), axis=-1)
# Update D network
_, summary_str = self.sess.run([d_optim, self.d_sum], feed_dict={self.train_data: train_sample})
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim, self.g_sum], feed_dict={self.train_data: train_sample,
self.val_data: valid_sample})
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
_, summary_str = self.sess.run([g_optim, self.g_sum], feed_dict={self.train_data: train_sample,
self.val_data: valid_sample})
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({self.train_data: train_sample})
errD_real = self.d_loss_real.eval({self.train_data: train_sample})
errG = self.g_loss.eval({self.train_data: train_sample})
print(errD_fake, errD_real, errG)
counter += 1
                # TODO: print summary
if np.mod(counter, 500) == 2:
self.save_model(self.checkpoint_dir, counter)
def discriminator(self, image, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
else:
                assert not tf.get_variable_scope().reuse
h0 = leaky_relu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = leaky_relu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
h2 = leaky_relu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
h3 = leaky_relu(self.d_bn3(conv2d(h2, self.df_dim*8, stride=1, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h4), h4
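    # The generator below stacks two U-Nets: an 8-level encoder (e1..e8) and a
    # mirrored decoder (d1..d8) with skip connections and dropout on the first
    # three decoder levels; its output feeds a second U-Net of the same shape
    # (the *_2 layers). A third cascade (the *_3 layers) is present but commented out.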
def generator(self, image):
with tf.variable_scope("generator") as scope:
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
e2 = self.g_bn_e2(conv2d(leaky_relu(e1), self.gf_dim*2, name='g_e2_conv'))
e3 = self.g_bn_e3(conv2d(leaky_relu(e2), self.gf_dim*4, name='g_e3_conv'))
e4 = self.g_bn_e4(conv2d(leaky_relu(e3), self.gf_dim*8, name='g_e4_conv'))
e5 = self.g_bn_e5(conv2d(leaky_relu(e4), self.gf_dim*8, name='g_e5_conv'))
e6 = self.g_bn_e6(conv2d(leaky_relu(e5), self.gf_dim*8, name='g_e6_conv'))
e7 = self.g_bn_e7(conv2d(leaky_relu(e6), self.gf_dim*8, name='g_e7_conv'))
e8 = self.g_bn_e8(conv2d(leaky_relu(e7), self.gf_dim*8, name='g_e8_conv'))
self.d1, self.d1_w, self.d1_b = tpconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim*8],
name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 = tf.concat([self.g_bn_d1(self.d1), e7], 3)
self.d2, self.d2_w, self.d2_b = tpconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim * 8],
name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 = tf.concat([self.g_bn_d2(self.d2), e6], 3)
self.d3, self.d3_w, self.d3_b = tpconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim * 8],
name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 = tf.concat([self.g_bn_d3(self.d3), e5], 3)
self.d4, self.d4_w, self.d4_b = tpconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim*8],
name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
self.d5, self.d5_w, self.d5_b = tpconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim*4],
name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
self.d6, self.d6_w, self.d6_b = tpconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim*2],
name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
self.d7, self.d7_w, self.d7_b = tpconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
self.d8, self.d8_w, self.d8_b = tpconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8', with_w=True)
d8 = self.g_bn_d8(self.d8)
e1_2 = self.g_bn_e1_2(conv2d(leaky_relu(d8), self.gf_dim, name='g_e1_conv_2'))
e2_2 = self.g_bn_e2_2(conv2d(leaky_relu(e1_2), self.gf_dim * 2, name='g_e2_conv_2'))
e3_2 = self.g_bn_e3_2(conv2d(leaky_relu(e2_2), self.gf_dim * 4, name='g_e3_conv_2'))
e4_2 = self.g_bn_e4_2(conv2d(leaky_relu(e3_2), self.gf_dim * 8, name='g_e4_conv_2'))
e5_2 = self.g_bn_e5_2(conv2d(leaky_relu(e4_2), self.gf_dim * 8, name='g_e5_conv_2'))
e6_2 = self.g_bn_e6_2(conv2d(leaky_relu(e5_2), self.gf_dim * 8, name='g_e6_conv_2'))
e7_2 = self.g_bn_e7_2(conv2d(leaky_relu(e6_2), self.gf_dim * 8, name='g_e7_conv_2'))
e8_2 = self.g_bn_e8_2(conv2d(leaky_relu(e7_2), self.gf_dim * 8, name='g_e8_conv_2'))
self.d1_2, self.d1_w_2, self.d1_b_2 = tpconv2d(tf.nn.relu(e8_2),
[self.batch_size, s128, s128, self.gf_dim * 8],
name='g_d1_2', with_w=True)
d1_2 = tf.nn.dropout(self.g_bn_d1_2(self.d1_2), 0.5)
d1_2 = tf.concat([d1_2, e7_2], 3)
# d1_2 = tf.concat([self.g_bn_d1_2(self.d1_2), e7_2], 3)
self.d2_2, self.d2_w_2, self.d2_b_2 = tpconv2d(tf.nn.relu(d1_2),
[self.batch_size, s64, s64, self.gf_dim * 8],
name='g_d2_2', with_w=True)
d2_2 = tf.nn.dropout(self.g_bn_d2_2(self.d2_2), 0.5)
d2_2 = tf.concat([d2_2, e6_2], 3)
# d2_2 = tf.concat([self.g_bn_d2_2(self.d2_2), e6_2], 3)
self.d3_2, self.d3_w_2, self.d3_b_2 = tpconv2d(tf.nn.relu(d2_2),
[self.batch_size, s32, s32, self.gf_dim * 8],
name='g_d3_2', with_w=True)
d3_2 = tf.nn.dropout(self.g_bn_d3_2(self.d3_2), 0.5)
d3_2 = tf.concat([d3_2, e5_2], 3)
# d3_2 = tf.concat([self.g_bn_d3_2(self.d3_2), e5_2], 3)
self.d4_2, self.d4_w_2, self.d4_b_2 = tpconv2d(tf.nn.relu(d3_2),
[self.batch_size, s16, s16, self.gf_dim * 8],
name='g_d4_2', with_w=True)
d4_2 = self.g_bn_d4_2(self.d4_2)
d4_2 = tf.concat([d4_2, e4_2], 3)
self.d5_2, self.d5_w_2, self.d5_b_2 = tpconv2d(tf.nn.relu(d4_2),
[self.batch_size, s8, s8, self.gf_dim * 4],
name='g_d5_2', with_w=True)
d5_2 = self.g_bn_d5_2(self.d5_2)
d5_2 = tf.concat([d5_2, e3_2], 3)
self.d6_2, self.d6_w_2, self.d6_b_2 = tpconv2d(tf.nn.relu(d5_2),
[self.batch_size, s4, s4, self.gf_dim * 2],
name='g_d6_2', with_w=True)
d6_2 = self.g_bn_d6_2(self.d6_2)
d6_2 = tf.concat([d6_2, e2_2], 3)
self.d7_2, self.d7_w_2, self.d7_b_2 = tpconv2d(tf.nn.relu(d6_2),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7_2', with_w=True)
d7_2 = self.g_bn_d7_2(self.d7_2)
d7_2 = tf.concat([d7_2, e1_2], 3)
self.d8_2, self.d8_w_2, self.d8_b_2 = tpconv2d(tf.nn.relu(d7_2),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8_2', with_w=True)
# d8_2 = self.g_bn_d8_2(self.d8_2)
#
# e1_3 = self.g_bn_e1_3(conv2d(leaky_relu(d8_2), self.gf_dim, name='g_e1_conv_3'))
# e2_3 = self.g_bn_e2_3(conv2d(leaky_relu(e1_3), self.gf_dim * 2, name='g_e2_conv_3'))
# e3_3 = self.g_bn_e3_3(conv2d(leaky_relu(e2_3), self.gf_dim * 4, name='g_e3_conv_3'))
# e4_3 = self.g_bn_e4_3(conv2d(leaky_relu(e3_3), self.gf_dim * 8, name='g_e4_conv_3'))
# e5_3 = self.g_bn_e5_3(conv2d(leaky_relu(e4_3), self.gf_dim * 8, name='g_e5_conv_3'))
# e6_3 = self.g_bn_e6_3(conv2d(leaky_relu(e5_3), self.gf_dim * 8, name='g_e6_conv_3'))
# e7_3 = self.g_bn_e7_3(conv2d(leaky_relu(e6_3), self.gf_dim * 8, name='g_e7_conv_3'))
# e8_3 = self.g_bn_e8_3(conv2d(leaky_relu(e7_3), self.gf_dim * 8, name='g_e8_conv_3'))
#
# self.d1_3, self.d1_w_3, self.d1_b_3 = tpconv2d(tf.nn.relu(e8_3),
# [self.batch_size, s128, s128, self.gf_dim * 8],
# name='g_d1_3', with_w=True)
# d1_3 = tf.nn.dropout(self.g_bn_d1_3(self.d1_3), 0.5)
# d1_3 = tf.concat([d1_3, e7_3], 3)
#
# self.d2_3, self.d2_w_3, self.d2_b_3 = tpconv2d(tf.nn.relu(d1_3),
# [self.batch_size, s64, s64, self.gf_dim * 8],
# name='g_d2_3', with_w=True)
# d2_3 = tf.nn.dropout(self.g_bn_d2_3(self.d2_3), 0.5)
# d2_3 = tf.concat([d2_3, e6_3], 3)
#
# self.d3_3, self.d3_w_3, self.d3_b_3 = tpconv2d(tf.nn.relu(d2_3),
# [self.batch_size, s32, s32, self.gf_dim * 8],
# name='g_d3_3', with_w=True)
# d3_3 = tf.nn.dropout(self.g_bn_d3_3(self.d3_3), 0.5)
# d3_3 = tf.concat([d3_3, e5_3], 3)
#
# self.d4_3, self.d4_w_3, self.d4_b_3 = tpconv2d(tf.nn.relu(d3_3),
# [self.batch_size, s16, s16, self.gf_dim * 8],
# name='g_d4_3', with_w=True)
# d4_3 = self.g_bn_d4_3(self.d4_3)
# d4_3 = tf.concat([d4_3, e4_3], 3)
#
# self.d5_3, self.d5_w_3, self.d5_b_3 = tpconv2d(tf.nn.relu(d4_3),
# [self.batch_size, s8, s8, self.gf_dim * 4],
# name='g_d5_3', with_w=True)
# d5_3 = self.g_bn_d5_3(self.d5_3)
# d5_3 = tf.concat([d5_3, e3_3], 3)
#
# self.d6_3, self.d6_w_3, self.d6_b_3 = tpconv2d(tf.nn.relu(d5_3),
# [self.batch_size, s4, s4, self.gf_dim * 2],
# name='g_d6_3', with_w=True)
# d6_3 = self.g_bn_d6_3(self.d6_3)
# d6_3 = tf.concat([d6_3, e2_3], 3)
#
# self.d7_3, self.d7_w_3, self.d7_b_3 = tpconv2d(tf.nn.relu(d6_3),
# [self.batch_size, s2, s2, self.gf_dim],
# name='g_d7_3', with_w=True)
# d7_3 = self.g_bn_d7_3(self.d7_3)
# d7_3 = tf.concat([d7_3, e1_3], 3)
#
# self.d8_3, self.d8_w_3, self.d8_b_3 = tpconv2d(tf.nn.relu(d7_3),
# [self.batch_size, s, s, self.output_c_dim],
# name='g_d8_3', with_w=True)
#
# return tf.nn.tanh(self.d8_3)
return tf.nn.tanh(self.d8_2)
def sampler(self, image):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
e2 = self.g_bn_e2(conv2d(leaky_relu(e1), self.gf_dim * 2, name='g_e2_conv'))
e3 = self.g_bn_e3(conv2d(leaky_relu(e2), self.gf_dim * 4, name='g_e3_conv'))
e4 = self.g_bn_e4(conv2d(leaky_relu(e3), self.gf_dim * 8, name='g_e4_conv'))
e5 = self.g_bn_e5(conv2d(leaky_relu(e4), self.gf_dim * 8, name='g_e5_conv'))
e6 = self.g_bn_e6(conv2d(leaky_relu(e5), self.gf_dim * 8, name='g_e6_conv'))
e7 = self.g_bn_e7(conv2d(leaky_relu(e6), self.gf_dim * 8, name='g_e7_conv'))
e8 = self.g_bn_e8(conv2d(leaky_relu(e7), self.gf_dim * 8, name='g_e8_conv'))
self.d1, self.d1_w, self.d1_b = tpconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim * 8],
name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 = tf.concat([self.g_bn_d1(self.d1), e7], 3)
self.d2, self.d2_w, self.d2_b = tpconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim * 8],
name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 = tf.concat([self.g_bn_d2(self.d2), e6], 3)
self.d3, self.d3_w, self.d3_b = tpconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim * 8],
name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 = tf.concat([self.g_bn_d3(self.d3), e5], 3)
self.d4, self.d4_w, self.d4_b = tpconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim * 8],
name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
self.d5, self.d5_w, self.d5_b = tpconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim * 4],
name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
self.d6, self.d6_w, self.d6_b = tpconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim * 2],
name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
self.d7, self.d7_w, self.d7_b = tpconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
self.d8, self.d8_w, self.d8_b = tpconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8', with_w=True)
d8 = self.g_bn_d8(self.d8)
e1_2 = self.g_bn_e1_2(conv2d(leaky_relu(d8), self.gf_dim, name='g_e1_conv_2'))
e2_2 = self.g_bn_e2_2(conv2d(leaky_relu(e1_2), self.gf_dim * 2, name='g_e2_conv_2'))
e3_2 = self.g_bn_e3_2(conv2d(leaky_relu(e2_2), self.gf_dim * 4, name='g_e3_conv_2'))
e4_2 = self.g_bn_e4_2(conv2d(leaky_relu(e3_2), self.gf_dim * 8, name='g_e4_conv_2'))
e5_2 = self.g_bn_e5_2(conv2d(leaky_relu(e4_2), self.gf_dim * 8, name='g_e5_conv_2'))
e6_2 = self.g_bn_e6_2(conv2d(leaky_relu(e5_2), self.gf_dim * 8, name='g_e6_conv_2'))
e7_2 = self.g_bn_e7_2(conv2d(leaky_relu(e6_2), self.gf_dim * 8, name='g_e7_conv_2'))
e8_2 = self.g_bn_e8_2(conv2d(leaky_relu(e7_2), self.gf_dim * 8, name='g_e8_conv_2'))
self.d1_2, self.d1_w_2, self.d1_b_2 = tpconv2d(tf.nn.relu(e8_2),
[self.batch_size, s128, s128, self.gf_dim * 8],
name='g_d1_2', with_w=True)
d1_2 = tf.nn.dropout(self.g_bn_d1_2(self.d1_2), 0.5)
d1_2 = tf.concat([d1_2, e7_2], 3)
# d1_2 = tf.concat([self.g_bn_d1_2(self.d1_2), e7_2], 3)
self.d2_2, self.d2_w_2, self.d2_b_2 = tpconv2d(tf.nn.relu(d1_2),
[self.batch_size, s64, s64, self.gf_dim * 8],
name='g_d2_2', with_w=True)
d2_2 = tf.nn.dropout(self.g_bn_d2_2(self.d2_2), 0.5)
d2_2 = tf.concat([d2_2, e6_2], 3)
# d2_2 = tf.concat([self.g_bn_d2_2(self.d2_2), e6_2], 3)
self.d3_2, self.d3_w_2, self.d3_b_2 = tpconv2d(tf.nn.relu(d2_2),
[self.batch_size, s32, s32, self.gf_dim * 8],
name='g_d3_2', with_w=True)
d3_2 = tf.nn.dropout(self.g_bn_d3_2(self.d3_2), 0.5)
d3_2 = tf.concat([d3_2, e5_2], 3)
# d3_2 = tf.concat([self.g_bn_d3_2(self.d3_2), e5_2], 3)
self.d4_2, self.d4_w_2, self.d4_b_2 = tpconv2d(tf.nn.relu(d3_2),
[self.batch_size, s16, s16, self.gf_dim * 8],
name='g_d4_2', with_w=True)
d4_2 = self.g_bn_d4_2(self.d4_2)
d4_2 = tf.concat([d4_2, e4_2], 3)
self.d5_2, self.d5_w_2, self.d5_b_2 = tpconv2d(tf.nn.relu(d4_2),
[self.batch_size, s8, s8, self.gf_dim * 4],
name='g_d5_2', with_w=True)
d5_2 = self.g_bn_d5_2(self.d5_2)
d5_2 = tf.concat([d5_2, e3_2], 3)
self.d6_2, self.d6_w_2, self.d6_b_2 = tpconv2d(tf.nn.relu(d5_2),
[self.batch_size, s4, s4, self.gf_dim * 2],
name='g_d6_2', with_w=True)
d6_2 = self.g_bn_d6_2(self.d6_2)
d6_2 = tf.concat([d6_2, e2_2], 3)
self.d7_2, self.d7_w_2, self.d7_b_2 = tpconv2d(tf.nn.relu(d6_2),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7_2', with_w=True)
d7_2 = self.g_bn_d7_2(self.d7_2)
d7_2 = tf.concat([d7_2, e1_2], 3)
self.d8_2, self.d8_w_2, self.d8_b_2 = tpconv2d(tf.nn.relu(d7_2),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8_2', with_w=True)
# d8_2 = self.g_bn_d8_2(self.d8_2)
#
# e1_3 = self.g_bn_e1_3(conv2d(leaky_relu(d8_2), self.gf_dim, name='g_e1_conv_3'))
# e2_3 = self.g_bn_e2_3(conv2d(leaky_relu(e1_3), self.gf_dim * 2, name='g_e2_conv_3'))
# e3_3 = self.g_bn_e3_3(conv2d(leaky_relu(e2_3), self.gf_dim * 4, name='g_e3_conv_3'))
# e4_3 = self.g_bn_e4_3(conv2d(leaky_relu(e3_3), self.gf_dim * 8, name='g_e4_conv_3'))
# e5_3 = self.g_bn_e5_3(conv2d(leaky_relu(e4_3), self.gf_dim * 8, name='g_e5_conv_3'))
# e6_3 = self.g_bn_e6_3(conv2d(leaky_relu(e5_3), self.gf_dim * 8, name='g_e6_conv_3'))
# e7_3 = self.g_bn_e7_3(conv2d(leaky_relu(e6_3), self.gf_dim * 8, name='g_e7_conv_3'))
# e8_3 = self.g_bn_e8_3(conv2d(leaky_relu(e7_3), self.gf_dim * 8, name='g_e8_conv_3'))
#
# self.d1_3, self.d1_w_3, self.d1_b_3 = tpconv2d(tf.nn.relu(e8_3),
# [self.batch_size, s128, s128, self.gf_dim * 8],
# name='g_d1_3', with_w=True)
# d1_3 = tf.nn.dropout(self.g_bn_d1_3(self.d1_3), 0.5)
# d1_3 = tf.concat([d1_3, e7_3], 3)
#
# self.d2_3, self.d2_w_3, self.d2_b_3 = tpconv2d(tf.nn.relu(d1_3),
# [self.batch_size, s64, s64, self.gf_dim * 8],
# name='g_d2_3', with_w=True)
# d2_3 = tf.nn.dropout(self.g_bn_d2_3(self.d2_3), 0.5)
# d2_3 = tf.concat([d2_3, e6_3], 3)
#
# self.d3_3, self.d3_w_3, self.d3_b_3 = tpconv2d(tf.nn.relu(d2_3),
# [self.batch_size, s32, s32, self.gf_dim * 8],
# name='g_d3_3', with_w=True)
# d3_3 = tf.nn.dropout(self.g_bn_d3_3(self.d3_3), 0.5)
# d3_3 = tf.concat([d3_3, e5_3], 3)
#
# self.d4_3, self.d4_w_3, self.d4_b_3 = tpconv2d(tf.nn.relu(d3_3),
# [self.batch_size, s16, s16, self.gf_dim * 8],
# name='g_d4_3', with_w=True)
# d4_3 = self.g_bn_d4_3(self.d4_3)
# d4_3 = tf.concat([d4_3, e4_3], 3)
#
# self.d5_3, self.d5_w_3, self.d5_b_3 = tpconv2d(tf.nn.relu(d4_3),
# [self.batch_size, s8, s8, self.gf_dim * 4],
# name='g_d5_3', with_w=True)
# d5_3 = self.g_bn_d5_3(self.d5_3)
# d5_3 = tf.concat([d5_3, e3_3], 3)
#
# self.d6_3, self.d6_w_3, self.d6_b_3 = tpconv2d(tf.nn.relu(d5_3),
# [self.batch_size, s4, s4, self.gf_dim * 2],
# name='g_d6_3', with_w=True)
# d6_3 = self.g_bn_d6_3(self.d6_3)
# d6_3 = tf.concat([d6_3, e2_3], 3)
#
# self.d7_3, self.d7_w_3, self.d7_b_3 = tpconv2d(tf.nn.relu(d6_3),
# [self.batch_size, s2, s2, self.gf_dim],
# name='g_d7_3', with_w=True)
# d7_3 = self.g_bn_d7_3(self.d7_3)
# d7_3 = tf.concat([d7_3, e1_3], 3)
#
# self.d8_3, self.d8_w_3, self.d8_b_3 = tpconv2d(tf.nn.relu(d7_3),
# [self.batch_size, s, s, self.output_c_dim],
# name='g_d8_3', with_w=True)
#
# return tf.nn.tanh(self.d8_3)
return tf.nn.tanh(self.d8_2)
def save_model(self, checkpoint_dir, step):
model_name = "cGAN.model"
model_dir = "%s_%s_%s" % ('test', self.batch_size, self.output_size)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load_model(self, checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
def predict_on_graph(self, test_gen=None, file_name=None):
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.load_model(self.checkpoint_dir)
n_samples = len(test_gen)
data = test_gen.generate()
f = h5py.File(file_name, 'w')
for i in range(n_samples):
sample = next(data)
sample_images = sample[0]
sample_images = sample_images[np.newaxis, :, :, :]
blank_bv = np.zeros((self.batch_size, self.image_size, self.image_size, self.output_c_dim))
sample_images = np.concatenate((sample_images, blank_bv), axis=-1)
samples = self.sess.run(self.fake_bv_t_sample, feed_dict={self.train_data: sample_images})
f.create_dataset(str(i), data=samples)
f.close()
class CGAN(object):
def __init__(self, sess, image_size=256,
batch_size=1, output_size=256,
gf_dim=64, df_dim=64, l1_lambda=100,
input_c_dim=60, output_c_dim=1,
checkpoint_dir=None,
load_checkpoint=False,
train_data_gen=None,
valid_data_gen=None):
self.sess = sess
self.batch_size = batch_size
self.image_size = image_size
self.output_size = output_size
self.gf_dim = gf_dim
self.df_dim = df_dim
self.input_c_dim = input_c_dim
self.output_c_dim = output_c_dim
self.l1_lambda = l1_lambda
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.g_bn_e2 = batch_norm(name='g_bn_e2')
self.g_bn_e3 = batch_norm(name='g_bn_e3')
self.g_bn_e4 = batch_norm(name='g_bn_e4')
self.g_bn_e5 = batch_norm(name='g_bn_e5')
self.g_bn_e6 = batch_norm(name='g_bn_e6')
self.g_bn_e7 = batch_norm(name='g_bn_e7')
self.g_bn_e8 = batch_norm(name='g_bn_e8')
self.g_bn_d1 = batch_norm(name='g_bn_d1')
self.g_bn_d2 = batch_norm(name='g_bn_d2')
self.g_bn_d3 = batch_norm(name='g_bn_d3')
self.g_bn_d4 = batch_norm(name='g_bn_d4')
self.g_bn_d5 = batch_norm(name='g_bn_d5')
self.g_bn_d6 = batch_norm(name='g_bn_d6')
self.g_bn_d7 = batch_norm(name='g_bn_d7')
self.checkpoint_dir = checkpoint_dir
self.load_checkpoint = load_checkpoint
self.train_batches = len(train_data_gen)
self.train_data_gen = train_data_gen.generate()
self.valid_data_gen = valid_data_gen.generate()
self.build_model()
def build_model(self):
self.train_data = tf.placeholder(tf.float32,
[self.batch_size, self.image_size, self.image_size,
self.input_c_dim + self.output_c_dim],
name='real_dce_and_bv_images_train')
self.val_data = tf.placeholder(tf.float32,
[self.batch_size, self.image_size, self.image_size,
self.input_c_dim + self.output_c_dim],
name='real_dce_and_bv_images_val')
self.real_dce_t = self.train_data[:, :, :, :self.input_c_dim]
self.real_bv_t = self.train_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.real_dce_v = self.val_data[:, :, :, :self.input_c_dim]
self.real_bv_v = self.val_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.fake_bv_t = self.generator(self.real_dce_t)
self.real_dceANDbv = tf.concat([self.real_dce_t, self.real_bv_t], 3)
self.fake_dceANDbv = tf.concat([self.real_dce_t, self.fake_bv_t], 3)
self.D, self.D_logits = self.discriminator(self.real_dceANDbv, reuse=False)
self.D_, self.D_logits_ = self.discriminator(self.fake_dceANDbv, reuse=True)
self.fake_bv_t_sample = self.sampler(self.real_dce_t)
self.fake_bv_v_sample = self.sampler(self.real_dce_v)
self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
labels=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.zeros_like(self.D_)))
self.d_loss = self.d_loss_real + self.d_loss_fake
self.l1_penalty = self.l1_lambda * tf.reduce_mean(tf.abs(self.real_bv_t - self.fake_bv_t))
self.l1_penalty_v = self.l1_lambda * tf.reduce_mean(tf.abs(self.real_bv_v - self.fake_bv_v_sample))
self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.ones_like(self.D_))) + self.l1_penalty
self.d_sum = tf.summary.histogram("d", self.D)
self.d__sum = tf.summary.histogram("d_", self.D_)
self.bv_t_sum = tf.summary.image('real_vs_fake_bv_train', tf.concat([self.real_bv_t, self.fake_bv_t_sample], 2))
self.dce_t_ex = tf.concat([self.real_dce_t[:, :, :, 5],
self.real_dce_t[:, :, :, 10],
self.real_dce_t[:, :, :, 25],
self.real_dce_t[:, :, :, 40]], 2)
self.dce_t_ex = tf.expand_dims(self.dce_t_ex, axis=-1)
self.dce_t_sum = tf.summary.image('dce_input_train', self.dce_t_ex)
self.bv_v_sum = tf.summary.image('real_vs_fake_bv_val', tf.concat([self.real_bv_v, self.fake_bv_v_sample], 2))
self.dce_v_ex = tf.concat([self.real_dce_v[:, :, :, 5],
self.real_dce_v[:, :, :, 10],
self.real_dce_v[:, :, :, 25],
self.real_dce_v[:, :, :, 40]], 2)
self.dce_v_ex = tf.expand_dims(self.dce_v_ex, axis=-1)
self.dce_v_sum = tf.summary.image('dce_input_val', self.dce_v_ex)
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.l1_penalty_sum = tf.summary.scalar("l1_penalty", self.l1_penalty)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
self.l1_penalty_sum_v = tf.summary.scalar("l1_penalty_v", self.l1_penalty_v)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver()
def train_graph(self, lr=0.0002, beta1=0.5, epochs=100):
d_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(lr, beta1=beta1).minimize(self.g_loss, var_list=self.g_vars)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.g_sum = tf.summary.merge([self.d__sum, self.bv_t_sum,
self.dce_t_sum, self.bv_v_sum,
self.dce_v_sum, self.d_loss_fake_sum,
self.g_loss_sum, self.l1_penalty_sum])
self.d_sum = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
counter = 1
if self.load_checkpoint is True:
self.load_model(self.checkpoint_dir)
for epoch in range(epochs):
for idx in range(self.train_batches):
t_data = next(self.train_data_gen)
train_sample = np.concatenate((t_data[0], t_data[1]), axis=-1)
v_data = next(self.valid_data_gen)
valid_sample = np.concatenate((v_data[0], v_data[1]), axis=-1)
_, summary_str = self.sess.run([d_optim, self.d_sum], feed_dict={self.train_data: train_sample})
self.writer.add_summary(summary_str, counter)
_, summary_str = self.sess.run([g_optim, self.g_sum], feed_dict={self.train_data: train_sample,
self.val_data: valid_sample})
self.writer.add_summary(summary_str, counter)
_, summary_str = self.sess.run([g_optim, self.g_sum], feed_dict={self.train_data: train_sample,
self.val_data: valid_sample})
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({self.train_data: train_sample})
errD_real = self.d_loss_real.eval({self.train_data: train_sample})
errG = self.g_loss.eval({self.train_data: train_sample})
print(errD_fake, errD_real, errG)
counter += 1
if np.mod(counter, 500) == 2:
self.save_model(self.checkpoint_dir, counter)
def discriminator(self, image, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
else:
                assert not tf.get_variable_scope().reuse
h0 = leaky_relu(conv2d(image, self.df_dim, name='d_h0_conv'))
h1 = leaky_relu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
h2 = leaky_relu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
h3 = leaky_relu(self.d_bn3(conv2d(h2, self.df_dim*8, stride=1, name='d_h3_conv')))
h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
return tf.nn.sigmoid(h4), h4
def generator(self, image):
with tf.variable_scope("generator") as scope:
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
e2 = self.g_bn_e2(conv2d(leaky_relu(e1), self.gf_dim*2, name='g_e2_conv'))
e3 = self.g_bn_e3(conv2d(leaky_relu(e2), self.gf_dim*4, name='g_e3_conv'))
e4 = self.g_bn_e4(conv2d(leaky_relu(e3), self.gf_dim*8, name='g_e4_conv'))
e5 = self.g_bn_e5(conv2d(leaky_relu(e4), self.gf_dim*8, name='g_e5_conv'))
e6 = self.g_bn_e6(conv2d(leaky_relu(e5), self.gf_dim*8, name='g_e6_conv'))
e7 = self.g_bn_e7(conv2d(leaky_relu(e6), self.gf_dim*8, name='g_e7_conv'))
e8 = self.g_bn_e8(conv2d(leaky_relu(e7), self.gf_dim*8, name='g_e8_conv'))
self.d1, self.d1_w, self.d1_b = tpconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim*8],
name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 = tf.concat([self.g_bn_d1(self.d1), e7], 3)
self.d2, self.d2_w, self.d2_b = tpconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim*8],
name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 = tf.concat([self.g_bn_d2(self.d2), e6], 3)
self.d3, self.d3_w, self.d3_b = tpconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim*8],
name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 = tf.concat([self.g_bn_d3(self.d3), e5], 3)
self.d4, self.d4_w, self.d4_b = tpconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim*8],
name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
self.d5, self.d5_w, self.d5_b = tpconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim*4],
name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
self.d6, self.d6_w, self.d6_b = tpconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim*2],
name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
self.d7, self.d7_w, self.d7_b = tpconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
self.d8, self.d8_w, self.d8_b = tpconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8', with_w=True)
return tf.nn.tanh(self.d8)
def sampler(self, image):
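# Sampler: rebuilds the same generator graph under the "generator" variable scope with
# scope.reuse_variables(), so inference shares the trained generator weights.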
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
s = self.output_size
s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)
e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
e2 = self.g_bn_e2(conv2d(leaky_relu(e1), self.gf_dim*2, name='g_e2_conv'))
e3 = self.g_bn_e3(conv2d(leaky_relu(e2), self.gf_dim*4, name='g_e3_conv'))
e4 = self.g_bn_e4(conv2d(leaky_relu(e3), self.gf_dim*8, name='g_e4_conv'))
e5 = self.g_bn_e5(conv2d(leaky_relu(e4), self.gf_dim*8, name='g_e5_conv'))
e6 = self.g_bn_e6(conv2d(leaky_relu(e5), self.gf_dim*8, name='g_e6_conv'))
e7 = self.g_bn_e7(conv2d(leaky_relu(e6), self.gf_dim*8, name='g_e7_conv'))
e8 = self.g_bn_e8(conv2d(leaky_relu(e7), self.gf_dim*8, name='g_e8_conv'))
self.d1, self.d1_w, self.d1_b = tpconv2d(tf.nn.relu(e8),
[self.batch_size, s128, s128, self.gf_dim*8],
name='g_d1', with_w=True)
d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
d1 = tf.concat([d1, e7], 3)
# d1 = tf.concat([self.g_bn_d1(self.d1), e7], 3)
self.d2, self.d2_w, self.d2_b = tpconv2d(tf.nn.relu(d1),
[self.batch_size, s64, s64, self.gf_dim * 8],
name='g_d2', with_w=True)
d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
d2 = tf.concat([d2, e6], 3)
# d2 = tf.concat([self.g_bn_d2(self.d2), e6], 3)
self.d3, self.d3_w, self.d3_b = tpconv2d(tf.nn.relu(d2),
[self.batch_size, s32, s32, self.gf_dim * 8],
name='g_d3', with_w=True)
d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
d3 = tf.concat([d3, e5], 3)
# d3 = tf.concat([self.g_bn_d3(self.d3), e5], 3)
self.d4, self.d4_w, self.d4_b = tpconv2d(tf.nn.relu(d3),
[self.batch_size, s16, s16, self.gf_dim*8],
name='g_d4', with_w=True)
d4 = self.g_bn_d4(self.d4)
d4 = tf.concat([d4, e4], 3)
self.d5, self.d5_w, self.d5_b = tpconv2d(tf.nn.relu(d4),
[self.batch_size, s8, s8, self.gf_dim*4],
name='g_d5', with_w=True)
d5 = self.g_bn_d5(self.d5)
d5 = tf.concat([d5, e3], 3)
self.d6, self.d6_w, self.d6_b = tpconv2d(tf.nn.relu(d5),
[self.batch_size, s4, s4, self.gf_dim*2],
name='g_d6', with_w=True)
d6 = self.g_bn_d6(self.d6)
d6 = tf.concat([d6, e2], 3)
self.d7, self.d7_w, self.d7_b = tpconv2d(tf.nn.relu(d6),
[self.batch_size, s2, s2, self.gf_dim],
name='g_d7', with_w=True)
d7 = self.g_bn_d7(self.d7)
d7 = tf.concat([d7, e1], 3)
self.d8, self.d8_w, self.d8_b = tpconv2d(tf.nn.relu(d7),
[self.batch_size, s, s, self.output_c_dim],
name='g_d8', with_w=True)
return tf.nn.tanh(self.d8)
def save_model(self, checkpoint_dir, step):
model_name = "cGAN.model"
model_dir = "%s_%s_%s" % ('test', self.batch_size, self.output_size)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load_model(self, checkpoint_dir):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
def predict_on_graph(self, test_gen=None, file_name=None):
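# Inference: restore the latest checkpoint, run the sampler on each test sample (with blank
# target channels concatenated so the input matches the training layout), and write every
# prediction to its own dataset in an HDF5 file.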
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
self.load_model(self.checkpoint_dir)
n_samples = len(test_gen)
data = test_gen.generate()
f = h5py.File(file_name, 'w')
for i in range(n_samples):
sample = next(data)
sample_images = sample[0]
sample_images = sample_images[np.newaxis, :, :, :]
blank_bv = np.zeros((self.batch_size, self.image_size, self.image_size, self.output_c_dim))
sample_images = np.concatenate((sample_images, blank_bv), axis=-1)
samples = self.sess.run(self.fake_bv_t_sample, feed_dict={self.train_data: sample_images})
f.create_dataset(str(i), data=samples)
f.close()
def main(_):
train_imgs = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_train_images.h5'
train_annos = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_train_annos.h5'
valid_imgs = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_validation_images.h5'
valid_annos = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_validation_annos.h5'
test_imgs = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_test_images_1.h5'
# ckpt_dir = r'Y:\Prostate Brachytherapy\ABTI_clean_files\ismrm_unet_cgan_run7'
ckpt_dir = './abti_ckpt'
# preds_file_name = r'Y:\Prostate Brachytherapy\ABTI_clean_files\abti_test_preds_1.h5'
load_ckpt = False
# load_ckpt = True
dl_action = 'train'
cgan_type = 'cgan'
epochs = 100
# dl_action = 'test'
train_data = FCN2DDatasetGenerator(train_imgs,
annos_hdf5_path=train_annos,
flip_horizontal=True,
shuffle_data=True,
rounds=1,
batch_size=1,
subset='train',
normalization='samplewise_negpos_xy',
apply_aug=False)
val_data = FCN2DDatasetGenerator(valid_imgs,
annos_hdf5_path=valid_annos,
shuffle_data=True,
batch_size=1,
subset='validation',
normalization='samplewise_negpos_xy',
apply_aug=False)
test_data = FCN2DDatasetGenerator(test_imgs,
shuffle_data=True,
batch_size=1,
subset='test',
normalization='samplewise_negpos_xy',
apply_aug=False)
with tf.Session() as sess:
if cgan_type == 'cgan':
model = CGAN(sess,
image_size=256,
batch_size=1,
output_size=256,
gf_dim=64,
df_dim=64,
l1_lambda=100,
input_c_dim=60,
output_c_dim=1,
checkpoint_dir=ckpt_dir,
load_checkpoint=load_ckpt,
train_data_gen=train_data,
valid_data_gen=val_data)
elif cgan_type == 'cascaded_cgan':
model = CascadedCGAN(sess,
image_size=256,
batch_size=1,
output_size=256,
gf_dim=64,
df_dim=64,
l1_lambda=100,
input_c_dim=60,
output_c_dim=1,
checkpoint_dir=ckpt_dir,
load_checkpoint=load_ckpt,
train_data_gen=train_data,
valid_data_gen=val_data)
else:
raise ValueError('cgan_type must be either cgan or cascaded_cgan')
if dl_action == 'train':
model.train_graph(epochs=epochs)
else:
model.predict_on_graph(test_data, preds_file_name)
if __name__ == '__main__':
tf.app.run()
```
#### File: gui/data_menu/constructor.py
```python
import tkinter as tk
from tkinter import filedialog
from src.gui.data_menu.preprocessing import Preprocessing
from src.gui.data_menu.augmentation import Augmentation
class DataMenuConstructor(tk.Menu):
def __init__(self, parent, controller):
"""
Constructor class to build the data menu for the graphical user interface.
:param parent: lowest level parent menu
:param controller: the GUI variable controller
Attributes:
parent -- lowest level parent menu
controller -- constructs and handles all editable GUI variables
data_menu -- data_menu built on the parent
preprocessing -- pop-out menu for preprocessing steps
postprocessing -- pop-out menu for postprocessing steps
"""
tk.Menu.__init__(self, parent)
self.parent = parent
self.controller = controller
self.data_menu = tk.Menu(self.parent)
self.parent.add_cascade(label='Data', menu=self.data_menu)
self.data_menu.add_command(label='Load train X', command=self.load_train_X)
self.data_menu.add_separator()
self.data_menu.add_command(label='Load train y', command=self.load_train_y)
self.data_menu.add_separator()
self.data_menu.add_command(label='Load validation X', command=self.load_val_X)
self.data_menu.add_separator()
self.data_menu.add_command(label='Load validation y', command=self.load_val_y)
self.data_menu.add_separator()
self.data_menu.add_command(label='Load test X', command=self.load_test_X)
self.data_menu.add_separator()
self.preprocessing = Preprocessing(self.controller)
self.data_menu.add_command(label='Preprocessing', command=self.preprocessing.show)
self.data_menu.add_separator()
self.augmentation = Augmentation(self.controller)
self.data_menu.add_command(label='Augmentation', command=self.augmentation.show)
def load_train_X(self):
self.controller.data_menu.train_X_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.train_X_path is None:
return
else:
self.controller.data_menu.train_X_path = self.controller.data_menu.train_X_path.name
self.controller.data_menu.s_train_X_path.set(self.controller.data_menu.train_X_path)
def load_train_y(self):
self.controller.data_menu.train_y_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.train_y_path is None:
return
else:
self.controller.data_menu.train_y_path = self.controller.data_menu.train_y_path.name
self.controller.data_menu.s_train_y_path.set(self.controller.data_menu.train_y_path)
def load_val_X(self):
self.controller.data_menu.val_X_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.val_X_path is None:
return
else:
self.controller.data_menu.val_X_path = self.controller.data_menu.val_X_path.name
self.controller.data_menu.s_val_X_path.set(self.controller.data_menu.val_X_path)
def load_val_y(self):
self.controller.data_menu.val_y_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.val_y_path is None:
return
else:
self.controller.data_menu.val_y_path = self.controller.data_menu.val_y_path.name
self.controller.data_menu.s_val_y_path.set(self.controller.data_menu.val_y_path)
def load_test_X(self):
self.controller.data_menu.test_X_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.test_X_path is None:
return
else:
self.controller.data_menu.test_X_path = self.controller.data_menu.test_X_path.name
self.controller.data_menu.s_test_X_path.set(self.controller.data_menu.test_X_path)
def load_test_y(self):
self.controller.data_menu.test_y_path = tk.filedialog.askopenfile(filetypes=(('HDF5 files', '*.h5'), ('All files', '*.*')))
if self.controller.data_menu.test_y_path is None:
return
else:
self.controller.data_menu.test_y_path = self.controller.data_menu.test_y_path.name
self.controller.data_menu.s_test_y_path.set(self.controller.data_menu.test_y_path)
```
#### File: gui/file_menu/config_file.py
```python
import tkinter as tk
class ConfigurationFile:
def __init__(self, controller):
self.controller = controller
self.button_heights = 1
self.button_widths = 15
self.label_heights = 1
self.label_widths = 15
self.entry_widths = 15
self.tl_config_def = tk.Toplevel()
self.tl_config_def.title('Config file type...')
self.tl_config_def.resizable(width=False, height=False)
self.tl_config_def.wm_protocol('WM_DELETE_WINDOW', self.tl_config_def.withdraw)
self.om_model_signal = tk.OptionMenu(self.tl_config_def, self.controller.file_menu.s_model_signal, *self.controller.file_menu.o_model_signal)
self.om_model_signal.config()
self.om_model_signal.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
self.om_type_signal = tk.OptionMenu(self.tl_config_def, self.controller.file_menu.s_type_signal, *self.controller.file_menu.o_type_signal)
self.om_type_signal.config()
self.om_type_signal.grid(row=0, column=1, sticky=tk.N+tk.S+tk.E+tk.W)
self.l_input_shape = tk.Label(self.tl_config_def, text='Input shape:').grid(row=1, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
self.e_input_shape = tk.Entry(self.tl_config_def, textvariable=self.controller.file_menu.s_input_shape).grid(row=1, column=1)
self.tl_config_def.withdraw()
def show(self):
self.tl_config_def.deiconify()
```
#### File: gui/file_menu/variables.py
```python
import tkinter as tk
class FileMenuVariables(object):
def __init__(self):
self.load_file_path = ""
self.load_ckpt_file_path = ""
self.load_model_file_path = ""
self.s_load_file_path = tk.StringVar(value=self.load_file_path)
self.s_load_ckpt_file_path = tk.StringVar(value=self.load_ckpt_file_path)
self.s_load_model_file_path = tk.StringVar(value=self.load_model_file_path)
self.save_file_path = ""
```
#### File: gui/home_menu/prebuilt_gan.py
```python
import tkinter as tk
from src.utils.general_utils import load_config
import os
import tensorflow as tf
class PrebuiltGenerativeAdversarialNetwork:
def __init__(self, controller):
self.tl_prebuilt_gan = tk.Toplevel()
self.tl_prebuilt_gan.title('Prebuilt GANs')
self.tl_prebuilt_gan.wm_protocol('WM_DELETE_WINDOW', self.tl_prebuilt_gan.withdraw)
self.controller = controller
self.p_pix2pix = tk.PhotoImage(file='src/gui/button_figs/prebuilt_gan/pix2pix.png')
self.b_pix2pix = tk.Button(self.tl_prebuilt_gan, image=self.p_pix2pix, command=self.pix2pix).grid(row=0, column=0)
self.p_cyclegan = tk.PhotoImage(file='src/gui/button_figs/prebuilt_gan/cyclegan.png')
self.b_cyclegan = tk.Button(self.tl_prebuilt_gan, image=self.p_cyclegan, command=self.cyclegan).grid(row=0, column=1)
self.tl_prebuilt_gan.resizable(width=False, height=False)
self.tl_prebuilt_gan.withdraw()
def show(self):
self.tl_prebuilt_gan.deiconify()
def pix2pix(self):
self.tl_prebuilt_gan.withdraw()
tf.reset_default_graph()
cwd = os.getcwd()
config_file = os.path.join(cwd, "prebuilt_configs/pix2pix.json")
configs = load_config(config_file)
self.controller.set_configs(configs)
def cyclegan(self):
self.tl_prebuilt_gan.withdraw()
tf.reset_default_graph()
cwd = os.getcwd()
config_file = os.path.join(cwd, "prebuilt_configs/cyclegan.json")
configs = load_config(config_file)
self.controller.set_configs(configs)
```
#### File: gui/layers_menu/constructor.py
```python
import tkinter as tk
from src.gui.layers_menu.convolutional_layers import ConvolutionalLayers
from src.gui.layers_menu.pooling_layers import PoolingLayers
from src.gui.layers_menu.utility_layers import UtilityLayers
from src.gui.layers_menu.advanced_activations import AdvancedActivations
from src.gui.layers_menu.pretrained_networks import PretrainedNetworks
class LayersMenuConstructor(tk.Menu):
def __init__(self, parent, controller):
tk.Menu.__init__(self, parent)
self.parent = parent
self.controller = controller
self.layers_menu = tk.Menu(self.parent)
self.parent.add_cascade(label='Layers', menu=self.layers_menu)
self.conv_layers = ConvolutionalLayers(self.controller)
self.layers_menu.add_command(label='Convolution layers', command=self.conv_layers.show)
self.layers_menu.add_separator()
self.pool_layers = PoolingLayers(self.controller)
self.layers_menu.add_command(label='Pooling layers', command=self.pool_layers.show)
self.layers_menu.add_separator()
self.util_layers = UtilityLayers(self.controller)
self.layers_menu.add_command(label='Utility layers', command=self.util_layers.show)
self.layers_menu.add_separator()
self.adv_acts = AdvancedActivations(self.controller)
self.layers_menu.add_command(label='Advanced activations', command=self.adv_acts.show)
self.layers_menu.add_separator()
self.pretrained_nets = PretrainedNetworks(self.controller)
self.layers_menu.add_command(label='Pretrained networks', command=self.pretrained_nets.show)
```
#### File: gui/options_menu/constructor.py
```python
import tkinter as tk
from src.gui.options_menu.learning_rate import LearningRateSchedule
from src.gui.options_menu.loss_function import LossFunction
from src.gui.options_menu.optimizer import Optimizer
from src.gui.options_menu.train_configs import TrainingConfigurations
from src.gui.options_menu.monitors import Monitors
from src.gui.options_menu.save_configs import SaveConfigurations
from src.gui.options_menu.bbd_options import BbdOptions
class OptionsMenuConstructor(tk.Menu):
def __init__(self, parent, controller):
tk.Menu.__init__(self, parent)
self.parent = parent
self.controller = controller
self.options_menu = tk.Menu(self.parent)
self.parent.add_cascade(label='Options', menu=self.options_menu)
self.learning_rate_schedule = LearningRateSchedule(self.controller)
self.options_menu.add_command(label='Learning rate schedule', command=self.learning_rate_schedule.show)
self.options_menu.add_separator()
self.loss_config = LossFunction(self.controller)
self.options_menu.add_command(label='Loss function', command=self.loss_config.show)
self.options_menu.add_separator()
self.optimizer = Optimizer(self.controller)
self.options_menu.add_command(label='Optimizer', command=self.optimizer.show)
self.options_menu.add_separator()
self.training_configs = TrainingConfigurations(self.controller)
self.options_menu.add_command(label='Training configurations', command=self.training_configs.show)
self.options_menu.add_separator()
self.monitors = Monitors(self.controller)
self.options_menu.add_command(label='Monitors', command=self.monitors.show)
self.options_menu.add_separator()
self.save_configs = SaveConfigurations(self.controller)
self.options_menu.add_command(label='Save configurations', command=self.save_configs.show)
self.options_menu.add_separator()
self.bbd_options = BbdOptions(self.controller)
self.options_menu.add_command(label='BBD options', command=self.bbd_options.show)
```
#### File: gui/options_menu/variables.py
```python
import tkinter as tk
class OptionsMenuVariables(object):
def __init__(self):
##############################################################################
# Variables for loss submenu
##############################################################################
self.o_loss = ('categorical_crossentropy', 'weighted_categorical_crossentropy',
'sparse_categorical_crossentropy', 'mean_squared_error',
'mean_absolute_error', 'tversky', 'pix2pix', 'cyclegan', 'ssd',
'jaccard', 'focal', 'soft_dice')
self.s_loss = tk.StringVar(value=self.o_loss[0])
self.s_loss_param1 = tk.StringVar(value="0.0")
self.s_loss_param2 = tk.StringVar(value="0.0")
##############################################################################
# Variables for learning rate schedule submenu
##############################################################################
self.s_base_lr = tk.StringVar(value="0.0001")
self.s_lr_decay = tk.StringVar(value="0.0")
self.bool_decay_on_plateau = tk.BooleanVar(value=True)
self.s_decay_on_plateau_factor = tk.StringVar(value="0.2")
self.s_decay_on_plateau_patience = tk.StringVar(value="3")
self.bool_step_decay = tk.BooleanVar(value=False)
self.s_step_decay_factor = tk.StringVar(value="0.1")
self.s_step_decay_period = tk.StringVar(value="3")
self.s_d_lr = tk.StringVar(value="0.0001:0.0")
self.s_gan_lr = tk.StringVar(value="0.0001:0.0")
##############################################################################
# Variables for optimizer submenu
##############################################################################
self.o_optimizer = ('Adam', 'NAdam', 'SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adamax')
self.s_optimizer = tk.StringVar(value=self.o_optimizer[0])
self.s_optimizer_beta1 = tk.StringVar(value="0.9")
self.s_optimizer_beta2 = tk.StringVar(value="0.999")
self.s_optimizer_rho = tk.StringVar(value="0.9")
self.s_optimizer_momentum = tk.StringVar(value="0.0")
self.s_optimizer_epsilon = tk.StringVar(value="None")
self.s_d_optimizer = tk.StringVar(value="Adam:0.9:0.999:0.9:0.0:None")
self.s_gan_optimizer = tk.StringVar(value="Adam:0.9:0.999:0.9:0.0:None")
##############################################################################
# Variables for training configurations submenu
##############################################################################
self.o_hardware = ('gpu', 'multi-gpu', 'cpu')
self.s_hardware = tk.StringVar(value=self.o_hardware[0])
self.s_n_gpus = tk.StringVar(value="1")
self.bool_early_stop = tk.BooleanVar(value=True)
self.s_early_stop_patience = tk.StringVar(value="10")
self.s_batch_size = tk.StringVar(value="32")
self.s_epochs = tk.StringVar(value="500")
self.bool_shuffle = tk.BooleanVar(value=True)
self.s_val_split = tk.StringVar(value="0.2")
##############################################################################
# Variables for monitors submenu
##############################################################################
self.bool_mse_monitor = tk.BooleanVar(value=False)
self.bool_mae_monitor = tk.BooleanVar(value=False)
self.bool_acc_monitor = tk.BooleanVar(value=True)
##############################################################################
# Variables for save configurations submenu
##############################################################################
self.bool_save_model = tk.BooleanVar(value=False)
self.s_save_model_path = tk.StringVar()
self.bool_save_csv = tk.BooleanVar(value=False)
self.s_save_csv_path = tk.StringVar()
self.bool_save_checkpoints = tk.BooleanVar(value=False)
self.s_save_checkpoints_path = tk.StringVar()
self.s_save_checkpoints_frequency = tk.StringVar(value="1")
self.bool_tensorboard = tk.BooleanVar(value=False)
self.s_tensorboard_path = tk.StringVar()
self.s_tensorboard_frequency = tk.StringVar(value="5")
##############################################################################
# Variables for BBD options
##############################################################################
self.o_scaling = ('global', 'per predictor layer')
self.s_scaling = tk.StringVar(value=self.o_scaling[0])
self.s_scales = tk.StringVar(value="0.1, 0.9")
self.o_aspect_ratios = ('global', 'per predictor layer')
self.s_aspect_ratios = tk.StringVar(value=self.o_aspect_ratios[0])
self.s_ARs = tk.StringVar(value="(0.5, 1.0, 1.5, 2.0)")
self.s_n_classes = tk.StringVar(value="1")
self.s_steps = tk.StringVar(value="(8, 16, 32, 64, 128)")
self.s_offsets = tk.StringVar(value="None")
self.s_variances = tk.StringVar(value="(0.1, 0.1, 0.2, 0.2)")
self.s_conf_thresh = tk.StringVar(value="0.5")
self.s_iou_thresh = tk.StringVar(value="0.5")
self.s_top_k = tk.StringVar(value="200")
self.s_nms_max_output = tk.StringVar(value="400")
self.o_coords_type = ('centroids', 'minmax', 'corners')
self.s_coords_type = tk.StringVar(value=self.o_coords_type[0])
self.bool_2_for_1 = tk.BooleanVar(value=False)
self.bool_clip_boxes = tk.BooleanVar(value=False)
self.bool_norm_coords = tk.BooleanVar(value=False)
self.s_pos_iou_thresh = tk.StringVar(value="0.5")
self.s_neg_iou_limit = tk.StringVar(value="0.3")
```
#### File: gui/tools_menu/constructor.py
```python
import tkinter as tk
import threading
import webbrowser
import socket
import os
class ToolsMenuConstructor(tk.Menu):
def __init__(self, parent, controller):
tk.Menu.__init__(self, parent)
self.parent = parent
self.controller = controller
self.tools_menu = tk.Menu(self.parent)
self.parent.add_cascade(label='Tools', menu=self.tools_menu)
self.tools_menu.add_command(label='Delete model', command=self.delete_model)
self.tools_menu.add_separator()
self.tools_menu.add_command(label='Delete generator', command=self.delete_gen)
self.tools_menu.add_separator()
self.tools_menu.add_command(label='Delete discriminator', command=self.delete_discrim)
self.tools_menu.add_separator()
self.tools_menu.add_command(label='Open tensorboard', command=self.open_tensorboard)
def delete_model(self):
self.controller.layers_list_box.delete(0, tk.END)
self.controller.layers_list_box_serial.delete(0, tk.END)
self.controller.home_menu.s_model_built.set('Serial model deleted')
def delete_gen(self):
self.controller.layers_list_box_gen.delete(0, tk.END)
self.controller.home_menu.s_model_built.set('Generator deleted')
def delete_discrim(self):
self.controller.layers_list_box_discrim.delete(0, tk.END)
self.controller.home_menu.s_model_built.set('Discriminator deleted')
def tensorboard_clicked(self):
tensorboard_dir = self.controller.options_menu.s_tensorboard_path.get()
command = 'tensorboard --logdir=' + tensorboard_dir
os.system(command=command)
def open_tensorboard(self):
local_host = socket.gethostname()
url = 'http://' + local_host + ':6006'
threading.Thread(target=self.tensorboard_clicked).start()
webbrowser.open(url, new=1)
def get_configs(self):
pass
```
#### File: gui/view_menu/layer_list.py
```python
import tkinter as tk
class LayerList:
def __init__(self):
self.button_heights = 1
self.button_widths = 15
self.label_heights = 1
self.label_widths = 15
self.entry_widths = 15
self.tl_layer_list = tk.Toplevel()
self.tl_layer_list.title('List of layers')
self.tl_layer_list.wm_protocol('WM_DELETE_WINDOW', self.tl_layer_list.withdraw)
self.tl_layer_list.resizable(width=False, height=False)
self.b_serial_layers = tk.Button(self.tl_layer_list, text='View serial layers', command=self.view_serial_layers, height=self.button_heights).grid(row=0, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_gen_layers = tk.Button(self.tl_layer_list, text='View generator layers', command=self.view_gen_layers, height=self.button_heights).grid(row=0, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_discrim_layers = tk.Button(self.tl_layer_list, text='View discriminator layers', command=self.view_discrim_layers, height=self.button_heights).grid(row=0, column=6, columnspan=3, sticky=tk.E+tk.W)
self.b_serial_layers = tk.Button(self.tl_layer_list, text='Rebuild model', command=self.rebuild_serial_layers, height=self.button_heights).grid(row=2, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_gen_layers = tk.Button(self.tl_layer_list, text='Rebuild generator', command=self.rebuild_gen_layers, height=self.button_heights).grid(row=2, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_discrim_layers = tk.Button(self.tl_layer_list, text='Rebuild discriminator', command=self.rebuild_discrim_layers, height=self.button_heights).grid(row=2, column=6, columnspan=3, sticky=tk.E+tk.W)
self.lb_layers_list = tk.Listbox(self.tl_layer_list)
self.lb_layers_list.bind('<<ListboxSelect>>', self.cursor_select)
self.lb_layers_list.config(width=85, height=25)
self.lb_layers_list.grid(row=1, column=0, columnspan=9, sticky=tk.N+tk.S+tk.E+tk.W)
self.sb_layers_list = tk.Scrollbar(self.tl_layer_list, orient="vertical")
self.sb_layers_list.config(command=self.lb_layers_list.yview)
self.sb_layers_list.grid(row=1, column=9, sticky=tk.N+tk.S)
self.lb_layers_list.config(yscrollcommand=self.sb_layers_list.set)
self.lb_layers_list_serial = tk.Listbox(self.tl_layer_list)
self.lb_layers_list_gen = tk.Listbox(self.tl_layer_list)
self.lb_layers_list_discrim = tk.Listbox(self.tl_layer_list)
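# The three listboxes above are never gridded; they act as hidden storage for the serial,
# generator, and discriminator layer lists, while lb_layers_list is the visible working copy
# that the view/rebuild buttons swap in and out.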
self.s_layer_to_modify = tk.StringVar(value="No layer selected")
self.i_index = tk.IntVar()
self.b_layer_to_modify = tk.Button(self.tl_layer_list, text='Update layer', command=self.change_layer, height=self.button_heights).grid(row=3, column=0, columnspan=3, sticky=tk.E+tk.W)
self.b_inject_layer = tk.Button(self.tl_layer_list, text='Inject layer', command=self.inject_layer, height=self.button_heights).grid(row=3, column=3, columnspan=3, sticky=tk.E+tk.W)
self.b_delete_layer = tk.Button(self.tl_layer_list, text='Delete layer', command=self.delete_layer, height=self.button_heights).grid(row=3, column=6, columnspan=3, sticky=tk.E+tk.W)
self.e_layer_to_modify = tk.Entry(self.tl_layer_list, textvariable=self.s_layer_to_modify, width=self.entry_widths).grid(row=4, column=0, columnspan=9, sticky=tk.E+tk.W)
self.tl_layer_list.withdraw()
def cursor_select(self, event):
try:
index = self.lb_layers_list.curselection()[0]
selection = self.lb_layers_list.get(index)
self.i_index.set(index)
self.s_layer_to_modify.set(selection)
except:
pass
def change_layer(self):
self.lb_layers_list.delete(self.i_index.get())
self.lb_layers_list.insert(self.i_index.get(), self.s_layer_to_modify.get())
def inject_layer(self):
self.lb_layers_list.insert(self.i_index.get() + 1, self.s_layer_to_modify.get())
def delete_layer(self):
self.lb_layers_list.delete(self.i_index.get())
def show(self):
self.tl_layer_list.deiconify()
def view_serial_layers(self):
layers = self.lb_layers_list_serial.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def view_gen_layers(self):
layers = self.lb_layers_list_gen.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def view_discrim_layers(self):
layers = self.lb_layers_list_discrim.get(0, tk.END)
if any(layers):
self.lb_layers_list.delete(0, tk.END)
[self.lb_layers_list.insert(tk.END, layer) for layer in layers]
else:
self.lb_layers_list.delete(0, tk.END)
def rebuild_serial_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_serial.delete(0, tk.END)
[self.lb_layers_list_serial.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
def rebuild_gen_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_gen.delete(0, tk.END)
[self.lb_layers_list_gen.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
def rebuild_discrim_layers(self):
layers = self.lb_layers_list.get(0, tk.END)
self.lb_layers_list_discrim.delete(0, tk.END)
[self.lb_layers_list_discrim.insert(tk.END, layer) for layer in layers]
self.lb_layers_list.delete(0, tk.END)
```
#### File: src/utils/engine_utils.py
```python
from src.engine.layers import *
import keras.backend.tensorflow_backend as K
from keras.layers import Concatenate
from src.engine.configurations import EngineConfigurations
from src.utils.general_utils import str2bool
import os
from ast import literal_eval
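# L1 (sum of absolute differences) loss between predicted and target tensors, e.g. usable as a
# pix2pix-style reconstruction penalty alongside the adversarial loss.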
def l1_loss(y_true, y_pred):
return K.sum(K.abs(y_pred - y_true))
def create_layer(layer_definition):
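# Maps a colon-delimited layer definition string (layer name followed by its parameters, with
# empty fields defaulting to 'None') to the corresponding layer wrapper object.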
layer_configs = layer_definition.split(':')
layer_configs = ['None' if x == '' else x for x in layer_configs]
if layer_configs[0] == 'Input':
layer = InputLayer(layer_configs[1])
elif layer_configs[0] == 'Reshape':
layer = ReshapeLayer(layer_configs[1])
elif layer_configs[0] == 'Dropout':
layer = DropoutLayer(layer_configs[1])
elif layer_configs[0] == 'Dense':
layer = DenseLayer(layer_configs[1])
elif layer_configs[0] == 'Activation':
layer = ActivationLayer(layer_configs[1])
elif layer_configs[0] == 'Permute':
layer = PermuteLayer(layer_configs[1])
elif layer_configs[0] == 'Flatten':
layer = FlattenLayer()
elif layer_configs[0] == 'Spatial dropout 2D':
layer = SpatialDropout2DLayer(layer_configs[1])
elif layer_configs[0] == 'Spatial dropout 3D':
layer = SpatialDropout3DLayer(layer_configs[1])
elif layer_configs[0] == 'Convolution 2D':
layer = Conv2DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10])
elif layer_configs[0] == 'Separable convolution 2D':
layer = SeparableConv2DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10])
elif layer_configs[0] == 'Depthwise separable convolution 2D':
layer = DepthwiseSeparableConv2DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9])
elif layer_configs[0] == 'Transpose convolution 2D':
layer = ConvTranspose2DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10])
elif layer_configs[0] == 'Resize convolution 2D':
layer = ResizeConv2DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10], layer_configs[11])
elif layer_configs[0] == 'Convolution 3D':
layer = Conv3DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10])
elif layer_configs[0] == 'Transpose convolution 3D':
layer = ConvTranspose3DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10])
elif layer_configs[0] == 'Resize convolution 3D':
layer = ResizeConv3DLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5], layer_configs[6], layer_configs[7], layer_configs[8], layer_configs[9], layer_configs[10], layer_configs[11])
elif layer_configs[0] == 'Upsample 2D':
layer = Upsample2DLayer(layer_configs[1])
elif layer_configs[0] == 'Upsample 3D':
layer = Upsample3DLayer(layer_configs[1])
elif layer_configs[0] == 'Zero padding 2D':
layer = ZeroPad2DLayer(layer_configs[1])
elif layer_configs[0] == 'Zero padding 3D':
layer = ZeroPad3DLayer(layer_configs[1])
elif layer_configs[0] == 'Cropping 2D':
layer = Cropping2DLayer(layer_configs[1])
elif layer_configs[0] == 'Cropping 3D':
layer = Cropping3DLayer(layer_configs[1])
elif layer_configs[0] == 'Leaky reLU':
layer = LeakyReluLayer(layer_configs[1])
elif layer_configs[0] == 'ELU':
layer = EluLayer(layer_configs[1])
elif layer_configs[0] == 'Thresholded reLU':
layer = ThresholdedReluLayer(layer_configs[1])
elif layer_configs[0] == 'PreLU':
layer = PreluLayer()
elif layer_configs[0] == 'Max pooling 2D':
layer = MaxPool2DLayer(layer_configs[1], layer_configs[2])
elif layer_configs[0] == 'Average pooling 2D':
layer = AvgPool2DLayer(layer_configs[1], layer_configs[2])
elif layer_configs[0] == 'Global max pooling 2D':
layer = GlobalMaxPool2DLayer()
elif layer_configs[0] == 'Global average pooling 2D':
layer = GlobalAvgPool2DLayer()
elif layer_configs[0] == 'Max pooling 3D':
layer = MaxPool3DLayer(layer_configs[1], layer_configs[2])
elif layer_configs[0] == 'Average pooling 3D':
layer = AvgPool3DLayer(layer_configs[1], layer_configs[2])
elif layer_configs[0] == 'Global max pooling 3D':
layer = GlobalMaxPool3DLayer()
elif layer_configs[0] == 'Global average pooling 3D':
layer = GlobalAvgPool3DLayer()
elif layer_configs[0] == 'Batch normalization':
layer = BatchNormalizationLayer(layer_configs[1], layer_configs[2])
elif layer_configs[0] == 'Gaussian dropout':
layer = GaussianDropoutLayer(layer_configs[1])
elif layer_configs[0] == 'Gaussian noise':
layer = GaussianNoiseLayer(layer_configs[1])
elif layer_configs[0] == 'Alpha dropout':
layer = AlphaDropoutLayer(layer_configs[1])
elif layer_configs[0] == 'Outer skip source':
layer = OuterSkipConnectionSourceLayer(layer_configs[1])
elif layer_configs[0] == 'Outer skip target':
layer = OuterSkipConnectionTargetLayer(layer_configs[1])
elif layer_configs[0] == 'Inner skip source':
layer = InnerSkipConnectionSourceLayer(layer_configs[1])
elif layer_configs[0] == 'Inner skip target':
layer = InnerSkipConnectionTargetLayer(layer_configs[1])
elif layer_configs[0] == 'Hook connection source':
layer = HookConnectionSourceLayer()
elif layer_configs[0] == 'Xception':
layer = XceptionLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'VGG16':
layer = VGG16Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'VGG19':
layer = VGG19Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet50':
layer = ResNet50Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet101':
layer = ResNet101Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet152':
layer = ResNet152Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet50V2':
layer = ResNet50V2Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet101V2':
layer = ResNet101V2Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNet152V2':
layer = ResNet152V2Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNeXt50':
layer = ResNeXt50Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'ResNeXt101':
layer = ResNeXt101Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'InceptionV3':
layer = InceptionV3Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'InceptionResNetV2':
layer = InceptionResNetV2Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'DenseNet121':
layer = DenseNet121Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'DenseNet169':
layer = DenseNet169Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'DenseNet201':
layer = DenseNet201Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'MobileNet':
layer = MobileNetLayer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
elif layer_configs[0] == 'MobileNetV2':
layer = MobileNetV2Layer(layer_configs[1], layer_configs[2], layer_configs[3], layer_configs[4], layer_configs[5])
return layer
class ModelMGPU(keras.models.Model):
'''
Enable multi_gpu_model compatibility with the ModelCheckpoint callback.
:param ser_model: serial model
:param gpus: number of GPUs
Pulled from:
https://github.com/keras-team/keras/issues/2436#issuecomment-354882296
'''
def __init__(self, ser_model, gpus):
pmodel = keras.utils.multi_gpu_model(ser_model, gpus)
self.__dict__.update(pmodel.__dict__)
self._smodel = ser_model
def __getattribute__(self, attrname):
'''
Override load and save methods to be used from the serial-model. The
serial-model holds references to the weights in the multi-gpu model.
'''
if 'load' in attrname or 'save' in attrname:
return getattr(self._smodel, attrname)
return super(ModelMGPU, self).__getattribute__(attrname)
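# Minimal usage sketch for ModelMGPU (hypothetical names; the model builder, data, and
# checkpoint path below are assumptions, not part of this module):
#   serial_model = build_keras_model()                  # any Keras model ready to compile
#   parallel_model = ModelMGPU(serial_model, gpus=2)
#   parallel_model.compile(optimizer='adam', loss='mse')
#   parallel_model.fit(x_train, y_train, epochs=10,
#                      callbacks=[keras.callbacks.ModelCheckpoint('weights.h5')])
# Because load/save calls are delegated to the serial model, the checkpoint stores the
# single-GPU weights rather than the multi-GPU wrapper.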
def level_one_error_checking(configs):
errors = []
warnings = []
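# Each check below either records a fatal 'Level1Error:...' in errors (missing paths, unknown
# option values) or records a recoverable 'Warning:...' in warnings and substitutes a default
# value into configs.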
if any(configs['config_file']['model_signal'] in x for x in ['CNN', 'FCN', 'GAN', 'BBD']):
pass
else:
errors.append('Level1Error:NonexistentDispatcherModelSignal')
if any(configs['config_file']['type_signal'] in x for x in ['Train', 'Train from Checkpoint', 'Inference']):
pass
else:
errors.append('Level1Error:NonexistentDispatcherTypeSignal')
if any(configs['config_file']['input_shape']):
try:
if type(literal_eval(configs['config_file']['input_shape'])) is not tuple:
errors.append('Level1Error:InputShapeShouldBeTuple')
except ValueError:
errors.append('Level1Error:InputShapeShouldBeTuple')
else:
errors.append('Level1Error:MustDefineInputShape')
if os.path.exists(configs['paths']['load_config']) or any(configs['paths']['load_config']) is False:
pass
else:
errors.append('Level1Error:LoadConfigurationPathDoesNotExist')
if os.path.exists(configs['paths']['load_checkpoint']) or any(configs['paths']['load_checkpoint']) is False:
pass
else:
errors.append('Level1Error:LoadCheckpointPathDoesNotExist')
if os.path.exists(configs['paths']['load_model']) or any(configs['paths']['load_model']) is False:
pass
else:
errors.append('Level1Error:LoadModelPathDoesNotExist')
if os.path.exists(configs['paths']['train_X']) or any(configs['paths']['train_X']) is False:
pass
else:
errors.append('Level1Error:LoadTrainXPathDoesNotExist')
if os.path.exists(configs['paths']['train_y']) or any(configs['paths']['train_y']) is False:
pass
else:
errors.append('Level1Error:LoadTrainyPathDoesNotExist')
if os.path.exists(configs['paths']['validation_X']) or any(configs['paths']['validation_X']) is False:
pass
else:
errors.append('Level1Error:LoadValidationXPathDoesNotExist')
if os.path.exists(configs['paths']['validation_y']) or any(configs['paths']['validation_y']) is False:
pass
else:
errors.append('Level1Error:LoadValidationyPathDoesNotExist')
if os.path.exists(configs['paths']['test_X']) or any(configs['paths']['test_X']) is False:
pass
else:
errors.append('Level1Error:LoadTestXPathDoesNotExist')
if any(configs['preprocessing']['image_context'] in x for x in ['2D', '3D']):
pass
else:
errors.append('Level1Error:NonexistentImageContext')
if any(configs['preprocessing']['normalization_type'] in x for x in ['samplewise_unity_x', 'samplewise_negpos_x',
'global_unity_x', 'global_negpos_x',
'samplewise_unity_xy', 'samplewise_negpos_xy',
'global_unity_xy', 'global_negpos_xy', 'none']):
if any(configs['preprocessing']['normalization_type'] in x for x in ['global_unity_x', 'global_negpos_x']):
if any(configs['preprocessing']['minimum_image_intensity']):
try:
float(configs['preprocessing']['minimum_image_intensity'])
except ValueError:
errors.append('Level1Error:MinimumImageIntensityShouldBeFloat')
else:
errors.append('Level1Error:SpecifyMinimumImageIntensitytoPerformNormalization')
if any(configs['preprocessing']['maximum_image_intensity']):
try:
float(configs['preprocessing']['maximum_image_intensity'])
except ValueError:
errors.append('Level1Error:MaximumImageIntensityShouldBeFloat')
else:
errors.append('Level1Error:SpecifyMaximumImageIntensitytoPerformNormalization')
elif any(configs['preprocessing']['normalization_type'] in x for x in ['global_unity_xy', 'global_negpos_xy']):
try:
minimums = configs['preprocessing']['minimum_image_intensity'].split(',')
if len(minimums) == 2:
try:
float(minimums[0])
float(minimums[1])
except ValueError:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMinimumImageIntensityforSelectedImageNormalization')
else:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMinimumImageIntensityforSelectedImageNormalization')
except ValueError:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMinimumImageIntensityforSelectedImageNormalization')
try:
maximums = configs['preprocessing']['maximum_image_intensity'].split(',')
if len(maximums) == 2:
try:
float(maximums[0])
float(maximums[1])
except ValueError:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMaximumImageIntensityforSelectedImageNormalization')
else:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMaximumImageIntensityforSelectedImageNormalization')
except ValueError:
errors.append('Level1Error:SpecifyTwoValuesSeparatedbyCommaforMaximumImageIntensityforSelectedImageNormalization')
else:
errors.append('Level1Error:NonexistentImageNormalizationType')
try:
str2bool(configs['preprocessing']['categorical_switch'])
except ValueError:
warnings.append('Warning:ConvertToCategoricalSwitchShouldBeBool')
configs['preprocessing']['categorical_switch'] = 'False'
if any(configs['preprocessing']['categories']):
try:
int(configs['preprocessing']['categories'])
except ValueError:
warnings.append('Warning:NumberOfCategoriesShouldBeInt')
configs['preprocessing']['categories'] = '2'
else:
configs['preprocessing']['categories'] = '2'
try:
str2bool(configs['preprocessing']['weight_loss_switch'])
except ValueError:
warnings.append('Warning:WeightLossFunctionSwitchShouldBeBool')
configs['preprocessing']['weight_loss_switch'] = 'False'
try:
str2bool(configs['preprocessing']['repeat_X_switch'])
except ValueError:
warnings.append('Warning:RepeatXSwitchShouldBeBool')
configs['preprocessing']['repeat_X_switch'] = 'False'
if any(configs['preprocessing']['repeat_X_quantity']):
try:
int(configs['preprocessing']['repeat_X_quantity'])
except ValueError:
warnings.append('Warning:RepeatXQuantityShouldBeInt')
configs['preprocessing']['repeat_X_quantity'] = '3'
else:
configs['preprocessing']['repeat_X_quantity'] = '3'
try:
str2bool(configs['augmentation']['apply_augmentation_switch'])
except ValueError:
warnings.append('Warning:ApplyAugmentationSwitchShouldBeBool')
configs['augmentation']['apply_augmentation_switch'] = 'False'
try:
str2bool(configs['augmentation']['featurewise_centering_switch'])
except ValueError:
warnings.append('Warning:FeaturewiseCenteringSwitchShouldBeBool')
configs['augmentation']['featurewise_centering_switch'] = 'False'
try:
str2bool(configs['augmentation']['samplewise_centering_switch'])
except ValueError:
warnings.append('Warning:SamplewiseCenteringSwitchShouldBeBool')
configs['augmentation']['samplewise_centering_switch'] = 'False'
try:
str2bool(configs['augmentation']['featurewise_normalization_switch'])
except ValueError:
warnings.append('Warning:FeaturewiseNormalizationSwitchShouldBeBool')
configs['augmentation']['featurewise_normalization_switch'] = 'False'
try:
str2bool(configs['augmentation']['samplewise_normalization_switch'])
except ValueError:
warnings.append('Warning:SamplewiseNormalizationSwitchShouldBeBool')
configs['augmentation']['samplewise_normalization_switch'] = 'False'
if any(configs['augmentation']['width_shift']):
try:
float(configs['augmentation']['width_shift'])
except ValueError:
warnings.append('Warning:WidthShiftShouldBeFloat')
else:
configs['augmentation']['width_shift'] = '0.1'
if any(configs['augmentation']['height_shift']):
try:
float(configs['augmentation']['height_shift'])
except ValueError:
warnings.append('Warning:HeightShiftShouldBeFloat')
else:
configs['augmentation']['height_shift'] = '0.1'
if any(configs['augmentation']['rotation_range']):
try:
int(configs['augmentation']['rotation_range'])
except ValueError:
warnings.append('Warning:RotationRangeShouldBeInt')
else:
configs['augmentation']['rotation_range'] = '0'
if any(configs['augmentation']['brightness_range']):
try:
if type(literal_eval(configs['augmentation']['brightness_range'])) is tuple\
or literal_eval(configs['augmentation']['brightness_range']) is None:
pass
except ValueError:
warnings.append('Warning:BrightnessRangeShouldBeTupleorNone')
configs['augmentation']['brightness_range'] = 'None'
else:
configs['augmentation']['brightness_range'] = 'None'
if any(configs['augmentation']['shear_range']):
try:
float(configs['augmentation']['shear_range'])
except ValueError:
warnings.append('Warning:ShearRangeShouldBeFloat')
else:
configs['augmentation']['shear_range'] = '0.0'
if any(configs['augmentation']['zoom_range']):
try:
float(configs['augmentation']['zoom_range'])
except ValueError:
warnings.append('Warning:ZoomRangeShouldBeFloat')
else:
configs['augmentation']['zoom_range'] = '0.0'
if any(configs['augmentation']['channel_shift_range']):
try:
float(configs['augmentation']['channel_shift_range'])
except ValueError:
warnings.append('Warning:ChannelShiftRangeShouldBeFloat')
else:
configs['augmentation']['channel_shift_range'] = '0.0'
if any(configs['augmentation']['fill_mode'] in x for x in ['nearest', 'constant', 'reflect', 'wrap']):
pass
else:
errors.append('Level1Error:NonexistentFillMode')
if any(configs['augmentation']['cval']):
try:
float(configs['augmentation']['cval'])
except ValueError:
warnings.append('Warning:CvalShouldBeFloat')
else:
configs['augmentation']['cval'] = '0.0'
try:
str2bool(configs['augmentation']['horizontal_flip_switch'])
except ValueError:
warnings.append('Warning:HorizontalFlipSwitchShouldBeBool')
configs['augmentation']['horizontal_flip_switch'] = 'False'
try:
str2bool(configs['augmentation']['vertical_flip_switch'])
except ValueError:
warnings.append('Warning:VerticalFlipSwitchShouldBeBool')
configs['augmentation']['vertical_flip_switch'] = 'False'
if any(configs['augmentation']['zca_epsilon']):
try:
if type(literal_eval(configs['augmentation']['zca_epsilon'])) is float\
or literal_eval(configs['augmentation']['zca_epsilon']) is None:
pass
except ValueError:
warnings.append('Warning:ZCAWhiteningEpsilonShouldBeFloatorNone')
configs['augmentation']['zca_epsilon'] = 'None'
else:
configs['augmentation']['zca_epsilon'] = 'None'
if any(configs['augmentation']['random_seed']):
try:
int(configs['augmentation']['random_seed'])
except ValueError:
warnings.append('Warning:RandomSeedShouldBeInt')
configs['augmentation']['random_seed'] = '1'
else:
configs['augmentation']['random_seed'] = '1'
if any(configs['augmentation']['rounds']):
try:
int(configs['augmentation']['rounds'])
except ValueError:
warnings.append('Warning:RoundsShouldBeInt')
configs['augmentation']['rounds'] = '1'
else:
configs['augmentation']['rounds'] = '1'
if any(configs['loss_function']['loss'] in x for x in ['categorical_crossentropy',
'weighted_categorical_crossentropy',
'sparse_categorical_crossentropy', 'mean_squared_error',
'mean_absolute_error', 'tversky', 'pix2pix',
'cyclegan', 'ssd', 'jaccard', 'focal', 'soft_dice']):
pass
else:
errors.append('Level1Error:NonexistentLossFunction')
if any(configs['loss_function']['parameter1']):
try:
float(configs['loss_function']['parameter1'])
except ValueError:
warnings.append('Warning:Parameter1ShouldBeFloat')
else:
configs['loss_function']['parameter1'] = '0.0'
if any(configs['loss_function']['parameter2']):
try:
float(configs['loss_function']['parameter2'])
except ValueError:
warnings.append('Warning:Parameter2ShouldBeFloat')
else:
configs['loss_function']['parameter2'] = '0.0'
if any(configs['learning_rate_schedule']['learning_rate']):
try:
float(configs['learning_rate_schedule']['learning_rate'])
except ValueError:
warnings.append('Warning:LearningRateShouldBeFloat')
else:
configs['learning_rate_schedule']['learning_rate'] = '0.0001'
if any(configs['learning_rate_schedule']['learning_rate_decay_factor']):
try:
float(configs['learning_rate_schedule']['learning_rate_decay_factor'])
except ValueError:
warnings.append('Warning:LearningRateDecayFactorShouldBeFloat')
else:
configs['learning_rate_schedule']['learning_rate_decay_factor'] = '0.0'
try:
str2bool(configs['learning_rate_schedule']['decay_on_plateau_switch'])
except ValueError:
warnings.append('Warning:DecayOnPlateauSwitchShouldBeBool')
configs['learning_rate_schedule']['decay_on_plateau_switch'] = 'False'
if any(configs['learning_rate_schedule']['decay_on_plateau_factor']):
try:
float(configs['learning_rate_schedule']['decay_on_plateau_factor'])
except ValueError:
warnings.append('Warning:DecayOnPlateauFactorShouldBeFloat')
else:
configs['learning_rate_schedule']['decay_on_plateau_factor'] = '0.0'
if any(configs['learning_rate_schedule']['decay_on_plateau_patience']):
try:
int(configs['learning_rate_schedule']['decay_on_plateau_patience'])
except ValueError:
warnings.append('Warning:DecayOnPlateauPatienceShouldBeInt')
else:
configs['learning_rate_schedule']['decay_on_plateau_patience'] = '3'
try:
str2bool(configs['learning_rate_schedule']['step_decay_switch'])
except ValueError:
warnings.append('Warning:StepDecaySwitchShouldBeBool')
configs['learning_rate_schedule']['step_decay_switch'] = 'False'
if any(configs['learning_rate_schedule']['step_decay_factor']):
try:
float(configs['learning_rate_schedule']['step_decay_factor'])
except ValueError:
warnings.append('Warning:StepDecayFactorShouldBeFloat')
else:
configs['learning_rate_schedule']['step_decay_factor'] = '0.0'
if any(configs['learning_rate_schedule']['step_decay_period']):
try:
int(configs['learning_rate_schedule']['step_decay_period'])
except ValueError:
warnings.append('Warning:StepDecayPeriodShouldBeInt')
else:
configs['learning_rate_schedule']['step_decay_period'] = '3'
if any(configs['learning_rate_schedule']['discriminator_learning_rate']):
try:
values = configs['learning_rate_schedule']['discriminator_learning_rate'].split(':')
if type(literal_eval(values[0])) is float:
pass
else:
warnings.append('Warning:DiscriminatorLearningRateShouldBeFloat')
values[0] = '0.0001'
if type(literal_eval(values[1])) is float:
pass
else:
warnings.append('Warning:DiscriminatorLearningRateDecayShouldBeFloat')
values[1] = '0.0'
configs['learning_rate_schedule']['discriminator_learning_rate'] = ':'.join([values[0], values[1]])
except ValueError:
errors.append('Level1Error:CannotDetermineDiscriminatorLearningRateConfigurations')
else:
configs['learning_rate_schedule']['discriminator_learning_rate'] = '0.0001:0.0'
if any(configs['learning_rate_schedule']['gan_learning_rate']):
try:
values = configs['learning_rate_schedule']['gan_learning_rate'].split(':')
if type(literal_eval(values[0])) is float:
pass
else:
warnings.append('Warning:GANLearningRateShouldBeFloat')
values[0] = '0.0001'
if type(literal_eval(values[1])) is float:
pass
else:
warnings.append('Warning:GANLearningRateDecayShouldBeFloat')
values[1] = '0.0'
configs['learning_rate_schedule']['gan_learning_rate'] = ':'.join([values[0], values[1]])
except ValueError:
errors.append('Level1Error:CannotDetermineGANLearningRateConfigurations')
else:
configs['learning_rate_schedule']['gan_learning_rate'] = '0.0001:0.0'
if any(configs['optimizer']['optimizer'] in x for x in ['Adam', 'NAdam', 'SGD', 'RMSprop',
'Adagrad', 'Adadelta', 'Adamax']):
pass
else:
errors.append('Level1Error:NonexistentOptimizer')
if any(configs['optimizer']['beta1']):
try:
float(configs['optimizer']['beta1'])
except ValueError:
warnings.append('Warning:OptimizerBeta1ShouldBeFloat')
configs['optimizer']['beta1'] = '0.9'
else:
configs['optimizer']['beta1'] = '0.9'
if any(configs['optimizer']['beta2']):
try:
float(configs['optimizer']['beta2'])
except ValueError:
warnings.append('Warning:OptimizerBeta2ShouldBeFloat')
configs['optimizer']['beta2'] = '0.999'
else:
configs['optimizer']['beta2'] = '0.999'
if any(configs['optimizer']['rho']):
try:
float(configs['optimizer']['rho'])
except ValueError:
warnings.append('Warning:OptimizerRhoShouldBeFloat')
configs['optimizer']['rho'] = '0.9'
else:
configs['optimizer']['rho'] = '0.9'
if any(configs['optimizer']['momentum']):
try:
float(configs['optimizer']['momentum'])
except ValueError:
warnings.append('Warning:OptimizerMomentumShouldBeFloat')
configs['optimizer']['momentum'] = '0.0'
else:
configs['optimizer']['momentum'] = '0.0'
if any(configs['optimizer']['epsilon']):
try:
if type(literal_eval(configs['optimizer']['epsilon'])) is float\
or literal_eval(configs['optimizer']['epsilon']) is None:
pass
except ValueError:
warnings.append('Warning:OptimizerEpsilonShouldBeFloatorNone')
configs['optimizer']['epsilon'] = 'None'
else:
configs['optimizer']['epsilon'] = 'None'
if any(configs['optimizer']['discriminator_optimizer']):
try:
values = configs['optimizer']['discriminator_optimizer'].split(':')
if any(values[0] in x for x in ['Adam', 'NAdam', 'SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adamax']):
pass
else:
errors.append('Level1Error:NonexistentDiscriminatorOptimizer')
if type(literal_eval(values[1])) is float:
pass
else:
warnings.append('Warning:DiscriminatorOptimizerBeta1ShouldBeFloat')
values[1] = '0.9'
if type(literal_eval(values[2])) is float:
pass
else:
warnings.append('Warning:DiscriminatorOptimizerBeta2ShouldBeFloat')
values[2] = '0.999'
if type(literal_eval(values[3])) is float:
pass
else:
warnings.append('Warning:DiscriminatorOptimizerRhoShouldBeFloat')
values[3] = '0.9'
if type(literal_eval(values[4])) is float:
pass
else:
warnings.append('Warning:DiscriminatorOptimizerMomentumShouldBeFloat')
values[4] = '0.0'
if type(literal_eval(values[5])) is float or literal_eval(values[5]) is None:
pass
else:
warnings.append('Warning:DiscriminatorOptimizerEpsilonShouldBeFloatorNone')
values[5] = 'None'
configs['optimizer']['discriminator_optimizer'] = ':'.join([values[0], values[1], values[2],
values[3], values[4], values[5]])
except ValueError:
errors.append('Level1Error:CannotDetermineDiscriminatorOptimizerConfigurations')
else:
configs['optimizer']['discriminator_optimizer'] = 'Adam:0.9:0.999:0.9:0.0:None'
if any(configs['optimizer']['gan_optimizer']):
try:
values = configs['optimizer']['gan_optimizer'].split(':')
if any(values[0] in x for x in ['Adam', 'NAdam', 'SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adamax']):
pass
else:
errors.append('Level1Error:NonexistentGANOptimizer')
if type(literal_eval(values[1])) is float:
pass
else:
warnings.append('Warning:GANOptimizerBeta1ShouldBeFloat')
values[1] = '0.9'
if type(literal_eval(values[2])) is float:
pass
else:
warnings.append('Warning:GANOptimizerBeta2ShouldBeFloat')
values[2] = '0.999'
if type(literal_eval(values[3])) is float:
pass
else:
warnings.append('Warning:GANOptimizerRhoShouldBeFloat')
values[3] = '0.9'
if type(literal_eval(values[4])) is float:
pass
else:
warnings.append('Warning:GANOptimizerMomentumShouldBeFloat')
values[4] = '0.0'
if type(literal_eval(values[5])) is float or literal_eval(values[5]) is None:
pass
else:
warnings.append('Warning:GANOptimizerEpsilonShouldBeFloatorNone')
values[5] = 'None'
configs['optimizer']['gan_optimizer'] = ':'.join([values[0], values[1], values[2],
values[3], values[4], values[5]])
except ValueError:
errors.append('Level1Error:CannotDetermineGANOptimizerConfigurations')
else:
configs['optimizer']['gan_optimizer'] = 'Adam:0.9:0.999:0.9:0.0:None'
if any(configs['training_configurations']['hardware'] in x for x in ['gpu', 'multi-gpu', 'cpu']):
pass
else:
errors.append('Level1Error:NonexistentHardware')
if any(configs['training_configurations']['number_of_gpus']):
try:
int(configs['training_configurations']['number_of_gpus'])
except ValueError:
warnings.append('Warning:NumberOfGpusShouldBeInt')
configs['training_configurations']['number_of_gpus'] = '1'
else:
configs['training_configurations']['number_of_gpus'] = '1'
try:
str2bool(configs['training_configurations']['early_stop_switch'])
except ValueError:
warnings.append('Warning:EarlyStopSwitchShouldBeBool')
configs['training_configurations']['early_stop_switch'] = 'False'
if any(configs['training_configurations']['early_stop_patience']):
try:
int(configs['training_configurations']['early_stop_patience'])
except ValueError:
warnings.append('Warning:EarlyStopPatienceShouldBeInt')
configs['training_configurations']['early_stop_patience'] = '10'
else:
configs['training_configurations']['early_stop_patience'] = '10'
if any(configs['training_configurations']['batch_size']):
try:
int(configs['training_configurations']['batch_size'])
except ValueError:
warnings.append('Warning:BatchSizeShouldBeInt')
configs['training_configurations']['batch_size'] = '32'
else:
configs['training_configurations']['batch_size'] = '32'
if any(configs['training_configurations']['epochs']):
try:
int(configs['training_configurations']['epochs'])
except ValueError:
warnings.append('Warning:EpochsShouldBeInt')
configs['training_configurations']['epochs'] = '500'
else:
configs['training_configurations']['epochs'] = '500'
try:
str2bool(configs['training_configurations']['shuffle_data_switch'])
except ValueError:
warnings.append('Warning:ShuffleDataSwitchShouldBeBool')
configs['training_configurations']['shuffle_data_switch'] = 'True'
if any(configs['training_configurations']['validation_split']):
try:
float(configs['training_configurations']['validation_split'])
except ValueError:
warnings.append('Warning:ValidationSplitShouldBeFloat')
configs['training_configurations']['validation_split'] = '0.0'
else:
configs['training_configurations']['validation_split'] = '0.0'
try:
str2bool(configs['monitors']['mse_switch'])
except ValueError:
warnings.append('Warning:MSESwitchShouldBeBool')
configs['monitors']['mse_switch'] = 'False'
try:
str2bool(configs['monitors']['mae_switch'])
except ValueError:
warnings.append('Warning:MAESwitchShouldBeBool')
configs['monitors']['mae_switch'] = 'False'
try:
str2bool(configs['monitors']['accuracy_switch'])
except ValueError:
warnings.append('Warning:AccuracySwitchShouldBeBool')
configs['monitors']['accuracy_switch'] = 'True'
try:
str2bool(configs['save_configurations']['save_model_switch'])
except ValueError:
warnings.append('Warning:SaveModelSwitchShouldBeBool')
configs['save_configurations']['save_model_switch'] = 'False'
if any(configs['save_configurations']['save_model_path']):
if os.path.exists(os.path.dirname(configs['save_configurations']['save_model_path'])) is False:
errors.append('Level1Error:NonexistentSaveModelDirectory')
file, ext = os.path.splitext(configs['save_configurations']['save_model_path'])
if ext != '.h5':
warnings.append('Warning:SaveModelFileExtensionMustBeh5')
configs['save_configurations']['save_model_path'] = file + '.h5'
try:
str2bool(configs['save_configurations']['save_csv_switch'])
except ValueError:
warnings.append('Warning:SaveCSVSwitchShouldBeBool')
configs['save_configurations']['save_csv_switch'] = 'False'
if any(configs['save_configurations']['save_csv_path']):
if os.path.exists(os.path.dirname(configs['save_configurations']['save_csv_path'])) is False:
errors.append('Level1Error:NonexistentSaveCSVDirectory')
file, ext = os.path.splitext(configs['save_configurations']['save_csv_path'])
if ext != '.csv':
warnings.append('Warning:SaveCSVFileExtensionMustBecsv')
configs['save_configurations']['save_csv_path'] = file + '.csv'
try:
str2bool(configs['save_configurations']['save_checkpoints_switch'])
except ValueError:
warnings.append('Warning:SaveModelCheckpointsSwitchShouldBeBool')
configs['save_configurations']['save_checkpoints_switch'] = 'False'
if any(configs['save_configurations']['save_checkpoints_path']):
if os.path.exists(os.path.dirname(configs['save_configurations']['save_checkpoints_path'])) is False:
errors.append('Level1Error:NonexistentSaveModelCheckpointsDirectory')
file, ext = os.path.splitext(configs['save_configurations']['save_checkpoints_path'])
if ext != '.h5':
warnings.append('Warning:SaveModelCheckpointsFileExtensionMustBeh5')
configs['save_configurations']['save_checkpoints_path'] = file + '.h5'
if any(configs['save_configurations']['save_checkpoints_frequency']):
try:
int(configs['save_configurations']['save_checkpoints_frequency'])
except ValueError:
warnings.append('Warning:SaveCheckpointsFrequencyShouldBeInt')
try:
str2bool(configs['save_configurations']['save_tensorboard_switch'])
except ValueError:
warnings.append('Warning:SaveTensorboardSwitchShouldBeBool')
configs['save_configurations']['save_tensorboard_switch'] = 'False'
if any(configs['save_configurations']['save_tensorboard_path']):
if os.path.exists(os.path.dirname(configs['save_configurations']['save_tensorboard_path'])) is False:
errors.append('Level1Error:NonexistentSaveTensorboardDirectory')
if any(configs['save_configurations']['save_tensorboard_frequency']):
try:
int(configs['save_configurations']['save_tensorboard_frequency'])
except ValueError:
warnings.append('Warning:SaveTensorboardFrequencyShouldBeInt')
if any(configs['layers']['serial_layer_list']):
for layer in configs['layers']['serial_layer_list']:
if type(layer) is not str:
errors.append('Level1Error:SerialLayersListContainsInvalidLayer')
break
if any(configs['layers']['generator_layer_list']):
for layer in configs['layers']['generator_layer_list']:
if type(layer) is not str:
errors.append('Level1Error:GeneratorLayersListContainsInvalidLayer')
break
if any(configs['layers']['discriminator_layer_list']):
for layer in configs['layers']['discriminator_layer_list']:
if type(layer) is not str:
errors.append('Level1Error:DiscriminatorLayersListContainsInvalidLayer')
break
if any(configs['bbd_options']['scaling_type'] in x for x in ['global', 'per predictor layer']):
pass
else:
errors.append('Level1Error:NonexistentScalingType')
if any(configs['bbd_options']['scales']):
values = configs['bbd_options']['scales'].split(',')
if len(values) == 1:
try:
                literal_eval(values[0])
except ValueError:
errors.append('Level1Error:ScalesMustBeNoneorFloatorMultipleFloatsSeparatedbyComma')
else:
try:
[float(value) for value in values]
except ValueError:
errors.append('Level1Error:ScalesMustBeNoneorFloatorMultipleFloatsSeparatedbyComma')
else:
warnings.append('Warning:NoBbdScalesSpecified')
configs['bbd_options']['scales'] = 'None'
if any(configs['bbd_options']['aspect_ratios_type'] in x for x in ['global', 'per predictor layer']):
pass
else:
errors.append('Level1Error:NonexistentAspectRatiosType')
if any(configs['bbd_options']['aspect_ratios']):
try:
ars = literal_eval(configs['bbd_options']['aspect_ratios'])
if type(ars) is tuple:
for ar in ars:
if type(ar) is tuple:
try:
[float(ar_val) for ar_val in ar]
except ValueError:
errors.append('Level1Error:AspectRatiosMustbeTupleofFloatsorTupleofTuplesofFloats')
else:
try:
float(ar)
except ValueError:
errors.append('Level1Error:AspectRatiosMustbeTupleofFloatsorTupleofTuplesofFloats')
break
else:
errors.append('Level1Error:AspectRatiosMustbeTupleofFloatsorTupleofTuplesofFloats')
except ValueError:
errors.append('Level1Error:AspectRatiosMustbeTupleofFloatsorTupleofTuplesofFloats')
else:
errors.append('Level1Error:AspectRatiosMustbeSpecified')
if any(configs['bbd_options']['number_classes']):
try:
int(configs['bbd_options']['number_classes'])
except ValueError:
errors.append('Level1Error:NoNumberofBbdClassesSpecified')
else:
errors.append('Level1Error:NoNumberofBbdClassesSpecified')
if any(configs['bbd_options']['steps']):
try:
steps = literal_eval(configs['bbd_options']['steps'])
if type(steps) is tuple:
for step in steps:
if type(step) is tuple:
try:
[float(step_val) for step_val in step]
except ValueError:
errors.append('Level1Error:StepsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
else:
try:
float(step)
except ValueError:
errors.append('Level1Error:StepsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
break
elif steps is None:
pass
else:
errors.append('Level1Error:StepsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
except ValueError:
errors.append('Level1Error:StepsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
else:
warnings.append('Warning:NoStepsSpecified')
configs['bbd_options']['steps'] = 'None'
if any(configs['bbd_options']['offsets']):
try:
offsets = literal_eval(configs['bbd_options']['offsets'])
if type(offsets) is tuple:
for offset in offsets:
if type(offset) is tuple:
try:
[float(offset_val) for offset_val in offset]
except ValueError:
errors.append('Level1Error:OffsetsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
else:
try:
float(offset)
except ValueError:
errors.append('Level1Error:OffsetsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
break
elif offsets is None:
pass
else:
errors.append('Level1Error:OffsetsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
except ValueError:
errors.append('Level1Error:OffsetsMustbeNoneorTupleofFloatsorTupleofTuplesofTwoFloats')
else:
warnings.append('Warning:NoOffsetsSpecified')
configs['bbd_options']['offsets'] = 'None'
if any(configs['bbd_options']['variances']):
try:
variances = literal_eval(configs['bbd_options']['variances'])
if type(variances) is tuple:
if len(variances) == 4:
try:
[float(variance) for variance in variances]
except ValueError:
errors.append('Level1Error:VariancesMustbeTupleofFourFloatsGreaterthanZero')
else:
errors.append('Level1Error:VariancesMustbeTupleofFourFloatsGreaterthanZero')
else:
errors.append('Level1Error:VariancesMustbeTupleofFourFloatsGreaterthanZero')
except ValueError:
errors.append('Level1Error:VariancesMustbeTupleofFourFloatsGreaterthanZero')
else:
        warnings.append('Warning:NoVariancesSpecified')
configs['bbd_options']['variances'] = '(1.0, 1.0, 1.0, 1.0)'
if any(configs['bbd_options']['confidence_threshold']):
try:
float(configs['bbd_options']['confidence_threshold'])
except ValueError:
warnings.append('Warning:ConfidenceThresholdShouldBeFloat')
configs['bbd_options']['confidence_threshold'] = '0.1'
else:
configs['bbd_options']['confidence_threshold'] = '0.1'
if any(configs['bbd_options']['iou_threshold']):
try:
float(configs['bbd_options']['iou_threshold'])
except ValueError:
warnings.append('Warning:IoUThresholdShouldBeFloat')
configs['bbd_options']['iou_threshold'] = '0.5'
else:
configs['bbd_options']['iou_threshold'] = '0.5'
if any(configs['bbd_options']['top_k']):
try:
int(configs['bbd_options']['top_k'])
except ValueError:
            warnings.append('Warning:BbdTopKShouldBeInt')
configs['bbd_options']['top_k'] = '200'
else:
warnings.append('Warning:NoBbdTopKSpecified')
configs['bbd_options']['top_k'] = '200'
if any(configs['bbd_options']['nms_maximum_output']):
try:
int(configs['bbd_options']['nms_maximum_output'])
except ValueError:
            warnings.append('Warning:BbdNmsMaximumOutputShouldBeInt')
configs['bbd_options']['nms_maximum_output'] = '400'
else:
warnings.append('Warning:NoBbdNmsSpecified')
configs['bbd_options']['nms_maximum_output'] = '400'
if any(configs['bbd_options']['coordinates_type'] in x for x in ['centroids', 'minmax', 'corners']):
pass
else:
errors.append('Level1Error:NonexistentCoordinatesType')
try:
str2bool(configs['bbd_options']['two_boxes_for_AR1_switch'])
except ValueError:
warnings.append('Warning:TwoBoxesforAR1ShouldBeBool')
configs['bbd_options']['two_boxes_for_AR1_switch'] = 'False'
try:
str2bool(configs['bbd_options']['clip_boxes_switch'])
except ValueError:
warnings.append('Warning:ClipBoxesShouldBeBool')
configs['bbd_options']['clip_boxes_switch'] = 'False'
try:
str2bool(configs['bbd_options']['normalize_coordinates_switch'])
except ValueError:
warnings.append('Warning:NormalizeCoordinatesShouldBeBool')
configs['bbd_options']['normalize_coordinates_switch'] = 'False'
if any(configs['bbd_options']['positive_iou_threshold']):
try:
float(configs['bbd_options']['positive_iou_threshold'])
except ValueError:
warnings.append('Warning:PositiveIoUThresholdShouldBeFloat')
configs['bbd_options']['positive_iou_threshold'] = '0.5'
else:
configs['bbd_options']['positive_iou_threshold'] = '0.5'
if any(configs['bbd_options']['negative_iou_limit']):
try:
float(configs['bbd_options']['negative_iou_limit'])
except ValueError:
warnings.append('Warning:NegativeIoULimitShouldBeFloat')
configs['bbd_options']['negative_iou_limit'] = '0.3'
else:
configs['bbd_options']['negative_iou_limit'] = '0.3'
return configs, errors, warnings
def level_two_error_checking(configs):
engine_configs = EngineConfigurations(configs)
errors = engine_configs.train_data.errors\
+ engine_configs.val_data.errors\
+ engine_configs.test_data.errors\
+ engine_configs.saver.errors
warnings = engine_configs.train_data.warnings\
+ engine_configs.val_data.warnings\
+ engine_configs.test_data.warnings\
+ engine_configs.saver.warnings
return engine_configs, errors, warnings
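def _example_validate_configs(configs):
    """Hedged illustration only; this helper is not part of the original source. It
    sketches how the two validation passes in this module could be chained: the
    level-one pass (the function ending in ``return configs, errors, warnings`` above,
    assumed here to be named ``level_one_error_checking``) normalises the raw
    string-valued configs, and ``level_two_error_checking`` builds the
    EngineConfigurations object. Raising on hard errors is an assumption made for this
    sketch; the real application reports them through the GUI instead."""
    configs, errors, warnings = level_one_error_checking(configs)
    if errors:
        raise ValueError('; '.join(errors))
    engine_configs, more_errors, more_warnings = level_two_error_checking(configs)
    if more_errors:
        raise ValueError('; '.join(more_errors))
    return engine_configs, warnings + more_warnings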
def get_io(layer_definitions):
inner_skip_starts = []
outer_skip_starts = []
bbd_hooks = []
errors = []
inputs = None
x = None
for i, layer_definition in enumerate(layer_definitions):
try:
layer = create_layer(layer_definition)
if i == 0:
if layer.type != 'Input':
errors.append('Level3Error:FirstLayerMustBeInput')
break
else:
inputs = layer.keras_layer
elif i == 1:
try:
if layer.type in ['Xception', 'VGG16', 'VGG19', 'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2', 'ResNeXt50', 'ResNeXt101',
'InceptionV3', 'InceptionResNetV2', 'DenseNet121', 'DenseNet169',
'DenseNet201', 'MobileNet', 'MobileNetV2']:
inputs = layer.keras_layer.input
x = layer.keras_layer.output
if literal_eval(layer.include_skips):
outer_skip_starts = layer.skips
if literal_eval(layer.include_hooks):
bbd_hooks = layer.hooks
else:
x = layer.keras_layer(inputs)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Resize convolution 2D' or layer.type == 'Resize convolution 3D':
try:
x = layer.keras_upsample_layer(x)
x = layer.keras_conv_layer(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Outer skip source':
try:
outer_skip_starts.append(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type in ['Xception', 'VGG16', 'VGG19', 'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2', 'ResNeXt50', 'ResNeXt101',
'InceptionV3', 'InceptionResNetV2', 'DenseNet121', 'DenseNet169',
'DenseNet201', 'MobileNet', 'MobileNetV2']:
try:
inputs = layer.keras_layer.input
x = layer.keras_layer.output
if literal_eval(layer.include_skips):
outer_skip_starts = layer.skips
if literal_eval(layer.include_hooks):
bbd_hooks = layer.hooks
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Outer skip target':
try:
if layer.skip_type == 'concatenate':
x = keras.layers.Concatenate()([outer_skip_starts[-1], x])
if layer.skip_type == 'add':
x = keras.layers.Add()([outer_skip_starts[-1], x])
if layer.skip_type == 'subtract':
x = keras.layers.Subtract()([outer_skip_starts[-1], x])
if layer.skip_type == 'multiply':
x = keras.layers.Multiply()([outer_skip_starts[-1], x])
if layer.skip_type == 'average':
x = keras.layers.Average()([outer_skip_starts[-1], x])
if layer.skip_type == 'maximum':
x = keras.layers.Maximum()([outer_skip_starts[-1], x])
outer_skip_starts.pop()
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Inner skip source':
try:
inner_skip_starts.append(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Inner skip target':
try:
if layer.skip_type == 'concatenate':
x = keras.layers.Concatenate()([inner_skip_starts[0], x])
if layer.skip_type == 'add':
x = keras.layers.Add()([inner_skip_starts[0], x])
if layer.skip_type == 'subtract':
x = keras.layers.Subtract()([inner_skip_starts[0], x])
if layer.skip_type == 'multiply':
x = keras.layers.Multiply()([inner_skip_starts[0], x])
if layer.skip_type == 'average':
x = keras.layers.Average()([inner_skip_starts[0], x])
if layer.skip_type == 'maximum':
x = keras.layers.Maximum()([inner_skip_starts[0], x])
inner_skip_starts.pop()
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Hook connection source':
try:
bbd_hooks.append(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
else:
try:
x = layer.keras_layer(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
except:
errors.append('Level3Error:CouldNotCreateLayerFromLayerSpecifications')
return inputs, x, bbd_hooks, errors
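def _example_model_from_get_io(layer_definitions):
    """Hedged illustration only; this helper is not part of the original source. It
    shows how the input tensor, output tensor and BBD hook list returned by ``get_io``
    above could be wrapped into a Keras model. It assumes ``keras`` is the module
    already imported by this file and that ``layer_definitions`` is the list of
    layer-specification strings understood by ``create_layer``."""
    inputs, outputs, bbd_hooks, errors = get_io(layer_definitions)
    if errors:
        raise ValueError('; '.join(errors))
    return keras.models.Model(inputs=inputs, outputs=outputs)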
def get_cgan_d_io(layer_definitions, gen_input):
inner_skip_starts = []
outer_skip_starts = []
errors = []
inputs = None
x = None
for i, layer_definition in enumerate(layer_definitions):
try:
layer = create_layer(layer_definition)
if i == 0:
if layer.type != 'Input':
errors.append('Level3Error:FirstLayerMustBeInput')
break
else:
source_layer = create_layer(gen_input)
source = source_layer.keras_layer
target = layer.keras_layer
inputs = Concatenate(axis=-1)([target, source])
elif i == 1:
try:
if layer.type in ['Xception', 'VGG16', 'VGG19', 'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2', 'ResNeXt50', 'ResNeXt101',
'InceptionV3', 'InceptionResNetV2', 'DenseNet121', 'DenseNet169',
'DenseNet201', 'MobileNet', 'MobileNetV2']:
inputs = layer.keras_layer.input
x = layer.keras_layer.output
if literal_eval(layer.include_skips):
outer_skip_starts = layer.skips
else:
x = layer.keras_layer(inputs)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Resize convolution 2D' or layer.type == 'Resize convolution 3D':
try:
x = layer.keras_upsample_layer(x)
x = layer.keras_conv_layer(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Outer skip source':
try:
outer_skip_starts.append(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type in ['Xception', 'VGG16', 'VGG19', 'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2', 'ResNeXt50', 'ResNeXt101',
'InceptionV3', 'InceptionResNetV2', 'DenseNet121', 'DenseNet169',
'DenseNet201', 'MobileNet', 'MobileNetV2']:
try:
inputs = layer.keras_layer.input
x = layer.keras_layer.output
if literal_eval(layer.include_skips):
outer_skip_starts = layer.skips
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Outer skip target':
try:
if layer.skip_type == 'concatenate':
x = keras.layers.Concatenate()([outer_skip_starts[-1], x])
if layer.skip_type == 'add':
x = keras.layers.Add()([outer_skip_starts[-1], x])
if layer.skip_type == 'subtract':
x = keras.layers.Subtract()([outer_skip_starts[-1], x])
if layer.skip_type == 'multiply':
x = keras.layers.Multiply()([outer_skip_starts[-1], x])
if layer.skip_type == 'average':
x = keras.layers.Average()([outer_skip_starts[-1], x])
if layer.skip_type == 'maximum':
x = keras.layers.Maximum()([outer_skip_starts[-1], x])
outer_skip_starts.pop()
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Inner skip source':
try:
inner_skip_starts.append(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
elif layer.type == 'Inner skip target':
try:
if layer.skip_type == 'concatenate':
x = keras.layers.Concatenate()([inner_skip_starts[0], x])
if layer.skip_type == 'add':
x = keras.layers.Add()([inner_skip_starts[0], x])
if layer.skip_type == 'subtract':
x = keras.layers.Subtract()([inner_skip_starts[0], x])
if layer.skip_type == 'multiply':
x = keras.layers.Multiply()([inner_skip_starts[0], x])
if layer.skip_type == 'average':
x = keras.layers.Average()([inner_skip_starts[0], x])
if layer.skip_type == 'maximum':
x = keras.layers.Maximum()([inner_skip_starts[0], x])
inner_skip_starts.pop()
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
else:
try:
x = layer.keras_layer(x)
except:
errors.append('Level3Error:CouldNotAdd ' + layer.type + ' AsALayer')
except:
errors.append('Level3Error:CouldNotCreateLayerFromLayerSpecifications')
return [target, source], x, errors
```
#### File: src/utils/gui_utils.py
```python
import tkinter as tk
from src.gui.file_menu.variables import FileMenuVariables
from src.gui.data_menu.variables import DataMenuVariables
from src.gui.layers_menu.variables import LayersMenuVariables
from src.gui.options_menu.variables import OptionsMenuVariables
from src.gui.home_menu.variables import HomeMenuVariables
class GuiParameterController(object):
def __init__(self):
self.file_menu = FileMenuVariables()
self.data_menu = DataMenuVariables()
self.layers_menu = LayersMenuVariables()
self.options_menu = OptionsMenuVariables()
self.tools_menu = None
self.home_menu = HomeMenuVariables()
self.layers_list_box = None
self.layers_list_box_serial = None
self.layers_list_box_gen = None
self.layers_list_box_discrim = None
self.errors_list_box = None
def get_configs(self):
configs = {}
configs['config_file'] = {}
configs['config_file']['model_signal'] = self.home_menu.s_model_signal.get()
configs['config_file']['type_signal'] = self.home_menu.s_type_signal.get()
configs['config_file']['input_shape'] = self.home_menu.s_input_shape.get()
configs['paths'] = {}
configs['paths']['load_config'] = self.file_menu.s_load_file_path.get()
configs['paths']['load_checkpoint'] = self.file_menu.s_load_ckpt_file_path.get()
configs['paths']['load_model'] = self.file_menu.s_load_model_file_path.get()
configs['paths']['train_X'] = self.data_menu.s_train_X_path.get()
configs['paths']['train_y'] = self.data_menu.s_train_y_path.get()
configs['paths']['validation_X'] = self.data_menu.s_val_X_path.get()
configs['paths']['validation_y'] = self.data_menu.s_val_y_path.get()
configs['paths']['test_X'] = self.data_menu.s_test_X_path.get()
configs['preprocessing'] = {}
configs['preprocessing']['minimum_image_intensity'] = self.data_menu.s_data_min.get()
configs['preprocessing']['maximum_image_intensity'] = self.data_menu.s_data_max.get()
configs['preprocessing']['image_context'] = self.data_menu.s_image_context.get()
configs['preprocessing']['normalization_type'] = self.data_menu.s_normalization_type.get()
configs['preprocessing']['categorical_switch'] = str(self.data_menu.bool_to_categorical.get())
configs['preprocessing']['categories'] = self.data_menu.s_num_categories.get()
configs['preprocessing']['weight_loss_switch'] = str(self.data_menu.bool_weight_loss.get())
configs['preprocessing']['repeat_X_switch'] = str(self.data_menu.bool_repeatX.get())
configs['preprocessing']['repeat_X_quantity'] = self.data_menu.s_repeatX.get()
configs['augmentation'] = {}
configs['augmentation']['apply_augmentation_switch'] = str(self.data_menu.bool_augmentation.get())
configs['augmentation']['featurewise_centering_switch'] = str(self.data_menu.bool_fw_centering.get())
configs['augmentation']['samplewise_centering_switch'] = str(self.data_menu.bool_sw_centering.get())
configs['augmentation']['featurewise_normalization_switch'] = str(self.data_menu.bool_fw_normalization.get())
configs['augmentation']['samplewise_normalization_switch'] = str(self.data_menu.bool_sw_normalization.get())
configs['augmentation']['width_shift'] = self.data_menu.s_width_shift.get()
configs['augmentation']['height_shift'] = self.data_menu.s_height_shift.get()
configs['augmentation']['rotation_range'] = self.data_menu.s_rotation_range.get()
configs['augmentation']['brightness_range'] = self.data_menu.s_brightness_range.get()
configs['augmentation']['shear_range'] = self.data_menu.s_shear_range.get()
configs['augmentation']['zoom_range'] = self.data_menu.s_zoom_range.get()
configs['augmentation']['channel_shift_range'] = self.data_menu.s_channel_shift_range.get()
configs['augmentation']['fill_mode'] = self.data_menu.s_fill_mode.get()
configs['augmentation']['cval'] = self.data_menu.s_cval.get()
configs['augmentation']['horizontal_flip_switch'] = str(self.data_menu.bool_horizontal_flip.get())
configs['augmentation']['vertical_flip_switch'] = str(self.data_menu.bool_vertical_flip.get())
configs['augmentation']['rounds'] = self.data_menu.s_rounds.get()
configs['augmentation']['zca_epsilon'] = self.data_menu.s_zca_epsilon.get()
configs['augmentation']['random_seed'] = self.data_menu.s_random_seed.get()
configs['loss_function'] = {}
configs['loss_function']['loss'] = self.options_menu.s_loss.get()
configs['loss_function']['parameter1'] = self.options_menu.s_loss_param1.get()
configs['loss_function']['parameter2'] = self.options_menu.s_loss_param2.get()
configs['learning_rate_schedule'] = {}
configs['learning_rate_schedule']['learning_rate'] = self.options_menu.s_base_lr.get()
configs['learning_rate_schedule']['learning_rate_decay_factor'] = self.options_menu.s_lr_decay.get()
configs['learning_rate_schedule']['decay_on_plateau_switch'] = str(self.options_menu.bool_decay_on_plateau.get())
configs['learning_rate_schedule']['decay_on_plateau_factor'] = self.options_menu.s_decay_on_plateau_factor.get()
configs['learning_rate_schedule']['decay_on_plateau_patience'] = self.options_menu.s_decay_on_plateau_patience.get()
configs['learning_rate_schedule']['step_decay_switch'] = str(self.options_menu.bool_step_decay.get())
configs['learning_rate_schedule']['step_decay_factor'] = self.options_menu.s_step_decay_factor.get()
configs['learning_rate_schedule']['step_decay_period'] = self.options_menu.s_step_decay_period.get()
configs['learning_rate_schedule']['discriminator_learning_rate'] = self.options_menu.s_d_lr.get()
configs['learning_rate_schedule']['gan_learning_rate'] = self.options_menu.s_gan_lr.get()
configs['optimizer'] = {}
configs['optimizer']['optimizer'] = self.options_menu.s_optimizer.get()
configs['optimizer']['beta1'] = self.options_menu.s_optimizer_beta1.get()
configs['optimizer']['beta2'] = self.options_menu.s_optimizer_beta2.get()
configs['optimizer']['rho'] = self.options_menu.s_optimizer_rho.get()
configs['optimizer']['momentum'] = self.options_menu.s_optimizer_momentum.get()
configs['optimizer']['epsilon'] = self.options_menu.s_optimizer_epsilon.get()
configs['optimizer']['discriminator_optimizer'] = self.options_menu.s_d_optimizer.get()
configs['optimizer']['gan_optimizer'] = self.options_menu.s_gan_optimizer.get()
configs['training_configurations'] = {}
configs['training_configurations']['hardware'] = self.options_menu.s_hardware.get()
configs['training_configurations']['number_of_gpus'] = self.options_menu.s_n_gpus.get()
configs['training_configurations']['early_stop_switch'] = str(self.options_menu.bool_early_stop.get())
configs['training_configurations']['early_stop_patience'] = self.options_menu.s_early_stop_patience.get()
configs['training_configurations']['batch_size'] = self.options_menu.s_batch_size.get()
configs['training_configurations']['epochs'] = self.options_menu.s_epochs.get()
configs['training_configurations']['shuffle_data_switch'] = str(self.options_menu.bool_shuffle.get())
configs['training_configurations']['validation_split'] = self.options_menu.s_val_split.get()
configs['monitors'] = {}
configs['monitors']['mse_switch'] = str(self.options_menu.bool_mse_monitor.get())
configs['monitors']['mae_switch'] = str(self.options_menu.bool_mae_monitor.get())
configs['monitors']['accuracy_switch'] = str(self.options_menu.bool_acc_monitor.get())
configs['save_configurations'] = {}
configs['save_configurations']['save_model_switch'] = str(self.options_menu.bool_save_model.get())
configs['save_configurations']['save_model_path'] = self.options_menu.s_save_model_path.get()
configs['save_configurations']['save_csv_switch'] = str(self.options_menu.bool_save_csv.get())
configs['save_configurations']['save_csv_path'] = self.options_menu.s_save_csv_path.get()
configs['save_configurations']['save_checkpoints_switch'] = str(self.options_menu.bool_save_checkpoints.get())
configs['save_configurations']['save_checkpoints_path'] = self.options_menu.s_save_checkpoints_path.get()
configs['save_configurations']['save_checkpoints_frequency'] = self.options_menu.s_save_checkpoints_frequency.get()
configs['save_configurations']['save_tensorboard_switch'] = str(self.options_menu.bool_tensorboard.get())
configs['save_configurations']['save_tensorboard_path'] = self.options_menu.s_tensorboard_path.get()
configs['save_configurations']['save_tensorboard_frequency'] = self.options_menu.s_tensorboard_frequency.get()
configs['bbd_options'] = {}
configs['bbd_options']['scaling_type'] = self.options_menu.s_scaling.get()
configs['bbd_options']['scales'] = self.options_menu.s_scales.get()
configs['bbd_options']['aspect_ratios_type'] = self.options_menu.s_aspect_ratios.get()
configs['bbd_options']['aspect_ratios'] = self.options_menu.s_ARs.get()
configs['bbd_options']['number_classes'] = self.options_menu.s_n_classes.get()
configs['bbd_options']['steps'] = self.options_menu.s_steps.get()
configs['bbd_options']['offsets'] = self.options_menu.s_offsets.get()
configs['bbd_options']['variances'] = self.options_menu.s_variances.get()
configs['bbd_options']['confidence_threshold'] = self.options_menu.s_conf_thresh.get()
configs['bbd_options']['iou_threshold'] = self.options_menu.s_iou_thresh.get()
configs['bbd_options']['top_k'] = self.options_menu.s_top_k.get()
configs['bbd_options']['nms_maximum_output'] = self.options_menu.s_nms_max_output.get()
configs['bbd_options']['coordinates_type'] = self.options_menu.s_coords_type.get()
configs['bbd_options']['two_boxes_for_AR1_switch'] = str(self.options_menu.bool_2_for_1.get())
configs['bbd_options']['clip_boxes_switch'] = str(self.options_menu.bool_clip_boxes.get())
configs['bbd_options']['normalize_coordinates_switch'] = str(self.options_menu.bool_norm_coords.get())
configs['bbd_options']['positive_iou_threshold'] = self.options_menu.s_pos_iou_thresh.get()
configs['bbd_options']['negative_iou_limit'] = self.options_menu.s_neg_iou_limit.get()
configs['layers'] = {}
configs['layers']['serial_layer_list'] = self.layers_list_box_serial.get(0, tk.END)
configs['layers']['generator_layer_list'] = self.layers_list_box_gen.get(0, tk.END)
configs['layers']['discriminator_layer_list'] = self.layers_list_box_discrim.get(0, tk.END)
return configs
def set_configs(self, configs):
self.home_menu.s_model_signal.set(configs['config_file']['model_signal'])
self.home_menu.s_type_signal.set(configs['config_file']['type_signal'])
self.home_menu.s_input_shape.set(configs['config_file']['input_shape'])
self.file_menu.s_load_file_path.set(configs['paths']['load_config'])
self.file_menu.s_load_ckpt_file_path.set(configs['paths']['load_checkpoint'])
self.file_menu.s_load_model_file_path.set(configs['paths']['load_model'])
self.data_menu.s_train_X_path.set(configs['paths']['train_X'])
self.data_menu.s_train_y_path.set(configs['paths']['train_y'])
self.data_menu.s_val_X_path.set(configs['paths']['validation_X'])
self.data_menu.s_val_y_path.set(configs['paths']['validation_y'])
self.data_menu.s_test_X_path.set(configs['paths']['test_X'])
self.data_menu.s_data_min.set(configs['preprocessing']['minimum_image_intensity'])
self.data_menu.s_data_max.set(configs['preprocessing']['maximum_image_intensity'])
self.data_menu.s_image_context.set(configs['preprocessing']['image_context'])
self.data_menu.s_normalization_type.set(configs['preprocessing']['normalization_type'])
self.data_menu.bool_to_categorical.set(configs['preprocessing']['categorical_switch'])
self.data_menu.s_num_categories.set(configs['preprocessing']['categories'])
self.data_menu.bool_weight_loss.set(configs['preprocessing']['weight_loss_switch'])
self.data_menu.bool_repeatX.set(configs['preprocessing']['repeat_X_switch'])
self.data_menu.s_repeatX.set(configs['preprocessing']['repeat_X_quantity'])
self.data_menu.bool_augmentation.set(configs['augmentation']['apply_augmentation_switch'])
self.data_menu.bool_fw_centering.set(configs['augmentation']['featurewise_centering_switch'])
self.data_menu.bool_sw_centering.set(configs['augmentation']['samplewise_centering_switch'])
self.data_menu.bool_fw_normalization.set(configs['augmentation']['featurewise_normalization_switch'])
self.data_menu.bool_sw_normalization.set(configs['augmentation']['samplewise_normalization_switch'])
self.data_menu.s_width_shift.set(configs['augmentation']['width_shift'])
self.data_menu.s_height_shift.set(configs['augmentation']['height_shift'])
self.data_menu.s_rotation_range.set(configs['augmentation']['rotation_range'])
self.data_menu.s_brightness_range.set(configs['augmentation']['brightness_range'])
self.data_menu.s_shear_range.set(configs['augmentation']['shear_range'])
self.data_menu.s_zoom_range.set(configs['augmentation']['zoom_range'])
self.data_menu.s_channel_shift_range.set(configs['augmentation']['channel_shift_range'])
self.data_menu.s_fill_mode.set(configs['augmentation']['fill_mode'])
self.data_menu.s_cval.set(configs['augmentation']['cval'])
self.data_menu.bool_horizontal_flip.set(configs['augmentation']['horizontal_flip_switch'])
self.data_menu.bool_vertical_flip.set(configs['augmentation']['vertical_flip_switch'])
self.data_menu.s_rounds.set(configs['augmentation']['rounds'])
self.data_menu.s_zca_epsilon.set(configs['augmentation']['zca_epsilon'])
self.data_menu.s_random_seed.set(configs['augmentation']['random_seed'])
self.options_menu.s_loss.set(configs['loss_function']['loss'])
self.options_menu.s_loss_param1.set(configs['loss_function']['parameter1'])
self.options_menu.s_loss_param2.set(configs['loss_function']['parameter2'])
self.options_menu.s_base_lr.set(configs['learning_rate_schedule']['learning_rate'])
self.options_menu.s_lr_decay.set(configs['learning_rate_schedule']['learning_rate_decay_factor'])
self.options_menu.bool_decay_on_plateau.set(configs['learning_rate_schedule']['decay_on_plateau_switch'])
self.options_menu.s_decay_on_plateau_factor.set(configs['learning_rate_schedule']['decay_on_plateau_factor'])
self.options_menu.s_decay_on_plateau_patience.set(configs['learning_rate_schedule']['decay_on_plateau_patience'])
self.options_menu.bool_step_decay.set(configs['learning_rate_schedule']['step_decay_switch'])
self.options_menu.s_step_decay_factor.set(configs['learning_rate_schedule']['step_decay_factor'])
self.options_menu.s_step_decay_period.set(configs['learning_rate_schedule']['step_decay_period'])
self.options_menu.s_d_lr.set(configs['learning_rate_schedule']['discriminator_learning_rate'])
self.options_menu.s_gan_lr.set(configs['learning_rate_schedule']['gan_learning_rate'])
self.options_menu.s_optimizer.set(configs['optimizer']['optimizer'])
self.options_menu.s_optimizer_beta1.set(configs['optimizer']['beta1'])
self.options_menu.s_optimizer_beta2.set(configs['optimizer']['beta2'])
self.options_menu.s_optimizer_rho.set(configs['optimizer']['rho'])
self.options_menu.s_optimizer_momentum.set(configs['optimizer']['momentum'])
self.options_menu.s_optimizer_epsilon.set(configs['optimizer']['epsilon'])
self.options_menu.s_d_optimizer.set(configs['optimizer']['discriminator_optimizer'])
self.options_menu.s_gan_optimizer.set(configs['optimizer']['gan_optimizer'])
self.options_menu.s_hardware.set(configs['training_configurations']['hardware'])
self.options_menu.s_n_gpus.set(configs['training_configurations']['number_of_gpus'])
self.options_menu.bool_early_stop.set(configs['training_configurations']['early_stop_switch'])
self.options_menu.s_early_stop_patience.set(configs['training_configurations']['early_stop_patience'])
self.options_menu.s_batch_size.set(configs['training_configurations']['batch_size'])
self.options_menu.s_epochs.set(configs['training_configurations']['epochs'])
self.options_menu.bool_shuffle.set(configs['training_configurations']['shuffle_data_switch'])
self.options_menu.s_val_split.set(configs['training_configurations']['validation_split'])
self.options_menu.bool_mse_monitor.set(configs['monitors']['mse_switch'])
self.options_menu.bool_mae_monitor.set(configs['monitors']['mae_switch'])
self.options_menu.bool_acc_monitor.set(configs['monitors']['accuracy_switch'])
self.options_menu.bool_save_model.set(configs['save_configurations']['save_model_switch'])
self.options_menu.s_save_model_path.set(configs['save_configurations']['save_model_path'])
self.options_menu.bool_save_csv.set(configs['save_configurations']['save_csv_switch'])
self.options_menu.s_save_csv_path.set(configs['save_configurations']['save_csv_path'])
self.options_menu.bool_save_checkpoints.set(configs['save_configurations']['save_checkpoints_switch'])
self.options_menu.s_save_checkpoints_path.set(configs['save_configurations']['save_checkpoints_path'])
self.options_menu.s_save_checkpoints_frequency.set(configs['save_configurations']['save_checkpoints_frequency'])
self.options_menu.bool_tensorboard.set(configs['save_configurations']['save_tensorboard_switch'])
self.options_menu.s_tensorboard_path.set(configs['save_configurations']['save_tensorboard_path'])
self.options_menu.s_tensorboard_frequency.set(configs['save_configurations']['save_tensorboard_frequency'])
self.options_menu.s_scaling.set(configs['bbd_options']['scaling_type'])
self.options_menu.s_scales.set(configs['bbd_options']['scales'])
self.options_menu.s_aspect_ratios.set(configs['bbd_options']['aspect_ratios_type'])
self.options_menu.s_ARs.set(configs['bbd_options']['aspect_ratios'])
self.options_menu.s_n_classes.set(configs['bbd_options']['number_classes'])
self.options_menu.s_steps.set(configs['bbd_options']['steps'])
self.options_menu.s_offsets.set(configs['bbd_options']['offsets'])
self.options_menu.s_variances.set(configs['bbd_options']['variances'])
self.options_menu.s_conf_thresh.set(configs['bbd_options']['confidence_threshold'])
self.options_menu.s_iou_thresh.set(configs['bbd_options']['iou_threshold'])
self.options_menu.s_top_k.set(configs['bbd_options']['top_k'])
self.options_menu.s_nms_max_output.set(configs['bbd_options']['nms_maximum_output'])
self.options_menu.s_coords_type.set(configs['bbd_options']['coordinates_type'])
self.options_menu.bool_2_for_1.set(configs['bbd_options']['two_boxes_for_AR1_switch'])
self.options_menu.bool_clip_boxes.set(configs['bbd_options']['clip_boxes_switch'])
self.options_menu.bool_norm_coords.set(configs['bbd_options']['normalize_coordinates_switch'])
self.options_menu.s_pos_iou_thresh.set(configs['bbd_options']['positive_iou_threshold'])
self.options_menu.s_neg_iou_limit.set(configs['bbd_options']['negative_iou_limit'])
self.layers_list_box.delete(0, tk.END)
self.layers_list_box_serial.delete(0, tk.END)
self.layers_list_box_gen.delete(0, tk.END)
self.layers_list_box_discrim.delete(0, tk.END)
[self.layers_list_box_serial.insert(tk.END, layer) for layer in configs['layers']['serial_layer_list']]
if any(configs['layers']['generator_layer_list']):
[self.layers_list_box.insert(tk.END, layer) for layer in configs['layers']['generator_layer_list']]
else:
[self.layers_list_box.insert(tk.END, layer) for layer in configs['layers']['serial_layer_list']]
[self.layers_list_box_gen.insert(tk.END, layer) for layer in configs['layers']['generator_layer_list']]
[self.layers_list_box_discrim.insert(tk.END, layer) for layer in configs['layers']['discriminator_layer_list']]
if any(configs['layers']['serial_layer_list']):
self.home_menu.s_model_built.set('Serial model built')
elif any(configs['layers']['generator_layer_list']) and any(configs['layers']['discriminator_layer_list']):
self.home_menu.s_model_built.set('Gen & discrim built')
elif not any(configs['layers']['serial_layer_list']):
self.home_menu.s_model_built.set('No layers defined')
else:
self.home_menu.s_model_built.set('Multiple models defined')
return
```
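The controller above only shuttles strings between tkinter variables and a nested dictionary, so a round trip through `get_configs` and `set_configs` should be lossless. The sketch below is illustrative only: it assumes a display is available for `tk.Tk()`, that the `*MenuVariables` classes create their tkinter variables against that default root, and that the four list boxes are attached by the caller, as the real GUI does in its menu modules.

```python
import tkinter as tk

# Hedged sketch: attach minimal list boxes, snapshot the GUI state, then restore it.
root = tk.Tk()
controller = GuiParameterController()
controller.layers_list_box = tk.Listbox(root)
controller.layers_list_box_serial = tk.Listbox(root)
controller.layers_list_box_gen = tk.Listbox(root)
controller.layers_list_box_discrim = tk.Listbox(root)

configs = controller.get_configs()   # every value comes back as a string
controller.set_configs(configs)      # feeding the same dict back should change nothing
```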
|
{
"source": "JeremiaMakabata/ticket-system",
"score": 2
}
|
#### File: ticketsapi/tickets/views.py
```python
from rest_framework import viewsets
from .models import Ticket, UserProfile, User
from .serializers import TicketSerializer, UserSerializer, UserProfileSerializer
from .permissions import IsOwnerOrReadOnly
from rest_framework import permissions
class UserViewSet(viewsets.ReadOnlyModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
class UserProfileViewSet(viewsets.ModelViewSet):
queryset = UserProfile.objects.all()
serializer_class = UserProfileSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class TicketViewSet(viewsets.ModelViewSet):
queryset = Ticket.objects.all()
serializer_class = TicketSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly]
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
```
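These viewsets only become reachable once they are registered with a router. The wiring below is a typical Django REST Framework setup and is a hedged sketch only: the module path `tickets.views`, the URL prefixes, and the file it would live in (`ticketsapi/urls.py`) are assumptions, not taken from this repository.

```python
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from tickets import views

# DefaultRouter derives basenames from each viewset's queryset attribute.
router = DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'profiles', views.UserProfileViewSet)
router.register(r'tickets', views.TicketViewSet)

urlpatterns = [
    path('', include(router.urls)),
]
```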
|
{
"source": "JeremiasFuentes/ASCII-Media-Player",
"score": 3
}
|
#### File: JeremiasFuentes/ASCII-Media-Player/generate.py
```python
import sys
from pynput import keyboard
from PIL import Image
import numpy as np
import os
import cv2
import pysrt
ASCII_CHARS = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
MAX_PIXEL_VALUE = 255
pause = False
def vid_render(st_matrix, st, ed, option):
pixels = [st_matrix[i][:] for i in range(st, ed)]
# CONFIG OPTION - intensity measure
intensity_matrix = get_intensity_matrix(pixels, 3)
intensity_matrix = normalize_intensity_matrix(intensity_matrix)
color_matrix = get_color_matrix(pixels)
ascii_matrix = []
for i in range(len(intensity_matrix)):
ascii_row = []
for j in range(len(intensity_matrix[0])):
intensity = intensity_matrix[i][j]
symbol_index = int(intensity / MAX_PIXEL_VALUE * len(ASCII_CHARS)) - 1
symbol_index = symbol_index + 1 if symbol_index < 0 else symbol_index
if option == 1:
color = color_matrix[i][j]
ascii_row.append(color)
ascii_row.append(ASCII_CHARS[symbol_index])
ascii_matrix.append(ascii_row)
print_matrix(ascii_matrix, st)
def subtitle_show(subs, tstamp_ms):
# minutes =
parts = subs.slice(starts_before={'milliseconds': int(tstamp_ms)}, ends_after={'milliseconds': int(tstamp_ms)})
size = os.get_terminal_size()
print("\033[" + str(size.lines - 2) + ";1H", end='')
for i in range(0, 2):
print(" " * int(size.columns))
print("\033[" + str(size.lines - 2) + ";1H", end='')
for part in parts:
print(part.text)
def get_pixel_matrix(image):
image = image.convert("RGB")
# current row and column size definitions
ac_row, ac_col = image.size
# d1 and d2 are the width and height of image resp
size = os.get_terminal_size()
d2 = min(size.lines - 3, int((ac_col * size.columns) / ac_row))
d1 = min(int(size.columns / 3), int((ac_row * d2) / ac_col))
# set image to determined d1 and column size
im = image.resize((d1, d2))
pixels = list(im.getdata())
return [pixels[i:i + im.width] for i in range(0, len(pixels), im.width)]
def print_matrix(ascii_matrix, st):
count = 1
for line in ascii_matrix:
line_extended = [p + p + p for p in line]
print("\033[" + str(st + count) + ";1H", end='')
print("".join(line_extended))
count += 1
def get_color_matrix(pixels):
color_matrix = []
for row in pixels:
color_matrix_row = []
for p in row:
color_matrix_row.append("\033[38;2;" + str(p[0]) + ";" + str(p[1]) + ";" + str(p[2]) + "m")
color_matrix.append(color_matrix_row)
return color_matrix
def get_intensity_matrix(pixels, option):
"""Set the measure of brightness to be used depending upon the
option chosen, we chose between three measures namely luminance,
lightness and average pixel values
"""
intensity_matrix = []
for row in pixels:
intensity_matrix_row = []
for p in row:
intensity = 0
if option == 1:
intensity = ((p[0] + p[1] + p[2]) / 3.0)
elif option == 2:
intensity = (max(p[0], p[1], p[2]) + min(p[0], p[1], p[2])) / 2
elif option == 3:
intensity = (0.299 * p[0] * p[0] + 0.587 * p[1] * p[1] + 0.114 * p[2] * p[2]) ** 0.5
else:
raise Exception("Unrecognised intensity option: %d" % option)
intensity_matrix_row.append(intensity)
intensity_matrix.append(intensity_matrix_row)
return intensity_matrix
def normalize_intensity_matrix(intensity_matrix):
normalized_intensity_matrix = []
max_pixel = max(map(max, intensity_matrix))
min_pixel = min(map(min, intensity_matrix))
for row in intensity_matrix:
rescaled_row = []
for p in row:
denm = float(max_pixel - min_pixel)
if denm == 0:
denm = 1
r = MAX_PIXEL_VALUE * (p - min_pixel) / denm
rescaled_row.append(r)
normalized_intensity_matrix.append(rescaled_row)
return normalized_intensity_matrix
def print_from_image(filename, option):
"""Taking in an image, use its RGB values to decide upon an ASCII character
to represent it. This ASCII character will be based upon the brightness
measure calculated
"""
try:
with Image.open(filename) as image:
pixels = get_pixel_matrix(image)
print("\033[40m\033[37m", end='')
vid_render(pixels, 0, len(pixels), option)
print("\033[0m", end='')
except OSError:
print("Could not open image file!")
def read_media_sub(vidfile, subfile, option):
vidcap = cv2.VideoCapture(vidfile)
subs = pysrt.open(subfile)
i = 0
# control frame rate in image
frame_skip = 0
os.system("clear")
def on_press(key):
global pause
if key == keyboard.Key.space:
pause = not pause
listener = keyboard.Listener(on_press=on_press)
listener.start()
while vidcap.isOpened():
# read frames from the image
success, image = vidcap.read()
if not success:
break
if i > frame_skip - 1:
# CONFIG OPTION - contrast and brightness
# enhance the image (increase contrast and brightness) for terminal display
# TURN OFF (by commenting) IF YOU PREFER THE ORIGINAL COLOURS
            # Busy-wait while playback is paused; `pause` is toggled by the space-bar listener.
            while pause:
                pass
if option == 1:
image = cv2.convertScaleAbs(image, alpha=1.25, beta=50)
cv2.imwrite("./data/frame.jpg", image)
i = 0
print_from_image("./data/frame.jpg", option)
subtitle_show(subs, vidcap.get(cv2.CAP_PROP_POS_MSEC))
continue
i += 1
vidcap.release()
cv2.destroyAllWindows()
def read_media(vidfile, option):
vidcap = cv2.VideoCapture(vidfile)
i = 0
# control frame rate in image
frame_skip = 0
os.system("clear")
def on_press(key):
global pause
if key == keyboard.Key.space:
pause = not pause
listener = keyboard.Listener(on_press=on_press)
listener.start()
while vidcap.isOpened():
# read frames from the image
success, image = vidcap.read()
if not success:
break
if i > frame_skip - 1:
# CONFIG OPTION - contrast and brightness
# enhance the image (increase contrast and brightness) for terminal display
# TURN OFF (by commenting) IF YOU PREFER THE ORIGINAL COLOURS
            # Busy-wait while playback is paused; `pause` is toggled by the space-bar listener.
            while pause:
                pass
if option == 1:
image = cv2.convertScaleAbs(image, alpha=1.25, beta=50)
cv2.imwrite("./data/frame.jpg", image)
i = 0
print_from_image("./data/frame.jpg", option)
continue
i += 1
vidcap.release()
cv2.destroyAllWindows()
if len(sys.argv) == 3:
vidfile = sys.argv[1]
colored_output = int(sys.argv[2])
read_media(vidfile, colored_output)
else:
vidfile = sys.argv[1]
subfile = sys.argv[2]
colored_output = int(sys.argv[3])
read_media_sub(vidfile, subfile, colored_output)
```
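The script is driven entirely by `sys.argv` at import time, so the cleanest way to exercise it is as a subprocess rather than an import. The sketch below shows the two invocation modes read from the argument parsing above; the media file names are placeholders, the final argument enables coloured ASCII output when it is `1`, and a writable `./data/` directory is assumed because the script stages frames in `./data/frame.jpg`.

```python
import subprocess

# Colour playback without subtitles: python generate.py <video> <colour flag>
subprocess.run(["python", "generate.py", "movie.mp4", "1"], check=True)

# Playback with an .srt subtitle track: python generate.py <video> <subtitles> <colour flag>
subprocess.run(["python", "generate.py", "movie.mp4", "movie.srt", "1"], check=True)
```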
|
{
"source": "JeremiasKnoblauch/MXFusion",
"score": 2
}
|
#### File: components/distributions/beta.py
```python
from ...common.config import get_default_MXNet_mode
from .univariate import UnivariateDistribution
class Beta(UnivariateDistribution):
"""
The one-dimensional beta distribution. The beta distribution can be defined over a scalar random variable or an
    array of random variables. In case of an array of random variables, alpha and beta are broadcast to the
shape of the output random variable (array).
:param alpha: a parameter (alpha) of the beta distribution.
:type alpha: Variable
:param beta: b parameter (beta) of the beta distribution.
:type beta: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, alpha, beta, rand_gen=None, dtype=None, ctx=None):
inputs = [('alpha', alpha), ('beta', beta)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(Beta, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def log_pdf_impl(self, alpha, beta, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the beta distribution.
:param alpha: the a parameter (alpha) of the beta distribution.
:type alpha: MXNet NDArray or MXNet Symbol
:param beta: the b parameter (beta) of the beta distributions.
:type beta: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the beta distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
log_x = F.log(random_variable)
log_1_minus_x = F.log(1 - random_variable)
log_beta_ab = F.gammaln(alpha) + F.gammaln(beta) - \
F.gammaln(alpha + beta)
log_likelihood = F.broadcast_add((alpha - 1) * log_x, ((beta - 1) * log_1_minus_x)) - log_beta_ab
return log_likelihood
def draw_samples_impl(self, alpha, beta, rv_shape, num_samples=1, F=None):
"""
Draw samples from the beta distribution.
        If X and Y are independent, with $X \sim \Gamma(\alpha, \theta)$ and $Y \sim \Gamma(\beta, \theta)$, then
        $\frac{X}{X+Y} \sim \mathrm{B}(\alpha, \beta)$.
:param alpha: the a parameter (alpha) of the beta distribution.
:type alpha: MXNet NDArray or MXNet Symbol
:param beta: the b parameter (beta) of the beta distributions.
:type beta: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set samples of the beta distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
if alpha.shape != (num_samples, ) + rv_shape:
raise ValueError("Shape mismatch between inputs {} and random variable {}".format(
alpha.shape, (num_samples, ) + rv_shape))
# Note output shape is determined by input dimensions
out_shape = () # (num_samples,) + rv_shape
ones = F.ones_like(alpha)
# Sample X from Gamma(a, 1)
x = self._rand_gen.sample_gamma(
alpha=alpha, beta=ones, shape=out_shape, dtype=self.dtype,
ctx=self.ctx, F=F)
# Sample Y from Gamma(b, 1)
y = self._rand_gen.sample_gamma(
alpha=beta, beta=ones, shape=out_shape, dtype=self.dtype,
ctx=self.ctx, F=F)
# Return X / (X + Y)
return F.broadcast_div(x, F.broadcast_add(x, y))
@staticmethod
def define_variable(alpha=1., beta=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a beta distribution.
        :param alpha: The alpha parameter of the distribution.
        :param beta: The beta parameter of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the beta distribution.
:rtypes: Variable
"""
beta = Beta(alpha=alpha, beta=beta, rand_gen=rand_gen, dtype=dtype,
ctx=ctx)
beta._generate_outputs(shape=shape)
return beta.random_variable
```
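The sampler above relies on the classical construction that if $X \sim \Gamma(\alpha, 1)$ and $Y \sim \Gamma(\beta, 1)$ are independent, then $X/(X+Y) \sim \mathrm{B}(\alpha, \beta)$. The NumPy check below is a hedged, standalone sketch of that fact, independent of MXFusion and MXNet; it only compares the empirical mean of the ratio with $\alpha/(\alpha+\beta)$.

```python
import numpy as np

rng = np.random.default_rng(0)
alpha, beta, n = 2.0, 5.0, 200_000

# Draw two independent Gamma(shape, scale=1) samples and form X / (X + Y).
x = rng.gamma(shape=alpha, scale=1.0, size=n)
y = rng.gamma(shape=beta, scale=1.0, size=n)
z = x / (x + y)

# The ratio should be Beta(alpha, beta); its mean should approach alpha / (alpha + beta).
print(z.mean(), alpha / (alpha + beta))        # both close to 2/7
print(rng.beta(alpha, beta, size=n).mean())    # direct Beta draws agree
```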
#### File: components/distributions/dirichlet.py
```python
from ..variables import Variable
from ...common.config import get_default_MXNet_mode
from .distribution import Distribution
class Dirichlet(Distribution):
"""
The Dirichlet distribution.
    :param Variable alpha: the concentration parameters (alpha) of the distribution.
:param boolean normalization: If true, L1 normalization is applied.
:param RandomGenerator rand_gen: the random generator (default: MXNetRandomGenerator).
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, alpha, normalization=True,
rand_gen=None, dtype=None, ctx=None):
inputs = [('alpha', alpha)]
input_names = ['alpha']
output_names = ['random_variable']
super().__init__(inputs=inputs, outputs=None, input_names=input_names,
output_names=output_names, rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
self.normalization = normalization
def log_pdf_impl(self, alpha, random_variable, F=None):
"""
Computes the logarithm of the probability density function (pdf) of the Dirichlet distribution.
        :param alpha: the concentration parameters (alpha) of the Dirichlet distribution.
        :type alpha: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the Dirichlet distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
if self.normalization:
random_variable = F.broadcast_div(
random_variable, F.expand_dims(F.norm(random_variable, ord=1,
axis=2), axis=2))
power = F.broadcast_power(random_variable, alpha - 1)
prod = F.prod(power, axis=2)
beta = F.prod(F.gamma(alpha), axis=2)/F.gamma(F.sum(alpha, axis=2))
logL = F.log(prod/beta)
return logL
def draw_samples_impl(self, alpha, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Dirichlet distribution.
        :param alpha: the concentration parameters (alpha) of the Dirichlet distribution.
        :type alpha: MXNet NDArray or MXNet Symbol
:param tuple rv_shape: the shape of each sample (this variable is not used because the shape of the random var
is given by the shape of a)
:param int num_samples: the number of drawn samples (default: one).
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set samples of the Dirichlet distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
ones = F.ones_like(alpha)
y = self._rand_gen.sample_gamma(alpha=alpha, beta=ones,
dtype=self.dtype, ctx=self.ctx)
return F.broadcast_div(y, F.sum(y))
@staticmethod
def define_variable(alpha, shape=None, normalization=True,
rand_gen=None, dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a Dirichlet distribution.
        :param Variable alpha: the concentration parameters (alpha) of the distribution.
:param boolean normalization: If true, L1 normalization is applied.
:param RandomGenerator rand_gen: the random generator (default: MXNetRandomGenerator).
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Dirichlet distribution.
:rtypes: Variable
"""
dirichlet = Dirichlet(alpha=alpha, normalization=normalization,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
dirichlet._generate_outputs(shape=shape)
return dirichlet.random_variable
def _generate_outputs(self, shape):
"""
Set the output variable of the distribution.
:param shape: the shape of the random distribution.
:type shape: tuple
"""
self.outputs = [('random_variable', Variable(value=self, shape=shape))]
def replicate_self(self, attribute_map=None):
"""
This functions as a copy constructor for the object.
In order to do a copy constructor we first call ``__new__`` on the class which creates a blank object.
We then initialize that object using the methods standard init procedures, and do any extra copying of
attributes.
Replicates this Factor, using new inputs, outputs, and a new uuid.
Used during model replication to functionally replicate a factor into a new graph.
:param inputs: new input variables of the factor.
:type inputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
:param outputs: new output variables of the factor.
:type outputs: List of tuples of name to node e.g. [('random_variable': Variable y)] or None
"""
replicant = super().replicate_self(attribute_map=attribute_map)
replicant.normalization = self.normalization
return replicant
```
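`draw_samples_impl` above uses the same gamma trick in higher dimensions: independent $\Gamma(\alpha_k, 1)$ draws normalised by their sum follow a Dirichlet($\alpha$) distribution. The NumPy sketch below is illustrative only and does not touch the MXNet code above; it checks that the normalised rows sum to one and that the component means approach $\alpha_k / \sum_j \alpha_j$.

```python
import numpy as np

rng = np.random.default_rng(0)
alpha = np.array([2.0, 3.0, 5.0])
n = 200_000

# One Gamma(alpha_k, 1) draw per concentration parameter, normalised along the last axis.
g = rng.gamma(shape=alpha, scale=1.0, size=(n, alpha.size))
samples = g / g.sum(axis=1, keepdims=True)

print(samples.sum(axis=1)[:3])                     # ~[1., 1., 1.]
print(samples.mean(axis=0))                        # ~[0.2, 0.3, 0.5]
print(rng.dirichlet(alpha, size=n).mean(axis=0))   # direct Dirichlet draws agree
```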
#### File: components/distributions/gamma.py
```python
from ...common.config import get_default_MXNet_mode
from .univariate import UnivariateDistribution
class Gamma(UnivariateDistribution):
"""
Gamma distribution parameterized using Alpha and Beta.
Takes dependency on Scipy to compute the log-gamma function.
:param alpha: the alpha parameter of the Gamma distribution.
:type alpha: Variable
:param beta: beta parameter of the Gamma distribution.
:type beta: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, alpha, beta, rand_gen=None, dtype=None, ctx=None):
inputs = [('alpha', alpha), ('beta', beta)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(Gamma, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def log_pdf_impl(self, alpha, beta, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the Gamma distribution.
:param random_variable: the random variable of the Gamma distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
g_alpha = F.gammaln(alpha)
p1 = (alpha - 1.) * F.log(random_variable)
return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))
def draw_samples_impl(self, alpha, beta, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Gamma distribution.
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the Gamma distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
return F.random.gamma(alpha=alpha, beta=beta, dtype=self.dtype,
ctx=self.ctx)
@staticmethod
def define_variable(alpha=0., beta=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a Gamma distribution parameterized with a and b parameters.
:param alpha: alpha parameter of the Gamma random variable (also known as the shape parameter)
:type alpha: float
:param beta: beta parameter of the Gamma random variable (also known as the rate parameter)
:type beta: float
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Gamma distribution.
:rtypes: Variable
"""
dist = Gamma(alpha=alpha, beta=beta, rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
dist._generate_outputs(shape=shape)
return dist.random_variable
class GammaMeanVariance(UnivariateDistribution):
"""
Gamma distribution parameterized using Mean and Variance.
The log-gamma function is computed with MXNet's ``gammaln`` operator.
:param mean: the mean parameter of the Gamma distribution.
:type mean: Variable
:param variance: variance parameter of the Gamma distribution.
:type variance: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, variance, rand_gen=None, dtype=None, ctx=None):
inputs = [('mean', mean), ('variance', variance)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(GammaMeanVariance, self).__init__(
inputs=inputs, outputs=None, input_names=input_names,
output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def _get_alpha_beta(self, a, b):
"""
Returns the alpha/beta (shape/rate) parameters computed from the mean/variance parameterization.
:param a: the mean of the Gamma distribution
:type a: mx.ndarray.array or mx.symbol.array
:param b: the variance of the Gamma distribution
:type b: mx.ndarray.array or mx.symbol.array
:returns: the equivalent (alpha, beta) parameters
"""
beta = a / b
alpha = a * beta
return alpha, beta
def log_pdf_impl(self, mean, variance, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the Gamma distribution.
:param mean: mean of the Gamma random variable (alpha / beta)
:type mean: float
:param variance: variance of the Gamma random variable (alpha / beta**2)
:type variance: float
:param random_variable: the random variable of the Gamma distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
alpha, beta = self._get_alpha_beta(mean, variance)
g_alpha = F.gammaln(alpha)
p1 = (alpha - 1.) * F.log(random_variable)
return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))
def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Gamma distribution.
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set samples of the Gamma distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
alpha, beta = self._get_alpha_beta(mean, variance)
return F.random.gamma(alpha=alpha, beta=beta, dtype=self.dtype,
ctx=self.ctx)
@staticmethod
def define_variable(mean=0., variance=1., shape=None, rand_gen=None, dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a Gamma distribution parameterized with mean and variance.
:param mean: mean of the Gamma random variable (alpha / beta)
:type mean: float
:param variance: variance of the Gamma random variable (alpha / beta**2)
:type variance: float
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Gamma distribution.
:rtypes: Variable
"""
dist = GammaMeanVariance(mean=mean, variance=variance,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
dist._generate_outputs(shape=shape)
return dist.random_variable
```
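A quick way to sanity-check the conversion in `_get_alpha_beta` is to convert and sample with NumPy. This is a hedged sketch with toy numbers of my own: for a Gamma with mean m and variance v, beta = m / v and alpha = m * beta = m^2 / v, so samples drawn with those parameters should reproduce m and v.

```python
import numpy as np

mean, variance = 3.0, 1.5
beta = mean / variance            # rate, as in _get_alpha_beta
alpha = mean * beta               # shape: alpha = mean**2 / variance

# NumPy's gamma uses shape/scale, where scale = 1 / rate.
samples = np.random.gamma(shape=alpha, scale=1.0 / beta, size=100_000)
print(samples.mean(), samples.var())   # close to (3.0, 1.5)
```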
#### File: gp/kernels/linear.py
```python
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
class Linear(NativeKernel):
"""
Linear kernel
.. math::
k(x,y) = \\sum_{i=1}^{\\text{input_dim}} \\sigma^2_i x_iy_i
:param input_dim: the number of dimensions of the kernel (the total number of active dimensions).
:type input_dim: int
:param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, a separate variance is learned
for each input dimension.
:type ARD: boolean
:param variances: the initial value for the variances parameter, which scales the input dimensions.
:type variances: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, ARD=False, variances=1., name='linear',
active_dims=None, dtype=None, ctx=None):
super(Linear, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
self.ARD = ARD
if not isinstance(variances, Variable):
variances = Variable(shape=(input_dim if ARD else 1,),
transformation=PositiveTransformation(),
initial_value=variances)
self.variances = variances
def _compute_K(self, F, X, variances, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variances: the variances parameter, which scales the input dimensions.
:type variances: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if self.ARD:
var_sqrt = F.expand_dims(F.sqrt(variances), axis=-2)
if X2 is None:
xsc = X * var_sqrt
return F.linalg.syrk(xsc)
else:
xsc = X * var_sqrt
x2sc = X2 * var_sqrt
return F.linalg.gemm2(xsc, x2sc, False, True)
else:
if X2 is None:
A = F.linalg.syrk(X)
else:
A = F.linalg.gemm2(X, X2, False, True)
return A * F.expand_dims(variances, axis=-1)
def _compute_Kdiag(self, F, X, variances):
"""
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variances: the variances parameter, which scales the input dimensions.
:type variances: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
X2 = F.square(X)
return F.sum(X2 * F.expand_dims(variances, axis=-2), axis=-1)
def replicate_self(self, attribute_map=None):
"""
The copy constructor for a kernel.
"""
replicant = super(Linear, self).replicate_self(attribute_map)
replicant.ARD = self.ARD
return replicant
```
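The covariance computed by `_compute_K` in the non-ARD branch is just a scaled inner product. The NumPy sketch below (toy data of my own, not part of the kernel file) reproduces that branch and the matching diagonal computation.

```python
import numpy as np

X = np.random.randn(5, 3)     # 5 points, 3 input dimensions
X2 = np.random.randn(4, 3)
variance = 0.7                # single variance (ARD=False)

K = variance * X @ X2.T                        # matches gemm2(X, X2, False, True) scaled by variance
K_diag = variance * np.sum(X ** 2, axis=-1)    # matches _compute_Kdiag for ARD=False
print(K.shape, K_diag.shape)                   # (5, 4) (5,)
```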
#### File: gp/kernels/matern.py
```python
import numpy as np
import mxnet as mx
from .stationary import StationaryKernel
class Matern(StationaryKernel):
"""
The Matern family of stationary kernels, indexed by a half-integer order. For the orders implemented below
(Matern 5/2, 3/2 and 1/2),
.. math::
k_{5/2}(r) = \\sigma^2 (1 + \\sqrt{5} r + \\tfrac{5}{3} r^2) e^{-\\sqrt{5} r}, \\quad
k_{3/2}(r) = \\sigma^2 (1 + \\sqrt{3} r) e^{-\\sqrt{3} r}, \\quad
k_{1/2}(r) = \\sigma^2 e^{-r}
where :math:`r` is the lengthscale-scaled Euclidean distance between inputs.
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions)
:type input_dim: int
:param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided
by a lengthscale for individual dimensions.
:type ARD: boolean
:param variance: the initial value for the variance parameter (scalar), which scales the whole covariance matrix.
:type variance: float or MXNet NDArray
:param lengthscale: the initial value for the lengthscale parameter.
:type lengthscale: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, order, ARD=False, variance=1.,
lengthscale=1., name='matern', active_dims=None, dtype=None,
ctx=None):
super(Matern, self).__init__(
input_dim=input_dim, ARD=ARD, variance=variance,
lengthscale=lengthscale, name=name, active_dims=active_dims,
dtype=dtype, ctx=ctx)
self.order = order
class Matern52(Matern):
def __init__(self, input_dim, ARD=False, variance=1., lengthscale=1.,
name='matern52', active_dims=None, dtype=None, ctx=None):
super(Matern52, self).__init__(
input_dim=input_dim, order=2, ARD=ARD, variance=variance,
lengthscale=lengthscale, name=name, active_dims=active_dims,
dtype=dtype, ctx=ctx)
def _compute_K(self, F, X, lengthscale, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter (scalar), which scales the whole covariance matrix.
:type variance: MXNet NDArray or MXNet Symbol
:param lengthscale: the lengthscale parameter.
:type lengthscale: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
R2 = self._compute_R2(F, X, lengthscale, variance, X2=X2)
R = F.sqrt(F.clip(R2, 1e-14, np.inf))
return F.broadcast_mul(
(1+np.sqrt(5)*R+5/3.*R2)*F.exp(-np.sqrt(5)*R),
F.expand_dims(variance, axis=-2))
class Matern32(Matern):
def __init__(self, input_dim, ARD=False, variance=1., lengthscale=1.,
name='matern32', active_dims=None, dtype=None, ctx=None):
super(Matern32, self).__init__(
input_dim=input_dim, order=1, ARD=ARD, variance=variance,
lengthscale=lengthscale, name=name, active_dims=active_dims,
dtype=dtype, ctx=ctx)
def _compute_K(self, F, X, lengthscale, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter (scalar), which scales the whole covariance matrix.
:type variance: MXNet NDArray or MXNet Symbol
:param lengthscale: the lengthscale parameter.
:type lengthscale: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
R2 = self._compute_R2(F, X, lengthscale, variance, X2=X2)
R = F.sqrt(F.clip(R2, 1e-14, np.inf))
return F.broadcast_mul(
(1+np.sqrt(3)*R)*F.exp(-np.sqrt(3)*R),
F.expand_dims(variance, axis=-2))
class Matern12(Matern):
def __init__(self, input_dim, ARD=False, variance=1., lengthscale=1.,
name='matern12', active_dims=None, dtype=None, ctx=None):
super(Matern12, self).__init__(
input_dim=input_dim, order=0, ARD=ARD, variance=variance,
lengthscale=lengthscale, name=name, active_dims=active_dims,
dtype=dtype, ctx=ctx)
def _compute_K(self, F, X, lengthscale, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter (scalar), which scales the whole covariance matrix.
:type variance: MXNet NDArray or MXNet Symbol
:param lengthscale: the lengthscale parameter.
:type lengthscale: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
R = F.sqrt(F.clip(self._compute_R2(F, X, lengthscale, variance, X2=X2),
1e-14, np.inf))
return F.broadcast_mul(
F.exp(-R), F.expand_dims(variance, axis=-2))
```
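The three subclasses differ only in how the scaled distance enters the covariance. As a hedged reference (toy inputs, plain NumPy, names of my own), the closed forms implemented above can be evaluated directly from a pairwise distance matrix.

```python
import numpy as np

def scaled_dist(X, X2, lengthscale):
    # r_ij = ||(x_i - x2_j) / lengthscale||, i.e. sqrt(R2) in _compute_K
    diff = (X[:, None, :] - X2[None, :, :]) / lengthscale
    return np.sqrt(np.maximum(np.sum(diff ** 2, axis=-1), 1e-14))

X = np.random.randn(6, 2)
R = scaled_dist(X, X, lengthscale=1.5)
variance = 2.0

K52 = variance * (1 + np.sqrt(5) * R + 5.0 / 3.0 * R ** 2) * np.exp(-np.sqrt(5) * R)
K32 = variance * (1 + np.sqrt(3) * R) * np.exp(-np.sqrt(3) * R)
K12 = variance * np.exp(-R)     # Matern 1/2 is the exponential kernel
```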
#### File: components/distributions/laplace.py
```python
from ...common.config import get_default_MXNet_mode
from ..variables import Variable
from .univariate import UnivariateDistribution
class Laplace(UnivariateDistribution):
"""
The one-dimensional Laplace distribution. The Laplace distribution can be defined over a scalar random variable
or an array of random variables. In case of an array of random variables, the location and scale are broadcasted
to the shape of the output random variable (array).
:param location: Location of the Laplace distribution.
:type location: Variable
:param scale: Scale of the Laplace distribution.
:type scale: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, location, scale, rand_gen=None, dtype=None, ctx=None):
if not isinstance(location, Variable):
location = Variable(value=location)
if not isinstance(scale, Variable):
scale = Variable(value=scale)
inputs = [('location', location), ('scale', scale)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(Laplace, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def log_pdf_impl(self, location, scale, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the Laplace distribution.
:param location: the location of the Laplace distribution.
:type location: MXNet NDArray or MXNet Symbol
:param scale: the scale of the Laplace distributions.
:type scale: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the Laplace distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
logvar = -F.log(2 * scale)
logL = F.broadcast_minus(logvar, F.broadcast_div(
F.abs(F.broadcast_minus(random_variable, location)), scale)) * self.log_pdf_scaling
return logL
def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, F=None):
"""
Draw samples from the Laplace distribution.
:param location: the location of the Laplace distribution.
:type location: MXNet NDArray or MXNet Symbol
:param scale: the scale of the Laplace distributions.
:type scale: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the Laplace distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
out_shape = (num_samples,) + rv_shape
return F.broadcast_add(F.broadcast_mul(self._rand_gen.sample_laplace(
shape=out_shape, dtype=self.dtype, ctx=self.ctx),
scale), location)
@staticmethod
def define_variable(location=0., scale=1., shape=None, rand_gen=None, dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a Laplace distribution.
:param location: Location of the distribution.
:param scale: Scale of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the Laplace distribution.
:rtypes: Variable
"""
var = Laplace(location=location, scale=scale, rand_gen=rand_gen, dtype=dtype, ctx=ctx)
var._generate_outputs(shape=shape)
return var.random_variable
```
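The Laplace log-density used in `log_pdf_impl` is simply -log(2b) - |x - mu| / b. A minimal NumPy check (toy numbers of my own) is below; it also illustrates the broadcasting of a scalar location/scale over an array-valued random variable described in the class docstring.

```python
import numpy as np

location, scale = 0.5, 2.0
x = np.array([-1.0, 0.5, 3.0])

log_pdf = -np.log(2 * scale) - np.abs(x - location) / scale
print(log_pdf)   # elementwise log-density, same shape as x
```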
#### File: components/distributions/normal.py
```python
import numpy as np
import mxnet as mx
import itertools
from ...util.special import log_determinant
from ...common.config import get_default_MXNet_mode
from ..variables import Variable
from .distribution import Distribution
from .univariate import UnivariateDistribution
class Normal(UnivariateDistribution):
"""
The one-dimensional normal distribution. The normal distribution can be defined over a scalar random variable or an
array of random variables. In case of an array of random variables, the mean and variance are broadcasted to the
shape of the output random variable (array).
:param mean: Mean of the normal distribution.
:type mean: Variable
:param variance: Variance of the normal distribution.
:type variance: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, variance, rand_gen=None, dtype=None, ctx=None):
inputs = [('mean', mean), ('variance', variance)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(Normal, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def log_pdf_impl(self, mean, variance, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param variance: the variance of the normal distributions.
:type variance: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the normal distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
logvar = np.log(2 * np.pi) / -2 + F.log(variance) / -2
logL = F.broadcast_add(logvar, F.broadcast_div(F.square(
F.broadcast_minus(random_variable, mean)), -2 * variance)) * self.log_pdf_scaling
return logL
def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, F=None):
"""
Draw samples from the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param variance: the variance of the normal distributions.
:type variance: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the normal distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
out_shape = (num_samples,) + rv_shape
return F.broadcast_add(F.broadcast_mul(self._rand_gen.sample_normal(
shape=out_shape, dtype=self.dtype, ctx=self.ctx),
F.sqrt(variance)), mean)
@staticmethod
def define_variable(mean=0., variance=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a normal distribution.
:param mean: Mean of the distribution.
:param variance: Variance of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the normal distribution.
:rtypes: Variable
"""
normal = Normal(mean=mean, variance=variance, rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
normal._generate_outputs(shape=shape)
return normal.random_variable
class MultivariateNormal(Distribution):
"""
The multi-dimensional normal distribution.
:param mean: Mean of the normal distribution.
:type mean: Variable
:param covariance: Covariance matrix of the distribution.
:type covariance: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, covariance, rand_gen=None, minibatch_ratio=1.,
dtype=None, ctx=None):
inputs = [('mean', mean), ('covariance', covariance)]
input_names = ['mean', 'covariance']
output_names = ['random_variable']
super(MultivariateNormal, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def replicate_self(self, attribute_map=None):
"""
Replicates this Factor with a new uuid.
Used during model replication to functionally replicate a factor into a new graph.
:param attribute_map: a mapping from the original attribute variables to their replicated counterparts (or None).
:type attribute_map: {Variable: Variable} or None
"""
replicant = super(MultivariateNormal, self).replicate_self(attribute_map)
return replicant
def log_pdf_impl(self, mean, covariance, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param covariance: the covariance of the distribution.
:type covariance: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the normal distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
N = mean.shape[-1]
lmat = F.linalg.potrf(covariance)
logdetl = - F.linalg.sumlogdiag(F.abs(lmat)) # maybe sum if n x d x d
targets = random_variable - mean
zvec = F.sum(F.linalg.trsm(lmat, F.expand_dims(targets, axis=-1)), axis=-1)
sqnorm_z = - F.sum(F.square(zvec), axis=-1)
return (0.5 * (sqnorm_z - (N * np.log(2 * np.pi))) + logdetl) * self.log_pdf_scaling
def draw_samples_impl(self, mean, covariance, rv_shape, num_samples=1, F=None):
"""
Draw a number of samples from the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param covariance: the covariance of the normal distributions.
:type covariance: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the normal distribution
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
out_shape = (num_samples,) + rv_shape + (1,)
lmat = F.linalg.potrf(covariance)
epsilon = self._rand_gen.sample_normal(
shape=out_shape, dtype=self.dtype, ctx=self.ctx)
lmat_eps = F.linalg.trmm(lmat, epsilon)
return F.broadcast_add(lmat_eps.sum(-1), mean)
@staticmethod
def define_variable(shape, mean=0., covariance=None, rand_gen=None, minibatch_ratio=1., dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a normal distribution.
:param mean: Mean of the distribution.
:param covariance: Covariance matrix of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the normal distribution.
:rtypes: Variable
"""
covariance = covariance if covariance is not None else mx.nd.array(np.eye(N=shape[-1]), dtype=dtype, ctx=ctx)
normal = MultivariateNormal(mean=mean, covariance=covariance,
rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
normal._generate_outputs(shape=shape)
return normal.random_variable
def _generate_outputs(self, shape):
"""
Set the output variable of the distribution.
:param shape: the shape of the random distribution.
:type shape: tuple
"""
self.outputs = [('random_variable', Variable(value=self, shape=shape))]
class NormalMeanPrecision(UnivariateDistribution):
"""
The one-dimensional normal distribution, parameterized by mean and precision rather than mean and variance.
The normal distribution can be defined over a scalar random variable
or an array of random variables. In case of an array of random variables, the mean and precisions are broadcasted
to the shape of the output random variable (array).
:param mean: Mean of the normal distribution.
:type mean: Variable
:param precision: Precision of the normal distribution.
:type precision: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, precision, rand_gen=None, dtype=None, ctx=None):
inputs = [('mean', mean), ('precision', precision)]
input_names = [k for k, _ in inputs]
output_names = ['random_variable']
super(NormalMeanPrecision, self).__init__(inputs=inputs, outputs=None,
input_names=input_names,
output_names=output_names,
rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def log_pdf_impl(self, mean, precision, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param precision: the precision of the normal distributions.
:type precision: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the normal distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
logvar = (F.log(precision) - np.log(2 * np.pi)) / 2
logL = F.broadcast_add(logvar, F.broadcast_mul(F.square(
F.broadcast_minus(random_variable, mean)), -precision / 2)) * self.log_pdf_scaling
return logL
def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, F=None):
"""
Draw samples from the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param precision: the precision of the normal distributions.
:type precision: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the normal distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
out_shape = (num_samples,) + rv_shape
return F.broadcast_add(F.broadcast_div(self._rand_gen.sample_normal(
shape=out_shape, dtype=self.dtype, ctx=self.ctx),
F.sqrt(precision)), mean)
@staticmethod
def define_variable(mean=0., precision=1., shape=None, rand_gen=None,
dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a normal distribution.
:param mean: Mean of the distribution.
:param precision: Precision of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the normal distribution.
:rtypes: Variable
"""
normal = NormalMeanPrecision(mean=mean, precision=precision, rand_gen=rand_gen, dtype=dtype, ctx=ctx)
normal._generate_outputs(shape=shape)
return normal.random_variable
class MultivariateNormalMeanPrecision(Distribution):
"""
The multi-dimensional normal distribution parameterized by mean and precision rather than mean and variance.
:param mean: Mean of the normal distribution.
:type mean: Variable
:param precision: Precision matrix of the distribution.
:type precision: Variable
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
def __init__(self, mean, precision, rand_gen=None, minibatch_ratio=1.,
dtype=None, ctx=None):
inputs = [('mean', mean), ('precision', precision)]
input_names = ['mean', 'precision']
output_names = ['random_variable']
super(MultivariateNormalMeanPrecision, self).__init__(
inputs=inputs, outputs=None, input_names=input_names,
output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx)
def replicate_self(self, attribute_map=None):
"""
Replicates this Factor with a new uuid.
Used during model replication to functionally replicate a factor into a new graph.
:param attribute_map: a mapping from the original attribute variables to their replicated counterparts (or None).
:type attribute_map: {Variable: Variable} or None
"""
replicant = super(MultivariateNormalMeanPrecision, self).replicate_self(attribute_map)
return replicant
def log_pdf_impl(self, mean, precision, random_variable, F=None):
"""
Computes the logarithm of the probability density function (PDF) of the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param precision: the precision of the distribution.
:type precision: MXNet NDArray or MXNet Symbol
:param random_variable: the random variable of the normal distribution.
:type random_variable: MXNet NDArray or MXNet Symbol
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: log pdf of the distribution.
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
N = mean.shape[-1]
c = N * np.log(2 * np.pi)
logdetl = -log_determinant(precision)
targets = random_variable - mean
# TODO: Should be a way to do this without loops
sqnorm_z = F.zeros(random_variable.shape[:-1], dtype=self.dtype)
for ix in itertools.product(*map(range, random_variable.shape[:-1])):
sqnorm_z[ix] = F.dot(F.dot(targets[ix], precision[ix], transpose_a=True), targets[ix])
return -0.5 * (sqnorm_z + c + logdetl) * self.log_pdf_scaling
def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, F=None):
"""
Draw a number of samples from the normal distribution.
:param mean: the mean of the normal distribution.
:type mean: MXNet NDArray or MXNet Symbol
:param precision: the precision of the normal distributions.
:type precision: MXNet NDArray or MXNet Symbol
:param rv_shape: the shape of each sample.
:type rv_shape: tuple
:param num_samples: the number of drawn samples (default: one).
:type num_samples: int
:param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray).
:returns: a set of samples of the normal distribution
:rtypes: MXNet NDArray or MXNet Symbol
"""
F = get_default_MXNet_mode() if F is None else F
out_shape = (num_samples,) + rv_shape + (1,)
# Use potri instead of potrf:
# https://mxnet.incubator.apache.org/api/python/symbol/linalg.html#mxnet.symbol.linalg.potri
lmat = F.linalg.potri(precision)
epsilon = self._rand_gen.sample_normal(
shape=out_shape, dtype=self.dtype, ctx=self.ctx)
lmat_eps = F.linalg.trmm(lmat, epsilon)
return F.broadcast_add(lmat_eps.sum(-1), mean)
@staticmethod
def define_variable(shape, mean=0., precision=None, rand_gen=None,
minibatch_ratio=1., dtype=None, ctx=None):
"""
Creates and returns a random variable drawn from a normal distribution.
:param mean: Mean of the distribution.
:param precision: Precision of the distribution.
:param shape: the shape of the random variable(s).
:type shape: tuple or [tuple]
:param rand_gen: the random generator (default: MXNetRandomGenerator).
:type rand_gen: RandomGenerator
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
:returns: the random variables drawn from the normal distribution.
:rtypes: Variable
"""
precision = precision if precision is not None else mx.nd.array(np.eye(N=shape[-1]), dtype=dtype, ctx=ctx)
normal = MultivariateNormalMeanPrecision(mean=mean, precision=precision,
rand_gen=rand_gen,
dtype=dtype, ctx=ctx)
normal._generate_outputs(shape=shape)
return normal.random_variable
def _generate_outputs(self, shape):
"""
Set the output variable of the distribution.
:param shape: the shape of the random distribution.
:type shape: tuple
"""
self.outputs = [('random_variable', Variable(value=self, shape=shape))]
```
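Both the multivariate sampling and the log-pdf above go through the Cholesky factor of the covariance. The sketch below mirrors that with NumPy for a single sample (the toy mean and covariance are my own): x = mu + L eps with L L^T = Sigma, and log p(x) = -1/2 (z^T z + N log 2 pi) - sum_i log L_ii with z = L^{-1}(x - mu).

```python
import numpy as np

mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])

L = np.linalg.cholesky(cov)            # analogue of F.linalg.potrf
eps = np.random.randn(2)
x = mean + L @ eps                     # analogue of trmm followed by broadcast_add

z = np.linalg.solve(L, x - mean)       # analogue of F.linalg.trsm
log_pdf = -0.5 * (z @ z + len(mean) * np.log(2 * np.pi)) - np.sum(np.log(np.diag(L)))
print(x, log_pdf)
```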
#### File: components/distributions/random_gen.py
```python
from abc import ABC
import mxnet as mx
from ...common.config import get_default_dtype, get_default_MXNet_mode
class RandomGenerator(ABC):
"""
The abstract class of the pseudo-random number generator.
"""
@staticmethod
def sample_normal(loc=0, scale=1, shape=None, dtype=None, out=None, ctx=None):
pass
@staticmethod
def sample_gamma(alpha=1, beta=1, shape=None, dtype=None, out=None, ctx=None):
pass
@staticmethod
def sample_multinomial(data, get_prob=True, dtype='int32', F=None):
pass
@staticmethod
def sample_bernoulli(prob_true=0.5, dtype='bool', F=None):
pass
@staticmethod
def sample_uniform(low=0., high=1., shape=None, dtype=None, out=None, ctx=None, F=None):
pass
@staticmethod
def sample_laplace(location=0., scale=1., shape=None, dtype=None, out=None, ctx=None, F=None):
pass
class MXNetRandomGenerator(RandomGenerator):
"""
The MXNet pseudo-random number generator.
"""
@staticmethod
def _sample_univariate(func, shape=None, dtype=None, out=None, ctx=None, F=None, **kwargs):
"""
Wrapper for univariate sampling functions (Normal, Gamma etc.)
:param func: The function to use for sampling, e.g. F.random.normal
:param shape: The shape of the samples
:param dtype: The data type
:param out: Output variable
:param ctx: The execution context
:param F: MXNet node
:param kwargs: keyword arguments for the sampling function (loc, scale etc)
:return: Array of samples
"""
dtype = get_default_dtype() if dtype is None else dtype
if F is mx.ndarray:
# This is required because MXNet uses _Null instead of None as shape default
if shape is None:
return func(dtype=dtype, ctx=ctx, out=out, **kwargs)
else:
return func(shape=shape, dtype=dtype, ctx=ctx, out=out, **kwargs)
else:
return func(shape=shape, dtype=dtype, out=out, **kwargs)
@staticmethod
def sample_normal(loc=0, scale=1, shape=None, dtype=None, out=None, ctx=None, F=None):
"""
Sample Normal distributed variables
:param loc: location (mean)
:param scale: scale (variance)
:param shape: Array shape of samples
:param dtype: Data type
:param out: Output variable
:param ctx: execution context
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
return MXNetRandomGenerator._sample_univariate(
func=F.random.normal, loc=loc, scale=scale,
shape=shape, dtype=dtype, out=out, ctx=ctx, F=F)
@staticmethod
def sample_multinomial(data, shape=None, get_prob=False, dtype='int32', F=None):
"""
Sample Multinomial distributed variables
:param data: An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
:param shape: Shape of the random variable
:param get_prob: If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
:param dtype: Data type
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
if shape:
return F.random.multinomial(
data=data, shape=shape, get_prob=get_prob, dtype=dtype)
else:
return F.random.multinomial(
data=data, get_prob=get_prob, dtype=dtype)
@staticmethod
def sample_bernoulli(prob_true=0.5, dtype=None, shape=None, F=None):
"""
Sample Bernoulli distributed variables
:param shape: Array shape of samples
:param prob_true: Probability of being true
:param dtype: data type
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
# A sample is True when the uniform draw falls below prob_true.
return F.random.uniform(low=0, high=1, shape=shape, dtype=dtype) < prob_true
@staticmethod
def sample_gamma(alpha=1, beta=1, shape=None, dtype=None, out=None, ctx=None, F=None):
"""
Sample Gamma distributed variables
:param alpha: Also known as shape
:param beta: Also known as rate
:param shape: The number of samples to draw. If shape is, e.g., (m, n) and alpha and beta are scalars, output
shape will be (m, n). If alpha and beta are NDArrays with shape, e.g., (x, y), then output will have shape
(x, y, m, n), where m*n samples are drawn for each (alpha, beta) pair.
:param dtype: Data type
:param out: output variable
:param ctx: execution context
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
return MXNetRandomGenerator._sample_univariate(
func=F.random.gamma, alpha=alpha, beta=beta,
shape=shape, dtype=dtype, out=out, ctx=ctx, F=F)
@staticmethod
def sample_uniform(low=0., high=1., shape=None, dtype=None, out=None, ctx=None, F=None):
"""
Sample uniformly distributed variables
Samples are uniformly distributed over the half-open interval [low, high) (includes low, but excludes high).
:param low: lower boundary of output interval
:param high: upper boundary of output interval
:param shape: Array shape of samples
:param dtype: Data type
:param out: output variable
:param ctx: execution context
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
samples = MXNetRandomGenerator._sample_univariate(
func=F.random.uniform, shape=shape, dtype=dtype, out=out, ctx=ctx, F=F)
# samples = F.broadcast_add(F.broadcast_mul(samples, F.broadcast_sub(high, low)), low)
samples = samples * (high - low) + low
return samples
@staticmethod
def sample_laplace(location=0., scale=1., shape=None, dtype=None, out=None, ctx=None, F=None):
"""
Sample Laplace distributed variables
:param location: Location parameter (=mean)
:param scale: (>0) Also known as diversity
:param shape: Array shape of samples
:param dtype: Data type
:param out: output variable
:param ctx: execution context
:param F: MXNet node
:return: Array of samples
"""
F = get_default_MXNet_mode() if F is None else F
# Given a random variable U drawn from the uniform distribution in the interval (-1/2,1/2], the random variable
# X = \mu - b \, \operatorname{sgn}(U) \, \ln(1 - 2|U|)
# has a Laplace distribution with parameters \mu and b
U = MXNetRandomGenerator.sample_uniform(low=-0.5, high=0.5, shape=shape, dtype=dtype, out=out, ctx=ctx, F=F)
if isinstance(scale, F.NDArray):
b_sgn_U = F.broadcast_mul(scale, F.sign(U))
else:
b_sgn_U = scale * F.sign(U)
ln_1_2_U = F.log(1 - 2 * F.abs(U))
if isinstance(location, F.NDArray):
samples = F.broadcast_minus(location, F.broadcast_mul(b_sgn_U, ln_1_2_U))
else:
samples = location - F.broadcast_mul(b_sgn_U, ln_1_2_U)
return samples
```
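The inverse-transform trick used in `sample_laplace` can be checked in isolation: with U uniform on (-1/2, 1/2], X = mu - b sgn(U) ln(1 - 2|U|) is Laplace(mu, b). The NumPy version below is illustrative only (parameter values are my own).

```python
import numpy as np

location, scale = 1.0, 0.5
U = np.random.uniform(-0.5, 0.5, size=100_000)

X = location - scale * np.sign(U) * np.log(1 - 2 * np.abs(U))
print(X.mean(), X.var())   # ~location and ~2 * scale**2 (the Laplace variance)
```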
#### File: mxfusion/components/model_component.py
```python
from uuid import uuid4
from ..common.exceptions import ModelSpecificationError
class ModelComponent(object):
"""
The building block of a Model in MXFusion.
ModelComponents exist in one of two modes.
**Mode 1 - Bi-directional mode**
If a node is not attached to a FactorGraph, it maintains a list of all of its predecessors and successors directly.
These are stored in the ``self._predecessors`` and ``self._successors`` attributes.
**Mode 2 - Graph mode**
If a node is attached to a FactorGraph, it does not store direct references to its successors and predecessors.
When accessed, the predecessors/successors properties directly query the graph they are attached to to find out
what the respective neighbor nodes are.
"""
def __init__(self):
self.name = None
self._uuid = str(uuid4()).replace('-', '_')
self._parent_graph = None
self._successors = [] # either [('name', Variable), ('factor', Factor), ...]
self._predecessors = []
self.attributes = []
@property
def uuid(self):
"""
Return the UUID of this graph
"""
return self._uuid
def __hash__(self):
return self._uuid.__hash__()
def __eq__(self, other):
return self._uuid.__hash__() == other.__hash__()
def __repr__(self):
return self.uuid
def as_json(self):
return {'uuid': self._uuid,
'name': self.name,
'attributes': [a.uuid for a in self.attributes]}
@property
def graph(self):
"""
Return the object's graph
"""
return self._parent_graph
@graph.setter
def graph(self, graph):
"""
Attaches the node to a graph, switching from Bidirectional mode to Graph mode if it is not already
in Graph mode.
A node cannot be re-attached to a different graph once it is attached. Use the ``replicate()`` functionality
if you need to do this.
:param graph: The ``components_graph`` of the ``FactorGraph`` this node is attaching to.
:type graph: networkx.DiGraph
"""
if self._parent_graph is not None:
if self._parent_graph == graph:
return
elif graph is not None:
raise ModelSpecificationError("Trying to reset a variables graph is bad!")
self._parent_graph = graph
if self._parent_graph is not None:
self._parent_graph.add_node(self)
self.predecessors = self._predecessors
self.successors = self._successors
self._update_attributes()
self._predecessors = []
self._successors = []
def _update_attributes(self):
"""
Adds all of a node's attributes to its graph.
"""
for a in self.attributes:
self.graph.add_node(a)
def _align_graph_modes(self, edge_nodes):
"""
This function will update the current node and all nodes passed in to be in Graph mode if any of edge_nodes are
in Graph mode.
:param edge_nodes: All the nodes to align to the same graph mode. I.E. predecessors or successors.
:type edge_nodes: List of tuples of name to node e.g. [('random_variable': Variable y)]
"""
if self.graph is None and any([node.graph is not None for _, node in edge_nodes]):
# Put self into the graph, put all the other nodes in that graph (if more than one just error).
graphs = set([node.graph for _, node in edge_nodes if node.graph is not None])
if len(graphs) > 1:
raise ModelSpecificationError("Too many graphs!")
graph = list(graphs)[0]
self.graph = graph
for _, node in edge_nodes:
node.graph = graph
@property
def successors(self):
"""
Return a list of nodes pointed to by the edges of this node.
Note: The ordering of this list is not guaranteed to be consistent with assigned order.
"""
if self.graph is not None:
succ = [(e['name'], v) for v, edges in self.graph.succ[self].items() for e in edges.values()]
return succ
else:
return self._successors
@successors.setter
def successors(self, successors):
"""
Sets this node's successors to those passed in.
:param successors: List of tuples of name to node e.g. [('random_variable': Variable y)].
:type successors: List of tuples of name to node e.g. [('random_variable': Variable y)]
"""
def add_predecessor(successor, predecessor, successor_name):
if successor.graph is None:
successor._predecessors.append((successor_name, predecessor))
if successor.graph is not None:
raise ModelSpecificationError(
"Internal Error. Cannot add predecessor when a component is attached to a graph.")
self._align_graph_modes(successors)
if self.graph is not None:
for _, successor in self.successors:
self.graph.remove_edge(self, successor)
for name, successor in successors:
successor.graph = self.graph
self.graph.add_edge(self, successor, key=name, name=name)
else:
self._successors = successors
for name, successor in successors:
add_predecessor(successor, self, name)
@property
def predecessors(self):
"""
Return a list of nodes whose edges point into this node.
Note: The ordering of this list is not guaranteed to be consistent with assigned order.
"""
if self.graph is not None:
pred = [(e['name'], v) for v, edges in self.graph.pred[self].items() for e in edges.values()]
return pred
else:
return self._predecessors
@predecessors.setter
def predecessors(self, predecessors):
"""
Sets this node's predecessors to be those passed in.
:param predecessors: List of tuples of name to node e.g. [('random_variable': Variable y)]
:type predecessors: List of tuples of name to node e.g. [('random_variable': Variable y)]
"""
def add_successor(predecessor, successor, predecessor_name):
if predecessor.graph is None:
predecessor._successors.append((predecessor_name, successor))
if predecessor.graph is not None:
raise ModelSpecificationError(
"Internal Error. Cannot add a successor when a component is attached to a graph.")
self._align_graph_modes(predecessors)
if self.graph is not None:
for _, predecessor in self.predecessors:
self.graph.remove_edge(predecessor, self)
for name, predecessor in predecessors:
predecessor.graph = self.graph
self.graph.add_edge(predecessor, self, key=name, name=name)
else:
self._predecessors = predecessors
for name, predecessor in predecessors:
add_successor(predecessor, self, name)
def _replicate_self_with_attributes(self, var_map):
"""
Replicates self if not in ``var_map``. Also replicates all of self's attributes.
:param var_map: A mapping from the original model's components to the replicated components.
:type var_map: {original_node: new_node}
:rtype: ModelComponent
"""
var_map = var_map if var_map is not None else {}
if self in var_map:
return var_map[self]
attributes_map = {}
for a in self.attributes:
if a in var_map:
attributes_map[a] = var_map[a]
else:
attributes_map[a] = a.replicate_self()
var_map[a] = attributes_map[a]
replicated_component = self.replicate_self(attributes_map)
var_map[self] = replicated_component
return replicated_component
def _replicate_neighbors(self, var_map, neighbors, recurse_type, replication_function):
"""
Helper function that returns a replicated list of neighbors based on the recurse_type passed in.
:param var_map: A mapping from the original model's components to the replicated components.
:type var_map: {original_node: new_node}
:param neighbors: Dictionary containing the list of a node's neighbors in one direction
(predecessors or successors).
:type neighbors: List of tuples of name to node e.g. [('random_variable': Variable y)]
:param recurse_type: Parameter that decides how to replicate the neighbor nodes. Must be one of: 'recursive',
'one_level', or None.
:type recurse_type: String or None
:param replication_function: A function that takes in a ModelComponent and returns an answer for how to
replicate that node's predecessors and successors.
:type replication_function: function
"""
if recurse_type == 'recursive':
replicated_neighbors = [(name, i.replicate(var_map=var_map, replication_function=replication_function))
for name, i in neighbors]
elif recurse_type == 'one_level':
replicated_neighbors = [(name, i._replicate_self_with_attributes(var_map=var_map))
for name, i in neighbors]
elif recurse_type is None:
replicated_neighbors = []
else:
raise ModelSpecificationError("Parameter 'recurse_type' must be 'recursive', 'one_level', or None.")
return replicated_neighbors
def replicate(self, var_map=None, replication_function=None):
"""
Replicates this component and its neighbors based on the replication_function logic passed in.
:param var_map: A mapping from the original model's components to the replicated components. This is used to
track which components have already been replicated in a dynamic programming style.
:type var_map: {original_node: new_node}
:param replication_function: A function that takes in a ModelComponent and returns an answer for how to
replicate that node's predecessors and successors. If None, only replicates this node.
:type replication_function: function
"""
var_map = var_map if var_map is not None else {}
if self in var_map:
return var_map[self]
replicated_component = self._replicate_self_with_attributes(var_map)
if replication_function is not None:
pred_recursion, succ_recursion = replication_function(self)
else:
pred_recursion, succ_recursion = None, None
predecessors = self._replicate_neighbors(var_map, self.predecessors, pred_recursion, replication_function)
successors = self._replicate_neighbors(var_map, self.successors, succ_recursion, replication_function)
replicated_component.predecessors = predecessors
replicated_component.successors = successors
return replicated_component
```
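To make the two modes concrete, the toy sketch below (entirely my own illustration, not MXFusion code) stores neighbours locally until a node is attached to a `networkx.MultiDiGraph`, after which successors are read back from the graph, mirroring the `successors` property above.

```python
import networkx as nx

class ToyComponent:
    """Illustrative node with the same two modes as ModelComponent."""
    def __init__(self, name):
        self.name = name
        self.graph = None
        self._successors = []          # bi-directional mode storage

    def add_successor(self, edge_name, node):
        if self.graph is not None:     # graph mode: record an edge in the graph
            self.graph.add_edge(self, node, key=edge_name, name=edge_name)
        else:                          # bi-directional mode: keep a local list
            self._successors.append((edge_name, node))

    @property
    def successors(self):
        if self.graph is not None:
            return [(e['name'], v)
                    for v, edges in self.graph.succ[self].items()
                    for e in edges.values()]
        return self._successors

a, b = ToyComponent('a'), ToyComponent('b')
a.add_successor('random_variable', b)        # stored locally (bi-directional mode)
g = nx.MultiDiGraph()
a.graph = b.graph = g                        # toy attach; earlier local edges are not migrated
g.add_node(a); g.add_node(b)
a.add_successor('random_variable', b)        # now stored as a graph edge
print(a.successors)
```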
#### File: mxfusion/inference/expectation.py
```python
from ..common.exceptions import InferenceError
from ..components.variables import Variable, VariableType
from .variational import StochasticVariationalInference
from .inference_alg import SamplingAlgorithm
from .inference import TransferInference
from .map import MAP
from ..components.variables.runtime_variable import expectation
class ExpectationAlgorithm(SamplingAlgorithm):
"""
Sampling-based inference algorithm that returns the expectation of each variable in the model.
:param model: the definition of the probabilistic model
:type model: Model
:param observed: A list of observed variables
:type observed: [Variable]
:param num_samples: the number of samples used in estimating the variational lower bound
:type num_samples: int
:param target_variables: (optional) the target variables to sample
:type target_variables: [UUID]
:param extra_graphs: a list of extra FactorGraph used in the inference
algorithm.
:type extra_graphs: [FactorGraph]
"""
def compute(self, F, variables):
"""
Compute the inference algorithm
:param F: the execution context (mxnet.ndarray or mxnet.symbol)
:type F: Python module
:param variables: the set of MXNet arrays that holds the values of
variables at runtime.
:type variables: {str(UUID): MXNet NDArray or MXNet Symbol}
:returns: the outcome of the inference algorithm
:rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol
"""
samples = self.model.draw_samples(
F=F, variables=variables,
num_samples=self.num_samples)
samples = {k: expectation(F, v) for k, v in samples.items()}
if self.target_variables:
return tuple(samples[v] for v in self.target_variables)
else:
return samples
class ExpectationScoreFunctionAlgorithm(SamplingAlgorithm):
"""
Sampling-based inference algorithm that computes the expectation of a loss function defined in the model, specified as the target variable.
It does so via the score-function trick: the necessary inputs to the loss are sampled and used to compute a Monte Carlo estimate of the loss function's gradient.
:param model: the definition of the probabilistic model
:type model: Model
:param observed: A list of observed variables
:type observed: [Variable]
:param num_samples: the number of samples used in estimating the variational lower bound
:type num_samples: int
:param target_variables: the target function in the model to optimize (exactly one target variable should be given).
:type target_variables: [UUID]
:param extra_graphs: a list of extra FactorGraph used in the inference
algorithm.
:type extra_graphs: [FactorGraph]
"""
def compute(self, F, variables):
"""
Compute the inference algorithm
:param F: the execution context (mxnet.ndarray or mxnet.symbol)
:type F: Python module
:param variables: the set of MXNet arrays that holds the values of
variables at runtime.
:type variables: {str(UUID): MXNet NDArray or MXNet Symbol}
:returns: the outcome of the inference algorithm
:rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol
"""
samples = self.model.draw_samples(
F=F, variables=variables,
num_samples=self.num_samples)
variables.update(samples)
targets = [v for v in self.model.get_latent_variables(self.observed_variables) if v.type == VariableType.RANDVAR]
q_z_lambda = self.model.log_pdf(F=F, variables=variables, targets=targets)
p_x_z = variables[self.target_variables[0]]
gradient_lambda = F.mean(q_z_lambda * F.stop_gradient(p_x_z), axis=0)
# TODO known issue.
# This will double count the gradient of any distribution using the
# reparameterization trick (i.e. Normal). Issue #91
gradient_theta = F.mean(p_x_z, axis=0)
gradient_log_L = gradient_lambda + gradient_theta
return gradient_theta, gradient_log_L
```
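The score-function (REINFORCE) estimator used by `ExpectationScoreFunctionAlgorithm` can be illustrated in a few lines of NumPy. This is a self-contained sketch for a single Gaussian mean parameter, not the MXFusion implementation: grad_theta E_q[f(x)] = E_q[f(x) grad_theta log q(x; theta)], estimated by Monte Carlo.

```python
import numpy as np

def score_function_grad(theta, f, num_samples=50_000, sigma=1.0):
    """Estimate d/d(theta) of E_{x~N(theta, sigma^2)}[f(x)] via the score function trick."""
    x = np.random.normal(theta, sigma, size=num_samples)
    score = (x - theta) / sigma ** 2            # d/d(theta) log N(x | theta, sigma^2)
    return np.mean(f(x) * score)

# For f(x) = x**2, E[f] = theta**2 + sigma**2, so the true gradient is 2 * theta.
print(score_function_grad(1.5, lambda x: x ** 2))   # approximately 3.0
```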
#### File: mxfusion/inference/grad_loop.py
```python
from abc import ABC, abstractmethod
class GradLoop(ABC):
"""
The abstract class for the loop of gradient-based inference.
"""
@abstractmethod
def run(self, infr_executor, data, param_dict, ctx, optimizer='adam',
learning_rate=1e-3, max_iter=2000, verbose=False):
"""
:param infr_executor: The MXNet function that computes the training objective.
:type infr_executor: MXNet Gluon Block
:param data: a list of observed variables
:type data: [mxnet.ndarray]
:param param_dict: The MXNet ParameterDict for Gradient-based optimization
:type param_dict: mxnet.gluon.ParameterDict
:param ctx: MXNet context
:type ctx: mxnet.cpu or mxnet.gpu
:param optimizer: the choice of optimizer (default: 'adam')
:type optimizer: str
:param learning_rate: the learning rate of the gradient optimizer (default: 0.001)
:type learning_rate: float
:param max_iter: the maximum number of iterations of gradient optimization
:type max_iter: int
:param verbose: whether to print per-iteration messages.
:type verbose: boolean
"""
pass
```
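The abstract `run` signature above leaves the training loop entirely to subclasses. As a rough sketch of one way to satisfy it with an MXNet Gluon `Trainer`, the fragment below assumes that `infr_executor` can be called directly on the observed data and returns a scalar loss; the real MXFusion loops may call the executor differently, so treat this purely as an illustration.
```python
import mxnet as mx
from mxfusion.inference.grad_loop import GradLoop


class SimpleBatchLoop(GradLoop):
    """A minimal sketch of a gradient loop, not the library's implementation."""

    def run(self, infr_executor, data, param_dict, ctx, optimizer='adam',
            learning_rate=1e-3, max_iter=2000, verbose=False):
        trainer = mx.gluon.Trainer(param_dict, optimizer,
                                   {'learning_rate': learning_rate})
        for i in range(max_iter):
            with mx.autograd.record():
                loss = infr_executor(*data)      # assumed call convention
            loss.backward()
            trainer.step(batch_size=1, ignore_stale_grad=True)
            if verbose and (i + 1) % 100 == 0:
                print('Iteration {}: loss = {}'.format(i + 1, loss.asscalar()))
```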
#### File: mxfusion/models/factor_graph.py
```python
from uuid import uuid4
import warnings
import networkx as nx
from networkx.exception import NetworkXError
import networkx.algorithms.dag
from ..components import Distribution, Factor, ModelComponent, Variable, VariableType
from ..modules.module import Module
from ..common.exceptions import ModelSpecificationError, InferenceError
from ..components.functions import FunctionEvaluation
from ..components.variables.runtime_variable import expectation
class FactorGraph(object):
"""
A graph defining how Factor objects relate to one another.
The two primary functionalities of this class are:
* ``log_pdf`` which computes the log probability of some variables in the graph and
* ``draw_samples`` which draws samples for some target variables from the graph
"""
def __init__(self, name, verbose=False):
"""
Initializes the FactorGraph with a UUID and structures to hold components.
"""
self.name = name
self._uuid = str(uuid4())
self._var_ties = {}
self._components_graph = nx.MultiDiGraph()
self._verbose = verbose
def __repr__(self):
"""
Return a string summary of this object
"""
out_str = '{} ({})\n'.format(self.__class__.__name__, self._uuid[:5])
for f in self.ordered_factors:
if isinstance(f, FunctionEvaluation):
out_str += ', '.join([str(v) for _, v in f.outputs])+' = '+str(f)+'\n'
elif isinstance(f, (Distribution, Module)):
out_str += ', '.join([str(v) for _, v in f.outputs])+' ~ '+str(f)+'\n'
return out_str[:-1]
def __getitem__(self, key):
if key in self.components:
return self.components[key]
for m in self.modules.values():
if key in m:
return m[key]
return self.components[key]
def __contains__(self, key):
return key in self.components or any([key in m for m in self.modules.values()])
def __setattr__(self, name, value):
"""
Called whenever an attribute is attached to a FactorGraph object.
This method adds the attribute's value to its internal graph representation
if it's an object derived from the ModelComponent class.
:param name: The attribute name.
:type name: str
:param value: The value being assigned to the attribute.
:type value: Anything, but if it is type ModelComponent it is added to some internal data structures.
"""
if isinstance(value, ModelComponent):
if self._verbose:
print("Variable {} ({}) {} ({})".format(name, value.uuid, value.type, value.shape))
if value.name is not None:
warnings.warn("The value {} has already been assigned in the model.".format(str(value)))
value.name = name
value.graph = self.components_graph
super(FactorGraph, self).__setattr__(name, value)
@property
def components_graph(self):
"""
Return the Graph object of the component
:returns: dict of ModelComponents
:rtype: { UUID : ModelComponent }
"""
return self._components_graph
@property
def components(self):
"""
Return all the ModelComponents held in the FactorGraph.
:returns: dict of ModelComponents
:rtype: { UUID : ModelComponent }
"""
return {node.uuid: node for node in self.components_graph.nodes()}
@property
def distributions(self):
"""
Return the distributions held in the FactorGraph.
:returns: dict of Distributions
:rtype: { UUID : Distribution }
"""
return {node.uuid: node for node in self.components_graph.nodes() if isinstance(node, Distribution)}
@property
def functions(self):
"""
Return the functions held in the FactorGraph.
:returns: dict of Functions
:rtype: { UUID : Function }
"""
return {node.uuid: node for node in self.components_graph.nodes() if isinstance(node, FunctionEvaluation)}
@property
def modules(self):
"""
Return the modules held in the FactorGraph.
:returns: dict of Modules
:rtype: { UUID : Module }
"""
return {node.uuid: node for node in self.components_graph.nodes() if isinstance(node, Module)}
@property
def variables(self):
"""
Return the variables held in the FactorGraph.
:returns: dict of Variables
:rtype: { UUID : Variable }
"""
return {node.uuid: node for node in self.components_graph.nodes() if isinstance(node, Variable)}
@property
def ordered_factors(self):
"""
Return a sorted list of Factors in the graph.
:rtype: A topologically sorted list of Factors in the graph.
"""
return [node for node in nx.topological_sort(self.components_graph) if isinstance(node, Factor)]
@property
def roots(self):
"""
Return all root nodes in the graph.
"""
return [node for node, degree in self.components_graph.in_degree() if degree == 0]
@property
def leaves(self):
"""
Return all leaf nodes in the graph.
"""
return [node for node, degree in self.components_graph.out_degree() if degree == 0]
@property
def var_ties(self):
"""
Return a mapping of Variables in the FactorGraph that are tied together (i.e. the same / aliases of each other.)
:returns: dict of UUIDs of the variables to tie
:rtype: { uuid_of_var_to_tie : uuid_of_var_to_tie_to }
"""
return self._var_ties
def log_pdf(self, F, variables, targets=None):
"""
Compute the logarithm of the probability/probability density of a set of random variables in the factor graph.
The set of random variables is specified in the ``targets`` argument, and the values of any conditioning
variables and relevant constants are supplied through the ``variables`` argument.
:param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``).
:param variables: The set of variables
:type variables: {UUID : MXNet NDArray or MXNet Symbol}
:param targets: Variables to compute the log probability of.
:type targets: [UUID] or set(UUID)
:returns: the sum of the log probability of all the target variables.
:rtype: mxnet NDArray or mxnet Symbol
"""
if targets is not None:
targets = set(targets) if isinstance(targets, (list, tuple)) \
else targets
logL = 0.
for f in self.ordered_factors:
if isinstance(f, FunctionEvaluation):
outcome = f.eval(F=F, variables=variables,
always_return_tuple=True)
outcome_uuid = [v.uuid for _, v in f.outputs]
for v, uuid in zip(outcome, outcome_uuid):
if uuid in variables:
warnings.warn('Function evaluation in FactorGraph.log_pdf: the outcome variable '
+ str(uuid) + ' of the function evaluation ' + str(f) +
' already exists in the variable set.')
variables[uuid] = v
elif isinstance(f, Distribution):
if targets is None or f.random_variable.uuid in targets:
logL = logL + F.sum(expectation(F, f.log_pdf(
F=F, variables=variables)))
elif isinstance(f, Module):
if targets is None:
module_targets = [v.uuid for _, v in f.outputs
if v.uuid in variables]
else:
module_targets = [v.uuid for _, v in f.outputs
if v.uuid in targets]
if len(module_targets) > 0:
logL = logL + F.sum(expectation(F, f.log_pdf(
F=F, variables=variables, targets=module_targets)))
else:
raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." +
"That shouldn't happen.")
return logL
def draw_samples(self, F, variables, num_samples=1, targets=None):
"""
Draw samples from the target variables of the Factor Graph. If the ``targets`` argument is None, draw samples
from all the variables that are *not* in the conditional variables. If the ``targets`` argument is given,
this method returns a list of samples of variables in the order of the target argument, otherwise it returns a
dict of samples where the keys are the UUIDs of variables and the values are the samples.
:param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``).
:param variables: The set of variables
:type variables: {UUID : MXNet NDArray or MXNet Symbol}
:param num_samples: The number of samples to draw for the target variables.
:type num_samples: int
:param targets: a list of Variables to draw samples from.
:type targets: [UUID]
:returns: the samples of the target variables.
:rtype: (MXNet NDArray or MXNet Symbol,) or {str(UUID): MXNet NDArray or MXNet Symbol}
"""
samples = {}
for f in self.ordered_factors:
if isinstance(f, FunctionEvaluation):
outcome = f.eval(F=F, variables=variables,
always_return_tuple=True)
outcome_uuid = [v.uuid for _, v in f.outputs]
for v, uuid in zip(outcome, outcome_uuid):
if uuid in variables:
warnings.warn('Function evaluation in FactorGraph.draw_samples: '
'the outcome of the function evaluation ' + str(f) +
' already exists in the variable set.')
variables[uuid] = v
samples[uuid] = v
elif isinstance(f, Distribution):
known = [v in variables for _, v in f.outputs]
if all(known):
continue
elif any(known):
raise InferenceError("Part of the outputs of the distribution " +
f.__class__.__name__ + " has been observed!")
outcome_uuid = [v.uuid for _, v in f.outputs]
outcome = f.draw_samples(
F=F, num_samples=num_samples, variables=variables, always_return_tuple=True)
for v, uuid in zip(outcome, outcome_uuid):
variables[uuid] = v
samples[uuid] = v
elif isinstance(f, Module):
outcome_uuid = [v.uuid for _, v in f.outputs]
outcome = f.draw_samples(
F=F, variables=variables, num_samples=num_samples,
targets=outcome_uuid)
for v, uuid in zip(outcome, outcome_uuid):
variables[uuid] = v
samples[uuid] = v
else:
raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." +
"That shouldn't happen.")
if targets:
return tuple(samples[uuid] for uuid in targets)
else:
return samples
def remove_component(self, component):
"""
Removes the specified component from the factor graph.
:param component: The component to remove.
:type component: ModelComponent
"""
if not isinstance(component, ModelComponent):
raise ModelSpecificationError(
"Attempted to remove an object that isn't a ModelComponent.")
try:
self.components_graph.remove_node(component) # implicitly removes edges
except NetworkXError as e:
raise ModelSpecificationError("Attempted to remove a node " + str(component) + " that isn't in the graph.")
if component.name is not None:
try:
if getattr(self, component.name) is component:
delattr(self, component.name)
except AttributeError:
pass
component.graph = None
def _replicate_class(self, **kwargs):
"""
Returns a new instance of the derived FactorGraph's class.
"""
return FactorGraph(**kwargs)
def get_markov_blanket(self, node):
"""
Gets the Markov blanket for a node, which is the node's predecessors, the node's successors, and those
successors' other predecessors.
"""
def get_variable_predecessors(node):
return [v2 for k1,v1 in node.predecessors for k2,v2 in v1.predecessors if isinstance(v2, Variable)]
def get_variable_successors(node):
return [v2 for k1,v1 in node.successors for k2,v2 in v1.successors if isinstance(v2, Variable)]
def flatten(node_list):
return set([p for varset in node_list for p in varset])
successors = set(get_variable_successors(node))
n = set([node])
pred = set(get_variable_predecessors(node))
succs_preds = flatten([get_variable_predecessors(s) for s in successors])
return n.union(pred.union(successors.union(succs_preds)))
def get_descendants(self, node):
"""
Recursively gets all successors in the graph for the given node.
:rtype: set of all nodes in the graph descended from the node.
"""
return set(filter(lambda x: isinstance(x, Variable),
networkx.algorithms.dag.descendants(self.components_graph, node).union({node})))
def remove_subgraph(self, node):
"""
Removes a node and its parent graph recursively.
"""
if isinstance(node, Variable):
self.remove_component(node)
if node.factor is not None:
self.remove_subgraph(node.factor)
elif isinstance(node, Factor):
self.remove_component(node)
for _, v in node.inputs:
self.remove_subgraph(v)
del node
def replace_subgraph(self, target_variable, new_subgraph):
"""
Replaces the target_variable with the new_subgraph.
TODO If the factor of target_variable or new_subgraph has multiple outputs, this will fail.
:param target_variable: The variable that will be replaced by the new_subgraph.
:type target_variable: Variable
:param new_subgraph: We assume this is a Variable right now (aka the graph ends in a Variable).
:type new_subgraph: Variable
"""
new_factor = new_subgraph.factor
new_factor.successors = []
old_predecessors = target_variable.predecessors
target_variable.predecessors = []
for _, p in old_predecessors:
self.remove_subgraph(p)
target_variable.assign_factor(new_factor)
def extract_distribution_of(self, variable):
"""
Extracts the distribution of the variable passed in, returning a replicated copy of the passed in variable with
only its parent subgraph attached (also replicated).
:param variable: The variable to extract the distribution from.
:type variable: Variable
:returns: a replicated copy of the passed in variable.
:rtype: Variable
"""
def extract_distribution_function(component):
if isinstance(component, Factor):
predecessor_direction = 'recursive'
successor_direction = 'one_level'
return predecessor_direction, successor_direction
else:
predecessor_direction = 'recursive'
successor_direction = None
return predecessor_direction, successor_direction
return variable.replicate(replication_function=extract_distribution_function)
def clone(self, leaves=None):
"""
Clones a model, maintaining the same functionality and topology. Replicates all of its ModelComponents,
while maintaining the same UUIDs.
Starts upward from the leaves and copies everything in the graph recursively.
:param leaves: If None, use the leaves in this model, otherwise use the provided leaves.
:return: the cloned model
"""
new_model = self._replicate_class(name=self.name, verbose=self._verbose)
return self._clone(new_model, leaves)
def _clone(self, new_model, leaves=None):
"""
Clones a model, maintaining the same functionality and topology. Replicates all of its ModelComponents,
while maintaining the same UUIDs.
Starts upward from the leaves and copies everything in the graph recursively.
:param leaves: If None, use the leaves in this model, otherwise use the provided leaves.
:returns: the cloned model
"""
var_map = {} # from old model to new model
leaves = self.leaves if leaves is None else leaves
for v in leaves:
if v.name is not None:
new_leaf = v.replicate(var_map=var_map, replication_function=lambda x: ('recursive', 'recursive'))
setattr(new_model, v.name, new_leaf)
else:
v.graph = new_model.graph
for v in self.variables.values():
if v.name is not None:
setattr(new_model, v.name, new_model[v.uuid])
return new_model
def get_parameters(self, excluded=None, include_inherited=True):
"""
Get all the parameters not in the excluded list.
:param excluded: a list of variables to be excluded.
:type excluded: set(UUID) or [UUID]
:param include_inherited: whether inherited variables are included.
:type include_inherited: boolean
:returns: the list of parameter variables.
:rtype: [Variable]
"""
excluded = excluded if excluded is not None else set()
if include_inherited:
return [v for v in self.variables.values() if (v.type == VariableType.PARAMETER and v.uuid not in excluded)]
else:
return [v for v in self.variables.values() if (v.type == VariableType.PARAMETER and v.uuid not in excluded
and not v.isInherited)]
def get_constants(self):
"""
Get all the constants in the factor graph.
:returns: the list of constant variables.
:rtype: [Variable]
"""
return [v for v in self.variables.values() if v.type == VariableType.CONSTANT]
@staticmethod
def reconcile_graphs(current_graphs, primary_previous_graph, secondary_previous_graphs=None,
primary_current_graph=None):
"""
Reconciles two sets of graphs, matching the model components in the previous graph to the current graph.
This is primarily used when loading back a graph from a file and matching it to an existing in-memory graph in
order to load the previous graph's parameters correctly.
:param current_graphs: A list of the graphs we are reconciling a loaded factor graph against. This must be a
fully built set of graphs generated through the model definition process.
:param primary_previous_graph: A graph which may have been loaded in from a file and be partially specified, or
could be a full graph built through model definition.
:param secondary_previous_graphs: A list of secondary graphs (e.g. posteriors) that share components with the
primary_previous_graph.
:param primary_current_graph: Optional parameter to specify the primary_current_graph, otherwise it is taken to
be the model in the current_graphs (which should be unique).
:rtype: {previous ModelComponent : current ModelComponent}
"""
def update_with_named_components(previous_components, current_components, component_map, nodes_to_traverse_from):
name_pre = {c.name: c for c in previous_components if c.name}
name_cur = {c.name: c for c in current_components if c.name}
for name, previous_c in name_pre.items():
current_c = name_cur[name]
component_map[previous_c.uuid] = current_c.uuid
nodes_to_traverse_from[previous_c.uuid] = current_c.uuid
component_map = {}
nodes_to_traverse_from = {}
current_graph = primary_current_graph if primary_current_graph is not None else current_graphs[0]
secondary_current_graphs = current_graphs[1:]
secondary_previous_graphs = secondary_previous_graphs if secondary_previous_graphs is not None else []
if len(secondary_current_graphs) != len(secondary_previous_graphs):
raise ModelSpecificationError("Different number of secondary graphs passed in {} {}".format(
secondary_current_graphs, secondary_previous_graphs))
update_with_named_components(primary_previous_graph.components.values(), current_graph.components.values(),
component_map, nodes_to_traverse_from)
# Reconcile the primary graph
FactorGraph._reconcile_graph(nodes_to_traverse_from, component_map,
current_graph, primary_previous_graph)
# Reconcile the other graphs
if len(secondary_current_graphs) > 0 and len(secondary_previous_graphs) > 0:
for cg, pg in zip(secondary_current_graphs,
secondary_previous_graphs):
nodes_to_traverse_from = {pc: cc for pc, cc in component_map.items()
if pc in pg.components.keys()}
update_with_named_components(pg.components.values(), cg.components.values(), component_map,
nodes_to_traverse_from)
FactorGraph._reconcile_graph(
nodes_to_traverse_from, component_map, cg, pg)
# Resolve the remaining ambiguities here.
return component_map
@staticmethod
def _reconcile_graph(nodes_to_traverse_from, component_map, current_graph, previous_graph):
"""
Traverses the components (breadth first) in nodes_to_traverse_from of the current_graph/previous_graph,
matching components where possible and generating new calls to _reconcile_graph where the graph is still
incompletely traversed. This method makes no attempt to resolve ambiguities in naming between the graphs and
request the user to more completely specify names in their graph if such an ambiguity exists. Such
naming can be more completely specified by attaching names to each leaf node in the original graph.
:param nodes_to_traverse_from: A list of items to traverse the graph upwards from.
:type nodes_to_traverse_from: [previous ModelComponents]
:param component_map: The current mapping from the previous graph's MCs to the current_graph's MCs.
This is used and modified during reconciliation.
:type component_map: {previous_graph ModelComponent : current_graph ModelComponent}
:param current_graph: The current graph to match components against.
:type current_graph: FactorGraph
:param previous_graph: The previous graph to match components from.
:type previous_graph: FactorGraph
"""
def reconcile_direction(direction, previous_c, current_c, new_level, component_map):
if direction == 'predecessor':
previous_neighbors = previous_c.predecessors
current_neighbors = current_c.predecessors
elif direction == 'successor':
previous_neighbors = previous_c.successors
current_neighbors = current_c.successors
names = list(map(lambda x: x[0], previous_neighbors))
duplicate_names = set([x for x in names if names.count(x) > 1])
for edge_name, node in previous_neighbors:
if node.uuid not in component_map:
if edge_name in duplicate_names:
# TODO if all the other parts of the ambiguity are resolved, we have the answer still.
# Otherwise throw an exception
raise Exception("Multiple edges connecting unnamed nodes have the same name, "
"this isn't supported currently.") # TODO Support the ambiguities :)
current_node = [item for name, item in current_neighbors if edge_name == name][0]
component_map[node.uuid] = current_node.uuid
new_level[node.uuid] = current_node.uuid
if isinstance(node, Module):
module_component_map = current_node.reconcile_with_module(node)
component_map.update(module_component_map)
new_level = {}
for previous_c, current_c in nodes_to_traverse_from.items():
reconcile_direction('predecessor', previous_graph[previous_c], current_graph[current_c], new_level,
component_map)
"""
TODO Reconciling in both directions currently breaks the reconciliation process and can cause multiple
previous_uuid's to map to the same current_uuid. It's unclear why that happens. This shouldn't be necessary
until we implement multi-output Factors though (and even then, only if not all the outputs are in a
named chain).
"""
# reconcile_direction('successor', previous_graph[c], current_graph[current_c], new_level, component_map)
if len(new_level) > 0:
return FactorGraph._reconcile_graph(new_level, component_map, current_graph, previous_graph)
def load_from_json(self, json_graph):
components_graph = nx.readwrite.json_graph.node_link_graph(
json_graph, directed=True)
components = {node.uuid: node for node in components_graph.nodes()}
for node in components_graph.nodes():
node._parent_graph = components_graph
node.attributes = [components[a] for a in node.attributes]
self._components_graph = components_graph
for node in self._components_graph.nodes():
if node.name is not None:
self.__setattr__(node.name, node)
return self
@staticmethod
def load_graphs(graphs_list, existing_graphs=None):
"""
Method to load back in a graph. The graphs should have been saved down using the save method, and be a JSON
representation of the graph generated by the [networkx](https://networkx.github.io) library.
:param graphs_list: A list of raw json dicts loaded in from memory representing the FactorGraphs to create.
:type graphs_list: list of dicts loaded in using the ModelComponentDecoder class.
"""
import json
existing_graphs = existing_graphs if existing_graphs is not None else [FactorGraph(graph['name'])
for graph in graphs_list]
return [existing_graph.load_from_json(graph) for existing_graph, graph in zip(existing_graphs, graphs_list)]
def as_json(self):
"""
Returns the FactorGraph in a form suitable for JSON serialization.
This is assuming a JSON serializer that knows how to handle ModelComponents
such as the one defined in mxfusion.util.serialization.
"""
json_graph = nx.readwrite.json_graph.node_link_data(self._components_graph)
json_graph['name'] = self.name
return json_graph
@staticmethod
def save(graph_file, json_graphs):
"""
Method to save this graph down into a file. The graph file will be saved down as a JSON representation of the
graph generated by the [networkx](https://networkx.github.io) library.
:param graph_file: The filename to save the JSON representation of the graph(s) into.
:type graph_file: str of filename
:param json_graphs: the JSON representation(s) of the graph(s) to save, as produced by as_json.
:type json_graphs: dict or [dict]
"""
json_graphs = [json_graphs] if not isinstance(json_graphs, type([])) else json_graphs
import json
from ..util.serialization import ModelComponentEncoder
if graph_file is not None:
with open(graph_file, 'w') as f:
json.dump(json_graphs, f, ensure_ascii=False, cls=ModelComponentEncoder)
```
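As a small usage sketch of the bookkeeping above (attribute assignment registering components, plus the `variables`, `leaves` and `__contains__` accessors), the fragment below builds a tiny graph by hand. It assumes that `Variable` supports the arithmetic operator overloads exercised in the operator tests later in this dump, and that `FactorGraph` is importable from this module path; it is an illustration, not canonical library usage.
```python
from mxfusion.models.factor_graph import FactorGraph  # path assumed from the file location
from mxfusion.components import Variable

fg = FactorGraph(name='toy_graph')
fg.a = Variable()          # __setattr__ names the variable and adds it to the graph
b = Variable()
fg.c = fg.a + b            # the '+' operator creates a Factor node linking a, b and c

print(len(fg.variables))   # all Variable nodes currently held in the graph
print(fg.leaves)           # nodes with no successors (here: c)
print(fg.a.uuid in fg)     # __contains__ accepts component UUIDs
print(fg)                  # __repr__ lists the factors in topological order
```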
#### File: components/functions/mxfusion_function_test.py
```python
import unittest
import numpy as np
import mxnet.gluon.nn as nn
import mxnet as mx
from mxnet.gluon import HybridBlock
from mxnet.gluon.nn import Lambda
from mxnet.initializer import Zero
from mxfusion.components.functions.mxfusion_function import MXFusionFunction
from mxfusion.components import Variable
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples
class TestMXFusionFunctionTests(unittest.TestCase):
"""
Tests the mxfusion.components.functions.mxfusion_function.MXFusionFunction class.
"""
def instantialize_base_function(self):
return MXFusionFunction('test_func')
def instantialize_customize_function(self):
class TestFunction(MXFusionFunction):
def __init__(self, func_name, dtype=None, broadcastable=False):
super(TestFunction, self).__init__(
func_name, dtype, broadcastable)
self._params = {'C': Variable()}
def eval(self, F, A, B, C):
return A+B+C, A*B+C
@property
def parameters(self):
return self._params
@property
def input_names(self):
return ['A', 'B', 'C']
@property
def output_names(self):
return ['out1', 'out2']
@property
def parameter_names(self):
return ['C']
func_mx = Lambda(lambda A, B, C: (A+B+C, A*B+C))
return TestFunction('test_func'), func_mx
def test_raise_errors(self):
f = self.instantialize_base_function()
self.assertRaises(NotImplementedError, lambda: f.parameters)
self.assertRaises(NotImplementedError, lambda: f.input_names)
self.assertRaises(NotImplementedError, lambda: f.output_names)
self.assertRaises(NotImplementedError, lambda: f.parameter_names)
self.assertRaises(NotImplementedError, lambda: f.eval(mx.nd, X=1))
def test_function_execution(self):
f, f_mx = self.instantialize_customize_function()
np.random.seed(0)
A = mx.nd.array(np.random.rand(1, 3, 2))
B = mx.nd.array(np.random.rand(1, 3, 2))
C = mx.nd.array(np.random.rand(1))
A_mf = Variable(shape=A.shape)
B_mf = Variable(shape=B.shape)
outs = f(A_mf, B_mf)
assert len(outs) == 2
eval = f(A_mf, B_mf)[0].factor
variables = {A_mf.uuid: A, B_mf.uuid: B, eval.C.uuid: C}
res_eval = eval.eval(F=mx.nd, variables=variables)
res_mx = f_mx(A, B, C)
assert np.allclose(res_eval[0].asnumpy(), res_mx[0].asnumpy())
assert np.allclose(res_eval[1].asnumpy(), res_mx[1].asnumpy())
```
#### File: components/functions/operators_test.py
```python
import pytest
import mxnet as mx
import numpy as np
from mxfusion import Variable, Model
from mxfusion.common.exceptions import ModelSpecificationError
from mxfusion.components.functions.operators import *
@pytest.mark.usefixtures("set_seed")
class TestOperators(object):
def _test_operator(self, operator, inputs, properties=None):
"""
inputs are mx.nd.array
properties are just the operator properties needed at model def time.
"""
properties = properties if properties is not None else {}
m = Model()
variables = [Variable() for _ in inputs]
m.r = operator(*variables, **properties)
vs = [v for v in m.r.factor.inputs]
variables = {v[1].uuid: inputs[i] for i,v in enumerate(vs)}
evaluation = m.r.factor.eval(mx.nd, variables=variables)
return evaluation
@pytest.mark.parametrize("mxf_operator, mxnet_operator, inputs, properties", [
(reshape, mx.nd.reshape, [mx.nd.array(np.random.rand(1,4))], {'shape':(2,2), 'reverse': False}),
])
def test_operator_replicate(self, mxf_operator, mxnet_operator, inputs, properties):
properties = properties if properties is not None else {}
m = Model()
variables = [Variable() for _ in inputs]
m.r = mxf_operator(*variables, **properties)
vs = [v for v in m.r.factor.inputs]
variables = {v[1].uuid: inputs[i] for i,v in enumerate(vs)}
evaluation = m.r.factor.eval(mx.nd, variables=variables)
r_clone = m.extract_distribution_of(m.r)
vs = [v for v in r_clone.factor.inputs]
variables = {v[1].uuid: inputs[i] for i,v in enumerate(vs)}
evaluation2 = r_clone.factor.eval(mx.nd, variables=variables)
assert np.allclose(evaluation.asnumpy(), evaluation2.asnumpy()), (evaluation, evaluation2)
@pytest.mark.parametrize("mxf_operator, mxnet_operator, inputs, case", [
(add, mx.nd.add, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], "add"),
(subtract, mx.nd.subtract, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], "sub"),
(multiply, mx.nd.multiply, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], "mul"),
(divide, mx.nd.divide, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], "div"),
(power, mx.nd.power, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], "pow"),
(transpose, mx.nd.transpose, [mx.nd.array(np.random.rand(1,4))], "transpose"),
])
def test_operators_variable_builtins(self, mxf_operator, mxnet_operator, inputs, case):
m = Model()
v1 = Variable()
v2 = Variable()
variables = [v1, v2] if len(inputs) > 1 else [v1]
m.r = mxf_operator(*variables)
vs = [v for v in m.r.factor.inputs]
variables_rt = {v[1].uuid: inputs[i] for i,v in enumerate(vs)}
r_eval = m.r.factor.eval(mx.nd, variables=variables_rt)
m2 = Model()
v12 = Variable()
v22 = Variable()
variables2 = [v12, v22] if len(inputs) > 1 else [v12]
if case == "add":
m2.r = v12 + v22
elif case == "sub":
m2.r = v12 - v22
elif case == "mul":
m2.r = v12 * v22
elif case == "div":
m2.r = v12 / v22
elif case == "pow":
m2.r = v12 ** v22
elif case == "transpose":
m2.r = transpose(v12)
vs2 = [v for v in m2.r.factor.inputs]
variables_rt2 = {v[1].uuid: inputs[i] for i,v in enumerate(vs2)}
p_eval = m2.r.factor.eval(mx.nd, variables=variables_rt2)
assert np.allclose(r_eval.asnumpy(), p_eval.asnumpy()), (r_eval, p_eval)
@pytest.mark.parametrize("mxf_operator, mxnet_operator, inputs, properties", [
(add, mx.nd.add, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], {}),
(subtract, mx.nd.subtract, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], {}),
(multiply, mx.nd.multiply, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], {}),
(divide, mx.nd.divide, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], {}),
(power, mx.nd.power, [mx.nd.array(np.random.rand(1,4)), mx.nd.array(np.random.rand(1,4))], {}),
(square, mx.nd.square, [mx.nd.array(np.random.rand(1,4))], {}),
(exp, mx.nd.exp, [mx.nd.array(np.random.rand(1,4))], {}),
(log, mx.nd.log, [mx.nd.array(np.random.rand(1,4))], {}),
(sum, mx.nd.sum, [mx.nd.array(np.random.rand(1,4))], {}),
(mean, mx.nd.mean, [mx.nd.array(np.random.rand(1,4))], {}),
(prod, mx.nd.prod, [mx.nd.array(np.random.rand(1,4))], {}),
(dot, mx.nd.dot, [mx.nd.array(np.random.rand(1,4,1)), mx.nd.array(np.random.rand(1,1,4))], {}),
(dot, mx.nd.dot, [mx.nd.array(np.random.rand(1,1,4)), mx.nd.array(np.random.rand(1,4,1))], {}),
(diag, mx.nd.diag, [mx.nd.array(np.random.rand(1,4,4))], {}),
(reshape, mx.nd.reshape, [mx.nd.array(np.random.rand(1,4))], {'shape':(2,2), 'reverse': False}),
(reshape, mx.nd.reshape, [mx.nd.array(np.random.rand(1,4,4))], {'shape':(1,16), 'reverse': False}),
(transpose, mx.nd.transpose, [mx.nd.array(np.random.rand(1,4))], {}),
])
def test_operators(self, mxf_operator, mxnet_operator, inputs, properties):
mxf_result = self._test_operator(mxf_operator, inputs, properties)
inputs_unsampled = [v[0] for v in inputs]
mxnet_result = mxnet_operator(*inputs_unsampled, **properties)
assert np.allclose(mxf_result.asnumpy(), mxnet_result.asnumpy()), (mxf_result, mxnet_result)
@pytest.mark.parametrize("mxf_operator", [
(add),
(reshape),
])
def test_empty_operator(self, mxf_operator):
with pytest.raises(ModelSpecificationError, message="Operator should fail if not passed the correct arguments.") as excinfo:
mxf_result = mxf_operator()
assert excinfo.value is not None
```
#### File: components/variables/var_trans_test.py
```python
import pytest
import mxnet as mx
import numpy as np
import numpy.testing as npt
from mxfusion.components.variables.var_trans import PositiveTransformation, Logistic
@pytest.mark.usefixtures("set_seed")
class TestVariableTransformation(object):
"""
Tests the MXFusion.core.var_trans file for variable transformations
"""
def test_softplus(self):
v_orig = mx.nd.array([-10.], dtype=np.float64)
p = PositiveTransformation()
v_pos = p.transform(v_orig)
v_inv_trans = p.inverseTransform(v_pos)
assert v_orig.asnumpy()[0] < 0
assert v_pos.asnumpy()[0] > 0
assert v_inv_trans.asnumpy()[0] < 0
npt.assert_allclose(v_inv_trans.asnumpy()[0], v_orig.asnumpy()[0], rtol=1e-7, atol=1e-10)
@pytest.mark.parametrize("x, rtol, atol", [
(mx.nd.array([10], dtype=np.float64), 1e-7, 1e-10),
(mx.nd.array([1e-30], dtype=np.float64), 1e-7, 1e-10),
(mx.nd.array([5], dtype=np.float32), 1e-4, 1e-5),
(mx.nd.array([1e-6], dtype=np.float32), 1e-4, 1e-5)
])
def test_softplus_numerical(self, x, rtol, atol):
p = PositiveTransformation()
mf_pos = p.transform(x)
mf_inv = p.inverseTransform(mf_pos)
np_pos = np.log1p(np.exp(x.asnumpy()))
np_inv = np.log(np.expm1(np_pos))
npt.assert_allclose(mf_pos.asnumpy(), np_pos, rtol=rtol, atol=atol)
npt.assert_allclose(mf_inv.asnumpy(), np_inv, rtol=rtol, atol=atol)
npt.assert_allclose(mf_inv.asnumpy(), x.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize("x, upper, lower, rtol, atol", [
(mx.nd.array([10], dtype=np.float64), 2, 20, 1e-7, 1e-10),
(mx.nd.array([1e-3], dtype=np.float64), 1e-6, 1e-2, 1e-7, 1e-10),
(mx.nd.array([1], dtype=np.float32), 1, 200000, 1e-4, 1e-5),
(mx.nd.array([5], dtype=np.float32), 2, 10000, 1e-4, 1e-5)
])
def test_logistic(self, x, upper, lower, rtol, atol):
transform = Logistic(upper, lower)
x_trans = transform.transform(x)
x_inversed = transform.inverseTransform(x_trans)
assert x_inversed.dtype == x.dtype
assert np.isclose(x.asnumpy(), x_inversed.asnumpy(), rtol=rtol, atol=atol)
```
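For reference, the numpy expressions used as the ground truth in `test_softplus_numerical` are just the softplus map and its inverse. A standalone sketch of that reference (ignoring any extra numerical safeguards a library transform such as `PositiveTransformation` may apply) is:
```python
import numpy as np

def softplus(x):
    # unconstrained -> positive: y = log(1 + exp(x))
    return np.log1p(np.exp(x))

def softplus_inverse(y):
    # positive -> unconstrained: x = log(exp(y) - 1)
    return np.log(np.expm1(y))

x = np.array([-10.0, 0.5, 3.0])
y = softplus(x)
assert np.all(y > 0)
assert np.allclose(softplus_inverse(y), x)
```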
#### File: testing/inference/expectation_test.py
```python
import mxnet as mx
import numpy as np
import pytest
import mxfusion as mf
from mxfusion import Model, Variable
from mxfusion.inference import GradBasedInference, TransferInference, ExpectationScoreFunctionAlgorithm, ExpectationAlgorithm
@pytest.mark.usefixtures("set_seed")
class TestExpectationInference(object):
"""
Test class that tests the MXFusion.inference.expectation classes.
"""
def make_model(self):
class Func(mx.gluon.HybridBlock):
def hybrid_forward(self, F, v2, v3, v4, v1):
return - (F.sum(v2 * F.minimum(v4, v1) - v3 * v1))
m = Model()
N = 1
m.v1 = Variable(shape=(N,))
m.v2 = Variable(shape=(N,))
m.v3 = Variable(shape=(N,))
m.v4 = mf.components.distributions.Gamma.define_variable(alpha=mx.nd.array([1]),
beta=mx.nd.array([0.1]),
shape=(N,))
v5 = mf.components.functions.MXFusionGluonFunction(Func(), num_outputs=1)
m.v5 = v5(m.v2, m.v3, m.v4, m.v1)
return m
@pytest.mark.parametrize("v2, v3", [
(mx.nd.random.uniform(1,100) * 2, mx.nd.random.uniform(1,100) * 0.5),
])
def test_inference_basic_run(self, v2, v3):
# TODO test correctness
m = self.make_model()
observed = [m.v2, m.v3]
target_variables = [m.v5]
infr = GradBasedInference(
ExpectationScoreFunctionAlgorithm(m, observed, num_samples=10, target_variables=target_variables))
infr.run(max_iter=1, v2=v2, v3=v3, verbose=True)
infr2 = TransferInference(
ExpectationAlgorithm(m, observed, num_samples=10, target_variables=target_variables), infr_params=infr.params)
infr2.run(max_iter=1, v2=v2, v3=v3, verbose=True)
```
#### File: testing/util/customop_test.py
```python
import pytest
import mxnet as mx
import numpy as np
from mxfusion.util.customop import broadcast_to_w_samples
@pytest.mark.usefixtures("set_seed")
class TestBroadcastToWithSamplesOp(object):
"""
Tests the custom operator BroadcastToWithSamples
"""
def make_block(self, isSamples, shape):
class Testop(mx.gluon.HybridBlock):
def __init__(self, isSamples, shape, **kw):
self.isSamples = isSamples
self.shape = shape
super(Testop, self).__init__(**kw)
def hybrid_forward(self, F, x, **kw):
return broadcast_to_w_samples(F, x, self.shape, self.isSamples)
return Testop(isSamples, shape)
@pytest.mark.parametrize("data, isSamples, shape, hybridize", [
(np.array([[2, 3, 4], [3, 4, 5]]), False, (5, 4, 2, 3), True),
(np.array([[2, 3, 4], [3, 4, 5]]), True, (2, 4, 5, 3), True),
(np.array([2, 3, 4]), False, (2, 4, 5, 3), True),
(np.array([[2, 3, 4], [3, 4, 5]]), False, (5, 4, 2, 3), False),
(np.array([[2, 3, 4], [3, 4, 5]]), True, (2, 4, 5, 3), False),
(np.array([2, 3, 4]), False, (2, 4, 5, 3), False)
])
def test_forward(self, data, isSamples, shape, hybridize):
block = self.make_block(isSamples, shape)
if hybridize:
block.hybridize()
res = block(mx.nd.array(data, dtype=np.float64))
res_np = np.empty(shape)
if isSamples:
res_np[:] = data.reshape(*((data.shape[0],)+(1,)*(len(shape)-len(data.shape))+data.shape[1:]))
else:
res_np[:] = data
assert res.shape == shape
assert np.all(res_np == res.asnumpy())
@pytest.mark.parametrize("data, isSamples, shape, w, hybridize", [
(np.array([[2, 3, 4], [3, 4, 5]]), False, (5, 4, 2, 3), np.random.rand(5, 4, 2, 3), True),
(np.array([[2, 3, 4], [3, 4, 5]]), True, (2, 4, 5, 3), np.random.rand(2, 4, 5, 3), True),
(np.array([2, 3, 4]), False, (2, 4, 5, 3), np.random.rand(2, 4, 5, 3), True),
(np.array([[2, 3, 4], [3, 4, 5]]), False, (5, 4, 2, 3), np.random.rand(5, 4, 2, 3), False),
(np.array([[2, 3, 4], [3, 4, 5]]), True, (2, 4, 5, 3), np.random.rand(2, 4, 5, 3), False),
(np.array([2, 3, 4]), False, (2, 4, 5, 3), np.random.rand(2, 4, 5, 3), False)
])
def test_backward(self, data, isSamples, shape, w, hybridize):
block = self.make_block(isSamples, shape)
data_mx = mx.nd.array(data, dtype=np.float64)
data_mx.attach_grad()
w_mx = mx.nd.array(w, dtype=np.float64)
with mx.autograd.record():
b = block(data_mx)
loss = mx.nd.sum(b * w_mx)
loss.backward()
data_grad = data_mx.grad.asnumpy()
if isSamples:
grad_np = w.reshape(*((data.shape[0], -1) + data.shape[1:])).sum(1)
else:
grad_np = w.reshape(*((-1,) + data.shape)).sum(0)
assert data_grad.shape == data.shape
assert np.allclose(data_grad, grad_np)
```
|
{
"source": "JereMIbq1995/genie-core",
"score": 4
}
|
#### File: genie_core/cast/actors.py
```python
from collections import defaultdict
from genie_core.cast.actor import Actor
class Actors:
"""A collection of actors.
The responsibility of Actors is to keep track of them. It provides methods
for adding, removing and finding them in a variety of ways.
You might be wondering why we don't just use dictionaries or other
generic collections in place of this class. Encapsulating actors in this
way gives us precise control over how they are modified. For example, we
might decide to delay removals instead of executing them immediately.
Another important benefit of encapsulating actors this way is that it
allows us to change the underlying data structure and algorithms at any
time without affecting the rest of the project.
Attributes:
_current_actors: Set[cls: Type[Actor]]
_removed_actors: Set[cls: Type[Actor]]
"""
def __init__(self):
"""Initializes a new instance of Cast."""
self._current_actors = set()
self._removed_actors = set()
def add_actor(self, actor):
"""Adds the given actor to the cast.
Args:
actor: Actor, the actor to add.
"""
self._current_actors.add(actor)
def apply_changes(self):
"""Permantely removes all of the dead actors."""
self._current_actors -= self._removed_actors
self._removed_actors.clear()
def remove_actor(self, actor):
"""Marks the given actor for removal from the cast. Clients must call clean_actors to permanently remove actors from the cast.
Args:
actor: Actor, The actor to remove.
"""
self._removed_actors.add(actor)
def with_traits(self, *types):
"""Finds those actors with the given types of traits.
types: a tuple of traits passed in as data types (not object)
Returns:
set: A set of actors.
"""
return [a for a in self._current_actors if a.has_traits(*types)]
```
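A short usage sketch of this collection, using the `Body` trait defined in the next file; the `Actor` API (`add_trait`, `get_trait`) is assumed from how it is used in the genie-plugins tests further below, so treat this as illustrative only.
```python
from genie_core.cast.actors import Actors
from genie_core.cast.actor import Actor
from genie_core.cast.Body import Body

cast = Actors()

player = Actor()
player.add_trait(Body(x=10, y=20, vx=1, vy=0, height=32, width=32))
cast.add_actor(player)

# Query by trait type and advance every matching body by its velocity.
for actor in cast.with_traits(Body):
    actor.get_trait(Body).move()

# Removal is deferred until apply_changes() is called.
cast.remove_actor(player)
cast.apply_changes()
```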
#### File: genie_core/cast/Body.py
```python
from .trait import Trait
class Body(Trait):
def __init__(self, x : float = 0,
y : float = 0,
vx : float = 0,
vy : float = 0,
height : float = 0,
width : float = 0):
self._x = x
self._y = y
self._vx = vx
self._vy = vy
self._height = height
self._width = width
def get_position(self):
return (self._x, self._y)
def set_position(self, x : float, y : float):
self._x = x
self._y = y
def get_x(self):
return self._x
def get_y(self):
return self._y
def set_x(self, x):
self._x = x
def set_y(self, y):
self._y = y
def get_vx(self):
return self._vx
def get_vy(self):
return self._vy
def set_vx(self, vx):
self._vx = vx
def set_vy(self, vy):
self._vy = vy
def get_height(self):
return self._height
def set_height(self, height : float):
self._height = height
def get_width(self):
return self._width
def set_width(self, width : float):
self._width = width
def incr_x(self, dx):
self._x += dx
def incr_y(self, dy):
self._y += dy
def move(self):
"""
Move the object by its current velocity (vx, vy).
There is more to think about for this function.
"""
self._x += self._vx
self._y += self._vy
```
#### File: genie_core/services/KeyBoardService.py
```python
class KeyBoardService():
def __init__(self):
pass
def is_key_pressed(self, *keys):
pass
def is_key_released(self, *keys):
pass
```
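KeyBoardService is only an interface; a concrete backend supplies the two queries. The sketch below shows one possible pygame-backed implementation, assuming callers pass pygame key constants (e.g. `pygame.K_a`); the actual PygameKeyboardService in genie-plugins maps its own `keys` constants and also exposes helpers such as `get_keys_state`, so this is purely an illustration.
```python
import pygame
from genie_core.services.KeyBoardService import KeyBoardService

class NaivePygameKeyboardService(KeyBoardService):
    """Illustrative sketch only; not the genie-plugins implementation."""

    def is_key_pressed(self, *keys):
        state = pygame.key.get_pressed()
        return all(state[k] for k in keys)

    def is_key_released(self, *keys):
        state = pygame.key.get_pressed()
        return all(not state[k] for k in keys)
```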
|
{
"source": "JereMIbq1995/genie-plugins",
"score": 3
}
|
#### File: test/services/TestMouseService.py
```python
import sys
import unittest
import pygame
sys.path.append('..\\..')
sys.path.append('..')
from genie_plugins.constants import mouse
from genie_plugins.services.PygameMouseService import PygameMouseService
FPS = 60
W_SIZE = (900, 500)
WIN = pygame.display.set_mode(W_SIZE)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0, 0)
def main():
"""
A simple game loop with nothing going on except for the checking of
whether the mouse buttons are pressed
If none of the mouse buttons are pressed, the loop will continually print out:
"All mouse buttons released!"
If any of the mouse buttons are pressed, the console will print out:
"<button> mouse is pressed"
... and the "All mouse buttons released!" message will not print out
The loop will also continually print out whether the mouse has moved
from the last iteration AND the current coordinates of the mouse
"""
# What we're trying to test:
ms = PygameMouseService()
# Game loop:
clock = pygame.time.Clock()
running = True
while running:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Test is_button_pressed():
left_pressed = ms.is_button_pressed(mouse.LEFT)
middle_pressed = ms.is_button_pressed(mouse.MIDDLE)
right_pressed = ms.is_button_pressed(mouse.RIGHT)
extra1_pressed = ms.is_button_pressed(mouse.EXTRA1)
extra2_pressed = ms.is_button_pressed(mouse.EXTRA2)
if left_pressed:
print("LEFT mouse is pressed")
if middle_pressed:
print("MIDDLE mouse is pressed")
if right_pressed:
print("RIGHT mouse is pressed")
if extra1_pressed:
print("EXTRA1 mouse is pressed")
if extra2_pressed:
print("EXTRA2 mouse is pressed")
# Test is_button_released():
left_released = ms.is_button_released(mouse.LEFT)
middle_released = ms.is_button_released(mouse.MIDDLE)
right_released = ms.is_button_released(mouse.RIGHT)
extra1_released = ms.is_button_released(mouse.EXTRA1)
extra2_released = ms.is_button_released(mouse.EXTRA2)
if (left_released and middle_released and right_released and extra1_released and extra2_released):
print("All mouse buttons released!")
# Test has_mouse_moved()
mouse_moved = ms.has_mouse_moved()
print("Mouse moved: ", mouse_moved)
# Test get_current_coordinates()
mouse_position = ms.get_current_coordinates()
print ("Mouse coordinates: ", mouse_position)
pygame.quit()
if __name__ == "__main__":
main()
```
#### File: test/services/TestScreenService.py
```python
import sys
from typing import Dict
import unittest
import pygame
from genie_core.cast.actors import Actors
from genie_core.cast.actor import Actor
from genie_core.cast.trait import Trait
from genie_core.cast.Body import Body
from genie_core.cast.Image import Image
sys.path.append('..\\..')
sys.path.append('..')
from genie_plugins.constants import mouse, keys
from genie_plugins.services.PygameScreenService import PygameScreenService
from genie_plugins.services.PygameKeyboardService import PygameKeyboardService
FPS = 120
W_SIZE = (900, 500)
SCREEN_CENTER = (W_SIZE[0]/2, W_SIZE[1]/2)
# WIN = pygame.display.set_mode(W_SIZE)
START_POS_1 = (W_SIZE[0]/5, W_SIZE[1]/2)
START_POS_2 = (4*W_SIZE[0]/5, W_SIZE[1]/2)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0, 0)
VEL = 5
def yellow_input(keys_state, ship : Actor):
if keys_state[keys.A] and ship.get_trait(Body).get_position()[0] > 0:
ship.get_trait(Body).incr_x(-VEL)
if keys_state[keys.D] and ship.get_trait(Body).get_position()[0] < W_SIZE[0] / 2:
ship.get_trait(Body).incr_x(VEL)
if keys_state[keys.S] and ship.get_trait(Body).get_position()[1] < W_SIZE[1]:
ship.get_trait(Body).incr_y(VEL)
if keys_state[keys.W] and ship.get_trait(Body).get_position()[1] > 0:
ship.get_trait(Body).incr_y(-VEL)
def red_input(keys_state, ship : Actor):
if keys_state[keys.LEFT] and ship.get_trait(Body).get_position()[0] > W_SIZE[0] / 2:
ship.get_trait(Body).incr_x(-VEL)
if keys_state[keys.RIGHT] and ship.get_trait(Body).get_position()[0] < W_SIZE[0]:
ship.get_trait(Body).incr_x(VEL)
if keys_state[keys.DOWN] and ship.get_trait(Body).get_position()[1] < W_SIZE[1]:
ship.get_trait(Body).incr_y(VEL)
if keys_state[keys.UP] and ship.get_trait(Body).get_position()[1] > 0:
ship.get_trait(Body).incr_y(-VEL)
def main():
"""
A simple game loop that draws two spaceship actors on top of a background
image using PygameScreenService.
The yellow ship is moved with the W/A/S/D keys and the red ship with the
arrow keys; each ship is clamped to its own half of the window.
Key states are read every frame through PygameKeyboardService.
"""
# What we're trying to test:
ss = PygameScreenService(W_SIZE)
ks = PygameKeyboardService()
# First let's create a cast with 2 actors: yellow_space_ship and red_space_ship
game_cast = Actors()
background_image = Actor()
background_image.add_trait(Body(0, 0, 0, 0, 900, 500))
background_image.add_trait(Image("../../test/assets/space.png", 0.5, 90))
# Creating a yellow_space_ship:
yellow_space_ship = Actor()
yellow_space_ship.add_trait(Body(200, 250, 0, 0, 40, 55))
yellow_space_ship.add_trait(Image("../assets/spaceship_yellow.png", 0.1, 90))
# Creating a red_space_ship:
red_space_ship = Actor()
red_space_ship.add_trait(Body(700, 250, 0, 0, 40, 55))
red_space_ship.add_trait(Image("../assets/spaceship_red.png", 0.1, 270))
# Add the 2 spaceships to the cast:
game_cast.add_actor(yellow_space_ship)
game_cast.add_actor(red_space_ship)
# Game loop:
clock = pygame.time.Clock()
running = True
while running:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Input:
key_states = ks.get_keys_state(keys.A, keys.S, keys.D, keys.W, keys.LEFT, keys.RIGHT, keys.UP, keys.DOWN)
yellow_input(key_states, yellow_space_ship)
red_input(key_states, red_space_ship)
# Draw everything:
ss.draw_frame(game_cast, background_image=background_image)
pygame.quit()
if __name__ == "__main__":
main()
```
|
{
"source": "jeremicaleksandar/pdaj2",
"score": 3
}
|
#### File: double_pendulum/tasks/worker.py
```python
from math import log
from ..app import app
import sys
import numpy as np
from scipy.integrate import odeint
#based on https://scipython.com/blog/the-double-pendulum/
# The gravitational acceleration (m.s-2).
g = 9.81
def deriv(y, t, L1, L2, m1, m2):
"""Return the first derivatives of y = theta1, z1, theta2, z2."""
theta1, z1, theta2, z2 = y
c, s = np.cos(theta1-theta2), np.sin(theta1-theta2)
theta1dot = z1
z1dot = (m2*g*np.sin(theta2)*c - m2*s*(L1*z1**2*c + L2*z2**2) -
(m1+m2)*g*np.sin(theta1)) / L1 / (m1 + m2*s**2)
theta2dot = z2
z2dot = ((m1+m2)*(L1*z1**2*s - g*np.sin(theta2) + g*np.sin(theta1)*c) +
m2*L2*z2**2*s*c) / L2 / (m1 + m2*s**2)
return theta1dot, z1dot, theta2dot, z2dot
def solve(L1, L2, m1, m2, tmax, dt, y0):
t = np.arange(0, tmax+dt, dt)
# Do the numerical integration of the equations of motion
y = odeint(deriv, y0, t, args=(L1, L2, m1, m2))
theta1, theta2 = y[:,0], y[:,2]
# Convert to Cartesian coordinates of the two bob positions.
x1 = L1 * np.sin(theta1)
y1 = -L1 * np.cos(theta1)
x2 = x1 + L2 * np.sin(theta2)
y2 = y1 - L2 * np.cos(theta2)
return theta1, theta2, x1, y1, x2, y2
@app.task
def simulate_pendulum_instance(L1, L2, m1, m2, tmax, dt, theta1_init, theta2_init):
y0 = np.array([
theta1_init,
0.0,
theta2_init,
0.0
])
# also return the thetas so we know which initial conditions these results
# belong to (so they can later be written to a file)
return theta1_init, theta2_init, solve(L1, L2, m1, m2, tmax, dt, y0)
```
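`simulate_pendulum_instance` is a Celery task, so a driver would typically fan it out over a grid of initial angles. The sketch below is a hypothetical driver using `celery.group`; the import path and parameter values are illustrative assumptions, not part of the original project.
```python
import numpy as np
from celery import group

from double_pendulum.tasks.worker import simulate_pendulum_instance  # path assumed

L1 = L2 = 1.0        # pendulum arm lengths (m)
m1 = m2 = 1.0        # bob masses (kg)
tmax, dt = 30.0, 0.01
theta_resolution = 10

thetas = np.linspace(0, 2 * np.pi, theta_resolution)
job = group(
    simulate_pendulum_instance.s(L1, L2, m1, m2, tmax, dt, theta1, theta2)
    for theta1 in thetas
    for theta2 in thetas
)
# Each result is (theta1_init, theta2_init, solve(...) tuple), matching the task's return value.
results = job.apply_async().get()
```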
|
{
"source": "jeremicaleksandar/pdaj",
"score": 3
}
|
#### File: jeremicaleksandar/pdaj/shell.py
```python
import argparse
import logging
import os
from defaults import __version__, DEFAULT_THETA_RES, DEFAULT_TMAX, DEFAULT_DT, DEFAULT_FILENAME
from double import do_the_thing
def main():
# Setup command line option parser
parser = argparse.ArgumentParser(
description='Double pendulum simulation.'
)
parser.add_argument(
'-r',
'--theta_resolution',
type = int,
default=DEFAULT_THETA_RES
)
parser.add_argument(
'--tmax',
type = int,
default=DEFAULT_TMAX,
help='end time'
)
parser.add_argument(
'--dt',
type = int,
default=DEFAULT_DT,
help='delta time'
)
parser.add_argument(
'-o',
'--output_filename',
default=DEFAULT_FILENAME,
help='output csv file filename'
)
parser.add_argument(
'-g',
'--graph',
action='store_true',
help='Draw the graph too.'
)
parser.add_argument(
'-p',
'--parallel',
action='store_true',
help='Use multiprocessing to parallelize the code.'
)
'''
parser.add_argument(
'-q',
'--quiet',
action='store_const',
const=logging.WARN,
dest='verbosity',
help='Be quiet, show only warnings and errors'
)
parser.add_argument(
'-v',
'--verbose',
action='store_const',
const=logging.DEBUG,
dest='verbosity',
help='Be very verbose, show debug information'
)
'''
parser.add_argument(
'--version',
action='version',
version="%(prog)s " + __version__
)
args = parser.parse_args()
# Configure logging
#log_level = args.verbosity or logging.INFO
#logging.basicConfig(level=log_level, format="%(asctime)s [%(levelname)s] %(message)s")
#if not args.results_file:
# args.results_file = os.path.splitext(args.data_file)[0] + '.hdf5'
do_the_thing(
theta_resolution=args.theta_resolution,
tmax=args.tmax,
dt=args.dt,
filename=args.output_filename,
graph=args.graph,
parallel=args.parallel
)
if __name__ == '__main__':
main()
```
|
{
"source": "jere-mie/anonymousblog",
"score": 3
}
|
#### File: anonymousblog/blog/routes.py
```python
from flask import render_template, url_for, flash, redirect, request
from blog import app, db
from blog.forms import Registration, Login, PostForm
from blog.models import User, Post, Reply
from flask_login import login_user, current_user, logout_user, login_required
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def home():
posts = Post.query.all()
posts.reverse()
return render_template('home.html', posts=posts)
@app.route('/about', methods=['GET'])
def about():
return render_template('about.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = Registration()
if form.validate_on_submit():
user = User(username=form.username.data, password=form.password.data)
db.session.add(user)
db.session.commit()
flash(f'Created account for {form.username.data}. You may now log in.', 'success')
return redirect(url_for('login'))
return render_template("register.html", form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = Login()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user and form.password.data==user.password:
login_user(user, remember=form.rememberMe.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
flash('Error Logging In', 'danger')
return render_template("login.html", form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/newpost', methods=['GET', 'POST'])
@login_required
def newPost():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('You have successfully made a post!', 'success')
return redirect(url_for("home"))
return render_template("newPost.html", form=form, legend='Create a Post')
@app.route('/post/<post_id>/delete', methods=['GET','POST'])
@login_required
def deletePost(post_id):
post = Post.query.get_or_404(post_id)
if post.author!=current_user:
flash('You cannot delete someone else\'s post!', 'danger')
return redirect(url_for('home'))
for reply in post.replies:
db.session.delete(reply)
db.session.delete(post)
db.session.commit()
flash('Successfully deleted post!', 'success')
return redirect(url_for('home'))
@app.route('/post/<post_id>/update', methods=['GET', 'POST'])
@login_required
def updatePost(post_id):
post = Post.query.get_or_404(post_id)
if post.author!=current_user:
flash('You cannot update someone else\'s post!', 'danger')
return redirect(url_for('home'))
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('You have successfully updated the post!', 'success')
return redirect(url_for("home"))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template("newPost.html", form=form, legend='Update Post')
@app.route('/post/<post_id>', methods=['GET', 'POST'])
def seePost(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', post=post)
@app.route('/post/<post_id>/reply', methods=['GET', 'POST'])
def reply(post_id):
post = Post.query.get_or_404(post_id)
form = PostForm()
if form.validate_on_submit():
reply = Reply(title=form.title.data, content=form.content.data, writer=current_user, original=post)
db.session.add(reply)
db.session.commit()
flash('You have successfully added a post!', 'success')
return render_template('post.html', post=post)
return render_template("newPost.html", form=form, legend='Reply to a Post')
```
|
{
"source": "JeremieBeaudoin/LavalUniversityStockExchange",
"score": 3
}
|
#### File: populateTables/pythonModules/watchListCreation.py
```python
import random
from dotenv import load_dotenv
from src.app.database.utility import SqlUtility
load_dotenv()
def getUserTuples():
userRequest = """SELECT U.uuid FROM users U;"""
userTuplesFound = SqlUtility.executeSelectRequest(userRequest)
if len(userTuplesFound) == 0:
raise ValueError('No tuples founds')
return userTuplesFound
def getStockTuples():
requestString = """SELECT S.ticker, S.suffix, S.regular_market_price FROM stocks S;"""
stockTuplesFound = SqlUtility.executeSelectRequest(requestString)
if len(stockTuplesFound) == 0:
raise ValueError('No tuples founds')
return stockTuplesFound
def addStocksInUsersWatchlist(users, stocks):
stockTupleLength = len(stocks)
insertionCounter = 0
for userTuple in users:
randomNumberOfStocks = random.randrange(0, 10)
for i in range(randomNumberOfStocks):
try:
randomStockIndex = random.randrange(0, stockTupleLength - 1)
pickedStock = stocks[randomStockIndex]
watchQuery = f"""INSERT INTO watch VALUES ('{userTuple[0]}', '{pickedStock[0]}', '{pickedStock[1]}', {pickedStock[2]})"""
SqlUtility.executeInsertRequest(watchQuery)
insertionCounter += 1
except Exception:
print('error!')
return insertionCounter
# userTuples = getUserTuples()
# stockTuples = getStockTuples()
# tuplesInserted = addStocksInUsersWatchlist(userTuples, stockTuples)
# print(tuplesInserted)
```
#### File: database/utility/SqlUtility.py
```python
import os
import pymysql.cursors
from src.app.database.utility.QueryExceptions import InvalidRequest
def getPyMySqlConnection(useProductionDatabase):
return pymysql.connect(host = os.getenv('DB_HOST_NAME'),
user = os.getenv('MY_SQL_USER'),
password = os.getenv('<PASSWORD>'),
db = os.getenv('PRODUCTION_DB_NAME'))
def executeSelectRequest(sqlSelectRequest, useProductionDatabase = True):
dbConnection = getPyMySqlConnection(useProductionDatabase)
tuples = ()
try:
with dbConnection.cursor() as cur:
cur.execute(sqlSelectRequest)
tuples = cur.fetchall()
except pymysql.Error as e:
print(e)
finally:
dbConnection.close()
return tuples
def executeInsertRequest(sqlInsertRequest, useProductionDatabase = True):
dbConnection = getPyMySqlConnection(useProductionDatabase)
try:
affectedRowCount = 0
with dbConnection.cursor() as cur:
cur.execute(sqlInsertRequest)
affectedRowCount = cur.rowcount
if affectedRowCount > 0:
dbConnection.commit()
finally:
dbConnection.close()
return affectedRowCount
def executeUpdateRequest(sqlUpdateRequest, useProductionDatabase = True):
dbConnection = getPyMySqlConnection(useProductionDatabase)
updatedRowCount = 0
try:
with dbConnection.cursor() as cur:
cur.execute(sqlUpdateRequest)
updatedRowCount = cur.rowcount
if updatedRowCount > 0:
dbConnection.commit()
except pymysql.Error as e:
print(e)
finally:
dbConnection.close()
return updatedRowCount
def executeDeleteRequest(sqlDeleteRequest, useProductionDatabase = True):
dbConnection = getPyMySqlConnection(useProductionDatabase)
deletedRowCount = 0
try:
with dbConnection.cursor() as cur:
cur.execute(sqlDeleteRequest)
deletedRowCount = cur.rowcount
if deletedRowCount > 0:
dbConnection.commit()
finally:
dbConnection.close()
return deletedRowCount
def executeProcedureRequest(sqlProcedureRequest, useProductionDatabase = True):
dbConnection = getPyMySqlConnection(useProductionDatabase)
try:
with dbConnection.cursor() as cur:
cur.execute(sqlProcedureRequest)
dbConnection.commit()
except Exception:
raise InvalidRequest('Could not complete procedure execution.')
finally:
dbConnection.close()
```
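A hedged usage sketch of the helpers above: they build their own connection from environment variables, so callers only pass SQL text. The table name and values below are illustrative and assume the schema from this repository's populate scripts.
```python
# Hypothetical sketch: exercising the SqlUtility helpers above. Assumes the
# DB_HOST_NAME / MY_SQL_USER / password / PRODUCTION_DB_NAME environment
# variables are set and that the 'stocks' and 'watch' tables exist.
from src.app.database.utility import SqlUtility

rows = SqlUtility.executeSelectRequest("SELECT ticker, suffix FROM stocks;")
for ticker, suffix in rows:
    print(ticker, suffix)

# executeInsertRequest returns the number of affected rows (0 if nothing was written).
inserted = SqlUtility.executeInsertRequest(
    "INSERT INTO watch VALUES ('some-user-uuid', 'AAPL', '', 150.0)"
)
print(inserted, "row(s) inserted")
```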
#### File: collections/orders/ordersDatabaseAccess.py
```python
from uuid import uuid4
import src.app.domain.collections.orders.ordersDatabaseUtility as orderUtility
import src.app.domain.collections.orders.ordersQueries as ordersQueries
from src.app.database.utility import SqlUtility
from src.app.database.utility.QueryExceptions import InvalidRequest
def getAllOrdersForUser(userId):
request = ordersQueries.getAllOrdersForUserId(userId)
tuples = SqlUtility.executeSelectRequest(request)
responseObject = orderUtility.parseTuplesAsOrderList(tuples)
return responseObject
def getAllOrdersForPortfolio(portfolioId):
request = ordersQueries.getAllOrdersForPortfolioId(portfolioId)
tuples = SqlUtility.executeSelectRequest(request)
responseObject = orderUtility.parseTuplesAsOrderList(tuples)
return responseObject
def createOrder(userId, portfolioId, orderObject):
newOrderUUID = uuid4()
orderUtility.validateOrderObject(orderObject)
if orderObject['nb_shares'] == 0:
raise InvalidRequest('An order cannot be made on 0 shares')
if orderObject['buy'] == 0:
orderObject['nb_shares'] = -1 * abs(orderObject['nb_shares'])
if 'limit_price' not in orderObject.keys():
orderObject['limit_price'] = "null"
portfolioInsertRequest = ordersQueries.createOrder(portfolioId, newOrderUUID, orderObject)
SqlUtility.executeProcedureRequest(portfolioInsertRequest)
orderUtility.sendOrderConfirmation(userId, orderObject)
return {'id': str(newOrderUUID)}
def cancelOrder(orderId):
orderCancelRequest = ordersQueries.cancelOrder(orderId)
SqlUtility.executeProcedureRequest(orderCancelRequest)
return
```
#### File: collections/orders/ordersQueries.py
```python
def getAllOrdersForUserId(userId):
return f"""SELECT CO.*
FROM portfolios P
INNER JOIN closed_orders CO ON P.uuid = CO.portfolio_uuid
WHERE P.user_uuid = '{userId}'
UNION
SELECT PO.*
FROM portfolios P
INNER JOIN pending_orders PO ON P.uuid = PO.portfolio_uuid
WHERE P.user_uuid = '{userId}'
ORDER BY placed_time DESC;"""
def getAllOrdersForPortfolioId(portfolioId):
return f"""SELECT CO.*
FROM closed_orders CO
WHERE CO.portfolio_uuid = '{portfolioId}'
UNION
SELECT PO.*
FROM pending_orders PO
WHERE PO.portfolio_uuid = '{portfolioId}'
ORDER BY placed_time DESC;"""
def createOrder(portfolioId, orderId, orderObject):
return f"""CALL processNewOrder(
'{orderId}', '{portfolioId}', '{orderObject['stock_ticker']}',
'{orderObject['stock_suffix']}', {orderObject['nb_shares']},
{orderObject['limit_price']}, '{orderObject['expiration_time']}',
'{orderObject['type']}');"""
def cancelOrder(orderId):
return f"""CALL cancelOrder('{orderId}');"""
```
#### File: collections/portfolios/portfoliosDatabaseUtility.py
```python
from src.app.database.utility.QueryExceptions import InvalidRequest
import src.app.domain.collections.portfolios.portfoliosQueries as portfolioQueries
from src.app.database.utility import SqlUtility
def parseTuplesAsPortfolioList(tuples):
tupleList = []
tupleCounter = 0
for row in tuples:
portfolio = parseTuplesAsPortfolioObject(row)
tupleList += [portfolio]
tupleCounter += 1
return {'portfolios': tupleList, 'total': tupleCounter}
def parseTuplesAsPortfolioObject(queryTuple):
if len(queryTuple) != 10:
raise ValueError("Invalid amount of data in portfolio tuple")
portfolioObject = {
'id': queryTuple[0],
'user_id': queryTuple[1],
'name': queryTuple[2],
'type': queryTuple[3],
'currency': queryTuple[4],
'market_value': float(queryTuple[5] or 0),
'invested_amount': float(queryTuple[6] or 0),
'cash_amount': float(queryTuple[7] or 0),
'frozen_amount': float(queryTuple[8] or 0),
'all_time_gains': float(queryTuple[9] or 0)
}
return portfolioObject
def validatePortfolioObject(portfolio):
return None
def validateUserHasPortfolio(userId, portfolioId):
request = portfolioQueries.getPortfolioByIdForUser(userId, portfolioId)
queryResponse = SqlUtility.executeSelectRequest(request)
if queryResponse == ():
raise InvalidRequest('This user, if it exists, has no portfolio with this ID')
```
#### File: collections/transactions/transactionsDatabaseAccess.py
```python
from src.app.domain.collections.transactions.transactionsDatabaseUtility import *
import src.app.domain.collections.transactions.transactionsQueries as transactionsQueries
import src.app.domain.collections.portfolios.portfoliosDatabaseUtility as portfolioDatabaseUtility
from src.app.database.utility import SqlUtility
from src.app.database.utility.QueryExceptions import InvalidRequest
def getAllTransactions(userId):
request = transactionsQueries.getAllTransactions(userId)
print(request)
tuples = SqlUtility.executeSelectRequest(request)
responseObject = parseTuplesAsTransactionList(tuples)
return responseObject
def getTransactionsByPortfolioId(userId, portfolioId):
portfolioDatabaseUtility.validateUserHasPortfolio(userId, portfolioId)
transaction = transactionsQueries.getTransactionsByPortfolioId(portfolioId)
tuples = SqlUtility.executeSelectRequest(transaction)
responseObject = parseTuplesAsTransactionList(tuples)
return responseObject
def createTransactions(userId, portfolioId, transaction):
validateTransaction(transaction)
portfolioDatabaseUtility.validateUserHasPortfolio(userId, portfolioId)
insertRequest = transactionsQueries.insertTransaction(portfolioId, transaction)
insertedRowCount = SqlUtility.executeInsertRequest(insertRequest)
if insertedRowCount != 1:
raise InvalidRequest('Transaction creation failed (server error or duplicate error occurred)')
```
#### File: collections/transactions/transactionsQueries.py
```python
def getAllTransactions(userId):
return f"""SELECT * FROM transactions T, portfolios P
WHERE T.portfolio_uuid = P.uuid
AND P.user_uuid = '{userId}';"""
def getTransactionsByPortfolioId(portfolioId):
return f"""SELECT * FROM transactions T
WHERE '{portfolioId}' = T.portfolio_uuid;"""
def insertTransaction(portfolioId, transaction):
if transaction['type'] == 'withdrawal':
transaction['value'] = -1 * abs(transaction['value'])
return f"""CALL processNewTransaction('{portfolioId}', {transaction['value']}, '{transaction['bank_account']}',
'{transaction['description']}');"""
def countTransactions():
return f"SELECT COUNT(*) FROM transactions;"
```
#### File: collections/watchlist/watchlistDatabaseAccess.py
```python
from src.app.domain.collections.watchlist.watchlistDatabaseUtility import *
import src.app.domain.collections.watchlist.watchlistQueries as watchlistQueries
from src.app.database.utility import SqlUtility
from src.app.database.utility.StockUtility import splitTickerAggregate
from src.app.database.utility.QueryExceptions import InvalidRequest
def getAllStocksInWatchlistForUser(userId):
request = watchlistQueries.getAllStocksInUserWatchlist(userId)
tuples = SqlUtility.executeSelectRequest(request)
responseObject = parseTuplesAsWatchlist(tuples)
return responseObject
def postStockToWatchListForUser(userId, watchlistObject):
validateWatchlistObject(watchlistObject)
stockTickerAggregate = watchlistObject['ticker']
stockTicker, stockSuffix = splitTickerAggregate(stockTickerAggregate)
watchPrice = watchlistObject['watch_price']
watchlistRequest = watchlistQueries.addStockToWatchlistForUser(userId, stockTicker, stockSuffix, watchPrice)
insertedRowCount = SqlUtility.executeInsertRequest(watchlistRequest)
if insertedRowCount != 1:
raise InvalidRequest('Request post body is invalid or stock is already in watchlist')
return
def removeStockFromWatchlistForUser(userId, stockTickerAggregate):
stockTicker, stockSuffix = splitTickerAggregate(stockTickerAggregate)
request = watchlistQueries.deleteStockFromWatchlistForUser(userId, stockTicker, stockSuffix)
deletedRowCount = SqlUtility.executeDeleteRequest(request)
if deletedRowCount != 1:
raise InvalidRequest('This stock ticker and user id combination was not found')
return deletedRowCount
```
#### File: domain/communication/emailResource.py
```python
import os
import uuid
import yagmail
from dotenv import load_dotenv
from src.app.domain.communication.emailTemplates import *
load_dotenv()
baseUrl = os.getenv('API_BASE_URL')
def sendEmailConfirmationEmail(userId, targetEmail, userFirstName):
subject = 'LUSE - Email confirmation'
confirmationLink = f'{baseUrl}/accounts/confirm/{userId}'
body = emailConfirmationTemplate(userFirstName, confirmationLink)
sendEmail(targetEmail, subject, body)
def sendOrderConfirmationEmail(targetEmail, nbShares, ticker):
subject = 'LUSE - Order confirmation'
numberOfShares = int(nbShares)
if numberOfShares > 0:
body = orderBuyConfirmationTemplate(ticker, abs(numberOfShares))
else:
body = orderSellConfirmationTemplate(ticker, abs(numberOfShares))
sendEmail(targetEmail, subject, body)
def sendPasswordResetEmail(targetEmail):
subject = 'Reset your LUSE password'
resetLink = f'{baseUrl}/accounts/reset/{targetEmail}/{uuid.uuid4()}'
body = passwordResetConfirmationTemplate(resetLink)
sendEmail(targetEmail, subject, body)
def sendEmail(targetEmail, emailSubject, emailContent):
try:
yag = yagmail.SMTP('<EMAIL>', '7$9xw#78iK@45*s&2@a')
yag.send(to=targetEmail, subject=emailSubject, contents=emailContent)
    except Exception as error:
        print('email not sent:', error)
```
#### File: app/domain/stockRefreshModule.py
```python
import time
import yfinance as yf
import threading
from dotenv import load_dotenv
import src.app.domain.collections.stocks.stocksQueries as stockQueries
from random import randrange
from src.app.domain.collections.stocks.stocksDatabaseAccess import refreshRegularPriceForStock
from src.app.domain.collections.stocks.stocksDatabaseUtility import *
from src.app.database.utility import SqlUtility
from src.app.database.utility.StockUtility import *
def refreshAllStockAnalytics():
refreshThread = threading.Thread(target = startRefreshLoop)
refreshThread.start()
def startRefreshLoop():
while True:
print('Refreshing stock data from YF API')
load_dotenv()
selectAllTickersQueryString = stockQueries.getAllStockTickersAndSuffixes()
queryResponseTuples = SqlUtility.executeSelectRequest(selectAllTickersQueryString)
stockTickersString = ''
stockTickersList = []
for stockTuple in queryResponseTuples:
stockTicker, stockSuffix = stockTuple
stockTickerAggregate = concatenateTickerAndSuffix(stockTicker, stockSuffix)
stockTickersString += ' ' + stockTickerAggregate
stockTickersList += [stockTickerAggregate]
try:
yfStockResponse = yf.Tickers(stockTickersString)
except Exception:
print('YFinance API rejected Tickers request')
return
for tickerObject in yfStockResponse.tickers:
refreshStockByTickerAndSuffix(tickerObject)
for tickerObject in stockTickersList:
refreshPriceForStock(tickerObject)
print('Refresh successful')
time.sleep(180)
def refreshStockByTickerAndSuffix(yfStockObject):
try:
time.sleep(randrange(1, 2))
print('Updating for ' + str(yfStockObject))
stock = correctStockMissingOrInvalidFields(yfStockObject.info)
except Exception:
print('YFinance API rejected .info request')
return
stockTicker, stockSuffix = splitTickerAggregate(stock['symbol'])
updateQuery = stockQueries.getRefreshStockQueryForTickerAndSuffix(stockTicker, stockSuffix, stock)
SqlUtility.executeUpdateRequest(updateQuery)
def refreshPriceForStock(stockTicker):
try:
time.sleep(randrange(1, 2))
print('Adjusting price for ' + stockTicker)
refreshRegularPriceForStock(stockTicker)
except Exception:
print('YFinance API rejected download request')
```
|
{
"source": "jeremie-borel/pyfilemaker2",
"score": 3
}
|
#### File: pyfilemaker2/pyfilemaker2/caster.py
```python
import datetime
import numbers
# from builtins import str
__all__ = ['default_cast_map']
FM_NUMBER = 'number'
FM_TEXT = 'text'
FM_DATE = 'date'
FM_TIME = 'time'
FM_TIMESTAMP = 'timestamp'
class TypeCast:
"""Type caster, get's initiated with the corresponding FmFieldData"""
def __init__(self, fm_field, fm_meta):
pass
def __call__(self, value):
return value
class NumberCast(TypeCast):
def __call__(self, value):
try:
return float(value)
except Exception:
# return NaN
return float('nan')
class CommaDecimalNumberCast(TypeCast):
def __call__(self, value):
try:
return float(value.replace(',', '.'))
except Exception:
# return NaN
return float('nan')
class TextCast(TypeCast):
def __call__(self, value):
if value:
return value
return ''
DummyCast = TextCast
class DateCast(TypeCast):
def __init__(self, fm_field, fm_meta):
self.pat = fm_meta.date_pattern
def __call__(self, value):
try:
d = datetime.datetime.strptime(
value,
self.pat,
)
return d.date()
except (ValueError, TypeError):
return None
class TimeCast(TypeCast):
def __init__(self, fm_field, fm_meta):
self.pat = fm_meta.time_pattern
def __call__(self, value):
try:
return datetime.datetime.strptime(
value,
self.pat,
).time()
except (ValueError, TypeError):
return None
class TimestampCast(TypeCast):
def __init__(self, fm_field, fm_meta):
self.pat = fm_meta.timestamp_pattern
self.tz = fm_meta.server_timezone
def __call__(self, value):
try:
d = datetime.datetime.strptime(
value,
self.pat,
)
if self.tz:
d = self.tz.localize(d)
d = self.tz.normalize(d)
return d
except (ValueError, TypeError):
return None
class BackCast:
"""Cast from python to xml in do_edit or do_new or find arguments"""
FM_DEFAULT_DATE = "%m/%d/%Y"
FM_DEFAULT_TIME = "%H:%M:%S"
FM_DEFAULT_TIMESTAMP = "%m/%d/%Y %H:%M:%S"
def __init__(self, fm_server=None):
"""
The :fm_server: object is passed at the initialisation of this class.
It can be used to cast some field in a different way
"""
        self.tz = None
        if fm_server:
            self.tz = fm_server.options['server_timezone']
def __call__(self, field, value):
if isinstance(value, datetime.datetime):
# if server timezone is set and the datetime is aware:
if (
self.tz and
value.tzinfo is not None and
value.tzinfo.utcoffset(value) is not None
):
if self.tz != value.tzinfo:
value = value.astimezone(self.tz)
return value.strftime(self.__class__.FM_DEFAULT_TIMESTAMP)
elif isinstance(value, datetime.date):
return value.strftime(self.__class__.FM_DEFAULT_DATE)
elif isinstance(value, datetime.time):
return value.strftime(self.__class__.FM_DEFAULT_TIME)
elif isinstance(value, bytes):
return value.decode('utf8')
elif isinstance(value, numbers.Number):
return value
return str(value)
default_cast_map = {
FM_NUMBER: NumberCast,
FM_TEXT: TextCast,
FM_DATE: DateCast,
FM_TIME: TimeCast,
FM_TIMESTAMP: TimestampCast,
}
```
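A minimal sketch of how `default_cast_map` resolves a caster from a FileMaker field type; `SimpleNamespace` stands in for the real `FmMeta` object, of which only `date_pattern` is assumed here.
```python
# Minimal sketch: picking and applying casters from default_cast_map.
# SimpleNamespace is a stand-in for the real FmMeta metadata object.
from types import SimpleNamespace
from pyfilemaker2.caster import default_cast_map, FM_DATE, FM_NUMBER

fake_meta = SimpleNamespace(date_pattern="%m/%d/%Y")
date_caster = default_cast_map[FM_DATE](fm_field=None, fm_meta=fake_meta)
print(date_caster("09/01/2018"))                     # -> 2018-09-01

number_caster = default_cast_map[FM_NUMBER](fm_field=None, fm_meta=fake_meta)
print(number_caster("3.25"), number_caster("oops"))  # -> 3.25 nan
```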
#### File: pyfilemaker2/tests/test_caster.py
```python
import unittest
import datetime
import pytz
from pyfilemaker2.caster import BackCast
class TestBackCast(unittest.TestCase):
def test_backcast_timezone(self):
tz = pytz.timezone('Europe/Zurich')
d = datetime.datetime(
year=2018,
month=9,
day=1,
hour=11,
minute=54,
second=7
)
d = tz.normalize(tz.localize(d))
tz2 = pytz.UTC
bc = BackCast()
bc.tz = tz2
self.assertEqual(bc(field=None, value=d), '09/01/2018 09:54:07')
def test_backcast_timezone2(self):
tz2 = pytz.UTC
d = datetime.datetime(
year=2018,
month=9,
day=1,
hour=11,
minute=54,
second=7
)
d = tz2.normalize(tz2.localize(d))
tz = pytz.timezone('Europe/Zurich')
bc = BackCast()
bc.tz = tz
self.assertEqual(bc(field=None, value=d), '09/01/2018 13:54:07')
def test_backcast_timezone3(self):
d = datetime.datetime(
year=2018,
month=9,
day=1,
hour=11,
minute=54,
second=7
)
tz = pytz.timezone('Europe/Zurich')
bc = BackCast()
bc.tz = tz
# naive must stay naive :/
self.assertEqual(bc(field=None, value=d), '09/01/2018 11:54:07')
def test_backcast_types(self):
tests = [
(9, 9),
('abc', 'abc'),
(u'abc', 'abc'),
(b'abc', 'abc'),
(u'en été'.encode('utf8'), 'en été'),
]
bc = BackCast()
for value, result in tests:
self.assertEqual(bc(field=None, value=value), result)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JeremieBou/AlexaSkills",
"score": 2
}
|
#### File: IndiaFacts/lambda/lambda_function.py
```python
import random
import logging
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import (
AbstractRequestHandler, AbstractExceptionHandler,
AbstractRequestInterceptor, AbstractResponseInterceptor)
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model.ui import SimpleCard
from ask_sdk_model import Response
SKILL_NAME = "India Facts"
GET_FACT_MESSAGE = "Here's your fact: "
HELP_MESSAGE = "You can say tell me an Indian fact, or, you can say exit... What can I help you with?"
HELP_REPROMPT = "What can I help you with?"
STOP_MESSAGE = "Alavida!"
FALLBACK_MESSAGE = "The India Facts skill can't help you with that. It can help you discover facts about India if you say tell me a India fact. What can I help you with?"
FALLBACK_REPROMPT = 'What can I help you with?'
EXCEPTION_MESSAGE = "Sorry. I cannot help you with that."
data = [
    'The best Indian is <phoneme alphabet=\'ipa\' ph=\'pri-ma\'>Preema</phoneme>.',
    'In 2001, a British Government minister called Tikka Masala a true British national dish.',
    'Butter chicken comes from Delhi, India.',
'India has a population size of 1,324,171,354 people!',
'India has 780 languages, which is the second highest number of languages!'
]
sb = SkillBuilder()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Built-in Intent Handlers
class GetNewFactHandler(AbstractRequestHandler):
"""Handler for Skill Launch and GetNewFact Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (is_request_type("LaunchRequest")(handler_input) or
is_intent_name("GetNewSpaceFactIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In GetNewFactHandler")
random_fact = random.choice(data)
speech = GET_FACT_MESSAGE + random_fact
handler_input.response_builder.speak(speech).set_card(
SimpleCard(SKILL_NAME, random_fact))
return handler_input.response_builder.response
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In HelpIntentHandler")
handler_input.response_builder.speak(HELP_MESSAGE).ask(
HELP_REPROMPT).set_card(SimpleCard(
SKILL_NAME, HELP_MESSAGE))
return handler_input.response_builder.response
class CancelOrStopIntentHandler(AbstractRequestHandler):
    """Single handler for Cancel and Stop Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return (is_intent_name("AMAZON.CancelIntent")(handler_input) or
                is_intent_name("AMAZON.StopIntent")(handler_input))
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        logger.info("In CancelOrStopIntentHandler")
        handler_input.response_builder.speak(STOP_MESSAGE)
        return handler_input.response_builder.response
class FallbackIntentHandler(AbstractRequestHandler):
"""Handler for Fallback Intent.
AMAZON.FallbackIntent is only available in en-US locale.
This handler will not be triggered except in that locale,
so it is safe to deploy on any locale.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In FallbackIntentHandler")
handler_input.response_builder.speak(FALLBACK_MESSAGE).ask(
FALLBACK_REPROMPT)
return handler_input.response_builder.response
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In SessionEndedRequestHandler")
logger.info("Session ended reason: {}".format(
handler_input.request_envelope.request.reason))
return handler_input.response_builder.response
# Exception Handler
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Catch all exception handler, log exception and
respond with custom message.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.info("In CatchAllExceptionHandler")
logger.error(exception, exc_info=True)
handler_input.response_builder.speak(EXCEPTION_MESSAGE).ask(
HELP_REPROMPT)
return handler_input.response_builder.response
# Request and Response loggers
class RequestLogger(AbstractRequestInterceptor):
"""Log the alexa requests."""
def process(self, handler_input):
# type: (HandlerInput) -> None
logger.debug("Alexa Request: {}".format(
handler_input.request_envelope.request))
class ResponseLogger(AbstractResponseInterceptor):
"""Log the alexa responses."""
def process(self, handler_input, response):
# type: (HandlerInput, Response) -> None
logger.debug("Alexa Response: {}".format(response))
# Register intent handlers
sb.add_request_handler(GetNewFactHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
# Register exception handlers
sb.add_exception_handler(CatchAllExceptionHandler())
# TODO: Uncomment the following lines of code for request, response logs.
sb.add_global_request_interceptor(RequestLogger())
sb.add_global_response_interceptor(ResponseLogger())
# Handler name that is used on AWS lambda
lambda_handler = sb.lambda_handler()
```
|
{
"source": "JeremieBou/stix_generator",
"score": 3
}
|
#### File: JeremieBou/stix_generator/make_nodes.py
```python
import sys
import os
from stix_generator.util import Util as u
from stix_generator.stix_generator import Generator
def main():
"""
example script for STIX Generator
makes random stix data using the generator with set peramaters in the script
"""
path = os.path.realpath('static/data') + "/view.json"
    if len(sys.argv) == 2:
total_num = 100
sightings_num = 0
marking_num = 0
granular_marking_num = 0
M_0_num = 2
indicator_num = 50
observed_data_num = 0
report_num = 0
print "M_0 = " + str(M_0_num)
print "Generating " + str(total_num) + " nodes"
print "Generating " + str(sightings_num) + " sightingss"
print "Generating " + str(marking_num) + " markings"
print "Generating " + str(granular_marking_num) + " granular_markings"
print "Generating " + str(indicator_num) + " indicators"
print "Generating " + str(observed_data_num) + " observed_datas"
print "Generating " + str(report_num) + " reports"
sg = Generator(total_num, sightings_num, marking_num, granular_marking_num, M_0_num, indicator_num, observed_data_num, report_num)
stix = sg.generate()
print "Done generating, making output"
u.make_output(stix, str(sys.argv[1]))
print "Complete"
# No Arguments given
else:
print "Please specify the ouput directory."
if __name__ == "__main__":
main()
```
|
{
"source": "JeremieCharest/pyhydroquebec",
"score": 3
}
|
#### File: pyhydroquebec/pyhydroquebec/customer.py
```python
from datetime import datetime, timedelta
import json
from bs4 import BeautifulSoup
import cachetools
from pyhydroquebec.consts import (ANNUAL_DATA_URL, CONTRACT_CURRENT_URL_1,
CONTRACT_CURRENT_URL_2, CONTRACT_URL_3,
DAILY_DATA_URL, HOURLY_DATA_URL_1,
HOURLY_DATA_URL_2, MONTHLY_DATA_URL,
REQUESTS_TTL, DAILY_MAP, MONTHLY_MAP,
ANNUAL_MAP, CURRENT_MAP,
)
class Customer():
"""Represents a HydroQuebec account.
The account_id is called 'noPartenaireDemandeur' in the HydroQuebec API
The customer_id is called 'Customer number' in the HydroQuebec 'My accounts' UI
The contract_id is called 'Contract' in the HydroQuebec 'At a glance' UI
"""
def __init__(self, client, account_id, customer_id, timeout, logger):
"""Constructor."""
self._client = client
self.account_id = account_id
self.customer_id = customer_id
self.contract_id = None
self._timeout = timeout
self._logger = logger.getChild(customer_id)
self._balance = None
self._current_period = {}
self._current_annual_data = {}
self._compare_annual_data = {}
self._current_monthly_data = {}
self._compare_monthly_data = {}
self._current_daily_data = {}
self._compare_daily_data = {}
self._hourly_data = {}
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_summary(self):
"""Fetch data from overview page.
UI URL: https://session.hydroquebec.com/portail/en/group/clientele/gerer-mon-compte
"""
self._logger.info("Fetching summary page")
await self._client.select_customer(self.account_id, self.customer_id)
res = await self._client.http_request(CONTRACT_URL_3, "get")
content = await res.text()
soup = BeautifulSoup(content, 'html.parser')
raw_balance = soup.find('p', {'class': 'solde'}).text
self._balance = float(raw_balance[:-2].replace(",", ".").
replace("\xa0", ""))
raw_contract_id = soup.find('div', {'class': 'contrat'}).text
self.contract_id = (raw_contract_id
.split("Contrat", 1)[-1]
.replace("\t", "")
.replace("\n", ""))
# Needs to load the consumption profile page to not break
# the next loading of the other pages
await self._client.http_request(CONTRACT_CURRENT_URL_1, "get")
@property
def balance(self):
"""Return the collected balance."""
return self._balance
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_current_period(self):
"""Fetch data of the current period.
UI URL: https://session.hydroquebec.com/portail/en/group/clientele/portrait-de-consommation
"""
self._logger.info("Fetching current period data")
await self._client.select_customer(self.account_id, self.customer_id)
await self._client.http_request(CONTRACT_CURRENT_URL_1, "get")
headers = {"Content-Type": "application/json"}
res = await self._client.http_request(CONTRACT_CURRENT_URL_2, "get", headers=headers)
text_res = await res.text()
# We can not use res.json() because the response header are not application/json
json_res = json.loads(text_res)['results'][0]
self._current_period = {}
for key, data in CURRENT_MAP.items():
self._current_period[key] = json_res[data['raw_name']]
@property
def current_period(self):
"""Return collected current period data."""
return self._current_period
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_annual_data(self):
"""Fetch data of the current and last year.
API URL: https://cl-ec-spring.hydroquebec.com/portail/fr/group/clientele/
portrait-de-consommation/resourceObtenirDonneesConsommationAnnuelles
"""
self._logger.info("Fetching annual data")
await self._client.select_customer(self.account_id, self.customer_id)
headers = {"Content-Type": "application/json"}
res = await self._client.http_request(ANNUAL_DATA_URL, "get", headers=headers)
# We can not use res.json() because the response header are not application/json
json_res = json.loads(await res.text())
if not json_res.get('results'):
return
json_res = json_res['results'][0]
for key, raw_key in ANNUAL_MAP:
self._current_annual_data[key] = json_res['courant'][raw_key]
self._compare_annual_data[key] = json_res['compare'][raw_key]
@property
def current_annual_data(self):
"""Return collected current year data."""
return self._current_annual_data
@property
def compare_annual_data(self):
"""Return collected previous year data."""
return self._compare_annual_data
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_monthly_data(self):
"""Fetch data of the current and last year.
API URL: https://cl-ec-spring.hydroquebec.com/portail/fr/group/clientele/
portrait-de-consommation/resourceObtenirDonneesConsommationMensuelles
"""
self._logger.info("Fetching monthly data")
await self._client.select_customer(self.account_id, self.customer_id)
headers = {"Content-Type": "application/json"}
res = await self._client.http_request(MONTHLY_DATA_URL, "get", headers=headers)
text_res = await res.text()
# We can not use res.json() because the response header are not application/json
json_res = json.loads(text_res)
if not json_res.get('results'):
return
for month_data in json_res.get('results', []):
month = month_data['courant']['dateDebutMois'][:-3]
self._current_monthly_data[month] = {}
if 'compare' in month_data:
self._compare_monthly_data[month] = {}
for key, raw_key in MONTHLY_MAP:
self._current_monthly_data[month][key] = month_data['courant'][raw_key]
if 'compare' in month_data:
self._compare_monthly_data[month][key] = month_data['compare'][raw_key]
@property
def current_monthly_data(self):
"""Return collected monthly data of the current year."""
return self._current_monthly_data
@property
def compare_monthly_data(self):
"""Return collected monthly data of the previous year."""
return self._compare_monthly_data
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_daily_data(self, start_date=None, end_date=None):
"""Fetch data of the current and last year.
API URL: https://cl-ec-spring.hydroquebec.com/portail/fr/group/clientele/
portrait-de-consommation/resourceObtenirDonneesQuotidiennesConsommation
"""
self._logger.info("Fetching daily data between %s and %s", start_date, end_date)
await self._client.select_customer(self.account_id, self.customer_id)
if start_date is None:
# Get yesterday
yesterday = datetime.now() - timedelta(days=1)
start_date_str = yesterday.strftime("%Y-%m-%d")
elif hasattr(start_date, "strftime"):
start_date_str = start_date.strftime("%Y-%m-%d")
else:
try:
datetime.strptime(start_date, "%Y-%m-%d")
except ValueError:
print("Start date bad format. It must match %Y-%m-%d")
return
start_date_str = start_date
end_date_str = None
if end_date is None:
pass
elif hasattr(end_date, "strftime"):
end_date_str = end_date.strftime("%Y-%m-%d")
else:
try:
datetime.strptime(end_date, "%Y-%m-%d")
except ValueError:
print("Start date bad format. It must match %Y-%m-%d")
return
end_date_str = end_date
headers = {"Content-Type": "application/json"}
params = {"dateDebut": start_date_str}
if end_date_str:
params.update({"dateFin": end_date_str})
res = await self._client.http_request(DAILY_DATA_URL, "get",
params=params, headers=headers)
text_res = await res.text()
# We can not use res.json() because the response header are not application/json
json_res = json.loads(text_res)
if not json_res.get('results'):
return
for day_data in json_res.get('results', []):
day = day_data['courant']['dateJourConso']
self._current_daily_data[day] = {}
if 'compare' in day_data:
self._compare_daily_data[day] = {}
for key, data in DAILY_MAP.items():
self._current_daily_data[day][key] = day_data['courant'][data['raw_name']]
if 'compare' in day_data:
self._compare_daily_data[day][key] = day_data['compare'][data['raw_name']]
@property
def current_daily_data(self):
"""Return collected daily data of the current year."""
return self._current_daily_data
@property
def compare_daily_data(self):
"""Return collected daily data of the previous year."""
return self._compare_daily_data
@cachetools.cached(cachetools.TTLCache(maxsize=128, ttl=60*REQUESTS_TTL))
async def fetch_hourly_data(self, day=None):
"""Fetch data of the current and last year.
API URL: https://cl-ec-spring.hydroquebec.com/portail/fr/group/clientele/
portrait-de-consommation/resourceObtenirDonneesConsommationHoraires
"""
self._logger.info("Fetching hourly data for %s", day)
await self._client.select_customer(self.account_id, self.customer_id)
await self._client.select_customer(self.account_id, self.customer_id)
if day is None:
# Get yesterday
yesterday = datetime.now() - timedelta(days=1)
day_str = yesterday.strftime("%Y-%m-%d")
elif hasattr(day, "strftime"):
day_str = day.strftime("%Y-%m-%d")
else:
try:
datetime.strptime(day, "%Y-%m-%d")
except ValueError:
print("Start date bad format. It must match %Y-%m-%d")
return
day_str = day
params = {"dateDebut": day_str, "dateFin": day_str}
res = await self._client.http_request(HOURLY_DATA_URL_2, "get",
params=params, )
# We can not use res.json() because the response header are not application/json
json_res = json.loads(await res.text())
self._hourly_data[day_str] = {
'day_mean_temp': json_res['results'][0]['tempMoyJour'],
'day_min_temp': json_res['results'][0]['tempMinJour'],
'day_max_temp': json_res['results'][0]['tempMaxJour'],
'hours': {},
}
tmp_hour_dict = dict((h, {}) for h in range(24))
for hour, temp in enumerate(json_res['results'][0]['listeTemperaturesHeure']):
tmp_hour_dict[hour]['average_temperature'] = temp
params = {"date": day_str}
res = await self._client.http_request(HOURLY_DATA_URL_1, "get", params=params)
# We can not use res.json() because the response header are not application/json
json_res = json.loads(await res.text())
for hour, data in enumerate(json_res['results']['listeDonneesConsoEnergieHoraire']):
tmp_hour_dict[hour]['lower_price_consumption'] = data['consoReg']
tmp_hour_dict[hour]['higher_price_consumption'] = data['consoHaut']
tmp_hour_dict[hour]['total_consumption'] = data['consoTotal']
self._hourly_data[day_str]['hours'] = tmp_hour_dict.copy()
@property
def hourly_data(self):
"""Return collected hourly data."""
return self._hourly_data
```
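A hypothetical usage sketch: `client` stands in for pyhydroquebec's HTTP client, which is defined elsewhere in the package, and the account and customer ids are placeholders.
```python
# Hypothetical sketch: pulling the balance and yesterday's hourly data for one
# customer. `client` is a stand-in for pyhydroquebec's HTTP client (not shown
# in this file); the ids below are made up.
import asyncio
import logging
from pyhydroquebec.customer import Customer

async def show_consumption(client):
    customer = Customer(client, "0001234567", "0009876543",
                        timeout=30, logger=logging.getLogger("pyhydroquebec"))
    await customer.fetch_summary()
    await customer.fetch_hourly_data()       # defaults to yesterday
    print(customer.balance, customer.contract_id)
    print(customer.hourly_data)

# asyncio.run(show_consumption(client))
```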
|
{
"source": "jeremiecoullon/SGMCMCJax",
"score": 2
}
|
#### File: SGMCMCJax/sgmcmcjax/diffusions.py
```python
import jax.numpy as jnp
from jax import lax, random
from .diffusion_util import diffusion, diffusion_sghmc, diffusion_palindrome
### diffusions
@diffusion
def sgld(dt):
"https://www.ics.uci.edu/~welling/publications/papers/stoclangevin_v6.pdf"
dt = make_schedule(dt)
def init_fn(x):
return x
def update(i, k, g, x):
return x + dt(i)*g + jnp.sqrt(2*dt(i))*random.normal(k, shape=jnp.shape(x))
def get_params(x):
return x
return init_fn, update, get_params
@diffusion
def psgld(dt, alpha=0.99, eps=1e-5):
"https://arxiv.org/pdf/1512.07666.pdf"
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update(i, k, g, state):
x, v = state
v = alpha*v + (1-alpha)*jnp.square(g)
G = 1./(jnp.sqrt(v)+eps)
return x + dt(i)*0.5*G*g + jnp.sqrt(dt(i)*G)*random.normal(k, shape=jnp.shape(x)), v
def get_params(state):
x, _ = state
return x
return init_fn, update, get_params
@diffusion
def sgldAdam(dt, beta1=0.9, beta2=0.999, eps=1e-8):
"https://arxiv.org/abs/2105.13059"
dt = make_schedule(dt)
def init_fn(x):
m = jnp.zeros_like(x)
v = jnp.zeros_like(x)
return x, m, v
def update(i, k, g, state):
x,m,v = state
m = beta1*m + (1-beta1)*g
v = beta2*v + (1-beta2)*jnp.square(g)
m_hat = m/(1-beta1**(i+1))
v_hat = v/(1-beta2**(i+1))
adapt_dt = dt(i)/(jnp.sqrt(v_hat) + eps)
return x + adapt_dt*0.5*m_hat + jnp.sqrt(adapt_dt)*random.normal(key=k, shape=jnp.shape(x)), m, v
def get_params(state):
x, _, _ = state
return x
return init_fn, update, get_params
@diffusion_sghmc
def sghmc(dt, alpha=0.01, beta=0):
"https://arxiv.org/abs/1402.4102"
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update(i, k, g, state):
x, v = state
x = x + v
v = v + dt(i)*g - alpha*v + jnp.sqrt(2*(alpha - beta)*dt(i))*random.normal(k, shape=jnp.shape(x))
return x, v
def get_params(state):
x, _ = state
return x
def resample_momentum(i, k, x):
v = jnp.sqrt(dt(i))*random.normal(k, shape=jnp.shape(x))
return x, v
return init_fn, update, get_params, resample_momentum
@diffusion_palindrome
def baoab(dt, gamma, tau=1):
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
return x, v
def update1(i, k, g, state):
x, v = state
v = v + dt(i)*0.5*g
x = x + v*dt(i)*0.5
c1 = jnp.exp(-gamma*dt(i))
c2 = jnp.sqrt(1 - c1**2)
v = c1*v + tau*c2*random.normal(k, shape=jnp.shape(v))
x = x + v*dt(i)*0.5
return x, v
def update2(i, k, g, state):
x, v = state
v = v + dt(i)*0.5*g
return x, v
def get_params(state):
x, _ = state
return x
return init_fn, (update1, update2), get_params
@diffusion
def sgnht(dt, a=0.01):
"http://people.ee.duke.edu/~lcarin/sgnht-4.pdf: Algorithm 2"
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
alpha = a
return x, v, alpha
def initial_momentum(kv):
"sample momentum at the first iteration"
k, v = kv
key, subkey = random.split(k)
v = jnp.sqrt(dt(0))*random.normal(subkey, shape=v.shape)
return key, v
def update(i, k, g, state):
x, v, alpha = state
k,v = lax.cond(i==0,
initial_momentum,
lambda kv: (k,v),
(k,v)
)
v = v - alpha*v + dt(i)*g + jnp.sqrt(2*a*dt(i))*random.normal(k, shape=jnp.shape(x))
x = x + v
alpha = alpha + (jnp.linalg.norm(v)**2)/v.size - dt(i)
return x, v, alpha
def get_params(state):
x, _, _ = state
return x
return init_fn, update, get_params
@diffusion_palindrome
def badodab(dt, a=0.01):
"https://arxiv.org/abs/1505.06889"
dt = make_schedule(dt)
def init_fn(x):
v = jnp.zeros_like(x)
alpha = a
return x, v, alpha
def update(i, k, g, state):
x, v, alpha = state
dt2 = dt(i)/2
mu = 1.
sigma = 1.
v = v + dt2*g
x = x + dt2*v
alpha = alpha + (dt2/mu)*(jnp.linalg.norm(v) - v.size)
c1 = jnp.exp(-alpha*dt(i))
c2 = jnp.where(alpha==0, jnp.sqrt(dt(i)), jnp.sqrt(jnp.abs((1-c1**2)/(2*alpha))))
v = c1*v + c2*sigma*random.normal(k, shape=jnp.shape(v))
alpha = alpha + (dt2/mu)*(jnp.linalg.norm(v) - v.size)
x = x + dt2*v
return x, v, alpha
def update2(i, k, g, state):
x, v, alpha = state
v = v + dt(i)*0.5*g
return x, v, alpha
def get_params(state):
x, _, _ = state
return x
return init_fn, (update, update2), get_params
### step size schedules
def constant(step_size):
def schedule(i):
return step_size
return schedule
def welling_teh_schedule(a,b, gamma=0.55):
"Polynomial schedule from https://www.ics.uci.edu/~welling/publications/papers/stoclangevin_v6.pdf"
def schedule(i):
return a*(b+i)**(-gamma)
return schedule
def cyclical_schedule(alpha_0, M, K):
"https://arxiv.org/abs/1902.03932"
def schedule(i):
mod_term = (i-1) % jnp.ceil(K/M)
return alpha_0*0.5*(jnp.cos( jnp.pi*mod_term /jnp.ceil(K/M) ) + 1)
return schedule
def make_schedule(scalar_or_schedule):
if callable(scalar_or_schedule):
return scalar_or_schedule
elif jnp.ndim(scalar_or_schedule) == 0:
return constant(scalar_or_schedule)
else:
raise TypeError(type(scalar_or_schedule))
```
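A short sketch of the step-size schedules defined at the end of the file; the import path assumes the installed package layout.
```python
# Minimal sketch: the polynomial and cyclical step-size schedules defined above.
from sgmcmcjax.diffusions import welling_teh_schedule, cyclical_schedule

dt = welling_teh_schedule(a=1e-3, b=10, gamma=0.55)
print(dt(0), dt(10_000))           # step size decays polynomially with the iteration

dt_cyc = cyclical_schedule(alpha_0=1e-3, M=5, K=10_000)
print(dt_cyc(1), dt_cyc(1999))     # large at the start of a cycle, close to 0 near its end
```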
#### File: SGMCMCJax/sgmcmcjax/ksd.py
```python
from jax import jit, vmap, lax
import jax.numpy as jnp
@jit
def k_0_fun(parm1, parm2, gradlogp1, gradlogp2, c=1., beta=-0.5):
"""
KSD kernel with the 2 norm
"""
diff = parm1-parm2
dim = parm1.shape[0]
base = (c**2 + jnp.dot(diff, diff))
term1 = jnp.dot(gradlogp1,gradlogp2)*base**beta
term2 = -2*beta * jnp.dot(gradlogp1, diff) * base**(beta-1)
term3 = 2*beta * jnp.dot(gradlogp2, diff) * base**(beta-1)
term4 = -2*dim*beta*(base**(beta-1))
term5 = -4*beta* (beta-1)*base**(beta-2)*jnp.sum(jnp.square(diff))
return term1 + term2 + term3 + term4 + term5
batch_k_0_fun_rows = jit(vmap(k_0_fun, in_axes=(None,0,None,0,None,None)))
@jit
def imq_KSD(sgld_samples, sgld_grads):
"""
KSD with imq kernel
"""
c, beta = 1., -0.5
N = sgld_samples.shape[0]
def body_ksd(le_sum, x):
my_sample, my_grad = x
le_sum += jnp.sum(batch_k_0_fun_rows(my_sample, sgld_samples, my_grad, sgld_grads, c, beta))
return le_sum, None
le_sum, _ = lax.scan(body_ksd, 0., (sgld_samples, sgld_grads))
return jnp.sqrt(le_sum)/N
```
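A minimal sketch: for samples drawn exactly from a standard Gaussian the score is -x, so the kernel Stein discrepancy computed above should be small.
```python
# Minimal sketch: evaluating imq_KSD on exact samples from a standard Gaussian,
# whose score function is grad log p(x) = -x.
from jax import random
from sgmcmcjax.ksd import imq_KSD

key = random.PRNGKey(0)
samples = random.normal(key, shape=(500, 2))   # stand-ins for MCMC samples
grads = -samples                               # score of a standard Gaussian
print(imq_KSD(samples, grads))                 # small value = good approximation
```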
#### File: SGMCMCJax/tests/models.py
```python
import jax.numpy as jnp
from jax import random
# Parameter shape: array
def loglikelihood_array(theta, x):
return -0.5*jnp.dot(x-theta, x-theta)
def logprior_array(theta):
return -0.5*jnp.dot(theta, theta)*0.01
# Parameter shape: list of 2 arrays
def loglikelihood_list_array(theta, x):
param1, param2 = theta
return -0.5*jnp.dot(x-param1, x-param1) - 0.1*jnp.dot(x-param2, x-param2)
def logprior_list_array(theta):
param1, param2 = theta
return -0.001*jnp.dot(param1, param1) -0.001*jnp.dot(param2, param2)
# generate dataset
N, D = 1000, 5
key = random.PRNGKey(0)
X_data = random.normal(key, shape=(N, D))
```
|
{
"source": "Jeremie-C/python-bmp180",
"score": 2
}
|
#### File: python-bmp180/bmp180/bmp180.py
```python
import smbus
import time
# BMP180 default
DEVICE = 0x77
# Register
REG_CHIPID = 0xD0
REG_RESET = 0xE0
REG_CTRL_MEAS = 0xF4
# Resolutions
RES_1 = 0x00
RES_2 = 0x01
RES_4 = 0x02
RES_8 = 0x03
# Classe
class bmp180:
def __init__(self, i2cbus=0, device_address=DEVICE, res=RES_1):
self.bus = smbus.SMBus(i2cbus)
self.adr = device_address
# Resolution
if res not in [RES_1, RES_2, RES_4, RES_8]:
            raise ValueError('Unexpected resolution value {0}.'.format(res))
self.res = res
# Load Calibration
self._load_calibration()
def get_chip_id(self):
chip_id = self.bus.read_byte_data(self.adr, REG_CHIPID)
return hex(chip_id)
def reset(self):
self.bus.write_byte_data(self.adr, REG_RESET, 0xB6)
def is_measuring(self):
return (self.bus.read_byte_data(self.adr, REG_CTRL_MEAS) & 0x05) != 0x00
def get_resolution(self):
return self.res
def set_resolution(self, res):
self.res = res
def get_temperature(self):
UT = self._get_temp_raw()
# Calculate true Temperature
X1 = ((UT - self.AC6) * self.AC5) >> 15
        X2 = (self.MC << 11) // (X1 + self.MD)  # integer division, as in the datasheet
B5 = X1 + X2
t = ((B5 + 8) >> 4) / 10.0
return t
def get_pressure(self):
UT = self._get_temp_raw()
UP = self._get_press_raw()
# Calculate true Pressure
X1 = ((UT - self.AC6) * self.AC5) >> 15
X2 = (self.MC << 11) / (X1 + self.MD)
B5 = X1 + X2
# Get B3
B6 = B5 - 4000
X1 = (self.B2 * (B6 * B6) >> 12) >> 11
X2 = (self.AC2 * B6) >> 11
X3 = X1 + X2
        B3 = (((self.AC1 * 4 + X3) << self.res) + 2) // 4
# Get B4 and B7
X1 = (self.AC3 * B6) >> 13
X2 = (self.B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) >> 2
B4 = (self.AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> self.res)
        if B7 < 0x80000000:
            p = (B7 * 2) // B4
        else:
            p = (B7 // B4) * 2
# Final
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >> 16
X2 = (-7357 * p) >> 16
p = p + ((X1 + X2 + 3791) >> 4)
return p
def get_temp_f(self):
temp_c = self.get_temperature()
temp_f = (temp_c * 1.8) + 32
return temp_f
def get_press_mmhg(self):
press_pa = self.get_pressure()
press_mm = press_pa * 0.0075
return press_mm
def get_altitude(self, pa_sealevel=101325.0):
press = float(self.get_pressure())
altitude = 44330.0 * (1.0 - pow(press / pa_sealevel, (1.0/5.255)))
return altitude
def get_altitude_ft(self, pa_sealevel=101325.0):
alti = self.get_altitude(pa_sealevel)
alti_ft = alti / 0.3048
return alti_ft
def get_pasealevel(self, alti=0.0):
press = float(self.get_pressure())
pasea = press / pow(1.0 - alti/44330.0, 5.255)
return pasea
def get_pasealevel_mmhg(self, alti=0.0):
pasea = self.get_pasealevel(alti)
pasea_mm = pasea * 0.0075
return pasea_mm
def _get_temp_raw(self):
self.bus.write_byte_data(self.adr, REG_CTRL_MEAS, 0x2E)
# Wait for ready
time.sleep(0.005)
# Ready to read
data = self.bus.read_i2c_block_data(self.adr, 0xF6, 2)
UT = (data[0] << 8) + data[1]
return UT
def _get_press_raw(self):
self.bus.write_byte_data(self.adr, REG_CTRL_MEAS, 0x34 + (self.res << 6))
# Wait for ready
if self.res == RES_1:
time.sleep(0.005)
elif self.res == RES_4:
time.sleep(0.014)
elif self.res == RES_8:
time.sleep(0.026)
else:
time.sleep(0.008)
# Ready to read
data = self.bus.read_i2c_block_data(self.adr, 0xF6, 3)
UP = ((data[0] << 16) + (data[1] << 8) + data[2]) >> (8 - self.res)
return UP
def _reads16(self, reg):
result = self._readu16(reg)
if result > 32767 :
result -= 65536
return result
def _readu16(self, reg):
MSB = self.bus.read_byte_data(self.adr, reg)
LSB = self.bus.read_byte_data(self.adr, reg+1)
return (MSB << 8) + LSB
# Calibration
def _load_calibration(self):
# read all calibration registers
self.AC1 = self._reads16(0xAA)
self.AC2 = self._reads16(0xAC)
self.AC3 = self._reads16(0xAE)
self.AC4 = self._readu16(0xB0)
self.AC5 = self._readu16(0xB2)
self.AC6 = self._readu16(0xB4)
self.B1 = self._reads16(0xB6)
self.B2 = self._reads16(0xB8)
self.MB = self._reads16(0xBA)
self.MC = self._reads16(0xBC)
self.MD = self._reads16(0xBE)
```
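A minimal usage sketch, assuming the sensor is wired to I2C bus 1 at the default address; the import path follows the repository layout.
```python
# Minimal sketch: reading the BMP180 on I2C bus 1 at the default address (0x77).
from bmp180.bmp180 import bmp180, RES_8

sensor = bmp180(i2cbus=1, res=RES_8)
print("chip id:", sensor.get_chip_id())             # 0x55 is expected for a BMP180
print("temperature: %.1f C" % sensor.get_temperature())
print("pressure: %d Pa" % sensor.get_pressure())
print("altitude: %.1f m" % sensor.get_altitude())   # relative to standard sea-level pressure
```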
|
{
"source": "jeremiedbb/joblib",
"score": 2
}
|
#### File: joblib/joblib/parallel.py
```python
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
import warnings
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from ._compat import _basestring
from .externals.cloudpickle import dumps, loads
from .externals import loky
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
try:
import queue
except ImportError: # backward compat for Python 2
import Queue as queue
BACKENDS = {
'multiprocessing': MultiprocessingBackend,
'threading': ThreadingBackend,
'sequential': SequentialBackend,
'loky': LokyBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'loky'
DEFAULT_N_JOBS = 1
DEFAULT_THREAD_BACKEND = 'threading'
# Thread local value that can be overridden by the ``parallel_backend`` context
# manager
_backend = threading.local()
VALID_BACKEND_HINTS = ('processes', 'threads', None)
VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)
def _register_dask():
""" Register Dask Backend if called with parallel_backend("dask") """
try:
from ._dask import DaskDistributedBackend
register_parallel_backend('dask', DaskDistributedBackend)
except ImportError:
msg = ("To use the dask.distributed backend you must install both "
"the `dask` and distributed modules.\n\n"
"See https://dask.pydata.org/en/latest/install.html for more "
"information.")
raise ImportError(msg)
EXTERNAL_BACKENDS = {
'dask': _register_dask,
}
def get_active_backend(prefer=None, require=None, verbose=0):
"""Return the active default backend"""
if prefer not in VALID_BACKEND_HINTS:
raise ValueError("prefer=%r is not a valid backend hint, "
"expected one of %r" % (prefer, VALID_BACKEND_HINTS))
if require not in VALID_BACKEND_CONSTRAINTS:
raise ValueError("require=%r is not a valid backend constraint, "
"expected one of %r"
% (require, VALID_BACKEND_CONSTRAINTS))
if prefer == 'processes' and require == 'sharedmem':
raise ValueError("prefer == 'processes' and require == 'sharedmem'"
" are inconsistent settings")
backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if backend_and_jobs is not None:
# Try to use the backend set by the user with the context manager.
backend, n_jobs = backend_and_jobs
nesting_level = backend.nesting_level
supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
if require == 'sharedmem' and not supports_sharedmem:
# This backend does not match the shared memory constraint:
# fallback to the default thead-based backend.
sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
nesting_level=nesting_level)
if verbose >= 10:
print("Using %s as joblib.Parallel backend instead of %s "
"as the latter does not provide shared memory semantics."
% (sharedmem_backend.__class__.__name__,
backend.__class__.__name__))
return sharedmem_backend, DEFAULT_N_JOBS
else:
return backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now.
backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
uses_threads = getattr(backend, 'uses_threads', False)
if ((require == 'sharedmem' and not supports_sharedmem) or
(prefer == 'threads' and not uses_threads)):
# Make sure the selected default backend match the soft hints and
# hard constraints:
backend = BACKENDS[DEFAULT_THREAD_BACKEND](nesting_level=0)
return backend, DEFAULT_N_JOBS
class parallel_backend(object):
"""Change the default backend used by Parallel inside a with block.
If ``backend`` is a string it must match a previously registered
implementation using the ``register_parallel_backend`` function.
By default the following backends are available:
- 'loky': single-host, process-based parallelism (used by default),
- 'threading': single-host, thread-based parallelism,
- 'multiprocessing': legacy single-host, process-based parallelism.
'loky' is recommended to run functions that manipulate Python objects.
'threading' is a low-overhead alternative that is most efficient for
functions that release the Global Interpreter Lock: e.g. I/O-bound code or
CPU-bound code in a few calls to native code that explicitly releases the
GIL.
In addition, if the `dask` and `distributed` Python packages are installed,
it is possible to use the 'dask' backend for better scheduling of nested
parallel calls without over-subscription and potentially distribute
parallel calls over a networked cluster of several hosts.
Alternatively the backend can be passed directly as an instance.
By default all available workers will be used (``n_jobs=-1``) unless the
caller passes an explicit value for the ``n_jobs`` parameter.
This is an alternative to passing a ``backend='backend_name'`` argument to
the ``Parallel`` class constructor. It is particularly useful when calling
into library code that uses joblib internally but does not expose the
backend argument in its own API.
>>> from operator import neg
>>> with parallel_backend('threading'):
... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
...
[-1, -2, -3, -4, -5]
Warning: this function is experimental and subject to change in a future
version of joblib.
Joblib also tries to limit the oversubscription by limiting the number of
threads usable in some third-party library threadpools like OpenBLAS, MKL
or OpenMP. The default limit in each worker is set to
``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
overwritten with the ``inner_max_num_threads`` argument which will be used
to set this limit in the child processes.
.. versionadded:: 0.10
"""
def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None,
**backend_params):
if isinstance(backend, _basestring):
if backend not in BACKENDS and backend in EXTERNAL_BACKENDS:
register = EXTERNAL_BACKENDS[backend]
register()
backend = BACKENDS[backend](**backend_params)
if inner_max_num_threads is not None:
msg = ("{} does not accept setting the inner_max_num_threads "
"argument.".format(backend.__class__.__name__))
assert backend.supports_inner_max_num_threads, msg
backend.inner_max_num_threads = inner_max_num_threads
# If the nesting_level of the backend is not set previously, use the
# nesting level from the previous active_backend to set it
current_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if backend.nesting_level is None:
if current_backend_and_jobs is None:
nesting_level = 0
else:
nesting_level = current_backend_and_jobs[0].nesting_level
backend.nesting_level = nesting_level
# Save the backends info and set the active backend
self.old_backend_and_jobs = current_backend_and_jobs
self.new_backend_and_jobs = (backend, n_jobs)
_backend.backend_and_jobs = (backend, n_jobs)
def __enter__(self):
return self.new_backend_and_jobs
def __exit__(self, type, value, traceback):
self.unregister()
def unregister(self):
if self.old_backend_and_jobs is None:
if getattr(_backend, 'backend_and_jobs', None) is not None:
del _backend.backend_and_jobs
else:
_backend.backend_and_jobs = self.old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
DEFAULT_MP_CONTEXT = None
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
if method is not None:
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice, backend_and_jobs, pickle_cache=None):
self.items = list(iterator_slice)
self._size = len(self.items)
if isinstance(backend_and_jobs, tuple):
self._backend, self._n_jobs = backend_and_jobs
else:
# this is for backward compatibility purposes. Before 0.12.6,
# nested backends were returned without n_jobs indications.
self._backend, self._n_jobs = backend_and_jobs, None
self._pickle_cache = pickle_cache if pickle_cache is not None else {}
def __call__(self):
        # Set the default nested backend to self._backend but do not change
        # the default number of processes to -1
with parallel_backend(self._backend, n_jobs=self._n_jobs):
return [func(*args, **kwargs)
for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return loky.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
def delayed(function, check_pickle=None):
"""Decorator used to capture the arguments of a function."""
if check_pickle is not None:
warnings.warn('check_pickle is deprecated in joblib 0.12 and will be'
' removed in 0.13', DeprecationWarning)
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
self.parallel._backend.batch_completed(self.batch_size,
this_batch_duration)
self.parallel.print_progress()
with self.parallel._lock:
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the Parallel class. Moreover, the default backend can be
overwritten globally by setting make_default=True.
The factory can be any callable that takes no argument and return an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
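# Illustrative (hypothetical) sketch: a registered factory can then be selected
# by name, e.g.:
#
#     class MyThreadingBackend(ThreadingBackend):
#         pass
#
#     register_parallel_backend('my_threads', MyThreadingBackend)
#     with parallel_backend('my_threads', n_jobs=2):
#         Parallel()(delayed(abs)(-i) for i in range(3))   # -> [0, 1, 2]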
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
means requesting all available workers for instance matching the number of
CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less scheduling
overhead and better use of CPU cache prefetching heuristics) as long as all
the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, backend_n_jobs = get_active_backend()
if n_jobs is None:
n_jobs = backend_n_jobs
return backend.effective_n_jobs(n_jobs=n_jobs)
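# Editorial usage sketch (not part of the original joblib source): with the
# default backend, requesting all workers typically reports the host CPU
# count, which callers can use to decide how to chunk their work.
def _effective_n_jobs_example():
    return effective_n_jobs(n_jobs=-1)  # usually the number of CPU cores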
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Read more in the :ref:`User Guide <parallel>`.
Parameters
-----------
n_jobs: int, default: None
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
None is a marker for 'unset' that will be interpreted as n_jobs=1
(sequential execution) unless the call is performed under a
parallel_backend context manager that sets another value for
n_jobs.
backend: str, ParallelBackendBase instance or None, default: 'loky'
Specify the parallelization backend implementation.
Supported backends are:
- "loky" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "multiprocessing" previous process-based backend based on
`multiprocessing.Pool`. Less robust than `loky`.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
register_parallel_backend. This will allow you to implement
a backend of your liking.
It is not recommended to hard-code the backend name in a call to
Parallel in a library. Instead it is recommended to set soft hints
(prefer) or hard constraints (require) so as to make it possible
for library users to change the backend from the outside using the
parallel_backend context manager.
prefer: str in {'processes', 'threads'} or None, default: None
Soft hint to choose the default backend if no specific backend
was selected with the parallel_backend context manager. The
default process-based backend is 'loky' and the default
thread-based backend is 'threading'. Ignored if the ``backend``
parameter is specified.
require: 'sharedmem' or None, default None
Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
parallel_backend.
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
timeout: float, optional
Timeout limit for each task to complete. If any task takes longer,
a TimeOutError will be raised. Only applied when n_jobs != 1
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, dispatching
calls to workers can be slower than sequential computation because
of the overhead. Batching fast computations together can mitigate
this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAM disk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when backend="loky" or "multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays.
Only active when backend="loky" or "multiprocessing".
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses workers to compute in parallel the application of a
function to many different arguments. The main features it brings,
in addition to using the raw multiprocessing or concurrent.futures API,
are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables turning off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=10)(delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
[Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=None, backend=None, verbose=0, timeout=None,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r',
prefer=None, require=None):
active_backend, context_n_jobs = get_active_backend(
prefer=prefer, require=require, verbose=verbose)
nesting_level = active_backend.nesting_level
if backend is None and n_jobs is None:
# If we are under a parallel_backend context manager, look up
# the default number of jobs and use that instead:
n_jobs = context_n_jobs
if n_jobs is None:
# No specific context override and no specific value request:
# default to 1.
n_jobs = 1
self.n_jobs = n_jobs
self.verbose = verbose
self.timeout = timeout
self.pre_dispatch = pre_dispatch
self._ready_batches = queue.Queue()
if isinstance(max_nbytes, _basestring):
max_nbytes = memstr_to_bytes(max_nbytes)
self._backend_args = dict(
max_nbytes=max_nbytes,
mmap_mode=mmap_mode,
temp_folder=temp_folder,
prefer=prefer,
require=require,
verbose=max(0, self.verbose - 50),
)
if DEFAULT_MP_CONTEXT is not None:
self._backend_args['context'] = DEFAULT_MP_CONTEXT
elif hasattr(mp, "get_context"):
self._backend_args['context'] = mp.get_context()
if backend is None:
backend = active_backend
elif isinstance(backend, ParallelBackendBase):
# Use provided backend as is, with the current nesting_level if it
# is not set yet.
if backend.nesting_level is None:
backend.nesting_level = nesting_level
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_args['context'] = backend
backend = MultiprocessingBackend(nesting_level=nesting_level)
else:
try:
backend_factory = BACKENDS[backend]
except KeyError:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys())))
backend = backend_factory(nesting_level=nesting_level)
if (require == 'sharedmem' and
not getattr(backend, 'supports_sharedmem', False)):
raise ValueError("Backend %s does not support shared memory"
% backend)
if (batch_size == 'auto' or isinstance(batch_size, Integral) and
batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self._backend = backend
self._output = None
self._jobs = list()
self._managed_backend = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the worker pool.
self._lock = threading.RLock()
def __enter__(self):
self._managed_backend = True
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_backend()
self._managed_backend = False
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
**self._backend_args)
if self.timeout is not None and not self._backend.supports_timeout:
warnings.warn(
'The backend class {!r} does not support timeout. '
"You have set 'timeout={}' in Parallel but "
"the 'timeout' parameter will not be used.".format(
self._backend.__class__.__name__,
self.timeout))
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
n_jobs = self._initialize_backend()
return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_backend(self):
if self._backend is not None:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
with self._lock:
job_idx = len(self._jobs)
job = self._backend.apply_async(batch, callback=cb)
# A job can complete so quickly that its callback is
# called before we get here, causing self._jobs to
# grow. To ensure correct results ordering, .insert is
# used (rather than .append) in the following line
self._jobs.insert(job_idx, job)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto':
batch_size = self._backend.compute_batch_size()
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
# to ensure an even distribution of the workload between workers,
# we look ahead in the original iterator more than batch_size
# tasks - However, we keep consuming only one batch at each
# dispatch_one_batch call. The extra tasks are stored in a local
# queue, _ready_batches, that is looked up prior to re-consuming
# tasks from the original iterator.
try:
tasks = self._ready_batches.get(block=False)
except queue.Empty:
# slice the iterator n_jobs * batch_size items at a time. If the
# slice returns less than that, then the current batch_size puts
# too much weight on a subset of workers, while others may end
# up starving. So in this case, re-scale the batch size
# accordingly to distribute evenly the last items between all
# workers.
n_jobs = self._cached_effective_n_jobs
big_batch_size = batch_size * n_jobs
islice = list(itertools.islice(iterator, big_batch_size))
if len(islice) == 0:
return False
elif (iterator is self._original_iterator
and len(islice) < big_batch_size):
# We reached the end of the original iterator (unless
# iterator is the ``pre_dispatch``-long initial slice of
# the original iterator) -- decrease the batch size to
# account for potential variance in the batches running
# time.
final_batch_size = max(1, len(islice) // (10 * n_jobs))
else:
final_batch_size = max(1, len(islice) // n_jobs)
# enqueue n_jobs batches in a local queue
for i in range(0, len(islice), final_batch_size):
tasks = BatchedCalls(islice[i:i + final_batch_size],
self._backend.get_nested_backend(),
self._pickle_cache)
self._ready_batches.put(tasks)
# finally, get one task.
tasks = self._ready_batches.get(block=False)
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
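# Editorial illustration of the rescaling above (not part of the original
# source): with n_jobs = 4 and batch_size = 10, up to 40 tasks are sliced
# from the iterator at once. If only 12 tasks remain, the effective batch
# size becomes max(1, 12 // (10 * 4)) == 1, so the tail of the workload is
# spread evenly across workers instead of piling onto a single one.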
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# Original job iterator becomes None once it has been fully
# consumed: at this point we know the total number of jobs and we are
# able to display an estimation of the remaining time based on already
# completed jobs. Otherwise, we simply display the number of completed
# tasks.
if self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time), ))
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1 -
self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / index) * \
(self.n_dispatched_tasks - index * 1.0)
# only display status if remaining time is greater or equal to 0
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
if getattr(self._backend, 'supports_timeout', False):
self._output.extend(job.get(timeout=self.timeout))
else:
self._output.extend(job.get())
except BaseException as exception:
# Note: we catch any BaseException instead of just Exception
# instances to also include KeyboardInterrupt.
# Stop dispatching any new job in the async callback thread
self._aborting = True
# If the backend allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if (backend is not None and
hasattr(backend, 'abort_everything')):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
raise exception.unwrap(this_report)
else:
raise
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
# self._effective_n_jobs should be called in the Parallel.__call__
# thread only -- store its value in an attribute for further queries.
self._cached_effective_n_jobs = n_jobs
backend_name = self._backend.__class__.__name__
if n_jobs == 0:
raise RuntimeError("%s has no active worker." % backend_name)
self._print("Using backend %s with %d concurrent workers.",
(backend_name, n_jobs))
if hasattr(self._backend, 'start_call'):
self._backend.start_call()
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
# TODO: this iterator should be batch_size * n_jobs
iterator = itertools.islice(iterator, self._pre_dispatch_amount)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
# Use a caching dict for callables that are pickled with cloudpickle to
# improve performance. This cache is used only in the case of
# functions that are defined in the __main__ module, functions that are
# defined locally (inside another function) and lambda expressions.
self._pickle_cache = dict()
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator. If
# self._original_iterator is None, then this means either
# that pre_dispatch == "all", n_jobs == 1 or that the first batch
# was very quick and its callback already dispatched all the
# remaining jobs.
self._iterating = False
if self.dispatch_one_batch(iterator):
self._iterating = self._original_iterator is not None
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above loop.
# No need to wait for async callbacks to trigger
# consumption.
self._iterating = False
with self._backend.retrieval_context():
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if hasattr(self._backend, 'stop_call'):
self._backend.stop_call()
if not self._managed_backend:
self._terminate_backend()
self._jobs = list()
self._pickle_cache = None
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
```
|
{
"source": "jeremiedbb/scikit-learn-mooc",
"score": 4
}
|
#### File: scikit-learn-mooc/python_scripts/linear_models.py
```python
import pandas as pd
data = pd.read_csv("../datasets/penguins.csv")
data.head()
# %% [markdown]
# This dataset contains measurements taken of penguins. We will formulate the
# following problem: using the flipper length of a penguin, we would like
# to infer its mass.
# %%
import seaborn as sns
feature_names = "Flipper Length (mm)"
target_name = "Body Mass (g)"
sns.scatterplot(data=data, x=feature_names, y=target_name)
# select the feature and the target, dropping rows with missing values
data = data.dropna(subset=[feature_names, target_name])
X = data[[feature_names]]
y = data[target_name]
# %% [markdown]
# In this problem, penguin mass is our target. It is a continuous
# variable that roughly varies between 2700 g and 6300 g. Thus, this is a
# regression problem (in contrast to classification). We also see that there is
# almost a linear relationship between the body mass of the penguin and the
# flipper length. The longer the flipper, the heavier the penguin.
#
# Thus, we could come up with a simple formula, where given a flipper length
# we could compute the body mass of a penguin using a linear relationship of
# of the form `y = a * x + b` where `a` and `b` are the 2 parameters of our
# model.
# %%
def linear_model_flipper_mass(
flipper_length, weight_flipper_length, intercept_body_mass
):
"""Linear model of the form y = a * x + b"""
body_mass = weight_flipper_length * flipper_length + intercept_body_mass
return body_mass
# %% [markdown]
# Using the model we defined above, we can check the body mass values
# predicted for a range of flipper lengths. We will set `weight_flipper_length`
# to be 45 and `intercept_body_mass` to be -5000.
# %%
import matplotlib.pyplot as plt
import numpy as np
def plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass,
ax=None,
):
"""Compute and plot the prediction."""
inferred_body_mass = linear_model_flipper_mass(
flipper_length_range,
weight_flipper_length=weight_flipper_length,
intercept_body_mass=intercept_body_mass,
)
if ax is None:
_, ax = plt.subplots()
sns.scatterplot(data=data, x=feature_names, y=target_name, ax=ax)
ax.plot(
flipper_length_range,
inferred_body_mass,
linewidth=3,
label=(
f"{weight_flipper_length:.2f} (g / mm) * flipper length + "
f"{intercept_body_mass:.2f} (g)"
),
)
plt.legend()
weight_flipper_length = 45
intercept_body_mass = -5000
flipper_length_range = np.linspace(X.min(), X.max(), num=300)
plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass
)
# %% [markdown]
# The variable `weight_flipper_length` is a weight applied to the feature
# `flipper_length` in
# order to make the inference. When this coefficient is positive, it means that
# penguins with longer flipper lengths will have larger body masses.
# If the coefficient is negative, it means that penguins with shorter
# flipper lengths have larger body masses. Graphically, this coefficient is
# represented by the slope of the curve in the plot. Below we show what the
# curve would look like when the `weight_flipper_length` coefficient is
# negative.
# %%
weight_flipper_length = -40
intercept_body_mass = 13000
flipper_length_range = np.linspace(X.min(), X.max(), num=300)
plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass
)
# %% [markdown]
# In our case, this coefficient has a meaningful unit: g/mm.
# For instance, a coefficient of 40 g/mm, means that for each
# additional millimeter in flipper length, the body weight predicted will
# increase by 40 g.
body_mass_180 = linear_model_flipper_mass(
flipper_length=180, weight_flipper_length=40, intercept_body_mass=0
)
body_mass_181 = linear_model_flipper_mass(
flipper_length=181, weight_flipper_length=40, intercept_body_mass=0
)
print(
f"The body mass for a flipper length of 180 mm is {body_mass_180} g and "
f"{body_mass_181} g for a flipper length of 181 mm"
)
# %% [markdown]
# We can also see that we have a parameter `intercept_body_mass` in our model.
# This parameter corresponds to the value on the y-axis if `flipper_length=0`
# (which in our case is only a mathematical consideration, as in our data,
# the value of `flipper_length` only goes from 170mm to 230mm). This y-value when
# x=0 is called the y-intercept.
# If `intercept_body_mass` is 0, the curve will
# pass through the origin:
# %%
weight_flipper_length = 25
intercept_body_mass = 0
flipper_length_range = np.linspace(0, X.max(), num=300)
plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass
)
# %% [markdown]
# Otherwise, it will pass through the `intercept_body_mass` value:
# %%
weight_flipper_length = 45
intercept_body_mass = -5000
flipper_length_range = np.linspace(0, X.max(), num=300)
plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass
)
# %% [markdown]
# Now that we understand how our model makes predictions, we should ask how
# to find the best values for its parameters. Indeed, many different models
# are possible, depending on the choice of parameters:
# %%
_, ax = plt.subplots()
flipper_length_range = np.linspace(X.min(), X.max(), num=300)
for weight, intercept in zip([-40, 45, 90], [15000, -5000, -14000]):
plot_data_and_model(
flipper_length_range, weight, intercept, ax=ax,
)
# %% [markdown]
# To choose a model, we could use a metric that indicates how good our model is
# at fitting our data.
# %%
from sklearn.metrics import mean_squared_error
for weight, intercept in zip([-40, 45, 90], [15000, -5000, -14000]):
inferred_body_mass = linear_model_flipper_mass(
X,
weight_flipper_length=weight,
intercept_body_mass=intercept,
)
model_error = mean_squared_error(y, inferred_body_mass)
print(
f"The following model \n "
f"{weight:.2f} (g / mm) * flipper length + {intercept:.2f} (g) \n"
f"has a mean squared error of: {model_error:.2f}"
)
# %% [markdown]
# Thus, the best model will be the one with the lowest error.
# Hopefully, this problem of finding the best parameters values
# (i.e. that result in the lowest error)
# can be solved without the need to check every
# potential parameter combination. Indeed, this problem has a closed-form
# solution: the best parameter values can be found by solving an equation. This
# avoids the need for brute-force search. This strategy is
# implemented in scikit-learn.
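# %% [markdown]
# As an editorial aside (not part of the original notebook), the closed-form
# solution mentioned above can be computed directly with NumPy by solving the
# least-squares problem on a design matrix that includes an intercept column.
# `LinearRegression` below uses an equivalent, more robust solver.
# %%
X_design = np.hstack([X.to_numpy(), np.ones((X.shape[0], 1))])
slope, intercept = np.linalg.lstsq(X_design, y.to_numpy(), rcond=None)[0]
print(f"closed-form solution: {slope:.2f} (g / mm) * flipper length "
      f"+ {intercept:.2f} (g)")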
# %%
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
linear_regression.fit(X, y)
# %% [markdown]
# The instance `linear_regression` will store the parameter values in the
# attributes `coef_` and `intercept_`. We can check what the optimal model
# found is:
# %%
weight_flipper_length = linear_regression.coef_[0]
intercept_body_mass = linear_regression.intercept_
flipper_length_range = np.linspace(X.min(), X.max(), num=300)
plot_data_and_model(
flipper_length_range, weight_flipper_length, intercept_body_mass
)
inferred_body_mass = linear_regression.predict(X)
model_error = mean_squared_error(y, inferred_body_mass)
print(f"The error of the optimal model is {model_error:.2f}")
# %% [markdown]
# ### What if your data doesn't have a linear relationship?
# Now, we will define a new problem where the feature and the target are not
# linearly linked. For instance, we could define `x` to be the years of
# experience (normalized) and `y` the salary (normalized). Therefore, the
# problem here would be to infer the salary given the years of experience.
# %%
# data generation
# fix the seed for reproducibility
rng = np.random.RandomState(0)
n_sample = 100
x_max, x_min = 1.4, -1.4
len_x = (x_max - x_min)
x = rng.rand(n_sample) * len_x - len_x/2
noise = rng.randn(n_sample) * .3
y = x ** 3 - 0.5 * x ** 2 + noise
# plot the data
plt.scatter(x, y, color='k', s=9)
plt.xlabel('x', size=26)
_ = plt.ylabel('y', size=26)
# %% [markdown]
# ### Exercise 1
#
# In this exercise, you are asked to approximate the target `y` using a linear
# function `f(x)`, i.e. find the coefficients of `f` that minimize the mean
# squared error. Here you should find the coefficients manually,
# via trial and error (just as in the previous cells with weight and intercept).
#
# Then you can compare the mean squared error of your model with the mean
# squared error found by `LinearRegression` (which should be the minimal one).
# %%
def f(x):
intercept = 0 # TODO: update the parameters here
weight = 0 # TODO: update the parameters here
y_predict = weight * x + intercept
return y_predict
# plot the slope of f
grid = np.linspace(x_min, x_max, 300)
plt.scatter(x, y, color='k', s=9)
plt.plot(grid, f(grid), linewidth=3)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, f(x))
print(f"Mean squared error = {mse:.2f}")
# %% [markdown]
# ### Solution 1: fitting a linear regression
# %%
from sklearn import linear_model
linear_regression = linear_model.LinearRegression()
# X should be 2D for sklearn
X = x.reshape((-1, 1))
linear_regression.fit(X, y)
# plot the best slope
y_best = linear_regression.predict(grid.reshape(-1, 1))
plt.plot(grid, y_best, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, linear_regression.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# Here the coefficients learnt by `LinearRegression` define the straight line
# that best fits the data. We can inspect these coefficients using the
# attributes of the fitted model as follows:
# %%
print(
f"best coef: w1 = {linear_regression.coef_[0]:.2f}, "
f"best intercept: w0 = {linear_regression.intercept_:.2f}"
)
# %% [markdown]
# It is important to note that the model learnt will not be able to handle
# the non-linear relationship between `x` and `y` since linear models assume
# the relationship between `x` and `y` to be linear. To obtain a better model,
# we have 3 main solutions:
#
# 1. choose a model that can natively deal with non-linearity,
# 2. "augment" features by including expert knowledge which can be used by
#    the model, or
# 3. use a "kernel" to have a locally-based decision function instead of a
#    global linear decision function.
#
# Let's illustrate quickly the first point by using a decision tree regressor
# which can natively handle non-linearity.
# %%
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3).fit(X, y)
y_pred = tree.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, tree.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# In this case, the model can handle non-linearity. Instead of having a model
# which can natively deal with non-linearity, we could also modify our data: we
# could create new features, derived from the original features, using some
# expert knowledge. For instance, here we know that we have a cubic and squared
# relationship between `x` and `y` (because we generated the data). Indeed,
# we could create two new features (`x^2` and `x^3`) using this information.
# %%
X = np.vstack([x, x ** 2, x ** 3]).T
linear_regression.fit(X, y)
grid_augmented = np.vstack([grid, grid ** 2, grid ** 3]).T
y_pred = linear_regression.predict(grid_augmented)
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, linear_regression.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# We can see that even with a linear model, we can overcome the linearity
# limitation of the model by adding non-linear components to the design of
# additional features. Here, we created new features because we knew how the
# target was generated. In practice, this is usually not the case. Instead,
# one usually creates interactions between features (e.g. $x_1 * x_2$) with
# different orders (e.g. $x_1, x_1^2, x_1^3$), at the risk of
# creating a model with too much expressivity, which might overfit. In
# scikit-learn, the `PolynomialFeatures` is a transformer to create such
# feature interactions which we could have used instead of manually creating
# new features.
#
#
# To demonstrate `PolynomialFeatures`, we are going to use a scikit-learn
# pipeline which will first create the new features and then fit the model.
# We come back to scikit-learn pipelines and discuss them in more detail later.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
X = x.reshape(-1, 1)
model = make_pipeline(
PolynomialFeatures(degree=3), LinearRegression()
)
model.fit(X, y)
y_pred = model.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, model.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# Thus, we saw that `PolynomialFeatures` is actually doing the same
# operation that we did manually above.
# %% [markdown]
# **FIXME: it might be too complex to be introduced here but it seems good in
# the flow. However, we move away from linear models.**
#
# The last possibility to make a linear model more expressive is to use a
# "kernel". Instead of learning a weight per feature as we previously
# emphasized, a weight is assigned to each sample instead. However, not all
# samples are used. This is the basis of the support vector machine
# algorithm.
# %%
from sklearn.svm import SVR
svr = SVR(kernel="linear").fit(X, y)
y_pred = svr.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, svr.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# The algorithm can be modified so that it uses a non-linear kernel. Then,
# it will compute interactions between samples through this non-linear
# kernel.
# %%
svr = SVR(kernel="poly", degree=3).fit(X, y)
y_pred = svr.predict(grid.reshape(-1, 1))
plt.plot(grid, y_pred, linewidth=3)
plt.scatter(x, y, color="k", s=9)
plt.xlabel("x", size=26)
plt.ylabel("y", size=26)
mse = mean_squared_error(y, svr.predict(X))
print(f"Lowest mean squared error = {mse:.2f}")
# %% [markdown]
# Therefore, a kernel can make a model more expressive.
# %% [markdown]
# ### Linear regression in higher dimension
# In the previous example, we only used a single feature. But we have
# already shown that we could make the model more expressive
# by deriving new features, based on the original feature.
#
# Indeed, we could also use additional features (not related to the
# original feature) and these could help us to predict the target.
#
# We will load a dataset about house prices in California.
# The dataset consists of 8 features regarding the demography and geography of
# districts in California and the aim is to predict the median house price of
# each district. We will use all 8 features to predict the target, median
# house price.
# %%
from sklearn.datasets import fetch_california_housing
X, y = fetch_california_housing(as_frame=True, return_X_y=True)
X.head()
# %% [markdown]
# We will compare the score of `LinearRegression` and `Ridge` (which is a
# regularized version of linear regression).
#
# The scorer we will use to evaluate our model is the mean squared error, as in
# the previous example. The lower the score, the better.
# %% [markdown]
# Here, we will divide our data into a training set, a validation set and a
# testing set.
# The validation set will be used to evaluate selection of the
# hyper-parameters, while the testing set should only be used to calculate the
# score of our final model.
# %%
from sklearn.model_selection import train_test_split
X_train_valid, X_test, y_train_valid, y_test = train_test_split(
X, y, random_state=1
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_valid, y_train_valid, random_state=1
)
# %% [markdown]
# Note that in the first example, we did not care about scaling our data in
# order to keep the original units and have better intuition. However, it is
# good practice to scale the data such that each feature has a similar standard
# deviation. It will be even more important if the solver used by the model
# is a gradient-descent-based solver.
# %%
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit(X_train).transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
# %% [markdown]
# Scikit-learn provides several tools to preprocess the data. The
# `StandardScaler` transforms the data such that each feature will have a mean
# of zero and a standard deviation of 1.
#
# This scikit-learn estimator is known as a transformer: it computes some
# statistics (i.e. the mean and the standard deviation) and stores them as
# attributes (scaler.mean_, scaler.scale_)
# when calling `fit`. Using these statistics, it
# transforms the data when `transform` is called. Therefore, it is important to
# note that `fit` should only be called on the training data, similar to
# classifiers and regressors.
# %%
print('mean records on the training set:', scaler.mean_)
print('standard deviation records on the training set:', scaler.scale_)
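# %% [markdown]
# As a quick editorial check (not part of the original notebook), the scaled
# training data can be reproduced by hand from these stored statistics:
# %%
manually_scaled = (X_train - scaler.mean_) / scaler.scale_
print("manual scaling matches StandardScaler:",
      np.allclose(manually_scaled, X_train_scaled))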
# %% [markdown]
# In the example above, `X_train_scaled` is the data scaled, using the
# mean and standard deviation of each feature, computed using the training
# data `X_train`.
# %%
linear_regression = LinearRegression()
linear_regression.fit(X_train_scaled, y_train)
y_pred = linear_regression.predict(X_valid_scaled)
print(
f"Mean squared error on the validation set: "
f"{mean_squared_error(y_valid, y_pred):.4f}"
)
# %% [markdown]
# Instead of calling the transformer to transform the data and then calling
# the regressor, scikit-learn provides a `Pipeline`, which 'chains' the
# transformer and regressor together. The pipeline allows you to use a
# sequence of transformer(s) followed by a regressor or a classifier, in one
# call: fitting the pipeline fits both the transformer(s) and the regressor,
# and predicting from the pipeline first transforms the data through the
# transformer(s), then predicts with the regressor on the transformed data.
# This pipeline exposes the same API as the regressor and classifier
# and will manage the calls to `fit` and `transform` for you, avoiding any
# problems with data leakage (when knowledge of the test data was
# inadvertently included in training a model, as when fitting a transformer
# on the test data).
#
# We already presented `Pipeline` in the second notebook and we will use it
# here to combine both the scaling and the linear regression.
#
# We can create a `Pipeline` by using `make_pipeline` and giving as
# arguments the transformation(s) to be performed (in order) and the regressor
# model.
#
# So the two cells above can be reduced to this new one:
# %%
from sklearn.pipeline import make_pipeline
linear_regression = make_pipeline(StandardScaler(), LinearRegression())
linear_regression.fit(X_train, y_train)
y_pred_valid = linear_regression.predict(X_valid)
linear_regression_score = mean_squared_error(y_valid, y_pred_valid)
y_pred_test = linear_regression.predict(X_test)
print(
f"Mean squared error on the validation set: "
f"{mean_squared_error(y_valid, y_pred_valid):.4f}"
)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
# %% [markdown]
# Now we want to compare this basic `LinearRegression` versus its regularized
# form `Ridge`.
#
# We will tune the parameter `alpha` in `Ridge` and compare the results with
# the `LinearRegression` model which is not regularized.
# %%
from sklearn.linear_model import Ridge
ridge = make_pipeline(StandardScaler(), Ridge())
list_alphas = np.logspace(-2, 2.1, num=40)
list_ridge_scores = []
for alpha in list_alphas:
ridge.set_params(ridge__alpha=alpha)
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_valid)
list_ridge_scores.append(mean_squared_error(y_valid, y_pred))
plt.plot(
list_alphas, [linear_regression_score] * len(list_alphas), '--',
label='LinearRegression',
)
plt.plot(list_alphas, list_ridge_scores, "+-", label='Ridge')
plt.xlabel('alpha (regularization strength)')
plt.ylabel('Mean squared error (lower is better)')
_ = plt.legend()
# %% [markdown]
# We see that, just like adding salt in cooking, adding regularization in our
# model could improve its error on the validation set. But too much
# regularization, like too much salt, decreases its performance.
#
# We can see visually that the best `alpha` should be around 40.
# %%
best_alpha = list_alphas[np.argmin(list_ridge_scores)]
best_alpha
# %% [markdown]
# Note that we selected this alpha *without* using the testing set, but
# instead by using the validation set, which is a subset of the training
# data. This way we do not "overfit" the test data; this topic is covered
# in the lesson *basic hyper-parameters tuning*.
# We can finally compare the performance of the `LinearRegression` model to the
# best `Ridge` model, on the testing set.
# %%
print("Linear Regression")
y_pred_test = linear_regression.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
print("Ridge Regression")
ridge.set_params(ridge__alpha=best_alpha)
ridge.fit(X_train, y_train)
y_pred_test = ridge.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
# FIXME add explanation why Ridge is not better (equivalent) than linear
# regression here.
# %% [markdown]
# The hyper-parameter search could have been made using `GridSearchCV`
# instead of manually splitting the training data (into training and
# validation subsets) and selecting the best alpha.
# %%
from sklearn.model_selection import GridSearchCV
ridge = GridSearchCV(
make_pipeline(StandardScaler(), Ridge()),
param_grid={"ridge__alpha": list_alphas},
)
ridge.fit(X_train_valid, y_train_valid)
print(ridge.best_params_)
# %% [markdown]
# The `GridSearchCV` tests all possible given `alpha` values and picks
# the best one with a cross-validation scheme. We can now compare with
# `LinearRegression`.
# %%
print("Linear Regression")
linear_regression.fit(X_train_valid, y_train_valid)
y_pred_test = linear_regression.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
print("Ridge Regression")
y_pred_test = ridge.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
# %% [markdown]
# It is also interesting to know that several regressors and classifiers
# in scikit-learn are optimized to make this parameter tuning. They usually
# finish with the term "CV" for "Cross Validation" (e.g. `RidgeCV`).
# They are more efficient than using `GridSearchCV` and you should use them
# instead.
#
# We will repeat the equivalent of the hyper-parameter search but instead of
# using a `GridSearchCV`, we will use `RidgeCV`.
# %%
from sklearn.linear_model import RidgeCV
ridge = make_pipeline(
StandardScaler(), RidgeCV(alphas=[.1, .5, 1, 5, 10, 50, 100])
)
ridge.fit(X_train_valid, y_train_valid)
ridge[-1].alpha_
# %%
print("Linear Regression")
y_pred_test = linear_regression.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
print("Ridge Regression")
y_pred_test = ridge.predict(X_test)
print(
f"Mean squared error on the test set: "
f"{mean_squared_error(y_test, y_pred_test):.4f}"
)
# %% [markdown]
# Note that the best hyper-parameter value may differ because the
# cross-validation scheme used internally by each approach is different, and
# `RidgeCV` was given a coarser grid of candidate alphas.
# %% [markdown]
# ## 2. Classification
# In regression, we saw that the target to be predicted was a continuous
# variable. In classification, this target will be discrete (e.g. categorical).
#
# We will go back to our penguin dataset. However, this time we will try to
# predict the penguin species using the culmen information. We will also
# simplify our classification problem by selecting only 2 of the penguin
# species to solve a binary classification problem.
# %%
data = pd.read_csv("../datasets/penguins.csv")
# select the features of interest
culmen_columns = ["Culmen Length (mm)", "Culmen Depth (mm)"]
target_column = "Species"
data = data[culmen_columns + [target_column]]
data[target_column] = data[target_column].str.split().str[0]
data = data[data[target_column].apply(lambda x: x in ("Adelie", "Chinstrap"))]
data = data.dropna()
# %% [markdown]
# We can quickly start by visualizing the feature distribution by class:
# %%
_ = sns.pairplot(data=data, hue="Species")
# %% [markdown]
# We can observe that we have quite a simple problem. When the culmen
# length increases, the probability that the penguin is a Chinstrap is closer
# to 1. However, the culmen depth is not helpful for predicting the penguin
# species.
#
# For model fitting, we will separate the target from the data and
# we will create a training and a testing set.
# %%
from sklearn.model_selection import train_test_split
X, y = data[culmen_columns], data[target_column]
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=0,
)
# %% [markdown]
# To visualize the separation found by our classifier, we will define a helper
# function `plot_decision_function`. In short, this function will fit our
# classifier and plot the edge of the decision function, where the probability
# of being an Adelie or a Chinstrap is equal (p=0.5).
# %%
def plot_decision_function(X, y, clf, title="auto", ax=None):
"""Plot the boundary of the decision function of a classifier."""
from sklearn.preprocessing import LabelEncoder
clf.fit(X, y)
# create a grid to evaluate all possible samples
plot_step = 0.02
feature_0_min, feature_0_max = (
X.iloc[:, 0].min() - 1,
X.iloc[:, 0].max() + 1,
)
feature_1_min, feature_1_max = (
X.iloc[:, 1].min() - 1,
X.iloc[:, 1].max() + 1,
)
xx, yy = np.meshgrid(
np.arange(feature_0_min, feature_0_max, plot_step),
np.arange(feature_1_min, feature_1_max, plot_step),
)
# compute the associated prediction
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = LabelEncoder().fit_transform(Z)
Z = Z.reshape(xx.shape)
# make the plot of the boundary and the data samples
if ax is None:
_, ax = plt.subplots()
ax.contourf(xx, yy, Z, alpha=0.4)
sns.scatterplot(
data=pd.concat([X, y], axis=1),
x=X.columns[0],
y=X.columns[1],
hue=y.name,
ax=ax,
)
if title == "auto":
C = clf[-1].C if hasattr(clf[-1], "C") else clf[-1].C_
ax.set_title(f"C={C}\n with coef={clf[-1].coef_[0]}")
else:
ax.set_title(title)
# %% [markdown]
# ### Un-penalized logistic regression
#
# The linear regression that we previously saw will predict a continuous
# output. When the target is a binary outcome, one can use the logistic
# function to model the probability. This model is known as logistic
# regression.
#
# Scikit-learn provides the class `LogisticRegression` which implements this
# algorithm.
# %%
from sklearn.linear_model import LogisticRegression
logistic_regression = make_pipeline(
StandardScaler(), LogisticRegression(penalty="none")
)
plot_decision_function(X_train, y_train, logistic_regression)
# %% [markdown]
# Thus, we see that our decision function is represented by a line separating
# the 2 classes. Since the line is oblique, it means that we used a
# combination of both features:
# %%
print(logistic_regression[-1].coef_)
# %% [markdown]
# Indeed, both coefficients are non-zero.
#
# ### Apply some regularization when fitting the logistic model
#
# The `LogisticRegression` model allows one to apply regularization via the
# parameter `C`. It would be equivalent to shifting from `LinearRegression`
# to `Ridge`. Contrary to `Ridge`, the value of the
# `C` parameter is inversely proportional to the regularization strength:
# a smaller `C` will lead to a more regularized model. We can check the effect
# of regularization on our model:
# %%
_, axs = plt.subplots(ncols=3, figsize=(12, 4))
for ax, C in zip(axs, [0.02, 0.1, 1]):
logistic_regression = make_pipeline(
StandardScaler(), LogisticRegression(C=C)
)
plot_decision_function(
X_train, y_train, logistic_regression, ax=ax,
)
# %% [markdown]
# A more regularized model will make the coefficients tend to 0. Since one of
# the features is considered less important when fitting the model (lower
# coefficient magnitude), only one of the features will be used when `C` is small.
# This feature is the culmen length which is in line with our first insight
# when plotting the marginal feature probabilities.
#
# Just like the `RidgeCV` class which automatically finds the optimal `alpha`,
# one can use `LogisticRegressionCV` to find the best `C` on the training data.
# %%
from sklearn.linear_model import LogisticRegressionCV
logistic_regression = make_pipeline(
StandardScaler(), LogisticRegressionCV(Cs=[0.01, 0.1, 1, 10])
)
plot_decision_function(X_train, y_train, logistic_regression)
# %% [markdown]
# ### Beyond linear separation
#
# As we saw in regression, the linear classification model expects the data
# to be linearly separable. When this assumption does not hold, the model
# is not expressive enough to properly fit the data. One needs to apply the
# same tricks as in regression: feature augmentation (potentially using
# expert-knowledge) or using a kernel based method.
#
# We will provide examples where we will use a kernel support vector machine
# to perform classification on some toy-datasets where it is impossible to
# find a perfect linear separation.
# %%
from sklearn.datasets import (
make_moons, make_classification, make_gaussian_quantiles,
)
X_moons, y_moons = make_moons(n_samples=500, noise=.13, random_state=42)
X_class, y_class = make_classification(
n_samples=500, n_features=2, n_redundant=0, n_informative=2,
random_state=2,
)
X_gauss, y_gauss = make_gaussian_quantiles(
n_samples=50, n_features=2, n_classes=2, random_state=42,
)
datasets = [
[pd.DataFrame(X_moons, columns=["Feature #0", "Feature #1"]),
pd.Series(y_moons, name="class")],
[pd.DataFrame(X_class, columns=["Feature #0", "Feature #1"]),
pd.Series(y_class, name="class")],
[pd.DataFrame(X_gauss, columns=["Feature #0", "Feature #1"]),
pd.Series(y_gauss, name="class")],
]
# %%
from sklearn.svm import SVC
_, axs = plt.subplots(ncols=3, nrows=2, figsize=(12, 9))
linear_model = make_pipeline(StandardScaler(), SVC(kernel="linear"))
kernel_model = make_pipeline(StandardScaler(), SVC(kernel="rbf"))
for ax, (X, y) in zip(axs[0], datasets):
plot_decision_function(X, y, linear_model, title="Linear kernel", ax=ax)
for ax, (X, y) in zip(axs[1], datasets):
plot_decision_function(X, y, kernel_model, title="RBF kernel", ax=ax)
# %% [markdown]
# We see that the linear kernel fails to separate the classes on the datasets
# that are not linearly separable, while the RBF kernel adapts its decision
# boundary to the non-linear structure of the data.
# %% [markdown]
# # Main take away
#
# - `LinearRegression` finds the best slope which minimizes the mean squared
#   error on the train set
# - `Ridge` could be better on the test set, thanks to its regularization
# - `RidgeCV` and `LogisticRegressionCV` find the best regularization thanks
#   to cross validation on the training data
# - `pipeline` can be used to combine a scaler and a model
# - If the data are not linearly separable, we should use a more complex model
#   or use feature augmentation
#
# %%
```
|
{
"source": "jeremiedbb/scipy",
"score": 3
}
|
#### File: scipy/optimize/_numdiff.py
```python
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
Adjusted step sizes. Step size decreases only if a sign flip or
switching to a one-sided scheme doesn't allow a full step to be taken.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is, a random permutation is used
        but repeatability is guaranteed.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group to
        which the ith column is assigned. The procedure is helpful only if
        n_groups is significantly smaller than n.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
    If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
    called the Jacobian, where element (i, j) is the partial derivative of
    f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
it's possible to estimate its several columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that a corresponding element of the Jacobian
identically equals to zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
a 2-D structure, for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
    If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
    For dense differencing, when m=1 the Jacobian is returned with shape (n,);
    on the other hand, when n=1 the Jacobian is returned with shape (m, 1).
    Our motivation is the following: a) it handles the case of gradient
    computation (m=1) in a conventional way; b) it clearly separates these two
    different cases; c) in all cases np.atleast_2d can be called to get a 2-D
    Jacobian with correct dimensions.
References
----------
.. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] <NAME>, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
```
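A brief usage sketch of the sparse-differencing path described in the `approx_derivative` docstring (an illustration, not part of the SciPy sources; the test function and its sparsity pattern are made up, and the import goes through the private module shown above). Each output depends only on its own variable and the previous one, so the Jacobian is lower-bidiagonal and `group_columns` lets several columns be estimated per extra function evaluation.
```python
import numpy as np
from scipy.optimize._numdiff import approx_derivative, group_columns

def fun(x):
    # f_i = x_i**2 + x_{i-1}  ->  lower-bidiagonal Jacobian
    f = x ** 2
    f[1:] += x[:-1]
    return f

n = 5
x0 = np.arange(1.0, n + 1)
structure = np.eye(n) + np.eye(n, k=-1)   # known sparsity pattern (m x n)
groups = group_columns(structure)         # greedy column grouping
J = approx_derivative(fun, x0, method='3-point',
                      sparsity=(structure, groups))
print("number of column groups:", groups.max() + 1)
print(J.toarray())                        # returned as a (5, 5) csr_matrix
```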
|
{
"source": "jeremiedbb/threadpoolctl",
"score": 2
}
|
#### File: threadpoolctl/tests/utils.py
```python
import os
import pytest
from glob import glob
def skip_func(msg):
def test_func(*args, **kwargs):
pytest.skip(msg)
return test_func
# Path to shipped openblas for libraries such as numpy or scipy
libopenblas_patterns = []
# Detect the OpenBLAS shipped with numpy (when numpy is available)
try:
# make sure the mkl/blas are loaded for test_threadpool_limits
import numpy as np
np.dot(np.ones(1000), np.ones(1000))
libopenblas_patterns.append(os.path.join(np.__path__[0], ".libs",
"libopenblas*"))
except ImportError:
pass
try:
import scipy
import scipy.linalg # noqa: F401
scipy.linalg.svd([[1, 2], [3, 4]])
libopenblas_patterns.append(os.path.join(scipy.__path__[0], ".libs",
"libopenblas*"))
except ImportError:
scipy = None
libopenblas_paths = set(path for pattern in libopenblas_patterns
for path in glob(pattern))
# A decorator to run tests only when check_openmp_n_threads is available
try:
from ._openmp_test_helper import check_openmp_num_threads # noqa: F401
def with_check_openmp_num_threads(func):
"""A decorator to skip tests if check_openmp_n_threads is not compiled.
"""
return func
except ImportError:
def with_check_openmp_num_threads(func):
"""A decorator to skip tests if check_openmp_n_threads is not compiled.
"""
return skip_func('Test requires check_openmp_n_threads to be compiled')
```
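A short sketch of how these helpers might be used from a test module sitting next to `utils.py` (the import path and the test names are assumptions, not taken from the repository):
```python
from utils import libopenblas_paths, with_check_openmp_num_threads

@with_check_openmp_num_threads
def test_openmp_helper_available():
    # Runs only when the _openmp_test_helper extension is compiled;
    # otherwise the decorator replaces it with a skipped test.
    assert True

def test_detected_openblas_paths():
    # May legitimately be empty when numpy/scipy do not ship OpenBLAS.
    assert isinstance(libopenblas_paths, set)
```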
|
{
"source": "jeremiedecock/botsim",
"score": 3
}
|
#### File: botsim/utils/plot_part_dat.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
import argparse
def parse_part_log_file(filename):
log_data = np.loadtxt(filename)
data_dict = {}
data_dict["time_sec"] = log_data[:, 0]
data_dict["position_x"] = log_data[:, 1]
data_dict["position_y"] = log_data[:, 2]
data_dict["position_z"] = log_data[:, 3]
data_dict["angle_x"] = log_data[:, 4]
data_dict["angle_y"] = log_data[:, 5]
data_dict["angle_z"] = log_data[:, 6]
data_dict["angle_w"] = log_data[:, 7]
data_dict["linear_velocity_x"] = log_data[:, 8]
data_dict["linear_velocity_y"] = log_data[:, 9]
data_dict["linear_velocity_z"] = log_data[:,10]
data_dict["angular_velocity_x"] = log_data[:,11]
data_dict["angular_velocity_y"] = log_data[:,12]
data_dict["angular_velocity_z"] = log_data[:,13]
data_dict["total_force_x"] = log_data[:,14]
data_dict["total_force_y"] = log_data[:,15]
data_dict["total_force_z"] = log_data[:,16]
data_dict["total_torque_x"] = log_data[:,17]
data_dict["total_torque_y"] = log_data[:,18]
data_dict["total_torque_z"] = log_data[:,19]
return data_dict
def main():
"""Main function"""
# PARSE OPTIONS ###################
parser = argparse.ArgumentParser(description='Plot one or several part(s).')
parser.add_argument('filenames', nargs='+', metavar='FILE', help='DAT file to read')
parser.add_argument("--title", "-t", help="set the title of the figure", metavar="STRING")
args = parser.parse_args()
title = args.title
# PLOT DATA #######################
fig = plt.figure(figsize=(16.0, 10.0))
#fig = plt.figure()
ax = fig.add_subplot(111)
#ax.grid(True)
for index, filename in enumerate(args.filenames):
print(index, filename)
data_dict = parse_part_log_file(filename)
ax.plot(data_dict["time_sec"], data_dict["position_z"], label=filename)
# TITLE AND LABELS ################
FONTSIZE = 26
FONTSIZE_S = 22
if title is None:
title = "Parts position with respect to time."
ax.set_title(title, fontsize=FONTSIZE)
ax.set_xlabel("Time (sec)", fontsize=FONTSIZE)
ax.set_ylabel("Position", fontsize=FONTSIZE)
ax.legend(loc='best', fontsize=FONTSIZE_S)
# SAVE FILES ######################
fig_filename = "parts.pdf"
plt.savefig(fig_filename)
# PLOT ############################
plt.show()
if __name__ == '__main__':
main()
```
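The parser can also be reused outside the command-line entry point, for example to plot the vertical linear velocity instead of the position (a sketch; the log file name is hypothetical and the script is assumed to be importable as `plot_part_dat`):
```python
import matplotlib.pyplot as plt
from plot_part_dat import parse_part_log_file

data = parse_part_log_file("part.dat")   # hypothetical DAT log file
plt.plot(data["time_sec"], data["linear_velocity_z"])
plt.xlabel("Time (sec)")
plt.ylabel("Linear velocity along z")
plt.show()
```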
|
{
"source": "jeremiedecock/fits-viewer",
"score": 3
}
|
#### File: fitsviewer/utils/png2fits.py
```python
import argparse
from astropy.io import fits
import os
import PIL.Image as pil_img # PIL.Image is a module not a class...
import numpy as np
def load_image(input_file_path):
"""
Load the 'input_file_path' and return a 2D numpy array of the image it contains.
"""
image_array = np.array(pil_img.open(input_file_path).convert('L'))
return image_array
def save_fits_file(image_array, output_file_path):
"""
image_array is the image and it should be a 2D numpy array with values in
the range [0,255].
"""
# FLIP THE IMAGE IN THE UP/DOWN DIRECTION #############
# WARNING: with fits, the (0,0) point is at the BOTTOM left corner
# whereas with pillow, the (0,0) point is at the TOP left corner
# thus the image should be converted
image_array = np.flipud(image_array)
# CREATE THE FITS STRUCTURE ###########################
hdu = fits.PrimaryHDU(image_array)
# SAVE THE FITS FILE ##################################
# Save the FITS file (overwrite the file if it already exists)
try:
hdu.writeto(output_file_path, overwrite=True)
except TypeError:
hdu.writeto(output_file_path, clobber=True)
def main():
# PARSE OPTIONS ###########################################################
desc = "Convert PNG or JPEG files to FITS images"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("filearg", nargs=1, metavar="FILE",
help="the FITS file to convert")
args = parser.parse_args()
input_file_path = args.filearg[0]
output_file_path = os.path.splitext(input_file_path)[0] + ".fits"
# READ AND SAVE DATA ######################################################
image_array = load_image(input_file_path) # image_array is a 2D numpy array
save_fits_file(image_array, output_file_path)
if __name__ == "__main__":
main()
```
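The same conversion can be driven programmatically (a sketch; the import path and file names are assumptions):
```python
from png2fits import load_image, save_fits_file

image_array = load_image("picture.png")       # hypothetical input, loaded as grayscale
save_fits_file(image_array, "picture.fits")   # flipped and written as the primary HDU
```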
|
{
"source": "jeremiedecock/job_advert_manager",
"score": 2
}
|
#### File: job_advert_manager/jobmanager/add_and_edit_container.py
```python
from gi.repository import Gtk as gtk
import datetime
import json
import category_list
DEFAULT_SCORE = 5
class AddAndEditContainer(gtk.Grid):
def __init__(self, main_window, job_adverts_model, edit_mode=False, treeview=None):
"""
...
"""
super(AddAndEditContainer, self).__init__()
self.main_window = main_window
self.job_adverts_model = job_adverts_model
self.edit_mode = edit_mode
self.treeview = treeview
self.category_combobox = gtk.ComboBoxText()
self.organization_entry = gtk.Entry()
self.url_entry = gtk.Entry()
if self.edit_mode:
self.url_entry.set_editable(False)
self.title_entry = gtk.Entry()
self.score_spin_button = gtk.SpinButton()
self.pros_textview = gtk.TextView()
self.cons_textview = gtk.TextView()
self.desc_textview = gtk.TextView()
# Category
category_label = gtk.Label(label="Category")
self.category_combobox.set_entry_text_column(0)
for category in category_list.CATEGORY_LIST:
self.category_combobox.append_text(category)
self.category_combobox.set_active(-1) # -1 = no active item selected
# Organization
organization_label = gtk.Label(label="Organization")
# URL
url_label = gtk.Label(label="Url")
# Title
title_label = gtk.Label(label="Title")
# Score
score_label = gtk.Label(label="Score")
self.score_spin_button.set_increments(step=1, page=5)
self.score_spin_button.set_range(min=0, max=5)
self.score_spin_button.set_value(5)
self.score_spin_button.set_numeric(True)
self.score_spin_button.set_update_policy(gtk.SpinButtonUpdatePolicy.IF_VALID)
# Pros
pros_label = gtk.Label(label="Pros")
self.pros_textview.set_wrap_mode(gtk.WrapMode.WORD)
pros_scrolled_window = gtk.ScrolledWindow()
pros_scrolled_window.set_border_width(3)
pros_scrolled_window.set_shadow_type(gtk.ShadowType.OUT)
pros_scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS)
pros_scrolled_window.add(self.pros_textview)
# Cons
cons_label = gtk.Label(label="Cons")
self.cons_textview.set_wrap_mode(gtk.WrapMode.WORD)
cons_scrolled_window = gtk.ScrolledWindow()
cons_scrolled_window.set_border_width(3)
cons_scrolled_window.set_shadow_type(gtk.ShadowType.OUT)
cons_scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS)
cons_scrolled_window.add(self.cons_textview)
# Description
desc_label = gtk.Label(label="Description")
self.desc_textview.set_wrap_mode(gtk.WrapMode.WORD)
desc_scrolled_window = gtk.ScrolledWindow()
desc_scrolled_window.set_border_width(3)
desc_scrolled_window.set_shadow_type(gtk.ShadowType.OUT)
desc_scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS)
desc_scrolled_window.add(self.desc_textview)
# Buttons
add_button = gtk.Button(label="Save")
add_button.connect("clicked", self.saveCallBack)
cancel_button = gtk.Button(label="Cancel")
cancel_button.connect("clicked", self.clearCallBack)
# The grid container
self.set_column_homogeneous(False)
self.set_row_homogeneous(False)
self.set_column_spacing(12)
self.set_row_spacing(6)
self.set_border_width(18)
# Set hexpand, vexpand, halign, valign
# See https://developer.gnome.org/gtk3/stable/ch29s02.html
self.category_combobox.set_hexpand(True)
self.organization_entry.set_hexpand(True)
self.url_entry.set_hexpand(True)
self.score_spin_button.set_hexpand(True)
self.title_entry.set_hexpand(True)
pros_scrolled_window.set_hexpand(True)
pros_scrolled_window.set_vexpand(True)
cons_scrolled_window.set_hexpand(True)
cons_scrolled_window.set_vexpand(True)
desc_scrolled_window.set_hexpand(True)
desc_scrolled_window.set_vexpand(True)
# Align labels to the right
# See https://developer.gnome.org/gtk3/stable/ch29s02.html
category_label.set_halign(gtk.Align.END)
organization_label.set_halign(gtk.Align.END)
url_label.set_halign(gtk.Align.END)
score_label.set_halign(gtk.Align.END)
title_label.set_halign(gtk.Align.END)
# Align labels to the left
# See https://developer.gnome.org/gtk3/stable/ch29s02.html
pros_label.set_halign(gtk.Align.START)
cons_label.set_halign(gtk.Align.START)
desc_label.set_halign(gtk.Align.START)
# Add the widgets to the container
self.attach(title_label, left=0, top=0, width=1, height=1)
self.attach(self.title_entry, left=1, top=0, width=3, height=1)
self.attach(category_label, left=0, top=1, width=1, height=1)
self.attach(self.category_combobox, left=1, top=1, width=1, height=1)
self.attach(organization_label, left=2, top=1, width=1, height=1)
self.attach(self.organization_entry, left=3, top=1, width=1, height=1)
self.attach(url_label, left=0, top=2, width=1, height=1)
self.attach(self.url_entry, left=1, top=2, width=1, height=1)
self.attach(score_label, left=2, top=2, width=1, height=1)
self.attach(self.score_spin_button, left=3, top=2, width=1, height=1)
self.attach(pros_label, left=0, top=3, width=2, height=1)
self.attach(cons_label, left=2, top=3, width=2, height=1)
self.attach(pros_scrolled_window, left=0, top=4, width=2, height=1)
self.attach(cons_scrolled_window, left=2, top=4, width=2, height=1)
self.attach(desc_label, left=0, top=5, width=4, height=1)
self.attach(desc_scrolled_window, left=0, top=6, width=4, height=6)
self.attach(add_button, left=0, top=13, width=2, height=1)
self.attach(cancel_button, left=2, top=13, width=2, height=1)
def saveCallBack(self, widget):
"""
Save the current job advert.
"""
# Get data from entry widgets ###########
category = self.category_combobox.get_active_text()
organization = self.organization_entry.get_text()
url = self.url_entry.get_text()
        tooltip = url.replace('&', '&amp;')  # escape ampersands for the tooltip markup
title = self.title_entry.get_text()
score = self.score_spin_button.get_value_as_int()
pros_buffer = self.pros_textview.get_buffer()
pros = pros_buffer.get_text(pros_buffer.get_start_iter(), pros_buffer.get_end_iter(), True)
cons_buffer = self.cons_textview.get_buffer()
cons = cons_buffer.get_text(cons_buffer.get_start_iter(), cons_buffer.get_end_iter(), True)
desc_buffer = self.desc_textview.get_buffer()
desc = desc_buffer.get_text(desc_buffer.get_start_iter(), desc_buffer.get_end_iter(), True)
if self.edit_mode:
date = self.job_adverts_model.json_database["job_adverts"][url]["date"]
else:
date = datetime.date.isoformat(datetime.date.today())
# Check data ############################
error_msg_list = []
if category is None:
error_msg_list.append("You must select a category.")
if len(url) == 0:
error_msg_list.append("You must enter an url.")
elif url in self.job_adverts_model.json_database["job_adverts"] and not self.edit_mode:
error_msg_list.append("This job advert already exists in the database.")
try:
if score not in range(6):
error_msg_list.append("The score must be a number between 0 and 5.")
except:
error_msg_list.append("The score must be a number between 0 and 5.")
# Save data or display error ############
if len(error_msg_list) == 0:
job_advert_dict = {"date": date,
"category": category,
"organization": organization,
"title": title,
"score": score,
"pros": pros,
"cons": cons,
"desc": desc}
# Save the job advert in the database
self.job_adverts_model.json_database["job_adverts"][url] = job_advert_dict
# Save the job advert in the JSON file
self.job_adverts_model.save_json_file()
# Update the GtkListStore (TODO: redundant with the previous JSON data structure)
if self.edit_mode:
model, treeiter = self.treeview.get_selection().get_selected()
self.job_adverts_model.liststore.set_value(treeiter, 2, category) # category
self.job_adverts_model.liststore.set_value(treeiter, 3, organization) # organization
self.job_adverts_model.liststore.set_value(treeiter, 4, score) # score
self.job_adverts_model.liststore.set_value(treeiter, 6, title) # title
else:
self.job_adverts_model.liststore.append([url, tooltip, category, organization, score, date, title])
# Clear all entries
self.clearCallBack()
else:
dialog = gtk.MessageDialog(self.main_window, 0, gtk.MessageType.ERROR, gtk.ButtonsType.OK, "Error")
dialog.format_secondary_text("\n".join(error_msg_list))
dialog.run()
dialog.destroy()
def clearCallBack(self, widget=None, data=None):
if self.edit_mode:
# Clear the current form: reset the entry widgets to their default value.
model, treeiter = self.treeview.get_selection().get_selected()
url = None
if treeiter != None:
url = self.job_adverts_model.liststore[treeiter][0]
if url is None:
self.url_entry.set_text("")
self.category_combobox.set_active(-1) # -1 = no active item selected
self.organization_entry.set_text("")
self.score_spin_button.set_value(0)
self.title_entry.set_text("")
self.pros_textview.get_buffer().set_text("")
self.cons_textview.get_buffer().set_text("")
self.desc_textview.get_buffer().set_text("")
else:
category = self.job_adverts_model.json_database["job_adverts"][url]["category"]
organization = self.job_adverts_model.json_database["job_adverts"][url]["organization"]
score = self.job_adverts_model.json_database["job_adverts"][url]["score"]
title = self.job_adverts_model.json_database["job_adverts"][url]["title"]
pros = self.job_adverts_model.json_database["job_adverts"][url]["pros"]
cons = self.job_adverts_model.json_database["job_adverts"][url]["cons"]
desc = self.job_adverts_model.json_database["job_adverts"][url]["desc"]
self.url_entry.set_text(url)
self.category_combobox.set_active(category_list.CATEGORY_LIST.index(category))
self.organization_entry.set_text(organization)
self.score_spin_button.set_value(score)
self.title_entry.set_text(title)
self.pros_textview.get_buffer().set_text(pros)
self.cons_textview.get_buffer().set_text(cons)
self.desc_textview.get_buffer().set_text(desc)
else:
# Clear all entries except "category_combobox" and "organization_entry"
self.url_entry.set_text("")
#self.organization_entry.set_text("")
self.title_entry.set_text("")
self.score_spin_button.set_value(DEFAULT_SCORE)
self.pros_textview.get_buffer().set_text("")
self.cons_textview.get_buffer().set_text("")
self.desc_textview.get_buffer().set_text("")
```
#### File: job_advert_manager/jobmanager/search_container.py
```python
from gi.repository import Gtk as gtk
import os
import datetime
import json
import webbrowser
JSON_FILENAME = "~/job_adverts_web_sites.json"
JOB_SEARCH_TREE_VIEW_COLUMN_LABEL_LIST = ["Url", "Tooltip", "Name", "Category", "Last visit", "Today status"]
TODAY_STATUS_LIST = ["None", "Partial", "Full"]
class SearchContainer(gtk.Box):
def __init__(self, job_adverts_model):
super(SearchContainer, self).__init__(orientation=gtk.Orientation.VERTICAL, spacing=6)
self.set_border_width(18)
self.job_adverts_model = job_adverts_model
# Load the JSON database
# {"url": {"label": "", "category": ""}, ...}
self.json_database = {}
try:
fd = open(os.path.expanduser(JSON_FILENAME), "r")
self.json_database = json.load(fd)
fd.close()
except FileNotFoundError:
pass
# Creating the Combo Status ListStore model
liststore_today_status = gtk.ListStore(str)
for item in TODAY_STATUS_LIST:
liststore_today_status.append([item])
# Creating the TreeView ListStore model
# {"url": {"date": "status", ...}, ...}
self.liststore_job_search = gtk.ListStore(str, str, str, str, str, str)
for url, web_site_dict in self.json_database.items():
            tooltip = url.replace('&', '&amp;')  # escape ampersands for the tooltip markup
label = web_site_dict["label"]
category = web_site_dict["category"]
today_datetime = datetime.datetime.today()
today_iso_str = datetime.date.isoformat(today_datetime)
try:
today_status = self.job_adverts_model.json_database["job_searchs"][url][today_iso_str]
except KeyError:
today_status = "None"
num_days_since_last_visit_str = self.set_last_visit_field_in_model(url)
self.liststore_job_search.append([url, tooltip, label, category, num_days_since_last_visit_str, today_status])
# Creating the treeview, making it use the filter as a model, and
# adding the columns
job_search_treeview = gtk.TreeView(self.liststore_job_search)
for column_index, column_title in enumerate(JOB_SEARCH_TREE_VIEW_COLUMN_LABEL_LIST):
if column_title == "Today status":
renderer = gtk.CellRendererCombo()
else:
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(column_title, renderer, text=column_index)
column.set_resizable(True) # Let the column be resizable
if column_title in ("Url", "Tooltip"):
column.set_visible(False) # Hide the "url" column (this column should not be displayed but is required for tooltip and webbrowser redirection)
if column_title == "Name":
column.set_sort_column_id(2)
elif column_title == "Category":
column.set_sort_column_id(3)
elif column_title == "Last visit":
column.set_sort_column_id(4)
elif column_title == "Today status":
column.set_sort_column_id(5)
#if column_title == "Last visit":
# if self.liststore_job_search[...][4] = "-"
# renderer.set_property('cell-background', 'red')
# elif self.liststore_job_search[...][4] = "-"
# renderer.set_property('cell-background', 'green')
# else:
# renderer.set_property('cell-background', 'orange')
if column_title == "Today status":
renderer.set_property("editable", True)
renderer.set_property("model", liststore_today_status)
renderer.set_property("text-column", 0)
renderer.set_property("has-entry", False)
renderer.connect("edited", self.on_combo_changed_cb)
#renderer.set_property('cell-background', 'red')
#renderer.set_property('cell-background', 'orange')
#renderer.set_property('cell-background', 'green')
job_search_treeview.append_column(column)
job_search_treeview.set_tooltip_column(1) # set the tooltip
# Connect to the "row-activated" signal (double click)
job_search_treeview.connect("row-activated", treeview_double_click_cb)
#select = job_search_treeview.get_selection()
#select.connect("changed", self.treeview_selection_changed_cb)
# Scrolled window
adverts_src_scrolled_window = gtk.ScrolledWindow()
adverts_src_scrolled_window.set_border_width(18)
adverts_src_scrolled_window.set_shadow_type(gtk.ShadowType.IN)
adverts_src_scrolled_window.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.ALWAYS)
adverts_src_scrolled_window.add(job_search_treeview)
self.pack_start(adverts_src_scrolled_window, expand=True, fill=True, padding=0)
def on_combo_changed_cb(self, widget, path, text):
# Liststore
self.liststore_job_search[path][5] = text
# Json
url = self.liststore_job_search[path][0]
today = datetime.date.isoformat(datetime.date.today())
if url not in self.job_adverts_model.json_database["job_searchs"]:
self.job_adverts_model.json_database["job_searchs"][url] = {}
self.job_adverts_model.json_database["job_searchs"][url][today] = text
# Save the JSON file
self.job_adverts_model.save_json_file()
# Update 'Last Visit' field in the model
num_days_since_last_visit_str = self.set_last_visit_field_in_model(url)
self.liststore_job_search[path][4] = num_days_since_last_visit_str
def set_last_visit_field_in_model(self, url):
today_datetime = datetime.datetime.today()
today_iso_str = datetime.date.isoformat(today_datetime)
try:
if len(self.job_adverts_model.json_database["job_searchs"][url]) > 0:
# FULL
filtered_date_iso_str_list = [key for (key, val) in self.job_adverts_model.json_database["job_searchs"][url].items() if val=='Full']
last_date_iso_str = sorted(filtered_date_iso_str_list)[-1]
last_datetime = datetime.datetime.strptime(last_date_iso_str, "%Y-%m-%d")
num_days_since_last_full_visit = (today_datetime - last_datetime).days
# PARTIAL
filtered_date_iso_str_list = [key for (key, val) in self.job_adverts_model.json_database["job_searchs"][url].items() if val in ('Full', 'Partial')]
last_date_iso_str = sorted(filtered_date_iso_str_list)[-1]
last_datetime = datetime.datetime.strptime(last_date_iso_str, "%Y-%m-%d")
num_days_since_last_partial_visit = (today_datetime - last_datetime).days
num_days_since_last_visit_str = "{} - {}".format(num_days_since_last_full_visit, num_days_since_last_partial_visit)
else:
num_days_since_last_visit_str = "-"
except KeyError:
num_days_since_last_visit_str = "-"
return num_days_since_last_visit_str
def treeview_double_click_cb(tree_view, tree_path, tree_view_column):
"""Inspired from http://stackoverflow.com/questions/17109634/hyperlink-in-cellrenderertext-markup"""
model = tree_view.get_model()
url = model[tree_path][0]
webbrowser.open(url)
```
|
{
"source": "jeremiedecock/mrif",
"score": 3
}
|
#### File: pywi/data/__init__.py
```python
import numpy as np
import os
# Inspired by https://github.com/scikit-image/scikit-image/blob/master/skimage/data/__init__.py
data_dir = os.path.abspath(os.path.dirname(__file__))
__all__ = ['galaxy']
def galaxy():
"""Gray-level "galaxy" image.
Often used for tutorials and examples.
This is the Whirlpool Galaxy, also known as M51 or NGC 5194.
Credits: NASA and The Hubble Heritage Team (STScI/AURA), 5 April 2001.
Copyright
---------
This file is in the public domain because it was created by NASA
and ESA. NASA Hubble material (and ESA Hubble material prior to 2009) is
copyright-free and may be freely used as in the public domain without fee,
on the condition that only NASA, STScI, and/or ESA is credited as the
source of the material.
(https://commons.wikimedia.org/wiki/File:Whirpool_Galaxy.jpg)
Sources
-------
- http://hubblesite.org/image/1038/news_release/2001-10
- https://commons.wikimedia.org/wiki/File:Whirpool_Galaxy.jpg
Returns
-------
galaxy : (256, 256) uint8 ndarray
Galaxy image.
"""
return np.load(os.path.join(data_dir, "galaxy.npy"))
```
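A minimal usage sketch (the `pywi.data` import path is inferred from the file header above):
```python
import matplotlib.pyplot as plt
from pywi.data import galaxy

img = galaxy()                 # (256, 256) uint8 ndarray
print(img.shape, img.dtype)
plt.imshow(img, cmap="gray", origin="lower")
plt.title("Whirlpool Galaxy (M51 / NGC 5194)")
plt.show()
```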
#### File: pywi/io/fits.py
```python
__all__ = ['load_fits_image',
'save_fits_image']
from astropy.io import fits
# EXCEPTIONS #################################################################
class FitsError(Exception):
pass
class WrongHDUError(FitsError):
"""Exception raised when trying to access a wrong HDU in a FITS file.
Attributes:
file_path -- the FITS file concerned by the error
hdu_index -- the HDU index concerned by the error
"""
def __init__(self, file_path, hdu_index):
super().__init__("File {} doesn't have data in HDU {}.".format(file_path, hdu_index))
self.file_path = file_path
self.hdu_index = hdu_index
class NotAnImageError(FitsError):
"""Exception raised when trying to load a FITS file which doesn't contain a
valid image in the given HDU.
Attributes:
file_path -- the FITS file concerned by the error
hdu_index -- the HDU index concerned by the error
"""
def __init__(self, file_path, hdu_index):
super().__init__("HDU {} in file {} doesn't contain any image.".format(hdu_index, file_path))
self.file_path = file_path
self.hdu_index = hdu_index
class WrongDimensionError(FitsError):
""" Exception raised when trying to save a FITS with more than 3 dimensions
or less than 2 dimensions.
"""
def __init__(self):
super().__init__("The input image should be a 2D or a 3D numpy array.")
class WrongFitsFileStructure(FitsError):
"""Exception raised when trying to load a FITS file which doesn't contain a
valid structure (for benchmark).
Attributes:
file_path -- the FITS file concerned by the error
"""
def __init__(self, file_path):
super().__init__("File {} doesn't contain a valid structure.".format(file_path))
self.file_path = file_path
##############################################################################
def load_fits_image(input_file_path, hdu_index=0):
"""Return the image array contained in the given HDU of the given FITS file.
Parameters
----------
input_file_path : str
The path of the FITS file to load
hdu_index : int
The HDU to load within the FITS file (one FITS file can contain several
images stored in different HDU)
Returns
-------
ndarray
The loaded image
Raises
------
WrongHDUError
If `input_file_path` doesn't contain the HDU `hdu_index`
NotAnImageError
If `input_file_path` doesn't contain a valid image in the HDU
`hdu_index`
"""
hdu_list = fits.open(input_file_path) # open the FITS file
if not (0 <= hdu_index < len(hdu_list)):
hdu_list.close()
raise WrongHDUError(input_file_path, hdu_index)
hdu = hdu_list[hdu_index]
if not hdu.is_image:
hdu_list.close()
raise NotAnImageError(input_file_path, hdu_index)
image_array = hdu.data # "hdu.data" is a Numpy Array
hdu_list.close()
return image_array
def save_fits_image(image_array, output_file_path):
"""Save the `image_array` image (array_like) to the `output_file_path` FITS file.
Parameters
----------
image_array : array_like
The image to save (should be a 2D or a 3D numpy array)
output_file_path : str
The path of the FITS file where to save the `image_array`
Raises
------
WrongDimensionError
If `image_array` has more than 3 dimensions or less than 2 dimensions.
"""
if image_array.ndim not in (2, 3):
raise WrongDimensionError()
hdu = fits.PrimaryHDU(image_array)
hdu.writeto(output_file_path, overwrite=True) # overwrite=True: overwrite the file if it already exists
```
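A hedged sketch of the intended error handling (file names are hypothetical):
```python
from pywi.io.fits import (load_fits_image, save_fits_image,
                          WrongHDUError, NotAnImageError)

try:
    img = load_fits_image("observation.fits", hdu_index=0)   # hypothetical file
except WrongHDUError as err:
    print("HDU {} is missing in {}".format(err.hdu_index, err.file_path))
except NotAnImageError as err:
    print("HDU {} of {} holds no image".format(err.hdu_index, err.file_path))
else:
    save_fits_image(img, "observation_copy.fits")
```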
#### File: pywi/io/images.py
```python
__all__ = ['fill_nan_pixels',
'image_files_in_dir',
'image_files_in_paths',
'load_image',
'save_image']
import numpy as np
import os
from pywi.io.pil import load_pil_image, save_pil_image
from pywi.io.fits import load_fits_image, save_fits_image
from pywi.io.plot import plot
DEBUG = False
# FILL NAN PIXELS #############################################################
def fill_nan_pixels(image, noise_distribution=None):
"""Replace *in-place* `NaN` values in `image` by zeros or by random noise.
Images containing `NaN` values generate undesired harmonics with wavelet
image cleaning. This function should be used to "fix" images before each
wavelet image cleaning.
Replace `NaN` ("Not a Number") values in `image` by zeros if
`noise_distribution` is `None`.
Otherwise, `NaN` values are replaced by random noise drawn by the
`noise_distribution` random generator.
Parameters
----------
image : array_like
The image to process. `NaN` values are replaced **in-place** thus this
function changes the provided object.
noise_distribution : `pywi.denoising.inverse_transform_sampling.EmpiricalDistribution`
The random generator to use to replace `NaN` pixels by random noise.
Returns
-------
array_like
Returns a boolean mask array indicating whether pixels in `images`
initially contained `NaN` values (`True`) of not (`False`). This array
is defined by the instruction `np.isnan(image)`.
Notes
-----
`NaN` values are replaced **in-place** in the provided `image`
parameter.
Examples
--------
>>> import numpy as np
>>> img = np.array([[1, 2, np.nan],[4, np.nan, 6],[np.nan, 8, np.nan]])
>>> fill_nan_pixels(img)
... # doctest: +NORMALIZE_WHITESPACE
array([[False, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
>>> img
... # doctest: +NORMALIZE_WHITESPACE
array([[ 1., 2., 0.],
[ 4., 0., 6.],
[ 0., 8., 0.]])
"""
# See https://stackoverflow.com/questions/29365194/replacing-missing-values-with-random-in-a-numpy-array
nan_mask = np.isnan(image)
if DEBUG:
print(image)
plot(image, "In")
plot(nan_mask, "Mask")
if noise_distribution is not None:
nan_noise_size = np.count_nonzero(nan_mask)
image[nan_mask] = noise_distribution.rvs(size=nan_noise_size)
else:
image[nan_mask] = 0
if DEBUG:
print(image)
plot(image, "Noise injected")
return nan_mask
# DIRECTORY PARSER ############################################################
def image_files_in_dir(directory_path, max_num_files=None, file_ext=(".fits", ".fit")):
"""Return the path of FITS and Simtel files in `directory_path`.
Return the path of all (or `max_num_files`) files having the extension
".simtel", ".simtel.gz", ".fits" or ".fit" in `directory_path`.
Parameters
----------
directory_path : str
The directory's path where FITS and Simtel files are searched.
max_num_files : int
The maximum number of files to return.
Yields
------
str
The path of the next FITS or Simtel files in `directory_path`.
"""
directory_path = os.path.expanduser(directory_path)
files_counter = 0
for file_name in os.listdir(directory_path):
file_path = os.path.join(directory_path, file_name)
if os.path.isfile(file_path) and file_name.lower().endswith(file_ext):
files_counter += 1
if (max_num_files is not None) and (files_counter > max_num_files):
break
else:
yield file_path
def image_files_in_paths(path_list, max_num_files=None):
"""Return the path of FITS and Simtel files in `path_list`.
Return the path of all (or `max_num_files`) files having the extension
".simtel", ".simtel.gz", ".fits" or ".fit" in `path_list`.
Parameters
----------
path_list : str
The list of directory's path where FITS and Simtel files are searched.
It can also directly contain individual file paths (or a mix of files
and directories path).
max_num_files : int
The maximum number of files to return.
Yields
------
str
The path of the next FITS or Simtel files in `path_list`.
"""
files_counter = 0
for path in path_list:
if os.path.isdir(path):
# If path is a directory
for file_path in image_files_in_dir(path):
files_counter += 1
if (max_num_files is not None) and (files_counter > max_num_files):
break
else:
yield file_path
elif os.path.isfile(path):
# If path is a regular file
files_counter += 1
if (max_num_files is not None) and (files_counter > max_num_files):
break
else:
yield path
else:
raise Exception("Wrong item:", path)
# LOAD AND SAVE FITS FILES ###################################################
def load_image(input_file_path, **kwargs):
"""Return the image array contained in the given image file.
So far, this function convert all multi-channel input images as
mono-channel grayscale.
The list of supported formats is available in the following page:
https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
Fits format is also supported thanks to astropy.
Parameters
----------
input_file_path : str
The path of the image file to load
Returns
-------
ndarray
The loaded image
"""
if input_file_path.lower().endswith((".fits", ".fit")):
# FITS FILES
image_array = load_fits_image(input_file_path, **kwargs)
else:
image_array = load_pil_image(input_file_path, **kwargs)
return image_array
def save_image(image_array, output_file_path, **kwargs):
"""Save the image array `image` in the given file `output_file_path`.
The list of supported formats is available in the following page:
https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
Fits format is also supported thanks to astropy.
Parameters
----------
image : array_like
The image to save
output_file_path : str
The destination path of the image
"""
if output_file_path.lower().endswith((".fits", ".fit")):
# FITS FILES
save_fits_image(image_array, output_file_path)
else:
save_pil_image(image_array, output_file_path)
# DEBUG #######################################################################
def export_image_as_plain_text(image, output_file_path):
fd = open(output_file_path, 'w')
for x in image:
for y in x:
print("{:5.2f}".format(y), end=" ", file=fd)
print("", file=fd)
fd.close()
```
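A short sketch combining the helpers above: iterate over the FITS files of a (hypothetical) directory, load each image and zero out its NaN pixels in place:
```python
from pywi.io.images import image_files_in_paths, load_image, fill_nan_pixels

for path in image_files_in_paths(["./fits_data"], max_num_files=3):
    img = load_image(path).astype(float)   # float copy so np.isnan applies
    nan_mask = fill_nan_pixels(img)        # NaN pixels replaced by zeros, in place
    print(path, img.shape, "NaN pixels filled:", int(nan_mask.sum()))
```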
#### File: io/tests/test_images.py
```python
from pywi.io import images
from pywi.io.fits import WrongHDUError, NotAnImageError, WrongDimensionError, WrongFitsFileStructure
import numpy as np
import os
import tempfile
import unittest
class TestImages(unittest.TestCase):
"""
Contains unit tests for the "io.images" module.
"""
# Test the "save" and "load_image" functions ####################################
def test_load_and_save(self):
"""Check the `images.load_image` and `images.save_image` functions."""
img = np.random.randint(128, size=(4, 6))
# Make a temporary directory to store fits files
with tempfile.TemporaryDirectory() as temp_dir_path:
img_path = os.path.join(temp_dir_path, "test.fits")
# Save the image
images.save_image(img, img_path)
# Load the saved image
loaded_img = images.load_image(img_path)
# Check img vs loaded_img
np.testing.assert_array_equal(img, loaded_img)
# The temporary directory and all its contents are removed now
def test_load_and_save_with_nan(self):
"""Check the `images.load_image` and `images.save_image` functions."""
img = np.random.uniform(size=(4, 6))
img[1,1] = np.nan
# Make a temporary directory to store fits files
with tempfile.TemporaryDirectory() as temp_dir_path:
img_path = os.path.join(temp_dir_path, "test.fits")
# Save the image
images.save_image(img, img_path)
# Load the saved image
loaded_img = images.load_image(img_path)
# Check img vs loaded_img
np.testing.assert_array_equal(img, loaded_img)
# Check the NaN pixel value is kept
self.assertTrue(np.isnan(loaded_img[1,1]))
# The temporary directory and all its contents are removed now
# Test the "save" function exceptions #####################################
def test_save_wrong_dimension_error(self):
"""Check the call to `images.load_image` fails with an WrongDimensionError
when saved images have more than 3 dimensions or less than 2
dimensions."""
img_1d = np.random.randint(128, size=(3)) # Make a 1D image
img_2d = np.random.randint(128, size=(3, 3)) # Make a 2D image
img_3d = np.random.randint(128, size=(3, 3, 3)) # Make a 3D image
img_4d = np.random.randint(128, size=(3, 3, 3, 3)) # Make a 4D image
# Make a temporary directory to store fits files
with tempfile.TemporaryDirectory() as temp_dir_path:
img_path = os.path.join(temp_dir_path, "test.fits")
# Save the 1D image (should raise an exception)
with self.assertRaises(WrongDimensionError):
images.save_image(img_1d, img_path)
# Save the 2D image (should not raise any exception)
try:
images.save_image(img_2d, img_path)
except WrongDimensionError:
self.fail("images.save() raised WrongDimensionError unexpectedly!")
# Save the 3D image (should not raise any exception)
try:
images.save_image(img_3d, img_path)
except WrongDimensionError:
self.fail("images.save() raised WrongDimensionError unexpectedly!")
# Save the 4D image (should raise an exception)
with self.assertRaises(WrongDimensionError):
images.save_image(img_4d, img_path)
# The temporary directory and all its contents are removed now
# Test the "load_image" function exceptions #####################################
def test_load_wrong_hdu_error(self):
"""Check the call to `images.load_image` fails with a WrongHDUError
when trying to load a FITS image from an HDU index that does not exist."""
img = np.random.randint(128, size=(3, 3)) # Make a 2D image
# Make a temporary directory to store fits files
with tempfile.TemporaryDirectory() as temp_dir_path:
img_path = os.path.join(temp_dir_path, "test.fits")
# Save the image
images.save_image(img, img_path)
# Load the saved image (should raise an exception)
with self.assertRaises(WrongHDUError):
loaded_img = images.load_image(img_path, hdu_index=1000)
# Load the saved image (should not raise any exception)
try:
loaded_img = images.load_image(img_path, hdu_index=0)
except WrongHDUError:
self.fail("images.load_image() raised WrongHDUError unexpectedly!")
# The temporary directory and all its contents are removed now
if __name__ == '__main__':
unittest.main()
```
#### File: processing/transform/starlet.py
```python
__all__ = ['StarletError',
'WrongDimensionError',
'wavelet_transform',
'inverse_wavelet_transform']
"""Starlet Transform.
This module contains a "naive" (i.e. non-optimized) implementation of the
Starlet transform.
"""
import numpy as np
import warnings
try:
from numba import jit
except ModuleNotFoundError:
warnings.warn("Cannot use Numba. Switch to low performance mode.")
# Make a decorator that does nothing
def jit(f):
return f
from pywi.io import images
# CONSTANTS ##################################################################
AVAILABLE_LAST_SCALE_OPTIONS = ('keep', 'drop', 'mask', 'posmask')
DEFAULT_LAST_SCALE_TREATMENT = 'mask'
# EXCEPTIONS #################################################################
class StarletError(Exception):
"""Common `starlet` module's error."""
pass
class WrongDimensionError(StarletError):
"""Raised when data having a wrong number of dimensions is given.
Attributes
----------
msg : str
Explanation of the error.
"""
def __init__(self, msg=None):
if msg is None:
self.msg = "The data has a wrong number of dimension."
##############################################################################
@jit
def get_pixel_value(image, x, y, type_border):
if type_border == 0:
#try:
pixel_value = image[x, y]
return pixel_value
#except IndexError as e:
# return 0
elif type_border == 1:
num_lines, num_col = image.shape # TODO
x = x % num_lines
y = y % num_col
pixel_value = image[x, y]
return pixel_value
elif type_border == 2:
num_lines, num_col = image.shape # TODO
if x >= num_lines:
x = num_lines - 2 - x
elif x < 0:
x = abs(x)
if y >= num_col:
y = num_col - 2 - y
elif y < 0:
y = abs(y)
pixel_value = image[x, y]
return pixel_value
elif type_border == 3:
num_lines, num_col = image.shape # TODO
if x >= num_lines:
x = num_lines - 1 - x
elif x < 0:
x = abs(x) - 1
if y >= num_col:
y = num_col - 1 - y
elif y < 0:
y = abs(y) - 1
pixel_value = image[x, y]
return pixel_value
else:
raise ValueError()
@jit
def smooth_bspline(input_image, type_border, step_trou):
"""Apply a convolution kernel on the image using the "à trou" algorithm.
Pseudo code:
**convolve(scale, $s_i$):**
$c_0 \leftarrow 3/8$
$c_1 \leftarrow 1/4$
$c_2 \leftarrow 1/16$
$s \leftarrow \lfloor 2^{s_i} + 0.5 \rfloor$
**for** all columns $x_i$
$\quad$ **for** all rows $y_i$
$\quad\quad$ scale[$x_i$, $y_i$] $\leftarrow$ $c_0$ . scale[$x_i$, $y_i$] + $c_1$ . scale[$x_i-s$, $y_i$] + $c_1$ . scale[$x_i+s$, $y_i$] + $c_2$ . scale[$x_i-2s$, $y_i$] + $c_2$ . scale[$x_i+2s$, $y_i$]
**for** all columns $x_i$
$\quad$ **for** all rows $y_i$
$\quad\quad$ scale[$x_i$, $y_i$] $\leftarrow$ $c_0$ . scale[$x_i$, $y_i$] + $c_1$ . scale[$x_i$, $y_i-s$] + $c_1$ . scale[$x_i$, $y_i+s$] + $c_2$ . scale[$x_i$, $y_i-2s$] + $c_2$ . scale[$x_i$, $y_i+2s$]
Inspired by Sparce2D mr_transform (originally implemented in *isap/cxx/sparse2d/src/libsparse2d/IM_Smooth.cc* in the
*smooth_bspline()* function.
```cpp
void smooth_bspline (const Ifloat & Im_in,
Ifloat &Im_out,
type_border Type, int Step_trou) {
int Nl = Im_in.nl(); // num lines in the image
int Nc = Im_in.nc(); // num columns in the image
int i,j,Step;
float Coeff_h0 = 3. / 8.;
float Coeff_h1 = 1. / 4.;
float Coeff_h2 = 1. / 16.;
Ifloat Buff(Nl,Nc,"Buff smooth_bspline");
Step = (int)(pow((double)2., (double) Step_trou) + 0.5);
for (i = 0; i < Nl; i ++)
for (j = 0; j < Nc; j ++)
Buff(i,j) = Coeff_h0 * Im_in(i,j)
+ Coeff_h1 * ( Im_in (i, j-Step, Type)
+ Im_in (i, j+Step, Type))
+ Coeff_h2 * ( Im_in (i, j-2*Step, Type)
+ Im_in (i, j+2*Step, Type));
for (i = 0; i < Nl; i ++)
for (j = 0; j < Nc; j ++)
Im_out(i,j) = Coeff_h0 * Buff(i,j)
+ Coeff_h1 * ( Buff (i-Step, j, Type)
+ Buff (i+Step, j, Type))
+ Coeff_h2 * ( Buff (i-2*Step, j, Type)
+ Buff (i+2*Step, j, Type));
}
```
Parameters
----------
input_image
type_border
step_trou
Returns
-------
"""
input_image = input_image.astype('float64', copy=True)
coeff_h0 = 3. / 8.
coeff_h1 = 1. / 4.
coeff_h2 = 1. / 16.
num_lines, num_col = input_image.shape # TODO
buff = np.zeros(input_image.shape, dtype='float64')
img_out = np.zeros(input_image.shape, dtype='float64')
step = int(pow(2., step_trou) + 0.5)
for i in range(num_lines):
for j in range(num_col):
buff[i,j] = coeff_h0 * get_pixel_value(input_image, i, j, type_border)
buff[i,j] += coeff_h1 * ( get_pixel_value(input_image, i, j-step, type_border) \
+ get_pixel_value(input_image, i, j+step, type_border))
buff[i,j] += coeff_h2 * ( get_pixel_value(input_image, i, j-2*step, type_border) \
+ get_pixel_value(input_image, i, j+2*step, type_border))
for i in range(num_lines):
for j in range(num_col):
img_out[i,j] = coeff_h0 * get_pixel_value(buff, i, j, type_border)
img_out[i,j] += coeff_h1 * ( get_pixel_value(buff, i-step, j, type_border) \
+ get_pixel_value(buff, i+step, j, type_border))
img_out[i,j] += coeff_h2 * ( get_pixel_value(buff, i-2*step, j, type_border) \
+ get_pixel_value(buff, i+2*step, j, type_border))
return img_out
@jit
def wavelet_transform(input_image,
number_of_scales=4,
noise_distribution=None,
debug=False):
"""Compute the starlet transform of `input_image`.
Pseudo code:
**wavelet_transform(input_image, num_scales):**
scales[0] $\leftarrow$ input_image
**for** $i \in [0, \dots, \text{num_scales} - 2]$
$\quad$ scales[$i + 1$] $\leftarrow$ convolve(scales[$i$], $i$)
$\quad$ scales[$i$] $\leftarrow$ scales[$i$] - scales[$i + 1$]
    Inspired by Sparse2D mr_transform (originally implemented in *isap/cxx/sparse2d/src/libsparse2d/MR_Trans.cc*)
```cpp
static void mr_transform (Ifloat &Image,
MultiResol &MR_Transf,
Bool EdgeLineTransform,
type_border Border,
Bool Details) {
// [...]
MR_Transf.band(0) = Image;
for (s = 0; s < Nbr_Plan -1; s++) {
smooth_bspline (MR_Transf.band(s),MR_Transf.band(s+1),Border,s);
MR_Transf.band(s) -= MR_Transf.band(s+1);
}
// [...]
}
```
Parameters
----------
input_image : array_like
The input image to transform.
number_of_scales : int, optional
The number of scales used to transform `input_image` or in other words
the number of wavelet planes returned.
    noise_distribution : `EmpiricalDistribution`, optional
        The noise distribution used to fill 'empty' NaN pixels with
        appropriate random noise. If None, NaN pixels are filled
        with zeros (which may add unwanted harmonics in wavelet planes).
Returns
-------
list
Return a list containing the wavelet planes.
Raises
------
WrongDimensionError
If `input_image` is not a 2D array.
"""
input_image = input_image.astype('float64', copy=True)
if input_image.ndim != 2:
msg = "The data should be a 2D array."
raise WrongDimensionError(msg)
# INJECT NOISE IN NAN PIXELS ###########################################
# TODO: should this noise injection be done in the abstract 'run()' function ?
nan_mask = images.fill_nan_pixels(input_image, noise_distribution)
# DO THE WAVELET TRANSFORM #############################################
wavelet_planes_list = []
wavelet_planes_list.append(input_image)
for scale_index in range(number_of_scales - 1):
previous_scale = wavelet_planes_list[scale_index]
next_scale = smooth_bspline(previous_scale, 3, scale_index)
previous_scale -= next_scale
wavelet_planes_list.append(next_scale)
# INJECT NOISE IN NAN: PUT BACK NAN VALUES #############
if noise_distribution is not None:
for plane in wavelet_planes_list:
plane[nan_mask] = np.nan
return wavelet_planes_list
def inverse_wavelet_transform(wavelet_planes,
last_plane=DEFAULT_LAST_SCALE_TREATMENT):
"""Compute the inverse wavelet transform of `wavelet_planes`.
Parameters
----------
wavelet_planes : list of array_like
The wavelet planes to (inverse) transform.
last_plane : str, optional
Define what to do with the last plane: 'keep' to keep it in the inverse
transform, 'drop' to remove it in the inverse transform, 'mask' to keep
only pixels that are *significant* in the others planes.
Returns
-------
array_like
Return the cleaned image.
"""
output_image = np.zeros(wavelet_planes[0].shape)
for plane in wavelet_planes[0:-1]:
# Sum all planes except the last one (residuals plane)
output_image += plane
# Apply a special treatment with the last plane (residuals plane)
if last_plane == "keep":
# Keep the last plane
output_image += wavelet_planes[-1]
elif last_plane == "mask":
# Only keep last plane's pixels that are *significant* in the others planes
significant_pixels_mask = np.zeros(wavelet_planes[0].shape)
for plane in wavelet_planes[0:-1]:
significant_pixels_mask[plane != 0] = 1
output_image += wavelet_planes[-1] * significant_pixels_mask
elif last_plane == "posmask":
# Only keep last plane's pixels that are *significant* with a *positive coefficient* in the others planes
significant_pixels_mask = np.zeros(wavelet_planes[0].shape)
for plane in wavelet_planes[0:-1]:
significant_pixels_mask[plane > 0] = 1
output_image += wavelet_planes[-1] * significant_pixels_mask
return output_image
```
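For orientation, here is a minimal usage sketch of how the decomposition and reconstruction functions above are typically chained, assuming the module (with its NumPy, `jit` and `pywi.io.images` imports) is available as in the file; the hard-threshold step is a hypothetical filter, not part of this module.
```python
import numpy as np

# Hypothetical 2D test image: Gaussian noise plus a small bright blob.
image = np.random.normal(size=(64, 64))
image[30:34, 30:34] += 10.

# Starlet decomposition: 3 wavelet (detail) planes + 1 residual plane.
planes = wavelet_transform(image, number_of_scales=4)

# Hypothetical hard filtering: zero out small coefficients in the detail planes,
# keep the residual plane untouched.
filtered = [np.where(np.abs(p) > 3., p, 0.) for p in planes[:-1]] + [planes[-1]]

# Reconstruction; "mask" keeps only residual pixels flagged in the other planes.
cleaned = inverse_wavelet_transform(filtered, last_plane="mask")
assert cleaned.shape == image.shape
```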
#### File: pywi/ui/commons.py
```python
import copy
import datetime
import json
import os
import numpy as np
import random
import sys
import time
import traceback
from pywi.benchmark.metrics.refbased import mse
from pywi.processing.filtering.pixel_clusters import filter_pixels_clusters_stats
from pywi.processing.filtering.pixel_clusters import number_of_pixels_clusters
from pywi.io.images import image_generator
import pywi.io.images
###############################################################################
class AbstractCleaningAlgorithm(object):
"""A convenient optional wrapper to simplify the image cleaning analysis.
Common processing to run and assess the image cleaning procedure on a set
of images and save results. This class gather some common procedures to
avoid code duplication in image cleaning modules:
* call the cleaning algorithm on an image set;
* assess the cleaning procedure using a set of estimators;
* apply various pre-processing and post-processing procedures (e.g.
geometry conversion);
* collect and save metadata, results and intermediate values that are
useful for analysis;
* measure and save the execution time;
* manage exceptions;
* ...
This abstract class is supposed to be inherited by the others image
cleaning classes."""
def __init__(self):
self.label = "Unknown" # Name to show in plots
self.verbose = False # Debug mode
def __call__(self, *pargs, **kargs):
return self.clean_image(*pargs, **kargs)
def __str__(self):
return "{}".format(self.algorithm_label)
def run(self,
cleaning_function_params,
input_file_or_dir_path_list,
benchmark_method,
output_file_path,
plot=False,
saveplot=None,
ref_img_as_input=False, # A hack to easily produce CSV files...
max_num_img=None,
debug=False):
"""A convenient optional wrapper to simplify the image cleaning analysis.
Apply the image cleaning analysis on `input_file_or_dir_path_list`,
apply some pre-processing and post-processing procedures, collect and
return results, intermediate values and metadata.
Parameters
----------
cleaning_function_params
A dictionary containing the parameters required for the image
cleaning method.
input_file_or_dir_path_list
A list of file to clean. Can be a list of simtel files, fits files
or directories containing such files.
benchmark_method
The list of estimators to use to assess the image cleaning. If
`None`, images are cleaned but nothing is returned (can be used
with e.g. the `plot` and/or `saveplot` options).
output_file_path
The result file path (a JSON file).
plot
The result of each cleaning is plot if `True`.
saveplot
The result of each cleaning is saved if `True`.
ref_img_as_input
            This option is a hack to easily produce "flattened" CSV result
            files.
max_num_img
The number of images to process among the input set
(`input_file_or_dir_path_list`).
debug
Stop the execution and print the full traceback when an exception
is encountered if this parameter is `True`. Report exceptions and
continue with the next input image if this parameter is `False`.
Returns
-------
dict
Results, intermediate values and metadata.
"""
launch_time = time.perf_counter()
if benchmark_method is not None:
io_list = [] # The list of returned dictionaries
for image in image_generator(input_file_or_dir_path_list,
max_num_images=max_num_img):
input_file_path = image.meta['file_path']
if self.verbose:
print(input_file_path)
# `image_dict` contains metadata (to be returned) on the current image
image_dict = {"input_file_path": input_file_path}
try:
# READ THE INPUT FILE #####################################
reference_img = image.reference_image
pixels_position = image.pixels_position
if ref_img_as_input:
# This option is a hack to easily produce CSV files with
# the "null_ref" "cleaning" module...
input_img = copy.deepcopy(reference_img)
else:
input_img = image.input_image
image_dict.update(image.meta)
if benchmark_method is not None:
# FETCH ADDITIONAL IMAGE METADATA #####################
delta_pe, delta_abs_pe, delta_num_pixels = filter_pixels_clusters_stats(reference_img) # TODO: NaN
num_islands = number_of_pixels_clusters(reference_img) # TODO: NaN
image_dict["img_ref_islands_delta_pe"] = delta_pe
image_dict["img_ref_islands_delta_abs_pe"] = delta_abs_pe
image_dict["img_ref_islands_delta_num_pixels"] = delta_num_pixels
image_dict["img_ref_num_islands"] = num_islands
image_dict["img_ref_sum_pe"] = float(np.nansum(reference_img))
image_dict["img_ref_min_pe"] = float(np.nanmin(reference_img))
image_dict["img_ref_max_pe"] = float(np.nanmax(reference_img))
image_dict["img_ref_num_pix"] = int( (reference_img[np.isfinite(reference_img)] > 0).sum() )
image_dict["img_in_sum_pe"] = float(np.nansum(input_img))
image_dict["img_in_min_pe"] = float(np.nanmin(input_img))
image_dict["img_in_max_pe"] = float(np.nanmax(input_img))
image_dict["img_in_num_pix"] = int( (input_img[np.isfinite(input_img)] > 0).sum() )
# CLEAN THE INPUT IMAGE ###################################
# Copy the image (otherwise some cleaning functions like Tailcut may change it)
#input_img_copy = copy.deepcopy(input_img)
input_img_copy = input_img.astype('float64', copy=True)
cleaning_function_params["output_data_dict"] = {}
initial_time = time.perf_counter()
cleaned_img = self.clean_image(input_img_copy, **cleaning_function_params) # TODO: NaN
full_clean_execution_time_sec = time.perf_counter() - initial_time
if benchmark_method is not None:
image_dict.update(cleaning_function_params["output_data_dict"])
del cleaning_function_params["output_data_dict"]
# ASSESS OR PRINT THE CLEANED IMAGE #######################
if benchmark_method is not None:
# ASSESS THE CLEANING #################################
kwargs = {} # TODO GEOM
score = mse(cleaned_img, reference_img)
image_dict["score"] = [score]
image_dict["score_name"] = ["mse"]
image_dict["full_clean_execution_time_sec"] = full_clean_execution_time_sec
image_dict["img_cleaned_sum_pe"] = float(np.nansum(cleaned_img))
image_dict["img_cleaned_min_pe"] = float(np.nanmin(cleaned_img))
image_dict["img_cleaned_max_pe"] = float(np.nanmax(cleaned_img))
image_dict["img_cleaned_num_pix"] = int( (cleaned_img[np.isfinite(cleaned_img)] > 0).sum() )
# PLOT IMAGES #########################################################
if plot or (saveplot is not None):
image_list = [input_img, reference_img, cleaned_img]
title_list = ["Input image", "Reference image", "Cleaned image"]
if plot:
pywi.io.images.plot_list(image_list,
title_list=title_list,
metadata_dict=image.meta)
if saveplot is not None:
plot_file_path = saveplot
print("Saving {}".format(plot_file_path))
pywi.io.images.mpl_save_list(image_list,
output_file_path=plot_file_path,
title_list=title_list,
metadata_dict=image.meta)
except Exception as e:
print("Abort image {}: {} ({})".format(input_file_path, e, type(e)))
if debug:
                    # The following line prints the full traceback
traceback.print_tb(e.__traceback__, file=sys.stdout)
if benchmark_method is not None:
# http://docs.python.org/2/library/sys.html#sys.exc_info
exc_type, exc_value, exc_traceback = sys.exc_info() # most recent (if any) by default
'''
                    Keeping a reference to the traceback object can be risky: if another
                    (unhandled) exception happens after this point, or if the local names
                    are not deleted (on slightly older versions of Python), the reference
                    created here can linger and delay garbage collection.
                    traceback.format_exc/print_exc do the very same thing, but within a
                    temporary scope inside the function.
'''
error_dict = {
'filename': exc_traceback.tb_frame.f_code.co_filename,
'lineno' : exc_traceback.tb_lineno,
'name' : exc_traceback.tb_frame.f_code.co_name,
'type' : exc_type.__name__,
#'message' : exc_value.message
'message' : str(e)
}
del(exc_type, exc_value, exc_traceback) # So we don't leave our local labels/objects dangling
# This still isn't "completely safe", though!
#error_dict = {"type": str(type(e)),
# "message": str(e)}
image_dict["error"] = error_dict
finally:
if benchmark_method is not None:
io_list.append(image_dict)
if benchmark_method is not None:
error_list = [image_dict["error"] for image_dict in io_list if "error" in image_dict]
print("{} images aborted".format(len(error_list)))
# GENERAL EXPERIMENT METADATA
output_dict = {}
output_dict["benchmark_execution_time_sec"] = str(time.perf_counter() - launch_time)
output_dict["date_time"] = str(datetime.datetime.now())
output_dict["class_name"] = self.__class__.__name__
output_dict["algo_code_ref"] = str(self.__class__.clean_image.__code__)
output_dict["label"] = self.label
output_dict["cmd"] = " ".join(sys.argv)
output_dict["algo_params"] = cleaning_function_params
if "noise_distribution" in output_dict["algo_params"]:
del output_dict["algo_params"]["noise_distribution"] # not JSON serializable...
output_dict["benchmark_method"] = benchmark_method
output_dict["system"] = " ".join(os.uname())
output_dict["io"] = io_list
with open(output_file_path, "w") as fd:
json.dump(output_dict, fd, sort_keys=True, indent=4) # pretty print format
return output_dict
```
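As a usage illustration, here is a minimal sketch of how this wrapper is intended to be subclassed and driven; `ThresholdCleaning`, its `cutoff` parameter and the input path are hypothetical, only the `clean_image()`/`run()` contract comes from the class above.
```python
import numpy as np

class ThresholdCleaning(AbstractCleaningAlgorithm):
    """Hypothetical cleaning algorithm: zero out pixels below a fixed cutoff."""

    def __init__(self):
        super().__init__()
        self.label = "Threshold"                       # name shown in plots

    def clean_image(self, input_img, cutoff=5., output_data_dict=None, **kwargs):
        cleaned = input_img.copy()
        cleaned[cleaned < cutoff] = 0.
        if output_data_dict is not None:               # intermediate values to report
            output_data_dict["num_kept_pixels"] = int((cleaned > 0).sum())
        return cleaned

# Hypothetical run: clean the FITS images of a directory and write a JSON report.
algo = ThresholdCleaning()
algo.run({"cutoff": 5.},
         ["./fits_files/"],                            # files or directories
         benchmark_method="mse",
         output_file_path="score_threshold.json")
```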
|
{
"source": "jeremiedecock/opencal-gui-pyqt",
"score": 2
}
|
#### File: qt/widgets/plot.py
```python
from opencal.core.data import card_list_to_dataframes
import numpy as np
import pandas as pd
import datetime
import matplotlib.dates as mdates
import math
from PyQt5.QtWidgets import QSizePolicy
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
#import seaborn as sns
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
BLUE = '#1f77b4'
RED = '#d62728'
YELLOW = '#ff7f0e'
GREEN = '#2ca02c'
def plot_card_addition(df, ax):
#tk1 = list(reversed(-np.arange(0, -df.wushift.min(), 30)))
#tk2 = list(np.arange(0, df.wushift.max(), 30))
df.loc[df.cdate > datetime.datetime.now() - datetime.timedelta(days=30)].groupby("cdate").hidden.count().plot(x='cdate',
y='hidden',
kind='bar',
color=BLUE,
#yticks=tk1 + tk2,
ax=ax)
# set locator
#self.ax2.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=MO))
##self.ax2.xaxis.set_minor_locator(mdates.DayLocator(interval=1))
# set formatter
##self.ax2.xaxis.set_major_formatter(mdates.DateFormatter('%a %d-%m'))
#self.ax2.xaxis.set_minor_formatter(mdates.DateFormatter('%d'))
# set font and rotation for date tick labels
#self.fig.autofmt_xdate()
ax.grid(True, axis="y", linestyle=':', alpha=0.75)
ax.set_title("Card addition")
ax.set_xlabel("")
ax.set_ylabel("Number of cards")
if ax.get_legend() is not None:
ax.get_legend().remove()
def plot_card_review(df, ax):
#tk1 = list(reversed(-np.arange(0, -df.wushift.min(), 30)))
#tk2 = list(np.arange(0, df.wushift.max(), 30))
df.loc[df.rdate > datetime.datetime.now() - datetime.timedelta(days=30)].groupby("rdate").result.count().plot(x='rdate',
y='result',
kind='bar',
color=BLUE,
#yticks=tk1 + tk2,
ax=ax)
ax.grid(True, axis="y", linestyle=':', alpha=0.75)
ax.set_title("Card review")
ax.set_xlabel("")
ax.set_ylabel("Number of cards")
if ax.get_legend() is not None:
ax.get_legend().remove()
class PlotCanvas(FigureCanvas):
"""This is a Matplotlib QWidget.
See https://matplotlib.org/examples/user_interfaces/embedding_in_qt5.html
"""
def __init__(self, card_list, parent, width=5, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
nrows = 1
ncols = 2
self.ax1 = self.fig.add_subplot(nrows, ncols, 1)
self.ax2 = self.fig.add_subplot(nrows, ncols, 2)
self.card_list = card_list
self.compute_initial_figure()
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
self.update_figure(init=True)
def update_figure(self, init=False):
if not init:
self.ax1.cla()
self.ax2.cla()
try:
card_df, review_df = card_list_to_dataframes(self.card_list)
#df['date_fmt'] = df['date'].dt.strftime('%a %d/%m')
#df.loc[::2, 'date_fmt'] = ''
###################################
plot_card_addition(card_df, self.ax1)
plot_card_review(review_df, self.ax2)
###################################
self.fig.tight_layout()
except IndexError as e:
            # Happens when the data is empty
print(e)
#pass
if not init:
self.draw()
```
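For readers unfamiliar with the Qt side, here is a minimal sketch of the embedding pattern `PlotCanvas` relies on (a `FigureCanvasQTAgg` used as the central widget of a window); the OpenCAL card data is not reproduced here, so the canvas plots placeholder numbers instead of real cards.
```python
import sys
import numpy as np
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure

class DemoCanvas(FigureCanvas):
    """Placeholder canvas following the same structure as PlotCanvas."""

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        fig = Figure(figsize=(width, height), dpi=dpi)
        self.ax = fig.add_subplot(111)
        self.ax.bar(np.arange(7), np.random.randint(0, 10, size=7), color='#1f77b4')
        self.ax.set_title("Card addition (placeholder data)")
        super().__init__(fig)
        self.setParent(parent)

app = QApplication(sys.argv)
window = QMainWindow()
window.setCentralWidget(DemoCanvas(window))
window.show()
sys.exit(app.exec_())
```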
|
{
"source": "jeremiedecock/piclockradio",
"score": 3
}
|
#### File: jeremiedecock/piclockradio/subprocess_test.py
```python
import os
import sys
import subprocess
TIMEOUT=3 # seconds
# If you launch a sub-process (even with shell=False), then the
# subprocess.Popen.kill() function will only kill that sub-process (so if there
# are any "grandchild" processes, they won't be terminated.).
# See: http://stackoverflow.com/questions/3908063/python-subprocess-with-shell-true-redirections-and-platform-independent-subproc
#
# The solution is to use preexec_fn to cause the subprocess to acquire it's own
# session group (then a signal is sent to all processes in that session group).
# See: http://stackoverflow.com/questions/3876886/timeout-a-subprocess
def execute(args):
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT, universal_newlines=True, timeout=TIMEOUT)
#output = subprocess.check_output(args, stderr=subprocess.STDOUT, universal_newlines=True, timeout=TIMEOUT, preexec_fn=os.setsid)
print(output)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
except subprocess.CalledProcessError as e:
print("Execution failed:", e, file=sys.stderr)
print(" Cmd:", e.cmd, file=sys.stderr)
print(" Args:", e.args, file=sys.stderr)
print(" Return code:", e.returncode, file=sys.stderr)
print(" Output message:", e.output, file=sys.stderr)
except subprocess.TimeoutExpired as e:
print("Execution stopped:", e, file=sys.stderr)
print(" Cmd:", e.cmd, file=sys.stderr)
print(" Args:", e.args, file=sys.stderr)
print(" Output message:", e.output, file=sys.stderr)
print(" Timeout:", e.timeout, file=sys.stderr)
def main():
"""Main function"""
# subprocess.check_output is a convenience functions (a wrapper).
# For more advanced use cases, the underlying subprocess.Popen interface
# can be used directly.
# Test 1
print("TEST1")
execute(["ls", "."])
print()
# Test 2
print("TEST2")
execute(["ls", "unknown_file"])
print()
# Test 3
print("TEST3")
execute(["unknown_cmd"])
print()
# Test 4
print("TEST4")
execute(["sleep", "10"])
if __name__ == '__main__':
main()
```
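A hedged sketch of the session-group trick discussed in the comments above: the child is made a session leader (`start_new_session=True`, the modern spelling of `preexec_fn=os.setsid`), and on timeout the whole process group is signalled so grandchildren die too. POSIX only; the shell command and timeout value are illustrative.
```python
import os
import signal
import subprocess

TIMEOUT = 3  # seconds

# The child shell spawns a grandchild ("sleep 10") and becomes a session leader.
proc = subprocess.Popen(["sh", "-c", "sleep 10 & wait"], start_new_session=True)
try:
    proc.wait(timeout=TIMEOUT)
except subprocess.TimeoutExpired:
    # Signal the whole process group: the shell *and* its "sleep" grandchild.
    os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
    proc.wait()
    print("Execution stopped after", TIMEOUT, "seconds")
```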
|
{
"source": "jeremiedecock/pyai",
"score": 2
}
|
#### File: mdp/agent/brute_force.py
```python
from . import agent
import itertools
import numpy as np
class Agent(agent.Agent):
"""
    Brute force policy search.
    Requires a discount factor in [0;1[ (singular matrices may appear if
    discount_factor == 1).
    No references.
"""
def __init__(self, environment):
assert 0 < environment.discountFactor < 1 # TODO: may get singular matrices if discountFactor == 1
self.policy = {}
initial_state = environment.initialState
print("Computing the policy... please wait.")
S = environment.stateSet
A = environment.actionSet
number_of_possible_policies = pow(len(A), len(S))
print("Number of possible policies :", number_of_possible_policies)
best_policy = None
best_policy_values_dict = {state: float("-inf") for state in S}
# The set of all possible (stationary) policies for an environment with
# len(S) states and len(A) actions (where A is the set of all possible
# actions and S the set of all possible states) is:
#
# {itertools.product(*[A] * len(S))}
#
# There are len(A)^len(S) possible policies (|A|^|S|) where
# len(A)^len(S) is the cartesian product A x A x A x ...
#
# itertools.product() can be used to enumerate all possible policies
# an other solution is to use len(S) nested loops but this is not
# really convenient...
#
# itertools.product(*[A] * 2) := itertools.product(A, A) := A x A cartesian product := [(a1, a2) for a1 in A for a2 in A] for the set of all possible policies for an environment with 2 states and len(A) actions (A is the set of all actions)
# itertools.product(*[A] * 3) := itertools.product(A, A, A) := A x A x A cartesian product := [(a1, a2, a3) for a1 in A for a2 in A for a3 in A] for the set of all possible policies for an environment with 3 states and len(A) actions (A is the set of all actions)
# itertools.product(*[A] * 4) := itertools.product(A, A, A, A) := A x A x A x A cartesian product := [(a1, a2, a3, a4) for a1 in A for a2 in A for a3 in A for a4 in A] for the set of all possible policies for an environment with 4 states and len(A) actions (A is the set of all actions)
# ...
#
# Example:
# A = {'←', '→', '↓', '↑'}
# S = {1, 2}
# P = itertools.product(*[A] * len(S))
# [p for p in P]
# >>> [('↓', '↓'),
# ('↓', '→'),
# ('↓', '↑'),
# ('↓', '←'),
# ('→', '↓'),
# ('→', '→'),
# ('→', '↑'),
# ('→', '←'),
# ('↑', '↓'),
# ('↑', '→'),
# ('↑', '↑'),
# ('↑', '←'),
# ('←', '↓'),
# ('←', '→'),
# ('←', '↑'),
# ('←', '←')]
policy_it = 1
for pi in itertools.product(*[A] * len(S)):
# Print progress
if policy_it%1000 == 0:
print("{0}% ({1}/{2})".format(float(policy_it)/number_of_possible_policies * 100., policy_it, number_of_possible_policies))
self.policy = {p[0]:p[1] for p in zip(S, pi)}
# Evaluate the policy (stochastic environment)
policy_values_dict = evaluatePolicy(self.policy, environment)
# Update best_policy
if policy_values_dict[initial_state] > best_policy_values_dict[initial_state]:
best_policy = self.policy
best_policy_values_dict = policy_values_dict
environment.displayPolicy(self, iteration=policy_it)
environment.displayValueFunction(policy_values_dict, iteration=policy_it)
policy_it += 1
self.policy = best_policy
environment.displayPolicy(self)
print("Done.")
def getAction(self, state):
action = self.policy[state]
return action
def evaluatePolicy(policy, environment):
"""
Evaluate the policy (ie. compute V^{\pi_i}(s) \forall s in S)
"""
state_list = list(environment.stateSet)
#print("STATES", state_list)
# Make the transition matrix
transition_list = []
for state_from in state_list:
if state_from in environment.finalStateSet:
transition_list.append([0 for state_to in state_list])
else:
action = policy[state_from]
transition_list.append([-environment.discountFactor * environment.transition(state_from, action)[state_to] for state_to in state_list])
transition_matrix = np.array(transition_list)
transition_matrix = transition_matrix + np.eye(len(transition_list))
#print("TRANSITION\n", transition_matrix)
# Make the reward vector
reward_vector = np.array([environment.reward(state) for state in state_list])
#print("REWARD", reward_vector)
# Solve the system of simplified Bellman equations
#value_vector = np.dot(np.linalg.inv(transition_matrix), reward_vector)
value_vector = np.linalg.solve(transition_matrix, reward_vector)
#print("VALUE", value_vector)
value_of_the_current_policy_dict = {state: value_vector[state_index] for state_index, state in enumerate(state_list)}
#print(value_of_the_current_policy_dict)
return value_of_the_current_policy_dict
if __name__ == '__main__':
pass
```
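To make the enumeration comment above concrete, here is a tiny self-contained illustration with a made-up 2-state, 2-action problem (|A|^|S| = 4 candidate stationary policies):
```python
import itertools

S = ["s1", "s2"]        # toy state set (made up for the example)
A = ["left", "right"]   # toy action set

for pi in itertools.product(*[A] * len(S)):
    policy = dict(zip(S, pi))   # maps each state to one action
    print(policy)
# {'s1': 'left', 's2': 'left'}, {'s1': 'left', 's2': 'right'}, ... (4 policies)
```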
#### File: signal/signal/signal_function.py
```python
import numpy as np
import numbers
__all__ = ['SignalFunction'] # TODO
class SignalFunction(object):
"""
    Signal function base class.
"""
signal_name = "Unknown" # TODO (class member ?)
function_formula = None # TODO (class member ?)
# EVAL ####################################################################
def __call__(self, *pargs, **kargs):
"""
        Evaluate f(x) where f is this signal function.
        If x is a numpy array of dimension 1 (x.ndim=1, i.e. a vector, not a
        matrix), then return the value f(x) of the point x.
This value y=f(x) is then a scalar number (not a numpy array).
If x is a numpy array of dimension 2 (x.ndim=2), then return the value
yi=f(xi) of each point xi in x.
        The x array is considered as follows:
number_of_points := x.shape[0]
dimension_of_each_point := x.shape[1]
with:
x = [[x1],
[x2],
[x3],
...]
For instance, the following array x means 3 points defined in R
(1 dimension) have to be evaluated:
x = [[ 2.],
[ 3.],
[ 4.]]
For instance, the following array x means 3 points defined in RxR
(2 dimensions) have to be evaluated:
x = [[ 2., 2.],
[ 3., 3.],
[ 4., 4.]]
        Values yi=f(xi) are scalar numbers (i.e. not vectors).
"""
x = pargs[0]
if x.ndim == 1:
# Only one point ##########
# Assert the number of elements of the vector x (i.e. the dimension
            # of the point x) is equal to the dimension of the function (self).
assert x.shape[0] == self.ndim, "x = " + str(x) + "; x.shape[0] = " + str(x.shape[0]) + "; self.ndim = " + str(self.ndim)
# Get the value of the point x.
y = self._eval_one_sample(x)
# Assert y is a (scalar) number.
assert isinstance(y, numbers.Number), "y = " + str(y)
elif x.ndim == 2:
# Multiple points #########
number_of_points = x.shape[0]
dimension_of_each_point = x.shape[1]
# Assert the number of elements of the vector x (i.e. the dimension
            # of the point x) is equal to the dimension of the function (self).
# For instance, the following numpy array x means 3 points defined in R
# (1 dimension) have to be evaluated:
# x = [[ 2.],
# [ 3.],
# [ 4.]]
# For instance, the following numpy array x means 3 points defined in RxR
# (2 dimensions) have to be evaluated:
# x = [[ 2., 2.],
# [ 3., 3.],
# [ 4., 4.]]
assert dimension_of_each_point == self.ndim, "x.shape[1] = " + str(x) + "; self.ndim =" + str(self.ndim)
y = self._eval_multiple_samples(x)
# Assert there is one value yi=f(xi) for each point xi in x
# and assert each yi is a scalar number (not a numpy array).
assert y.ndim == 1, "y.ndim = " + str(y.ndim)
assert y.shape[0] == number_of_points, "y.shape = " + str(y.shape) + "x.shape = " + str(x.shape)
else:
raise Exception("Wrong value for x.")
return y
def _eval_one_sample(self, x):
"""
Return the value y=f(x) of the function at the point x.
This function must be redefined.
The argument x must be a numpy array of dimension 1 (x.ndim=1 i.e. a
vector not a matrix).
The returned value y=f(x) is a scalar number (not a numpy array).
        This function should never be called by functions other than __call__()
        because all tests (assert) on arguments are made in __call__()
        (i.e. this function assumes arguments are well defined and doesn't test
        them). The main reason for this choice is to avoid rewriting all
        tests (assert) in subclasses; all tests are written once and for all
        in __call__().
"""
raise NotImplementedError
def _eval_multiple_samples(self, x):
"""
        Return the values yi=f(xi) of the function at each point xi in x.
        This function can be redefined to speed up computations.
        The argument x must be a numpy array of dimension 2 (x.ndim=2).
        The returned values yi=f(xi) for each point xi in x are scalar
        numbers (not vectors).
Therefore, the returned value y must have y.ndim=1 and
y.shape[0]=x.shape[0].
        The x array given as argument is considered as follows:
number_of_points := x.shape[0]
dimension_of_each_point := x.shape[1]
with:
x = [[x1],
[x2],
[x3],
...]
For instance, the following array x means 3 points defined in R
(1 dimension) have to be evaluated:
x = [[ 2.],
[ 3.],
[ 4.]]
For instance, the following array x means 3 points defined in RxR
(2 dimensions) have to be evaluated:
x = [[ 2., 2.],
[ 3., 3.],
[ 4., 4.]]
        This function should not be called by functions other than __call__()
        because all tests (assert) on arguments are made in __call__()
        (i.e. this function assumes arguments are well defined and doesn't test
        them). The main reason for this choice is to avoid rewriting all
        tests (assert) in subclasses; all tests are written once and for all
        in __call__().
"""
assert x.ndim == 2 # There are multiple points in x
number_of_points = x.shape[0]
dimension_of_each_point = x.shape[1]
assert dimension_of_each_point == self.ndim, "x.shape[1] = " + str(x) + "; self.ndim =" + str(self.ndim)
y_list = []
for xi in x:
yi = self._eval_one_sample(xi)
# Assert yi is a (scalar) number.
assert isinstance(yi, numbers.Number), "yi = " + str(yi)
y_list.append(yi)
return np.array(y_list)
# STR #####################################################################
def __str__(self):
if self.function_formula is not None:
return "%s: %s" % (self.signal_name, self.function_formula)
else:
return "%s" % (self.signal_name)
# PLOT ####################################################################
def plot(self):
"""
Plot the function for x in the domain of the function.
This only works for 1D and 2D functions.
"""
if self.ndim == 1:
# 1D FUNCTIONS
import matplotlib.pyplot as plt
assert self.domain_min.ndim == 1
assert self.domain_max.ndim == 1
assert self.domain_min.shape[0] == 1
assert self.domain_max.shape[0] == 1
xmin = self.domain_min[0]
xmax = self.domain_max[0]
assert xmin < xmax
xstep = (xmax - xmin) / 1000.
x_range = np.arange(xmin, xmax, xstep)
y_array = self(x_range.reshape([-1, 1])) # a 1dim numpy array
try:
label = self.label
except:
label = "f(x)"
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
ax.plot(x_range, y_array, "-", label=label)
# TITLE AND LABELS
ax.set_title('Objective function\n$' + str(self) + '$', fontsize=20)
ax.set_xlabel(r"$x$", fontsize=32)
ax.set_ylabel(r"$f(x)$", fontsize=32)
# LEGEND
ax.legend(loc='lower right', fontsize=20)
# SAVE FILES ######################
#filename = label + ".pdf"
#plt.savefig(filename)
# PLOT ############################
plt.show()
elif self.ndim == 2:
# 2D FUNCTIONS
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
# BUILD DATA ################
assert self.domain_min.ndim == 1
assert self.domain_max.ndim == 1
assert self.domain_min.shape[0] == 2
assert self.domain_max.shape[0] == 2
x1min = self.domain_min[0]
x1max = self.domain_max[0]
assert x1min < x1max
x2min = self.domain_min[1]
x2max = self.domain_max[1]
assert x2min < x2max
x1step = (x1max - x1min) / 200.
x2step = (x2max - x2min) / 200.
range_x1 = np.arange(x1min, x1max, x1step)
range_x2 = np.arange(x2min, x2max, x2step)
mesh_x1, mesh_x2 = np.meshgrid(range_x1, range_x2)
# TODO: take advantage of meshgrid, for now, it's not optimized at
# all and it's not very well written
z = np.zeros(mesh_x1.shape)
for x1i in range(z.shape[0]):
for x2i in range(z.shape[1]):
point = np.array([mesh_x1[x1i, x2i], mesh_x2[x1i, x2i]])
z[x1i, x2i] = self(point)
# PLOT DATA #################
fig = plt.figure()
#ax = axes3d.Axes3D(fig)
#ax.plot_wireframe(mesh_x1, mesh_x2, z)
ax = fig.gca(projection='3d')
ax.plot_surface(mesh_x1, mesh_x2, z, rstride=5, cstride=5, alpha=0.3)
cset = ax.contourf(mesh_x1, mesh_x2, z, zdir='z', offset=0, cmap=cm.coolwarm)
# TITLE AND LABELS
ax.set_title('Objective function\n$' + str(self) + '$', fontsize=20)
ax.set_xlabel(r'$x_1$', fontsize=32)
ax.set_ylabel(r'$x_2$', fontsize=32)
ax.set_zlabel(r'$f(x)$', fontsize=32)
# SHOW ############################
plt.show()
else:
pass
        # TODO: should be an object that exposes:
        # - the domain of definition of x
        # - continuous / discrete?
        # - the number of dimensions of x
```
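A minimal sketch of a concrete subclass, assuming the class above; the sphere function and its `ndim`/`domain_min`/`domain_max` attributes are hypothetical, but they match what `__call__()` and `plot()` expect.
```python
import numpy as np

class Sphere(SignalFunction):
    """Hypothetical 2D sphere function f(x) = x1^2 + x2^2."""

    signal_name = "Sphere"
    function_formula = r"f(x) = \sum_i x_i^2"

    def __init__(self):
        self.ndim = 2
        self.domain_min = -np.ones(2)
        self.domain_max = np.ones(2)

    def _eval_one_sample(self, x):
        # Must return a scalar number, as checked by __call__()'s assertions.
        return float(np.sum(x**2))

f = Sphere()
print(f(np.array([1., 2.])))                # one point        -> 5.0
print(f(np.array([[1., 2.], [0., 0.]])))    # two points (2x2) -> [5. 0.]
```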
#### File: ailib/utils/plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import axes3d
###############################################################################
def plot_2d_contour_solution_space(func,
xmin=-np.ones(2),
xmax=np.ones(2),
xstar=None,
title="",
vmin=None,
vmax=None,
zlog=True,
output_file_name=None):
"""TODO
"""
fig, ax = plt.subplots(figsize=(12, 8))
x1_space = np.linspace(xmin[0], xmax[0], 200)
x2_space = np.linspace(xmin[1], xmax[1], 200)
x1_mesh, x2_mesh = np.meshgrid(x1_space, x2_space)
zz = func(np.array([x1_mesh.ravel(), x2_mesh.ravel()])).reshape(x1_mesh.shape)
############################
if xstar.ndim == 1:
min_value = func(xstar)
else:
min_value = min(func(xstar))
max_value = zz.max()
if vmin is None:
if zlog:
vmin = 0.1 # TODO
else:
vmin = min_value
if vmax is None:
vmax = max_value
if zlog:
norm = colors.LogNorm()
else:
norm = None
levels = np.logspace(0.1, 3., 5) # TODO
im = ax.pcolormesh(x1_mesh, x2_mesh, zz,
vmin=vmin,
vmax=vmax,
norm=norm,
shading='gouraud',
cmap='gnuplot2') # 'jet' # 'gnuplot2'
plt.colorbar(im, ax=ax)
cs = plt.contour(x1_mesh, x2_mesh, zz, levels,
linewidths=(2, 2, 2, 2, 3),
linestyles=('dotted', '-.', 'dashed', 'solid', 'solid'),
alpha=0.5,
colors='white')
ax.clabel(cs, inline=False, fontsize=12)
############################
if xstar is not None:
ax.scatter(xstar[0],
xstar[1],
c='red',
label="$x^*$")
ax.set_title(title)
ax.set_xlabel(r"$x_1$")
ax.set_ylabel(r"$x_2$")
ax.legend(fontsize=12)
if output_file_name is not None:
plt.savefig(output_file_name, transparent=True)
plt.show()
###############################################################################
def plot_2d_solution_space(func,
xmin=-np.ones(2),
xmax=np.ones(2),
xstar=None,
angle_view=None,
title="",
zlog=True,
output_file_name=None):
"""TODO
"""
fig = plt.figure(figsize=(12, 8))
ax = axes3d.Axes3D(fig)
if angle_view is not None:
ax.view_init(angle_view[0], angle_view[1])
x1_space = np.linspace(xmin[0], xmax[0], 100)
x2_space = np.linspace(xmin[1], xmax[1], 100)
x1_mesh, x2_mesh = np.meshgrid(x1_space, x2_space)
zz = func(np.array([x1_mesh.ravel(), x2_mesh.ravel()])).reshape(x1_mesh.shape) # TODO
############################
if zlog:
norm = colors.LogNorm()
else:
norm = None
surf = ax.plot_surface(x1_mesh,
x2_mesh,
zz,
cmap='gnuplot2', # 'jet' # 'gnuplot2'
norm=norm,
rstride=1,
cstride=1,
#color='b',
shade=False)
ax.set_zlabel(r"$f(x_1, x_2)$")
fig.colorbar(surf, shrink=0.5, aspect=5)
```
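A hedged usage sketch for `plot_2d_contour_solution_space`, with a hypothetical quadratic objective; note that the callable receives a `(2, N)` array of stacked coordinates and must return `N` values.
```python
import numpy as np

def sphere(x):
    # x has shape (2, N): one row per coordinate, one column per point.
    return np.sum(np.asarray(x)**2, axis=0)

plot_2d_contour_solution_space(sphere,
                               xmin=-2. * np.ones(2),
                               xmax=2. * np.ones(2),
                               xstar=np.zeros(2),   # known optimum of the toy function
                               title="Sphere function",
                               zlog=False)
```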
#### File: pyai/examples/mdp_framework_example.py
```python
"""
A PyAI (Markov Decision Processes framework) demo.
"""
from ailib.mdp.environment.maze import Environment
#from ailib.mdp.environment.graph import Environment
#from ailib.mdp.environment.maze import Agent
#from ailib.mdp.agent.brute_force import Agent
#from ailib.mdp.agent.value_iteration import Agent
#from ailib.mdp.agent.value_iteration_gauss_seidel import Agent
#from ailib.mdp.agent.value_iteration_error_rate import Agent
from ailib.mdp.agent.policy_iteration import Agent
#from ailib.mdp.agent.direct_utility_estimation import Agent
def main():
"""Main function"""
#initial_state = (0,0)
#environment = Environment(initial_state = initial_state)
#environment = Environment()
environment = Environment(discount_factor=0.999)
agent = Agent(environment)
environment.displayReward()
environment.displayPolicy(agent)
# Do the simulation
(state_list, action_list, reward_list) = environment.simulate(agent)
# Display states
print("States:", state_list)
print("Actions:", action_list)
print("Rewards:", reward_list)
for iteration, (state, action) in enumerate(zip(state_list, action_list)):
environment.displayStateAction(current_state=state, current_action=action, iteration=iteration)
print("{0}: {1} {2} {3}".format(iteration, state, action, state_list[iteration+1]))
environment.displayStateAction(current_state=state_list[-1], iteration=len(state_list)-1)
print("Global reward =", sum(reward_list))
if __name__ == '__main__':
main()
```
|
{
"source": "jeremiedecock/pyarm",
"score": 3
}
|
#### File: pyarm/agent/sigmoid.py
```python
import math
class Agent:
def __init__(self):
pass
def get_commands(self, angles, velocities, time):
return (0.,
0.,
sigmoid(time, 3.),
0.,
0.,
0.)
def sigmoid(t, offset):
return 1. / (1. + math.exp(-8. * (t - offset)))
```
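A quick illustration of the command profile this agent produces, assuming the module above; the sampling times are arbitrary.
```python
agent = Agent()

for t in (0., 2., 3., 4., 6.):
    commands = agent.get_commands(angles=None, velocities=None, time=t)
    # Only the third actuator is driven; it ramps from ~0 to ~1 around t = 3 s.
    print("t = {:.0f}s -> command[2] = {:.3f}".format(t, commands[2]))
```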
#### File: pyarm/pyarm/clock.py
```python
__all__ = ['RealtimeClock',
'SimulationtimeClock']
import time
class RealtimeClock:
delta_time = None
time = None
_former_time = None
_init_time = None
def __init__(self):
self._init_time = time.time()
self._former_time = self._init_time
self.time = 0
def update(self):
"Update the clock (add elapsed time since the last call)"
current_time = time.time()
self.delta_time = current_time - self._former_time
self.time = current_time - self._init_time
self._former_time = current_time
class SimulationtimeClock:
delta_time = None
time = None
def __init__(self, delta_time):
self.delta_time = delta_time
self.time = 0
def update(self):
"Update the clock (add delta_time value)"
self.time += self.delta_time
```
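A brief sketch contrasting the two clocks, assuming the module above: the simulation clock advances by a fixed step per `update()`, while the real-time clock measures elapsed wall time.
```python
# Fixed-timestep clock: deterministic, independent of wall time.
sim_clock = SimulationtimeClock(delta_time=0.005)
for _ in range(3):
    sim_clock.update()
print(sim_clock.time)        # ~0.015 (three 5 ms steps)

# Wall-clock version: delta_time depends on how long the iteration really took.
rt_clock = RealtimeClock()
rt_clock.update()
print(rt_clock.delta_time)   # seconds elapsed since the clock was created
```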
#### File: model/muscle/fake_muscle_model.py
```python
import numpy as np
from pyarm import fig
class MuscleModel:
"Muscle model."
# CONSTANTS ###############################################################
name = 'Fake'
###########################################################################
def __init__(self):
# Init datas to plot
fig.subfig('command',
title='Command',
xlabel='time (s)',
ylabel='Command',
ylim=[-0.1, 1.1])
#legend=('shoulder +', 'shoulder -',
# 'elbow +', 'elbow -'))
def compute_torque(self, angles, velocities, command):
"Compute the torque"
torque = np.zeros(2)
if len(command) > 2:
torque[0] = (command[0] - command[1])
torque[1] = (command[2] - command[3])
fig.append('command', command[0:4])
else:
torque = np.array(command)
fig.append('command', command[0:2])
return torque
```
#### File: jeremiedecock/pyarm/tool-replay.py
```python
import sys
import os
import shutil
import getopt
from pyarm import fig
from pyarm import clock as clock_mod
## Lionel's old format
#COMMAND_SLICE = slice(8, 14)
#ANGLES_SLICE = slice(2, 4)
#VELOCITIES_SLICE = slice(0, 2)
# Lionel's new format
COMMAND_SLICE = slice(18, 24)
ANGLES_SLICE = slice(10, 12)
VELOCITIES_SLICE = slice(8, 10)
TARGETS_ANGLES_SLICE = slice(2, 4)
def usage():
"""Print help message"""
print '''Usage : ./pyarm -d DELTA_TIME [-m MUSCLE] [-a ARM] [-A AGENT] [-g GUI]
[-D GUI_DELTA_TIME] [-s] [-l] FILENAME
Replay a simulation from FILENAME (experimental).
-m, --muscle
the muscle model to use (kambara, mitrovic, li or none)
-a, --arm
the arm model to use (kambara, mitrovic, li or sagittal)
-g, --gui
the graphical user interface to use (tk, gtk, cairo)
-d, --deltatime
timestep value in second (should be near to 0.005 seconds)
-D, --guideltatime
set the interval between two display in milliseconds (default = 0.04)
-s, --screencast
make a screencast
-h, --help
display this help and exit
'''
def main():
"""The main function.
The purpose of this function is to get the list of modules to load and
launch the simulator."""
# Parse options ###########################################################
muscle = 'none'
arm = 'li'
gui = 'tk'
delta_time = None
gui_delta_time = 0.04
screencast = False
unbounded = False
log_file = None
try:
opts, args = getopt.getopt(sys.argv[1:],
'm:a:g:d:D:sh',
["muscle=", "arm=", "gui=", "deltatime=",
"guideltatime=", "screencast", "help"])
except getopt.GetoptError, err:
# will print something like "option -x not recognized"
print str(err)
usage()
sys.exit(1)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-m", "--muscle"):
muscle = a
elif o in ("-a", "--arm"):
arm = a
elif o in ("-g", "--gui"):
gui = a
elif o in ("-d", "--deltatime"):
delta_time = float(a)
elif o in ("-D", "--guideltatime"):
gui_delta_time = float(a)
elif o in ("-s", "--screencast"):
screencast = True
else:
assert False, "unhandled option"
if muscle not in ('none', 'kambara', 'mitrovic', 'li') \
or arm not in ('kambara', 'mitrovic', 'li', 'sagittal') \
or gui not in ('tk', 'gtk', 'cairo'):
usage()
sys.exit(2)
try:
log_file = args[0]
except IndexError: # TODO
usage()
exit(3)
# Init ####################################################################
# Erase the screencast directory
if screencast:
shutil.rmtree('screencast', True)
os.mkdir('screencast')
# Muscle module
if muscle == 'none':
from pyarm.model.muscle import fake_muscle_model as muscle_module
elif muscle == 'kambara':
from pyarm.model.muscle import kambara_muscle_model as muscle_module
elif muscle == 'mitrovic':
from pyarm.model.muscle import mitrovic_muscle_model as muscle_module
elif muscle == 'li':
from pyarm.model.muscle import weiwei_muscle_model as muscle_module
else:
usage()
sys.exit(2)
# Arm module
if arm == 'kambara':
from pyarm.model.arm import kambara_arm_model as arm_module
elif arm == 'mitrovic':
from pyarm.model.arm import mitrovic_arm_model as arm_module
elif arm == 'li':
from pyarm.model.arm import weiwei_arm_model as arm_module
elif arm == 'sagittal':
from pyarm.model.arm import sagittal_arm_model as arm_module
else:
usage()
sys.exit(2)
# GUI module
if gui == 'tk':
from pyarm.gui import tkinter_gui as gui_mod
elif gui == 'gtk':
from pyarm.gui import gtk_gui as gui_mod
elif gui == 'cairo':
raise NotImplementedError()
else:
usage()
sys.exit(2)
# Init instances
arm = arm_module.ArmModel(unbounded)
muscle = muscle_module.MuscleModel()
clock = None
if delta_time is None:
print "error : -d option isn't set"
sys.exit(1)
else:
clock = clock_mod.SimulationtimeClock(delta_time)
gui = gui_mod.GUI(muscle, arm, clock, screencast)
# Miscellaneous initialization
fig.CLOCK = clock
former_gui_time = 0
gui.shoulder_point = [70, 70]
gui.scale = 1200. # px/m (pixels per meter)
# The mainloop ############################################################
fd = file(log_file, 'rU')
line = fd.readline()
while gui.running and line != '': # TODO
if not line.lstrip().startswith('#'):
datas = [float(num) for num in line.split()]
# Update clock
clock.update()
# Get input signals
commands = datas[COMMAND_SLICE]
# Update angles (physics)
arm.angles = datas[ANGLES_SLICE]
arm.velocities = datas[VELOCITIES_SLICE]
torque = [0, 0]
acceleration = [0, 0]
# Update target
gui.target_angle = datas[TARGETS_ANGLES_SLICE]
# Update GUI
current_time = clock.time
if current_time - former_gui_time >= gui_delta_time:
gui.update(commands, torque, acceleration)
former_gui_time = current_time
line = fd.readline()
fd.close()
# Quit ####################################################################
if screencast:
print "Making screencast..."
cmd = "ffmpeg2theora -v 9 -f image2 %(path)s/%%05d.%(format)s -o %(path)s/screencast.ogv" % {'path': gui.screencast_path, 'format': gui.screenshot_format}
print cmd
os.system(cmd)
if __name__ == '__main__':
main()
```
|
{
"source": "jeremiedecock/pyca",
"score": 2
}
|
#### File: pyca/tests/test_rules.py
```python
from ca.rules.conway import Rules
from ca.grid.grid2d import Grid
import unittest
class RulesPacket(unittest.TestCase):
"""
Contains unit tests for the "rules" module.
"""
# Tests for the next_state function #######################################
def test_next_state_func(self):
"""Check that the next_state function returns the expected result."""
        rules = Rules()  # TODO: move into the preprocessing function
        state = Grid(grid=[[0, 1, 0], [0, 1, 0], [0, 1, 0]])  # TODO: move into the preprocessing function
next_state = rules.next_state(state)
expected_next_state = Grid(grid=[[0, 0, 0], [1, 1, 1], [0, 0, 0]])
self.assertEqual(next_state, expected_next_state)
if __name__ == '__main__':
unittest.main()
```
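The test above encodes a "blinker" oscillating between a vertical and a horizontal bar. A hedged sketch of the rule it expects, written directly on nested Python lists (the project's `Rules`/`Grid` classes are not reproduced here, and their border handling may differ):
```python
def conway_next_state(grid):
    """One Game of Life step on a 2D list of 0/1 cells (toy, bounded grid)."""
    rows, cols = len(grid), len(grid[0])
    new_grid = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            # Count the live neighbours among the up-to-8 cells inside the grid.
            neighbours = sum(grid[rr][cc]
                             for rr in range(max(0, r - 1), min(rows, r + 2))
                             for cc in range(max(0, c - 1), min(cols, c + 2))
                             if (rr, cc) != (r, c))
            if grid[r][c] == 1:
                new_grid[r][c] = 1 if neighbours in (2, 3) else 0   # survival
            else:
                new_grid[r][c] = 1 if neighbours == 3 else 0        # birth
    return new_grid

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert conway_next_state(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
```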
|
{
"source": "jeremiedecock/pywi-cta",
"score": 2
}
|
#### File: benchmark/tests/test_assess.py
```python
import numpy as np
from pywicta.benchmark import assess
from pywicta.io import geometry_converter
###############################################################################
def test_all_zeros():
"""Test all metrics with an input, output and reference image full of zeros."""
cam_id = "LSTCam"
geom1d = geometry_converter.get_geom1d(cam_id)
shape = geom1d.pix_x.shape
dtype = "float"
inp_img = np.zeros(shape=shape, dtype=dtype)
out_img = np.zeros(shape=shape, dtype=dtype)
ref_img = np.zeros(shape=shape, dtype=dtype)
inp_img_2d = geometry_converter.image_1d_to_2d(inp_img, cam_id)
out_img_2d = geometry_converter.image_1d_to_2d(out_img, cam_id)
ref_img_2d = geometry_converter.image_1d_to_2d(ref_img, cam_id)
score_tuple, metrics_name_tuple = assess.assess_image_cleaning(inp_img_2d,
out_img_2d,
ref_img_2d,
benchmark_method="all",
geom=geom1d)
assert len(score_tuple) == len(metrics_name_tuple)
assert len(score_tuple) > 1
def test_metric_roc():
ref = np.array([[0, 1, 0],
[2, 3, 2],
[0, 1, 0]])
out = np.array([[0., 3., 0.],
[2., 0., 3.],
[1., 1., 2.]])
res = assess.metric_roc(None, out, ref)
assert res.roc_true_positives == 4 \
and res.roc_false_positives == 2 \
and res.roc_true_negatives == 2 \
and res.roc_false_negatives == 1
def test_metric_roc_with_nan_values():
ref = np.array([[0, 1, 0],
[2, 3, 2],
[0, 1, np.nan]])
out = np.array([[0., 3., np.nan],
[2., 0., np.nan],
[1., 1., np.nan]])
res = assess.metric_roc(None, out, ref)
    # NaN is counted as True
assert res.roc_true_positives == 5 \
and res.roc_false_positives == 2 \
and res.roc_true_negatives == 1 \
and res.roc_false_negatives == 1
```
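A hedged sketch of the counting convention the two ROC tests above encode: a pixel is "positive" when its value is greater than zero, and NaN pixels also count as positive because they fail the `<= 0` comparison. This reproduces the expected numbers, but it is not necessarily how `assess.metric_roc` is implemented internally.
```python
import numpy as np

def roc_counts(output_img, reference_img):
    """Hypothetical re-derivation of the ROC counts used in the tests above."""
    ref_pos = ~(reference_img <= 0)   # True for values > 0 and for NaN
    out_pos = ~(output_img <= 0)
    tp = int(np.sum(ref_pos & out_pos))
    fp = int(np.sum(~ref_pos & out_pos))
    tn = int(np.sum(~ref_pos & ~out_pos))
    fn = int(np.sum(ref_pos & ~out_pos))
    return tp, fp, tn, fn

ref = np.array([[0., 1., 0.], [2., 3., 2.], [0., 1., 0.]])
out = np.array([[0., 3., 0.], [2., 0., 3.], [1., 1., 2.]])
assert roc_counts(out, ref) == (4, 2, 2, 1)   # matches test_metric_roc
```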
#### File: pywicta/data/__init__.py
```python
import os
# Inspired by https://github.com/scikit-image/scikit-image/blob/master/skimage/data/__init__.py
data_dir = os.path.abspath(os.path.dirname(__file__))
__all__ = ['lst']
def fits_gen(instrument_str, particle=None):
FILE_EXT = (".fits", ".fit")
particle = "" if particle is None else particle
for file_name in os.listdir(data_dir):
file_path = os.path.join(data_dir, file_name)
name = file_name.lower()
start_str = instrument_str + "_" + particle
if os.path.isfile(file_path) and name.endswith(FILE_EXT) and name.startswith(start_str):
yield file_path
def lst(ids=None, particle="gamma"):
"""A tuple of FITS files paths containing simulated LST images.
Often used for tutorials and examples.
Parameters
----------
    ids : a tuple of file names or None
The selection of FITS files to return.
Returns the path of all LST images if `ids` is set to `None` (default behavior).
Returns
-------
    list of str
        The paths of the selected FITS files.
"""
if ids is None:
return list(fits_gen("lst", particle=particle))
else:
path_list = []
for file_path in fits_gen("lst", particle=particle):
if os.path.splitext(os.path.basename(file_path))[0] in ids:
path_list.append(file_path)
return path_list
```
#### File: pywicta/denoising/wavelets_mrtransform.py
```python
__all__ = ['WaveletTransform']
"""Wavelet Transform image cleaning.
This script uses mr_transform -- a program written by CEA/CosmoStat
(www.cosmostat.org) -- to compute the Wavelet Transform.
Usage
-----
wavelets_mrtransform.py [-h] [--type-of-filtering STRING]
[--filter-thresholds FLOAT LIST]
[--last-scale STRING]
[--detect-only-positive-structures]
[--kill-isolated-pixels]
[--noise-cdf-file FILE] [--tmp-dir DIRECTORY]
[--verbose] [--debug] [--max-images INTEGER]
[--telid INTEGER] [--eventid INTEGER]
[--camid STRING] [--benchmark STRING]
[--label STRING] [--plot] [--saveplot FILE]
[--output FILE]
FILE [FILE ...]
Denoise FITS images with Wavelet Transform.
positional arguments:
  FILE                  The image files to process (FITS). If fileargs is a
                        directory, all FITS files it contains are processed.
optional arguments:
-h, --help show this help message and exit
--type-of-filtering STRING, -f STRING
Type of filtering: hard_filtering,
ksigma_hard_filtering
--filter-thresholds FLOAT LIST, -t FLOAT LIST
Thresholds used for the plane filtering.
--last-scale STRING, -L STRING
Last plane treatment: keep, drop, mask
--detect-only-positive-structures, -p
Detect only positive structure
--kill-isolated-pixels
Suppress isolated pixels in the support (scipy
implementation)
--noise-cdf-file FILE
The JSON file containing the Cumulated Distribution
Function of the noise model used to inject artificial
noise in blank pixels (those with a NaN value).
Default=None.
--tmp-dir DIRECTORY The directory where temporary files are written.
--verbose, -v Verbose mode
--debug Debug mode
--max-images INTEGER The maximum number of images to process
--telid INTEGER Only process images from the specified telescope
--eventid INTEGER Only process images from the specified event
--camid STRING Only process images from the specified camera
--benchmark STRING, -b STRING
The benchmark method to use to assess the algorithm
                        for the given images
--label STRING, -l STRING
The label attached to the produced results
--plot Plot images
--saveplot FILE The output file where to save plotted images
--output FILE, -o FILE
The output file path (JSON)
Examples
--------
./wavelets_mrtransform.py -h
./wavelets_mrtransform.py ./test.fits
ipython3 -- ./wavelets_mrtransform.py -t 21.5,11.7 ./test.fits
Notes
-----
This script requires the mr_transform program
(http://www.cosmostat.org/software/isap/).
It also requires Numpy and Matplotlib Python libraries.
"""
import argparse
import os
import time
import shutil
from pywicta.denoising.abstract_cleaning_algorithm import AbstractCleaningAlgorithm
from pywicta.denoising.inverse_transform_sampling import EmpiricalDistribution
from pywicta.io import images
from pywi.processing.filtering import hard_filter
from pywi.processing.filtering.hard_filter import filter_planes
from pywi.processing.filtering.pixel_clusters import filter_pixels_clusters as scipy_kill_isolated_pixels
from pywi.processing.filtering.pixel_clusters import filter_pixels_clusters_stats
from pywi.processing.filtering.pixel_clusters import number_of_pixels_clusters
from pywi.processing.transform import mrtransform_wrapper
from pywi.processing.transform.mrtransform_wrapper import inverse_wavelet_transform
from pywi.processing.transform.mrtransform_wrapper import wavelet_transform
from pywi.ui.argparse_commons import add_common_arguments
from pywi.ui.filter_with_mrtransform import add_arguments
# CONSTANTS ##################################################################
DEBUG = False
##############################################################################
# TODO: remove this redundant class (already defined in pywi.processing.compositing)
class WaveletTransform(AbstractCleaningAlgorithm):
"""The wavelet transform wrapper for ctapipe."""
def __init__(self):
super().__init__()
self.label = "WT (mr_transform)" # Name to show in plots
def clean_image(self,
input_image,
type_of_filtering=hard_filter.DEFAULT_TYPE_OF_FILTERING,
filter_thresholds=hard_filter.DEFAULT_FILTER_THRESHOLDS,
last_scale_treatment=mrtransform_wrapper.DEFAULT_LAST_SCALE_TREATMENT,
detect_only_positive_structures=False,
kill_isolated_pixels=False,
noise_distribution=None,
tmp_files_directory=".",
output_data_dict=None,
clusters_threshold=0,
**kwargs):
"""Clean the `input_image` image.
Apply the wavelet transform, filter planes and return the reverse
transformed image.
Parameters
----------
input_image : array_like
The image to clean.
type_of_filtering : str
Type of filtering: 'hard_filtering' or 'ksigma_hard_filtering'.
filter_thresholds : list of float
Thresholds used for the plane filtering.
last_scale_treatment : str
Last plane treatment: 'keep', 'drop' or 'mask'.
detect_only_positive_structures : bool
Detect only positive structures.
kill_isolated_pixels : bool
Suppress isolated pixels in the support.
        noise_distribution : `EmpiricalDistribution`
            The noise distribution used to inject artificial noise in blank
            pixels (those with a NaN value). It is built from a JSON file
            containing the Cumulated Distribution Function of the noise model.
tmp_files_directory : str
The path of the directory where temporary files are written.
output_data_dict : dict
A dictionary used to return results and intermediate results.
Returns
-------
Return the cleaned image.
"""
if DEBUG:
print("Filter thresholds:", filter_thresholds)
number_of_scales = len(filter_thresholds) + 1
if not (1 < number_of_scales <= 10):
# This range ]1,10] is a hard constraint from mr_transform
raise ValueError("bad number of scales: {}. Should be 1 < Nbr Scales <= 10. Check that filter_thresholds is a list of number and not a string.".format(number_of_scales))
if DEBUG:
print("Number of scales:", number_of_scales)
# COMPUTE THE WAVELET TRANSFORM #######################################
wavelet_planes = wavelet_transform(input_image,
number_of_scales=number_of_scales,
tmp_files_directory=tmp_files_directory,
noise_distribution=noise_distribution)
if DEBUG:
for index, plane in enumerate(wavelet_planes):
images.plot(plane, "Plane " + str(index))
# FILTER WAVELET PLANES ###############################################
filtered_wavelet_planes = filter_planes(wavelet_planes,
method=type_of_filtering,
thresholds=filter_thresholds,
detect_only_positive_structures=detect_only_positive_structures)
#if DEBUG:
# for index, plane in enumerate(filtered_wavelet_planes):
# images.plot(plane, "Filtered plane " + str(index))
# COMPUTE THE INVERSE TRANSFORM #######################################
cleaned_image = inverse_wavelet_transform(filtered_wavelet_planes,
last_plane=last_scale_treatment)
if DEBUG:
images.plot(cleaned_image, "Cleaned image")
# KILL ISOLATED PIXELS ################################################
kill_islands = filter_pixels_clusters_stats(cleaned_image)
img_cleaned_islands_delta_pe, img_cleaned_islands_delta_abs_pe, img_cleaned_islands_delta_num_pixels = kill_islands
img_cleaned_num_islands = number_of_pixels_clusters(cleaned_image)
if output_data_dict is not None:
output_data_dict["img_cleaned_islands_delta_pe"] = img_cleaned_islands_delta_pe
output_data_dict["img_cleaned_islands_delta_abs_pe"] = img_cleaned_islands_delta_abs_pe
output_data_dict["img_cleaned_islands_delta_num_pixels"] = img_cleaned_islands_delta_num_pixels
output_data_dict["img_cleaned_num_islands"] = img_cleaned_num_islands
if kill_isolated_pixels:
cleaned_image = scipy_kill_isolated_pixels(cleaned_image, threshold=clusters_threshold)
if DEBUG:
images.plot(cleaned_image, "Cleaned image after island kill")
return cleaned_image
def main():
"""The main module execution function.
Contains the instructions executed when the module is not imported but
directly called from the system command line.
"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Denoise FITS images with Wavelet Transform.")
parser = add_arguments(parser)
parser = add_common_arguments(parser, nargs="+")
# COMMON OPTIONS
CAM_IDS = ("ASTRICam", "CHEC", "DigiCam", "FlashCam", "NectarCam", "LSTCam")
parser.add_argument("--cluster-threshold", type=float, metavar="FLOAT",
help="The threshold for the pixels clusters filtering")
parser.add_argument("--max-images", type=int, metavar="INTEGER",
help="The maximum number of images to process")
parser.add_argument("--telid", type=int, metavar="INTEGER",
help="Only process images from the specified telescope")
parser.add_argument("--eventid", type=int, metavar="INTEGER",
help="Only process images from the specified event")
parser.add_argument("--camid", metavar="STRING",
help="Only process images from the specified camera: {}".format(str(CAM_IDS)))
parser.add_argument("--benchmark", "-b", metavar="STRING",
help="The benchmark method to use to assess the algorithm for the"
"given images")
parser.add_argument("--label", "-l", default=None,
metavar="STRING",
help="The label attached to the produced results")
parser.add_argument("--output", "-o", default=None,
metavar="FILE",
help="The output file path (JSON)")
args = parser.parse_args()
type_of_filtering = args.type_of_filtering
filter_thresholds_str = args.filter_thresholds
last_scale_treatment = args.last_scale
detect_only_positive_structures = args.detect_only_positive_structures
kill_isolated_pixels = args.kill_isolated_pixels
noise_cdf_file = args.noise_cdf_file
tmp_dir = args.tmp_dir
verbose = args.verbose
debug = args.debug
cluster_threshold = args.cluster_threshold # TODO: move this argument in PyWI
max_images = args.max_images
tel_id = args.telid
event_id = args.eventid
cam_id = args.camid
benchmark_method = args.benchmark
label = args.label
plot = args.plot
saveplot = args.saveplot
input_file_or_dir_path_list = args.fileargs
# CHECK OPTIONS #############################
if type_of_filtering not in hard_filter.AVAILABLE_TYPE_OF_FILTERING:
        raise ValueError('Unknown type of filtering: "{}". Should be in {}'.format(type_of_filtering,
hard_filter.AVAILABLE_TYPE_OF_FILTERING))
try:
filter_thresholds = [float(threshold_str) for threshold_str in filter_thresholds_str.split(",")]
except:
        raise ValueError('Wrong filter thresholds: "{}". Should be a list of numbers separated by commas (e.g. "3,2,3")'.format(filter_thresholds_str))
if last_scale_treatment not in mrtransform_wrapper.AVAILABLE_LAST_SCALE_OPTIONS:
raise ValueError('Unknown type of last scale treatment: "{}". Should be in {}'.format(last_scale_treatment ,
mrtransform_wrapper.AVAILABLE_LAST_SCALE_OPTIONS))
# TODO: check the noise_cdf_file value
# TODO: check the tmp_dir value
#############################################
if args.output is None:
output_file_path = "score_wavelets_benchmark_{}.json".format(benchmark_method)
else:
output_file_path = args.output
if noise_cdf_file is not None:
noise_distribution = EmpiricalDistribution(noise_cdf_file)
else:
noise_distribution = None
# Make the temp file directory
suffix = "{}_{}".format(os.getpid(), time.time())
tmp_files_directory = os.path.join(tmp_dir, suffix)
if os.path.exists(tmp_files_directory):
raise Exception("Cannot use {} as a directory for temporary files, the directory already exist.".format(tmp_files_directory))
else:
os.makedirs(tmp_files_directory)
while not os.path.exists(tmp_files_directory):
print('Waiting for the creation of', tmp_files_directory)
time.sleep(1)
cleaning_function_params = {
"type_of_filtering": type_of_filtering,
"filter_thresholds": filter_thresholds,
"last_scale_treatment": last_scale_treatment,
"detect_only_positive_structures": detect_only_positive_structures,
"kill_isolated_pixels": kill_isolated_pixels,
"noise_distribution": noise_distribution,
"tmp_files_directory": tmp_files_directory,
"verbose": verbose,
"cluster_threshold": cluster_threshold
}
cleaning_algorithm = WaveletTransform()
if verbose:
cleaning_algorithm.verbose = True
if label is not None:
cleaning_algorithm.label = label
output_dict = cleaning_algorithm.run(cleaning_function_params,
input_file_or_dir_path_list,
benchmark_method,
output_file_path,
plot=plot,
saveplot=saveplot,
max_num_img=max_images,
tel_id=tel_id,
event_id=event_id,
cam_id=cam_id,
debug=debug)
try:
# Remove the temp file directory
shutil.rmtree(tmp_files_directory)
except Exception as e:
print(e)
if __name__ == "__main__":
main()
```
|
{
"source": "jeremiedecock/snippets",
"score": 3
}
|
#### File: boost_python/reflect_virtual_function_and_extand_it_in_python/test.py
```python
import cppclasses
import sys
# Hello #############################
print
print '*** Instantiate "Hello" ***'
print
msg = cppclasses.Hello()
msg.message()
# HelloFr ###########################
print
print '*** Extend "Hello" from Python ***'
print
class HelloFr(cppclasses.Hello):
def message(self):
print "Bonjour", self.get_name(), "!"
msg = HelloFr()
msg.message()
print
#help(cppclasses)
```
#### File: snippets/gnuplot/MAKE_VIDEO.py
```python
import os
import sys
import math
"""
Creates an animated representation of data with gnuplot.
"""
def main():
outfile = 'video'
angle_x = 45
num_frames = 360
# Make video ####################################################
for frame_index in range(num_frames):
print('{:.2f}%'.format(float(frame_index) / float(num_frames) * 100.0))
# Write gnuplot file #########################################
with open('/tmp/tmp.gp', 'w') as fd:
print('set zeroaxis', file=fd)
print('set view {},{}'.format(angle_x, frame_index%360), file=fd)
print('set isosamples 100', file=fd)
print('set hidden3d', file=fd)
print("set term pngcairo size 1920,1080 enhanced font 'Verdana,10'", file=fd)
print('set output "/tmp/frame_{:04d}.png"'.format(frame_index), file=fd)
#print('splot [-pi:pi][-pi:pi] sin(x**2+y**2)/(x**2+y**2)', file=fd)
print('splot [-2*pi:2*pi][-2*pi:2*pi] sin((x+{0})**2+(y)**2)/((x+{0})**2+(y)**2)'.format(frame_index/100.), file=fd)
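# The (x + frame_index/100) shift slides the sinc-like surface along the x axis
# from frame to frame, while "set view" above rotates the camera around it.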
print('set output', file=fd)
print('set term X11', file=fd)
# Plot datas ################################################
os.system('gnuplot /tmp/tmp.gp')
os.remove('/tmp/tmp.gp')
# Write video file ##############################################
# Using FFMPEG
#os.system('ffmpeg2theora -f image2 /tmp/frame_%04d.png -o {}.ogv'.format(outfile))
# Using AVCONV (apt-get install libav-tools)
os.system('avconv -f image2 -i /tmp/frame_%04d.png {}.mp4'.format(outfile))
if __name__ == "__main__":
main()
```
#### File: python/curses/hello_unsafe_manual_init.py
```python
import curses
def main():
"""Main function"""
# INIT ################################################
# Determine the terminal type, send any required setup codes to the
# terminal, and create various internal data structures.
# This returns a window object representing the entire screen.
stdscr = curses.initscr()
# Turn off automatic echoing of keys to the screen.
curses.noecho()
# React to keys instantly, without requiring the Enter key to be pressed.
curses.cbreak()
# Terminals usually return special keys, such as the cursor keys or
# navigation keys such as Page Up and Home, as a multibyte escape sequence.
# While you could write your application to expect such sequences and
# process them accordingly, curses can do it for you, returning a special
# value such as curses.KEY_LEFT. To get curses to do the job, you’ll have
# to enable keypad mode.
stdscr.keypad(True)
# APP'S CODE ##########################################
# Clear screen
stdscr.clear()
# Print a message
stdscr.addstr('Hello, press any key to quit.')
# Display the message
stdscr.refresh()
# Wait for a key
stdscr.getkey()
# QUIT ################################################
# Reverse the curses-friendly terminal settings.
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
# Restore the terminal to its original operating mode
curses.endwin()
if __name__ == '__main__':
main()
```
#### File: python/curses/print_char.py
```python
import curses
def main(stdscr):
"""Main function"""
# Let the cursor be invisible
curses.curs_set(False)
# Clear screen
stdscr.clear()
# Print something...
for x in range(curses.COLS-1):
for y in range(curses.LINES-1):
c = '+' if (x+y)%2 == 0 else 'x'
# Print a single character
stdscr.addch(y, x, c)
# Display the message
stdscr.refresh()
# Wait for a key press
stdscr.getkey()
if __name__ == '__main__':
# A common problem when debugging a curses application is to get your
# terminal messed up when the application dies without restoring the
# terminal to its previous state. In Python this commonly happens when your
# code is buggy and raises an uncaught exception. Keys are no longer echoed
# to the screen when you type them, for example, which makes using the
# shell difficult.
# Use curses.wrapper() to avoid these difficulties. The callable is called
# inside a try...except that catches exceptions, restores the state of the
# terminal, and then re-raises the exception. Therefore your terminal won't
# be left in a funny state on exception and you'll be able to read the
# exception's message and traceback.
# This wrapper also initializes curses before calling the callable object
# given as argument, and restores the original state of the terminal when
# that callable returns.
curses.wrapper(main)
```
#### File: distutils/example_without_dependency/setup.py
```python
from jdhp_distutils_demo import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
# You can either specify manually the list of packages to include in the
# distribution or use "setuptools.find_packages()" to include them
# automatically with a recursive search (from the root directory of the
# project).
#PACKAGES = find_packages()
PACKAGES = ['jdhp_distutils_demo']
SCRIPTS = ['scripts/distutils-demo-nox', 'scripts/distutils-demo']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
name='jdhp-distutils-demo',
description='A snippet to test distutils and PyPI',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts=SCRIPTS,
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
```
#### File: python/element_tree/parse_xml_from_string.py
```python
import xml.etree.ElementTree as et
DATA = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<library>
<book date="1998-03-01" isbn="0262193981">
<title><![CDATA[Reinforcement Learning: An Introduction]]></title>
<author><NAME></author>
<author><NAME></author>
<tag>Computer Science</tag>
<tag>Artificial Intelligence</tag>
<tag>Reinforcement Learning</tag>
</book>
<book date="2009-12-11" isbn="0136042594">
<title><![CDATA[Artificial Intelligence: A Modern Approach]]></title>
<author><NAME></author>
<author><NAME></author>
<tag>Computer Science</tag>
<tag>Artificial Intelligence</tag>
</book>
</library>
"""
# Recursive (top-down) Depth-First Search tree traversal
def walk(element):
print(element.tag, element.attrib, element.text.strip())
for child in element:
walk(child)
def main():
root = et.fromstring(DATA)
walk(root)
if __name__ == '__main__':
main()
```
#### File: flask/static_files/hello.py
```python
from flask import Flask, url_for
app = Flask(__name__)
@app.route("/")
def hello_world():
return '<a href="{}">foo<a>'.format(url_for('static', filename='foo.html'))
```
#### File: snippets/python/flock_single_app_instance.py
```python
"""
This module show how to ensure a single instance of an application in Linux.
See http://stackoverflow.com/questions/220525/ensure-a-single-instance-of-an-application-in-linux
https://docs.python.org/3.5/library/fcntl.html
"""
import fcntl
import time
import sys
LOCK_FILENAME = ".lock"
def main():
print("Acquire an exclusive lock on ", LOCK_FILENAME)
fd = open(LOCK_FILENAME, "w")
try:
# LOCK_EX = acquire an exclusive lock on fd
# LOCK_NB = make a nonblocking request
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
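# With LOCK_NB, flock() raises an IOError/OSError immediately if another
# process already holds the lock, instead of blocking until it is released.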
time.sleep(10)
# LOCK_UN = unlock fd
fcntl.flock(fd, fcntl.LOCK_UN)
print("Unlock ", LOCK_FILENAME)
except IOError:
print(LOCK_FILENAME + " is locked ; another instance is running. Exit.")
sys.exit(1)
if __name__ == '__main__':
main()
```
#### File: gegl/pygobject_introspection/test.py
```python
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
```
#### File: python/gitlab_api/get_project_list.py
```python
import requests
import json
def get_request(get_url):
resp = requests.get(get_url, headers=HEADER_DICT)
json_list = json.loads(resp.text)
if resp.status_code != 200:
raise Exception("Error:" + resp.text)
return json_list, resp
with open("GITLAB_SECRET_TOKEN", "r") as fd:
GITLAB_TOKEN = fd.read().strip()
with open("GITLAB_HOST", "r") as fd:
GITLAB_HOST = fd.read().strip()
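# GitLab keyset pagination: each response carries a "Link" header with the URL
# of the next page; the header is absent on the last page (hence the loop below).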
GET_URL = GITLAB_HOST + "/api/v4/projects?pagination=keyset&per_page=50&order_by=id&sort=asc"
HEADER_DICT = {"PRIVATE-TOKEN": GITLAB_TOKEN}
project_list = []
json_list, resp = get_request(GET_URL)
while "Link" in resp.headers:
print(".", end="", flush=True)
next_page = resp.headers["Link"][1:].split(">")[0]
project_list.extend(json_list)
json_list, resp = get_request(next_page)
project_list.extend(json_list)  # the last page has no "Link" header, so append it after the loop
print(project_list[0].keys())
for project_dict in project_list:
print("{:3d}. {}".format(project_dict["id"], project_dict["path_with_namespace"]))
```
#### File: python/gitlab_api/init_issues_db.py
```python
import requests
import json
import sqlite3
import datetime
TABLE_NAME = "issues"
with open("GITLAB_SECRET_TOKEN", "r") as fd:
GITLAB_TOKEN = fd.read().strip()
with open("GITLAB_HOST", "r") as fd:
GITLAB_HOST = fd.read().strip()
HEADER_DICT = {"PRIVATE-TOKEN": GITLAB_TOKEN}
def str_to_datetime(datetime_str):
"""e.g. : 2021-11-16T16:07:05.688Z -> 2021-11-16T16:07:05.688+00:00"""
return datetime.datetime.fromisoformat(datetime_str.replace("Z", "+00:00"))
def get_request(get_url):
resp = requests.get(get_url, headers=HEADER_DICT)
json_list = json.loads(resp.text)
if resp.status_code != 200:
raise Exception("Error:" + resp.text)
return json_list, resp
def fetch_issues(update_after=None):
issue_list = []
params_str = "updated_after={},".format(update_after) if update_after is not None else ""
json_list, resp = get_request(GITLAB_HOST + "/api/v4/issues?{}per_page=100&page=1&scope=all".format(params_str))
num_pages = int(resp.headers['X-Total-Pages'])
for page in range(2, num_pages+1):
print("page {}/{}".format(page, num_pages))
issue_list.extend(json_list)
next_page = GITLAB_HOST + "/api/v4/issues?{}per_page=100&page={}&scope=all".format(params_str, page)
json_list, resp = get_request(next_page)
issue_list.extend(json_list)  # include the last fetched page, which the loop above never appends
return issue_list
def make_sqlite_database(issue_list):
con = sqlite3.connect("issues.sqlite")
cur = con.cursor()
# DELETE TABLE ##############
try:
cur.execute("DROP TABLE {}".format(TABLE_NAME))
except sqlite3.OperationalError:
pass  # the table does not exist yet
# CREATE TABLE ##############
sql_query_str = """CREATE TABLE {} (
id INTEGER,
state TEXT,
title TEXT,
description TEXT,
labels TEXT,
created_at TEXT,
updated_at TEXT,
milestone_id INTEGER,
web_url TEXT,
project_id INTEGER,
iid INTEGER,
upload_required INTEGER,
PRIMARY KEY (id)
)""".format(TABLE_NAME)
cur.execute(sql_query_str)
# FETCH JSON DATA ###########
sql_insert_params = [
(
issue_dict["id"],
issue_dict["state"],
issue_dict["title"],
issue_dict["description"],
",".join(issue_dict["labels"]),
issue_dict["created_at"],
issue_dict["updated_at"],
#issue_dict["milestone"]["id"] if ("milestone" in issue_dict and "id" in issue_dict["milestone"]) else "",
issue_dict["milestone"]["id"] if (issue_dict["milestone"] is not None) else "",
issue_dict["web_url"],
issue_dict["project_id"],
issue_dict["iid"],
0,
) for issue_dict in issue_list
]
# INSERT SQL DATA ###########
question_marks = ", ".join(["?" for x in sql_insert_params[0]])
query_str = "INSERT INTO {} VALUES ({})".format(TABLE_NAME, question_marks)
cur.executemany(query_str, sql_insert_params)
con.commit()
con.close()
issue_list = fetch_issues()
make_sqlite_database(issue_list)
```
#### File: graph_and_tree/tree_structure/node.py
```python
"""
Provides Node classes for graph and tree traversal snippets.
"""
class Node:
"""
A basic node class for graph and tree traversal snippets.
Attributes:
_value: the value of the node.
_child_nodes: a list of node's children.
"""
def __init__(self, value, child_nodes=None):
self._value = value
# Avoid a mutable default argument: a shared default list would be reused by every instance.
self._child_nodes = child_nodes if child_nodes is not None else []
def getValue(self):
return self._value
def getChildNodes(self):
return self._child_nodes
class GraphvizNode:
"""A node class using Graphviz for display"""
node_list = []
iteration_counter = 0
id_counter = 0
def __init__(self, value, child_nodes=None):
self._value = value
self._child_nodes = child_nodes if child_nodes is not None else []  # avoid a mutable default argument
GraphvizNode.id_counter += 1
self._id_str = "node_{}".format(GraphvizNode.id_counter)
self.status = "not_visited"
GraphvizNode.node_list.append(self)
def getValue(self):
GraphvizNode.iteration_counter += 1
file_name = "{}.dot".format(GraphvizNode.iteration_counter)
self.writeGraphvizFile(file_name)
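# One .dot file is written per visit, so the traversal can be replayed step by
# step (e.g. render each file with "dot -Tpng 1.dot -o 1.png").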
self.status = "visited"
return self._value
def getChildNodes(self):
return self._child_nodes
def writeGraphvizFile(self, file_name):
"""Write graphviz file"""
with open(file_name, 'w') as fd:
fd.write("digraph G {\n")
for node in GraphvizNode.node_list:
if node is self:
fd.write('\t{str_id}[label="{label}", color=red, style=filled, shape=circle];\n'.format(str_id=node._id_str, label=node._value))
elif node.status == "visited":
fd.write('\t{str_id}[label="{label}", color=lightgray, style=filled, shape=circle];\n'.format(str_id=node._id_str, label=node._value))
else:
fd.write('\t{str_id}[label="{label}", shape=circle];\n'.format(str_id=node._id_str, label=node._value))
for node in GraphvizNode.node_list:
if len(node._child_nodes) > 0:
children_str = ", ".join([child_node._id_str for child_node in node._child_nodes])
fd.write("\t{} -> {};\n".format(node._id_str, children_str))
fd.write("}")
```
#### File: python/numpy/savetxt_with_header.py
```python
import numpy as np
def save_np_array(output_file_path, data_array, header_list):
np.savetxt(output_file_path,
data_array,
#fmt="%10.5f",
#delimiter=" ",
header="; ".join(header_list),
#comments="# " # String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# '.
)
data = np.random.rand(10, 4)
header = ["rand 1", "rand 2", "rand 3", "rand 4"]
save_np_array("test.dat", data, header)
```
#### File: opencv_2/images/write_image.py
```python
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the program options (get the path of the image files to read and write)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to read", required=True, metavar="FILE")
parser.add_argument("--outfile", "-o", help="The picture file to write", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
outfile_str = args.outfile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np_array = cv.imread(infile_str, imread_flags) # Read the image
cv.imwrite(outfile_str, img_np_array) # Write the image
if __name__ == '__main__':
main()
```
#### File: opencv_2/videos/get_capture_properties.py
```python
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the program options (get the path of the image file to read) #####
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--cameraid", "-i", help="The camera ID number (default: 0)", type=int, default=0, metavar="INTEGER")
args = parser.parse_args()
device_number = args.cameraid
# OpenCV ##################################################################
video_capture = cv.VideoCapture(device_number)
# Get properties ######################################
# See http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
# Resolution of the video stream
width = video_capture.get(cv.cv.CV_CAP_PROP_FRAME_WIDTH)
height = video_capture.get(cv.cv.CV_CAP_PROP_FRAME_HEIGHT)
print("Frame resolution:", width, "x", height)
# Frame rate
fps = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Frame rate:", fps)
# 4-character code of codec
codec = video_capture.get(cv.cv.CV_CAP_PROP_FOURCC)
print("Codec:", codec)
# Brightness of the image (only for cameras)
brightness = video_capture.get(cv.cv.CV_CAP_PROP_BRIGHTNESS)
print("Brightness:", brightness)
# Contrast of the image (only for cameras)
contrast = video_capture.get(cv.cv.CV_CAP_PROP_CONTRAST)
print("Contrast:", contrast)
# Saturation of the image (only for cameras)
saturation = video_capture.get(cv.cv.CV_CAP_PROP_SATURATION)
print("Saturation:", saturation)
# HUE of the image (only for cameras)
hue = video_capture.get(cv.cv.CV_CAP_PROP_HUE)
print("HUE:", hue)
# Gain of the image (only for cameras)
gain = video_capture.get(cv.cv.CV_CAP_PROP_GAIN)
print("Gain:", gain)
# Exposure of the image (only for cameras)
exposure = video_capture.get(cv.cv.CV_CAP_PROP_EXPOSURE)
print("Exposure:", exposure)
#######################################################
print()
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
```
#### File: python/os/walk.py
```python
import argparse
import os
import sys
DESCRIPTION = "Walk through a directory tree."
EPILOG = "Please report bugs to <<EMAIL>>."
VERSION = "1.0"
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument("root_paths", nargs='+', metavar="DIRECTORY", help="directory to explore")
args = parser.parse_args()
for path in args.root_paths:
if not os.path.isdir(path):
parser.error("{0} is not a directory.".format(path))
# WALK THROUGH THE TREES ##################################################
# For each root path specified in command line arguments
for path in args.root_paths:
# Walk through 'path':
# root = a string, the path to the directory.
# dirs = a list of the names (strings) of the subdirectories in
# dirpath (excluding '.' and '..').
# files = a list of the names (strings) of the non-directory files
# in dirpath.
for root, dirs, files in os.walk(path, topdown=False):
print(root)
# Print all directories in the 'root' directory
for dir_str in dirs:
print " " * 3, "[dir]", os.path.join(root, dir_str)
# Print all files in the 'root' directory
for file_str in files:
print " " * 3, "[file]", os.path.join(root, file_str)
if __name__ == '__main__':
main()
```
#### File: python/pillow/create_and_save_greyscale_numpy.py
```python
import PIL.Image as pil_img # PIL.Image is a module not a class...
import numpy as np
SIZE_X = 320
SIZE_Y = 200
def normalize(array):
"""Normalize the values of a Numpy array in the range [0,1].
Parameters
----------
array : array like
The array to normalize
Returns
-------
ndarray
The normalized array
"""
min_value = array.min()
max_value = array.max()
size = max_value - min_value
if size > 0:
array = array.astype('float64', copy=True)
norm_array = (array - min_value)/size
else:
norm_array = array
return norm_array
def main():
"""Main function"""
# Make the data
image_array = np.random.normal(size=(SIZE_Y, SIZE_X))
# Make the data (pixels value in [0;255])
image_array = normalize(image_array) * 255.
image_array = image_array.astype('uint8', copy=True)
print(image_array)
# Make the image
mode = "L" # Grayscale
size_y, size_x = image_array.shape
image_pil = pil_img.new(mode, (size_x, size_y))
# WARNING: nested list and 2D numpy arrays are silently rejected!!!
# data *must* be a list or a 1D numpy array!
image_pil.putdata(image_array.flatten())
# Save the image
image_pil.save("create_and_save_greyscale_numpy.png")
if __name__ == '__main__':
main()
```
#### File: python/pillow/create_and_save_greyscale.py
```python
import PIL.Image as pil_img # PIL.Image is a module not a class...
SIZE_X = 320
SIZE_Y = 200
def main():
"""Main function"""
# Make the image
mode = "L" # Grayscale
size = (SIZE_X, SIZE_Y)
img = pil_img.new(mode, size)
# Make the data (pixels value in [0;255])
# WARNING: nested list and 2D numpy arrays are silently rejected!!!
# data *must* be a list or a 1D numpy array!
data = [(x+y)/(size[0]+size[1])*255 for y in range(size[1]) for x in range(size[0])]
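# This produces a diagonal gradient: the pixel value grows with x + y, scaled to the [0, 255] range.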
img.putdata(data)
# Save the image
img.save("create_and_save_greyscale.png")
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/combobox.py
```python
from gi.repository import Gtk as gtk
COMBOBOX_TEXT_LIST = ["Hello World!", "Hi!", "Goodbye."]
def print_text(widget, data):
"""
Print the content of the ComboBoxText widget.
This is a usage example of gtk.ComboBoxText.get_active_text().
"""
combobox = data
print(combobox.get_active_text()) # data is a gtk.ComboBoxText widget
def reset_selection(widget, data):
"""
Clear the content of the ComboBoxText widget.
This is a usage example of gtk.ComboBoxText.set_active().
"""
combobox = data
combobox.set_active(-1) # -1 = no active item selected ; data is a gtk.ComboBoxText widget
def main():
window = gtk.Window()
vertical_box = gtk.Box(orientation = gtk.Orientation.VERTICAL, spacing=6) # 6 pixels are placed between children
window.add(vertical_box)
# Label and Combobox ##############
horizontal_box1 = gtk.Box(orientation = gtk.Orientation.HORIZONTAL, spacing=6) # 6 pixels are placed between children
label = gtk.Label(label="Text to print:")
label.set_alignment(0, 0.5) # Align left
horizontal_box1.pack_start(label, expand=True, fill=True, padding=0)
combobox = gtk.ComboBoxText()
combobox.set_entry_text_column(0) # sets the model column which ComboBox should use to get strings from to be text_column
for text in COMBOBOX_TEXT_LIST:
combobox.append_text(text) # fill the combobox
combobox.set_active(0) # 0 = select the first item
horizontal_box1.pack_start(combobox, expand=True, fill=True, padding=0)
vertical_box.pack_start(horizontal_box1, expand=True, fill=True, padding=0)
# Buttons #########################
horizontal_box2 = gtk.Box(orientation = gtk.Orientation.HORIZONTAL, spacing=6) # 6 pixels are placed between children
# Print button
button1 = gtk.Button(label="Print")
button1.connect("clicked", print_text, combobox) # connect("event", callback, data)
horizontal_box2.pack_start(button1, expand=True, fill=True, padding=0)
# Clean button
button2 = gtk.Button(label="Reset")
button2.connect("clicked", reset_selection, combobox) # connect("event", callback, data)
horizontal_box2.pack_start(button2, expand=True, fill=True, padding=0)
vertical_box.pack_start(horizontal_box2, expand=True, fill=True, padding=0)
###
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/container_box_vertical.py
```python
from gi.repository import Gtk as gtk
def main():
window = gtk.Window()
vertical_box = gtk.Box(orientation = gtk.Orientation.VERTICAL, spacing=6) # 6 pixels are placed between children
window.add(vertical_box)
button1 = gtk.Button(label="Btn 1")
vertical_box.pack_start(button1, expand=True, fill=True, padding=0)
button2 = gtk.Button(label="Btn 2")
vertical_box.pack_start(button2, expand=True, fill=True, padding=0)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/dialog_alone.py
```python
from gi.repository import Gtk as gtk
def main():
dialog = gtk.MessageDialog(parent=None, flags=0, message_type=gtk.MessageType.ERROR, buttons=gtk.ButtonsType.OK, message_format="This is an ERROR MessageDialog")
dialog.format_secondary_text("And this is the secondary text that explains things.")
dialog.run()
dialog.destroy()
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/dualscreen.py
```python
import sys
from gi.repository import Gtk as gtk
def main():
window1 = gtk.Window()
window2 = gtk.Window()
# Monitors
screen = window1.get_screen()
print("Screen size: ", screen.width(), " x ", screen.height())
print("Monitors geometrie:")
monitor_list = []
for index, monitor in enumerate(range(screen.get_n_monitors())):
monitor_geometry = screen.get_monitor_geometry(monitor)
monitor_list.append(monitor_geometry)
print(" monitor", index, " = height:", monitor_geometry.height, " width:", monitor_geometry.width, " x:", monitor_geometry.x, " y:", monitor_geometry.y)
print(len(monitor_list), "monitors detected.")
if(len(monitor_list) != 2):
print("This snippet requires exactly 2 monitors.")
sys.exit(1)
window1.move(monitor_list[0].x, monitor_list[0].y)
window2.move(monitor_list[1].x, monitor_list[1].y)
window1.maximize()
window2.maximize()
window1.fullscreen()
window2.fullscreen()
print("Monitor of the current active window:", screen.get_monitor_at_window(screen.get_active_window()))
# Label
label1 = gtk.Label(label="Window1\n(press Alt+F4 to quit)")
window1.add(label1)
label2 = gtk.Label(label="Window2\n(press Alt+F4 to quit)")
window2.add(label2)
# Run
window1.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window1.show_all() # display the window
window2.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window2.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/link_button.py
```python
from gi.repository import Gtk as gtk
def main():
window = gtk.Window()
window.set_border_width(10)
button = gtk.LinkButton(uri="http://www.jdhp.org", label="Visit www.jdhp.org")
window.add(button)
# TO GET THE URI: button.get_uri()
# TO SET THE URI: button.set_uri("...")
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
```
#### File: pygtk/python_gtk3_pygobject/radio_button.py
```python
from gi.repository import Gtk as gtk
def on_button_toggled(widget):
if widget.get_active():
print(widget.get_label(), " was turned ON")
else:
print(widget.get_label(), " was turned OFF")
def main():
window = gtk.Window()
window.set_border_width(10)
grid = gtk.Grid()
window.add(grid)
button1 = gtk.RadioButton(label="Button 1 (group 1)")
button1.connect("toggled", on_button_toggled)
button2 = gtk.RadioButton(label="Button 2 (group 1)")
button2.connect("toggled", on_button_toggled)
button3 = gtk.RadioButton(label="Button 3 (group 1)")
button3.connect("toggled", on_button_toggled)
button4 = gtk.RadioButton(label="Button 4 (group 2)")
button4.connect("toggled", on_button_toggled)
button5 = gtk.RadioButton(label="Button 5 (group 2)")
button5.connect("toggled", on_button_toggled)
button2.join_group(button1)
button3.join_group(button1)
button5.join_group(button4)
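# join_group() makes radio buttons mutually exclusive within a group:
# buttons 1-3 form group 1 and buttons 4-5 form group 2.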
button3.set_active(True)
grid.attach(button1, left=0, top=0, width=1, height=1)
grid.attach(button2, left=1, top=0, width=1, height=1)
grid.attach(button3, left=2, top=0, width=1, height=1)
grid.attach(button4, left=0, top=1, width=1, height=1)
grid.attach(button5, left=1, top=1, width=1, height=1)
window.connect("delete-event", gtk.main_quit) # ask to quit the application when the close button is clicked
window.show_all() # display the window
gtk.main() # GTK+ main loop
if __name__ == '__main__':
main()
```
#### File: pyqt/pyqt5/drag_and_drop.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QLineEdit, QLabel
class MyWidget(QWidget):
def __init__(self):
super().__init__()
editBox = QLineEdit('Drag this', self)
editBox.setDragEnabled(True)
editBox.move(10, 10)
editBox.resize(100, 32)
button = CustomLabel('Drop here.', self)
button.move(130, 15)
self.show()
class CustomLabel(QLabel):
def __init__(self, title, parent):
super().__init__(title, parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, e):
if e.mimeData().hasFormat('text/plain'):
e.accept()
else:
e.ignore()
def dropEvent(self, e):
self.setText(e.mimeData().text())
if __name__ == '__main__':
app = QApplication(sys.argv)
widget = MyWidget()
widget.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: pyqt/pyqt5/widget_QCheckBox.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QCheckBox
from PyQt5.QtCore import Qt
def cb_callback(state):
if state == Qt.Checked:
print('Checked')
else:
print('Not checked')
app = QApplication(sys.argv)
# The default constructor has no parent.
# A widget with no parent is a window.
window = QMainWindow()
window.resize(250, 150)
window.setWindowTitle('Hello')
cb = QCheckBox('Hello', window)
cb.stateChanged.connect(cb_callback)
window.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: pyqt/pyqt5/widget_QPainter_mousse_event_and_paint.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QLabel, QVBoxLayout
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import Qt
import random
class MyWidget(QWidget):
def __init__(self):
super().__init__()
self.setMouseTracking(True)
self.cursor_pos = None
# Warning: by default, mouseMoveEvent is only triggered while a mouse button is pressed (drag)!
# To receive mouseMoveEvent even when no button is pressed, "mouse tracking" must be enabled: self.setMouseTracking(True)
def mouseMoveEvent(self, event):
self.cursor_pos = event.pos()
self.update() # call paintEvent()
def paintEvent(self, event):
painter = QPainter(self)
if self.cursor_pos is not None:
painter.drawEllipse(self.cursor_pos.x()-5, self.cursor_pos.y()-5, 10, 10)
if __name__ == '__main__':
app = QApplication(sys.argv)
widget = MyWidget()
widget.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: pyqt/pyqt5/widget_QTableView_for_pandas_dataframes.py
```python
import numpy as np
import pandas as pd
import sys
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView
class PandasModel(QAbstractTableModel):
def __init__(self, df, parent=None):
super().__init__(parent)
self._df = df
def rowCount(self, parent):
return self._df.values.shape[0]
def columnCount(self, parent):
return self._df.values.shape[1]
def data(self, index, role):
row = index.row()
column = index.column()
if role == Qt.DisplayRole:
return str(self._df.iloc[row, column])
return None
def headerData(self, index, orientation, role):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return self._df.columns[index]
elif orientation == Qt.Vertical:
return self._df.index[index]
return None
# def sort(self, column_index, order):
# """Sort table by given column number."""
# try:
# self.layoutAboutToBeChanged.emit()
# self._df = self._df.sort_values(self._df.columns[column_index], ascending=not order)
# self.layoutChanged.emit()
# except Exception as e:
# print(e)
if __name__ == '__main__':
app = QApplication(sys.argv)
NUM_ROWS = 50
NUM_COLUMNS = 10
df = pd.DataFrame(np.random.randint(0, 100, size=[NUM_ROWS, NUM_COLUMNS]),
index=["row{}".format(index) for index in range(NUM_ROWS)],
columns=["col{}".format(index) for index in range(NUM_COLUMNS)])
table_view = QTableView()
my_model = PandasModel(df=df)
table_view.setModel(my_model)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: pyqt/pyqt5/widget_QTabWidget.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QTabWidget, QVBoxLayout
class MyTabWidget(QWidget):
def __init__(self):
super().__init__()
# Initialize tab screen
self.tabs = QTabWidget()
self.tabs.resize(300, 200)
tab1 = QWidget()
tab2 = QWidget()
# Add tabs
self.tabs.addTab(tab1, "Tab 1")
self.tabs.addTab(tab2, "Tab 2")
# Populate the first tab
button1 = QPushButton("PyQt5 button")
tab1_layout = QVBoxLayout()
tab1_layout.addWidget(button1)
tab1.setLayout(tab1_layout)
# Set the layout
layout = QVBoxLayout()
layout.addWidget(self.tabs)
self.setLayout(layout)
if __name__ == '__main__':
app = QApplication(sys.argv)
widget = MyTabWidget()
widget.show()
# The mainloop of the application. The event handling starts from this point.
# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: python/pyserial/write.py
```python
import argparse
import serial
import time
def main():
# PARSE OPTIONS
parser = argparse.ArgumentParser(description='A pyserial snippet.')
parser.add_argument("--baudrate", "-b", help="The baudrate speed (e.g. 9600)", metavar="INTEGER", type=int, default=9600)
parser.add_argument("--timeout", "-t", help="The timeout value for the connection", metavar="FLOAT", type=float, default=0.1)
parser.add_argument("--port", "-p", help="The serial device to connect with (e.g. '/dev/ttyUSB0' for Unix users)", metavar="STRING", default="/dev/ttyUSB0")
args = parser.parse_args()
# CONNECT TO THE SERIAL PORT
serial_connection = serial.Serial(port=args.port,
baudrate=args.baudrate,
timeout=args.timeout,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE)
serial_connection.flushOutput()
# WRITE DATA
data_byte_array = bytearray("Hello")
while(True):
time.sleep(0.1)
serial_connection.write(data_byte_array)
if __name__ == '__main__':
main()
```
#### File: pyside/pyside6/drag_and_drop_firefox_tabs.py
```python
import sys
from PySide6 import QtCore, QtWidgets
class Windows(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setAcceptDrops(True)
self.text = QtWidgets.QLabel("Drop content here...", alignment=QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.text)
def dragEnterEvent(self, event):
#if (event.mimeData().hasFormat("text/plain"))
event.acceptProposedAction()
def dropEvent(self, event):
url_bytes = event.mimeData().data("text/x-moz-text-internal")
url_str = url_bytes.data().decode("utf-16")
print(url_str)
event.acceptProposedAction()
if __name__ == "__main__":
app = QtWidgets.QApplication([])
widget = Windows()
widget.resize(800, 600)
widget.show()
sys.exit(app.exec())
```
#### File: pyside/pyside6/hello.py
```python
import sys
from PySide6 import QtCore, QtWidgets
class MyWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.text = QtWidgets.QLabel("Hello World", alignment=QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.text)
if __name__ == "__main__":
app = QtWidgets.QApplication([])
widget = MyWidget()
widget.resize(800, 600)
widget.show()
sys.exit(app.exec())
```
#### File: pyside/pyside6/multithreading_2.py
```python
from PySide6.QtWidgets import QVBoxLayout, QLabel, QPushButton, QWidget, QMainWindow, QApplication
from PySide6.QtCore import QTimer, QRunnable, Slot, Signal, QObject, QThreadPool
import sys
import time
import traceback
class WorkerSignals(QObject):
'''
Defines the signals available from a running worker thread.
Supported signals are:
error
tuple (exctype, value, traceback.format_exc() )
progress
int indicating % progress
'''
error = Signal(tuple)
progress = Signal(int)
class Worker(QRunnable):
'''
Worker thread.
Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
This simplified worker takes no constructor arguments; its work is hard-coded
in the run() method below.
'''
def __init__(self):
super(Worker, self).__init__()
self.signals = WorkerSignals()
@Slot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
for n in range(0, 5):
time.sleep(1)
self.signals.progress.emit(n/4*100)
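# n goes from 0 to 4, so this reports 0, 25, 50, 75 and 100 percent (one step per second).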
except Exception:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.counter = 0
self.label = QLabel("Start")
btn = QPushButton("Run a new job")
btn.pressed.connect(self.btn_callback)
layout = QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(btn)
central_widget = QWidget()
central_widget.setLayout(layout)
self.setCentralWidget(central_widget)
self.show()
self.threadpool = QThreadPool()
print("Multithreading with maximum {} threads".format(self.threadpool.maxThreadCount()))
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.timer_callback)
self.timer.start()
def btn_callback(self):
# Pass the function to execute
worker = Worker()
worker.signals.progress.connect(self.progress_callback)
# Execute
self.threadpool.start(worker)
def progress_callback(self, percent_progress):
print(r"{}% done".format(percent_progress))
def timer_callback(self):
self.counter +=1
self.label.setText("Counter: %d" % self.counter)
app = QApplication(sys.argv)
window = MainWindow()
app.exec_()
```
#### File: official_examples/model_view_sql_books/bookdelegate.py
```python
import copy
from PySide6.QtSql import QSqlRelationalDelegate
from PySide6.QtWidgets import QSpinBox, QStyle
from PySide6.QtGui import QPixmap, QPalette
from PySide6.QtCore import QEvent, QSize, Qt
class BookDelegate(QSqlRelationalDelegate):
"""Books delegate to rate the books"""
def __init__(self, parent=None):
QSqlRelationalDelegate.__init__(self, parent)
self.star = QPixmap(":/images/star.png")
def paint(self, painter, option, index):
""" Paint the items in the table.
If the item referred to by <index> is a StarRating, we
handle the painting ourselves. For the other items, we
let the base class handle the painting as usual.
In a polished application, we'd use a better check than
the column number to find out if we needed to paint the
stars, but it works for the purposes of this example.
"""
if index.column() != 5:
# Since we draw the grid ourselves:
opt = copy.copy(option)
opt.rect = option.rect.adjusted(0, 0, -1, -1)
QSqlRelationalDelegate.paint(self, painter, opt, index)
else:
model = index.model()
if option.state & QStyle.State_Enabled:
if option.state & QStyle.State_Active:
color_group = QPalette.Normal
else:
color_group = QPalette.Inactive
else:
color_group = QPalette.Disabled
if option.state & QStyle.State_Selected:
painter.fillRect(option.rect,
option.palette.color(color_group, QPalette.Highlight))
rating = model.data(index, Qt.DisplayRole)
width = self.star.width()
height = self.star.height()
x = option.rect.x()
y = option.rect.y() + (option.rect.height() / 2) - (height / 2)
for i in range(rating):
painter.drawPixmap(x, y, self.star)
x += width
# Since we draw the grid ourselves:
self.drawFocus(painter, option, option.rect.adjusted(0, 0, -1, -1))
pen = painter.pen()
painter.setPen(option.palette.color(QPalette.Mid))
painter.drawLine(option.rect.bottomLeft(), option.rect.bottomRight())
painter.drawLine(option.rect.topRight(), option.rect.bottomRight())
painter.setPen(pen)
def sizeHint(self, option, index):
""" Returns the size needed to display the item in a QSize object. """
if index.column() == 5:
size_hint = QSize(5 * self.star.width(), self.star.height()) + QSize(1, 1)
return size_hint
# Since we draw the grid ourselves:
return QSqlRelationalDelegate.sizeHint(self, option, index) + QSize(1, 1)
def editorEvent(self, event, model, option, index):
if index.column() != 5:
return False
if event.type() == QEvent.MouseButtonPress:
mouse_pos = event.position()
new_stars = int(0.7 + (mouse_pos.x() - option.rect.x()) / self.star.width())
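# The 0.7 offset means a click only needs to be about a third of the way into a
# star for that star to count; the result is clamped to [0, 5] on the next line.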
stars = max(0, min(new_stars, 5))
model.setData(index, stars)
# So that the selection can change
return False
return True
def createEditor(self, parent, option, index):
if index.column() != 4:
return QSqlRelationalDelegate.createEditor(self, parent, option, index)
# For editing the year, return a spinbox with a range from -1000 to 2100.
spinbox = QSpinBox(parent)
spinbox.setFrame(False)
spinbox.setMaximum(2100)
spinbox.setMinimum(-1000)
return spinbox
```
#### File: pyside/pyside6/widget_QSqlTableModel_sqlite_from_file_with_sort_and_filter_plus_add_and_remove_rows.py
```python
import sys
import sqlite3
from PySide6 import QtCore, QtWidgets
from PySide6.QtCore import Qt, QSortFilterProxyModel, QModelIndex
from PySide6.QtWidgets import QApplication, QWidget, QTableView, QLineEdit, QVBoxLayout, QAbstractItemView
from PySide6.QtGui import QAction
from PySide6.QtSql import QSqlDatabase, QSqlQuery, QSqlTableModel
# INIT THE DATABASE #############################
con = sqlite3.connect("employee.db")
cur = con.cursor()
cur.execute("DROP TABLE employee")
cur.execute("CREATE TABLE employee (id INTEGER PRIMARY KEY AUTOINCREMENT, first_name TEXT, last_name TEXT)")
params_list = [
("Jean", "Dupont"),
("Paul", "Dupond"),
("Jeanne", "Durand"),
("Anne", "Dupuit"),
]
cur.executemany("INSERT INTO employee (first_name, last_name) VALUES(?, ?)", params_list)
con.commit()
con.close()
# OPEN THE DATABASE #############################
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("./employee.db")
assert db.open()
#################################################
app = QApplication(sys.argv)
window = QWidget()
# Make widgets ##############
edit = QLineEdit()
table_view = QTableView()
edit.setPlaceholderText("Filter text (on col. 1)")
# Set the layout ############
vbox = QVBoxLayout()
vbox.addWidget(edit)
vbox.addWidget(table_view)
window.setLayout(vbox)
#############################
model = QSqlTableModel()
model.setTable("employee")
#model.setEditStrategy(QSqlTableModel.OnManualSubmit)
model.select()
model.setHeaderData(0, Qt.Horizontal, "First Name")
model.setHeaderData(1, Qt.Horizontal, "<NAME>")
table_view.setModel(model)
table_view.setSortingEnabled(True)
table_view.setSelectionBehavior(QAbstractItemView.SelectRows) # Select the full row when a cell is selected (See http://doc.qt.io/qt-5/qabstractitemview.html#selectionBehavior-prop )
table_view.hideColumn(0) # don't show the ID
# Set LineEdit slot #########################
def filter_callback():
filter_str = edit.text()
if filter_str == '':
model.setFilter("")
else:
model.setFilter("first_name LIKE '%{}%'".format(filter_str))
print(filter_str)
edit.textChanged.connect(filter_callback)
#############################
def add_row_callback():
# See https://doc.qt.io/qtforpython/overviews/sql-model.html#using-the-sql-model-classes
row = 0
model.insertRows(row, 1)
#model.setData(model.index(row, 0), 1013)
model.setData(model.index(row, 1), "n/a")
model.setData(model.index(row, 2), "n/a")
model.submitAll()
#model.select()
def remove_row_callback():
# See https://doc.qt.io/qt-5/qsqltablemodel.html#removeRows
# See https://doc.qt.io/qtforpython/overviews/sql-model.html#using-the-sql-model-classes
# See http://doc.qt.io/qt-5/model-view-programming.html#handling-selections-in-item-views
selection_proxy_index_list = table_view.selectionModel().selectedRows()
selected_row_list = [source_index.row() for source_index in selection_proxy_index_list]
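# Rows are removed from the bottom up (reverse order) so that earlier removals
# do not shift the indices of the rows still waiting to be removed.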
for row_index in sorted(selected_row_list, reverse=True):
# Remove rows one by one to allow the removal of non-contiguously selected rows (e.g. "rows 0, 2 and 3")
success = model.removeRow(row_index)
if not success:
raise Exception("Unknown error...") # TODO
model.submitAll() # When you’re finished changing a record, you should always call submitAll() to ensure that the changes are written to the database
model.select()
# Add row action
add_action = QAction(table_view)
add_action.setShortcut(Qt.CTRL | Qt.Key_N)
add_action.triggered.connect(add_row_callback)
table_view.addAction(add_action)
# Delete action
del_action = QAction(table_view)
del_action.setShortcut(Qt.Key_Delete)
del_action.triggered.connect(remove_row_callback)
table_view.addAction(del_action)
#############################
window.show()
# The mainloop of the application. The event handling starts from this point.
exit_code = app.exec()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
```
#### File: python/scikit_learn/datasets.py
```python
from __future__ import print_function
from sklearn import datasets
import matplotlib.pyplot as plt
def main():
# http://scikit-learn.org/stable/tutorial/basic/tutorial.html#loading-an-example-dataset
# "A dataset is a dictionary-like object that holds all the data and some
# metadata about the data. This data is stored in the .data member, which
# is a n_samples, n_features array. In the case of supervised problem, one
# or more response variables are stored in the .target member."
# Toy datasets
iris = datasets.load_iris() # The iris dataset (classification)
digits = datasets.load_digits() # The digits dataset (classification)
#boston = datasets.load_boston() # The boston house-prices dataset (regression)
#diabetes = datasets.load_diabetes() # The diabetes dataset (regression)
#linnerud = datasets.load_linnerud() # The linnerud dataset (multivariate regression)
print(iris.feature_names)
print(iris.data)
print(iris.target_names)
print(iris.target)
print(digits.images[0])
print(digits.target_names)
print(digits.target)
plt.imshow(digits.images[0], cmap='gray', interpolation='nearest')
plt.show()
# Others datasets
# See: http://scikit-learn.org/stable/datasets/index.html#datasets
if __name__ == '__main__':
main()
```
#### File: tkinter/python3/event_keyboard.py
```python
import tkinter as tk
root = tk.Tk()
label = tk.Label(root, text="Press some keys", width=50, height=10)
label.pack()
# SETUP KEYBOARD EVENT CALLBACKS
def keypress_callback(event):
if event.keysym == "Up":
print("keypress: <Up>")
elif event.keysym == "Down":
print("keypress: <Down>")
elif event.keysym == "Left":
print("keypress: <Left>")
elif event.keysym == "Right":
print("keypress: <Right>")
elif event.keysym == "Return":
print("keypress: <Return>")
elif event.keysym == "Escape":
print("keypress: <Escape>")
elif event.keysym == "space":
print("keypress: <space>")
elif event.keysym == "Control_R":
print("keypress: <Control_R>")
elif event.keysym == "Control_L":
print("keypress: <Control_L>")
elif event.keysym == "Shift_R":
print("keypress: <Shift_R>")
elif event.keysym == "Shift_L":
print("keypress: <Shift_L>")
elif event.keysym == "Tab":
print("keypress: <Tab>")
elif event.keysym == "Super_R":
print("keypress: <Super_R>")
elif event.keysym == "Super_L":
print("keypress: <Super_L>")
elif event.keysym == "BackSpace":
print("keypress: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keypress: <Prior>")
elif event.keysym == "Next": # PgDown
print("keypress: <Next>")
elif event.char == "a":
print("keypress: <a>")
elif event.char == "b":
print("keypress: <b>")
elif event.char == "c":
print("keypress: <c>")
elif event.char == "d":
print("keypress: <d>")
elif event.char == "A":
print("keypress: <A>")
elif event.char == "B":
print("keypress: <B>")
elif event.char == "C":
print("keypress: <C>")
elif event.char == "D":
print("keypress: <D>")
elif event.char == "1":
print("keypress: <1>")
elif event.char == "2":
print("keypress: <2>")
elif event.char == "3":
print("keypress: <3>")
else:
print("keypress:", event.char, event.keysym)
def keyrelease_callback(event):
if event.keysym == "Up":
print("keyrelease: <Up>")
elif event.keysym == "Down":
print("keyrelease: <Down>")
elif event.keysym == "Left":
print("keyrelease: <Left>")
elif event.keysym == "Right":
print("keyrelease: <Right>")
elif event.keysym == "Return":
print("keyrelease: <Return>")
elif event.keysym == "Escape":
print("keyrelease: <Escape>")
elif event.keysym == "space":
print("keyrelease: <space>")
elif event.keysym == "Control_R":
print("keyrelease: <Control_R>")
elif event.keysym == "Control_L":
print("keyrelease: <Control_L>")
elif event.keysym == "Shift_R":
print("keyrelease: <Shift_R>")
elif event.keysym == "Shift_L":
print("keyrelease: <Shift_L>")
elif event.keysym == "Tab":
print("keyrelease: <Tab>")
elif event.keysym == "Super_R":
print("keyrelease: <Super_R>")
elif event.keysym == "Super_L":
print("keyrelease: <Super_L>")
elif event.keysym == "BackSpace":
print("keyrelease: <BackSpace>")
elif event.keysym == "Prior": # PgUp
print("keyrelease: <Prior>")
elif event.keysym == "Next": # PgDown
print("keyrelease: <Next>")
elif event.char == "a":
print("keyrelease: <a>")
elif event.char == "b":
print("keyrelease: <b>")
elif event.char == "c":
print("keyrelease: <c>")
elif event.char == "d":
print("keyrelease: <d>")
elif event.char == "A":
print("keyrelease: <A>")
elif event.char == "B":
print("keyrelease: <B>")
elif event.char == "C":
print("keyrelease: <C>")
elif event.char == "D":
print("keyrelease: <D>")
elif event.char == "1":
print("keyrelease: <1>")
elif event.char == "2":
print("keyrelease: <2>")
elif event.char == "3":
print("keyrelease: <3>")
else:
print("keyrelease:", event.char, event.keysym)
root.bind("<KeyPress>", keypress_callback)
root.bind("<KeyRelease>", keyrelease_callback)
root.mainloop()
```
#### File: tkinter/python3/listbox.py
```python
import tkinter as tk
root = tk.Tk()
# LISTBOX #############################
# The "selectmode" can be:
# - SINGLE: just a single choice
# - BROWSE: same, but the selection can be moved using the mouse
# - MULTIPLE: multiple item can be choosen, by clicking at them one at a
# time
# - EXTENDED: multiple ranges of items can be chosen, using the Shift and
# Control keyboard modifiers
listbox = tk.Listbox(root, selectmode=tk.EXTENDED)
listbox.pack()
items = ["banana", "apple", "mango", "orange"]
for item in items:
listbox.insert(tk.END, item)
# BUTTON ##############################
def print_selection():
selection_id_tuple = listbox.curselection()
selection_label_tuple = tuple(listbox.get(item) for item in selection_id_tuple)
print(selection_id_tuple)
print(selection_label_tuple)
button = tk.Button(root, text="Print selection", width=15, command=print_selection)
button.pack()
# MAIN LOOP ###########################
root.mainloop()
```
#### File: tkinter/python3/radiobutton.py
```python
import tkinter as tk
root = tk.Tk()
# Each group of Radiobutton widgets should be associated with single variable.
# Each button then represents a single value for that variable.
test_var = tk.IntVar()
# Initialize
test_var.set(2)
def callback():
print("var = ", test_var.get())
radiobutton1 = tk.Radiobutton(root, text="One", variable=test_var, value=1, command=callback)
radiobutton2 = tk.Radiobutton(root, text="Two", variable=test_var, value=2, command=callback)
radiobutton3 = tk.Radiobutton(root, text="Three", variable=test_var, value=3, command=callback)
radiobutton1.pack(anchor=tk.W)
radiobutton2.pack(anchor=tk.W)
radiobutton3.pack(anchor=tk.W)
root.mainloop()
```
#### File: physics_python/standalone_modules/pointmass_spring.py
```python
import numpy as np
class State:
def __init__(self, ndim):
self.ndim = ndim
self.position = np.zeros(self.ndim)
self.velocity = np.zeros(self.ndim)
class Model:
def __init__(self, mass=1., stiffness=1.):
self.mass = mass
self.stiffness = stiffness
def compute_acceleration(self, state):
# F = -k.x
self.stretch = state.position[0] # 0 is the "rest" point
total_external_force = -1. * self.stiffness * self.stretch
# a = f/m
acceleration = total_external_force / self.mass
return acceleration
def compute_new_state(state, acceleration, delta_time):
"Compute the forward kinematics with finite difference method."
new_state = State(state.ndim)
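# Explicit (forward) Euler integration: the position update below uses the
# velocity of the previous time step, not the freshly updated one.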
# Velocity (m/s) at time_n+1
new_state.velocity = state.velocity + acceleration * delta_time
# Position (m) at time_n+1
new_state.position = state.position + state.velocity * delta_time
return new_state
def main():
state = State(ndim=1)
state.position[0] = 1. # initial stretch (0 is the "rest" position)
time = 0.
delta_time = 0.01
model = Model()
print("#time acceleration velocity position")
while time < 12:
time = time + delta_time
# Update state (physics)
acceleration = model.compute_acceleration(state)
state = compute_new_state(state, acceleration, delta_time)
print(time, acceleration, state.velocity[0], state.position[0])
if __name__ == "__main__":
main()
```
|
{
"source": "jeremiedecock/tictactoe-py",
"score": 2
}
|
#### File: tictactoe-py/utils/stats.py
```python
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"]for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
```
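The script above assumes a results file containing a `game_log_list` array whose entries have a `winner` field (0 for player 1, 1 for player 2, `null` for a draw); this structure is inferred from the parsing code, not from repository documentation. A hypothetical input file and the output it would produce:
```python
# Hypothetical example of the JSON structure that utils/stats.py expects,
# inferred from the parsing code above (not taken from the repository docs).
import json

sample = {
    "game_log_list": [
        {"winner": 0},     # player 1 won
        {"winner": 1},     # player 2 won
        {"winner": None},  # draw (serialized as null)
        {"winner": 0},
    ]
}

with open("results_sample.json", "w") as fd:
    json.dump(sample, fd)

# Running `python utils/stats.py results_sample.json` on this file would print:
#   player1: 2 (50.00%)
#   player2: 1 (25.00%)
#   draw: 1 (25.00%)
```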
|
{
"source": "jeremieflrnt/template-gauge-python",
"score": 2
}
|
#### File: step_impl/utils/driver.py
```python
import os
import logging
import platform
from getgauge.python import before_scenario, after_scenario, custom_screenshot_writer, Screenshots, after_step
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from step_impl.utils.date_utils import timestamp
class Driver(object):
logging.basicConfig(level=logging.INFO)
driver = None
@before_scenario
def init(self):
if os.getenv("BROWSER") == "CHROME":
options = ChromeOptions()
options.add_argument("--ignore-certificate-errors")
options.add_argument("--disable-infobars")
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
if os.getenv("HEADLESS") == "True":
options.add_argument("--headless")
options.add_argument("--window-size=1920,4320")
else:
options.add_argument("--window-size=1280,1024")
if platform.system() == "Linux":
driver_path = "env/chrome/chromedriver/linux64/" + os.getenv("VERSION") + "/chromedriver"
elif platform.system() == "Windows":
driver_path = "env\\chrome\\chromedriver\\win32\\" + os.getenv("VERSION") + "\\chromedriver"
# options.binary_location =
Driver.driver = webdriver.Chrome(executable_path=driver_path, options=options)
elif os.getenv("BROWSER") == "FIREFOX":
options = FirefoxOptions()
if os.getenv("HEADLESS") == "True":
options.add_argument("--headless")
options.add_argument("--width=1920")
options.add_argument("--height=4320")
else:
options.add_argument("--width=1280")
options.add_argument("--height=1024")
if platform.system() == "Linux":
driver_path = "env/firefox/geckodriver/linux64/" + os.getenv("VERSION") + "/geckodriver"
elif platform.system() == "Windows":
driver_path = "env\\firefox\\geckodriver\\win32\\" + os.getenv("VERSION") + "\\geckodriver"
Driver.driver = webdriver.Firefox(executable_path=driver_path, options=options)
@after_scenario
def close(self):
Driver.driver.close()
@custom_screenshot_writer
def take_screenshot():
file_name = os.path.join(os.getenv("gauge_screenshots_dir"), ("screenshot-{0}.png".format(timestamp())))
Driver.driver.save_screenshot(file_name)
return os.path.basename(file_name)
@after_step
def after_step_screenshot():
Screenshots.capture_screenshot()
```
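The hooks above only manage the WebDriver lifecycle; step implementations are expected to reach the browser through the shared `Driver.driver` attribute. A minimal, hypothetical step showing that pattern (the step text and URL are placeholders, and the `BROWSER`, `HEADLESS` and `VERSION` environment variables must be set as `init` assumes):
```python
# Hypothetical step implementation using the shared Driver.driver instance.
# The step text and URL are placeholders, not part of the template project.
from getgauge.python import step

from step_impl.utils.driver import Driver


@step("Open the example home page")
def open_example_home_page():
    Driver.driver.get("https://example.com")
    assert "Example" in Driver.driver.title
```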
|
{
"source": "JeremieGince/AutoMLpy",
"score": 2
}
|
#### File: AutoMLpy/examples/parameter_generator_save_and_load.py
```python
from typing import Union, Tuple
import time
import numpy as np
import pandas as pd
import pprint
# Tensorflow
import tensorflow as tf
import tensorflow_datasets as tfds
# Importing the HPOptimizer and the RandomHpSearch from the AutoMLpy package.
from AutoMLpy import HpOptimizer, RandomHpSearch
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
def get_tf_mnist_dataset(**kwargs):
# https://www.tensorflow.org/datasets/keras_example
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
# Build training pipeline
ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
# Build evaluation pipeline
ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
return ds_train, ds_test
def get_tf_mnist_model(**hp):
if hp.get("use_conv", False):
model = tf.keras.models.Sequential([
# Convolution layers
tf.keras.layers.Conv2D(10, 3, padding="same", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Conv2D(50, 3, padding="same"),
tf.keras.layers.MaxPool2D((2, 2)),
# Dense layers
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(120, activation='relu'),
tf.keras.layers.Dense(84, activation='relu'),
tf.keras.layers.Dense(10)
])
else:
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(120, activation='relu'),
tf.keras.layers.Dense(84, activation='relu'),
tf.keras.layers.Dense(10)
])
return model
class KerasMNISTHpOptimizer(HpOptimizer):
def build_model(self, **hp) -> tf.keras.Model:
model = get_tf_mnist_model(**hp)
model.compile(
optimizer=tf.keras.optimizers.SGD(
learning_rate=hp.get("learning_rate", 1e-3),
nesterov=hp.get("nesterov", True),
momentum=hp.get("momentum", 0.99),
),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
)
return model
def fit_dataset_model_(
self,
model: tf.keras.Model,
dataset,
**hp
) -> tf.keras.Model:
history = model.fit(
dataset,
epochs=hp.get("epochs", 1),
verbose=False,
)
return model
def score_on_dataset(
self,
model: tf.keras.Model,
dataset,
**hp
) -> float:
test_loss, test_acc = model.evaluate(dataset, verbose=0)
return test_acc
if __name__ == '__main__':
# --------------------------------------------------------------------------------- #
# Initialization #
# --------------------------------------------------------------------------------- #
mnist_train, mnist_test = get_tf_mnist_dataset()
mnist_hp_optimizer = KerasMNISTHpOptimizer()
hp_space = dict(
epochs=list(range(1, 16)),
learning_rate=np.linspace(1e-4, 1e-1, 50),
nesterov=[True, False],
momentum=np.linspace(0.01, 0.99, 50),
use_conv=[True, False],
)
param_gen = RandomHpSearch(hp_space, max_seconds=2*60, max_itr=100)
save_kwargs = dict(
save_name=f"tf_mnist_hp_opt",
title="Random search: MNIST",
)
# --------------------------------------------------------------------------------- #
# Optimization #
# --------------------------------------------------------------------------------- #
param_gen = mnist_hp_optimizer.optimize_on_dataset(
param_gen, mnist_train, save_kwargs=save_kwargs,
stop_criterion=1.0,
)
opt_hp = param_gen.get_best_param()
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(opt_hp)
param_gen.show_optimization("learning_rate")
fig = param_gen.write_optimization_to_html(show=True, dark_mode=True, marker_size=10, **save_kwargs)
print(param_gen.get_optimization_table())
pp.pprint(param_gen.history)
# --------------------------------------------------------------------------------- #
# Save/Load #
# --------------------------------------------------------------------------------- #
print('-' * 50, "Saving Param Gen", '-' * 50)
param_gen.save_history(**save_kwargs)
save_path = param_gen.save_obj(**save_kwargs)
print('-'*50, "delete Param Gen and reload", '-'*50)
del param_gen
param_gen = RandomHpSearch.load_obj(save_path)
print(param_gen.get_optimization_table())
pp.pprint(param_gen.history)
pp.pprint(opt_hp)
print('-' * 50, "re-optimize", '-' * 50)
# Change the budget to be able to optimize again
param_gen.max_itr = param_gen.max_itr + 100
param_gen.max_seconds = param_gen.max_seconds + 60
param_gen = mnist_hp_optimizer.optimize_on_dataset(
param_gen, mnist_train, save_kwargs=save_kwargs,
stop_criterion=1.0, reset_gen=False,
)
opt_hp = param_gen.get_best_param()
fig_from_re_opt = param_gen.write_optimization_to_html(show=True, dark_mode=True, marker_size=10, **save_kwargs)
print(param_gen.get_optimization_table())
pp.pprint(param_gen.history)
pp.pprint(opt_hp)
# --------------------------------------------------------------------------------- #
# Test #
# --------------------------------------------------------------------------------- #
print('-' * 50, "Test", '-' * 50)
model = mnist_hp_optimizer.build_model(**opt_hp)
mnist_hp_optimizer.fit_dataset_model_(
model, mnist_train, **opt_hp
)
test_acc = mnist_hp_optimizer.score_on_dataset(
model, mnist_test, **opt_hp
)
print(f"test_acc: {test_acc * 100:.3f}%")
```
#### File: AutoMLpy/values_generators/values_generator.py
```python
import numpy as np
from typing import Tuple, Union
class ValuesGenerator:
def __init__(
self,
bounds: Tuple[Union[int, float], Union[int, float]],
resolution: int = 1_000,
seed: int = None,
):
self.bounds = bounds
self.resolution = resolution
self.seed = seed
def __call__(self, n: int = 1):
raise NotImplementedError()
```
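`ValuesGenerator` is only a base class: subclasses are expected to implement `__call__` and return `n` values drawn from `bounds`. A minimal, hypothetical subclass is sketched below (it is not part of the package, and the interpretation of `resolution` as a sampling grid is an assumption):
```python
# Hypothetical concrete generator, shown only to illustrate the base class API.
# The use of `resolution` as a discretisation grid is an assumption.
import numpy as np


class UniformValuesGenerator(ValuesGenerator):
    def __call__(self, n: int = 1):
        rng = np.random.default_rng(self.seed)
        values = rng.uniform(self.bounds[0], self.bounds[1], size=n)
        # Snap the samples onto a grid of `resolution` points over the bounds.
        grid = np.linspace(self.bounds[0], self.bounds[1], self.resolution)
        indexes = np.abs(grid[None, :] - values[:, None]).argmin(axis=-1)
        return grid[indexes]


generator = UniformValuesGenerator(bounds=(0.0, 1.0), resolution=100, seed=42)
print(generator(5))
```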
#### File: tests/comparisons/optimisation-time_per_nb-workers.py
```python
from src.AutoMLpy.parameter_generators import SearchType
from src.AutoMLpy import logs_file_setup, log_device_setup
from tests import execute_optimisation
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import os
from src.AutoMLpy.tools import plotly_colors
import logging
import multiprocessing
import tracemalloc
try:
import tensorflow as tf
tf.get_logger().setLevel(logging.FATAL)
except ImportError:
pass
def compute_stats_per_workers_table(
max_workers: int = 10,
dim: int = 1,
iterations_per_workers: int = 10,
compute_delay: float = 1.0,
**kwargs
):
algo_types = kwargs.get("algos", SearchType)
columns = ["Workers", *[st.name for st in algo_types]]
iterations_results = pd.DataFrame(columns=columns)
time_results = pd.DataFrame(columns=columns)
memory_results = pd.DataFrame(columns=columns)
for w in range(1, max_workers+1):
logging.info(f"\n{'-'*50} {w} Workers {'-'*50}")
new_iterations_results = {"Workers": w, **{st.name: [] for st in algo_types}}
new_time_results = {"Workers": w, **{st.name: [] for st in algo_types}}
new_memory_results = {"Workers": w, **{st.name: [] for st in algo_types}}
for _search_type in algo_types:
logging.info(f"\n{'-'*10} {_search_type.name} search {'-'*10}")
ell_itr, ell_time, ell_mem = [], [], []
for itr_seed in range(iterations_per_workers):
tracemalloc.start()
param_gen = execute_optimisation(
_search_type,
dim=dim,
nb_workers=w,
compute_delay=compute_delay,
optimize_kwargs=dict(stop_criterion=kwargs.get("stop_criterion", 0.9)),
seed=itr_seed,
**kwargs
)
current_mem, peak_mem = tracemalloc.get_traced_memory()
ell_itr.append(param_gen.current_itr)
ell_time.append(param_gen.last_itr_elapse_time)
ell_mem.append(peak_mem * 1e-6) # convert bytes to MB
tracemalloc.stop()
new_iterations_results[_search_type.name] = (np.mean(ell_itr), np.std(ell_itr))
new_time_results[_search_type.name] = (np.mean(ell_time), np.std(ell_time))
new_memory_results[_search_type.name] = (np.mean(ell_mem), np.std(ell_mem))
iterations_results = iterations_results.append(new_iterations_results, ignore_index=True)
time_results = time_results.append(new_time_results, ignore_index=True)
memory_results = memory_results.append(new_memory_results, ignore_index=True)
return iterations_results, time_results, memory_results
def show_stats_per_dimension(
max_workers: int = 10,
dim: int = 1,
iterations_per_workers: int = 10,
compute_delay: float = 1.0,
**kwargs
):
iterations_results, time_results, memory_results = compute_stats_per_workers_table(
max_workers, dim, iterations_per_workers, compute_delay, **kwargs
)
keys = [st.name for st in kwargs.get("algos", SearchType)]
iterations_results_mean = {
st: np.array([x[0] for x in iterations_results[st]])
for st in keys
}
iterations_results_std = {
st: np.array([x[1] for x in iterations_results[st]])
for st in keys
}
time_results_mean = {
st: np.array([x[0] for x in time_results[st]])
for st in keys
}
time_results_std = {
st: np.array([x[1] for x in time_results[st]])
for st in keys
}
memory_results_mean = {
st: np.array([x[0] for x in memory_results[st]])
for st in keys
}
memory_results_std = {
st: np.array([x[1] for x in memory_results[st]])
for st in keys
}
iterations_y_list = []
time_y_list = []
memory_y_list = []
# --------------------------------------------------------------------------------- #
# Initialize figure #
# --------------------------------------------------------------------------------- #
fig = go.Figure()
for i, st in enumerate(keys):
x = list(iterations_results["Workers"])
itr_std_upper = list(iterations_results_mean[st] + iterations_results_std[st])
itr_std_lower = list(iterations_results_mean[st] - iterations_results_std[st])
itr_mean = list(iterations_results_mean[st])
itr_std = itr_std_lower + itr_std_upper[::-1]
time_std_upper = list(time_results_mean[st] + time_results_std[st])
time_std_lower = list(time_results_mean[st] - time_results_std[st])
time_mean = list(time_results_mean[st])
time_std = time_std_lower + time_std_upper[::-1]
memory_std_upper = list(memory_results_mean[st] + memory_results_std[st])
memory_std_lower = list(memory_results_mean[st] - memory_results_std[st])
memory_mean = list(memory_results_mean[st])
memory_std = memory_std_lower + memory_std_upper[::-1]
fig.add_trace(
go.Scatter(x=x,
y=itr_mean,
mode='lines',
name=f"{st} Search mean",
line_color=plotly_colors[i], )
)
fig.add_trace(
go.Scatter(x=x+x[::-1],
y=itr_std,
mode='lines',
fill="toself",
fillcolor=plotly_colors[i],
name=f"{st} Search std",
line=dict(width=0.0),
opacity=0.5,)
)
iterations_y_list.append(itr_mean)
iterations_y_list.append(itr_std)
time_y_list.append(time_mean)
time_y_list.append(time_std)
memory_y_list.append(memory_mean)
memory_y_list.append(memory_std)
fig.update_xaxes(title=f"Workers")
fig.update_yaxes(title="Iterations [-]")
fig.update_layout(
title=kwargs.get("title", f"Iterations required to obtain a score of {kwargs['stop_criterion']}"),
autosize=True,
margin=dict(t=150, b=150, l=150, r=150),
template="plotly_dark" if kwargs.get("dark_mode", True) else "seaborn",
font=dict(
size=18,
)
)
# --------------------------------------------------------------------------------- #
# Add Dropdown #
# --------------------------------------------------------------------------------- #
fig.update_layout(
updatemenus=[
dict(
buttons=list([
dict(
args=[
dict(
y=y_list,
),
{
"title": f"{' '.join(label.split(' ')[:-1])} "
f"required to obtain a score of {kwargs['stop_criterion']}",
"xaxis.title.text": f"Workers [-]",
"yaxis.title.text": f"{label}",
}
],
label=f"yaxis: {label}",
method="update"
)
for y_list, label in zip(
[iterations_y_list, time_y_list, memory_y_list],
["Iterations [-]", "Time [s]", "Memory [MB]"]
)
]),
direction="down",
pad={"r": 10, "t": 10},
showactive=True,
x=0.9,
xanchor="left",
y=1.1,
yanchor="middle"
),
]
)
# --------------------------------------------------------------------------------- #
# Saving/showing #
# --------------------------------------------------------------------------------- #
save_dir = kwargs.get("save_dir", f"figures/html_files/")
os.makedirs(save_dir, exist_ok=True)
if kwargs.get("save", True):
fig.write_html(f"{save_dir}/algorithms_workers_comparison-algos[{'-'.join(keys)}]"
f"-maxworkers{max_workers}-dim{dim}-iteration{iterations_per_workers}.html")
fig.show()
return iterations_results, time_results, memory_results
if __name__ == '__main__':
logs_file_setup(__file__, level=logging.INFO)
log_device_setup()
iterations_results, time_results, memory_results = show_stats_per_dimension(
max_workers=min(3, multiprocessing.cpu_count()//2),
dim=1,
iterations_per_workers=2,
compute_delay=0.05,
stop_criterion=0.75,
algos=[SearchType.Grid, ],
dark_mode=False
)
# --------------------------------------------------------------------------------- #
# Iteration per Workers results #
# --------------------------------------------------------------------------------- #
logging.info('\n' + ('-' * 125) + '\n' + "Iteration per Workers results" + '\n' + ('-' * 125))
logging.info(iterations_results)
logging.info(('-' * 125) + '\n')
logging.info('\n' + ('-' * 125) + '\n' + "Iteration per Workers results LaTex" + '\n' + ('-' * 125))
logging.info(iterations_results.to_latex())
logging.info(('-' * 125) + '\n')
# --------------------------------------------------------------------------------- #
# Time per Workers results #
# --------------------------------------------------------------------------------- #
logging.info('\n' + ('-' * 125) + '\n' + "Time per Workers results" + '\n' + ('-' * 125))
logging.info(time_results)
logging.info(('-' * 125) + '\n')
logging.info('\n' + ('-' * 125) + '\n' + "Time per Workers results LaTex" + '\n' + ('-' * 125))
logging.info(time_results.to_latex())
logging.info(('-' * 125) + '\n')
# --------------------------------------------------------------------------------- #
# Memory per Workers results #
# --------------------------------------------------------------------------------- #
logging.info('\n' + ('-' * 125) + '\n' + "Memory per Workers results" + '\n' + ('-' * 125))
logging.info(memory_results)
logging.info(('-' * 125) + '\n')
logging.info('\n' + ('-' * 125) + '\n' + "Memory per Workers results LaTex" + '\n' + ('-' * 125))
logging.info(memory_results.to_latex())
logging.info(('-' * 125) + '\n')
```
#### File: tests/pytorch_items/pytorch_training.py
```python
import torch
from torch import nn
from torch.utils.data import Subset, DataLoader, TensorDataset
import numpy as np
from typing import Dict, Tuple
import logging
import enum
import tqdm
class PhaseType(enum.Enum):
train = 0
val = 1
test = 2
def train_pytorch_network(
network,
loaders,
verbose: bool = False,
**training_kwargs,
):
"""
Fit the given network with the given training data.
Parameters
----------
network: The neural network to fit.
loaders: The data loaders as a dictionary with keys: {train, valid}.
verbose: True to show some training stats else False.
training_kwargs:
        optimizer (torch.optim.Optimizer): The optimizer used to make the weight updates.
        lr (float): The learning rate of the default optimizer, used when no optimizer is given.
        momentum (float): The momentum of the default optimizer, used when no optimizer is given.
        nesterov (bool): The Nesterov flag of the default optimizer, used when no optimizer is given.
        criterion: The loss function. Defaults to torch.nn.CrossEntropyLoss.
        epochs (int): The number of training epochs. Defaults to 5.
        use_cuda (bool): True to use the cuda device else False.
        scheduler: An optional learning rate scheduler.
    Returns
    -------
    The training history: a list of per-epoch logs (train metrics and, when a "valid" loader is given, validation metrics).
    """
"""
training_kwargs.setdefault(
"optimizer",
torch.optim.SGD(
(p for p in network.parameters() if p.requires_grad),
lr=training_kwargs.get("lr", 1e-3),
momentum=training_kwargs.get("momentum", 0.9),
nesterov=training_kwargs.get("nesterov", True),
)
)
training_kwargs.setdefault(
"criterion",
torch.nn.CrossEntropyLoss()
)
history = []
nb_epochs = training_kwargs.get("epochs", 5)
for epoch in range(nb_epochs):
epoch_logs = {}
train_logs = execute_phase(network, loaders["train"], PhaseType.train, verbose, **training_kwargs)
epoch_logs["train"] = train_logs
if "valid" in loaders:
val_logs = execute_phase(network, loaders["valid"], PhaseType.val, verbose, **training_kwargs)
epoch_logs["val"] = val_logs
history.append(epoch_logs)
return history
def execute_phase(
network: nn.Module,
data_loader: DataLoader,
phase_type: PhaseType = PhaseType.train,
verbose: bool = False,
**kwargs
) -> Dict[str, float]:
"""
    Execute a training phase on a network. The possible phases are {train, val, test}.
Parameters
----------
network: The model to fit.
    data_loader: The data loader used to run the current training phase.
phase_type: The phase type in {train, val, test}.
verbose: True to show some training stats else False.
kwargs:
use_cuda (bool): True to use cuda device else False.
scheduler (): A learning rate scheduler.
Returns
-------
The phase logs.
"""
if phase_type == PhaseType.train:
network.train()
else:
network.eval()
if kwargs.get("use_cuda", True):
device = "cuda"
if torch.cuda.is_available():
network.to(device)
else:
device = "cpu"
if "scheduler" in kwargs and kwargs["scheduler"] is not None:
kwargs["scheduler"].step()
phase_logs = {"loss": 0, "acc": 0}
if verbose:
phase_progress = tqdm.tqdm(range(len(data_loader)), unit="batch")
phase_progress.set_description_str(f"Phase: {phase_type.name}")
for j, (inputs, targets) in enumerate(data_loader):
if device == "cuda":
if torch.cuda.is_available():
inputs = inputs.float().to(device)
targets = targets.to(device)
batch_logs = execute_batch_training(network, inputs, targets, phase_type, verbose, **kwargs)
for metric_name, metric in batch_logs.items():
phase_logs[metric_name] = (j * phase_logs[metric_name] + metric) / (j + 1)
if verbose:
phase_progress.update()
phase_progress.set_postfix_str(' '.join([str(_m)+': '+str(f"{_v:.5f}")
for _m, _v in phase_logs.items()]))
if verbose:
phase_progress.close()
return phase_logs
def execute_batch_training(
network: nn.Module,
inputs,
targets,
phase_type: PhaseType = PhaseType.train,
verbose: bool = False,
**kwargs
) -> Dict[str, float]:
"""
Execute a training batch on a network.
Parameters
----------
network: The model to fit.
inputs: The inputs of the model.
targets: The targets of the model.
phase_type: The phase type in {train, val, test}.
verbose: True to show some training stats else False.
kwargs:
        optimizer (torch.optim.Optimizer): The optimizer used to make the weight updates.
        criterion: The loss function used to compute the batch loss.
Returns
-------
Batch logs as dict.
"""
network.zero_grad()
output = network(inputs)
if verbose:
logging.debug(f"\n {output}")
batch_logs = dict(loss=kwargs["criterion"](output, targets))
if phase_type == PhaseType.train:
batch_logs["loss"].backward()
kwargs["optimizer"].step()
batch_logs['acc'] = np.mean((torch.argmax(output, dim=-1) == targets).cpu().detach().numpy())
batch_logs["loss"] = batch_logs["loss"].cpu().detach().numpy()
return batch_logs
```
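A minimal usage sketch of `train_pytorch_network` on synthetic data, assuming the functions above are importable; the tiny model, the fake dataset and the hyper-parameter values are placeholders chosen only to make the call signature concrete:
```python
# Hypothetical usage of train_pytorch_network with synthetic data; assumes the
# functions defined above are in scope. All values below are placeholders.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# 200 random 10-dimensional samples spread over 3 classes.
inputs = torch.randn(200, 10)
targets = torch.randint(0, 3, (200,))
dataset = TensorDataset(inputs, targets)
loaders = {
    "train": DataLoader(dataset, batch_size=32, shuffle=True),
    "valid": DataLoader(dataset, batch_size=32),
}

network = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 3))
history = train_pytorch_network(
    network,
    loaders,
    verbose=True,
    epochs=2,
    lr=1e-2,
    use_cuda=False,  # keep everything on the CPU for this sketch
)
print(history[-1]["train"], history[-1].get("val"))
```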
#### File: AutoMLpy/tests/random_optimizer_test.py
```python
import unittest
from src.AutoMLpy import RandomHpSearch
# Pytorch
from tests.objective_functions.objective_function import ObjectiveFuncHpOptimizer
from tests.objective_functions.vectorized_objective_function import VectorizedObjectiveFuncHpOptimizer
from tests.pytorch_items.pytorch_datasets import get_torch_MNIST_X_y, get_torch_Cifar10_X_y
from tests.pytorch_items.pytorch_hp_optimizers import TorchCifar10HpOptimizer, TorchMNISTHpOptimizer
# Tensorflow
from tests.tensorflow_items.tf_datasets import get_tf_mnist_dataset
from tests.tensorflow_items.tf_hp_optimizers import KerasMNISTHpOptimizer
# Utility modules
import time
import numpy as np
class TestRandomHpOptimizerObjFunc(unittest.TestCase):
def test_optimize_objective_func(self):
obj_func_hp_optimizer = ObjectiveFuncHpOptimizer()
param_gen = RandomHpSearch(obj_func_hp_optimizer.hp_space, max_seconds=60, max_itr=1_000)
save_kwargs = dict(
save_name=f"obj_func_hp_opt",
title="Random search: Objective function",
)
start_time = time.time()
param_gen = obj_func_hp_optimizer.optimize(
param_gen,
np.ones((2, 2)),
np.ones((2, 2)),
n_splits=2,
save_kwargs=save_kwargs,
)
end_time = time.time()
elapsed_time = end_time - start_time
opt_hp = param_gen.get_best_param()
test_acc = obj_func_hp_optimizer.score(obj_func_hp_optimizer.build_model(**opt_hp),
x0=opt_hp["x0"], x1=opt_hp["x1"])
param_gen.write_optimization_to_html(show=True, **save_kwargs)
self.assertTrue(test_acc >= 0.99, f"objective_func --> Random Gen result: {test_acc*100:.3f}%"
f" in {elapsed_time:.2f} [s]")
self.assertTrue(elapsed_time <= 1.1*param_gen.max_seconds)
self.assertTrue(param_gen.current_itr <= param_gen.max_itr)
def test_vectorized_optimize_objective_func(self):
np.random.seed(42)
obj_func_hp_optimizer = VectorizedObjectiveFuncHpOptimizer()
obj_func_hp_optimizer.set_dim(3)
_ = obj_func_hp_optimizer.build_model()
param_gen = RandomHpSearch(obj_func_hp_optimizer.hp_space, max_seconds=60*15, max_itr=10_000)
save_kwargs = dict(
save_name=f"vec_obj_func_hp_opt",
title="Random search: Vectorized objective function",
)
start_time = time.time()
param_gen = obj_func_hp_optimizer.optimize(
param_gen,
np.ones((2, 2)),
np.ones((2, 2)),
n_splits=2,
save_kwargs=save_kwargs,
)
end_time = time.time()
elapsed_time = end_time - start_time
opt_hp = param_gen.get_best_param()
test_acc = obj_func_hp_optimizer.score(obj_func_hp_optimizer.build_model(**opt_hp), **opt_hp)
param_gen.write_optimization_to_html(show=True, **save_kwargs)
self.assertTrue(test_acc >= 0.99, f"Vectorized objective_func --> Random Gen result: {test_acc*100:.3f}%"
f" in {elapsed_time:.2f} [s]")
self.assertTrue(elapsed_time <= 1.1*param_gen.max_seconds)
self.assertTrue(param_gen.current_itr <= param_gen.max_itr)
class TestRandomHpOptimizerVisionProblemPytorch(unittest.TestCase):
def test_optimize_Cifar10(self):
# http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130
cifar10_X_y_dict = get_torch_Cifar10_X_y()
cifar10_hp_optimizer = TorchCifar10HpOptimizer()
hp_space = dict(
epochs=list(range(1, 26)),
batch_size=[32, 64],
learning_rate=np.linspace(1e-4, 1e-1, 50),
nesterov=[True, False],
momentum=np.linspace(0.01, 0.99, 50),
use_batchnorm=[True, False],
pre_normalized=[True, False],
)
param_gen = RandomHpSearch(hp_space, max_seconds=60 * 60 * 1, max_itr=1_000)
save_kwargs = dict(
save_name=f"cifar10_hp_opt",
title="Random search: Cifar10",
)
start_time = time.time()
param_gen = cifar10_hp_optimizer.optimize(
param_gen,
cifar10_X_y_dict["train"]["x"],
cifar10_X_y_dict["train"]["y"],
n_splits=2,
stop_criterion=0.75,
save_kwargs=save_kwargs,
)
end_time = time.time()
elapsed_time = end_time - start_time
opt_hp = param_gen.get_best_param()
model = cifar10_hp_optimizer.build_model(**opt_hp)
cifar10_hp_optimizer.fit_model_(
model,
cifar10_X_y_dict["train"]["x"],
cifar10_X_y_dict["train"]["y"],
**opt_hp
)
test_acc = cifar10_hp_optimizer.score(
model.cpu(),
cifar10_X_y_dict["test"]["x"],
cifar10_X_y_dict["test"]["y"],
**opt_hp
)
param_gen.write_optimization_to_html(show=True, **save_kwargs)
self.assertTrue(
test_acc >= 0.7,
f"Cifar10 --> Random Gen result: {test_acc*100:.3f}%"
)
self.assertTrue(
elapsed_time <= 1.15 * param_gen.max_seconds,
f"Had a budget of {param_gen.max_seconds}s and take {elapsed_time}s"
)
self.assertTrue(
param_gen.current_itr <= param_gen.max_itr,
f"Had a budget of {param_gen.max_itr}itr and take {param_gen.current_itr}itr"
)
def test_optimize_MNIST(self):
# http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130
mnist_X_y_dict = get_torch_MNIST_X_y()
mnist_hp_optimizer = TorchMNISTHpOptimizer()
hp_space = dict(
epochs=list(range(1, 16)),
batch_size=[32, 64, 128],
learning_rate=np.linspace(1e-4, 1e-1, 50),
nesterov=[True, False],
momentum=np.linspace(0.01, 0.99, 50),
pre_normalized=[False, True],
)
param_gen = RandomHpSearch(hp_space, max_seconds=60*60*1, max_itr=1_000)
save_kwargs = dict(
save_name=f"mnist_hp_opt",
title="Random search: MNIST",
)
start_time = time.time()
param_gen = mnist_hp_optimizer.optimize(
param_gen,
mnist_X_y_dict["train"]["x"],
mnist_X_y_dict["train"]["y"],
n_splits=2,
stop_criterion=0.99,
save_kwargs=save_kwargs,
)
end_time = time.time()
elapsed_time = end_time - start_time
opt_hp = param_gen.get_best_param()
model = mnist_hp_optimizer.build_model(**opt_hp)
mnist_hp_optimizer.fit_model_(
model,
mnist_X_y_dict["train"]["x"],
mnist_X_y_dict["train"]["y"],
**opt_hp
)
test_acc = mnist_hp_optimizer.score(
model.cpu(),
mnist_X_y_dict["test"]["x"],
mnist_X_y_dict["test"]["y"],
**opt_hp
)
param_gen.write_optimization_to_html(show=True, **save_kwargs)
self.assertTrue(
test_acc >= 0.985,
f"MNIST --> Random Gen result: {test_acc*100:.3f}%"
)
self.assertTrue(
elapsed_time <= 1.15 * param_gen.max_seconds,
f"Had a budget of {param_gen.max_seconds}s and take {elapsed_time}s"
)
self.assertTrue(
param_gen.current_itr <= param_gen.max_itr,
f"Had a budget of {param_gen.max_itr}itr and take {param_gen.current_itr}itr"
)
class TestRandomHpOptimizerVisionProblemTensorflow(unittest.TestCase):
def test_optimize_MNIST(self):
# http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130
mnist_train, mnist_test = get_tf_mnist_dataset()
mnist_hp_optimizer = KerasMNISTHpOptimizer()
hp_space = dict(
epochs=list(range(1, 16)),
learning_rate=np.linspace(1e-4, 1e-1, 50),
nesterov=[True, False],
momentum=np.linspace(0.01, 0.99, 50),
use_conv=[True, False],
)
param_gen = RandomHpSearch(hp_space, max_seconds=60*60*1, max_itr=1_000)
save_kwargs = dict(
save_name=f"tf_mnist_hp_opt",
title="Random search: MNIST",
)
start_time = time.time()
param_gen = mnist_hp_optimizer.optimize_on_dataset(
param_gen, mnist_train, save_kwargs=save_kwargs, stop_criterion=0.99,
)
end_time = time.time()
elapsed_time = end_time - start_time
opt_hp = param_gen.get_best_param()
model = mnist_hp_optimizer.build_model(**opt_hp)
mnist_hp_optimizer.fit_dataset_model_(
model, mnist_train, **opt_hp
)
test_acc = mnist_hp_optimizer.score_on_dataset(
model, mnist_test, **opt_hp
)
param_gen.write_optimization_to_html(show=True, **save_kwargs)
self.assertTrue(
test_acc >= 0.985,
f"MNIST --> Random Gen result: {test_acc*100:.3f}%"
)
self.assertTrue(
elapsed_time <= 1.15 * param_gen.max_seconds,
f"Had a budget of {param_gen.max_seconds}s and take {elapsed_time}s"
)
self.assertTrue(
param_gen.current_itr <= param_gen.max_itr,
f"Had a budget of {param_gen.max_itr}itr and take {param_gen.current_itr}itr"
)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/tensorflow_items/tf_hp_optimizers.py
```python
import tensorflow as tf
from src.AutoMLpy.optimizers.optimizer import HpOptimizer
from tests.tensorflow_items.tf_models import get_tf_mnist_model
class KerasMNISTHpOptimizer(HpOptimizer):
def build_model(self, **hp) -> tf.keras.Model:
model = get_tf_mnist_model(**hp)
model.compile(
optimizer=tf.keras.optimizers.SGD(
learning_rate=hp.get("learning_rate", 1e-3),
nesterov=hp.get("nesterov", True),
momentum=hp.get("momentum", 0.99),
),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
)
return model
def fit_dataset_model_(
self,
model: tf.keras.Model,
dataset,
**hp
) -> tf.keras.Model:
history = model.fit(
dataset,
epochs=hp.get("epochs", 1),
verbose=False,
)
return model
def score_on_dataset(
self,
model: tf.keras.Model,
dataset,
**hp
) -> float:
test_loss, test_acc = model.evaluate(dataset, verbose=0)
return test_acc
```
|
{
"source": "JeremieGince/MLIntroduction",
"score": 3
}
|
#### File: solution/from_scratch/knn.py
```python
import numpy as np
from typing import Optional
class KNN:
def __init__(self, k=15):
self.k = k
self.X = None
self.y = None
def fit(self, X: np.ndarray, y: np.ndarray):
self.X = X
self.y = y
def predict(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> np.ndarray:
out = []
for x in X:
dist = np.sum((self.X - x)**2, axis=-1)
sorted_idx = np.argsort(dist)
k_indexes = sorted_idx[:self.k]
y_nn = self.y[k_indexes]
            y_nn_values, y_nn_counts = np.unique(y_nn, return_counts=True)
            out.append(y_nn_values[np.argmax(y_nn_counts)])  # majority vote among the k nearest neighbours
out = np.asarray(out)
if y is not None:
acc = np.isclose(np.abs(y - out), 0).astype(dtype=int).sum() / y.shape[0]
return out, acc
return out
```
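A small usage sketch of the `KNN` class above on synthetic 2-D data; the cluster centres and sizes are arbitrary:
```python
# Minimal usage sketch of the KNN classifier above on two synthetic blobs.
import numpy as np

rng = np.random.default_rng(0)
X_train = np.vstack([rng.normal(0.0, 1.0, (50, 2)), rng.normal(4.0, 1.0, (50, 2))])
y_train = np.array([0] * 50 + [1] * 50)
X_test = np.vstack([rng.normal(0.0, 1.0, (10, 2)), rng.normal(4.0, 1.0, (10, 2))])
y_test = np.array([0] * 10 + [1] * 10)

knn = KNN(k=5)
knn.fit(X_train, y_train)
predictions, accuracy = knn.predict(X_test, y_test)
print(predictions)
print(f"accuracy: {accuracy:.2f}")
```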
|
{
"source": "JeremieGince/ParallelismIntroduction",
"score": 3
}
|
#### File: solution/sensors/dewpoint.py
```python
import pandas as pd
from scipy.stats import truncnorm
import time
from .sensor import Sensor
class DewPointSensor(Sensor):
def __init__(self, sensor_id: int, name: str = "dewPointSensor", units='Temperature [${}^\circ F$]'):
"""
        Constructor of the DewPointSensor class, which creates objects simulating dew point sensors.
        :param sensor_id: id of the current sensor. Not really important.
        :param name: Name of the current sensor. Defaults to "dewPointSensor".
        :param units: Units of the current sensor. Defaults to "Temperature [${}^\circ F$]", i.e. degrees F.
"""
super(DewPointSensor, self).__init__(sensor_id, name, units=units)
self.acquisition_time = 0.1
@property
def columns_names(self):
"""
        Property giving access to the names of the columns that are updated in the log by the SensorLogger
        in charge of the current sensor.
        :return: the names of the columns updated by the SensorLogger in charge of the current sensor.
        In this case, these are "DewPointLowF", "DewPointHighF", "DewPointAvgF".
"""
return ["DewPointLowF", "DewPointHighF", "DewPointAvgF"]
def read(self):
"""
        Method simulating a reading of the current sensor. Sleeps for a while to simulate the acquisition time.
        :return: the value of the current sensor
"""
time.sleep(self.acquisition_time)
cols = self.columns_names
data = pd.read_csv(Sensor.rawData, index_col="Date")
low, high, avg = data.loc[self._date, cols]
scale = max(high - avg, avg - low)
a, b = (low - avg) / scale, (high - avg) / scale
val = truncnorm.rvs(a, b, loc=avg, size=1, scale=scale).item()
return val
```
#### File: solution/sensors/humidity.py
```python
import pandas as pd
from scipy.stats import truncnorm
import time
from .sensor import Sensor
class HumiditySensor(Sensor):
def __init__(self, sensor_id: int, name="humiditySensor", units: str = 'Humidity [%]'):
super(HumiditySensor, self).__init__(sensor_id, name, units=units)
self.acquisition_time = 0.1
@property
def columns_names(self):
return ["HumidityLowPercent", "HumidityHighPercent", "HumidityAvgPercent"]
def read(self):
time.sleep(self.acquisition_time)
cols = self.columns_names
data = pd.read_csv(Sensor.rawData, index_col="Date")
low, high, avg = data.loc[self._date, cols]
scale = max(high - avg, avg - low)
a, b = (low - avg) / scale, (high - avg) / scale
val = truncnorm.rvs(a, b, loc=avg, size=1, scale=scale).item()
return val
```
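Both sensors draw their simulated reading from a normal distribution centred on the daily average and truncated at the recorded low and high. With SciPy's `truncnorm`, the clip points `a` and `b` are expressed in standard deviations relative to `loc` and `scale`, which is exactly what the `(low - avg) / scale` and `(high - avg) / scale` terms compute. A standalone sketch of that sampling logic, with made-up numbers:
```python
# Standalone sketch of the sampling used by both sensors above.
# scipy.stats.truncnorm clips at loc + a * scale and loc + b * scale,
# hence the normalisation of the bounds below. The values are made up.
from scipy.stats import truncnorm

low, high, avg = 55.0, 75.0, 63.0       # hypothetical daily record
scale = max(high - avg, avg - low)      # spread of the distribution
a, b = (low - avg) / scale, (high - avg) / scale
samples = truncnorm.rvs(a, b, loc=avg, scale=scale, size=5, random_state=0)
print(samples)                          # every value lies within [low, high]
```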
|
{
"source": "JeremieHornus/fonttools",
"score": 4
}
|
#### File: fontTools/pens/recordingPen.py
```python
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen, DecomposingPen
from fontTools.pens.pointPen import AbstractPointPen
__all__ = [
"replayRecording",
"RecordingPen",
"DecomposingRecordingPen",
"RecordingPointPen",
]
def replayRecording(recording, pen):
"""Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
to a pen.
Note that recording does not have to be produced by those pens.
It can be any iterable of tuples of method name and tuple-of-arguments.
    Likewise, pen can be any object receiving those method calls.
"""
for operator,operands in recording:
getattr(pen, operator)(*operands)
class RecordingPen(AbstractPen):
"""Pen recording operations that can be accessed or replayed.
The recording can be accessed as pen.value; or replayed using
pen.replay(otherPen).
Usage example:
==============
from fontTools.ttLib import TTFont
from fontTools.pens.recordingPen import RecordingPen
glyph_name = 'dollar'
font_path = 'MyFont.otf'
font = TTFont(font_path)
glyphset = font.getGlyphSet()
glyph = glyphset[glyph_name]
pen = RecordingPen()
glyph.draw(pen)
print(pen.value)
"""
def __init__(self):
self.value = []
def moveTo(self, p0):
self.value.append(('moveTo', (p0,)))
def lineTo(self, p1):
self.value.append(('lineTo', (p1,)))
def qCurveTo(self, *points):
self.value.append(('qCurveTo', points))
def curveTo(self, *points):
self.value.append(('curveTo', points))
def closePath(self):
self.value.append(('closePath', ()))
def endPath(self):
self.value.append(('endPath', ()))
def addComponent(self, glyphName, transformation):
self.value.append(('addComponent', (glyphName, transformation)))
def replay(self, pen):
replayRecording(self.value, pen)
class DecomposingRecordingPen(DecomposingPen, RecordingPen):
""" Same as RecordingPen, except that it doesn't keep components
as references, but draws them decomposed as regular contours.
The constructor takes a single 'glyphSet' positional argument,
a dictionary of glyph objects (i.e. with a 'draw' method) keyed
    by their name.
>>> class SimpleGlyph(object):
... def draw(self, pen):
... pen.moveTo((0, 0))
... pen.curveTo((1, 1), (2, 2), (3, 3))
... pen.closePath()
>>> class CompositeGlyph(object):
... def draw(self, pen):
... pen.addComponent('a', (1, 0, 0, 1, -1, 1))
>>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
>>> for name, glyph in sorted(glyphSet.items()):
... pen = DecomposingRecordingPen(glyphSet)
... glyph.draw(pen)
... print("{}: {}".format(name, pen.value))
a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
"""
# raises KeyError if base glyph is not found in glyphSet
skipMissingComponents = False
class RecordingPointPen(AbstractPointPen):
"""PointPen recording operations that can be accessed or replayed.
The recording can be accessed as pen.value; or replayed using
pointPen.replay(otherPointPen).
Usage example:
==============
from defcon import Font
from fontTools.pens.recordingPen import RecordingPointPen
glyph_name = 'a'
font_path = 'MyFont.ufo'
font = Font(font_path)
glyph = font[glyph_name]
pen = RecordingPointPen()
glyph.drawPoints(pen)
print(pen.value)
new_glyph = font.newGlyph('b')
pen.replay(new_glyph.getPointPen())
"""
def __init__(self):
self.value = []
def beginPath(self, **kwargs):
self.value.append(("beginPath", (), kwargs))
def endPath(self):
self.value.append(("endPath", (), {}))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
d = {}
for k, v in kwargs.items():
d[k] = 'None' if not v else v
self.value.append(("addPoint", (pt, segmentType, smooth, name), d))
def addComponent(self, baseGlyphName, transformation, **kwargs):
self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
def replay(self, pointPen):
for operator, args, kwargs in self.value:
getattr(pointPen, operator)(*args, **kwargs)
class RecordingPointPenCompact(AbstractPointPen):
def __init__(self):
self.value = []
def beginPath(self, **kwargs):
self.value.append(("beginPath"))
def endPath(self):
self.value.append(("endPath"))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
# self.value.append(("addPoint", (pt, segmentType, smooth)))
d = {"x":pt[0], "y":pt[1]}
if segmentType:
d["type"] = segmentType
self.value.append(("point", d))
def addComponent(self, baseGlyphName, transformation, **kwargs):
self.value.append(("addComponent", (baseGlyphName, transformation)))
# def addDeepComponent(self, glyphName, transformation, coord):
# self.value.append(("addDeepComponent", (glyphName, transformation, coord)))
# def addGlyphVariationLayers(self, glyphVariationLayers: list):
# pass
# def addVariationGlyphs(self, variationGlyphs: list):
# pass
def replay(self, pointPen):
for operator, args, kwargs in self.value:
getattr(pointPen, operator)(*args, **kwargs)
if __name__ == "__main__":
from fontTools.pens.basePen import _TestPen
pen = RecordingPen()
pen.moveTo((0, 0))
pen.lineTo((0, 100))
pen.curveTo((50, 75), (60, 50), (50, 25))
pen.closePath()
from pprint import pprint
pprint(pen.value)
```
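Beyond the `__main__` demo, a recording can be replayed into any other pen; a short sketch using only the classes defined in this module:
```python
# Minimal sketch: replay one RecordingPen's value into another pen.
source_pen = RecordingPen()
source_pen.moveTo((0, 0))
source_pen.lineTo((100, 0))
source_pen.lineTo((100, 100))
source_pen.closePath()

target_pen = RecordingPen()
replayRecording(source_pen.value, target_pen)
assert target_pen.value == source_pen.value  # same drawing commands, replayed
```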
|
{
"source": "JeremieHornus/ufo2ft",
"score": 2
}
|
#### File: ufo2ft/filters/decomposeComponents.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from fontTools.misc.transform import Transform
import ufo2ft.util
from ufo2ft.filters import BaseFilter
class DecomposeComponentsFilter(BaseFilter):
def filter(self, glyph):
if not glyph.components:
return False
ufo2ft.util.deepCopyContours(self.context.glyphSet, glyph, glyph, Transform())
glyph.clearComponents()
return True
```
#### File: ufo2ft/filters/sortContours.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import fontTools.pens.boundsPen
import fontTools.pens.pointPen
from ufo2ft.filters import BaseFilter
logger = logging.getLogger(__name__)
class SortContoursFilter(BaseFilter):
"""Sort contours by their bounding box.
ATTENTION: This filter should be run after decomposition! Mixed contours and
components cannot meaningfully be sorted.
This is to work around the undefined contour order in pyclipper, see
https://sourceforge.net/p/polyclipping/bugs/195/. It only strikes on glyphs
that contain a lot of contours on the same height (think word marks or glyphs
like U+FFFC OBJECT REPLACEMENT CHARACTER, U+034F COMBINING GRAPHEME JOINER
or U+2591 LIGHT SHADE).
"""
def filter(self, glyph):
if len(glyph) == 0: # As in, no contours.
return False
if glyph.components:
logger.warning(
"Glyph '%s' contains components which will not be sorted.", glyph.name,
)
contours = sorted(
(c for c in glyph), key=lambda contour: _control_bounding_box(contour)
)
glyph.clearContours()
if hasattr(glyph, "appendContour"): # defcon
for contour in contours:
glyph.appendContour(contour)
else: # ufoLib2
glyph.contours.extend(contours)
return True
def _control_bounding_box(contour):
pen = fontTools.pens.boundsPen.ControlBoundsPen(None)
p2s_pen = fontTools.pens.pointPen.PointToSegmentPen(pen)
contour.drawPoints(p2s_pen)
return pen.bounds
```
#### File: Lib/ufo2ft/preProcessor.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
from fontTools.misc.py23 import basestring
from ufo2ft.constants import (
COLOR_LAYERS_KEY,
COLOR_LAYER_MAPPING_KEY,
COLOR_PALETTES_KEY,
)
from ufo2ft.fontInfoData import getAttrWithFallback
from ufo2ft.filters import loadFilters
from ufo2ft.filters.decomposeComponents import DecomposeComponentsFilter
from ufo2ft.util import _GlyphSet
class BasePreProcessor(object):
"""Base class for objects that performs pre-processing operations on
the UFO glyphs, such as decomposing composites, removing overlaps, or
applying custom filters.
By default the input UFO is **not** modified. The ``process`` method
returns a dictionary containing the new modified glyphset, keyed by
glyph name. If ``inplace`` is True, the input UFO is modified directly
without the need to first copy the glyphs.
Subclasses can override the ``initDefaultFilters`` method and return
a list of built-in filters which are performed in a predefined order,
between the user-defined pre- and post-filters.
The extra kwargs passed to the constructor can be used to customize the
initialization of the default filters.
Custom filters can be applied before or after the default filters.
These are specified in the UFO lib.plist under the private key
"com.github.googlei18n.ufo2ft.filters".
"""
def __init__(
self, ufo, inplace=False, layerName=None, skipExportGlyphs=None, **kwargs
):
self.ufo = ufo
self.inplace = inplace
self.layerName = layerName
self.glyphSet = _GlyphSet.from_layer(
ufo, layerName, copy=not inplace, skipExportGlyphs=skipExportGlyphs
)
self.defaultFilters = self.initDefaultFilters(**kwargs)
self.preFilters, self.postFilters = loadFilters(ufo)
def initDefaultFilters(self, **kwargs):
return [] # pragma: no cover
def process(self):
ufo = self.ufo
glyphSet = self.glyphSet
for func in self.preFilters + self.defaultFilters + self.postFilters:
func(ufo, glyphSet)
return glyphSet
def _init_explode_color_layer_glyphs_filter(ufo, filters):
# Initialize ExplodeColorLayerGlyphsFilter, which copies color glyph layers
# as standalone glyphs to the default glyph set (for building COLR table), if the
# UFO contains the required 'colorPalettes' key, as well as 'colorLayerMapping' lib
# keys (in either the font's or glyph's lib).
# Skip doing that if an explicit 'colorLayers' key is already present.
if (
COLOR_PALETTES_KEY in ufo.lib
and COLOR_LAYERS_KEY not in ufo.lib
and (
COLOR_LAYER_MAPPING_KEY in ufo.lib
or any(COLOR_LAYER_MAPPING_KEY in g.lib for g in ufo)
)
):
from ufo2ft.filters.explodeColorLayerGlyphs import ExplodeColorLayerGlyphsFilter
filters.append(ExplodeColorLayerGlyphsFilter())
class OTFPreProcessor(BasePreProcessor):
"""Preprocessor for building CFF-flavored OpenType fonts.
By default, it decomposes all the components.
If ``removeOverlaps`` is True, it performs a union boolean operation on
all the glyphs' contours.
By default, booleanOperations is used to remove overlaps. You can choose
skia-pathops by setting ``overlapsBackend`` to the enum value
``RemoveOverlapsFilter.SKIA_PATHOPS``, or the string "pathops".
"""
def initDefaultFilters(self, removeOverlaps=False, overlapsBackend=None):
filters = []
_init_explode_color_layer_glyphs_filter(self.ufo, filters)
filters.append(DecomposeComponentsFilter())
if removeOverlaps:
from ufo2ft.filters.removeOverlaps import RemoveOverlapsFilter
if overlapsBackend is not None:
filters.append(RemoveOverlapsFilter(backend=overlapsBackend))
else:
filters.append(RemoveOverlapsFilter())
return filters
class TTFPreProcessor(OTFPreProcessor):
"""Preprocessor for building TrueType-flavored OpenType fonts.
By default, it decomposes all the glyphs with mixed component/contour
outlines.
If ``removeOverlaps`` is True, it performs a union boolean operation on
all the glyphs' contours.
By default, booleanOperations is used to remove overlaps. You can choose
skia-pathops by setting ``overlapsBackend`` to the enum value
``RemoveOverlapsFilter.SKIA_PATHOPS``, or the string "pathops".
By default, it also converts all the PostScript cubic Bezier curves to
TrueType quadratic splines. If the outlines are already quadratic, you
can skip this by setting ``convertCubics`` to False.
The optional ``conversionError`` argument controls the tolerance
of the approximation algorithm. It is measured as the maximum distance
between the original and converted curve, and it's relative to the UPM
of the font (default: 1/1000 or 0.001).
When converting curves to quadratic, it is assumed that the contours'
winding direction is set following the PostScript counter-clockwise
convention. Thus, by default the direction is reversed, in order to
conform to opposite clockwise convention for TrueType outlines.
You can disable this by setting ``reverseDirection`` to False.
If both ``inplace`` and ``rememberCurveType`` options are True, the curve
type "quadratic" is saved in font' lib under a private cu2qu key; the
preprocessor will not try to convert them again if the curve type is
already set to "quadratic".
"""
def initDefaultFilters(
self,
removeOverlaps=False,
overlapsBackend=None,
convertCubics=True,
conversionError=None,
reverseDirection=True,
rememberCurveType=True,
):
filters = []
_init_explode_color_layer_glyphs_filter(self.ufo, filters)
# len(g) is the number of contours, so we include the all glyphs
# that have both components and at least one contour
filters.append(DecomposeComponentsFilter(include=lambda g: len(g)))
if removeOverlaps:
from ufo2ft.filters.removeOverlaps import RemoveOverlapsFilter
if overlapsBackend is not None:
filters.append(RemoveOverlapsFilter(backend=overlapsBackend))
else:
filters.append(RemoveOverlapsFilter())
if convertCubics:
from ufo2ft.filters.cubicToQuadratic import CubicToQuadraticFilter
filters.append(
CubicToQuadraticFilter(
conversionError=conversionError,
reverseDirection=reverseDirection,
rememberCurveType=rememberCurveType and self.inplace,
)
)
return filters
class TTFInterpolatablePreProcessor(object):
"""Preprocessor for building TrueType-flavored OpenType fonts with
interpolatable quadratic outlines.
The constructor takes a list of UFO fonts, and the ``process`` method
returns the modified glyphsets (list of dicts) in the same order.
The pre-processor performs the conversion from cubic to quadratic on
all the UFOs at once, then decomposes mixed contour/component glyphs.
Additional pre/post custom filter are also applied to each single UFOs,
respectively before or after the default filters, if they are specified
in the UFO's lib.plist under the private key
"com.github.googlei18n.ufo2ft.filters".
NOTE: If you use any custom filters, the resulting glyphsets may no longer
be interpolation compatible, depending on the particular filter used or
whether they are applied to only some vs all of the UFOs.
The ``conversionError``, ``reverseDirection`` and ``rememberCurveType``
arguments work in the same way as in the ``TTFPreProcessor``.
"""
def __init__(
self,
ufos,
inplace=False,
conversionError=None,
reverseDirection=True,
rememberCurveType=True,
layerNames=None,
skipExportGlyphs=None,
):
from cu2qu.ufo import DEFAULT_MAX_ERR
self.ufos = ufos
self.inplace = inplace
if layerNames is None:
layerNames = [None] * len(ufos)
assert len(ufos) == len(layerNames)
self.layerNames = layerNames
self.glyphSets = [
_GlyphSet.from_layer(
ufo, layerName, copy=not inplace, skipExportGlyphs=skipExportGlyphs
)
for ufo, layerName in zip(ufos, layerNames)
]
self._conversionErrors = [
(conversionError or DEFAULT_MAX_ERR)
* getAttrWithFallback(ufo.info, "unitsPerEm")
for ufo in ufos
]
self._reverseDirection = reverseDirection
self._rememberCurveType = rememberCurveType
self.preFilters, self.postFilters = [], []
for ufo in ufos:
pre, post = loadFilters(ufo)
self.preFilters.append(pre)
self.postFilters.append(post)
def process(self):
from cu2qu.ufo import fonts_to_quadratic
# first apply all custom pre-filters
for funcs, ufo, glyphSet in zip(self.preFilters, self.ufos, self.glyphSets):
for func in funcs:
func(ufo, glyphSet)
fonts_to_quadratic(
self.glyphSets,
max_err=self._conversionErrors,
reverse_direction=self._reverseDirection,
dump_stats=True,
remember_curve_type=self._rememberCurveType and self.inplace,
)
decompose = DecomposeComponentsFilter(include=lambda g: len(g))
for ufo, glyphSet in zip(self.ufos, self.glyphSets):
decompose(ufo, glyphSet)
# finally apply all custom post-filters
for funcs, ufo, glyphSet in zip(self.postFilters, self.ufos, self.glyphSets):
for func in funcs:
func(ufo, glyphSet)
return self.glyphSets
```
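A brief sketch of how these preprocessors are typically driven, assuming a UFO loaded with ufoLib2 (the font path is a placeholder); `process()` returns the modified glyph set keyed by glyph name and leaves the input UFO untouched unless `inplace=True`:
```python
# Hypothetical driver code for the preprocessors above; "MyFont.ufo" is a placeholder.
import ufoLib2

ufo = ufoLib2.Font.open("MyFont.ufo")

# CFF-flavoured build: decompose all components and remove overlaps.
otf_glyphset = OTFPreProcessor(ufo, removeOverlaps=True).process()

# TrueType-flavoured build: also converts cubic curves to quadratic splines.
ttf_glyphset = TTFPreProcessor(ufo, removeOverlaps=True, conversionError=0.001).process()

print(sorted(otf_glyphset.keys())[:10])
```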
#### File: ufo2ft/tests/featureCompiler_test.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
from textwrap import dedent
import logging
import re
from fontTools import ttLib
from fontTools.feaLib.error import IncludedFeaNotFound, FeatureLibError
from ufo2ft.featureWriters import (
BaseFeatureWriter,
KernFeatureWriter,
FEATURE_WRITERS_KEY,
ast,
)
from ufo2ft.featureCompiler import FeatureCompiler, parseLayoutFeatures, logger
import py
import pytest
from .testSupport import pushd
class ParseLayoutFeaturesTest(object):
def test_include(self, FontClass, tmpdir):
tmpdir.join("test.fea").write_text(
dedent(
"""\
# hello world
"""
),
encoding="utf-8",
)
ufo = FontClass()
ufo.features.text = dedent(
"""\
include(test.fea)
"""
)
ufo.save(str(tmpdir.join("Test.ufo")))
fea = parseLayoutFeatures(ufo)
assert "# hello world" in str(fea)
def test_include_no_ufo_path(self, FontClass, tmpdir):
ufo = FontClass()
ufo.features.text = dedent(
"""\
include(test.fea)
"""
)
with pushd(str(tmpdir)):
with pytest.raises(IncludedFeaNotFound):
parseLayoutFeatures(ufo)
def test_include_not_found(self, FontClass, tmpdir, caplog):
caplog.set_level(logging.ERROR)
tmpdir.join("test.fea").write_text(
dedent(
"""\
# hello world
"""
),
encoding="utf-8",
)
ufo = FontClass()
ufo.features.text = dedent(
"""\
include(../test.fea)
"""
)
ufo.save(str(tmpdir.join("Test.ufo")))
with caplog.at_level(logging.WARNING, logger=logger.name):
with pytest.raises(IncludedFeaNotFound):
parseLayoutFeatures(ufo)
assert len(caplog.records) == 1
assert "change the file name in the include" in caplog.text
class FeatureCompilerTest(object):
def test_ttFont(self, FontClass):
ufo = FontClass()
ufo.newGlyph("f")
ufo.newGlyph("f_f")
ufo.features.text = dedent(
"""\
feature liga {
sub f f by f_f;
} liga;
"""
)
ttFont = ttLib.TTFont()
ttFont.setGlyphOrder(["f", "f_f"])
compiler = FeatureCompiler(ufo, ttFont)
compiler.compile()
assert "GSUB" in ttFont
gsub = ttFont["GSUB"].table
assert gsub.FeatureList.FeatureCount == 1
assert gsub.FeatureList.FeatureRecord[0].FeatureTag == "liga"
def test_ttFont_None(self, FontClass):
ufo = FontClass()
ufo.newGlyph("f")
ufo.newGlyph("f_f")
ufo.features.text = dedent(
"""\
feature liga {
sub f f by f_f;
} liga;
"""
)
compiler = FeatureCompiler(ufo)
ttFont = compiler.compile()
assert "GSUB" in ttFont
gsub = ttFont["GSUB"].table
assert gsub.FeatureList.FeatureCount == 1
assert gsub.FeatureList.FeatureRecord[0].FeatureTag == "liga"
def test_deprecated_methods(self, FontClass):
compiler = FeatureCompiler(FontClass())
with pytest.warns(UserWarning, match="method is deprecated"):
compiler.setupFile_features()
compiler.features = ""
with pytest.warns(UserWarning, match="method is deprecated"):
compiler.setupFile_featureTables()
class UserCompiler(FeatureCompiler):
def setupFile_features(self):
self.features = "# hello world"
def setupFile_featureTables(self):
self.ttFont = ttLib.TTFont()
compiler = UserCompiler(FontClass())
with pytest.warns(UserWarning, match="method is deprecated"):
compiler.compile()
def test_deprecated_mtiFeatures_argument(self, FontClass):
with pytest.warns(UserWarning, match="argument is ignored"):
FeatureCompiler(FontClass(), mtiFeatures="whatever")
def test_featureWriters_empty(self, FontClass):
kernWriter = KernFeatureWriter(ignoreMarks=False)
ufo = FontClass()
ufo.newGlyph("a")
ufo.newGlyph("v")
ufo.kerning.update({("a", "v"): -40})
compiler = FeatureCompiler(ufo, featureWriters=[kernWriter])
ttFont1 = compiler.compile()
assert "GPOS" in ttFont1
compiler = FeatureCompiler(ufo, featureWriters=[])
ttFont2 = compiler.compile()
assert "GPOS" not in ttFont2
def test_loadFeatureWriters_from_UFO_lib(self, FontClass):
ufo = FontClass()
ufo.newGlyph("a")
ufo.newGlyph("v")
ufo.kerning.update({("a", "v"): -40})
ufo.lib[FEATURE_WRITERS_KEY] = [{"class": "KernFeatureWriter"}]
compiler = FeatureCompiler(ufo)
ttFont = compiler.compile()
assert len(compiler.featureWriters) == 1
assert isinstance(compiler.featureWriters[0], KernFeatureWriter)
assert "GPOS" in ttFont
def test_GSUB_writers_run_first(self, FontClass):
class FooFeatureWriter(BaseFeatureWriter):
tableTag = "GSUB"
def write(self, font, feaFile, compiler=None):
foo = ast.FeatureBlock("FOO ")
foo.statements.append(
ast.SingleSubstStatement(
"a", "v", prefix="", suffix="", forceChain=None
)
)
feaFile.statements.append(foo)
featureWriters = [KernFeatureWriter, FooFeatureWriter]
ufo = FontClass()
ufo.newGlyph("a")
ufo.newGlyph("v")
ufo.kerning.update({("a", "v"): -40})
compiler = FeatureCompiler(ufo, featureWriters=featureWriters)
assert len(compiler.featureWriters) == 2
assert compiler.featureWriters[0].tableTag == "GSUB"
assert compiler.featureWriters[1].tableTag == "GPOS"
ttFont = compiler.compile()
assert "GSUB" in ttFont
gsub = ttFont["GSUB"].table
assert gsub.FeatureList.FeatureCount == 1
assert gsub.FeatureList.FeatureRecord[0].FeatureTag == "FOO "
def test_buildTables_FeatureLibError(self, FontClass, caplog):
caplog.set_level(logging.CRITICAL)
ufo = FontClass()
ufo.newGlyph("f")
ufo.newGlyph("f.alt01")
ufo.newGlyph("f_f")
features = dedent(
"""\
feature BUGS {
# invalid
lookup MIXED_TYPE {
sub f by f.alt01;
sub f f by f_f;
} MIXED_TYPE;
} BUGS;
"""
)
ufo.features.text = features
compiler = FeatureCompiler(ufo)
tmpfile = None
try:
with caplog.at_level(logging.ERROR, logger=logger.name):
with pytest.raises(FeatureLibError):
compiler.compile()
assert len(caplog.records) == 1
assert "Compilation failed! Inspect temporary file" in caplog.text
tmpfile = py.path.local(re.findall(".*: '(.*)'$", caplog.text)[0])
assert tmpfile.exists()
assert tmpfile.read_text("utf-8") == features
finally:
if tmpfile is not None:
tmpfile.remove(ignore_errors=True)
```
#### File: ufo2ft/tests/testSupport.py
```python
from __future__ import print_function, division, absolute_import
import sys
import os
import types
import contextlib
from fontTools.misc.py23 import tostr
class _TempModule(object):
"""Temporarily replace a module in sys.modules with an empty namespace"""
def __init__(self, mod_name):
mod_name = tostr(mod_name, encoding="ascii")
self.mod_name = mod_name
self.module = types.ModuleType(mod_name)
self._saved_module = []
def __enter__(self):
mod_name = self.mod_name
try:
self._saved_module.append(sys.modules[mod_name])
except KeyError:
pass
sys.modules[mod_name] = self.module
return self
def __exit__(self, *args):
if self._saved_module:
sys.modules[self.mod_name] = self._saved_module[0]
else:
del sys.modules[self.mod_name]
self._saved_module = []
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
```
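A short sketch of the two helpers above: `_TempModule` temporarily shadows a module in `sys.modules`, and `pushd` temporarily changes the working directory (the module name and directory are placeholders):
```python
# Minimal sketch of the helpers above; the module name and directory are placeholders.
import os
import sys
import tempfile

with _TempModule("some_fake_module") as tmp:
    # Inside the block, sys.modules exposes the empty placeholder module.
    assert sys.modules["some_fake_module"] is tmp.module
assert "some_fake_module" not in sys.modules  # removed again on exit

with tempfile.TemporaryDirectory() as tmpdir:
    with pushd(tmpdir) as saved_cwd:
        print("now in:", os.getcwd())
    print("back in:", saved_cwd)
```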
|
{
"source": "JeremieHornus/ufoLib2",
"score": 2
}
|
#### File: ufoLib2/objects/misc.py
```python
from abc import abstractmethod
from collections.abc import Mapping, MutableMapping
from copy import deepcopy
from typing import (
Any,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Type,
TypeVar,
Union,
)
import attr
from fontTools.misc.transform import Transform
from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen
from fontTools.ufoLib import UFOReader, UFOWriter
from ufoLib2.typing import Drawable
class BoundingBox(NamedTuple):
"""Represents a bounding box as a tuple of (xMin, yMin, xMax, yMax)."""
xMin: float
yMin: float
xMax: float
yMax: float
def getBounds(drawable: Drawable, layer: Any) -> Optional[BoundingBox]:
# XXX: layer should behave like a mapping of glyph names to Glyph objects, but
# cyclic imports...
pen = BoundsPen(layer)
# raise 'KeyError' when a referenced component is missing from glyph set
pen.skipMissingComponents = False
drawable.draw(pen)
return None if pen.bounds is None else BoundingBox(*pen.bounds)
def getControlBounds(drawable: Drawable, layer: Any) -> Optional[BoundingBox]:
# XXX: layer should behave like a mapping of glyph names to Glyph objects, but
# cyclic imports...
pen = ControlBoundsPen(layer)
# raise 'KeyError' when a referenced component is missing from glyph set
pen.skipMissingComponents = False
drawable.draw(pen)
return None if pen.bounds is None else BoundingBox(*pen.bounds)
def _deepcopy_unlazify_attrs(self: Any, memo: Any) -> Any:
if getattr(self, "_lazy", True) and hasattr(self, "unlazify"):
self.unlazify()
return self.__class__(
**{
(a.name if a.name[0] != "_" else a.name[1:]): deepcopy(
getattr(self, a.name), memo
)
for a in attr.fields(self.__class__)
if a.init and a.metadata.get("copyable", True)
},
)
class Placeholder:
"""Represents a sentinel value to signal a "lazy" object hasn't been loaded yet."""
_NOT_LOADED = Placeholder()
# Create a generic variable for mypy that can be 'DataStore' or any subclass.
Tds = TypeVar("Tds", bound="DataStore")
@attr.s(auto_attribs=True, slots=True, repr=False)
class DataStore(MutableMapping):
"""Represents the base class for ImageSet and DataSet.
Both behave like a dictionary that loads its "values" lazily by default and only
differ in which reader and writer methods they call.
"""
_data: Dict[str, Union[bytes, Placeholder]] = attr.ib(factory=dict)
_reader: Optional[UFOReader] = attr.ib(
default=None, init=False, repr=False, eq=False
)
_scheduledForDeletion: Set[str] = attr.ib(factory=set, init=False, repr=False)
@classmethod
def read(cls: Type[Tds], reader: UFOReader, lazy: bool = True) -> Tds:
"""Instantiate the data store from a :class:`fontTools.ufoLib.UFOReader`."""
self = cls()
for fileName in cls.list_contents(reader):
if lazy:
self._data[fileName] = _NOT_LOADED
else:
self._data[fileName] = cls.read_data(reader, fileName)
if lazy:
self._reader = reader
return self
@staticmethod
@abstractmethod
def list_contents(reader: UFOReader) -> List[str]:
"""Returns a list of POSIX filename strings in the data store."""
...
@staticmethod
@abstractmethod
def read_data(reader: UFOReader, filename: str) -> bytes:
"""Returns the data at filename within the store."""
...
@staticmethod
@abstractmethod
def write_data(writer: UFOWriter, filename: str, data: bytes) -> None:
"""Writes the data to filename within the store."""
...
@staticmethod
@abstractmethod
def remove_data(writer: UFOWriter, filename: str) -> None:
"""Remove the data at filename within the store."""
...
def unlazify(self) -> None:
"""Load all data into memory."""
for _ in self.items():
pass
__deepcopy__ = _deepcopy_unlazify_attrs
# MutableMapping methods
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator[str]:
return iter(self._data)
def __getitem__(self, fileName: str) -> bytes:
data_object = self._data[fileName]
if isinstance(data_object, Placeholder):
data_object = self._data[fileName] = self.read_data(self._reader, fileName)
return data_object
def __setitem__(self, fileName: str, data: bytes) -> None:
# should we forbid overwrite?
self._data[fileName] = data
if fileName in self._scheduledForDeletion:
self._scheduledForDeletion.remove(fileName)
def __delitem__(self, fileName: str) -> None:
del self._data[fileName]
self._scheduledForDeletion.add(fileName)
def __repr__(self) -> str:
n = len(self._data)
return "<{}.{} ({}) at {}>".format(
self.__class__.__module__,
self.__class__.__name__,
"empty" if n == 0 else "{} file{}".format(n, "s" if n > 1 else ""),
hex(id(self)),
)
def write(self, writer: UFOWriter, saveAs: Optional[bool] = None) -> None:
"""Write the data store to a :class:`fontTools.ufoLib.UFOWriter`."""
if saveAs is None:
saveAs = self._reader is not writer
# if in-place, remove deleted data
if not saveAs:
for fileName in self._scheduledForDeletion:
self.remove_data(writer, fileName)
# Write data. Iterating over _data.items() prevents automatic loading.
for fileName, data in self._data.items():
# Two paths:
# 1) We are saving in-place. Only write to disk what is loaded, it
# might be modified.
# 2) We save elsewhere. Load all data files to write them back out.
# XXX: Move write_data into `if saveAs` branch to simplify code?
if isinstance(data, Placeholder):
if saveAs:
data = self.read_data(self._reader, fileName)
self._data[fileName] = data
else:
continue
self.write_data(writer, fileName, data)
self._scheduledForDeletion = set()
if saveAs:
# all data was read by now, ref to reader no longer needed
self._reader = None
@property
def fileNames(self) -> List[str]:
"""Returns a list of filenames in the data store."""
return list(self._data.keys())
class AttrDictMixin(Mapping):
"""Read attribute values using mapping interface.
For use with Anchors and Guidelines classes, where client code
expects them to behave as dict.
"""
# XXX: Use generics?
def __getitem__(self, key: str) -> Any:
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key)
def __iter__(self) -> Iterator[Any]:
for key in attr.fields_dict(self.__class__):
if getattr(self, key) is not None:
yield key
def __len__(self) -> int:
return sum(1 for _ in self)
def _convert_transform(t: Union[Transform, Sequence[float]]) -> Transform:
"""Return a passed-in Transform as is, otherwise convert a sequence of
numbers to a Transform if need be."""
return t if isinstance(t, Transform) else Transform(*t)
```
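Since `DataStore` leaves the four I/O hooks abstract, a concrete store is just a thin binding to reader/writer calls. Below is a sketch of what that could look like for the UFO `data/` directory; the `UFOReader`/`UFOWriter` method names are the ones ufoLib2's own `DataSet` relies on and are taken as an assumption here.

```python
# Sketch: a concrete DataStore bound to the UFO data/ directory. The reader/writer
# method names are assumptions documented in the lead-in.
from typing import List

from fontTools.ufoLib import UFOReader, UFOWriter
from ufoLib2.objects.misc import DataStore


class ExampleDataSet(DataStore):
    @staticmethod
    def list_contents(reader: UFOReader) -> List[str]:
        # POSIX-style filenames found under data/
        return reader.getDataDirectoryListing()

    @staticmethod
    def read_data(reader: UFOReader, filename: str) -> bytes:
        return reader.readData(filename)

    @staticmethod
    def write_data(writer: UFOWriter, filename: str, data: bytes) -> None:
        writer.writeData(filename, data)

    @staticmethod
    def remove_data(writer: UFOWriter, filename: str) -> None:
        writer.removeData(filename)


# store = ExampleDataSet.read(UFOReader("MyFont.ufo"), lazy=True)
# store["com.example/notes.txt"]  # loaded on first access, then cached
```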
|
{
"source": "jeremie-koster/galaxy-classification-yotta-project",
"score": 3
}
|
#### File: gzoo/infra/data.py
```python
import glob
from os import path as osp
import numpy as np
import pandas as pd
import PIL.Image as Image
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from torchvision.utils import save_image
VAL_SPLIT_RATIO = 0.10
COLOR_JITTER_FACTOR = 0.10
def pil_loader(path):
# open path as file to avoid ResourceWarning
# (https://github.com/python-pillow/Pillow/issues/835)
with open(path, "rb") as f:
with Image.open(f) as img:
return img.convert("RGB")
class GalaxyTrainSet(Dataset):
"""Train/Val dataset.
Args:
split (str): "train", "val"
opt (namespace): options from config
Returns (__getitem__):
image (torch.Tensor)
label (torch.Tensor)
"""
def __init__(self, split, opt):
super(GalaxyTrainSet, self).__init__()
self.split = split
self.task = opt.task
self.seed = opt.seed if opt.seed is not None else 0
self.datadir = opt.dataset.dir
if not osp.exists(self.datadir):
raise FileNotFoundError(
"Please download them from "
"https://www.kaggle.com/c/galaxy-zoo-the-galaxy-challenge/data"
)
self.image_dir = osp.join(self.datadir, opt.dataset.images)
self.label_file = osp.join(self.datadir, opt.dataset.train_labels)
if opt.evaluate:
self.label_file = osp.join(self.datadir, opt.dataset.test_labels)
df = pd.read_csv(self.label_file, header=0, sep=",")
self.indexes, self.labels = self._split_dataset(df, opt.evaluate)
self.image_tf = self._build_transforms(opt)
def _split_dataset(self, df, evaluate):
indexes = df.iloc[:, 0]
labels = df.iloc[:, 1:]
if self.task == "classification" and not evaluate:
idx_train, idx_val, lbl_train, lbl_val = train_test_split(
indexes,
labels,
test_size=VAL_SPLIT_RATIO,
random_state=self.seed,
stratify=labels,
)
if self.split == "train":
indexes = idx_train
labels = lbl_train
elif self.split == "val":
indexes = idx_val
labels = lbl_val
elif self.task == "regression" and not evaluate:
indices = np.random.RandomState(seed=self.seed).permutation(indexes.shape[0])
val_len = int(len(indexes) * VAL_SPLIT_RATIO)
val_idx, train_idx = indices[:val_len], indices[val_len:]
if self.split == "train":
indexes = indexes[train_idx]
elif self.split == "val":
indexes = indexes[val_idx]
return indexes.reset_index(drop=True), labels.reset_index(drop=True)
def _build_transforms(self, opt):
image_tf = []
if self.split == "train" and opt.preprocess.augmentation:
if opt.preprocess.rotate:
image_tf.append(transforms.RandomRotation(180))
if opt.preprocess.flip:
image_tf.extend(
[
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
]
)
if opt.preprocess.colorjitter:
image_tf.extend(
[
transforms.ColorJitter(
brightness=COLOR_JITTER_FACTOR,
contrast=COLOR_JITTER_FACTOR,
# saturation=COLOR_JITTER_FACTOR,
# hue=COLOR_JITTER_FACTOR,
),
]
)
image_tf.extend(
[
transforms.CenterCrop(224),
transforms.ToTensor(),
]
)
return transforms.Compose(image_tf)
def __getitem__(self, idx):
image_id = self.indexes.iloc[idx]
path = osp.join(self.image_dir, f"{image_id}.jpg")
image = pil_loader(path)
# -- DEBUG --
# tens = transforms.ToTensor()
# save_image(tens(image), f'logs/{idx}_raw.png')
image = self.image_tf(image)
# save_image(image, f'logs/{idx}_tf.png')
# breakpoint()
label = self.labels.iloc[idx]
if self.task == "classification":
label = torch.tensor(label).long()
elif self.task == "regression":
label = torch.tensor(label).float()
return image, label
def __len__(self):
return len(self.indexes)
class GalaxyTestSet(Dataset):
"""Test dataset.
Args:
split (str): "train", "val"
opt (namespace): options from config
Returns (__getitem__):
image (torch.Tensor)
image_id (int)
"""
def __init__(self, opt):
super(GalaxyTestSet, self).__init__()
self.datadir = opt.dataset.dir
if not osp.exists(self.datadir):
raise FileNotFoundError(
"Please download them from "
"https://www.kaggle.com/c/galaxy-zoo-the-galaxy-challenge/data"
)
self.image_dir = osp.join(self.datadir, "images_test_rev1")
image_list = []
for filename in glob.glob(f"{self.image_dir}/*.jpg"):
idx = filename.split("/")[-1][:-4]
image_list.append(idx)
self.indexes = pd.Series(image_list)
image_tf = []
image_tf.extend(
[
transforms.CenterCrop(224),
transforms.ToTensor(),
]
)
self.image_tf = transforms.Compose(image_tf)
def __getitem__(self, idx):
image_id = self.indexes.iloc[idx]
path = osp.join(self.image_dir, f"{image_id}.jpg")
image = pil_loader(path)
image = self.image_tf(image)
return image, image_id
def __len__(self):
return len(self.indexes)
def ImageNet(opt):
traindir = osp.join(opt.dataset.dir, "train")
valdir = osp.join(opt.dataset.dir, "val")
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_set = datasets.ImageFolder(
traindir,
transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
test_set = datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
)
return train_set, test_set
```
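A minimal sketch of wiring `GalaxyTestSet` into a `DataLoader`. Only `opt.dataset.dir` is needed here; the directory path, batch size and worker count are placeholders.

```python
# Sketch: GalaxyTestSet -> DataLoader. "data/galaxy-zoo" is a placeholder for the
# extracted Kaggle archive containing images_test_rev1/.
from types import SimpleNamespace

from torch.utils.data import DataLoader

from gzoo.infra.data import GalaxyTestSet

opt = SimpleNamespace(dataset=SimpleNamespace(dir="data/galaxy-zoo"))
test_set = GalaxyTestSet(opt)
test_loader = DataLoader(test_set, batch_size=64, shuffle=False, num_workers=4)

for images, image_ids in test_loader:
    print(images.shape)   # torch.Size([64, 3, 224, 224])
    print(image_ids[:3])  # filename stems used as submission ids
    break
```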
#### File: gzoo/infra/options.py
```python
import argparse
import collections
import copy
import inspect
import json
import os
from collections import OrderedDict
import yaml
from yaml import Dumper
from gzoo.infra.utils import merge_dictionaries
class OptionsDict(OrderedDict):
"""Dictionary of options contained in the Options class"""
def __init__(self, *args, **kwargs):
self.__locked = False
super(OptionsDict, self).__init__(*args, **kwargs)
def __getitem__(self, key):
if OrderedDict.__contains__(self, key):
val = OrderedDict.__getitem__(self, key)
elif "." in key:
keys = key.split(".")
val = self[keys[0]]
for k in keys[1:]:
val = val[k]
else:
return OrderedDict.__getitem__(self, key)
return val
def __contains__(self, key):
# Cannot use "key in" due to recursion, reusing rules for dotted keys from __getitem__
try:
self[key]
return True
except KeyError:
return False
def __setitem__(self, key, val):
if key == "_{}__locked".format(type(self).__name__):
OrderedDict.__setitem__(self, key, val)
elif hasattr(self, "_{}__locked".format(type(self).__name__)):
if self.__locked:
raise PermissionError("Options' dictionnary is locked and cannot be changed.")
if type(val) == dict:
val = OptionsDict(val)
OrderedDict.__setitem__(self, key, val)
elif isinstance(key, str) and "." in key:
first_key, other_keys = key.split(".", maxsplit=1)
if first_key not in self:
self[first_key] = OptionsDict({})
self[first_key][other_keys] = val
else:
OrderedDict.__setitem__(self, key, val)
else:
raise PermissionError(
"Tried to access Options' dictionnary bypassing the lock feature."
)
def __getattr__(self, key):
if key in self:
return self[key]
        else:
            # OrderedDict defines no __getattr__; surface a regular AttributeError
            raise AttributeError(key)
# def __setattr__(self, key, value):
# self[key] = value
def __repr__(self):
dictrepr = dict.__repr__(self)
return "{}({})".format(type(self).__name__, dictrepr)
def get(self, key, default):
if key in self:
return self[key]
else:
return default
def update(self, *args, **kwargs):
for k, v in OrderedDict(*args, **kwargs).items():
self[k] = v
def asdict(self):
d = {}
for k, v in self.items():
if isinstance(v, dict):
d[k] = dict(v)
else:
d[k] = v
return d
def lock(self):
self.__locked = True
for key, value in self.items():
if type(value) == OptionsDict:
self[key].lock()
def islocked(self):
return self.__locked
def unlock(self):
stack_this = inspect.stack()[1]
stack_caller = inspect.stack()[2]
if (
stack_this.filename != stack_caller.filename
or stack_this.function != stack_caller.function
):
for _i in range(10):
print(
"WARNING: Options unlocked by {}[{}]: {}.".format(
stack_caller.filename, stack_caller.lineno, stack_caller.function
)
)
self.__locked = False
for key, value in self.items():
if type(value) == OptionsDict:
self[key].unlock()
# https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
class Options(object):
"""Options is a singleton. It parses a yaml file to generate rules to the argument parser.
If a path to a yaml file is not provided, it relies on the `-o/--path_opts` command line argument.
Args:
source(str|dict): path to the yaml file, or dictionary containing options
arguments_callback(func): function to be called after running argparse,
if values need to be preprocessed
lock(bool): if True, Options will be locked and no changes to values authorized
run_parser(bool): if False, argparse will not be executed, and values from options
file will be used as is
Example usage:
.. code-block:: python
# parse the yaml file and create options
Options(path_yaml='bootstrap/options/example.yaml', run_parser=False)
opt = Options() # get the options dictionary from the singleton
print(opt['exp']) # display a dictionary
print(opt['exp.dir']) # display a value
print(opt['exp']['dir']) # display the same value
# the values cannot be changed by command line because run_parser=False
"""
    # Attributes
    __instance = None  # singleton instance of this class
    options = None  # dictionary of the singleton
    path_yaml = None
class MissingOptionsException(Exception):
pass
class HelpParser(argparse.ArgumentParser):
def error(self, message):
print("\nError: %s\n" % message)
self.print_help()
self.exit()
def exit(self, status=0, message=None):
if status == 0:
raise Options.MissingOptionsException()
super().exit(status, message)
def __new__(cls, source=None, arguments_callback=None, lock=False, run_parser=True):
# Options is a singleton, we will only build if it has not been built before
if not Options.__instance:
Options.__instance = object.__new__(Options)
if source:
cls.source = source
else:
# Parsing only the path_opts argument to find yaml file
optfile_parser = Options.HelpParser(add_help=True)
optfile_parser.add_argument("-o", "--path_opts", type=str, required=False)
cls.source = optfile_parser.parse_known_args()[0].path_opts
options_dict = Options.load_yaml_opts(cls.source)
if run_parser:
fullopt_parser = Options.HelpParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
fullopt_parser.add_argument("-o", "--path_opts", type=str, required=True)
Options.__instance.add_options(fullopt_parser, options_dict)
arguments = fullopt_parser.parse_args()
if arguments_callback:
arguments = arguments_callback(Options.__instance, arguments, options_dict)
Options.__instance.options = OptionsDict()
for argname in vars(arguments):
nametree = argname.split(".")
value = getattr(arguments, argname)
position = Options.__instance.options
for piece in nametree[:-1]:
if piece in position and isinstance(
position[piece], collections.abc.Mapping
):
position = position[piece]
else:
position[piece] = {}
position = position[piece]
position[nametree[-1]] = value
else:
Options.__instance.options = options_dict
if lock:
Options.__instance.lock()
return Options.__instance
def __getitem__(self, key):
""""""
val = self.options[key]
return val
def __setitem__(self, key, val):
self.options[key] = val
def __getattr__(self, key):
if key in self:
return self[key]
        else:
            # object defines no __getattr__; surface a regular AttributeError
            raise AttributeError(key)
def __contains__(self, item):
return item in self.options
def __str__(self):
return json.dumps(self.options, indent=2)
def get(self, key, default):
return self.options.get(key, default)
def has_key(self, k):
return k in self.options
def keys(self):
return self.options.keys()
def values(self):
return self.options.values()
def items(self):
return self.options.items()
def add_options(self, parser, options, prefix=""):
if prefix:
prefix += "."
for key, value in options.items():
if isinstance(value, dict):
self.add_options(parser, value, "{}{}".format(prefix, key))
else:
argname = "--{}{}".format(prefix, key)
nargs = "*" if isinstance(value, list) else "?"
if value is None:
datatype = str
elif isinstance(value, bool):
datatype = self.str_to_bool
elif isinstance(value, list):
if len(value) == 0:
datatype = str
else:
datatype = type(value[0])
else:
datatype = type(value)
parser.add_argument(
argname, help="Default: %(default)s", default=value, nargs=nargs, type=datatype
)
def str_to_bool(self, v):
true_strings = ["yes", "true"]
false_strings = ["no", "false"]
if isinstance(v, str):
if v.lower() in true_strings:
return True
elif v.lower() in false_strings:
return False
raise argparse.ArgumentTypeError(
"{} cant be converted to bool (".format(v)
+ "|".join(true_strings + false_strings)
+ " can be)"
)
def save(self, path_yaml):
"""Write options dictionary to a yaml file"""
Options.save_yaml_opts(self.options, path_yaml)
def lock(self):
Options.__instance.options.lock()
def unlock(self):
Options.__instance.options.unlock()
# Static methods
@staticmethod
def load_yaml_opts(source):
"""Load options dictionary from a yaml file"""
result = {}
if isinstance(source, str):
with open(source, "r") as yaml_file:
options_dict = yaml.safe_load(yaml_file)
elif isinstance(source, dict):
options_dict = source
else:
raise TypeError("Unsupported source type: {}".format(type(source)))
includes = options_dict.get("__include__", False)
if includes:
if type(includes) != list:
includes = [includes]
for include in includes:
filename = "{}/{}".format(os.path.dirname(source), include)
if os.path.isfile(filename):
parent = Options.load_yaml_opts(filename)
else:
parent = Options.load_yaml_opts(include)
merge_dictionaries(result, parent)
merge_dictionaries(
result, options_dict
) # to be sure the main options overwrite the parent options
result.pop("__include__", None)
result = OptionsDict(result)
return result
@staticmethod
def save_yaml_opts(opts, path_yaml):
# Warning: copy is not nested
options = copy.copy(opts)
if "path_opts" in options:
del options["path_opts"]
# https://gist.github.com/oglops/c70fb69eef42d40bed06
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
Dumper.add_representer(OptionsDict, dict_representer)
with open(path_yaml, "w") as yaml_file:
yaml.dump(options, yaml_file, Dumper=Dumper, default_flow_style=False)
```
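A small sketch of the `Options` singleton built from an in-memory dict, so no YAML file or command-line parsing is involved; the keys and values are illustrative only.

```python
# Sketch: Options from a plain dict, with dotted-key access. Keys are illustrative.
from gzoo.infra.options import Options

source = {
    "exp": {"dir": "logs/run1", "resume": None},
    "optimizer": {"name": "adam", "lr": 3e-4},
}
opt = Options(source=source, run_parser=False, lock=True)

print(opt["exp.dir"])          # "logs/run1" -- dotted access
print(opt["optimizer"]["lr"])  # 3e-4        -- nested access
print("exp.resume" in opt)     # True
# opt["optimizer.lr"] = 1e-3   # would raise PermissionError: the dict is locked
```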
|
{
"source": "jeremie-koster/product-subscription-yotta-project",
"score": 3
}
|
#### File: src/application/train.py
```python
import pickle
from warnings import simplefilter
from pandas.core.common import SettingWithCopyWarning
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
import src.config.base as base
import src.config.column_names as col
from src.domain.build_features import feature_engineering_transformer
from src.domain.cleaning import correct_wrong_entries, impute_missing_eco_data, MissingValueTreatment
from src.infrastructure.build_dataset import DataBuilderFactory, DataMerger
# Ignore warnings to improve readability
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action="ignore", category=SettingWithCopyWarning)
def main():
# Builds datasets.
print('Building datasets...')
client_builder = DataBuilderFactory(base.TRAIN_CLIENT_DATA_PATH,
base.config_client_data,
base.ALL_CLIENT_DATA_TRANSLATION)
client_data = client_builder.preprocess_data().data
eco_builder = DataBuilderFactory(base.TRAIN_ECO_DATA_PATH,
base.config_eco_data)
eco_data = eco_builder.preprocess_data().data
print('Preprocessing...')
# Imputes NaN from the eco dataset.
# This step is done outside the pipeline to avoid duplication of NaN while merging.
eco_data = impute_missing_eco_data(eco_data)
# Fixes erroneous entries in client dataset.
client_data = correct_wrong_entries(client_data, base.config_client_data.get('wrong_entries'))
# Merges client and eco datasets.
print('Merging the client and economic datasets together...')
merged = DataMerger(client_data, eco_data, col.MERGER_FIELD)
merged.merge_datasets()
merged_data = merged.joined_datasets
merged_data_X = merged_data.drop(columns=col.TARGET)
merged_data_y = merged_data[col.TARGET]
# Loads pipeline.
class_weight = {0: 1, 1: 9}
pipeline = Pipeline([('imputation', MissingValueTreatment()),
('feature_engineering', feature_engineering_transformer()),
('rf_clf', RandomForestClassifier(class_weight=class_weight))
])
# Splits train and test sets.
print('Splitting train and test...')
merged_data_y = merged_data_y.eq('Yes').astype(int)
X_train, X_test, y_train, y_test = train_test_split(merged_data_X, merged_data_y,
test_size=0.2,
random_state=base.SEED,
stratify=merged_data_y)
pipeline.fit(X_train, y_train)
# Initializes random search.
print('Initializing random search...')
clf = RandomizedSearchCV(estimator=pipeline,
param_distributions=base.RF_PARAM,
scoring='average_precision',
random_state=base.SEED,
cv=5)
# Fits the model.
print('Fitting model...')
clf.fit(X_train, y_train)
# Saves model.
print('Saving model...')
with open(base.SAVED_MODEL_PATH, 'wb') as file:
pickle.dump(clf, file)
if __name__ == '__main__':
main()
```
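After `main()` has run, the fitted `RandomizedSearchCV` object (pipeline plus best hyper-parameters) can be reloaded for inspection or scoring. A sketch; any scoring input is assumed to carry the same columns as `merged_data_X` at fit time.

```python
# Sketch: reloading the model saved by main(). New data must go through the same
# merge/cleaning steps as merged_data_X above before scoring.
import pickle

import src.config.base as base

with open(base.SAVED_MODEL_PATH, 'rb') as file:
    search = pickle.load(file)

print(search.best_params_)  # hyper-parameters picked by the random search
print(search.best_score_)   # mean CV average-precision of the best pipeline
# probabilities = search.predict_proba(new_merged_data)[:, 1]
```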
|
{
"source": "JeremieLGDP/AI-CG1",
"score": 2
}
|
#### File: AI-CG1/ui/csvflight.py
```python
import csv
from cflib import crazyflie
import logging
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.utils import uri_helper
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncLogger import SyncLogger
from cflib.crazyflie import Commander
logging.basicConfig(level=logging.ERROR)
import cflib.positioning.position_hl_commander as pc
#URIbase = 'radio://0/27/2M/E7E7E7E7'
def flightplan(filename = "fp1.csv"):
doc = []
with open(filename, newline='') as csvfile:
f = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in f :
r = []
for char in row :
i = float(char)
r.append(i)
doc.append(r)
print(doc)
return doc
def flight(filename = "fp1.csv", channel = '01'):
URIbase = 'radio://0/27/2M/E7E7E7E7'
adress = URIbase + channel
URI = uri_helper.uri_from_env(default=adress)
fp = flightplan(filename)
cflib.crtp.init_drivers()
    with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
        mc = pc.PositionHlCommander(scf)
        home = mc.get_position()
        #print("home: " + str(home))
        start = time.time()
        end = False
        mc.take_off()
        while (time.time() - start < 30) and not end:
            for row in fp:
                # each CSV row is an (x, y, z) offset relative to the take-off position
                mc.go_to(home[0] + row[0], home[1] + row[1], home[2] + row[2])
            end = True
        mc.go_to(home[0], home[1], home[2] + 0.5)
        mc.land()
```
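For reference, a sketch of the flight-plan CSV that `flightplan()` expects and how `flight()` is kicked off; each row is read as an (x, y, z) offset in metres relative to the take-off position, and both the numbers and the radio channel are illustrative.

```python
# Sketch: a flight plan file and a dry run of the loader. The import path assumes
# the ui/ directory is on PYTHONPATH; values are illustrative only.
#
# fp1.csv:
#   0.0,0.0,0.5
#   0.5,0.0,0.5
#   0.5,0.5,0.5
#   0.0,0.0,0.5

from csvflight import flight, flightplan

waypoints = flightplan("fp1.csv")
print(f"{len(waypoints)} waypoints loaded: {waypoints}")
# flight("fp1.csv", channel="01")  # only with a Crazyflie reachable on radio://0/27/2M/E7E7E7E701
```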
#### File: Scripts/Hands/DroneHandDemo.py
```python
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
from cvzone.HandTrackingModule import HandDetector
import torch.nn.functional as F
import time
import CustomModelCNN
# load label binarizer
lb = joblib.load('output/lb.pkl')
model = CustomModelCNN.CustomCNN()
model.load_state_dict(torch.load('output/best.pth'))
print(model)
print('Model loaded')
detector = HandDetector(detectionCon=0.8, maxHands=1)
def hand_area(img, x, y, w, h):
    # crop a 224x224 patch around the detected hand; numpy images are indexed as img[y, x]
    if w < 224:
        w = 224
    if h < 224:
        h = 224
    x1 = max(int(x - w / 2), 0)
    y1 = max(int(y - h / 2), 0)
    hand = img[y1:y1 + 224, x1:x1 + 224]
    hand = cv2.resize(hand, (224, 224))
    return hand
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print('Error while trying to open camera. Please check again...')
# get the frame width and height
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# define codec and create VideoWriter object
out = cv2.VideoWriter('output/asl2.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30, (frame_width,frame_height))
while(cap.isOpened()):
# capture each frame of the video
ret, frame = cap.read()
hands, frame = detector.findHands(frame)
if hands:
hand1 = hands[0]
centerPoint1 = hand1["center"] #Center of the hand cx,cy
bbox1 = hand1["bbox"] # BBox info: x, y, w, h
handType1 = hand1["type"] # left or right hand
else:
bbox1 = [112,112,224,224]
# get the hand area on the video capture screen
#cv2.rectangle(frame, (100, 100), (324, 324), (20, 34, 255), 2)
hand = hand_area(frame,bbox1[0],bbox1[1],bbox1[2],bbox1[3])
image = hand
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
image = torch.tensor(image, dtype=torch.float)
image = image.unsqueeze(0)
outputs = model(image)
_, preds = torch.max(outputs.data, 1)
cv2.putText(frame, lb.classes_[preds], (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
#plt.imshow(frame, cmap = 'gray', interpolation = 'bicubic')
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
cv2.imshow('image', frame)
#exit()
out.write(frame)
# press `q` to exit
if cv2.waitKey(27) & 0xFF == ord('q'):
break
# release VideoCapture()
cap.release()
# close all frames and video windows
cv2.destroyAllWindows()
```
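The same preprocessing and inference path can be exercised on a single saved image, which is handy for debugging without a webcam. A sketch; `hand.jpg` is a placeholder path, and `model` and `lb` are loaded exactly as at the top of the script.

```python
# Sketch: one-off classification of a saved image using the model and label
# binarizer loaded above. "hand.jpg" is a placeholder path.
img = cv2.imread("hand.jpg")
img = cv2.resize(img, (224, 224))
tensor = torch.tensor(np.transpose(img, (2, 0, 1)).astype(np.float32)).unsqueeze(0)  # [1, 3, 224, 224]
with torch.no_grad():
    outputs = model(tensor)
_, pred = torch.max(outputs.data, 1)
print("predicted sign:", lb.classes_[pred.item()])
```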
|
{
"source": "JeremieMelo/ADEPT",
"score": 2
}
|
#### File: ADEPT/core/builder.py
```python
from typing import Tuple
import torch
import torch.nn as nn
from pyutils.config import configs
from pyutils.datasets import get_dataset
from pyutils.loss import AdaptiveLossSoft, KLLossMixed
from pyutils.lr_scheduler.warmup_cosine_restart import CosineAnnealingWarmupRestarts
from pyutils.optimizer.sam import SAM
from pyutils.typing import DataLoader, Optimizer, Scheduler
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd
from torch.types import Device
from core.datasets import CIFAR10Dataset, FashionMNISTDataset, MNISTDataset, SVHNDataset
from core.models import *
__all__ = [
"make_dataloader",
"make_model",
"make_weight_optimizer",
"make_arch_optimizer",
"make_optimizer",
"make_scheduler",
"make_criterion",
]
def make_dataloader(name: str = None) -> Tuple[DataLoader, DataLoader, DataLoader]:
name = (name or configs.dataset.name).lower()
if name == "mnist":
train_dataset, validation_dataset, test_dataset = (
MNISTDataset(
root=configs.dataset.root,
split=split,
train_valid_split_ratio=configs.dataset.train_valid_split_ratio,
center_crop=configs.dataset.center_crop,
resize=configs.dataset.img_height,
resize_mode=configs.dataset.resize_mode,
binarize=False,
binarize_threshold=0.273,
digits_of_interest=list(range(10)),
n_test_samples=configs.dataset.n_test_samples,
n_valid_samples=configs.dataset.n_valid_samples,
)
for split in ["train", "valid", "test"]
)
elif name == "fashionmnist":
train_dataset, validation_dataset, test_dataset = (
FashionMNISTDataset(
root=configs.dataset.root,
split=split,
train_valid_split_ratio=configs.dataset.train_valid_split_ratio,
center_crop=configs.dataset.center_crop,
resize=configs.dataset.img_height,
resize_mode=configs.dataset.resize_mode,
binarize=False,
n_test_samples=configs.dataset.n_test_samples,
n_valid_samples=configs.dataset.n_valid_samples,
)
for split in ["train", "valid", "test"]
)
elif name == "cifar10":
train_dataset, validation_dataset, test_dataset = (
CIFAR10Dataset(
root=configs.dataset.root,
split=split,
train_valid_split_ratio=configs.dataset.train_valid_split_ratio,
center_crop=configs.dataset.center_crop,
resize=configs.dataset.img_height,
resize_mode=configs.dataset.resize_mode,
binarize=False,
grayscale=False,
n_test_samples=configs.dataset.n_test_samples,
n_valid_samples=configs.dataset.n_valid_samples,
)
for split in ["train", "valid", "test"]
)
elif name == "svhn":
train_dataset, validation_dataset, test_dataset = (
SVHNDataset(
root=configs.dataset.root,
split=split,
train_valid_split_ratio=configs.dataset.train_valid_split_ratio,
center_crop=configs.dataset.center_crop,
resize=configs.dataset.img_height,
resize_mode=configs.dataset.resize_mode,
binarize=False,
grayscale=False,
n_test_samples=configs.dataset.n_test_samples,
n_valid_samples=configs.dataset.n_valid_samples,
)
for split in ["train", "valid", "test"]
)
else:
train_dataset, test_dataset = get_dataset(
name,
configs.dataset.img_height,
configs.dataset.img_width,
dataset_dir=configs.dataset.root,
transform=configs.dataset.transform,
)
validation_dataset = None
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=configs.run.batch_size,
shuffle=int(configs.dataset.shuffle),
pin_memory=True,
num_workers=configs.dataset.num_workers,
)
validation_loader = (
torch.utils.data.DataLoader(
dataset=validation_dataset,
batch_size=configs.run.batch_size,
shuffle=False,
pin_memory=True,
num_workers=configs.dataset.num_workers,
)
if validation_dataset is not None
else None
)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=configs.run.batch_size,
shuffle=False,
pin_memory=True,
num_workers=configs.dataset.num_workers,
)
return train_loader, validation_loader, test_loader
def make_model(device: Device, random_state: int = None) -> nn.Module:
if "mlp" in configs.model.name.lower():
model = eval(configs.model.name)(
n_feat=configs.dataset.img_height * configs.dataset.img_width,
n_class=configs.dataset.n_class,
hidden_list=configs.model.hidden_list,
block_list=[int(i) for i in configs.model.block_list],
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
mode=configs.model.mode,
v_max=configs.quantize.v_max,
v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=False,
bias=False,
device=device,
).to(device)
model.reset_parameters(random_state)
elif "cnn" in configs.model.name.lower():
model = eval(configs.model.name)(
img_height=configs.dataset.img_height,
img_width=configs.dataset.img_width,
in_channels=configs.dataset.in_channels,
num_classes=configs.dataset.num_classes,
kernel_list=configs.model.kernel_list,
kernel_size_list=configs.model.kernel_size_list,
pool_out_size=configs.model.pool_out_size,
stride_list=configs.model.stride_list,
padding_list=configs.model.padding_list,
hidden_list=configs.model.hidden_list,
block_list=[int(i) for i in configs.model.block_list],
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
v_max=configs.quantize.v_max,
# v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=configs.model.photodetect,
bias=False,
device=device,
super_layer_name=configs.super_layer.name,
super_layer_config=configs.super_layer.arch,
bn_affine=configs.model.bn_affine,
).to(device)
model.reset_parameters(random_state)
# model.super_layer.set_sample_arch(configs.super_layer.sample_arch)
elif "vgg" in configs.model.name.lower():
model = eval(configs.model.name)(
img_height=configs.dataset.img_height,
img_width=configs.dataset.img_width,
in_channels=configs.dataset.in_channels,
num_classes=configs.dataset.num_classes,
kernel_list=configs.model.kernel_list,
kernel_size_list=configs.model.kernel_size_list,
pool_out_size=configs.model.pool_out_size,
stride_list=configs.model.stride_list,
padding_list=configs.model.padding_list,
hidden_list=configs.model.hidden_list,
block_list=[int(i) for i in configs.model.block_list],
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
v_max=configs.quantize.v_max,
# v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=configs.model.photodetect,
bias=False,
device=device,
super_layer_name=configs.super_layer.name,
super_layer_config=configs.super_layer.arch,
bn_affine=configs.model.bn_affine,
).to(device)
model.reset_parameters(random_state)
elif "resnet" in configs.model.name.lower():
model = eval(configs.model.name)(
img_height=configs.dataset.img_height,
img_width=configs.dataset.img_width,
in_channel=configs.dataset.in_channel,
n_class=configs.dataset.n_class,
block_list=[int(i) for i in configs.model.block_list],
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
mode=configs.model.mode,
v_max=configs.quantize.v_max,
v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=False,
bias=False,
device=device,
).to(device)
model.reset_parameters(random_state)
else:
model = None
raise NotImplementedError(f"Not supported model name: {configs.model.name}")
return model
def make_weight_optimizer(model: nn.Module, name: str = None) -> Optimizer:
name = (name or configs.weight_optimizer.name).lower()
weight_decay = float(getattr(configs.weight_optimizer, "weight_decay", 0))
bn_weight_decay = float(getattr(configs.weight_optimizer, "bn_weight_decay", 0))
bias_decay = float(getattr(configs.weight_optimizer, "bias_decay", 0))
perm_decay = float(getattr(configs.weight_optimizer, "perm_decay", 0))
dc_decay = float(getattr(configs.weight_optimizer, "dc_decay", 0))
groups = {
str(d): []
for d in set(
[
weight_decay,
bn_weight_decay,
bias_decay,
perm_decay,
dc_decay,
]
)
}
conv_linear = tuple([nn.Linear, _ConvNd] + list(getattr(model, "_conv_linear", [])))
for m in model.modules():
if isinstance(m, conv_linear):
groups[str(weight_decay)].append(m.weight)
if m.bias is not None and m.bias.requires_grad:
groups[str(bias_decay)].append(m.bias)
elif isinstance(m, _BatchNorm) and not bn_weight_decay:
if m.weight is not None and m.weight.requires_grad:
groups[str(bn_weight_decay)].append(m.weight)
if m.bias is not None and m.bias.requires_grad:
groups[str(bn_weight_decay)].append(m.bias)
elif isinstance(m, SuperCRLayer):
if hasattr(m, "weight") and m.weight.requires_grad:
groups[str(perm_decay)].append(m.weight)
elif isinstance(m, SuperDCFrontShareLayer):
if hasattr(m, "weight") and m.weight.requires_grad:
groups[str(dc_decay)].append(m.weight)
selected_params = []
for v in groups.values():
selected_params += v
params_grad = model.weight_params
other_params = list(set(params_grad) - set(selected_params))
groups[
str(weight_decay)
] += other_params # unassigned parameters automatically assigned to weight decay group
assert len(params_grad) == sum(len(p) for p in groups.values())
params = [dict(params=params, weight_decay=float(decay_rate)) for decay_rate, params in groups.items()]
return make_optimizer(params, name, configs.weight_optimizer)
def make_arch_optimizer(model: nn.Module, name: str = None) -> Optimizer:
name = (name or configs.arch_optimizer.name).lower()
theta_decay = float(getattr(configs.arch_optimizer, "weight_decay", 5e-4))
theta = [model.super_layer.sampling_coeff]
params = [
dict(params=theta, weight_decay=theta_decay),
]
return make_optimizer(params, name, configs.arch_optimizer)
def make_optimizer(params, name: str = None, configs=None) -> Optimizer:
if name == "sgd":
optimizer = torch.optim.SGD(
params,
lr=configs.lr,
momentum=configs.momentum,
weight_decay=configs.weight_decay,
nesterov=True,
)
elif name == "adam":
optimizer = torch.optim.Adam(
params,
lr=configs.lr,
weight_decay=configs.weight_decay,
betas=getattr(configs, "betas", (0.9, 0.999)),
)
elif name == "adamw":
optimizer = torch.optim.AdamW(
params,
lr=configs.lr,
weight_decay=configs.weight_decay,
)
elif name == "sam_sgd":
base_optimizer = torch.optim.SGD
optimizer = SAM(
params,
base_optimizer=base_optimizer,
rho=getattr(configs, "rho", 0.5),
adaptive=getattr(configs, "adaptive", True),
lr=configs.lr,
weight_decay=configs.weight_decay,
            momentum=0.9,
)
elif name == "sam_adam":
base_optimizer = torch.optim.Adam
optimizer = SAM(
params,
base_optimizer=base_optimizer,
rho=getattr(configs, "rho", 0.001),
adaptive=getattr(configs, "adaptive", True),
lr=configs.lr,
weight_decay=configs.weight_decay,
)
else:
raise NotImplementedError(name)
return optimizer
def make_scheduler(optimizer: Optimizer, name: str = None) -> Scheduler:
name = (name or configs.scheduler.name).lower()
if name == "constant":
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: 1)
elif name == "cosine":
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=int(configs.run.n_epochs), eta_min=float(configs.scheduler.lr_min)
)
elif name == "cosine_warmup":
scheduler = CosineAnnealingWarmupRestarts(
optimizer,
first_cycle_steps=configs.run.n_epochs,
max_lr=configs.optimizer.lr,
min_lr=configs.scheduler.lr_min,
warmup_steps=int(configs.scheduler.warmup_steps),
)
elif name == "exp":
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=configs.scheduler.lr_gamma)
else:
raise NotImplementedError(name)
return scheduler
def make_criterion(name: str = None) -> nn.Module:
name = (name or configs.criterion.name).lower()
if name == "nll":
criterion = nn.NLLLoss()
elif name == "mse":
criterion = nn.MSELoss()
elif name == "mae":
criterion = nn.L1Loss()
elif name == "ce":
criterion = nn.CrossEntropyLoss()
elif name == "adaptive":
criterion = AdaptiveLossSoft(alpha_min=-1.0, alpha_max=1.0)
elif name == "mixed_kl":
criterion = KLLossMixed(
T=getattr(configs.criterion, "T", 3),
alpha=getattr(configs.criterion, "alpha", 0.9),
)
else:
raise NotImplementedError(name)
return criterion
```
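The interesting part of `make_weight_optimizer` is that it buckets parameters by their weight-decay rate (conv/linear weights, biases, batch-norm parameters, permutation and DC weights) and hands one param-group per rate to the optimizer. Below is a self-contained sketch of the same grouping pattern on a plain model, with illustrative decay values and without the project's `configs`.

```python
# Sketch: one optimizer param-group per distinct weight-decay value, as in
# make_weight_optimizer. Decay rates below are illustrative.
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Flatten(), nn.Linear(8, 10))

weight_decay, bn_weight_decay, bias_decay = 1e-4, 0.0, 0.0
groups = {str(d): [] for d in {weight_decay, bn_weight_decay, bias_decay}}

for m in model.modules():
    if isinstance(m, (nn.Linear, _ConvNd)):
        groups[str(weight_decay)].append(m.weight)
        if m.bias is not None:
            groups[str(bias_decay)].append(m.bias)
    elif isinstance(m, _BatchNorm):
        groups[str(bn_weight_decay)].extend([m.weight, m.bias])

params = [dict(params=p, weight_decay=float(d)) for d, p in groups.items() if p]
optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, nesterov=True)
print([(g["weight_decay"], len(g["params"])) for g in optimizer.param_groups])
```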
#### File: models/layers/super_conv2d.py
```python
import logging
from typing import Dict, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from pyutils.compute import get_complex_energy, im2col_2d
from pyutils.quantize import input_quantize_fn
from torch import Tensor
from torch.nn import Parameter, init, Module
from torch.types import Device, _size
__all__ = ["SuperBlockConv2d"]
class SuperBlockConv2d(torch.nn.Module):
"""
    description: SVD-based blocked Conv2d layer; the kernel is applied as an im2col matrix multiplication.
"""
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: int = 3,
mini_block: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
dilation: Union[int, _size] = 1,
groups: int = 1,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
super_layer: Module = None,
device: Device = torch.device("cuda"),
) -> None:
super(SuperBlockConv2d, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.kernel_size = kernel_size
self.mini_block = mini_block
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.super_layer = super_layer
self.super_ps_layers = None
self.mode = mode
self.v_max = v_max
self.v_pi = v_pi
self.gamma = np.pi / self.v_pi ** 2
self.w_bit = w_bit
self.in_bit = in_bit
self.photodetect = photodetect
self.device = device
# build parameters
self.build_parameters()
# quantization tool
self.input_quantizer = input_quantize_fn(self.in_bit, device=self.device)
# default set to slow forward
self.disable_fast_forward()
# default set no phase noise
self.set_phase_variation(0)
# default set no gamma noise
self.set_gamma_noise(0)
# default set no crosstalk
self.set_crosstalk_factor(0)
# zero pad for input
self.x_zero_pad = None
if bias:
self.bias = Parameter(torch.Tensor(out_channel).to(self.device))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def build_parameters(self) -> None:
### TODO, construct balanced weight
mini_block = self.mini_block
n = self.in_channel * self.kernel_size ** 2
if self.in_channel % 2 == 0: # even channel
self.grid_dim_x = 2 * int(np.ceil(n / 2 / mini_block))
else: # odd channel, mostly in the first conv layer
self.grid_dim_x = int(np.ceil((n // 2) / mini_block)) + int(np.ceil((n - n // 2) / mini_block))
self.grid_dim_y = int(np.ceil(self.out_channel / mini_block))
self.in_channel_pad = self.grid_dim_x * mini_block
self.out_channel_pad = self.grid_dim_y * mini_block
self.weight = Parameter(
torch.empty(
self.grid_dim_y, self.grid_dim_x, self.mini_block, dtype=torch.cfloat, device=self.device
)
)
self.eye = torch.eye(self.mini_block, self.mini_block, dtype=torch.cfloat, device=self.device)
self.U = self.V = self.eye
def reset_parameters(self) -> None:
temp = torch.empty(self.grid_dim_y*self.mini_block, self.grid_dim_x*self.mini_block, device=self.device)
init.kaiming_normal_(temp)
temp = temp.view(self.grid_dim_y, self.mini_block, self.grid_dim_x, self.mini_block).permute(0,2,1,3)
_, s, _ = torch.svd(temp, compute_uv=False)
self.weight.data.copy_(s)
if self.bias is not None:
init.uniform_(self.bias, 0, 0)
def set_super_layer_transfer_matrices(self, U: Tensor, V: Tensor) -> None:
self.U = U
self.V = V
def build_weight(self) -> Tensor:
# [k,k] -> [k,k]
# [p, q, k, 1] * [1, 1, k, k] complex = [p, q, k, k] complex
weight = self.super_layer.get_weight_matrix(self.super_ps_layers, self.weight)
weight = weight.permute(0, 2, 1, 3).reshape(self.out_channel_pad, self.in_channel_pad)[
: self.out_channel, : self.in_channel * self.kernel_size ** 2
]
return weight
def enable_fast_forward(self) -> None:
self.fast_forward_flag = True
def disable_fast_forward(self) -> None:
self.fast_forward_flag = False
def set_phase_variation(self, noise_std: float, random_state: Optional[int] = None) -> None:
self.phase_noise_std = noise_std
def set_crosstalk_factor(self, crosstalk_factor: float) -> None:
self.crosstalk_factor = crosstalk_factor
def set_gamma_noise(self, noise_std: float = 0, random_state: Optional[int] = None) -> None:
self.gamma_noise_std = noise_std
def set_weight_bitwidth(self, w_bit: int) -> None:
self.w_bit = w_bit
# self.phase_U_quantizer.set_bitwidth(w_bit)
# self.phase_S_quantizer.set_bitwidth(w_bit)
# self.phase_V_quantizer.set_bitwidth(w_bit)
def load_parameters(self, param_dict: Dict) -> None:
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict of dict} {layer_name: {param_name: param_tensor, ...}, ...}
"""
for name, param in param_dict.items():
getattr(self, name).data.copy_(param)
if self.mode == "phase":
            self.build_weight()
def switch_mode_to(self, mode: str) -> None:
self.mode = mode
def get_power(self, mixtraining_mask: Optional[Tensor] = None) -> float:
masks = (
mixtraining_mask
if mixtraining_mask is not None
else (self.mixedtraining_mask if self.mixedtraining_mask is not None else None)
)
if masks is not None:
power = ((self.phase_U.data * masks["phase_U"]) % (2 * np.pi)).sum()
power += ((self.phase_S.data * masks["phase_S"]) % (2 * np.pi)).sum()
power += ((self.phase_V.data * masks["phase_V"]) % (2 * np.pi)).sum()
else:
power = ((self.phase_U.data) % (2 * np.pi)).sum()
power += ((self.phase_S.data) % (2 * np.pi)).sum()
power += ((self.phase_V.data) % (2 * np.pi)).sum()
return power.item()
def get_output_dim(self, img_height, img_width):
h_out = (img_height - self.kernel_size + 2 * self.padding) / self.stride + 1
w_out = (img_width - self.kernel_size + 2 * self.padding) / self.stride + 1
return int(h_out), int(w_out)
def forward(self, x: Tensor) -> Tensor:
if self.in_bit < 16:
x = self.input_quantizer(x)
if not self.fast_forward_flag or self.weight is None:
weight = self.build_weight() # [p, q, k, k] or u, s, v
else:
weight = self.weight
_, x, h_out, w_out = im2col_2d(
W=None,
X=x,
stride=self.stride,
padding=self.padding,
w_size=(self.out_channel, self.in_channel, self.kernel_size, self.kernel_size),
)
# inc_pos = int(np.ceil(self.grid_dim_x / 2) * self.mini_block)
inc_pos = int(np.ceil(weight.size(1) / 2))
x = x.to(torch.complex64)
x_pos = weight[:, :inc_pos].matmul(x[:inc_pos]) # [outc, h*w*bs]
x_neg = weight[:, inc_pos:].matmul(x[inc_pos:]) # [outc, h*w*bs]
if self.photodetect:
x = get_complex_energy(torch.view_as_real(x_pos)) - get_complex_energy(torch.view_as_real(x_neg))
else:
x = x_pos - x_neg
out = x.view(self.out_channel, h_out, w_out, -1).permute(3, 0, 1, 2)
# out_real = F.conv2d(
# x,
# weight.real,
# bias=None,
# stride=self.stride,
# padding=self.padding,
# dilation=self.dilation,
# groups=self.groups,
# )
# out_imag = F.conv2d(
# x,
# weight.imag,
# bias=None,
# stride=self.stride,
# padding=self.padding,
# dilation=self.dilation,
# groups=self.groups,
# )
# out = torch.complex(out_real, out_imag)
# if self.photodetect:
# out = get_complex_energy(out)
if self.bias is not None:
out = out + self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
return out
```
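The forward pass above implements differential photodetection: after im2col the padded input channels are split at `inc_pos` into a positive and a negative rail, each multiplied by its half of the complex weight, and the two detected outputs are subtracted. A shape-level sketch with plain tensors; the sizes are illustrative, and `abs().square()` stands in for `get_complex_energy`.

```python
# Sketch: the positive/negative rail split of SuperBlockConv2d.forward() on plain
# tensors. Shapes are illustrative; abs().square() plays the role of get_complex_energy.
import torch

out_channel, n_flat, n_pix = 16, 54, 1024             # n_flat = in_channel * k * k after im2col
weight = torch.randn(out_channel, n_flat, dtype=torch.cfloat)
x = torch.randn(n_flat, n_pix, dtype=torch.cfloat)    # unfolded input patches

inc_pos = (n_flat + 1) // 2                            # same as int(np.ceil(weight.size(1) / 2))
x_pos = weight[:, :inc_pos] @ x[:inc_pos]              # [out_channel, n_pix]
x_neg = weight[:, inc_pos:] @ x[inc_pos:]              # [out_channel, n_pix]

photodetect = True
out = x_pos.abs().square() - x_neg.abs().square() if photodetect else x_pos - x_neg
print(out.shape)  # torch.Size([16, 1024]), later reshaped to [batch, out_channel, h_out, w_out]
```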
#### File: models/layers/super_linear.py
```python
import logging
from typing import Dict, Optional
import numpy as np
import torch
from torch.nn import Module
import torch.nn.functional as F
from pyutils.compute import get_complex_energy
from pyutils.quantize import input_quantize_fn
from torch import Tensor
from torch.nn import Parameter, init
from torch.types import Device
__all__ = ["SuperBlockLinear"]
class SuperBlockLinear(torch.nn.Module):
"""
description: SVD-based Linear layer. Blocking matrix multiplication.
"""
def __init__(
self,
in_channel: int,
out_channel: int,
mini_block: int = 8,
bias: bool = False,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
super_layer: Module = None,
device: Device = torch.device("cuda"),
) -> None:
super(SuperBlockLinear, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.mini_block = mini_block
self.grid_dim_x = int(np.ceil(self.in_channel / mini_block))
self.grid_dim_y = int(np.ceil(self.out_channel / mini_block))
self.in_channel_pad = self.grid_dim_x * mini_block
self.out_channel_pad = self.grid_dim_y * mini_block
self.mode = mode
self.super_layer = super_layer
self.super_ps_layers = None
self.v_max = v_max
self.v_pi = v_pi
self.gamma = np.pi / self.v_pi ** 2
self.w_bit = w_bit
self.in_bit = in_bit
self.photodetect = photodetect
self.device = device
# build parameters
self.build_parameters()
# quantization tool
self.input_quantizer = input_quantize_fn(self.in_bit, device=self.device)
# default set to slow forward
self.disable_fast_forward()
# default set no phase noise
self.set_phase_variation(0)
# default set no gamma noise
self.set_gamma_noise(0)
# default set no crosstalk
self.set_crosstalk_factor(0)
# zero pad for input
self.x_zero_pad = None
if bias:
self.bias = Parameter(torch.Tensor(out_channel).to(self.device))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def build_parameters(self, mode: str = "weight") -> None:
self.weight = Parameter(
torch.empty(
self.grid_dim_y, self.grid_dim_x, self.mini_block, dtype=torch.cfloat, device=self.device
)
)
self.eye = torch.eye(self.mini_block, self.mini_block, dtype=torch.cfloat, device=self.device)
self.U = self.V = self.eye
def reset_parameters(self) -> None:
temp = torch.empty(self.grid_dim_y*self.mini_block, self.grid_dim_x*self.mini_block, device=self.device)
init.kaiming_normal_(temp)
temp = temp.view(self.grid_dim_y, self.mini_block, self.grid_dim_x, self.mini_block).permute(0,2,1,3)
_, s, _ = torch.svd(temp, compute_uv=False)
self.weight.data.copy_(s)
if self.bias is not None:
init.uniform_(self.bias, 0, 0)
def set_super_layer_transfer_matrices(self, U: Tensor, V: Tensor) -> None:
self.U = U
self.V = V
def build_weight(self) -> Tensor:
# [k,k] -> [k,k]
weight = self.super_layer.get_weight_matrix(self.super_ps_layers, self.weight)
weight = weight.permute(0, 2, 1, 3).reshape(self.out_channel_pad, self.in_channel_pad)[
: self.out_channel, : self.in_channel
]
return weight
def enable_fast_forward(self) -> None:
self.fast_forward_flag = True
def disable_fast_forward(self) -> None:
self.fast_forward_flag = False
def set_phase_variation(self, noise_std: float, random_state: Optional[int] = None) -> None:
self.phase_noise_std = noise_std
def set_crosstalk_factor(self, crosstalk_factor: float) -> None:
self.crosstalk_factor = crosstalk_factor
def set_gamma_noise(self, noise_std: float = 0, random_state: Optional[int] = None) -> None:
self.gamma_noise_std = noise_std
def set_weight_bitwidth(self, w_bit: int) -> None:
self.w_bit = w_bit
def load_parameters(self, param_dict: Dict) -> None:
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict of dict} {layer_name: {param_name: param_tensor, ...}, ...}
"""
for name, param in param_dict.items():
getattr(self, name).data.copy_(param)
if self.mode == "phase":
            self.build_weight()
def switch_mode_to(self, mode: str) -> None:
self.mode = mode
def get_power(self, mixtraining_mask: Optional[Tensor] = None) -> float:
masks = (
mixtraining_mask
if mixtraining_mask is not None
else (self.mixedtraining_mask if self.mixedtraining_mask is not None else None)
)
if masks is not None:
power = ((self.phase_U.data * masks["phase_U"]) % (2 * np.pi)).sum()
power += ((self.phase_S.data * masks["phase_S"]) % (2 * np.pi)).sum()
power += ((self.phase_V.data * masks["phase_V"]) % (2 * np.pi)).sum()
else:
power = ((self.phase_U.data) % (2 * np.pi)).sum()
power += ((self.phase_S.data) % (2 * np.pi)).sum()
power += ((self.phase_V.data) % (2 * np.pi)).sum()
return power.item()
def forward(self, x: Tensor) -> Tensor:
if self.in_bit < 16:
x = self.input_quantizer(x)
if not self.fast_forward_flag or self.weight is None:
weight = self.build_weight() # [p, q, k, k] or u, s, v
else:
weight = self.weight
inc_pos = int(np.ceil(weight.size(1)/2))
weight = weight.t()
x = x.to(torch.complex64)
x_pos = x[..., :inc_pos].matmul(weight[:inc_pos, :]) # [bs, outc]
x_neg = x[..., inc_pos:].matmul(weight[inc_pos:, :]) # [outc, bs]
if self.photodetect:
out = get_complex_energy(torch.view_as_real(x_pos)) - get_complex_energy(
torch.view_as_real(x_neg)
)
else:
out = x_pos - x_neg
if self.bias is not None:
out = out + self.bias.unsqueeze(0)
return out
```
#### File: JeremieMelo/ADEPT/train.py
```python
import argparse
import os
from typing import Callable, Iterable
import mlflow
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyutils.config import configs
from pyutils.general import logger as lg
from pyutils.torch_train import (
BestKModelSaver,
count_parameters,
get_learning_rate,
load_model,
set_torch_deterministic,
)
from pyutils.typing import Criterion, DataLoader, Optimizer, Scheduler
from core import builder
from core.models.layers.super_utils import ArchSampler, get_named_sample_arch
from core.models.layers.utils import clip_grad_value_
def legalize_perm(model, area_loss_func: Callable):
"""Stochastic permutation legalization (SPL)
Args:
model (_type_): _description_
area_loss_func (Callable): _description_
"""
from core.models.layers import SuperCRLayer
optimizer = torch.optim.Adam(
[m.weight for m in model.super_layer.super_layers_all if isinstance(m, SuperCRLayer)], lr=1e-3
) # max_lambda = 200
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=2000, eta_min=2e-4)
lg.info(f"Force to legalize permutation")
for step in range(3000):
model.build_arch_mask(mode="gumbel_soft")
optimizer.zero_grad()
alm_perm_loss = model.get_alm_perm_loss(rho=1e-7)
area_loss = area_loss_func()
cross_density_loss = model.get_crossing_density_loss(margin=0.8)
loss = alm_perm_loss + area_loss + 1 * cross_density_loss
loss.backward()
optimizer.step()
scheduler.step()
model.update_alm_multiplier(rho=1e-7)
with torch.no_grad():
if step % 200 == 0:
legal = model.check_perm()
perm_loss = model.get_perm_loss()
num_cr = model.get_num_crossings()
lg.info(
f"Step: {step}, Perm Loss: {perm_loss}, Perm legality: {legal}, Area Loss: {area_loss.data.item()}, Area: {model.area.item()}, CR Density Loss: {cross_density_loss.data.item()}, #CR: {num_cr}"
)
legal = model.check_perm()
lg.info(f"Legalize permutation...")
model.sinkhorn_perm(n_step=200, t_min=0.005, noise_std=0.01, svd=True, legal_mask=legal)
legal = model.check_perm()
lg.info(f"Final perm legality: {legal}...")
if all(legal):
lg.info("All permutations are legal")
else:
lg.info("Not all permutations are legal!")
def train(
model: nn.Module,
train_loader: DataLoader,
weight_optimizer: Optimizer,
arch_optimizer: Optimizer,
scheduler: Scheduler,
epoch: int,
criterion: Criterion,
device: torch.device,
teacher: nn.Module = None,
) -> None:
model.train()
step = epoch * len(train_loader)
correct = 0
init_T = float(getattr(configs.super_layer, "init_gumbel_temperature", 5))
gamma_T = float(getattr(configs.super_layer, "gumbel_decay_rate", 0.956))
ps_weight = float(getattr(configs.super_layer.arch.device_cost, "ps_weight", 1))
dc_weight = float(getattr(configs.super_layer.arch.device_cost, "dc_weight", 1))
cr_weight = float(getattr(configs.super_layer.arch.device_cost, "cr_weight", 1))
area_upper_bound = float(getattr(configs.super_layer.arch.device_cost, "area_upper_bound", 100))
area_lower_bound = float(getattr(configs.super_layer.arch.device_cost, "area_lower_bound", 80))
first_active_block = bool(getattr(configs.super_layer.arch.device_cost, "first_active_block", 1))
area_loss_rho = float(getattr(configs.criterion, "area_loss_rho", 0))
cross_density_loss_rho = float(getattr(configs.criterion, "cross_density_loss_rho", 0))
perm_loss_rho = float(getattr(configs.criterion, "perm_loss_rho", 0))
perm_loss_rho_gamma = float(getattr(configs.criterion, "perm_loss_rho_gamma", 1))
max_lambda = float(getattr(configs.criterion, "max_lambda", 1))
force_perm_legal_epoch = int(getattr(configs.run, "force_perm_legal_epoch", 60))
train_arch_epoch = int(getattr(configs.run, "train_arch_epoch", 10))
train_arch_interval = int(getattr(configs.run, "train_arch_interval", 3))
phase_noise_std = float(getattr(configs.noise, "phase_noise_std", 0))
dc_noise_std = float(getattr(configs.noise, "dc_noise_std", 0))
model.set_phase_noise(phase_noise_std)
model.set_dc_noise(dc_noise_std)
if epoch >= train_arch_epoch:
perm_loss_rho = perm_loss_rho * perm_loss_rho_gamma ** (epoch - train_arch_epoch)
lg.info(f"Permutation ALM Rho: {perm_loss_rho}")
# set gumbel softmax temperature
T = init_T * gamma_T ** (epoch - 1)
model.set_gumbel_temperature(T)
lg.info(f"Gumbel temperature: {T:.4f}/{init_T}")
arch_mask_mode = getattr(configs.super_layer, "arch_mask_mode", "gumbel_soft")
train_arch_flag = False
model.enable_weight_params()
if epoch == force_perm_legal_epoch:
legalize_perm(
model,
lambda x=None: area_loss_rho
* model.get_area_bound_loss(
ps_weight,
dc_weight,
cr_weight,
area_upper_bound,
area_lower_bound,
first_active_block=first_active_block,
),
)
for batch_idx, (data, target) in enumerate(train_loader):
data = data.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
if epoch >= train_arch_epoch and batch_idx % train_arch_interval == (train_arch_interval - 1):
model.enable_arch_params()
model.freeze_weight_params()
arch_optimizer.zero_grad()
train_arch_flag = True
else:
model.enable_weight_params()
model.freeze_arch_params()
weight_optimizer.zero_grad()
train_arch_flag = False
def _get_loss(output, target):
if teacher:
with torch.no_grad():
teacher_score = teacher(data).detach()
loss = criterion(output, teacher_score)
else:
loss = criterion(output, target)
return loss
# sample random subnet
model.build_arch_mask(mode=arch_mask_mode)
output = model(data)
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
loss = _get_loss(output, target)
class_loss = loss
if epoch >= train_arch_epoch and area_loss_rho > 0: # no area penalty in warmup
area_loss = model.get_area_bound_loss(
ps_weight,
dc_weight,
cr_weight,
area_upper_bound,
area_lower_bound,
first_active_block=first_active_block,
)
loss = loss + area_loss_rho * area_loss
else:
area_loss = torch.zeros(1)
# if train_arch_flag and perm_loss_rho > 0: # only train permutation in arch opt phase
if (
epoch >= train_arch_epoch and not train_arch_flag and perm_loss_rho > 0
): # only train permutation in weight opt phase; no constraints in warmup
alm_perm_loss = model.get_alm_perm_loss(rho=perm_loss_rho)
loss = loss + alm_perm_loss
with torch.no_grad():
perm_loss = model.get_perm_loss()
if cross_density_loss_rho > 0 and not train_arch_flag:
cross_density_loss = model.get_crossing_density_loss(margin=0.95)
loss = loss + cross_density_loss_rho * cross_density_loss
else:
cross_density_loss = torch.zeros(1)
loss.backward()
if train_arch_flag:
arch_optimizer.step()
else:
weight_optimizer.step()
# update permutation ALM multiplier
if epoch >= train_arch_epoch and not train_arch_flag and perm_loss_rho > 0:
model.update_alm_multiplier(perm_loss_rho, max_lambda=max_lambda)
step += 1
if batch_idx % int(configs.run.log_interval) == 0:
lg.info(
"Train Epoch: {} ({}) [{:7d}/{:7d} ({:3.0f}%)] Loss: {:.4f} Class Loss: {:.4f} Perm Loss: {} Perm ALM: {} Area Loss: {:.4f} Area ALM: {:.4f} Area Aux: {:.4f} Area: {:.2f} CR_D_Loss: {:.4f} N_CR: {} N_DC: {} Theta\n{}".format(
epoch,
"train_arch" if train_arch_flag else "train_weight",
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.data.item(),
class_loss.data.item(),
perm_loss,
model.get_alm_multiplier(),
area_loss.item(),
model.get_area_multiplier().item(),
model.area_aux_variable.data.item(),
model.area.data,
cross_density_loss.item(),
model.get_num_crossings(),
model.get_num_dc(),
model.super_layer.sampling_coeff.data,
)
)
lg.info(f"arch_mask:\n{model.super_layer.arch_mask.data}")
lg.info(f"Check permutation legality: {model.check_perm()}")
mlflow.log_metrics({"train_loss": loss.item()}, step=step)
scheduler.step()
accuracy = 100.0 * correct.float() / len(train_loader.dataset)
lg.info(f"Train Accuracy: {correct}/{len(train_loader.dataset)} ({accuracy:.2f})%")
mlflow.log_metrics({"train_acc": accuracy.item(), "lr": get_learning_rate(weight_optimizer)}, step=epoch)
def validate(
model: nn.Module,
validation_loader: DataLoader,
epoch: int,
criterion: Criterion,
loss_vector: Iterable,
accuracy_vector: Iterable,
device: torch.device,
) -> None:
model.eval()
val_loss, correct = 0, 0
with torch.no_grad():
for data, target in validation_loader:
data = data.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
output = model(data)
val_loss += criterion(output, target).data.item()
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
val_loss /= len(validation_loader)
loss_vector.append(val_loss)
accuracy = 100.0 * correct.float() / len(validation_loader.dataset)
accuracy_vector.append(accuracy)
lg.info(
"\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n".format(
val_loss, correct, len(validation_loader.dataset), accuracy
)
)
mlflow.log_metrics({"val_acc": accuracy.data.item(), "val_loss": val_loss}, step=epoch)
def test(
model: nn.Module,
test_loader: DataLoader,
epoch: int,
criterion: Criterion,
loss_vector: Iterable,
accuracy_vector: Iterable,
device: torch.device,
) -> None:
model.eval()
val_loss, correct = 0, 0
with torch.no_grad():
for data, target in test_loader:
data = data.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
output = model(data)
val_loss += criterion(output, target).data.item()
pred = output.data.max(1)[1]
correct += pred.eq(target.data).cpu().sum()
val_loss /= len(test_loader)
loss_vector.append(val_loss)
accuracy = 100.0 * correct.float() / len(test_loader.dataset)
accuracy_vector.append(accuracy)
lg.info(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n".format(
val_loss, correct, len(test_loader.dataset), accuracy
)
)
mlflow.log_metrics({"test_acc": accuracy.data.item(), "test_loss": val_loss}, step=epoch)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("config", metavar="FILE", help="config file")
# parser.add_argument('--run-dir', metavar='DIR', help='run directory')
# parser.add_argument('--pdb', action='store_true', help='pdb')
args, opts = parser.parse_known_args()
configs.load(args.config, recursive=True)
configs.update(opts)
if torch.cuda.is_available() and int(configs.run.use_cuda):
torch.cuda.set_device(configs.run.gpu_id)
device = torch.device("cuda:" + str(configs.run.gpu_id))
torch.backends.cudnn.benchmark = True
else:
device = torch.device("cpu")
torch.backends.cudnn.benchmark = False
if int(configs.run.deterministic):
set_torch_deterministic()
model = builder.make_model(
device, int(configs.run.random_state) if int(configs.run.deterministic) else None
)
model.partition_parameters(arch_param_list=["theta"])
train_loader, validation_loader, test_loader = builder.make_dataloader()
weight_optimizer = builder.make_weight_optimizer(model)
arch_optimizer = builder.make_arch_optimizer(model)
scheduler = builder.make_scheduler(weight_optimizer)
criterion = builder.make_criterion().to(device)
saver = BestKModelSaver(k=int(configs.checkpoint.save_best_model_k))
lg.info(f"Number of parameters: {count_parameters(model)}")
model_name = f"{configs.model.name}"
checkpoint = (
f"./checkpoint/{configs.checkpoint.checkpoint_dir}/{model_name}_{configs.checkpoint.model_comment}.pt"
)
lg.info(f"Current checkpoint: {checkpoint}")
mlflow.set_experiment(configs.run.experiment)
experiment = mlflow.get_experiment_by_name(configs.run.experiment)
# run_id_prefix = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
mlflow.start_run(run_name=model_name)
mlflow.log_params(
{
"exp_name": configs.run.experiment,
"exp_id": experiment.experiment_id,
"run_id": mlflow.active_run().info.run_id,
"inbit": configs.quantize.input_bit,
"wbit": configs.quantize.weight_bit,
"init_weight_lr": configs.weight_optimizer.lr,
"init_arch_lr": configs.arch_optimizer.lr,
"checkpoint": checkpoint,
"restore_checkpoint": configs.checkpoint.restore_checkpoint,
"pid": os.getpid(),
}
)
lossv, accv = [0], [0]
epoch = 0
try:
lg.info(
f"Experiment {configs.run.experiment} ({experiment.experiment_id}) starts. Run ID: ({mlflow.active_run().info.run_id}). PID: ({os.getpid()}). PPID: ({os.getppid()}). Host: ({os.uname()[1]})"
)
lg.info(configs)
solution, score = None, None
if int(configs.checkpoint.resume):
load_model(
model,
configs.checkpoint.restore_checkpoint,
ignore_size_mismatch=int(configs.checkpoint.no_linear),
)
lg.info("Validate resumed model...")
validate(
model,
validation_loader,
0,
criterion,
lossv,
accv,
device=device,
)
state_dict = torch.load(configs.checkpoint.restore_checkpoint)
state_dict = state_dict.get("state_dict", state_dict)
if "solution" in state_dict.keys():
solution = state_dict["solution"]
lg.info(f"Loading the solution {solution}")
lg.info(f"Original score: {state_dict['score']}")
model.set_sample_arch(solution["arch"])
if configs.teacher.name and configs.teacher.checkpoint:
lg.info(f"Build teacher model {configs.teacher.name}")
teacher = builder.make_model(
device,
int(configs.run.random_state) if int(configs.run.deterministic) else None,
model_name=configs.teacher.name,
)
load_model(teacher, path=configs.teacher.checkpoint)
teacher_criterion = builder.make_criterion(name="ce").to(device)
teacher.eval()
lg.info(f"Validate teacher model {configs.teacher.name}")
validate(teacher, validation_loader, -1, teacher_criterion, [], [], device=device)
else:
teacher = None
arch_sampler = ArchSampler(
model=model,
strategy=configs.super_layer.sampler.strategy.dict(),
n_layers_per_block=configs.super_layer.arch.n_layers_per_block,
)
arch_sampler.set_total_steps(configs.run.n_epochs * len(train_loader))
sample_arch = get_named_sample_arch(model.arch_space, name="largest")
model.set_sample_arch(sample_arch)
for epoch in range(1, int(configs.run.n_epochs) + 1):
train(
model,
train_loader,
weight_optimizer,
arch_optimizer,
scheduler,
epoch,
criterion,
device,
teacher=teacher,
)
if epoch > int(configs.run.n_epochs) - 10: # validate and store in the last 10 epochs
lg.info(f"Validating...")
lossv_cur, accv_cur = [], []
for _ in range(5):
validate(
model,
validation_loader,
epoch,
teacher_criterion if teacher else criterion,
lossv_cur,
accv_cur,
device=device,
)
avg_acc, std_acc = np.mean(accv_cur), np.std(accv_cur)
accv.append(avg_acc)
lg.info(f"Validation: average acc: {avg_acc}, std acc: {std_acc}")
lg.info(f"Test...")
test(
model,
test_loader,
epoch,
teacher_criterion if teacher else criterion,
[],
[],
device=device,
)
saver.save_model(
model, accv[-1], epoch=epoch, path=checkpoint, save_model=False, print_msg=True
)
except KeyboardInterrupt:
lg.warning("Ctrl-C Stopped")
if __name__ == "__main__":
main()
```
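The loop above alternates between weight updates and architecture-parameter updates by freezing one parameter group while stepping the other group's optimizer. A minimal, self-contained sketch of that alternation pattern in plain PyTorch (the two `nn.Linear` groups and all names are illustrative stand-ins, not this repo's API):

```python
import torch
from torch import nn

# Two hypothetical parameter groups, standing in for weight vs. architecture parameters.
weight_part, arch_part = nn.Linear(8, 8), nn.Linear(8, 2)
weight_optimizer = torch.optim.Adam(weight_part.parameters(), lr=1e-3)
arch_optimizer = torch.optim.Adam(arch_part.parameters(), lr=1e-2)
train_arch_interval = 3  # step arch params on every third batch, as in the loop above

def set_trainable(module: nn.Module, flag: bool) -> None:
    for p in module.parameters():
        p.requires_grad_(flag)

for batch_idx in range(12):
    x, target = torch.randn(4, 8), torch.randint(0, 2, (4,))
    train_arch_flag = batch_idx % train_arch_interval == train_arch_interval - 1
    set_trainable(arch_part, train_arch_flag)
    set_trainable(weight_part, not train_arch_flag)
    optimizer = arch_optimizer if train_arch_flag else weight_optimizer
    optimizer.zero_grad()
    loss = nn.functional.cross_entropy(arch_part(weight_part(x)), target)
    loss.backward()
    optimizer.step()
```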
|
{
"source": "JeremieMelo/dct_cuda",
"score": 2
}
|
#### File: src/dct/dct_unitest.py
```python
import os
import sys
import numpy as np
import unittest
import torch
from torch.autograd import Function, Variable
import time
import scipy
from scipy import fftpack
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from src import dct
from src import dct_lee
#from src import dct_lee as dct
from src import discrete_spectral_transform
sys.path.pop()
import pdb
dtype = torch.float32
class DCTOpTest(unittest.TestCase):
def test_dctRandom(self):
N = 4
x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
#x = Variable(torch.tensor([[1, 2, 7, 9, 20, 31], [4, 5, 9, 2, 1, 6]], dtype=dtype))
golden_value = discrete_spectral_transform.dct_2N(x).data.numpy()
print("golden_value")
print(golden_value)
# test cpu using N-FFT
#pdb.set_trace()
custom = dct.DCT(algorithm='N')
dct_value = custom.forward(x)
print("dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test cpu using 2N-FFT
#pdb.set_trace()
custom = dct.DCT(algorithm='2N')
dct_value = custom.forward(x)
print("dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.DCT(algorithm='N')
dct_value = custom.forward(x.cuda()).cpu()
print("dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.DCT(algorithm='2N')
dct_value = custom.forward(x.cuda()).cpu()
print("dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
#golden_value = discrete_spectral_transform.dct2_2N(x).data.numpy()
#print("2D golden_value")
#print(golden_value)
#custom = dct.DCT()
#dct2_value = custom.forward(dct_value.cuda().t().contiguous()).cpu()
#dct2_value = dct2_value.t().contiguous()
#print("dct2_value cuda")
#print(dct2_value.data.numpy())
#np.testing.assert_allclose(dct2_value.data.numpy(), golden_value)
def test_idctRandom(self):
N = 4
x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
#x = Variable(torch.tensor([[1, 2, 7, 9, 20, 31], [4, 5, 9, 2, 1, 6]], dtype=dtype))
print("x")
print(x)
y = discrete_spectral_transform.dct_N(x)
print("y")
print(y.data.numpy())
golden_value = discrete_spectral_transform.idct_2N(y).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu use N-FFT
#pdb.set_trace()
custom = dct.IDCT(algorithm='N')
dct_value = custom.forward(y)
print("idct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-5)
# test cpu use 2N-FFT
#pdb.set_trace()
custom = dct.IDCT(algorithm='2N')
dct_value = custom.forward(y)
print("idct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-5)
# test gpu
custom = dct.IDCT(algorithm='N')
dct_value = custom.forward(y.cuda()).cpu()
print("idct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-5)
# test gpu
custom = dct.IDCT(algorithm='2N')
dct_value = custom.forward(y.cuda()).cpu()
print("idct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-5)
def test_dct2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=dtype).uniform_(0, 10.0)
golden_value = discrete_spectral_transform.dct2_N(x).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu using N-FFT
#pdb.set_trace()
custom = dct.DCT2(algorithm='N')
dct_value = custom.forward(x)
print("2D dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test cpu using 2N-FFT
#pdb.set_trace()
custom = dct.DCT2(algorithm='2N')
dct_value = custom.forward(x)
print("2D dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.DCT2(algorithm='N')
dct_value = custom.forward(x.cuda()).cpu()
print("2D dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.DCT2(algorithm='2N')
dct_value = custom.forward(x.cuda()).cpu()
print("2D dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
def test_idct2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).to(dtype)
print("2D x")
print(x)
y = discrete_spectral_transform.dct2_2N(x)
golden_value = discrete_spectral_transform.idct2_2N(y).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu using N-FFT
#pdb.set_trace()
custom = dct.IDCT2(algorithm='N')
dct_value = custom.forward(y)
print("2D dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test cpu using 2N-FFT
#pdb.set_trace()
custom = dct.IDCT2(algorithm='2N')
dct_value = custom.forward(y)
print("2D dct_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.IDCT2(algorithm='N')
dct_value = custom.forward(y.cuda()).cpu()
print("2D dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
# test gpu
custom = dct.IDCT2(algorithm='2N')
dct_value = custom.forward(y.cuda()).cpu()
print("2D dct_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, rtol=1e-6, atol=1e-5)
def test_idxct2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
print("2D x")
print(x)
golden_value = discrete_spectral_transform.idxt(x, 0).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDXCT()
dct_value = custom.forward(x)
print("dxt_value")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, atol=1e-14)
# test gpu
custom = dct.IDXCT()
dct_value = custom.forward(x.cuda()).cpu()
print("dxt_value cuda")
print(dct_value.data.numpy())
np.testing.assert_allclose(dct_value.data.numpy(), golden_value, atol=1e-14)
class DSTOpTest(unittest.TestCase):
def test_dstRandom(self):
N = 4
x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
#x = Variable(torch.tensor([[1, 2, 7, 9, 20, 31], [4, 5, 9, 2, 1, 6]], dtype=dtype))
import scipy
from scipy import fftpack
#golden_value = discrete_spectral_transform.dst(x).data.numpy()
golden_value = torch.from_numpy(fftpack.dst(x.data.numpy())).data.numpy() / N
print("golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.DST()
dst_value = custom.forward(x)
print("dst_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, rtol=1e-5)
# test gpu
custom = dct.DST()
dst_value = custom.forward(x.cuda()).cpu()
print("dst_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, rtol=1e-5)
def test_idstRandom(self):
N = 4
x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
#x = Variable(torch.tensor([[1, 2, 7, 9, 20, 31], [4, 5, 9, 2, 1, 6]], dtype=dtype))
print("x")
print(x)
import scipy
from scipy import fftpack
#y = discrete_spectral_transform.dst(x)
y = torch.from_numpy(fftpack.dst(x.data.numpy()))
print("y")
print(y.data.numpy())
#golden_value = discrete_spectral_transform.idst(y).data.numpy()
golden_value = torch.from_numpy(fftpack.idst(y.data.numpy())).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDST()
dst_value = custom.forward(y)
print("idst_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, rtol=1e-5)
# test gpu
custom = dct.IDST()
dst_value = custom.forward(y.cuda()).cpu()
print("idst_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, rtol=1e-5)
def test_idxst2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
print("2D x")
print(x)
golden_value = discrete_spectral_transform.idxt(x, 1).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDXST()
dst_value = custom.forward(x)
print("dxt_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
# test gpu
custom = dct.IDXST()
dst_value = custom.forward(x.cuda()).cpu()
print("dxt_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
class DXTOpTest(unittest.TestCase):
def test_idcct2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
print("2D x")
print(x)
golden_value = discrete_spectral_transform.idcct2(x).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDCCT2()
dst_value = custom.forward(x)
print("dxt_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
# test gpu
custom = dct.IDCCT2()
dst_value = custom.forward(x.cuda()).cpu()
print("dxt_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
def test_idcst2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
print("2D x")
print(x)
golden_value = discrete_spectral_transform.idcst2(x).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDCST2()
dst_value = custom.forward(x)
print("dxt_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
# test gpu
custom = dct.IDCST2()
dst_value = custom.forward(x.cuda()).cpu()
print("dxt_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
def test_idsct2Random(self):
torch.manual_seed(10)
M = 4
N = 8
x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
print("2D x")
print(x)
golden_value = discrete_spectral_transform.idsct2(x).data.numpy()
print("2D golden_value")
print(golden_value)
# test cpu
#pdb.set_trace()
custom = dct.IDSCT2()
dst_value = custom.forward(x)
print("dxt_value")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
# test gpu
custom = dct.IDSCT2()
dst_value = custom.forward(x.cuda()).cpu()
print("dxt_value cuda")
print(dst_value.data.numpy())
np.testing.assert_allclose(dst_value.data.numpy(), golden_value, atol=1e-14)
def eval_runtime():
# x = torch.tensor([1, 2, 7, 9, 20, 31], dtype=torch.float64)
# print(dct_N(x))
N = 4096
runs = 100
# x = torch.empty(10, N, N, dtype=torch.float64).uniform_(0, 10.0).cuda()
with open("../result_2d.dat", "r") as f:
lines = f.readlines()
M = int(lines[0].strip())
N = int(lines[1].strip())
x = np.resize(np.array([float(i) for i in lines[2:]]).astype(np.float64), [M, N])
x = torch.Tensor(x).to(torch.float64).cuda()
expk0 = discrete_spectral_transform.get_expk(M, dtype=x.dtype, device=x.device)
expk1 = discrete_spectral_transform.get_expk(N, dtype=x.dtype, device=x.device)
print("M = {}, N = {}".format(M, N))
'''
x_numpy = x.data.cpu().numpy()
tt = time.time()
for i in range(runs):
y = fftpack.dct(fftpack.dct(x_numpy.T, norm=None).T/N, norm=None)/M
print("CPU: scipy.fftpack.dct2d takes %f ms" % ((time.time()-tt)/runs*1000))
# 9s for 200 iterations 1024x1024 on GTX 1080
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_2N = discrete_spectral_transform.dct2_2N(x, expk0=expk0, expk1=expk1)
torch.cuda.synchronize()
#print(prof)
print("Pytorch: dct2d_2N takes %.5f ms" % ((time.time()-tt)/runs*1000))
# 11s for 200 iterations 1024x1024 on GTX 1080
perm0 = discrete_spectral_transform.get_perm(M, dtype=torch.int64, device=x.device)
perm1 = discrete_spectral_transform.get_perm(N, dtype=torch.int64, device=x.device)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.dct2_N(x, perm0=perm0, expk0=expk0, perm1=perm1, expk1=expk1)
torch.cuda.synchronize()
#print(prof)
print("Pytorch: dct2d_N takes %.5f ms" % ((time.time()-tt)/runs*1000))
dct2func = dct.DCT2(expk0, expk1, algorithm='2N')
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_2N = dct2func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("CUDA: DCT2d_2N Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
dct2func = dct.DCT2(expk0, expk1, algorithm='N')
y_N = dct2func.forward(x)
torch.cuda.synchronize()
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
tt = time.time()
for i in range(runs):
y_N = dct2func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("CUDA: DCT2d_N Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
exit()
dct2func = dct_lee.DCT2(expk0, expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = dct2func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("CUDA: DCT2d_Lee Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
exit()
'''
y_N = discrete_spectral_transform.idct2_2N(x, expk0=expk0, expk1=expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.idct2_2N(x, expk0=expk0, expk1=expk1)
torch.cuda.synchronize()
#print(prof)
print("idct2_2N takes %.5f ms" % ((time.time()-tt)/runs*1000))
idct2func = dct.IDCT2(expk0, expk1, algorithm='2N')
y_N = idct2func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = idct2func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDCT2_2N Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
idct2func = dct.IDCT2(expk0, expk1, algorithm='N')
y_N = idct2func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = idct2func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDCT2_N Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
y_N = discrete_spectral_transform.idxt(x, 1, expk=expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.idxt(x, 1, expk=expk1)
torch.cuda.synchronize()
#print(prof)
print("idxt takes %.5f ms" % ((time.time()-tt)/runs*1000))
idxct_func = dct.IDXST(expk1)
y_N = idxct_func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = idxct_func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDXCT Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
# torch.cuda.synchronize()
# tt = time.time()
# #with torch.autograd.profiler.profile(use_cuda=True) as prof:
# for i in range(runs):
# y_N = torch.rfft(x[i%10].view([1, N, N]), signal_ndim=2, onesided=False)
# torch.cuda.synchronize()
# #print(prof)
# print("torch.rfft2d takes %.5f ms" % ((time.time()-tt)/runs*1000))
y_N = discrete_spectral_transform.idcct2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.idcct2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
#print(prof)
print("idcct2 takes %.5f ms" % ((time.time()-tt)/runs*1000))
func = dct.IDCCT2(expk0, expk1)
y_N = func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDCCT2 Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
y_N = discrete_spectral_transform.idcst2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.idcst2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
#print(prof)
print("idcst2 takes %.5f ms" % ((time.time()-tt)/runs*1000))
func = dct.IDCST2(expk0, expk1)
y_N = func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDCST2 Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
y_N = discrete_spectral_transform.idsct2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = discrete_spectral_transform.idsct2(x, expk_0=expk0, expk_1=expk1)
torch.cuda.synchronize()
#print(prof)
print("idsct2 takes %.5f ms" % ((time.time()-tt)/runs*1000))
func = dct.IDSCT2(expk0, expk1)
y_N = func.forward(x)
torch.cuda.synchronize()
tt = time.time()
#with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(runs):
y_N = func.forward(x)
torch.cuda.synchronize()
#print(prof)
print("IDSCT2 Function takes %.5f ms" % ((time.time()-tt)/runs*1000))
if __name__ == '__main__':
# torch.manual_seed(10)
# np.random.seed(10)
# unittest.main()
eval_runtime()
```
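The timing pattern repeated throughout `eval_runtime` (warm up, synchronize, time a fixed number of runs, report milliseconds per call) can be factored into a small helper. A sketch assuming a CUDA device is available; `time_cuda_ms` is a hypothetical helper, not part of this repo:

```python
import time
import torch

def time_cuda_ms(fn, runs: int = 100) -> float:
    """Average milliseconds per call of fn, bracketed by CUDA synchronization."""
    fn()  # warm-up call (kernel compilation, workspace allocation)
    torch.cuda.synchronize()
    tt = time.time()
    for _ in range(runs):
        fn()
    torch.cuda.synchronize()
    return (time.time() - tt) / runs * 1000

# Usage sketch:
# x = torch.randn(1024, 1024, device="cuda")
# print("matmul takes %.5f ms" % time_cuda_ms(lambda: x @ x))
```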
|
{
"source": "JeremieMelo/L2ight",
"score": 2
}
|
#### File: core/models/sparse_bp_cnn.py
```python
from collections import OrderedDict
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from pyutils.general import logger
from torch import Tensor, nn
from torch.types import Device, _size
from .layers.activation import ReLUN
from .layers.custom_conv2d import MZIBlockConv2d
from .layers.custom_linear import MZIBlockLinear
from .sparse_bp_base import SparseBP_Base
__all__ = ["SparseBP_MZI_CNN"]
class ConvBlock(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: int = 3,
miniblock: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.conv = MZIBlockConv2d(
in_channel,
out_channel,
kernel_size,
miniblock,
bias,
stride,
padding,
mode=mode,
v_max=v_max,
v_pi=v_pi,
w_bit=w_bit,
in_bit=in_bit,
photodetect=photodetect,
device=device,
)
self.bn = nn.BatchNorm2d(out_channel)
self.activation = ReLUN(act_thres, inplace=True)
def forward(self, x: Tensor) -> Tensor:
return self.activation(self.bn(self.conv(x)))
class LinearBlock(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
miniblock: int = 8,
bias: bool = False,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
activation: bool = True,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.linear = MZIBlockLinear(
in_channel, out_channel, miniblock, bias, mode, v_max, v_pi, w_bit, in_bit, photodetect, device
)
self.activation = ReLUN(act_thres, inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation is not None:
x = self.activation(x)
return x
class SparseBP_MZI_CNN(SparseBP_Base):
"""MZI CNN (Shen+, Nature Photonics 2017). Support sparse backpropagation. Blocking matrix multiplication."""
def __init__(
self,
img_height: int,
img_width: int,
in_channel: int,
n_class: int,
kernel_list: List[int] = [32],
kernel_size_list: List[int] = [3],
pool_out_size: int = 5,
stride_list=[1],
padding_list=[1],
hidden_list: List[int] = [32],
block_list: List[int] = [8],
in_bit: int = 32,
w_bit: int = 32,
mode: str = "usv",
v_max: float = 10.8,
v_pi: float = 4.36,
act_thres: float = 6.0,
photodetect: bool = True,
bias: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.img_height = img_height
self.img_width = img_width
self.in_channel = in_channel
self.n_class = n_class
self.kernel_list = kernel_list
self.kernel_size_list = kernel_size_list
self.stride_list = stride_list
self.padding_list = padding_list
self.pool_out_size = pool_out_size
self.hidden_list = hidden_list
self.block_list = block_list
self.in_bit = in_bit
self.w_bit = w_bit
self.mode = mode
self.v_max = v_max
self.v_pi = v_pi
self.act_thres = act_thres
self.photodetect = photodetect
self.bias = bias
self.device = device
self.build_layers()
self.drop_masks = None
self.reset_parameters()
self.gamma_noise_std = 0
self.crosstalk_factor = 0
def build_layers(self):
self.features = OrderedDict()
for idx, out_channel in enumerate(self.kernel_list, 0):
layer_name = "conv" + str(idx + 1)
in_channel = self.in_channel if (idx == 0) else self.kernel_list[idx - 1]
self.features[layer_name] = ConvBlock(
in_channel,
out_channel,
self.kernel_size_list[idx],
self.block_list[idx],
self.bias,
self.stride_list[idx],
self.padding_list[idx],
self.mode,
self.v_max,
self.v_pi,
self.w_bit,
self.in_bit,
self.photodetect,
self.act_thres,
self.device,
)
self.features = nn.Sequential(self.features)
if self.pool_out_size > 0:
self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)
feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size
else:
self.pool2d = None
img_height, img_width = self.img_height, self.img_width
for layer in self.modules():
if isinstance(layer, MZIBlockConv2d):
img_height, img_width = layer.get_output_dim(img_height, img_width)
feature_size = img_height * img_width * self.kernel_list[-1]
self.classifier = OrderedDict()
for idx, hidden_dim in enumerate(self.hidden_list, 0):
layer_name = "fc" + str(idx + 1)
in_channel = feature_size if idx == 0 else self.hidden_list[idx - 1]
out_channel = hidden_dim
self.classifier[layer_name] = LinearBlock(
in_channel,
out_channel,
miniblock=self.block_list[idx + len(self.kernel_list)],
bias=self.bias,
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
activation=True,
act_thres=self.act_thres,
device=self.device,
)
layer_name = "fc" + str(len(self.hidden_list) + 1)
self.classifier[layer_name] = MZIBlockLinear(
self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,
self.n_class,
miniblock=self.block_list[-1],
bias=self.bias,
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=self.device,
)
self.classifier = nn.Sequential(self.classifier)
def forward(self, x: Tensor) -> Tensor:
x = self.features(x)
if self.pool2d is not None:
x = self.pool2d(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
```
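For reference, a minimal instantiation sketch of `SparseBP_MZI_CNN` based only on the constructor shown above; the shape and bit-width values are illustrative rather than the repo's configs, and a CUDA device is assumed since the layers default to one:

```python
import torch
# from core.models.sparse_bp_cnn import SparseBP_MZI_CNN  # import path taken from the file header above

device = torch.device("cuda")
model = SparseBP_MZI_CNN(
    img_height=28,
    img_width=28,
    in_channel=1,
    n_class=10,
    kernel_list=[16, 16],
    kernel_size_list=[3, 3],
    pool_out_size=5,
    stride_list=[1, 1],
    padding_list=[1, 1],
    hidden_list=[32],
    block_list=[8, 8, 8, 8],  # one per conv layer, one per hidden FC layer, one for the classifier head
    in_bit=32,
    w_bit=32,
    mode="usv",
    device=device,
).to(device)
y = model(torch.randn(2, 1, 28, 28, device=device))
print(y.shape)  # expected: torch.Size([2, 10])
```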
#### File: core/models/sparse_bp_resnet.py
```python
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from pyutils.general import logger
from torch import Tensor, nn
from torch.nn.modules.activation import ReLU
from torch.types import Device, _size
from .layers.activation import ReLUN
from .layers.custom_conv2d import MZIBlockConv2d
from .layers.custom_linear import MZIBlockLinear
from .sparse_bp_base import SparseBP_Base
__all__ = [
"SparseBP_MZI_ResNet18",
"SparseBP_MZI_ResNet34",
"SparseBP_MZI_ResNet50",
"SparseBP_MZI_ResNet101",
"SparseBP_MZI_ResNet152",
]
def conv3x3(
in_planes,
out_planes,
miniblock: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
device: Device = torch.device("cuda"),
):
conv = MZIBlockConv2d(
in_planes,
out_planes,
3,
miniblock,
bias,
stride,
padding,
mode=mode,
v_max=v_max,
v_pi=v_pi,
w_bit=w_bit,
in_bit=in_bit,
photodetect=photodetect,
device=device,
)
return conv
def conv1x1(
in_planes,
out_planes,
miniblock: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
device: Device = torch.device("cuda"),
):
conv = MZIBlockConv2d(
in_planes,
out_planes,
1,
miniblock,
bias,
stride,
padding,
mode=mode,
v_max=v_max,
v_pi=v_pi,
w_bit=w_bit,
in_bit=in_bit,
photodetect=photodetect,
device=device,
)
return conv
def Linear(
in_channel,
out_channel,
miniblock: int = 8,
bias: bool = False,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
device: Device = torch.device("cuda"),
):
# linear = nn.Linear(in_channel, out_channel)
linear = MZIBlockLinear(
in_channel, out_channel, miniblock, bias, mode, v_max, v_pi, w_bit, in_bit, photodetect, device=device
)
return linear
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
in_planes,
planes,
stride=1,
# unique parameters
miniblock: int = 8,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super(BasicBlock, self).__init__()
# self.conv1 = nn.Conv2d(
# in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv1 = conv3x3(
in_planes,
planes,
miniblock=miniblock,
bias=False,
stride=stride,
padding=1,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
)
self.bn1 = nn.BatchNorm2d(planes)
self.act1 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
# stride=1, padding=1, bias=False)
self.conv2 = conv3x3(
planes,
planes,
miniblock=miniblock,
bias=False,
stride=1,
padding=1,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
)
self.bn2 = nn.BatchNorm2d(planes)
self.act2 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
self.shortcut = nn.Identity()
# self.shortcut.conv1_spatial_sparsity = self.conv1.bp_input_sampler.spatial_sparsity
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
conv1x1(
in_planes,
self.expansion * planes,
miniblock=miniblock,
bias=False,
stride=stride,
padding=0,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = self.act1(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = self.act2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
in_planes: int,
planes: int,
stride: int = 1,
# unique parameters
miniblock: int = 8,
mode: str = "weight",
v_max: float = 4.36, # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
v_pi: float = 4.36,
w_bit: int = 16,
in_bit: int = 16,
photodetect: bool = False,
act_thres: int = 6,
device: Device = torch.device("cuda"),
) -> None:
super(Bottleneck, self).__init__()
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv1 = conv1x1(
in_planes,
planes,
miniblock=miniblock,
bias=False,
stride=1,
padding=0,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
)
self.bn1 = nn.BatchNorm2d(planes)
self.act1 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = conv3x3(
planes,
planes,
miniblock=miniblock,
bias=False,
stride=stride,
padding=1,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
)
self.bn2 = nn.BatchNorm2d(planes)
self.act2 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
# self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.conv3 = conv1x1(
planes,
self.expansion * planes,
miniblock=miniblock,
bias=False,
stride=1,
padding=0,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.act3 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(
in_planes,
self.expansion * planes,
miniblock=miniblock,
bias=False,
stride=stride,
padding=0,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
photodetect=photodetect,
device=device,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = self.act1(self.bn1(self.conv1(x)))
out = self.act2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = self.act3(out)
return out
class ResNet(SparseBP_Base):
"""MZI ResNet (Shen+, Nature Photonics 2017). Support sparse backpropagation. Blocking matrix multiplication."""
def __init__(
self,
block,
num_blocks,
img_height: int,
img_width: int,
in_channel: int,
n_class: int,
block_list: List[int] = [8],
in_bit: int = 32,
w_bit: int = 32,
mode: str = "usv",
v_max: float = 10.8,
v_pi: float = 4.36,
act_thres: float = 6.0,
photodetect: bool = True,
bias: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
# resnet params
self.block = block
self.num_blocks = num_blocks
self.in_planes = 64
self.img_height = img_height
self.img_width = img_width
self.in_channel = in_channel
self.n_class = n_class
# list of block size
self.block_list = block_list
self.in_bit = in_bit
self.w_bit = w_bit
self.mode = mode
self.v_max = v_max
self.v_pi = v_pi
self.act_thres = act_thres
self.photodetect = photodetect
self.device = device
# build layers
blkIdx = 0
self.conv1 = conv3x3(
in_channel,
64,
miniblock=self.block_list[0],
bias=False,
stride=1 if img_height <= 64 else 2, # downsample for imagenet, dogs, cars
padding=1,
mode=mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=self.device,
)
self.bn1 = nn.BatchNorm2d(64)
blkIdx += 1
self.layer1 = self._make_layer(
block,
64,
num_blocks[0],
stride=1,
miniblock=self.block_list[0],
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=device,
)
blkIdx += 1
self.layer2 = self._make_layer(
block,
128,
num_blocks[1],
stride=2,
miniblock=self.block_list[0],
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=device,
)
blkIdx += 1
self.layer3 = self._make_layer(
block,
256,
num_blocks[2],
stride=2,
miniblock=self.block_list[0],
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=device,
)
blkIdx += 1
self.layer4 = self._make_layer(
block,
512,
num_blocks[3],
stride=2,
miniblock=self.block_list[0],
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=device,
)
blkIdx += 1
self.linear = Linear(
512 * block.expansion,
self.n_class,
miniblock=self.block_list[0],
bias=False,
mode=self.mode,
v_max=self.v_max,
v_pi=self.v_pi,
in_bit=self.in_bit,
w_bit=self.w_bit,
photodetect=self.photodetect,
device=device,
)
self.drop_masks = None
self.reset_parameters()
self.gamma_noise_std = 0
self.crosstalk_factor = 0
def _make_layer(
self,
block,
planes,
num_blocks,
stride,
# unique parameters
miniblock: int = 8,
mode: str = "usv",
v_max: float = 10.8,
v_pi: float = 4.36,
in_bit: int = 32,
w_bit: int = 32,
act_thres: float = 6.0,
photodetect: bool = True,
device: Device = torch.device("cuda"),
):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(
self.in_planes,
planes,
stride,
miniblock=miniblock,
mode=mode,
v_max=v_max,
v_pi=v_pi,
in_bit=in_bit,
w_bit=w_bit,
act_thres=act_thres,
photodetect=photodetect,
device=device,
)
)
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x: Tensor) -> Tensor:
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
if x.size(-1) > 64: # 224 x 224, e.g., cars, dogs, imagenet
out = F.max_pool2d(out, kernel_size=3, stride=2, padding=1)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.adaptive_avg_pool2d(out, 1)
out = torch.flatten(out, 1)
out = self.linear(out)
return out
def SparseBP_MZI_ResNet18(*args, **kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], *args, **kwargs)
def SparseBP_MZI_ResNet34(*args, **kwargs):
return ResNet(BasicBlock, [3, 4, 6, 3], *args, **kwargs)
def SparseBP_MZI_ResNet50(*args, **kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], *args, **kwargs)
def SparseBP_MZI_ResNet101(*args, **kwargs):
return ResNet(Bottleneck, [3, 4, 23, 3], *args, **kwargs)
def SparseBP_MZI_ResNet152(*args, **kwargs):
return ResNet(Bottleneck, [3, 8, 36, 3], *args, **kwargs)
def test():
device = torch.device("cuda")
net = SparseBP_MZI_ResNet18(
img_height=32,
img_width=32,
in_channel=3,
n_class=10,
block_list=[8, 8, 8, 8, 8, 8],
in_bit=32,
w_bit=32,
mode="usv",
v_max=10.8,
v_pi=4.36,
act_thres=6,
photodetect=True,
device=device,
).to(device)
x = torch.randn(2, 3, 32, 32).to(device)
print(net)
y = net(x)
print(y.shape)
if __name__ == "__main__":
test()
```
#### File: cifar100/resnet18/parse_train_from_map.py
```python
import os
import re
def parse_map_log():
root = "log/cifar10/vgg8/cs"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
accs = [59.62, 66.23, 69.89, 76.06, 83.19, 85.19, 89.13]
for acc in accs:
file = os.path.join(root, f'ds-0.5_fbs-0.6_norm-none_first-0_ss-0_cs-0.6_mapacc-{acc}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"map: total: {len(run_ids)} runs")
print("accs =", accs)
print("runs =", run_ids)
if __name__ == "__main__":
parse_map_log()
```
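The `Run ID` regex used by this and the following parse scripts targets the experiment banner written by the training scripts (`... Run ID: (<id>). PID: (<pid>) ...`). A small standalone check with an illustrative log line:

```python
import re

p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
sample = "Experiment cifar10_vgg8 (3) starts. Run ID: (a1b2c3d4e5f6). PID: (21133). PPID: (1). Host: (node0)"
match = p.search(sample)
print(match.group(1))  # -> a1b2c3d4e5f6  (equivalent to match.groups(1)[0] as used above)
```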
#### File: script/cnn-L/parse_cs_ss_log.py
```python
import os
import re
def parse_cs_log():
root = "log/fmnist/cnn3/cs"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
for s in [0.2, 0.6, 0.9]:
file = os.path.join(root, f'norm-none_ss-0_cs-{s}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"cs: total: {len(run_ids)} runs")
print("cs_runs =", run_ids)
def parse_ss_log():
root = "log/fmnist/cnn3/ss"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
for s in [0.2, 0.6, 0.9]:
file = os.path.join(root, f'norm-none_cs-0_ss-{s}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"ss: total: {len(run_ids)} runs")
print("ss_runs =", run_ids)
if __name__ == "__main__":
parse_cs_log()
parse_ss_log()
```
#### File: script/cnn-L/parse_feedback_log.py
```python
import os
import re
def parse_uniform_log():
root = "log/fmnist/cnn3/fbs"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
for s in [0.2, 0.4, 0.6, 0.8, 0.9]:
file = os.path.join(root, f'uniform_norm-none_fbs-{s}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"uniform: total: {len(run_ids)} runs")
print("uniform_runs =", run_ids)
def parse_topk_log():
root = "log/fmnist/cnn3/fbs"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
for s in [0.2, 0.4, 0.6, 0.8, 0.9]:
file = os.path.join(root, f'btopk_norm-none_fbs-{s}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"topk: total: {len(run_ids)} runs")
print("topk_runs =", run_ids)
def parse_gtopk_log():
root = "log/fmnist/cnn3/fbs"
p = re.compile(r".*Run ID: \(([0-9a-z]+)\).*PID.*")
run_ids = []
for s in [0.2, 0.4, 0.6, 0.8, 0.9]:
file = os.path.join(root, f'gtopk_norm-none_fbs-{s}.log')
with open(file, "r") as f:
lines = f.read()
res = p.search(lines)
run_ids.append(res.groups(1)[0])
print(f"gtopk: total: {len(run_ids)} runs")
print("gtopk_runs =", run_ids)
if __name__ == "__main__":
parse_uniform_log()
parse_topk_log()
parse_gtopk_log()
```
#### File: script/cnn-L/train_feedback_gtopk.py
```python
import os
import subprocess
from multiprocessing import Pool
import mlflow
from pyutils.general import ensure_dir, logger
from pyutils.config import configs
root = "log/fmnist/cnn3/fbs"
script = 'train_learn.py'
config_file = 'config/fmnist/cnn3/fbs/learn.yml'
configs.load(config_file, recursive=True)
def task_launcher(s: float):
pres = ['python3',
script,
config_file
]
with open(os.path.join(root, f'gtopk_norm-none_fbs-{s}.log'), 'w') as wfid:
exp = [f"--sparse.bp_feedback_weight_sparsity={s}",
"--sparse.bp_feedback_alg=gtopk",
"--sparse.bp_feedback_norm=none",
"--checkpoint.model_comment=gtopk"]
logger.info(f"running command {pres + exp}")
subprocess.call(pres + exp, stderr=wfid, stdout=wfid)
if __name__ == '__main__':
# PID 21133 . 06:02 AM
ensure_dir(root)
mlflow.set_experiment(configs.run.experiment) # set experiments first
s = [0.2, 0.4, 0.6, 0.8, 0.9]
with Pool(5) as p:
p.map(task_launcher, s)
logger.info(f"Exp: {configs.run.experiment} Done.")
```
|
{
"source": "JeremieMelo/Memory-Efficient-Multi-Level-Generation",
"score": 3
}
|
#### File: core/models/resnet.py
```python
from builtins import isinstance
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.models.layers import MLGConv2d, MLGLinear
from .model_base import MLGBaseModel
__all__ = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
def conv3x3(
in_planes,
out_planes,
stride=1,
padding=1,
bias=False,
### unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
conv = MLGConv2d(
in_planes,
out_planes,
3,
stride=stride,
padding=padding,
bias=bias,
in_bit=in_bit,
w_bit=w_bit,
device=device,
)
return conv
def conv1x1(
in_planes,
out_planes,
stride=1,
bias=False,
### unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
"""1x1 convolution"""
conv = MLGConv2d(
in_planes,
out_planes,
1,
stride=stride,
padding=0,
bias=bias,
in_bit=in_bit,
w_bit=w_bit,
device=device,
)
return conv
def Linear(
in_channel,
out_channel,
bias=False,
### unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
linear = MLGLinear(in_channel, out_channel, bias=False, in_bit=in_bit, w_bit=w_bit, device=device)
return linear
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
in_planes,
planes,
stride=1,
## unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(
in_planes, planes, stride=stride, padding=1, bias=False, in_bit=in_bit, w_bit=w_bit, device=device
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(
planes, planes, stride=1, padding=1, bias=False, in_bit=in_bit, w_bit=w_bit, device=device
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
conv1x1(
in_planes,
self.expansion * planes,
stride=stride,
bias=False,
in_bit=in_bit,
w_bit=w_bit,
device=device,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out, inplace=True)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
in_planes,
planes,
stride=1,
## unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(
in_planes,
planes,
stride=1,
bias=False,
in_bit=in_bit,
w_bit=w_bit,
device=device,
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(
planes, planes, stride=stride, padding=1, bias=False, in_bit=in_bit, w_bit=w_bit, device=device
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(
planes,
self.expansion * planes,
stride=1,
bias=False,
in_bit=in_bit,
w_bit=w_bit,
device=device,
)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
conv1x1(
in_planes,
self.expansion * planes,
stride=stride,
bias=False,
in_bit=in_bit,
w_bit=w_bit,
device=device,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = F.relu(self.bn2(self.conv2(out)), inplace=True)
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out, inplace=True)
return out
class ResNet(MLGBaseModel):
_conv = (MLGConv2d,)
_linear = (MLGLinear,)
_conv_linear = (MLGConv2d, MLGLinear)
def __init__(
self,
block,
num_blocks,
num_classes=10,
### unique parameters
in_channels=3,
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
**kwargs,
):
super().__init__()
self.in_bit = in_bit
self.w_bit = w_bit
self.device = device
self.in_planes = 64
self.conv1 = conv3x3(
in_channels, 64, stride=1, padding=1, bias=False, in_bit=in_bit, w_bit=w_bit, device=device
)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(
block, 64, num_blocks[0], stride=1, in_bit=in_bit, w_bit=w_bit, device=device
)
self.layer2 = self._make_layer(
block, 128, num_blocks[1], stride=2, in_bit=in_bit, w_bit=w_bit, device=device
)
self.layer3 = self._make_layer(
block, 256, num_blocks[2], stride=2, in_bit=in_bit, w_bit=w_bit, device=device
)
self.layer4 = self._make_layer(
block, 512, num_blocks[3], stride=2, in_bit=in_bit, w_bit=w_bit, device=device
)
self.linear = Linear(
512 * block.expansion, num_classes, bias=False, in_bit=in_bit, w_bit=w_bit, device=device
)
self.reset_parameters()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, self._conv_linear):
m.reset_parameters()
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(
self,
block,
planes,
num_blocks,
stride,
### unique parameters
in_bit=16,
w_bit=16,
device=torch.device("cuda"),
):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, in_bit=in_bit, w_bit=w_bit, device=device))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(**kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def ResNet34(**kwargs):
return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def ResNet50(**kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def ResNet101(**kwargs):
return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def ResNet152(**kwargs):
return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
def test():
net = ResNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
```
#### File: cifar10/resnet18/train.py
```python
import os
import subprocess
from multiprocessing import Pool
import mlflow
from pyutils.general import ensure_dir, logger
from torchpack.utils.config import configs
dataset = "cifar10"
model = "resnet18"
exp = "train"
root = f"log/{dataset}/{model}/{exp}"
script = "train.py"
config_file = f"configs/{dataset}/{model}/train/{exp}.yml"
configs.load(config_file, recursive=True)
def task_launcher(args):
pres = ["python3", script, config_file]
base_in, base_out, qb, qu, qv, ortho, ckpt, id = args
with open(
os.path.join(root, f"bi-{base_in}_bo-{base_out}_qb-{qb}_qu-{qu}_qv-{qv}_ortho-{ortho}_run-{id}.log"), "w"
) as wfid:
exp = [
f"--teacher.checkpoint={ckpt}",
f"--criterion.ortho_loss_weight={ortho}",
f"--mlg.projection_alg=train",
f"--mlg.kd=1",
f"--mlg.base_in={base_in}",
f"--mlg.base_out={base_out}",
f"--mlg.basis_bit={qb}",
f"--mlg.coeff_in_bit={qu}",
f"--mlg.coeff_out_bit={qv}",
f"--run.random_state={41+id}",
]
logger.info(f"running command {' '.join(pres + exp)}")
subprocess.call(pres + exp, stderr=wfid, stdout=wfid)
if __name__ == "__main__":
ensure_dir(root)
mlflow.set_experiment(configs.run.experiment) # set experiments first
tasks = [(2, 44, 3, 6, 3, 0.05, "PATH-TO-TEACHER-CHECKPOINT", 1)]
with Pool(1) as p:
p.map(task_launcher, tasks)
logger.info(f"Exp: {configs.run.experiment} Done.")
```
|
{
"source": "JeremieMelo/pytorch-onn",
"score": 2
}
|
#### File: core/models/mzi_cnn.py
```python
from torchonn.op.mzi_op import project_matrix_to_unitary
from typing import List, Union
import torch
from torch import Tensor, nn
from torch.types import Device, _size
from torchonn.layers import MZIBlockConv2d, MZIBlockLinear
from torchonn.models import ONNBaseModel
from collections import OrderedDict
__all__ = ["MZI_CLASS_CNN"]
class ConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
dilation: _size = 1,
groups: int = 1,
bias: bool = False,
miniblock: int = 8,
mode: str = "weight",
decompose_alg: str = "clements",
photodetect: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.conv = MZIBlockConv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
miniblock=miniblock,
mode=mode,
decompose_alg=decompose_alg,
photodetect=photodetect,
device=device,
)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
return self.activation(self.bn(self.conv(x)))
class LinearBlock(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
miniblock: int = 8,
mode: str = "weight",
decompose_alg: str = "clements",
photodetect: bool = False,
activation: bool = True,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.linear = MZIBlockLinear(
in_features,
out_features,
bias=bias,
miniblock=miniblock,
mode=mode,
decompose_alg=decompose_alg,
photodetect=photodetect,
device=device,
)
self.activation = nn.ReLU(inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation is not None:
x = self.activation(x)
return x
class MZI_CLASS_CNN(ONNBaseModel):
"""
MZI CNN for classification.
Blocking matrix multiplication, which is much faster and more scalable than implementing the entire weight matrix on an MZI array.
Each block is implemented by a square MZI array
"""
_conv_linear = (MZIBlockConv2d, MZIBlockLinear)
_conv = (MZIBlockConv2d,)
_linear = (MZIBlockLinear,)
def __init__(
self,
img_height: int,
img_width: int,
in_channels: int,
num_classes: int,
kernel_list: List[int] = [32],
kernel_size_list: List[int] = [3],
stride_list: List[int] = [1],
padding_list: List[int] = [1],
dilation_list: List[int] = [1],
pool_out_size: int = 5,
hidden_list: List[int] = [32],
block_list: List[int] = [8],
mode: str = "usv",
decompose_alg: str = "clements",
photodetect: bool = True,
bias: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.img_height = img_height
self.img_width = img_width
self.in_channels = in_channels
self.num_classes = num_classes
self.kernel_list = kernel_list
self.kernel_size_list = kernel_size_list
self.stride_list = stride_list
self.padding_list = padding_list
self.dilation_list = dilation_list
self.pool_out_size = pool_out_size
self.hidden_list = hidden_list
self.block_list = block_list
self.mode = mode
self.decompose_alg = decompose_alg
self.photodetect = photodetect
self.bias = bias
self.device = device
self.build_layers()
self.reset_parameters()
def build_layers(self):
self.features = OrderedDict()
for idx, out_channels in enumerate(self.kernel_list, 0):
layer_name = "conv" + str(idx + 1)
in_channels = self.in_channels if (idx == 0) else self.kernel_list[idx - 1]
self.features[layer_name] = ConvBlock(
in_channels,
out_channels,
kernel_size=self.kernel_size_list[idx],
stride=self.stride_list[idx],
padding=self.padding_list[idx],
dilation=self.dilation_list[idx],
groups=1,
bias=self.bias,
miniblock=self.block_list[idx],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
device=self.device,
)
self.features = nn.Sequential(self.features)
if self.pool_out_size > 0:
self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)
feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size
else:
self.pool2d = None
img_height, img_width = self.img_height, self.img_width
for layer in self.modules():
if isinstance(layer, self._conv):
img_height, img_width = layer.get_output_dim(img_height, img_width)
feature_size = img_height * img_width * self.kernel_list[-1]
self.classifier = OrderedDict()
for idx, hidden_dim in enumerate(self.hidden_list, 0):
layer_name = "fc" + str(idx + 1)
in_channel = feature_size if idx == 0 else self.hidden_list[idx - 1]
out_channel = hidden_dim
self.classifier[layer_name] = LinearBlock(
in_channel,
out_channel,
bias=self.bias,
miniblock=self.block_list[idx + len(self.kernel_list)],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
activation=True,
device=self.device,
)
layer_name = "fc" + str(len(self.hidden_list) + 1)
self.classifier[layer_name] = LinearBlock(
self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,
self.num_classes,
bias=self.bias,
miniblock=self.block_list[-1],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
activation=False,
device=self.device,
)
self.classifier = nn.Sequential(self.classifier)
def unitary_projection(self) -> None:
assert self.mode == "usv", "Unitary projection can only be applied in usv mode"
for m in self.modules():
if isinstance(m, self._conv_linear):
m.U.data.copy_(project_matrix_to_unitary(m.U.data))
m.V.data.copy_(project_matrix_to_unitary(m.V.data))
def forward(self, x: Tensor) -> Tensor:
x = self.features(x)
if self.pool2d is not None:
x = self.pool2d(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
```
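A minimal usage sketch for `MZI_CLASS_CNN`, assuming the class is importable as defined above and a CUDA device is available (the layer defaults target CUDA); the image size, layer widths, and `block_list` values below are illustrative, not taken from the original repository.
```python
# Hedged usage sketch: instantiate the blocked MZI CNN and run a dummy forward pass.
import torch

device = torch.device("cuda")
model = MZI_CLASS_CNN(
    img_height=28,
    img_width=28,
    in_channels=1,
    num_classes=10,
    kernel_list=[16],
    kernel_size_list=[3],
    stride_list=[1],
    padding_list=[1],
    dilation_list=[1],
    pool_out_size=5,
    hidden_list=[32],
    block_list=[8, 8],  # one miniblock size per conv layer, then per linear layer
    mode="usv",
    decompose_alg="clements",
    photodetect=True,
    bias=False,
    device=device,
).to(device)

x = torch.randn(4, 1, 28, 28, device=device)  # dummy batch of grayscale images
logits = model(x)                             # shape: [4, 10]
```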
#### File: torchonn/layers/base_layer.py
```python
from typing import Any, Dict, Optional
import torch
from torch import nn
from torch.types import Device
__all__ = ["ONNBaseLayer"]
class ONNBaseLayer(nn.Module):
def __init__(self, *args, device: Device = torch.device("cpu"), **kwargs) -> None:
super().__init__(*args, **kwargs)
# cuda or cpu, defaults to cpu
self.device = device
def build_parameters(self) -> None:
raise NotImplementedError
def reset_parameters(self) -> None:
raise NotImplementedError
def get_num_parameters(self) -> int:
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def enable_fast_forward(self) -> None:
self.fast_forward_flag = True
def disable_fast_forward(self) -> None:
self.fast_forward_flag = False
def set_phase_variation(self, noise_std: float, random_state: Optional[int] = None) -> None:
self.phase_noise_std = noise_std
def set_gamma_noise(self, noise_std: float, random_state: Optional[int] = None) -> None:
self.gamma_noise_std = noise_std
def set_crosstalk_factor(self, crosstalk_factor: float) -> None:
self.crosstalk_factor = crosstalk_factor
def set_weight_bitwidth(self, w_bit: int) -> None:
self.w_bit = w_bit
def set_input_bitwidth(self, in_bit: int) -> None:
self.in_bit = in_bit
def load_parameters(self, param_dict: Dict[str, Any]) -> None:
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict} {param_name: param_tensor, ...}
"""
for name, param in param_dict.items():
getattr(self, name).data.copy_(param)
def switch_mode_to(self, mode: str) -> None:
self.mode = mode
def forward(self, x):
raise NotImplementedError
def extra_repr(self) -> str:
return ""
```
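A minimal subclass sketch showing how the hooks above are typically filled in; `ToyONNLinear` and its plain weight matrix are illustrative placeholders only, not part of the library (a real photonic layer would build phase/voltage parametrizations instead).
```python
# Illustrative-only subclass of ONNBaseLayer.
import torch
from torch import nn

class ToyONNLinear(ONNBaseLayer):
    def __init__(self, in_features: int, out_features: int, device=torch.device("cpu")):
        super().__init__(device=device)
        self.in_features = in_features
        self.out_features = out_features
        self.build_parameters()
        self.reset_parameters()

    def build_parameters(self) -> None:
        # plain weight matrix stands in for a photonic parametrization
        self.weight = nn.Parameter(
            torch.empty(self.out_features, self.in_features, device=self.device)
        )

    def reset_parameters(self) -> None:
        nn.init.kaiming_normal_(self.weight)

    def forward(self, x):
        return x.matmul(self.weight.t())

layer = ToyONNLinear(16, 4)
print(layer.get_num_parameters())  # 64
```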
#### File: torchonn/models/base_model.py
```python
from typing import Any, Dict, Optional, Callable
from torch import nn, Tensor
from torch.types import Device
from pyutils.torch_train import set_torch_deterministic
__all__ = ["ONNBaseModel"]
class ONNBaseModel(nn.Module):
_conv_linear = tuple()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self, random_state: int = None) -> None:
for name, m in self.named_modules():
if isinstance(m, self._conv_linear):
if random_state is not None:
# deterministic seed, but different for different layer, and controllable by random_state
set_torch_deterministic(random_state + sum(map(ord, name)))
m.reset_parameters()
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def set_phase_variation(self, noise_std: float = 0.0, random_state: Optional[int] = None) -> None:
self.phase_noise_std = noise_std
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_phase_variation(noise_std, random_state=random_state)
def set_gamma_noise(self, noise_std: float = 0.0, random_state: Optional[int] = None) -> None:
self.gamma_noise_std = noise_std
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_gamma_noise(noise_std, random_state=random_state)
def set_crosstalk_factor(self, crosstalk_factor: float = 0.0) -> None:
self.crosstalk_factor = crosstalk_factor
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_crosstalk_factor(crosstalk_factor)
def set_weight_bitwidth(self, w_bit: int) -> None:
self.w_bit = w_bit
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_weight_bitwidth(w_bit)
def set_input_bitwidth(self, in_bit: int) -> None:
self.in_bit = in_bit
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_input_bitwidth(in_bit)
def load_parameters(self, param_dict: Dict[str, Dict[str, Tensor]]) -> None:
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict of dict} {layer_name: {param_name: param_tensor, ...}, ...}
"""
for name, m in self.named_modules():
if name in param_dict:
m.load_parameters(param_dict[name])
def build_obj_fn(self, X: Tensor, y: Tensor, criterion: Callable) -> Callable:
def obj_fn(X_cur=None, y_cur=None, param_dict=None):
if param_dict is not None:
self.load_parameters(param_dict)
if X_cur is None or y_cur is None:
data, target = X, y
else:
data, target = X_cur, y_cur
pred = self.forward(data)
return criterion(pred, target)
return obj_fn
def enable_fast_forward(self) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.enable_fast_forward()
def disable_fast_forward(self) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.disable_fast_forward()
def sync_parameters(self, src: str = "weight") -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.sync_parameters(src=src)
def switch_mode_to(self, mode: str) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.switch_mode_to(mode)
def get_num_parameters(self) -> int:
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def forward(self, x):
raise NotImplementedError
```
|
{
"source": "JeremieMelo/pyutility",
"score": 2
}
|
#### File: pyutility/pyutils/config.py
```python
import hashlib
import json
import yaml
import os
from ast import literal_eval
from typing import Any, Dict, List, Tuple, Union
from multimethod import multimethod
__all__ = [
"Config",
"configs",
]
class Config(dict):
def __getattr__(self, key: str) -> Any:
if key not in self:
d = self
## try hierarchical access
keys = key.split(".")
for k in keys:
if k not in d:
raise AttributeError(key)
d = d[k]
return d
else:
return self[key]
def __setattr__(self, key: str, value: Any) -> None:
self[key] = value
def __delattr__(self, key: str) -> None:
del self[key]
def load(self, fpath: str, *, recursive: bool = False) -> None:
if not os.path.exists(fpath):
raise FileNotFoundError(fpath)
fpaths = [fpath]
if recursive:
while fpath:
fpath = os.path.dirname(fpath)
for fname in ["default.yaml", "default.yml"]:
fpaths.append(os.path.join(fpath, fname))
for fpath in reversed(fpaths):
if os.path.exists(fpath):
with open(fpath, "r") as f:
cfg_dict = yaml.safe_load(f)
self.update(cfg_dict)
def reload(self, fpath: str, *, recursive: bool = False) -> None:
self.clear()
self.load(fpath, recursive=recursive)
@multimethod
def update(self, other: Dict) -> None:
for key, value in other.items():
if isinstance(value, dict):
if key not in self or not isinstance(self[key], Config):
self[key] = Config()
self[key].update(value)
else:
self[key] = value
@multimethod
def update(self, opts: Union[List, Tuple]) -> None:
index = 0
while index < len(opts):
opt = opts[index]
if opt.startswith("--"):
opt = opt[2:]
if "=" in opt:
key, value = opt.split("=", 1)
index += 1
else:
key, value = opt, opts[index + 1]
index += 2
current = self
subkeys = key.split(".")
try:
value = literal_eval(value)
except:
pass
for subkey in subkeys[:-1]:
current = current.setdefault(subkey, Config())
current[subkeys[-1]] = value
def dict(self) -> Dict[str, Any]:
configs = dict()
for key, value in self.items():
if isinstance(value, Config):
value = value.dict()
configs[key] = value
return configs
def flat_dict(self) -> Dict[str, Any]:
def _flatten_dict(dd, separator: str = "_", prefix: str = ""):
return (
{
prefix + separator + k if prefix else k: v
for kk, vv in dd.items()
for k, v in _flatten_dict(vv, separator, kk).items()
}
if isinstance(dd, dict)
else {prefix: dd}
)
return _flatten_dict(self.dict(), separator=".")
def hash(self) -> str:
buffer = json.dumps(self.dict(), sort_keys=True)
return hashlib.sha256(buffer.encode()).hexdigest()
def dump_to_yml(self, path: str) -> None:
with open(path, "w") as f:
yaml.safe_dump(self.dict(), f)
def __str__(self) -> str:
texts = []
for key, value in self.items():
if isinstance(value, Config):
separator = "\n"
else:
separator = " "
text = key + ":" + separator + str(value)
lines = text.split("\n")
for k, line in enumerate(lines[1:]):
lines[k + 1] = (" " * 2) + line
texts.extend(lines)
return "\n".join(texts)
configs = Config()
```
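A short usage sketch for `Config`; the keys and file path below are illustrative, but the hierarchical attribute access, CLI-style `update`, `flat_dict`, `hash`, and `dump_to_yml` calls follow the implementation above.
```python
# Usage sketch for Config (keys and paths are illustrative).
cfg = Config()
cfg.update({"optimizer": {"name": "adam", "lr": 0.001}, "run": {"batch_size": 32}})
cfg.update(["--optimizer.lr=0.002", "--run.batch_size", "64"])  # CLI-style overrides

print(cfg.optimizer.lr)  # 0.002, hierarchical attribute access
print(cfg.flat_dict())   # {'optimizer.name': 'adam', 'optimizer.lr': 0.002, 'run.batch_size': 64}
print(cfg.hash())        # stable sha256 over the sorted JSON dump
cfg.dump_to_yml("/tmp/run_config.yml")
```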
#### File: pyutility/pyutils/general.py
```python
import os
import argparse
import json
import logging
import logging.handlers
import time
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from typing import Optional
import numpy as np
import torch
__all__ = [
"ensure_dir",
"read_json",
"write_json",
"profile",
"print_stat",
"Timer",
"TimerCtx",
"TorchTracemalloc",
"fullprint",
"setup_default_logging",
"Logger",
"logger",
"get_logger",
"ArgParser",
"disable_tf_warning",
"AverageMeter",
]
def ensure_dir(dirname, exist_ok: bool = True):
dirname = Path(dirname)
if not dirname.is_dir():
dirname.mkdir(parents=True, exist_ok=exist_ok)
def read_json(fname):
with open(fname, "rt") as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, fname):
with open(fname, "wt") as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def profile(func=None, timer=True):
from functools import wraps, partial
import time
if func is None:
return partial(profile, timer=timer)
@wraps(func)
def wrapper(*args, **kw):
if timer:
local_time = time.time()
res = func(*args, **kw)
end_time = time.time()
print("[I] <%s> runtime: %.3f ms" % (func.__name__, (end_time - local_time) * 1000))
else:
res = func(*args, **kw)
return res
return wrapper
def print_stat(x):
if isinstance(x, torch.Tensor):
print(
f"min = {x.min().data.item():-15f} max = {x.max().data.item():-15f} mean = {x.mean().data.item():-15f} std = {x.std().data.item():-15f}"
)
elif isinstance(x, np.ndarray):
print(
f"min = {np.min(x):-15f} max = {np.max(x):-15f} mean = {np.mean(x):-15f} std = {np.std(x):-15f}"
)
class Timer(object):
def __init__(self):
self.cache = datetime.now()
def check(self):
now = datetime.now()
duration = now - self.cache
self.cache = now
return duration.total_seconds()
def reset(self):
self.cache = datetime.now()
class TimerCtx:
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
class TorchTracemalloc(object):
def __init__(self, verbose: bool = False) -> None:
super().__init__()
self.verbose = verbose
def __enter__(self):
self.begin = self._b2mb(torch.cuda.memory_allocated())
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
return self
def _b2mb(self, x):
return x / 2 ** 20
def __exit__(self, *exc):
self.end = self._b2mb(torch.cuda.memory_allocated())
self.peak = self._b2mb(torch.cuda.max_memory_allocated())
self.used = self.end - self.begin
self.peaked = self.peak - self.begin
if self.verbose:
print(f"Delta used/peaked {self.used:.2f} MB / {self.peaked:.2f} MB")
print(f"Current used/peaked {self.end:.2f} MB / {self.peak:.2f} MB")
class fullprint:
"context manager for printing full numpy arrays"
def __init__(self, **kwargs):
"""linewidth=75; precision=8"""
kwargs.setdefault("threshold", np.inf)
self.opt = kwargs
def __enter__(self):
self._opt = np.get_printoptions()
np.set_printoptions(**self.opt)
def __exit__(self, type, value, traceback):
np.set_printoptions(**self._opt)
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
green = "\x1b[32;21m"
reset = "\x1b[0m"
# format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def setup_default_logging(default_level=logging.INFO, default_file_level=logging.INFO, log_path=""):
console_handler = logging.StreamHandler()
console_handler.setFormatter(CustomFormatter())
logging.root.addHandler(console_handler)
logging.root.setLevel(default_level)
if log_path:
file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
file_formatter = logging.Formatter(
"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
)
file_handler.setFormatter(file_formatter)
file_handler.setLevel(default_file_level)
logging.root.addHandler(file_handler)
class Logger(object):
def __init__(self, console=True, logfile=None, console_level=logging.INFO, logfile_level=logging.INFO):
super().__init__()
self.logfile = logfile
self.console_level = console_level
self.logfile_level = logfile_level
assert (
console or logfile is not None
), "Enable at least one of console or logfile for Logger"
# Step 1: create a logger
self.logger = logging.getLogger("my_logger")
self.logger.setLevel(logging.INFO)  # master switch for the overall log level
self.logger.propagate = False
# formatter = logging.Formatter(
# "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
formatter = CustomFormatter()
# Step 2: create a handler that outputs to the console
if console:
ch = logging.StreamHandler()
ch.setLevel(self.console_level)  # log level threshold for console output
ch.setFormatter(formatter)
self.logger.addHandler(ch)
if self.logfile is not None:
fh = logging.FileHandler(self.logfile, mode="w")
fh.setLevel(self.logfile_level)  # log level threshold for file output
fh.setFormatter(formatter)
self.logger.addHandler(fh)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def critical(self, message):
self.logger.critical(message)
def get_logger(name="default", default_level=logging.INFO, default_file_level=logging.INFO, log_path=""):
setup_default_logging(
default_level=default_level, default_file_level=default_file_level, log_path=log_path
)
return logging.getLogger(name)
logger = get_logger()
class ArgParser(object):
def __init__(self, load_json=None, save_json=None):
super().__init__()
self.load_json = load_json
self.save_json = save_json
self.args = None
self.parser = argparse.ArgumentParser("Argument Parser")
def add_arg(self, *args, **keywords):
self.parser.add_argument(*args, **keywords)
def parse_args(self):
if self.load_json is not None:
assert os.path.exists(self.load_json), logging.error(
f"Configuration JSON {self.load_json} not found"
)
json = read_json(self.load_json)
t_args = argparse.Namespace()
t_args.__dict__.update(json)
self.args = self.parser.parse_args(args=[], namespace=t_args)
else:
self.args = self.parser.parse_args()
return self.args
def print_args(self):
# Print arguments to std out
# and save argument values to yaml file
print("Arguments:")
for p in vars(self.args).items():
print(f"\t{p[0]:30}{str(p[1]):20}")
print("\n")
def dump_args(self, json_file=None):
if json_file is None:
if self.save_json is None:
logging.error("Skip dump configuration JSON. Please specify json_file")
return False
else:
ensure_dir(os.path.dirname(self.save_json))
logging.warning(f"Dump to the initialized JSON file {self.save_json}")
write_json(vars(self.args), self.save_json)
else:
ensure_dir(os.path.dirname(json_file))
logging.info(f"Dump to JSON file {json_file}")
write_json(vars(self.args), json_file)
# with open(self.file, 'w') as f:
# yaml.dump(vars(self.args), f, default_flow_style=False)
# print(f"[I] Arguments dumped to {file}")
def disable_tf_warning():
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow as tf
if hasattr(tf, "contrib") and type(tf.contrib) != type(tf):
tf.contrib._warning = None
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# tf.logging.set_verbosity(tf.logging.ERROR)
import logging
logging.getLogger("tensorflow").setLevel(logging.ERROR)
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, "__round__"):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
return safe_round(number.item(), ndigits)
else:
return number
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, name: str, fmt: str = ":f", round: Optional[int] = None) -> None:
self.name = name
self.fmt = fmt
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
self.avg = 0
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
self.avg = self.sum / self.count if self.count > 0 else self.val
def state_dict(self):
return {
"val": self.val,
"sum": self.sum,
"count": self.count,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict["val"]
self.sum = state_dict["sum"]
self.count = state_dict["count"]
self.round = state_dict.get("round", None)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
def __str__(self) -> str:
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
```
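A few hedged usage sketches for the helpers above (`profile`, `TimerCtx`, `AverageMeter`); the printed values in the comments indicate the expected format rather than exact outputs.
```python
# Usage sketch for profile, TimerCtx, and AverageMeter.
import time
import torch

@profile  # prints "[I] <slow_add> runtime: ... ms" on every call
def slow_add(a, b):
    time.sleep(0.01)
    return a + b

slow_add(1, 2)

with TimerCtx() as t:
    _ = torch.randn(256, 256) @ torch.randn(256, 256)
print(f"matmul took {t.interval:.4f} s")

meter = AverageMeter("loss", fmt=":.4f")
for loss in [0.9, 0.7, 0.5]:
    meter.update(loss, n=32)  # batch-size-weighted running average
print(meter)                  # -> "loss 0.5000 (0.7000)"
```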
#### File: pyutils/lr_scheduler/warmup_cosine_restart.py
```python
import torch
import math
from torch.optim.lr_scheduler import _LRScheduler
__all__ = ["CosineAnnealingWarmupRestarts"]
class CosineAnnealingWarmupRestarts(_LRScheduler):
"""
optimizer (Optimizer): Wrapped optimizer.
first_cycle_steps (int): First cycle step size.
cycle_mult(float): Cycle steps magnification. Default: -1.
max_lr(float): First cycle's max learning rate. Default: 0.1.
min_lr(float): Min learning rate. Default: 0.001.
warmup_steps(int): Linear warmup step size. Default: 0.
gamma(float): Decrease rate of max learning rate by cycle. Default: 1.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
first_cycle_steps: int,
cycle_mult: float = 1.0,
max_lr: float = 0.1,
min_lr: float = 0.001,
warmup_steps: int = 0,
gamma: float = 1.0,
last_epoch: int = -1,
):
assert warmup_steps < first_cycle_steps
self.first_cycle_steps = first_cycle_steps # first cycle step size
self.cycle_mult = cycle_mult # cycle steps magnification
self.base_max_lr = max_lr # first max learning rate
self.max_lr = max_lr # max learning rate in the current cycle
self.min_lr = min_lr # min learning rate
self.warmup_steps = warmup_steps # warmup step size
self.gamma = gamma # decrease rate of max learning rate by cycle
self.cur_cycle_steps = first_cycle_steps # first cycle step size
self.cycle = 0 # cycle count
self.step_in_cycle = last_epoch # step size of the current cycle
super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)
# set learning rate min_lr
self.init_lr()
def init_lr(self):
self.base_lrs = []
for param_group in self.optimizer.param_groups:
param_group["lr"] = self.min_lr
self.base_lrs.append(self.min_lr)
def get_lr(self):
if self.step_in_cycle == -1:
return self.base_lrs
elif self.step_in_cycle < self.warmup_steps:
return [
(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr
for base_lr in self.base_lrs
]
else:
return [
base_lr
+ (self.max_lr - base_lr)
* (
1
+ math.cos(
math.pi
* (self.step_in_cycle - self.warmup_steps)
/ (self.cur_cycle_steps - self.warmup_steps)
)
)
/ 2
for base_lr in self.base_lrs
]
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.step_in_cycle = self.step_in_cycle + 1
if self.step_in_cycle >= self.cur_cycle_steps:
self.cycle += 1
self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
self.cur_cycle_steps = (
int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
)
else:
if epoch >= self.first_cycle_steps:
if self.cycle_mult == 1.0:
self.step_in_cycle = epoch % self.first_cycle_steps
self.cycle = epoch // self.first_cycle_steps
else:
n = int(
math.log(
(epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult
)
)
self.cycle = n
self.step_in_cycle = epoch - int(
self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1)
)
self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)
else:
self.cur_cycle_steps = self.first_cycle_steps
self.step_in_cycle = epoch
self.max_lr = self.base_max_lr * (self.gamma ** self.cycle)
self.last_epoch = math.floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group["lr"] = lr
```
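A usage sketch for the scheduler above: the learning rate warms up linearly for `warmup_steps`, follows a cosine decay toward `min_lr` within each cycle, restarts every `first_cycle_steps` (scaled by `cycle_mult`), and has its `max_lr` shrunk by `gamma` after every restart. The model and hyperparameters below are placeholders.
```python
# Usage sketch: step the scheduler once per epoch (or per iteration, if cycles are in iterations).
import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CosineAnnealingWarmupRestarts(
    optimizer,
    first_cycle_steps=50,  # cycle length
    cycle_mult=1.0,        # keep the cycle length constant
    max_lr=0.1,
    min_lr=1e-4,
    warmup_steps=5,        # linear warmup at the start of each cycle
    gamma=0.5,             # halve max_lr after every restart
)

for epoch in range(150):
    # ... run one training epoch here ...
    scheduler.step()
    print(epoch, scheduler.get_lr()[0])
```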
|
{
"source": "JeremieMelo/SqueezeLight",
"score": 2
}
|
#### File: SqueezeLight/core/builder.py
```python
from typing import Tuple
import torch
import torch.nn as nn
import torchvision
from pyutils.config import configs
from pyutils.datasets import get_dataset
from pyutils.typing import DataLoader, Optimizer, Scheduler
from torch.types import Device
from torchonn.devices import *
from torchvision import datasets, transforms
from core.models import *
__all__ = ["make_dataloader", "make_model", "make_optimizer", "make_scheduler", "make_criterion"]
def make_dataloader() -> Tuple[DataLoader, DataLoader]:
transform = configs.dataset.transform
img_height, img_width = configs.dataset.img_height, configs.dataset.img_width
dataset_dir = configs.dataset.root
if configs.dataset.name == "cifar10":
if transform == "basic":
t = []
if (img_height, img_width) != (32, 32):
t.append(transforms.Resize((img_height, img_width), interpolation=2))
transform_test = transform_train = transforms.Compose(t + [transforms.ToTensor()])
else:
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.Resize((img_height, img_width), interpolation=2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
transform_test = transforms.Compose(
[
transforms.Resize((img_height, img_width), interpolation=2),
transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
train_dataset = datasets.CIFAR10(dataset_dir, train=True, download=True, transform=transform_train)
validation_dataset = datasets.CIFAR10(dataset_dir, train=False, transform=transform_test)
elif configs.dataset.name == "cifar100":
if transform == "basic":
t = []
if (img_height, img_width) != (32, 32):
t.append(transforms.Resize((img_height, img_width), interpolation=2))
transform_test = transform_train = transforms.Compose(t + [transforms.ToTensor()])
else:
# CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
# CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.Resize((img_height, img_width), interpolation=2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD),
]
)
transform_test = transforms.Compose(
[
transforms.Resize((img_height, img_width), interpolation=2),
transforms.ToTensor(),
# transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD),
]
)
train_dataset = datasets.CIFAR100(dataset_dir, train=True, download=True, transform=transform_train)
validation_dataset = datasets.CIFAR100(dataset_dir, train=False, transform=transform_test)
elif configs.dataset.name == "svhn":
if transform == "basic":
t = []
if (img_height, img_width) != (32, 32):
t.append(transforms.Resize((img_height, img_width), interpolation=2))
transform_test = transform_train = transforms.Compose(t + [transforms.ToTensor()])
else:
# SVHN_TRAIN_MEAN = (0.4377, 0.4438, 0.4728)
# SVHN_TRAIN_STD = (0.1980, 0.2010, 0.1970)
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.Resize((img_height, img_width), interpolation=2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD),
]
)
transform_test = transforms.Compose(
[
transforms.Resize((img_height, img_width), interpolation=2),
transforms.ToTensor(),
# transforms.Normalize(SVHN_TRAIN_MEAN, SVHN_TRAIN_STD),
]
)
train_dataset = datasets.SVHN(dataset_dir, split="train", download=True, transform=transform_train)
validation_dataset = datasets.SVHN(dataset_dir, split="test", download=True, transform=transform_test)
else:
train_dataset, validation_dataset = get_dataset(
configs.dataset.name,
configs.dataset.img_height,
configs.dataset.img_width,
dataset_dir=configs.dataset.root,
transform=configs.dataset.transform,
)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=configs.run.batch_size,
shuffle=int(configs.dataset.shuffle),
pin_memory=True,
num_workers=configs.dataset.num_workers,
)
validation_loader = torch.utils.data.DataLoader(
dataset=validation_dataset,
batch_size=configs.run.batch_size,
shuffle=False,
pin_memory=True,
num_workers=configs.dataset.num_workers,
)
return train_loader, validation_loader
def make_model(device: Device, random_state: int = None) -> nn.Module:
if "mlp" in configs.model.name.lower():
model = eval(configs.model.name)(
n_feat=configs.dataset.img_height * configs.dataset.img_width,
n_class=configs.dataset.n_class,
hidden_list=configs.model.hidden_list,
block_list=configs.model.block_list,
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
mode=configs.model.mode,
v_max=configs.quantize.v_max,
v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=False,
bias=False,
device=device,
).to(device)
model.reset_parameters(random_state, morr_init=int(configs.morr.morr_init))
elif "cnn" in configs.model.name.lower():
model = eval(configs.model.name)(
img_height=configs.dataset.img_height,
img_width=configs.dataset.img_width,
in_channels=configs.dataset.in_channel,
num_classes=configs.dataset.n_class,
kernel_list=configs.model.kernel_list,
kernel_size_list=configs.model.kernel_size_list,
pool_out_size=configs.model.pool_out_size,
stride_list=configs.model.stride_list,
padding_list=configs.model.padding_list,
hidden_list=configs.model.hidden_list,
block_list=configs.model.block_list,
in_bit=configs.quantize.input_bit,
w_bit=configs.quantize.weight_bit,
mode=configs.model.mode,
v_max=configs.quantize.v_max,
v_pi=configs.quantize.v_pi,
act_thres=configs.model.act_thres,
photodetect=False,
bias=False,
# MORR configuration
MORRConfig=eval(configs.morr.config),
trainable_morr_bias=configs.morr.trainable_bias,
trainable_morr_scale=configs.morr.trainable_scale,
device=device,
).to(device)
model.reset_parameters(random_state, morr_init=int(configs.morr.morr_init))
else:
model = None
raise NotImplementedError(f"Not supported model name: {configs.model.name}")
return model
def make_optimizer(model: nn.Module) -> Optimizer:
if configs.optimizer.name == "sgd":
optimizer = torch.optim.SGD(
(p for p in model.parameters() if p.requires_grad),
lr=configs.optimizer.lr,
momentum=configs.optimizer.momentum,
weight_decay=configs.optimizer.weight_decay,
nesterov=True,
)
elif configs.optimizer.name == "adam":
optimizer = torch.optim.Adam(
(p for p in model.parameters() if p.requires_grad),
lr=configs.optimizer.lr,
weight_decay=configs.optimizer.weight_decay,
)
elif configs.optimizer.name == "adamw":
optimizer = torch.optim.AdamW(
(p for p in model.parameters() if p.requires_grad),
lr=configs.optimizer.lr,
weight_decay=configs.optimizer.weight_decay,
)
else:
raise NotImplementedError(configs.optimizer.name)
return optimizer
def make_scheduler(optimizer: Optimizer) -> Scheduler:
if configs.scheduler.name == "constant":
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: 1)
elif configs.scheduler.name == "cosine":
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=configs.run.n_epochs, eta_min=configs.scheduler.lr_min
)
elif configs.scheduler.name == "exp":
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=configs.scheduler.lr_gamma)
else:
raise NotImplementedError(configs.scheduler.name)
return scheduler
def make_criterion() -> nn.Module:
if configs.criterion.name == "nll":
criterion = nn.NLLLoss()
elif configs.criterion.name == "mse":
criterion = nn.MSELoss()
elif configs.criterion.name == "ce":
criterion = nn.CrossEntropyLoss()
else:
raise NotImplementedError(configs.criterion.name)
return criterion
```
#### File: models/layers/morr_linear.py
```python
import numpy as np
import torch
import torch.fft
from pyutils.compute import toeplitz
from pyutils.general import logger
from pyutils.initializer import morr_uniform_
from pyutils.quantize import input_quantize_fn, weight_quantize_fn
from torch import nn
from torch.nn import Parameter, init
from torchonn.devices.mrr import MORRConfig_20um_MQ
from torchonn.op.mrr_op import mrr_roundtrip_phase_to_tr_func, mrr_roundtrip_phase_to_tr_fused
from torchonn.op.mzi_op import phase_quantize_fn, voltage_quantize_fn
__all__ = ["AllPassMORRCirculantLinear"]
class AllPassMORRCirculantLinear(nn.Module):
"""
description: All-pass MORR Linear layer, assumes (1) block-circulant matrix (2) differential rails (3) learnable balancing factors.
"""
def __init__(
self,
in_channel,
out_channel,
bias=False,
miniblock=4,
mode="weight",
v_max=10.8,
v_pi=4.36,
w_bit=16,
in_bit=16,
### mrr parameter
MORRConfig=MORRConfig_20um_MQ,
### trainable MORR nonlinearity
trainable_morr_bias=False,
trainable_morr_scale=False,
device=torch.device("cuda"),
):
super(AllPassMORRCirculantLinear, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.mode = mode
self.miniblock = miniblock
assert mode in {"weight", "phase", "voltage"}, logger.error(
f"Mode not supported. Expected one from (weight, phase, voltage) but got {mode}."
)
self.v_max = v_max
self.v_pi = v_pi
self.gamma = np.pi / self.v_pi ** 2
self.w_bit = w_bit
self.in_bit = in_bit
self.MORRConfig = MORRConfig
self.mrr_a = MORRConfig.attenuation_factor
self.mrr_r = MORRConfig.coupling_factor
self.device = device
self.trainable_morr_bias = trainable_morr_bias
self.trainable_morr_scale = trainable_morr_scale
### calculate FWHM (rad)
self.morr_fwhm = (
-4
* np.pi ** 2
* MORRConfig.radius
* MORRConfig.effective_index
* (
1 / MORRConfig.resonance_wavelength
- 1 / (MORRConfig.resonance_wavelength - MORRConfig.bandwidth / 2)
)
)
### allocate parameters
self.weight = None
self.x_zero_pad = None
self.morr_output_scale = None ## learnable balancing factors implemented by MRRs
self.morr_input_bias = None ## round-trip phase shift bias within MORR
self.morr_input_scale = None ## scaling factor for the round-trip phase shift within MORR
self.morr_gain = (
100 / (self.in_channel // self.miniblock)
) ** 0.5 ## TIA gain, calculated such that output variance is around 1
### build trainable parameters
self.build_parameters(mode)
### quantization tool
self.input_quantizer = input_quantize_fn(self.in_bit, device=self.device)
self.weight_quantizer = weight_quantize_fn(
self.w_bit, alg="dorefa_pos"
) ## [0-1] positive only, maintain the original scale
self.morr_output_scale_quantizer = weight_quantize_fn(
self.w_bit, alg="dorefa_sym"
) ## [-1,1] full-range
self.voltage_quantizer = voltage_quantize_fn(self.w_bit, self.v_pi, self.v_max)
self.phase_quantizer = phase_quantize_fn(self.w_bit, self.v_pi, self.v_max, gamma_noise_std=0)
self.mrr_roundtrip_phase_to_tr = mrr_roundtrip_phase_to_tr_func(
a=self.mrr_a, r=self.mrr_r, intensity=True
)
### default set to slow forward
self.disable_fast_forward()
### default set no gamma noise
self.set_gamma_noise(0)
### default set no crosstalk
self.disable_crosstalk()
### default set no phase variation
self.disable_phase_variation()
if bias:
self.bias = Parameter(torch.Tensor(out_channel).to(self.device))
else:
self.register_parameter("bias", None)
self.finegrain_drop_mask = None
def build_parameters(self, mode="weight"):
## weight mode
self.in_channel_pad = int(np.ceil(self.in_channel / self.miniblock).item() * self.miniblock)
self.out_channel_pad = int(np.ceil(self.out_channel / self.miniblock).item() * self.miniblock)
self.grid_dim_y = self.out_channel_pad // self.miniblock
self.grid_dim_x = self.in_channel_pad // self.miniblock
if mode in {"weight"}:
self.weight = Parameter(
torch.ones(
self.grid_dim_y, self.grid_dim_x, self.miniblock, device=self.device, dtype=torch.float
)
)
self.morr_output_scale = Parameter(
torch.randn(1, 1, max(1, self.grid_dim_x // 2) + 1, 1, device=self.device)
)
if self.trainable_morr_bias:
### initialize with the finest-granularity, i.e., per mini-block
self.morr_input_bias = Parameter(
torch.zeros(self.grid_dim_y, self.grid_dim_x, device=self.device, dtype=torch.float)
)
if self.trainable_morr_scale:
### initialize with the finest-granularity, i.e., per mini-block
self.morr_input_scale = Parameter(
torch.zeros(self.grid_dim_y, self.grid_dim_x, device=self.device, dtype=torch.float)
)
elif mode == "phase":
raise NotImplementedError
self.phase = Parameter(self.phase)
elif mode == "voltage":
raise NotImplementedError
self.voltage = Parameter(self.voltage)
else:
raise NotImplementedError
def reset_parameters(self, morr_init: bool = False) -> None:
### nonlinear curve aware initialization
if morr_init:
## initialize weight
morr_uniform_(
self.weight,
MORRConfig=self.MORRConfig,
n_op=self.miniblock,
biased=self.w_bit >= 16,
gain=2 if self.in_bit < 16 else 1,
) # quantization needs zero-center
self.sigma_weight = self.weight.data.std().item()
self.weight_quant_gain = None
## output distribution aware initialization to output scaling factor
# init.uniform_(self.morr_output_scale, -1, 1) ## scaling need to performed after quantization
t1 = mrr_roundtrip_phase_to_tr_fused(
torch.tensor([0]).float(), a=self.mrr_a, r=self.mrr_r, intensity=True
)
t2 = mrr_roundtrip_phase_to_tr_fused(
torch.tensor([self.morr_fwhm * 2.4]).float(), a=self.mrr_a, r=self.mrr_r, intensity=True
)
g = ((t2 - t1) / (2.4 * self.morr_fwhm)).item() ## 0~2.4 FWHM slope as a linear approximation
self.sigma_out_scale = 4 / (3 * self.grid_dim_x ** 0.5 * g * self.morr_fwhm)
self.out_scale_quant_gain = None
init.normal_(self.morr_output_scale, 0, self.sigma_out_scale)
else:
init.kaiming_normal_(self.weight.data)
init.normal_(self.morr_output_scale.data)
self.sigma_weight = self.weight.data.std().item()
self.weight_quant_gain = None
self.sigma_out_scale = self.morr_output_scale.data.std().item()
self.out_scale_quant_gain = None
if self.morr_input_bias is not None:
self.morr_input_bias.data.zero_()
if self.morr_input_scale is not None:
init.normal_(self.morr_input_scale.data, 2, 0.1)
if self.bias is not None:
init.uniform_(self.bias, 0, 0)
def sync_parameters(self, src="weight"):
"""
description: synchronize all parameters from the source parameters
"""
raise NotImplementedError
def build_weight(self):
if self.w_bit < 16:
### differentiable quantizer based on STE to enable QAT (Dorefa-Net, arXiv 2016)
weight = self.weight_quantizer(self.weight)
## rescale weights after quantization can maintain the initialization distribution
if self.weight_quant_gain is None:
self.weight_quant_gain = self.sigma_weight / weight.data.std()
if self.trainable_morr_scale:
morr_scale = self.morr_scale * self.weight_quant_gain
else:
morr_scale = self.weight_quant_gain
weight = weight.mul(morr_scale) ### gain factor from Tanh used in quantization
# if(self.trainable_morr_scale):
# weight = weight.mul(self.morr_scale)
### quantize learnable balancing factor
morr_output_scale = self.morr_output_scale_quantizer(self.morr_output_scale)
## rescale after quantization is harmful
# if(self.out_scale_quant_gain is None):
# self.sigma_out_scale_quant_gain = self.sigma_out_scale / morr_output_scale.data.std().item()
# morr_output_scale = morr_output_scale.mul(self.sigma_out_scale_quant_gain)### gain factor from Tanh used in quantization
else:
weight = self.weight.abs() # positive only
morr_output_scale = self.morr_output_scale - self.morr_output_scale.data.mean()
if self.finegrain_drop_mask is not None:
weight = weight.mul(self.finegrain_drop_mask.float())
## differential balancing factor concatenation
scale = morr_output_scale[..., :-1, :]
scale_pad = morr_output_scale[..., -1:, :]
if self.grid_dim_x % 2 == 0:
# even blocks
scale = torch.cat([scale, -scale], dim=2) # [1, 1, q, 1]
else:
# odd blocks
if self.grid_dim_x > 1:
scale = torch.cat([morr_output_scale, -scale], dim=2) # [1, 1, q, 1]
else:
scale = scale_pad # [1, 1, q, 1]
morr_output_scale = scale.squeeze(-1).unsqueeze(0) # [1 ,1, 1, q]
return weight, morr_output_scale
def enable_fast_forward(self):
self.fast_forward_flag = True
def disable_fast_forward(self):
self.fast_forward_flag = False
def set_gamma_noise(self, noise_std, random_state=None):
self.gamma_noise_std = noise_std
# self.phase_quantizer.set_gamma_noise(noise_std, random_state)
def set_crosstalk_factor(self, crosstalk_factor):
self.crosstalk_factor = crosstalk_factor
self.phase_quantizer.set_crosstalk_factor(crosstalk_factor)
def load_parameters(self, param_dict):
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict} {param_name: param_tensor, ...}
"""
for name, param in param_dict.items():
getattr(self, name).data.copy_(param)
# if(self.mode == "phase"):
# self.build_weight(update_list=param_dict)
def switch_mode_to(self, mode):
self.mode = mode
def get_power(self, mixtraining_mask=None):
raise NotImplementedError
masks = (
mixtraining_mask
if mixtraining_mask is not None
else (self.mixedtraining_mask if self.mixedtraining_mask is not None else None)
)
if masks is not None:
power = ((self.phase_U.data * masks["phase_U"]) % (2 * np.pi)).sum()
power += ((self.phase_S.data * masks["phase_S"]) % (2 * np.pi)).sum()
power += ((self.phase_V.data * masks["phase_V"]) % (2 * np.pi)).sum()
else:
power = ((self.phase_U.data) % (2 * np.pi)).sum()
power += ((self.phase_S.data) % (2 * np.pi)).sum()
power += ((self.phase_V.data) % (2 * np.pi)).sum()
return power.item()
def get_num_params(self, fullrank=False):
if self.dynamic_weight_flag and not fullrank:
total = self.basis.numel()
if self.coeff_in is not None:
total += self.coeff_in.numel()
if self.coeff_out is not None:
total += self.coeff_out.numel()
else:
total = self.out_channel * self.in_channel
if self.bias is not None:
total += self.bias.numel()
return total
def get_param_size(self, fullrank=False, fullprec=False):
if self.dynamic_weight_flag and not fullrank:
total = self.basis.numel() * self.w_bit / 8
if self.coeff_in is not None:
total += self.coeff_in.numel() * self.w_bit / 8
if self.coeff_out is not None:
total += self.coeff_out.numel() * self.w_bit / 8
else:
if fullprec:
total = (self.out_channel * self.in_channel) * 4
else:
total = (self.out_channel * self.in_channel) * self.w_bit / 8
if self.bias is not None:
total += self.bias.numel() * 4
return total
def input_modulator(self, x):
### voltage to power, which is proportional to the phase shift
return x * x
def set_crosstalk_coupling_matrix(self, coupling_factor, drop_perc=0):
### crosstalk coupling matrix is a symmetric matrix, but the intra-MORR crosstalk can be taken as a round-trip phase shift scaling factor, which is proportional to the number of segments after pruned.
### drop-perc is the pruning percentage.
assert 0 <= coupling_factor <= 1, logger.error(
f"Coupling factor must in [0,1], but got {coupling_factor}"
)
self.crosstalk_factor = 1 + max(3, (self.miniblock * (1 - drop_perc) - 1)) * coupling_factor
def enable_crosstalk(self):
self.enable_thermal_crosstalk = True
def disable_crosstalk(self):
self.enable_thermal_crosstalk = False
def set_phase_variation(self, phase_noise_std=0):
self.phase_noise_std = phase_noise_std
def enable_phase_variation(self):
self.enable_phase_noise = True
def disable_phase_variation(self):
self.enable_phase_noise = False
def enable_trainable_morr_scale(self):
self.trainable_morr_scale = True
def disable_trainable_morr_scale(self):
self.trainable_morr_scale = False
def enable_trainable_morr_bias(self):
self.trainable_morr_bias = True
def disable_trainable_morr_bias(self):
self.trainable_morr_bias = False
@property
def morr_bias(self):
if self.morr_input_bias is None:
return None
# return 2 * self.morr_fwhm * torch.sigmoid(self.morr_input_bias.unsqueeze(0).unsqueeze(-1))
return self.morr_fwhm * torch.tanh(self.morr_input_bias.unsqueeze(0).unsqueeze(-1))
@property
def morr_scale(self):
if self.morr_input_scale is None:
return None
return torch.sigmoid(self.morr_input_scale.unsqueeze(-1)) + 0.2 # [p, q, 1]
def propagate_morr(self, weight, x, morr_output_scale):
"""
@description: propagate through the analytically calculated transfer matrix of molg. We implement circulant matrix multiplication using fast circ matmul
@param weight {torch.Tensor} two phase shifters in the MZI-based attenuators
@param x {torch.Tensor} complex-valued input
@param morr_output_scale {torch.Tensor} learnable balancing factors
@return: y {torch.Tensor} output of attenuators
"""
### x : [bs, q, k]
### weights: [p, q, k]
### morr_output_scale: [1, 1, 1, q]
## build circulant weight matrix
# crosstalk on the weights is much cheaper to compute than on the phase shifts
if self.enable_thermal_crosstalk and self.crosstalk_factor > 1:
weight = weight * self.crosstalk_factor
weight = toeplitz(weight).unsqueeze(0) # [1, p, q, k, k]
x = x.unsqueeze(1).unsqueeze(-1) # [bs, 1, q, k, 1]
x = weight.matmul(x).squeeze(-1) # [bs, p, q, k]
if self.enable_phase_noise and self.phase_noise_std > 1e-5:
x = x + torch.zeros_like(x).normal_(0, self.phase_noise_std)
### Use theoretical transmission function
### x is the phase detuning, x=0 means on-resonance
### phase: [bs, p, q, k]
x = self.mrr_roundtrip_phase_to_tr(x)
x = morr_output_scale.matmul(x) # [1, 1, 1, q] x [bs, p, q, k] = [bs, p, 1, k]
x = x.flatten(1) # [bs, p*k]
return x
def get_finegrain_drop_mask(self, topk):
if self.w_bit < 16:
weight = self.weight_quantizer(self.weight.data) # [p, q, k]
else:
weight = self.weight.data.abs()
indices = weight.argsort(dim=-1)
mask = torch.ones_like(weight, dtype=torch.bool, device=weight.device)
# drop_idx = int(drop_perc * weight.size(2))
# drop_idx = weight.size(2) - max(4, weight.size(2) - drop_idx)
drop_indices = indices[:, :, 0:-topk]
mask.scatter_(2, drop_indices, 0)
self.finegrain_drop_mask = mask
return mask
def apply_finegrain_drop_mask(self, mask):
if self.w_bit < 16:
self.weight.data.masked_fill_(~mask.view_as(self.weight.data), -1000)
else:
self.weight.data.masked_fill_(~mask.view_as(self.weight.data), 0)
def forward_slow(self, x):
assert (
x.size(-1) == self.in_channel
), f"[E] Input dimension does not match the weight size {self.out_channel, self.in_channel}, but got input size ({tuple(x.size())}))"
if self.in_bit < 16:
x = self.input_quantizer(x)
# if(not self.fast_forward_flag or self.weight is None):
# weight = self.build_weight()
# else:
# weight = self.weight #.view(self.out_channel, -1)[:, :self.in_channel]
weight = self.build_weight()
if self.in_channel_pad > self.in_channel:
if self.x_zero_pad is None or self.x_zero_pad.size(0) != x.size(0):
self.x_zero_pad = torch.zeros(
x.size(0), self.in_channel_pad - self.in_channel, device=x.device, dtype=x.dtype
)
x = torch.cat([x, self.x_zero_pad], dim=1)
x = x.view(-1, self.grid_dim_x, self.miniblock)
# print(x.size())
### modulation
### assume the real input is the magnitude of the modulator output with fixed phase response
### x: [bs, q, k] -> [bs, q, k, 2]
x = self.input_modulator(x)
### propagate through attenuator (weight)
### x: [bs, q, k, 2] -> [bs, p, q, k, 2]
x = self.propagate_morr(weight, x)
# print(x.size())
### propagate through photodetection, from optics to voltages
### x: [bs, p, q, k, 2] -> [bs, p, q, k]
x = self.propagate_photodetection(x)
# print(x.size())
### postprocessing before activation
### x: [bs, outc] -> [bs, outc]
# out = self.postprocessing(x)
if self.out_channel < self.out_channel_pad:
x = x[..., : self.out_channel]
if self.bias is not None:
x = x + self.bias.unsqueeze(0)
return x
def forward(self, x):
assert (
x.size(-1) == self.in_channel
), f"[E] Input dimension does not match the weight size {self.out_channel, self.in_channel}, but got input size ({tuple(x.size())}))"
if self.in_bit < 16:
x = self.input_quantizer(x)
weight, morr_output_scale = self.build_weight()
if self.in_channel_pad > self.in_channel:
if self.x_zero_pad is None or self.x_zero_pad.size(0) != x.size(0):
self.x_zero_pad = torch.zeros(
x.size(0), self.in_channel_pad - self.in_channel, device=x.device, dtype=x.dtype
)
x = torch.cat([x, self.x_zero_pad], dim=1)
x = x.view(-1, self.grid_dim_x, self.miniblock)
### modulation
### assume the real input is the magnitude of the modulator output with fixed phase response
### x: [bs, q, k] -> [bs, q, k]
x = self.input_modulator(x)
### propagate through morr array (weight)
### x: [bs, q, k] -> [bs, p*k]
x = self.propagate_morr(weight, x, morr_output_scale)
if self.out_channel < self.out_channel_pad:
x = x[..., : self.out_channel]
if self.bias is not None:
x = x + self.bias.unsqueeze(0)
return x
```
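A hedged usage sketch for `AllPassMORRCirculantLinear`, assuming a CUDA device and the default `MORRConfig_20um_MQ` configuration; the layer sizes and bitwidths are illustrative.
```python
# Usage sketch: build an all-pass MORR circulant linear layer and run a dummy batch.
import torch

device = torch.device("cuda")
layer = AllPassMORRCirculantLinear(
    in_channel=64,
    out_channel=32,
    bias=False,
    miniblock=4,
    mode="weight",
    w_bit=8,
    in_bit=8,
    device=device,
)
layer.reset_parameters(morr_init=True)  # nonlinearity-aware initialization
x = torch.rand(16, 64, device=device)   # non-negative inputs (intensity encoding)
y = layer(x)                            # shape: [16, 32]
```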
#### File: core/models/morr_base.py
```python
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
import torch
import torch.fft
import torch.nn.functional as F
from pyutils.general import logger
from pyutils.torch_train import set_torch_deterministic
from torch import Tensor, nn
from torchonn.op.mrr_op import mrr_roundtrip_phase_to_tr_grad_fused
from .layers import AllPassMORRCirculantConv2d, AllPassMORRCirculantLinear
__all__ = ["MORR_CLASS_BASE"]
class MORR_CLASS_BASE(nn.Module):
"""MORR CNN for classification (MORR-ONN). MORR array-based convolution with learnable nonlinearity [SqueezeLight, DATE'21]"""
_conv_linear = (AllPassMORRCirculantConv2d, AllPassMORRCirculantLinear)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def reset_parameters(self, random_state: int = None, morr_init: bool = False) -> None:
for name, m in self.named_modules():
if isinstance(m, self._conv_linear):
if random_state is not None:
# deterministic seed, but different for different layer, and controllable by random_state
set_torch_deterministic(random_state + sum(map(ord, name)))
m.reset_parameters(morr_init)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def set_gamma_noise(self, noise_std: float = 0.0, random_state: Optional[int] = None) -> None:
self.gamma_noise_std = noise_std
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_gamma_noise(noise_std, random_state=random_state)
def set_crosstalk_factor(self, crosstalk_factor: float = 0.0) -> None:
self.crosstalk_factor = crosstalk_factor
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_crosstalk_factor(crosstalk_factor)
def set_weight_bitwidth(self, w_bit: int) -> None:
self.w_bit = w_bit
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_weight_bitwidth(w_bit)
def load_parameters(self, param_dict: Dict[str, Dict[str, Tensor]]) -> None:
"""
description: update parameters based on this parameter dictionary\\
param param_dict {dict of dict} {layer_name: {param_name: param_tensor, ...}, ...}
"""
for layer_name, layer_param_dict in param_dict.items():
self.layers[layer_name].load_parameters(layer_param_dict)
def build_obj_fn(self, X: Tensor, y: Tensor, criterion: Callable) -> Callable:
def obj_fn(X_cur=None, y_cur=None, param_dict=None):
if param_dict is not None:
self.load_parameters(param_dict)
if X_cur is None or y_cur is None:
data, target = X, y
else:
data, target = X_cur, y_cur
pred = self.forward(data)
return criterion(pred, target)
return obj_fn
def enable_fast_forward(self) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.enable_fast_forward()
def disable_fast_forward(self) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.disable_fast_forward()
def sync_parameters(self, src: str = "weight") -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.sync_parameters(src=src)
def switch_mode_to(self, mode: str) -> None:
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.switch_mode_to(mode)
def enable_morr_phase_loss(self):
self.morr_phase_loss_flag = True
def disable_morr_phase_loss(self):
self.morr_phase_loss_flag = False
def calc_morr_phase_loss(self, phase, threshold=1):
return torch.relu(phase - threshold).mean()
def register_morr_phase_loss(self, loss):
self.morr_phase_loss = loss
def get_morr_phase_loss(self):
return self.morr_phase_loss
def enable_morr_gradient_loss(self):
self.morr_gradient_loss_flag = True
def disable_morr_gradient_loss(self):
self.morr_gradient_loss_flag = False
def calc_morr_gradient_loss(self, layer, phase):
# return polynomial(phase, layer.morr_lambda_to_mag_curve_coeff_half_grad).abs().mean()
return mrr_roundtrip_phase_to_tr_grad_fused(
phase, layer.MORRConfig.attenuation_factor, layer.MORRConfig.coupling_factor, intensity=True
)
def register_morr_gradient_loss(self, loss):
self.morr_gradient_loss = loss
def get_morr_gradient_loss(self):
return self.morr_gradient_loss
def requires_morr_grad(self, mode=True):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
if layer.morr_input_bias is not None:
layer.morr_input_bias.requires_grad_(mode)
layer.morr_input_scale.requires_grad_(mode)
def get_finegrain_drop_mask(self, topk):
## each module stores a local pruning mask and uses the mask during forward without changing the weight tensor values.
self.finegrain_drop_mask = {}
for layer_name, layer in self.named_modules():
if isinstance(layer, self._conv_linear):
mask = layer.get_finegrain_drop_mask(topk=topk)
self.finegrain_drop_mask[layer_name] = mask
return self.finegrain_drop_mask
def apply_finegrain_drop_mask(self):
## permanently apply pruning mask to the weight tensor
if self.finegrain_drop_mask is None:
print("[W] No finegrained drop mask is available.")
return
for layer_name, layer in self.named_modules():
if isinstance(layer, self._conv_linear):
mask = self.finegrain_drop_mask[layer_name]
layer.apply_finegrain_drop_mask(mask=mask)
def enable_crosstalk(self):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.enable_crosstalk()
def disable_crosstalk(self):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.disable_crosstalk()
def set_crosstalk_coupling_matrix(self, coupling_factor, drop_perc=0):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_crosstalk_coupling_matrix(coupling_factor, drop_perc)
def enable_phase_variation(self):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.enable_phase_variation()
def disable_phase_variation(self):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.disable_phase_variation()
def set_phase_variation(self, phase_noise_std=0):
for layer in self.modules():
if isinstance(layer, self._conv_linear):
layer.set_phase_variation(phase_noise_std)
def get_num_MORR(self):
n_morr = {}
for layer in self.modules():
if isinstance(layer, self._conv_linear):
k = layer.miniblock
n_morr[k] = n_morr.get(k, 0) + layer.grid_dim_x * layer.grid_dim_y
n_morr[1] = n_morr.get(1, 0) + layer.grid_dim_x
return n_morr, sum(i for i in n_morr.values())
def forward(self, x):
raise NotImplementedError
```
|
{
"source": "JeremieRapin/accesslink-example-python",
"score": 3
}
|
#### File: JeremieRapin/accesslink-example-python/accesslink_example.py
```python
from __future__ import print_function
from utils import load_config, save_config, pretty_print_json
from accesslink import AccessLink
try:
input = raw_input
except NameError:
pass
CONFIG_FILENAME = "config.yml"
class PolarAccessLinkExample(object):
"""Example application for Polar Open AccessLink v3."""
def __init__(self):
self.config = load_config(CONFIG_FILENAME)
if "access_token" not in self.config:
print("Authorization is required. Run authorization.py first.")
return
self.accesslink = AccessLink(client_id=self.config["client_id"],
client_secret=self.config["client_secret"])
self.running = True
self.show_menu()
def show_menu(self):
while self.running:
print("\nChoose an option:\n" +
"-----------------------\n" +
"1) Get user information\n" +
"2) Check available data\n" +
"3) Revoke access token\n" +
"4) Today nightly recharge\n" +
"5) Sleep\n" +
"6) Exit\n" +
"-----------------------")
self.get_menu_choice()
def get_menu_choice(self):
choice = input("> ")
{
"1": self.get_user_information,
"2": self.check_available_data,
"3": self.revoke_access_token,
"4": self.today_nightly_recharge,
"5": self.today_sleep,
"6": self.exit,
}.get(choice, self.get_menu_choice)()
def get_user_information(self):
user_info = self.accesslink.users.get_information(user_id=self.config["user_id"],
access_token=self.config["access_token"])
pretty_print_json(user_info)
def check_available_data(self):
available_data = self.accesslink.pull_notifications.list()
if not available_data:
print("No new data available.")
return
print("Available data:")
pretty_print_json(available_data)
for item in available_data["available-user-data"]:
if item["data-type"] == "EXERCISE":
self.get_exercises()
elif item["data-type"] == "ACTIVITY_SUMMARY":
self.get_daily_activity()
elif item["data-type"] == "PHYSICAL_INFORMATION":
self.get_physical_info()
def revoke_access_token(self):
self.accesslink.users.delete(user_id=self.config["user_id"],
access_token=self.config["access_token"])
del self.config["access_token"]
del self.config["user_id"]
save_config(self.config, CONFIG_FILENAME)
print("Access token was successfully revoked.")
self.exit()
def exit(self):
self.running = False
def get_exercises(self):
transaction = self.accesslink.training_data.create_transaction(user_id=self.config["user_id"],
access_token=self.config["access_token"])
if not transaction:
print("No new exercises available.")
return
resource_urls = transaction.list_exercises()["exercises"]
for url in resource_urls:
exercise_summary = transaction.get_exercise_summary(url)
print("Exercise summary:")
pretty_print_json(exercise_summary)
transaction.commit()
def get_daily_activity(self):
transaction = self.accesslink.daily_activity.create_transaction(user_id=self.config["user_id"],
access_token=self.config["access_token"])
if not transaction:
print("No new daily activity available.")
return
resource_urls = transaction.list_activities()["activity-log"]
for url in resource_urls:
activity_summary = transaction.get_activity_summary(url)
print("Activity summary:")
pretty_print_json(activity_summary)
transaction.commit()
def get_physical_info(self):
transaction = self.accesslink.physical_info.create_transaction(user_id=self.config["user_id"],
access_token=self.config["access_token"])
if not transaction:
print("No new physical information available.")
return
resource_urls = transaction.list_physical_infos()["physical-informations"]
for url in resource_urls:
physical_info = transaction.get_physical_info(url)
print("Physical info:")
pretty_print_json(physical_info)
transaction.commit()
def today_nightly_recharge(self):
nightly_recharge = self.accesslink.nightly_recharge.get_nightly_recharge_by_date(access_token=self.config["access_token"])
if not nightly_recharge:
print("Today has no nightly recharge")
return
print("Today nightly recharge:")
pretty_print_json(nightly_recharge)
def today_sleep(self):
sleep = self.accesslink.sleep.get_sleep_by_date(access_token=self.config["access_token"])
if not sleep:
print("Today has no sleep")
return
print("Today sleep:")
pretty_print_json(sleep)
if __name__ == "__main__":
PolarAccessLinkExample()
```
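The menu above routes user input through a dictionary of handlers rather than an if/elif chain. A minimal, standalone sketch of that dispatch pattern follows; the handler names are illustrative placeholders, not part of the AccessLink API.
```python
# Minimal sketch of the dictionary-dispatch pattern used by get_menu_choice().
# The handlers below are illustrative placeholders, not AccessLink API calls.
def show_user():
    print("user info")

def quit_app():
    print("bye")

def dispatch(choice):
    handlers = {
        "1": show_user,
        "2": quit_app,
    }
    # Unknown input falls back to a harmless default instead of raising KeyError.
    handlers.get(choice, lambda: print("unknown option"))()

dispatch("1")  # -> user info
dispatch("9")  # -> unknown option
```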
|
{
"source": "jere-mie/routing-prank",
"score": 2
}
|
#### File: routing-prank/website/routes.py
```python
from flask import render_template, url_for, flash, redirect, request
from website import app, db, login_manager
from website.forms import LinkForm, EditForm, LoginForm
from website.models import Link, User
import json
import re
from flask_login import LoginManager, login_user, current_user, logout_user, login_required
from better_profanity import profanity
@login_manager.user_loader
def user_loader(username):
user = User()
user.id = username
return user
@app.route('/home', methods=['GET'])
@app.route('/', methods=['GET'])
def home():
return render_template('home.html')
@app.route('/about', methods=['GET'])
def about():
with open('config.json') as f:
data = json.load(f)
return render_template('about.html', domain=data['domain'])
@app.route('/rickrolled', methods=['GET'])
def rickrolled():
return render_template('rickrolled.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
with open('config.json') as f:
data = json.load(f)
if form.validate_on_submit():
if form.password.data == data['password']:
user = User()
user.id = "Rick"
login_user(user, remember=True)
flash('Successfully logged in!', 'success')
return redirect(url_for('home'))
else:
flash('Incorrect password!', 'danger')
return render_template('login.html', form=form, data=data)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/admin', methods=['GET'])
@login_required
def admin():
links = Link.query.all()
links.reverse()
totClicks = sum([link.clicks for link in links])
with open('config.json') as f:
data = json.load(f)
return render_template('admin.html', links=links, domain=data['domain'], totClicks=totClicks)
@app.route('/new', methods=['GET', 'POST'])
def new():
form = LinkForm()
if form.validate_on_submit():
link = Link(link=form.link.data, title=form.title.data, name = form.name.data, desc=form.desc.data, image=form.image.data, url='https://www.youtube.com/watch?v=dQw4w9WgXcQ')
with open('bad-words.txt', 'r') as f:
wordlist = [i.strip() for i in f.readlines()]
profanity.load_censor_words()
profanity.add_censor_words(wordlist)
if profanity.contains_profanity(f'{link.link} {link.title} {link.name} {link.desc}'):
flash('NOTE: EXCESSIVE PROFANITY IS NOT PERMITTED ON THIS PLATFORM. CONTINUED EXCESSIVE PROFANITY MAY RESULT IN AN IP BAN FROM THIS PLATFORM', 'danger')
link.link = profanity.censor(link.link, 'X')
link.title = profanity.censor(link.title, 'X')
link.name = profanity.censor(link.name, 'X')
link.desc = profanity.censor(link.desc, 'X')
link.link = link.link.replace(' ','-')
link.link = re.sub(r'[^a-zA-Z0-9-]', '-', link.link)
# ensure uniqueness of link
existinglink = Link.query.filter_by(link=link.link).first()
while existinglink:
link.link = link.link + 'X'
existinglink = Link.query.filter_by(link=link.link).first()
db.session.add(link)
db.session.commit()
# getting config details
with open('config.json') as f:
data = json.load(f)
flash(f"Created link {data['domain']}/l/{link.link}", 'success')
return redirect(url_for('home'))
return render_template('new.html', form=form, legend='New Link')
@app.route('/l/<link_url>/edit', methods=['GET', 'POST'])
@login_required
def edit(link_url):
link = Link.query.filter_by(link=link_url).first()
form = EditForm()
if form.validate_on_submit():
link.link = form.link.data
link.link = link.link.replace(' ','-')
link.link = re.sub(r'[^a-zA-Z0-9-]', '-', link.link)
link.url = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
link.title = form.title.data
link.name = form.name.data
link.desc = form.desc.data
link.image = form.image.data
db.session.commit()
flash('Successfully updated link!', 'success')
return redirect(url_for('home'))
elif request.method == 'GET':
form.link.data = link.link
form.title.data = link.title
form.name.data = link.name
form.desc.data = link.desc
form.image.data = link.image
return render_template('new.html', form=form, legend='Update Link')
@app.route('/l/<link_url>', methods=['GET'])
def redir(link_url):
link = Link.query.filter_by(link=link_url).first()
if not link:
        return '<h1>Either you manually typed an incorrect link or the link you clicked was removed for excessive profanity.</h1>'
link.clicks +=1
db.session.commit()
return render_template('redir.html', title=link.title, name=link.name, desc=link.desc, image=link.image, url=link.url)
@app.route('/l/<link_url>/delete', methods=['GET','POST'])
@login_required
def delete(link_url):
link = Link.query.filter_by(link=link_url).first()
db.session.delete(link)
db.session.commit()
flash('Successfully deleted link!', 'success')
return redirect(url_for('home'))
```
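The new() and edit() views sanitize user-supplied slugs and then append 'X' until the slug is unique. Here is a standalone sketch of that logic, with an in-memory set standing in for the Link table (an assumption made purely for illustration).
```python
import re

def sanitize_slug(raw, existing):
    """Mirrors the slug cleanup in new()/edit(): spaces and non-alphanumeric
    characters become '-', then 'X' is appended until the slug is unused.
    `existing` stands in for the Link table queried by the Flask views."""
    slug = raw.replace(' ', '-')
    slug = re.sub(r'[^a-zA-Z0-9-]', '-', slug)
    while slug in existing:
        slug += 'X'
    return slug

taken = {"my-link", "my-linkX"}
print(sanitize_slug("my link", taken))  # my-linkXX
```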
|
{
"source": "jeremietharaud/aws-cfn-cli",
"score": 3
}
|
#### File: aws-cfn-cli/tests/test_params.py
```python
import cfncli.cfncli as cfncli
import yaml
from typing import Dict, List
yaml_variables = """
Name: 'test-stack'
Tags:
Project: 'cfncli'
Env: 'tst'
Name: 'test-stack'
Variables:
RepositoryName: 'test-123'
"""
def load_test_yaml_params() -> Dict[str, str]:
streams = yaml.safe_load(yaml_variables)
return streams.get('Variables')
def load_test_tags() -> Dict[str, str]:
streams = yaml.safe_load(yaml_variables)
return streams.get('Tags')
def test_to_cf_params() -> None:
empty_params: Dict[str, str] = None
params: Dict[str, str] = load_test_yaml_params()
expected_empty_params: List[Dict[str, str]] = []
expected_params: List[Dict[str, str]] = [{'ParameterKey': 'RepositoryName', 'ParameterValue': 'test-123'}]
# Test empty param list
assert cfncli.to_cf_params(empty_params) == expected_empty_params
# Test yaml param list
assert cfncli.to_cf_params(params) == expected_params
def test_tags_to_cf_params() -> None:
empty_tags: Dict[str, str] = None
tags: Dict[str, str] = load_test_tags()
expected_empty_tags: List[Dict[str, str]] = []
expected_tags: List[Dict[str, str]] = [{'Key': 'Project', 'Value': 'cfncli'}, {'Key': 'Env', 'Value': 'tst'}, {'Key': 'Name', 'Value': 'test-stack'}]
# Test empty param list
assert cfncli.tags_to_cf_params(empty_tags) == expected_empty_tags
# Test tags
assert cfncli.tags_to_cf_params(tags) == expected_tags
def test_str_tags_to_cf_params() -> None:
empty_tags: str = ""
tags: str = "Project=cfncli,Env=tst,Name=test-stack"
expected_tags: List[Dict[str, str]] = [{'Key': 'Project', 'Value': 'cfncli'}, {'Key': 'Env', 'Value': 'tst'}, {'Key': 'Name', 'Value': 'test-stack'}]
# Test empty param list
try:
cfncli.str_tags_to_cf_params(empty_tags)
assert False
except Exception as e:
assert str(e) == 'dictionary update sequence element #0 has length 1; 2 is required'
# Test tags
assert cfncli.str_tags_to_cf_params(tags) == expected_tags
def test_str_to_cf_params() -> None:
empty_params: str = ""
params: str = "RepositoryName=test-123,Stack=test-stack"
expected_params: List[Dict[str, str]] = [{'ParameterKey': 'RepositoryName', 'ParameterValue': 'test-123'}, {'ParameterKey': 'Stack', 'ParameterValue': 'test-stack'}] # noqa E501
# Test empty param list
try:
cfncli.str_to_cf_params(empty_params)
assert False
except Exception as e:
assert str(e) == 'dictionary update sequence element #0 has length 1; 2 is required'
# Test params list
assert cfncli.str_to_cf_params(params) == expected_params
```
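The assertions above fully describe the expected mapping: a plain dict becomes a list of ParameterKey/ParameterValue entries, and an empty input becomes an empty list. Below is a standalone sketch of a converter that satisfies those assertions; the real implementation lives in cfncli.cfncli and may differ in detail.
```python
from typing import Dict, List, Optional

def to_cf_params(params: Optional[Dict[str, str]]) -> List[Dict[str, str]]:
    """Sketch matching the behaviour asserted in test_to_cf_params() above."""
    if not params:
        return []
    return [{'ParameterKey': k, 'ParameterValue': v} for k, v in params.items()]

print(to_cf_params(None))                            # []
print(to_cf_params({'RepositoryName': 'test-123'}))  # [{'ParameterKey': 'RepositoryName', 'ParameterValue': 'test-123'}]
```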
#### File: aws-cfn-cli/tests/test_validate_stack.py
```python
import cfncli.cfncli as cfncli
import tempfile
import os
import botocore
from moto import mock_cloudformation
yaml_bad_template = """"""
yaml_bad_template2 = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
"""
yaml_valid_template = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
Resources:
S3Bucket:
Type: AWS::S3::Bucket
Properties:
BucketName: cf-test-bucket-1
"""
# yaml_valid_template = """
# AWSTemplateFormatVersion: 2010-09-09
# Description: 'Test stack that works'
# Parameters:
# RepositoryName:
# Description: 'Name of the ECR repository'
# Type: String
# Resources:
# TestEcrRepository:
# Type: 'AWS::ECR::Repository'
# Properties:
# RepositoryName: !Ref RepositoryName
# """
@mock_cloudformation
def test_validate_cfn_stack() -> None:
client = cfncli.get_cfn_client_session(region='eu-west-1', assume_role_arn=None)
# Test validation of invalid stack
try:
cfncli.validate_cfn_stack(template=yaml_bad_template, client=client)
assert False
except botocore.exceptions.ParamValidationError as e:
assert ('Invalid length for parameter TemplateBody, value: 0, valid min length: 1' in str(e))
    # Test validation of invalid stack (template without a Resources section)
try:
cfncli.validate_cfn_stack(template=yaml_bad_template2, client=client)
assert False
except botocore.exceptions.ClientError as e:
assert ('Stack with id Missing top level template section Resources does not exist' in str(e))
# Test validation of valid stack
assert cfncli.validate_cfn_stack(template=yaml_valid_template, client=client) is None
@mock_cloudformation
def test_validate() -> None:
client = cfncli.get_cfn_client_session(region='eu-west-1', assume_role_arn=None)
# Test validation of invalid stack
file_descriptor1, yaml_bad_template_file = tempfile.mkstemp()
with open(yaml_bad_template_file, "w") as f:
f.write(yaml_bad_template)
try:
cfncli.validate(stack_name="test", stack_file=yaml_bad_template_file, client=client)
assert False
except botocore.exceptions.ParamValidationError as e:
assert ('Invalid length for parameter TemplateBody, value: 0, valid min length: 1' in str(e))
os.close(file_descriptor1)
os.remove(yaml_bad_template_file)
# Test validation of invalid stack
file_descriptor2, yaml_bad_template_file2 = tempfile.mkstemp()
with open(yaml_bad_template_file2, "w") as f:
f.write(yaml_bad_template2)
try:
cfncli.validate(stack_name="test", stack_file=yaml_bad_template_file2, client=client)
except botocore.exceptions.ClientError as e:
assert ('Stack with id Missing top level template section Resources does not exist' in str(e))
os.close(file_descriptor2)
os.remove(yaml_bad_template_file2)
# Test validation of valid stack
file_descriptor3, yaml_valid_template_file = tempfile.mkstemp()
with open(yaml_valid_template_file, "w") as f:
f.write(yaml_valid_template)
assert cfncli.validate(stack_name="test", stack_file=yaml_valid_template_file, client=client) is None
os.close(file_descriptor3)
os.remove(yaml_valid_template_file)
```
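These tests rely on moto's mock_cloudformation decorator, so no real AWS calls are made. A minimal illustration of the same setup, assuming the same moto/boto3 versions as the tests above; the dummy credentials are only there to satisfy botocore's signing step.
```python
import os

import boto3
from moto import mock_cloudformation

# Dummy credentials so botocore's signing step is satisfied; moto intercepts the calls.
os.environ.setdefault("AWS_ACCESS_KEY_ID", "testing")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "testing")

@mock_cloudformation
def list_stacks_in_mock_account():
    # Every CloudFormation call inside this function hits moto's in-memory backend.
    client = boto3.client("cloudformation", region_name="eu-west-1")
    return client.list_stacks()["StackSummaries"]

print(list_stacks_in_mock_account())  # [] -- the mocked account starts empty
```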
|
{
"source": "jeremievallee/kubeflow",
"score": 2
}
|
#### File: kubeflow/ci/kfctl_go_test_utils.py
```python
import datetime
import logging
import os
import tempfile
import urllib
import uuid
import re
import requests
import yaml
from kubeflow.testing import util
from retrying import retry
# retry 4 times, waiting 3 minutes between retries
@retry(stop_max_attempt_number=4, wait_fixed=180000)
def run_with_retries(*args, **kwargs):
util.run(*args, **kwargs)
def get_kfctl_go_build_dir_binary_path():
"""return the build directory and path to kfctl go binary.
Args:
None
Return:
build_dir (str): Path to start build will be Kubeflow/kubeflow/bootstrap/
kfctl_path (str): Path where kfctl go binary has been built.
will be Kubeflow/kubeflow/bootstrap/bin/kfctl
"""
this_dir = os.path.dirname(__file__)
root = os.path.abspath(os.path.join(this_dir, "..", "..", "..", ".."))
build_dir = os.path.join(root, "bootstrap")
kfctl_path = os.path.join(build_dir, "bin", "kfctl")
return build_dir, kfctl_path
def build_kfctl_go():
"""build the kfctl go binary and return the path for the same.
Args:
None
Return:
kfctl_path (str): Path where kfctl go binary has been built.
will be Kubeflow/kubeflow/bootstrap/bin/kfctl
"""
build_dir, kfctl_path = get_kfctl_go_build_dir_binary_path()
# We need to use retry builds because when building in the test cluster
# we see intermittent failures pulling dependencies
run_with_retries(["make", "build-kfctl"], cwd=build_dir)
return kfctl_path
def get_or_create_app_path_and_parent_dir(app_path):
"""Get a valid app_path and parent dir. Create if they are not existing.
"""
if not app_path:
logging.info("--app_path not specified")
stamp = datetime.datetime.now().strftime("%H%M")
parent_dir = tempfile.gettempdir()
app_path = os.path.join(
parent_dir, "kfctl-{0}-{1}".format(stamp,
uuid.uuid4().hex[0:4]))
else:
parent_dir = os.path.dirname(app_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
if not os.path.exists(app_path):
os.makedirs(app_path)
return app_path, parent_dir
def load_config(config_path):
"""Load specified KFDef.
Args:
config_path: Path to a YAML file containing a KFDef object.
Can be a local path or a URI like
https://raw.githubusercontent.com/kubeflow/manifests/master/kfdef/kfctl_gcp_iap.yaml
Returns:
config_spec: KfDef spec
"""
url_for_spec = urllib.parse.urlparse(config_path)
if url_for_spec.scheme in ["http", "https"]:
data = requests.get(config_path)
return yaml.load(data.content)
else:
with open(config_path, 'r') as f:
config_spec = yaml.load(f)
return config_spec
def set_env_init_args(use_basic_auth, use_istio):
# Is it really needed?
init_args = []
# Set ENV for basic auth username/password.
if use_basic_auth:
# Don't log the password.
# logging.info("Setting environment variables KUBEFLOW_USERNAME and KUBEFLOW_PASSWORD")
os.environ["KUBEFLOW_USERNAME"] = "kf-test-user"
os.environ["KUBEFLOW_PASSWORD"] = str(uuid.uuid4().hex)
init_args = ["--use_basic_auth"]
else:
# Owned by project kubeflow-ci-deployment.
logging.info("Setting environment variables CLIENT_SECRET and CLIENT_ID")
os.environ["CLIENT_SECRET"] = "CJ4qVPLTi0j0GJMkONj7Quwt"
os.environ["CLIENT_ID"] = (
"29647740582-7meo6c7a9a76jvg54j0g2lv8lrsb4l8g"
".apps.googleusercontent.com")
if use_istio:
init_args.append("--use_istio")
else:
init_args.append("--use_istio=false")
def filter_spartakus(spec):
"""Filter our Spartakus from KfDef spec.
Args:
spec: KfDef spec
Returns:
spec: Filtered KfDef spec
"""
for i, app in enumerate(spec["applications"]):
if app["name"] == "spartakus":
spec["applications"].pop(i)
break
return spec
def get_config_spec(config_path, project, email, zone, app_path):
"""Generate KfDef spec.
Args:
config_path: Path to a YAML file containing a KFDef object.
Can be a local path or a URI like
https://raw.githubusercontent.com/kubeflow/manifests/master/kfdef/kfctl_gcp_iap.yaml
project: The GCP project to use.
email: a valid email of the GCP account
zone: a valid GCP zone for the cluster.
app_path: The path to the Kubeflow app.
Returns:
config_spec: Updated KfDef spec
"""
# TODO(https://github.com/kubeflow/kubeflow/issues/2831): Once kfctl
# supports loading version from a URI we should use that so that we
# pull the configs from the repo we checked out.
config_spec = load_config(config_path)
config_spec["spec"]["project"] = project
config_spec["spec"]["email"] = email
config_spec["spec"]["zone"] = zone
config_spec["spec"] = filter_spartakus(config_spec["spec"])
# Set KfDef name to be unique
# TODO(swiftdiaries): this is already being set at app_name
# we need to reuse that
regex = re.compile('[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?')
kfdef_name = regex.findall(app_path)[-1]
config_spec["metadata"]["name"] = kfdef_name
repos = config_spec["spec"]["repos"]
if os.getenv("REPO_NAME") == "manifests":
# kfctl_go_test.py was triggered on presubmit from the kubeflow/manifests
# repository. In this case we want to use the specified PR of the
# kubeflow/manifests repository; so we need to change the repo specification
# in the KFDef spec.
# TODO(jlewi): We should also point to a specific commit when triggering
# postsubmits from the kubeflow/manifests repo
for repo in repos:
for key, value in repo.items():
if value == "https://github.com/kubeflow/manifests/archive/master.tar.gz":
repo["uri"] = str("https://github.com/kubeflow/manifests/archive/pull/" + str(
os.getenv("PULL_NUMBER")) + "/head.tar.gz")
logging.info(str(config_spec))
return config_spec
def kfctl_deploy_kubeflow(app_path, project, use_basic_auth, use_istio, config_path, kfctl_path, build_and_apply):
"""Deploy kubeflow.
Args:
app_path: The path to the Kubeflow app.
project: The GCP project to use.
use_basic_auth: Whether to use basic_auth.
use_istio: Whether to use Istio or not
config_path: Path to the KFDef spec file.
kfctl_path: Path to the kfctl go binary
build_and_apply: whether to build and apply or apply
Returns:
app_path: Path where Kubeflow is installed
"""
# build_and_apply is a boolean used for testing both the new semantics
# test case 1: build_and_apply
# kfctl build -f <config file>
# kfctl apply
# test case 2: apply
# kfctl apply -f <config file>
if not os.path.exists(kfctl_path):
msg = "kfctl Go binary not found: {path}".format(path=kfctl_path)
logging.error(msg)
raise RuntimeError(msg)
app_path, parent_dir = get_or_create_app_path_and_parent_dir(app_path)
logging.info("Project: %s", project)
logging.info("app path %s", app_path)
logging.info("kfctl path %s", kfctl_path)
# TODO(nrchakradhar): Probably move all the environ sets to set_env_init_args
zone = 'us-central1-a'
if not zone:
raise ValueError("Could not get zone being used")
# We need to specify a valid email because
# 1. We need to create appropriate RBAC rules to allow the current user
# to create the required K8s resources.
# 2. Setting the IAM policy will fail if the email is invalid.
email = util.run(["gcloud", "config", "get-value", "account"])
if not email:
raise ValueError("Could not determine GCP account being used.")
if not project:
raise ValueError("Could not get project being used")
config_spec = get_config_spec(config_path, project, email, zone, app_path)
with open(os.path.join(app_path, "tmp.yaml"), "w") as f:
yaml.dump(config_spec, f)
# TODO(jlewi): When we switch to KfDef v1beta1 this logic will need to change because
# use_base_auth will move into the plugin spec
use_basic_auth = config_spec["spec"].get("useBasicAuth", False)
logging.info("use_basic_auth=%s", use_basic_auth)
use_istio = config_spec["spec"].get("useIstio", True)
logging.info("use_istio=%s", use_istio)
# Set ENV for basic auth username/password.
set_env_init_args(use_basic_auth, use_istio)
# build_and_apply
logging.info("running kfctl with build and apply: %s \n", build_and_apply)
logging.info("switching working directory to: %s \n", app_path)
os.chdir(app_path)
# Do not run with retries since it masks errors
logging.info("Running kfctl with config:\n%s", yaml.safe_dump(config_spec))
if build_and_apply:
build_and_apply_kubeflow(kfctl_path, app_path)
else:
apply_kubeflow(kfctl_path, app_path)
return app_path
def apply_kubeflow(kfctl_path, app_path):
util.run([kfctl_path, "apply", "-V", "-f=" + os.path.join(app_path, "tmp.yaml")], cwd=app_path)
return app_path
def build_and_apply_kubeflow(kfctl_path, app_path):
util.run([kfctl_path, "build", "-V", "-f=" + os.path.join(app_path, "tmp.yaml")], cwd=app_path)
util.run([kfctl_path, "apply", "-V"], cwd=app_path)
return app_path
def verify_kubeconfig(app_path):
"""Verify kubeconfig.
Args:
app_path: KfDef spec path
"""
name = os.path.basename(app_path)
context = util.run(["kubectl", "config", "current-context"]).strip()
if name == context:
logging.info("KUBECONFIG current context name matches app name: %s", name)
else:
msg = "KUBECONFIG not having expected context: {expected} v.s. {actual}".format(
expected=name, actual=context)
logging.error(msg)
raise RuntimeError(msg)
```
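run_with_retries above wraps util.run with the retrying package so transient failures (for example, dependency pulls during the build) are retried automatically. A minimal sketch of the same decorator usage on an arbitrary step; the function body is a placeholder and waits 1 second instead of 3 minutes.
```python
from retrying import retry

ATTEMPTS = {"count": 0}

# Same decorator call as run_with_retries, but with a 1 s wait between attempts.
@retry(stop_max_attempt_number=4, wait_fixed=1000)
def flaky_step():
    ATTEMPTS["count"] += 1
    if ATTEMPTS["count"] < 3:
        raise RuntimeError("transient failure, will be retried")
    return "succeeded on attempt {}".format(ATTEMPTS["count"])

print(flaky_step())  # succeeded on attempt 3
```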
|
{
"source": "Jeremip11/precog",
"score": 2
}
|
#### File: Jeremip11/precog/git.py
```python
from os.path import relpath, join, isdir
from os import environ, mkdir, walk
from tempfile import gettempdir
from urlparse import urlparse
from logging import getLogger
from datetime import datetime
from base64 import b64decode
from hashlib import sha1
from io import BytesIO
from time import time
from re import match
import tarfile
import json
from dateutil.parser import parse, tz
from requests_oauthlib import OAuth2Session
from uritemplate import expand as expand_uri
import requests
import yaml
from util import (
extend_querystring,
ERR_NO_REPOSITORY, ERR_TESTS_PENDING, ERR_TESTS_FAILED, ERR_NO_REF_STATUS
)
github_client_id = environ.get('GITHUB_CLIENT_ID') or r'e62e0d541bb6d0125b62'
github_client_secret = environ.get('GITHUB_CLIENT_SECRET') or r'1f488407e92a59beb897814e9240b5a06a2020e3'
FAKE_TOKEN = '<fake token, will fail>'
_GITHUB_USER_URL = 'https://api.github.com/user'
_GITHUB_REPO_URL = 'https://api.github.com/repos/{owner}/{repo}'
_GITHUB_REPO_HEAD_URL = 'https://api.github.com/repos/{owner}/{repo}/git/{head}'
_GITHUB_COMMIT_URL = 'https://api.github.com/repos/{owner}/{repo}/commits/{sha}'
_GITHUB_TREE_URL = 'https://api.github.com/repos/{owner}/{repo}/git/trees/{ref}'
_GITHUB_HEADS_URL = 'https://api.github.com/repos/{owner}/{repo}/git/refs/heads'
_GITHUB_STATUS_URL = 'https://api.github.com/repos/{owner}/{repo}/statuses/{ref}'
_CIRCLECI_ARTIFACTS_URL = 'https://circleci.com/api/v1.1/project/{build}/artifacts?circle-token={token}'
_LONGTIME = 3600
_defaultcache = {}
PRECOG_TARBALL_NAME = 'precog-content.tar.gz'
class GithubDisallowed (RuntimeError): pass
class Getter:
''' Wrapper for HTTP GET from requests.
'''
def __init__(self, github_auth, cache=_defaultcache, throws4XX=False):
self.github_auth = github_auth
self.responses = cache
self.throws4XX = throws4XX
def _flush(self):
''' Flush past-deadline responses.
'''
for (k, (r, d)) in self.responses.items():
if (time() > d):
self.responses.pop(k)
def get(self, url, lifespan=5, timeout=2):
self._flush()
host = urlparse(url).hostname
is_github = (host == 'api.github.com')
is_noauth = (self.github_auth and self.github_auth[0] == FAKE_TOKEN)
auth = self.github_auth if is_github else None
key = (url, auth)
if key in self.responses:
c_resp = self.responses[key][0]
if is_github and is_noauth and self.throws4XX and c_resp.status_code in range(400, 499):
raise GithubDisallowed('Got {} response from Github API'.format(c_resp.status_code))
return c_resp
if is_github:
if is_noauth:
# https://developer.github.com/v3/#increasing-the-unauthenticated-rate-limit-for-oauth-applications
auth = None
args = dict(client_id=github_client_id, client_secret=github_client_secret)
url = extend_querystring(url, args)
getLogger('precog').warning('GET {}'.format(url))
resp = requests.get(url, auth=auth, headers=dict(Accept='application/json'), timeout=timeout)
self.responses[key] = (resp, time() + lifespan)
if is_github and is_noauth and self.throws4XX and resp.status_code in range(400, 499):
raise GithubDisallowed('Got {} response from Github API'.format(resp.status_code))
return resp
def is_authenticated(GET):
''' Return True if given username/password is valid for a Github user.
'''
user_resp = GET(_GITHUB_USER_URL)
return bool(user_resp.status_code == 200)
def repo_exists(owner, repo, GET):
''' Return True if given owner/repo exists in Github.
'''
repo_url = _GITHUB_REPO_URL.format(owner=owner, repo=repo)
repo_resp = GET(repo_url)
return bool(repo_resp.status_code == 200)
def split_branch_path(owner, repo, path, GET):
''' Return existing branch name and remaining path for a given path.
Branch name might contain slashes.
'''
branch_parts, path_parts = [], path.split('/')
while path_parts:
branch_parts.append(path_parts.pop(0))
ref = '/'.join(branch_parts)
if len(branch_parts) == 1:
# See if it's a regular commit first.
commit_url = _GITHUB_COMMIT_URL.format(owner=owner, repo=repo, sha=ref)
commit_resp = GET(commit_url)
if commit_resp.status_code == 200:
# Stop early, we've found a commit.
return ref, '/'.join(path_parts)
head = 'refs/heads/{}'.format(ref)
head_url = _GITHUB_REPO_HEAD_URL.format(owner=owner, repo=repo, head=head)
head_resp = GET(head_url)
if head_resp.status_code != 200:
# Not found at all.
continue
if not hasattr(head_resp.json(), 'get'):
# There are more refs under this path, get more specific.
continue
if head_resp.json().get('ref') != head:
# Found a single ref and it is wrong.
break
return ref, '/'.join(path_parts)
return None, path
def find_base_path(owner, repo, ref, GET):
''' Return artifacts base path after reading Circle config.
'''
tree_url = _GITHUB_TREE_URL.format(owner=owner, repo=repo, ref=ref)
tree_resp = GET(tree_url)
paths = {item['path']: item['url'] for item in tree_resp.json()['tree']}
if 'circle.yml' not in paths:
return '$CIRCLE_ARTIFACTS'
blob_url = paths['circle.yml']
blob_resp = GET(blob_url, _LONGTIME)
blob_yaml = b64decode(blob_resp.json()['content']).decode('utf8')
try:
circle_config = yaml.load(blob_yaml)
except yaml.reader.ReaderError as err:
raise RuntimeError('Problem reading configuration from circle: {}'.format(err))
paths = circle_config.get('general', {}).get('artifacts', [])
if not paths:
return '$CIRCLE_ARTIFACTS'
return join('/home/ubuntu/{}/'.format(repo), paths[0])
class Branch:
def __init__(self, name, age, link):
self.name = name
self.link = link
self.age = age
def get_branch_link(owner, repo, branch):
''' Return link inside branch if it matches a pattern.
Currently, just "foo/blog-bar" patterns in mapzen/blog are recognized.
'''
if (owner, repo) == ('mapzen', 'blog'):
if match(r'^\w+/blog($|-|/)', branch):
return 'blog'
return None
def get_branch_info(owner, repo, GET):
''' Return list of Branch instances.
'''
heads_url = _GITHUB_HEADS_URL.format(owner=owner, repo=repo)
heads_resp = GET(heads_url)
heads_list = heads_resp.json()
next_url = heads_resp.links.get('next', {}).get('url')
# Iterate over links, if any.
while next_url:
next_resp = GET(next_url)
next_url = next_resp.links.get('next', {}).get('url')
heads_list.extend(next_resp.json())
branch_info = list()
for head in heads_list:
if head['object']['type'] != 'commit':
continue
obj_name = relpath(head['ref'], 'refs/heads/')
obj_resp = GET(head['object']['url'], _LONGTIME)
obj_link = get_branch_link(owner, repo, obj_name)
obj_date = parse(obj_resp.json().get('committer', {}).get('date', {}))
obj_age = datetime.now(tz=obj_date.tzinfo) - obj_date
branch_info.append(Branch(obj_name, obj_age, obj_link))
return branch_info
def get_circle_artifacts(owner, repo, ref, GET):
''' Return dictionary of CircleCI artifacts for a given Github repo ref.
'''
circle_token = environ.get('CIRCLECI_TOKEN') or '<KEY>'
status_url = _GITHUB_STATUS_URL.format(owner=owner, repo=repo, ref=ref)
status_resp = GET(status_url)
if status_resp.status_code == 404:
raise RuntimeError(ERR_NO_REPOSITORY, None)
elif status_resp.status_code != 200:
raise RuntimeError('some other HTTP status: {}'.format(status_resp.status_code))
statuses = [s for s in status_resp.json() if s['context'] == 'ci/circleci']
if len(statuses) == 0:
raise RuntimeError(ERR_NO_REF_STATUS, None)
status = statuses[0]
if status['state'] == 'pending':
raise RuntimeError(ERR_TESTS_PENDING, status['target_url'])
elif status['state'] in ('error', 'failure'):
raise RuntimeError(ERR_TESTS_FAILED, status['target_url'])
elif status['state'] != 'success':
raise RuntimeError('some other test outcome: {state}'.format(**status))
circle_url = status['target_url'] if (status['state'] == 'success') else None
circle_build = relpath(urlparse(circle_url).path, '/gh/')
artifacts_base = find_base_path(owner, repo, ref, GET)
artifacts_url = _CIRCLECI_ARTIFACTS_URL.format(build=circle_build, token=circle_token)
artifacts_list = GET(artifacts_url, _LONGTIME, timeout=10).json()
return _prepare_artifacts(artifacts_list, artifacts_base, circle_token)
def _prepare_artifacts(list, base, circle_token):
    ''' Map artifact paths (relative to the artifacts base) to token-authenticated
        URLs, merging in the contents of a precog tarball when one is present.
    '''
artifacts = {relpath(a['pretty_path'], base): '{}?circle-token={}'.format(a['url'], circle_token)
for a in list}
if PRECOG_TARBALL_NAME in artifacts:
tarball_artifacts = _make_local_tarball(artifacts[PRECOG_TARBALL_NAME])
artifacts, raw_artifacts = tarball_artifacts, artifacts
# Files in artifacts override those in tarball
artifacts.update(raw_artifacts)
return artifacts
def _make_local_tarball(url):
    ''' Download and extract the tarball at the given URL into a temporary
        directory, returning a mapping of relative paths to local file:// URLs.
    '''
local_path = join(gettempdir(), 'precog-{}'.format(sha1(url).hexdigest()))
if not isdir(local_path):
response = requests.get(url)
tarball = tarfile.open(fileobj=BytesIO(response.content), mode='r:gz')
mkdir(local_path)
tarball.extractall(local_path)
artifacts = dict()
for (dirpath, dirnames, filenames) in walk(local_path):
for filename in filenames:
full_path = join(dirpath, filename)
short_path = relpath(full_path, local_path)
artifacts[short_path] = 'file://' + full_path
return artifacts
def select_path(paths, path):
    ''' Return the artifact path to serve, falling back to index.html for
        empty or directory-style paths.
    '''
if path in paths:
return path
if path == '':
return 'index.html'
return '{}/index.html'.format(path.rstrip('/'))
def skip_webhook_payload(payload):
''' Return True if this payload should not be processed.
'''
if 'action' in payload and 'pull_request' in payload:
return bool(payload['action'] == 'closed')
if 'commits' in payload and 'head_commit' in payload:
# Deleted refs will not have a status URL.
return bool(payload.get('deleted') == True)
return True
def get_webhook_commit_info(app, payload):
''' Get owner, repository, commit SHA and Github status API URL from webhook payload.
'''
if 'pull_request' in payload:
commit_sha = payload['pull_request']['head']['sha']
status_url = payload['pull_request']['statuses_url']
elif 'head_commit' in payload:
commit_sha = payload['head_commit']['id']
status_url = payload['repository']['statuses_url']
status_url = expand_uri(status_url, dict(sha=commit_sha))
else:
raise ValueError('Unintelligible payload')
if 'repository' not in payload:
raise ValueError('Unintelligible payload')
repo = payload['repository']
owner = repo['owner'].get('name') or repo['owner'].get('login')
repository = repo['name']
app.logger.debug('Status URL {}'.format(status_url))
return owner, repository, commit_sha, status_url
def post_github_status(status_url, status_json, github_auth):
''' POST status JSON to Github status API.
'''
if status_url is None:
return
# Github only wants 140 chars of description.
status_json['description'] = status_json['description'][:140]
posted = requests.post(status_url, data=json.dumps(status_json), auth=github_auth,
headers={'Content-Type': 'application/json'})
if posted.status_code not in range(200, 299):
raise ValueError('Failed status post to {}'.format(status_url))
if posted.json()['state'] != status_json['state']:
raise ValueError('Mismatched status post to {}'.format(status_url))
```
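Getter above keeps an in-memory cache of (response, deadline) pairs and drops expired entries before each request. Below is a stripped-down sketch of just that caching behaviour, with a plain callable standing in for requests.get and none of the GitHub-specific handling.
```python
import time

class CachedGetter:
    """Illustrative reduction of Getter's (response, deadline) cache."""
    def __init__(self, fetch):
        self.fetch = fetch      # callable(url) -> response-like object
        self.responses = {}     # url -> (response, deadline)

    def _flush(self):
        now = time.time()
        for url in list(self.responses):
            if now > self.responses[url][1]:
                self.responses.pop(url)

    def get(self, url, lifespan=5):
        self._flush()
        if url in self.responses:
            return self.responses[url][0]
        resp = self.fetch(url)
        self.responses[url] = (resp, time.time() + lifespan)
        return resp

getter = CachedGetter(fetch=lambda url: "fetched " + url)
print(getter.get("https://example.com/a"))  # cache miss, calls fetch
print(getter.get("https://example.com/a"))  # served from the cache
```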
|
{
"source": "jeremite/Yolo4-ladder-detection-flask-app",
"score": 3
}
|
#### File: jeremite/Yolo4-ladder-detection-flask-app/app.py
```python
from flask import Flask, request, jsonify,render_template,session,redirect,url_for
from PIL import Image
import numpy as np
import base64
import io
import os
import cv2
from backend.yolo_inference import load_model, inference
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
app = Flask(__name__)
app.config['DEBUG']=False
app.config['SECRET_KEY']='secret'
img_arr = np.array([])
path = "backend/model_config/ladder/images/"
path_config = "D:\WM\Project\ladder\yolov4\darknet\images"
threhold = 20
#print("cur dir is",os.getcwd())
@app.route('/')
def index():
num_f = get_newData_cnt()
return render_template('index.html',num_f=num_f)
@app.route('/api', methods=["GET","POST"])
def main_interface():
response = request.get_json()
print('data is',request.files)
data_str = response['image']
point = data_str.find(',')
base64_str = data_str[point:] # remove unused part like this: "data:image/jpeg;base64,"
image = base64.b64decode(base64_str)
img = Image.open(io.BytesIO(image))
print('img',img)
if(img.mode!='RGB'):
img = img.convert("RGB")
# convert to numpy array.
global img_arr
#img_arr = np.array(img)
im1 = img.save("test.jpg")
print("shapre is ",img_arr.shape)
img_arr = cv2.imread("test.jpg")
# do object detection in inference function.
results = inference(img_arr, conf_thresh=0.5, max_suppression_thresh=0.4)
is_ladder=1
if not results['results']:
is_ladder=0
results['is_ladder']=is_ladder
print(results)
return jsonify(results)
@app.route('/save', methods=["POST"])
def save():
response = request.get_json()
coordinates = response['coordinates']
filename = response['filename']
w = response['width']
h = response['height']
print('coordinates',coordinates)
# save the coor and image
write_new_data(filename,coordinates,w,h)
    # calculate the number of new data samples collected so far
num_f = get_newData_cnt()
#if num_f >= threhold:
#write_new_config()
print('num_f new is ',num_f)
return jsonify({"num_f":num_f})
def write_new_data(filename,co,w,h):
im = Image.fromarray(img_arr)
im.save(os.path.join(path,filename))
#cv2.imwrite(os.path.join(path,filename), img_arr)
new_cor = []
for c in co:
x,y,wi,he = c
x = (x+wi/2.)/w
y = (y+he/2.)/h
wi = wi/w
he = he/h
new_cor.append([0,x,y,wi,he])
with open(os.path.join(path,filename.split(".")[0]+'.txt'), "w") as result:
result.write("\n".join([' '.join(map(str, item)) for item in new_cor]))
def get_newData_cnt(): return int(len(next(os.walk("backend/model_config/ladder/images/"))[2])/2)
@app.after_request
def add_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
return response
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000)
```
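The /api endpoint receives the image as a base64 data URL, strips the prefix, decodes it with PIL, and converts it to a NumPy array for inference. A self-contained sketch of that decode path follows; the tiny PNG is generated in-place so no external file or model is needed.
```python
import base64
import io

import numpy as np
from PIL import Image

# Build a small in-memory PNG so the example has no external dependencies.
buf = io.BytesIO()
Image.new("RGB", (4, 4), color=(255, 0, 0)).save(buf, format="PNG")
data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

# Same steps as main_interface(): cut at the comma, base64-decode, open, force RGB.
payload = data_url[data_url.find(",") + 1:]
img = Image.open(io.BytesIO(base64.b64decode(payload)))
if img.mode != "RGB":
    img = img.convert("RGB")
img_arr = np.array(img)
print(img_arr.shape)  # (4, 4, 3)
```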
|
{
"source": "jeremmm/multizab",
"score": 2
}
|
#### File: multizab/multizab/config-template.py
```python
import os
import logging
import json
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
DEBUG = False
TESTING = False
SECRET_KEY = '<KEY>'
LOGGING_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGGING_LOCATION = 'multizab.log'
LOGGING_LEVEL = logging.DEBUG
DATABASE_FILE = os.path.join(basedir, 'hosts.json')
class DevelopmentConfig(BaseConfig):
DEBUG = True
TESTING = True
SECRET_KEY = '<KEY>'
class TestingConfig(BaseConfig):
DEBUG = False
TESTING = True
SECRET_KEY = '<KEY>'
config = {
"development": "multizab.config.DevelopmentConfig",
"testing": "multizab.config.TestingConfig",
"default": "multizab.config.DevelopmentConfig"
}
def database(path):
if not os.path.exists(path):
        # Text mode: json.dump writes str, which would fail on a binary file handle in Python 3.
        with open(path, 'w') as f:
json.dump({'hosts': []}, f, ensure_ascii=False)
def configure_app(app):
config_name = os.getenv('FLASK_CONFIGURATION', 'default')
app.config.from_object(config[config_name])
app.config.from_pyfile('config.cfg', silent=True)
# Configure logging
handler = logging.FileHandler(app.config['LOGGING_LOCATION'])
handler.setLevel(app.config['LOGGING_LEVEL'])
formatter = logging.Formatter(app.config['LOGGING_FORMAT'])
handler.setFormatter(formatter)
app.logger.addHandler(handler)
database(app.config['DATABASE_FILE'])
```
|
{
"source": "jerem-uzoma/django-blog",
"score": 2
}
|
#### File: django-blog/blog/celery.py
```python
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# Set the DJANGO_SETTINGS_MODULE environment variable for the celery program
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog.settings')
app = Celery('blog')
# Import the CELERY settings from the Django settings file
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load the task module from all registered apps
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
```
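With the Celery app configured as above, autodiscover_tasks() picks up a tasks.py module in every installed Django app. A minimal illustrative task module is shown below; the file name and task body are assumptions, not part of the original project.
```python
# someapp/tasks.py -- illustrative only; any installed app's tasks.py is auto-discovered.
from celery import shared_task

@shared_task
def add(x, y):
    """Trivial example task; enqueue it with add.delay(2, 3) once a worker is running."""
    return x + y
```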
|
{
"source": "jerem-uzoma/pythonanywhere",
"score": 3
}
|
#### File: pythonanywhere/app/models.py
```python
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Article(models.Model):
STATUS_CHOICES = (
('d', 'part'),
('p', 'published'),
)
title = models.CharField('Title', max_length=100)
body = models.TextField('Text')
created_time = models.DateTimeField('created time', auto_now_add=True)
# auto_now_add : Create timestamps that will not be overwritten
last_modified_time = models.DateTimeField('change time', auto_now=True)
# auto_now: Automatically overwrite the time with the current one
objects = EntryQuerySet.as_manager()
status = models.CharField('Article status', max_length=1, choices=STATUS_CHOICES)
abstract = models.CharField('Summary', max_length=54, blank=True, null=True,
help_text="Type summary here, not more than 54 characters")
views = models.PositiveIntegerField('Views', default=0)
likes = models.PositiveIntegerField('Likes', default=0)
# If article should be at the top
topped = models.BooleanField('Stick top', default=False)
category = models.ForeignKey('Category', verbose_name='classification',
null=True,
on_delete=models.SET_NULL)
tags = models.ManyToManyField('Tag', verbose_name='Label collection', blank=True)
def __str__(self):
return self.title
class Meta:
# Meta contains a series of options, where ordering represents the sort, - indicates the reverse order
# that is, when the article from the database, the article to the final revision time in reverse order
ordering = ['-last_modified_time']
def get_absolute_url(self):
return reverse('app:detail', kwargs={'article_id': self.pk})
class Category(models.Model):
"""
this stores the classification of the article information
"""
name = models.CharField('Classification name', max_length=20)
created_time = models.DateTimeField('Time of creation', auto_now_add=True)
last_modified_time = models.DateTimeField('Last Modified', auto_now=True)
def __str__(self):
return self.name
class BlogComment(models.Model):
user_name = models.CharField('Name', max_length=100)
body = models.TextField('Comment')
created_time = models.DateTimeField('Created time', auto_now_add=True)
article = models.ForeignKey('Article', verbose_name='Comment on the article', on_delete=models.CASCADE)
def __str__(self):
return self.body[:20]
class Tag(models.Model):
"""
tag(Tag cloud)corresponding to the database
"""
name = models.CharField('Name', max_length=20)
created_time = models.DateTimeField('Created time', auto_now_add=True)
last_modified_time = models.DateTimeField('Modified', auto_now=True)
def __str__(self):
return self.name
class Suggest(models.Model):
suggest = models.TextField('suggestion', max_length=200)
suggest_time = models.DateTimeField('Suggested', auto_now_add=True)
def __str__(self):
return self.suggest
class EntryQuerySet(models.QuerySet):
    # NOTE: this class must be defined above Article, otherwise the
    # `objects = EntryQuerySet.as_manager()` assignment raises NameError at import time.
    def published_aritcles(self):
        # Article defines no `published` field; the 'p' status choice marks published articles.
        return self.filter(status='p')
```
|
{
"source": "jerem/Whoosh",
"score": 3
}
|
#### File: Whoosh/scripts/release.py
```python
import sys, os.path
from ConfigParser import ConfigParser
from optparse import OptionParser
from os import system
# Script to build and upload a release of Whoosh to PyPI, and to build
# and upload the documentation to the project website.
def build_docs():
system("python setup.py build_sphinx")
def upload_docs(user, server, base, version, build=True, latest=True):
opts = {"user": user,
"srv": server,
"base": base,
"ver": version}
system('ssh %(user)s@%(srv)s "mkdir %(base)s/%(ver)s"' % opts)
system("scp -r docs/build/html/* %(user)s@%(srv)s:%(base)s/%(ver)s" % opts)
system('ssh %(user)s@%(srv)s "cd %(base)s;ln -s %(ver)s latest"' % opts)
def upload_pypi(tag=None):
system("python setup.py sdist bdist_egg upload")
if tag:
tag = str(tag)
opts = {"base": "http://svn.whoosh.ca/projects/whoosh",
"tag": tag,
"msg": "Tagging trunk as %s" % tag}
system('svn copy %(base)s/trunk %(base)s/tags/%(tag)s -m "%(msg)s"' % opts)
if __name__ == '__main__':
sys.path.insert(0, os.path.abspath("src"))
from whoosh import __version__
version = ".".join(str(n) for n in __version__)
parser = OptionParser()
parser.add_option("-c", "--config", dest="configfile",
help="Configuration file",
metavar="INIFILE",
default="whoosh.ini")
parser.add_option("-d", "--no-docs", dest="dodocs",
help="Don't build or upload docs",
action="store_false",
default=True)
parser.add_option("-D", "--no-build-docs", dest="builddocs",
help="Skip building docs",
action="store_false",
default=True)
parser.add_option("-t", "--tag", dest="tag",
help="Tag the trunk as this",
default=None)
(options, args) = parser.parse_args()
cp = ConfigParser()
cp.read(options.configfile)
if options.dodocs:
upload_docs(cp.get("website", "username"),
cp.get("website", "server"),
cp.get("website", "docbase"), version,
build=options.builddocs)
upload_pypi(tag=options.tag)
```
#### File: src/whoosh/fields.py
```python
import datetime
import re
from whoosh.analysis import IDAnalyzer, RegexAnalyzer, KeywordAnalyzer
from whoosh.analysis import StandardAnalyzer, NgramAnalyzer
from whoosh.formats import Format, Existence, Frequency, Positions
# Exceptions
class FieldConfigurationError(Exception):
pass
class UnknownFieldError(Exception):
pass
# Field Types
class FieldType(object):
"""Represents a field configuration.
The FieldType object supports the following attributes:
* format (fields.Format): the storage format for the field's contents.
* vector (fields.Format): the storage format for the field's vectors
(forward index), or None if the field should not store vectors.
* scorable (boolean): whether searches against this field may be scored.
This controls whether the index stores per-document field lengths for
this field.
* stored (boolean): whether the content of this field is stored for each
document. For example, in addition to indexing the title of a document,
you usually want to store the title so it can be presented as part of
the search results.
* unique (boolean): whether this field's value is unique to each document.
For example, 'path' or 'ID'. IndexWriter.update_document() will use
fields marked as 'unique' to find the previous version of a document
being updated.
The constructor for the base field type simply lets you supply your
own configured field format, vector format, and scorable and stored
values. Subclasses may configure some or all of this for you.
"""
format = vector = scorable = stored = unique = None
indexed = True
__inittypes__ = dict(format=Format, vector=Format,
scorable=bool, stored=bool, unique=bool)
def __init__(self, format, vector = None,
scorable = False, stored = False,
unique = False):
self.format = format
self.vector = vector
self.scorable = scorable
self.stored = stored
self.unique = unique
def __repr__(self):
return "%s(format=%r, vector=%r, scorable=%s, stored=%s, unique=%s)"\
% (self.__class__.__name__, self.format, self.vector,
self.scorable, self.stored, self.unique)
def __eq__(self, other):
return all((isinstance(other, FieldType),
(self.format == other.format),
(self.vector == other.vector),
(self.scorable == other.scorable),
(self.stored == other.stored),
(self.unique == other.unique)))
def clean(self):
"""Clears any cached information in the field and any child objects."""
if self.format and hasattr(self.format, "clean"):
self.format.clean()
if self.vector and hasattr(self.vector, "clean"):
self.vector.clean()
def index(self, value, **kwargs):
"""Returns an iterator of (termtext, frequency, encoded_value) tuples.
"""
if not self.format:
raise Exception("%s field cannot index without a format" % self.__class__)
if not isinstance(value, unicode):
raise ValueError("%r is not unicode" % value)
return self.format.word_values(value, mode="index", **kwargs)
class ID(FieldType):
"""Configured field type that indexes the entire value of the field as one
token. This is useful for data you don't want to tokenize, such as the
path of a file.
"""
__inittypes__ = dict(stored=bool, unique=bool, field_boost=float)
def __init__(self, stored = False, unique = False, field_boost = 1.0):
"""
:param stored: Whether the value of this field is stored with the document.
"""
self.format = Existence(analyzer = IDAnalyzer(), field_boost = field_boost)
self.stored = stored
self.unique = unique
class IDLIST(FieldType):
"""Configured field type for fields containing IDs separated by whitespace
    and/or punctuation.
"""
__inittypes__ = dict(stored=bool, unique=bool, expression=bool, field_boost=float)
def __init__(self, stored = False, unique = False, expression = None, field_boost = 1.0):
"""
:param stored: Whether the value of this field is stored with the document.
:param unique: Whether the value of this field is unique per-document.
:param expression: The regular expression object to use to extract tokens.
The default expression breaks tokens on CRs, LFs, tabs, spaces, commas,
and semicolons.
"""
expression = expression or re.compile(r"[^\r\n\t ,;]+")
analyzer = RegexAnalyzer(expression = expression)
self.format = Existence(analyzer = analyzer, field_boost = field_boost)
self.stored = stored
self.unique = unique
class DATETIME(FieldType):
__inittypes__ = dict(stored=bool, unique=bool)
def __init__(self, stored = True, unique = False):
"""
:param stored: Whether the value of this field is stored with the document.
:param unique: Whether the value of this field is unique per-document.
"""
self.stored = stored
self.unique = unique
self.format = Existence()
def index(self, dt):
if not isinstance(dt, datetime.datetime):
raise ValueError("Value of DATETIME field must be a datetime object: %r" % dt)
text = dt.isoformat()
text = text.replace(" ", "").replace(":", "").replace("-", "").replace(".", "")
return [(text, 1, '')]
class STORED(FieldType):
"""Configured field type for fields you want to store but not index.
"""
indexed = False
stored = True
def __init__(self):
pass
class KEYWORD(FieldType):
"""Configured field type for fields containing space-separated or comma-separated
keyword-like data (such as tags). The default is to not store positional information
(so phrase searching is not allowed in this field) and to not make the field scorable.
"""
__inittypes__ = dict(stored=bool, lowercase=bool, commas=bool, scorable=bool,
unique=bool, field_boost=float)
def __init__(self, stored = False, lowercase = False, commas = False,
scorable = False, unique = False, field_boost = 1.0):
"""
:param stored: Whether to store the value of the field with the document.
        :param commas: Whether this is a comma-separated field. If this is False
(the default), it is treated as a space-separated field.
:param scorable: Whether this field is scorable.
"""
ana = KeywordAnalyzer(lowercase = lowercase, commas = commas)
self.format = Frequency(analyzer = ana, field_boost = field_boost)
self.scorable = scorable
self.stored = stored
self.unique = unique
class TEXT(FieldType):
"""Configured field type for text fields (for example, the body text of an article). The
default is to store positional information to allow phrase searching. This field type
is always scorable.
"""
__inittypes__ = dict(analyzer=object, phrase=bool, vector=Format,
stored=bool, field_boost=float)
def __init__(self, analyzer = None, phrase = True, vector = None,
stored = False, field_boost = 1.0):
"""
:param stored: Whether to store the value of this field with the document. Since
this field type generally contains a lot of text, you should avoid storing it
with the document unless you need to, for example to allow fast excerpts in the
search results.
        :param phrase: Whether to store positional information to allow phrase searching.
:param analyzer: The analysis.Analyzer to use to index the field contents. See the
analysis module for more information. If you omit this argument, the field uses
analysis.StandardAnalyzer.
"""
ana = analyzer or StandardAnalyzer()
if phrase:
formatclass = Positions
else:
formatclass = Frequency
self.format = formatclass(analyzer = ana, field_boost = field_boost)
self.vector = vector
self.scorable = True
self.stored = stored
class NGRAM(FieldType):
"""Configured field that indexes text as N-grams. For example, with a field type
NGRAM(3,4), the value "hello" will be indexed as tokens
"hel", "hell", "ell", "ello", "llo".
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool, field_boost=float)
def __init__(self, minsize = 2, maxsize = 4, stored = False, field_boost = 1.0):
"""
:param stored: Whether to store the value of this field with the document. Since
this field type generally contains a lot of text, you should avoid storing it
with the document unless you need to, for example to allow fast excerpts in the
search results.
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
"""
self.format = Frequency(analyzer = NgramAnalyzer(minsize, maxsize),
field_boost = field_boost)
self.scorable = True
self.stored = stored
# Schema class
class Schema(object):
"""Represents the collection of fields in an index. Maps field names to
FieldType objects which define the behavior of each field.
Low-level parts of the index use field numbers instead of field names
for compactness. This class has several methods for converting between
the field name, field number, and field object itself.
"""
def __init__(self, **fields):
"""
All keyword arguments to the constructor are treated as fieldname = fieldtype
pairs. The fieldtype can be an instantiated FieldType object, or a FieldType
sub-class (in which case the Schema will instantiate it with the default
constructor before adding it).
For example::
s = Schema(content = TEXT,
title = TEXT(stored = True),
tags = KEYWORD(stored = True))
"""
self._by_number = []
self._names = []
self._by_name = {}
self._numbers = {}
for name in sorted(fields.keys()):
self.add(name, fields[name])
def __eq__(self, other):
if not isinstance(other, Schema): return False
return self._by_name == other._by_name
def __repr__(self):
return "<Schema: %s>" % repr(self._names)
def __iter__(self):
"""
Yields the sequence of fields in this schema.
"""
return iter(self._by_number)
def __getitem__(self, id):
"""
Returns the field associated with the given field name or number.
:param id: A field name or field number.
"""
if isinstance(id, basestring):
return self._by_name[id]
return self._by_number[id]
def __len__(self):
"""
Returns the number of fields in this schema.
"""
return len(self._by_number)
def __contains__(self, fieldname):
"""
Returns True if a field by the given name is in this schema.
:param fieldname: The name of the field.
"""
return fieldname in self._by_name
def field_by_name(self, name):
"""
Returns the field object associated with the given name.
:param name: The name of the field to retrieve.
"""
return self._by_name[name]
def field_by_number(self, number):
"""
Returns the field object associated with the given number.
:param number: The number of the field to retrieve.
"""
return self._by_number[number]
def fields(self):
"""
Yields ("fieldname", field_object) pairs for the fields
in this schema.
"""
return self._by_name.iteritems()
def field_names(self):
"""
Returns a list of the names of the fields in this schema.
"""
return self._names
def add(self, name, fieldtype):
"""
Adds a field to this schema. This is a low-level method; use keyword
arguments to the Schema constructor to create the fields instead.
:param name: The name of the field.
:param fieldtype: An instantiated fields.FieldType object, or a FieldType subclass.
If you pass an instantiated object, the schema will use that as the field
configuration for this field. If you pass a FieldType subclass, the schema
will automatically instantiate it with the default constructor.
"""
if name.startswith("_"):
raise FieldConfigurationError("Field names cannot start with an underscore")
elif name in self._by_name:
raise FieldConfigurationError("Schema already has a field named %s" % name)
if type(fieldtype) is type:
try:
fieldtype = fieldtype()
except Exception, e:
raise FieldConfigurationError("Error: %s instantiating field %r: %r" % (e, name, fieldtype))
if not isinstance(fieldtype, FieldType):
raise FieldConfigurationError("%r is not a FieldType object" % fieldtype)
fnum = len(self._by_number)
self._numbers[name] = fnum
self._by_number.append(fieldtype)
self._names.append(name)
self._by_name[name] = fieldtype
def to_number(self, id):
"""Given a field name or number, returns the field's number.
"""
if isinstance(id, int):
return id
else:
return self.name_to_number(id)
def to_name(self, id):
if isinstance(id, int):
return self.number_to_name(id)
else:
return id
def name_to_number(self, name):
"""Given a field name, returns the field's number.
"""
try:
return self._numbers[name]
except KeyError:
raise KeyError("No field named %r in %r" % (name, self._numbers.keys()))
def number_to_name(self, number):
"""Given a field number, returns the field's name.
"""
return self._names[number]
def has_vectored_fields(self):
"""Returns True if any of the fields in this schema store term vectors.
"""
return any(ftype.vector for ftype in self._by_number)
def vectored_fields(self):
"""Returns a list of field numbers corresponding to the fields that are
vectored.
"""
return [i for i, ftype in enumerate(self._by_number) if ftype.vector]
def scorable_fields(self):
"""Returns a list of field numbers corresponding to the fields that
store length information.
"""
return [i for i, field in enumerate(self) if field.scorable]
def stored_fields(self):
"""Returns a list of field numbers corresponding to the fields that are stored.
"""
return [i for i, field in enumerate(self) if field.stored]
def stored_field_names(self):
"""Returns the names, in order, of fields that are stored."""
bn = self._by_name
return [name for name in self._names if bn[name].stored]
def analyzer(self, fieldname):
"""Returns the content analyzer for the given fieldname, or None if
the field has no analyzer
"""
field = self[fieldname]
if field.format and field.format.analyzer:
return field.format.analyzer
```
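A quick sketch tying the field types and Schema together, assuming the module above is importable as whoosh.fields (Whoosh 0.3.x). It only constructs a schema and inspects it.
```python
from whoosh.fields import ID, KEYWORD, STORED, TEXT, Schema

# Build a schema as described in the Schema docstring above; FieldType
# subclasses passed bare are instantiated with their default constructors.
schema = Schema(path=ID(stored=True, unique=True),
                title=TEXT(stored=True),
                content=TEXT,
                tags=KEYWORD(stored=True, commas=True),
                raw=STORED)

print(schema.field_names())         # fields are added in sorted name order
print(schema.stored_field_names())  # ['path', 'raw', 'tags', 'title']
print("content" in schema)          # True
```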
#### File: src/whoosh/__init__.py
```python
__version__ = (0, 3, 4)
def versionstring(build=True, extra=True):
"""Returns the version number of Whoosh as a string.
:param build: Whether to include the build number in the string.
:param extra: Whether to include alpha/beta/rc etc. tags. Only
checked if build is True.
:rtype: str
"""
if build:
first = 3
else:
first = 2
s = ".".join(str(n) for n in __version__[:first])
if build and extra:
s += "".join(str(n) for n in __version__[3:])
return s
```
#### File: src/whoosh/searching.py
```python
from __future__ import division
from heapq import heappush, heapreplace
from math import log
import sys, time
from whoosh import classify, query, scoring
from whoosh.scoring import Sorter, FieldSorter
from whoosh.support.bitvector import BitVector
if sys.platform == 'win32':
now = time.clock
else:
now = time.time
# Searcher class
class Searcher(object):
"""Wraps an :class:`~whoosh.reading.IndexReader` object and provides methods
for searching the index.
"""
def __init__(self, ixreader, weighting = scoring.BM25F):
"""
:param ixreader: An :class:`~whoosh.reading.IndexReader` object for
the index to search.
:param weighting: A :class:`whoosh.scoring.Weighting` object to use to
score found documents.
"""
self.ixreader = ixreader
# Copy attributes/methods from wrapped reader
for name in ("stored_fields", "postings", "vector", "vector_as", "schema"):
setattr(self, name, getattr(ixreader, name))
if type(weighting) is type:
self.weighting = weighting()
else:
self.weighting = weighting
self.is_closed = False
self._idf_cache = {}
#def __del__(self):
# if hasattr(self, "is_closed") and not self.is_closed:
# self.close()
def close(self):
self.ixreader.close()
self.is_closed = True
def reader(self):
"""Returns the underlying :class:`~whoosh.reading.IndexReader`."""
return self.ixreader
def idf(self, fieldid, text):
"""Calculates the Inverse Document Frequency of the
current term. Subclasses may want to override this.
"""
fieldnum = self.fieldname_to_num(fieldid)
cache = self._idf_cache
term = (fieldnum, text)
if term in cache: return cache[term]
df = self.ixreader.doc_frequency(fieldnum, text)
idf = log(self.ixreader.doc_count_all() / (df + 1)) + 1.0
cache[term] = idf
return idf
def document(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
This method is equivalent to::
searcher.stored_fields(searcher.document_number(<keyword args>))
Where Searcher.documents() returns a generator, this function returns
either a dictionary or None. Use it when you assume the given keyword
arguments either match zero or one documents (i.e. at least one of the
fields is a unique key).
>>> stored_fields = searcher.document(path=u"/a/b")
>>> if stored_fields:
... print stored_fields['title']
... else:
... print "There is no document with the path /a/b"
"""
for p in self.documents(**kw):
return p
def documents(self, **kw):
"""Convenience method returns the stored fields of a document
matching the given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
Returns a generator of dictionaries containing the
stored fields of any documents matching the keyword arguments.
>>> for stored_fields in searcher.documents(emailto=u"<EMAIL>"):
... print "Email subject:", stored_fields['subject']
"""
ixreader = self.ixreader
return (ixreader.stored_fields(docnum) for docnum in self.document_numbers(**kw))
def document_number(self, **kw):
"""Returns the document number of the document matching the
given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
>>> docnum = searcher.document_number(path=u"/a/b")
Where Searcher.document_numbers() returns a generator, this function returns
either an int or None. Use it when you assume the given keyword arguments
either match zero or one documents (i.e. at least one of the fields is a
unique key).
:rtype: int
"""
for docnum in self.document_numbers(**kw):
return docnum
def document_numbers(self, **kw):
"""Returns a generator of the document numbers for documents
matching the given keyword arguments, where the keyword keys are
field names and the values are terms that must appear in the field.
>>> docnums = list(searcher.document_numbers(emailto=u"<EMAIL>"))
"""
q = query.And([query.Term(k, v) for k, v in kw.iteritems()])
q = q.normalize()
if q:
return q.docs(self)
def key_terms(self, docnums, fieldname, numterms = 5,
model = classify.Bo1Model, normalize = True):
"""Returns the 'numterms' most important terms from the documents listed
(by number) in 'docnums'. You can get document numbers for the documents
        you're interested in with the document_number() and document_numbers() methods.
>>> docnum = searcher.document_number(path=u"/a/b")
>>> keywords = list(searcher.key_terms([docnum], "content"))
"Most important" is generally defined as terms that occur
frequently in the top hits but relatively infrequently in the collection as
a whole.
:param fieldname: Look at the terms in this field. This field must store vectors.
:param docnums: A sequence of document numbers specifying which documents to
extract key terms from.
:param numterms: Return this number of important terms.
:param model: The classify.ExpansionModel to use. See the classify module.
"""
ixreader = self.ixreader
fieldnum = self.fieldname_to_num(fieldname)
expander = classify.Expander(self, fieldname, model = model)
for docnum in docnums:
            expander.add(ixreader.vector_as("weight", docnum, fieldnum))
return expander.expanded_terms(numterms, normalize = normalize)
def find(self, defaultfield, querystring, limit = 5000, sortedby = None, reverse = False):
"""Parses the query string using :class:`whoosh.qparser.QueryParser` and runs
the parsed query, returning a Results object.
:param defaultfield: the name of the field to search in for terms in the query
string that aren't qualified with an explicit field.
:param querystring: a unicode string containing the unparsed query.
:param limit: the maximum number of documents to score. If you're only interested in
the top N documents, you can set limit=N to limit the scoring for a faster
search.
:param sortedby: if this parameter is not None, the results are sorted instead of scored.
If this value is a string, the results are sorted by the field named in the string.
If this value is a list or tuple, it is assumed to be a sequence of strings and the
results are sorted by the fieldnames in the sequence. Otherwise 'sortedby' should be
a scoring.Sorter object.
The fields you want to sort by must be indexed.
For example, to sort the results by the 'path' field::
searcher.find(q, sortedby = "path")
To sort the results by the 'path' field and then the 'category' field::
searcher.find(q, sortedby = ("path", "category"))
To use a sorting object::
searcher.find(q, sortedby = scoring.FieldSorter("path", key=mykeyfn))
Using a string or tuple simply instantiates a :class:`whoosh.scoring.FieldSorter`
or :class:`whoosh.scoring.MultiFieldSorter` object for you. To get a custom sort
order, instantiate your own ``FieldSorter`` with a ``key`` argument, or write
a custom :class:`whoosh.scoring.Sorter` class.
FieldSorter and MultiFieldSorter cache the document order, using 4 bytes times
the number of documents in the index, and taking time to cache. To increase
performance, instantiate your own sorter and re-use it (but remember you need
to recreate it if the index changes).
:param reverse: if ``sortedby`` is not None, this reverses the direction of the sort.
:rtype: :class:`Results`
"""
from qparser import QueryParser
qp = QueryParser(defaultfield)
q = qp.parse(querystring)
return self.search(q, limit=limit, sortedby=sortedby, reverse=reverse)
def search(self, query, limit = 5000, sortedby = None, reverse = False):
"""Runs the query represented by the query object and returns a Results object.
See the help for :meth:`~Searcher.find` for information on the parameters.
:param query: a :class:`whoosh.query.Query` object.
:rtype: :class:`Results`
"""
ixreader = self.ixreader
t = now()
if sortedby is not None:
if isinstance(sortedby, basestring):
sorter = scoring.FieldSorter(sortedby)
elif isinstance(sortedby, (list, tuple)):
sorter = scoring.MultiFieldSorter([FieldSorter(fn) for fn in sortedby])
elif isinstance(sortedby, Sorter):
sorter = sortedby
else:
raise ValueError("sortedby argument must be a string, list, or Sorter (%r)" % sortedby)
scored_list = sorter.order(self, query.docs(self), reverse = reverse)
scores = None
docvector = BitVector(ixreader.doc_count_all(), source = scored_list)
if len(scored_list) > limit:
scored_list = list(scored_list)[:limit]
else:
# Sort by scores
topdocs = TopDocs(limit, ixreader.doc_count_all())
final = self.weighting.final
topdocs.add_all((docnum, final(self, docnum, score))
for docnum, score in query.doc_scores(self))
best = topdocs.best()
if best:
# topdocs.best() returns a list like
# [(docnum, score), (docnum, score), ... ]
# This unpacks that into two lists: docnums and scores
scored_list, scores = zip(*topdocs.best())
else:
scored_list = []
scores = []
docvector = topdocs.docs
t = now() - t
return Results(self, query, scored_list, docvector, runtime = t, scores = scores)
def fieldname_to_num(self, fieldid):
"""Returns the field number of the given field name.
"""
return self.schema.to_number(fieldid)
def field(self, fieldid):
"""Returns the :class:`whoosh.fields.Field` object for the given field name.
"""
return self.schema[fieldid]
class TopDocs(object):
"""This is like a list that only remembers the top N values that are added
to it. This increases efficiency when you only want the top N values, since
you don't have to sort most of the values (once the object reaches capacity
and the next item to consider has a lower score than the lowest item in the
collection, you can just throw it away).
The reason we use this instead of heapq.nlargest is this object keeps
track of all docnums that were added, even if they're not in the "top N".
"""
def __init__(self, capacity, max_doc, docvector = None):
self.capacity = capacity
self.docs = docvector or BitVector(max_doc)
self.heap = []
self._total = 0
def __len__(self):
        return len(self.heap)
def add_all(self, sequence):
"""Adds a sequence of (item, score) pairs.
"""
heap = self.heap
docs = self.docs
capacity = self.capacity
subtotal = 0
for docnum, score in sequence:
docs.set(docnum)
subtotal += 1
if len(heap) >= capacity:
if score <= heap[0][0]:
continue
else:
heapreplace(heap, (score, docnum))
else:
heappush(heap, (score, docnum))
self._total += subtotal
def total(self):
"""Returns the total number of documents added so far.
"""
return self._total
def best(self):
"""Returns the "top N" items. Note that this call
involves sorting and reversing the internal queue, so you may
want to cache the results rather than calling this method
multiple times.
"""
        # Sort the heap by score (highest first) and return (item, score) pairs
        return [(item, score) for score, item in reversed(sorted(self.heap))]
class Results(object):
"""
This object is not instantiated by the user; it is returned by a Searcher.
This object represents the results of a search query. You can mostly
use it as if it was a list of dictionaries, where each dictionary
is the stored fields of the document at that position in the results.
"""
def __init__(self, searcher, query, scored_list, docvector,
scores = None, runtime = 0):
"""
:param searcher: the :class:`Searcher` object that produced these
results.
:param query: the original query that created these results.
:param scored_list: an ordered list of document numbers
representing the 'hits'.
:param docvector: a BitVector object where the indices are
document numbers and an 'on' bit means that document is
present in the results.
:param scores: a list of scores corresponding to the document
numbers in scored_list, or None if no scores are available.
:param runtime: the time it took to run this search.
"""
self.searcher = searcher
self.query = query
self.scored_list = scored_list
self.scores = scores
self.docs = docvector
self.runtime = runtime
def __repr__(self):
return "<%s/%s Results for %r runtime=%s>" % (len(self), self.docs.count(),
self.query,
self.runtime)
def __len__(self):
"""Returns the TOTAL number of documents found by this search. Note this
may be greater than the number of ranked documents.
"""
return self.docs.count()
def __getitem__(self, n):
stored_fields = self.searcher.stored_fields
if isinstance(n, slice):
return [stored_fields(i) for i in self.scored_list.__getitem__(n)]
else:
return stored_fields(self.scored_list[n])
def __iter__(self):
"""Yields the stored fields of each result document in ranked order.
"""
stored_fields = self.searcher.stored_fields
for docnum in self.scored_list:
yield stored_fields(docnum)
def copy(self):
"""Returns a copy of this results object.
"""
        # Scores might be None, so only copy it if it's a list
scores = self.scores
if isinstance(scores, list):
scores = scores[:]
# Scored_list might be a tuple, so only copy it if it's a list
scored_list = self.scored_list
if isinstance(scored_list, list):
scored_list = scored_list[:]
return self.__class__(self.searcher, self.query,
scored_list=scored_list, docvector=self.docs.copy(),
scores=scores, runtime=self.runtime)
def score(self, n):
"""Returns the score for the document at the Nth position in the
list of results. If the search was not scored, returns None."""
if self.scores:
return self.scores[n]
else:
return None
def scored_length(self):
"""Returns the number of RANKED documents. Note this may be fewer
than the total number of documents the query matched, if you used
the 'limit' keyword of the Searcher.search() method to limit the
scoring."""
return len(self.scored_list)
def docnum(self, n):
"""Returns the document number of the result at position n in the
list of ranked documents. Use __getitem__ (i.e. Results[n]) to
get the stored fields directly.
"""
return self.scored_list[n]
def key_terms(self, fieldname, docs = 10, numterms = 5,
model = classify.Bo1Model, normalize = True):
"""Returns the 'numterms' most important terms from the top 'numdocs' documents
in these results. "Most important" is generally defined as terms that occur
frequently in the top hits but relatively infrequently in the collection as
a whole.
:param fieldname: Look at the terms in this field. This field must store vectors.
:param docs: Look at this many of the top documents of the results.
        :param numterms: Return this number of important terms.
:param model: The classify.ExpansionModel to use. See the classify module.
:returns: list of unicode strings.
"""
docs = min(docs, self.scored_length())
if docs <= 0: return
reader = self.searcher.reader()
fieldnum = self.searcher.fieldname_to_num(fieldname)
expander = classify.Expander(reader, fieldname, model = model)
for docnum in self.scored_list[:docs]:
expander.add(reader.vector_as("weight", docnum, fieldnum))
return expander.expanded_terms(numterms, normalize = normalize)
def extend(self, results):
"""Appends hits from 'results' (that are not already in this
results object) to the end of these results.
:param results: another results object.
"""
docs = self.docs
self.scored_list.extend(docnum for docnum in results.scored_list
if docnum not in docs)
self.docs = docs | results.docs
# TODO: merge the query terms?
def filter(self, results):
"""Removes any hits that are not also in the other results object.
"""
docs = self.docs & results.docs
self.scored_list = [docnum for docnum in self.scored_list if docnum in docs]
self.docs = docs
def upgrade(self, results, reverse = False):
"""Re-sorts the results so any hits that are also in 'results' appear before
hits not in 'results', otherwise keeping their current relative positions.
This does not add the documents in the other results object to this one.
:param results: another results object.
:param reverse: if True, lower the position of hits in the other
results object instead of raising them.
"""
scored_list = self.scored_list
otherdocs = results.docs
arein = [docnum for docnum in scored_list if docnum in otherdocs]
notin = [docnum for docnum in scored_list if docnum not in otherdocs]
if reverse:
self.scored_list = notin + arein
else:
self.scored_list = arein + notin
def upgrade_and_extend(self, results):
"""Combines the effects of extend() and increase(): hits that are
also in 'results' are raised. Then any hits from 'results' that are
not in this results object are appended to the end of these
results.
:param results: another results object.
"""
docs = self.docs
otherdocs = results.docs
scored_list = self.scored_list
arein = [docnum for docnum in scored_list if docnum in otherdocs]
notin = [docnum for docnum in scored_list if docnum not in otherdocs]
other = [docnum for docnum in results.scored_list if docnum not in docs]
self.docs = docs | otherdocs
self.scored_list = arein + notin + other
# Utilities
class Paginator(object):
"""
Helper class that divides search results into pages, for use in
displaying the results.
"""
def __init__(self, results, perpage = 10):
"""
:param results: the searching.Results object from a search.
:param perpage: the number of hits on each page.
"""
self.results = results
self.perpage = perpage
def from_to(self, pagenum):
"""Returns the lowest and highest indices on the given
page. For example, with 10 results per page, from_to(1)
would return (0, 9).
"""
lr = len(self.results)
perpage = self.perpage
lower = (pagenum - 1) * perpage
upper = lower + perpage
if upper > lr:
upper = lr
return (lower, upper)
def pagecount(self):
"""Returns the total number of pages of results.
"""
        # Ceiling division, so an exact multiple of perpage doesn't add an empty page
        return (len(self.results) + self.perpage - 1) // self.perpage
def page(self, pagenum):
"""Returns a list of the stored fields for the documents
on the given page.
"""
lower, upper = self.from_to(pagenum)
return self.results[lower:upper]
if __name__ == '__main__':
pass
```
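The TopDocs docstring above describes a bounded "keep only the best N" collection. As a rough standalone sketch of that heap pattern (illustrative only, not part of the Whoosh API, and without the BitVector bookkeeping TopDocs also does):
```python
# Standalone sketch of the bounded top-N heap pattern that TopDocs implements.
from heapq import heappush, heapreplace

def top_n(pairs, n):
    """Keep the n highest-scoring (docnum, score) pairs from an iterable."""
    heap = []  # min-heap of (score, docnum); heap[0] is the weakest current keeper
    for docnum, score in pairs:
        if len(heap) < n:
            heappush(heap, (score, docnum))
        elif score > heap[0][0]:
            heapreplace(heap, (score, docnum))  # evict the current minimum
    # Highest score first, mirroring TopDocs.best()
    return [(docnum, score) for score, docnum in sorted(heap, reverse=True)]

print(top_n([(0, 1.5), (1, 0.2), (2, 3.0), (3, 2.2)], 2))  # [(2, 3.0), (3, 2.2)]
```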
#### File: src/whoosh/util.py
```python
import re
from collections import deque, defaultdict
from functools import wraps
from struct import pack, unpack
from time import time, clock
# Functions
# Varint cache
# Build a cache of the varint byte sequences for the first
# N integers, so we don't have to constantly recalculate them
# on the fly. This makes a small but noticeable difference.
def _varint(i):
s = ""
while (i & ~0x7F) != 0:
s += chr((i & 0x7F) | 0x80)
i = i >> 7
s += chr(i)
return s
_varint_cache_size = 512
_varint_cache = []
for i in xrange(0, _varint_cache_size):
_varint_cache.append(_varint(i))
_varint_cache = tuple(_varint_cache)
def varint(i):
"""Encodes the given integer into a string of the minimum number
of bytes.
"""
if i < len(_varint_cache):
return _varint_cache[i]
return _varint(i)
def read_varint(readfn):
"""
Reads a variable-length encoded integer.
:param readfn: a callable that reads a given number of bytes,
like file.read().
"""
b = ord(readfn(1))
i = b & 0x7F
shift = 7
while b & 0x80 != 0:
b = ord(readfn(1))
i |= (b & 0x7F) << shift
shift += 7
return i
_fib_cache = {}
def fib(n):
"""Returns the nth value in the Fibonacci sequence."""
if n <= 2: return n
if n in _fib_cache: return _fib_cache[n]
result = fib(n - 1) + fib(n - 2)
_fib_cache[n] = result
return result
def float_to_byte(value, mantissabits = 5, zeroexp = 2):
"""Encodes a floating point number in a single byte.
"""
# Assume int size == float size
fzero = (63 - zeroexp) << mantissabits
bits = unpack("i", pack("f", value))[0]
smallfloat = bits >> (24 - mantissabits)
if smallfloat < fzero:
# Map negative numbers and 0 to 0
# Map underflow to next smallest non-zero number
if bits <= 0:
return chr(0)
else:
return chr(1)
elif smallfloat >= fzero + 0x100:
# Map overflow to largest number
return chr(255)
else:
return chr(smallfloat - fzero)
def byte_to_float(b, mantissabits = 5, zeroexp = 2):
"""Decodes a floating point number stored in a single
byte.
"""
b = ord(b)
if b == 0:
return 0.0
bits = (b & 0xff) << (24 - mantissabits)
bits += (63 - zeroexp) << 24
return unpack("f", pack("i", bits))[0]
def first_diff(a, b):
"""Returns the position of the first differing character in the strings
a and b. For example, first_diff('render', 'rending') == 4. This
function limits the return value to 255 so the difference can be encoded
in a single byte.
"""
i = -1
for i in xrange(0, len(a)):
        if a[i] != b[i]:
            return i
        if i == 255: return i
    return i + 1  # no mismatch within a: all of a is a shared prefix
def prefix_encode(a, b):
"""Compresses string b as an integer (encoded in a byte) representing
the prefix it shares with a, followed by the suffix encoded as UTF-8.
"""
i = first_diff(a, b)
return chr(i) + b[i:].encode("utf8")
def prefix_encode_all(ls):
"""Compresses the given list of (unicode) strings by storing each string
(except the first one) as an integer (encoded in a byte) representing
the prefix it shares with its predecessor, followed by the suffix encoded
as UTF-8.
"""
last = u''
for w in ls:
i = first_diff(last, w)
yield chr(i) + w[i:].encode("utf8")
last = w
def prefix_decode_all(ls):
"""Decompresses a list of strings compressed by prefix_encode().
"""
last = u''
for w in ls:
i = ord(w[0])
decoded = last[:i] + w[1:].decode("utf8")
yield decoded
last = decoded
_nkre = re.compile(r"\D+|\d+", re.UNICODE)
def _nkconv(i):
try:
return int(i)
except ValueError:
return i.lower()
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally"
(i.e., ``name5`` will come before ``name10`` and ``1`` will come
before ``A``). This function is designed to be used as the ``key``
argument to sorting functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
class ClosableMixin(object):
"""Mix-in for classes with a close() method to allow them to be
used as a context manager.
"""
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def protected(func):
"""Decorator for storage-access methods. This decorator
(a) checks if the object has already been closed, and
(b) synchronizes on a threading lock. The parent object must
have 'is_closed' and '_sync_lock' attributes.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.is_closed:
raise Exception("%r has been closed" % self)
if self._sync_lock.acquire(False):
try:
return func(self, *args, **kwargs)
finally:
self._sync_lock.release()
else:
raise Exception("Could not acquire sync lock")
return wrapper
def lru_cache(size):
"""Decorator that adds a least-recently-accessed cache to a method.
:param size: the maximum number of items to keep in the cache.
"""
def decorate_function(func):
prefix = "_%s_" % func.__name__
@wraps(func)
def wrapper(self, *args):
if not hasattr(self, prefix + "cache"):
cache = {}
queue = deque()
refcount = defaultdict(int)
setattr(self, prefix + "cache", cache)
setattr(self, prefix + "queue", queue)
setattr(self, prefix + "refcount", refcount)
else:
cache = getattr(self, prefix + "cache")
queue = getattr(self, prefix + "queue")
refcount = getattr(self, prefix + "refcount")
qpend = queue.append
qpop = queue.popleft
# Get cache entry or compute if not found
try:
result = cache[args]
except KeyError:
result = cache[args] = func(self, *args)
# Record that this key was recently accessed
qpend(args)
refcount[args] += 1
# Purge least recently accessed cache contents
while len(cache) > size:
k = qpop()
refcount[k] -= 1
if not refcount[k]:
del cache[k]
del refcount[k]
# Periodically compact the queue by removing duplicate keys
if len(queue) > size * 4:
for _ in xrange(len(queue)):
k = qpop()
if refcount[k] == 1:
qpend(k)
else:
refcount[k] -= 1
#assert len(queue) == len(cache) == len(refcount) == sum(refcount.itervalues())
return result
return wrapper
return decorate_function
```
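A small round-trip check for the encoding helpers above (a sketch only: it assumes Python 2, matching the module's use of byte strings and xrange, that the module imports as `whoosh.util`, and that the `first_diff` index fix above is applied):
```python
# Round-trip sanity checks for varint/read_varint and the prefix coders above.
from StringIO import StringIO
from whoosh.util import varint, read_varint, prefix_encode_all, prefix_decode_all

for n in (5, 127, 128, 300, 70000):
    # read_varint takes a "read n bytes" callable, e.g. a file-like object's read
    assert read_varint(StringIO(varint(n)).read) == n

words = [u"render", u"rendering", u"rending"]
assert list(prefix_decode_all(list(prefix_encode_all(words)))) == words
```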
#### File: Whoosh/tests/test_fields.py
```python
import unittest
from whoosh import fields, index
class TestSchema(unittest.TestCase):
def test_schema_eq(self):
a = fields.Schema()
b = fields.Schema()
self.assertEqual(a, b)
a = fields.Schema(id=fields.ID)
b = fields.Schema(id=fields.ID)
self.assertEqual(a[0], b[0])
self.assertEqual(a, b)
c = fields.Schema(id=fields.TEXT)
self.assertNotEqual(a, c)
def test_creation1(self):
s = fields.Schema()
s.add("content", fields.TEXT(phrase = True))
s.add("title", fields.TEXT(stored = True))
s.add("path", fields.ID(stored = True))
s.add("tags", fields.KEYWORD(stored = True))
s.add("quick", fields.NGRAM)
s.add("note", fields.STORED)
self.assertEqual(s.field_names(), ["content", "title", "path", "tags", "quick", "note"])
self.assert_("content" in s)
self.assertFalse("buzz" in s)
self.assert_(isinstance(s["tags"], fields.KEYWORD))
self.assert_(isinstance(s[3], fields.KEYWORD))
self.assert_(s[0] is s.field_by_number(0))
self.assert_(s["title"] is s.field_by_name("title"))
self.assert_(s.name_to_number("path") == 2)
self.assert_(s.number_to_name(4) == "quick")
self.assertEqual(s.scorable_fields(), [0, 1, 4])
def test_creation2(self):
s = fields.Schema(content = fields.TEXT(phrase = True, field_boost=2.0),
title = fields.TEXT(stored = True),
path = fields.ID(stored = True),
tags = fields.KEYWORD(stored = True),
quick = fields.NGRAM)
if __name__ == '__main__':
unittest.main()
```
#### File: Whoosh/tests/test_reading.py
```python
import unittest
from whoosh import analysis, fields
from whoosh.filedb.filestore import RamStorage
from whoosh.filedb.filewriting import NO_MERGE
class TestReading(unittest.TestCase):
def _create_index(self):
s = fields.Schema(f1 = fields.KEYWORD(stored = True),
f2 = fields.KEYWORD,
f3 = fields.KEYWORD)
st = RamStorage()
ix = st.create_index(s)
return ix
def _one_segment_index(self):
ix = self._create_index()
w = ix.writer()
w.add_document(f1 = u"A B C", f2 = u"1 2 3", f3 = u"X Y Z")
w.add_document(f1 = u"D E F", f2 = u"4 5 6", f3 = u"Q R S")
w.add_document(f1 = u"A E C", f2 = u"1 4 6", f3 = u"X Q S")
w.add_document(f1 = u"A A A", f2 = u"2 3 5", f3 = u"Y R Z")
w.add_document(f1 = u"A B", f2 = u"1 2", f3 = u"X Y")
w.commit()
return ix
def _multi_segment_index(self):
ix = self._create_index()
w = ix.writer()
w.add_document(f1 = u"A B C", f2 = u"1 2 3", f3 = u"X Y Z")
w.add_document(f1 = u"D E F", f2 = u"4 5 6", f3 = u"Q R S")
w.commit()
w = ix.writer()
w.add_document(f1 = u"A E C", f2 = u"1 4 6", f3 = u"X Q S")
w.add_document(f1 = u"A A A", f2 = u"2 3 5", f3 = u"Y R Z")
w.commit(NO_MERGE)
w = ix.writer()
w.add_document(f1 = u"A B", f2 = u"1 2", f3 = u"X Y")
w.commit(NO_MERGE)
return ix
def test_readers(self):
target = [(0, u'A', 4, 6), (0, u'B', 2, 2), (0, u'C', 2, 2),
(0, u'D', 1, 1), (0, u'E', 2, 2), (0, u'F', 1, 1),
(1, u'1', 3, 3), (1, u'2', 3, 3), (1, u'3', 2, 2),
(1, u'4', 2, 2), (1, u'5', 2, 2), (1, u'6', 2, 2),
(2, u'Q', 2, 2), (2, u'R', 2, 2), (2, u'S', 2, 2),
(2, u'X', 3, 3), (2, u'Y', 3, 3), (2, u'Z', 2, 2)]
stored = [{"f1": "A B C"}, {"f1": "D E F"}, {"f1": "A E C"},
{"f1": "A A A"}, {"f1": "A B"}]
def t(ix):
r = ix.reader()
self.assertEqual(list(r.all_stored_fields()), stored)
self.assertEqual(list(r), target)
ix = self._one_segment_index()
self.assertEqual(len(ix.segments), 1)
t(ix)
ix = self._multi_segment_index()
self.assertEqual(len(ix.segments), 3)
t(ix)
def test_term_inspection(self):
schema = fields.Schema(title=fields.TEXT(stored=True),
content=fields.TEXT)
st = RamStorage()
ix = st.create_index(schema)
writer = ix.writer()
writer.add_document(title=u"My document",
content=u"AA AA BB BB CC AA AA AA BB BB CC DD EE EE")
writer.add_document(title=u"My other document",
content=u"AA AB BB CC EE EE AX AX DD")
writer.commit()
reader = ix.reader()
self.assertEqual(list(reader.lexicon("content")),
[u'aa', u'ab', u'ax', u'bb', u'cc', u'dd', u'ee'])
self.assertEqual(list(reader.expand_prefix("content", "a")),
[u'aa', u'ab', u'ax'])
self.assertEqual(list(reader.all_terms()),
[('content', u'aa'), ('content', u'ab'), ('content', u'ax'),
('content', u'bb'), ('content', u'cc'), ('content', u'dd'),
('content', u'ee'), ('title', u'document'), ('title', u'my'),
('title', u'other')])
# (text, doc_freq, index_freq)
self.assertEqual(list(reader.iter_field("content")),
[(u'aa', 2, 6), (u'ab', 1, 1), (u'ax', 1, 2),
(u'bb', 2, 5), (u'cc', 2, 3), (u'dd', 2, 2),
(u'ee', 2, 4)])
self.assertEqual(list(reader.iter_field("content", prefix="c")),
[(u'cc', 2, 3), (u'dd', 2, 2), (u'ee', 2, 4)])
self.assertEqual(list(reader.most_frequent_terms("content")),
[(6, u'aa'), (5, u'bb'), (4, u'ee'), (3, u'cc'), (2, u'dd')])
self.assertEqual(list(reader.most_frequent_terms("content", prefix="a")),
[(6, u'aa'), (2, u'ax'), (1, u'ab')])
def test_vector_postings(self):
s = fields.Schema(id=fields.ID(stored=True, unique=True),
content=fields.TEXT(vector=fields.Positions(analyzer=analysis.StandardAnalyzer())))
st = RamStorage()
ix = st.create_index(s)
writer = ix.writer()
writer.add_document(id=u'1', content=u'the quick brown fox jumped over the lazy dogs')
writer.commit()
r = ix.reader()
terms = list(r.vector_as("weight", 0, 0))
self.assertEqual(terms, [(u'brown', 1.0),
(u'dogs', 1.0),
(u'fox', 1.0),
(u'jumped', 1.0),
(u'lazy', 1.0),
(u'over', 1.0),
(u'quick', 1.0),
])
if __name__ == '__main__':
unittest.main()
```
#### File: Whoosh/tests/test_spelling.py
```python
import unittest
from whoosh import index, spelling
from whoosh.filedb.filestore import RamStorage
class TestSpelling(unittest.TestCase):
def test_spelling(self):
st = RamStorage()
sp = spelling.SpellChecker(st, mingram=2)
wordlist = ["render", "animation", "animate", "shader",
"shading", "zebra", "koala", "lamppost",
"ready", "kismet", "reaction", "page",
"delete", "quick", "brown", "fox", "jumped",
"over", "lazy", "dog", "wicked", "erase",
"red", "team", "yellow", "under", "interest",
"open", "print", "acrid", "sear", "deaf",
"feed", "grow", "heal", "jolly", "kilt",
"low", "zone", "xylophone", "crown",
"vale", "brown", "neat", "meat", "reduction",
"blunder", "preaction"]
sp.add_words([unicode(w) for w in wordlist])
sugs = sp.suggest(u"reoction")
self.assertNotEqual(len(sugs), 0)
self.assertEqual(sugs, [u"reaction", u"reduction", u"preaction"])
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JeremXu/MXNet-Deep-Learning-in-Action",
"score": 2
}
|
#### File: MXNet-Deep-Learning-in-Action/chapter10-segmentation/train.py
```python
import argparse
import os
import numpy as np
import logging
from data.voc import VocSegData
from symbol.FCN import *
from utils.mean_IoU import MeanIoU
from utils.pixel_accuracy import PixelAccuracy
from utils.pixel_ce import PixelCrossEntropy
def init_deconv(args, fcnxs, fcnxs_args):
arr_name = fcnxs.list_arguments()
shape_dic = {}
if args.model == 'fcn32s':
bigscore_kernel_size = 64
init_layer = ["bigscore_weight"]
elif args.model == 'fcn16s':
bigscore_kernel_size = 32
init_layer = ["bigscore_weight", "score2_weight"]
else:
bigscore_kernel_size = 16
init_layer = ["bigscore_weight", "score4_weight"]
shape_dic["bigscore_weight"] = {"in_channels": 21, "out_channels": 21,
"kernel_size": bigscore_kernel_size}
shape_dic["score2_weight"] = {"in_channels": 21, "out_channels": 21,
"kernel_size": 4}
shape_dic["score4_weight"] = {"in_channels": 21, "out_channels": 21,
"kernel_size": 4}
for arr in arr_name:
if arr in init_layer:
kernel_size = shape_dic[arr]["kernel_size"]
in_channels = shape_dic[arr]["in_channels"]
out_channels = shape_dic[arr]["out_channels"]
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1-abs(og[0]-center)/factor)*(1-abs(og[1]-center)/factor)
weight = np.zeros(shape=(in_channels, out_channels,
kernel_size, kernel_size),
dtype='float32')
weight[range(in_channels), range(out_channels), :, :] = filt
fcnxs_args[arr] = mx.nd.array(weight, dtype='float32')
return fcnxs_args
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, help='batch size for train', default=1)
parser.add_argument('--lr', type=float, help='learning rate', default=0.0001)
parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
parser.add_argument('--gpus', type=str, default='0')
parser.add_argument('--num-classes', type=int, help="number of classes", default=21)
parser.add_argument('--data-dir', type=str, help="path for data", default='data/VOC2012')
parser.add_argument('--model', type=str, help="type of FCN", default='fcn32s')
parser.add_argument('--prefix', type=str, help="pretrain model", default='model/VGG_FC_ILSVRC_16_layers')
parser.add_argument('--pretrain-epoch', type=int, help="index of pretrain model", default=74)
parser.add_argument('--begin-epoch', type=int, help="begin epoch fro training", default=0)
parser.add_argument('--num-epoch', type=int, help="number of training epoch", default=50)
parser.add_argument('--rgb-mean', type=tuple, help="tuple of RGB mean", default=(123.68, 116.779, 103.939))
parser.add_argument('--save-result', type=str, default='output/FCN32s/')
parser.add_argument('--num-examples', type=int, default=1464)
parser.add_argument('--step', type=str, default='40')
    parser.add_argument('--factor', type=float, default=0.2)
args = parser.parse_args()
return args
def multi_factor_scheduler(args, epoch_size):
    step = [int(step_i.strip()) for step_i in args.step.split(",")]
step_bs = [epoch_size * (x - args.begin_epoch) for x in step
if x - args.begin_epoch > 0]
if step_bs:
return mx.lr_scheduler.MultiFactorScheduler(step=step_bs,
factor=args.factor)
return None
def main():
args = parse_arguments()
if not os.path.exists(args.save_result):
os.makedirs(args.save_result)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
file_handler = logging.FileHandler(args.save_result + 'train.log')
logger.addHandler(file_handler)
logger.info(args)
if args.gpus == '':
ctx = mx.cpu()
else:
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
if args.model == "fcn32s":
fcn = symbol_fcn32s(num_classes=args.num_classes)
elif args.model == "fcn16s":
fcn = symbol_fcn16s(num_classes=args.num_classes)
elif args.model == "fcn8s":
fcn = symbol_fcn8s(num_classes=args.num_classes)
    else:
        print("Please set model as fcn32s or fcn16s or fcn8s.")
        return
_, arg_params, aux_params = mx.model.load_checkpoint(args.prefix,
args.pretrain_epoch)
arg_params = init_deconv(args, fcn, arg_params)
train_data = VocSegData(data_dir=args.data_dir,
lst_name="train.lst",
rgb_mean=args.rgb_mean)
val_data = VocSegData(data_dir=args.data_dir,
lst_name="val.lst",
rgb_mean=args.rgb_mean)
epoch_size = max(int(args.num_examples / args.batch_size), 1)
step = [int(step_i.strip()) for step_i in args.step.split(",")]
step_bs = [epoch_size * (x - args.begin_epoch) for x in step
if x - args.begin_epoch > 0]
if step_bs:
lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(step=step_bs,
factor=args.factor)
else:
lr_scheduler = None
optimizer_params = {'learning_rate': args.lr,
'momentum': args.mom,
'wd': args.wd,
'lr_scheduler': lr_scheduler}
initializer = mx.init.Xavier(rnd_type='gaussian',
factor_type="in",
magnitude=2)
model = mx.mod.Module(context=ctx, symbol=fcn)
batch_callback = mx.callback.Speedometer(args.batch_size, 500)
epoch_callback = mx.callback.do_checkpoint(args.save_result + args.model,
period=2)
eval_metric = mx.metric.CompositeEvalMetric()
eval_metric.add(PixelCrossEntropy())
val_metric = mx.metric.CompositeEvalMetric()
val_metric.add(PixelAccuracy())
val_metric.add(MeanIoU())
model.fit(train_data=train_data,
eval_data=val_data,
begin_epoch=args.begin_epoch,
num_epoch=args.num_epoch,
eval_metric=eval_metric,
validation_metric=val_metric,
optimizer='sgd',
optimizer_params=optimizer_params,
arg_params=arg_params,
aux_params=aux_params,
initializer=initializer,
allow_missing=True,
batch_end_callback=batch_callback,
epoch_end_callback=epoch_callback)
if __name__ == '__main__':
main()
```
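The loop in `init_deconv` above fills the deconvolution weights with a bilinear interpolation kernel. A minimal numpy-only sketch of that same formula, with an arbitrary example kernel size, just to see the filter it produces:
```python
# Reproduces the bilinear filter formula from init_deconv() for a single kernel.
import numpy as np

def bilinear_filter(kernel_size):
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)

filt = bilinear_filter(4)   # example size; fcn32s above uses 64, fcn16s 32, fcn8s 16
print(filt)
assert np.allclose(filt, filt.T)  # symmetric, as expected for bilinear upsampling
```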
#### File: 11.1-GluonBasis/11.1.2-nn-API/nn_block.py
```python
import mxnet as mx
from mxnet.gluon import nn
class Bottleneck(nn.Block):
def __init__(self, **kwargs):
super(Bottleneck, self).__init__(**kwargs)
self.body = nn.Sequential()
self.body.add(nn.Conv2D(channels=64, kernel_size=1),
nn.BatchNorm(),
nn.Activation(activation='relu'),
nn.Conv2D(channels=64, kernel_size=3, padding=1),
nn.BatchNorm(),
nn.Activation(activation='relu'),
nn.Conv2D(channels=256, kernel_size=1),
nn.BatchNorm())
self.relu = nn.Activation(activation='relu')
def forward(self, x):
residual = x
x = self.body(x)
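        # Identity shortcut: the element-wise add below requires the input to already
        # have 256 channels (as in the example data), matching the block's output;
        # a projection convolution would be needed otherwise.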
x = self.relu(x + residual)
return x
net = Bottleneck()
net.initialize()
data = mx.nd.random.uniform(1,5,shape=(2,256,224,224))
output = net(data)
```
#### File: MXNet-Deep-Learning-in-Action/chapter4-toyClassification/test_mnist_code4-2.py
```python
import mxnet as mx
import numpy as np
def load_model(model_prefix, index, context, data_shapes, label_shapes):
sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, index)
model = mx.mod.Module(symbol=sym, context=context)
model.bind(data_shapes=data_shapes, label_shapes=label_shapes,
for_training=False)
model.set_params(arg_params=arg_params, aux_params=aux_params,
allow_missing=True)
return model
def load_data(data_path):
data = mx.image.imread(data_path, flag=0)
cla_cast_aug = mx.image.CastAug()
cla_resize_aug = mx.image.ForceResizeAug(size=[28, 28])
cla_augmenters = [cla_cast_aug, cla_resize_aug]
for aug in cla_augmenters:
data = aug(data)
data = mx.nd.transpose(data, axes=(2, 0, 1))
data = mx.nd.expand_dims(data, axis=0)
data = mx.io.DataBatch([data])
return data
def get_output(model, data):
model.forward(data)
cla_prob = model.get_outputs()[0][0].asnumpy()
cla_label = np.argmax(cla_prob)
return cla_label
if __name__ == '__main__':
model_prefix = "output/LeNet"
index = 10
context = mx.gpu(0)
data_shapes = [('data', (1,1,28,28))]
label_shapes = [('softmax_label', (1,))]
model = load_model(model_prefix, index, context, data_shapes, label_shapes)
data_path = "test_image/test1.png"
data = load_data(data_path)
cla_label = get_output(model, data)
print("Predict result: {}".format(cla_label))
```
#### File: MXNet-Deep-Learning-in-Action/chapter7-trainModel/train_mnist.py
```python
import mxnet as mx
import argparse
import numpy as np
import gzip
import struct
import logging
from custom_metric import *
def get_network(num_classes):
"""
LeNet
"""
data = mx.sym.Variable("data")
conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=6,
name="conv1")
relu1 = mx.sym.Activation(data=conv1, act_type="relu", name="relu1")
pool1 = mx.sym.Pooling(data=relu1, kernel=(2,2), stride=(2,2),
pool_type="max", name="pool1")
conv2 = mx.sym.Convolution(data=pool1, kernel=(5, 5), num_filter=16,
name="conv2")
relu2 = mx.sym.Activation(data=conv2, act_type="relu", name="relu2")
pool2 = mx.sym.Pooling(data=relu2, kernel=(2, 2), stride=(2, 2),
pool_type="max", name="pool2")
fc1 = mx.sym.FullyConnected(data=pool2, num_hidden=120, name="fc1")
relu3 = mx.sym.Activation(data=fc1, act_type="relu", name="relu3")
fc2 = mx.sym.FullyConnected(data=relu3, num_hidden=84, name="fc2")
relu4 = mx.sym.Activation(data=fc2, act_type="relu", name="relu4")
fc3 = mx.sym.FullyConnected(data=relu4, num_hidden=num_classes, name="fc3")
sym = mx.sym.SoftmaxOutput(data=fc3, name="softmax")
return sym
def get_args():
parser = argparse.ArgumentParser(description='score a model on a dataset')
parser.add_argument('--num-classes', type=int, default=10)
parser.add_argument('--gpus', type=str, default='0')
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--num-epoch', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.1, help="learning rate")
parser.add_argument('--save-result', type=str, default='output/')
parser.add_argument('--save-name', type=str, default='LeNet')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
if args.gpus:
context = [mx.gpu(int(index)) for index in
args.gpus.strip().split(",")]
else:
context = mx.cpu()
# get data
train_data = mx.io.MNISTIter(
image='train-images.idx3-ubyte',
label='train-labels.idx1-ubyte',
batch_size=args.batch_size,
shuffle=1)
val_data = mx.io.MNISTIter(
image='t10k-images.idx3-ubyte',
label='t10k-labels.idx1-ubyte',
batch_size=args.batch_size,
shuffle=0)
# get network(symbol)
sym = get_network(num_classes=args.num_classes)
optimizer_params = {'learning_rate': args.lr}
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in",
magnitude=2)
mod = mx.mod.Module(symbol=sym, context=context)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
file_handler = logging.FileHandler('output/train.log')
logger.addHandler(file_handler)
logger.info(args)
checkpoint = mx.callback.do_checkpoint(prefix=args.save_result +
args.save_name, period=20)
batch_callback = mx.callback.Speedometer(args.batch_size, 200)
# metric
eval_metric = mx.metric.CompositeEvalMetric()
eval_metric.add(Recall(name="class0_recall"))
eval_metric.add(['acc','ce'])
mod.fit(train_data=train_data,
eval_data=val_data,
eval_metric = eval_metric,
optimizer_params=optimizer_params,
optimizer='sgd',
batch_end_callback=batch_callback,
initializer=initializer,
num_epoch = args.num_epoch,
epoch_end_callback=checkpoint)
```
#### File: chapter9-objectDetection/9.1-basis/9.1.5-target.py
```python
import mxnet as mx
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def plot_anchors(anchors, img, text, linestyle='-'):
height, width, _ = img.shape
colors = ['r','y','b','c','m']
for num_i in range(anchors.shape[0]):
for index, anchor in enumerate(anchors[num_i,:,:].asnumpy()):
xmin = anchor[0]*width
ymin = anchor[1]*height
xmax = anchor[2]*width
ymax = anchor[3]*height
rect = patches.Rectangle(xy=(xmin,ymin), width=xmax-xmin,
height=ymax-ymin, edgecolor=colors[index],
facecolor='None', linestyle=linestyle,
linewidth=1.5)
ax.text(xmin, ymin, text[index],
bbox=dict(facecolor=colors[index], alpha=0.5))
ax.add_patch(rect)
img = mx.img.imread("target_demo/000001.jpg")
fig,ax = plt.subplots(1)
ax.imshow(img.asnumpy())
ground_truth = mx.nd.array([[[0, 0.136,0.48,0.552,0.742],
[1, 0.023,0.024,0.997,0.996]]])
plot_anchors(anchors=ground_truth[:, :, 1:], img=img,
text=['dog','person'])
anchor = mx.nd.array([[[0.1, 0.3, 0.4, 0.6],
[0.15, 0.1, 0.85, 0.8],
[0.1, 0.2, 0.6, 0.4],
[0.25, 0.5, 0.55, 0.7],
[0.05, 0.08, 0.95, 0.9]]])
plot_anchors(anchors=anchor, img=img, text=['1','2','3','4','5'],
linestyle=':')
plt.savefig("target_demo/anchor_gt.png")
cls_pred = mx.nd.array([[[0.4, 0.3, 0.2, 0.1, 0.1],
[0.6, 0.7, 0.8, 0.9, 0.9]]])
tmp = mx.nd.contrib.MultiBoxTarget(anchor=anchor,
label=ground_truth,
cls_pred=cls_pred,
overlap_threshold=0.5,
ignore_label=-1,
negative_mining_ratio=3,
variances=[0.1,0.1,0.2,0.2])
print("location target: {}".format(tmp[0]))
print("location target mask: {}".format(tmp[1]))
print("classification target: {}".format(tmp[2]))
```
#### File: 9.2-objectDetection/data/dataiter.py
```python
import mxnet as mx
class CustomDataIter(mx.io.DataIter):
def __init__(self, args, is_trainData=False):
self.args = args
data_shape = (3, args.data_shape, args.data_shape)
if is_trainData:
self.data=mx.io.ImageDetRecordIter(
path_imgrec=args.train_rec,
batch_size=args.batch_size,
data_shape=data_shape,
mean_r=123.68,
mean_g=116.779,
mean_b=103.939,
label_pad_width=420,
random_hue_prob=0.5,
max_random_hue=18,
random_saturation_prob=0.5,
max_random_saturation=32,
random_illumination_prob=0.5,
max_random_illumination=32,
random_contrast_prob=0.5,
max_random_contrast=0.5,
rand_pad_prob=0.5,
fill_value=127,
max_pad_scale=4,
rand_crop_prob=0.833333,
max_crop_aspect_ratios=[2.0, 2.0, 2.0, 2.0, 2.0],
max_crop_object_coverages=[1.0, 1.0, 1.0, 1.0, 1.0],
max_crop_overlaps=[1.0, 1.0, 1.0, 1.0, 1.0],
max_crop_sample_coverages=[1.0, 1.0, 1.0, 1.0, 1.0],
max_crop_scales=[1.0, 1.0, 1.0, 1.0, 1.0],
max_crop_trials=[25, 25, 25, 25, 25],
min_crop_aspect_ratios=[0.5, 0.5, 0.5, 0.5, 0.5],
min_crop_object_coverages=[0.0, 0.0, 0.0, 0.0, 0.0],
min_crop_overlaps=[0.1, 0.3, 0.5, 0.7, 0.9],
min_crop_sample_coverages=[0.0, 0.0, 0.0, 0.0, 0.0],
min_crop_scales=[0.3, 0.3, 0.3, 0.3, 0.3],
num_crop_sampler=5,
inter_method=10,
rand_mirror_prob=0.5,
shuffle=True
)
else:
self.data=mx.io.ImageDetRecordIter(
path_imgrec=args.val_rec,
batch_size=args.batch_size,
data_shape=data_shape,
mean_r=123.68,
mean_g=116.779,
mean_b=103.939,
label_pad_width=420,
shuffle=False
)
self._read_data()
self.reset()
@property
def provide_data(self):
return self.data.provide_data
@property
def provide_label(self):
return self.new_provide_label
def reset(self):
self.data.reset()
def _read_data(self):
self._data_batch = next(self.data)
if self._data_batch is None:
return False
else:
original_label = self._data_batch.label[0]
original_label_length = original_label.shape[1]
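            # Padded label layout assumed by the indexing below: index 4 holds the
            # header length, index 5 the per-object label length, and the per-object
            # records start at offset 4 + header length.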
label_head_length = int(original_label[0][4].asscalar())
object_label_length = int(original_label[0][5].asscalar())
label_start_idx = 4+label_head_length
label_num = (original_label_length-
label_start_idx+1)//object_label_length
self.new_label_shape = (self.args.batch_size, label_num,
object_label_length)
self.new_provide_label = [(self.args.label_name,
self.new_label_shape)]
new_label = original_label[:,label_start_idx:
object_label_length*label_num+label_start_idx]
self._data_batch.label = [new_label.reshape((-1,label_num,
object_label_length))]
return True
def iter_next(self):
return self._read_data()
def next(self):
if self.iter_next():
return self._data_batch
else:
raise StopIteration
```
|
{
"source": "jeremy24/494-graph-algos",
"score": 3
}
|
#### File: python/hw2/dfs.py
```python
from __future__ import print_function
import sys
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def go():
if ( len(sys.argv) == 3 ):
filename = str(sys.argv[1])
start = int (sys.argv[2])
graph = make( filename )
visited = graph.dfs(start)
out = ""
for item in visited:
            out += str(item) + " "
out += "\n"
print (out)
else:
print (GraphException("You must supply a valid graph file"))
go()
```
#### File: python/hw3/go.py
```python
import sys
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def go():
if ( len(sys.argv) == 4 ):
filename = str(sys.argv[1])
start = int(sys.argv[2])
end = int(sys.argv[3])
graph = make( filename )
res = graph.dij_path(start, end)
st = ""
for i in res["path"]:
st += str(i) + " "
print(st)
print( res["distance"] )
# graph.output()
else:
print (GraphException("You must supply a valid graph file"))
go()
```
#### File: python/hw5/logger.py
```python
from __future__ import print_function
import logging
import sys
import pip
import os
def install(package):
try:
pip.main(["install", package])
except Exception as ex:
raise "Unable to install " + package + ex
try:
install("colorlog")
import colorlog
except Exception as ex:
raise ex
def mk_logger(have_colorlog):
log = logging.getLogger() # root logger
log.setLevel(logging.DEBUG)
format = '%(asctime)s - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
if have_colorlog and os.isatty(2):
cformat = '%(log_color)s' + format
f = colorlog.ColoredFormatter(cformat, date_format,
log_colors = { 'DEBUG' : 'reset', 'INFO' : 'reset',
'WARNING' : 'bold_yellow', 'ERROR': 'bold_red',
'CRITICAL': 'bold_red' })
else:
f = logging.Formatter(format, date_format)
ch = logging.StreamHandler()
ch.setFormatter(f)
log.addHandler(ch)
return logging.getLogger(__name__)
class LogException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class Logger():
_level = "debug"
_levels = ["debug", "info", "warn", "error"]
# colors = { "debug": "blue", "info": "green", "warning": "yellow", "error": "red"}
def __init__(self, module):
self.module = str(module)
self.have_colorlog = True
        self.logger = mk_logger(self.have_colorlog)
@property
def level(self):
return self._level
@property
def levels(self):
return self._levels
@level.setter
def level(self, val):
if val in self.levels():
self._level = val
    def form(self, *args):
        # Not a @property: it takes extra positional arguments.
        msg = ""
        try:
            for arg in args:
                msg += " " + str(arg)
        except Exception as ex:
            print("Error concatenating args! " + str(ex.message))
        finally:
            return msg
def debug(self, *args):
self.logger.debug("a")
self.logger.debug(args)
a = Logger("test")
a.debug("blah", "blah")
```
#### File: python/hw7/findprufersequence.py
```python
from __future__ import print_function
import sys
from graph import make
from graph import GraphException
from timer import Timer
def main():
if len(sys.argv) == 2:
try:
filename = str(sys.argv[1])
# print(sys.argv)
timer = Timer()
graph = make(filename, zero_index=False)
prufer = graph.prufer()
if prufer == False:
print("The graph is not a tree.")
else:
s= ""
for i in prufer:
s += str(i+1) + " "
print(s)
except Exception as ex:
print ("Exception in main")
raise ex
else:
print (GraphException("You must supply a valid graph file"))
main()
```
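As a quick sanity check (a sketch only: the file name is hypothetical, and it assumes the accompanying graph.py below, including the private leaf-removal helper that prufer() calls, is available), a path on four vertices yields the expected two-element Prüfer sequence:
```python
# Worked example: the path 1-2-3-4 written in the class file format, then fed
# through make()/prufer() from the graph module below.
from graph import make

with open("path4.txt", "w") as f:          # hypothetical input file
    f.write("4\t3\n1\t2\n2\t3\n3\t4\n")    # header "verts edges", then the edges

g = make("path4.txt", zero_index=False)
print([v + 1 for v in g.prufer()])         # expected: [2, 3]
```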
#### File: python/hw7/graph.py
```python
from __future__ import print_function
try:
import Queue
import sys
import random
except Exception as ex:
print("Error importing a module: ", ex.message)
raise ex
try:
import numpy as np
except Exception as ex:
print("The numpy package is not installed, If you attempt to call any")
print("\tgraph methods that require numpy, they will fail and throw an exception.\n")
def make(filename, zero_index = True):
""" build a graph """
fin = open(filename, "r")
linenum = 0
graph = None
no_weight = False
if not zero_index:
print("\nYou specified a graph with verts indexed at 1\n",
"Please make sure this is connect\n\n")
for line in fin:
values = line.split("\t")
try:
i = 0
while i < len(values):
values[i] = float(str(values[i]).strip("\n"))
i += 1
except Exception as ex:
print("\nError parsing the file. This is probably from using spaces instead of tabs.")
print("Exiting...\n")
# print(ex)
raise ex
# if first get graph verts n edges
if linenum == 0:
verts = int(values[0])
edges = int(values[1])
graph = Graph(int(verts), int(edges), zero_index=zero_index)
else: # else connect the verts
try:
node1 = int(values[0])
node2 = int(values[1])
if zero_index:
graph.connect(node1, node2)
else:
graph.connect(node1-1, node2-1)
if len(values) == 3:
weight = float(values[2])
if zero_index:
graph.add_cost(node1, node2, weight)
else:
graph.add_cost(node1-1, node2-1, weight)
else:
no_weight = True
except Exception as ex:
print("Error connecting verts or adding weights",
ex.message,"\n")
raise ex
linenum += 1
if no_weight:
print("\nThe file you passed does not contain measures for weighted edges.")
print("Please make sure this is correct.\n")
fin.close()
return graph
class GraphException(Exception):
""" make a graph ex """
def __str__(self):
return repr(self.message)
# not used, just messing with python overloading
class Matrix:
""" make a matrix """
def __init__(self, r, c):
self.rows = r
self.cols = c
        self.data = [[0 for _ in range(self.cols)] for _ in range(self.rows)]
def __getitem__(self, key):
print("key: " + str(key))
return self.data[key]
def __setitem__(self, key, value):
print("set key: " + str(key) + " val: " + str(value))
self.data[key] = value
def output(self):
try:
for i in range(self.rows):
row = ""
for j in range(self.cols):
row += (str(int(self.data[i][j])) + " ")
print(row + "\n")
except Exception as ex:
print("Error outputting graph:", ex.message)
raise GraphException(ex)
def set(self, a, b, val):
self.data[a][b] = val
def fill(self, value):
for i in range(self.rows):
for j in range(self.cols):
self.set(i, j, value)
class Graph:
def __init__(self, vs, es, has_weights=False, zero_index = True):
self.verts = vs
self.edges = es
self.data = [[0 for x in range(self.verts)] for y in range(self.verts)]
self.isWeighted = bool(has_weights)
self.zero_index = zero_index
# init all weights to "infinity"
self.weights = [[sys.maxint for x in range(self.verts)] for y in range(self.verts)]
def __getitem__(self, key):
return self.data[key]
def output(self):
for i in range(self.verts):
row = ""
for j in range(self.verts):
                # Edges are stored zero-indexed internally (make() normalizes the
                # input), so print data[i][j] regardless of the file's indexing.
                row += (str(self.data[i][j]) + " ")
print(row + "\n")
def add_cost(self, a, b, weight):
self.weights[a][b] = float(weight)
self.weights[b][a] = float(weight)
def connect(self, a, b, weight=None):
self.data[a][b] = 1
self.data[b][a] = 1
if weight is not None:
self.add_cost(a, b, weight)
def remove(self, a, b):
self.data[a][b] = 0
self.data[b][a] = 0
def density(self):
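        # Density of an undirected simple graph: 2|E| / (|V| * (|V| - 1)).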
if self.edges == 0 and self.verts == 0:
return 0
else:
top = 2 * float(self.edges)
bottom = float(self.verts) * float(self.verts - 1)
return round((top / bottom), 5)
def edge_cost(self, a, b):
return self.weights[a][b]
def __neighbors_of(self, u):
try:
u = int(u)
except ValueError:
raise GraphException("value passed to neighbors_of is not an int")
if u > self.verts:
raise GraphException("Vert u is larger than the size of the graph")
else:
try:
ret = frozenset()
for neighbor in range(self.verts):
if self.data[u][neighbor] == 1:
ret |= frozenset([neighbor])
return ret
except Exception as ex:
raise GraphException(ex)
def __degree_of(self, u):
        return np.sum(self.data[u])
def jaccard(self, u, v):
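        # Jaccard similarity of the two neighborhoods:
        # |N(u) intersect N(v)| / |N(u) union N(v)|.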
try:
S = self.__neighbors_of(u)
T = self.__neighbors_of(v)
except Exception as ex:
raise(ex)
# print("u neighbors", S)
# print("v neighbors", T)
try:
return float(len(S.intersection(T))) / float(len(S.union(T)))
except Exception as ex:
raise GraphException("Jaccard error " + str(ex.message))
def __are_connected(self, u, v):
return (u < self.verts and v < self.verts) and self.data[u][v] == 1
def adj_neighbors(self, u):
neighbors = set()
adj = set()
for vert in range(self.verts):
if self.data[u][vert] == 1:
neighbors.add(vert)
for n in neighbors:
for nn in neighbors:
if self.data[n][nn] == 1:
adj.add(frozenset([n, nn]))
return adj
def local_clustering(self, u):
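        # Local clustering coefficient:
        # 2 * (edges among u's neighbors) / (deg(u) * (deg(u) - 1)).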
adj_neighbors = self.adj_neighbors(u)
total_neighbors = self.__degree_of(u)
total_neighbors *= (total_neighbors-1)
total_neighbors = float(total_neighbors)
if total_neighbors == 0.0:
return 0.0
return round((2.0 * float(len(adj_neighbors))) / float(total_neighbors), 5)
def __deg_gt(self, degree):
if type(degree) is not int:
raise GraphException("__deg_gt degree must be an int")
ret = list()
for vert in range(self.verts):
if self.__degree_of(vert) > degree:
ret.append(vert)
return ret
def __deg_lt(self, degree):
try:
if type(degree) is not int:
raise GraphException("__deg_lt degree must be an int")
ret = list()
for vert in range(self.verts):
if self.__degree_of(vert) < degree:
ret.append(vert)
return ret
except Exception as ex:
print("__deg_lt error: ", ex.message)
raise GraphException(ex)
def isolated_verts(self):
return self.__deg_lt( 1 )
def fast_p3(self, pretty_print=False):
l = []
l.extend(range(self.verts))
triplets = set()
to_check = set( self.__deg_gt(1) )
checked = 0
front_back_pairs = set()
answers = list()
ends_to_check = set( self.__deg_gt(0) )
for center in to_check:
found = 0
            front_back_pairs = set()  # reset the seen (front, back) pairs for this center
for front in ends_to_check:
if front == center or self.data[front][center] == 0:
continue
for back in ends_to_check:
if back == center or back == front or self.data[center][back] == 0:
continue
if frozenset([center, frozenset([front, back]) ]) in front_back_pairs:
continue
# print("checking", front, center, back)
checked += 1
if self.data[front][center] + self.data[center][back] == 2:
# print("\tkeeping")
to_add = frozenset([center, frozenset([front, back])])
triplets.add(to_add)
front_back_pairs.add(to_add)
if not pretty_print:
return triplets
item = None
o_item = None
try:
for answer in triplets:
answer = set(answer)
first = answer.pop()
second = answer.pop()
string = "("
if type(first) is frozenset:
first = set(first)
string += str(first.pop()) + ", " + str(second) + ", " + str(first.pop())
elif type(second) is frozenset:
second = set(second)
string += str(second.pop()) + ", " + str(first) + ", " + str(second.pop())
else:
string += "error"
string += ")"
answers.append(string)
return sorted(answers)
except Exception as ex:
print(ex.message)
raise GraphException(ex)
def number_of_k3(self):
try:
matrix = np.matrix(self.data)
k3_matrix = np.linalg.matrix_power(matrix, 3)
trace = np.matrix.trace(k3_matrix)
return trace / 6
except Exception as ex:
print("Exception in numnber_of_k3", ex.message)
raise ex
def global_clustering(self):
try:
num_closed_p3 = float(3 * self.number_of_k3())
p3_list = self.fast_p3(pretty_print=True)
# print(p3_list)
btm = float(len(p3_list))
if btm == 0.0:
return 0.0
return num_closed_p3 / btm
except Exception as ex:
print(ex.message)
raise GraphException(ex)
# run a bfs
def bfs(self, start):
visited = list()
queue = Queue.Queue()
queue.put(start)
while not queue.empty():
vert = queue.get()
if vert not in visited:
visited.append(vert)
for index in range(0, len(self.data[vert])):
if self.data[vert][index] == 1:
queue.put(index)
return visited
# run a dfs
def dfs(self, start):
visited = list()
stack = list()
stack.append(start)
while len(stack):
vert = stack.pop()
if vert not in visited:
visited.append(vert)
for index in range(0, len(self.data[vert])):
if self.data[vert][index] == 1:
stack.append(index)
return visited
def __has_cycle(self, v, visited, parent):
try:
visited[v] = True
for i in range(self.verts):
if self.data[v][i] == 0:
continue
if visited[i] == False :
if (self.__has_cycle(i, visited, v)):
return True
elif parent != i and parent != -1:
return True
return False
except Exception as ex:
print("Error deciding whether graph is a tree: ", ex.message)
raise GraphException("is_tree error: " + str(ex.message))
def has_cycle(self):
visited = [False for x in range(self.verts)]
return self.__has_cycle(0, visited, -1)
def is_tree(self):
return len(self.comps()) == 1 and self.has_cycle() == False
def get_leaves(self):
try:
leaves = list()
for vert in range(self.verts):
if np.sum(self.data[vert]) == 1:
leaves.append(vert)
return leaves
except Exception as ex:
print("get_leaves error: ", ex.message)
raise GraphException(ex)
def __get_smallest_leaf(self):
try:
leaves = self.get_leaves()
if len(leaves):
return np.amin(self.get_leaves())
return None
except Exception as ex:
print("__get_smallest_leaf error: ", ex.message)
raise GraphException(ex)
def output_formatted_graph(self):
""" print out a graph in our class format """
## this will take into account vert lists that start at 1 instead of 0
out = list()
pairs = set()
for i in range(self.verts):
for j in range(self.verts):
if self.data[i][j] == 1:
                    # Both branches were identical: edges are stored zero-indexed
                    # internally, so always emit 1-based vertex ids here.
                    pairs.add(frozenset([i+1, j+1]))
for i in pairs:
j = list(i)
out.append(str(j.pop()) + "\t" + str(j.pop()))
out.insert(0, str(self.verts) + "\t" + str(len(pairs)))
for line in out:
print(line)
def buildFromPrufer(self, seq):
try:
seq = map(lambda x: x - 1 , list(seq))
degrees = list()
u = None
v = None
for i in range(self.verts):
degrees.append( seq.count(i) + 1 )
for i in seq:
for j in range(len(degrees)):
if j == i:
continue
if degrees[j] == 1:
try:
self.connect(i, j, weight=None)
degrees[i] = degrees[i] - 1
degrees[j] = degrees[j] - 1
break
except Exception as ex:
print("Error connecting:", ex.message)
raise ex
for i in range(len(degrees)):
if degrees[i] == 1:
if u is None:
u = i
else:
v = i
self.connect(u,v, weight=None)
self.output_formatted_graph()
except Exception as ex:
print(ex.message)
raise GraphException(ex)
def prufer(self):
""" compute a prufer sequence this is a destructive call """
try:
removed = set()
if self.is_tree() == False:
return False
i = 0
seq = list()
max_itors = self.verts - 2
leaf = None
leaf_neighbors = None
while i < max_itors:
leaf = self.__get_smallest_leaf()
if leaf is None:
print("No more leaves left")
return False
leaf_neighbors = list(self.__neighbors_of(leaf))
if len(leaf_neighbors) > 1:
raise GraphException("Prufer leaf has > 1 neighbor!")
seq.append(leaf_neighbors[0])
# print("seq at", i, seq)
self.__remove_vert(leaf)
removed.add(leaf)
i += 1
return seq
except Exception as ex:
print("prufer error: ", ex.message)
raise GraphException(ex)
def dij_path(self, start, end):
""" Weight is correct but path is not!! """
if end >= self.verts:
raise GraphException("Cannot find a vertex that is not in the graph")
visited = list()
dists = [sys.maxint for x in range(self.verts)]
dists[start] = 0
search = self.dfs(start)
path = list()
queue = Queue.Queue()
queue.put(start)
while not queue.empty():
vert = queue.get()
if vert not in visited:
visited.append(vert)
for index in range(0, len(self.data[vert])):
if self.data[vert][index] == 1:
queue.put(index)
if (dists[vert] + self.weights[vert][index]) < dists[index]:
# print("its less")
dists[index] = dists[vert] + self.weights[vert][index]
if dists[vert] == sys.maxint:
# print("inf, setting to", self.weights[vert][index])
dists[index] = self.weights[vert][index]
# path.append(vert)
for i in search:
path.append(i)
if i == end:
break
return {"distance": dists[end], "path": path}
def comps(self):
try:
ret = set()
seen = set()
while len(seen) != len(self.data):
for index in range(0, len(self.data[0])):
if index not in seen:
conns = frozenset(self.dfs(index))
seen |= conns # union the sets
ret.add(conns)
return ret
except Exception as ex:
print("Error in comps: ", ex.message)
raise GraphException(ex)
def degree(self, switch):
target = 0
if switch == "min":
target = self.verts - 1
if target < 0:
target = 0
for i in range(self.verts):
tmp = 0
for j in range(self.verts):
tmp += self.data[i][j]
if switch == "max":
if tmp > target:
target = tmp
elif switch == "min":
if tmp < target:
target = tmp
else:
print(GraphException("Invalid switch passed to degree."))
return target
def order_verts(self, direction):
vert_list = list()
for i in range(self.verts):
deg = 0
for j in range(self.verts):
if self.data[i][j] == 1:
deg += 1
vert_list.append([i, deg])
if direction == "max":
vert_list = sorted(vert_list, key=lambda tup: tup[1])
vert_list.reverse()
elif direction == "min":
vert_list = sorted(vert_list, key=lambda tup: tup[1])
elif direction == "random":
vert_list = random.sample(vert_list, len(vert_list))
else:
raise GraphException("Invalid direction passed to order_verts: " + direction)
# pluck out the vert numbers and drop the deg used to order
vert_list = [i for [i, j] in vert_list]
return vert_list
def color(self, direction):
vert_set = None
try:
vert_set = self.order_verts(direction=direction)
except GraphException as ex:
print("Cannot continue, invalid direction given")
raise ex
except Exception as generalEx:
raise GraphException(generalEx)
colors = set()
current_color = 1
colored = dict() # dict[vert]: color
colors.add(0)
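# greedy coloring: visit vertices in the requested order and give each one the smallest
# color not already used by a colored neighbor, adding a new color only when every
# existing color is blocked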
try:
for vert in vert_set:
valid_colors = set()
valid_colors |= colors # make all colors initially valid
if vert not in colored:
for i in range(self.verts):
if self.data[vert][i] == 0:
continue
neighbor = i
if neighbor in colored.keys():
try:
# print "neighbor color:", colored[neighbor], "valid color:", colored[neighbor] in valid_colors
if colored[neighbor] in valid_colors:
# remove the neighbor color from valid list
valid_colors.remove(colored[neighbor])
except Exception as ex:
print("neighbor check error for", neighbor)
raise ex
try:
if len(valid_colors) == 0:
colors.add(current_color)
colored[vert] = current_color
current_color += 1
else:
colored[vert] = min(valid_colors)
except Exception as ex:
print("assign error")
raise ex
else:
print("vert", vert, "already colored")
# print colored
# print "took", len(colors), "different colors"
return {"number": len(colors), "colors": colors}
except Exception as ex:
raise ex
def __remove_vert(self, vert):
try:
i = 0
ret = list()
while i < self.verts:
if self.data[i][vert] == 1:
ret.append({"vert": i, "weight": self.weights[i][vert]})
self.data[i][vert] = 0
self.data[vert][i] = 0
i += 1
return ret
except Exception as ex:
print(ex)
raise ex
def __reconnect_vert(self, vert, conns):
try:
i = 0
while i < len(conns):
self.connect(conns[i]["vert"], vert, weight=conns[i]["weight"])
i += 1
except Exception as ex:
print("Error in reconnect_vert", ex)
raise ex
def is_cut_vert(self, vert):
try:
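# __remove_vert only clears the vertex's edges, which by itself isolates the vertex and
# adds one component; a genuine cut vertex therefore increases the component count by
# more than one, which is the test used below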
vert = int(vert)
old_comps = self.comps()
conns = self.__remove_vert(vert)
new_comps = self.comps()
self.__reconnect_vert(vert, conns)
# print("comp len diff:", len(new_comps) - len(old_comps))
if len(new_comps) - len(old_comps) > 1:
return True
return False
except Exception as ex:
print("error in is_cut_vert", ex)
raise ex
def get_cut_verts(self, pretty_print=False):
cut_verts = set()
try:
i = 0
while i < self.verts:
if self.is_cut_vert(i):
cut_verts.add(i)
i += 1
if pretty_print:
cut_list = sorted(list(cut_verts))
return_string = ""
for i in cut_list:
return_string += str(i) + " "
return return_string
return cut_verts
except Exception as ex:
print("Error in get_cut_verts", ex)
raise GraphException(ex)
def get_cut_edges(self, pretty_print=False):
try:
i = 0
j = 0
checked_edges = set()
cut_edges = set()
while i < self.verts:
j = 0
while j < self.verts:
if self.data[i][j] == 1:
temp_set = frozenset([i, j])
if temp_set not in checked_edges:
checked_edges.add(temp_set)
old_comps = len(self.comps())
self.data[j][i] = 0
self.data[i][j] = 0
new_comps = len(self.comps())
self.data[j][i] = 1
self.data[i][j] = 1
if new_comps - old_comps > 0:
cut_edges.add(frozenset([i, j]))
j += 1
i += 1
if pretty_print:
return_string = ""
cut_edge_list = list(cut_edges)
cut_edge_list = sorted(map(list, cut_edge_list), key=lambda tup: tup[0])
for k in cut_edge_list:
return_string += "(" + str(k[0]) + "," + str(k[1]) + ") "
return return_string
return cut_edges
except Exception as ex:
print("Error getting cut edges", ex)
raise GraphException(ex)
```
|
{
"source": "jeremy2918/data-structures",
"score": 4
}
|
#### File: data-structures/HashTable/hash_table.py
```python
from typing import List
# Hash Table Entry Class Definition
class Entry:
def __init__(self, key, value, hash):
self.key = key
self.value = value
self.hash = hash
# Representation of Entry
def __repr__(self) -> str:
return f'Entry("{self.key}", {self.value})'
# Conversion to string method
def __str__(self) -> str:
return f'{self.key}: {self.value}'
# Custom equality operator
def __eq__(self, other) -> bool:
if isinstance(other, Entry):
if self.hash != other.hash:
return False
return self.key == other.key
return NotImplemented
# Hash Table Class definition
class HashTable:
default_capacity = 3
default_load_factor = 0.75
max_load_factor = capacity = threshold = size = 0
table = []
def __init__(self, capacity: int = 3, max_load_factor: float = 0.75) -> None:
if capacity < 0:
raise Exception("Invalid capacity")
if max_load_factor <= 0 or not isinstance(max_load_factor, float):
raise Exception("Invalid max load factor")
self.max_load_factor = max_load_factor
self.capacity = max(self.default_capacity, capacity)
self.threshold = self.capacity * self.max_load_factor
self.table = [None] * self.capacity
# Calculate the hash for a key
def hash(self, key: str) -> int:
hashsum = 0
for idx, c in enumerate(key):
hashsum += (idx + len(key)) ** ord(c)
hashsum = hashsum % self.capacity
return hashsum
# Whether the table contains a key or not
def contains(self, key: str):
bucket_index = self.hash(key)
return self.__bucket_get_entry(bucket_index, key) != None
# Whether the table is empty
def is_empty(self):
return self.size == 0
# Clear the table
def clear(self):
self.table = [None] * self.capacity
self.size = 0
# Insert or update an entry in the table
def insert(self, key: str, value: int):
if key == None:
raise Exception("Invalid null key")
entry = Entry(key, value, self.hash(key))
return self.__bucket_insert_entry(entry.hash, entry)
# Gets the value of a key in the table
def get(self, key: str):
if key == None:
raise Exception("Invalid null key")
bucket_index = self.hash(key)
entry = self.__bucket_get_entry(bucket_index, key)
if entry == None:
return None
return entry.value
# Removes the entry with the given key
def remove(self, key: str):
if key == None:
raise Exception("Invalid null key")
return self.__bucket_remove_entry(self.hash(key), key)
# Removes an entry from its corresponding bucket in the table
def __bucket_remove_entry(self, bucket_index: int, key: str):
entry = self.__bucket_get_entry(bucket_index, key)
if entry == None:
return None
bucket = self.table[bucket_index]
bucket.remove(entry)
self.size -= 1
return entry.value
# Inserts an entry in its corresponding bucket in the table
# Returns the old value if the entry was just updated, None if it is a new one
def __bucket_insert_entry(self, bucket_index: int, entry: Entry):
bucket = self.table[bucket_index]
# If the bucket was empty, create a new list in it
if bucket == None:
self.table[bucket_index] = bucket = []
entry_exists = self.__bucket_get_entry(bucket_index, entry.key)
if entry_exists:
old_value = entry_exists.value
entry_exists.value = entry.value
return old_value
else:
bucket.append(entry)
self.size += 1
if self.size > self.threshold:
self.__resize_table()
return None
# Returns an entry given its bucket and key
def __bucket_get_entry(self, bucket_index: int, key: str) -> Entry:
if key == None:
return None
bucket = self.table[bucket_index]
if bucket == None:
return None
for entry in bucket:
if entry.key == key:
return entry
return None
# Adds more buckets to the table and re-arranges all the entries in it
def __resize_table(self):
self.capacity *= 2
self.threshold = self.capacity * self.max_load_factor
new_table = [None] * self.capacity
for bucket in self.table:
if bucket != None:
for entry in bucket:
bucket_index = self.hash(entry.key)
new_bucket = new_table[bucket_index]
if new_bucket == None:
new_table[bucket_index] = new_bucket = []
new_bucket.append(entry)
bucket = None
self.table = new_table
# Returns a list of all the keys in the table
def keys(self):
keys, i = [None] * self.size, 0
for bucket in self.table:
if bucket != None:
for entry in bucket:
keys[i] = entry.key
i += 1
return keys
# Returns a list of all the values in the table
def values(self):
values, i = [None] * self.size, 0
for bucket in self.table:
if bucket != None:
for entry in bucket:
values[i] = entry.value
i += 1
return values
# Returns a list of sets containing all the key-value pairs of the table
def items(self):
items, i = [None] * self.size, 0
for bucket in self.table:
if bucket != None:
for entry in bucket:
items[i] = (entry.key, entry.value)
i += 1
return items
# Convert to string method
def __str__(self):
res = "{\n"
for bucket in self.table:
if isinstance(bucket, List):
for entry in bucket:
res += (f' "{entry.key}": {entry.value}\n')
res += "}"
return res
# Returns the number of entries in the table
def __len__(self):
return self.size
```
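A minimal usage sketch of the HashTable class above (assuming the file is importable as `hash_table.py`; the keys and values are illustrative only):
```python
from hash_table import HashTable

# Small table; it doubles its capacity once size exceeds capacity * max_load_factor
table = HashTable(capacity=4, max_load_factor=0.75)
table.insert("apples", 3)
table.insert("pears", 5)
old = table.insert("apples", 7)   # updating an existing key returns the previous value (3)

print(table.get("pears"))         # 5
print(table.contains("plums"))    # False
print(len(table))                 # 2
print(table.items())              # key-value pairs, in bucket order
table.remove("apples")
print(table.keys())               # ['pears']
```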
#### File: data-structures/Heap/heap.py
```python
class Heap():
def __init__(self, initial_data=[]):
self.data = []
if isinstance(initial_data, int):
self.data = [initial_data]
elif isinstance(initial_data, list):
self.data = list(initial_data)
# Returns the number of elements in the heap
def size(self):
return len(self.data)
# Add an element to the min heap
def add(self, elem):
self.data.append(elem)
self.swim(len(self.data) - 1)
# Removes and returns the element with the highest priority (first element)
def poll(self):
if self.is_empty():
raise Exception("Min heap is empty")
polled = self.data[0]
self.remove(polled)
return polled
# Removes an element from the heap
def remove(self, elem):
if self.is_empty():
raise Exception("Min heap is empty")
index = self.index(elem)
if index == -1:
raise Exception(f"Heap does not contain the element <{elem}>")
self.swap(index, self.size() - 1)
self.data.pop()
# If the element was the last one, do nothing else
if index == self.size():
return
if not self.is_empty():
self.sink(index)
self.swim(index)
# Bubble up an element at a k position
def swim(self, k):
parent = (k - 1) // 2
# Keep swimming while we have not reached the
# root and while we're less than our parent.
while k > 0 and self.data[k] < self.data[parent]:
self.swap(k, parent)
k = parent
parent = (k - 1) // 2
# Bubble down an element at a k position
def sink(self, k):
while True:
left = 2 * k + 1
right = 2 * k + 2
smallest = left
# Take the left child as smallest by default
# Change only if the right child is less than the left child
if right < self.size() and self.data[right] < self.data[left]:
smallest = right
# Stop sinking once k has no children or is already smaller
# than its smallest child; otherwise swap and continue down
if left >= self.size() or self.data[k] < self.data[smallest]:
break
self.swap(k, smallest)
k = smallest
# Swaps the positions of two elements given their indexes
def swap(self, i1, i2):
elem1 = self.data[i1]
elem2 = self.data[i2]
self.data[i1] = elem2
self.data[i2] = elem1
# Returns whether the heap is empty
def is_empty(self):
return self.size() == 0
# Returns the first element (smallest) of the heap
def peek(self):
return self.data[0] if not self.is_empty() else None
# Returns the index of an element in the heap, -1 if it is not contained
def index(self, elem):
for index, value in enumerate(self.data):
if value == elem:
return index
return -1
# Whether an element is contained in the heap
def contains(self, elem):
return self.index(elem) != -1
# Representation method
def __repr__(self):
return f"Heap({repr(self.data)})"
# Convert to string method
def __str__(self):
return str(self.data)
# Number of elements in the heap
def __len__(self):
return len(self.data)
```
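A short usage sketch of the min-heap class above (assuming the file is importable as `heap.py`); note that a list passed to the constructor is stored as-is rather than heapified, so building via add() is the safe route:
```python
from heap import Heap

heap = Heap()
for value in (5, 2, 8, 1):
    heap.add(value)        # add() swims the new element up to keep the min-heap property

print(heap.peek())         # 1 -- the smallest element sits at the root
print(heap.poll())         # 1 -- removes and returns the minimum
print(heap.contains(8))    # True
print(len(heap))           # 3
```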
|
{
"source": "jeremy2918/recipe-app-api",
"score": 3
}
|
#### File: recipe/tests/test_tag_api.py
```python
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class TestPublicTagAPI(TestCase):
"""Tests the public tag API"""
def setUp(self) -> None:
self.client = APIClient()
def test_login_required(self):
"""Tests that the user should be logged in to request the tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, 401)
class TestPrivateTagAPI(TestCase):
"""Tests the public tag API"""
def setUp(self) -> None:
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Test User'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Tests retrieveing tags"""
Tag.objects.create(user=self.user, name='Python')
Tag.objects.create(user=self.user, name='Django')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Tests that the retrieved tags belong to the authenticated user"""
user2 = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Test User2'
)
Tag.objects.create(user=user2, name='REST')
tag = Tag.objects.create(user=self.user, name='API')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag(self):
"""Tests creating a new tag"""
payload = {'name': 'Python'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_invalid_tag(self):
"""Tests creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, 400)
def test_retrieve_assigned_tags(self):
"""Tests retrieving tags assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Lunch')
tag2 = Tag.objects.create(user=self.user, name='Supper')
recipe = Recipe.objects.create(
user=self.user, title='Sample', time_minutes=15, cost=5.00)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_unique_tag_retrieve(self):
"""Tests filtering assigned only tag (unique)"""
tag1 = Tag.objects.create(user=self.user, name='Lunch')
Tag.objects.create(user=self.user, name='Supper')
recipe1 = Recipe.objects.create(
user=self.user, title='Sample', time_minutes=15, cost=5.00)
recipe1.tags.add(tag1)
recipe2 = Recipe.objects.create(
user=self.user, title='Sample2', time_minutes=10, cost=7.00)
recipe2.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
```
#### File: user/tests/test_users_api.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class TestPublicUserAPI(TestCase):
"""Test the public API for users"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user(self):
"""It should create a user when the payload is valid"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Test name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, 201)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""It should not create a user if it already exists"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Test name'
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, 400)
def test_short_password(self):
"""It should not create a user if the password is less than 8 chars"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Test name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, 400)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token(self):
"""Tests that a token is created for the user"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'Test user'
}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, 200)
def test_create_token_invalid_credentials(self):
"""Tests that the token is not create if the payload is invalid"""
create_user(email='<EMAIL>', password='<PASSWORD>')
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
res = self.client.post(TOKEN_URL, payload)
self.assertEqual(res.status_code, 400)
self.assertNotIn('token', res.data)
def test_create_token_no_user(self):
"""Tests that the token is not created if the users does not exits"""
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
res = self.client.post(TOKEN_URL, payload)
self.assertEqual(res.status_code, 400)
self.assertNotIn('token', res.data)
def test_missing_fields(self):
"""Test that the token is not created if any field is missing"""
res = self.client.post(
TOKEN_URL, {'email': '<EMAIL>', 'password': ''})
self.assertEqual(res.status_code, 400)
self.assertNotIn('token', res.data)
def test_retrieve_user_unauthorized(self):
"""Tests that authentication is required for users"""
res = self.client.post(ME_URL)
self.assertEqual(res.status_code, 401)
class TestPrivateUserAPI(TestCase):
"""Test the API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='<NAME>'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile(self):
"""Tests retrieveing profile for logged user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_no_post_request(self):
"""Test that POST is not allowed fro the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, 405) # 405->method not allowed
def test_user_update(self):
"""Test that the user is updated for the authenticated user"""
payload = {
'name': '<NAME>',
'password': '<PASSWORD>'
}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, 200)
```
|
{
"source": "jeremy43/autodp-1",
"score": 2
}
|
#### File: example/private-deep-learning/dpdl_utils.py
```python
from mxnet import nd
import mxnet as mx
def initialize_grad(params,ctx=mx.cpu()):
""" initialize a grad object with format just like those in paramter """
a=[]
for param in params.values():
a.append(nd.zeros(param.shape).as_in_context(ctx))
return a
def reset_grad(grads):
for grad in grads:
grad[:] = 0
def accumuate_grad(grads, params, thresh): # accumulate the clipped (thresholded) gradient
tmp=grad_norm_in_params(params)
if tmp > thresh:
factor = thresh / tmp
else:
factor = 1.0
for grad, param in zip(grads, params.values()):
grad[:] += param.grad() * factor
def accumulate_params(param_cumu, params, n):
for param2,param in zip(param_cumu, params.values()):
param2[:] = param2 *(n-1)/n + param.data() /n
def iir_filter(mm, gg, beta, order): # helps to accumulate the gradients and second moment for Adam
for m,g in zip(mm,gg):
m[:] = beta*m + (1-beta)*(g**order)
def extract_grad(params, grads):
""" get the gradient attached to "params" and assign to "grads" """
for param,grad in zip(params.values(), grads):
grad[:] = param.grad()
def grad_norm_in_params(params):
"""Calculate the Euclidean norm of the parameters in grad list grads """
a=0
for item in params.values():
a += nd.sum(item.grad() ** 2).asscalar()
return a ** 0.5
def grad_norm(grads):
"""Calculate the Euclidean norm of the parameters in grad list grads """
a=0
for item in grads:
a += nd.sum(item ** 2).asscalar()
return a ** 0.5
def grad_rescale(grads, k):
"""scale the gradient by a factor of k"""
y = [item.copy() for item in grads] # python lists have no .deepcopy(); copy each gradient array instead
for item in y:
item[:] = item * k
return y # return the parameters
def grad_add(grads_batch):
"""add up the list of gradients in lists"""
y = [item.copy() for item in grads_batch[0]] # copy the first gradient list; lists have no .deepcopy()
for xx in grads_batch:
for item1,item2 in zip(xx,y):
item2 += item1
return y # return the parameters with a different gradient
```
#### File: example/private-deep-learning/example_dpdl.py
```python
from autodp import rdp_bank, rdp_acct
# import packages needed for deep learning
import mxnet as mx
from mxnet import nd, autograd
from mxnet import gluon
import dpdl_utils
ctx = mx.cpu()
# ## Get data: standard MNIST
mnist = mx.test_utils.get_mnist()
num_inputs = 784
num_outputs = 10
batch_size = 1 # this is set to get per-example gradient
train_data = mx.io.NDArrayIter(mnist["train_data"], mnist["train_label"],
batch_size, shuffle=True)
test_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"],
64, shuffle=True)
train_data2 = mx.io.NDArrayIter(mnist["train_data"], mnist["train_label"],
64, shuffle=True)
# ## Build a one hidden layer NN with Gluon
num_hidden = 1000
net = gluon.nn.HybridSequential()
with net.name_scope():
net.add(gluon.nn.Dense(num_hidden, in_units=num_inputs,activation="relu"))
net.add(gluon.nn.Dense(num_outputs,in_units=num_hidden))
# get and save the parameters
params = net.collect_params()
params.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
params.setattr('grad_req', 'write')
# define loss function
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# ## Use a new optimizer called privateSGD
# Basically, we add Gaussian noise to the stochastic gradient.
# define the update rule
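# This follows the DP-SGD pattern: per-example gradients are clipped to norm `thresh`
# (see dpdl_utils.accumuate_grad), summed over a batch, and Gaussian noise with standard
# deviation `sigma` is added at update time, while the RDP accountant (rdp_acct) tracks
# the cumulative privacy loss of the subsampled Gaussian mechanism.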
def privateSGD(x, g, lr, sigma,wd=0.0,ctx=mx.cpu()):
for (param,grad) in zip(x.values(), g):
v=param.data()
v[:] = v - lr * (grad +wd*v+ sigma*nd.random_normal(shape = grad.shape).as_in_context(ctx))
# Utility function to evaluate error
def evaluate_accuracy(data_iterator, net):
acc = mx.metric.Accuracy()
loss_fun = .0
data_iterator.reset()
for i, batch in enumerate(data_iterator):
data = batch.data[0].as_in_context(ctx).reshape((-1, 784))
label = batch.label[0].as_in_context(ctx)
output = net(data)
predictions = nd.argmax(output, axis=1)
acc.update(preds=predictions, labels=label)
loss = softmax_cross_entropy(output, label)
loss_fun = loss_fun*i/(i+1) + nd.mean(loss).asscalar()/(i+1)
return acc.get()[1], loss_fun
# ## Now let's try attaching a privacy accountant to this data set
# declare a moment accountant from pydiffpriv
DPobject = rdp_acct.anaRDPacct()
# Specify privacy specific inputs
thresh = 4.0 # limit the norm of individual gradient
sigma = thresh
delta = 1e-5
func = lambda x: rdp_bank.RDP_gaussian({'sigma': sigma/thresh}, x)
# ## We now specify the parameters needed for learning
#
epochs = 10
learning_rate = .1
n = train_data.num_data
batchsz = 100 #
count = 0
niter=0
moving_loss = 0
grads = dpdl_utils.initialize_grad(params,ctx=ctx)
# ## Let's start then!
# declare a few place holder for logging
logs = {}
logs['eps'] = []
logs['loss'] = []
logs['MAloss'] = []
logs['train_acc'] = []
logs['test_acc'] = []
for e in range(epochs):
# train_data.reset() # Reset does not shuffle yet
train_data = mx.io.NDArrayIter(mnist["train_data"], mnist["train_label"],
batch_size, shuffle=True)
for i, batch in enumerate(train_data):
data = batch.data[0].as_in_context(ctx).reshape((-1, 784))
label = batch.label[0].as_in_context(ctx)
with autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
# calculate a moving average estimate of the loss
count += 1
moving_loss = .999 * moving_loss + .001 * nd.mean(loss).asscalar()
est_loss = moving_loss / (1 - 0.999 ** count)
# Add up the clipped individual gradient
dpdl_utils.accumuate_grad(grads, params, thresh)
#print(i)
if not (i + 1) % batchsz: # update the parameters when we collect enough data
privateSGD(params, grads, learning_rate/batchsz,sigma,wd=0.1,ctx=ctx)
# Keep track of the privacy loss
DPobject.compose_subsampled_mechanism(func,1.0*batchsz/n)
dpdl_utils.reset_grad(grads)
if count % (10*batchsz) == 0:
print("[%s] Loss: %s. Privacy loss: eps = %s, delta = %s " % (((count+1)/batchsz),est_loss,DPobject.get_eps(delta),delta))
logs['MAloss'].append(est_loss)
##########################
# Keep a moving average of the losses
##########################
if count % 60000 == 0:
test_accuracy, loss_test = evaluate_accuracy(test_data, net)
train_accuracy, loss_train = evaluate_accuracy(train_data2, net)
print("Net: Epoch %s. Train Loss: %s, Test Loss: %s, Train_acc %s, Test_acc %s" %
(e, loss_train, loss_test,train_accuracy, test_accuracy))
logs['eps'].append(DPobject.get_eps(delta))
logs['loss'].append(loss_train)
logs['train_acc'].append(train_accuracy)
logs['test_acc'].append(test_accuracy)
learning_rate = learning_rate/2
## Plot some figures!
import matplotlib.pyplot as plt
plt.figure(num=1, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(range(epochs), logs['eps'])
plt.plot(range(epochs), logs['loss'])
plt.plot(range(epochs), logs['train_acc'])
plt.plot(range(epochs), logs['test_acc'])
plt.legend([r'\delta = 1e-5', 'Training loss', 'Training accuracy', 'Test accuracy'], loc='best')
plt.show()
```
|
{
"source": "jeremy43/FCIS",
"score": 2
}
|
#### File: fcis/operator_py/weighted_cross_entropy.py
```python
import mxnet as mx
import numpy as np
class WeightedCEOperator(mx.operator.CustomOp):
def __init__(self, grad_scale, ignore_label, use_ignore):
super(WeightedCEOperator, self).__init__()
self.use_ignore = use_ignore
self.ignore_label = ignore_label
self.grad_scale = float(grad_scale)
self._softmax_out = None
self._mask_weights = None
self._label = None
def forward(self, is_train, req, in_data, out_data, aux):
seg_pred = in_data[0]
mask_weights = in_data[1]
label = in_data[2]
# print seg_pred
# print mask_weights
softmax_out = mx.nd.softmax(seg_pred, axis=1)
self._softmax_out = softmax_out.copy()
self._mask_weights = mask_weights.copy()
self._label = label.copy()
label = label.asnumpy().astype('int32')
label_zero = np.where(label != -1, 1-label, -1)
label = np.concatenate((label_zero, label), axis=1)
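# label now has two channels (background = 1 - label, foreground = label), matching the
# two-channel softmax output, with ignored pixels kept at -1 in both channels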
cls = softmax_out.asnumpy() + 1e-14
cls_loss = np.where(label != -1, -label * np.log(cls), 0)
assert label.shape == seg_pred.shape, 'shape error'
cls_loss = mx.nd.array(cls_loss)
label = mx.nd.array(label)
self.assign(out_data[0], req[0], cls_loss)
self.assign(out_data[1], req[1], softmax_out)
self.assign(out_data[2], req[2], label)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
softmax_out = self._softmax_out.asnumpy()
mask_weights = self._mask_weights.asnumpy()
label = self._label.asnumpy().astype('int32')
label_zero = np.where(label != -1, 1-label, -1)
label = np.concatenate((label_zero, label), axis=1)
grad = (softmax_out - label) * self.grad_scale / (softmax_out.shape[2] * softmax_out.shape[3]) * mask_weights
grad = np.where(label != -1, grad, 0)
# print 'mean', np.mean(grad)
# print grad.std()
grad = mx.nd.array(grad)
self.assign(in_grad[0], req[0], grad)
self.assign(in_grad[1], req[1], 0)
self.assign(in_grad[2], req[2], 0)
@mx.operator.register('weighted_cross_entropy')
class WeightedCEProp(mx.operator.CustomOpProp):
def __init__(self, grad_scale=1, ignore_label=-1, use_ignore=False):
super(WeightedCEProp, self).__init__(need_top_grad=False)
self.use_ignore = use_ignore
self.ignore_label = ignore_label
self.grad_scale = grad_scale
def list_arguments(self):
return ['seg_pred', 'mask_weights', 'label']
def list_outputs(self):
return ['ce_loss', 'softmax_out', 'label_out']
def infer_shape(self, in_shape):
output_shape = in_shape[0]
return in_shape, [output_shape, output_shape, output_shape]
def create_operator(self, ctx, shapes, dtypes):
return WeightedCEOperator(self.grad_scale, self.ignore_label, self.use_ignore)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
```
#### File: FCIS/fcis/test_cpickle.py
```python
import sys
import os
import numpy as np
import pydensecrf.densecrf as dcrf
from graphcut.superpixel_cache import load_cache
from graphcut.comp_max_flow import solve_masks_with_lists
# Get im{read,write} from somewhere.
try:
from cv2 import imread, imwrite
except ImportError:
# Note that, sadly, skimage unconditionally imports scipy and matplotlib,
# so you'll need them if you don't have OpenCV. But you probably have them.
from skimage.io import imread, imsave
imwrite = imsave
# TODO: Use scipy instead.
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian
im_dir = '/home/yanrui/code/Share/msralab/ILSVRC2015/ResizedData_half'
cache_dir = '/home/yanrui/code/Share/FCIS_video/data/cache'
middle_name = 'VID/train/ILSVRC2015_VID_train_0000/ILSVRC2015_train_00000000'
im_names = ['000010', '000030', '000050', '000070']
roidbs = [load_gt_roidb(config.dataset.dataset, image_set, config.dataset.root_path, config.dataset.dataset_path,
flip=config.TRAIN.FLIP)
for image_set in image_sets]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, config)
def get_neighbor_matrix(spixel_list):
matrix_size = len(spixel_list)
matrix = np.zeros((matrix_size, matrix_size))
for spixel in spixel_list:
for neighbor in spixel.neighbor:
matrix[spixel.id][neighbor] = 1
matrix[neighbor][spixel.id] = 1
return matrix
segment = load_cache(os.path.join(cache_dir, middle_name, im_names) + '.pkl')
neighbor_matrix = get_neighbor_matrix(segment['spixel'])
color_hist_list = np.array([spixel.color_hist for spixel in segment['spixel']])
texture_hist_list = np.array([spixel.texture_hist for spixel in segment['spixel']])
flow_avg_list = np.array([spixel.flow_avg for spixel in segment['spixel']])
segments = segment['segment']
im = imread(os.path.join(im_dir, middle_name, im_names) + '.jpg')
solve_masks_with_lists(color_hist_list, texture_hist_list, flow_avg_list, neighbor_matrix, segments, gt_boxes, network_term)
```
|
{
"source": "jeremy886/crossword2019",
"score": 4
}
|
#### File: jeremy886/crossword2019/headletters.py
```python
import re
from tex_printable import board_size
def build_01_board(crosswords_lines):
row_num, col_num = board_size(crosswords_lines)
# print(crosswords_lines)
# with open("crosswords_out.txt", "rt") as f:
# crosswords = f.read()
#
# crosswords = "\n".join([line for line in crosswords.splitlines() if line.strip() != ""])
#
pattern = re.compile(r"[A-Z]")
# crosswords1 = pattern1.sub("1", crosswords)
# pattern2 = re.compile(r"\ ")
# crosswords2 = pattern2.sub("0", crosswords1)
board01_lines = []
for line in crosswords_lines:
if len(line) < col_num:
new_line = "0" * col_num
new_line = line + new_line[len(line):]
else:
new_line = line
new_line_01 = pattern.sub("1", new_line.replace(" ", "0"))
board01_lines.append(new_line_01)
# print(board01_lines)
return board01_lines
def check_letter(row, col, board):
"""
check cell in row and col to see if it's a head letter of a word
:param row: row starting from 0
:param col: col starting from 0
:param board: a list consists of 0 and 1 converted from original board
:return: head_value
0 not a head letter
1 or 0b01 is a head letter of a word across
2 or 0b10 is a head letter of a word down
3 or 0b11 is a head letter for both a word across and a word down
"""
# check 11 pattern for first row and first column
# check 011 pattern for other rows and columns
assert row <= len(board) - 1
assert col <= len(board[0]) - 1
head_value = 0
if board[row][col] == "1":
# check down word
if row == 0:
if board[row+1][col] == "1":
head_value += 2
elif board[row-1][col] == "0" and board[row+1][col] == "1":
head_value += 2
# check across word
if col == 0:
if board[row][col+1] == "1":
head_value += 1
elif board[row][col-1] == "0" and board[row][col+1] == "1":
head_value += 1
return head_value
def find_heads(board01_lines):
"""
:param board01_lines: list of '0'/'1' strings produced by build_01_board
:return: dict mapping the (row, col) of each head letter to a tuple of
(order number, across/down status as returned by check_letter)
"""
board = board01_lines
head_letters = {}
order = 1
for i in range(len(board)-1):
for j in range(len(board[0])-1):
if check_letter(i, j, board) > 0:
# print(f"row:{i} col: {j} -->", check_letter(i, j, board))
head_letters[(i, j)] = order, check_letter(i, j, board)
order += 1
return head_letters
if __name__ == '__main__':
from tex_printable import crossword_text_to_lines
crosswords_lines = crossword_text_to_lines("crosswords_out.txt")
board01_lines = build_01_board(crosswords_lines)
head_letters = find_heads(board01_lines)
print(head_letters)
```
#### File: jeremy886/crossword2019/tex_printable.py
```python
import os
from string import Template
def crossword_text_to_lines(crossword_text_filename):
crosswords = ""
with open(crossword_text_filename, "rt") as f:
for line in f:
if line.strip() != "":
#print(line.rstrip())
crosswords += line.rstrip()
crosswords += "\n"
crosswords_lines = [line for line in crosswords.splitlines() if line.strip() != ""]
return crosswords_lines
def board_size(crosswords_lines):
row_num = len(crosswords_lines)
col_num = max([len(line) for line in crosswords_lines])
return row_num, col_num
def find_word(crosswords_lines, key, type):
word = ""
if type == "across":
for col, ch in enumerate(crosswords_lines[key[0]]):
if col < key[1]:
continue
if ch != " ":
word += ch
else:
break
elif type == "down":
ch = crosswords_lines[key[0]][key[1]]
row = key[0]
while True and ch != " ":
word += ch
row += 1
try:
ch = crosswords_lines[row][key[1]]
except IndexError:
break
return word
def tex_out(crosswords_lines, head_letters):
row_num, col_num = board_size(crosswords_lines)
hl = head_letters
board_lines = []
fi = " " # filling
for nr, line in enumerate(crosswords_lines):
board_line = ""
for nc, ch in enumerate(line):
if ch == " ":
board_line += f"|*{fi*4}"
elif (nr, nc) in head_letters:
no = hl[nr, nc][0]
board_line += f"|[{no}]{ch}{fi*(2-len(str(no)))}"
else:
board_line += f"|{ch}{fi*4}"
board_line = board_line + f"|*{fi*4}" * (col_num-len(line)) + "|."
board_lines.append(board_line)
# print(r"\begin{Puzzle}{"+ f"{col_num}" + r"}{" + f"{row_num}" + r"}")
# for line in board_lines:
# print(line)
# print(r"\end{Puzzle}")
with open("crossword_puzzle.tex", "w") as texfile:
# tex code for puzzle
texfile.write(r"\begin{Puzzle}{" + f"{col_num}" + r"}{" + f"{row_num}" + r"}")
texfile.write(os.linesep)
texfile.writelines(os.linesep.join(board_lines))
texfile.write(os.linesep)
texfile.write(r"\end{Puzzle}")
texfile.write(os.linesep)
# tex code for clues
texfile.write(os.linesep)
clues_across = []
clues_down = []
for key, (order, type) in head_letters.items():
print(key, order, type)
if type == 0b01: # across
clues_across.append((order, find_word(crosswords_lines, key, "across"), ""))
elif type == 0b10: # down
clues_down.append((order, find_word(crosswords_lines, key, "down"), ""))
elif type == 0b11: # across and down
clues_across.append((order, find_word(crosswords_lines, key, "across"), ""))
clues_down.append((order, find_word(crosswords_lines, key, "down"), ""))
print(clues_across)
print(clues_down)
texfile.write(r"\begin{PuzzleClues}{\textbf{Across}}")
texfile.write(os.linesep)
for clue in clues_across:
clue_template = Template("\\Clue{$order}{$word}{$clue}")
texfile.write(clue_template.safe_substitute(order=clue[0], word=clue[1], clue=clue[2]))
texfile.write(os.linesep)
texfile.write(r"\end{PuzzleClues}")
texfile.write(os.linesep)
texfile.write(r"\begin{PuzzleClues}{\textbf{Down}}")
texfile.write(os.linesep)
for clue in clues_down:
clue_template = Template("\\Clue{$order}{$word}{$clue}")
texfile.write(clue_template.safe_substitute(order=clue[0], word=clue[1], clue=clue[2]))
texfile.write(os.linesep)
texfile.write(r"\end{PuzzleClues}")
def print_tex():
from headletters import find_heads, build_01_board
crosswords_lines = crossword_text_to_lines("crosswords_out.txt")
# print(crosswords_lines)
board01_lines = build_01_board(crosswords_lines)
head_letters = find_heads(board01_lines)
# build cue_list
# print(head_letters)
tex_out(crosswords_lines, head_letters)
```
|
{
"source": "jeremy886/django_bookmarks",
"score": 2
}
|
#### File: bookmarks/views/bookmarks.py
```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.utils import timezone
from django.views import generic
from django.views.generic import RedirectView
from .. import models
class List(LoginRequiredMixin, generic.ListView):
model = models.Bookmark
def get_queryset(self):
queryset = models.Bookmark.objects.current(self.request.user)
tag = self.kwargs.get('tag')
if tag:
queryset = queryset.filter(tags__name__in=[tag])
return queryset
class Create(LoginRequiredMixin, generic.CreateView):
fields = ('url', 'title', 'description', 'tags')
model = models.Bookmark
success_url = reverse_lazy('bookmarks:list')
def form_valid(self, form):
bookmark = form.save(commit=False)
bookmark.user = self.request.user
bookmark.save()
form.save_m2m()
return super().form_valid(form)
class Update(LoginRequiredMixin, generic.UpdateView):
fields = ('url', 'title', 'description', 'tags')
model = models.Bookmark
success_url = reverse_lazy('bookmarks:list')
def get_queryset(self):
return models.Bookmark.objects.current(self.request.user)
class Delete(LoginRequiredMixin, generic.UpdateView):
fields = ()
model = models.Bookmark
success_url = reverse_lazy('bookmarks:list')
template_name = 'bookmarks/bookmark_confirm_delete.html'
def get_queryset(self):
return models.Bookmark.objects.current(self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['delete_view'] = True
return context
def form_valid(self, form):
bookmark = form.save(commit=False)
bookmark.deleted_at = timezone.now()
bookmark.save()
return super().form_valid(form)
class Trash(LoginRequiredMixin, generic.ListView):
model = models.Bookmark
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['delete_view'] = True
context['trash_view'] = True
return context
def get_queryset(self):
return models.Bookmark.objects.deleted(self.request.user)
class Undelete(LoginRequiredMixin, RedirectView):
url = reverse_lazy('bookmarks:list')
def get_object(self):
return get_object_or_404(
models.Bookmark,
user=self.request.user,
pk=self.kwargs.get('pk'),
deleted_at__isnull=False
)
def get(self, request, *args, **kwargs):
bookmark = self.get_object()
bookmark.deleted_at = None
bookmark.save()
return super().get(request, *args, **kwargs)
class Search(LoginRequiredMixin, generic.ListView):
model = models.Bookmark
def get_queryset(self):
queryset = models.Bookmark.objects.current(self.request.user)
q_objects = [
Q(title__icontains=word) | Q(description__icontains=word)
for word in self.request.GET.get('q').split()
]
queryset = queryset.filter(*[q for q in q_objects])
return queryset
class AddBookmarkToCollection(LoginRequiredMixin, generic.View):
def get_bookmark(self, request):
bookmark = get_object_or_404(
models.Bookmark,
user=self.request.user,
id=self.request.GET.get('bookmark')
)
return bookmark
def get_collection(self, request):
collection = get_object_or_404(
models.Collection,
user=self.request.user,
slug=self.request.GET.get('collection')
)
return collection
def get_redirect_url(self, *args, **kwargs):
return self.collection.get_absolute_url()
def get(self, request, *args, **kwargs):
self.bookmark = self.get_bookmark(request)
self.collection = self.get_collection(request)
self.bookmark.collections.add(self.collection)
return JsonResponse({'success': True})
```
|
{
"source": "Jeremy98-alt/ADM-HW4",
"score": 2
}
|
#### File: Jeremy98-alt/ADM-HW4/functions.py
```python
from collections import defaultdict
import scipy.integrate as integrate
import scipy.special as special
import numpy as np
import pandas as pd
import math
import re
import random
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from wordcloud import WordCloud
import functools
import operator
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from langdetect import detect
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
from tqdm import tqdm
from matplotlib import pyplot as plt
import pickle
from PIL import Image
########## EXERCISE 1 ##########
# Our hash function
def hash_function(string_):
n = 2**32 + 15
result = 0
for char in string_:
result = result * 31 + ord(char)
result = format(result % n, '032b')
return result
# Create buckets
def create_registers():
return defaultdict(lambda :-1)
# Update buckets
def update_register(string_, registers):
b = 12
x = hash_function(string_)
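# HyperLogLog update: the first b bits of the hash select one of 2^b registers, and the
# register keeps the maximum position of the first 1-bit seen in the remaining bits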
j = int(str(x)[:b],2)
if '1' in set(x[b:]):
rho_w = (x[b:]).index('1')+1
else:
rho_w = len(x[b:])
registers[j] = max(registers[j],rho_w)
# process each row and pass to the register
def process_data(registers):
with open('hash.txt') as f:
while True:
line = f.readline()
if not line:
break
update_register(line.strip(), registers)
# estimate the cardinality
def hyperLogLog(registers):
b = 12
m = 2**b
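# standard HyperLogLog estimate: E = alpha_m * m^2 * (sum_j 2^(-M_j))^(-1), where the
# bias-correction constant alpha_m = (m * integral below)^(-1)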
alpha = (m)*(integrate.quad(lambda u: (math.log((2+u)/(1+u),2))**(m),0,np.infty )[0])
Z =(sum(2**-registers[j] for j in registers.keys()))**(-1)
E = (alpha)**(-1)*(m**2)*Z
return E
# the error of our filter
def error_rate(registers_count):
return 1.3 / math.sqrt(2**registers_count)
########## EXERCISE 2 ##########
# group by product id and concatenate text fields
def groupby_productid_df(df):
productid_df = pd.DataFrame()
product_id = []
reviews = []
new_df = pd.DataFrame()
for product, group in df.groupby('ProductId'):
product_id.append(product)
reviews.append(" ".join(list(group['Text'])))
productid_df['ProductId'] = product_id
productid_df['reviews'] = reviews
return productid_df
# preprocess text
def clean_text(text):
x = re.compile('<.*?>')
text = re.sub(x, '', text)
stop_words = set(stopwords.words('english')) # obtain the stop words
good_words = [] # save the correct words to consider like tokens
tokenizer = RegexpTokenizer(r"[\w']+") # function to recognize the tokens
words = tokenizer.tokenize(text) # tokenize the text
for word in words:
# check if the word is lower and it isn't a stop word or a number
if word.lower() not in stop_words and word.isalpha():
word = PorterStemmer().stem(word) # use the stemmer function
good_words.append(word.lower()) # insert the good token to lower case
return good_words
# a kmeans implementation
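# (Lloyd's algorithm: start from random centroids, then alternate between assigning every
# point to its nearest centroid and recomputing each centroid as the mean of its cluster,
# stopping when the assignments no longer change or 100 iterations are reached)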
class my_Kmeans():
def __init__(self, n_clusters):
self.n_clusters = n_clusters
self.prev_labels = [1]
self.labels = []
# select random centroids
def initialize_algo(self, matrix):
random_indices = np.random.choice(len(matrix), size= self.n_clusters, replace=False)
self.centroids = matrix[random_indices, :]
# stop if the clusters are the same between two iterations
def stop_iteration_flag(self):
if self.labels == self.prev_labels:
return True
else:
return False
# euclidean distance between two vectors
def compute_distance(self, vec1, vec2):
return np.linalg.norm(vec1 - vec2)
# assign each data point to its closest centroid
def assign_clusters(self, matrix):
self.clusters = {}
self.prev_labels = self.labels.copy()
self.labels = []
for row in matrix:
centroid_idx = np.argmin([self.compute_distance(row, centroid) for centroid in self.centroids])
self.clusters.setdefault(centroid_idx, []).append(row)
self.labels.append(centroid_idx)
# update the centroids by taking the mean of all points in the cluster
def update_centroids(self):
self.centroids = [np.mean(i, axis = 0) for i in self.clusters.values()]
# fit the model
def fit(self, matrix):
self.initialize_algo(matrix)
iter_count = 0
# stop when clusters don't change anymore or we reach 100 iterations
while all((not self.stop_iteration_flag(), iter_count < 100)):
print("iteration no. {0}".format(iter_count))
self.assign_clusters(matrix)
self.update_centroids()
iter_count += 1
return self.labels
# compute the sum of the squared distance between each point and its centroid
def inertia(self, matrix):
sum_distance = 0
for i in range(len(matrix)):
sum_distance += (self.compute_distance(matrix[i], self.centroids[self.labels[i]]))**2
return sum_distance
# special method used for dynamic plotting
def fit_for_plot(self, matrix):
self.initialize_algo(matrix)
iter_count = 0
d = {}
while iter_count <4:
print("iteration no. {0}".format(iter_count))
self.assign_clusters(matrix)
self.update_centroids()
iter_count += 1
d[iter_count] = self.labels
return d
# elbow method plot
def showElbow(elbow):
plt.title('Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('Sum of squared distance')
plt.plot(list(elbow.keys()), list(elbow.values()))
plt.grid()
plt.show()
# compares clusters between two models
def compare_models(my_kmeans_output, kmeans_sk_output):
my_kmeans_dict ={}
# store my_kmeans labels and index
for idx, key in enumerate(my_kmeans_output):
my_kmeans_dict.setdefault(key, set()).add(idx)
kmeans_sk_dict = {}
# store kmeans++ labels and index
for idx, key in enumerate(list(kmeans_sk_output)):
kmeans_sk_dict.setdefault(key, set()).add(idx)
cardinality_intersection = {}
# count intersections between clusters
for idx1 in kmeans_sk_dict.keys():
cardinality_intersection[idx1] = [len(my_kmeans_dict[idx2].intersection(kmeans_sk_dict[idx1])) for idx2 in my_kmeans_dict.keys()]
# compute match %
for key in cardinality_intersection:
cardinality_intersection[key] = [round((x / sum(cardinality_intersection[key])*100),2) for x in cardinality_intersection[key]]
return cardinality_intersection
# add a column named cluster
def addClusterColumn(new_df, cluster_labels):
new_df["cluster"] = cluster_labels
return new_df
def ListTokenPerCluster(new_df):
reviews = []
new_dp = pd.DataFrame()
for cluster, group in new_df.groupby('cluster'):
reviews.append(group['reviews'].tolist())
new_dp['reviews'] = reviews
return new_dp
# plots word clouds for each cluster
def show_word_clouds(new_dp):
for k in range(10):
text = functools.reduce(operator.iconcat, new_dp['reviews'][k], [])
wordcloud = WordCloud(collocations = False, colormap = "RdYlGn",background_color='black', max_font_size = 50).generate(" ".join(text))
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.title(f"{k} Cluster has this wordcloud")
plt.show()
# computes the number of products for each cluster
def numberOfProduct(cluster_labels):
get_idx, counts_per_cluster = np.unique(cluster_labels, return_counts=True)
print("Show the number of products per each cluster: \n")
for idx, val in enumerate(counts_per_cluster):
print("The cluster {} has {} products".format(idx, val))
# merge dataframes to visualize scores
def dataset_score(new_df, df):
score_distribution = pd.merge(new_df[["ProductId","cluster"]], df[["ProductId","Score"]], on="ProductId")
return score_distribution
# plots the review score distribution for each cluster
def showPlotScoreDistribution(interested_dt):
fig, axes = plt.subplots(5, 2, figsize=(20,20))
# one barplot per cluster (0 through 9), filling the 5x2 grid of axes
for cluster_id, ax in enumerate(axes.flatten()):
data = interested_dt[interested_dt.cluster == cluster_id].groupby([interested_dt.Score]).Score.count().to_frame('count').reset_index()
sns.barplot(x = "Score", y = "count", data = data, ax = ax, palette = "GnBu")
# gets the unique review users for each cluster
def usersWritingCluster(new_df, dt):
merge_dt = pd.merge(new_df[["ProductId", "cluster"]], dt[["ProductId","UserId"]], on="ProductId")
return merge_dt.groupby(["cluster"]).UserId.nunique()
```
|
{
"source": "jeremy9959/bayesian_changepoint_detection",
"score": 3
}
|
#### File: bayesian_changepoint_detection/bayesian_changepoint_detection/online_changepoint_detection.py
```python
from __future__ import division
import numpy as np
from scipy import stats
def online_changepoint_detection(data, hazard_func, observation_likelihood):
maxes = np.zeros(len(data) + 1)
R = np.zeros((len(data) + 1, len(data) + 1))
R[0, 0] = 1
for t, x in enumerate(data):
# Evaluate the predictive distribution for the new datum under each of
# the parameters. This is the standard thing from Bayesian inference.
predprobs = observation_likelihood.pdf(x)
# Evaluate the hazard function for this interval
H = hazard_func(np.array(range(t+1)))
# Evaluate the growth probabilities - shift the probabilities down and to
# the right, scaled by the hazard function and the predictive
# probabilities.
R[1:t+2, t+1] = R[0:t+1, t] * predprobs * (1-H)
# Evaluate the probability that there *was* a changepoint and we're
# accumulating the mass back down at r = 0.
R[0, t+1] = np.sum( R[0:t+1, t] * predprobs * H)
# Renormalize the run length probabilities for improved numerical
# stability.
R[:, t+1] = R[:, t+1] / np.sum(R[:, t+1])
# Update the parameter sets for each possible run length.
observation_likelihood.update_theta(x)
maxes[t] = R[:, t].argmax()
return R, maxes
def constant_hazard(lam, r):
return 1/lam * np.ones(r.shape)
class StudentT:
def __init__(self, alpha, beta, kappa, mu):
self.alpha0 = self.alpha = np.array([alpha])
self.beta0 = self.beta = np.array([beta])
self.kappa0 = self.kappa = np.array([kappa])
self.mu0 = self.mu = np.array([mu])
def pdf(self, data):
return stats.t.pdf(x=data,
df=2*self.alpha,
loc=self.mu,
scale=np.sqrt(self.beta * (self.kappa+1) / (self.alpha *
self.kappa)))
def update_theta(self, data):
muT0 = np.concatenate((self.mu0, (self.kappa * self.mu + data) / (self.kappa + 1)))
kappaT0 = np.concatenate((self.kappa0, self.kappa + 1.))
alphaT0 = np.concatenate((self.alpha0, self.alpha + 0.5))
betaT0 = np.concatenate((self.beta0, self.beta + (self.kappa * (data -
self.mu)**2) / (2. * (self.kappa + 1.))))
self.mu = muT0
self.kappa = kappaT0
self.alpha = alphaT0
self.beta = betaT0
class Poisson:
def __init__(self, k, theta):
self.k0 = self.k = np.array([k])
self.theta0 = self.theta = np.array([theta])
def pdf(self, data):
return stats.nbinom.pmf(data,self.k, 1/(1+self.theta))
def update_theta(self, data):
kT0 = np.concatenate((self.k0, self.k+data))
thetaT0 = np.concatenate((self.theta0, self.theta/(1+self.theta)))
self.k = kT0
self.theta = thetaT0
```
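A brief usage sketch of the detector above on synthetic data with a single shift in mean; the hazard rate (250) and Student-t prior parameters are illustrative choices, not values prescribed by the module:
```python
from functools import partial
import numpy as np
from bayesian_changepoint_detection import online_changepoint_detection as oncd

# Two segments with different means
data = np.concatenate([np.random.normal(0, 1, 200), np.random.normal(5, 1, 200)])

R, maxes = oncd.online_changepoint_detection(
    data,
    partial(oncd.constant_hazard, 250),                  # constant hazard: expected run length of 250
    oncd.StudentT(alpha=0.1, beta=0.01, kappa=1, mu=0),  # conjugate prior on the Gaussian segments
)

# R[r, t] holds the probability at time t that the current run length is r;
# probability mass collapsing toward r = 0 around t = 200 signals the changepoint.
print(np.argmax(R[:, -1]))   # most likely run length after the last observation
```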
|
{
"source": "jeremy9959/cnvkit",
"score": 2
}
|
#### File: cnvkit/cnvlib/core.py
```python
from __future__ import absolute_import, division, print_function
from builtins import map
from past.builtins import basestring
import contextlib
import logging
import os
import subprocess
import tempfile
# __________________________________________________________________________
# I/O helpers
def call_quiet(*args):
"""Safely run a command and get stdout; print stderr if there's an error.
Like subprocess.check_output, but silent in the normal case where the
command logs unimportant stuff to stderr. If there is an error, then the
full error message(s) is shown in the exception message.
"""
# args = map(str, args)
if not len(args):
raise ValueError("Must supply at least one argument (the command name)")
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as exc:
raise RuntimeError("Could not find the executable %r" % args[0]
+ " -- is it installed correctly?"
+ "\n(Original error: %s)" % exc)
out, err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Subprocess command failed:\n$ %s\n\n%s"
% (' '.join(args), err))
return out
def ensure_path(fname):
"""Create dirs and move an existing file to avoid overwriting, if necessary.
If a file already exists at the given path, it is renamed with an integer
suffix to clear the way.
"""
if '/' in os.path.normpath(fname):
# Ensure the output directory exists
dname = os.path.dirname(os.path.abspath(fname))
if dname and not os.path.isdir(dname):
try:
os.makedirs(dname)
except OSError as exc:
raise OSError("Output path " + fname +
" contains a directory " + dname +
" that cannot be created: %s" % exc)
if os.path.isfile(fname):
# Add an integer suffix to the existing file name
cnt = 1
bak_fname = "%s.%d" % (fname, cnt)
while os.path.isfile(bak_fname):
cnt += 1
bak_fname = "%s.%d" % (fname, cnt)
os.rename(fname, bak_fname)
logging.info("Moved existing file %s -> %s", fname, bak_fname)
return True
@contextlib.contextmanager
def temp_write_text(text, mode="w+b"):
"""Save text to a temporary file.
NB: This won't work on Windows b/c the file stays open.
"""
with tempfile.NamedTemporaryFile(mode=mode) as tmp:
tmp.write(text)
tmp.flush()
yield tmp.name
# __________________________________________________________________________
# More helpers
def assert_equal(msg, **values):
"""Evaluate and compare two or more values for equality.
Sugar for a common assertion pattern. Saves re-evaluating (and retyping)
the same values for comparison and error reporting.
Example:
>>> assert_equal("Mismatch", expected=1, saw=len(['xx', 'yy']))
...
ValueError: Mismatch: expected = 1, saw = 2
"""
ok = True
key1, val1 = values.popitem()
msg += ": %s = %r" % (key1, val1)
for okey, oval in values.items():
msg += ", %s = %r" % (okey, oval)
if oval != val1:
ok = False
if not ok:
raise ValueError(msg)
def check_unique(items, title):
"""Ensure all items in an iterable are identical; return that one item."""
its = set(items)
assert len(its) == 1, ("Inconsistent %s keys: %s"
% (title, ' '.join(map(str, sorted(its)))))
return its.pop()
def fbase(fname):
"""Strip directory and all extensions from a filename."""
base = os.path.basename(fname)
# Gzip extension usually follows another extension
if base.endswith('.gz'):
base = base[:-3]
# Cases to drop more than just the last dot
known_multipart_exts = (
'.antitargetcoverage.cnn', '.targetcoverage.cnn',
'.antitargetcoverage.csv', '.targetcoverage.csv',
# Pipeline suffixes
'.recal.bam', '.deduplicated.realign.bam',
)
for ext in known_multipart_exts:
if base.endswith(ext):
base = base[:-len(ext)]
break
else:
base = base.rsplit('.', 1)[0]
return base
```
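A few illustrative calls against the helpers above (assuming the package is importable as `cnvlib`, as in this repo):
```python
from cnvlib import core

core.fbase("/data/sample1.targetcoverage.cnn")   # -> "sample1" (multipart extension stripped)
core.fbase("reads.fastq.gz")                     # -> "reads" (.gz, then last extension)

try:
    core.assert_equal("Mismatch", expected=1, saw=len(["xx", "yy"]))
except ValueError as exc:
    print(exc)   # mentions both keys, e.g. "Mismatch: saw = 2, expected = 1"
```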
#### File: cnvkit/cnvlib/import_rna.py
```python
from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
import pandas as pd
from . import rna
def do_import_rna(gene_count_fnames, in_format, gene_resource_fname,
correlations_fname=None, normal_fnames=(),
do_gc=True, do_txlen=True, max_log2=3):
"""Convert a cohort of per-gene read counts to CNVkit .cnr format.
The expected data source is TCGA gene-level expression counts for individual
samples, but other sources should be fine, too.
"""
# Deduplicate and ensure all normals are included in the analysis
gene_count_fnames = sorted(set(list(gene_count_fnames) + list(normal_fnames)))
if in_format == 'rsem':
sample_counts, tx_lengths = aggregate_rsem(gene_count_fnames)
elif in_format == 'counts':
sample_counts = aggregate_gene_counts(gene_count_fnames)
tx_lengths = None
else:
raise RuntimeError("Unrecognized input format name: %r" % in_format)
sample_counts = rna.filter_probes(sample_counts)
logging.info("Loading gene metadata" +
(" and TCGA gene expression/CNV profiles"
if correlations_fname else ""))
gene_info = rna.load_gene_info(gene_resource_fname, correlations_fname)
logging.info("Aligning gene info to sample gene counts")
normal_ids = [os.path.basename(f).split('.')[0] for f in normal_fnames]
gene_info, sample_counts, sample_data_log2 = rna.align_gene_info_to_samples(
gene_info, sample_counts, tx_lengths, normal_ids)
# Summary table has log2-normalized values, not raw counts
# ENH show both, with column header suffixes to distinguish?
all_data = pd.concat([gene_info, sample_data_log2], axis=1)
# CNVkit files have both absolute and log2-normalized read counts
cnrs = rna.attach_gene_info_to_cnr(sample_counts, sample_data_log2,
gene_info)
cnrs = (rna.correct_cnr(cnr, do_gc, do_txlen, max_log2) for cnr in cnrs)
return all_data, cnrs
def aggregate_gene_counts(filenames):
prev_row_count = None
sample_cols = {}
for fname in filenames:
d = (pd.read_table(fname,
header=None,
comment="_",
names=["gene_id", "expected_count"],
converters={"gene_id": rna.before(".")})
.set_index("gene_id"))
# .drop(["__no_feature", "__ambiguous", "__too_low_aQual",
# "__not_aligned", "__alignment_not_unique"]))
if prev_row_count is None:
prev_row_count = len(d)
elif len(d) != prev_row_count:
raise RuntimeError("Number of rows in each input file is not equal")
sample_id = rna.before(".")(os.path.basename(fname))
sample_cols[sample_id] = d.expected_count.fillna(0)
sample_counts = pd.DataFrame(sample_cols)
return sample_counts
def aggregate_rsem(fnames):
"""Pull out the expected read counts from each RSEM file.
The format of RSEM's ``*_rsem.genes.results`` output files is tab-delimited
with a header row. We extract the Ensembl gene ID, expected read counts, and
transcript lengths from each file.
Returns
-------
sample_counts : DataFrame
Row index is Ensembl gene ID, column index is filename.
tx_lengths : Series
Gene lengths.
"""
prev_row_count = None
sample_cols = {}
length_cols = []
length_colname = 'length' # or: 'effective_length'
for fname in fnames:
# NB: read_table(index_col=_) works independently of combine=, dtype=
# so index column needs to be processed separately
# https://github.com/pandas-dev/pandas/issues/9435
d = pd.read_table(fname,
usecols=['gene_id', length_colname, 'expected_count'],
# index_col='gene_id',
converters={'gene_id': rna.before('.')}
).set_index('gene_id')
if prev_row_count is None:
prev_row_count = len(d)
elif len(d) != prev_row_count:
raise RuntimeError("Number of rows in each input file is not equal")
sample_id = rna.before(".")(os.path.basename(fname))
sample_cols[sample_id] = d.expected_count.fillna(0)
length_cols.append(d[length_colname])
sample_counts = pd.DataFrame(sample_cols)
tx_lengths = pd.Series(np.vstack(length_cols).mean(axis=0),
index=sample_counts.index)
return sample_counts, tx_lengths
```
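A hypothetical invocation of `do_import_rna`; the file names here are placeholders, not files shipped with the project:
```python
from cnvlib.import_rna import do_import_rna

all_data, cnrs = do_import_rna(
    ["tumor1.genes.results", "tumor2.genes.results"],   # per-sample RSEM outputs
    in_format="rsem",
    gene_resource_fname="ensembl-gene-info.tsv",
    normal_fnames=["normal1.genes.results"],
)
# `cnrs` is a generator of bias-corrected CopyNumArrays, one per sample
for cnr in cnrs:
    print(cnr.sample_id, len(cnr))
```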
#### File: cnvkit/cnvlib/parallel.py
```python
from __future__ import absolute_import, division, print_function
from builtins import object
import atexit
import tempfile
import gzip
import os
from contextlib import contextmanager
from concurrent import futures
# from concurrent.futures import wait
class SerialPool(object):
"""Mimic the concurrent.futures.Executor interface, but run in serial."""
def __init__(self):
pass
def submit(self, func, *args):
"""Just call the function on the arguments."""
return SerialFuture(func(*args))
def map(self, func, iterable):
"""Just apply the function to `iterable`."""
return map(func, iterable)
def shutdown(self, wait=True):
"""Do nothing."""
pass
class SerialFuture(object):
"""Mimic the concurrent.futures.Future interface."""
def __init__(self, result):
self._result = result
def result(self):
return self._result
@contextmanager
def pick_pool(nprocs):
if nprocs == 1:
yield SerialPool()
else:
if nprocs < 1:
nprocs = None
with futures.ProcessPoolExecutor(max_workers=nprocs) as pool:
yield pool
def rm(path):
"""Safely remove a file."""
try:
os.unlink(path)
except OSError:
pass
def to_chunks(bed_fname, chunk_size=5000):
"""Split a BED file into `chunk_size`-line parts for parallelization."""
k, chunk = 0, 0
fd, name = tempfile.mkstemp(suffix=".bed", prefix="tmp.%s." % chunk)
outfile = os.fdopen(fd, "w")
atexit.register(rm, name)
opener = (gzip.open if bed_fname.endswith(".gz") else open)
with opener(bed_fname) as infile:
for line in infile:
if line[0] == "#":
continue
k += 1
outfile.write(line)
if k % chunk_size == 0:
outfile.close()
yield name
chunk += 1
fd, name = tempfile.mkstemp(suffix=".bed",
prefix="tmp.%s." % chunk)
outfile = os.fdopen(fd, "w")
outfile.close()
if k % chunk_size:
outfile.close()
yield name
```
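A small usage sketch of the pool shim (the `square` function is a stand-in; process pools need a module-level function and, on some platforms, the `__main__` guard):
```python
from cnvlib.parallel import pick_pool

def square(x):
    return x * x

if __name__ == "__main__":
    with pick_pool(1) as pool:                     # SerialPool, runs in-process
        print(list(pool.map(square, range(4))))    # [0, 1, 4, 9]
    with pick_pool(4) as pool:                     # ProcessPoolExecutor, 4 workers
        futs = [pool.submit(square, x) for x in range(4)]
        print([f.result() for f in futs])          # [0, 1, 4, 9]
```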
#### File: cnvkit/cnvlib/reference.py
```python
from __future__ import absolute_import, division, print_function
from builtins import map, zip
import collections
import logging
import numpy as np
import pyfaidx
from skgenome import tabio, GenomicArray as GA
from . import core, fix, descriptives, params
from .cmdutil import read_cna
from .cnary import CopyNumArray as CNA
def do_reference(target_fnames, antitarget_fnames=None, fa_fname=None,
male_reference=False, female_samples=None,
do_gc=True, do_edge=True, do_rmask=True):
"""Compile a coverage reference from the given files (normal samples)."""
if antitarget_fnames:
core.assert_equal("Unequal number of target and antitarget files given",
targets=len(target_fnames),
antitargets=len(antitarget_fnames))
if not fa_fname:
logging.info("No FASTA reference genome provided; "
"skipping GC, RM calculations")
if female_samples is None:
# NB: Antitargets are usually preferred for inferring sex, but might be
# empty files, in which case no inference can be done. Since targets are
# guaranteed to exist, infer from those first, then replace those
# values where antitargets are suitable.
sexes = infer_sexes(target_fnames, male_reference)
if antitarget_fnames:
a_sexes = infer_sexes(antitarget_fnames, male_reference)
for sid, a_is_xx in a_sexes.items():
t_is_xx = sexes.get(sid)
if t_is_xx is None:
sexes[sid] = a_is_xx
elif t_is_xx != a_is_xx and a_is_xx is not None:
logging.warning("Sample %s chromosomal X/Y ploidy looks "
"like %s in targets but %s in antitargets; "
"preferring antitargets",
sid,
"female" if t_is_xx else "male",
"female" if a_is_xx else "male")
sexes[sid] = a_is_xx
else:
sexes = collections.defaultdict(lambda: female_samples)
# Calculate & save probe centers
ref_probes = combine_probes(target_fnames, fa_fname,
male_reference, sexes, True,
do_gc, do_edge, False)
if antitarget_fnames:
ref_probes.add(combine_probes(antitarget_fnames, fa_fname,
male_reference, sexes, False,
do_gc, False, do_rmask))
ref_probes.center_all(skip_low=True)
ref_probes.sort_columns()
warn_bad_bins(ref_probes)
return ref_probes
def do_reference_flat(targets, antitargets=None, fa_fname=None,
male_reference=False):
"""Compile a neutral-coverage reference from the given intervals.
Combines the intervals, shifts chrX values if requested, and calculates GC
and RepeatMasker content from the genome FASTA sequence.
"""
ref_probes = bed2probes(targets)
if antitargets:
ref_probes.add(bed2probes(antitargets))
# Set sex chromosomes by "reference" sex
ref_probes['log2'] = ref_probes.expect_flat_log2(male_reference)
ref_probes['depth'] = np.exp2(ref_probes['log2']) # Shim
# Calculate GC and RepeatMasker content for each probe's genomic region
if fa_fname:
gc, rmask = get_fasta_stats(ref_probes, fa_fname)
ref_probes['gc'] = gc
ref_probes['rmask'] = rmask
# warn_bad_bins(ref_probes)
else:
logging.info("No FASTA reference genome provided; "
"skipping GC, RM calculations")
ref_probes.sort_columns()
return ref_probes
def bed2probes(bed_fname):
"""Create a neutral-coverage CopyNumArray from a file of regions."""
regions = tabio.read_auto(bed_fname)
table = regions.data.loc[:, ("chromosome", "start", "end")]
table["gene"] = (regions.data["gene"] if "gene" in regions.data else '-')
table["log2"] = 0.0
table["spread"] = 0.0
return CNA(table, {"sample_id": core.fbase(bed_fname)})
def infer_sexes(cnn_fnames, is_male_reference):
"""Map sample IDs to inferred chromosomal sex, where possible.
For samples where the source file is empty or does not include either sex
chromosome, that sample ID will not be in the returned dictionary.
"""
sexes = {}
for fname in cnn_fnames:
cnarr = read_cna(fname)
if cnarr:
is_xx = cnarr.guess_xx(is_male_reference)
if is_xx is not None:
sexes[cnarr.sample_id] = is_xx
return sexes
def combine_probes(filenames, fa_fname, is_male_reference, sexes, skip_low,
fix_gc, fix_edge, fix_rmask):
"""Calculate the median coverage of each bin across multiple samples.
Parameters
----------
filenames : list
List of string filenames, corresponding to targetcoverage.cnn and
antitargetcoverage.cnn files, as generated by 'coverage' or
'import-picard'.
fa_fname : str
Reference genome sequence in FASTA format, used to extract GC and
RepeatMasker content of each genomic bin.
is_male_reference : bool
skip_low : bool
fix_gc : bool
fix_edge : bool
fix_rmask : bool
Returns
-------
CopyNumArray
One object summarizing the coverages of the input samples, including
each bin's "average" coverage, "spread" of coverages, and GC content.
"""
columns = {}
# Load coverage from target/antitarget files
logging.info("Loading %s", filenames[0])
cnarr1 = read_cna(filenames[0])
if not len(cnarr1):
# Just create an empty array with the right columns
col_names = ['chromosome', 'start', 'end', 'gene', 'log2', 'depth']
if 'gc' in cnarr1 or fa_fname:
col_names.append('gc')
if fa_fname:
col_names.append('rmask')
col_names.append('spread')
return CNA.from_rows([], col_names, {'sample_id': "reference"})
# Calculate GC and RepeatMasker content for each probe's genomic region
if fa_fname and (fix_rmask or fix_gc):
gc, rmask = get_fasta_stats(cnarr1, fa_fname)
if fix_gc:
columns['gc'] = gc
if fix_rmask:
columns['rmask'] = rmask
elif 'gc' in cnarr1 and fix_gc:
# Reuse .cnn GC values if they're already stored (via import-picard)
gc = cnarr1['gc']
columns['gc'] = gc
# Make the sex-chromosome coverages of male and female samples compatible
is_chr_x = (cnarr1.chromosome == cnarr1._chr_x_label)
is_chr_y = (cnarr1.chromosome == cnarr1._chr_y_label)
flat_coverage = cnarr1.expect_flat_log2(is_male_reference)
def shift_sex_chroms(cnarr):
"""Shift sample X and Y chromosomes to match the reference sex.
Reference values:
XY: chrX -1, chrY -1
XX: chrX 0, chrY -1
Plan:
chrX:
xx sample, xx ref: 0 (from 0)
xx sample, xy ref: -= 1 (from -1)
xy sample, xx ref: += 1 (from 0) +1
xy sample, xy ref: 0 (from -1) +1
chrY:
xx sample, xx ref: = -1 (from -1)
xx sample, xy ref: = -1 (from -1)
xy sample, xx ref: 0 (from -1) +1
xy sample, xy ref: 0 (from -1) +1
"""
is_xx = sexes.get(cnarr.sample_id)
cnarr['log2'] += flat_coverage
if is_xx:
# chrX has same ploidy as autosomes; chrY is just unusable noise
cnarr[is_chr_y, 'log2'] = -1.0 # np.nan is worse
else:
# 1/2 #copies of each sex chromosome
cnarr[is_chr_x | is_chr_y, 'log2'] += 1.0
edge_bias = fix.get_edge_bias(cnarr1, params.INSERT_SIZE)
def bias_correct_coverage(cnarr):
"""Perform bias corrections on the sample."""
cnarr.center_all(skip_low=skip_low)
shift_sex_chroms(cnarr)
# Skip bias corrections if most bins have no coverage (e.g. user error)
if (cnarr['log2'] > params.NULL_LOG2_COVERAGE - params.MIN_REF_COVERAGE
).sum() <= len(cnarr) // 2:
logging.warning("WARNING: most bins have no or very low coverage; "
"check that the right BED file was used")
else:
if 'gc' in columns and fix_gc:
logging.info("Correcting for GC bias...")
cnarr = fix.center_by_window(cnarr, .1, columns['gc'])
if 'rmask' in columns and fix_rmask:
logging.info("Correcting for RepeatMasker bias...")
cnarr = fix.center_by_window(cnarr, .1, columns['rmask'])
if fix_edge:
logging.info("Correcting for density bias...")
cnarr = fix.center_by_window(cnarr, .1, edge_bias)
return cnarr['log2']
# Pseudocount of 1 "flat" sample
all_depths = [cnarr1['depth'] if 'depth' in cnarr1
else np.exp2(cnarr1['log2'])]
all_coverages = [flat_coverage, bias_correct_coverage(cnarr1)]
for fname in filenames[1:]:
logging.info("Loading target %s", fname)
cnarrx = read_cna(fname)
# Bin information should match across all files
if not np.array_equal(
cnarr1.data.loc[:, ('chromosome', 'start', 'end', 'gene')].values,
cnarrx.data.loc[:, ('chromosome', 'start', 'end', 'gene')].values):
raise RuntimeError("%s bins do not match those in %s"
% (fname, filenames[0]))
all_depths.append(cnarrx['depth'] if 'depth' in cnarrx
else np.exp2(cnarrx['log2']))
all_coverages.append(bias_correct_coverage(cnarrx))
all_coverages = np.vstack(all_coverages)
logging.info("Calculating average bin coverages")
cvg_centers = np.apply_along_axis(descriptives.biweight_location, 0,
all_coverages)
depth_centers = np.apply_along_axis(descriptives.biweight_location, 0,
np.vstack(all_depths))
logging.info("Calculating bin spreads")
spreads = np.array([descriptives.biweight_midvariance(a, initial=i)
for a, i in zip(all_coverages.T, cvg_centers)])
columns.update({
'chromosome': cnarr1.chromosome,
'start': cnarr1.start,
'end': cnarr1.end,
'gene': cnarr1['gene'],
'log2': cvg_centers,
'depth': depth_centers,
'spread': spreads,
})
return CNA.from_columns(columns, {'sample_id': "reference"})
def warn_bad_bins(cnarr, max_name_width=50):
"""Warn about target bins where coverage is poor.
Prints a formatted table to stderr.
"""
bad_bins = cnarr[fix.mask_bad_bins(cnarr)]
fg_index = ~bad_bins['gene'].isin(params.ANTITARGET_ALIASES)
fg_bad_bins = bad_bins[fg_index]
if len(fg_bad_bins) > 0:
bad_pct = (100 * len(fg_bad_bins)
/ sum(~cnarr['gene'].isin(params.ANTITARGET_ALIASES)))
logging.info("Targets: %d (%s) bins failed filters "
"(log2 < %s, log2 > %s, spread > %s)",
len(fg_bad_bins),
"%.4f" % bad_pct + '%',
params.MIN_REF_COVERAGE,
-params.MIN_REF_COVERAGE,
params.MAX_REF_SPREAD)
if len(fg_bad_bins) < 500:
gene_cols = min(max_name_width, max(map(len, fg_bad_bins['gene'])))
labels = fg_bad_bins.labels()
chrom_cols = max(labels.apply(len))
last_gene = None
for label, probe in zip(labels, fg_bad_bins):
if probe.gene == last_gene:
gene = ' "'
else:
gene = probe.gene
last_gene = gene
if len(gene) > max_name_width:
gene = gene[:max_name_width-3] + '...'
if 'rmask' in cnarr:
logging.info(" %s %s log2=%.3f spread=%.3f rmask=%.3f",
gene.ljust(gene_cols), label.ljust(chrom_cols),
probe.log2, probe.spread, probe.rmask)
else:
logging.info(" %s %s log2=%.3f spread=%.3f",
gene.ljust(gene_cols), label.ljust(chrom_cols),
probe.log2, probe.spread)
# Count the number of BG bins dropped, too (names are all "Antitarget")
bg_bad_bins = bad_bins[~fg_index]
if len(bg_bad_bins) > 0:
bad_pct = (100 * len(bg_bad_bins)
/ sum(cnarr['gene'].isin(params.ANTITARGET_ALIASES)))
logging.info("Antitargets: %d (%s) bins failed filters",
len(bg_bad_bins), "%.4f" % bad_pct + '%')
def get_fasta_stats(cnarr, fa_fname):
"""Calculate GC and RepeatMasker content of each bin in the FASTA genome."""
logging.info("Calculating GC and RepeatMasker content in %s ...", fa_fname)
gc_rm_vals = [calculate_gc_lo(subseq)
for subseq in fasta_extract_regions(fa_fname, cnarr)]
gc_vals, rm_vals = zip(*gc_rm_vals)
return np.asfarray(gc_vals), np.asfarray(rm_vals)
def calculate_gc_lo(subseq):
"""Calculate the GC and lowercase (RepeatMasked) content of a string."""
cnt_at_lo = subseq.count('a') + subseq.count('t')
cnt_at_up = subseq.count('A') + subseq.count('T')
cnt_gc_lo = subseq.count('g') + subseq.count('c')
cnt_gc_up = subseq.count('G') + subseq.count('C')
tot = float(cnt_gc_up + cnt_gc_lo + cnt_at_up + cnt_at_lo)
if not tot:
return 0.0, 0.0
frac_gc = (cnt_gc_lo + cnt_gc_up) / tot
frac_lo = (cnt_at_lo + cnt_gc_lo) / tot
return frac_gc, frac_lo
def fasta_extract_regions(fa_fname, intervals):
"""Extract an iterable of regions from an indexed FASTA file.
Input: FASTA file name; iterable of (seq_id, start, end) (1-based)
Output: iterable of string sequences.
"""
with pyfaidx.Fasta(fa_fname, as_raw=True) as fa_file:
for chrom, subarr in intervals.by_chromosome():
logging.info("Extracting sequences from chromosome %s", chrom)
for _chrom, start, end in subarr.coords():
yield fa_file[_chrom][int(start):int(end)]
def reference2regions(refarr):
"""Split reference into target and antitarget regions."""
is_bg = (refarr['gene'].isin(params.ANTITARGET_ALIASES))
regions = GA(refarr.data.loc[:, ('chromosome', 'start', 'end', 'gene')],
{'sample_id': 'reference'})
targets = regions[~is_bg]
antitargets = regions[is_bg]
return targets, antitargets
```
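A worked example of `calculate_gc_lo`; the return values follow directly from the counting above (import path assumed as in this repo):
```python
from cnvlib.reference import calculate_gc_lo

calculate_gc_lo("ACgt")   # -> (0.5, 0.5): 2 of 4 bases are G/C; 2 of 4 are lowercase (masked)
calculate_gc_lo("NNNN")   # -> (0.0, 0.0): no A/C/G/T counted, so the total is zero
```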
#### File: cnvkit/scripts/cnv_annotate.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import logging
import sys
from skgenome import tabio
from cnvlib.cmdutil import read_cna
logging.basicConfig(level=logging.INFO, format="%(message)s")
def main(args):
annot = tabio.read_auto(args.annotate)
cnarr = read_cna(args.cnv_file)
cnarr['gene'] = annot.into_ranges(cnarr, 'gene', '-')
tabio.write(cnarr, args.output or sys.stdout)
# ENH:
# --short-names
# --no-antitarget
# annotation: --merge, --flatten, --exons, ...
# cut antitargets & insert untargeted gene names
# some math for how to update probes, weight
if __name__ == '__main__':
AP = argparse.ArgumentParser(description=__doc__)
AP.add_argument('annotate', help="Genome annotations.")
AP.add_argument('cnv_file', help="CNVkit .cnn or .cnr file.")
AP.add_argument('-o', '--output', help="Output filename.")
main(AP.parse_args())
```
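The script is normally run as `python cnv_annotate.py refFlat.txt sample.cnr -o sample.annotated.cnr`; an equivalent direct call, with placeholder file names, would look like this sketch:
```python
import argparse

# Assuming main() from the script above is in scope; file names are placeholders.
main(argparse.Namespace(annotate="refFlat.txt",
                        cnv_file="sample.cnr",
                        output="sample.annotated.cnr"))
```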
#### File: cnvkit/skgenome/combiners.py
```python
from __future__ import print_function, absolute_import, division
import pandas as pd
def get_combiners(table, stranded=False, combine=None):
"""Get a `combine` lookup suitable for `table`.
Parameters
----------
table : DataFrame
stranded : bool
combine : dict or None
Column names to their value-combining functions, replacing or in
addition to the defaults.
Returns
-------
dict:
Column names to their value-combining functions.
"""
cmb = {
'chromosome': first_of,
'start': first_of,
'end': max,
'gene': join_strings,
'accession': join_strings,
'weight': sum,
'probes': sum,
}
if combine:
cmb.update(combine)
if 'strand' not in cmb:
cmb['strand'] = first_of if stranded else merge_strands
return {k: v for k, v in cmb.items() if k in table.columns}
def first_of(elems):
"""Return the first element of the input."""
return elems[0]
def last_of(elems):
"""Return the last element of the input."""
return elems[-1]
max_of = max
def join_strings(elems, sep=','):
"""Join a Series of strings by commas."""
# ENH if elements are also comma-separated, split+uniq those too
return sep.join(pd.unique(elems))
def merge_strands(elems):
strands = set(elems)
if len(strands) > 1:
return '.'
return elems[0]
def make_const(val):
def const(_elems):
return val
return const
```
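A short example of `get_combiners` on a toy table (import path assumed as in this repo):
```python
import pandas as pd
from skgenome.combiners import get_combiners

table = pd.DataFrame({"chromosome": ["chr1", "chr1"],
                      "start": [100, 200],
                      "end": [250, 300],
                      "gene": ["A", "B"],
                      "strand": ["+", "-"]})
cmb = get_combiners(table)
cmb["end"]([250, 300])                   # max -> 300
cmb["gene"](pd.Series(["A", "B"]))       # join_strings -> "A,B"
cmb["strand"](pd.Series(["+", "-"]))     # merge_strands -> "." (mixed strands)
```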
#### File: cnvkit/skgenome/gary.py
```python
from __future__ import print_function, absolute_import, division
from builtins import next, object, zip
from past.builtins import basestring
import logging
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from .chromsort import sorter_chrom
from .intersect import by_ranges, into_ranges, iter_ranges, iter_slices
from .merge import flatten, merge
from .rangelabel import to_label
from .subtract import subtract
from .subdivide import subdivide
class GenomicArray(object):
"""An array of genomic intervals. Base class for genomic data structures.
Can represent most BED-like tabular formats with arbitrary additional
columns.
"""
_required_columns = ("chromosome", "start", "end")
_required_dtypes = (str, int, int)
def __init__(self, data_table, meta_dict=None):
# Validation
if (data_table is None or
(isinstance(data_table, (list, tuple)) and not len(data_table)) or
(isinstance(data_table, pd.DataFrame) and not len(data_table.columns))
):
data_table = self._make_blank()
else:
if not isinstance(data_table, pd.DataFrame):
# Rarely if ever needed -- prefer from_rows, from_columns, etc.
data_table = pd.DataFrame(data_table)
if not all(c in data_table.columns for c in self._required_columns):
raise ValueError("data table must have at least columns %r; "
"got %r" % (self._required_columns,
tuple(data_table.columns)))
# Ensure columns are the right type
# (in case they've been automatically converted to the wrong type,
# e.g. chromosome names as integers; genome coordinates as floats)
if len(data_table):
def ok_dtype(col, dt):
return isinstance(data_table[col].iat[0], dt)
else:
def ok_dtype(col, dt):
return data_table[col].dtype == np.dtype(dt)
for col, dtype in zip(self._required_columns, self._required_dtypes):
if not ok_dtype(col, dtype):
data_table[col] = data_table[col].astype(dtype)
self.data = data_table
self.meta = (dict(meta_dict)
if meta_dict is not None and len(meta_dict)
else {})
@classmethod
def _make_blank(cls):
"""Create an empty dataframe with the columns required by this class."""
spec = list(zip(cls._required_columns, cls._required_dtypes))
try:
arr = np.zeros(0, dtype=spec)
return pd.DataFrame(arr)
except TypeError as exc:
raise TypeError("{}: {}".format(exc, spec))
@classmethod
def from_columns(cls, columns, meta_dict=None):
"""Create a new instance from column arrays, given as a dict."""
table = pd.DataFrame.from_dict(columns)
ary = cls(table, meta_dict)
ary.sort_columns()
return ary
@classmethod
def from_rows(cls, rows, columns=None, meta_dict=None):
"""Create a new instance from a list of rows, as tuples or arrays."""
if columns is None:
columns = cls._required_columns
table = pd.DataFrame.from_records(rows, columns=columns)
return cls(table, meta_dict)
def as_columns(self, **columns):
"""Wrap the named columns in this instance's metadata."""
return self.__class__.from_columns(columns, self.meta)
# return self.__class__(self.data.loc[:, columns], self.meta.copy())
def as_dataframe(self, dframe):
"""Wrap the given pandas dataframe in this instance's metadata."""
return self.__class__(dframe.reset_index(drop=True), self.meta.copy())
# def as_index(self, index):
# """Subset with fancy/boolean indexing; reuse this instance's metadata."""
# """Extract rows by indices, reusing this instance's metadata."""
# if isinstance(index, (int, slice)):
# return self.__class__(self.data.iloc[index], self.meta.copy())
# else:
# return self.__class__(self.data[index], self.meta.copy())
def as_rows(self, rows):
"""Wrap the given rows in this instance's metadata."""
try:
out = self.from_rows(rows,
columns=self.data.columns,
meta_dict=self.meta)
except AssertionError:
columns = self.data.columns.tolist()
firstrow = next(iter(rows))
raise RuntimeError("Passed %d columns %r, but "
"%d elements in first row: %s",
len(columns), columns, len(firstrow), firstrow)
return out
# Container behaviour
def __bool__(self):
return bool(len(self.data))
__nonzero__ = __bool__ # Py2.7
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.data.equals(other.data))
def __len__(self):
return len(self.data)
def __contains__(self, key):
return key in self.data.columns
def __getitem__(self, index):
"""Access a portion of the data.
Cases:
- single integer: a row, as pd.Series
- string column name: that column, as pd.Series
- a boolean array: masked rows, as_dataframe
- tuple of integers: selected rows, as_dataframe
"""
if isinstance(index, int):
# A single row
return self.data.iloc[index]
# return self.as_dataframe(self.data.iloc[index:index+1])
elif isinstance(index, basestring):
# A column, by name
return self.data[index]
elif (isinstance(index, tuple) and
len(index) == 2 and
index[1] in self.data.columns):
# Row index, column index -> cell value
return self.data.loc[index]
elif isinstance(index, slice):
# return self.as_dataframe(self.data.take(index))
return self.as_dataframe(self.data[index])
else:
# Iterable -- selected row indices or boolean array, probably
try:
if isinstance(index, type(None)) or len(index) == 0:
empty = pd.DataFrame(columns=self.data.columns)
return self.as_dataframe(empty)
except TypeError:
raise TypeError("object of type %r " % type(index) +
"cannot be used as an index into a " +
self.__class__.__name__)
return self.as_dataframe(self.data[index])
# return self.as_dataframe(self.data.take(index))
def __setitem__(self, index, value):
"""Assign to a portion of the data."""
if isinstance(index, int):
self.data.iloc[index] = value
elif isinstance(index, basestring):
self.data[index] = value
elif (isinstance(index, tuple) and
len(index) == 2 and
index[1] in self.data.columns):
self.data.loc[index] = value
else:
assert isinstance(index, slice) or len(index) > 0
self.data[index] = value
def __delitem__(self, index):
return NotImplemented
def __iter__(self):
return self.data.itertuples(index=False)
__next__ = next
@property
def chromosome(self):
return self.data['chromosome']
@property
def start(self):
return self.data['start']
@property
def end(self):
return self.data['end']
@property
def sample_id(self):
return self.meta.get('sample_id')
# Traversal
def autosomes(self, also=()):
"""Select chromosomes w/ integer names, ignoring any 'chr' prefixes."""
with warnings.catch_warnings():
# NB: We're not using the deprecated part of this pandas method
# (as_indexer introduced before 0.18.1, deprecated 0.20.1)
warnings.simplefilter("ignore", UserWarning)
kwargs = dict(na=False)
if pd.__version__ < "0.20.1":
kwargs["as_indexer"] = True
is_auto = self.chromosome.str.match(r"(chr)?\d+$", **kwargs)
if not is_auto.any():
# The autosomes, if any, are not named with plain integers
return self
if also:
if isinstance(also, basestring):
also = [also]
for a_chrom in also:
is_auto |= (self.chromosome == a_chrom)
return self[is_auto]
def by_arm(self, min_gap_size=1e5, min_arm_bins=50):
"""Iterate over bins grouped by chromosome arm (inferred)."""
# ENH:
# - Accept GArray of actual centromere regions as input
# -> find largest gap (any size) within cmere region, split there
# - Cache centromere locations once found
self.data.chromosome = self.data.chromosome.astype(str)
for chrom, subtable in self.data.groupby("chromosome", sort=False):
margin = max(min_arm_bins, int(round(.1 * len(subtable))))
if len(subtable) > 2 * margin + 1:
# Found a candidate centromere
gaps = (subtable.start.values[margin+1:-margin] -
subtable.end.values[margin:-margin-1])
cmere_idx = gaps.argmax() + margin + 1
cmere_size = gaps[cmere_idx - margin - 1]
else:
cmere_idx = 0
cmere_size = 0
if cmere_idx and cmere_size >= min_gap_size:
logging.debug("%s centromere at %d of %d bins (size %s)",
chrom, cmere_idx, len(subtable), cmere_size)
p_arm = subtable.index[:cmere_idx]
yield chrom, self.as_dataframe(subtable.loc[p_arm,:])
q_arm = subtable.index[cmere_idx:]
yield chrom, self.as_dataframe(subtable.loc[q_arm,:])
else:
# No centromere found -- emit the whole chromosome
if cmere_idx:
logging.debug("%s: Ignoring centromere at %d of %d bins (size %s)",
chrom, cmere_idx, len(subtable), cmere_size)
else:
logging.debug("%s: Skipping centromere search, too small",
chrom)
yield chrom, self.as_dataframe(subtable)
def by_chromosome(self):
"""Iterate over bins grouped by chromosome name."""
# Workaround for pandas 0.18.0 bug:
# https://github.com/pydata/pandas/issues/13179
self.data.chromosome = self.data.chromosome.astype(str)
for chrom, subtable in self.data.groupby("chromosome", sort=False):
yield chrom, self.as_dataframe(subtable)
def by_ranges(self, other, mode='outer', keep_empty=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
tuple
(other bin, GenomicArray of overlapping rows in self)
"""
for bin_row, subrange in by_ranges(self.data, other.data,
mode, keep_empty):
if len(subrange):
yield bin_row, self.as_dataframe(subrange)
elif keep_empty:
yield bin_row, self.as_rows(subrange)
def coords(self, also=()):
"""Iterate over plain coordinates of each bin: chromosome, start, end.
Parameters
----------
also : str, or iterable of strings
Also include these columns from `self`, in addition to chromosome,
start, and end.
Example, yielding rows in BED format:
>>> probes.coords(also=["gene", "strand"])
"""
cols = list(GenomicArray._required_columns)
if also:
if isinstance(also, basestring):
cols.append(also)
else:
cols.extend(also)
coordframe = self.data.loc[:, cols]
return coordframe.itertuples(index=False)
def labels(self):
return self.data.apply(to_label, axis=1)
def in_range(self, chrom=None, start=None, end=None, mode='outer'):
"""Get the GenomicArray portion within the given genomic range.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
start : int or None
Start coordinate of range to select, in 0-based coordinates.
If None, start from 0.
end : int or None
End coordinate of range to select. If None, select to the end of the
chromosome.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
The subset of `self` enclosed by the specified range.
"""
if isinstance(start, (int, np.int64, float, np.float64)):
start = [int(start)]
if isinstance(end, (int, np.int64, float, np.float64)):
end = [int(end)]
results = iter_ranges(self.data, chrom, start, end, mode)
return self.as_dataframe(next(results))
def in_ranges(self, chrom=None, starts=None, ends=None, mode='outer'):
"""Get the GenomicArray portion within the specified ranges.
Similar to `in_range`, but concatenating the selections of all the
regions specified by the `starts` and `ends` arrays.
Parameters
----------
chrom : str or None
Chromosome name to select. Use None if `self` has only one
chromosome.
starts : int array, or None
Start coordinates of ranges to select, in 0-based coordinates.
If None, start from 0.
ends : int array, or None
End coordinates of ranges to select. If None, select to the end of the
chromosome. If `starts` and `ends` are both specified, they must be
arrays of equal length.
mode : str
As in `by_ranges`: ``outer`` includes bins straddling the range
boundaries, ``trim`` additionally alters the straddling bins'
endpoints to match the range boundaries, and ``inner`` excludes
those bins.
Returns
-------
GenomicArray
Concatenation of all the subsets of `self` enclosed by the specified
ranges.
"""
table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode))
return self.as_dataframe(table)
def into_ranges(self, other, column, default, summary_func=None):
"""Re-bin values from `column` into the corresponding ranges in `other`.
Match overlapping/intersecting rows of `self` to each range in `other`.
Then, within each range in `other`, extract the value(s) from `column`
in `self`, using the function `summary_func` to produce a single value
if multiple bins in `self` map to a single range in `other`.
For example, group SNVs (self) by CNV segments (other) and calculate the
median (summary_func) of each SNV group's allele frequencies.
Parameters
----------
other : GenomicArray
Ranges into which the overlapping values of `self` will be
summarized.
column : string
Column name in `self` to extract values from.
default
Value to assign to indices in `other` that do not overlap any bins in
`self`. Type should be the same as or compatible with the output
field specified by `column`, or the output of `summary_func`.
summary_func : callable, dict of string-to-callable, or None
Specify how to reduce 1 or more overlapping rows of `self` into a single
value for the corresponding row in `other`.
- If callable, apply it to the `column` values of each group of
overlapping `self` rows.
- If a single-element dict of column name to callable, apply to that
field in `self` instead of `column`.
- If None, use an appropriate summarizing function for the datatype
of the `column` column in `self` (e.g. median of numbers,
concatenation of strings).
- If some other value, assign that value to each row of `other` wherever
there is an overlap.
Returns
-------
pd.Series
The extracted and summarized values from `self` corresponding to
other's genomic ranges, the same length as `other`.
"""
if column not in self:
logging.warning("No '%s' column available for summary calculation",
column)
return pd.Series(np.repeat(default, len(other)))
return into_ranges(self.data, other.data, column, default, summary_func)
def iter_ranges_of(self, other, column, mode='outer', keep_empty=True):
"""Group rows by another GenomicArray's bin coordinate ranges.
For example, this can be used to group SNVs by CNV segments.
Bins in this array that fall outside the other array's bins are skipped.
Parameters
----------
other : GenomicArray
Another GA instance.
column : string
Column name in `self` to extract values from.
mode : string
Determines what to do with bins that overlap a boundary of the
selection. Possible values are:
- ``inner``: Drop the bins on the selection boundary, don't emit them.
- ``outer``: Keep/emit those bins as they are.
- ``trim``: Emit those bins but alter their boundaries to match the
selection; the bin start or end position is replaced with the
selection boundary position.
keep_empty : bool
Whether to also yield `other` bins with no overlapping bins in
`self`, or to skip them when iterating.
Yields
------
pd.Series
The values of `column` from the rows of `self` overlapping each bin in `other`.
"""
if column not in self.data.columns:
raise ValueError("No column named %r in this object" % column)
ser = self.data[column]
for slc in iter_slices(self.data, other.data, mode, keep_empty):
yield ser[slc]
# Modification
def add(self, other):
"""Combine this array's data with another GenomicArray (in-place).
Any optional columns must match between both arrays.
"""
if not isinstance(other, self.__class__):
raise ValueError("Argument (type %s) is not a %s instance"
% (type(other), self.__class__))
if len(other.data):
self.data = self.data.append(other.data, ignore_index=True)
self.sort()
def concat(self, others):
"""Concatenate several GenomicArrays, keeping this array's metadata.
This array's data table is not implicitly included in the result.
"""
table = pd.concat([otr.data for otr in others], ignore_index=True)
result = self.as_dataframe(table)
result.sort()
return result
def copy(self):
"""Create an independent copy of this object."""
return self.as_dataframe(self.data.copy())
def add_columns(self, **columns):
"""Add the given columns to a copy of this GenomicArray.
Parameters
----------
**columns : array
Keyword arguments where the key is the new column's name and the
value is an array of the same length as `self` which will be the new
column's values.
Returns
-------
GenomicArray or subclass
A new instance of `self` with the given columns included in the
underlying dataframe.
"""
# return self.as_dataframe(self.data.assign(**columns))
result = self.copy()
for key, values in columns.items():
result[key] = values
return result
def keep_columns(self, colnames):
"""Extract a subset of columns, reusing this instance's metadata."""
colnames = self.data.columns.intersection(colnames)
return self.__class__(self.data.loc[:, colnames], self.meta.copy())
def drop_extra_columns(self):
"""Remove any optional columns from this GenomicArray.
Returns
-------
GenomicArray or subclass
A new copy with only the minimal set of columns required by the
class (e.g. chromosome, start, end for GenomicArray; may be more for
subclasses).
"""
table = self.data.loc[:, self._required_columns]
return self.as_dataframe(table)
def filter(self, func=None, **kwargs):
"""Take a subset of rows where the given condition is true.
Parameters
----------
func : callable
A boolean function which will be applied to each row to keep rows
where the result is True.
**kwargs : string
Keyword arguments like ``chromosome="chr7"`` or
``gene="Antitarget"``, which will keep rows where the keyed field
equals the specified value.
Return
------
GenomicArray
Subset of `self` where the specified condition is True.
"""
table = self.data
if func is not None:
table = table[table.apply(func, axis=1)]
for key, val in list(kwargs.items()):
assert key in self
table = table[table[key] == val]
return self.as_dataframe(table)
def shuffle(self):
"""Randomize the order of bins in this array (in-place)."""
order = np.arange(len(self.data))
np.random.seed(0xA5EED)
np.random.shuffle(order)
self.data = self.data.iloc[order]
return order
def sort(self):
"""Sort this array's bins in-place, with smart chromosome ordering."""
sort_key = self.data.chromosome.apply(sorter_chrom)
self.data = (self.data.assign(_sort_key_=sort_key)
.sort_values(by=['_sort_key_', 'start', 'end'],
kind='mergesort')
.drop('_sort_key_', axis=1)
.reset_index(drop=True))
def sort_columns(self):
"""Sort this array's columns in-place, per class definition."""
extra_cols = []
for col in self.data.columns:
if col not in self._required_columns:
extra_cols.append(col)
sorted_colnames = list(self._required_columns) + sorted(extra_cols)
assert len(sorted_colnames) == len(self.data.columns)
self.data = self.data.reindex(columns=sorted_colnames)
# Genome arithmetic
def cut(self, other, combine=None):
"""Split this array's regions at the boundaries in `other`."""
# TODO
return NotImplemented
def flatten(self, combine=None, split_columns=None):
"""Split this array's regions where they overlap."""
return self.as_dataframe(flatten(self.data, combine=combine,
split_columns=split_columns))
def intersection(self, other, mode='outer'):
"""Select the bins in `self` that overlap the regions in `other`.
The extra fields of `self`, but not `other`, are retained in the output.
"""
# TODO options for which extra fields to keep
# by default, keep just the fields in 'table'
if mode == 'trim':
# Slower
chunks = [chunk.data for _, chunk in
self.by_ranges(other, mode=mode, keep_empty=False)]
return self.as_dataframe(pd.concat(chunks))
else:
slices = iter_slices(self.data, other.data, mode, False)
indices = np.concatenate(list(slices))
return self.as_dataframe(self.data.loc[indices])
def merge(self, bp=0, stranded=False, combine=None):
"""Merge adjacent or overlapping regions into single rows.
Similar to 'bedtools merge'.
"""
return self.as_dataframe(merge(self.data, bp, stranded, combine))
def resize_ranges(self, bp, chrom_sizes=None):
"""Resize each genomic bin by a fixed number of bases at each end.
Bin 'start' values have a minimum of 0, and `chrom_sizes` can
specify each chromosome's maximum 'end' value.
Similar to 'bedtools slop'.
Parameters
----------
bp : int
Number of bases in each direction to expand or shrink each bin.
Applies to 'start' and 'end' values symmetrically, and may be
positive (expand) or negative (shrink).
chrom_sizes : dict of string-to-int
Chromosome name to length in base pairs. If given, all chromosomes
in `self` must be included.
"""
table = self.data
limits = dict(lower=0)
if chrom_sizes:
limits['upper'] = self.chromosome.replace(chrom_sizes)
table = table.assign(start=(table['start'] - bp).clip(**limits),
end=(table['end'] + bp).clip(**limits))
if bp < 0:
# Drop any bins that now have zero or negative size
ok_size = table['end'] - table['start'] > 0
logging.debug("Dropping %d bins with size <= 0", (~ok_size).sum())
table = table[ok_size]
# Don't modify the original
return self.as_dataframe(table.copy())
def squash(self, combine=None):
"""Combine some groups of rows, by some criteria, into single rows."""
# TODO
return NotImplemented
def subdivide(self, avg_size, min_size=0, verbose=False):
"""Split this array's regions into roughly equal-sized sub-regions."""
return self.as_dataframe(subdivide(self.data, avg_size, min_size,
verbose))
def subtract(self, other):
"""Remove the overlapping regions in `other` from this array."""
return self.as_dataframe(subtract(self.data, other.data))
def total_range_size(self):
"""Total number of bases covered by all (merged) regions."""
if not len(self):
return 0
regions = merge(self.data, bp=1)
return regions.end.sum() - regions.start.sum()
def _get_gene_map(self):
"""Map unique gene names to their indices in this array.
Returns
-------
OrderedDict
An (ordered) dictionary of unique gene names and the data indices of
their segments in the order of occurrence (genomic order).
"""
if 'gene' not in self.data:
return OrderedDict()
genes = OrderedDict()
for idx, genestr in self.data['gene'].iteritems():
if pd.isnull(genestr):
continue
for gene in genestr.split(','):
if gene not in genes:
genes[gene] = []
genes[gene].append(idx)
return genes
```
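A small usage sketch of `GenomicArray` (coordinates are made up; the import path matches the `from skgenome import ... GenomicArray` usage elsewhere in this repo):
```python
from skgenome import GenomicArray

garr = GenomicArray.from_columns({
    "chromosome": ["chr1", "chr1", "chr2"],
    "start": [0, 500, 0],
    "end": [300, 800, 400],
    "gene": ["A", "B", "C"],
})
len(garr)                         # 3
garr.in_range("chr1", 0, 600)     # bins overlapping chr1:0-600 (genes A and B)
garr.total_range_size()           # 300 + 300 + 400 = 1000 bases covered
```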
#### File: cnvkit/skgenome/intersect.py
```python
from __future__ import print_function, absolute_import, division
from builtins import range, zip
from past.builtins import basestring
import numpy as np
import pandas as pd
from pandas.core.index import Int64Index
from .combiners import first_of, join_strings, make_const
def by_ranges(table, other, mode, keep_empty):
"""Group rows by another GenomicArray's bin coordinate ranges."""
for _chrom, bin_rows, src_rows in by_shared_chroms(other, table,
keep_empty):
if src_rows is not None:
subranges = iter_ranges(src_rows, None, bin_rows['start'],
bin_rows['end'], mode)
for bin_row, subrange in zip(bin_rows.itertuples(index=False),
subranges):
yield bin_row, subrange
elif keep_empty:
for bin_row in bin_rows.itertuples(index=False):
yield bin_row, [] # ENH: empty dframe matching table
def by_shared_chroms(table, other, keep_empty=True):
if table['chromosome'].is_unique and other['chromosome'].is_unique:
yield table['chromosome'].iat[0], table, other
# yield None, table, other
else:
other_chroms = {c: o for c, o in other.groupby(['chromosome'], sort=False)}
for chrom, ctable in table.groupby(['chromosome'], sort=False):
if chrom in other_chroms:
otable = other_chroms[chrom]
yield chrom, ctable, otable
elif keep_empty:
yield chrom, ctable, None
def into_ranges(source, dest, src_col, default, summary_func):
"""Group a column in `source` by regions in `dest` and summarize."""
if not len(source) or not len(dest):
return dest
if summary_func is None:
# Choose a type-appropriate summary function
elem = source[src_col].iat[0]
if isinstance(elem, (basestring, np.string_)):
summary_func = join_strings
elif isinstance(elem, (float, np.float_)):
summary_func = np.nanmedian
else:
summary_func = first_of
elif not callable(summary_func):
# Just fill in the given value, I suppose.
summary_func = make_const(summary_func)
def series2value(ser):
if len(ser) == 0:
return default
if len(ser) == 1:
return ser.iat[0]
return summary_func(ser)
column = source[src_col]
result = [series2value(column[slc])
for slc in iter_slices(source, dest, 'outer', True)]
return pd.Series(result)
def iter_ranges(table, chrom, starts, ends, mode):
"""Iterate through sub-ranges."""
assert mode in ('inner', 'outer', 'trim')
if chrom:
assert isinstance(chrom, basestring) # ENH: accept array?
try:
table = table[table['chromosome'] == chrom]
except KeyError:
raise KeyError("Chromosome %s is not in this probe set" % chrom)
for region_idx, start_val, end_val in idx_ranges(table, None, starts, ends,
'inner' if mode == 'inner' else 'outer'):
subtable = table.iloc[region_idx]
if mode == 'trim':
subtable = subtable.copy()
# Update 5' endpoints to the boundary
if start_val:
subtable.start = subtable.start.clip_lower(start_val)
# Update 3' endpoints to the boundary
if end_val:
subtable.end = subtable.end.clip_upper(end_val)
yield subtable
def iter_slices(table, other, mode, keep_empty):
"""Yields indices to extract ranges from `table`.
Returns an iterable of integer arrays that can apply to Series objects,
i.e. columns of `table`. These indices are of the DataFrame/Series' Index,
not array coordinates -- so be sure to use DataFrame.loc, Series.loc, or
Series getitem, as opposed to .iloc or indexing directly into Numpy arrays.
"""
for _c, bin_rows, src_rows in by_shared_chroms(other, table, keep_empty):
if src_rows is None:
# Emit empty indices since 'table' is missing this chromosome
for _ in range(len(bin_rows)):
yield Int64Index([])
else:
for slc, _s, _e in idx_ranges(src_rows, None, bin_rows.start,
bin_rows.end, mode):
indices = src_rows.index[slc].values
if keep_empty or len(indices):
yield indices
def idx_ranges(table, chrom, starts, ends, mode):
"""Iterate through sub-ranges."""
assert mode in ('inner', 'outer')
# Optional if we've already subsetted by chromosome (not checked!)
if chrom:
assert isinstance(chrom, basestring) # ENH: accept array?
try:
table = table[table['chromosome'] == chrom]
except KeyError:
raise KeyError("Chromosome %s is not in this probe set" % chrom)
# Edge cases
if not len(table) or (starts is None and ends is None):
yield table.index, None, None
else:
# Don't be fooled by nested bins
if ((ends is not None and len(ends)) and
(starts is not None and len(starts))
) and not _monotonic(table.end):
# At least one bin is fully nested -- account for it
irange_func = _irange_nested
else:
irange_func = _irange_simple
for region_idx, start_val, end_val in irange_func(table, starts, ends, mode):
yield region_idx, start_val, end_val
def _irange_simple(table, starts, ends, mode):
"""Slice subsets of table when regions are not nested."""
if starts is not None and len(starts):
if mode == 'inner':
# Only rows entirely after the start point
start_idxs = table.start.searchsorted(starts)
else:
# Include all rows overlapping the start point
start_idxs = table.end.searchsorted(starts, 'right')
else:
starts = np.zeros(len(ends) if ends is not None else 1,
dtype=np.int_)
start_idxs = starts.copy()
if ends is not None and len(ends):
if mode == 'inner':
end_idxs = table.end.searchsorted(ends, 'right')
else:
end_idxs = table.start.searchsorted(ends)
else:
end_idxs = np.repeat(len(table), len(starts))
ends = [None] * len(starts)
for start_idx, start_val, end_idx, end_val in zip(start_idxs, starts,
end_idxs, ends):
yield (slice(start_idx, end_idx), start_val, end_val)
def _irange_nested(table, starts, ends, mode):
"""Slice subsets of table when regions are nested."""
# ENH: Binary Interval Search (BITS) or Layer&Quinlan(2015)
assert len(starts) == len(ends) > 0
for start_val, end_val in zip(starts, ends):
# Mask of table rows to keep for this query region
region_mask = np.ones(len(table), dtype=np.bool_)
if start_val:
if mode == 'inner':
# Only rows entirely after the start point
start_idx = table.start.searchsorted(start_val)
region_mask[:int(start_idx)] = 0
else:
# Include all rows overlapping the start point
region_mask = (table.end.values > start_val)
if end_val is not None:
if mode == 'inner':
# Only rows up to the end point
region_mask &= (table.end.values <= end_val)
else:
# Include all rows overlapping the end point
end_idx = table.start.searchsorted(end_val)
region_mask[int(end_idx):] = 0
yield region_mask, start_val, end_val
def venn(table, other, mode):
# TODO -- implement 'venn' via fjoin algorithm
# 'cut' table at all 'other' boundaries
# -> extra column '_venn_':int (0, 1, 2)
# 0=self only, 1=both, 2=other only
# -> 'cut' just drops the '_venn_' column
# -> 'subtract' drops 1 and 2?
# (is that faster? probably not)
# -> 'jaccard' does math with it...
return table
# Shim for pandas 0.18.1 (chapmanb/bcbio-nextgen#1836)
if hasattr(pd.Series, 'is_monotonic_increasing'):
def _monotonic(ser):
return ser.is_monotonic_increasing
else:
def _monotonic(ser):
return (np.diff(ser) >= 0).all()
```
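A concrete illustration of the `inner` vs. `outer` modes in `iter_ranges`, on toy bins (import path assumed as in this repo):
```python
import pandas as pd
from skgenome.intersect import iter_ranges

bins = pd.DataFrame({"chromosome": ["chr1", "chr1", "chr1"],
                     "start": [0, 100, 200],
                     "end": [100, 200, 300]})
# 'outer' keeps bins straddling the query boundaries; 'inner' drops them.
next(iter_ranges(bins, "chr1", [50], [250], "outer"))   # all three bins
next(iter_ranges(bins, "chr1", [50], [250], "inner"))   # only the 100-200 bin
```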
#### File: cnvkit/skgenome/subdivide.py
```python
from __future__ import print_function, absolute_import, division
from builtins import range
import logging
import pandas as pd
from .merge import merge
def subdivide(table, avg_size, min_size=0, verbose=False):
return pd.DataFrame.from_records(
_split_targets(table, avg_size, min_size, verbose),
columns=table.columns)
def _split_targets(regions, avg_size, min_size, verbose):
"""Split large regions into smaller, consecutive regions.
Output bin metadata and additional columns match the input dataframe.
Parameters
----------
avg_size : int
Split regions into equal-sized subregions of about this size.
Specifically, subregions are no larger than 150% of this size, no
smaller than 75% this size, and the average will approach this size when
subdividing a large region.
min_size : int
Drop any regions smaller than this size.
verbose : bool
Print a log message when subdividing a region.
"""
for row in merge(regions).itertuples(index=False):
span = row.end - row.start
if span >= min_size:
nbins = int(round(span / avg_size)) or 1
if nbins == 1:
yield row
else:
# Divide the region into equal-sized bins
bin_size = span / nbins
bin_start = row.start
if verbose:
label = (row.gene if 'gene' in regions else
"%s:%d-%d" % (row.chromosome, row.start, row.end))
logging.info("Splitting: {:30} {:7} / {} = {:.2f}"
.format(label, span, nbins, bin_size))
for i in range(1, nbins):
bin_end = row.start + int(i * bin_size)
yield row._replace(start=bin_start, end=bin_end)
bin_start = bin_end
yield row._replace(start=bin_start)
```
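For example, splitting a single 1 kb region with `avg_size=300` yields three roughly equal bins; the boundaries come from `int(i * span / nbins)` as above (toy input; import path assumed):
```python
import pandas as pd
from skgenome.subdivide import subdivide

regions = pd.DataFrame({"chromosome": ["chr1"], "start": [0], "end": [1000]})
subdivide(regions, avg_size=300)
#   chromosome  start   end
#         chr1      0   333
#         chr1    333   666
#         chr1    666  1000
```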
#### File: cnvkit/skgenome/subtract.py
```python
from __future__ import print_function, absolute_import, division
import logging
import numpy as np
import pandas as pd
from .intersect import by_ranges
def subtract(table, other):
if not len(other):
return table
return pd.DataFrame.from_records(_subtraction(table, other),
columns=table.columns)
def _subtraction(table, other):
for keeper, rows_to_exclude in by_ranges(other, table, 'outer', True):
if len(rows_to_exclude):
logging.debug(" %s:%d-%d : Subtracting %d excluded regions",
keeper.chromosome, keeper.start, keeper.end,
len(rows_to_exclude))
keep_left = (keeper.start < rows_to_exclude.start.iat[0])
keep_right = (keeper.end > rows_to_exclude.end.iat[-1])
if keep_left and keep_right:
# Keep both original edges of the source region
# =========
# -- --
starts = np.r_[keeper.start, rows_to_exclude.end.values]
ends = np.r_[rows_to_exclude.start.values, keeper.end]
elif keep_left:
# Exclusion overlaps only the right side
# =======
# -- ---
starts = np.r_[keeper.start, rows_to_exclude.end.values[:-1]]
ends = rows_to_exclude.start.values
elif keep_right:
# Exclusion overlaps only the left side
# ========
# --- --
starts = rows_to_exclude.end.values
ends = np.r_[rows_to_exclude.start.values[1:], keeper.end]
elif len(rows_to_exclude) > 1:
# Exclusions overlap both edges
# ======
# -- -- ---
starts = rows_to_exclude.end.values[:-1]
ends = rows_to_exclude.start.values[1:]
else:
# Exclusion covers the whole region
continue
for start, end in zip(starts, ends):
if end > start:
yield keeper._replace(start=start, end=end)
else:
logging.debug("Discarding pair: (%d, %d)", start, end)
else:
logging.debug(" %s:%d-%d : No excluded regions",
keeper.chromosome, keeper.start, keeper.end)
yield keeper
```
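For example, cutting one excluded interval out of a larger region leaves the two flanking pieces (toy input; import path assumed):
```python
import pandas as pd
from skgenome.subtract import subtract

regions = pd.DataFrame({"chromosome": ["chr1"], "start": [0], "end": [1000]})
exclude = pd.DataFrame({"chromosome": ["chr1"], "start": [400], "end": [600]})
subtract(regions, exclude)
#   chromosome  start   end
#         chr1      0   400
#         chr1    600  1000
```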
#### File: cnvkit/test/test_r.py
```python
from __future__ import absolute_import, division, print_function
import unittest
import cnvlib
from cnvlib import segmentation
class RTests(unittest.TestCase):
"""Tests that depend on the R statistical environment."""
def setUp(self):
self.tas_cnr = cnvlib.read('formats/amplicon.cnr')
self.wgs_cnr = cnvlib.read('formats/wgs-chr17.cnr')
def test_cbs(self):
_test_method(self, "cbs")
def test_flasso(self):
_test_method(self, "flasso")
def _test_method(self, method):
for cnr in (self.tas_cnr,
# self.wgs_cnr
):
cns, raw_str = segmentation.do_segmentation(cnr, method, processes=1,
save_dataframe=True)
self.assertGreater(len(cns), 0)
self.assertGreater(len(raw_str), 0)
# Parallel should produce the same results
p_cns, p_raw_str = segmentation.do_segmentation(cnr, method,
processes=2,
save_dataframe=True)
self.assertEqual(cns.data.shape, p_cns.data.shape)
self.assertEqual(len(cns.meta), len(p_cns.meta))
self.assertEqual(raw_str, p_raw_str)
if __name__ == '__main__':
unittest.main(verbosity=2)
```
|
{
"source": "JeremyAdamHart/Tensile",
"score": 2
}
|
#### File: Tensile/Tensile/Hardware.py
```python
from . import Properties
class HardwarePredicate(Properties.Predicate):
@classmethod
def FromISA(cls, isa):
gfxArch = 'gfx'+''.join(map(str, isa))
return cls("AMDGPU", value=cls("Processor", value=gfxArch))
@classmethod
def FromHardware(cls, isa, cuCount=None):
gfxArch = 'gfx'+''.join(map(str, isa))
if cuCount == None:
return cls("AMDGPU", value=cls("Processor", value=gfxArch))
else:
return cls("AMDGPU", value=cls.And([cls("Processor", value=gfxArch),
cls("CUCount", value=cuCount)]))
def __lt__(self, other):
# Use superclass logic for TruePreds
if other.tag == 'TruePred' or self.tag == 'TruePred':
return super().__lt__(other)
# Compute unit counts are embedded as 'And' with
# 'Processor' and 'ComputeUnitCount' as children
if self.value.tag == 'And':
myAndPred = self.value
myProcPred = next(iter(x for x in myAndPred.value if x.tag == "Processor"), None)
myCUPred = next(iter(x for x in myAndPred.value if x.tag == "CUCount"), None)
myCUCount = myCUPred.value if myCUPred != None else 0
else:
myProcPred = self.value
myCUCount = 0
if other.value.tag == 'And':
otherAndPred = other.value
otherProcPred = next(iter(x for x in otherAndPred.value if x.tag == "Processor"), None)
otherCUPred = next(iter(x for x in otherAndPred.value if x.tag == "CUCount"), None)
otherCUCount = otherCUPred.value if otherCUPred != None else 0
else:
otherProcPred = other.value
otherCUCount = 0
# If CU properties are empty, then compare processor predicates
if myCUCount == otherCUCount == 0:
# Make sure that we have valid processor preds
assert myProcPred != None and otherProcPred != None, "Missing processor predicate"
assert myProcPred.tag == otherProcPred.tag == "Processor", "Invalid processor predicate"
# Downgrade to base class so that we don't recurse
myProcPred.__class__ = otherProcPred.__class__ = Properties.Predicate
return myProcPred < otherProcPred
# Higher priority given to higher CU count
return myCUCount > otherCUCount
```
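A sketch of how the ordering is intended to behave, assuming `Properties.Predicate` exposes `tag` and `value` as the comparison above expects:
```python
from Tensile.Hardware import HardwarePredicate

gfx908 = HardwarePredicate.FromISA((9, 0, 8))                          # processor "gfx908"
gfx908_120cu = HardwarePredicate.FromHardware((9, 0, 8), cuCount=120)  # adds a CU count

# A predicate that pins a CU count sorts ahead of one that only names the
# processor, because higher CU counts get higher priority in __lt__ above.
gfx908_120cu < gfx908                                 # True
sorted([gfx908, gfx908_120cu])[0] is gfx908_120cu     # True
```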
|
{
"source": "jeremyadavis/crdc-api",
"score": 3
}
|
#### File: crdc-api/crdc_api/setup_db.py
```python
from sqlalchemy import create_engine, text
from sqlalchemy.schema import CreateSchema, DropSchema
from sqlalchemy.sql import exists, select
from utils import pretty_print
def connect_to_db(url):
try:
engine = create_engine(url)
engine.connect()
pretty_print("Connected to DB", True)
return engine
except Exception as e:
print("ERROR! Unable to Connect to database with", url)
print(e)
return False
def setup_schema(engine, schema):
with engine.connect() as conn:
has_schema = conn.execute(text(
f"SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{schema}';"))
if not has_schema.scalar():
conn.execute(CreateSchema(schema))
conn.execute(DropSchema(schema, None, True))
conn.execute(CreateSchema(schema))
pretty_print(f"Created Schema {schema}", True)
def setup_db(config):
pretty_print("SETUP DATABASE")
engine = connect_to_db(config["url"])
setup_schema(engine, config["schema"])
return engine
```
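A minimal, hypothetical invocation of ``setup_db()``; the connection URL and schema name below are placeholders rather than values from the repository.
```python
# Hypothetical usage of the module above; URL and schema are placeholders.
if __name__ == "__main__":
    config = {
        "url": "postgresql://user:password@localhost:5432/crdc",
        "schema": "crdc",
    }
    engine = setup_db(config)
```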
|
{
"source": "jeremyadavis/ga-out-of-field",
"score": 2
}
|
#### File: ga-out-of-field/etl/data_maker.py
```python
import yaml
from constants import (
MIGRATION_DIR,
TABLE_NAME
)
from helpers import (
get_data_file,
)
from utils import (
execute_sql,
pretty_print,
get_num_files_in_dir,
create_directory
)
class DataMaker:
def __init__(self, engine, filename):
self.engine = engine
self.df_data = get_data_file(filename)
def make_tables(self):
# DATABASE OUTPUT
self.df_data.to_sql(TABLE_NAME, self.engine,
if_exists="replace",
method="multi",
chunksize=10000)
def make_migrations(self):
self.make_migration_file(
'track_tables', self.make_tracking_migration_yaml)
# self.make_migration_file(
# 'access_roles', self.make_role_access_yaml)
def make_migration_file(self, file_name, markup_method):
migration_version = get_num_files_in_dir(MIGRATION_DIR) + 1
migration_file_name = f"{migration_version}__{file_name}.up.yaml"
pretty_print(f"Making Migration File {migration_file_name}", True)
yaml_data = []
# print(self.df_data.columns)
# for column in self.df_data.columns:
# if(row.is_first_column):
# columns = [self.primary_key]
# if (row.view_column_name != self.primary_key):
# columns.append(row.view_column_name)
# if(row.is_last_column):
migration_data = markup_method({
"table_name": TABLE_NAME,
})
# print('type', type(view_migration_data))
if(migration_data):
if(isinstance(migration_data, (list,))):
for item in migration_data:
yaml_data.append(item)
else:
yaml_data.append(migration_data)
with open(MIGRATION_DIR + migration_file_name, 'w') as yaml_file:
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
yaml.dump(yaml_data, yaml_file, default_flow_style=False,
Dumper=noalias_dumper)
yaml_file.close()
def make_tracking_migration_yaml(self, args):
return ({
"args": {
"name": args['table_name'],
"schema": 'public'
},
"type": "add_existing_table_or_view"
})
# def make_relationship_migration_yaml(self, args):
# relationships_config = self.config['relationships']
# primary_module = relationships_config['primary_module']
# primary_to_secondary_map = relationships_config['primary_to_secondary_map']
# secondary_to_primary_field = relationships_config['secondary_to_primary_field']
# if((not relationships_config)):
# return None
# data = []
# relationships = []
# if(args['module'] == primary_module):
# for module, name in primary_to_secondary_map.items():
# relationships.append({
# "name": name,
# "remote_view_name": module_to_db_object(
# module, self.config, "")
# })
# else:
# relationships.append({
# "name": secondary_to_primary_field,
# "remote_view_name": module_to_db_object(
# primary_module, self.config, "")
# })
# for relationship in relationships:
# data.append({
# "args": {
# "name": relationship['name'],
# "table": {
# "name": args['view_name'],
# "schema": args['schema']
# },
# "using": {
# "manual_configuration": {
# "column_mapping": {
# self.primary_key: self.primary_key
# },
# "remote_table": {
# "name": relationship['remote_view_name'],
# "schema": args['schema']
# }
# }
# },
# },
# "type": "create_object_relationship"
# })
# return data
# def make_role_access_yaml(self, args):
# roles_config = self.config['roles']
# data = []
# # print('cols', args['columns'])
# for role, settings in roles_config.items():
# filter_config = settings["filter"]
# filter = {}
# if(filter_config):
# condition = {
# filter_config["source_view_column"]: {
# "_eq": filter_config["hasura_variable"]
# }
# }
# # Curr Module is Source Module
# if(filter_config["source_module"] == args['module']):
# filter = condition
# else:
# # Curr Model isn't source module, so have to use relationship field to get to filter column
# relationship_field = self.config['relationships']['secondary_to_primary_field']
# filter = {
# relationship_field: condition
# }
# data.append({
# "args": {
# "permission": {
# "allow_aggregations": True,
# "columns": args['columns'],
# "filter": filter,
# "limit": settings["limit"] if settings["limit"] else None
# },
# "role": role,
# "table": {
# "name": args['view_name'],
# "schema": self.db_schema,
# },
# },
# "type": "create_select_permission"
# })
# return data
```
#### File: ga-out-of-field/etl/helpers.py
```python
import pandas
from constants import (
INPUT_DIR,
DATAFRAME_COLUMNS
)
def get_data_file(filename):
df = pandas.read_csv(INPUT_DIR+filename,
encoding='LATIN-1',
low_memory=False,
header=0,
names=DATAFRAME_COLUMNS
)
df.columns = df.columns.map(lambda x: x.lower())
# print('df', df.head(200))
return df
```
#### File: ga-out-of-field/etl/setup_db.py
```python
import os
from sqlalchemy import create_engine
# from sqlalchemy.schema import CreateSchema, DropSchema
# from sqlalchemy.sql import exists, select
from utils import pretty_print
DB_URL = os.environ["DATABASE_URL"]
def setup_db():
try:
engine = create_engine(DB_URL)
engine.connect()
pretty_print(f"Connected to DB at {DB_URL}", True)
return engine
except Exception as e:
print("ERROR! Unable to Connect to database with", DB_URL)
print(e)
return False
```
#### File: ga-out-of-field/etl/utils.py
```python
import os
import requests
import re
import zipfile
import datetime
import shutil
from sqlalchemy import text
def create_directory(directory, delete_first=False):
try:
if(delete_first):
remove_directory(directory)
if not os.path.isdir(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
def remove_directory(directory):
try:
if os.path.isdir(directory):
shutil.rmtree(directory)
except OSError:
print('Error: Removing directory. ' + directory)
def fetch_file(url, directory, filename=None):
r = requests.get(url)
# print(r)
"""
SET FILENAME TO DOWNLOADED FILE NAME
"""
# print('-> ', hasattr(r.headers, 'content-disposition'))
if not (filename):
if 'content-disposition' in r.headers:  # hasattr() would not detect a header key
d = r.headers['content-disposition']
filename = re.findall(
"filename=(.+)", d)[0]
else:
filename = f"extract_file_{datetime.datetime.now().replace(microsecond=0).isoformat()}.zip"
file_path = directory + filename
with open(f"{file_path}", "wb") as code:
code.write(r.content)
return filename
def get_filename_from_url(url, type=".zip"):
fn = url.split('/')
fn.reverse()
return fn[0] if fn[0].endswith(type) else None
def unzip(from_path, to_dir="."):
with zipfile.ZipFile(from_path, 'r') as zip:
zip.printdir()
zip.extractall(path=to_dir)
def rename_files(files_list, src_dir, dest_dir):
# print("rename_files", files_list, src_dir, dest_dir)
for i, file in enumerate(files_list):
src = src_dir+file['src_path']
dest = dest_dir+file['dest_path']
if os.path.exists(src):
os.rename(src, dest)
def downcase(word):
return word[:1].lower() + word[1:] if word else ''
def make_table_name(orig): return orig.replace(
' ', '_').replace('-', '_').lower()
def str2bool(v):
return str(v).lower() in ("yes", "true", "t", "1")
def prefixify(name, prefix):
# if(not name.startswith(prefix)):
return prefix + name
# return name
def tablenamify(name, prefix):
return f"{prefix + name}_table" if prefix else f"{name}_table"
# return prefixify(name + "_table", prefix)
def viewnameify(name, prefix, translations):
if (name in translations['tables']['noprefix']):
return name
return f"{prefix + name}"
def execute_sql(engine, statement):
with engine.connect() as conn:
conn.execute(text(statement))
def pretty_print(text, is_bullet=False):
if(not is_bullet):
output = f"\n--- {text.upper()}"
else:
output = f" * {text}"
print(output)
def get_num_files_in_dir(dir):
return len([name for name in os.listdir(dir)])
def clean_and_join_list(mylist, separator="_"):
return separator.join([x.lower() for x in mylist if len(x)])
```
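The helpers above compose into a small download-and-extract pipeline. A hedged sketch follows; the URL and directory names are placeholders, not values from the repository.
```python
# Hypothetical pipeline built from the utilities above; URL and paths are placeholders.
from utils import create_directory, fetch_file, unzip

RAW_DIR = "./data/raw/"
EXTRACT_DIR = "./data/extracted/"

create_directory(RAW_DIR, delete_first=True)
create_directory(EXTRACT_DIR)

zip_name = fetch_file("https://example.com/archive.zip", RAW_DIR)
unzip(RAW_DIR + zip_name, to_dir=EXTRACT_DIR)
```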
|
{
"source": "jeremyagray/al-btn-api",
"score": 3
}
|
#### File: al-btn-api/bin/county-cname.py
```python
import json
import sys
# Load the county canonical names.
cnames = []
with open(sys.argv[1], "r") as f:
for line in f:
(name, cname) = line.strip().split(",")
cnames.append({
"name": str(name),
"cname": str(cname),
})
def getCountyCanonicalName(name):
"""Get county canonical county name for provided name."""
for county in cnames:
if county["name"] == name:
return county["cname"]
# Load the county dataset.
data = []
with open(sys.argv[2], "r") as f:
data = json.load(f)
# Merge the codes into the data.
merged = []
for obj in data:
obj["cname"] = getCountyCanonicalName(obj["name"])
merged.append(obj)
print(json.dumps(merged, indent=2))
```
#### File: al-btn-api/bin/county-seat-coordinates.py
```python
import json
import sys
import requests
url = "https://www2.census.gov/geo/docs/maps-data/data/gazetteer/2021_Gazetteer/2021_gaz_place_01.txt"
raw = requests.get(url)
coordinates = []
for line in raw.iter_lines():
fields = line.decode().strip().split(" ")
name = fields[3].strip()
for suf in [" city", " town", " CDP"]:
name = name.removesuffix(suf)
coordinates.append({
"name": name,
"geoid": str(fields[1]),
"lat": str(fields[10]),
"long": str(fields[11]),
})
def getCoordinates(city):
"""Get the city coordinates."""
for place in coordinates:
if place["name"] == city:
return [place['long'], place['lat']]
def getID(city):
"""Get the city GEOID."""
for place in coordinates:
if place["name"] == city:
return place['geoid']
# Load the county dataset.
data = []
with open(sys.argv[1], "r") as f:
data = json.load(f)
# Merge the seat and establishment dates into the data.
merged = []
for obj in data:
obj["seat"]["location"]["coordinates"] = getCoordinates(obj["seat"]["name"])
obj["seat"]["geoid"] = getID(obj["seat"]["name"])
merged.append(obj)
print(json.dumps(merged, indent=2))
```
|
{
"source": "jeremyagray/django-allauth-2f2a",
"score": 2
}
|
#### File: django-allauth-2f2a/allauth_2f2a/middleware.py
```python
import warnings
from allauth.account.adapter import get_adapter
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import NoReverseMatch
from django.urls import resolve
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
class AllauthTwoFactorMiddleware(MiddlewareMixin):
"""Prevent partially authenticated users.
Reset the login flow if another page is loaded halfway through the
login. (I.e. if the user has logged in with a username/password,
but not yet entered their two-factor credentials.) This makes sure
a user does not stay half logged in by mistake.
"""
def process_request(self, request):
"""Ensure 2FA completion.
Remove ``allauth_2f2a_user_id`` from session if the URL does
not match the 2FA URL.
"""
match = resolve(request.path)
if not match.url_name or not match.url_name.startswith(
"two-factor-authenticate"
):
try:
del request.session["allauth_2f2a_user_id"]
except KeyError:
pass
class BaseRequire2FAMiddleware(MiddlewareMixin):
"""Require users to configure 2FA.
Ensure that particular users have two-factor authentication
enabled before they have access to the rest of the app.
If they don't have 2FA enabled, they will be redirected to the 2FA
enrollment page and not be allowed to access other pages.
"""
# List of URLs that the user should still be allowed to access.
allowed_pages = [
# Users should be able to log out or change password.
"account_logout",
"account_change_password",
"account_reset_password",
# Users should be able to configure 2FA.
"two-factor-setup",
]
# The message to the user if they don't have 2FA enabled and must
# enable it.
require_2fa_message = (
"You must enable two-factor authentication before doing anything else."
)
def on_require_2fa(self, request):
"""Redirect to 2fa setup if required.
If the current request requires 2fa and the user does not have
it enabled, this is executed. The result of this is returned
from the middleware.
"""
# See allauth.account.adapter.DefaultAccountAdapter.add_message.
if "django.contrib.messages" in settings.INSTALLED_APPS:
# If there is already a pending message related to two-factor (likely
# created by a redirect view), simply update the message text.
storage = messages.get_messages(request)
tag = "2fa_required"
for m in storage:
if m.extra_tags == tag:
m.message = self.require_2fa_message
break
# Otherwise, create a new message.
else:
messages.error(request, self.require_2fa_message, extra_tags=tag)
# Mark the storage as not processed so they'll be shown to the user.
storage.used = False
# Redirect user to two-factor setup page.
return redirect("two-factor-setup")
def require_2fa(self, request):
"""Determine if 2fa is required.
Check if this request is required to have 2FA before accessing
the app.
This should return True if this request requires 2FA. (Note
that the user is already authenticated by this point; anonymous
requests never reach this method.)
You can access anything on the request, but generally
request.user will be most interesting here.
"""
raise NotImplementedError("You must implement require_2fa.")
def process_view(self, request, view_func, view_args, view_kwargs):
"""Process view based on 2fa requirements."""
# The user is not logged in, do nothing.
if request.user.is_anonymous:
return
# If this doesn't require 2FA, then stop processing.
if not self.require_2fa(request):
return
# If the user is on one of the allowed pages, do nothing.
for urlname in self.allowed_pages:
try:
if request.path == reverse(urlname):
return
except NoReverseMatch:
# The developer may have misconfigured the list of
# allowed pages. Let's not outright crash at that
# point, but inform the developer about their mishap.
warnings.warn(
"NoReverseMatch for %s while checking for pages allowed without 2FA"
% urlname
)
# User already has two-factor configured, do nothing.
if get_adapter(request).has_2fa_enabled(request.user):
return
# The request required 2FA but it isn't configured!
return self.on_require_2fa(request)
```
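``BaseRequire2FAMiddleware`` is abstract; a concrete subclass must supply ``require_2fa()``. A minimal sketch follows. The staff-only policy and class name are assumptions for illustration, not part of the package.
```python
# Hypothetical subclass: require 2FA only for staff accounts.
from allauth_2f2a.middleware import BaseRequire2FAMiddleware


class RequireStaff2FAMiddleware(BaseRequire2FAMiddleware):
    def require_2fa(self, request):
        # process_view() already filters out anonymous users, so
        # request.user is an authenticated user here.
        return request.user.is_staff
```
The subclass would then be listed in ``MIDDLEWARE`` after Django's authentication middleware so that ``request.user`` is populated.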
#### File: django-allauth-2f2a/allauth_2f2a/mixins.py
```python
from django.contrib.auth.mixins import AccessMixin
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from allauth_2f2a.utils import user_has_valid_totp_device
class ValidTOTPDeviceRequiredMixin(AccessMixin):
"""Require a valid TOTP device."""
no_valid_totp_device_url = reverse_lazy("two-factor-setup")
def dispatch(self, request, *args, **kwargs):
"""Dispatch appropriate view based on device settings."""
if not request.user.is_authenticated:
return self.handle_no_permission()
if not user_has_valid_totp_device(request.user):
return self.handle_missing_totp_device()
return super(ValidTOTPDeviceRequiredMixin, self).dispatch(
request, *args, **kwargs
)
def handle_missing_totp_device(self):
"""Handle missing device.
Redirect to ``self.no_valid_totp_device_url`` if there is no
valid TOTP device configured.
Returns
-------
django.http.HttpResponseRedirect
A redirect to ``self.no_valid_totp_device_url``.
"""
return HttpResponseRedirect(self.no_valid_totp_device_url)
```
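A short, hypothetical class-based view showing how ``ValidTOTPDeviceRequiredMixin`` is typically mixed in; the view name and template are placeholders.
```python
# Hypothetical view protected by the mixin above; names are placeholders.
from django.views.generic import TemplateView

from allauth_2f2a.mixins import ValidTOTPDeviceRequiredMixin


class BillingView(ValidTOTPDeviceRequiredMixin, TemplateView):
    """Reachable only by authenticated users with a valid TOTP device."""

    template_name = "billing.html"
```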
#### File: jeremyagray/django-allauth-2f2a/setup.py
```python
import codecs
from setuptools import find_packages
from setuptools import setup
def long_description():
"""Load README.rst for setup's long description."""
with codecs.open("README.rst", encoding="utf8") as f:
return f.read()
setup(
name="django-allauth-2f2a",
version="0.9.1",
packages=find_packages(".", include=("allauth_2f2a", "allauth_2f2a.*")),
include_package_data=True,
install_requires=[
"django>=2.2",
"qrcode>=5.3",
"django-allauth>=0.44",
"django-otp>=1.0.0",
],
author="<NAME>",
author_email="<EMAIL>",
description="Adds two factor authentication to django-allauth.",
license="Apache 2.0",
keywords=["otp", "auth", "two factor authentication", "allauth", "django", "2fa"],
url="https://github.com/jeremyagray/django-allauth-2f2a",
long_description=long_description(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
"Topic :: Internet",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.2",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
],
python_requires=">=3.6",
)
```
|
{
"source": "jeremyagray/django-loader",
"score": 2
}
|
#### File: django-loader/loader/loader.py
```python
import json
import os
import sys
import types
from pathlib import Path
import bespon
import toml
from django.core.exceptions import ImproperlyConfigured
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError
def generate_secret_key():
"""Generate a secret key for a Django app.
Generate a secret key for a Django app, using
``django.core.management.utils.get_random_secret_key``.
Returns
-------
string
A random secret key.
"""
from django.core.management.utils import get_random_secret_key
return get_random_secret_key()
def load_secrets(fn=".env", prefix="DJANGO_ENV_", **kwargs):
"""Load a list of configuration variables.
Return a dictionary of configuration variables, as loaded from a
configuration file or the environment. Values passed in via
``kwargs`` will be used as the
configuration variable's default value if one is not found in the
configuration file or environment.
Parameters
----------
fn : string, default=".env"
Configuration filename, defaults to ``.env``. May be in TOML,
JSON, YAML, or BespON formats. Formats will be attempted in this
order.
prefix : string, default="DJANGO_ENV_"
Prefix for environment variables. This prefix will be
prepended to all variable names before searching for them in
the environment.
kwargs : dict, optional
Dictionary with configuration variables as keys and default
values as values.
Returns
-------
dict
A dictionary of configuration variables and their values.
"""
return merge(kwargs, load_file(fn), load_environment(prefix))
def merge(defaults, file, env):
"""Merge configuration from defaults, file, and environment."""
config = defaults
# Merge in file options, if they exist in the defaults.
for (k, v) in file.items():
if k in config:
config[k] = v
# Merge in environment options, if they exist in the defaults.
for (k, v) in env.items():
if k in config:
config[k] = v
return config
def load_file(fn, raise_bad_format=False):
"""Attempt to load configuration variables from ``fn``.
Attempt to load configuration variables from ``fn``. If ``fn``
does not exist or is not a recognized format, return an empty dict
unless ``raise_bad_format`` is ``True``.
Parameters
----------
fn : string
Filename from which to load configuration values.
raise_bad_format : boolean, default=False
Determine whether to raise
``django.core.exceptions.ImproperlyConfigured`` if the file
format is not recognized. Default is ``False``.
Returns
-------
dict
A dictionary, possibly empty, of configuration variables and
values.
Raises
------
django.core.exceptions.ImproperlyConfigured
Raises an ``ImproperlyConfigured`` exception if the file
format is not recognized and ``raise_bad_format`` is ``True``.
"""
# Determine if the file actually exists, and bail if not.
secrets = {}
if not Path(fn).is_file():
return secrets
# Return as soon as one format parses successfully so that a later
# parser cannot clobber the result or trigger a spurious format error.
# Attempt to load TOML, since python.
with open(fn, "r") as f:
try:
return toml.load(f)
except (toml.TomlDecodeError):
pass
# Attempt to load JSON.
with open(fn, "r") as f:
try:
return json.load(f)
except (json.JSONDecodeError):
pass
# Attempt to load YAML, with ruamel.yaml and YAML 1.2.
# Overachiever.
with open(fn, "r") as f:
try:
yaml = YAML(typ="safe")
return yaml.load(f)
except (YAMLError):
pass
# Attempt to load BespON. Geek.
with open(fn, "r") as f:
try:
return bespon.load(f)
except (bespon.erring.DecodingException):
# Everything failed, so raise.
if raise_bad_format:
raise ImproperlyConfigured(
f"Configuration file {fn} is not a recognized format."
)
return secrets
def _keys_are_indices(d):
"""Determine if the keys of a dict are list indices."""
# All integers?
keys = []
for k in d.keys():
try:
keys.append(int(k))
except (ValueError):
return False
keys = sorted(keys)
# Zero start?
if min(keys) != 0:
return False
# Consecutive?
if keys != list(range(0, max(keys) + 1)):
return False
return True
def _convert_dict_to_list(d):
"""Convert a list-style dict to a list."""
keys = sorted(d.keys())
the_list = []
for k in keys:
the_list.append(d[k])
return the_list
def _convert_listdict_to_list(ds):
"""Convert lists as dicts to lists in a data structure."""
for (k, v) in ds.items():
if isinstance(ds[k], dict):
# If the item points a dict, descend.
ds[k] = _convert_listdict_to_list(ds[k])
# We're back. Now check if the dict is a list-style dict
# and maybe convert to a list.
if _keys_are_indices(ds[k]):
ds[k] = _convert_dict_to_list(ds[k])
return ds
def load_environment(prefix="DJANGO_ENV_"):
"""Load Django configuration variables from the environment.
This function searches the environment for variables prepended
with ``prefix``. Currently, this function only reliably works for
string variables, but hopefully will work for other types,
dictionaries, and lists in the future.
Parameters
----------
prefix : string, default="DJANGO_ENV_"
Prefix for environment variables. This prefix should be
prepended to all valid variable names in the environment.
Returns
-------
dict
A dictionary, possibly empty, of configuration variables and
values.
"""
config = {}
for (key, value) in os.environ.items():
if key.startswith(prefix):
# Find the prefixed values and strip the prefix.
if sys.version_info >= (3, 6) and sys.version_info < (3, 9):
name = key[len(prefix) :]
else:
name = key.removeprefix(prefix)
if "__" not in name:
# Find the non-dict and non-list pairs and add them to
# the dict.
config[name] = value
else:
# Handle the flattened data structures, treating the
# list type variables as dicts.
# Based on:
# https://gist.github.com/fmder/494aaa2dd6f8c428cede
keys = name.split("__")
sub_config = config
for k in keys[:-1]:
try:
if not isinstance(sub_config[k], dict):
raise ImproperlyConfigured(
f"{k} is defined multiple times in the environment."
)
sub_config = sub_config[k]
except (KeyError):
sub_config[k] = {}
sub_config = sub_config[k]
sub_config[keys[-1]] = value
config = _convert_listdict_to_list(config)
return config
def dump_environment(config, prefix="DJANGO_ENV_", export=True):
"""Dump configuration as an environment variable string.
Parameters
----------
config : dict
The configuration dict.
prefix : string, default="DJANGO_ENV_"
Prefix for environment variables. This prefix should be
prepended to all valid variable names in the environment.
export : boolean, default=True
Prepend each environment variable string with "export ", or
not.
Returns
-------
string
The current configuration as a string setting environment
variables.
"""
stack = []
dumps = []
if export:
exp = "export "
else:
exp = ""
# Convert the config dict into a list (stack).
for (k, v) in config.items():
stack.append((k, v))
while stack:
(k, v) = stack.pop(0)
if isinstance(v, list):
for (i, sv) in enumerate(v):
# Use a double underscore so load_environment() can rebuild the nesting.
stack.append((f"{k}__{i}", sv))
elif isinstance(v, dict):
for (sk, sv) in v.items():
stack.append((f"{k}__{sk}", sv))
else:
dumps.append(f"{str(k)}='{str(v)}'")
return "\n".join(f"{exp}{prefix}{line}" for line in dumps)
def validate_not_empty_string(name, val):
"""Validate that ``val`` is not an empty string.
Validate that ``val`` is not an empty string.
Parameters
----------
val : any
Configuration variable to validate.
Returns
-------
boolean
``True`` if ``val`` is not an empty string.
Raises
------
django.core.exceptions.ImproperlyConfigured
Raises an ``ImproperlyConfigured`` exception on empty strings,
with an error message.
"""
if val == "":
raise ImproperlyConfigured(f"{name} is an empty string and should not be")
return True
def validate_falsy(name, val):
"""Validate that ``val`` is falsy.
Validate that ``val`` is falsy according to
https://docs.python.org/3/library/stdtypes.html#truth-value-testing.
Parameters
----------
val : any
Configuration variable to validate.
Returns
-------
boolean
``True`` if ``val`` is falsy.
Raises
------
django.core.exceptions.ImproperlyConfigured
Raises an ``ImproperlyConfigured`` exception on truthy values,
with an error message.
"""
if val:
raise ImproperlyConfigured(
f"{name} has value {val} which is truthy, but should be falsy"
)
return True
def validate_truthy(name, val):
"""Validate that ``val`` is truthy.
Validate that ``val`` is truthy according to
https://docs.python.org/3/library/stdtypes.html#truth-value-testing.
Parameters
----------
val : any
Configuration variable to validate.
Returns
-------
boolean
``True`` if ``val`` is truthy.
Raises
------
django.core.exceptions.ImproperlyConfigured
Raises an ``ImproperlyConfigured`` exception on falsy values,
with an error message.
"""
if not val:
raise ImproperlyConfigured(
f"{name} has value {val} which is falsy, but should be truthy"
)
return True
# def set_or_fail_on_unset(val):
# """Raise ``ImproperlyConfigured()`` if ``val`` is not set.
# Return the configuration value if set, otherwise raise
# ``django.core.exceptions.ImproperlyConfigured()`` to abort.
# Parameters
# ----------
# val : string
# Configuration variable that should be set to a value.
# Returns
# -------
# string
# The variable value, if set.
# """
# if not val:
# raise ImproperlyConfigured("A required configuration variable is not set.")
# return val
# def _validate(name, val, validation=[]):
# """Validate a django configuration variable."""
# env_name = "DJANGO_" + name
# if isinstance(validation, types.FunctionType):
# try:
# return validation(val)
# except ImproperlyConfigured:
# raise
# else:
# if len(validation) > 0:
# if not (val in validation):
# raise ImproperlyConfigured(
# f"{name} can not have value {val};"
# f" must be one of [{', '.join(validation)}]."
# )
# return
# print(f"{name} loaded from {env_name}.")
# return val
def dump_secrets(fmt="TOML", **kwargs):
"""Dump a secrets dictionary to the specified format.
Dump a secrets dictionary to the specified format, defaulting to
TOML.
Parameters
----------
fmt : string, default="TOML"
The dump format, one of ``TOML``, ``JSON``, ``YAML``,
``BespON``, or ``ENV``.
kwargs : dict
A dictionary of configuration variables.
"""
if fmt == "TOML":
return toml.dumps(kwargs)
elif fmt == "JSON":
return json.dumps(kwargs)
elif fmt == "YAML":
# Let's jump through some hoops for the sake of streams.
# https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
from ruamel.yaml.compat import StringIO
stream = StringIO()
yaml = YAML(typ="safe")
yaml.dump(kwargs, stream)
return stream.getvalue()
elif fmt == "BespON":
return bespon.dumps(kwargs)
else:
return dump_environment(kwargs)
def main():
"""Run as script, to access ``dump()`` functions."""
# Desired functions:
# create SECRET_KEY
# load and dump environment
# cli help
# cli copyright/license
print(dump_secrets(**load_secrets(**{"ALLOWED_HOSTS": ["bob is your uncle"]})))
if __name__ == "__main__":
main()
```
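A hedged sketch of how ``load_secrets()`` might be consumed from a Django ``settings.py``; the defaults and the ``loader`` import path are assumptions for illustration.
```python
# Hypothetical settings.py fragment; defaults and import path are assumptions.
from loader import load_secrets, validate_not_empty_string

secrets = load_secrets(
    fn=".env",
    prefix="DJANGO_ENV_",
    SECRET_KEY="",
    ALLOWED_HOSTS=["localhost"],
)

validate_not_empty_string("SECRET_KEY", secrets["SECRET_KEY"])

SECRET_KEY = secrets["SECRET_KEY"]
ALLOWED_HOSTS = secrets["ALLOWED_HOSTS"]
```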
|
{
"source": "jeremyagray/fcc-d3-force-directed-graph",
"score": 3
}
|
#### File: fcc-d3-force-directed-graph/bin/create-dataset.py
```python
import json
import re
import sys
prefix = r"[ ├└─│]+"
def level(line):
"""Find the nesting level of a line."""
length = len(line.rstrip()) - len(re.sub(prefix, "", line.rstrip()))
return (length // 3) - 1
def dependency(line):
"""Find the dependency listed on a line."""
return re.sub(prefix, "", line.rstrip()).split("@")[0]
def version(line):
"""Find the version of the dependency listed on a line."""
return re.sub(prefix, "", line.rstrip()).split("@")[1]
def descendants(node):
"""Count the descendants of a node."""
num = 0
if node["children"]:
for child in node["children"]:
# Add the current child.
num += 1
# Add the current child's descendants.
num += descendants(child)
return num
def treeify(dependencies):
"""Treeify a list of dependencies."""
tree = {}
ancestors = []
for dependency in dependencies:
# Dependency is the root of the tree.
if dependency["level"] == 0 and tree == {}:
tree = {
"children": None,
"level": 0,
"dependency": dependency["dependency"],
"version": dependency["version"],
}
ancestors.append(tree)
# Dependency is a daughter of the last ancestor.
elif dependency["level"] == ancestors[-1]["level"] + 1:
if ancestors[-1]["children"] is None:
ancestors[-1]["children"] = []
ancestors[-1]["children"].append({
"children": None,
"level": dependency["level"],
"dependency": dependency["dependency"],
"version": dependency["version"],
})
ancestors.append(ancestors[-1]["children"][-1])
# Dependency is an aunt/sister of the last ancestor.
elif dependency["level"] <= ancestors[-1]["level"]:
while dependency["level"] <= ancestors[-1]["level"]:
ancestors.pop()
if ancestors[-1]["children"] is None:
ancestors[-1]["children"] = []
ancestors[-1]["children"].append({
"children": None,
"level": dependency["level"],
"dependency": dependency["dependency"],
"version": dependency["version"],
})
ancestors.append(ancestors[-1]["children"][-1])
return tree
def nodes(tree):
"""Produce a list of nodes from a tree of dependencies."""
dependencies = [
{
"dependency": tree["dependency"],
"level": tree["level"],
"version": tree["version"],
"group": tree["group"],
},
]
if tree["children"]:
for child in tree["children"]:
dependencies += nodes(child)
return dependencies
def snowflake(nodes, ignore_version=True):
"""Make a unique list."""
names = []
filtered = []
rejects = 0
for node in nodes:
if ignore_version and node["dependency"] not in names:
names.append(node["dependency"])
filtered.append(node)
elif not ignore_version and node not in filtered:
filtered.append(node)
else:
rejects += 1
assert len(nodes) == len(filtered) + rejects
return filtered
def pairs(tree, ignore_version=True):
"""Produce a list of pairs from a tree of dependencies."""
my_pairs = []
if tree["children"]:
for child in tree["children"]:
my_pairs.append((tree["dependency"], child["dependency"],))
my_pairs += pairs(child)
return my_pairs
def _set_group(tree, group):
"""Set the group for a tree of dependencies."""
grouped = {
"dependency": tree["dependency"],
"level": tree["level"],
"version": tree["version"],
"group": group,
"children": [],
}
if tree["children"]:
for child in tree["children"]:
grouped["children"].append(_set_group(child, group))
return grouped
def group(tree, ignore_version=True):
"""Group by the top level dependencies."""
group = 0
grouped = {
"dependency": tree["dependency"],
"level": tree["level"],
"version": tree["version"],
"group": group,
"children": [],
}
if tree["children"]:
for child in tree["children"]:
group += 1
grouped["children"].append(_set_group(child, group))
return grouped
def links(tree, ignore_version=True):
"""Produce a list of links from a tree of dependencies."""
all_pairs = pairs(tree)
accepted_pairs = []
rejected_pairs = []
counts = {}
for pair in all_pairs:
# print(f"pair: {pair}")
if pair in accepted_pairs:
rejected_pairs.append(pair)
counts[pair] += 1
elif (pair[1], pair[0],) in accepted_pairs:
rejected_pairs.append(pair)
counts[(pair[1], pair[0],)] += 1
else:
accepted_pairs.append(pair)
counts[pair] = 1
assert len(all_pairs) == len(accepted_pairs) + len(rejected_pairs)
my_links = []
for (k, v) in counts.items():
my_links.append({
"source": k[0],
"target": k[1],
"links": v,
})
return my_links
def main():
"""Convert npm-ls output to JSON force-directed graph data."""
filename = sys.argv[1]
dependencies = []
with open(filename, "r") as file:
for line in file:
dependencies.append({
"dependency": dependency(line),
"version": version(line),
"level": level(line),
})
tree = treeify(dependencies)
print(json.dumps({
"nodes": snowflake(nodes(group(tree))),
"links": links(tree),
}, indent=2))
# print(json.dumps(tree, indent=2))
# print(json.dumps(group(tree), indent=2))
return
if __name__ == "__main__":
main()
```
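A hedged demonstration of ``treeify()``, ``descendants()``, and ``links()`` on a tiny, hand-written dependency list (the package names are made up); it assumes the snippet runs in the same module or after importing its functions.
```python
# Hypothetical demonstration; the dependency list below is hand-written in the
# same shape that main() builds from `npm ls` output.
deps = [
    {"dependency": "app", "version": "1.0.0", "level": 0},
    {"dependency": "d3", "version": "7.0.0", "level": 1},
    {"dependency": "d3-array", "version": "3.0.0", "level": 2},
    {"dependency": "lodash", "version": "4.17.21", "level": 1},
]

tree = treeify(deps)
print(descendants(tree))               # 3: d3, d3-array, lodash
print(snowflake(nodes(group(tree))))   # 4 unique nodes, grouped by top-level dependency
print(links(tree))                     # 3 links: app->d3, d3->d3-array, app->lodash
```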
|
{
"source": "jeremyagray/pycvodes",
"score": 3
}
|
#### File: pycvodes/pycvodes/_libs.py
```python
def get_libs(config=None):
if config is None:
from . import config
return (
"sundials_nvecserial,sundials_cvodes,sundials_sunlinsolspgmr,sundials_sunlinsolspbcgs,"
"sundials_sunlinsolsptfqmr,sundials_sunmatrixdense,sundials_sunmatrixband" + (
',sundials_sunlinsollapackdense,sundials_sunlinsollapackband' if config["LAPACK"]
else ',sundials_sunlinsoldense,sundials_sunlinsolband'
) + (
",sundials_sunlinsolklu" if config["KLU"]
else ""
)
)
def get_libs_linkline(config=None):
libs = get_libs(config)
if libs:
return " ".join(["-l%s" % lib for lib in libs.split(",")])
else:
return ""
def print_libs_linkline(config=None):
print(get_libs_linkline(config))
```
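A small, hypothetical check of the link line produced for a given configuration; the dict below stands in for ``pycvodes.config``.
```python
# Hypothetical config dict standing in for pycvodes.config.
config = {"LAPACK": False, "KLU": False}
line = get_libs_linkline(config)
assert line.startswith("-lsundials_nvecserial")
assert "-lsundials_sunlinsollapackdense" not in line  # only present when LAPACK is True
print(line)
```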
#### File: pycvodes/pycvodes/_util.py
```python
from __future__ import division
import numpy as np
valid_arg_combs = [set(), {"lband", "uband"}, {"nnz"}]  # set(), not {}: an empty dict never equals a set
def _check_jac_type(**kwargs):
nonnull_opts = dict((k, v) for k, v in kwargs.items() if v is not None)
if any(map(set(nonnull_opts).__eq__, valid_arg_combs)):
pass
else:
raise ValueError("Couldn't determine jacobian type from given non-default options: {}".format(nonnull_opts))
def _get_jmat_out(ny, lband=None, uband=None, nnz=None):
if lband is None and nnz is None:
# jmat_out, dfdx_out
return np.empty((ny, ny)), np.empty(ny)
elif nnz is None:
# jmat_out, dfdx_out
return np.empty((1 + lband + uband, ny)), np.empty(ny)
else:
# data, colptrs, rowvals
return np.empty(nnz), np.empty(ny + 1), np.empty(nnz)
def _check_callable(f, j, x0, y0, lband=None, uband=None, nnz=None):
ny = len(y0)
_fout = np.empty(ny)
_ret = f(x0, y0, _fout)
if _ret is not None:
raise ValueError("f() must return None")
if j is None:
return # Not all methods require a jacobian
args = _get_jmat_out(ny, lband=lband, uband=uband,
nnz=nnz)
_ret = j(x0, y0, *args)
if _ret is not None:
raise ValueError("j() must return None")
def _get_jmat_out_short(ny, lband=None, uband=None, nnz=None):
if lband is None and nnz is None:
# jmat_out, dfdx_out
return np.empty((ny, ny - 1)), np.empty(ny)
elif nnz is None:
# jmat_out, dfdx_out
return np.empty((1 + lband + uband, ny - 1)), np.empty(ny)
else:
# data, colptrs, rowvals
return np.empty(nnz - 1), np.empty(ny), np.empty(nnz-1)
def _check_indexing(f, j, x0, y0, lband=None, uband=None, nnz=None):
ny = len(y0)
_fout_short = np.empty(ny - 1)
try:
f(x0, y0, _fout_short)
except (IndexError, ValueError):
pass
else:
raise ValueError("All elements in fout not assigned in f()")
if j is None:
return # Not all methods require a jacobian
args = _get_jmat_out_short(ny, lband=lband, uband=uband, nnz=nnz)
try:
j(x0, y0, *args)
except (IndexError, ValueError):
pass
else:
if nnz:
raise ValueError("In one of (data, colptrs, rowvals), not all elements assigned in j()")
else:
raise ValueError("In either jmat_out or dfdx_out, not all elements assigned in j()")
```
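A hedged sketch showing ``_check_callable()`` and ``_check_indexing()`` exercising user callbacks for a dense Jacobian; the toy decay system and the ``pycvodes._util`` import path are assumptions.
```python
# Hypothetical check of user callbacks with a toy two-component decay system.
import numpy as np

from pycvodes._util import _check_callable, _check_indexing


def f(x, y, fout):
    fout[:] = -y  # fill every element of fout; return None


def j(x, y, jmat_out, dfdx_out):
    jmat_out[:, :] = -np.eye(len(y))  # dense Jacobian d f_i / d y_j
    dfdx_out[:] = 0.0                 # no explicit x dependence


y0 = np.array([1.0, 2.0])
_check_callable(f, j, 0.0, y0)  # raises if a callback returns a value
_check_indexing(f, j, 0.0, y0)  # raises if a callback leaves elements unassigned
```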
|
{
"source": "JeremyAlain/lottery_ticket_pruner",
"score": 2
}
|
#### File: lottery_ticket_pruner/tests/test_lottery_ticker_pruner_randseed.py
```python
import random
random.seed(1234)
import numpy as np # noqa
np.random.seed(2345)
# Dancing needed to work with TF 1.x and 2.x
import tensorflow # noqa
if hasattr(tensorflow, 'set_random_seed'):
tensorflow.set_random_seed(3456)
else:
tensorflow.random.set_seed(3456)
import unittest # noqa
import numpy as np # noqa
import tensorflow.keras as keras # noqa
import lottery_ticket_pruner # noqa
TEST_DNN_INPUT_DIMS = (64, 64, 3)
TEST_DNN_NUM_CLASSES = 10
class TestLotteryTicketPrunerRandseed(unittest.TestCase):
def _create_test_dnn_model(self):
input = keras.Input(shape=TEST_DNN_INPUT_DIMS, dtype='float32')
x = keras.layers.Conv2D(4,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=True,
name='Conv1')(input)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv1')(x)
x = keras.layers.ReLU(6., name='Conv1_relu')(x)
x = keras.layers.Conv2D(3,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name='Conv2')(x)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv2')(x)
x = keras.layers.ReLU(6., name='Conv2_relu')(x)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(TEST_DNN_NUM_CLASSES, activation='softmax',
use_bias=True, name='Logits')(x)
model = keras.Model(inputs=input, outputs=x)
return model
#
# calc_prune_mask()
# 'smallest_weights_global'
#
def test_smallest_weights_global(self):
""" Tests the case where many or all weights have the same value. Hence we might be tempted to mask all of the
smallest weights rather than honoring only up to the prune rate.
"""
model = self._create_test_dnn_model()
interesting_layers = [model.layers[1], model.layers[4], model.layers[8]]
interesting_weights_index = 0
# Make sure no weights are zero so our checks below for zeroes only existing in masked weights are reliable
weight_counts = []
for layer in interesting_layers:
weights = layer.get_weights()
weights[interesting_weights_index][weights[interesting_weights_index] == 0.0] = 0.1234
layer.set_weights(weights)
num_weights = np.prod(weights[interesting_weights_index].shape)
weight_counts.append(num_weights)
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
num_pruned1 = 0
for layer in interesting_layers:
weights = layer.get_weights()
num_pruned1 += np.sum(weights[interesting_weights_index] == 0.0)
prune_rate = 0.5
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights_global')
# calc_prune_mask() shouldn't do the actual pruning so verify that weights didn't change
num_pruned2 = 0
for layer in interesting_layers:
weights = layer.get_weights()
num_pruned2 += np.sum(weights[interesting_weights_index] == 0.0)
self.assertEqual(num_pruned1, num_pruned2)
pruner.apply_pruning(model)
pruned_counts = []
for layer in interesting_layers:
weights = layer.get_weights()
pruned_counts.append(np.sum(weights[interesting_weights_index] == 0.0))
total_weights = np.sum(weight_counts)
num_pruned = np.sum(pruned_counts)
self.assertAlmostEqual(prune_rate, num_pruned / total_weights, places=1)
# Given the seeding we did at the beginning of this test these results should be reproducible. They were
# obtained by manual inspection.
# Ranges are used here since TF 1.x on python 3.6, 3.7 gives slightly different results from TF 2.x on
# python 3.8. These assertions accommodate both.
self.assertTrue(62 <= pruned_counts[0] <= 67, msg=f'pruned_counts={pruned_counts}')
self.assertTrue(2 <= pruned_counts[1] <= 5, msg=f'pruned_counts={pruned_counts}')
self.assertTrue(5 <= pruned_counts[2] <= 9, msg=f'pruned_counts={pruned_counts}')
self.assertEqual(75, sum(pruned_counts))
# Now prune once more to make sure cumulative pruning works as expected
total_prune_rate = prune_rate
prune_rate = 0.2
total_prune_rate = total_prune_rate + (1.0 - total_prune_rate) * prune_rate
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights_global')
pruner.apply_pruning(model)
pruned_counts = []
for layer in interesting_layers:
weights = layer.get_weights()
pruned_counts.append(np.sum(weights[interesting_weights_index] == 0.0))
total_weights = np.sum(weight_counts)
num_pruned = np.sum(pruned_counts)
self.assertEqual(num_pruned / total_weights, total_prune_rate)
# Given the seeding we did at the beginning of this test these results should be reproducible. They were
# obtained by manual inspection.
# Ranges are used here since TF 1.x on python 3.6, 3.7 gives slightly different results from TF 2.x on
# python 3.8. These assertions accommodate both.
self.assertTrue(74 <= pruned_counts[0] <= 78, msg=f'pruned_counts={pruned_counts}')
self.assertTrue(2 <= pruned_counts[1] <= 5, msg=f'pruned_counts={pruned_counts}')
self.assertTrue(9 <= pruned_counts[2] <= 12, msg=f'pruned_counts={pruned_counts}')
self.assertEqual(90, sum(pruned_counts))
if __name__ == '__main__':
unittest.main()
```
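Outside of the test harness, the core loop these tests exercise (construct the pruner, ``calc_prune_mask()``, ``apply_pruning()``) can be sketched as below; the model, prune rates, and training step are placeholders.
```python
# Hypothetical iterative-pruning sketch; model, rates, and training are placeholders.
import tensorflow.keras as keras

import lottery_ticket_pruner

inputs = keras.Input(shape=(32,), dtype='float32')
outputs = keras.layers.Dense(10, activation='softmax')(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)

pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
for prune_rate in (0.2, 0.2, 0.2):
    # ... train `model` here ...
    pruner.calc_prune_mask(model, prune_rate, 'smallest_weights_global')
    pruner.apply_pruning(model)
```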
#### File: lottery_ticket_pruner/tests/test_lottery_ticket_pruner.py
```python
import logging
import math
import sys
import unittest
import numpy as np
import tensorflow.keras as keras
import lottery_ticket_pruner
from lottery_ticket_pruner.lottery_ticket_pruner import _prune_func_smallest_weights, \
_prune_func_smallest_weights_global, _prune_func_large_final
TEST_NUM_CLASSES = 3
TEST_DENSE_INPUT_DIMS = (32, )
TEST_DENSE_LAYER_INPUTS = np.prod(TEST_DENSE_INPUT_DIMS)
TEST_DENSE_WEIGHT_COUNT = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES
TEST_DNN_INPUT_DIMS = (64, 64, 3)
TEST_DNN_NUM_CLASSES = 10
def enable_debug_logging():
logger = logging.getLogger('lottery_ticket_pruner')
logger.setLevel('DEBUG')
logger.addHandler(logging.StreamHandler(sys.stdout))
# enable_debug_logging()
class TestLotteryTicketPruner(unittest.TestCase):
def _create_test_model(self):
input = keras.Input(shape=TEST_DENSE_INPUT_DIMS, dtype='float32')
x = keras.layers.Dense(TEST_NUM_CLASSES)(input)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_model_diff_shape(self, diff_input_shape=False, diff_output_shape=False):
input_dims = (64, ) if diff_input_shape else TEST_DENSE_INPUT_DIMS
output_dims = (TEST_NUM_CLASSES + 1) if diff_output_shape else TEST_NUM_CLASSES
input = keras.Input(shape=input_dims, dtype='float32')
x = keras.layers.Dense(output_dims)(input)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_mode_extra_layer(self):
input = keras.Input(shape=TEST_DENSE_INPUT_DIMS, dtype='float32')
x = keras.layers.Dense(TEST_NUM_CLASSES)(input)
x = keras.layers.Softmax()(x)
model = keras.Model(inputs=input, outputs=x)
return model
def _create_test_dnn_model(self):
input = keras.Input(shape=TEST_DNN_INPUT_DIMS, dtype='float32')
x = keras.layers.Conv2D(4,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=True,
name='Conv1')(input)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv1')(x)
x = keras.layers.ReLU(6., name='Conv1_relu')(x)
x = keras.layers.Conv2D(3,
kernel_size=1,
padding='same',
use_bias=False,
activation=None,
name='Conv2')(x)
x = keras.layers.BatchNormalization(axis=1,
epsilon=1e-3,
momentum=0.999,
name='bn_Conv2')(x)
x = keras.layers.ReLU(6., name='Conv2_relu')(x)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(TEST_DNN_NUM_CLASSES, activation='softmax',
use_bias=True, name='Logits')(x)
model = keras.Model(inputs=input, outputs=x)
return model
def _get_test_dnn_training_data(self):
num_samples = 10
X = np.random.random((num_samples,) + TEST_DNN_INPUT_DIMS)
y = np.random.choice([0, 1], num_samples, replace=True)
y = keras.utils.to_categorical(y, num_classes=TEST_DNN_NUM_CLASSES)
return X, y
def _summed_model_weights(self, model):
weights_sum = 0.0
for layer in model.layers:
weights = layer.get_weights()
weights_sum += sum(np.sum(w) for w in weights)
return weights_sum
#
# _prune_func_smallest_weights()
#
def test_prune_func_smallest_weights(self):
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 1, 1, 1], actual_mask))
# Just changed order of weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([3, 1, 2, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([1, 0, 0, 1], actual_mask))
# Odd number of weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([5, 3, 1, 2, 4], dtype=float),
np.array([1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([1, 1, 0, 0, 1], actual_mask))
# Current mask masks out one of the lowest weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4, 5], dtype=float),
np.array([0, 1, 1, 1, 1]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 0, 1, 1, 1], actual_mask))
# Current mask masks out one of the lowest weights
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 2, 3, 4], dtype=float),
np.array([0, 1, 1, 0]), prune_percentage=0.25)
self.assertTrue(np.array_equal([0, 0, 1, 0], actual_mask))
# Some negative and some positive weights should be masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([-1, 2, -3, 4], dtype=float),
np.array([1, 1, 1, 1]), prune_percentage=0.5)
self.assertTrue(np.array_equal([0, 0, 1, 1], actual_mask))
# Many identical values but only some of them should get masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, 1, 1, 1, 2, 2], dtype=float),
np.array([1, 1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertEqual(3, np.sum(actual_mask))
# Many identical absolute values but only some of them should get masked
actual_mask = _prune_func_smallest_weights(np.array([]), None, np.array([1, -1, -1, 1, 2, -2], dtype=float),
np.array([1, 1, 1, 1, 1, 1]), prune_percentage=0.5)
self.assertEqual(3, np.sum(actual_mask))
#
# _prune_func_smallest_weights_global()
#
def test_prune_func_smallest_weights_global_negative(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Both percentage and count are unspecified
with self.assertRaises(ValueError) as ex:
_ = _prune_func_smallest_weights_global(None, None, prune_percentage=None, prune_count=None)
self.assertIn('prune_percentage', str(ex.exception))
self.assertIn('prune_count', str(ex.exception))
# Prune percentage is zero
with unittest.mock.patch('logging.Logger.warning') as warning:
_ = _prune_func_smallest_weights_global(pruner.iterate_prunables(model), None, prune_percentage=0.0,
prune_count=None)
self.assertEqual(1, warning.call_count)
# Prune count is zero
with unittest.mock.patch('logging.Logger.warning') as warning:
_ = _prune_func_smallest_weights_global(pruner.iterate_prunables(model), None, prune_percentage=None,
prune_count=0)
self.assertEqual(1, warning.call_count)
#
# _prune_func_large_final()
#
def test_prune_func_large_final_negative(self):
# Both percentage and count are unspecified
with self.assertRaises(ValueError) as ex:
_ = _prune_func_large_final(None, None, prune_percentage=None, prune_count=None)
self.assertIn('prune_percentage', str(ex.exception))
self.assertIn('prune_count', str(ex.exception))
#
# constructor
#
def test_constructor(self):
model1 = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model1)
# Disabled since there are legit cases where the two models may different. E.g when using transfer learning
# one may choose to replace, say, a single head layer in the original model with 2 or more layers in the new
# model.
# # Different number of layers
# model2 = self._create_test_mode_extra_layer()
# with self.assertRaises(ValueError) as ex:
# pruner.calc_prune_mask(model2, 0.2, 'smallest_weights')
# self.assertIn('must have the same number of layers', str(ex.exception))
# Different shapes
model2 = self._create_test_model_diff_shape(diff_input_shape=True)
with self.assertRaises(ValueError) as ex:
pruner.apply_pruning(model2)
self.assertIn('must have the same input shape', str(ex.exception))
model2 = self._create_test_model_diff_shape(diff_output_shape=True)
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model2, 0.2, 'smallest_weights')
self.assertIn('must have the same output shape', str(ex.exception))
#
# reset_masks()
#
def test_reset_masks(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
interesting_layer_index = 1
interesting_weights_index = 0
tpl = tuple([interesting_layer_index, tuple([interesting_weights_index])])
original_mask = np.array(pruner.prune_masks_map[tpl][interesting_weights_index])
self.assertEqual(TEST_DENSE_WEIGHT_COUNT, np.sum(original_mask))
# Prune and make sure prune mask has changed
pruner.calc_prune_mask(model, 0.2, 'smallest_weights')
pruned_mask = pruner.prune_masks_map[tpl][interesting_weights_index]
num_pruned = np.sum(pruned_mask)
self.assertLess(num_pruned, TEST_DENSE_WEIGHT_COUNT)
# Now reset
pruner.reset_masks()
reset_mask = np.array(pruner.prune_masks_map[tpl][interesting_weights_index])
self.assertEqual(TEST_DENSE_WEIGHT_COUNT, np.sum(reset_mask))
#
# apply_dwr()
#
def test_apply_dwr(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
interesting_layer_index = 1
interesting_weights_index = 0
tpl = (interesting_layer_index, (interesting_weights_index, ))
interesting_layer = model.layers[interesting_layer_index]
# Assign the weights values between 0..N, with 1/2 the weights being negative
weights = interesting_layer.get_weights()
interesting_weights = weights[interesting_weights_index]
num_interesting_layer_weights = np.prod(interesting_weights.shape)
test_weights = np.array(np.random.choice(range(num_interesting_layer_weights),
size=num_interesting_layer_weights, replace=False))
test_weights = test_weights.reshape(interesting_weights.shape)
weights[interesting_weights_index] = test_weights
interesting_layer.set_weights(weights)
prune_rate1 = 0.5
pruner.calc_prune_mask(model, prune_rate1, 'smallest_weights')
pruner.apply_pruning(model)
pruner.apply_dwr(model)
# Mask out any pruned weights
pruned_weights = interesting_layer.get_weights()[interesting_weights_index]
expected_test_weights = test_weights * pruner.prune_masks_map[tpl][interesting_weights_index]
# We expect DWR to have increased the value of unmasked weight by a factor of 2.0 (1.0 / 0.5 = 2.0)
expected_test_weights *= (1.0 / prune_rate1)
np.testing.assert_array_equal(expected_test_weights, pruned_weights)
# Prune again to make sure we accumulate the DWR multiplier as expected
weights[interesting_weights_index] = test_weights
interesting_layer.set_weights(weights)
prune_rate2 = 0.2
pruner.calc_prune_mask(model, prune_rate2, 'smallest_weights')
pruner.apply_pruning(model)
pruner.apply_dwr(model)
# Mask out any pruned weights
pruned_weights = interesting_layer.get_weights()[interesting_weights_index]
expected_test_weights = test_weights * pruner.prune_masks_map[tpl][interesting_weights_index]
# We expect DWR to have increased the value of unmasked weight by a factor of 2.5
# (1.0 / ((1.0 - 0.5) * (1.0 - 0.2)) = 2.5)
# But since there is rounding due to counting the number of 1s in the prune mask (an int) the rescaling factor
# is not quite exactly 2.5
num_first_prune_ones = int(num_interesting_layer_weights * prune_rate1)
denominator = (num_interesting_layer_weights - (num_first_prune_ones + int(num_first_prune_ones * prune_rate2)))
rescale_factor = num_interesting_layer_weights / denominator
expected_test_weights *= rescale_factor
np.testing.assert_array_almost_equal(expected_test_weights, pruned_weights, decimal=3)
#
# calc_prune_mask()
# 'smallest_weights'
#
def test_smallest_weights(self):
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer_index = 1
interesting_layer = model.layers[interesting_layer_index]
interesting_layer_shape = interesting_layer.weights[0].shape
interesting_layer_weight_count = int(np.prod(interesting_layer_shape))
interesting_key = tuple([interesting_layer_index, tuple([0])])
dl_test_weights = np.random.choice(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES,
size=TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES, replace=False)
# Get rid of zero weights since we count those below during verification
dl_test_weights += 1
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
pruner.calc_prune_mask(model, 0.5, 'smallest_weights')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.5))
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
actual_weights[0][actual_weights[0] == 0.0] = math.inf
min_weight = np.min(actual_weights[0])
self.assertGreaterEqual(min_weight, int(interesting_layer_weight_count * 0.5))
pruner.calc_prune_mask(model, 0.2, 'smallest_weights')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.6))
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
actual_weights[0][actual_weights[0] == 0.0] = math.inf
min_weight = np.min(actual_weights[0])
self.assertGreaterEqual(min_weight, int(interesting_layer_weight_count * 0.6))
def test_smallest_weights_2(self):
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer = model.layers[1]
interesting_layer_shape = interesting_layer.weights[0].shape
dl_test_weights = np.random.choice(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES,
size=TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES, replace=False)
# Make some weights negative
dl_test_weights -= TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES // 2
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
prune_rate = 0.5
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
min_expected_pos = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate // 2 - 1
max_expected_neg = -TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate // 2 + 1
unpruned_pos = np.sum(actual_weights[0] >= min_expected_pos)
unpruned_neg = np.sum(actual_weights[0] <= max_expected_neg)
unpruned = unpruned_pos + unpruned_neg
self.assertIn(unpruned, [int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate),
int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate) - 1])
expected_to_be_pruned = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES - unpruned - 1
self.assertLessEqual(abs(int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate) - expected_to_be_pruned),
1)
# Prune again
prune_rate2 = 0.1
expected_to_be_pruned2 = int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate2 * (1.0 - prune_rate))
pruner.calc_prune_mask(model, prune_rate2, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
min_expected_pos = expected_to_be_pruned2 // 2 - 1
max_expected_neg = -expected_to_be_pruned2 // 2 + 1
unpruned_pos = np.sum(actual_weights[0] >= min_expected_pos)
unpruned_neg = np.sum(actual_weights[0] <= max_expected_neg)
unpruned = unpruned_pos + unpruned_neg
expected_unpruned = TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES - expected_to_be_pruned - expected_to_be_pruned2
self.assertLessEqual(abs(expected_unpruned - unpruned), 1)
def test_smallest_weights_similar_weights(self):
""" Tests the case where many or all weights have the same value. Hence we might be tempted to mask all of the
smallest weights rather than honoring only up to the prune rate.
"""
model = self._create_test_model()
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_layer = model.layers[1]
interesting_layer_shape = interesting_layer.weights[0].shape
# Make all weights the same
dl_test_weights = np.ones([TEST_DENSE_LAYER_INPUTS, TEST_NUM_CLASSES], dtype=int)
# Make some weights negative
dl_test_weights = dl_test_weights.reshape(interesting_layer_shape)
interesting_layer.set_weights([dl_test_weights, interesting_layer.get_weights()[1]])
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
prune_rate = 0.5
pruner.calc_prune_mask(model, prune_rate, 'smallest_weights')
pruner.apply_pruning(model)
actual_weights = interesting_layer.get_weights()
expected = int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * prune_rate)
actual = np.sum(actual_weights[0])
self.assertEqual(expected, actual)
#
# calc_prune_mask()
# 'large_final'
#
def test_prune_func_large_final(self):
""" Tests the case where many or all weights have the same value. Hence we might be tempted to mask all of the
smallest weights rather than honoring only up to the prune rate.
"""
model = self._create_test_dnn_model()
interesting_layer = model.layers[1]
interesting_weights_index = 0
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Assign the weights values between 0..N, with 1/2 the weights being negative
weights = interesting_layer.get_weights()
interesting_weights = weights[interesting_weights_index]
num_interesting_layer_weights = np.prod(interesting_weights.shape)
new_weights = np.array(np.random.choice(range(num_interesting_layer_weights),
size=num_interesting_layer_weights, replace=False))
rand_multiplier = np.random.choice([1, -1], size=num_interesting_layer_weights, replace=True)
new_weights *= rand_multiplier
new_weights = new_weights.reshape(interesting_weights.shape)
weights[interesting_weights_index] = new_weights
interesting_layer.set_weights(weights)
pruner.set_pretrained_weights(model)
# Now verify that the absolute value of all unpruned weights are as large or larger than the smallest expected
# non-zero weight
prune_rate = 0.2
pruner.calc_prune_mask(model, prune_rate, 'large_final')
pruner.apply_pruning(model)
weights = interesting_layer.get_weights()
pruned_weights = weights[interesting_weights_index]
pruned_weights = np.abs(pruned_weights)
num_zero = np.sum(pruned_weights == 0.0)
self.assertEqual(int(num_interesting_layer_weights * prune_rate), num_zero)
expected_non_zero_min = int(np.prod(pruned_weights.shape) * prune_rate)
num_in_expected_range = np.sum(pruned_weights >= expected_non_zero_min)
self.assertEqual(num_interesting_layer_weights - num_zero, num_in_expected_range)
# Now do another round of pruning
prune_rate = 0.5
new_overall_prune_rate = 0.6 # (1.0 - 0.2) * 0.5
pruner.calc_prune_mask(model, prune_rate, 'large_final')
pruner.apply_pruning(model)
weights = interesting_layer.get_weights()
pruned_weights = weights[interesting_weights_index]
pruned_weights = np.abs(pruned_weights)
num_zero = np.sum(pruned_weights == 0.0)
self.assertEqual(int(num_interesting_layer_weights * new_overall_prune_rate), num_zero)
expected_non_zero_min = int(np.prod(pruned_weights.shape) * new_overall_prune_rate)
num_in_expected_range = np.sum(pruned_weights >= expected_non_zero_min)
self.assertEqual(num_interesting_layer_weights - num_zero, num_in_expected_range)
@unittest.skip('Skipping this since it currently fails but is not a terribly high value issue to fix')
def test_prune_func_large_final_same_weight_values(self):
""" Tests case where many or all weights are same value. Hence we might be tempted to mask on all of the
smallest weights rather than honoring only up to the prune rate
"""
model = self._create_test_dnn_model()
interesting_layer = model.layers[1]
interesting_weights_index = 0
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Assign the weights values between 0..N, with 1/2 the weights being negative
test_weight_value = 1.23
weights = interesting_layer.get_weights()
interesting_weights = weights[interesting_weights_index]
num_interesting_layer_weights = np.prod(interesting_weights.shape)
new_weights = np.array(interesting_weights)
new_weights.fill(test_weight_value)
weights[interesting_weights_index] = new_weights
interesting_layer.set_weights(weights)
pruner.set_pretrained_weights(model)
# Now verify that the absolute value of all unpruned weights are as large or larger than the smallest expected
# non-zero weight
prune_rate = 0.2
pruner.calc_prune_mask(model, prune_rate, 'large_final')
pruner.apply_pruning(model)
weights = interesting_layer.get_weights()
pruned_weights = weights[interesting_weights_index]
num_zero = np.sum(pruned_weights == 0.0)
self.assertEqual(int(num_interesting_layer_weights * prune_rate), num_zero)
num_of_expected_value = np.sum(pruned_weights == test_weight_value)
self.assertEqual(num_interesting_layer_weights - num_zero, num_of_expected_value)
# Now do another round of pruning
prune_rate = 0.5
new_overall_prune_rate = 0.6 # (1.0 - 0.2) * 0.5
pruner.calc_prune_mask(model, prune_rate, 'large_final')
pruner.apply_pruning(model)
weights = interesting_layer.get_weights()
pruned_weights = weights[interesting_weights_index]
num_zero = np.sum(pruned_weights == 0.0)
self.assertEqual(int(num_interesting_layer_weights * new_overall_prune_rate), num_zero)
num_of_expected_value = np.sum(pruned_weights == test_weight_value)
self.assertEqual(num_interesting_layer_weights - num_zero, num_of_expected_value)
def test_prune_large_final_negative(self):
""" Negative tests for 'large_final' pruning strategy
"""
model = self._create_test_dnn_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# Don't call this since not calling this is the purpose of this test
# pruner.set_pretrained_weights(model)
# Now verify that the absolute value of all unpruned weights are as large or larger than the smallest expected
# non-zero weight
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model, 0.2, 'large_final')
self.assertIn('large_final', str(ex.exception))
self.assertIn('LotteryTicketPruner.pretrained_weights()', str(ex.exception))
#
# calc_prune_mask()
# negative
#
def test_calc_prune_mask_negative(self):
model = self._create_test_model()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model, 0.3, 'unknown_strategy')
self.assertIn('smallest_weights', str(ex.exception))
self.assertIn('smallest_weights_global', str(ex.exception))
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model, -0.25, 'smallest_weights_global')
self.assertIn('exclusive', str(ex.exception))
with self.assertRaises(ValueError) as ex:
pruner.calc_prune_mask(model, 1.1, 'smallest_weights_global')
self.assertIn('exclusive', str(ex.exception))
#
# LotteryTicketPruner
#
def test_LotteryTicketPruner_use_case_1(self):
model = self._create_test_model()
starting_weights = model.get_weights()
pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
# First layer is the input layer; ignore it
# Second layer is Dense layer with 2 weights. First is fully connected weights. Second is output weights.
interesting_key = tuple([1, tuple([0])])
num_unmasked = np.sum(pruner.prune_masks_map[interesting_key][0])
self.assertEqual(num_unmasked, TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES)
# No pruning percent specified so no weight should change
initial_model_weights_sum = self._summed_model_weights(model)
pruner.apply_pruning(model)
new_model_weights_sum = self._summed_model_weights(model)
self.assertEqual(initial_model_weights_sum, new_model_weights_sum)
pruner.calc_prune_mask(model, 0.5, 'random')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.5))
pruner.calc_prune_mask(model, 0.2, 'random')
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.6))
model.set_weights(starting_weights)
new_model_weights_sum = self._summed_model_weights(model)
self.assertEqual(initial_model_weights_sum, new_model_weights_sum)
pruner.apply_pruning(model)
num_masked = np.sum(pruner.prune_masks_map[interesting_key][0] == 0)
self.assertEqual(num_masked, int(TEST_DENSE_LAYER_INPUTS * TEST_NUM_CLASSES * 0.6))
if __name__ == '__main__':
unittest.main()
```
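These tests are the only usage documentation visible here, so a compact sketch of the workflow they exercise may help orient a reader. It is a minimal sketch, not the library's documented recipe: the Keras model, the random data, and the point at which the pretrained weights are snapshotted are assumptions; only `LotteryTicketPruner()`, `set_pretrained_weights()`, `calc_prune_mask()` and `apply_pruning()` are taken from the tests above, and a real training loop would also need to keep re-applying the mask so retraining does not revive pruned weights.
```python
import numpy as np
from tensorflow import keras

import lottery_ticket_pruner

# Hypothetical toy model and data; only the pruner calls mirror the tests above.
model = keras.Sequential([keras.layers.Dense(10, activation='softmax', input_shape=(32,))])
model.compile(optimizer='adam', loss='categorical_crossentropy')
x = np.random.rand(64, 32)
y = keras.utils.to_categorical(np.random.randint(10, size=64), num_classes=10)

pruner = lottery_ticket_pruner.LotteryTicketPruner(model)
model.fit(x, y, epochs=1, verbose=0)
# Strategies such as 'large_final' need a reference copy of the weights
# (see test_prune_large_final_negative); when to snapshot them is an assumption here.
pruner.set_pretrained_weights(model)

# Iteratively grow the prune mask, zero out the masked weights, then retrain.
for prune_rate in (0.2, 0.2, 0.2):
    pruner.calc_prune_mask(model, prune_rate, 'smallest_weights')
    pruner.apply_pruning(model)
    model.fit(x, y, epochs=1, verbose=0)
```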
|
{
"source": "jeremy-albuixech/scrapper",
"score": 2
}
|
#### File: jeremy-albuixech/scrapper/google_config.py
```python
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from gmusicapi import Mobileclient
import re

g_music = Mobileclient()
bp = Blueprint("google_config", __name__)


@bp.route("/google_config", methods=("GET", "POST"))
def google_config():
    """Log in to Google Music via OAuth using the Android ID submitted from the form."""
    if request.method == "POST":
        android_id = request.form["androidid"]
        error = None
        if not android_id:
            error = "Android ID is required."
        if error is not None:
            flash(error)
        else:
            try:
                if g_music.is_authenticated() is False:
                    g_music.oauth_login(android_id)
            except Exception as googleError:
                return render_template("google/index.html", error=googleError)
    return render_template("google/index.html", google_auth=g_music.is_authenticated())


@bp.route("/google_logout", methods=["POST"])
def google_logout():
    try:
        g_music.logout()
        return redirect(url_for('google_config.google_config'))
    except Exception as googleError:
        print(googleError)
        return render_template("google/index.html")
```
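The module above only defines a blueprint; it has to be registered on a Flask application before the `/google_config` and `/google_logout` routes are served. A minimal sketch, assuming the file is importable as `google_config` next to an app factory (the factory name, secret key, and module layout are assumptions, not part of the repository):
```python
from flask import Flask

import google_config  # the module shown above (assumed filename/location)


def create_app():
    app = Flask(__name__)
    app.secret_key = "change-me"  # required for the flash() messages used in the blueprint
    app.register_blueprint(google_config.bp)
    return app


if __name__ == "__main__":
    create_app().run(debug=True)
```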
|