metadata | text
---|---|
{
"source": "JoshuaEN/World-of-Tanks-ScoreViewTools-Data-Export-Mods",
"score": 2
} |
#### File: gui/mods/ScoreViewTools_Init.py
```python
from items import vehicles, _xml
from gui.Scaleform.daapi.view.lobby.trainings.training_room import TrainingRoom
from helpers.statistics import StatisticsCollector
from game import init
import ScoreViewTools
def exportAll():
ScoreViewTools.Export.init()
ScoreViewTools.Export.cleanup()
ScoreViewTools.Export.gameInfo()
ScoreViewTools.Export.vehicles()
#ScoreViewTools.Export.gameData()
#ScoreViewTools.Export.equipment()
#ScoreViewTools.Export.consumables()
ScoreViewTools.Export.maps()
#ScoreViewTools.Export.serverSettings()
old_noteHangarLoadingState = StatisticsCollector.noteHangarLoadingState
def new_noteHangarLoadingState(self, state, initialState=False, showSummaryNow=False):
old_noteHangarLoadingState(self, state, initialState, showSummaryNow)
StatisticsCollector.noteHangarLoadingState = new_noteHangarLoadingState
print(dir(TrainingRoom))  # debug: list the TrainingRoom attributes available for hooking
old_onSettingUpdated = TrainingRoom.onSettingUpdated
old_onRostersChanged = TrainingRoom.onRostersChanged
old_onPlayerStateChanged = TrainingRoom.onPlayerStateChanged
old__TrainingRoomBase__showSettings = TrainingRoom._TrainingRoomBase__showSettings
old_showRosters = TrainingRoom._showRosters
first = True
def new_onSettingUpdated(self, functional, settingName, settingValue):
ScoreViewTools.Export.trainingRoomSettings(functional)
old_onSettingUpdated(self, functional, settingName, settingValue)
def new_onRostersChanged(self, functional, rosters, full):
ScoreViewTools.Export.trainingRoomRoster(functional)
old_onRostersChanged(self, functional, rosters, full)
def new_onPlayerStateChanged(self, functional, roster, accountInfo):
ScoreViewTools.Export.trainingRoomRoster(functional)
old_onPlayerStateChanged(self, functional, roster, accountInfo)
def new__TrainingRoomBase__showSettings(self, functional):
ScoreViewTools.Export.trainingRoomSettings(functional)
old__TrainingRoomBase__showSettings(self, functional)
def new_showRosters(self, functional, rosters):
global first
if first:
first = False
exportAll()
ScoreViewTools.Export.trainingRoomRoster(functional)
old_showRosters(self, functional, rosters)
TrainingRoom.onSettingUpdated = new_onSettingUpdated
TrainingRoom.onRostersChanged = new_onRostersChanged
TrainingRoom.onPlayerStateChanged = new_onPlayerStateChanged
TrainingRoom._TrainingRoomBase__showSettings = new__TrainingRoomBase__showSettings
TrainingRoom._showRosters = new_showRosters
``` |
{
"source": "JoshuaErne/FreeDAS",
"score": 3
} |
#### File: FreeDAS/src/demo.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy.core.numeric import indices
import _init_paths
from opts import opts
from detectors.detector_factory import detector_factory
# import speed estimation model
from lib.SpeedEstimator import Realtimespeed, get_annotated_frame, neural_factory
# import lane detection module
from lib.EdgeDetection import LaneDetection
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
np.set_printoptions(threshold=sys.maxsize)
# Parameters for corner/feature detection (cv2.goodFeaturesToTrack-style arguments)
SIFT_params = dict( maxCorners = 100,
qualityLevel = 0.1,
minDistance = 7,
blockSize = 1)
# Parameters for lucas kanade optical flow
KLT_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors for tracking
color = np.random.randint(0, 255, (100, 3))
# Video framerate
FPS = 20
# Seconds per Detection
SPD = 1
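# Illustrative helper (not used by the original script): a sketch of how FPS and SPD
# drive the detect / seed-tracker / track cadence of the main loop in demo() below,
# assuming frame_idx increases by one per captured frame.
def _frame_phase(frame_idx, fps=FPS, spd=SPD):
    """Illustrative only: classify a frame index into the demo's processing phases."""
    if frame_idx % (spd * fps) == 0:
        return "detect"        # run the full object detector and lane detection
    if (frame_idx - 1) % (spd * fps) == 0:
        return "seed-tracker"  # build a detection mask and seed KLT optical flow
    return "track"             # track previously detected centers with KLT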
def im_pretty_show(prediction_annotation, img1):
road = prediction_annotation
height, width, ch = img1.shape
new_width, new_height = width + width/20, height + height/8
# Create a new canvas with the new width and height.
canvas_orig = np.ones((int(new_height), int(new_width), ch), dtype=np.uint8) * 125
# Now replace the center of the canvas with the original image
padding_top, padding_left = 60, 10
if padding_top + height <= new_height and padding_left + width <= new_width:
canvas_orig[padding_top:padding_top + height, padding_left:padding_left + width] = img1
else:
print("The Given padding exceeds the limits.")
# Create a new canvas with the new width and height.
canvas_road = np.ones((int(new_height), int(new_width), ch), dtype=np.uint8) * 125
# Now replace the center of the canvas with the prediction image
padding_top, padding_left = 60, 10
if padding_top + height <= new_height and padding_left + width <= new_width:
canvas_road[padding_top:padding_top + height, padding_left:padding_left + width] = road
else:
print("The Given padding exceeds the limits.")
text1 = "Original"
text2 = "Prediction"
texted_image1 = cv2.putText(canvas_orig.copy(), text1, (int(0.25*width), 30), cv2.FONT_HERSHEY_COMPLEX, 1, [255, 0, 0])
texted_image2 = cv2.putText(canvas_road.copy(), text2, (int(0.25*width), 30), cv2.FONT_HERSHEY_COMPLEX, 1, [255, 0, 0])
final = cv2.hconcat((texted_image1, texted_image2))
cv2.imshow("result", final)
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
def demo(opt):
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.debug = max(opt.debug, 1)
DetectorClass = detector_factory[opt.task]
objectDetector = DetectorClass(opt)
# # Initialize Speed Estimation Model
# speed_model = neural_factory()
# speed_model.load_weights('models/speed_model.h5')
# # Speed Estimation Parameters
# frame_idx = 1
# y_true = [0,0]
# Initialize Lane Detector Class
laneDetector = LaneDetection()
if opt.demo == 'webcam' or \
opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
objectDetector.pause = False
# ret, img_old = cam.read()
frame_idx = 0
while True:
if frame_idx % (SPD * FPS) == 0:
ret, img_new = cam.read()
if not ret:
print('No frames grabbed!')
break
# img_old_gray = cv2.cvtColor(img_new, cv2.COLOR_BGR2GRAY)
detection_gray = cv2.cvtColor(img_new, cv2.COLOR_BGR2GRAY)
# prediction_annotation, y_true = get_annotated_frame(detection_gray, img_old_gray, frame_idx, speed_model, y_true)
# im_pretty_show(prediction_annotation, img_new)
line_image, radius, offset = laneDetector.canny_edge_detection(img_new)
try:
img_new = cv2.addWeighted(img_new, 0.8, line_image, 1, 0)
img_new = cv2.putText(img_new, radius, (30, 480-40), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
img_new = cv2.putText(img_new, offset, (30, 480-70), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
except Exception:  # overlay drawing is best-effort; skip on failure
pass
cv2.imshow('input', img_new)
result, debugger = objectDetector.run(img_new)
time_str = ''
for stat in time_stats:
time_str = time_str + '{} {:.3f}s |'.format(stat, result[stat])
print(time_str)
if cv2.waitKey(1) == 27:
return # esc to quit
elif (frame_idx - 1) % (SPD * FPS) == 0:
ret, img_new = cam.read()
if not ret:
print('No frames grabbed!')
break
cv2.imshow('input', img_new)
mask = np.zeros_like(img_new)
mask, centers, indices = objectDetector.create_detections_mask(debugger, mask, result['results'])
current_gray = cv2.cvtColor(img_new, cv2.COLOR_BGR2GRAY)
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(detection_gray, current_gray, centers, None, **KLT_params)
# Select good points
if p1 is not None:
good_new = p1[st==1]
good_old = centers[st==1]
# Create a mask image for drawing purposes
tracker = np.zeros_like(img_new)
tracked = img_new.copy()
# draw the tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
tracker = cv2.line(tracker, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
tracked = cv2.circle(tracked, (int(a), int(b)), 5, color[i].tolist(), -1)
tracked = cv2.add(tracked, tracker)
line_image, radius, offset = laneDetector.canny_edge_detection(img_new)
try:
# tracked = cv2.addWeighted(tracked, 0.8, line_image, 1, 0)
tracked = cv2.putText(tracked, radius, (0, 480-40), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
tracked = cv2.putText(tracked, offset, (0, 480-70), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
except Exception:  # overlay drawing is best-effort; skip on failure
pass
objectDetector.custom_show_results(debugger, tracked, result['results'])
# cv2.imshow('ctdet', tracked)
if cv2.waitKey(1) == 27:
return # esc to quit
# Now update the previous frame and previous points
prev_gray = current_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
_, img_new = cam.read()
cv2.imshow('input', img_new)
current_gray = cv2.cvtColor(img_new, cv2.COLOR_BGR2GRAY)
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, current_gray, p0, None, **KLT_params)
# Select good points
if p1 is not None:
good_new = p1[st==1]
good_old = p0[st==1]
tracked = img_new.copy()
# draw the tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
tracker = cv2.line(tracker, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
tracked = cv2.circle(tracked, (int(a), int(b)), 5, color[i].tolist(), -1)
tracked = cv2.add(tracked, tracker)
line_image, radius, offset = laneDetector.canny_edge_detection(img_new)
try:
tracked = cv2.addWeighted(tracked, 0.8, line_image, 1, 0)
tracked = cv2.putText(tracked, radius, (30, 480-40), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
tracked = cv2.putText(tracked, offset, (30, 480-70), 0, 1, (0, 255, 0), 1, cv2.LINE_AA)
except Exception:  # overlay drawing is best-effort; skip on failure
pass
# This is prone to corrupting boxes, it is not perfect
# objectDetector.update_boxes(good_new, result['results'], indices)
# objectDetector.custom_show_results(debugger, tracked, result['results'])
# Show tracked centers only
cv2.imshow('ctdet', tracked)
if cv2.waitKey(1) == 27:
return # esc to quit
# Now update the previous frame and previous points
prev_gray = current_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
frame_idx += 1
img_old = img_new
else:
if os.path.isdir(opt.demo):
image_names = []
ls = os.listdir(opt.demo)
for file_name in sorted(ls):
ext = file_name[file_name.rfind('.') + 1:].lower()
if ext in image_ext:
image_names.append(os.path.join(opt.demo, file_name))
else:
image_names = [opt.demo]
for (image_name) in image_names:
result = objectDetector.run(image_name)
time_str = ''
for stat in time_stats:
time_str = time_str + '{} {:.3f}s |'.format(stat, result[stat])
print(time_str)
if __name__ == '__main__':
opt = opts().init()
demo(opt)
``` |
{
"source": "Joshuaf91/itsMeMario",
"score": 3
} |
#### File: save_princess_peach/models/bowsers_castle_map.py
```python
from dataclasses import dataclass, field
from typing import Optional
from save_princess_peach.constants import (
BOWSER, PEACH, MARIO, PLAYERS, ALL_POSITIONS, UP, DOWN, LEFT, RIGHT, VISITED,
SAFE_POSITION
)
@dataclass
class BowsersCastle:
"""Bowser`s castle"""
map: list = field(default_factory=list)
size: int = field(default=0)
bowser_location: tuple = field(default=None)
peach_location: tuple = field(default=None)
mario_location: tuple = field(default=None)
def location_of_person_on_layer(self, layer: list, layer_level: int) -> tuple:
"""Check layer for any person"""
bowsers_location = None
peach_location = None
mario_location = None
for idx, map_location_value in enumerate(layer):
if map_location_value in PLAYERS:
player_location = (layer_level, idx)
self.set_person_location(map_location_value, player_location)
return bowsers_location, peach_location, mario_location
def set_size(self, size: int):
"""Set size of Boweser's castle map"""
assert(isinstance(size, int))
self.size = size
def add_map_layer(self, layer: str):
"""Add layer to the bottom of the map."""
assert(isinstance(layer, str))
split_text = list(layer)
assert(len(split_text) == self.size)
self.map.append(split_text)
current_map_size = len(self.map)
assert(current_map_size <= self.size)
self.location_of_person_on_layer(split_text, current_map_size - 1)
def get_location_of_person(self, person) -> Optional[tuple]:
"""Get a person's current location (or None if they are not on the map)."""
assert (person in PLAYERS)
location = None
if person == BOWSER:
location = self.bowser_location
elif person == PEACH:
location = self.peach_location
elif person == MARIO:
location = self.mario_location
return location
def set_person_location(self, person: str, location: Optional[tuple]):
"""Set (or clear) the tracked location of a person."""
assert(person in PLAYERS)
if person == BOWSER:
self.bowser_location = location
elif person == PEACH:
self.peach_location = location
elif person == MARIO:
self.mario_location = location
def set_map_location(self, update: str, location: tuple):
"""Write a value to a map cell, updating the person's tracked location if needed."""
assert(update in ALL_POSITIONS)
row, col = location
self.map[row][col] = update
if update in PLAYERS:
self.set_person_location(update, location)
def move_mario_to_person(self, person):
"""Move Mario towards the person"""
row, col = self.get_location_of_person(person)
m_row, m_col = self.get_location_of_person(MARIO)
castle_map = self.map
up_is_safe = False
try:
if m_row - 1 == -1:
up_is_safe = False
else:
up_is_safe = castle_map[m_row - 1][m_col] in SAFE_POSITION
except IndexError:
pass
down_is_safe = False
try:
down_is_safe = castle_map[m_row + 1][m_col] in SAFE_POSITION
except IndexError:
pass
left_is_safe = False
try:
if m_col - 1 == -1:
left_is_safe = False
else:
left_is_safe = castle_map[m_row][m_col - 1] in SAFE_POSITION
except IndexError:
pass
right_is_safe = False
try:
right_is_safe = castle_map[m_row][m_col + 1] in SAFE_POSITION
except IndexError:
pass
m_new_location = None
# attempt to move vertically until we are on the person's row
if row != m_row:
# Should we move up?
if m_row > row and up_is_safe:
m_new_location = (m_row - 1, m_col)
self.set_map_location(VISITED, (m_row, m_col))
self.set_map_location(MARIO, m_new_location)
if m_new_location == (row, col):
self.set_person_location(person, None)
return UP
# Should we move down?
elif m_row < row and down_is_safe:
m_new_location = (m_row + 1, m_col)
self.set_map_location(VISITED, (m_row, m_col))
self.set_map_location(MARIO, m_new_location)
if m_new_location == (row, col):
self.set_person_location(person, None)
return DOWN
# else move horizontal
# Should we move left?
if left_is_safe:
m_new_location = (m_row, m_col - 1)
self.set_map_location(VISITED, (m_row, m_col))
self.set_map_location(MARIO, m_new_location)
if m_new_location == (row, col):
self.set_person_location(person, None)
return LEFT
# Should we move right?
elif right_is_safe:
m_new_location = (m_row, m_col + 1)
self.set_map_location(VISITED, (m_row, m_col))
self.set_map_location(MARIO, m_new_location)
if m_new_location == (row, col):
self.set_person_location(person, None)
return RIGHT
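# Illustrative usage sketch, kept as comments because the concrete character values
# behind BOWSER/PEACH/MARIO/SAFE_POSITION live in save_princess_peach.constants and
# are not shown in this file:
#   castle = BowsersCastle()
#   castle.set_size(3)
#   for layer in map_rows:                     # map_rows: three strings of length 3
#       castle.add_map_layer(layer)
#   while castle.get_location_of_person(PEACH) is not None:
#       # Each call steps Mario one cell (vertical first, then horizontal) and
#       # returns UP/DOWN/LEFT/RIGHT; Peach's location is cleared on arrival.
#       print(castle.move_mario_to_person(PEACH))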
``` |
{
"source": "joshuafinkelstein/chimera-education",
"score": 3
} |
#### File: chimera-education/index/old_search.py
```python
import numpy as np
import os
import datetime as dt
wd = '/Users/nikolasbaya/Documents/chimera-repo-050319/dataextraction/downloaded_transcripts'
# next(os.walk('.'))[1]  # leftover exploratory call; its result is never used
subdir_ls = [(x[0],x[2]) for x in os.walk(wd) if len(x[2])>0] #only get directories with files
# Create data structures for searching
def clean(string):
string = string.lower()
bad_char =['.',',','--','?']
for char in bad_char:
string = string.replace(char, '')
return string
def remove_junk_words(string):
word_ls = [word for word in string.lower().split(' ') if len(word)>0]
bad_words = {'the','of','at'}
return ' '.join(list(set(word_ls).difference(bad_words)))
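# Illustrative examples of the two helpers above (word order is not preserved by
# remove_junk_words because it round-trips through a set):
#   clean("Symmetry, of course?")            -> "symmetry of course"
#   remove_junk_words("the symmetry of art") -> e.g. "symmetry art"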
#words, cts = list(word_ct_dict.keys()), list(word_ct_dict.values())
#np.asarray(words)[np.argsort(cts)][::-1].tolist()[:100]
#part 0: Create dict of files
file_name_ls = []
file_content_ls = []
for i, subdir in enumerate(subdir_ls):
print(str(i+1)+' of '+str(len(subdir_ls)))
subdir_path, subdir_files = subdir[0], subdir[1]
file_name_ls += subdir_files
for f in subdir_files:
vtt0 = open(subdir_path+"/"+f, "r").read()
vtt1 = vtt0.split('\n\n')
file_content_ls.append(vtt1)
file_content_dict = dict(zip(file_name_ls,file_content_ls))
#part 1: create corpus of words
vtt_ls = []
word_set = set()  # accumulates the corpus vocabulary
file_text_dict = dict(zip(file_name_ls, ['']*len(file_name_ls)))
for i,f in enumerate(file_name_ls):
# if i>2:
# break
print(f'{i+1} of {len(file_name_ls)}')
vtt1 = file_content_dict[f]
if vtt1 != ['']:
vtt2 = [[tuple(x.split('\n')[0].split(' --> ')),
' '.join(x.split('\n')[1:]).lower()] for x in vtt1 if len(x)>0]
word_ls_tmp = [clean(' '.join(x.split('\n')[1:])) for x in vtt1 if len(x)>0]
file_text = ' '.join(word_ls_tmp)
file_text_dict[f] = file_text
word_ls_tmp = file_text.split(' ')
word_ls_tmp = [x.strip(' ') for x in word_ls_tmp if x.count(':')==0]
word_set_tmp = set(word_ls_tmp)
if len(word_set)==0:
word_set = word_set_tmp
else:
word_set = word_set.union(word_set_tmp)
else:
vtt2 = None
if vtt2 != None:
vtt2.insert(0,f.lower())
vtt_ls.append(vtt2)
assert len(set(file_name_ls))==len(file_name_ls), 'There are overlapping video names!'
#part 2: count words and link words to files
word_ls = list(word_set)
word_ct_dict = dict(zip(word_ls,[0]*len(word_ls)))
word_file_dict = dict(zip(word_ls,[[]]*len(word_ls)))
for i,f in enumerate(file_name_ls):
# if i>2:
# break
print(f'{i+1} of {len(file_name_ls)}')
# print(f)
file_text = file_text_dict[f]
word_ls_tmp = list(set(file_text.split(' ')))
word_ls_tmp = [x.strip(' ') for x in word_ls_tmp if x.count(':')==0]
for j,word in enumerate(word_ls_tmp):
word_ct_dict[word] += file_text.count(word)
word_file_dict[word] = word_file_dict[word] + [f]
vtt_dict = dict(zip([vtt[0].lower() for vtt in vtt_ls],[vtt[2:] for vtt in vtt_ls]))
#part 3: define search function
def do_search(search_str, in_title_score = 1000, in_text_score = 1):
start = dt.datetime.now()
search_ls = list(set(remove_junk_words(search_str).strip(' ').split(' ')))
if search_ls == ['']:
print('ERROR: Choose better search terms')
return None, None
video_scores = dict(zip([f.lower() for f in file_name_ls],[0]*len(vtt_ls)))
tstamps = dict(zip([f.lower() for f in file_name_ls],[[]]*len(vtt_ls)))
match_text_ct = 0
file_match_ls = []
for word in search_ls:
try:
file_ls_tmp = [f.lower() for f in word_file_dict[word]]
file_match_ls += file_ls_tmp
match_text_ct += word_ct_dict[word]
if len(file_ls_tmp) > 0:
for f in file_ls_tmp:
if word in f:
video_scores[f] += in_title_score
for vtt_text in vtt_dict[f]:
if word in vtt_text[1]:
video_scores[f] += in_text_score
tstamps[f] = tstamps[f] + [vtt_text[0]]
except KeyError:
file_ls_tmp = []
file_match_ls = list(set(file_match_ls))
n_results = len(file_match_ls)
if n_results:
results = dict((file,video_scores[file]) for file in file_match_ls)
files, scores = file_match_ls, [video_scores[file] for file in file_match_ls]
scores_idx = np.argsort(scores)
n_top_results = 5 #number of results to show
top_results = np.asarray(files)[scores_idx][::-1][:min(n_results,n_top_results)].tolist()
print(f'\nTop {min(n_results, n_top_results)} results ({n_results} total results):')
for result in top_results:
print(f'\n\n{result}\n\t{tstamps[result][:5]}') #limit tstamps results?
else:
results = None
print('No results')
print('\nTime for search: '+str((dt.datetime.now()-start).total_seconds()) +' sec')
print(f'Number of total results: {n_results}')
print(f'Number of text occurrences: {match_text_ct}')
return results, tstamps
#part 4: test out search
search_str = 'symmetry'
results, tstamps = do_search(search_str)
```
#### File: search/captions/views.py
```python
from captions.models import Caption
from captions.serializers import CaptionSerializer, UserSerializer
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework import permissions
from captions.permissions import IsOwnerOrReadOnly
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class CaptionList(generics.ListCreateAPIView):
queryset = Caption.objects.all()
serializer_class = CaptionSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
# def perform_create(self, serializer):
# serializer.save(owner=self.request.user)
class CaptionDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Caption.objects.all()
serializer_class = CaptionSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]
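# Illustrative URLconf wiring for the generic views above (the project's actual
# urls.py is not included in this export, so the routes below are assumptions):
#   from django.urls import path
#   urlpatterns = [
#       path('captions/', CaptionList.as_view()),
#       path('captions/<int:pk>/', CaptionDetail.as_view()),
#       path('users/', UserList.as_view()),
#       path('users/<int:pk>/', UserDetail.as_view()),
#   ]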
``` |
{
"source": "JoshuaFoster-18/Group-Software-Project",
"score": 3
} |
#### File: technical docs/tests/test_forms.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core import mail
User = get_user_model()
class TestForms(TestCase):
# Sets up a new user
def setUp(self):
newUser = User(email='<EMAIL>', username='test_user', first_name='Tess', last_name='Ting')
newUser_password = '<PASSWORD>'
self.newUser_password = newUser_password
newUser.set_password(newUser_password)
newUser.save()
self.newUser = newUser
# Tests that the user is created
def test_user_is_created(self):
user_count = User.objects.all().count()
self.assertEqual(user_count, 1)
self.assertNotEqual(user_count, 0)
# Tests that the user password is created and correct
def test_user_password_is_created(self):
newUser = User.objects.get(username="test_user")
self.assertTrue(newUser.check_password(self.newUser_password))
# Tests that the user password can't be a false value
def test_user_password_not_false(self):
newUser = User.objects.get(username="test_user")
self.assertFalse(newUser.check_password("<PASSWORD>"))
# Tests that the email to reset password is sent
def test_email_sent(self):
mail.send_mail('subject', 'message', '<EMAIL>', ['<EMAIL>'], fail_silently=False)
self.assertEqual(len(mail.outbox), 1)
# Tests that the contents of the email are correct
def test_mail_contents(self):
mail.send_mail('subject', 'message', '<EMAIL>', ['<EMAIL>'], fail_silently=False)
self.assertEqual(mail.outbox[0].body, 'message')
self.assertEqual(mail.outbox[0].subject, 'subject')
## Tests that all labels are correct
def test_first_name_label(self):
name = User.objects.get(id=1)
label = name._meta.get_field('first_name').verbose_name
self.assertEqual(label, 'first name')
def test_last_name_label(self):
name = User.objects.get(id=1)
label = name._meta.get_field('last_name').verbose_name
self.assertEqual(label, 'last name')
def test_email_label(self):
email = User.objects.get(id=1)
label = email._meta.get_field('email').verbose_name
self.assertEqual(label, 'email address')
def test_username_label(self):
username = User.objects.get(id=1)
label = username._meta.get_field('username').verbose_name
self.assertEqual(label, 'username')
def test_password_label(self):
password = User.objects.get(id=1)
label = password._meta.get_field('password').verbose_name
self.assertEqual(label, 'password')
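# These tests use get_user_model() and Django's in-memory mail outbox, so they run
# with the standard test runner, e.g. `python manage.py test` (illustrative; the
# project's manage.py and settings are not part of this export).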
``` |
{
"source": "joshuafried/DeepPool-Artifact",
"score": 2
} |
#### File: DeepPool-Artifact/examples/inception.py
```python
from collections import namedtuple
import warnings
import torch
import time
from torch import nn, Tensor
import torch.nn.functional as F
# from .utils import load_state_dict_from_url
from typing import Callable, Any, Optional, Tuple, List
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from parallelizationPlanner import CostSim
from clusterClient import ClusterClient
from jobDescription import TrainingJob
__all__ = ['Inception3', 'inception_v3', 'InceptionOutputs', '_InceptionOutputs']
model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits'])
InceptionOutputs.__annotations__ = {'logits': Tensor, 'aux_logits': Optional[Tensor]}
# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _InceptionOutputs set here for backwards compat
_InceptionOutputs = InceptionOutputs
def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Inception3":
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
# if pretrained:
# if 'transform_input' not in kwargs:
# kwargs['transform_input'] = True
# if 'aux_logits' in kwargs:
# original_aux_logits = kwargs['aux_logits']
# kwargs['aux_logits'] = True
# else:
# original_aux_logits = True
# kwargs['init_weights'] = False # we are loading weights from a pretrained model
# model = Inception3(**kwargs)
# state_dict = load_state_dict_from_url(model_urls['inception_v3_google'],
# progress=progress)
# model.load_state_dict(state_dict)
# if not original_aux_logits:
# model.aux_logits = False
# model.AuxLogits = None
# return model
return Inception3(**kwargs)
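# Illustrative usage sketch (commented out): the model's layers are built through the
# module-level CostSim instance `cs`, so one must exist before construction, as main()
# and runStrongScalingBench() below arrange. Assuming that setup:
#   cs = CostSim(None, netBw=2.66E5, verbose=False)
#   model = inception_v3(aux_logits=False)
#   out = model(torch.zeros(1, 3, 299, 299))   # expects N x 3 x 299 x 299, per the docstring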
class Inception3(nn.Module):
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
inception_blocks: Optional[List[Callable[..., nn.Module]]] = None,
init_weights: Optional[bool] = None
) -> None:
super(Inception3, self).__init__()
if inception_blocks is None:
inception_blocks = [
BasicConv2d, InceptionA, InceptionB, InceptionC,
InceptionD, InceptionE, InceptionAux
]
if init_weights is None:
warnings.warn('The default weight initialization of inception_v3 will be changed in future releases of '
'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning)
init_weights = True
assert len(inception_blocks) == 7
conv_block = inception_blocks[0]
inception_a = inception_blocks[1]
inception_b = inception_blocks[2]
inception_c = inception_blocks[3]
inception_d = inception_blocks[4]
inception_e = inception_blocks[5]
inception_aux = inception_blocks[6]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
self.maxpool1 = cs.MaxPool2d(kernel_size=3, stride=2)
self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
self.maxpool2 = cs.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_5b = inception_a(192, pool_features=32)
self.Mixed_5c = inception_a(256, pool_features=64)
self.Mixed_5d = inception_a(288, pool_features=64)
self.Mixed_6a = inception_b(288)
self.Mixed_6b = inception_c(768, channels_7x7=128)
self.Mixed_6c = inception_c(768, channels_7x7=160)
self.Mixed_6d = inception_c(768, channels_7x7=160)
self.Mixed_6e = inception_c(768, channels_7x7=192)
layerBeforeAux = cs.layers[-1]
self.AuxLogits: Optional[nn.Module] = None
if aux_logits:
self.AuxLogits = inception_aux(768, num_classes)
self.Mixed_7a = inception_d(768, custom_previous_layer=layerBeforeAux)
self.Mixed_7b = inception_e(1280)
self.Mixed_7c = inception_e(2048)
self.avgpool = cs.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout()
cs.Flatten()
self.fc = cs.Linear(2048, num_classes)
# if init_weights:
# for m in self.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# import scipy.stats as stats
# stddev = m.stddev if hasattr(m, 'stddev') else 0.1
# X = stats.truncnorm(-2, 2, scale=stddev)
# values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
# values = values.view(m.weight.size())
# with torch.no_grad():
# m.weight.copy_(values)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
def _transform_input(self, x: Tensor) -> Tensor:
if self.transform_input:
x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
return x
def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = self.maxpool1(x)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = self.maxpool2(x)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
x = self.Mixed_5c(x)
# N x 288 x 35 x 35
x = self.Mixed_5d(x)
# N x 288 x 35 x 35
x = self.Mixed_6a(x)
# N x 768 x 17 x 17
x = self.Mixed_6b(x)
# N x 768 x 17 x 17
x = self.Mixed_6c(x)
# N x 768 x 17 x 17
x = self.Mixed_6d(x)
# N x 768 x 17 x 17
x = self.Mixed_6e(x)
# N x 768 x 17 x 17
aux: Optional[Tensor] = None
if self.AuxLogits is not None:
if self.training:
aux = self.AuxLogits(x)
# N x 768 x 17 x 17
x = self.Mixed_7a(x)
# N x 1280 x 8 x 8
x = self.Mixed_7b(x)
# N x 2048 x 8 x 8
x = self.Mixed_7c(x)
# N x 2048 x 8 x 8
# Adaptive average pooling
x = self.avgpool(x)
# N x 2048 x 1 x 1
x = self.dropout(x)
# N x 2048 x 1 x 1
x = torch.flatten(x, 1)
# N x 2048
x = self.fc(x)
# N x 1000 (num_classes)
return x, aux
@torch.jit.unused
def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs:
if self.training and self.aux_logits:
return InceptionOutputs(x, aux)
else:
return x # type: ignore[return-value]
def forward(self, x: Tensor) -> InceptionOutputs:
x = self._transform_input(x)
x, aux = self._forward(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted Inception3 always returns Inception3 Tuple")
return InceptionOutputs(x, aux)
else:
return self.eager_outputs(x, aux)
class InceptionA(nn.Module):
def __init__(
self,
in_channels: int,
pool_features: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionA, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
prevLayer = cs.layers[-1]
outputLayers = []
self.branch1x1 = conv_block(in_channels, 64, kernel_size=1, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
outputLayers.append(cs.layers[-1])
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
outputLayers.append(cs.layers[-1])
self.branch_pool_1 = cs.AvgPool2d(kernel_size=3, stride=1, padding=1, custom_previous_layers=[prevLayer])
self.branch_pool_2 = conv_block(in_channels, pool_features, kernel_size=1)
outputLayers.append(cs.layers[-1])
cs.Concat(custom_previous_layers=outputLayers)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool_1(x)
branch_pool = self.branch_pool_2(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionB, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
prevLayer = cs.layers[-1]
outputLayers = []
self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
outputLayers.append(cs.layers[-1])
self.branch_pool_1 = cs.MaxPool2d(kernel_size=3, stride=2, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
cs.Concat(custom_previous_layers=outputLayers)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = self.branch_pool_1(x)
# F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(
self,
in_channels: int,
channels_7x7: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionC, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
prevLayer = cs.layers[-1]
outputLayers = []
self.branch1x1 = conv_block(in_channels, 192, kernel_size=1, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
c7 = channels_7x7
self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
outputLayers.append(cs.layers[-1])
self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
outputLayers.append(cs.layers[-1])
self.branch_pool_1 = cs.AvgPool2d(kernel_size=3, stride=1, padding=1, custom_previous_layers=[prevLayer])
self.branch_pool_2 = conv_block(in_channels, 192, kernel_size=1)
outputLayers.append(cs.layers[-1])
cs.Concat(custom_previous_layers=outputLayers)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
# branch_pool = self.branch_pool(branch_pool)
branch_pool = self.branch_pool_1(x)
branch_pool = self.branch_pool_2(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None,
custom_previous_layer = None
) -> None:
super(InceptionD, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
if custom_previous_layer == None:
prevLayer = cs.layers[-1]
else:
prevLayer = custom_previous_layer
outputLayers = []
self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
outputLayers.append(cs.layers[-1])
self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
outputLayers.append(cs.layers[-1])
self.branch_pool_1 = cs.MaxPool2d(kernel_size=3, stride=2, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
cs.Concat(custom_previous_layers=outputLayers)
def _forward(self, x: Tensor) -> List[Tensor]:
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
# branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
branch_pool = self.branch_pool_1(x)
outputs = [branch3x3, branch7x7x3, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(
self,
in_channels: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionE, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
prevLayer = cs.layers[-1]
outputLayers = []
self.branch1x1 = conv_block(in_channels, 320, kernel_size=1, custom_previous_layers=[prevLayer])
outputLayers.append(cs.layers[-1])
self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1, custom_previous_layers=[prevLayer])
prevLayer3x3_2 = cs.layers[-1]
self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1), custom_previous_layers=[prevLayer3x3_2])
self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0), custom_previous_layers=[prevLayer3x3_2])
cs.Concat(custom_previous_layers=[cs.layers[-2], cs.layers[-1]])
# self.branch3x3_2a = conv_block(384, 384*2, kernel_size=(1, 3), padding=(0, 1))
outputLayers.append(cs.layers[-1])
self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1, custom_previous_layers=[prevLayer])
self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
prevLayer3x3dbl_3 = cs.layers[-1]
self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1), custom_previous_layers=[prevLayer3x3dbl_3])
self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0), custom_previous_layers=[prevLayer3x3dbl_3])
cs.Concat(custom_previous_layers=[cs.layers[-2], cs.layers[-1]])
# self.branch3x3dbl_3a = conv_block(384, 384*2, kernel_size=(1, 3), padding=(0, 1))
outputLayers.append(cs.layers[-1])
self.branch_pool_1 = cs.AvgPool2d(kernel_size=3, stride=1, padding=1, custom_previous_layers=[prevLayer])
self.branch_pool_2 = conv_block(in_channels, 192, kernel_size=1)
outputLayers.append(cs.layers[-1])
cs.Concat(custom_previous_layers=outputLayers)
def _forward(self, x: Tensor) -> List[Tensor]:
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool_1(x)
branch_pool = self.branch_pool_2(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionAux, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.prepool = cs.AvgPool2d(kernel_size=5, stride=3)
self.conv0 = conv_block(in_channels, 128, kernel_size=1)
self.conv1 = conv_block(128, 768, kernel_size=5)
self.postpool = cs.AdaptiveAvgPool2d((1, 1))
self.conv1.stddev = 0.01 # type: ignore[assignment]
self.fc = nn.Linear(768, num_classes)
self.fc.stddev = 0.001 # type: ignore[assignment]
def forward(self, x: Tensor) -> Tensor:
# N x 768 x 17 x 17
# x = F.avg_pool2d(x, kernel_size=5, stride=3)
x = self.prepool(x)
# N x 768 x 5 x 5
x = self.conv0(x)
# N x 128 x 5 x 5
x = self.conv1(x)
# N x 768 x 1 x 1
# Adaptive average pooling
# x = F.adaptive_avg_pool2d(x, (1, 1))
x = self.postpool(x)
# N x 768 x 1 x 1
x = torch.flatten(x, 1)
# N x 768
x = self.fc(x)
# N x 1000
return x
class BasicConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
**kwargs: Any
) -> None:
super(BasicConv2d, self).__init__()
self.conv = cs.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
# profiler = GpuProfiler("cuda")
# profiler.loadProfile()
# cs = CostSim(profiler)
# model = Inception3()
# cs.printAllLayers()
# cs.computeInputDimensions((3, 299,299))
# globalBatch = 16
# totalGpus = 4
# # cs.searchBestSplitsV2(totalGpus, globalBatch)
# # cs.searchBestSplits(8, globalBatch)
# for startLayerId in [6, 15, 24, 33]:
# startLayer = cs.layers[startLayerId]
# bestMultiChainTime = 9999999999
# bestJiaTime = 9999999999
# startConfig = cs.listConfigOptions(startLayer, globalBatch, totalGpus)[0]
# startAndEndConfigsToTime = {}
# startAndEndConfigsToTimeJia = {}
# for startConfig in cs.listConfigOptions(startLayer, globalBatch, totalGpus):
# print(startConfig)
# startGpuTime = cs.benchGpuTime(startLayer, startConfig)
# # (endLayer, configToTimeDict, t) = cs.searchMultiChain(startLayer, startConfig, globalBatch, totalGpus)
# (jiaEndLayer, jiaConfigToTimeDict, jiaT) = cs.runMultiChainZhihao(startLayer, startConfig, globalBatch, totalGpus)
# # startAndEndConfigsToTime[startConfig] = configToTimeDict
# startAndEndConfigsToTimeJia[startConfig] = jiaConfigToTimeDict
# for config in jiaConfigToTimeDict:
# multiChainTime = jiaConfigToTimeDict[config][0] + startGpuTime
# jiaTime = jiaConfigToTimeDict[config][0] + startGpuTime
# print(" lastConfig: %20s, multi-chain algo: %7.1f ms Zhihao's time: %7.1f ms" % (str(config), multiChainTime, jiaTime))
# bestMultiChainTime = min(bestMultiChainTime, multiChainTime)
# bestJiaTime = min(bestJiaTime, jiaTime)
# bestConfigToTimeDict = (999999999, None)
# bestEndConfig = None
# for config in jiaConfigToTimeDict:
# if bestConfigToTimeDict[0] > jiaConfigToTimeDict[config][0]:
# bestConfigToTimeDict = jiaConfigToTimeDict[config]
# bestEndConfig = config
# cs.displayMultiChainResult(jiaEndLayer, bestEndConfig, jiaT, bestConfigToTimeDict[1])
# print("Best multi-chain: %.2f best jia: %.2f" % (bestMultiChainTime, bestJiaTime) )
# profiler.saveProfile()
def main(gpuCount, globalBatch, amplificationLimit=2.0, dataParallelBaseline=False, netBw=2.66E5, spatialSplit=False, simResultFilename=None, simOnly=False, use_be=False):
global cs
cs = CostSim(None, netBw=netBw, verbose=True, gpuProfileLoc="profile/A100_inception.prof") #"inceptionLayerGpuProfileA100V2.txt", gpuProfileLocSub="inceptionLayerGpuProfileA100.txt")
model = Inception3(aux_logits=False)
cs.printAllLayers(slient=True)
cs.computeInputDimensions((3,299,299))
# job, iterMs, gpuMs = cs.searchBestSplits(gpuCount, globalBatch, amplificationLimit=amplificationLimit, dataParallelBaseline=dataParallelBaseline, spatialSplit=spatialSplit)
# if dataParallelBaseline:
# dpIterUsec, dpFpUsec, dpBpUsec = profiler.benchModel(model, (3, 299, 299), int(globalBatch / gpuCount))
# print("(DP baseline) whole model bench: %.1f ms (fp: %.1f, bp: %.1f)" % (dpIterUsec / 1000, dpFpUsec / 1000, dpBpUsec / 1000))
job, iterMs, gpuMs, maxGpusUsed = cs.searchBestSplitsV3(gpuCount, globalBatch, amplificationLimit=amplificationLimit, dataParallelBaseline=dataParallelBaseline, spatialSplit=spatialSplit)
print(" %2d %2d %4.1f %4.1f\n" % (globalBatch, maxGpusUsed, iterMs, gpuMs))
cs.to_dot(simResultFilename, globalBatch)
# cs.to_gpuTimeline("Inception v3, Burst Parallel", maxGpusUsed, dataParallelBaseline)
jobInJson = job.dumpInJSON()
# for rank in range(gpuCount):
# print("GPU rank: %d"%rank)
# print(job.dumpSingleRunnableModule(rank))
job2 = TrainingJob("test", None, None, 0, 0, "")
job2.loadJSON(jobInJson)
assert(jobInJson == job2.dumpInJSON())
print("Load/Dump returned the same output? %s" % ("true" if jobInJson == job2.dumpInJSON() else "false"))
# print(jobInJson)
if maxGpusUsed > 8:
print("maxGpusUsed: ", maxGpusUsed, " is bigger than 8. Can't schedule this job.")
exit(-1)
if not spatialSplit and not simOnly:
cc = ClusterClient()
jobName = "InceptionV3_%d_%d_%2.1f%s" % (gpuCount, globalBatch, amplificationLimit, "_DP" if dataParallelBaseline else "")
jobName += "_BE" if use_be else ""
cc.submitTrainingJob(jobName, jobInJson, use_be)
if simResultFilename != None:
f = open(simResultFilename, "a")
f.write(" %2d %2d %4.1f %4.1f\n" % (globalBatch, maxGpusUsed, iterMs, gpuMs))
f.close()
if gpuCount == 8:
f = open(simResultFilename, "r")
print(f.read())
f.close()
def runAllConfigs(modelName: str, clusterType: str, simOnly=True):
if clusterType == "V100":
netBw = 22937
elif clusterType == "A100":
netBw = 2.66E5
elif clusterType == "10Gbps":
netBw = 1.25E3
elif clusterType == "100Gbps":
netBw = 1.25E4
elif clusterType == "10Tbps":
netBw = 1.25E6
else:
print("Wrong cluster type. Put either V100 or A100")
gpuCounts = [1, 2, 4, 8]
# gpuCounts = [1, 2, 4]
globalBatchSize = 32
# globalBatchSize = 16
# globalBatchSize = 8
limitAndBaseline = [(2.0, True, False), (1.5, False, False), (2.0, False, False), (2.5, False, False)]
# limitAndBaseline = [(99, False, True)]
# limitAndBaseline = []
for lim, baseline, spatialSplit in limitAndBaseline:
simResultFilename = "%s_%s_b%d_lim%2.1f_sim.data" % (modelName, "DP" if baseline else "MP", globalBatchSize, lim)
f = open(simResultFilename, "w")
f.write("#batch GPUs IterMs GpuMs\n")
f.close()
for gpuCount in gpuCounts:
if not simOnly:
preSize = os.stat('runtimeResult.data').st_size
main(gpuCount, globalBatchSize, amplificationLimit=lim, dataParallelBaseline=baseline, netBw=netBw, spatialSplit=spatialSplit, simResultFilename=simResultFilename, simOnly=simOnly)
# check exp finished.
if not simOnly:
print("runtimeResult.data's original size: ", preSize)
while os.stat('runtimeResult.data').st_size == preSize and not spatialSplit:
time.sleep(10)
print("runtimeResult.data's current size: ", os.stat('runtimeResult.data').st_size)
if not spatialSplit and not simOnly:
fw = open("%s_%s_b%d_lim%2.1f_run.data" % (modelName, "DP" if baseline else "MP", globalBatchSize, lim), "w")
fr = open('runtimeResult.data', "r")
fw.write("#batch GPUs IterMs GpuMs\n")
fw.write(fr.read())
fw.close()
fr.close()
fr = open('runtimeResult.data', "w")
fr.close()
def runStrongScalingBench():
global cs
netBw = 2.66E5
cs = CostSim(None, netBw=netBw, verbose=False)
inputSize = (3,299,299)
model = Inception3(aux_logits=False)
fakeInputSize = (16,3,299,299)
fakeInput = torch.zeros(fakeInputSize)
traced = torch.jit.trace(model, fakeInput)
torch.jit.save(traced, "modules/inception.pt")
print("Model: ", "Inception3")
print("BatchSize iterMs fpMs bpMs")
for batchSize in [2 ** exp for exp in range(1, 9)]:
assert False  # benchmarking below is disabled; no profiler is constructed in this script
# iterTime, fpTime, bpTime = profiler.benchModel(model, inputSize, batchSize)
# print(" %8d %6.1f %6.1f %6.1f" %
# (batchSize, iterTime / 1000, fpTime / 10000, bpTime / 1000))
if __name__ == "__main__":
print(len(sys.argv))
if len(sys.argv) == 3:
gpuCount = int(sys.argv[1])
globalBatchSize = int(sys.argv[2])
simResultFilename = "%s_%s_b%d_sim.data" % ("inception", "DP", globalBatchSize)
main(gpuCount, globalBatchSize, dataParallelBaseline=True)
elif len(sys.argv) >= 4:
use_be = len(sys.argv) > 4 and int(sys.argv[4]) == 1
gpuCount = int(sys.argv[1])
globalBatchSize = int(sys.argv[2])
# simResultFilename = "%s_%s_b%d_lim%2.1f_sim.data" % ("inception", "MP", globalBatchSize, amplificationLimit)
if sys.argv[3] == "DP":
main(gpuCount, globalBatchSize, dataParallelBaseline=True, use_be=use_be)
else:
amplificationLimit = float(sys.argv[3])
main(gpuCount, globalBatchSize, amplificationLimit, use_be=use_be)
# main(gpuCount, globalBatchSize, amplificationLimit, simResultFilename = simResultFilename, use_be=use_be)
elif len(sys.argv) == 2:
print("Run all configs")
runAllConfigs("inceptionV3", sys.argv[1])
elif len(sys.argv) == 1:
runStrongScalingBench()
else:
print("Wrong number of arguments.\nUsage: ")
``` |
{
"source": "joshuafuller/adsbxcot",
"score": 2
} |
#### File: adsbxcot/adsbxcot/classes.py
```python
import concurrent
import aiohttp
import asyncio
import configparser
import json
import logging
import os
import queue
import random
import threading
import time
import urllib
import pytak
import requests
import aircot
import adsbxcot
__author__ = "<NAME> W2GMD <<EMAIL>>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
class ADSBXWorker(pytak.MessageWorker):
"""Reads ADS-B Exchange Data, renders to CoT, and puts on queue."""
def __init__(self, event_queue: asyncio.Queue, opts):
super().__init__(event_queue)
self.url: urllib.parse.ParseResult = urllib.parse.urlparse(opts.get("ADSBX_URL"))
self.cot_stale = opts.get("COT_STALE")
self.poll_interval: int = int(opts.get("POLL_INTERVAL") or adsbxcot.DEFAULT_POLL_INTERVAL)
self.api_key: str = opts.get("API_KEY")
self.include_tisb = bool(opts.get("INCLUDE_TISB")) or False
self.include_all_craft = bool(opts.get("INCLUDE_ALL_CRAFT")) or False
self.filters = opts.get("FILTERS")
self.known_craft = opts.get("KNOWN_CRAFT")
self.known_craft_key = opts.get("KNOWN_CRAFT_KEY") or "HEX"
self.filter_type = ""
self.known_craft_db = None
async def handle_message(self, aircraft: list) -> None:
"""
Transforms Aircraft ADS-B data to CoT and puts it onto tx queue.
"""
if not isinstance(aircraft, list):
self._logger.warning(
"Invalid aircraft data, should be a Python list.")
return False
if not aircraft:
self._logger.warning("Empty aircraft list")
return False
_lac = len(aircraft)
_acn = 1
for craft in aircraft:
# self._logger.debug("craft=%s", craft)
icao = craft.get("hex", craft.get("icao")).strip().upper()
flight = craft.get("flight", "").strip().upper()
reg = craft.get("r", "").strip().upper()
if "~" in icao and not self.include_tisb:
continue
known_craft = {}
if self.filter_type:
if self.filter_type == "HEX":
filter_key: str = icao
elif self.filter_type == "FLIGHT":
filter_key: str = flight
elif self.filter_type == "REG":
filter_key: str = reg
else:
filter_key: str = ""
# self._logger.debug("filter_key=%s", filter_key)
if self.known_craft_db and filter_key:
known_craft = (list(filter(
lambda x: x[self.known_craft_key].strip().upper() == filter_key, self.known_craft_db)) or
[{}])[0]
# self._logger.debug("known_craft='%s'", known_craft)
elif filter_key:
if "include" in self.filters[self.filter_type] and filter_key not in self.filters.get(filter_type,
"include"):
continue
if "exclude" in self.filters[self.filter_type] and filter_key in self.filters.get(filter_type,
"exclude"):
continue
# If we're using a known_craft csv and this craft wasn't found, skip:
if self.known_craft_db and not known_craft and not self.include_all_craft:
continue
event = adsbxcot.adsbx_to_cot(
craft,
stale=self.cot_stale,
known_craft=known_craft
)
if not event:
self._logger.debug(f"Empty CoT Event for craft={craft}")
_acn += 1
continue
self._logger.debug(
"Handling %s/%s ICAO: %s Flight: %s Category: %s",
_acn,
_lac,
craft.get("hex"),
craft.get("flight"),
craft.get("category")
)
await self._put_event_queue(event)
_acn += 1
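# Illustrative shape of one `craft` entry as consumed above; only the keys this
# method reads are shown, and real ADS-B Exchange responses carry many more fields:
#   {"hex": "a1b2c3", "flight": "SWA1234", "r": "N123AB", "category": "A3", ...}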
async def _get_adsbx_feed(self):
# Support for either direct ADSBX API, or RapidAPI
if "rapidapi" in self.url.geturl():
headers = {
"x-rapidapi-key": self.api_key,
"x-rapidapi-host": "adsbexchange-com1.p.rapidapi.com"
}
else:
headers = {"api-auth": self.api_key}
async with aiohttp.ClientSession() as session:
response = await session.request(
method="GET",
url=self.url.geturl(),
headers=headers
)
response.raise_for_status()
json_resp = await response.json()
aircraft = json_resp.get("ac")
self._logger.debug("Retrieved %s aircraft", len(aircraft))
await self.handle_message(aircraft)
async def run(self):
"""Runs this Thread, Reads from Pollers."""
self._logger.info(
"Running ADSBXWorker with URL '%s'", self.url.geturl())
if self.known_craft is not None:
self._logger.info("Using KNOWN_CRAFT File: '%s'", self.known_craft)
self.known_craft_db = aircot.read_known_craft(self.known_craft)
self.filters = configparser.ConfigParser()
self.filters.add_section(self.known_craft_key)
self.filters[self.known_craft_key]["include"] = \
str([x[self.known_craft_key].strip().upper() for x in self.known_craft_db])
if self.filters or self.known_craft_db:
filter_src = self.filters or self.known_craft_key
self._logger.debug("filter_src=%s", filter_src)
if filter_src:
if "HEX" in filter_src:
self.filter_type = "HEX"
elif "FLIGHT" in filter_src:
self.filter_type = "FLIGHT"
elif "REG" in filter_src:
self.filter_type = "REG"
self._logger.debug("filter_type=%s", self.filter_type)
while 1:
await self._get_adsbx_feed()
await asyncio.sleep(self.poll_interval)
``` |
{
"source": "joshuafuller/aircotproxy",
"score": 2
} |
#### File: aircotproxy/aircotproxy/classes.py
```python
import asyncio
import configparser
import io
import logging
import urllib
import xml.etree.ElementTree
import aircot
import pytak
import aircotproxy
__author__ = "<NAME> W2GMD <<EMAIL>>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
class ACPNetworkClient(asyncio.Protocol):
"""
CoT Network Listener (tcp only).
"""
_logger = logging.getLogger(__name__)
if not _logger.handlers:
_logger.setLevel(aircotproxy.LOG_LEVEL)
_console_handler = logging.StreamHandler()
_console_handler.setLevel(aircotproxy.LOG_LEVEL)
_console_handler.setFormatter(aircotproxy.LOG_FORMAT)
_logger.addHandler(_console_handler)
_logger.propagate = False
logging.getLogger("asyncio").setLevel(aircotproxy.LOG_LEVEL)
def __init__(self, ready, event_queue, opts) -> None:
self.transport = None
self.address = None
self.ready = ready
self.event_queue = event_queue
self.include_all_craft = bool(opts.get("INCLUDE_ALL_CRAFT")) or False
self.include_tisb = bool(opts.get("INCLUDE_TISB")) or False  # referenced in handle_message() below
self.filters = opts.get("FILTERS")
self.known_craft = opts.get("KNOWN_CRAFT")
self.known_craft_key = opts.get("KNOWN_CRAFT_KEY") or "HEX"
self.filter_type = ""
self.known_craft_db = None
def handle_message(self, data) -> None:
d_data = data.decode().strip()
rx_cot = xml.etree.ElementTree.fromstring(d_data)
uid = str(rx_cot.attrib["uid"]).strip().upper()
if "ICAO" not in uid:
return
if "ICAO-" in uid:
icao = uid.split("-")[-1]
elif "ICAO." in uid:
icao = uid.split(".")[-1]
if "~" in icao and not self.include_tisb:
return
known_craft = {}
if self.filter_type:
if self.filter_type == "HEX":
filter_key: str = icao
else:
filter_key: str = ""
# self._logger.debug("filter_key=%s", filter_key)
if self.known_craft_db and filter_key:
known_craft = (list(filter(
lambda x: x[self.known_craft_key].strip().upper() == filter_key, self.known_craft_db)) or
[{}])[0]
# self._logger.debug("known_craft='%s'", known_craft)
elif filter_key:
if "include" in self.filters[self.filter_type] and filter_key not in self.filters.get(filter_type,
"include"):
return
if "exclude" in self.filters[self.filter_type] and filter_key in self.filters.get(filter_type,
"exclude"):
return
# If we're using a known_craft csv and this craft wasn't found, skip:
if self.known_craft_db and not known_craft and not self.include_all_craft:
return
event: str = aircotproxy.cot_to_cot(rx_cot, known_craft=known_craft)
if not event:
self._logger.debug("Empty CoT Event")
return
self._logger.debug("Handling ICAO: %s", icao)
if event:
self.event_queue.put_nowait(event)
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info("peername")
self._logger.debug("Connection from %s", self.address)
if self.known_craft is not None:
self._logger.info("Using KNOWN_CRAFT File: '%s'", self.known_craft)
self.known_craft_db = aircotproxy.read_known_craft(self.known_craft)
self.filters = configparser.ConfigParser()
self.filters.add_section(self.known_craft_key)
self.filters[self.known_craft_key]["include"] = \
str([x[self.known_craft_key].strip().upper() for x in self.known_craft_db])
if self.filters or self.known_craft_db:
filter_src = self.filters or self.known_craft_key
self._logger.debug("filter_src=%s", filter_src)
if filter_src:
if "MMSI" in filter_src:
self.filter_type = "MMSI"
self._logger.debug("filter_type=%s", self.filter_type)
self.ready.set()
def data_received(self, data):
self._logger.debug("Recieved: %s", data)
for line in data.splitlines():
self.handle_message(line)
def connection_lost(self, exc):
self.ready.clear()
self._logger.exception(exc)
self._logger.warning("Disconnected from %s", self.address)
class ACPWorker(pytak.MessageWorker):
"""ACPWorker Cursor-on-Target Class."""
def __init__(self, event_queue, opts) -> None:
super().__init__(event_queue)
self.opts = opts
self.cot_receive_url: urllib.parse.ParseResult = urllib.parse.urlparse(self.opts.get("COT_RECEIVE_URL"))
async def run(self):
"""Runs this Thread, reads CoT & outputs CoT."""
self._logger.info("Running ACPWorker")
loop = asyncio.get_event_loop()
host, port = pytak.parse_cot_url(self.cot_receive_url)
self._logger.info("Receiving CoT on %s:%s", host, port)
ready = asyncio.Event()
server = await loop.create_server(
lambda: ACPNetworkClient(ready, self.event_queue, self.opts), host, int(port)
)
await ready.wait()
while 1:
await asyncio.sleep(0.01)
``` |
{
"source": "joshuafuller/aiscot",
"score": 3
} |
#### File: aiscot/aiscot/cmd.py
```python
import argparse
import time
import aiscot
__author__ = '<NAME> W2GMD <<EMAIL>>'
__copyright__ = 'Copyright 2020 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
def cli():
"""Command Line interface for AIS Cursor-on-Target Gateway."""
parser = argparse.ArgumentParser()
parser.add_argument(
'-P', '--ais_port', help='AIS UDP Port',
default=aiscot.DEFAULT_AIS_PORT
)
parser.add_argument(
'-C', '--cot_host', help='Cursor-on-Target Host or Host:Port',
required=True
)
opts = parser.parse_args()
aiscot_i = aiscot.AISCoT(opts.ais_port, opts.cot_host)
try:
aiscot_i.start()
while aiscot_i.is_alive():
time.sleep(0.01)
except KeyboardInterrupt:
aiscot_i.stop()
finally:
aiscot_i.stop()
if __name__ == '__main__':
cli()
``` |
{
"source": "joshuafuller/aprscot",
"score": 2
} |
#### File: aprscot/aprscot/cmd.py
```python
import argparse
import time
import aprslib
import aprscot
__author__ = '<NAME> W2GMD <<EMAIL>>'
__copyright__ = 'Copyright 2017 <NAME>'
__license__ = 'Apache License, Version 2.0'
def cli():
"""Command Line interface for APRS Cursor-on-Target Gateway."""
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--debug', help='Enable debug logging', action='store_true'
)
parser.add_argument(
'-c', '--callsign', help='callsign', required=True
)
parser.add_argument(
'-p', '--passcode', help='passcode', required=True
)
parser.add_argument(
'-C', '--cot_host', help='Cursor-on-Target Host', required=True
)
parser.add_argument(
'-f', '--aprs_filter', help='APRS Filter', default='m/1000'
)
opts = parser.parse_args()
aprs_i = aprslib.IS(opts.callsign, opts.passcode, port=14580)
aprs_i.set_filter(opts.aprs_filter)
aprscot_i = aprscot.APRSCOT(aprs_i, opts.cot_host)
try:
aprscot_i.start()
while aprscot_i.is_alive():
time.sleep(0.01)
except KeyboardInterrupt:
aprscot_i.stop()
finally:
aprscot_i.stop()
if __name__ == '__main__':
cli()
``` |
{
"source": "joshuafuller/pyGT",
"score": 2
} |
#### File: joshuafuller/pyGT/compatGTA.py
```python
from struct import pack, unpack
from pyTLV import tlvPack, tlvRead
from pycrc16 import crc
from gtdefs import * # noqa: F403
# Message content types - GTA specific
GTA_CONTENT_TEXT = 0
GTA_CONTENT_TEXT_LCTN = 1 # Text message with location attached
GTA_CONTENT_LCTN_RES = 2 # Location response
GTA_CONTENT_LCTN_REQ = 3 # Location request
GTA_CONTENT_TEXT_LREQ = 4 # Text message with location request
GTA_CONTENT_GROUP_KEY = 5 # Group setup information: GID, KEY and members
GTA_CONTENT_PING = 7 # Ping request
GTA_CONTENT_PUBK_REQ = 14
GTA_CONTENT_PUBK_RES = 15
def gtMakeGTABlobMsg(bodyTXT, fromTXT='API'):
"""
Assemble a GTA compatible message blob
(suitable for feeding to gtMakeAPIMsg() )
"""
blob = (tlvPack(MSGB_TLV_TYPE, "%d" % GTA_CONTENT_TEXT) +
tlvPack(MSGB_TLV_NICK, fromTXT) +
tlvPack(MSGB_TLV_TEXT, bodyTXT))
# append CRC and return
return blob + pack("!H", crc(blob))
def gtReadGTABlob(blob):
"""
Break down a GTA message blob into its elements
"""
msg = {}
# there's a CRC16 field at the end of the content blob;
# check this first and stop if incorrect
wantCRC = unpack('!H', blob[-2:])[0]
haveCRC = crc(blob[:-2])
if wantCRC != haveCRC:
print("CRC failed, want=%04x, have=%04x" % (wantCRC, haveCRC))
return False
for type, length, value in tlvRead(blob[:-2]):
msg[type] = value
# Check for mandatory subelement MSGB_TLV_TYPE
if MSGB_TLV_TYPE in msg:
return msg
return False
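# Illustrative round-trip sketch (not part of the original module): a blob built
# by gtMakeGTABlobMsg() should parse back with gtReadGTABlob(). The message and
# nickname below are arbitrary examples.
def gtExampleBlobRoundTrip():
    blob = gtMakeGTABlobMsg("hello mesh", fromTXT="API")
    msg = gtReadGTABlob(blob)  # dict keyed by TLV type, or False if the CRC check fails
    return msg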
```
#### File: joshuafuller/pyGT/pygth16.py
```python
def gtAlgoH16(str):
""" Proprietary hash based on the Park-Miller LCG """
seed = 0xaa
mult = 48271
incr = 1
modulus = (1 << 31) - 1 # 0x7FFFFFFF
h = 0
x = seed
for c in bytearray(str):
x = (((x + c) * mult + incr) & 0xFFFFFFFF) % modulus
h = h ^ x
# Derive 16-bit value from 32-bit hash by XORing its two halves
r = ((h & 0xFFFF0000) >> 16) ^ (h & 0xFFFF)
return r
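# Illustrative sketch (not part of the original module): the hash is applied to
# a bytes-like value (e.g. GID material); the input below is an arbitrary example.
def gtExampleHashUsage():
    return gtAlgoH16(b"5551230100")  # 16-bit integer in the range 0..0xFFFF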
``` |
{
"source": "joshuafuller/pytak",
"score": 3
} |
#### File: pytak/pytak/functions.py
```python
import asyncio
import datetime
import os
import socket
import ssl
import xml
import xml.etree.ElementTree
import pytak
import pytak.asyncio_dgram
__author__ = "<NAME> W2GMD <<EMAIL>>"
__copyright__ = "Copyright 2021 Orion Labs, Inc."
__license__ = "Apache License, Version 2.0"
def split_host(host, port: int = None) -> tuple:
"""Given a host:port and/or port, returns host, port."""
if ":" in host:
addr, port = host.split(":")
port = int(port)
elif port:
addr = host
port = int(port)
else:
addr = host
port = int(pytak.DEFAULT_COT_PORT)
return addr, port
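# Illustrative sketch (not part of the original module): the hostname and port
# below are arbitrary examples.
def example_split_host():
    return split_host("takserver.example.com:8087")  # -> ("takserver.example.com", 8087)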
def parse_cot_url(url) -> tuple:
"""Parses a Cursor on Target destination URL."""
if ":" in url.path:
host, port = str(url.path).split(":")
else:
host = url.path
if "broadcast" in url.scheme:
port = pytak.DEFAULT_BROADCAST_PORT
else:
port = pytak.DEFAULT_COT_PORT
return host, port
def hello_event(uid="pytak") -> str:
"""Generates a Hello CoT Event."""
time = datetime.datetime.now(datetime.timezone.utc)
root = xml.etree.ElementTree.Element("event")
root.set("version", "2.0")
root.set("type", "t-x-d-d")
root.set("uid", uid)
root.set("how", "m-g")
root.set("time", time.strftime(pytak.ISO_8601_UTC))
root.set("start", time.strftime(pytak.ISO_8601_UTC))
root.set("stale", (time + datetime.timedelta(hours=1)).strftime(pytak.ISO_8601_UTC) )
return xml.etree.ElementTree.tostring(root)
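# Illustrative sketch (not part of the original module): hello_event() returns
# serialized CoT XML bytes ready to be queued for transmission; the uid below
# is an arbitrary example.
def example_hello_event():
    return hello_event(uid="example-uid")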
``` |
{
"source": "joshuafuller/stratuxcot",
"score": 2
} |
#### File: stratuxcot/tests/test_functions.py
```python
import asyncio
import csv
import io
import urllib
import xml.etree.ElementTree
import pytest
import stratuxcot
import stratuxcot.functions
__author__ = '<NAME> W2GMD <<EMAIL>>'
__copyright__ = 'Copyright 2021 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
# Sample JSON data:
#
# {
# 'Icao_addr': 11160165,
# 'Reg': 'N762QS',
# 'Tail': 'N762QS',
# 'Squawk': 0,
# 'Lat': 37.89692,
# 'Lng': -122.74547,
# 'Addr_type': 0,
# 'Age': 28.29,
# 'AgeLastAlt': 1.33,
# 'Alt': 21850,
# 'AltIsGNSS': False,
# 'Bearing': 0,
# 'BearingDist_valid': False,
# 'Distance': 0,
# 'Emitter_category': 0,
# 'ExtrapolatedPosition': False,
# 'GnssDiffFromBaroAlt': -275,
# 'LastSent': '0001-01-01T00:39:16.44Z',
# 'Last_GnssDiff': '0001-01-01T00:39:53.84Z',
# 'Last_GnssDiffAlt': 21775,
# 'Last_alt': '0001-01-01T00:39:54.77Z',
# 'Last_seen': '0001-01-01T00:39:54.77Z',
# 'Last_source': 1,
# 'Last_speed': '0001-01-01T00:39:53.84Z',
# 'NACp': 10,
# 'NIC': 8,
# 'OnGround': False,
# 'Position_valid': True,
# 'PriorityStatus': 0,
# 'SignalLevel': -28.21023052706831,
# 'Speed': 340,
# 'Speed_valid': True,
# 'TargetType': 1,
# 'Timestamp': '2020-11-06T19:58:06.234Z',
# 'Track': 249,
# 'Vvel': 3392
# }
#
#
# "Last_seen":"0001-01-01T00:43:19.61Z" (ws://192.168.10.1/traffic) 0001-01-01 is day zero,
# +
# "GPSTime":"2020-05-12T08:27:10Z" (http://192.168.10.1/getSituation)
# -
# ("Uptime":2610230,ms)"UptimeClock":"0001-01-01T00:43:30.23Z" (http://192.168.10.1/getStatus)
# = Timestamp of traffic "event"
#
#
# This is an illuminated/commented version of the traffic output from StratuX:
# type TrafficInfo struct {
# Icao_addr uint32 // decimal version of (ICAO HEX or ICAO OCTAL)
# Reg string // Registration. Calculated from Icao_addr for civil aircraft of US registry.
# Tail string // Callsign. Transmitted by aircraft. 8 Characters max including spaces
# Emitter_category uint8 // Formatted using GDL90 standard 3.5.1.10 Table 11, e.g. in a Mode ES report, A7 becomes 0x07, B0 becomes 0x08, etc.
# OnGround bool // Air-ground status. On-ground is "true".
# Addr_type uint8 // UAT address qualifier. Used by GDL90 format, so translations for ES TIS-B/ADS-R are needed. 3.5.1.2 Target Identity
# (GDL90 ICD)
# TargetType uint8 // types described in const above https://github.com/cyoung/stratux/blob/master/main/traffic.go#L66
# SignalLevel float64 // Signal level, dB RSSI.
# Squawk int // Squawk code
# Position_valid bool // false = MODE-S message without location data
# Lat float32 // decimal degrees, north positive
# Lng float32 // decimal degrees, east positive
# Alt int32 // Pressure altitude, feet
# GnssDiffFromBaroAlt int32 // GNSS altitude above WGS84 datum. Reported in TC 20-22 messages (negative = below BaroAlt, smaller magnitude)
# AltIsGNSS bool // Pressure alt = 0; GNSS alt = 1
# NIC int // Navigation Integrity Category.
# NACp int // Navigation Accuracy Category for Position.
# Track uint16 // degrees true
# Speed uint16 // knots
# Speed_valid bool // set when speed report received.
# Vvel int16 // feet per minute
# Timestamp time.Time // timestamp of traffic message, UTC
# PriorityStatus uint8 // Emergency or priority code as defined in GDL90 spec, DO-260B (Type 28 msg) and DO-282B
# // Parameters starting at 'Age' are calculated from last message receipt on each call of sendTrafficUpdates().
# // Mode S transmits position and track in separate messages, and altitude can also be
# // received from interrogations.
# Age float64 // Age of last valid position fix, seconds ago.
# AgeLastAlt float64 // Age of last altitude message, seconds ago.
# Last_seen time.Time // Time of last position update (stratuxClock). Used for timing out expired data.
# Last_alt time.Time // Time of last altitude update (stratuxClock).
# Last_GnssDiff time.Time // Time of last GnssDiffFromBaroAlt update (stratuxClock).
# Last_GnssDiffAlt int32 // Altitude at last GnssDiffFromBaroAlt update.
# Last_speed time.Time // Time of last velocity and track update (stratuxClock).
# Last_source uint8 // Last frequency on which this target was received.
# ExtrapolatedPosition bool //TODO: True if Stratux is "coasting" the target from last known position.
# BearingDist_valid bool // set when bearing and distance information is valid
# Bearing float64 // Bearing in degrees true to traffic from ownship, if it can be calculated. Units: degrees.
# Distance float64 // Distance to traffic from ownship, if it can be calculated. Units: meters.
# //FIXME: Rename variables for consistency, especially "Last_".
#
@pytest.fixture
def sample_craft():
return {
"Icao_addr":10698088,
"Reg":"N308DU",
"Tail":"DAL1352",
"Emitter_category":3,
"OnGround": False,
"Addr_type":0,
"TargetType":1,
"SignalLevel":-35.5129368009492,
"Squawk":3105,
"Position_valid":True,
"Lat":37.46306,
"Lng":-122.264626,
"Alt":7325,
"GnssDiffFromBaroAlt":25,
"AltIsGNSS":False,
"NIC":8,
"NACp":10,
"Track":135,
"Speed":262,
"Speed_valid":True,
"Vvel":-1600,
"Timestamp":"2021-05-19T23:13:18.484Z",
"PriorityStatus":0,
"Age":29.85,
"AgeLastAlt":29.83,
"Last_seen":"0001-01-01T16:43:24.75Z",
"Last_alt":"0001-01-01T16:43:24.77Z",
"Last_GnssDiff":"0001-01-01T16:43:24.54Z",
"Last_GnssDiffAlt":7700,
"Last_speed":"0001-01-01T16:43:24.54Z",
"Last_source":1,
"ExtrapolatedPosition":False,
"BearingDist_valid":True,
"Bearing":148.05441175901748,
"Distance":38889.68863349082,
"LastSent":"0001-01-01T16:43:22.85Z"
}
@pytest.fixture
def sample_known_craft():
sample_csv = """DOMAIN,AGENCY,REG,CALLSIGN,TYPE,MODEL,HEX,COT,TYPE,,
EMS,CALSTAR,N832CS,CALSTAR7,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
EMS,REACH AIR MEDICAL,N313RX,REACH16,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
FED,USCG,1339,C1339,FIXED WING,,,,FIXED WING,,
FIRE,USFS,N143Z,JUMPR43,FIXED WING,DH6,,a-f-A-C-F,FIXED WING,,
FIRE,,N17085,TNKR_911,FIXED WING,,,a-f-A-C-F,FIXED WING,,
FIRE,CAL FIRE,N481DF,C_104,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
FOOD,EL FAROLITO,N739UL,TACO_01,HELICOPTER,,,a-f-A-T-A-C-O,HELICOPTER,,
FOOD,EL FAROLITO,DAL1352,TACO_02,FIXED WING,,,a-f-A-T-A-C-O,FIXED WING,,
"""
csv_fd = io.StringIO(sample_csv)
all_rows = []
reader = csv.DictReader(csv_fd)
for row in reader:
all_rows.append(row)
print(all_rows)
return all_rows
def test_stratux_to_cot_raw(sample_craft):
print(sample_craft)
cot = stratuxcot.functions.stratux_to_cot_raw(sample_craft)
print(cot)
assert isinstance(cot, xml.etree.ElementTree.Element)
assert cot.tag == "event"
assert cot.attrib["version"] == "2.0"
assert cot.attrib["type"] == "a-.-A-C-F"
assert cot.attrib["uid"] == "ICAO-A33D68"
point = cot.findall("point")
assert point[0].tag == "point"
assert point[0].attrib["lat"] == "37.46306"
assert point[0].attrib["lon"] == "-122.264626"
assert point[0].attrib["hae"] == "2232.6600000000003"
detail = cot.findall("detail")
assert detail[0].tag == "detail"
assert detail[0].attrib["uid"] == "ICAO-A33D68"
track = detail[0].findall("track")
assert track[0].attrib["course"] == "135"
assert track[0].attrib["speed"] == "134.78432800000002"
def test_stratux_to_cot(sample_craft):
cot = stratuxcot.stratux_to_cot(sample_craft)
assert isinstance(cot, bytes)
assert b"a-.-A-C-F" in cot
assert b"DAL1352" in cot
assert b"ICAO-A33D68" in cot
assert b'speed="134.78432800000002"' in cot
def test_stratux_to_cot_raw_with_known_craft(sample_craft, sample_known_craft):
known_craft_key = "REG"
filter_key = sample_craft["Tail"].strip().upper()
known_craft = (list(filter(
lambda x: x[known_craft_key].strip().upper() == filter_key, sample_known_craft)) or
[{}])[0]
cot = stratuxcot.functions.stratux_to_cot_raw(sample_craft, known_craft=known_craft)
assert isinstance(cot, xml.etree.ElementTree.Element)
assert cot.tag == "event"
assert cot.attrib["version"] == "2.0"
assert cot.attrib["type"] == "a-f-A-T-A-C-O"
assert cot.attrib["uid"] == "ICAO-A33D68"
point = cot.findall("point")
assert point[0].tag == "point"
assert point[0].attrib["lat"] == "37.46306"
assert point[0].attrib["lon"] == "-122.264626"
assert point[0].attrib["hae"] == "2232.6600000000003"
detail = cot.findall("detail")
assert detail[0].tag == "detail"
assert detail[0].attrib["uid"] == "TACO_02"
track = detail[0].findall("track")
assert track[0].attrib["course"] == "135"
assert track[0].attrib["speed"] == "134.78432800000002"
def test_negative_stratux_to_cot():
sample_craft = {"taco": "burrito"}
cot = stratuxcot.stratux_to_cot(sample_craft)
assert cot == None
``` |
{
"source": "JoshuaGabriel/python_snake",
"score": 4
} |
#### File: python_snake/app/logic.py
```python
class Point:
def __init__(self, data=None, x=0, y=0):
if data != None:
self.x = data["x"]
self.y = data["y"]
return
else:
self.x = x
self.y = y
def __str__(self):
return "x: " + str(self.x) + " y: " + str(self.y)
def __repr__(self):
return "x: " + str(self.x) + " y: " + str(self.y)
class GameBoard():
"""
0 - Empty space
1 - Snake head
2 - Snake body
3 - Snake tail
4 - You head
5 - You body
6 - You tail
7 - food
"""
SnakeBodyCount = 0
MyBodyCount = 0
MyPreviousTile = -1 # Stores the value of the previous tile Skippy has been on
DidIJustEat = False # Check if I am about to grow, to omit the tail as a valid square (because I'm growing) #broken
Storage_dict = {} # Stores the health of an individual snake
def __init__(self, data=None):
"""Creates a new game board"""
if data == None:
print("Data not set... its going to crash")
return
self.data = data
self.height = data["board"]["height"]
self.width = data["board"]["width"]
self.board = [] # array of arrays
# init board
for _ in range(0, self.width):
column = []
for _ in range(0, self.height):
column.append(0)
self.board.append(column)
# go through all the snakes and add them to the board
GameBoard.SnakeBodyCount = 0
temporary_count = 0
GameBoard.Storage_dict = {}
for snake in data["board"]["snakes"]:
if(snake["id"]==data["you"]["id"]):
continue
temporary_count = 0
for bodypart in snake["body"]:
self.board[bodypart["x"]][bodypart["y"]] = 2
temporary_count+=1
'''
TODO: store each health into a dictionary (call the function called storage)
'''
if(temporary_count>GameBoard.SnakeBodyCount):
GameBoard.SnakeBodyCount = temporary_count
# add tail
tail = snake["body"][-1]
self.board[tail["x"]][tail["y"]] = 3
# add head
head = snake["body"][0]
self.board[head["x"]][head["y"]] = 1
# go through the food and add it to the board
for food in data["board"]["food"]:
self.board[food["x"]][food["y"]] = 7
# go through self
GameBoard.MyBodyCount = 0
for you in data["you"]["body"]:
self.board[you["x"]][you["y"]] = 5
GameBoard.MyBodyCount+=1
# get the head from the us
you_tail = data["you"]["body"][-1]
# set the board at head to the you head value (6)
self.board[you_tail["x"]][you_tail["y"]] = 6
you_head = data["you"]["body"][0]
self.board[you_head["x"]][you_head["y"]] = 4
# print("This is the created board")
# self.printBoard()
def printBoard(self):
for x in range(0, self.height):
for y in range(0, self.width):
print(self.board[y][x], end=' ')
print()
print(GameBoard.DidIJustEat)
print(self.Storage_dict)
def bfs(self, start, num, status_safety=True,status_trap=True):
"""
Start is the point on the board we start looking from
Num is the value (look at top) that we are looking for
        Status is used to override the safety protocol; it defaults to True unless specified
"""
queue = []
visited = set()
pg = {} # parent graph
# add the tiles around the head
self.enqueue_around_head(start, queue)
# While we are still in the queue
while len(queue) != 0:
# print("Visited: ", visited)
tile = queue.pop(0)
if tile.x >= self.width or tile.x < 0 or tile.y >= self.height or tile.y < 0:
continue
# print("queue:", queue)
# print("tile: ", end='')
# print(str(tile))
tile_val = self.board[tile.x][tile.y]
if str(tile) in visited:
continue
visited.add(str(tile))
if(tile==start):
continue
if (GameBoard.DidIJustEat) and (tile_val == 6) :
continue
if(not(self.safety_protocol(tile,num)) and status_safety):
continue
if(self.trap_protocol(tile) and status_trap):
continue
if tile_val == num:
return self.get_relative_direction(start, tile, pg)
if tile_val == 0 or tile_val == 7:
self.enqueue_around_point(tile, queue, visited, pg, num)
        return -1  # it didn't find what it was looking for
def enqueue_around_head(self, tile, queue):
points = [Point(x=tile.x, y=(tile.y - 1)), Point(x=tile.x, y=(tile.y + 1)), Point(x=(tile.x - 1), y=tile.y), Point(x=(tile.x + 1), y=tile.y)]
valid_tiles = [0,3,6,7]
for point in points:
if point.x >= self.width or point.x < 0 or point.y >= self.height or point.y < 0: # to check if our value is out of bounds
continue # if it is out of bounds, the iteration is skipped
tile_val = self.board[point.x][point.y]
if tile_val in valid_tiles: #queue is only filled with 0,3,6,7 to start with
queue.append(point)
    def enqueue_around_point(self, tile, queue, visited, parent_graph, num):
        points = [Point(x=tile.x, y=(tile.y - 1)), Point(x=tile.x, y=(tile.y + 1)), Point(x=(tile.x - 1), y=tile.y), Point(x=(tile.x + 1), y=tile.y)]
        safety_protocol = self.safety_protocol(tile, num)
        for point in points:
            # visited stores str(Point) keys, so compare against the string form
            if (str(point) not in visited) and safety_protocol:
                queue.append(point)
                parent_graph[point] = tile # The points point back to the tile they came from
# Gets the direction of the chosen square so skippy knows where to turn
def get_relative_direction(self, start, end, pg):
temp = end
while temp in pg: # gets where the end point was generated from
temp = pg[temp]
if(self.board[temp.x][temp.y]==7):
GameBoard.DidIJustEat = True
else:
GameBoard.DidIJustEat = False
# print("The tile I am going to is: ",temp)
# print("tile value of: ",self.board[temp.x][temp.y])
diff_x = start.x - temp.x
diff_y = start.y - temp.y
if diff_x == -1:
return 3
if diff_x == 1:
return 2
if diff_y == -1:
return 1
if diff_y == 1:
return 0
'''
TODO: need to say that it is safe to hit a head if I'm beside a snake with a lower health (not necessarily the biggest health)
plan:
implement other snake's ID or health to their head
'''
# returns false if the tile is dangerous (beside an opponent snake head)
# return true if the tile is safe
def safety_protocol(self,tile, num):
points = [Point(x=tile.x, y=(tile.y - 1)), Point(x=tile.x, y=(tile.y + 1)), Point(x=(tile.x - 1), y=tile.y), Point(x=(tile.x + 1), y=tile.y)]
if(GameBoard.AmIAlpha()):
return True
for point in points:
if point.x >= self.width or point.x < 0 or point.y >= self.height or point.y < 0:
continue
if(self.board[point.x][point.y]==1):
for snake in self.data["board"]["snakes"]:
print("length of snakes",len(self.data["board"]["snakes"]))
print("snake.x: ",snake["body"][0]["x"]," snake.y: ",snake["body"][0]["y"])
print("point.x: ",point.x," point.y: ",point.y)
print(str(snake["body"][0]["x"])==str(point.x) and str(snake["body"][0]["y"])==str(point.y))
if(str(snake["body"][0]["x"])==str(point.x) and str(snake["body"][0]["y"])==str(point.y)):
count = 0
for bodypart in snake["body"]:
count+=1
if(GameBoard.AmIAlpha(count)):
return True
break
return False
return True
    # Returns a list of valid neighboring Points and the adjacent enemy head (or None)
def neighbors(self,tile):
invalid_squares = [1,2,4,5]
head = None
points = [Point(x=tile.x, y=(tile.y - 1)), Point(x=tile.x, y=(tile.y + 1)), Point(x=(tile.x - 1), y=tile.y), Point(x=(tile.x + 1), y=tile.y)]
good_points = []
for point in points:
if point.x >= self.width or point.x < 0 or point.y >= self.height or point.y < 0:
continue
if(self.board[point.x][point.y]==1):
head = point
if (self.board[point.x][point.y] in invalid_squares):
continue
good_points.append(point)
return good_points,head
# Returns True if the next tile is a trapped tile
# A tile is considered to be trapped if there are no possible moves after
def trap_protocol(self,tile,previous_tile=None):
searching, head = self.neighbors(tile)
if(previous_tile!=None):
count=0
for square in searching:
if(square.x==previous_tile.x and square.y==previous_tile.y):
searching.pop(count)
break
count+=1
if(len(searching)>1):
return False
elif(len(searching)==0):
return True
else:
if(head!=None):
vector1 = Point(x=searching[0].x-tile.x,y=searching[0].y-tile.y)
vector2 = Point(x=head.x-tile.x,y=head.y-tile.y)
dot_product = vector1.x*vector2.x + vector1.y*vector2.y
print(dot_product)
if(dot_product==0):
return True
previous_tile = Point(x=tile.x,y=tile.y)
return self.trap_protocol(tile=searching[0],previous_tile=previous_tile)
@staticmethod
def AmIAlpha(count=None):
if(count!=None):
if(GameBoard.MyBodyCount>count):
return True
else:
return False
if(GameBoard.MyBodyCount>GameBoard.SnakeBodyCount):
return True
else:
return False
'''
TODO: this GetLength method
Initial Ideas: This will make skippy less likely to go into trapped squares
Fallbacks: Not sure if I need a limit to how many squares skippy should look at (I'm guessing the x (the dimension of the board)) most likely will need to limit
'''
# This method will find the amount of available squares
# Not sure what it will return yet
def GetLength(self):
pass
# Will Trap a snake in a corner situation
'''
Get relative direction of enemy snake
https://play.battlesnake.com/g/bf1f56d2-403e-482d-a324-8d0222a0cdb1/#
my head is at: (3,1)
for example ((1,1) (9,1) (1,9) (9,9)) for 11x11 grid
    if (((head.x == edge.x) or (head.y==edge.y)) and (I'm not at the width or height)) and (enemy head is at the edge and I'm beside it +1):
go for the end of board to kill them
elif(if im at the end of the board)
return -1
else:
return -1
to see how I am inside I need to get my head's coords
if(im inside of ((1,1) (9,1) (1,9) (9,9)))
'''
@staticmethod
def TrapKill():
pass
# implement a turtle and survive strategy for super late game scenario and we are smaller by a lot
def turtle(self,data):
move_data = -1
# print("CountMyBody: ", GameBoard.MyBodyCount)
# print("CountSnakeBody: ", GameBoard.SnakeBodyCount)
head = data["you"]["body"][0]
if(GameBoard.MyBodyCount+7<GameBoard.SnakeBodyCount and data["you"]["health"]<80):
move_data = GameBoard.bfs(self,Point(data=head), 7) #go for food
elif(GameBoard.MyBodyCount+7<GameBoard.SnakeBodyCount):
move_data = GameBoard.bfs(self,Point(data=head), 6,False,False) #go for tail
return move_data
def kill_snakes(self, data):
move_data = -1
# print("CountMyBody: ", GameBoard.MyBodyCount)
# print("CountSnakeBody: ", GameBoard.SnakeBodyCount)
if(GameBoard.MyBodyCount>GameBoard.SnakeBodyCount+1 and data["turn"]>50 and data["you"]["health"]>28):
head = data["you"]["body"][0]
move_data = GameBoard.bfs(self,Point(data=head), 1) # go for kill
return move_data
``` |
{
"source": "joshuagato/arithmetic-app-flask-api",
"score": 3
} |
#### File: joshuagato/arithmetic-app-flask-api/app.py
```python
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
app = Flask(__name__)
api = Api(app)
def checkPostedData(postedData, functionName):
if ("x" not in postedData or "y" not in postedData):
return 301
else:
return 200
class Add(Resource):
def post(self):
postedData = request.get_json()
status_code = checkPostedData(postedData, "add")
        if (status_code != 200):
            retJson = {
                "Message": "An error occurred",
                "Status Code": status_code
            }
            return jsonify(retJson)
x = postedData["x"]
y = postedData["y"]
x = int(x)
y = int(y)
ret = x + y
retMap = {
'Message': ret,
'Status Code': 200
}
return jsonify(retMap)
class Subtract(Resource):
pass
class Multiply(Resource):
pass
class Divide(Resource):
pass
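# Illustrative sketch (not part of the original tutorial code): one way the
# remaining stub resources could mirror the Add resource. It is deliberately
# not registered with the Api, so the stubs above stay authoritative.
class ExampleSubtract(Resource):
    def post(self):
        postedData = request.get_json()
        status_code = checkPostedData(postedData, "subtract")
        if (status_code != 200):
            return jsonify({"Message": "An error occurred", "Status Code": status_code})
        ret = int(postedData["x"]) - int(postedData["y"])
        return jsonify({"Message": ret, "Status Code": 200})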
api.add_resource(Add, "/add")
if __name__ == "__main__":
    # app.run() # This works perfectly
app.run(debug=True) # This helps to give feedback, in times of errors
# app.run(host="127.0.0.1", port=80) # This is good for production/deployment
``` |
{
"source": "JoshuaGhost/e2expred",
"score": 2
} |
#### File: latent_rationale/common/util.py
```python
import json
from collections import namedtuple
import numpy as np
import random
import torch
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out
from latent_rationale.nn.bow_encoder import BOWEncoder
from latent_rationale.nn.cnn_encoder import CNNEncoder
from latent_rationale.nn.lstm_encoder import LSTMEncoder
from latent_rationale.nn.rcnn_encoder import RCNNEncoder
Example = namedtuple("Example", ["tokens", "label", "token_labels", 'query', 'docid', 'ann_id'])
def make_kv_string(d: object) -> object:
out = []
for k, v in d.items():
if isinstance(v, float):
out.append("{} {:.4f}".format(k, v))
else:
out.append("{} {}".format(k, v))
return " ".join(out)
def get_encoder(layer, in_features, hidden_size, bidirectional=True):
"""Returns the requested layer."""
if layer == "lstm":
return LSTMEncoder(in_features, hidden_size,
bidirectional=bidirectional)
elif layer == "rcnn":
return RCNNEncoder(in_features, hidden_size,
bidirectional=bidirectional)
elif layer == "bow":
return BOWEncoder()
elif layer == "cnn":
return CNNEncoder(
embedding_size=in_features, hidden_size=hidden_size,
kernel_size=5)
else:
raise ValueError("Unknown layer")
def get_z_stats(z=None, mask=None):
"""
Computes statistics about how many zs are
exactly 0, continuous (between 0 and 1), or exactly 1.
:param z:
:param mask: mask in [B, T]
:return:
"""
z = torch.where(mask, z, z.new_full([1], 1e2))
num_0 = (z == 0.).sum().item()
num_c = ((z > 0.) & (z < 1.)).sum().item()
num_1 = (z == 1.).sum().item()
total = num_0 + num_c + num_1
mask_total = mask.sum().item()
assert total == mask_total, "total mismatch"
return num_0, num_c, num_1, mask_total
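# Illustrative sketch (not part of the original module): counts hard-zero, soft
# (0 < z < 1) and hard-one rationale scores under a padding mask. The tensor
# values below are arbitrary examples.
def example_z_stats():
    z = torch.tensor([[0.0, 0.3, 1.0, 0.0]])
    mask = torch.tensor([[True, True, True, False]])
    return get_z_stats(z, mask)  # -> (1, 1, 1, 3)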
def xavier_uniform_n_(w, gain=1., n=4):
"""
Xavier initializer for parameters that combine multiple matrices in one
parameter for efficiency. This is e.g. used for GRU and LSTM parameters,
where e.g. all gates are computed at the same time by 1 big matrix.
:param w:
:param gain:
:param n:
:return:
"""
with torch.no_grad():
fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
assert fan_out % n == 0, "fan_out should be divisible by n"
fan_out = fan_out // n
std = gain * np.math.sqrt(2.0 / (fan_in + fan_out))
a = np.math.sqrt(3.0) * std
nn.init.uniform_(w, -a, a)
def initialize_model_(model):
"""
Model initialization.
:param model:
:return:
"""
# Custom initialization
print("Glorot init")
for name, p in model.named_parameters():
if "bert_model" in name or name.startswith("embed") or "lagrange" in name:
print("{:10s} {:20s} {}".format("unchanged", name, p.shape))
elif "lstm" in name and len(p.shape) > 1:
print("{:10s} {:20s} {}".format("xavier_n", name, p.shape))
xavier_uniform_n_(p)
elif len(p.shape) > 1:
print("{:10s} {:20s} {}".format("xavier", name, p.shape))
torch.nn.init.xavier_uniform_(p)
elif "bias" in name:
print("{:10s} {:20s} {}".format("zeros", name, p.shape))
torch.nn.init.constant_(p, 0.)
else:
print("{:10s} {:20s} {}".format("unchanged", name, p.shape))
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def print_parameters(model):
"""Prints model parameters"""
total = 0
for name, p in model.named_parameters():
total += np.prod(p.shape)
print("{:24s} {:12s} requires_grad={}".format(name, str(list(p.shape)),
p.requires_grad))
print("\nTotal parameters: {}\n".format(total))
def get_minibatch(data, batch_size, shuffle=False, train_on_part=-1):
"""Return minibatches, optional shuffling"""
if shuffle:
print("Shuffling training data")
random.shuffle(data) # shuffle training data each epoch
batch = []
# yield minibatches
if train_on_part == -1:
data_to_return = data
else:
data_to_return = data[:int(len(data) * train_on_part)]
for example in data_to_return:
batch.append(example)
if len(batch) == batch_size:
yield batch
batch = []
# in case there is something left
if len(batch) > 0:
yield batch
# def get_minibatch(data, batch_size, shuffle=False):
# """Return minibatches, optional shuffling"""
# if shuffle:
# print("Shuffling training data")
# random.shuffle(data) # shuffle training data each epoch
#
# batch = []
#
# # yield minibatches
# for example in data:
# batch.append(example)
#
# if len(batch) == batch_size:
# yield batch
# batch = []
#
# # in case there is something left
# if len(batch) > 0:
# yield batch
def write_jsonl(jsonl, output_file):
with open(output_file, 'w') as of:
for js in jsonl:
as_str = json.dumps(js, sort_keys=True)
of.write(as_str)
of.write('\n')
```
#### File: latent_rationale/mtl_e2e/utils.py
```python
import argparse
import json
from dataclasses import dataclass
from typing import List, Any
import torch
import numpy as np
from torch.nn.utils.rnn import pad_sequence, PackedSequence, pack_padded_sequence, pad_packed_sequence
from transformers import BertTokenizer
from latent_rationale.common.util import Example
@dataclass(eq=True, frozen=True)
class PaddedSequence:
"""A utility class for padding variable length sequences mean for RNN input
This class is in the style of PackedSequence from the PyTorch RNN Utils,
but is somewhat more manual in approach. It provides the ability to generate masks
for outputs of the same input dimensions.
The constructor should never be called directly and should only be called via
the autopad classmethod.
    We'd love to delete this, but pad_sequence, pack_padded_sequence, and
pad_packed_sequence all require shuffling around tuples of information, and some
convenience methods using these are nice to have.
"""
data: torch.Tensor
batch_sizes: torch.Tensor
batch_first: bool = False
@classmethod
def autopad(cls, data, batch_first: bool = False, padding_value=0, device=None) -> 'PaddedSequence':
# handle tensors of size 0 (single item)
data_ = []
for d in data:
if len(d.size()) == 0:
d = d.unsqueeze(0)
data_.append(d)
padded = pad_sequence(data_, batch_first=batch_first, padding_value=padding_value)
if batch_first:
batch_lengths = torch.LongTensor([len(x) for x in data_])
if any([x == 0 for x in batch_lengths]):
raise ValueError(
"Found a 0 length batch element, this can't possibly be right: {}".format(batch_lengths))
else:
# TODO actually test this codepath
batch_lengths = torch.LongTensor([len(x) for x in data])
return PaddedSequence(padded, batch_lengths, batch_first).to(device=device)
#@classmethod
#def autopad(cls, data, len_queries, max_length, batch_first, device):
def pack_other(self, data: torch.Tensor):
return pack_padded_sequence(data, self.batch_sizes, batch_first=self.batch_first, enforce_sorted=False)
@classmethod
def from_packed_sequence(cls, ps: PackedSequence, batch_first: bool, padding_value=0) -> 'PaddedSequence':
padded, batch_sizes = pad_packed_sequence(ps, batch_first, padding_value)
return PaddedSequence(padded, batch_sizes, batch_first)
def cuda(self) -> 'PaddedSequence':
return PaddedSequence(self.data.cuda(), self.batch_sizes.cuda(), batch_first=self.batch_first)
def to(self, dtype=None, device=None, copy=False, non_blocking=False) -> 'PaddedSequence':
# TODO make to() support all of the torch.Tensor to() variants
return PaddedSequence(
self.data.to(dtype=dtype, device=device, copy=copy, non_blocking=non_blocking),
self.batch_sizes.to(device=device, copy=copy, non_blocking=non_blocking),
batch_first=self.batch_first)
def mask(self, mask_starts=None, on=int(0), off=int(0), device='cpu', size=None, dtype=None) -> torch.Tensor:
if size is None:
size = self.data.size()
out_tensor = torch.zeros(*size, dtype=dtype)
# TODO this can be done more efficiently
out_tensor.fill_(off)
if mask_starts is None:
mask_starts = [0] * len(self.batch_sizes)
        # note to self: these are probably less efficient than explicitly populating the off values instead of the on values.
if self.batch_first:
for i, (mask_st, bl) in enumerate(zip(mask_starts, self.batch_sizes)):
out_tensor[i, mask_st:bl] = on
else:
for i, (mask_st, bl) in enumerate(zip(mask_starts, self.batch_sizes)):
out_tensor[mask_st:bl, i] = on
return out_tensor.to(device)
def unpad(self, other: torch.Tensor) -> List[torch.Tensor]:
out = []
for o, bl in zip(other, self.batch_sizes):
out.append(torch.cat((o[:bl], torch.zeros(max(0, bl-len(o))))))
return out
    def flip(self) -> 'PaddedSequence':
        return PaddedSequence(self.data.transpose(0, 1), self.batch_sizes, not self.batch_first)
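# Illustrative sketch (not part of the original module): pads a small batch of
# variable-length sequences and derives a mask of the same shape. The tensor
# values below are arbitrary examples.
def example_autopad_usage():
    seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
    padded = PaddedSequence.autopad(seqs, batch_first=True, padding_value=0)
    # padded.data has shape [2, 3]; the mask marks real tokens with 1 and padding with 0
    return padded.mask(on=1, off=0, dtype=torch.long)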
def get_args():
parser = argparse.ArgumentParser(description='End to End ExPred')
parser.add_argument('--conf_fname', type=str)
parser.add_argument('--dataset_name', type=str, choices=['movies', 'fever', 'multirc', 'short_movies'])
parser.add_argument('--data_dir', type=str, default='')
parser.add_argument('--save_path', type=str, default='mtl_e2e/default')
parser.add_argument('--resume_snapshot', type=bool, default=True)
parser.add_argument('--warm_start_mtl', type=str)
parser.add_argument('--warm_start_cls', type=str)
parser.add_argument('--share_encoder', default=False, action='store_true')
parser.add_argument('--print_every', type=int, default=100)
parser.add_argument('--eval_every', type=int, default=-1)
parser.add_argument('--save_every', type=int, default=-1)
# rationale settings for HardKuma model
# parser.add_argument('--selection', type=float, default=1.,
# help="Target text selection rate for Lagrange.")
parser.add_argument('--w_aux', type=float)
parser.add_argument('--w_exp', type=float)
parser.add_argument('--selection', type=float)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--train_on_part', type=float, default='-1')
parser.add_argument('--decode_split', type=str, default='test')
args = parser.parse_args()
args = vars(args)
conf_fname = args['conf_fname']
with open(conf_fname, 'r') as fin:
conf = json.load(fin)
print(args.keys())
for k, v in args.items(): # values in args overwrites the ones on conf file
if v is None:
continue
if k in ('selection', 'w_aux', 'w_exp'):
conf['weights'][k] = v
else:
conf[k] = v
conf["eval_batch_size"] = max(conf['batch_size'], conf['eval_batch_size'])
return conf
def prepare_minibatch(mb: List[Example],
tokenizer: BertTokenizer,
max_length: int=512,
device: str=None):
"""
Minibatch is a list of examples.
This function converts words to IDs and returns
torch tensors to be used as input/targets.
"""
cls_token_id = tokenizer.cls_token_id
sep_token_id = tokenizer.sep_token_id
pad_token_id = tokenizer.pad_token_id
cls_token = torch.tensor([cls_token_id])#.to(device=device)
sep_token = torch.tensor([sep_token_id])#.to(device=device)
inputs = []
exps = []
labels = []
position_ids = []
for inst in mb:
q = inst.query
d = inst.tokens
exp = inst.token_labels
labels.append(inst.label)
if len(q) + len(d) + 2 > max_length:
# d = torch.Tensor(d[:(max_length - len(q) - 2)]).type_as(cls_token)
# exp = torch.Tensor(exp[:(max_length - len(q) - 2)])
d = d[:(max_length - len(q) - 2)]
exp = exp[:(max_length - len(q) - 2)]
# q = torch.Tensor(q).type_as(cls_token)
# print(cls_token.__class__, q.__class__, exp.__class__)
# print(cls_token.type(), q.type(), exp.type())
inputs.append(torch.cat([cls_token, q, sep_token, d]))
exps.append(torch.cat([torch.Tensor([0] * (len(q) + 2)), exp]))
position_ids.append(torch.tensor(list(range(0, len(q) + 1)) + list(range(0, len(d) + 1)))) # tokens
# positions are counted from 1, the two 0s are for [cls] and [sep], [pad]s are also nominated as pos 0
inputs = PaddedSequence.autopad(inputs, batch_first=True, padding_value=pad_token_id, device=device)
positions = PaddedSequence.autopad(position_ids, batch_first=True, padding_value=0, device=device)
exps = PaddedSequence.autopad(exps, batch_first=True, padding_value=0, device=device)
attention_masks = inputs.mask(on=1., off=-1000.).type(torch.float).to(device=device)
padding_masks = inputs.mask(on=1., off=0.).type(torch.bool).to(device=device)
labels = torch.LongTensor(labels).to(device=device)
return inputs, exps, labels, positions, attention_masks, padding_masks
#
#
# queries, documents, exps, labels = [], [], [], []
# for inst in mb:
# q = inst.query
# queries.append(torch.Tensor(q))
# documents.append(torch.Tensor(inst.tokens))
# exps.append(torch.cat(torch.Tensor([0] * (len(q) + 2))inst.token_labels))
# labels.append(torch.Tensor([inst.label]))
#
# return queries, documents, exps, labels
# def prepare_for_input(example: Example,
# max_length: int,
# tokenizer: BertTokenizer,
# device: str = 'cpu'):
# q = example.query
# d = example.tokens
# cls_token = [tokenizer.cls_token_id]
# sep_token = [tokenizer.sep_token_id]
# pad_token_id = tokenizer.pad_token_id
# if len(q) + len(d) + 2 > max_length:
# d = d[:(max_length - len(q) - 2)]
# input = torch.cat([cls_token, q, sep_token, d])
# selector_mask_starts = torch.Tensor([len(q) + 2]).to(device)
# input = PaddedSequence.autopad(input, batch_first=True, padding_value=pad_token_id,
# device=device)
# attention_mask = input.mask(on=1., off=0., device=device)
# attributes = [0] * (len(example.query) + 2) + example.tokens
# return [input, attributes, example.label]
def numerify_labels(dataset, labels_mapping):
for exp_id, exp in enumerate(dataset):
# print(exp_id)
# print(dataset[exp_id])
# print(exp)
dataset[exp_id] = Example(tokens=exp.tokens,
label=labels_mapping[exp.label],
token_labels=exp.token_labels,
query=exp.query,
ann_id=exp.ann_id,
docid=exp.docid)
return dataset
def tokenize_query_doc(example: Example, tokenizer: Any):
if isinstance(tokenizer, BertTokenizer):
query_tokens = tokenizer.encode(example.query, add_special_tokens=False)
else:
query_tokens = tokenizer.tokenize(example.query)
tokens = []
token_labels = []
for token, token_label in zip(example.tokens, example.token_labels):
if isinstance(tokenizer, BertTokenizer):
token_pieces = tokenizer.encode(token, add_special_tokens=False)
else:
token_pieces = tokenizer.tokenize(token)
tokens.extend(token_pieces)
token_labels.extend([token_label] * len(token_pieces))
if isinstance(tokenizer, BertTokenizer):
return Example(query=torch.LongTensor(query_tokens),#.type(torch.long),
tokens=torch.LongTensor(tokens),#.type(torch.long),
token_labels=torch.Tensor(token_labels),
label=torch.Tensor([example.label]),
ann_id=example.ann_id,
docid=example.docid)
else:
return Example(query=query_tokens,
tokens=tokens,
token_labels=token_labels,
label=example.label,
ann_id=example.ann_id,
docid=example.docid)
def numerify_query_doc(example: Example, tokenizer: Any):
query_ids = tokenizer.encode(example.query, add_special_tokens=False)
token_ids = tokenizer.encode(example.tokens, add_special_tokens=False)
return Example(query=query_ids,
tokens=token_ids,
token_labels=example.token_labels,
label=example.label,
ann_id=example.ann_id,
docid=example.docid)
``` |
{
"source": "joshuag/lambda-rest",
"score": 3
} |
#### File: python/lambda-routing/routing.py
```python
from functools import wraps
import json
import re
class PathNotFoundError(Exception):
pass
class VerbNotFoundError(Exception):
pass
class UserRoleNotPermitted(Exception):
pass
class UserNotAuthenticated(Exception):
pass
class _RouteRegistry(object):
def __init__(self):
self.registry = {}
self.perm_registry = {}
def register_route(self, verb, path, func, required_roles):
if not path in self.registry:
self.registry[path] = {}
self.perm_registry[path] = {}
self.registry[path][verb] = func
self.perm_registry[path][verb] = required_roles
    def match_route(self, request):
        resourcePath = request["requestContext"]["resourcePath"]
        verb = request["requestContext"]["httpMethod"]
        if resourcePath not in self.registry:
            raise PathNotFoundError("Route for {resourcePath} not found".format(resourcePath=resourcePath))
        resolved_path = resourcePath
        if verb not in self.registry[resolved_path]: #We know we'll get a hit for the resourcePath because we would have raised out otherwise
            raise VerbNotFoundError("{verb} not found for {resourcePath}".format(verb=verb, resourcePath=resourcePath))
        return {
            "function": self.registry[resolved_path][verb],
            "perms": self.perm_registry[resolved_path][verb],
        }
def check_perms(self, perms, user):
if not user:
raise UserNotAuthenticated("You must login")
else:
found = False
for role in user.roles:
if role in perms:
found = True
break
if not found:
raise UserRoleNotPermitted("You do not have a required role")
def match_and_execute_route_for_gateway(self, request, user=None):
request_params = request.get("pathParameters", {}) or {}
query_params = request.get("queryStringParameters", {}) or {}
request_params.update(query_params)
extra_headers = {}
matched_route = self.match_route(request)
matched_function = matched_route["function"]
matched_perms = matched_route["perms"]
try:
if matched_perms:
self.check_perms(matched_perms, user)
if request["requestContext"]["httpMethod"] in ("POST", "PATCH"):
body = matched_function(json.loads(request["body"]), **request_params)
else:
body = matched_function(**request_params)
status_code = 200
        except TypeError as e:
            status_code = 500
            body = json.dumps(
                {
                    'errorMessage': 'Unexpected path variable or querystring parameter',
                    'errorType': 'MalformedRequestError',
                    'stackTrace': []
                }
            )
            extra_headers = {'X-Amzn-ErrorType':'MalformedRequestError'}
        except Exception as e:
            status_code = 500
            body = json.dumps(
                {
                    'errorMessage': str(e),
                    'errorType': "APIError",
                    'stackTrace': []
                }
            )
            extra_headers = {'X-Amzn-ErrorType':'APIError'}
extra_headers.update({ 'Content-Type': 'application/json' })
if not isinstance(body, str):
# Sometimes a route may want to return headers (or just a plain object)
# So we'll do the escaping for them and pop the headers off
if isinstance(body, dict) and "headers" in body and "body" in body:
extra_headers.update(body.pop("headers"))
body = body.pop("body")
body = json.dumps(body)
return {
'statusCode': status_code,
'headers': extra_headers,
'body': body
}
RouteRegistry = _RouteRegistry()
class route(object):
def __init__(self, path, verb, required_roles=None):
self.path = path
self.verb = verb
self.required_roles = required_roles
def __call__(self, func):
RouteRegistry.register_route(verb=self.verb, path=self.path, func=func, required_roles=self.required_roles)
@wraps(func)
def wrapped(*args, **kwargs):
            return func(*args, **kwargs)
return wrapped
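# Illustrative sketch (not part of the original module): registers a hypothetical
# handler and dispatches a minimal API Gateway-style event to it. The path,
# handler and event payload below are made up for demonstration.
def example_route_dispatch():
    @route("/widgets/{widget_id}", "GET")
    def get_widget(widget_id):
        return {"id": widget_id}
    event = {
        "requestContext": {"resourcePath": "/widgets/{widget_id}", "httpMethod": "GET"},
        "pathParameters": {"widget_id": "42"},
        "queryStringParameters": None,
        "body": None,
    }
    return RouteRegistry.match_and_execute_route_for_gateway(event)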
``` |
{
"source": "joshuagl/apt-transport-in-toto",
"score": 3
} |
#### File: apt-transport-in-toto/tests/test_units.py
```python
import mock
import unittest
import intoto
from intoto import (serialize_one, deserialize_one, _intoto_parse_config,
_intoto_verify, LOG_HANDLER_STDERR)
class TestSerialization(unittest.TestCase):
"""Test parts serialization and deserialization functions. """
def test_serialize_deserialize(self):
"""Test that data is (de)serialized as expected. """
msg = "601 Configuration\nConfig-Item: 1\nConfig-Item: 2\n\n"
msg_data = {
"code": 601,
"info": "Configuration",
"fields": [
("Config-Item", "1"),
("Config-Item", "2"),
]
}
self.assertEqual(deserialize_one(msg), msg_data)
self.assertEqual(serialize_one(msg_data), msg)
def test_deserialize_error(self):
"""Test deserialization errors on malformed data. """
for msg, error in [
("", "Invalid empty message:"),
("10000", "Invalid message header:"),
("LOL LOL", "Invalid message header status code:"),
("1000 LOL", "Invalid message header status code:"),
("100 LOL", "Invalid message header info"),
("601 Configuration\nConfig-Item", "Invalid header field:")]:
with self.assertRaises(Exception) as ctx:
deserialize_one(msg)
self.assertIn(error, str(ctx.exception))
class TestConfigParser(unittest.TestCase):
"""Test function that parses the `601 Configuration` message. """
def test_log_level_config(self):
"""Test parsing LogLevel config. """
def _intoto_parse_config_with_log_level(level):
"""Wrapper for _intoto_parse_config to pass message with specific log
level. """
_intoto_parse_config({
"code": 601,
"info": "Configuration",
"fields": [
("Config-Item", "APT::Intoto::LogLevel::={}".format(level)),
],
})
# Backup log level
level_backup = LOG_HANDLER_STDERR.level
# Test with bad log level values
for level in ["1.0", "abc"]:
_intoto_parse_config_with_log_level(level)
self.assertNotEqual(LOG_HANDLER_STDERR.level, level)
# Test with good log level values
_intoto_parse_config_with_log_level(100)
self.assertEqual(LOG_HANDLER_STDERR.level, 100)
# Restore log level
LOG_HANDLER_STDERR.level = level_backup
def test_ignore_config_items(self):
"""Test that irrelevant configs are ignored. """
empty_global_info = {
"config": {
"Rebuilders": [],
"GPGHomedir": "",
"Layout": "",
"Keyids": [],
"NoFail": False
}
}
# Backup and reset global info
backup_global_info = intoto.global_info
intoto.global_info = empty_global_info
# Call config parsing function with irrelevant configs
_intoto_parse_config({
"code": 601,
"info": "Configuration",
"fields": [
("No-Config-Item", "123"),
("Config-Item", "APT::Other::Info"),
],
})
# Global info should still be empty
self.assertDictEqual(intoto.global_info, empty_global_info)
# Restore global info
intoto.global_info = backup_global_info
class TestVerification(unittest.TestCase):
"""Test function that triggers intoto verification (upon reception of
`201 URI Done` message).
"""
def test_skip_wrong_name(self):
"""Skip in-toto verification for files with wrong filename. """
def _intoto_verify_with_filename(fn):
"""Wrapper for _intoto_verify to pass message with specific filename. """
_intoto_verify({
"code": 201,
"info": "URI Done",
"fields": [
("Filename", "{}".format(fn)),
],
})
for fn in ["not-a-deb.txt", "way_too_may_party.deb", "missing_parts.deb"]:
with mock.patch("intoto.logger") as mock_logger:
_intoto_verify_with_filename(fn)
mock_logger.info.assert_called_with(
"Skipping in-toto verification for '{}'".format(fn))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joshuagl/in-toto",
"score": 3
} |
#### File: in-toto/in_toto/verifylib.py
```python
import os
import datetime
import iso8601
import fnmatch
import six
import logging
from dateutil import tz
import securesystemslib.exceptions
import in_toto.settings
import in_toto.util
import in_toto.runlib
import in_toto.models.layout
import in_toto.models.link
import in_toto.formats
from in_toto.models.metadata import Metablock
from in_toto.models.link import (FILENAME_FORMAT, FILENAME_FORMAT_SHORT)
from in_toto.models.layout import SUBLAYOUT_LINK_DIR_FORMAT
from in_toto.exceptions import (RuleVerificationError, LayoutExpiredError,
ThresholdVerificationError, BadReturnValueError,
SignatureVerificationError)
from in_toto.gpg.exceptions import KeyExpirationError
import in_toto.rulelib
# Inherits from in_toto base logger (c.f. in_toto.log)
log = logging.getLogger(__name__)
RULE_TRACE = {}
def _raise_on_bad_retval(return_value, command=None):
"""
<Purpose>
Internal function that checks return values of shell commands, e.g. from
inspections. Raises exception if the passed value is non-int and non-zero.
<Arguments>
return_value:
The return value to be verified
command: (optional)
The command whose execution returned the value, used for exception
message.
<Exceptions>
BadReturnValueError if the return_value is non-int and non-zero
<Side Effects>
None.
<Returns>
None.
"""
msg = "Got non-{what} " + "return value '{}'".format(return_value)
if command:
msg = "{0} from command '{1}'.".format(msg, command)
else:
msg = "{0}.".format(msg)
if not isinstance(return_value, int):
raise BadReturnValueError(msg.format(what="int"))
# TODO: in-toto specification suggests special behavior on
# return_value == 127, but does not fully define that behavior yet
if return_value != 0:
raise BadReturnValueError(msg.format(what="zero"))
def load_links_for_layout(layout, link_dir_path):
"""
<Purpose>
Try to load all existing metadata files for each Step of the Layout
from the current directory.
For each step the metadata might consist of multiple (thresholds) Link
or Layout (sub-layouts) files.
<Arguments>
layout:
Layout object
link_dir_path:
A path to directory where links are loaded from
<Side Effects>
Calls function to read files from disk
<Exceptions>
in_toto.exceptions.LinkNotFoundError,
if fewer than `threshold` link files can be found for any given
step of the supply chain (preliminary threshold check)
<Returns>
A dictionary carrying all the found metadata corresponding to the
passed layout, e.g.:
{
<step name> : {
<functionary key id> : <Metablock containing a Link or Layout object>,
...
}, ...
}
"""
steps_metadata = {}
# Iterate over all the steps in the layout
for step in layout.steps:
links_per_step = {}
# We try to load a link for every authorized functionary, but don't fail
# if the file does not exist (authorized != required)
# FIXME: Should we really pass on IOError, or just skip inexistent links?
for authorized_keyid in step.pubkeys:
# Iterate over the authorized key and if present over subkeys
for keyid in [authorized_keyid] + list(layout.keys.get(authorized_keyid,
{}).get("subkeys", {}).keys()):
filename = FILENAME_FORMAT.format(step_name=step.name, keyid=keyid)
filepath = os.path.join(link_dir_path, filename)
try:
metadata = Metablock.load(filepath)
links_per_step[keyid] = metadata
except IOError:
pass
# This is only a preliminary threshold check, based on (authorized)
# filenames, to fail early. A more thorough signature-based threshold
# check is indispensable.
if len(links_per_step) < step.threshold:
raise in_toto.exceptions.LinkNotFoundError("Step '{0}' requires '{1}'"
" link metadata file(s), found '{2}'."
.format(step.name, step.threshold, len(links_per_step)))
steps_metadata[step.name] = links_per_step
return steps_metadata
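# Illustrative sketch (not part of the original module): link metadata for a
# hypothetical "package" step signed by a hypothetical functionary keyid is
# looked up under the filename produced by FILENAME_FORMAT, as above.
def _example_link_filename():
  return FILENAME_FORMAT.format(step_name="package", keyid="d4c2a0b1e3f45678")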
def run_all_inspections(layout):
"""
<Purpose>
Extracts all inspections from a passed Layout's inspect field and
iteratively runs each command defined in the Inspection's `run` field using
`runlib.in_toto_run`, which returns a Metablock object containing a Link
object.
If a link command returns non-zero the verification is aborted.
<Arguments>
layout:
A Layout object which is used to extract the Inspections.
<Exceptions>
Calls function that raises BadReturnValueError if an inspection returned
non-int or non-zero.
<Returns>
A dictionary of metadata about the executed inspections, e.g.:
{
<inspection name> : {
<Metablock containing a Link object>,
...
}, ...
}
"""
inspection_links_dict = {}
for inspection in layout.inspect:
log.info("Executing command for inspection '{}'...".format(
inspection.name))
# FIXME: We don't want to use the base path for runlib so we patch this
# for now. This will not stay!
base_path_backup = in_toto.settings.ARTIFACT_BASE_PATH
in_toto.settings.ARTIFACT_BASE_PATH = None
# FIXME: What should we record as material/product?
# Is the current directory a sensible default? In general?
# If so, we should probably make it a default in run_link
# We could use artifact rule paths.
material_list = product_list = ["."]
link = in_toto.runlib.in_toto_run(inspection.name, material_list,
product_list, inspection.run)
_raise_on_bad_retval(link.signed.byproducts.get("return-value"), inspection.run)
inspection_links_dict[inspection.name] = link
# Dump the inspection link file for auditing
# Keep in mind that this pollutes the verifier's (client's) filesystem.
filename = FILENAME_FORMAT_SHORT.format(step_name=inspection.name)
link.dump(filename)
in_toto.settings.ARTIFACT_BASE_PATH = base_path_backup
return inspection_links_dict
def verify_layout_expiration(layout):
"""
<Purpose>
Raises an exception if the passed layout has expired, i.e. if its
`expires` property is lesser "now".
Time zone aware datetime objects in UTC+00:00 (Zulu Time) are used.
<Arguments>
layout:
The Layout object to be verified.
<Exceptions>
LayoutExpiredError
TBA (see https://github.com/in-toto/in-toto/issues/6)
<Side Effects>
None.
"""
expire_datetime = iso8601.parse_date(layout.expires)
if expire_datetime < datetime.datetime.now(tz.tzutc()):
raise LayoutExpiredError("Layout expired")
def substitute_parameters(layout, parameter_dictionary):
"""
<Purpose>
This function is a transitionary measure for parameter substitution (or
any other solution defined by the in-toto team). As of now, it acts as
a very simple replacement layer for python-like parameters
<Arguments>
layout:
The Layout object to process.
parameter_dictionary:
A dictionary containing key-value pairs for substitution.
<Exceptions>
securesystemslib.exceptions.FormatError:
if the parameter dictionary is malformed.
KeyError:
if one of the keys in the parameter dictionary are not present for
substitution
<Side Effects>
The layout object will have any tags replaced with the corresponding
values defined in the parameter dictionary.
"""
in_toto.formats.PARAMETER_DICTIONARY_SCHEMA.check_match(parameter_dictionary)
for step in layout.steps:
new_material_rules = []
for rule in step.expected_materials:
new_rule = []
for stanza in rule:
new_rule.append(stanza.format(**parameter_dictionary))
new_material_rules.append(new_rule)
new_product_rules = []
for rule in step.expected_products:
new_rule = []
for stanza in rule:
new_rule.append(stanza.format(**parameter_dictionary))
new_product_rules.append(new_rule)
new_expected_command = []
for argv in step.expected_command:
new_expected_command.append(argv.format(**parameter_dictionary))
step.expected_command = new_expected_command
step.expected_materials = new_material_rules
step.expected_products = new_product_rules
for inspection in layout.inspect:
new_material_rules = []
for rule in inspection.expected_materials:
new_rule = []
for stanza in rule:
new_rule.append(stanza.format(**parameter_dictionary))
new_material_rules.append(new_rule)
new_product_rules = []
for rule in inspection.expected_products:
new_rule = []
for stanza in rule:
new_rule.append(stanza.format(**parameter_dictionary))
new_product_rules.append(new_rule)
new_run = []
for argv in inspection.run:
new_run.append(argv.format(**parameter_dictionary))
inspection.run = new_run
inspection.expected_materials = new_material_rules
inspection.expected_products = new_product_rules
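# Illustrative sketch (not part of the original module): the substitution above
# boils down to str.format with the passed parameter dictionary, e.g. a command
# element "{EDITOR}" becomes "vim" for the hypothetical dictionary below.
def _example_substitution():
  return "{EDITOR} --version".format(**{"EDITOR": "vim"})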
def verify_layout_signatures(layout_metablock, keys_dict):
"""
<Purpose>
Iteratively verifies the signatures of a Metablock object containing
a Layout object for every verification key in the passed keys dictionary.
Requires at least one key to be passed and requires every passed key to
find a valid signature.
<Arguments>
layout_metablock:
A Metablock object containing a Layout whose signatures are
verified.
keys_dict:
A dictionary of keys to verify the signatures conformant with
securesystemslib.formats.ANY_VERIFICATION_KEY_DICT_SCHEMA.
<Exceptions>
securesystemslib.exceptions.FormatError
if the passed key dict does not match ANY_VERIFICATION_KEY_DICT_SCHEMA.
SignatureVerificationError
if an empty verification key dictionary was passed, or
if any of the passed verification keys fails to verify a signature.
in_toto.gpg.exceptions.KeyExpirationError:
if any of the passed verification keys is an expired gpg key
"""
in_toto.formats.ANY_VERIFICATION_KEY_DICT_SCHEMA.check_match(keys_dict)
# Fail if an empty verification key dictionary was passed
if len(keys_dict) < 1:
raise SignatureVerificationError("Layout signature verification"
" requires at least one key.")
# Fail if any of the passed keys can't verify a signature on the Layout
for junk, verify_key in six.iteritems(keys_dict):
layout_metablock.verify_signature(verify_key)
def verify_link_signature_thresholds(layout, chain_link_dict):
"""
<Purpose>
Verify that for each step of the layout there are at least `threshold`
links, signed by different authorized functionaries and return the chain
link dictionary containing only authorized links whose signatures
were successfully verified.
NOTE: If the layout's key store (`layout.keys`) lists a (master) key `K`,
with a subkey `K'`, then `K'` is authorized implicitly, to sign any link
that `K` is authorized to sign. In other words, the trust in a master key
extends to the trust in a subkey. The inverse is not true.
<Arguments>
layout:
A Layout object whose Steps are extracted and verified.
chain_link_dict:
A dictionary containing link metadata per functionary per step,
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link or Layout
object>,
...
}, ...
}
<Exceptions>
ThresholdVerificationError
If any of the steps of the passed layout does not have enough
(`step.threshold`) links signed by different authorized
functionaries.
<Returns>
A chain_link_dict containing only links with valid signatures created by
authorized functionaries.
"""
# Create an inverse keys-subkeys dictionary, with subkey keyids as
# dictionary keys and main keys as dictionary values. This will be
# required below to assess main-subkey trust delegations.
# We assume that a given subkey can only belong to one master key
# TODO: Is this a safe assumption? Should we assert for it?
main_keys_for_subkeys = {}
for main_key in list(layout.keys.values()):
for sub_keyid in main_key.get("subkeys", []):
main_keys_for_subkeys[sub_keyid] = main_key
# Dict for valid and authorized links of all steps of the layout
verfied_chain_link_dict = {}
# For each step of the layout check the signatures of corresponding links.
# Consider only links where the signature is valid and keys are authorized,
# and discard others.
# Only count one of multiple links signed with different subkeys of a main
# key towards link threshold.
# Only proceed with final product verification if threshold requirements are
# fulfilled.
for step in layout.steps:
# Dict for valid and authorized links of a given step
verified_key_link_dict = {}
# List of used keyids
used_main_keyids = []
# Do per step link threshold verification
for link_keyid, link in six.iteritems(chain_link_dict.get(step.name, {})):
# Iterate over authorized keyids to find a key or subkey corresponding
# to the given link and check if the link's keyid is authorized.
# Subkeys of authorized main keys are authorized implicitly.
for authorized_keyid in step.pubkeys:
authorized_key = layout.keys.get(authorized_keyid)
main_key_for_subkey = main_keys_for_subkeys.get(authorized_keyid)
# The signing key is authorized
if authorized_key and link_keyid == authorized_keyid:
verification_key = authorized_key
break
# The signing key is an authorized subkey
elif main_key_for_subkey and link_keyid == authorized_keyid:
verification_key = main_key_for_subkey
break
# The signing key is a subkey of an authorized key
elif (authorized_key and
link_keyid in authorized_key.get("subkeys", {}).keys()):
verification_key = authorized_key
break
else:
log.info("Skipping link. Keyid '{0}' is not authorized to sign links"
" for step '{1}'".format(link_keyid, step.name))
continue
# Verify signature and skip invalidly signed links
try:
link.verify_signature(verification_key)
except SignatureVerificationError:
log.info("Skipping link. Broken link signature with keyid '{0}'"
" for step '{1}'".format(link_keyid, step.name))
continue
except KeyExpirationError as e:
log.info("Skipping link. {}".format(e))
continue
# Warn if there are links signed by different subkeys of same main key
if verification_key["keyid"] in used_main_keyids:
log.warning("Found links signed by different subkeys of the same main"
" key '{}' for step '{}'. Only one of them is counted towards the"
" step threshold.".format(verification_key["keyid"], step.name))
used_main_keyids.append(verification_key["keyid"])
# Keep only links with valid and authorized signature
verified_key_link_dict[link_keyid] = link
# For each step, verify that we have enough validly signed links from
# distinct authorized functionaries. Links signed by different subkeys of
# the same main key are counted only once towards the threshold.
valid_authorized_links_cnt = (len(verified_key_link_dict) -
(len(used_main_keyids) - len(set(used_main_keyids))))
# TODO: To guarantee that links are signed by different functionaries
# we rely on the layout to not carry duplicate verification keys under
# different dictionary keys, e.g. {keyid1: KEY1, keyid2: KEY1}
# Maybe we should add such a check to the layout validation? Or here?
if valid_authorized_links_cnt < step.threshold:
raise ThresholdVerificationError("Step '{}' requires at least '{}' links"
" validly signed by different authorized functionaries. Only"
" found '{}'".format(step.name, step.threshold,
valid_authorized_links_cnt))
# Add all good links of this step to the dictionary of links of all steps
verfied_chain_link_dict[step.name] = verified_key_link_dict
# Threshold verification succeeded, return valid and authorized links for
# further verification
return verfied_chain_link_dict
def verify_command_alignment(command, expected_command):
"""
<Purpose>
Checks if a run command aligns with an expected command. The commands align
if all of their elements are equal. If alignment fails, a warning is
printed.
Note:
    Command alignment is a weak guarantee, because a functionary can easily
    alias commands.
<Arguments>
command:
A command list, e.g. ["vi", "foo.py"]
expected_command:
A command list, e.g. ["make", "install"]
<Exceptions>
None.
<Side Effects>
Logs warning in case commands do not align.
"""
# In what case command alignment should fail and how that failure should be
  # propagated has been thoroughly discussed in:
# https://github.com/in-toto/in-toto/issues/46 and
# https://github.com/in-toto/in-toto/pull/47
# We chose the simplest solution for now, i.e. Warn if they do not align.
if command != expected_command:
log.warning("Run command '{0}' differs from expected command '{1}'"
.format(command, expected_command))
def verify_all_steps_command_alignment(layout, chain_link_dict):
"""
<Purpose>
Iteratively checks if all expected commands as defined in the
Steps of a Layout align with the actual commands as recorded in the Link
metadata.
<Arguments>
layout:
A Layout object to extract the expected commands from.
chain_link_dict:
A dictionary containing link metadata per functionary per step,
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link object>,
...
}, ...
}
<Exceptions>
None.
<Side Effects>
None.
"""
for step in layout.steps:
# Find the according link for this step
expected_command = step.expected_command
key_link_dict = chain_link_dict[step.name]
# FIXME: I think we could do this for one link per step only
# providing that we verify command alignment AFTER threshold equality
for keyid, link in six.iteritems(key_link_dict):
log.info("Verifying command alignment for '{0}'...".format(
in_toto.models.link.FILENAME_FORMAT.format(step_name=step.name,
keyid=keyid)))
command = link.signed.command
verify_command_alignment(command, expected_command)
def verify_match_rule(rule_data, artifacts_queue, source_artifacts, links):
"""
<Purpose>
Filters artifacts from artifact queue using rule pattern and optional rule
source prefix and consumes them if there is a corresponding destination
artifact, filtered using the same rule pattern and an optional rule
destination prefix, and source and destination artifacts have matching
hashes.
NOTE: The destination artifacts are extracted from the links dictionary,
using destination name and destination type from the rule data. The source
artifacts could also be extracted from the links dictionary, but would
require the caller to pass source name and source type, as those are not
encoded in the rule. However, we choose to let the caller directly pass the
relevant artifacts.
<Arguments>
rule_data:
An unpacked "MATCH" rule (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
source_artifacts:
All artifacts of the source item (including hashes).
links:
A dictionary containing link metadata per step or inspection, e.g.:
{
<link name> : <Metablock containing a link object>,
...
}
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The set of consumed artifacts (paths only).
"""
consumed = set()
# The rule can only consume artifacts if the destination link exists
dest_link = links.get(rule_data["dest_name"])
if not dest_link:
return consumed
# Extract destination artifacts from destination link
dest_artifacts = getattr(dest_link.signed, rule_data["dest_type"])
# Filter part 1 - Filter artifacts using optional source prefix, and subtract
# prefix before filtering with rule pattern (see filter part 2) to prevent
# globbing in the prefix.
if rule_data["source_prefix"]:
filtered_source_paths = []
# Add trailing slash to source prefix if it does not exist
normalized_source_prefix = os.path.join(
rule_data["source_prefix"], "").replace("\\", "/")
for artifact_path in artifacts_queue:
if artifact_path.startswith(normalized_source_prefix):
filtered_source_paths.append(
artifact_path[len(normalized_source_prefix):])
else:
filtered_source_paths = artifacts_queue
# Filter part 2 - glob above filtered artifact paths
filtered_source_paths = fnmatch.filter(
filtered_source_paths, rule_data["pattern"])
# Iterate over filtered source paths and try to match the corresponding
# source artifact hash with the corresponding destination artifact hash
for path in filtered_source_paths:
# If a source prefix was specified, we subtracted the prefix above before
# globbing. We have to re-prepend the prefix in order to retrieve the
# corresponding source artifact below.
if rule_data["source_prefix"]:
full_source_path = os.path.join(
rule_data["source_prefix"], path).replace("\\", "/")
else:
full_source_path = path
# If a destination prefix was specified, the destination artifact should
# be queried with the full destination path, i.e. the prefix joined with
# the globbed path.
if rule_data["dest_prefix"]:
full_dest_path = os.path.join(
rule_data["dest_prefix"], path).replace("\\", "/")
else:
full_dest_path = path
# Extract source artifact hash dict
# We know the source artifact is available, it is also in the queue
source_artifact = source_artifacts[full_source_path]
# Don't consume source artifact w/o corresponding dest artifact (by path)
try:
dest_artifact = dest_artifacts[full_dest_path]
except KeyError:
continue
# Don't consume source artifact w/o corresponding dest artifact (by hash)
if source_artifact != dest_artifact:
continue
# Source and destination matched, consume artifact
consumed.add(full_source_path)
return consumed
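# Hedged illustration (not part of the original module): an unpacked MATCH
# rule, e.g. the result of
#   in_toto.rulelib.unpack_rule(
#       ["MATCH", "*.tar.gz", "IN", "dist", "WITH", "PRODUCTS", "FROM", "package"])
# is expected to look roughly like
#   {"rule_type": "match", "pattern": "*.tar.gz", "source_prefix": "dist",
#    "dest_prefix": "", "dest_type": "products", "dest_name": "package"}
# which verify_match_rule above uses to pair source artifacts with the
# products reported by the "package" link.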
def verify_create_rule(rule_pattern, artifacts_queue, materials, products):
"""
<Purpose>
Filters artifacts from artifacts queue using rule pattern and consumes them
if they are not in the materials set but are in the products set, i.e.
were created.
<Arguments>
rule_pattern:
A "CREATE" rule pattern (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
materials:
All materials of an item (paths only).
products:
All products of an item (paths only).
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The set of consumed artifacts (paths only).
"""
# Filter queued artifacts using the rule pattern
filtered_artifacts = fnmatch.filter(artifacts_queue, rule_pattern)
# Consume filtered artifacts that are products but not materials
consumed = set(filtered_artifacts) & (products - materials)
return consumed
def verify_delete_rule(rule_pattern, artifacts_queue, materials, products):
"""
<Purpose>
Filters artifacts from artifacts queue using rule pattern and consumes them
if they are in the materials set but are not in the products set, i.e.
were deleted.
<Arguments>
rule_pattern:
A "DELETE" rule pattern (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
materials:
All materials of an item (paths only).
products:
All products of an item (paths only).
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The set of consumed artifacts (paths only).
"""
# Filter queued artifacts using the rule pattern
filtered_artifacts = fnmatch.filter(artifacts_queue, rule_pattern)
# Consume filtered artifacts that are materials but not products
consumed = set(filtered_artifacts) & (materials - products)
return consumed
def verify_modify_rule(rule_pattern, artifacts_queue, materials, products):
"""
<Purpose>
Filters artifacts from artifacts queue using rule pattern and consumes them
    if they are in both the materials dict and in the products dict, but have
different hashes, i.e. were modified.
<Arguments>
rule_pattern:
A "MODIFY" rule pattern (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
materials:
All materials of an item (including hashes).
products:
All products of an item (including hashes).
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The set of consumed artifacts (paths only).
"""
# Filter queued artifacts using the rule pattern
filtered_artifacts = fnmatch.filter(artifacts_queue, rule_pattern)
# Filter filtered artifacts that are materials and products
filtered_artifacts = set(filtered_artifacts) & \
set(materials.keys()) & set(products.keys())
# Consume filtered artifacts that have different hashes
consumed = set()
for path in filtered_artifacts:
if materials[path] != products[path]:
consumed.add(path)
return consumed
def verify_allow_rule(rule_pattern, artifacts_queue):
"""
<Purpose>
Consumes artifacts, filtered from the artifacts queue using rule pattern.
<Arguments>
rule_pattern:
An "ALLOW" rule pattern (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The set of consumed artifacts (paths only).
"""
# Filter queued artifacts using the rule pattern
filtered_artifacts = fnmatch.filter(artifacts_queue, rule_pattern)
# Consume all filtered artifacts
return set(filtered_artifacts)
def verify_disallow_rule(rule_pattern, artifacts_queue):
"""
<Purpose>
Raises RuleVerificationError if rule pattern applies to any artifacts in
the queue.
NOTE: Each set of rules should have a terminal DISALLOW rule to make
overall verification fail in case preceding rules did not consume all
artifacts as intended.
<Arguments>
rule_pattern:
A "DISALLOW" rule pattern (see in_toto.rulelib).
artifacts_queue:
Not yet consumed artifacts (paths only).
<Exceptions>
RuleVerificationError
if the rule pattern filters artifacts in the artifact queue.
<Side Effects>
None.
<Returns>
None.
"""
filtered_artifacts = fnmatch.filter(artifacts_queue, rule_pattern)
if len(filtered_artifacts):
raise RuleVerificationError("'DISALLOW {}' matched the following "
"artifacts: {}\n{}".format(rule_pattern, filtered_artifacts,
_get_artifact_rule_traceback()))
def verify_require_rule(filename, artifacts_queue):
"""
<Purpose>
Raises RuleVerificationError if the filename provided does not exist in the
artifacts_queue
<Arguments>
filename:
A single filename (see issues #193 and #152). We will ignore the
artifact rule pattern because it's ambiguous and instead treat it
as a literal file name.
artifacts_queue:
Not yet consumed artifacts (paths only).
<Exceptions>
RuleVerificationError:
if the filename is not present in the artifacts queue
<Side Effects>
None.
<Returns>
None.
"""
if filename not in artifacts_queue:
raise RuleVerificationError("'REQUIRE {filename}' did not find {filename} "
"in: {queue}\n{traceback}".format(filename=filename,
queue=artifacts_queue, traceback=_get_artifact_rule_traceback()))
def _get_artifact_rule_traceback():
"""Build and return string form global `RULE_TRACE` which may be used as
error message for RuleVerificationError.
"""
traceback_str = "Full trace for 'expected_{0}' of item '{1}':\n".format(
RULE_TRACE["source_type"], RULE_TRACE["source_name"])
# Show all materials and products available in the beginning and
# label the one that is used to generate a queue.
for source_type in ["materials", "products"]:
traceback_str += "Available {}{}:\n{}\n".format(
source_type,
[" (used for queue)", ""][RULE_TRACE["source_type"] != source_type],
RULE_TRACE[source_type])
for trace_entry in RULE_TRACE["trace"]:
traceback_str += "Queue after '{0}':\n".format(
" ".join(trace_entry["rule"]))
traceback_str += "{}\n".format(trace_entry["queue"])
return traceback_str
def verify_item_rules(source_name, source_type, rules, links):
"""
<Purpose>
Apply all passed material or product rules (see source_type) of a given
step or inspection (see source_name), to enforce and authorize the
corresponding artifacts and to guarantee that artifacts are linked together
across steps of the supply chain.
The mode of operation is similar to that of a firewall:
In the beginning all materials or products of the step or inspection are
placed into an artifact queue. The rules are then applied sequentially,
consuming artifacts in the queue, i.e. removing them from the queue upon
successful application.
    The consumption of artifacts by itself has no effect on the verification.
    An exception is only raised by a subsequent "DISALLOW" rule that finds
    unconsumed artifacts, or by a "REQUIRE" rule that does not find the
    artifact it requires, either because it was falsely consumed or because
    it was never there in the first place.
<Arguments>
source_name:
The name of the item (step or inspection) being verified.
source_type:
One of "materials" or "products" depending on whether the rules are
taken from the "expected_materials" or "expected_products" field of
the item being verified.
rules:
The list of rules (material or product rules) for the item being
verified.
links:
A dictionary containing link metadata per step or inspection, e.g.:
{
<link name> : <Metablock containing a link>,
...
}
<Exceptions>
FormatError
if source_type is not "materials" or "products", or
if a rule in the passed list of rules does not conform with any rule
format.
RuleVerificationError
if a DISALLOW rule matches disallowed artifacts, or
if a REQUIRE rule does not find a required artifact.
<Side Effects>
Clears and populates the global RULE_TRACE data structure.
"""
if source_type not in ["materials", "products"]:
raise securesystemslib.exceptions.FormatError(
"Argument 'source_type' of function 'verify_item_rules' has to be "
"one of 'materials' or 'products'. Got: '{}'".format(source_type))
# Create shortcuts to item's materials and products (including hashes),
# required to verify "modify" and "match" rules.
materials_dict = links[source_name].signed.materials
products_dict = links[source_name].signed.products
# All other rules only require materials or products paths (without hashes)
materials_paths = set(materials_dict.keys())
products_paths = set(products_dict.keys())
# Depending on the source type we create the artifact queue from the item's
# materials or products and use it to keep track of (not) consumed artifacts.
  # The queue also only contains artifact keys (without hashes)
artifacts = getattr(links[source_name].signed, source_type)
artifacts_queue = set(artifacts.keys())
# Reset and re-populate rule traceback info dict for a rich error message
RULE_TRACE.clear()
RULE_TRACE["source_name"] = source_name
RULE_TRACE["source_type"] = source_type
RULE_TRACE["materials"] = list(materials_dict)
RULE_TRACE["products"] = list(products_dict)
RULE_TRACE["trace"] = []
# Process rules and remove consumed items from queue in each iteration
for rule in rules:
log.info("Verifying '{}'...".format(" ".join(rule)))
# Parse the rule and create two shortcuts to contained rule data
rule_data = in_toto.rulelib.unpack_rule(rule)
_type = rule_data["rule_type"]
_pattern = rule_data["pattern"]
# Initialize empty consumed set as fallback for rules that do not consume
# artifacts. All rules except "disallow" and "require" consume artifacts.
consumed = set()
if _type == "match":
consumed = verify_match_rule(
rule_data, artifacts_queue, artifacts, links)
elif _type == "create":
consumed = verify_create_rule(
_pattern, artifacts_queue, materials_paths, products_paths)
elif _type == "delete":
consumed = verify_delete_rule(
_pattern, artifacts_queue, materials_paths, products_paths)
elif _type == "modify":
consumed = verify_modify_rule(
_pattern, artifacts_queue, materials_dict, products_dict)
elif _type == "allow":
consumed = verify_allow_rule(_pattern, artifacts_queue)
# It's up to the "disallow" and "require" rule to raise an error if
# artifacts were not consumed as intended
elif _type == "disallow":
verify_disallow_rule(_pattern, artifacts_queue)
elif _type == "require":
verify_require_rule(_pattern, artifacts_queue)
else: # pragma: no cover (unreachable)
      raise securesystemslib.exceptions.FormatError(
          "Invalid rule type '{}'.".format(_type))
artifacts_queue -= consumed
# Append rule and copy of queue to global info for a rich error message
RULE_TRACE["trace"].append({
"rule": rule,
"queue": list(artifacts_queue)
})
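# Hedged illustration (not part of the original module): a typical
# "expected_products" rule list passed to verify_item_rules could look like
#   [["CREATE", "foo.tar.gz"],
#    ["DISALLOW", "*"]]
# All products are placed in the artifact queue, "CREATE foo.tar.gz" consumes
# the tarball if it is a product but not a material, and the terminal
# "DISALLOW *" raises RuleVerificationError if anything is left in the queue.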
def verify_all_item_rules(items, links):
"""
<Purpose>
Iteratively verifies artifact rules of passed items (Steps or Inspections).
<Arguments>
items:
A list containing Step or Inspection objects whose material
and product rules will be verified.
links:
A dictionary containing link metadata per step or inspection, e.g.:
{
<link name> : <Metablock containing a Link object>,
...
}
<Exceptions>
None.
<Side Effects>
None.
"""
for item in items:
log.info("Verifying material rules for '{}'...".format(item.name))
verify_item_rules(item.name, "materials", item.expected_materials, links)
log.info("Verifying product rules for '{}'...".format(item.name))
verify_item_rules(item.name, "products", item.expected_products, links)
def verify_threshold_constraints(layout, chain_link_dict):
"""
<Purpose>
Verifies that all links corresponding to a given step report the same
materials and products.
NOTE: This function does not verify if the signatures of each link
corresponding to a step are valid or created by a different authorized
functionary. This should be done earlier, using the function
`verify_link_signature_thresholds`.
<Arguments>
layout:
The layout whose step thresholds are being verified
chain_link_dict:
A dictionary containing link metadata per functionary per step,
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link object>,
...
}, ...
}
<Exceptions>
ThresholdVerificationError
      If there are not enough (threshold) links for a step
If the artifacts for all links of a step are not equal
<Side Effects>
None.
"""
# We are only interested in links that are related to steps defined in the
# Layout, so iterate over layout.steps
for step in layout.steps:
# Skip steps that don't require multiple functionaries
if step.threshold <= 1:
log.info("Skipping threshold verification for step '{0}' with"
" threshold '{1}'...".format(step.name, step.threshold))
continue
log.info("Verifying threshold for step '{0}' with"
" threshold '{1}'...".format(step.name, step.threshold))
# Extract the key_link_dict for this step from the passed chain_link_dict
key_link_dict = chain_link_dict[step.name]
# Check if we have at least <threshold> links for this step
# NOTE: This is already done in `verify_link_signature_thresholds`,
# Should we remove the check?
if len(key_link_dict) < step.threshold:
raise ThresholdVerificationError("Step '{0}' not performed"
" by enough functionaries!".format(step.name))
# Take a reference link (e.g. the first in the step_link_dict)
reference_keyid = list(key_link_dict.keys())[0]
reference_link = key_link_dict[reference_keyid]
# Iterate over all links to compare their properties with a reference_link
for keyid, link in six.iteritems(key_link_dict):
# TODO: Do we only care for artifacts, or do we want to
# assert equality of other properties as well?
if (reference_link.signed.materials != link.signed.materials or
reference_link.signed.products != link.signed.products):
raise ThresholdVerificationError("Links '{0}' and '{1}' have different"
" artifacts!".format(
in_toto.models.link.FILENAME_FORMAT.format(
step_name=step.name, keyid=reference_keyid),
in_toto.models.link.FILENAME_FORMAT.format(
step_name=step.name, keyid=keyid)))
def reduce_chain_links(chain_link_dict):
"""
<Purpose>
Iterates through the passed chain_link_dict and builds a dict with
step-name as keys and link objects as values.
    This is possible because it has already been verified that the links of
    different functionaries are identical for each step (see
    verify_threshold_constraints).
  <Arguments>
chain_link_dict:
A dictionary containing link metadata per functionary per step,
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link object>,
...
}, ...
}
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A dictionary containing one Link metadata object per step only if
the link artifacts of all link objects are identical for a step.
"""
reduced_chain_link_dict = {}
for step_name, key_link_dict in six.iteritems(chain_link_dict):
# Extract the key_link_dict for this step from the passed chain_link_dict
# take one exemplary link (e.g. the first in the step_link_dict)
# form the reduced_chain_link_dict to return
reduced_chain_link_dict[step_name] = list(key_link_dict.values())[0]
return reduced_chain_link_dict
def verify_sublayouts(layout, chain_link_dict, superlayout_link_dir_path):
"""
<Purpose>
Checks if any step has been delegated by the functionary, recurses into
the delegation and replaces the layout object in the chain_link_dict
by an equivalent link object.
<Arguments>
layout:
The layout specified by the project owner.
chain_link_dict:
A dictionary containing link metadata per functionary per step,
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link or Layout
object>,
...
}, ...
}
superlayout_link_dir_path:
A path to a directory, where links of the superlayout are loaded
from. Links of the sublayout are expected to be in a subdirectory
relative to this path, with a name in the format
in_toto.models.layout.SUBLAYOUT_LINK_DIR_FORMAT.
<Exceptions>
raises an Exception if verification of the delegated step fails.
<Side Effects>
None.
<Returns>
The passed dictionary containing link metadata per functionary per step,
with layouts replaced with summary links.
e.g.:
{
<link name> : {
<functionary key id> : <Metablock containing a Link object>,
...
}, ...
}
"""
for step_name, key_link_dict in six.iteritems(chain_link_dict):
for keyid, link in six.iteritems(key_link_dict):
if link.type_ == "layout":
log.info("Verifying sublayout {}...".format(step_name))
layout_key_dict = {}
# Retrieve the entire key object for the keyid
# corresponding to the link
layout_key_dict = {keyid: layout.keys.get(keyid)}
# Sublayout links are expected to be in a directory with the following
        # name relative to the current link directory path, i.e. if there
# are multiple levels of sublayout nesting, the links are expected to
# be nested accordingly
sublayout_link_dir = SUBLAYOUT_LINK_DIR_FORMAT.format(
name=step_name, keyid=keyid)
sublayout_link_dir_path = os.path.join(
superlayout_link_dir_path, sublayout_link_dir)
# Make a recursive call to in_toto_verify with the
# layout and the extracted key object
summary_link = in_toto_verify(link, layout_key_dict,
link_dir_path=sublayout_link_dir_path, step_name=step_name)
# Replace the layout object in the passed chain_link_dict
# with the link file returned by in-toto-verify
key_link_dict[keyid] = summary_link
return chain_link_dict
def get_summary_link(layout, reduced_chain_link_dict, name):
"""
<Purpose>
Merges the materials of the first step (as mentioned in the layout)
and the products of the last step and returns a new link.
This link reports the materials and products and summarizes the
overall software supply chain.
NOTE: The assumption is that the steps mentioned in the layout are
to be performed sequentially. So, the first step mentioned in the
layout denotes what comes into the supply chain and the last step
denotes what goes out.
<Arguments>
layout:
The layout specified by the project owner.
reduced_chain_link_dict:
A dictionary containing link metadata per step,
e.g.:
{
<link name> : <Metablock containing a Link object>,
...
}
name:
The name that the summary link will be associated with.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
A Metablock object containing a Link which summarizes the materials and
products of the overall software supply chain.
"""
# Create empty link object
summary_link = in_toto.models.link.Link()
# Take first and last link in the order the corresponding
# steps appear in the layout, if there are any.
if len(layout.steps) > 0:
first_step_link = reduced_chain_link_dict[layout.steps[0].name]
last_step_link = reduced_chain_link_dict[layout.steps[-1].name]
summary_link.materials = first_step_link.signed.materials
summary_link.name = name
summary_link.products = last_step_link.signed.products
summary_link.byproducts = last_step_link.signed.byproducts
summary_link.command = last_step_link.signed.command
return Metablock(signed=summary_link)
def in_toto_verify(layout, layout_key_dict, link_dir_path=".",
substitution_parameters=None, step_name=""):
"""
<Purpose>
Does entire in-toto supply chain verification of a final product
by performing the following actions:
1. Verify layout signature(s), requires at least one verification key
to be passed, and a valid signature for each passed key.
2. Verify layout expiration
3. Load link metadata for every Step defined in the layout and
fail if less links than the defined threshold for a step are found.
NOTE: Link files are expected to have the corresponding step
and the functionary, who carried out the step, encoded in their
filename.
4. Verify functionary signature for every loaded Link, skipping links
with failing signatures or signed by unauthorized functionaries,
and fail if less than `threshold` links validly signed by different
authorized functionaries can be found.
The routine returns a dictionary containing only links with valid
signatures by authorized functionaries.
5. Verify sublayouts
Recurses into layout verification for each link of the
superlayout that is a layout itself (i.e. sublayout).
Links for the sublayout are expected to be in a subdirectory
relative to the superlayout's link_dir_path, with a name in the
format: in_toto.models.layout.SUBLAYOUT_LINK_DIR_FORMAT.
The successfully verified sublayout is replaced with an unsigned
summary link in the chain_link_dict of the superlayout.
The summary link is then used just like a regular link
to verify command alignments, thresholds and inspections according
to the superlayout.
6. Verify alignment of defined (Step) and reported (Link) commands
NOTE: Won't raise exception on mismatch
7. Verify threshold constraints, i.e. if all links corresponding to
one step have recorded the same artifacts (materials and products).
8. Verify rules defined in each Step's expected_materials and
expected_products field
NOTE: At this point no Inspection link metadata is available,
hence (MATCH) rules cannot reference materials or products of
Inspections.
Verifying Steps' artifact rules before executing Inspections
guarantees that Inspection commands don't run on compromised
target files, which would be a surface for attacks.
9. Execute Inspection commands
NOTE: Inspections, similar to Steps executed with 'in-toto-run',
will record materials before and products after command execution.
For now it records everything in the current working directory.
10. Verify rules defined in each Inspection's expected_materials and
expected_products field
<Arguments>
layout:
Layout object that is being verified.
layout_key_dict:
Dictionary of project owner public keys, used to verify the
layout's signature.
link_dir_path: (optional)
A path to the directory from which link metadata files
corresponding to the steps in the passed layout are loaded.
Default is the current working directory.
substitution_parameters: (optional)
a dictionary containing key-value pairs for substituting in the
following metadata fields:
- artifact rules in step and inspection definitions in the layout
- the run fields in the inspection definitions
- the expected command in the step definitions
step_name: (optional)
The step that the layout corresponds to, typically used during
recursive calls of in_toto_verify. This usually happens when
resolving sublayouts. The function verify_sublayouts may provide a
clearer picture on how it's used.
<Exceptions>
None.
<Side Effects>
Read link metadata files from disk
<Returns>
A link which summarizes the materials and products of the overall
software supply chain (used by super-layout verification if any)
"""
log.info("Verifying layout signatures...")
verify_layout_signatures(layout, layout_key_dict)
# For the rest of the verification we only care about the layout payload
# (Layout) that carries all the information and not about the layout
# container (Metablock) that also carries the signatures
layout = layout.signed
log.info("Verifying layout expiration...")
verify_layout_expiration(layout)
  # If there are parameters sent to the translation layer, substitute them
if substitution_parameters is not None:
log.info('Performing parameter substitution...')
substitute_parameters(layout, substitution_parameters)
log.info("Reading link metadata files...")
chain_link_dict = load_links_for_layout(layout, link_dir_path)
log.info("Verifying link metadata signatures...")
chain_link_dict = verify_link_signature_thresholds(layout, chain_link_dict)
log.info("Verifying sublayouts...")
chain_link_dict = verify_sublayouts(layout, chain_link_dict, link_dir_path)
log.info("Verifying alignment of reported commands...")
verify_all_steps_command_alignment(layout, chain_link_dict)
log.info("Verifying threshold constraints...")
verify_threshold_constraints(layout, chain_link_dict)
reduced_chain_link_dict = reduce_chain_links(chain_link_dict)
log.info("Verifying Step rules...")
verify_all_item_rules(layout.steps, reduced_chain_link_dict)
log.info("Executing Inspection commands...")
inspection_link_dict = run_all_inspections(layout)
log.info("Verifying Inspection rules...")
# Artifact rules for inspections can reference links that correspond to
# Steps or Inspections, hence the concatenation of both collections of links
combined_links = reduced_chain_link_dict.copy()
combined_links.update(inspection_link_dict)
verify_all_item_rules(layout.inspect, combined_links)
# We made it this far without exception that means, verification passed
log.info("The software product passed all verification.")
# Return a link file which summarizes the entire software supply chain
# This is mostly relevant if the currently verified supply chain is embedded
# in another supply chain
return get_summary_link(layout, reduced_chain_link_dict, step_name)
```
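A minimal, hedged usage sketch of `in_toto_verify` (assuming this module is importable as `in_toto.verifylib`; the file names `root.layout` and `alice.pub` are hypothetical stand-ins for the signed layout and the project owner's public key):
```python
# Hedged sketch, not part of the original module. File names are assumptions.
from in_toto.models.metadata import Metablock
from in_toto.verifylib import in_toto_verify
from securesystemslib.interface import import_rsa_publickey_from_file

# Load the signed layout (a Metablock wrapping a Layout) and the owner key.
layout_metablock = Metablock.load("root.layout")
owner_key = import_rsa_publickey_from_file("alice.pub")

# Verification keys are passed as a dict keyed by keyid; link files are
# loaded from the current working directory by default.
summary_link = in_toto_verify(
    layout_metablock, {owner_key["keyid"]: owner_key}, link_dir_path=".")
print(summary_link.signed.products)
```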
#### File: in-toto/tests/common.py
```python
import os
import sys
import inspect
import unittest
if sys.version_info >= (3, 3):
from unittest.mock import patch # pylint: disable=no-name-in-module,import-error
else:
from mock import patch # pylint: disable=import-error
def run_with_portable_scripts(decorated):
print("patching...")
scripts_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "scripts")
print("scripts are located in {}".format(scripts_path))
@patch.dict(os.environ, {"PATH": "{};{}".format(scripts_path, os.environ['PATH'])})
class Patched(decorated):
pass
return Patched
class CliTestCase(unittest.TestCase):
"""TestCase subclass providing a test helper that patches sys.argv with
passed arguments and asserts a SystemExit with a return code equal
to the passed status argument.
Subclasses of CliTestCase require a class variable that stores the main
function of the cli tool to test as staticmethod, e.g.:
```
import tests.common
from in_toto.in_toto_run import main as in_toto_run_main
class TestInTotoRunTool(tests.common.CliTestCase):
cli_main_func = staticmethod(in_toto_run_main)
...
```
"""
cli_main_func = None
def __init__(self, *args, **kwargs):
"""Constructor that checks for the presence of a callable cli_main_func
class variable. And stores the filename of the module containing that
function, to be used as first argument when patching sys.argv in
self.assert_cli_sys_exit.
"""
if not callable(self.cli_main_func):
raise Exception("Subclasses of `CliTestCase` need to assign the main"
" function of the cli tool to test using `staticmethod()`: {}"
.format(self.__class__.__name__))
file_path = inspect.getmodule(self.cli_main_func).__file__
self.file_name = os.path.basename(file_path)
super(CliTestCase, self).__init__(*args, **kwargs)
def assert_cli_sys_exit(self, cli_args, status):
"""Test helper to mock command line call and assert return value.
    The passed args do not need to contain the command line tool's name,
    which is taken from `self.cli_main_func`.
"""
with patch.object(sys, "argv", [self.file_name]
+ cli_args), self.assertRaises(SystemExit) as raise_ctx:
self.cli_main_func() # pylint: disable=not-callable
self.assertEqual(raise_ctx.exception.code, status)
```
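A hedged sketch of how the helper above might be exercised from a subclass (the chosen arguments and expected exit codes are illustrative assumptions):
```python
# Hedged sketch, not part of the original test suite.
import tests.common
from in_toto.in_toto_run import main as in_toto_run_main

class TestInTotoRunCli(tests.common.CliTestCase):
  cli_main_func = staticmethod(in_toto_run_main)

  def test_help_exits_zero(self):
    # argparse exits with status 0 on "--help"
    self.assert_cli_sys_exit(["--help"], 0)

  def test_missing_arguments_exit_nonzero(self):
    # argparse exits with status 2 when required arguments are missing
    self.assert_cli_sys_exit([], 2)
```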
#### File: in-toto/tests/runtests.py
```python
from unittest import defaultTestLoader, TextTestRunner
import sys
import os
import subprocess
def check_usable_gpg():
"""Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is
available.
"""
os.environ["TEST_SKIP_GPG"] = "1"
for gpg in ["gpg2", "gpg"]:
try:
subprocess.check_call([gpg, "--version"])
except OSError:
pass
else:
# If one of the two exists, we can unset the skip envvar and ...
os.environ.pop("TEST_SKIP_GPG", None)
      # ... abort the availability check.
break
check_usable_gpg()
suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2, buffer=True).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
``` |
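A hedged sketch of how an individual test module might honor the `TEST_SKIP_GPG` flag set by `check_usable_gpg()` (the test class and its contents are hypothetical):
```python
# Hedged sketch, not part of the original test suite.
import os
import unittest

@unittest.skipIf(os.environ.get("TEST_SKIP_GPG"), "gpg2/gpg not found on PATH")
class TestGpgDependentFeature(unittest.TestCase):
  def test_something_with_gpg(self):
    self.assertTrue(True)  # placeholder for a test that shells out to gpg
```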
{
"source": "joshuagl/securesystemslib",
"score": 3
} |
#### File: securesystemslib/gpg/exceptions.py
```python
import datetime
import dateutil.tz
class PacketParsingError(Exception):
pass
class KeyNotFoundError(Exception):
pass
class PacketVersionNotSupportedError(Exception):
pass
class SignatureAlgorithmNotSupportedError(Exception):
pass
class CommandError(Exception):
pass
class KeyExpirationError(Exception):
def __init__(self, key):
super(KeyExpirationError, self).__init__()
self.key = key
def __str__(self):
creation_time = datetime.datetime.fromtimestamp(
self.key["creation_time"], dateutil.tz.UTC)
expiration_time = datetime.datetime.fromtimestamp(
self.key["creation_time"] + self.key["validity_period"],
dateutil.tz.UTC)
validity_period = expiration_time - creation_time
return ("GPG key '{}' created on '{:%Y-%m-%d %H:%M} UTC' with validity "
"period '{}' expired on '{:%Y-%m-%d %H:%M} UTC'.".format(
self.key["keyid"], creation_time, validity_period, expiration_time))
```
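A hedged sketch of how `KeyExpirationError` renders its message (the key dict is hypothetical and only carries the fields that `__str__` reads):
```python
# Hedged sketch, not part of the original module.
from securesystemslib.gpg.exceptions import KeyExpirationError

fake_key = {
    "keyid": "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",  # hypothetical keyid
    "creation_time": 1500000000,          # seconds since the epoch
    "validity_period": 365 * 24 * 3600,   # one year, in seconds
}
try:
  raise KeyExpirationError(fake_key)
except KeyExpirationError as e:
  print(e)  # "GPG key '...' created on '...' with validity period '...' expired on '...'"
```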
#### File: securesystemslib/securesystemslib/interface.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import errno
import sys
import time
import datetime
import getpass
import logging
import tempfile
import shutil
import json
import gzip
import random
import securesystemslib.formats
import securesystemslib.settings
import securesystemslib.util
import securesystemslib.keys
import six
from colorama import Fore
# See 'log.py' to learn how logging is handled in securesystemslib.
logger = logging.getLogger('securesystemslib_interface')
# Recommended RSA key sizes:
# https://en.wikipedia.org/wiki/Key_size#Asymmetric_algorithm_key_lengths
# Based on the above, RSA keys of size 3072 bits are expected to provide
# security through 2031 and beyond.
DEFAULT_RSA_KEY_BITS = 3072
# Supported key types.
SUPPORTED_KEY_TYPES = ['rsa', 'ed25519']
def _prompt(message, result_type=str):
"""
Non-public function that prompts the user for input by logging 'message',
converting the input to 'result_type', and returning the value to the
caller.
"""
return result_type(six.moves.input(message))
def get_password(prompt='Password: ', confirm=False):
"""
<Purpose>
Return the password entered by the user. If 'confirm' is True, the user is
asked to enter the previously entered password once again. If they match,
the password is returned to the caller.
<Arguments>
prompt:
The text of the password prompt that is displayed to the user.
confirm:
Boolean indicating whether the user should be prompted for the password
a second time. The two entered password must match, otherwise the
user is again prompted for a password.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The password entered by the user.
"""
# Are the arguments the expected type?
# If not, raise 'securesystemslib.exceptions.FormatError'.
securesystemslib.formats.TEXT_SCHEMA.check_match(prompt)
securesystemslib.formats.BOOLEAN_SCHEMA.check_match(confirm)
while True:
# getpass() prompts the user for a password without echoing
# the user input.
password = getpass.getpass(prompt, sys.stderr)
if not confirm:
return password
password2 = getpass.getpass('Confirm: ', sys.stderr)
    if password == password2:
return password
else:
print('Mismatch; try again.')
def generate_and_write_rsa_keypair(filepath=None, bits=DEFAULT_RSA_KEY_BITS,
password=None):
"""
<Purpose>
Generate an RSA key pair. The public portion of the generated RSA key is
saved to <'filepath'>.pub, whereas the private key portion is saved to
<'filepath'>. If no password is given, the user is prompted for one. If
the 'password' is an empty string, the private key is saved unencrypted to
<'filepath'>. If the filepath is not given, the KEYID is used as the
filename and the keypair saved to the current working directory.
The best available form of encryption, for a given key's backend, is used
with pyca/cryptography. According to their documentation, "it is a curated
encryption choice and the algorithm may change over time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
bits:
The number of bits of the generated RSA key.
password:
The password to encrypt 'filepath'. If None, the user is prompted for a
password. If an empty string is given, the private key is written to
disk unencrypted.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
"""
# Does 'bits' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.RSAKEYBITS_SCHEMA.check_match(bits)
# Generate the public and private RSA keys.
rsa_key = securesystemslib.keys.generate_rsa_key(bits)
public = rsa_key['keyval']['public']
private = rsa_key['keyval']['private']
if not filepath:
filepath = os.path.join(os.getcwd(), rsa_key['keyid'])
else:
logger.debug('The filepath has been specified. Not using the key\'s'
' KEYID as the default filepath.')
# Does 'filepath' have the correct format?
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the encrypted RSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=True)
else:
logger.debug('The password has been specified. Not prompting for one')
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Encrypt the private key if 'password' is set.
if len(password):
private = securesystemslib.keys.create_rsa_encrypted_pem(private, password)
else:
logger.debug('An empty password was given. Not encrypting the private key.')
# If the parent directory of filepath does not exist,
# create it (and all its parent directories, if necessary).
securesystemslib.util.ensure_parent_dir(filepath)
# Write the public key (i.e., 'public', which is in PEM format) to
# '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of
# the public key, and (3) move to final destination.
file_object = securesystemslib.util.TempFile()
file_object.write(public.encode('utf-8'))
# The temporary file is closed after the final move.
file_object.move(filepath + '.pub')
# Write the private key in encrypted PEM format to '<filepath>'.
# Unlike the public key file, the private key does not have a file
# extension.
file_object = securesystemslib.util.TempFile()
file_object.write(private.encode('utf-8'))
file_object.move(filepath)
return filepath
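# Example usage sketch (hypothetical file path and password, not part of the
# original module):
#
#   path = generate_and_write_rsa_keypair('keys/mykey', password='correct horse')
#   public = import_rsa_publickey_from_file(path + '.pub')
#   private = import_rsa_privatekey_from_file(path, password='correct horse')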
def import_rsa_privatekey_from_file(filepath, password=None,
scheme='rsassa-pss-sha256', prompt=False):
"""
<Purpose>
Import the PEM file in 'filepath' containing the private key.
If password is passed use passed password for decryption.
If prompt is True use entered password for decryption.
If no password is passed and either prompt is False or if the password
entered at the prompt is an empty string, omit decryption, treating the
key as if it is not encrypted.
If password is passed and prompt is True, an error is raised. (See below.)
The returned key is an object in the
'securesystemslib.formats.RSAKEY_SCHEMA' format.
<Arguments>
filepath:
<filepath> file, an RSA encrypted PEM file. Unlike the public RSA PEM
key file, 'filepath' does not have an extension.
    password:
      The password used to decrypt 'filepath'.
scheme:
The signature scheme used by the imported key.
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
ValueError, if 'password' is passed and 'prompt' is True.
ValueError, if 'password' is passed and it is an empty string.
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.FormatError, if the entered password is
improperly formatted.
IOError, if 'filepath' can't be loaded.
securesystemslib.exceptions.CryptoError, if a password is available
and 'filepath' is not a valid key file encrypted using that password.
securesystemslib.exceptions.CryptoError, if no password is available
and 'filepath' is not a valid non-encrypted key file.
<Side Effects>
The contents of 'filepath' are read, optionally decrypted, and returned.
<Returns>
An RSA key object, conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# Is 'scheme' properly formatted?
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
if password and prompt:
raise ValueError("Passing 'password' and 'prompt' True is not allowed.")
# If 'password' was passed check format and that it is not empty.
if password is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# TODO: PASSWORD_SCHEMA should be securesystemslib.schema.AnyString(min=1)
if not len(password):
raise ValueError('Password must be 1 or more characters')
elif prompt:
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
# NOTE: A user who gets prompted for a password, can only signal that the
# key is not encrypted by entering no password in the prompt, as opposed
# to a programmer who can call the function with or without a 'password'.
# Hence, we treat an empty password here, as if no 'password' was passed.
password = get_password('Enter a password for an encrypted RSA'
' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
confirm=False) or None
if password is not None:
# This check will not fail, because a mal-formatted passed password fails
# above and an entered password will always be a string (see get_password)
# However, we include it in case PASSWORD_SCHEMA or get_password changes.
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
else:
logger.debug('No password was given. Attempting to import an'
' unencrypted file.')
# Read the contents of 'filepath' that should be a PEM formatted private key.
with open(filepath, 'rb') as file_object:
pem_key = file_object.read().decode('utf-8')
# Convert 'pem_key' to 'securesystemslib.formats.RSAKEY_SCHEMA' format.
# Raise 'securesystemslib.exceptions.CryptoError' if 'pem_key' is invalid.
# If 'password' is None decryption will be omitted.
rsa_key = securesystemslib.keys.import_rsakey_from_private_pem(pem_key,
scheme, password)
return rsa_key
def import_rsa_publickey_from_file(filepath, scheme='rsassa-pss-sha256'):
"""
<Purpose>
Import the RSA key stored in 'filepath'. The key object returned is in the
format 'securesystemslib.formats.RSAKEY_SCHEMA'. If the RSA PEM in
'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, an RSA PEM file.
scheme:
The signature scheme used by the imported key.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted.
securesystemslib.exceptions.Error, if a valid RSA key object cannot be
generated. This may be caused by an improperly formatted PEM file.
<Side Effects>
'filepath' is read and its contents extracted.
<Returns>
An RSA key object conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# Is 'scheme' properly formatted?
securesystemslib.formats.RSA_SCHEME_SCHEMA.check_match(scheme)
# Read the contents of the key file that should be in PEM format and contains
# the public portion of the RSA key.
with open(filepath, 'rb') as file_object:
rsa_pubkey_pem = file_object.read().decode('utf-8')
# Convert 'rsa_pubkey_pem' to 'securesystemslib.formats.RSAKEY_SCHEMA' format.
try:
rsakey_dict = securesystemslib.keys.import_rsakey_from_public_pem(
rsa_pubkey_pem, scheme)
except securesystemslib.exceptions.FormatError as e:
raise securesystemslib.exceptions.Error('Cannot import improperly formatted'
' PEM file.' + repr(str(e)))
return rsakey_dict
def generate_and_write_ed25519_keypair(filepath=None, password=None):
"""
<Purpose>
Generate an Ed25519 keypair, where the encrypted key (using 'password' as
the passphrase) is saved to <'filepath'>. The public key portion of the
generated Ed25519 key is saved to <'filepath'>.pub. If the filepath is not
given, the KEYID is used as the filename and the keypair saved to the
current working directory.
The private key is encrypted according to 'cryptography's approach:
"Encrypt using the best available encryption for a given key's backend.
This is a curated encryption choice and the algorithm may change over
time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
password:
The password, or passphrase, to encrypt the private portion of the
generated Ed25519 key. A symmetric encryption key is derived from
'password', so it is not directly used.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
"""
# Generate a new Ed25519 key object.
ed25519_key = securesystemslib.keys.generate_ed25519_key()
if not filepath:
filepath = os.path.join(os.getcwd(), ed25519_key['keyid'])
else:
logger.debug('The filepath has been specified. Not using the key\'s'
' KEYID as the default filepath.')
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the Ed25519'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=True)
else:
logger.debug('The password has been specified. Not prompting for one.')
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# If the parent directory of filepath does not exist,
# create it (and all its parent directories, if necessary).
securesystemslib.util.ensure_parent_dir(filepath)
# Create a temporary file, write the contents of the public key, and move
# to final destination.
file_object = securesystemslib.util.TempFile()
# Generate the ed25519 public key file contents in metadata format (i.e.,
# does not include the keyid portion).
keytype = ed25519_key['keytype']
keyval = ed25519_key['keyval']
scheme = ed25519_key['scheme']
ed25519key_metadata_format = securesystemslib.keys.format_keyval_to_metadata(
keytype, scheme, keyval, private=False)
file_object.write(json.dumps(ed25519key_metadata_format).encode('utf-8'))
# Write the public key (i.e., 'public', which is in PEM format) to
# '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of
# the public key, and (3) move to final destination.
# The temporary file is closed after the final move.
file_object.move(filepath + '.pub')
# Write the encrypted key string, conformant to
# 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.
file_object = securesystemslib.util.TempFile()
# Encrypt the private key if 'password' is set.
if len(password):
ed25519_key = securesystemslib.keys.encrypt_key(ed25519_key, password)
else:
logger.debug('An empty password was given. '
'Not encrypting the private key.')
ed25519_key = json.dumps(ed25519_key)
# Raise 'securesystemslib.exceptions.CryptoError' if 'ed25519_key' cannot be
# encrypted.
file_object.write(ed25519_key.encode('utf-8'))
file_object.move(filepath)
return filepath
def import_ed25519_publickey_from_file(filepath):
"""
<Purpose>
Load the ED25519 public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return
'filepath' in securesystemslib.formats.ED25519KEY_SCHEMA format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' is read and saved.
<Returns>
An ED25519 key object conformant to
'securesystemslib.formats.ED25519KEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# ED25519 key objects are saved in json and metadata format. Return the
# loaded key object in securesystemslib.formats.ED25519KEY_SCHEMA' format that
# also includes the keyid.
ed25519_key_metadata = securesystemslib.util.load_json_file(filepath)
ed25519_key, junk = \
securesystemslib.keys.format_metadata_to_key(ed25519_key_metadata)
# Raise an exception if an unexpected key type is imported. Redundant
# validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()'
# should have fully validated 'ed25519_key_metadata'.
if ed25519_key['keytype'] != 'ed25519': # pragma: no cover
message = 'Invalid key type loaded: ' + repr(ed25519_key['keytype'])
raise securesystemslib.exceptions.FormatError(message)
return ed25519_key
def import_ed25519_privatekey_from_file(filepath, password=None, prompt=False):
"""
<Purpose>
Import the encrypted ed25519 key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ED25519KEY_SCHEMA' format.
The private key (may also contain the public part) is encrypted with AES
256 and CTR the mode of operation. The password is strengthened with
PBKDF2-HMAC-SHA256.
<Arguments>
    filepath:
      <filepath> file, an encrypted Ed25519 key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ed25519 key
object can be returned).
prompt:
If True the user is prompted for a passphrase to decrypt 'filepath'.
Default is False.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ed25519').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ed25519 key object of the form:
'securesystemslib.formats.ED25519KEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
if password and prompt:
raise ValueError("Passing 'password' and 'prompt' True is not allowed.")
# If 'password' was passed check format and that it is not empty.
if password is not None:
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# TODO: PASSWORD_SCHEMA should be securesystemslib.schema.AnyString(min=1)
if not len(password):
raise ValueError('Password must be 1 or more characters')
elif prompt:
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
# NOTE: A user who gets prompted for a password, can only signal that the
# key is not encrypted by entering no password in the prompt, as opposed
# to a programmer who can call the function with or without a 'password'.
# Hence, we treat an empty password here, as if no 'password' was passed.
password = get_password('Enter a password for an encrypted ed25519'
' file \'' + Fore.RED + filepath + Fore.RESET + '\': ',
confirm=False)
# If user sets an empty string for the password, explicitly set the
# password to None, because some functions may expect this later.
if len(password) == 0: # pragma: no cover
password = None
# Finally, regardless of password, try decrypting the key, if necessary.
# Otherwise, load it straight from the disk.
with open(filepath, 'rb') as file_object:
json_str = file_object.read()
return securesystemslib.keys.\
import_ed25519key_from_private_json(json_str, password=password)
def generate_and_write_ecdsa_keypair(filepath=None, password=None):
"""
<Purpose>
Generate an ECDSA keypair, where the encrypted key (using 'password' as the
passphrase) is saved to <'filepath'>. The public key portion of the
generated ECDSA key is saved to <'filepath'>.pub. If the filepath is not
given, the KEYID is used as the filename and the keypair saved to the
current working directory.
The 'cryptography' library is currently supported. The private key is
encrypted according to 'cryptography's approach: "Encrypt using the best
available encryption for a given key's backend. This is a curated
encryption choice and the algorithm may change over time."
<Arguments>
filepath:
The public and private key files are saved to <filepath>.pub and
<filepath>, respectively. If the filepath is not given, the public and
private keys are saved to the current working directory as <KEYID>.pub
and <KEYID>. KEYID is the generated key's KEYID.
password:
The password, or passphrase, to encrypt the private portion of the
generated ECDSA key. A symmetric encryption key is derived from
'password', so it is not directly used.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be encrypted.
<Side Effects>
Writes key files to '<filepath>' and '<filepath>.pub'.
<Returns>
The 'filepath' of the written key.
"""
# Generate a new ECDSA key object. The 'cryptography' library is currently
# supported and performs the actual cryptographic operations.
ecdsa_key = securesystemslib.keys.generate_ecdsa_key()
if not filepath:
filepath = os.path.join(os.getcwd(), ecdsa_key['keyid'])
else:
logger.debug('The filepath has been specified. Not using the key\'s'
' KEYID as the default filepath.')
# Does 'filepath' have the correct format?
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=True)
else:
logger.debug('The password has been specified. Not prompting for one')
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# If the parent directory of filepath does not exist,
# create it (and all its parent directories, if necessary).
securesystemslib.util.ensure_parent_dir(filepath)
# Create a temporary file, write the contents of the public key, and move
# to final destination.
file_object = securesystemslib.util.TempFile()
# Generate the ECDSA public key file contents in metadata format (i.e., does
# not include the keyid portion).
keytype = ecdsa_key['keytype']
keyval = ecdsa_key['keyval']
scheme = ecdsa_key['scheme']
ecdsakey_metadata_format = securesystemslib.keys.format_keyval_to_metadata(
keytype, scheme, keyval, private=False)
file_object.write(json.dumps(ecdsakey_metadata_format).encode('utf-8'))
# Write the public key (i.e., 'public', which is in PEM format) to
# '<filepath>.pub'. (1) Create a temporary file, (2) write the contents of
# the public key, and (3) move to final destination.
file_object.move(filepath + '.pub')
# Write the encrypted key string, conformant to
# 'securesystemslib.formats.ENCRYPTEDKEY_SCHEMA', to '<filepath>'.
file_object = securesystemslib.util.TempFile()
# Raise 'securesystemslib.exceptions.CryptoError' if 'ecdsa_key' cannot be
# encrypted.
encrypted_key = securesystemslib.keys.encrypt_key(ecdsa_key, password)
file_object.write(encrypted_key.encode('utf-8'))
file_object.move(filepath)
return filepath
def import_ecdsa_publickey_from_file(filepath):
"""
<Purpose>
Load the ECDSA public key object (conformant to
'securesystemslib.formats.KEY_SCHEMA') stored in 'filepath'. Return
'filepath' in securesystemslib.formats.ECDSAKEY_SCHEMA format.
If the key object in 'filepath' contains a private key, it is discarded.
<Arguments>
filepath:
<filepath>.pub file, a public key file.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'filepath' is improperly
formatted or is an unexpected key type.
<Side Effects>
The contents of 'filepath' is read and saved.
<Returns>
An ECDSA key object conformant to
'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# ECDSA key objects are saved in json and metadata format. Return the
# loaded key object in securesystemslib.formats.ECDSAKEY_SCHEMA' format that
# also includes the keyid.
ecdsa_key_metadata = securesystemslib.util.load_json_file(filepath)
ecdsa_key, junk = \
securesystemslib.keys.format_metadata_to_key(ecdsa_key_metadata)
# Raise an exception if an unexpected key type is imported. Redundant
# validation of 'keytype'. 'securesystemslib.keys.format_metadata_to_key()'
# should have fully validated 'ecdsa_key_metadata'.
if ecdsa_key['keytype'] != 'ecdsa-sha2-nistp256': # pragma: no cover
message = 'Invalid key type loaded: ' + repr(ecdsa_key['keytype'])
raise securesystemslib.exceptions.FormatError(message)
return ecdsa_key
def import_ecdsa_privatekey_from_file(filepath, password=None):
"""
<Purpose>
Import the encrypted ECDSA key file in 'filepath', decrypt it, and return
the key object in 'securesystemslib.formats.ECDSAKEY_SCHEMA' format.
The 'cryptography' library is currently supported and performs the actual
cryptographic routine.
<Arguments>
filepath:
<filepath> file, an ECDSA encrypted key file.
password:
The password, or passphrase, to import the private key (i.e., the
encrypted key file 'filepath' must be decrypted before the ECDSA key
object can be returned).
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted or the imported key object contains an invalid key type (i.e.,
not 'ecdsa-sha2-nistp256').
securesystemslib.exceptions.CryptoError, if 'filepath' cannot be decrypted.
<Side Effects>
'password' is used to decrypt the 'filepath' key file.
<Returns>
An ECDSA key object of the form: 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
"""
# Does 'filepath' have the correct format?
# Ensure the arguments have the appropriate number of objects and object
# types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch.
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
# If the caller does not provide a password argument, prompt for one.
# Password confirmation disabled here, which should ideally happen only
# when creating encrypted key files (i.e., improve usability).
if password is None: # pragma: no cover
# It is safe to specify the full path of 'filepath' in the prompt and not
# worry about leaking sensitive information about the key's location.
# However, care should be taken when including the full path in exceptions
# and log files.
password = get_password('Enter a password for the encrypted ECDSA'
' key (' + Fore.RED + filepath + Fore.RESET + '): ',
confirm=False)
# Does 'password' have the correct format?
securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
# Store the encrypted contents of 'filepath' prior to calling the decryption
# routine.
encrypted_key = None
with open(filepath, 'rb') as file_object:
encrypted_key = file_object.read()
# Decrypt the loaded key file, calling the 'cryptography' library to generate
# the derived encryption key from 'password'. Raise
# 'securesystemslib.exceptions.CryptoError' if the decryption fails.
key_object = securesystemslib.keys.decrypt_key(encrypted_key.decode('utf-8'),
password)
# Raise an exception if an unexpected key type is imported.
if key_object['keytype'] != 'ecdsa-sha2-nistp256':
message = 'Invalid key type loaded: ' + repr(key_object['keytype'])
raise securesystemslib.exceptions.FormatError(message)
# Add "keyid_hash_algorithms" so that equal ecdsa keys with different keyids
# can be associated using supported keyid_hash_algorithms.
key_object['keyid_hash_algorithms'] = \
securesystemslib.settings.HASH_ALGORITHMS
return key_object
if __name__ == '__main__':
# The interactive sessions of the documentation strings can
# be tested by running interface.py as a standalone module:
# $ python interface.py.
import doctest
doctest.testmod()
```
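A minimal usage sketch for the key-management helpers defined above, assuming this `securesystemslib.interface` module is importable; the key path and password are placeholder values, not anything taken from the original file.
```python
# Hedged usage sketch: the path and password below are illustrative placeholders.
from securesystemslib import interface

# Writes <path> and <path>.pub; the private half is encrypted with the password.
key_path = interface.generate_and_write_ecdsa_keypair(
    filepath="/tmp/demo_ecdsa_key", password="correct horse battery staple")

public_key = interface.import_ecdsa_publickey_from_file(key_path + ".pub")
private_key = interface.import_ecdsa_privatekey_from_file(
    key_path, password="correct horse battery staple")

# Both halves describe the same key, so the keyids should agree.
assert public_key["keyid"] == private_key["keyid"]
```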
#### File: securesystemslib/_vendor/ssl_match_hostname.py
```python
import re
class CertificateError(ValueError):
pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
```
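The behaviour of `match_hostname` can be illustrated against a hand-built certificate dict shaped like the output of `SSLSocket.getpeercert()`; the hostnames below are invented for the example.
```python
# The cert dict and hostnames are made up purely to exercise the code above.
from securesystemslib._vendor.ssl_match_hostname import (
    CertificateError, match_hostname)

cert = {
    "subject": ((("commonName", "example.org"),),),
    "subjectAltName": (("DNS", "example.org"), ("DNS", "*.example.org")),
}

match_hostname(cert, "www.example.org")   # matches the wildcard entry, returns None

try:
    match_hostname(cert, "attacker.test")
except CertificateError as err:
    print("rejected:", err)
```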
#### File: securesystemslib/tests/aggregate_tests.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import unittest
import subprocess
def check_usable_gpg():
"""Set `TEST_SKIP_GPG` environment variable if neither gpg2 nor gpg is
available. """
os.environ["TEST_SKIP_GPG"] = "1"
for gpg in ["gpg2", "gpg"]:
try:
subprocess.check_call([gpg, "--version"])
except OSError:
pass
else:
# If one of the two exists, we can unset the skip envvar and ...
os.environ.pop("TEST_SKIP_GPG", None)
# ... abort the availability check.
break
if __name__ == '__main__':
check_usable_gpg()
suite = unittest.TestLoader().discover("tests", top_level_dir=".")
all_tests_passed = unittest.TextTestRunner(
verbosity=1, buffer=True).run(suite).wasSuccessful()
if not all_tests_passed:
sys.exit(1)
```
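`check_usable_gpg()` only sets the `TEST_SKIP_GPG` environment variable; honouring it is left to the individual test modules. A sketch of a hypothetical consumer follows (the class and test names are invented for illustration).
```python
# Hypothetical consumer of TEST_SKIP_GPG; the names here are illustrative only.
import os
import unittest

@unittest.skipIf("TEST_SKIP_GPG" in os.environ, "neither gpg2 nor gpg found on PATH")
class TestGPGDependentFeature(unittest.TestCase):
    def test_placeholder(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()
```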
#### File: securesystemslib/tests/test_ed25519_keys.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
import os
import logging
import securesystemslib.exceptions
import securesystemslib.formats
import securesystemslib.ed25519_keys
logger = logging.getLogger('securesystemslib.test_ed25519_keys')
public, private = securesystemslib.ed25519_keys.generate_public_and_private()
FORMAT_ERROR_MSG = 'securesystemslib.exceptions.FormatError raised. Check object\'s format.'
class TestEd25519_keys(unittest.TestCase):
def setUp(self):
pass
def test_generate_public_and_private(self):
pub, priv = securesystemslib.ed25519_keys.generate_public_and_private()
# Check format of 'pub' and 'priv'.
self.assertEqual(True, securesystemslib.formats.ED25519PUBLIC_SCHEMA.matches(pub))
self.assertEqual(True, securesystemslib.formats.ED25519SEED_SCHEMA.matches(priv))
def test_create_signature(self):
global public
global private
data = b'The quick brown fox jumps over the lazy dog'
scheme = 'ed25519'
signature, scheme = securesystemslib.ed25519_keys.create_signature(public,
private, data, scheme)
# Verify format of returned values.
self.assertEqual(True,
securesystemslib.formats.ED25519SIGNATURE_SCHEMA.matches(signature))
self.assertEqual(True, securesystemslib.formats.ED25519_SIG_SCHEMA.matches(scheme))
self.assertEqual('ed25519', scheme)
# Check for improperly formatted argument.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.create_signature, 123, private, data,
scheme)
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.create_signature, public, 123, data,
scheme)
# Check for invalid 'data'.
self.assertRaises(securesystemslib.exceptions.CryptoError,
securesystemslib.ed25519_keys.create_signature, public, private, 123,
scheme)
def test_verify_signature(self):
global public
global private
data = b'The quick brown fox jumps over the lazy dog'
scheme = 'ed25519'
signature, scheme = securesystemslib.ed25519_keys.create_signature(public,
private, data, scheme)
valid_signature = securesystemslib.ed25519_keys.verify_signature(public,
scheme, signature, data)
self.assertEqual(True, valid_signature)
# Test with 'pynacl'.
valid_signature = securesystemslib.ed25519_keys.verify_signature(public,
scheme, signature, data, use_pynacl=True)
self.assertEqual(True, valid_signature)
# Test with 'pynacl', but a bad signature is provided.
bad_signature = os.urandom(64)
valid_signature = securesystemslib.ed25519_keys.verify_signature(public,
scheme, bad_signature, data, use_pynacl=True)
self.assertEqual(False, valid_signature)
# Check for improperly formatted arguments.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.verify_signature, 123, scheme,
signature, data)
# Signature method improperly formatted.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.verify_signature, public, 123,
signature, data)
# Invalid signature method.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.verify_signature, public,
'unsupported_scheme', signature, data)
# Signature not a string.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.verify_signature, public, scheme,
123, data)
# Invalid signature length, which must be exactly 64 bytes.
self.assertRaises(securesystemslib.exceptions.FormatError,
securesystemslib.ed25519_keys.verify_signature, public, scheme,
'bad_signature', data)
# Check for invalid signature and data.
# Mismatched data.
self.assertEqual(False, securesystemslib.ed25519_keys.verify_signature(
public, scheme, signature, '123'))
# Mismatched signature.
bad_signature = b'a'*64
self.assertEqual(False, securesystemslib.ed25519_keys.verify_signature(
public, scheme, bad_signature, data))
# Generated signature created with different data.
new_signature, scheme = securesystemslib.ed25519_keys.create_signature(
public, private, b'mismatched data', scheme)
self.assertEqual(False, securesystemslib.ed25519_keys.verify_signature(
public, scheme, new_signature, data))
# Run the unit tests.
if __name__ == '__main__':
unittest.main()
``` |
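Outside of a test class, the module above boils down to a simple sign/verify round trip; a minimal sketch using the same functions the test exercises, with an arbitrary message.
```python
# Sign/verify round trip with the same securesystemslib.ed25519_keys calls as above.
import securesystemslib.ed25519_keys as ed25519_keys

public, private = ed25519_keys.generate_public_and_private()
signature, scheme = ed25519_keys.create_signature(
    public, private, b"hello world", "ed25519")

print(ed25519_keys.verify_signature(public, scheme, signature, b"hello world"))  # True
print(ed25519_keys.verify_signature(public, scheme, signature, b"tampered"))     # False
```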
{
"source": "Joshua-Gordon/Evenchur",
"score": 4
} |
#### File: Joshua-Gordon/Evenchur/evenchur.py
```python
from inventory import Inventory
from data import getLocation, getItemInLocation
import sys
class Evenchur:
def __init__(self,loc):
self.location = loc
self.inv = Inventory()
self.visitedLocations = []
def loop(self):
self.location = self.visited()
print(self.location["desc"])
inp = input(">>>").split(" ")
if(inp[0] == "q"):
print("Thank you for playing wing commander!")
sys.exit(0)
if(inp[0] == "go"):
prev = self.location
if inp[1] == "n" or inp[1] == "north":
if self.location["name"] == self.location["north"]:
print("You cannot go that way.")
else:
self.location = getLocation(self.location["north"])
if inp[1] == "e" or inp[1] == "east":
if self.location["name"] == self.location["east"]:
print("You cannot go that way.")
else:
self.location = getLocation(self.location["east"])
if inp[1] == "w" or inp[1] == "west":
if self.location["name"] == self.location["west"]:
print("You cannot go that way.")
else:
self.location = getLocation(self.location["west"])
if inp[1] == "s" or inp[1] == "south":
if self.location["name"] == self.location["south"]:
print("You cannot go that way.")
else:
self.location = getLocation(self.location["south"])
if self.location == "locked":
self.location = prev
print("The way is locked.")
elif inp[0] == "items":
print(self.inv)
elif inp[0] == "look":
if len(inp) == 1:
print(self.location["look"])
for i in self.location["objects"]:
print("There is a " + str(i))
elif len(inp) == 2:
i = getItemInLocation(inp[1].lower(),self.location)
if i:
print(i.getDesc())
else:
if inp[1] in self.location["objects"]:
print("You can't take the " + inp[1])
else:
print("There is no " + inp[1] + " here!")
elif len(inp) == 3:
if inp[1] == "inv":
itm = self.inv.item(inp[2].lower())
if itm:
print(itm.getDesc())
else:
print("You don't have a " + inp[2].lower())
elif inp[0] == "take":
i = getItemInLocation(inp[1].lower(),self.location)
if i:
self.location["objects"].remove(i.getName())
self.location["items"].remove(i.getName())
self.inv.addItem(i)
print("You take the " + str(i))
else:
print("There is no " + inp[0] + " here!")
def visited(self):
for l in self.visitedLocations:
if l["name"] == self.location["name"]:
return l
return self.location
```
#### File: Joshua-Gordon/Evenchur/inventory.py
```python
class Inventory:
def __init__(self):
self.items = []
self.weight = 0
self.maxWeight = 20
def getItems(self):
return self.items
def getWeight(self):
return self.weight
def getMaxWeight(self):
return self.maxWeight
def addItem(self,item):
if self.weight + item.getWeight() < self.maxWeight:
if item in self.items:
idx = self.items.index(item)
self.items[idx].increment()
else:
self.items.append(item)
def item(self,name):
for i in self.items:
if i.getName() == name:
return i
def __str__(self):
toreturn = "Inventory:\n"
for i in self.items:
toreturn += "x"+str(i.getCount()) + "\t" + i.getName() + "\n"
return toreturn
class Item:
def __init__(self,**kwargs):
#print(kwargs)
self.name = kwargs['name']
self.desc = kwargs['desc']
self.weight = kwargs['weight']
if "count" in kwargs.keys():
self.count = kwargs['count']
else:
self.count = 1
def getName(self):
return self.name
def getDesc(self):
return self.desc
def getWeight(self):
return self.weight
def getCount(self):
return self.count
def increment(self):
self.count += 1
def __eq__(self,other):
return self.name == other.getName()
def __str__(self):
return self.name
``` |
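A short exercise of the two classes above, with arbitrary item names and weights. Note that `addItem` as written never updates `self.weight`, so the capacity check always compares against an empty inventory.
```python
# Arbitrary items used only to demonstrate stacking and lookup.
from inventory import Inventory, Item

inv = Inventory()
inv.addItem(Item(name="sword", desc="A rusty sword.", weight=5))
inv.addItem(Item(name="potion", desc="Restores health.", weight=1))
inv.addItem(Item(name="potion", desc="Restores health.", weight=1))  # stacks via __eq__

print(inv)                           # x1 sword, x2 potion
print(inv.item("potion").getDesc())  # Restores health.
```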
{
"source": "Joshua-Gordon/Language",
"score": 3
} |
#### File: Joshua-Gordon/Language/preproc.py
```python
import re
import tokenizer
def processNewlines(code):
lines = code.split("\n")
lines = list(map(lambda s: s+";",lines))
code = ''.join(lines)
return code
def removeComments(code):
lines = code.split(";")
lnew = []
multiLine = False
done = False
for l in lines:
done = False
if multiLine and not "*/" in l:
l = ""
if "/*" in l:
multiLine = True
idx = l.find("/*")
lnew.append(l[:idx])
done = True
if multiLine and "*/" in l:
multiLine = False
idx = l.find("*/")
lnew.append(l[idx+2:]+";")
done = True
if "//" in l:
idx = l.find("//")
lnew.append(l[:idx]+";")
elif not done:
lnew.append(l+";")
return ''.join(lnew)[:-1]
def expandMacros(code):
lines = code.split(";")
macros = []
lnew = []
lnew2 = []
for line in lines:
if line.strip()[0:5] == "macro":
idx = line.find("=")
macro = line[6:idx] #accounts for space after macro keyword
body = line[idx+1:]
#grab argument list
args = macro.split(")")[0][1:]
macro = macro[len(args)+2:]
args = args.split(",")
macros.append((macro,body,args))
else:
lnew.append(line+";")
for line in lnew:
#print(macros)
for macro in macros:
#find match for macro
regex = macro[0].strip()
for arg in args:
regex = regex.replace(arg,"[^ ;]+")
regex = re.compile(regex)
matchIters = [m.span() for m in re.finditer(regex,line)] #For some reason, this is not matching correctly
for match in matchIters:
#match is a tuple of (startIndex,endIndex)
string = line[match[0]:match[1]+1]
macroTokens = tokenizer.refineTokens(tokenizer.tokenize(macro[0].strip()+";","all"))
lineTokens = tokenizer.refineTokens(tokenizer.tokenize(string,"all")) #tokenize both. Should be same length.
if not len(lineTokens) == len(macroTokens):
print("Bad macro")
print(str(len(lineTokens)) + " tokens in match, but " + str(len(macroTokens)) + " tokens in macro")
print("Match:",string)
print("Match tokens:",str(lineTokens))
print("Macro:",macro[0].strip())
print("Macro tokens:",str(macroTokens))
break
argvalues = {}
for i in range(len(lineTokens)):
if macroTokens[i] == lineTokens[i]:
pass
elif macroTokens[i][0] in args:
argvalues[macroTokens[i][0]] = lineTokens[i][0] #map arguments to values
else:
print("ERROR In macro expansion!")
newline = body
for a,v in argvalues.items():
newline = newline.replace(a,v)
line = line.replace(line[match[0]:match[1]],newline)
print(line)
#old way
#line = line.replace(macro[0],macro[1])
lnew2.append(line)
return ''.join(lnew2)[:-1]
def preproc(code):
code = processNewlines(code)
#print("STAGE 1",code)
code = removeComments(code)
#print("STAGE 2",code)
code = expandMacros(code)
#print("STAGE 3",code)
return code
``` |
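A sketch of running a tiny, made-up program through the first two stages; `expandMacros` is left out here because it depends on the external `tokenizer` module.
```python
# The sample source is invented; only the comment-stripping stages are shown.
from preproc import processNewlines, removeComments

sample = """x = 1 // set x
/* a block
comment */
y = x"""

print(removeComments(processNewlines(sample)))  # comments removed, statements ';'-separated
```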
{
"source": "joshuagornall/jax",
"score": 2
} |
#### File: tf_js/quickdraw/utils.py
```python
from jax import numpy as jnp # type: ignore
import numpy as np # type: ignore
import os
import requests # type: ignore
def download_dataset(dir_path, nb_classes):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
assert os.path.isdir(dir_path), f"{dir_path} exists and is not a directory"
classes_path = os.path.join(
os.path.dirname(__file__),
'third_party/zaidalyafeai.github.io/class_names.txt')
with open(classes_path, 'r') as classes_file:
classes = (
list(map(lambda c: c.strip(), classes_file.readlines()))[:nb_classes])
url = 'https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/'
for cls in classes:
cls_filename = f"{cls}.npy"
cls_file_path = os.path.join(dir_path, cls_filename)
if os.path.exists(cls_file_path):
print(f'{cls_filename} already exists, skipping')
continue
with open(cls_file_path, "wb") as save_file:
try:
response = requests.get(url + cls_filename.replace('_', ' '))
save_file.write(response.content)
print(f'Successfully fetched {cls_filename}')
except:
print(f'Failed to fetch {cls_filename}')
return classes
def load_classes(dir_path, classes, batch_size=256, test_ratio=0.1,
max_items_per_class=4096):
x, y = np.empty([0, 784]), np.empty([0])
for idx, cls in enumerate(classes):
cls_path = os.path.join(dir_path, f"{cls}.npy")
data = np.load(cls_path)[:max_items_per_class, :]
labels = np.full(data.shape[0], idx)
x, y = np.concatenate((x, data), axis=0), np.append(y, labels)
assert x.shape[0] % batch_size == 0
x, y = x.astype(jnp.float32) / 255.0, y.astype(jnp.int32)
# Reshaping to square images
x = np.reshape(x, (x.shape[0], 28, 28, 1))
permutation = np.random.permutation(y.shape[0])
x = x[permutation, :]
y = y[permutation]
x = np.reshape(x, [x.shape[0] // batch_size, batch_size] + list(x.shape[1:]))
y = np.reshape(y, [y.shape[0] // batch_size, batch_size])
nb_test_elements = int(x.shape[0] * test_ratio)
x_test, y_test = x[:nb_test_elements], y[:nb_test_elements]
x_train, y_train = x[nb_test_elements:], y[nb_test_elements:]
return list(zip(x_train, y_train)), list(zip(x_test, y_test))
```
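Illustrative wiring of the two helpers above, assuming the module is importable as `utils`. This fetches real data from the QuickDraw storage bucket, so the directory is a placeholder and the class count is kept small.
```python
# Downloads a couple of QuickDraw classes and builds train/test batches.
from utils import download_dataset, load_classes

data_dir = "/tmp/quickdraw"                                  # placeholder location
classes = download_dataset(data_dir, nb_classes=2)           # fetches the .npy files
train, test = load_classes(data_dir, classes, batch_size=256)
print(f"{len(classes)} classes, {len(train)} train batches, {len(test)} test batches")
```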
#### File: jax/_src/profiler.py
```python
from contextlib import contextmanager
from functools import wraps
import threading
from typing import Callable, Optional
import warnings
from jax.lib import xla_bridge
from jax.lib import xla_client
def start_server(port: int):
"""Starts a profiler server on port `port`.
Using the "TensorFlow profiler" feature in `TensorBoard
<https://www.tensorflow.org/tensorboard>`_ 2.2 or newer, you can
connect to the profiler server and sample execution traces that show CPU,
GPU, and/or TPU device activity.
Returns a profiler server object. The server remains alive and listening until
the server object is destroyed.
"""
return xla_client.profiler.start_server(port)
class _ProfileState(object):
def __init__(self):
self.profile_session = None
self.log_dir = None
self.lock = threading.Lock()
_profile_state = _ProfileState()
def start_trace(log_dir):
"""Starts a profiler trace.
The trace will capture CPU, GPU, and/or TPU activity, including Python
functions and JAX on-device operations. Use ``stop_trace()`` to end the trace
and save the results to ``log_dir``.
The resulting trace can be viewed with TensorBoard. Note that TensorBoard
doesn't need to be running when collecting the trace.
Only one trace may be collected at a time. A RuntimeError will be raised if
``start_trace()`` is called while another trace is running.
Args:
log_dir: The directory to save the profiler trace to (usually the
TensorBoard log directory).
"""
with _profile_state.lock:
if _profile_state.profile_session is not None:
raise RuntimeError("Profile has already been started. "
"Only one profile may be run at a time.")
_profile_state.profile_session = xla_client.profiler.ProfilerSession()
_profile_state.log_dir = log_dir
def stop_trace():
"""Stops the currently-running profiler trace.
The trace will be saved to the ``log_dir`` passed to the corresponding
``start_trace()`` call. Raises a RuntimeError if a trace hasn't been started.
"""
with _profile_state.lock:
if _profile_state.profile_session is None:
raise RuntimeError("No profile started")
_profile_state.profile_session.stop_and_export(_profile_state.log_dir)
_profile_state.profile_session = None
_profile_state.log_dir = None
@contextmanager
def trace(log_dir):
"""Context manager to take a profiler trace.
The trace will capture CPU, GPU, and/or TPU activity, including Python
functions and JAX on-device operations.
The resulting trace can be viewed with TensorBoard. Note that TensorBoard
doesn't need to be running when collecting the trace.
Only one trace may be collected at a time. A RuntimeError will be raised if a
trace is started while another trace is running.
Args:
log_dir: The directory to save the profiler trace to (usually the
TensorBoard log directory).
"""
start_trace(log_dir)
try:
yield
finally:
stop_trace()
class TraceAnnotation(xla_client.profiler.TraceMe):
"""Context manager that generates a trace event in the profiler.
The trace event spans the duration of the code enclosed by the context.
For example:
>>> x = jnp.ones((1000, 1000))
>>> with jax.profiler.TraceAnnotation("my_label"):
... result = jnp.dot(x, x.T).block_until_ready()
This will cause a "my_label" event to show up on the trace timeline if the
event occurs while the process is being traced.
"""
pass
# TODO: remove this sometime after jax 0.2.11 is released
class TraceContext(TraceAnnotation):
def __init__(self, *args, **kwargs):
warnings.warn(
"TraceContext has been renamed to TraceAnnotation. This alias "
"will eventually be removed; please update your code.")
super().__init__(*args, **kwargs)
class StepTraceAnnotation(TraceAnnotation):
"""Context manager that generates a step trace event in the profiler.
The step trace event spans the duration of the code enclosed by the context.
The profiler will provide the performance analysis for each step trace event.
For example, it can be used to mark training steps and enable the profiler to
provide the performance analysis per step:
>>> while global_step < NUM_STEPS: # doctest: +SKIP
... with jax.profiler.StepTraceAnnotation("train", step_num=global_step): # doctest: +SKIP
... train_step() # doctest: +SKIP
... global_step += 1 # doctest: +SKIP
This will cause a "train xx" event to show up on the trace timeline if the
event occurs while the process is being traced by TensorBoard. In addition,
if using accelerators, the device trace timeline will also show a "train xx"
event. Note that "step_num" can be set as a keyword argument to pass the
global step number to the profiler.
"""
def __init__(self, name: str, **kwargs):
super().__init__(name, _r=1, **kwargs)
# TODO: remove this sometime after jax 0.2.11 is released
class StepTraceContext(StepTraceAnnotation):
def __init__(self, *args, **kwargs):
warnings.warn(
"StepTraceContext has been renamed to StepTraceAnnotation. This alias "
"will eventually be removed; please update your code.")
super().__init__(*args, **kwargs)
def annotate_function(func: Callable, name: str = None, **kwargs):
"""Decorator that generates a trace event for the execution of a function.
For example:
>>> @jax.profiler.annotate_function
... def f(x):
... return jnp.dot(x, x.T).block_until_ready()
>>>
>>> result = f(jnp.ones((1000, 1000)))
This will cause an "f" event to show up on the trace timeline if the
function execution occurs while the process is being traced by TensorBoard.
Arguments can be passed to the decorator via :py:func:`functools.partial`.
>>> from functools import partial
>>> @partial(jax.profiler.annotate_function, name="event_name")
... def f(x):
... return jnp.dot(x, x.T).block_until_ready()
>>> result = f(jnp.ones((1000, 1000)))
"""
name = name or getattr(func, '__qualname__', None)
name = name or func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
with TraceAnnotation(name, **kwargs):
return func(*args, **kwargs)
return wrapper
# TODO: remove this sometime after jax 0.2.11 is released
def trace_function(*args, **kwargs):
warnings.warn(
"trace_function has been renamed to annotate_function. This alias "
"will eventually be removed; please update your code.")
return annotate_function(*args, **kwargs)
def device_memory_profile(backend: Optional[str] = None) -> bytes:
"""Captures a JAX device memory profile as ``pprof``-format protocol buffer.
A device memory profile is a snapshot of the state of memory that describes the JAX
:class:`jax.DeviceArray` and executable objects present in memory and their
allocation sites.
For more information how to use the device memory profiler, see
:doc:`/device_memory_profiling`.
The profiling system works by instrumenting JAX on-device allocations,
capturing a Python stack trace for each allocation. The instrumentation is
always enabled; :func:`device_memory_profile` provides an API to capture it.
The output of :func:`device_memory_profile` is a binary protocol buffer that
can be interpreted and visualized by the `pprof tool
<https://github.com/google/pprof>`_.
Args:
backend: optional; the name of the JAX backend for which the device memory
profile should be collected.
Returns:
A byte string containing a binary `pprof`-format protocol buffer.
"""
return xla_client.heap_profile(xla_bridge.get_backend(backend))
def save_device_memory_profile(filename, backend: Optional[str] = None):
"""Collects a device memory profile and writes it to a file.
:func:`save_device_memory_profile` is a convenience wrapper around :func:`device_memory_profile`
that saves its output to a ``filename``. See the
:func:`device_memory_profile` documentation for more information.
Args:
filename: the filename to which the profile should be written.
backend: optional; the name of the JAX backend for which the device memory
profile should be collected.
"""
profile = device_memory_profile(backend)
with open(filename, "wb") as f:
f.write(profile)
```
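A minimal sketch of the tracing entry points defined above; the log directory and output path are placeholders, and TensorBoard is only needed afterwards to view the trace.
```python
# Collects a short trace plus a device memory snapshot; paths are placeholders.
import jax.numpy as jnp
import jax.profiler

x = jnp.ones((1000, 1000))

with jax.profiler.trace("/tmp/jax-trace"):                     # start_trace/stop_trace pair
    with jax.profiler.StepTraceAnnotation("matmul", step_num=0):
        jnp.dot(x, x.T).block_until_ready()

jax.profiler.save_device_memory_profile("/tmp/memory.prof")    # pprof-format snapshot
```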
#### File: jax/tests/jaxpr_util_test.py
```python
from absl.testing import absltest
from jax import jaxpr_util, jit, make_jaxpr, numpy as jnp
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
class JaxprStatsTest(jtu.JaxTestCase):
def test_primitives(self):
def f(x, y):
s = jit(jnp.sin)(x)
return jnp.sin(s) + jnp.cos(y)
hist = jaxpr_util.primitives(make_jaxpr(f)(1., 1.).jaxpr)
for k in ['add', 'sin', 'cos', 'xla_call']:
assert k in hist, k
self.assertEqual(hist['sin'], 2)
self.assertTrue(all(count == 1 for k, count in hist.items() if k != 'sin'))
def test_primitives_by_source(self):
def f(x, y):
s = jnp.sin(x)
return jnp.sin(s) + jnp.cos(y)
hist = jaxpr_util.primitives_by_source(make_jaxpr(f)(1., 1.).jaxpr)
sin_keys = [k for k in hist.keys() if k.startswith('sin @ ')]
rem_keys = [k for k in hist.keys() if not k.startswith('sin @ ')]
self.assertEqual(sum(hist[k] for k in sin_keys), 2)
self.assertTrue(all(hist[k] == 1 for k in rem_keys))
def test_primitives_by_shape(self):
def f(x, y):
def sub(x, y):
return jnp.sum(jnp.array([x, y])), y
s, _ = jit(sub)(x, y)
return jnp.sin(s) + jnp.cos(y)
hist = jaxpr_util.primitives_by_shape(make_jaxpr(f)(1., 1.).jaxpr)
t = '64' if config.x64_enabled else '32'
shapes = [
f'add :: float{t}[]',
f'sin :: float{t}[]',
f'cos :: float{t}[]',
f'reduce_sum :: float{t}[]',
f'concatenate :: float{t}[2]',
f'xla_call :: float{t}[] *',
]
for k in shapes:
self.assertEqual(hist[k], 1)
def test_source_locations(self):
def f(x, y):
s = jnp.sin(x) # sin
return jnp.sin(s) + jnp.cos(y) # sin, cos, add
hist = jaxpr_util.source_locations(make_jaxpr(f)(1., 1.).jaxpr)
self.assertEqual(sum(hist.values()), 4)
def test_print_histogram(self):
def f(x, y):
s = jit(jnp.sin)(x)
return jnp.sin(s) + jnp.cos(y)
hist = jaxpr_util.primitives_by_source(make_jaxpr(f)(1., 1.).jaxpr)
jaxpr_util.print_histogram(hist)
if __name__ == "__main__":
absltest.main()
``` |
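The same histogram helpers can be used interactively; a small sketch assuming `jax.jaxpr_util` is importable exactly as in the test's own imports.
```python
# Builds a jaxpr for a toy function and prints primitive histograms.
from jax import jaxpr_util, make_jaxpr, numpy as jnp

def f(x):
    return jnp.sin(x) + jnp.cos(x)

jaxpr = make_jaxpr(f)(1.0).jaxpr
jaxpr_util.print_histogram(jaxpr_util.primitives(jaxpr))           # counts per primitive
jaxpr_util.print_histogram(jaxpr_util.primitives_by_shape(jaxpr))  # counts per primitive/shape
```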
{
"source": "joshua-gould/anndata",
"score": 2
} |
#### File: _io/specs/methods.py
```python
from __future__ import annotations
from os import PathLike
from collections.abc import Mapping
from functools import partial
from typing import Union
from types import MappingProxyType
from warnings import warn
import h5py
import numpy as np
import pandas as pd
from scipy import sparse
import anndata as ad
from anndata import AnnData, Raw
from anndata._core.index import _normalize_indices
from anndata._core.merge import intersect_keys
from anndata._core.sparse_dataset import SparseDataset
from anndata._core import views
from anndata.compat import (
Literal,
OverloadedDict,
ZarrArray,
ZarrGroup,
_read_attr,
_from_fixed_length_strings,
_decode_structured_array,
)
from anndata._io.utils import report_write_key_on_error, check_key, H5PY_V3
from anndata._warnings import OldFormatWarning
from .registry import (
_REGISTRY,
IOSpec,
get_spec,
read_elem,
read_elem_partial,
write_elem,
)
H5Array = h5py.Dataset
H5Group = h5py.Group
####################
# Dispatch methods #
####################
# def is_full_slice(idx):
# if isinstance(idx, tuple)len(idx) == 1:
# if isinstance(idx, type(None)):
# return True
# elif idx is Ellipsis:
# return True
# elif isinstance(idx, tuple):
# for el in idx:
# if isinstance(el, type(None)):
# pass
# elif isinstance(el, slice):
# if el != slice(None):
# return False
# else:
# return False
# return True
# return False
################################
# Fallbacks / backwards compat #
################################
# Note: there is no need for writing in a backwards compatible format, maybe
@_REGISTRY.register_read(H5Group, IOSpec("", ""))
@_REGISTRY.register_read(H5Array, IOSpec("", ""))
def read_basic(elem):
from anndata._io import h5ad
warn(
f"Element '{elem.name}' was written without encoding metadata.",
OldFormatWarning,
stacklevel=3,
)
if isinstance(elem, Mapping):
# Backwards compat sparse arrays
if "h5sparse_format" in elem.attrs:
return SparseDataset(elem).to_memory()
return {k: read_elem(v) for k, v in elem.items()}
elif isinstance(elem, h5py.Dataset):
return h5ad.read_dataset(elem) # TODO: Handle legacy
@_REGISTRY.register_read(ZarrGroup, IOSpec("", ""))
@_REGISTRY.register_read(ZarrArray, IOSpec("", ""))
def read_basic_zarr(elem):
from anndata._io import zarr
warn(
f"Element '{elem.name}' was written without encoding metadata.",
OldFormatWarning,
stacklevel=3,
)
if isinstance(elem, Mapping):
# Backwards compat sparse arrays
if "h5sparse_format" in elem.attrs:
return SparseDataset(elem).to_memory()
return {k: read_elem(v) for k, v in elem.items()}
elif isinstance(elem, ZarrArray):
return zarr.read_dataset(elem) # TODO: Handle legacy
# @_REGISTRY.register_read_partial(IOSpec("", ""))
# def read_basic_partial(elem, *, items=None, indices=(slice(None), slice(None))):
# if isinstance(elem, Mapping):
# return _read_partial(elem, items=items, indices=indices)
# elif indices != (slice(None), slice(None)):
# return elem[indices]
# else:
# return elem[()]
###########
# AnnData #
###########
def read_indices(group):
obs_group = group["obs"]
obs_idx_elem = obs_group[_read_attr(obs_group.attrs, "_index")]
obs_idx = read_elem(obs_idx_elem)
var_group = group["var"]
var_idx_elem = var_group[_read_attr(var_group.attrs, "_index")]
var_idx = read_elem(var_idx_elem)
return obs_idx, var_idx
def read_partial(
pth: PathLike,
*,
obs_idx=slice(None),
var_idx=slice(None),
X=True,
obs=None,
var=None,
obsm=None,
varm=None,
obsp=None,
varp=None,
layers=None,
uns=None,
) -> ad.AnnData:
result = {}
with h5py.File(pth, "r") as f:
obs_idx, var_idx = _normalize_indices((obs_idx, var_idx), *read_indices(f))
result["obs"] = read_elem_partial(
f["obs"], items=obs, indices=(obs_idx, slice(None))
)
result["var"] = read_elem_partial(
f["var"], items=var, indices=(var_idx, slice(None))
)
if X:
result["X"] = read_elem_partial(f["X"], indices=(obs_idx, var_idx))
else:
result["X"] = sparse.csr_matrix((len(result["obs"]), len(result["var"])))
if "obsm" in f:
result["obsm"] = _read_partial(
f["obsm"], items=obsm, indices=(obs_idx, slice(None))
)
if "varm" in f:
result["varm"] = _read_partial(
f["varm"], items=varm, indices=(var_idx, slice(None))
)
if "obsp" in f:
result["obsp"] = _read_partial(
f["obsp"], items=obsp, indices=(obs_idx, obs_idx)
)
if "varp" in f:
result["varp"] = _read_partial(
f["varp"], items=varp, indices=(var_idx, var_idx)
)
if "layers" in f:
result["layers"] = _read_partial(
f["layers"], items=layers, indices=(obs_idx, var_idx)
)
if "uns" in f:
result["uns"] = _read_partial(f["uns"], items=uns)
return ad.AnnData(**result)
def _read_partial(group, *, items=None, indices=(slice(None), slice(None))):
if group is None:
return None
if items is None:
keys = intersect_keys((group,))
else:
keys = intersect_keys((group, items))
result = {}
for k in keys:
if isinstance(items, Mapping):
next_items = items.get(k, None)
else:
next_items = None
result[k] = read_elem_partial(group[k], items=next_items, indices=indices)
return result
@_REGISTRY.register_write(ZarrGroup, AnnData, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_write(H5Group, AnnData, IOSpec("anndata", "0.1.0"))
def write_anndata(f, k, adata, dataset_kwargs=MappingProxyType({})):
g = f.require_group(k)
write_elem(g, "X", adata.X, dataset_kwargs=dataset_kwargs)
write_elem(g, "obs", adata.obs, dataset_kwargs=dataset_kwargs)
write_elem(g, "var", adata.var, dataset_kwargs=dataset_kwargs)
write_elem(g, "obsm", dict(adata.obsm), dataset_kwargs=dataset_kwargs)
write_elem(g, "varm", dict(adata.varm), dataset_kwargs=dataset_kwargs)
write_elem(g, "obsp", dict(adata.obsp), dataset_kwargs=dataset_kwargs)
write_elem(g, "varp", dict(adata.varp), dataset_kwargs=dataset_kwargs)
write_elem(g, "layers", dict(adata.layers), dataset_kwargs=dataset_kwargs)
write_elem(g, "uns", dict(adata.uns), dataset_kwargs=dataset_kwargs)
write_elem(g, "raw", adata.raw, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("raw", "0.1.0"))
def read_anndata(elem):
d = {}
for k in [
"X",
"obs",
"var",
"obsm",
"varm",
"obsp",
"varp",
"layers",
"uns",
"raw",
]:
if k in elem:
d[k] = read_elem(elem[k])
if "X" in d:
d["dtype"] = d["X"].dtype
return AnnData(**d)
@_REGISTRY.register_write(H5Group, Raw, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, Raw, IOSpec("raw", "0.1.0"))
def write_raw(f, k, raw, dataset_kwargs=MappingProxyType({})):
g = f.create_group(k)
write_elem(g, "X", raw.X, dataset_kwargs=dataset_kwargs)
write_elem(g, "var", raw.var, dataset_kwargs=dataset_kwargs)
write_elem(g, "varm", dict(raw.varm), dataset_kwargs=dataset_kwargs)
############
# Mappings #
############
@_REGISTRY.register_read(H5Group, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dict", "0.1.0"))
def read_mapping(elem):
return {k: read_elem(v) for k, v in elem.items()}
@_REGISTRY.register_write(H5Group, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(H5Group, dict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, dict, IOSpec("dict", "0.1.0"))
def write_mapping(f, k, v, dataset_kwargs=MappingProxyType({})):
g = f.create_group(k)
for sub_k, sub_v in v.items():
write_elem(g, sub_k, sub_v, dataset_kwargs=dataset_kwargs)
##############
# np.ndarray #
##############
@_REGISTRY.register_write(H5Group, list, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, list, IOSpec("array", "0.2.0"))
def write_list(f, k, elem, dataset_kwargs=MappingProxyType({})):
write_elem(f, k, np.array(elem), dataset_kwargs=dataset_kwargs)
# TODO: Is this the right behaviour for MaskedArrays?
# It's in the `AnnData.concatenate` docstring, but should we keep it?
@_REGISTRY.register_write(H5Group, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
def write_basic(f, k, elem, dataset_kwargs=MappingProxyType({})):
"""Write methods which underlying library handles nativley."""
f.create_dataset(k, data=elem, **dataset_kwargs)
@_REGISTRY.register_read(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array(elem):
return elem[()]
@_REGISTRY.register_read_partial(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array_partial(elem, *, items=None, indices=(slice(None, None))):
return elem[indices]
# arrays of strings
@_REGISTRY.register_read(H5Array, IOSpec("string-array", "0.2.0"))
def read_string_array(d):
return read_array(d.asstr())
@_REGISTRY.register_read_partial(H5Array, IOSpec("string-array", "0.2.0"))
def read_string_array_partial(d, items=None, indices=slice(None)):
return read_array_partial(d.asstr(), items=items, indices=indices)
@_REGISTRY.register_write(
H5Group, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
H5Group, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(H5Group, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array(f, k, elem, dataset_kwargs=MappingProxyType({})):
"""Write methods which underlying library handles nativley."""
str_dtype = h5py.special_dtype(vlen=str)
f.create_dataset(k, data=elem.astype(str_dtype), dtype=str_dtype, **dataset_kwargs)
@_REGISTRY.register_write(
ZarrGroup, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
ZarrGroup, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
import numcodecs
f.create_dataset(
k,
shape=elem.shape,
dtype=object,
object_codec=numcodecs.VLenUTF8(),
**dataset_kwargs,
)
f[k][:] = elem
###############
# np.recarray #
###############
def _to_hdf5_vlen_strings(value: np.ndarray) -> np.ndarray:
"""This corrects compound dtypes to work with hdf5 files."""
new_dtype = []
for dt_name, (dt_type, _) in value.dtype.fields.items():
if dt_type.kind in ("U", "O"):
new_dtype.append((dt_name, h5py.special_dtype(vlen=str)))
else:
new_dtype.append((dt_name, dt_type))
return value.astype(new_dtype)
@_REGISTRY.register_read(H5Array, IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("rec-array", "0.2.0"))
def read_recarray(d):
value = d[()]
dtype = value.dtype
value = _from_fixed_length_strings(value)
if H5PY_V3:
value = _decode_structured_array(value, dtype=dtype)
return value
@_REGISTRY.register_write(H5Group, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray(f, k, elem, dataset_kwargs=MappingProxyType({})):
f.create_dataset(k, data=_to_hdf5_vlen_strings(elem), **dataset_kwargs)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
from anndata.compat import _to_fixed_length_strings
f.create_dataset(k, data=_to_fixed_length_strings(elem), **dataset_kwargs)
#################
# Sparse arrays #
#################
def write_sparse_compressed(
f, key, value, fmt: Literal["csr", "csc"], dataset_kwargs=MappingProxyType({})
):
g = f.create_group(key)
g.attrs["shape"] = value.shape
# Allow resizing
if "maxshape" not in dataset_kwargs:
dataset_kwargs = dict(maxshape=(None,), **dataset_kwargs)
g.create_dataset("data", data=value.data, **dataset_kwargs)
g.create_dataset("indices", data=value.indices, **dataset_kwargs)
g.create_dataset("indptr", data=value.indptr, **dataset_kwargs)
write_csr = partial(write_sparse_compressed, fmt="csr")
write_csc = partial(write_sparse_compressed, fmt="csc")
_REGISTRY.register_write(H5Group, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
write_csr
)
_REGISTRY.register_write(H5Group, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
write_csr
)
_REGISTRY.register_write(H5Group, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
write_csc
)
_REGISTRY.register_write(H5Group, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
write_csc
)
_REGISTRY.register_write(ZarrGroup, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
write_csr
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
write_csr
)
_REGISTRY.register_write(ZarrGroup, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
write_csc
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
write_csc
)
@_REGISTRY.register_write(H5Group, SparseDataset, IOSpec("", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, SparseDataset, IOSpec("", "0.1.0"))
def write_sparse_dataset(f, k, elem, dataset_kwargs=MappingProxyType({})):
write_sparse_compressed(
f, k, elem.to_backed(), fmt=elem.format_str, dataset_kwargs=dataset_kwargs
)
# TODO: Cleaner way to do this
f[k].attrs["encoding-type"] = f"{elem.format_str}_matrix"
f[k].attrs["encoding-version"] = "0.1.0"
@_REGISTRY.register_read(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("csr_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csr_matrix", "0.1.0"))
def read_sparse(elem):
return SparseDataset(elem).to_memory()
@_REGISTRY.register_read_partial(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read_partial(H5Group, IOSpec("csr_matrix", "0.1.0"))
def read_sparse_partial(elem, *, items=None, indices=(slice(None), slice(None))):
return SparseDataset(elem)[indices]
##############
# DataFrames #
##############
@_REGISTRY.register_write(H5Group, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(H5Group, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
def write_dataframe(f, key, df, dataset_kwargs=MappingProxyType({})):
# Check arguments
for reserved in ("_index",):
if reserved in df.columns:
raise ValueError(f"{reserved!r} is a reserved name for dataframe columns.")
group = f.create_group(key)
col_names = [check_key(c) for c in df.columns]
group.attrs["column-order"] = col_names
if df.index.name is not None:
index_name = df.index.name
else:
index_name = "_index"
group.attrs["_index"] = check_key(index_name)
# ._values is "the best" array representation. It's the true array backing the
# object, where `.values` is always a np.ndarray and .array is always a pandas
# array.
write_elem(group, index_name, df.index._values, dataset_kwargs=dataset_kwargs)
for colname, series in df.items():
# TODO: this should write the "true" representation of the series (i.e. the underlying array or ndarray depending)
write_elem(group, colname, series._values, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe(elem):
columns = list(_read_attr(elem.attrs, "column-order"))
idx_key = _read_attr(elem.attrs, "_index")
df = pd.DataFrame(
{k: read_elem(elem[k]) for k in columns},
index=read_elem(elem[idx_key]),
columns=list(columns),
)
if idx_key != "_index":
df.index.name = idx_key
return df
# TODO: Figure out what indices is allowed to be at each element
@_REGISTRY.register_read_partial(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe_partial(
elem, *, items=None, indices=(slice(None, None), slice(None, None))
):
if items is not None:
columns = [
col for col in _read_attr(elem.attrs, "column-order") if col in items
]
else:
columns = list(_read_attr(elem.attrs, "column-order"))
idx_key = _read_attr(elem.attrs, "_index")
df = pd.DataFrame(
{k: read_elem_partial(elem[k], indices=indices[0]) for k in columns},
index=read_elem_partial(elem[idx_key], indices=indices[0]),
columns=list(columns),
)
if idx_key != "_index":
df.index.name = idx_key
return df
# Backwards compat dataframe reading
@_REGISTRY.register_read(H5Group, IOSpec("dataframe", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dataframe", "0.1.0"))
def read_dataframe_0_1_0(elem):
columns = _read_attr(elem.attrs, "column-order")
idx_key = _read_attr(elem.attrs, "_index")
df = pd.DataFrame(
{k: read_series(elem[k]) for k in columns},
index=read_series(elem[idx_key]),
columns=list(columns),
)
if idx_key != "_index":
df.index.name = idx_key
return df
def read_series(dataset: h5py.Dataset) -> Union[np.ndarray, pd.Categorical]:
# For reading older dataframes
if "categories" in dataset.attrs:
if isinstance(dataset, ZarrArray):
import zarr
parent_name = dataset.name.rstrip(dataset.basename)
parent = zarr.open(dataset.store)[parent_name]
else:
parent = dataset.parent
categories_dset = parent[_read_attr(dataset.attrs, "categories")]
categories = read_elem(categories_dset)
ordered = bool(_read_attr(categories_dset.attrs, "ordered", False))
return pd.Categorical.from_codes(
read_elem(dataset), categories, ordered=ordered
)
else:
return read_elem(dataset)
@_REGISTRY.register_read_partial(H5Group, IOSpec("dataframe", "0.1.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("dataframe", "0.1.0"))
def read_partial_dataframe_0_1_0(
elem, *, items=None, indices=(slice(None), slice(None))
):
if items is None:
items = slice(None)
else:
items = list(items)
return read_elem(elem)[items].iloc[indices[0]]
###############
# Categorical #
###############
@_REGISTRY.register_write(H5Group, pd.Categorical, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, pd.Categorical, IOSpec("categorical", "0.2.0"))
def write_categorical(f, k, v, dataset_kwargs=MappingProxyType({})):
g = f.create_group(k)
g.attrs["ordered"] = bool(v.ordered)
write_elem(g, "codes", v.codes, dataset_kwargs=dataset_kwargs)
write_elem(g, "categories", v.categories._values, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("categorical", "0.2.0"))
def read_categorical(elem):
return pd.Categorical.from_codes(
codes=read_elem(elem["codes"]),
categories=read_elem(elem["categories"]),
ordered=_read_attr(elem.attrs, "ordered"),
)
@_REGISTRY.register_read_partial(H5Group, IOSpec("categorical", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("categorical", "0.2.0"))
def read_partial_categorical(elem, *, items=None, indices=(slice(None),)):
return pd.Categorical.from_codes(
codes=read_elem_partial(elem["codes"], indices=indices),
categories=read_elem(elem["categories"]),
ordered=_read_attr(elem.attrs, "ordered"),
)
####################
# Pandas nullables #
####################
@_REGISTRY.register_write(
H5Group, pd.arrays.IntegerArray, IOSpec("nullable-integer", "0.1.0")
)
@_REGISTRY.register_write(
ZarrGroup, pd.arrays.IntegerArray, IOSpec("nullable-integer", "0.1.0")
)
@_REGISTRY.register_write(
H5Group, pd.arrays.BooleanArray, IOSpec("nullable-boolean", "0.1.0")
)
@_REGISTRY.register_write(
ZarrGroup, pd.arrays.BooleanArray, IOSpec("nullable-boolean", "0.1.0")
)
def write_nullable_integer(f, k, v, dataset_kwargs=MappingProxyType({})):
g = f.create_group(k)
if v._mask is not None:
write_elem(g, "mask", v._mask, dataset_kwargs=dataset_kwargs)
write_elem(g, "values", v._data, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("nullable-integer", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("nullable-integer", "0.1.0"))
def read_nullable_integer(elem):
if "mask" in elem:
return pd.arrays.IntegerArray(
read_elem(elem["values"]), mask=read_elem(elem["mask"])
)
else:
return pd.array(read_elem(elem["values"]))
@_REGISTRY.register_read(H5Group, IOSpec("nullable-boolean", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("nullable-boolean", "0.1.0"))
def read_nullable_boolean(elem):
if "mask" in elem:
return pd.arrays.BooleanArray(
read_elem(elem["values"]), mask=read_elem(elem["mask"])
)
else:
return pd.array(read_elem(elem["values"]))
###########
# Scalars #
###########
@_REGISTRY.register_read(H5Array, IOSpec("numeric-scalar", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("numeric-scalar", "0.2.0"))
def read_scalar(elem):
return elem[()]
def write_scalar(f, key, value, dataset_kwargs=MappingProxyType({})):
return f.create_dataset(key, data=np.array(value), **dataset_kwargs)
def write_hdf5_scalar(f, key, value, dataset_kwargs=MappingProxyType({})):
# Can’t compress scalars, error is thrown
dataset_kwargs = dataset_kwargs.copy()
dataset_kwargs.pop("compression", None)
dataset_kwargs.pop("compression_opts", None)
f.create_dataset(key, data=np.array(value), **dataset_kwargs)
# fmt: off
for numeric_scalar_type in [
bool, np.bool_,
np.uint8, np.uint16, np.uint32, np.uint64,
int, np.int8, np.int16, np.int32, np.int64,
float, *np.floating.__subclasses__(),
*np.complexfloating.__subclasses__(),
]:
_REGISTRY.register_write(H5Group, numeric_scalar_type, IOSpec("numeric-scalar", "0.2.0"))(write_hdf5_scalar)
_REGISTRY.register_write(ZarrGroup, numeric_scalar_type, IOSpec("numeric-scalar", "0.2.0"))(write_scalar)
# fmt: on
_REGISTRY.register_write(ZarrGroup, str, IOSpec("string", "0.2.0"))(write_scalar)
_REGISTRY.register_write(ZarrGroup, np.str_, IOSpec("string", "0.2.0"))(write_scalar)
@_REGISTRY.register_read(H5Array, IOSpec("string", "0.2.0"))
def read_hdf5_string(elem):
return elem.asstr()[()]
@_REGISTRY.register_read(ZarrArray, IOSpec("string", "0.2.0"))
def read_zarr_string(elem):
return str(elem[()])
_REGISTRY.register_read(H5Array, IOSpec("bytes", "0.2.0"))(read_scalar)
_REGISTRY.register_read(ZarrArray, IOSpec("bytes", "0.2.0"))(read_scalar)
@_REGISTRY.register_write(H5Group, np.str_, IOSpec("string", "0.2.0"))
@_REGISTRY.register_write(H5Group, str, IOSpec("string", "0.2.0"))
def write_string(f, k, v, dataset_kwargs):
dataset_kwargs = dataset_kwargs.copy()
dataset_kwargs.pop("compression", None)
dataset_kwargs.pop("compression_opts", None)
f.create_dataset(
k, data=np.array(v, dtype=h5py.string_dtype(encoding="utf-8")), **dataset_kwargs
)
# @_REGISTRY.register_write(np.bytes_, IOSpec("bytes", "0.2.0"))
# @_REGISTRY.register_write(bytes, IOSpec("bytes", "0.2.0"))
# def write_string(f, k, v, dataset_kwargs):
# if "compression" in dataset_kwargs:
# dataset_kwargs = dict(dataset_kwargs)
# dataset_kwargs.pop("compression")
# f.create_dataset(k, data=np.array(v), **dataset_kwargs)
```
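A minimal round-trip sketch of how the registered writers above are typically exercised. It assumes the dispatching helpers are exposed as `write_elem`/`read_elem` (as in `anndata.experimental`); the file name and column names are illustrative.
```python
import h5py
import pandas as pd
from anndata.experimental import read_elem, write_elem  # assumed public entry points

# A dataframe mixing a categorical and a nullable integer column, so the
# "dataframe", "categorical" and "nullable-integer" writers are all exercised.
df = pd.DataFrame(
    {
        "celltype": pd.Categorical(["B", "T", "B"]),
        "n_genes": pd.array([120, None, 98], dtype="Int64"),
    },
    index=["c0", "c1", "c2"],
)

with h5py.File("demo.h5ad", "w") as f:
    write_elem(f, "obs", df)          # each column dispatched through the registry

with h5py.File("demo.h5ad", "r") as f:
    roundtrip = read_elem(f["obs"])   # categorical / nullable dtypes are restored

print(roundtrip.dtypes)
```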
#### File: anndata/tests/test_io_warnings.py
```python
from importlib.util import find_spec
from pathlib import Path
import warnings
import pytest
import anndata as ad
from anndata.tests.helpers import gen_adata
@pytest.mark.skipif(not find_spec("scanpy"), reason="Scanpy is not installed")
def test_old_format_warning_thrown():
import scanpy as sc
with pytest.warns(ad._warnings.OldFormatWarning):
pth = Path(sc.datasets.__file__).parent / "10x_pbmc68k_reduced.h5ad"
ad.read_h5ad(pth)
def test_old_format_warning_not_thrown(tmp_path):
pth = tmp_path / "current.h5ad"
adata = gen_adata((20, 10))
adata.write_h5ad(pth)
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("always", ad._warnings.OldFormatWarning)
ad.read_h5ad(pth)
if len(record) != 0:
msg_content = "\n".join(
[f"\t{w.category.__name__}('{w.message}')" for w in record]
)
pytest.fail(
f"Warnings were thrown when they shouldn't be. Got:\n\n{msg_content}"
)
``` |
{
"source": "JoshuaGRubin/AI-Tweet-Validator",
"score": 4
} |
#### File: tweetvalidator/data_processing/filter_tweets.py
```python
import os
import re
import json
CONFIG_PATH = "../../config.json"
def filter_tweets_from_files(input_file_path, output_file_path,
filters, min_tweet_characters):
""" Reads tweets from a json file of [[tweet, date],...], removes
        text matching the regexps in filters, and if more than
<min_tweet_characters> are left, writes to the output location.
Args:
input_file_path (str): where to look for user file
output_file_path (str): where to deposit filtered file
filters (list): List of reg. expressions whose matches will be removed.
min_tweet_characters (int): Tweets with fewer characters will be
deleted.
"""
output_tweets = []
print('Processing ' + input_file_path + '.', end=' ')
with open(input_file_path, 'r') as file:
in_data = json.loads(file.read())
for tweet in in_data:
filtered_tweet = tweet[0]
# Remove text matching each of the filters sequentially.
for reFilter in filters:
filtered_tweet = re.sub(reFilter,'', filtered_tweet)
# Trim whitespace
filtered_tweet = filtered_tweet.strip()
# Pitch if below min character threshold
if len(filtered_tweet) >= min_tweet_characters:
output_tweets.append([filtered_tweet, tweet[1]])
print(str(len(output_tweets)), 'tweets processed.')
with open(output_file_path, 'w') as file:
json.dump(output_tweets, file)
def filter_tweets_from_directories(input_directory_path, output_directory_path,
filters, min_tweet_characters):
"""Reads raw twitter output and filters based on a list of reg expressions.
Files are json formatted lists of [tweet, date].
Args:
input_directory_path (str): where to look for files starting with @
containing raw tweets
output_directory_path (str): where to deposit identically named files
with filters applied.
filters (list): List of reg. expressions whose matches will be removed.
min_tweet_characters (int): Tweets with fewer characters will be
deleted.
"""
# Look for files beginning with @, i.e. twitter handles
tweet_files = [f for f in os.listdir(input_directory_path) if f[0]=='@']
for file_name in tweet_files:
input_file_path = os.path.join(input_directory_path, file_name)
output_file_path = os.path.join(output_directory_path, file_name)
filter_tweets_from_files(input_file_path, output_file_path,
filters, min_tweet_characters)
```
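A hypothetical usage sketch for the helpers above; the import path follows the file location, while the directory names, regular expressions, and character threshold are illustrative rather than taken from the project's `config.json`.
```python
import os

from tweetvalidator.data_processing.filter_tweets import filter_tweets_from_directories

# Strip links and @mentions before measuring the remaining tweet length.
url_filter     = r"https?://\S+"
mention_filter = r"@\w+"

raw_dir      = "data/raw_tweets"        # files named @handle, as produced upstream
filtered_dir = "data/filtered_tweets"
os.makedirs(filtered_dir, exist_ok=True)

filter_tweets_from_directories(raw_dir, filtered_dir,
                               filters=[url_filter, mention_filter],
                               min_tweet_characters=20)
```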
#### File: tweetvalidator/models/base_model.py
```python
class Model:
""" Base class for binary classifier to identify fraudulent tweet.
"""
def set_hyperparameters(self, params = {}):
""" Set model hyperparameters e.g. classifier threshold.
Args:
params (dict): Inference-time settings (e.g. cos-sim threshold)"""
self.params = params
``` |
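A sketch of the subclassing pattern the base class suggests; the threshold hyperparameter and the `predict` method are assumptions made for illustration, not part of the project's API.
```python
class ThresholdModel(Model):
    """Toy classifier that flags a tweet when a similarity score is too low."""

    def __init__(self, threshold=0.5):
        self.set_hyperparameters({"threshold": threshold})

    def predict(self, similarity_score):
        # True means "likely not written by the claimed author" under this toy rule.
        return similarity_score < self.params["threshold"]


model = ThresholdModel(threshold=0.4)
print(model.predict(0.25))  # True
```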
{
"source": "joshuagruenstein/lhyra",
"score": 3
} |
#### File: lhyra/python/closestpoints.py
```python
from random import random, shuffle, randint
import math
from time import time, sleep
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from lhyra import Solver, FeatureExtractor
def random_points(length: int=100):
return [(random(), random()) for _ in range(length)]
class PointsFeatureExtractor(FeatureExtractor):
@property
def shape(self):
"""
Get the output shape of the feature extractor.
:return: A list of integers representing the output's dimensions.
"""
return [1] # Length, (mean, variance)?
def __call__(self, data):
"""
Call extractor on given data.
:param data: A piece of data to extract the parameters of.
:return: Floats between 0 and 1 of shape self.shape.
"""
return [len(data)]
def point_dist(p1, p2):
return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
def points_brute(points, hook, _={}):
min_dist = float('inf')
closest_pair = None
for i in range(len(points)):
for j in range(i+1, len(points)):
p, q = points[i], points[j]
if point_dist(p,q) < min_dist:
min_dist = point_dist(p,q)
closest_pair = (p,q)
return closest_pair,0
def points_smart(points, hook, _={}):
#sleep(0.00001)
if len(points) <= 3:
return points_brute(points,None)
psorted = sorted(points, key=lambda p:p[0])
right, right_overhead = hook(psorted[:len(psorted)//2])
left, left_overhead = hook(psorted[len(psorted)//2:])
mid = psorted[len(psorted)//2][0]
dleft, dright = point_dist(*left), point_dist(*right)
d = min(dleft, dright)
strip = []
for x,y in points:
if point_dist((x,y),(mid,y)) <= d:
strip.append((x,y))
strip = sorted(strip, key=lambda p:p[1])
min_dist = d
closest_pair = None
for i, point in enumerate(strip):
for j in range(i+1, min(len(strip),i+8)):
if point_dist(point,strip[j]) < min_dist:
min_dist = point_dist(point,strip[j])
closest_pair = (point,strip[j])
if closest_pair is None:
return (left if dleft < dright else right), right_overhead + left_overhead
return closest_pair, right_overhead + left_overhead
# def timer(func, iters=10):
# start = time()
# for _ in range(iters):
# func()
# return (time() - start)/iters
# dataset = [[(random(), random()) for _ in range(2**i)] for i in range(1,10)]
# dumb = [timer(lambda:point_dist(*points_brute(p))) for p in tqdm(dataset)]
# smart = [timer(lambda:point_dist(*points_smart(p))) for p in tqdm(dataset)]
# plt.plot(np.log(dumb), label="dumb")
# plt.plot(np.log(smart), label="smart!")
# plt.legend()
# plt.show()
```
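A small sketch of wiring the recursive `hook` that `points_smart` expects. In the project the hook is normally supplied by a `Lhyra` instance, so the direct self-recursion here is purely illustrative; the import assumes the module is importable as `closestpoints`.
```python
from closestpoints import point_dist, points_smart, random_points

pts = random_points(200)

def smart_hook(sub_points):
    # Always recurse with the same solver; Lhyra would normally pick one per call.
    return points_smart(sub_points, smart_hook)

(p, q), _overhead = points_smart(pts, smart_hook)
print("closest pair:", p, q, "at distance", point_dist(p, q))
```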
#### File: lhyra/python/policy.py
```python
import numpy as np
from tqdm import tqdm
from typing import Any, List
from lhyra import Optimizer, Lhyra, Solver
import matplotlib.pyplot as plt
from random import random
from functools import reduce
from copy import deepcopy
from math import exp
"""
class PolicyLinearOptimizer(Optimizer):
def __init__(self, lhyra: Lhyra, gamma: float=0.99):
""
Initialize an optimizer.
:param lhyra: A Lhyra instance to optimize.
:param gamma: Discount factor (default 0.99)
""
super().__init__(lhyra)
self.gamma = gamma
self.policy = torch.nn.Sequential(
torch.nn.Linear(lhyra.extractor.shape[0], len(lhyra.solvers)),
torch.nn.Softmax(dim=0)
)
for name, param in self.policy.named_parameters():
if param.requires_grad:
print(name, param.data)
self.opt = torch.optim.Adam(self.policy.parameters(), lr=1e-2)
self.eps = np.finfo(np.float32).eps.item()
self.saved_log_probs = []
def train(self, iters: int=1000, plot=False):
""
Train the classifier on the data, given a hook into
the Lhyra object's eval method.
:param iters: Number of training iterations to run.
:param plot: Show a plot.
""
data = self.lhyra.data_store.get_data(iters)
totals = []
for episode, datum in enumerate(tqdm(data)):
self.lhyra.clear()
self.lhyra.eval(datum)
totals.append(self.lhyra.times[0])
rewards = [-t for t in self.lhyra.times]
returns = []
policy_loss = []
R = 0
for r in rewards:
R = r + self.gamma * R
returns.insert(0,R)
returns = torch.tensor(returns)
rns = (returns - returns.mean()) / (returns.std() + self.eps)
for log_prob, R in zip(self.saved_log_probs, returns):
policy_loss.append(log_prob * R)
self.opt.zero_grad()
policy_loss = torch.cat(policy_loss).sum()
policy_loss.backward()
self.opt.step()
self.saved_log_probs.clear()
vals = [
sum(totals[i:i + iters//100])/100
for i in range(0,iters,iters//100)
]
plt.plot(vals)
plt.show()
def solver(self, features: List) -> Solver:
""
Pick and parametrize a solver from the bag of solvers.
:param features: Features provided to inform solver choice.
:return: The Solver best suited given the features provided.
""
state = torch.FloatTensor(features).unsqueeze(0)
probs = self.policy(state)
m = torch.distributions.Categorical(probs)
action = m.sample()
self.saved_log_probs.append(m.log_prob(action))
if self.lhyra.vocal:
print(self.lhyra.solvers[action.item()])
return self.lhyra.solvers[action.item()].parametrized({})
"""
class SAOptimizer(Optimizer):
def __init__(self, lhyra: Lhyra, agg: float=2):
"""
Initialize an optimizer.
:param lhyra: A Lhyra instance to optimize.
        :param agg: Aggressiveness of the random policy perturbations (default 2)
"""
super().__init__(lhyra)
self.lhyra = lhyra
self.policy = [[random() for t in range(2**lhyra.extractor.shape[0])] for s in lhyra.solvers] # Assuming 1D feature vect
self.agg = agg # Aggressiveness of training parameters
def train(self, iters: int=1000, sample: int=100, plot=False):
"""
Train the classifier on the data, given a hook into
the Lhyra object's eval method.
:param iters: Number of training iterations to run.
:param plot: Show a plot.
"""
data = self.lhyra.data_store.get_data(sample)
self.prev_time = 999999999
times = []
for i in range(iters):
temperature = min(exp(-4*i/iters), .5)
time = 0
for episode, datum in enumerate(tqdm(data)):
self.lhyra.clear()
self.lhyra.eval(datum)
time += self.lhyra.times[0] # Get the total time
times.append(time)
if time < self.prev_time:# or random() < temperature:
self.prev_policy = deepcopy(self.policy)
self.prev_time = time
# Perturb. Decrease perturb sizes over time.
self.policy = [[t+(self.agg*random()-self.agg)*(1-i/iters) for t in s] for s in self.prev_policy]
self.policy = self.prev_policy # Don't use the last one, use the second-to-last
print(self.policy)
plt.plot(range(iters), times)
plt.show()
def solver(self, features: List) -> Solver:
"""
Pick and parametrize a solver from the bag of solvers.
:param features: Features provided to inform solver choice.
:return: The Solver best suited given the features provided.
"""
"""
if self.lhyra.vocal:
for i in range(2**len(features)):
print([features[n] for n in range(len(features)) if i&(2**n)], self.policy[0][i])
"""
ans = max(list(range(len(self.policy))), key=lambda s: sum([reduce(lambda x, y: x*y,
[features[n] for n in range(len(features)) if i&(2**n)]+[1])*t for i, t in enumerate(self.policy[s])]))
if self.lhyra.vocal:
print(self.lhyra.solvers[ans])
return self.lhyra.solvers[ans].parametrized({})
```
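An illustration, assumed rather than taken from the library, of the score that `SAOptimizer.solver` computes for a single solver: each weight multiplies the product of the feature subset selected by the bits of its index, so with a one-dimensional feature vector the score is simply affine in the feature.
```python
from functools import reduce

def solver_score(weights, features):
    """Score one solver's weight vector against a feature vector."""
    total = 0.0
    for i, w in enumerate(weights):
        # The bits of i select which features enter this product term.
        subset = [features[n] for n in range(len(features)) if i & (2 ** n)]
        total += reduce(lambda x, y: x * y, subset + [1]) * w
    return total

# One feature f with weights [w0, w1] gives w0 + w1 * f.
print(solver_score([2.0, 0.5], [10.0]))  # 2.0 + 0.5 * 10.0 = 7.0
```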
#### File: lhyra/python/test_plo.py
```python
from sorting import SortFeatureExtractor, merge_sort, radix_sort, insertion_sort, quick_sort, random_list
from qlearn import ValueOptimizer
from lhyra import Lhyra, Solver, DataGenerator
from time import time
data = DataGenerator(lambda: sorted(random_list()))
solvers = [
Solver(merge_sort, []),
Solver(insertion_sort, []),
Solver(quick_sort, [])
]
lhyra = Lhyra(solvers, data, SortFeatureExtractor(), ValueOptimizer)
lhyra.train(iters=1000)
def bench():
ex = sorted(random_list())
start = time()
py = sorted(ex)
print("Py time:", time()-start)
start = time()
lh = lhyra.eval(ex, vocal=True)
print("Lhyra time:", time()-start)
start = time()
merge_hook = lambda t: merge_sort(t, merge_hook, None)
merge_sort(ex, merge_hook, None)
print("Merge time:", time()-start)
start = time()
insert_hook = lambda t: insertion_sort(t, insert_hook, None)
insertion_sort(ex, insert_hook, None)
print("Insertion time:", time()-start)
start = time()
quick_hook = lambda t: quick_sort(t, quick_hook, None)
quick_sort(ex, quick_hook, None)
print("Quick time:", time()-start)
start = time()
radix_hook = lambda t: radix_sort(t, radix_hook, None)
radix_sort(ex, radix_hook, None)
print("Radix time:", time()-start)
bench()
``` |
{
"source": "joshuagryphon/plastid",
"score": 3
} |
#### File: plastid/bin/counts_in_region.py
```python
import argparse
import inspect
import sys
import itertools
import warnings
import numpy
from plastid.genomics.roitools import SegmentChain
from plastid.util.io.filters import NameDateWriter
from plastid.util.io.openers import argsopener, get_short_name
from plastid.util.scriptlib.argparsers import (
AnnotationParser,
AlignmentParser,
MaskParser,
BaseParser,
)
from plastid.util.scriptlib.help_formatters import format_module_docstring
warnings.simplefilter("once")
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
_DISABLED = ["normalize"]
def main(argv=sys.argv[1:]):
"""Command-line program
Parameters
----------
argv : list, optional
A list of command-line arguments, which will be processed
as if the script were called from the command line if
:func:`main` is called directly.
Default: `sys.argv[1:]`. The command-line arguments, if the script is
invoked from the command line
"""
ap = AnnotationParser()
annotation_file_parser = ap.get_parser(conflict_handler="resolve")
al = AlignmentParser(disabled=_DISABLED)
alignment_file_parser = al.get_parser(conflict_handler="resolve")
mp = MaskParser()
mask_file_parser = mp.get_parser()
bp = BaseParser()
base_parser = bp.get_parser()
parser = argparse.ArgumentParser(
description = format_module_docstring(__doc__),
formatter_class = argparse.RawDescriptionHelpFormatter,
parents = [
base_parser,
alignment_file_parser,
annotation_file_parser,
mask_file_parser,
],
) # yapf: disable
parser.add_argument("outfile", type=str, help="Output filename")
args = parser.parse_args(argv)
bp.get_base_ops_from_args(args)
ga = al.get_genome_array_from_args(args, printer=printer)
transcripts = ap.get_transcripts_from_args(args, printer=printer, return_type=SegmentChain)
crossmap = mp.get_genome_hash_from_args(args, printer=printer)
ga_sum = ga.sum()
normconst = 1000.0 * 1e6 / ga_sum
with argsopener(args.outfile, args, "w") as fout:
fout.write("## total_dataset_counts: %s\n" % ga_sum)
fout.write("region_name\tregion\tcounts\tcounts_per_nucleotide\trpkm\tlength\n")
for n, ivc in enumerate(transcripts):
name = ivc.get_name()
masks = crossmap.get_overlapping_features(ivc)
ivc.add_masks(*itertools.chain.from_iterable((X for X in masks)))
if n % 1000 == 0:
printer.write("Processed %s regions..." % n)
counts = numpy.nansum(ivc.get_masked_counts(ga))
length = ivc.masked_length
rpnt = numpy.nan if length == 0 else float(counts) / length
rpkm = numpy.nan if length == 0 else rpnt * normconst
ltmp = [name, str(ivc), "%.8e" % counts, "%.8e" % rpnt, "%.8e" % rpkm, "%d" % length]
fout.write("%s\n" % "\t".join(ltmp))
fout.close()
printer.write("Processed %s regions total." % n)
printer.write("Done.")
if __name__ == '__main__':
main()
```
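The script can also be driven programmatically by handing `main` an argv-style list. The sketch below is hypothetical: the file names are placeholders and the exact flag names depend on what the annotation, alignment, and mask parsers accept.
```python
from plastid.bin.counts_in_region import main

# Placeholder inputs; flag names are assumptions, not verified here.
main([
    "region_counts.txt",
    "--annotation_files", "genes.gtf",
    "--count_files", "ribosome_footprints.bam",
])
```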
#### File: plastid/bin/make_wiggle.py
```python
__author__ = "joshua"
__date__ = "2011-03-18"
import warnings
import inspect
import sys
import argparse
from plastid.util.scriptlib.argparsers import AlignmentParser, BaseParser
from plastid.util.io.filters import NameDateWriter
from plastid.util.io.openers import get_short_name, argsopener
from plastid.plotting.colors import get_rgb255
from plastid.util.scriptlib.help_formatters import format_module_docstring
warnings.simplefilter("once")
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
def main(argv=sys.argv[1:]):
"""Command-line program
Parameters
----------
argv : list, optional
A list of command-line arguments, which will be processed
as if the script were called from the command line if
:py:func:`main` is called directly.
Default: sys.argv[1:] (actually command-line arguments)
"""
ap = AlignmentParser()
bp = BaseParser()
parser = argparse.ArgumentParser(
description=format_module_docstring(__doc__),
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[bp.get_parser(), ap.get_parser()]
)
parser.add_argument(
"-o",
"--out",
dest="outbase",
type=str,
required=True,
metavar="FILENAME",
help="Base name for output files"
)
parser.add_argument("--window_size",default=100000,metavar="N",type=int,
help="Size of nucleotides to fetch at once for export. "+\
"Large values are faster but require more memory "+\
"(Default: 100000)")
track_opts = parser.add_argument_group(title="Browser track options")
track_opts.add_argument(
"--color",
type=str,
default=None,
help="An RGB hex string (`'#NNNNNN'`, `N` in `[0-9,A-F]`) specifying \
the track color."
)
track_opts.add_argument(
"-t",
"--track_name",
dest="track_name",
type=str,
help="Name to give browser track",
default=None
)
track_opts.add_argument(
"--output_format",
choices=("bedgraph", "variable_step"),
default="bedgraph",
help="Format of output file (Default: bedgraph)"
)
args = parser.parse_args(argv)
gnd = ap.get_genome_array_from_args(args, printer=printer)
bp.get_base_ops_from_args(args)
if args.track_name is None:
name = args.outbase
else:
name = args.track_name
if args.color is not None:
fw_color = rc_color = "%s,%s,%s" % tuple(get_rgb255(args.color))
else:
fw_color = rc_color = "0,0,0"
if args.output_format == "bedgraph":
outfn = gnd.to_bedgraph
elif args.output_format == "variable_step":
outfn = gnd.to_variable_step
track_fw = "%s_fw.wig" % args.outbase
track_rc = "%s_rc.wig" % args.outbase
with argsopener(track_fw, args, "w") as fw_out:
printer.write("Writing forward strand track to %s ..." % track_fw)
outfn(
fw_out,
"%s_fw" % name,
"+",
window_size=args.window_size,
color=fw_color,
printer=printer
)
fw_out.close()
with argsopener(track_rc, args, "w") as rc_out:
printer.write("Writing reverse strand track to %s ..." % track_rc)
outfn(
rc_out,
"%s_rc" % name,
"-",
window_size=args.window_size,
color=rc_color,
printer=printer
)
rc_out.close()
printer.write("Done!")
if __name__ == "__main__":
main()
```
#### File: plastid/bin/phase_by_size.py
```python
import sys
import argparse
import inspect
import warnings
import pandas as pd
import numpy
import matplotlib
matplotlib.use("Agg")
from plastid.util.scriptlib.argparsers import (
AlignmentParser,
AnnotationParser,
PlottingParser,
BaseParser,
)
from plastid.util.io.openers import get_short_name, argsopener, read_pl_table
from plastid.util.io.filters import NameDateWriter
from plastid.util.scriptlib.help_formatters import format_module_docstring
from plastid.util.services.exceptions import DataWarning, ArgumentWarning
from plastid.plotting.plots import phase_plot
from plastid.genomics.roitools import SegmentChain
warnings.simplefilter("once")
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
def roi_row_to_cds(row):
"""Helper function to extract coding portions from maximal spanning windows
flanking CDS starts that are created by |metagene| ``generate`` subprogram.
Parameters
----------
row : (int, Series)
Row from a :class:`pandas.DataFrame` of an ROI file made by the |metagene|
``generate`` subprogram
Returns
-------
|SegmentChain|
Coding portion of maximal spanning window
"""
chainstr, alignment_offset, zero_point = row[1][["region", "alignment_offset", "zero_point"]]
chain = SegmentChain.from_str(chainstr)
cds_start = zero_point - alignment_offset
subchain = chain.get_subchain(cds_start, chain.length)
return subchain
def main(argv=sys.argv[1:]):
"""Command-line program
Parameters
----------
argv : list, optional
A list of command-line arguments, which will be processed
as if the script were called from the command line if
:py:func:`main` is called directly.
Default: `sys.argv[1:]`. The command-line arguments, if the script is
invoked from the command line
"""
al = AlignmentParser(
disabled=["normalize", "big_genome", "spliced_bowtie_files"],
input_choices=["BAM"],
)
an = AnnotationParser()
pp = PlottingParser()
bp = BaseParser()
plotting_parser = pp.get_parser()
alignment_file_parser = al.get_parser(conflict_handler="resolve")
annotation_file_parser = an.get_parser(conflict_handler="resolve")
base_parser = bp.get_parser()
parser = argparse.ArgumentParser(
description=format_module_docstring(__doc__),
formatter_class=argparse.RawDescriptionHelpFormatter,
conflict_handler="resolve",
parents=[base_parser, annotation_file_parser, alignment_file_parser, plotting_parser]
)
parser.add_argument("roi_file",type=str,nargs="?",default=None,
help="Optional. ROI file of maximal spanning windows surrounding start codons, "+\
"from ``metagene generate`` subprogram. Using this instead of `--annotation_files` "+\
"prevents double-counting of codons when multiple transcript isoforms exist "+\
"for a gene. See the documentation for `metagene` for more info about ROI files."+\
"If an ROI file is not given, supply an annotation with ``--annotation_files``")
parser.add_argument("outbase", type=str, help="Required. Basename for output files")
parser.add_argument(
"--codon_buffer",
type=int,
default=5,
help="Codons before and after start codon to ignore (Default: 5)"
)
args = parser.parse_args(argv)
bp.get_base_ops_from_args(args)
pp.set_style_from_args(args)
gnd = al.get_genome_array_from_args(args, printer=printer)
read_lengths = list(range(args.min_length, args.max_length + 1))
codon_buffer = args.codon_buffer
dtmp = {
"read_length": numpy.array(read_lengths),
"reads_counted": numpy.zeros_like(read_lengths, dtype=int),
}
if args.roi_file is not None:
using_roi = True
roi_table = read_pl_table(args.roi_file)
regions = roi_table.iterrows()
transform_fn = roi_row_to_cds
back_buffer = -1
if len(args.annotation_files) > 0:
warnings.warn(
"If an ROI file is given, annotation files are ignored. Pulling regions from '%s'. Ignoring '%s'"
% (args.roi_file, ", ".join(args.annotation_files)), ArgumentWarning
)
else:
using_roi = False
if len(args.annotation_files) == 0:
printer.write("Either an ROI file or at least annotation file must be given.")
sys.exit(1)
else:
warnings.warn(
"Using a transcript annotation file instead of an ROI file can lead to double-counting of codons if the annotation contains multiple transcripts per gene.",
ArgumentWarning
)
regions = an.get_transcripts_from_args(args, printer=printer)
back_buffer = -codon_buffer
transform_fn = lambda x: x.get_cds()
phase_sums = {}
for k in read_lengths:
phase_sums[k] = numpy.zeros(3)
for n, roi in enumerate(regions):
if n % 1000 == 1:
printer.write("Counted %s ROIs ..." % n)
# transformation needed to extract CDS from transcript or from ROI file window
cds_part = transform_fn(roi)
# only calculate for coding genes
if len(cds_part) > 0:
read_dict = {}
count_vectors = {}
for k in read_lengths:
read_dict[k] = []
count_vectors[k] = []
# for each seg, fetch reads, sort them, and create individual count vectors
for seg in cds_part:
reads = gnd.get_reads(seg)
for read in filter(lambda x: len(x.positions) in read_dict, reads):
read_dict[len(read.positions)].append(read)
# map and sort by length
for read_length in read_dict:
count_vector = list(gnd.map_fn(read_dict[read_length], seg)[1])
count_vectors[read_length].extend(count_vector)
# add each count vector for each length to total
for k, vec in count_vectors.items():
counts = numpy.array(vec)
if cds_part.strand == "-":
counts = counts[::-1]
if len(counts) % 3 == 0:
counts = counts.reshape((int(len(counts) / 3), 3))
else:
if using_roi == False:
message = "Length of '%s' coding region (%s nt) is not divisible by 3. Ignoring last partial codon." % (
roi.get_name(), len(counts)
)
warnings.warn(message, DataWarning)
newlen = int(len(counts) // 3)
counts = counts[:3 * newlen]
counts = counts.reshape(newlen, 3)
phase_sums[k] += counts[codon_buffer:back_buffer, :].sum(0)
printer.write("Counted %s ROIs total." % (n + 1))
for k in dtmp:
dtmp[k] = numpy.array(dtmp[k])
# total reads counted for each size
for k in read_lengths:
dtmp["reads_counted"][dtmp["read_length"] == k] = phase_sums[k].sum()
# read length distribution
dtmp["fraction_reads_counted"
] = dtmp["reads_counted"].astype(float) / dtmp["reads_counted"].sum()
# phase vectors
phase_vectors = {K: V.astype(float) / V.astype(float).sum() for K, V in phase_sums.items()}
for i in range(3):
dtmp["phase%s" % i] = numpy.zeros(len(dtmp["read_length"]))
for k, vec in phase_vectors.items():
for i in range(3):
dtmp["phase%s" % i][dtmp["read_length"] == k] = vec[i]
# phase table
fn = "%s_phasing.txt" % args.outbase
printer.write("Saving phasing table to %s ..." % fn)
dtmp = pd.DataFrame(dtmp)
with argsopener(fn, args) as fh:
dtmp.to_csv(
fh,
columns=[
"read_length",
"reads_counted",
"fraction_reads_counted",
"phase0",
"phase1",
"phase2",
],
float_format="%.6f",
na_rep="nan",
sep="\t",
index=False,
header=True
)
fh.close()
fig = {}
if args.figsize is not None:
fig["figsize"] = tuple(args.figsize)
colors = pp.get_colors_from_args(args, len(read_lengths))
fn = "%s_phasing.%s" % (args.outbase, args.figformat)
printer.write("Plotting to %s ..." % fn)
plot_counts = numpy.vstack([V for (_, V) in sorted(phase_sums.items())])
fig, (ax1,_) = phase_plot(plot_counts,labels=read_lengths,lighten_by=0.3,
cmap=None,color=colors,fig=fig)
if args.title is not None:
ax1.set_title(args.title)
else:
ax1.set_title("Phasing stats for %s" % args.outbase)
fig.savefig(fn, dpi=args.dpi, bbox_inches="tight")
if __name__ == "__main__":
main()
```
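A toy illustration, separate from the script, of the core phasing computation: a codon-aligned count vector is reshaped to `(n_codons, 3)` so that summing down the columns gives the read density in each of the three reading frames.
```python
import numpy

counts = numpy.array([5, 1, 0,  7, 2, 1,  6, 0, 0])   # three codons of toy data
codons = counts.reshape((len(counts) // 3, 3))
phase_totals = codons.sum(0)                          # array([18, 3, 1])
phase_fractions = phase_totals / phase_totals.sum()
print(phase_totals, phase_fractions)
```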
#### File: plastid/genomics/seqtools.py
```python
import random, re
from Bio.Alphabet import generic_dna
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from twobitreader import TwoBitFile
IUPAC_TABLE = {
"A": "A",
"C": "C",
"T": ("T", "U"),
"U": ("T", "U"),
"G": "G",
"N": ("A", "C", "T", "G", "U"),
# puRines
"R": ("A", "G"),
# pYrimidines
"Y": ("C", "T", "U"),
# Strong binding
"S": ("G", "C"),
# Weak binding
"W": ("A", "T", "U"),
# Keto-functionalized
"K": ("G", "T", "U"),
# aMino-functionalized
"M": ("A", "C"),
# B = any but A
"B": ("C", "G", "T", "U"),
# C = any but C
"D": ("A", "G", "T", "U"),
# H = any but G
"H": ("A", "C", "T", "U"),
# V = any but T or U
"V": ("A", "C", "G"),
}
"""Dictionary mapping IUPAC nucleotide symbols to tuples of nucleotides
they represent (e.g. R -> (A, G) )
"""
def seq_to_regex(inp, flags=0):
"""Convert a nucleotide sequence of IUPAC nucleotide characters as a regular expression.
Ambiguous IUPAC characters are converted to groups (e.g. `'Y'` to `'[CTU]'`),
and T and U are considered equivalent.
Parameters
----------
inp : str
Nucleotide sequence using IUPAC nucleotide codes
flags : int, optional
Flags to pass to :py:func:`re.compile` (Default: 0 / no flags)
Examples
--------
Convert a sequence to a regex::
>>> seq_to_regex("CARYYA").pattern
'CA[AG][CTU][CTU]A'
Returns
-------
:py:class:`re.RegexObject`
Regular expression pattern corresponding to IUPAC sequence in `inp`
"""
out = []
for ch in inp:
if len(IUPAC_TABLE.get(ch, ch)) == 1:
out.append(ch)
else:
out.append("[" + "".join(IUPAC_TABLE.get(ch, ch)) + "]")
return re.compile("".join(out), flags=flags)
def mutate_seqs(seqs, nucleotides="NACTG", mutations=1):
"""Generate all sequences within `mutations` distance from a reference sequence
Parameters
----------
seqs : str or list of str
Single reference sequence (a string) or a group of strings
nucleotides : list of char, optional
Permitted nucleotide substitutions (Default: `'NACTG'`)
mutations : int, optional
Number of substitutions to make (Default: `1`)
Returns
-------
set
all sequences within `mutations` substitutions from the sequence(s)
specified in `seqs`
"""
if isinstance(seqs, str):
seqs = [seqs]
if mutations == 0:
return set(seqs)
else:
seqsout = []
for seq in seqs:
for nuc in nucleotides:
for i in range(len(seq)):
newseq = list(seq)[:]
newseq[i] = nuc
seqsout.append("".join(newseq))
seqsout.extend(mutate_seqs(seqsout, nucleotides=nucleotides, mutations=mutations - 1))
return set(seqsout) | set(seqs)
def random_seq(size, nucleotides="ACTG"):
"""Generate a random nucleotide sequence of length `size` and composition `nucleotides`
Parameters
----------
size : int
length of desired sequence
nucleotides : str, optional
string of nucleotides to use in sequence, in desired base composition
(i.e. need not be unique; can supply `'AATCG'` to increase `'A'` bias.
Default: `'ACTG'`)
Returns
-------
str : randomized DNA sequence
"""
seq = "".join([nucleotides[random.randrange(0, len(nucleotides))] for _ in range(0, size)])
return seq
def revive(twobitreader, seqname):
    """Unpickling helper: rebuild a :class:`_TwoBitSeqProxy` from its file and sequence name"""
    return _TwoBitSeqProxy(twobitreader, seqname)
class _TwoBitSeqProxy(object):
"""Adaptor class that fetches :class:`Bio.SeqRecord.SeqRecord`
objects from :class:`twobitreader.TwoBitSequence` objects
Defines `seq` property and :meth:`~_TwoBitSeqProxy.reverse_complement` method
"""
def __init__(self, twobitfile, key):
"""
Parameters
----------
        twobitfile : :class:`twobitreader.TwoBitFile`
            Parent file from which the sequence is fetched

        key : str
            Name of the sequence (chromosome or contig) inside `twobitfile`
"""
self.name = key
self._twobitfile = twobitfile
self.twobitseq = twobitfile[key]
self._seq = None
def __reduce__(self):
return (_TwoBitSeqProxy, (self._twobitfile, self.name))
def __getitem__(self, slice_):
return SeqRecord(
Seq(self.twobitseq.get_slice(min_=slice_.start, max_=slice_.stop), generic_dna)
)
def __len__(self):
return len(self.twobitseq)
def __str__(self):
"""Return sequence in `self.twobitseq` as str"""
return str(self.twobitseq)
def __getattr__(self, attr):
if attr == "seq":
if self._seq is None:
self._seq = Seq(str(self.twobitseq), generic_dna)
return self._seq
def reverse_complement(self):
"""Return the reverse complement of the TwoBitSequence
Returns
-------
:class:`Bio.SeqRecord.SeqRecord`
Reverse complement of the sequence held in `self.twobitseq`
"""
return SeqRecord(self.seq).reverse_complement()
class TwoBitSeqRecordAdaptor(object):
"""Adaptor class that makes a :class:`twobitreader.TwoBitFile` behave
like a dictionary of :class:`Bio.SeqRecord.SeqRecord` objects.
"""
def __init__(self, fh):
self.twobitfile = TwoBitFile(fh)
self._filename = fh
self._chroms = {K: _TwoBitSeqProxy(self.twobitfile, K) for K in self.twobitfile}
def __reduce__(self):
return (TwoBitSeqRecordAdaptor, (self._filename, ))
def __getitem__(self, key):
return self._chroms[key]
def __getattr__(self, attr):
try:
return getattr(self._chroms, attr)
except AttributeError:
return getattr(self.twobitfile, attr)
def __iter__(self):
return iter(self._chroms)
def __len__(self):
return len(self._chroms)
```
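A short usage sketch for the helpers above, assuming the module imports as `plastid.genomics.seqtools`; the behaviour noted in the comments follows the docstrings above.
```python
from plastid.genomics.seqtools import mutate_seqs, random_seq, seq_to_regex

pattern = seq_to_regex("CARYYA")                 # compiles to 'CA[AG][CTU][CTU]A'
print(pattern.search("GGCAGTTAGG") is not None)  # True: 'CAGTTA' matches

one_off = mutate_seqs("AT", nucleotides="ACTG", mutations=1)
print("AT" in one_off, len(one_off))             # the original sequence is included

print(random_seq(10, nucleotides="AATCG"))       # A-biased random 10-mer
```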
#### File: plastid/readers/bed.py
```python
__date__ = "Aug 23, 2011"
__author__ = "joshua"
import shlex
from plastid.readers.common import AssembledFeatureReader
from plastid.util.services.exceptions import FileFormatWarning, warn
bed_x_formats = {
"bedDetail": [
("ID", str),
("description", str),
],
"narrowPeak": [("signalValue", float),
("pValue", float),
("qValue", float),
("peak", int)],
"broadPeak": [("signalValue", float),
("pValue", float),
("qValue", float)],
"gappedPeak": [("signalValue", float),
("pValue", float),
("qValue", float)],
"tagAlign": [("sequence", str),
("score", float),
("strand", str)],
"pairedTagAlign": [("seq1", str),
("seq2", str)],
"peptideMapping":
[("rawScore", float),
("spectrumId", str),
("peptideRank", int),
("peptideRepeatCount", int)],
}
"""Column names and types for various :term:`extended BED` formats used by
the `ENCODE`_ project. These can be passed to the `extra_columns` keyword of
:class:`BED_Reader`."""
class BED_Reader(AssembledFeatureReader):
"""
BED_Reader(*streams, return_type=SegmentChain, add_three_for_stop=False, extra_columns=0, printer=None, tabix=False)
Reads `BED`_ and :term:`extended BED` files line-by-line into |SegmentChains|
or |Transcripts|. Metadata, if present in a track declaration, is saved
in `self.metadata`. Malformed lines are stored in `self.rejected`, while
parsing continues.
Parameters
----------
*streams : file-like
One or more open filehandles of input data.
return_type : |SegmentChain| or subclass, optional
Type of feature to return from assembled subfeatures (Default: |SegmentChain|)
add_three_for_stop : bool, optional
Some annotation files exclude the stop codon from CDS annotations. If set to
`True`, three nucleotides will be added to the threeprime end of each
CDS annotation, **UNLESS** the annotated transcript contains explicit stop_codon
feature. (Default: `False`)
extra_columns: int or list optional
Extra, non-BED columns in :term:`extended BED` format file corresponding
to feature attributes. This is common in `ENCODE`_-specific `BED`_ variants.
        if `extra_columns` is:
- an :class:`int`: it is taken to be the
number of attribute columns. Attributes will be stored in
the `attr` dictionary of the |SegmentChain|, under names like
`custom0`, `custom1`, ... , `customN`.
- a :class:`list` of :class:`str`, it is taken to be the names
of the attribute columns, in order, from left to right in the file.
In this case, attributes in extra columns will be stored under
their respective names in the `attr` dict.
- a :class:`list` of :class:`tuple`, each tuple is taken
to be a pair of `(attribute_name, formatter_func)`. In this case,
the value of `attribute_name` in the `attr` dict of the |SegmentChain|
will be set to `formatter_func(column_value)`.
(Default: 0)
printer : file-like, optional
Logger implementing a ``write()`` method. Default: |NullWriter|
tabix : boolean, optional
`streams` point to `tabix`_-compressed files or are open
:class:`~pysam.ctabix.tabix_file_iterator` (Default: `False`)
Examples
--------
Read entries in a `BED`_ file as |Transcripts|. `thickEnd` and `thickStart`
columns will be interpreted as the endpoints of coding regions::
>>> bed_reader = BED_Reader(open("some_file.bed"),return_type=Transcript)
>>> for transcript in bed_reader:
>>> pass # do something fun
Open an :term:`extended BED` file that contains additional columns for `gene_id`
and `favorite_color`. Values for these attributes will be stored in the `attr`
dict of each |Transcript|::
>>> bed_reader = BED_Reader(open("some_file.bed"),return_type=Transcript,extra_columns=["gene_id","favorite_color"])
Open several `Tabix`_-compressed `BED`_ files, and iterate over them as if
they were one uncompressed stream::
>>> bed_reader = BED_Reader("file1.bed.gz","file2.bed.gz",tabix=True)
>>> for chain in bed_reader:
>>> pass # do something more interesting
Attributes
----------
streams : file-like
One or more open streams (usually filehandles) of input data.
return_type : class
The type of object assembled by the reader. Typically a |SegmentChain|
        or a subclass thereof. Must implement a method called ``from_bed()``
counter : int
Cumulative line number counter over all streams
rejected : list
List of `BED`_ lines that could not be parsed
metadata : dict
Attributes declared in track line, if any
extra_columns : int or list, optional
Extra, non-BED columns in :term:`extended BED` format file corresponding to feature
attributes. This is common in `ENCODE`_-specific `BED`_ variants.
if `extra_columns` is:
- an :class:`int`: it is taken to be the
number of attribute columns. Attributes will be stored in
the `attr` dictionary of the |SegmentChain|, under names like
`custom0`, `custom1`, ... , `customN`.
- a :class:`list` of :class:`str`, it is taken to be the names
of the attribute columns, in order, from left to right in the file.
In this case, attributes in extra columns will be stored under
            their respective names in the `attr` dict.
- a :class:`list` of :class:`tuple`, each tuple is taken
to be a pair of `(attribute_name, formatter_func)`. In this case,
the value of `attribute_name` in the `attr` dict of the |SegmentChain|
will be set to `formatter_func(column_value)`.
If unspecified, :class:`BED_Reader` reads the track declaration line
(if present), and:
- if a known track type is specified by the `type` field, it attempts
to format the extra columns as specified by that type. Known track
types presently include:
- bedDetail
- narrowPeak
- broadPeak
- gappedPeak
- tagAlign
- pairedTagAlign
- peptideMapping
- if not, it assumes 0 non-`BED`_ fields are present, and that all columns
are `BED`_ formatted.
"""
def __init__(self, *args, **kwargs):
"""
BED_Reader(*streams, return_type=SegmentChain, add_three_for_stop=False, extra_columns=0, printer=None, tabix=False)
Parameters
----------
*streams : file-like
One or more open filehandles of input data.
return_type : |SegmentChain| or subclass, optional
Type of feature to return from assembled subfeatures (Default: |SegmentChain|)
add_three_for_stop : bool, optional
Some annotation files exclude the stop codon from CDS annotations. If set to
`True`, three nucleotides will be added to the threeprime end of each
CDS annotation, **UNLESS** the annotated transcript contains explicit stop_codon
feature. (Default: `False`)
extra_columns: int or list optional
Extra, non-BED columns in :term:`Extended BED`_ format file corresponding
to feature attributes. This is common in `ENCODE`_-specific `BED`_ variants.
            if `extra_columns` is:
- an :class:`int`: it is taken to be the
number of attribute columns. Attributes will be stored in
the `attr` dictionary of the |SegmentChain|, under names like
`custom0`, `custom1`, ... , `customN`.
- a :class:`list` of :class:`str`, it is taken to be the names
of the attribute columns, in order, from left to right in the file.
In this case, attributes in extra columns will be stored under
their respective names in the `attr` dict.
- a :class:`list` of :class:`tuple`, each tuple is taken
to be a pair of `(attribute_name, formatter_func)`. In this case,
the value of `attribute_name` in the `attr` dict of the |SegmentChain|
will be set to `formatter_func(column_value)`.
(Default: 0)
printer : file-like, optional
Logger implementing a ``write()`` method. Default: |NullWriter|
tabix : boolean, optional
`streams` are `tabix`_-compressed (Default: `False`)
"""
AssembledFeatureReader.__init__(self, *args, **kwargs)
self.extra_columns = kwargs.get("extra_columns", 0)
def _parse_track_line(self, inp):
"""Parse track line from `BED`_ / extended BED file
Parameters
----------
inp : str
track definition line from `BED`_ / extended BED file
Returns
-------
dict
key-value pairs from `BED`_ line
"""
self.metadata = {}
ltmp = shlex.split(inp.strip("\n"))
for item in ltmp:
k, v = item.split("=")
self.metadata[k] = v
track_type = self.metadata.get("type", None)
if track_type is not None:
if track_type in bed_x_formats:
self.printer.write(
"Found track type '%s' in track definition line. Assuming extra columns follow UCSC definitions."
% track_type
)
if self.extra_columns == 0:
self.extra_columns = bed_x_formats[track_type]
elif self.extra_columns != bed_x_formats[track_type]:
my_columns = self._get_extra_column_names()
track_format_columns = ",".join([X[0] for X in bed_x_formats[track_type]])
warn("Extra columns specified by %s track type declaration (%s) don't match those specified by user (%s). Using those specified by user." %\
(track_type,track_format_columns,my_columns),FileFormatWarning)
self.metadata["type"] = "custom"
else:
self.printer.write("Found track type '%s' in track definition line." % track_type)
def _get_extra_column_names(self):
"""Return names of extra columns in extended BED file)"""
if isinstance(self.extra_columns, int):
my_columns = "%s unnamed columns" % self.extra_columns
elif isinstance(self.extra_columns, list):
if all([isinstance(X, tuple) for X in self.extra_columns]):
my_columns = ",".join([X[0] for X in self.extra_columns])
elif all([isinstance(X, str) for X in self.extra_columns]):
my_columns = ",".join(self.extra_columns)
return my_columns
def _assemble(self, line):
"""Read `BED`_ files line-by-line into types specified by `self.return_type`"""
self.counter += 1
if line.strip() == "":
return self.__next__()
elif line.startswith("browser"):
return self.__next__()
elif line.startswith("track"):
# reset metadata
self._parse_track_line(line[5:])
return self.__next__()
elif line.startswith("#"):
return self.__next__()
else:
try:
return self.return_type.from_bed(line, extra_columns=self.extra_columns)
except:
self.rejected.append(line)
msg = "Cannot parse BED line number %s. " % self.counter
if self.metadata.get("type", None) is not None:
msg += (
"Are you sure this is a %s BED file with extra columns (%s)?" %
(self.metadata.get("type"), self._get_extra_column_names())
)
elif self.extra_columns != 0:
msg += (
"Are you sure this BED file has extra columns (%s)?" %
self._get_extra_column_names()
)
else:
msg += "Maybe this BED has extra columns (i.e. is an extended BED file)?"
msg += ("\n %s" % line)
warn(msg, FileFormatWarning)
return self.__next__()
```
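A sketch of reading an `ENCODE`_-style narrowPeak file by reusing the column definitions from `bed_x_formats`, so `signalValue`, `pValue`, `qValue`, and `peak` land in each chain's `attr` dict with the right types; the file name is hypothetical.
```python
from plastid.readers.bed import BED_Reader, bed_x_formats

reader = BED_Reader(open("peaks.narrowPeak"),
                    extra_columns=bed_x_formats["narrowPeak"])

for chain in reader:
    # Extra columns are typed by the formatter functions defined above.
    print(chain.get_name(), chain.attr["signalValue"], chain.attr["peak"])
```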
#### File: plastid/readers/gff_tokens.py
```python
import re
import shlex
import copy
from plastid.util.services.exceptions import FileFormatWarning, warn
gtfpat = re.compile(r"^ *([^ ]*) +(.*) *$")
# From the spec: http://www.sequenceontology.org/gff3.shtml
# In addition to Parent, the Alias, Note, Dbxref and Ontology_term attributes can have multiple values.
# Also, SGD uses 'dbxref' instead of 'Dbxref'
_GFF3_DEFAULT_LISTS = ("Parent", "Alias", "Note", "Dbxref", "Ontology_term", "dbxref")
#===============================================================================
# INDEX: helper functions for escaping
#===============================================================================
# must escape % first, otherwise we'll end up escaping everything else,
# since other escape codes start with percent signs
_GFF3_escape_sequences = [
('%', '%25'), # percent signs MUST be escaped FIRST
(';', '%3B'),
(',', '%2C'),
('=', '%3D'),
('&', '%26'),
('\x00', '%00'),
('\x01', '%01'),
('\x02', '%02'),
('\x03', '%03'),
('\x04', '%04'),
('\x05', '%05'),
('\x06', '%06'),
('\x07', '%07'),
('\x08', '%08'),
('\t', '%09'),
('\n', '%0A'),
('\x0b', '%0B'),
('\x0c', '%0C'),
('\r', '%0D'),
('\x0e', '%0E'),
('\x0f', '%0F'),
('\x10', '%10'),
('\x11', '%11'),
('\x12', '%12'),
('\x13', '%13'),
('\x14', '%14'),
('\x15', '%15'),
('\x16', '%16'),
('\x17', '%17'),
('\x18', '%18'),
('\x19', '%19'),
('\x1a', '%1A'),
('\x1b', '%1B'),
('\x1c', '%1C'),
('\x1d', '%1D'),
('\x1e', '%1E'),
('\x1f', '%1F'),
('\x7f', '%7F'),
('\x80', '%80'),
('\x81', '%81'),
('\x82', '%82'),
('\x83', '%83'),
('\x84', '%84'),
('\x85', '%85'),
('\x86', '%86'),
('\x87', '%87'),
('\x88', '%88'),
('\x89', '%89'),
('\x8a', '%8A'),
('\x8b', '%8B'),
('\x8c', '%8C'),
('\x8d', '%8D'),
('\x8e', '%8E'),
('\x8f', '%8F'),
('\x90', '%90'),
('\x91', '%91'),
('\x92', '%92'),
('\x93', '%93'),
('\x94', '%94'),
('\x95', '%95'),
('\x96', '%96'),
('\x97', '%97'),
('\x98', '%98'),
('\x99', '%99'),
('\x9a', '%9A'),
('\x9b', '%9B'),
('\x9c', '%9C'),
('\x9d', '%9D'),
('\x9e', '%9E'),
('\x9f', '%9F')
]
"""List mapping characters to their escape sequences, per the `GFF3`_ specification"""
_GTF2_escape_sequences = copy.deepcopy(_GFF3_escape_sequences)
_GTF2_escape_sequences.append(("\"", "%22"))
"""List mapping characters to their escape sequences for `GTF2`_. These are undefined,
but we are using `GFF3`_ characters plus double quotation marks as a convention.
"""
def escape(inp, char_pairs):
"""Escape reserved characters specified in the list of tuples `char_pairs`
Parameters
----------
inp : str
Input string
    char_pairs : list
List of tuples of (character, escape sequence for character)
Returns
-------
str
Escaped output
See also
--------
    unescape
"""
for char_, repl in char_pairs:
inp = inp.replace(char_, repl)
return inp
def unescape(inp, char_pairs):
"""Unescape reserved characters specified in the list of tuples `char_pairs`
Parameters
----------
    inp : str
        Input string
    char_pairs : list
        List of tuples of (character, escape sequence for character)
Returns
-------
str
Unescaped output
See also
--------
    escape
"""
for repl, char_ in reversed(char_pairs):
inp = inp.replace(char_, repl)
return inp
def escape_GFF3(inp):
"""Escape reserved characters in `GFF3`_ tokens using percentage notation.
In the `GFF3`_ spec, reserved characters include:
- control characters (ASCII 0-32, 127, and 128-159)
- tab, newline, & carriage return
- semicolons & commas
- the percent sign
- the equals sign
- the ampersand
Parameters
----------
inp : str
Input string
Returns
-------
str
Escaped output
See also
--------
unescape_GFF3
"""
return escape(inp, _GFF3_escape_sequences)
def unescape_GFF3(inp):
"""Unescape reserved characters in `GFF3`_ tokens using percentage notation.
In the `GFF3`_ spec, reserved characters include:
- control characters (ASCII 0-32, 127, and 128-159)
- tab, newline, & carriage return
- semicolons & commas
- the percent sign
- the equals sign
- the ampersand
Parameters
----------
inp : str
Input string
Returns
-------
str
Unescaped output
See also
--------
escape_GFF3
"""
return unescape(inp, _GFF3_escape_sequences)
def escape_GTF2(inp):
"""Escape reserved characters in `GTF2`_ tokens using percentage notation.
While the `GTF2`_ spec is agnostic for escaping, it is useful when adding
extra attributes to files. As a convention, we escape the characters
specified in the `GFF3`_ spec, as well as double quotation marks.
In the `GTF2`_ spec, reserved characters include:
- control characters (ASCII 0-32, 127, and 128-159)
- tab, newline, & carriage return
- semicolons & commas
- the percent sign
- the equals sign
- the ampersand
Parameters
----------
inp : str
Input string
Returns
-------
str
Escaped output
See also
--------
    unescape_GTF2
"""
return escape(inp, _GTF2_escape_sequences)
def unescape_GTF2(inp):
"""Unescape reserved characters in `GTF2`_ tokens using percentage notation.
While the `GTF2`_ spec is agnostic for escaping, it is useful when adding
extra attributes to files. As a convention, we escape the characters
    specified in the `GFF3`_ spec, as well as double quotation marks.
In the `GFF3`_ spec, reserved characters include:
- control characters (ASCII 0-32, 127, and 128-159)
- tab, newline, & carriage return
- semicolons & commas
- the percent sign
- the equals sign
- the ampersand
Parameters
----------
inp : str
Input string
Returns
-------
str
Unescaped output
See also
--------
    escape_GTF2
"""
return unescape(inp, _GTF2_escape_sequences)
#===============================================================================
# INDEX: attribute token formatting and parsing
#===============================================================================
def _make_generic_tokens(attr, excludes=None, join_pat='%s %s; ', escape=None):
"""Helper function to convert the `attr` dict of a |SegmentChain|
into the string representation used in GFF files. This includes
URL escaping of keys and values, and catenating lists with `','`
before string conversion
Parameters
----------
attr : dict
Dictionary of key-value pairs to export
excludes : list<str>
List of keys to exclude from string
    join_pat : str
        printf-style pattern explaining how to join key:value pairs
    escape : None or func, optional
        If None, no special characters are escaped. If a function, that
        function will be used to perform the escaping. (Default: `None`)
Returns
-------
str
"""
f = lambda x: x[0] not in excludes
if escape is None:
esc = lambda inp: inp
else:
esc = lambda inp: escape(str(inp))
excludes = [] if excludes is None else excludes
ltmp = []
for key, val in filter(f, attr.items()):
if isinstance(val, list):
val = ",".join([esc(X) for X in val])
else:
val = esc(val)
ltmp.append(join_pat % (esc(key), val))
return ''.join(ltmp)
def make_GFF3_tokens(attr, excludes=None, escape=True):
"""Helper function to convert the `attr` dict of a |SegmentChain|
into the string representation used in `GFF3`_ files. This includes
URL escaping of special characters, and catenating lists with '`,`'
before string conversion
Examples
--------
>>> d = {'a':1,'b':2,'c':3,'d':4,'e':5,'z':26,'text':"something; with escape sequences"}
    >>> make_GFF3_tokens(d)
    'a=1;c=3;b=2;e=5;d=4;z=26;text=something%3B with escape sequences;'
    >>> excludes=['a','b','c']
    >>> make_GFF3_tokens(d,excludes)
    'e=5;d=4;z=26;text=something%3B with escape sequences;'
    >>> d = {'a':1,'b':2,'c':[3,7],'d':4,'e':5,'z':26}
    >>> make_GFF3_tokens(d)
    'a=1;c=3,7;b=2;e=5;d=4;z=26;'
Parameters
----------
attr : dict
Dictionary of key-value pairs to export
excludes : list, optional
List of keys to exclude from string
escape : bool, optional
If True, special characters in output are `GFF3`_-escaped (Default: `True`)
Returns
-------
str
Data formatted for *attributes* column of `GFF3`_ (column 9)
"""
escape = escape_GFF3 if escape == True else None
excludes = [] if excludes is None else excludes
return _make_generic_tokens(attr, excludes=excludes, join_pat="%s=%s;", escape=escape)
def make_GTF2_tokens(attr, excludes=None, escape=True):
"""Helper function to convert the `attr` dict of a |SegmentChain|
into the string representation used in `GTF2`_ files. By default, special
characters defined in the `GFF3`_ spec will be URL-escaped.
Examples
--------
>>> d = {'transcript_id' : 't;id', 'a':1,'b':2,'c':3,'d':4,'e':5,'z':26,
'gene_id' : 'gid'}
    >>> make_GTF2_tokens(d)
    'gene_id "gid"; transcript_id "t%3Bid"; a "1"; c "3"; b "2"; e "5"; d "4"; z "26";'
    >>> excludes=['a','b','c']
    >>> make_GTF2_tokens(d,excludes)
    'gene_id "gid"; transcript_id "t%3Bid"; e "5"; d "4"; z "26";'
Parameters
----------
attr : dict
Dictionary of key-value pairs to export
excludes : list, optional
List of keys to exclude from string
escape : bool, optional
If True, special characters in output are `GTF2`_-escaped (Default: `True`)
Returns
-------
str
Data formatted for *attributes* column of `GTF2`_ (column 9)
"""
excludes = [] if excludes is None else excludes
excludes.extend(["transcript_id", "gene_id"])
stmp = 'gene_id "%s"; transcript_id "%s"; ' % (attr.get("gene_id"), attr.get("transcript_id"))
if escape == True:
escape = escape_GTF2
else:
escape = None
return stmp + _make_generic_tokens(
attr, excludes=excludes, join_pat='%s "%s"; ', escape=escape
).strip(" ")
def parse_GFF3_tokens(inp, list_types=None):
"""Helper function to parse tokens in the final column of a `GFF3`_ file
into a dictionary of attributes. Because, the following attributes are
permitted to have multiple values in the `GFF3`_ spec, their values, if present
are returned as lists in the dictionary rather than strings:
- `Parent`
- `Alias`
- `Note`
- `Dbxref`
- `Ontology_term`
    All values are unescaped following the `GFF3`_ specification.
Examples
--------
    >>> tokens = 'a=1;c=3;b=2;e=5;d=4;z=26;Parent=gene01'
    >>> parse_GFF3_tokens(tokens)
    {'a': '1', 'c': '3', 'b': '2', 'e': '5', 'd': '4', 'z': '26', 'Parent': ['gene01']}
    >>> tokens = 'a=1;c=3,7;b=2;e=5;d=4;z=26;Parent=gene01,gene02'
    >>> parse_GFF3_tokens(tokens)
    {'a': '1', 'c': '3,7', 'b': '2', 'e': '5', 'd': '4', 'z': '26', 'Parent': ['gene01','gene02']}
Parameters
----------
inp : str
Ninth column of `GFF3`_ entry
list_types : list, optional
Names of attributes that should be returned as lists
(Default: %s)
Returns
-------
dict : key-value pairs
""" % ",".join(_GFF3_DEFAULT_LISTS)
if list_types is None:
list_types = _GFF3_DEFAULT_LISTS
d = {}
items = inp.strip("\n").strip(";").split(";")
for item in items:
if len(item) > 0:
key, val = item.split("=")
key = unescape_GFF3(key.strip(" "))
if key in list_types:
val = [unescape_GFF3(X) for X in val.strip(" ").split(",")]
else:
val = unescape_GFF3(val.strip(" "))
if key in d:
warn(
"Found duplicate attribute key '%s' in GFF3 line. Catenating value with previous value for key in attr dict:\n %s"
% (key, inp), FileFormatWarning
)
val = "%s,%s" % (d[key], val)
d[key] = val
return d
def parse_GTF2_tokens(inp):
"""Helper function to parse tokens in the final column of a `GTF2`_ file
into a dictionary of attributes. All attributes are returned as strings,
and are unescaped if GFF escape sequences (e.g. *'%2B'*) are present.
If duplicate keys are present (e.g. as in GENCODE `GTF2`_ files),
their values are catenated, separated by a comma.
Examples
--------
>>> tokens = 'gene_id "mygene"; transcript_id "mytranscript";'
>>> parse_GTF2_tokens(tokens)
{'gene_id' : 'mygene', 'transcript_id' : 'mytranscript'}
>>> tokens = 'gene_id "mygene"; transcript_id "mytranscript"'
>>> parse_GTF2_tokens(tokens)
{'gene_id' : 'mygene', 'transcript_id' : 'mytranscript'}
>>> tokens = 'gene_id "mygene;"; transcript_id "myt;ranscript"'
>>> parse_GTF2_tokens(tokens)
{'gene_id' : 'mygene;', 'transcript_id' : 'myt;ranscript'}
>>> tokens = 'gene_id "mygene"; transcript_id "mytranscript"; tag "tag value";'
>>> parse_GTF2_tokens(tokens)
{'gene_id' : 'mygene', 'tag' : 'tag value', 'transcript_id' : 'mytranscript'}
>>> tokens = 'gene_id "mygene"; transcript_id "mytranscript"; tag "tag value"; tag "tag value 2";'
>>> parse_GTF2_tokens(tokens)
{'gene_id' : 'mygene', 'tag' : 'tag value,tag value 2', 'transcript_id' : 'mytranscript'}
Parameters
----------
inp : str
Ninth column of `GTF2`_ entry
Returns
-------
dict : key-value pairs
"""
d = {}
items = shlex.split(inp.strip("\n"))
assert len(items) % 2 == 0
for i in range(0, len(items), 2):
key = unescape_GTF2(items[i])
val = items[i + 1]
# require separation by semicolons for all but final token
if i + 1 < len(items) - 2:
assert val.endswith(";")
if val.endswith(";"):
val = val[:-1]
if key in d:
warn(
"Found duplicate attribute key '%s' in GTF2 line. Catenating value with previous value for key in attr dict:\n %s"
% (key, inp), FileFormatWarning
)
d[key] = "%s,%s" % (d[key], unescape_GTF2(val))
else:
d[key] = unescape_GTF2(val)
return d
```
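A round-trip sketch for the token helpers above; the attribute values are made up, and because `attr` is a plain dict the key order of the generated strings is not guaranteed.
```python
from plastid.readers.gff_tokens import (
    make_GFF3_tokens, make_GTF2_tokens,
    parse_GFF3_tokens, parse_GTF2_tokens,
)

attr = {
    "ID": "gene01",
    "Note": ["first note", "second; note"],   # the semicolon gets %-escaped
    "gene_id": "g1",
    "transcript_id": "t1",
}

gff3_col9 = make_GFF3_tokens(attr)
print(parse_GFF3_tokens(gff3_col9))   # 'Note' comes back as a list

gtf2_col9 = make_GTF2_tokens(attr)    # gene_id / transcript_id written first
print(parse_GTF2_tokens(gtf2_col9))
```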
#### File: test/functional/test_cs.py
```python
import tempfile
import os
from nose.plugins.attrib import attr
from pkg_resources import resource_filename, cleanup_resources
from plastid.test.functional.base import execute_helper
from plastid.test.ref_files import (
COUNT_OPTIONS,
ANNOTATION_OPTIONS,
MASK_OPTIONS,
)
from plastid.bin.cs import main
from plastid.util.services.decorators import catch_stderr
#===============================================================================
# INDEX: global constants used by tests
#===============================================================================
test_info = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.cs",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="cs"),
}
"""Constants used by multiple tests"""
_outbase = os.path.join(test_info["temp_file_path"], "test_cs")
_file_stubs = [
'merged.txt',
'gene.positions',
'transcript.positions',
'gene_cds.bed',
'gene_exon.bed',
'gene_masked.bed',
'gene_utr3.bed',
'gene_utr5.bed',
'transcript_cds.bed',
'transcript_exon.bed',
'transcript_masked.bed',
'transcript_utr3.bed',
'transcript_utr5.bed',
]
#===============================================================================
# INDEX: tests to execute
#===============================================================================
cs_tests = [
(
"generate %s %s %s" % (_outbase, ANNOTATION_OPTIONS, MASK_OPTIONS),
[os.path.join(test_info["ref_file_path"], "gen_cs_" + X) for X in _file_stubs],
[os.path.join(test_info["temp_file_path"], "test_cs_" + X) for X in _file_stubs], [
"--no_header",
"--sort_keys region",
"--sort_keys region",
] + ["--no_header --sort_keys 3"] * 10
),
(
"count %s_gene.positions %s_count %s " % (_outbase, _outbase, COUNT_OPTIONS),
[os.path.join(test_info["ref_file_path"], 'gen_cs_count_gene.txt')],
[os.path.join(test_info["temp_file_path"], "test_cs_count.txt")], ["--sort_keys region"]
),
(
"count %s_gene.positions %s_count %s --sum 1e9" % (_outbase, _outbase, COUNT_OPTIONS),
[os.path.join(test_info["ref_file_path"], 'gen_cs_count_gene_sum_1.txt')],
[os.path.join(test_info["temp_file_path"], "test_cs_count.txt")], ["--sort_keys region"]
),
#("",[],[],[]),
]
"""Functional tests of :py:mod:`plastid.bin.cs`.
Tests are specified as tuples of:
1. Command-line style arguments to pass to :py:func:`main`
2. A list of reference files that output should be compared against
3. A list of output files created by running :py:func:`main`
with the arguments provided in (1)
4. A list of strings specifying how equality should be evaluated
"""
#===============================================================================
# INDEX: test/helper functions
#===============================================================================
@attr(test="functional")
@attr(speed="slow")
def do_test():
"""Perform functional test for :py:mod:`plastid.bin.cs`"""
for x in execute_helper(test_info, cs_tests):
yield x
```
#### File: test/functional/test_metagene.py
```python
import tempfile
import os
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from plastid.test.functional.base import execute_helper
from plastid.test.ref_files import (
RPATH,
REF_FILES,
COUNT_OPTIONS,
ANNOTATION_OPTIONS,
MASK_OPTIONS,
)
from plastid.bin.test_table_equality import main as table_test
from plastid.bin.metagene import main
from plastid.util.services.decorators import catch_stderr
#===============================================================================
# INDEX: global constants used by tests
#===============================================================================
TEST_INFO = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.metagene",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="metagene"),
}
_basename = os.path.join(TEST_INFO["temp_file_path"], "test_metagene")
#===============================================================================
# INDEX: tests
#===============================================================================
tests = [
# test generate cds start
(
"generate %s_cds_start --downstream 100 %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS),
[REF_FILES["yeast_metagene_cds_start"], REF_FILES["yeast_metagene_cds_start_bed"]], [
_basename + "_cds_start_rois.txt",
_basename + "_cds_start_rois.bed",
], ["", "--no_header"]
),
# test generate cds stop
(
"generate %s_cds_stop --upstream 100 --landmark cds_stop %s %s" %
(_basename, ANNOTATION_OPTIONS, MASK_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop"],
REF_FILES["yeast_metagene_cds_stop_bed"],
], [
_basename + "_cds_stop_rois.txt",
_basename + "_cds_stop_rois.bed",
], ["", "--no_header"]
),
# test count cds start with --norm_region
(
"count %s %s_cds_start --keep --norm_region 70 150 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop with --norm_region
(
"count %s %s_cds_stop --keep --norm_region 0 80 %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds start, using --normalize_over
(
"count %s %s_cds_start --keep --normalize_over 20 100 %s" %
(REF_FILES["yeast_metagene_cds_start"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_start_profile"],
REF_FILES["yeast_metagene_cds_start_normcounts"],
REF_FILES["yeast_metagene_cds_start_rawcounts"],
], [
_basename + "_cds_start_metagene_profile.txt",
_basename + "_cds_start_normcounts.txt.gz", _basename + "_cds_start_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
# test count cds stop, using --normalize_over
(
"count %s %s_cds_stop --keep --normalize_over '-100' '-20' %s" %
(REF_FILES["yeast_metagene_cds_stop"], _basename, COUNT_OPTIONS), [
REF_FILES["yeast_metagene_cds_stop_profile"],
REF_FILES["yeast_metagene_cds_stop_normcounts"],
REF_FILES["yeast_metagene_cds_stop_rawcounts"],
], [
_basename + "_cds_stop_metagene_profile.txt", _basename + "_cds_stop_normcounts.txt.gz",
_basename + "_cds_stop_rawcounts.txt.gz"
], ["", "--no_header", "--no_header"]
),
]
"""Functional tests of :py:mod:`plastid.bin.metagene`.
Tests are specified as tuples of:
1. Command-line style arguments to pass to :py:func:`main`
2. A list of reference files that output should be compared against
3. A list of output files created by running :py:func:`main`
with the arguments provided in (1)
4. A list of strings specifying how equality should be evaluated
"""
#===============================================================================
# INDEX: test functions
#===============================================================================
@attr(test="functional")
@attr(speed="slow")
def do_test():
for x in execute_helper(TEST_INFO, tests):
yield x
```
#### File: test/functional/test_psite.py
```python
import tempfile
import os
from nose.plugins.attrib import attr
from pkg_resources import resource_filename, cleanup_resources
from plastid.util.services.decorators import catch_stderr
from plastid.test.functional.base import execute_helper
from plastid.test.ref_files import REF_FILES
from plastid.bin.psite import main
#===============================================================================
# INDEX: global constants
#===============================================================================
test_info = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.psite",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="psite"),
}
_outbase = os.path.join(test_info["temp_file_path"], "test_psite")
#===============================================================================
# INDEX: tests
#===============================================================================
psite_tests = [
(
"%s %s --count_files %s --norm_region 70 150 --require_upstream --min_length 26 --max_length 31"
% (
REF_FILES["yeast_metagene_cds_start"],
_outbase,
REF_FILES["yeast_rp_bam"],
), [REF_FILES["yeast_psite"]], [_outbase + "_p_offsets.txt"], [""]
),
(
"%s %s --count_files %s --norm_region 70 150 --require_upstream --min_length 26 --max_length 31 --constrain 0 11"
% (
REF_FILES["yeast_metagene_cds_start"],
_outbase,
REF_FILES["yeast_rp_bam"],
), [REF_FILES["yeast_psite_constrain"]], [_outbase + "_p_offsets.txt"], [""]
),
# test using --normalize_over instead of --norm_region
(
"%s %s --count_files %s --normalize_over 20 100 --require_upstream --min_length 26 --max_length 31"
% (
REF_FILES["yeast_metagene_cds_start"],
_outbase,
REF_FILES["yeast_rp_bam"],
), [REF_FILES["yeast_psite"]], [_outbase + "_p_offsets.txt"], [""]
),
# test using --normalize_over instead of --norm_region
(
"%s %s --count_files %s --normalize_over 20 100 --require_upstream --min_length 26 --max_length 31 --constrain 0 11"
% (
REF_FILES["yeast_metagene_cds_start"],
_outbase,
REF_FILES["yeast_rp_bam"],
), [REF_FILES["yeast_psite_constrain"]], [_outbase + "_p_offsets.txt"], [""]
),
]
# /home/joshua/projects/plastid/plastid/test/data/command_line/gen_cds_start_rois.txt /tmp/psite5SY5Wc/test_psite --count_files /home/joshua/projects/plastid/plastid/test/data/command_line/gen_reads.bam --norm_region 70 150 --require_upstream --min_length 26 --max_length 31
"""Functional tests of :py:mod:`plastid.bin.psite`.
Tests are specified as tuples of:
1. Command-line style arguments to pass to :py:func:`main`
2. A list of reference files that output should be compared against
3. A list of output files created by running :py:func:`main`
with the arguments provided in (1)
4. A list of strings specifying how equality should be evaluated
"""
#===============================================================================
# INDEX: test/helper functions
#===============================================================================
@attr(test="functional")
@attr(speed="slow")
def do_test():
"""Perform functional test for :py:mod:`plastid.bin.psite`"""
for x in execute_helper(test_info, psite_tests):
yield x
```
#### File: test/functional/test_slidejuncs.py
```python
import tempfile
import shutil
import os
import itertools
import numpy
import copy
import shlex
from nose.tools import assert_set_equal
from nose.plugins.attrib import attr
from pkg_resources import resource_filename, cleanup_resources
from plastid.test.ref_files import REF_FILES
from plastid.util.io.filters import CommentReader
from plastid.readers.bed import BED_Reader
from plastid.genomics.roitools import SegmentChain
from plastid.bin.slidejuncs import main
from plastid.util.services.decorators import catch_stderr, catch_stdout
#===============================================================================
# INDEX: global constants
#===============================================================================
test_info = {
"test_method": catch_stderr()(main),
"module_name": "plastid.bin.slidejuncs",
"ref_file_path": resource_filename("plastid", "test/data/command_line"),
"temp_file_path": tempfile.mkdtemp(prefix="slidejuncs"),
}
OUTBASE = os.path.join(test_info["temp_file_path"], "slidejuncs_%s_%s_%s")
# reference files used
# yapf: disable
FASTA_FILE = REF_FILES["slidejuncs_seqs"]
COMBINED_BED = REF_FILES["slidejuncs_input"]
REF_FILE = REF_FILES["slidejuncs_ref"]
MASK_FILE = REF_FILES["slidejuncs_crossmap"]
# yapf: enable
#===============================================================================
# INDEX: helper functions
#===============================================================================
def get_junction_names_from_file(filename):
"""Return a set of names of splice junctions in a file
Parameters
----------
filename : str
Fully-qualified path to file
Returns
-------
set
Set of splice junction names found in ``filename``
"""
with open(filename) as fh:
return set(
[
X.spanning_segment.chrom
for X in BED_Reader(CommentReader(fh), return_type=SegmentChain)
]
)
def get_junction_names_from_category(cat):
"""Helper function to deduce names of splice junctions/genes in each
junction category from input files
Parameters
----------
cat : str
Category of splice junction. Must be present in ``junction_categories``
Returns
-------
set
Set of junction names in input splice junction category ``cat``
"""
return get_junction_names_from_file(REF_FILES["slidejuncs_%s" % cat])
#===============================================================================
# INDEX: programmatic definition of tests and expected results
#===============================================================================
# query junctions
junction_categories = [
"known_juncs_non_crossmap",
"known_juncs_crossmap",
"to_slide_known_non_crossmap",
"to_slide_known_crossmap",
"noncan_no_ref",
"expected_untouched",
]
"""Categories of splice junctions taken as input in tests below"""
junctions = {K: get_junction_names_from_category(K) for K in junction_categories}
"""Dictionary mapping categories of splice junctions to names of junctions
used in tests below"""
# classification order is repetitive > reference > canonical > untouched
# set up tests as configurations of kwargs
# we'll represent these as 3-tuples, where 1 in a position indicates whether
# or not we'll supply:
# 0. a crossmap
# 1. a reference file
# 2. the --slide_canonical flag
# 3. always value 1 (untouched state)
test_options = list(itertools.product((0, 1), repeat=3))
test_options = [numpy.array(list(X) + [1]) for X in test_options]
base_command_line = " ".join([COMBINED_BED, OUTBASE, "--sequence_file %s" % FASTA_FILE])
switches = [
" --mask_annotation_format BED --mask_annotation_file %s" % MASK_FILE,
" --ref %s" % REF_FILE, " --slide_canonical", ""
]
"""Command-line switches used for building tests"""
labels = ["repetitive", "shifted_known", "shifted_canonical", "untouched"]
"""Labels for various junctions under different test conditions"""
# Classification preferences for each set of query junctions
#
# 1 represents the ability to be classified in a category denoted by position,
#
# 0. repetitive
# 1. reference
# 2. canonical
# 3. untouched
#
# These positions correspond to the test_option positions above in the sense
# that if both test_option[position] and classification_pref[k][position] are 1,
# then junctions in group k can be classified with the label corresponding
# to that position. In practice, the leftmost position in which both bits are 1
# should be chosen by the script
classification_prefs = {
"known_juncs_non_crossmap" : (0, 1, 1, 1),
"known_juncs_crossmap" : (1, 1, 1, 1),
"to_slide_known_non_crossmap" : (0, 1, 1, 1),
"to_slide_known_crossmap" : (1, 1, 1, 1),
"noncan_no_ref" : (0, 0, 1, 1),
"expected_untouched" : (0, 0, 0, 1),
} # yapf: disable
classification_prefs = {K: numpy.array(V) for K, V in classification_prefs.items()}
"""Classification preferences for each category of query junction"""
output_files = {K: OUTBASE + ("_%s.bed" % K) for K in labels}
"""Output files written by each test"""
# tests will be defined as tuples of command-line arguments,
# and dictionaries mapping classifications to splice junctions that
# should fall into that category for a given run
tests = []
for my_opts in test_options:
# build command-line arguments
stmp = base_command_line + " ".join([switches[X] for X in my_opts.nonzero()[0]])
stmp = stmp % (my_opts[0], my_opts[1], my_opts[2])
result_dict = {}
# determine results
for k, v in classification_prefs.items():
label = labels[(v & my_opts).argmax()]
try:
result_dict[label] |= junctions[k]
except KeyError:
result_dict[label] = copy.deepcopy(junctions[k])
tests.append((my_opts, stmp, result_dict))
#===============================================================================
# INDEX: tests
#===============================================================================
def compare_sets(label, found, expected, crossmap, ref, slide):
message = "Failed test %s.\n\tCrossmap: %s\n\tRef: %s\n\tslide: %s\nItems expected but not found: %s\nVice versa: %s\n" % (
label, crossmap, ref, slide, expected - found, found - expected
)
assert_set_equal(expected, found, message)
@attr(test="functional")
def do_test():
for tup, argstr, expected_results in tests:
test_info["test_method"](shlex.split(argstr))
for label, filename in output_files.items():
found = get_junction_names_from_file(filename % (tup[0], tup[1], tup[2]))
expected = expected_results.get(label, set([]))
yield compare_sets, label, found, expected, tup[0], tup[1], tup[2]
def tearDownModule():
"""Remove test dataset files after unit tests are complete"""
if test_info["temp_file_path"] != "":
shutil.rmtree(test_info["temp_file_path"])
cleanup_resources()
```
#### File: unit/bin/test_test_table_equality.py
```python
import unittest
import copy
import numpy
import pandas as pd
from random import shuffle
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from plastid.genomics.roitools import GenomicSegment
# slight hack to keep imported method from being run as a test
# can't use unittest.skip, or else no tests will ever be run!
from plastid.bin.test_table_equality import test_dataframe_equality as checkeq
checkeq.__name__ = "checkeq"
checkeq.__module__ = "checkeq"
# components we will use in equality tests
size = 5000
@attr(test="unit")
class TestTestDataframeEquality(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cols = {
"intA" : numpy.random.randint(0, high=2**16, size=size),
"intB" : numpy.random.randint(-10, high=20, size=size),
"idxA" : numpy.arange(size),
"chrA" : numpy.array([chr(65+(X%(91-65))) for X in range(size)]),
"strA" : numpy.array([str(GenomicSegment("chrA", X, X+500, "+")) for X in range(size)]),
"strB" : numpy.array([str(GenomicSegment("chrB", X/2, X/2+500, "-")) for X in range(size)]),
"floatA" : 10*numpy.random.randn(size) + 500,
"floatB" : (10**-5)*numpy.random.random(size),
"objA" : numpy.tile(None, 5000),
"objB" : numpy.array([GenomicSegment("chrC", X, X+Y, "+") for X, Y in zip(range(size), numpy.random.randint(2, high=1000, size=size))]),
} # yapf: disable
def test_dataframe_equality_when_identical(self):
df1 = pd.DataFrame(self.cols)
self.assertTrue(checkeq(df1, df1))
def test_dataframe_equality_no_sort(self):
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame(copy.deepcopy(self.cols))
self.assertTrue(checkeq(df1, df2))
self.assertTrue(checkeq(df2, df1))
def test_dataframe_equality_within_tol(self):
tol = 10**-8
noiseA = tol / 10**2 * numpy.random.randn(size)
noiseB = tol / 10**2 * numpy.random.randn(size)
df1 = pd.DataFrame(self.cols)
df2 = copy.deepcopy(df1)
df2["floatA"] += noiseA
df2["floatB"] -= noiseB
self.assertTrue(checkeq(df1, df2, tol=tol))
self.assertTrue(checkeq(df2, df1, tol=tol))
def test_dataframe_inequality_above_tol(self):
tol = 10**-8
noiseA = tol * 10**2 * numpy.random.randn(size)
noiseB = tol * 10**2 * numpy.random.randn(size)
df1 = pd.DataFrame(self.cols)
df2 = copy.deepcopy(df1)
df2["floatA"] += noiseA
df2["floatB"] -= noiseB
self.assertFalse(checkeq(df1, df2, tol=tol))
self.assertFalse(checkeq(df2, df1, tol=tol))
def test_dataframe_inequality_wrong_columns(self):
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame({K: copy.deepcopy(self.cols[K]) for K in sorted(self.cols.keys())[:-2]})
self.assertFalse(checkeq(df1, df2))
self.assertFalse(checkeq(df2, df1))
def test_dataframe_inequality_wrong_rows(self):
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame({K: self.cols[K][:size - 1000] for K in self.cols.keys()})
self.assertFalse(checkeq(df1, df2))
self.assertFalse(checkeq(df2, df1))
def check_dataframe_equality_same(self, special_val):
"""Helper function for checking equality when various special values
are in the same location in both dataframes
Parameters
----------
special_val : numpy.nan, numpy.inf, -numpy.inf, or None
Value that is ignored by :py:meth:`plastid.bin.test_dataframe_equality` if it occurs in the same cells of both dataframes
"""
idx = numpy.random.randint(0, high=size, size=500)
tmpcols = copy.deepcopy(self.cols)
tmpcols["floatA"][idx] = special_val
df1 = pd.DataFrame(tmpcols)
df2 = pd.DataFrame(copy.deepcopy(tmpcols))
self.assertTrue(checkeq(df1, df2))
self.assertTrue(checkeq(df2, df1))
def test_dataframe_equality_same_nans(self):
self.check_dataframe_equality_same(numpy.nan)
def test_dataframe_equality_same_infs(self):
self.check_dataframe_equality_same(numpy.inf)
self.check_dataframe_equality_same(-numpy.inf)
def check_dataframe_inequality_different(self, special_val):
"""Helper function for checking inequality when various special values
appear in the different locations in both dataframes
Parameters
----------
special_val : numpy.nan, numpy.inf, -numpy.inf, or None
Value that is ignored by :py:meth:`plastid.bin.test_dataframe_equality` if it occurs in the same cells of both dataframes
"""
idxA = numpy.random.randint(0, high=size, size=500)
idxB = numpy.random.randint(0, high=size, size=500)
colsA = copy.deepcopy(self.cols)
colsA["floatA"][idxA] = special_val
colsB = copy.deepcopy(self.cols)
colsB["floatA"][idxB] = special_val
df1 = pd.DataFrame(colsA)
df2 = pd.DataFrame(colsB)
self.assertFalse(checkeq(df1, df2))
self.assertFalse(checkeq(df2, df1))
def test_dataframe_inequality_different_nans(self):
self.check_dataframe_inequality_different(numpy.nan)
def test_dataframe_inequailty_different_infs(self):
self.check_dataframe_inequality_different(numpy.inf)
self.check_dataframe_inequality_different(-numpy.inf)
def test_dataframe_equality_with_sort_str(self):
shuffidx = numpy.arange(size)
shuffle(shuffidx)
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame({K: self.cols[K][shuffidx] for K in self.cols.keys()})
self.assertTrue(checkeq(df1, df2, sort_columns=["strA"]))
self.assertTrue(checkeq(df2, df1, sort_columns=["strA"]))
def test_dataframe_equality_with_sort_numeric(self):
shuffidx = numpy.arange(size)
shuffle(shuffidx)
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame({K: self.cols[K][shuffidx] for K in self.cols.keys()})
self.assertTrue(checkeq(df1, df2, sort_columns=["idxA"]))
self.assertTrue(checkeq(df2, df1, sort_columns=["idxA"]))
def test_dataframe_equality_with_multi_sort(self):
shuffidx = numpy.arange(size)
shuffle(shuffidx)
df1 = pd.DataFrame(self.cols)
df2 = pd.DataFrame({K: self.cols[K][shuffidx] for K in self.cols.keys()})
self.assertTrue(checkeq(df1, df2, sort_columns=["strB", "chrA"]))
self.assertTrue(checkeq(df2, df1, sort_columns=["strB", "chrA"]))
```
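A hedged sketch of the comparison helper exercised above, using the keyword arguments (`tol`, `sort_columns`) that appear in the tests; the small data frames are made up for illustration:
```python
import pandas as pd
from plastid.bin.test_table_equality import test_dataframe_equality

df1 = pd.DataFrame({"region": ["a", "b"], "counts": [1.0, 2.0]})
df2 = pd.DataFrame({"region": ["b", "a"], "counts": [2.0, 1.0 + 1e-10]})
# rows are matched after sorting on "region"; float differences below `tol`
# are ignored, so this comparison should succeed
assert test_dataframe_equality(df1, df2, tol=1e-8, sort_columns=["region"])
```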
#### File: unit/genomics/test_seqtools.py
```python
import re
import sys
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_set_equal, assert_true
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from plastid.test.ref_files import REF_FILES
from plastid.genomics.seqtools import seq_to_regex, mutate_seqs, random_seq,\
_TwoBitSeqProxy, TwoBitSeqRecordAdaptor
#===============================================================================
# INDEX: unit tests
#===============================================================================
@attr(test="unit")
def test_seq_to_regex():
for flags, inp, outp in SEQ_REGEX:
check = seq_to_regex(inp, flags=flags)
pattern = check.pattern
pflags = check.flags
yield assert_equal, pattern, outp, "seq_to_regex(%s,%s) regex do not match. Expected %s, found %s" % (
inp, flags, outp, pattern
)
yield assert_equal, flags, pflags, "seq_to_regex(%s,%s) flags do not match. Expected %s, found %s" % (
inp, flags, flags, pflags
)
@attr(test="unit")
def test_mutate_seqs():
for inp, nucs, mutations, outp in MUTANTS:
check = mutate_seqs(inp, nucleotides=nucs, mutations=mutations)
yield assert_set_equal, check, outp, "mutate_seqs(%s,%s) did not match expectation. Expected %s, found %s" % (
inp, mutations, outp, check
)
def check_random_seq(length, nucleotides):
check = random_seq(length, nucleotides=nucleotides)
# check length
assert_equal(
length, len(check), "random_chr(%s,%s) length is incorrect. Expected %s, found %s" %
(length, nucleotides, length, len(check))
)
# make sure nucleotides used are a subset of those expected
diff = set(list(check)) - set(nucleotides)
assert_equal(
diff, set(),
"random_chr(%s,%s) found extra nucleotides in result: %s" % (length, nucleotides, diff)
)
@attr(test="unit")
def test_random_chr():
for length, nucs in RANDOM_CHR:
yield check_random_seq, length, nucs
@attr(test="unit")
def testTwoBitSeqProxyFetch():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
seq = g2["chrV"][50:5000]
assert_true(isinstance(seq, SeqRecord))
assert_equal(str(g1["chrV"][50:5000].seq), str(seq.seq))
@attr(test="unit")
def testTwoBitSeqProxyLen():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
for k in g1:
assert_equal(len(g1[k]), len(g2[k]))
@attr(test="unit")
def testTwoBitSeqProxyStr():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
for k in g1:
assert_equal(str(g1[k].seq), str(g2[k]))
@attr(test="unit")
def testTwoBitSeqProxySeqProp():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
for k in g1:
assert_equal(str(g1[k].seq), str(g2[k].seq))
@attr(test="unit")
def testTwoBitSeqProxyRevComp():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
for k in g1:
assert_equal(str(g1[k].reverse_complement().seq), str(g2[k].reverse_complement().seq))
@attr(test="unit")
def testTwoBitSeqRecordAdaptor():
g1 = SeqIO.to_dict(SeqIO.parse(REF_FILES["yeast_fasta"], "fasta"))
g2 = TwoBitSeqRecordAdaptor(REF_FILES["yeast_twobit"])
for k, v in g1.items():
assert_true(isinstance(g2[k], _TwoBitSeqProxy))
#===============================================================================
# INDEX: test data
#===============================================================================
if sys.version_info > (3, ):
default_flag = re.UNICODE
else:
default_flag = 0
SEQ_REGEX = [
(default_flag, "NNNN", "[ACTGU][ACTGU][ACTGU][ACTGU]"), # N->ACTGU
(default_flag, "CCCAGA", "CCCAGA"), # No change
(default_flag, "TCTAGA", "[TU]C[TU]AGA"), # T->TU
(default_flag, "UCUAGA", "[TU]C[TU]AGA"), # U->TU
(default_flag, "ARCGA", "A[AG]CGA"), # R->AG
(default_flag, "YAGAC", "[CTU]AGAC"), # Y->CTU
(default_flag, "SAGAC", "[GC]AGAC"), # S->GC
(default_flag, "WAGAC", "[ATU]AGAC"), # W->AT
(default_flag, "KAGAC", "[GTU]AGAC"),
(default_flag, "MAGAC", "[AC]AGAC"),
(default_flag, "BAGAC", "[CGTU]AGAC"),
(default_flag, "DAGAC", "[AGTU]AGAC"),
(default_flag, "HAGAC", "[ACTU]AGAC"),
(default_flag, "VAGAC", "[ACG]AGAC"),
(default_flag, "SWANKY", "[GC][ATU]A[ACTGU][GTU][CTU]"),
(default_flag | re.IGNORECASE, "KAGAC", "[GTU]AGAC"),
(default_flag | re.DEBUG, "MAGAC", "[AC]AGAC"),
(default_flag | re.MULTILINE, "BAGAC", "[CGTU]AGAC"),
(default_flag | re.DEBUG | re.MULTILINE, "DAGAC", "[AGTU]AGAC"),
(default_flag | re.DOTALL | re.UNICODE, "HAGAC", "[ACTU]AGAC"),
] # yapf: disable
"""Test cases for :py:func:`seq_to_regex`, as tuples of
1. Regex flags
2. Input string
3. Expected regex pattern
"""
MUTANTS = [
("A", "NACTG", 1, {"A", "C", "T", "G", "N"}),
("C", "NACTG", 1, {"A", "C", "T", "G", "N"}),
("T", "NACTG", 1, {"A", "C", "T", "G", "N"}),
("G", "NACTG", 1, {"A", "C", "T", "G", "N"}),
("N", "NACTG", 1, {"A", "C", "T", "G", "N"}),
# 2 mutations
("A", "NACTG", 2, {"A", "C", "T", "G", "N"}),
("AT", "NACTG", 1, {"AT", "AA", "AC", "AG", "AN", "CT", "GT", "TT", "NT"}),
# 2 mutations
(
"AT", "NACTG", 2, {
"AA", "AC", "AT", "AG", "AN", "CA", "CC", "CT", "CG", "CN", "GA", "GC", "GT", "GG",
"GN", "TA", "TC", "TT", "TG", "TN", "NA", "NC", "NT", "NG", "NN"
}
),
(
"AAA", "NACTG", 1,
{"AAA", "AAT", "AAC", "AAG", "AAN", "ATA", "ACA", "AGA", "ANA", "TAA", "CAA", "GAA", "NAA"}
),
(
"AAAA", "NACTG", 1, {
"AAAA",
"AAAT",
"AAAC",
"AAAG",
"AAAN",
"AATA",
"AACA",
"AAGA",
"AANA",
"ATAA",
"ACAA",
"AGAA",
"ANAA",
"TAAA",
"CAAA",
"GAAA",
"NAAA",
}
),
# restricted nucleotides
("AAAA", "CT", 1, {
"AAAA",
"AAAT",
"AAAC",
"AATA",
"AACA",
"ATAA",
"ACAA",
"TAAA",
"CAAA",
}),
# multiple input sequences
(
["A", "AAA"], "NACTG", 1, {
"A", "C", "T", "G", "N", "AAA", "AAT", "AAC", "AAG", "AAN", "ATA", "ACA", "AGA", "ANA",
"TAA", "CAA", "GAA", "NAA"
}
)
]
"""Test cases for :py:func:`mutate_seqs` as tuples of:
1. Input sequence(s)
2. Permitted nucleotide substitutions
3. Number of mutations
4. Expected output
"""
RANDOM_CHR = [
(1, "ACTG"),
(100, "AC"),
(100, "ACTG"),
(10000, "ACTG"),
]
"""Test cases for :py:func:`random_seq` as tuples of:
1. Sequence length
2. Nucleotide composition
"""
```
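A short usage sketch of the three sequence helpers tested above (expected outputs follow from the test data; treat them as illustrative rather than authoritative):
```python
from plastid.genomics.seqtools import seq_to_regex, mutate_seqs, random_seq

pattern = seq_to_regex("TCTAGA")                 # compiled regex "[TU]C[TU]AGA"
print(pattern.search("GGUCUAGAGG") is not None)  # True: U is treated like T
print(mutate_seqs("AT", nucleotides="ACTG", mutations=1))  # set of 1-mismatch variants
print(random_seq(20, nucleotides="ACTG"))        # random 20-mer over A/C/T/G
```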
#### File: unit/readers/test_bigbed.py
```python
import unittest
import copy
import warnings
from random import shuffle
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from nose.tools import assert_almost_equal
from collections import OrderedDict
from plastid.genomics.roitools import SegmentChain, GenomicSegment, Transcript
from plastid.genomics.genome_hash import GenomeHash
from plastid.readers.bed import BED_Reader
from plastid.readers.bigbed import BigBedReader
warnings.simplefilter("ignore", DeprecationWarning)
#===============================================================================
# INDEX: helper functions
#===============================================================================
def tearDownModule():
"""Remove test dataset files after unit tests are complete"""
cleanup_resources()
def transcript_identical(ivc1, ivc2):
"""Test for identity between positions of two Transcripts"""
position_test = ivc1.get_position_set() == ivc2.get_position_set()
strand_test = ivc1.spanning_segment.strand == ivc2.spanning_segment.strand
chrom_test = ivc1.spanning_segment.chrom == ivc2.spanning_segment.chrom
start_test = (ivc1.cds_start is None and ivc2.cds_start is None) or\
(ivc1.cds_start == ivc2.cds_start)
end_test = (ivc1.cds_end is None and ivc2.cds_end is None) or\
(ivc1.cds_end == ivc2.cds_end)
return position_test & strand_test & chrom_test & start_test & end_test
#===============================================================================
# INDEX: test suites
#===============================================================================
@attr(test="unit")
class test_BigBedReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cols = [3, 4, 5, 6, 8, 9, 12]
cls.bedfiles = {}
cls.bbfiles = {}
for col in cls.cols:
cls.bedfiles[col] = resource_filename(
"plastid", "test/data/annotations/100transcripts_bed%s.bed" % col
)
cls.bbfiles[col] = resource_filename(
"plastid", "test/data/annotations/100transcripts_bed%s.bb" % col
)
cls.chrom_sizes = {}
with open(resource_filename("plastid", "test/data/annotations/sacCer3.sizes")) as fh:
for line in fh:
chrom, size = line.strip().split("\t")
cls.chrom_sizes[chrom] = int(size)
cls.bbs = {K: BigBedReader(cls.bbfiles[K], return_type=Transcript) for K in cls.cols}
# comparisons against genome hash
cls.binsize = 10000
with open(cls.bedfiles[12]) as fh:
transcripts = list(BED_Reader(fh, return_type=Transcript))
cls.tx_dict = {}
cls.cds_dict = {}
cls.as_cds_dict = {}
for tx in transcripts:
txid = tx.get_name()
cls.tx_dict[txid] = tx
cds_ivc = tx.get_cds()
cds_ivc.attr["ID"] = txid
if cds_ivc.length > 0:
cls.cds_dict[txid] = tx.get_cds()
cls.as_cds_dict[txid] = tx.get_cds().get_antisense()
cls.as_cds_dict[txid].attr["ID"] = txid
cls.tx_hash = GenomeHash(cls.tx_dict, do_copy=False, binsize=cls.binsize)
cls.cds_hash = GenomeHash(cls.cds_dict, do_copy=False, binsize=cls.binsize)
cls.as_cds_hash = GenomeHash(cls.as_cds_dict, do_copy=False, binsize=cls.binsize)
cls.shuffled_indices = list(range(len(transcripts)))
shuffle(cls.shuffled_indices)
cls.flybbfile = resource_filename(
"plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bb"
)
cls.flybedfile = resource_filename(
"plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bed"
)
# BigBed files with and without extra columns, with and without autoSql descriptions
cls.bb_bonuscols = {
"bb4as":
resource_filename(
"plastid", "test/data/annotations/100transcripts_bed4plus_bonus_as.bb"
),
"bb12as":
resource_filename(
"plastid", "test/data/annotations/100transcripts_bed12plus_bonus_as.bb"
),
"bb4no_as":
resource_filename(
"plastid", "test/data/annotations/100transcripts_bed4plus_bonus_no_as.bb"
),
"bb12no_as":
resource_filename(
"plastid", "test/data/annotations/100transcripts_bed12plus_bonus_no_as.bb"
),
}
cls.bonus_col_file = resource_filename(
"plastid", "test/data/annotations/bonus_bed_columns.txt"
)
# BigBed file with indexes
cls.bb_indexed = resource_filename("plastid", "test/data/annotations/dmel-bonus-cols.bb")
def test_count_records(self):
for _, my_reader in self.bbs.items():
# make sure we have all records
self.assertEqual(my_reader.num_records, 100)
def test_num_chroms(self):
for _, my_reader in self.bbs.items():
self.assertEqual(my_reader.num_chroms, 17)
def test_chrom_sizes(self):
for _, my_reader in self.bbs.items():
for k, v in self.chrom_sizes.items():
self.assertEqual(my_reader.chroms[k], v)
def test_iter_same_as_bed_reader_various_columns(self):
# implicitly tests iterate_over_chunk over all bed files, too
# this tests BigBed equality with various ranges of columns
# and various custom columns
for col in self.cols:
bigbed = self.bbs[col]
with open(self.bedfiles[col]) as fh:
bed = BED_Reader(fh, return_type=Transcript)
for n, (tx1, tx2) in enumerate(zip(bed, bigbed)):
msg = "Transcript mismatch in BigBed file at record %s. Expected '%s'. Got '%s'." % (
n, tx1, tx2
)
self.assertTrue(transcript_identical(tx1, tx2), msg)
self.assertEqual(n, 100 - 1)
def test_iter_same_as_bed_reader_flydata(self):
# test more complex transcript models
# we cast them to lists, sadly, because Python's lexical chromosome sorting
# differs from unix command-line sort; so even though the records are
# in the same order in both files, they are returned with different sorts
flybb = BigBedReader(self.flybbfile, return_type=Transcript)
with open(self.flybedfile) as fh:
flybed = BED_Reader(fh, return_type=Transcript)
for n, (tx1, tx2) in enumerate(zip(flybed, flybb)):
msg = "Transcript mismatch in BigBed file at record %s. Expected '%s'. Got '%s'." % (
n, tx1, tx2
)
self.assertTrue(transcript_identical(tx1, tx2), msg)
self.assertEqual(n, 32682 - 1)
def test_getitem_stranded(self):
"""Test fetching of overlapping features, minding strand
1. Make sure each feature can fetch its own subregion from its own neighborhood
2. Make sure each feature cannot fetch its own antisense subregion
3. Make sure each features fetches exactly the same features as a GenomeHash
"""
# make sure we can fetch each transcript's own CDS
bb = self.bbs[12]
u = 0
for txid, cds in list(self.cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True)
bb_ol_features = bb[cds]
self.assertIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s failed to fetch its own CDS on correct strand" % txid
)
# make sure bb fetch matches GenomeHash fetch
self.assertSetEqual(
set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features])
)
u += 1
self.assertGreater(u, 0)
# make sure we don't fetch each transcript's own antisense CDS
# on opposite strand
for txid, cds in list(self.as_cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True)
bb_ol_features = bb[cds]
self.assertNotIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s fetched its own name on wrong strand!" % txid
)
self.assertSetEqual(
set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features])
)
def test_get_stranded(self):
"""Test fetching of overlapping features, minding strand
1. Make sure each feature can fetch its own subregion from its own neighborhood
2. Make sure each feature cannot fetch its own antisense subregion
3. Make sure each features fetches exactly the same features as a GenomeHash
"""
# make sure we can fetch each transcript's own CDS
bb = self.bbs[12]
u = 0
for txid, cds in list(self.cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True)
bb_ol_features = bb.get(cds, stranded=True)
self.assertIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s failed to fetch its own CDS on correct strand" % txid
)
# make sure bb fetch matches GenomeHash fetch
self.assertSetEqual(
set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features])
)
u += 1
self.assertGreater(u, 0)
# make sure we don't fetch each transcript's own antisense CDS
# on opposite strand
for txid, cds in list(self.as_cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=True)
bb_ol_features = bb[cds]
self.assertNotIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s fetched its own name on wrong strand!" % txid
)
self.assertSetEqual(
set([str(X) for X in gh_ol_features]), set([str(X) for X in bb_ol_features])
)
def test_get_unstranded(self):
"""Test fetching of overlapping features, disregarding strand
1. Make sure each feature can fetch its own subregion from its own neighborhood
2. Make sure each feature can fetch its own antisense subregion
3. Make sure each features fetches exactly the same features as a GenomeHash
"""
# make sure we can fetch each transcript's from its own CDS on same strand
bb = self.bbs[12]
u = 0
for txid, cds in list(self.cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=False)
bb_ol_features = bb.get(cds, stranded=False)
self.assertIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s failed to fetch its own CDS on same strand" % txid
)
# make sure bb fetch matches GenomeHash fetch
self.assertSetEqual(
set([str(X) + X.get_name() for X in gh_ol_features]),
set([str(X) + X.get_name() for X in bb_ol_features])
)
u += 1
self.assertGreater(u, 0)
# make sure we can fetch each transcript's from its own antisense CDS
# on opposite strand
for txid, cds in list(self.as_cds_dict.items())[:100]:
gh_ol_features = self.tx_hash.get_overlapping_features(cds, stranded=False)
bb_ol_features = bb.get(cds, stranded=False)
self.assertIn(
txid, (X.get_name() for X in gh_ol_features),
msg="%s failed to fetched its own name on opposite strand!" % txid
)
s1 = set([str(X) + X.get_name() for X in gh_ol_features])
s2 = set([str(X) + X.get_name() for X in bb_ol_features])
self.assertSetEqual(
s1,
s2,
msg="%s failure:\n Only in first set: %s\n Only in second set: %s" %
(txid, s1 - s2, s2 - s1)
)
def test_return_type(self):
bb = self.bbs[12]
i = iter(bb)
for _ in range(5):
self.assertTrue(isinstance(next(i), Transcript))
ivcbb = BigBedReader(self.bbfiles[12], return_type=SegmentChain)
i = iter(ivcbb)
for _ in range(5):
self.assertTrue(isinstance(next(i), SegmentChain))
def test_get_autosql_str(self):
for k in (4, 12):
bbplus_as = BigBedReader(self.bb_bonuscols["bb%sas" % k])
with open(resource_filename(
"plastid", "test/data/annotations/bed%s_bonus_bed_columns.as" % k)) as fh:
expected_as = fh.read()
self.assertEqual(bbplus_as._get_autosql_str(), expected_as)
def test_get_no_autosql_str(self):
for k in (4, 12):
bbplus_noas = BigBedReader(self.bb_bonuscols["bb%sno_as" % k])
self.assertEqual(bbplus_noas._get_autosql_str(), "")
def test_custom_columns_names_with_autosql(self):
expected = OrderedDict(
[
("my_floats", "some float values"),
("my_sets", "some set options"),
("my_ints", "signed integer values"),
("my_strs", "str representation of transcripts"),
("my_colors", "r,g,b colors"),
]
)
for k in (4, 12):
fn = "bb%sas" % k
bb = BigBedReader(self.bb_bonuscols[fn])
self.assertEqual(bb.extension_fields, expected)
def test_custom_columns_names_without_autosql(self):
expected = OrderedDict(
[
("custom_0", "no description"),
("custom_1", "no description"),
("custom_2", "no description"),
("custom_3", "no description"),
("custom_4", "no description"),
]
)
for k in (4, 12):
fn = "bb%sno_as" % k
bb = BigBedReader(self.bb_bonuscols[fn])
self.assertEqual(bb.extension_fields, expected)
def test_custom_columns_retval_type_with_autosql(self):
values = {
"my_floats": [],
"my_sets": [],
"my_ints": [],
"my_strs": [],
"my_colors": [],
}
with open(self.bonus_col_file) as bfile:
for line in bfile:
items = line.strip("\n").split("\t")
values["my_floats"].append(float(items[0]))
if items[1] == "":
values["my_sets"].append(set())
else:
values["my_sets"].append(set([X.strip() for X in items[1].split(",")]))
values["my_ints"].append(int(items[2]))
values["my_strs"].append(items[3])
values["my_colors"].append(tuple([int(X) for X in items[4].split(",")]))
for k in (4, 12):
fn = "bb%sas" % k
# ignore a Warning caused by trying to turn the BED color field
# to an int- this has to deal with the fact that BedToBigBed wants
# field 9 (itemRgb, typically uint[3]) to be `reserved uint;` in
# autoSql declarations
with warnings.catch_warnings():
#warnings.simplefilter("ignore")
bb = BigBedReader(self.bb_bonuscols[fn])
for n, item in enumerate(bb):
for key in values:
expected = values[key][n]
found = item.attr[key]
msg = "failed test_custom_columns_retval_type_with_autosql at record %s, key %s. Expected '%s'. Got '%s' " % (
n, key, expected, found
)
if isinstance(expected, float):
                            assert_almost_equal(expected, found, msg=msg)
else:
self.assertEqual(expected, found, msg)
def test_custom_columns_retval_type_without_autosql(self):
values = {"custom_%s" % X: copy.deepcopy([]) for X in range(5)}
with open(self.bonus_col_file) as bfile:
for line in bfile:
items = line.strip("\n").split("\t")
values["custom_0"].append(items[0])
values["custom_1"].append(items[1])
values["custom_2"].append(items[2])
values["custom_3"].append(items[3])
values["custom_4"].append(items[4])
for k in (4, 12):
fn = "bb%sno_as" % k
bb = BigBedReader(self.bb_bonuscols[fn])
for n, item in enumerate(bb):
for key in values:
self.assertEqual(values[key][n], item.attr[key])
def test_indexed_fields(self):
reader = BigBedReader(self.bb_indexed)
self.assertEqual(
sorted(["gene_id", "name", "Name", "Alias"]), sorted(reader.indexed_fields)
)
def test_indexed_fields_no_as_no_index(self):
reader = BigBedReader(self.bb_bonuscols["bb12no_as"])
self.assertEqual([], reader.indexed_fields)
def test_indexed_fields_as_no_index(self):
reader = BigBedReader(self.bb_bonuscols["bb4as"])
self.assertEqual([], reader.indexed_fields)
def test_search_fields_invalid_raises_error(self):
reader = BigBedReader(self.bb_indexed)
self.assertRaises(KeyError, reader.search, "garbage_field", "garbage_value")
def test_search_fields_singlevalue(self):
reader = BigBedReader(self.bb_indexed)
found = list(reader.search("name", "should_have_no_match"))
self.assertEqual([], found)
found = list(reader.search("Name", "Sam-S-RE"))
expected = [
SegmentChain(
GenomicSegment('2L', 106902, 107000, '+'),
GenomicSegment('2L', 107764, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RE', 'CG2674-RE']'",
ID='FBtr0089437',
Name='Sam-S-RE',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
]
self.assertEqual(expected, found)
found = list(reader.search("gene_id", "FBgn0005278"))
expected = [
SegmentChain(
GenomicSegment('2L', 106902, 107000, '+'),
GenomicSegment('2L', 107764, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RE', 'CG2674-RE']'",
ID='FBtr0089437',
Name='Sam-S-RE',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107760, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 111337, '+'),
Alias='na',
ID='FBtr0308091',
Name='Sam-S-RK',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='110900',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107760, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111004, 111117, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114210, '+'),
Alias="'['M(2)21AB-RB', 'CG2674-RB']'",
ID='FBtr0089428',
Name='Sam-S-RB',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='112741',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107760, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RA', 'CG2674-RA']'",
ID='FBtr0089429',
Name='Sam-S-RA',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107760, 107956, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias='na',
ID='FBtr0330656',
Name='Sam-S-RL',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='112781',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107936, 108226, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114210, '+'),
Alias="'['M(2)21AB-RH', 'CG2674-RH']'",
ID='FBtr0089432',
Name='Sam-S-RH',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107936, 108101, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RD', 'CG2674-RD']'",
ID='FBtr0089430',
Name='Sam-S-RD',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107936, 108101, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111004, 111117, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RC', 'CG2674-RC']'",
ID='FBtr0089431',
Name='Sam-S-RC',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 108088, 108226, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RF', 'CG2674-RF']'",
ID='FBtr0089433',
Name='Sam-S-RF',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 108132, 108346, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RI', 'CG2674-RI']'",
ID='FBtr0089434',
Name='Sam-S-RI',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 108132, 108226, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111004, 111117, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RJ', 'CG2674-RJ']'",
ID='FBtr0089435',
Name='Sam-S-RJ',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 109593, 109793, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111004, 111117, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114210, '+'),
Alias="'['M(2)21AB-RG', 'CG2674-RG']'",
ID='FBtr0089436',
Name='Sam-S-RG',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='109750',
type='exon'
),
]
self.assertEqual(sorted(expected), sorted(found))
def test_search_fields_multivalue(self):
reader = BigBedReader(self.bb_indexed)
found = list(reader.search("name", "should_have_no_match", "should_also_have_no_match"))
self.assertEqual([], found)
found = list(reader.search("Name", "Sam-S-RE", "Sam-S-RK"))
expected = [
SegmentChain(
GenomicSegment('2L', 106902, 107000, '+'),
GenomicSegment('2L', 107764, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 110877, '+'),
GenomicSegment('2L', 111906, 112019, '+'),
GenomicSegment('2L', 112689, 113369, '+'),
GenomicSegment('2L', 113433, 114432, '+'),
Alias="'['M(2)21AB-RE', 'CG2674-RE']'",
ID='FBtr0089437',
Name='Sam-S-RE',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='113542',
thickstart='108685',
type='exon'
),
SegmentChain(
GenomicSegment('2L', 107760, 107838, '+'),
GenomicSegment('2L', 108587, 108809, '+'),
GenomicSegment('2L', 110405, 110483, '+'),
GenomicSegment('2L', 110754, 111337, '+'),
Alias='na',
ID='FBtr0308091',
Name='Sam-S-RK',
color='#000000',
gene_id='FBgn0005278',
score='0.0',
thickend='110900',
thickstart='108685',
type='exon'
),
]
self.assertEqual(expected, found)
```
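For orientation, a condensed sketch of the `BigBedReader` access patterns the tests above exercise; the file path, chromosome, and field values are placeholders:
```python
from plastid.readers.bigbed import BigBedReader
from plastid.genomics.roitools import GenomicSegment, SegmentChain, Transcript

bb = BigBedReader("annotations.bb", return_type=Transcript)
print(bb.num_records, bb.num_chroms, bb.chroms)   # record count and chromosome sizes
roi = SegmentChain(GenomicSegment("chrI", 0, 100000, "+"))
stranded_hits = bb.get(roi, stranded=True)        # overlap query, same strand only
all_hits = bb.get(roi, stranded=False)            # overlap query, either strand
print(bb.extension_fields, bb.indexed_fields)     # extra BED columns and search indexes
matches = list(bb.search("Name", "Sam-S-RE"))     # requires an indexed field
for transcript in bb:                             # iterate over every record
    break
```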
#### File: unit/readers/test_bigwig.py
```python
import os
import numpy
from nose.tools import assert_less_equal, assert_raises, assert_dict_equal,\
assert_true, assert_equal, assert_almost_equal
from pkg_resources import resource_filename
from plastid.readers.bigwig import BigWigReader
from plastid.readers.wiggle import WiggleReader
from plastid.genomics.roitools import GenomicSegment
from plastid.genomics.genome_array import GenomeArray
from plastid.util.services.decorators import skip_if_abstract
TOL = 1e-5 #tolerance is high because bigwig files are approximate
base_path = resource_filename("plastid", "test/data")
wigfile = os.path.join(base_path, "command_line", "gen_reads_center_12_fw.wig")
bigwigfile = os.path.join(base_path, "command_line", "gen_reads_center_12_fw.bw")
class WildCard(float):
    """Placeholder float that compares equal to anything; used as a stand-in
    for statistics that are not checked exactly (see the disabled summarize
    test below)."""
    def __eq__(self, other):
        return True
    def __ne__(self, other):
        return False
    def __repr__(self):
        return 'any float'
    def __str__(self):
        return 'any float'
wildcard = WildCard()
class AbstractTestBBIFile():
@classmethod
def setUpClass(cls):
cls.bw = BigWigReader(bigwigfile, fill=0)
@skip_if_abstract
def test_chrom_sizes(self):
found = self.bw.chroms
expected = {}
with open(os.path.join(base_path, "annotations", "sacCer3.sizes")) as fh:
for line in fh:
k, v = line.strip().split("\t")
expected[k] = int(v)
# these two happen not to be in the dataset
expected.pop("chrVI")
expected.pop("chrmt")
assert_dict_equal(expected, found)
@skip_if_abstract
def test_no_crash_if_file_not_exist(self):
with assert_raises(IOError) as _:
_ = self.reader_class("non_existant_file")
@skip_if_abstract
def test_sum(self):
assert False
class TestBigWigReader(AbstractTestBBIFile):
@classmethod
def setUpClass(cls):
cls.bw = BigWigReader(bigwigfile, fill=0)
cls.reader_class = BigWigReader
cls.chrdict = {
'chrI': 230218,
'chrII': 813184,
'chrIII': 316620,
'chrIV': 1531933,
'chrV': 576874,
'chrVII': 1090940,
'chrVIII': 562643,
'chrIX': 439888,
'chrX': 745751,
'chrXI': 666816,
'chrXII': 1078177,
'chrXIII': 924431,
'chrXIV': 784333,
'chrXV': 1091291,
'chrXVI': 948066
}
def check_vals_against_wig(self, expected, found):
diff = abs(expected - found)
maxdiff = diff.max()
maxloc = diff.argmax()
msg = "Maximum difference found between BigWig and Wiggle (%s) is at position %s and exceeded tolerance (%s).\n" % (
maxdiff, maxloc, TOL
)
msg += "At that position, expected %s, got %s." % (expected[maxloc], found[maxloc])
assert_less_equal(maxdiff, TOL, msg)
# NOTE: this test relies on WiggleReader being correct
def test_vals_against_wig(self):
ga = GenomeArray()
with open(wigfile) as fin:
ga.add_from_wiggle(fin, "+")
for chrom, length in self.chrdict.items():
seg = GenomicSegment(chrom, 0, length, "+")
expected = ga[seg]
found = self.bw[seg]
yield self.check_vals_against_wig, expected, found
# NOTE: this test relies on WiggleReader being correct
def check_random_windows_against_wig(self, strand):
chrdict = self.chrdict
chroms = list(self.chrdict)
chridx = numpy.random.randint(0, high=len(chroms), size=50)
ga = GenomeArray()
i = 0
with open(wigfile) as fin:
ga.add_from_wiggle(fin, strand)
while i < 50:
chrom = chroms[chridx[i]]
maxlength = chrdict[chrom]
start = numpy.random.randint(0, high=maxlength - 2000)
end = numpy.random.randint(start + 10000, high=start + 20000)
# make sure we don't go off chrom
while end > maxlength:
end = numpy.random.randint(start + 100, high=start + 10000)
seg = GenomicSegment(chrom, start, end, strand)
expected = ga[seg]
# make sure segment has counts in it
if expected.sum() > 0:
i += 1
found = self.bw[seg]
yield self.check_vals_against_wig, expected, found
def test_random_windows_against_wig_fw(self):
self.check_random_windows_against_wig("+")
def test_random_windows_against_wig_rc(self):
self.check_random_windows_against_wig("-")
def test_get_chromosome_counts_zero_fill(self):
ga = GenomeArray()
with open(wigfile) as fin:
ga.add_from_wiggle(fin, "+")
for chrom, length in self.chrdict.items():
seg = GenomicSegment(chrom, 0, length, "+")
expected = ga[seg]
found = self.bw.get_chromosome_counts(chrom)
yield self.check_vals_against_wig, expected, found
def test_fill_val_absent_chrom(self):
filldef = BigWigReader(bigwigfile)
fillnan = BigWigReader(bigwigfile, fill=numpy.nan)
fill0 = BigWigReader(bigwigfile, fill=0)
fill10 = BigWigReader(bigwigfile, fill=10)
# chrVI is not in dataset; this should be an empty array
seg = GenomicSegment("chrVI", 5, 1000, "+")
assert_equal(len(filldef[seg]), len(seg), "fetched wrong size")
# assert_true(numpy.isnan(filldef[seg]).all(),
# "default not nan")
#
# assert_true(numpy.isnan(fillnan[seg]).all(),
# "nanfill didn't work")
assert_true((fill0[seg] == 0).all(), "0-fill didn't work")
# assert_true((fill10[seg] == 10).all(),
# "10-fill didn't work")
def test_fill_val_present_chrom(self):
filldef = BigWigReader(bigwigfile)
fillnan = BigWigReader(bigwigfile, fill=numpy.nan)
fill0 = BigWigReader(bigwigfile, fill=0)
fill10 = BigWigReader(bigwigfile, fill=10)
# empty region
seg = GenomicSegment("chrIV", 5, 10, "+")
assert_equal(len(filldef[seg]), len(seg), "fetched wrong size")
# assert_true(numpy.isnan(filldef[seg]).all(),
# "default not nan")
#
# assert_true(numpy.isnan(fillnan[seg]).all(),
# "nanfill didn't work")
assert_true((fill0[seg] == 0).all(), "0-fill didn't work")
# assert_true((fill10[seg] == 10).all(),
# "10-fill didn't work")
def test_sum(self):
bw = BigWigReader(os.path.join(base_path, "mini", "wig", "bw_fiveprime_15_fw.bw"))
assert_equal(bw.sum(), 4000)
def test_iter(self):
with open(wigfile) as fh:
wig = WiggleReader(fh)
bw = BigWigReader(bigwigfile)
for found in bw:
expected = next(wig)
fchrom = found[0]
echrom = expected[0]
assert_equal(
fchrom, echrom,
"Chromosome mismatch. Expected '%s', found '%s'." % (fchrom, echrom)
)
fstart = found[1]
estart = expected[1]
assert_equal(
fstart, estart, "Start mismatch. Expected '%s', found '%s'." % (fstart, estart)
)
fend = found[2]
eend = expected[2]
assert_equal(fend, eend, "End mismatch. Expected '%s', found '%s'." % (fend, eend))
fval = found[3]
eval_ = expected[3]
diff = abs(fval - eval_)
assert_true(
diff < TOL, "Difference %s exceeds tolerance '%s'. Expected '%s', found '%s'." %
(diff, TOL, fval, eval_)
)
# Disabled until we decide what to do with summarize()
# def test_summarize(self):
# bw = BigWigReader(bigwigfile)
#
# #ga = GenomeArray(bw.chroms)
# #ga.add_from_wiggle(open(wigfile),"+")
#
# chrom = "chrI"
# maxlen = bw.chroms[chrom]
# winstarts = numpy.random.randint(0,maxlen-20000,size=10)
# winends = winstarts + numpy.random.randint(500,40000,size=10)
# winends[winends > maxlen] = maxlen
#
# numtests = 10
# i = 0
# while i < numtests:
# s = numpy.random.randint(0,high=maxlen)
# e = min(maxlen,numpy.random.randint(s+500,s+40000))
# seg = GenomicSegment(chrom,s,e,"+")
# #arr = ga[seg]
# arr = bw[seg]
#
# labels = ["mean","max","min","cov","stdev"]
# expected = [arr.mean(),arr.max(),arr.min(),wildcard,arr.std()]
#
# # change nans to 0
# expected = [0 if numpy.isnan(X) else X for X in expected]
#
# print(expected)
# found = bw.summarize(seg)
# print(found)
# print("---------------")
# for label, exval, fval in zip(labels,expected,found):
# msg = "test_summarize failed for stat '%s'. Expected %s, got %s (diff: %s)." % (label,exval,fval,abs(exval-fval))
# assert_almost_equal(exval,fval,msg=msg,delta=5)
#
# i += 1
#
# # retval for summarize: (mean,max_,min_,cov,stdev)
```
#### File: util/io/test_filters.py
```python
import unittest
from nose.plugins.attrib import attr
from plastid.util.services.mini2to3 import cStringIO
from plastid.util.io.filters import SkipBlankReader, CommentReader, FunctionReader
#===============================================================================
# INDEX: test cases
#===============================================================================
@attr(test="unit")
class TestSkipBlankReader(unittest.TestCase):
"""TestCase for :py:class:`SkipBlankReader`"""
def get_stream(self):
return SkipBlankReader(cStringIO.StringIO(_NL_TEXT))
def test_read(self):
fin = self.get_stream()
lines = fin.read().strip().split("\n")
self.assertEqual(len(lines), 30)
for line in lines:
self.assertNotEqual(line.strip(), "")
fin.close()
def test_readlines(self):
lines = self.get_stream().readlines()
self.assertEqual(len(lines), 30)
for line in lines:
self.assertNotEqual(line.strip(), "")
def test_iter(self):
for n, line in enumerate(self.get_stream()):
self.assertNotEqual(line.strip(), "")
self.assertEqual(n, 29)
@attr(test="unit")
class TestCommentReader(TestSkipBlankReader):
"""TestCase for :py:class:`CommentReader`"""
def get_stream(self):
return CommentReader(cStringIO.StringIO(_COMMENT_TEXT))
def test_get_comments(self):
reader = self.get_stream()
comments = reader.get_comments()
# comments should not be populated until we have read
self.assertEquals(len(comments), 0)
reader.read()
# comments should be found afterwards
self.assertEquals(len(comments), 5)
for comment in comments:
self.assertTrue(comment.startswith("#"))
@attr(test="unit")
class TestFunctionReader(unittest.TestCase):
def test_via_backwards(self):
reader = FunctionReader(cStringIO.StringIO(_NL_TEXT), lambda x: x[::-1])
for line1, line2 in zip(reader, cStringIO.StringIO(_NL_TEXT)):
self.assertEqual(line1, line2[::-1])
def test_via_upper(self):
reader = FunctionReader(cStringIO.StringIO(_NL_TEXT), str.upper)
for line1, line2 in zip(reader, cStringIO.StringIO(_NL_TEXT)):
self.assertEqual(line1, line2.upper())
#===============================================================================
# INDEX: test data
#===============================================================================
_NL_TEXT = """1 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi vitae ipsum vel
2 nisi dapibus dapibus in vel neque. Proin nec consectetur arcu. Praesent
3 convallis venenatis metus quis elementum. Ut elementum justo ac faucibus
4 efficitur. Nam at ornare elit. Sed pulvinar mi sapien, sed faucibus risus
5 cursus at. Nulla sit amet posuere ex, sit amet convallis ante. Integer
6 imperdiet purus nec ante pretium fringilla. Vivamus a ligula tristique,
7 sodales elit et, fringilla ligula. Nulla facilisi. Nunc sed enim non ligula
8 commodo blandit. Sed ultricies quis urna vel imperdiet. Curabitur leo nisi,
9 faucibus sed mauris ut, bibendum pretium nibh.
10 Cras libero quam, scelerisque ut suscipit ac, pellentesque non sapien. Vivamus
11 eu tristique nisi. Sed luctus molestie mollis. Praesent dapibus tincidunt
12 pretium. Sed faucibus vestibulum est, ac mollis libero dapibus in. Phasellus
13 nec euismod sapien. Donec faucibus orci sem, vitae hendrerit orci sodales non.
14 Sed facilisis erat at erat semper facilisis. Proin at orci et ligula mattis
15 condimentum. Aenean tincidunt, nunc sit amet malesuada scelerisque, nisl augue
16 imperdiet lectus, et sollicitudin quam quam nec massa. Morbi volutpat nulla et
17 erat porta tempus.
18 Nulla placerat ipsum elit, at vestibulum urna pellentesque a. Nunc bibendum
19 convallis orci, vel euismod tellus commodo ullamcorper. Vestibulum vestibulum
20 lobortis tempor. Maecenas non nisi aliquet lorem tincidunt varius sed aliquam
21 est. Vivamus egestas, nisi sed laoreet interdum, nisi tellus egestas erat, sit
22 amet ultricies metus orci sed orci. Aenean at lectus viverra, venenatis mauris
23 sed, ultricies augue. Sed ac purus vitae dui tempus pulvinar. Duis tincidunt
24 nisi sit amet purus laoreet placerat. Quisque nulla orci, vestibulum nec metus
25 eu, convallis consequat libero. Maecenas sed ultrices tellus, non commodo
26 lectus. Suspendisse non diam eget purus imperdiet aliquam a sed libero.
27 Quisque eu venenatis tellus. Aliquam ornare purus non ante imperdiet interdum.
28 Cras pretium, diam ac sagittis luctus, diam metus scelerisque libero, sit amet
29 cursus erat nulla ut augue. Ut est tortor, faucibus non nisi a, gravida
30 fermentum tortor.
"""
_COMMENT_TEXT = """1 Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi vitae ipsum vel
2 nisi dapibus dapibus in vel neque. Proin nec consectetur arcu. Praesent
3 convallis venenatis metus quis elementum. Ut elementum justo ac faucibus
4 efficitur. Nam at ornare elit. Sed pulvinar mi sapien, sed faucibus risus
5 cursus at. Nulla sit amet posuere ex, sit amet convallis ante. Integer
6 imperdiet purus nec ante pretium fringilla. Vivamus a ligula tristique,
7 sodales elit et, fringilla ligula. Nulla facilisi. Nunc sed enim non ligula
8 commodo blandit. Sed ultricies quis urna vel imperdiet. Curabitur leo nisi,
9 faucibus sed mauris ut, bibendum pretium nibh.
# Here is comment #1
10 Cras libero quam, scelerisque ut suscipit ac, pellentesque non sapien. Vivamus
11 eu tristique nisi. Sed luctus molestie mollis. Praesent dapibus tincidunt
12 pretium. Sed faucibus vestibulum est, ac mollis libero dapibus in. Phasellus
13 nec euismod sapien. Donec faucibus orci sem, vitae hendrerit orci sodales non.
14 Sed facilisis erat at erat semper facilisis. Proin at orci et ligula mattis
15 condimentum. Aenean tincidunt, nunc sit amet malesuada scelerisque, nisl augue
16 imperdiet lectus, et sollicitudin quam quam nec massa. Morbi volutpat nulla et
17 erat porta tempus.
# This is comment #2
18 Nulla placerat ipsum elit, at vestibulum urna pellentesque a. Nunc bibendum
19 convallis orci, vel euismod tellus commodo ullamcorper. Vestibulum vestibulum
20 lobortis tempor. Maecenas non nisi aliquet lorem tincidunt varius sed aliquam
21 est. Vivamus egestas, nisi sed laoreet interdum, nisi tellus egestas erat, sit
22 amet ultricies metus orci sed orci. Aenean at lectus viverra, venenatis mauris
23 sed, ultricies augue. Sed ac purus vitae dui tempus pulvinar. Duis tincidunt
# This is comment #3
24 nisi sit amet purus laoreet placerat. Quisque nulla orci, vestibulum nec metus
25 eu, convallis consequat libero. Maecenas sed ultrices tellus, non commodo # this is an end-of-line comment, which should not be removed
26 lectus. Suspendisse non diam eget purus imperdiet aliquam a sed libero.
# This is comment #4
27 Quisque eu venenatis tellus. Aliquam ornare purus non ante imperdiet interdum.
28 Cras pretium, diam ac sagittis luctus, diam metus scelerisque libero, sit amet
29 cursus erat nulla ut augue. Ut est tortor, faucibus non nisi a, gravida
30 fermentum tortor.
# This is comment #5"""
```
#### File: util/services/test_decorators.py
```python
import sys
import os
import fcntl
import types
import warnings
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_greater_equal, assert_raises, assert_set_equal, assert_not_equal
from plastid.util.services.decorators import (
notimplemented,
deprecated,
parallelize,
in_separate_process,
catch_stderr,
catch_stdout,
catch_warnings,
)
#===============================================================================
# INDEX: functions and classes that will be decorated
#===============================================================================
def stderr_func(msg):
sys.stderr.write(msg)
return msg
def stdout_func(msg):
sys.stdout.write(msg)
return msg
def util_func(x):
"""Square numbers and return process in which function was run
Parameters
----------
x : int or float
Returns
-------
int or float
Squared value of ``x``
int
Process ID in which function was run
"""
return x**2, os.getpid()
def func_that_warns():
warnings.warn("Some warning", UserWarning)
def get_pipes():
readfd, writefd = os.pipe()
fcntl.fcntl(readfd, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(writefd, fcntl.F_SETFL, os.O_NONBLOCK)
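    # Non-blocking mode means a read on an empty pipe fails fast (IOError on
    # Python 2, TypeError on Python 3 -- see EMPTY_BUFFER_ERROR below)
    # instead of blocking the test.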
return os.fdopen(readfd, "r"), os.fdopen(writefd, "w")
class UtilClass(object):
def __init__(self, tmp):
self.name = tmp
def get_name(self):
return self.name
#===============================================================================
# INDEX: unit tests
#===============================================================================
# stdout/err redirection -------------------------------------------------------
STDERR_FD = sys.stderr.fileno()
if sys.version_info[0] == 2:
EMPTY_BUFFER_ERROR = IOError
else:
# Python 3.x codecs returns None from empty non-blocking buffers
# which causes a TypeError to be raised instead of
# the IOError raised in Python 2.x
EMPTY_BUFFER_ERROR = TypeError
@attr(test="unit")
def test_catch_stderr_doesnt_print_without_buffer():
# spy on `inner` by making sure there is nothing written to stderr
outer_reader, outer_writer = get_pipes()
message = "this is a test"
sys.stderr = sys.__stderr__
@catch_stderr(outer_writer)
def inner():
# make sure value is returned from wrapped function
wrapped = catch_stderr()(stderr_func)
msg = wrapped(message)
assert_equal(msg, message)
inner()
outer_writer.flush()
outer_writer.close()
# make sure no message made it out of `inner`
# with nothing to read in pipe, outer_reader should raise IOError
assert_raises(EMPTY_BUFFER_ERROR, outer_reader.read)
outer_reader.close()
@attr(test="unit")
def test_catch_stderr_doesnt_print_with_buffer_but_catches_in_buffer():
# spy on `inner` by making sure there is nothing written to stderr
    # but make sure message is found in inner reader
outer_reader, outer_writer = get_pipes()
message = "this is a test"
sys.stderr = sys.__stderr__
@catch_stderr(outer_writer)
def inner():
inner_reader, inner_writer = get_pipes()
wrapped = catch_stderr(inner_writer)(stderr_func)
# make sure value is returned from wrapped function
msg = wrapped(message)
assert_equal(message, msg)
# make sure we caught entire message from stderr
inner_writer.flush()
inner_writer.close()
assert_equal(message, inner_reader.read())
inner_reader.close()
inner()
outer_writer.flush()
outer_writer.close()
# make sure no message made it out of `inner`
# with nothing to read in pipe, outer_reader should raise IOError
assert_raises(EMPTY_BUFFER_ERROR, outer_reader.read)
outer_reader.close()
# catch warnings ------------------------------------------------------------
@attr(test="unit")
def test_catch_warnings_catches_warnings():
ign_func = catch_warnings("ignore")(func_that_warns)
num = 5
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
for x in range(num):
func_that_warns()
# make sure warning is issued with vanilla function
assert_equal(len(warns), num)
num = 5
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
for x in range(num):
ign_func()
# make sure warning is caught with wrapped function
assert_equal(len(warns), 0)
# notimplemented ------------------------------------------------------------
@attr(test="unit")
def test_notimplemented_raises_exception():
my_func = notimplemented(util_func)
assert_true(isinstance(my_func, types.FunctionType))
assert_raises(NotImplementedError, my_func, 5)
# deprecated ----------------------------------------------------------------
@attr(test="unit")
def test_deprecated_function_raises_warning_only_once():
num = 5
my_func = deprecated(util_func)
assert_true(isinstance(my_func, types.FunctionType))
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
for x in range(num):
assert_equal(my_func(x), util_func(x))
my_func(x)
# make sure warning is issued only once
assert_equal(len(warns), 1)
@attr(test="unit")
def test_deprecated_class_raises_warning():
reg_obj = UtilClass("my_object")
dep_class = deprecated(UtilClass)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
dep_obj = dep_class("my_object")
assert_true(isinstance(dep_obj, UtilClass))
# make sure warning is issued
assert_equal(len(warns), 1)
# make sure wrapped class behaves as it should
assert_equal(reg_obj.get_name(), dep_obj.get_name())
# parallelize or in other processes --------------------------------------------
@attr(test="unit")
def test_parallelize_spawns_processes_and_gets_correct_answer():
x = range(500)
my_func = parallelize(util_func)
outer_vals, outer_pids = zip(*[util_func(X) for X in x])
inner_vals, inner_pids = zip(*my_func(x))
assert_set_equal(set(outer_vals), set(inner_vals))
assert_not_equal(set(outer_pids), set(inner_pids))
@attr(test="unit")
def test_in_separate_process_spawns_process_and_gets_correct_answers():
my_func = in_separate_process(util_func)
assert_true(isinstance(my_func, types.FunctionType))
for x in range(100):
res_out, outer_pid = my_func(x)
res_in, inner_pid = util_func(x)
assert_equal(res_out, res_in)
assert_not_equal(outer_pid, inner_pid)
```
#### File: util/services/test_misc.py
```python
import numpy
import unittest
from nose.plugins.attrib import attr
from plastid.util.services.misc import guess_formatter, number
@attr(test="unit")
class TestMisc(unittest.TestCase):
def setUp(self):
self.tests = [
("nan", numpy.nan),
("Nan", numpy.nan),
("NaN", numpy.nan),
("None", numpy.nan),
("none", numpy.nan),
("Inf", numpy.inf),
("inf", numpy.inf),
("-Inf", -numpy.inf),
("-inf", -numpy.inf),
("5", 5),
("5.0", 5),
("-5", -5),
("-5.0", -5),
("0", 0),
("5.1", 5.1),
("-5.1", -5.1),
("a5", "a5"),
("5a", "5a"),
("-5.1a", "-5.1a"),
("some_string", "some_string"),
("True", True),
("False", False),
("true", True),
("false", False),
("True5", "True5"),
("5True", "5True"),
("1e10", 1e10),
("1e-10", 1e-10),
("-1e10", -1e10),
("-1e-10", -1e-10),
]
def test_guess_formatter(self):
"""Test parsing of strings to floats, ints, nans, infs, bools, and strings"""
for source, dest in self.tests:
if isinstance(dest, float):
if not numpy.isnan(dest):
self.assertEquals(guess_formatter(source), dest)
else:
self.assertTrue(numpy.isnan(guess_formatter(source)))
else: #if isinstance(dest,int):
self.assertEquals(guess_formatter(source), dest)
def test_number(self):
"""Test numerical parsing of strings to floats, ints, nans, and infs"""
for source, dest in self.tests:
if isinstance(dest, bool):
pass
elif isinstance(dest, float):
if not numpy.isnan(dest):
self.assertEquals(number(source), dest)
else:
self.assertTrue(numpy.isnan(number(source)))
elif isinstance(dest, int):
self.assertEquals(number(source), dest)
else:
self.assertRaises(ValueError, number, source)
```
#### File: util/services/misc.py
```python
import numpy
def guess_formatter(inp):
"""Guesses the format of input, trying `bool`, `int`, `float`, then `str`.
Correctly parses `nan`s and `Inf`s. Converts `None` to `nan`
Parameters
----------
inp : str
input
Returns
-------
boolean, number, or string
"""
if inp.lower() == "true":
return True
elif inp.lower() == "false":
return False
else:
try:
return number(inp)
except ValueError:
return str(inp)
def number(inp):
"""Parses numbers from strings, preferring int over float.
Parses `nan`, `Nan`, `None`, `none`, `inf`, and `-inf`
Parameters
----------
inp : str
string input
Returns
-------
    int, float, numpy.nan, numpy.inf, or -numpy.inf
Raises
------
ValueError
if `inp` cannot be converted to a number
"""
if inp in ("nan", "NaN", "na", "None", "none"):
return numpy.nan
elif inp in ("inf", "Inf"):
return numpy.inf
elif inp in ("-inf", "-Inf"):
return -numpy.inf
else:
try:
# note: in python bools are also ints!
# isinstance(True,int) == True
val = int(inp)
except ValueError:
val = float(inp)
return val
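# Illustrative usage sketch (added for clarity; the values follow from the
# code above and are not part of the original module):
#
#     guess_formatter("True")   # -> True
#     guess_formatter("5")      # -> 5
#     guess_formatter("5.1")    # -> 5.1
#     guess_formatter("nan")    # -> numpy.nan (via number())
#     guess_formatter("5a")     # -> "5a" (falls back to str)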
``` |
{
"source": "JoshuaGud777/AppleQuest",
"score": 3
} |
#### File: AppleQuest/webapp/game.py
```python
if __name__ == '__main__':
import library as lib
else:
import webapp.library as lib
import cgitb
cgitb.enable()
def html_print(value='', print_data='', newcookie=''):
    '''Prints the HTML to the client with variables
$$ printdata $$
$$ command $$
$$ newcookie $$
$$ oldcookie $$'''
html = lib.get_html(lib.HTML_DIR + 'game.html')
if True:
html = html.replace('$$oldcookie$$$', cookie_read())
else:
html = html.replace('$$oldcookie$$$', '')
if newcookie != '':
html = html.replace('$$newcookie$$$', str(newcookie))
else:
html = html.replace('$$newcookie$$$', '')
if print_data != '':
html = html.replace('$$printdata$$', print_data)
else:
html = html.replace('$$printdata$$', '')
if value != '':
html = html.replace('$$command$$', value)
else:
html = html.replace('$$command$$', '')
if True is not False:
html = html.replace('$$printcommand$$', '')
else:
print('This is not the case you are looking for!')
print('P.S. The world is about to end!!!')
print(html)
def cookie_read():
    '''Reads the cookies sent in the request headers and prints them back
to the client'''
cookie = lib.get_cookies()
if cookie is None:
return 'No saved Cookies'
else:
return str(cookie)
def main():
'''main'''
lib.open_conn(lib.DB_DIR + 'AppleQuest.db')
print_data = ''
newcookie = ''
cookie_read()
form = lib.get_cgi_data()
command = form.getfirst("command")
renew = form.getfirst("newid")
if command is None:
command = ''
elif type(command) != str:
command = str(command)
if renew == 'true':
cookies = lib.get_cookies()
sessioninfo = lib.renew_session_id(cookies['id'].value,
cookies['username'].value)
if type(sessioninfo) == str or sessioninfo is False:
print_data += 'Could not renew\n'
else:
newcookie = lib.cookie_wright(sessioninfo[0], sessioninfo[1],
sessioninfo[2],)
if command == '301':
print_data += '103\n'
elif command == '302':
print_data += '203\n'
elif command == '303':
print_data += '303\n'
else:
print_data += '003\n'
lib.print_header(newcookie)
html_print(command, print_data, newcookie)
lib.save_close_conn()
if __name__ == '__main__':
main()
```
#### File: AppleQuest/webapp/library.py
```python
import binascii
import cgi
import hashlib
import http.cookies
import os
import sqlite3
import time
# Global Variables
HTML_DIR = 'html\\'
REDIRECT_DIR = 'redirect\\'
DB_DIR = 'db17b1a5c2b2f6d370af2c59c885d5db\\'
# COOKIE_MAX_AGE = 300
# COOKIE_DOMAIN = 'applequest.fallenofftheedge.com'
COOKIE_PATH = '/'
conn = None
c = None
def open_conn(database): # Function 1
    '''Open SQL Connection to a given sqlite database'''
global conn
global c
conn = sqlite3.connect(database)
c = conn.cursor()
def save_conn(): # Function 2
    '''Saves the conn'''
conn.commit()
def save_close_conn(): # Function 3
'''Saves and closes the conn'''
conn.commit()
conn.close()
def close_conn(): # Function 4
'''Closes the database conn'''
conn.close()
def add_user(username, pword, email=None): # Function 5
    '''For a given username and password and maybe an e-mail, adds the user to
    the database. If the user is already there then it returns False; if it
    was added to the database it returns True'''
display = username[:]
username = username.lower()
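    # Password storage scheme used below: a fresh 64-byte random salt is
    # hex-encoded, appended to the UTF-8 encoded password, and the result is
    # hashed with SHA-512; the hex digest and the salt are stored in the
    # logon table together with the username, display name and e-mail.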
salt = binascii.hexlify(os.urandom(64)) # 64 bytes = 512 bits
utf8pword = pword.encode("utf-8")
utf8pword_salt = utf8pword + salt
hashed_salted_password = hashlib.sha512(utf8pword_salt)
enchexpass = hashed_salted_password.hexdigest()
try:
c.execute("INSERT INTO logon VALUES (?, ?, ?, ?, ?)", (username,
display,
enchexpass,
salt, email))
except:
return False
return True
def issue_session_id(username, pword): # Function 6
    '''issues a session id for a given username, checks the user and pass
    against the db then sends back a session id, exp, and the username it was
    issued against | noauth means the username and password are wrong |
    sqlerror means the server is having issues'''
username = username.lower()
authuser = check_user(username, pword)
if authuser is True:
sqlretry = 0
sqlgood = False
while sqlgood is False:
# c.execute("SELECT * FROM logon WHERE username = ?", [username])
# dbdata = c.fetchone()
# db_username = dbdata[0]
# db_display = dbdata[1]
exp = int(time.time()) + 300
# seconds till this is expired | 300 = 5 min | 1 = 1 sec
sessionid = binascii.hexlify(os.urandom(16)).decode("utf-8")
try:
c.execute("DELETE FROM sessions WHERE username = ?",
[username])
c.execute("INSERT INTO sessions VALUES (?, ?, ?)",
[sessionid, exp, username])
sqlgood = True
except:
sqlretry += 1
if sqlretry == 10:
return ('sqlerror', 'sqlerror', 'sqlerror')
save_conn()
return (sessionid, exp, username)
return ('noauth', 'noauth', 'noauth')
def renew_session_id(old_id, username): # Function 7
    '''given the old session id and username it checks that the session is
    still good, then sends a new one if OK; else it sends out a "sqlerror" in
    the case the server is erroring and an "expired" if the session is old'''
username = username.lower()
c.execute("SELECT * FROM sessions WHERE username = ? AND id = ?",
[username, old_id])
dbdata = c.fetchone()
if dbdata is None:
return False
db_exp = int(dbdata[1])
if int(time.time()) > db_exp:
return 'expired'
elif int(time.time()) <= db_exp:
sqlgood = False
sqlretry = 0
while sqlgood is False:
exp = int(time.time()) + 300
# seconds till this is expired | 300 = 5 min | 1 = 1 sec
sessionid = binascii.hexlify(os.urandom(16)).decode("utf-8")
try:
c.execute("DELETE FROM sessions WHERE username = ?",
[username])
c.execute("INSERT INTO sessions VALUES (?, ?, ?)",
[sessionid, exp, username])
sqlgood = True
except:
sqlretry += 1
if sqlretry == 10:
return 'sqlerror'
save_conn()
return (sessionid, exp, username)
def delete_session(sessionid, username): # Function 8
'''deletes a session from the database in the case the client wants to
"logoff"'''
username = username.lower()
c.execute("SELECT * FROM sessions WHERE username = ? OR id = ?",
[username, sessionid])
dbdata = c.fetchone()
if dbdata is None:
return False
c.execute("DELETE FROM sessions WHERE username = ? OR id = ?",
[username, sessionid])
save_conn()
return True
def check_user(username, pword):
    '''checks the username and password against the database loaded with the
    open_conn(); returns True if they are correct'''
username = username.lower()
c.execute("SELECT username, password, salt FROM logon WHERE username = ?",
[username])
dbdata = c.fetchone()
if dbdata is None:
return None
enchexpassdb = dbdata[1]
salt = dbdata[2]
utf8pword = pword.encode('utf8')
utf8pword_salt = utf8pword + salt
hashed_salted_password = hashlib.sha512(utf8pword_salt)
enchexpass = hashed_salted_password.hexdigest()
if slow_equal(enchexpassdb, enchexpass):
return True
else:
return False
def slow_equal(hexstrA, hexstrB): # Function 9
    '''TODO : make the compare bit for bit in binary using XNOR OR SOMETHING
    Instead of comparing the strings with == it checks each part one at a
    time; this makes it slower and therefore harder to crack.'''
length = 0
errors = 0
a = ''.join(format(ord(char), 'b') for char in hexstrA)
b = ''.join(format(ord(char), 'b') for char in hexstrB)
if len(a) == len(b):
length = len(a)
else:
time.sleep(1)
length = 0
errors = 1000
for i in range(length):
errors += int(a[i]) ^ int(b[i])
if errors == 0:
return True
else:
return False
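# Note (added for clarity): the standard library already provides a
# constant-time comparison, so a simpler equivalent of slow_equal could look
# like the sketch below (assuming both inputs are ASCII hex digest strings):
#
#     import hmac
#
#     def slow_equal(hexstrA, hexstrB):
#         return hmac.compare_digest(hexstrA, hexstrB)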
def cookie_wright(sessionid, exp, username): # Function 10
    '''given the input data it returns a session cookie meant to be placed in
    the print_header function to send to the client'''
cookie = http.cookies.BaseCookie()
cookie['id'] = sessionid
cookie['exp'] = exp
cookie['username'] = username
for morsel in cookie:
# cookie[morsel]['max-age'] = COOKIE_MAX_AGE
# cookie[morsel]['domain'] = COOKIE_DOMAIN
cookie[morsel]['path'] = COOKIE_PATH
return cookie
def get_cookies(): # Function 11
    '''returns a cookie object of the request header sent to the server from
the client'''
cookie = http.cookies.BaseCookie()
if 'HTTP_COOKIE' in os.environ:
cookie.load(os.environ['HTTP_COOKIE'])
return cookie
return None
def print_header(cookie=''): # Function 12
'''Prints the standard HTTP header needed by CGI along with any cookie data
sent to the function - cookie must be a cookie object'''
print('Content-type: text/html')
print('Status: 200 OK')
print(cookie)
if not cookie == '':
print()
def get_html(filepath): # Function 13
'''For the given path it returns a str of all the data in that file.
\n and all'''
file = open(filepath)
txt = file.read()
return txt
def print_me(filename): # Function 14
'''prints file to screen - use for redirects'''
file = open(filename)
txt = file.read()
print(txt)
def get_cgi_data(): # Function 15
    '''gets the cgi data from the last form the client submitted'''
cgidata = cgi.FieldStorage()
return cgidata
``` |
{
"source": "joshuaguite/cloudpassage-halo-python-sdk",
"score": 3
} |
#### File: cloudpassage-halo-python-sdk/cloudpassage/alert_profile.py
```python
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
from .utility import Utility as utility
class AlertProfile(HaloEndpoint):
"""Initializing the AlertProfile class:
Filtering options for :func:`AlertProfile.list_all()` can be passed in as
keyword arguments. Valid filters can be found at
https://api-doc.cloudpassage.com/help#list-alert-profiles.
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
"""
object_name = "alert_profile"
objects_name = "alert_profiles"
@classmethod
def endpoint(cls):
"""Return endpoint for API requests."""
return "/v1/%s" % AlertProfile.objects_name
@classmethod
def object_key(cls):
"""Return the key used to pull the policy from the json document."""
return AlertProfile.object_name
@classmethod
def pagination_key(cls):
"""Return the pagination key for parsing paged results."""
return AlertProfile.objects_name
def create(self, policy_body):
"""Create a policy from JSON document.
Returns the ID of the new policy
"""
request = HttpHelper(self.session)
request_body = utility.policy_to_dict(policy_body)
return request.post(self.endpoint(),
request_body)["id"]
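# Illustrative usage sketch (added for clarity; key_id/secret_key are
# placeholders, and list_all() is inherited from HaloEndpoint as referenced
# in the class docstring above):
#
#     session = cloudpassage.HaloSession(key_id, secret_key)
#     profiles = cloudpassage.AlertProfile(session).list_all()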
```
#### File: tests/integration/test_integration_firewall_policy.py
```python
import cloudpassage
import json
import os
policy_file_name = "firewall.json"
config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
policy_file = os.path.join(tests_dir, 'policies/', policy_file_name)
session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
api_port = session_info.api_port
with open(policy_file, 'r') as p_file:
firewall_policy_body = p_file.read().replace('\n', '')
def create_firewall_policy_object():
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
firewall_policy_object = cloudpassage.FirewallPolicy(session)
return firewall_policy_object
def create_firewall_rule_object():
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
firewall_rule_object = cloudpassage.FirewallRule(session)
return firewall_rule_object
def create_firewall_zone_object():
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
firewall_zone_object = cloudpassage.FirewallZone(session)
return firewall_zone_object
def create_firewall_service_object():
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
firewall_service_object = cloudpassage.FirewallService(session)
return firewall_service_object
def create_firewall_interface_object():
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
firewall_interface_object = cloudpassage.FirewallInterface(session)
return firewall_interface_object
def get_target_linux_firewall_policy():
firewall_policy = create_firewall_policy_object()
policy_list = firewall_policy.list_all()
for policy in policy_list:
if policy["platform"] == 'linux':
return policy["id"]
return None
def remove_policy_by_name(policy_name):
fw_policy_obj = create_firewall_policy_object()
policy_list = fw_policy_obj.list_all()
for policy in policy_list:
if policy["name"] == policy_name:
fw_policy_obj.delete(policy["id"])
class TestIntegrationFirewallPolicy:
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.FirewallPolicy(session)
def test_firewall_policy_list_all(self):
"""This test requires that a firewall policy exist in your Halo
account. If you don't have a firewall policy in your Halo account,
this test will fail.
"""
firewall_policy = create_firewall_policy_object()
firewall_policy_list = firewall_policy.list_all()
assert "id" in firewall_policy_list[0]
def test_firewall_policy_describe(self):
"""This test requires that a firewall policy exist in your Halo
account. If you don't have a firewall policy in your Halo account,
this test will fail.
"""
firewall_policy = create_firewall_policy_object()
firewall_policy_list = firewall_policy.list_all()
target_firewall_policy_id = firewall_policy_list[0]["id"]
target_policy = firewall_policy.describe(target_firewall_policy_id)
assert "id" in target_policy
def test_firewall_policy_create_update_delete(self):
firewall_policy = create_firewall_policy_object()
remove_policy_by_name("cpapi_test_1")
remove_policy_by_name("NewName")
this_policy = json.loads(firewall_policy_body)
this_policy["firewall_policy"]["name"] = "cpapi_test_1"
new_policy_id = firewall_policy.create(json.dumps(this_policy))
policy_update = {"firewall_policy": {"name": "NewName",
"id": new_policy_id}}
firewall_policy.update(policy_update)
delete_error = firewall_policy.delete(new_policy_id)
assert delete_error is None
class TestIntegrationFirewallRule:
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.FirewallRule(session)
def test_list_firewall_policy_rules(self):
firewall_rule = create_firewall_rule_object()
target_firewall_policy_id = get_target_linux_firewall_policy()
policy_rules = firewall_rule.list_all(target_firewall_policy_id)
assert "id" in policy_rules[0]
def test_get_firewall_policy_rule_describe(self):
firewall_rule = create_firewall_rule_object()
target_firewall_policy_id = get_target_linux_firewall_policy()
policy_rules = firewall_rule.list_all(target_firewall_policy_id)
target_rule_id = policy_rules[0]["id"]
rule_details = firewall_rule.describe(target_firewall_policy_id,
target_rule_id)
assert "id" in rule_details
def test_firewall_policy_rule_create_mod_delete(self):
modification_body = {"firewall_rule": {
"comment": "Your momma makes firewall rules"}}
firewall_policy = create_firewall_policy_object()
remove_policy_by_name("cpapi_test_2")
firewall_rule = create_firewall_rule_object()
this_policy = json.loads(firewall_policy_body)
this_policy["firewall_policy"]["name"] = "cpapi_test_2"
target_policy_id = firewall_policy.create(json.dumps(this_policy))
rule_imported = firewall_rule.list_all(target_policy_id)[0]
del rule_imported["url"]
rule_imported["position"] = 1
rule_body = {"firewall_rule": rule_imported}
print(rule_body)
target_rule_id = firewall_rule.create(target_policy_id, rule_body)
modification_error = firewall_rule.update(target_policy_id,
target_rule_id,
modification_body)
delete_rule_error = firewall_rule.delete(target_policy_id,
target_rule_id)
delete_policy_error = firewall_policy.delete(target_policy_id)
assert modification_error is None
assert delete_rule_error is None
assert delete_policy_error is None
class TestIntegrationFirewallZone:
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.FirewallZone(session)
def test_list_all_ip_zones(self):
firewall_zone = create_firewall_zone_object()
list_of_zones = firewall_zone.list_all()
assert "id" in list_of_zones[0]
def test_get_zone_details(self):
firewall_zone = create_firewall_zone_object()
target_zone_id = firewall_zone.list_all()[0]["id"]
details = firewall_zone.describe(target_zone_id)
assert "id" in details
def test_firewall_zone_create_update_delete(self):
firewall_zone = create_firewall_zone_object()
firewall_zone_body = {"firewall_zone": {"name": "CPAPI TEST",
"ip_address": "127.0.0.1"}}
target_zone_id = firewall_zone.create(firewall_zone_body)
zone_update = {"firewall_zone": {"name": "NewName",
"id": target_zone_id}}
firewall_zone.update(zone_update)
delete_error = firewall_zone.delete(target_zone_id)
assert delete_error is None
class TestIntegrationFirewallService:
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.FirewallService(session)
def test_list_all_services(self):
firewall_service = create_firewall_service_object()
list_of_services = firewall_service.list_all()
assert "id" in list_of_services[0]
def test_get_service_details(self):
firewall_service = create_firewall_service_object()
target_service_id = firewall_service.list_all()[0]["id"]
details = firewall_service.describe(target_service_id)
assert "id" in details
    def test_firewall_service_create_update_delete(self):
firewall_service = create_firewall_service_object()
firewall_service_body = {"firewall_service": {"name": "<NAME>",
"protocol": "TCP",
"port": "1234"}}
target_service_id = firewall_service.create(firewall_service_body)
service_update = {"firewall_service": {"name": "NewName",
"id": target_service_id}}
firewall_service.update(service_update)
delete_error = firewall_service.delete(target_service_id)
assert delete_error is None
class TestIntegrationFirewallInterface:
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.FirewallInterface(session)
def test_list_all_interfaces(self):
interface = create_firewall_interface_object()
list_of_interfaces = interface.list_all()
assert "id" in list_of_interfaces[0]
def test_get_interface_details(self):
interface = create_firewall_interface_object()
target_interface_id = interface.list_all()[0]["id"]
details = interface.describe(target_interface_id)
assert "id" in details
def test_firewall_interface_create_delete(self):
interface = create_firewall_interface_object()
interface_body = {"firewall_interface": {"name": "eth12"}}
target_interface_id = interface.create(interface_body)
delete_error = interface.delete(target_interface_id)
assert delete_error is None
```
#### File: tests/integration/test_integration_lids_policy.py
```python
import cloudpassage
import json
import os
from cloudpassage.utility import Utility as utility
policy_file_name = "core-system-centos-v1-1.lids.json"
config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
policy_file = os.path.join(tests_dir, 'policies/', policy_file_name)
session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
api_port = session_info.api_port
class TestIntegrationLidsPolicy:
def build_lids_policy_object(self):
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
return_obj = cloudpassage.LidsPolicy(session)
return(return_obj)
def test_instantiation(self):
session = cloudpassage.HaloSession(key_id, secret_key)
assert cloudpassage.LidsPolicy(session)
def remove_policy_by_name(self, policy_name):
lids_policy_obj = self.build_lids_policy_object()
policy_list = lids_policy_obj.list_all()
for policy in policy_list:
if policy["name"] == policy_name:
lids_policy_obj.delete(policy["id"])
def test_list_all(self):
"""This test gets a list of LIDS policies from the Halo API.
        If you have no LIDS policies in your account, it will fail
"""
request = self.build_lids_policy_object()
response = request.list_all()
assert "id" in response[0]
def test_get_details(self):
"""This test requires at least one LIDS policy in your account. If
you don't have one, this test will fail."""
request = self.build_lids_policy_object()
policy_list = request.list_all()
target_policy_id = policy_list[0]["id"]
target_policy_body = request.describe(target_policy_id)
assert "id" in target_policy_body
def test_lids_policy_create_delete(self):
deleted = False
request = self.build_lids_policy_object()
with open(policy_file, 'rb') as policy_file_object:
policy_body = policy_file_object.read()
pol_meta = utility.determine_policy_metadata(policy_body)
self.remove_policy_by_name(pol_meta["policy_name"])
policy_id = request.create(policy_body)
request.delete(policy_id)
try:
request.describe(policy_id)
except cloudpassage.CloudPassageResourceExistence:
deleted = True
assert deleted
def test_lids_policy_create_update_delete(self):
deleted = False
request = self.build_lids_policy_object()
newname = "<NAME>"
with open(policy_file, 'rb') as policy_file_object:
policy_body = policy_file_object.read()
policy_id = request.create(policy_body)
policy_update = json.loads(policy_body)
self.remove_policy_by_name(newname)
policy_update["lids_policy"]["name"] = newname
policy_update["lids_policy"]["id"] = policy_id
request.update(policy_update)
request.delete(policy_id)
try:
request.describe(policy_id)
except cloudpassage.CloudPassageResourceExistence:
deleted = True
assert deleted
```
#### File: tests/integration/test_integration_special_events_policy.py
```python
import cloudpassage
import os
config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
api_port = session_info.api_port
class TestIntegrationSpecialEventsPolicy:
def create_special_events_policy_obj(self):
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
return cloudpassage.SpecialEventsPolicy(session)
def test_instantiation(self):
assert self.create_special_events_policy_obj()
def test_list_all(self):
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
se_policy = cloudpassage.SpecialEventsPolicy(session)
se_policy_list = se_policy.list_all()
assert "id" in se_policy_list[0]
def test_describe(self):
"""This test gets the details of a special events policy in
your Halo account. If you don't have any special events policies
configured, it will fail.
"""
request = self.create_special_events_policy_obj()
policy_list = request.list_all()
target_policy_id = policy_list[0]["id"]
target_policy_body = request.describe(target_policy_id)
assert "id" in target_policy_body
def test_create(self):
rejected = False
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
policy = cloudpassage.SpecialEventsPolicy(session)
try:
policy.create("DoesNotEvenMatter")
except NotImplementedError:
rejected = True
assert rejected
def test_update(self):
rejected = False
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
policy = cloudpassage.SpecialEventsPolicy(session)
try:
policy.update("DoesNotEvenMatter")
except NotImplementedError:
rejected = True
assert rejected
def test_delete(self):
rejected = False
session = cloudpassage.HaloSession(key_id, secret_key,
api_host=api_hostname,
api_port=api_port,
integration_string="SDK-Smoke")
policy = cloudpassage.SpecialEventsPolicy(session)
try:
policy.delete("DoesNotEvenMatter")
except NotImplementedError:
rejected = True
assert rejected
```
#### File: tests/unit/test_unit_server.py
```python
import cloudpassage
import os
config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)
session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
class TestUnitServer:
def test_instantiation(self):
assert cloudpassage.Server(None)
def test_validate_bad_list(self):
search_criteria_list = ["ephemeral", "cats", "!!!LOSER"]
server_object = cloudpassage.Server(None)
platform = server_object.validate_platform(search_criteria_list)
kb = server_object.validate_kb_id(search_criteria_list)
cve = server_object.validate_cve_id(search_criteria_list)
assert platform is False
assert kb is False
assert cve is False
``` |
{
"source": "JoshuaHaberzettl/CMPT-120L-910-20F",
"score": 4
} |
#### File: Assignments/Assignment 7/prime_or_composite.py
```python
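# Trial-division primality check using the 6k +/- 1 optimization: after ruling
# out 2 and 3 (and everything <= 1, which this implementation reports as
# "Composite"), every remaining prime divisor has the form 6k - 1 or 6k + 1,
# so only i and i + 2 (starting at i = 5) up to sqrt(number) are tested.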
def prime_or_composite(number):
if number <= 3:
if number > 1:
return "Prime"
else:
return "Composite"
if number % 2 == 0 or number % 3 == 0:
return "Composite"
i = 5
while i ** 2 <= number:
if number % i == 0 or number % (i + 2) == 0:
return "Composite"
i += 6
return "Prime"
if __name__ == "__main__":
numbers = [1, 2, 10, 31, 47, 89, 101, 103, 97, 187, 981, 19201, -7, 47055833459]
# If you want to test the efficency of your algorithm add this number to the array above -7
# If you want to test the efficency of your algorithm add this number to the array above 47055833459
answers = []
for number in numbers:
answers.append(prime_or_composite(number))
print(answers)
``` |
{
"source": "joshuahaddad/Alfred",
"score": 2
} |
#### File: Alfred/src/main.py
```python
from discord.ext import commands
import configloader as cfload
from logger import Logger as log
log.info('Reading config data...')
cfload.read('../config.ini')
####################### Begin Loading Process ################################
startup_extensions = cfload.configSectionMap('Startup')['startup_extensions'].split()
command_prefix = cfload.configSectionMap('Commands')['command_prefix']
bot = commands.Bot(command_prefix=commands.when_mentioned_or(command_prefix), description=cfload.configSectionMap('Startup')['description'])
# Load extensions
if __name__ == '__main__':
log.info('\nLoading extensions...')
for extension in startup_extensions:
try:
bot.load_extension(extension)
log.info(f'Extension \'{extension}\' loaded.')
except Exception as e:
log.critical(f'Failed to load {extension} extension.')
log.critical(e)
@bot.event
async def on_ready():
log.info('\nConnected to Discord as', bot.user.name, '- ID ', str(bot.user.id))
log.info('Alfred loaded successfully.\n______________________________\n')
@bot.event
async def on_message(message):
await bot.process_commands(message)
if message.author == bot.user and message.content.startswith(command_prefix):
return
# Magically checks for any of the greetings in message and waves
if not set(message.content.upper().split(' ')).isdisjoint(('HELLO', 'HI', 'HEY', 'GREETINGS', 'SALUTATIONS', 'YO')):
await message.add_reaction('\U0001F44B') # Waving hand
###################################################################################
bot.run(cfload.configSectionMap('Startup')['token'])
``` |
{
"source": "joshuahaddad/OMLT",
"score": 2
} |
#### File: tests/neuralnet/test_nn_formulation.py
```python
import pytest
import numpy as np
import pyomo.environ as pyo
from omlt import OmltBlock
from omlt.neuralnet import (ReducedSpaceNNFormulation, ReducedSpaceSmoothNNFormulation, \
FullSpaceNNFormulation, FullSpaceSmoothNNFormulation,
NetworkDefinition)
from omlt.neuralnet.layer import InputLayer, DenseLayer
def two_node_network(activation, input_value):
"""
1 1
x0 -------- (1) --------- (3)
| /
| /
| / 5
| /
| |
| -1 | 1
---------- (2) --------- (4)
"""
net = NetworkDefinition(scaled_input_bounds=[(-10.0, 10.0)])
input_layer = InputLayer([1])
net.add_layer(input_layer)
dense_layer_0 = DenseLayer(
input_layer.output_size,
[1, 2],
activation=activation,
weights=np.array([[1.0, -1.0]]),
biases=np.array([1.0, 2.0])
)
net.add_layer(dense_layer_0)
net.add_edge(input_layer, dense_layer_0)
dense_layer_1 = DenseLayer(
dense_layer_0.output_size,
[1, 2],
activation=activation,
weights=np.array([[1.0, 0.0], [5.0, 1.0]]),
biases=np.array([3.0, 4.0])
)
net.add_layer(dense_layer_1)
net.add_edge(dense_layer_0, dense_layer_1)
y = input_layer.eval(np.asarray([input_value]))
y = dense_layer_0.eval(y)
y = dense_layer_1.eval(y)
return net, y
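# Worked example (added for clarity): with activation='linear' and
# input_value=-2, each DenseLayer evaluates x @ weights + biases (matching the
# diagram in the docstring), so
#   layer 0: [-2] @ [[1, -1]] + [1, 2]           = [-1, 4]
#   layer 1: [-1, 4] @ [[1, 0], [5, 1]] + [3, 4] = [22, 8]
# and y = [[22, 8]] is the reference output the formulation tests compare against.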
def _test_two_node_FullSpaceNNFormulation_smooth(activation):
m = pyo.ConcreteModel()
m.neural_net_block = OmltBlock()
net, y = two_node_network(activation, -2.0)
m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
assert m.nvariables() == 15
assert m.nconstraints() == 14
m.neural_net_block.inputs[0].fix(-2)
m.obj1 = pyo.Objective(expr=0)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
net, y = two_node_network(activation, 1.0)
m.neural_net_block.inputs[0].fix(1)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
def _test_two_node_FullSpaceNNFormulation_relu():
m = pyo.ConcreteModel()
m.neural_net_block = OmltBlock()
net, y = two_node_network('relu', -2.0)
m.neural_net_block.build_formulation(FullSpaceNNFormulation(net))
assert m.nvariables() == 19
assert m.nconstraints() == 26
m.neural_net_block.inputs[0].fix(-2)
m.obj1 = pyo.Objective(expr=0)
status = pyo.SolverFactory("cbc").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
net, y = two_node_network('relu', 1.0)
m.neural_net_block.inputs[0].fix(1)
status = pyo.SolverFactory("cbc").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
def _test_two_node_FullSpaceSmoothNNFormulation(activation):
m = pyo.ConcreteModel()
m.neural_net_block = OmltBlock()
net, y = two_node_network(activation, -2.0)
m.neural_net_block.build_formulation(FullSpaceSmoothNNFormulation(net))
assert m.nvariables() == 15
assert m.nconstraints() == 14
m.neural_net_block.inputs[0].fix(-2)
m.obj1 = pyo.Objective(expr=0)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
net, y = two_node_network(activation, 1.0)
m.neural_net_block.inputs[0].fix(1)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
def _test_two_node_ReducedSpaceNNFormulation(activation):
m = pyo.ConcreteModel()
m.neural_net_block = OmltBlock()
net, y = two_node_network(activation, -2.0)
m.neural_net_block.build_formulation(ReducedSpaceNNFormulation(net))
assert m.nvariables() == 6
assert m.nconstraints() == 5
m.neural_net_block.inputs[0].fix(-2)
m.obj1 = pyo.Objective(expr=0)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
net, y = two_node_network(activation, 1.0)
m.neural_net_block.inputs[0].fix(1)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
def _test_two_node_ReducedSpaceSmoothNNFormulation(activation):
m = pyo.ConcreteModel()
m.neural_net_block = OmltBlock()
net, y = two_node_network(activation, -2.0)
m.neural_net_block.build_formulation(ReducedSpaceSmoothNNFormulation(net))
assert m.nvariables() == 6
assert m.nconstraints() == 5
m.neural_net_block.inputs[0].fix(-2)
m.obj1 = pyo.Objective(expr=0)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
net, y = two_node_network(activation, 1.0)
m.neural_net_block.inputs[0].fix(1)
status = pyo.SolverFactory("ipopt").solve(m, tee=False)
assert abs(pyo.value(m.neural_net_block.outputs[0,0]) - y[0,0]) < 1e-6
assert abs(pyo.value(m.neural_net_block.outputs[0,1]) - y[0,1]) < 1e-6
def test_two_node_ReducedSpaceNNFormulation():
_test_two_node_ReducedSpaceNNFormulation('linear')
_test_two_node_ReducedSpaceNNFormulation('sigmoid')
_test_two_node_ReducedSpaceNNFormulation('tanh')
def test_two_node_ReducedSpaceSmoothNNFormulation():
_test_two_node_ReducedSpaceSmoothNNFormulation('linear')
_test_two_node_ReducedSpaceSmoothNNFormulation('sigmoid')
_test_two_node_ReducedSpaceSmoothNNFormulation('tanh')
def test_two_node_ReducedSpaceSmoothNNFormulation_invalid_activation():
with pytest.raises(ValueError) as excinfo:
_test_two_node_ReducedSpaceSmoothNNFormulation('relu')
expected_msg = 'Activation relu is not supported by this formulation.'
assert str(excinfo.value) == expected_msg
def test_two_node_FullSpaceNNFormulation():
_test_two_node_FullSpaceNNFormulation_smooth('linear')
_test_two_node_FullSpaceNNFormulation_smooth('sigmoid')
_test_two_node_FullSpaceNNFormulation_smooth('tanh')
_test_two_node_FullSpaceNNFormulation_relu()
def test_two_node_FullSpaceSmoothNNFormulation():
_test_two_node_FullSpaceSmoothNNFormulation('linear')
_test_two_node_FullSpaceSmoothNNFormulation('sigmoid')
_test_two_node_FullSpaceSmoothNNFormulation('tanh')
def test_two_node_FullSpaceSmoothNNFormulation_invalid_activation():
with pytest.raises(ValueError) as excinfo:
_test_two_node_FullSpaceSmoothNNFormulation('relu')
expected_msg = 'Activation relu is not supported by this formulation.'
assert str(excinfo.value) == expected_msg
@pytest.mark.skip(reason="Need to add checks on layer types")
def test_invalid_layer_type():
assert False
```
#### File: tests/neuralnet/train_keras_models.py
```python
import tensorflow.keras as keras
from omlt.io import write_onnx_model_with_bounds
import pytest
from conftest import get_neural_network_data
from keras.layers import Dense, Conv2D
from keras.models import Model, Sequential
from pyomo.common.fileutils import this_file_dir
from tensorflow.keras.optimizers import Adamax
def train_models():
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_131")
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131_sigmoid")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_131_sigmoid")
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131_sigmoid_output_activation")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
activation="sigmoid",
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_131_sigmoid_output_activation")
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131_relu")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="relu",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_131_relu")
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131_relu_output_activation")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="relu",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
activation="relu",
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_131_relu_output_activation")
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_linear_131_sigmoid_softplus_output_activation")
nn.add(
Dense(
units=3,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
activation="softplus",
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(
this_file_dir() + "/models/keras_linear_131_sigmoid_softplus_output_activation"
)
x, y, x_test = get_neural_network_data("131")
nn = Sequential(name="keras_big")
N = 100
nn.add(
Dense(
units=N,
input_dim=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=N,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=N,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
activation="sigmoid",
)
)
nn.add(
Dense(
units=1,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
activation="softplus",
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/big")
x, y, x_test = get_neural_network_data("2353")
nn = Sequential(name="keras_linear_2353")
nn.add(
Dense(
units=3,
input_dim=2,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=42
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=43
),
)
)
nn.add(
Dense(
units=5,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=52
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=53
),
)
)
nn.add(
Dense(
units=3,
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
bias_initializer=keras.initializers.RandomNormal(
mean=0.0, stddev=0.05, seed=63
),
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
history = nn.fit(
x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15
)
nn.save(this_file_dir() + "/models/keras_linear_2353")
def train_conv():
nn = Sequential(name="keras_conv_7x7_relu")
nn.add(
Conv2D(
filters=1,
kernel_size=(2, 2),
activation="relu",
data_format="channels_first",
kernel_initializer=keras.initializers.RandomNormal(
mean=1.0, stddev=0.05, seed=62
),
input_shape=(1, 7, 7),
)
)
nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
import tf2onnx
import tempfile
onnx_model, _ = tf2onnx.convert.from_keras(nn)
input_bounds = dict()
for i in range(7):
for j in range(7):
input_bounds[0, i, j] = (0.0, 1.0)
with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:
write_onnx_model_with_bounds(f.name, onnx_model, input_bounds)
print(f"Wrote ONNX model with bounds at {f.name}")
if __name__ == "__main__":
train_models()
train_conv()
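# Illustrative follow-up sketch (not part of the original script): once training has run,
# any of the saved models can be reloaded for prediction with the standard Keras loader.
# The path below is one of the paths used by nn.save() above; x_test comes from
# get_neural_network_data("131").
#
# from tensorflow import keras
# reloaded = keras.models.load_model(this_file_dir() + "/models/keras_linear_131_relu")
# predictions = reloaded.predict(x_test)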
``` |
{
"source": "joshuahaertel/friendexing",
"score": 3
} |
#### File: friendexing/games/models.py
```python
from typing import List, Dict, Optional
from uuid import UUID, uuid4
class Game:
def __init__(
self,
total_time_to_guess: int,
should_randomize_fields: bool,
name: str,
):
self.id = uuid4()
self.settings = Settings(
total_time_to_guess,
should_randomize_fields,
)
self.players: List['Player'] = [
Player(
name,
),
]
self.batches: List['Batch'] = []
self.state = 'wait'
class Settings:
def __init__(
self,
total_time_to_guess: int,
should_randomize_fields: bool,
):
self.total_time_to_guess = total_time_to_guess
self.should_randomize_fields = should_randomize_fields
class Player:
def __init__(
self,
name: str,
):
self.id = uuid4()
self.name: str = name
self.score = 0
self.guess_id: Optional[int] = None
self.guess: Optional[str] = None
self.guess_time: Optional[int] = None
class Batch:
images: List['Image']
schema: Dict[str, type]
class Image:
indexable: bool
records: List['Record']
class Record:
fields: Dict[str, 'Field']
class Field:
value: str
is_checked: bool
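# Illustrative usage sketch (not part of the original module); the argument values are made up.
# Creating a Game also creates its host player, and further players are appended to game.players:
#
# game = Game(total_time_to_guess=30, should_randomize_fields=True, name="host")
# game.players.append(Player(name="guest"))
# assert game.state == 'wait' and len(game.players) == 2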
``` |
{
"source": "joshuahaertel/fullprofiler",
"score": 3
} |
#### File: fullprofiler/fullprofiler/statistic.py
```python
ABSOLUTE_MIN = float('-inf')
ABSOLUTE_MAX = float('inf')
class Statistic:
def __init__(self):
self.count = 0
self.total_time = 0
self.min = ABSOLUTE_MAX
self.max = ABSOLUTE_MIN
@property
def mean(self):
if not self.count:
return 0
return self.total_time / self.count
def add_run_time(self, run_time):
self.count += 1
self.total_time += run_time
self.min = min(run_time, self.min)
self.max = max(run_time, self.max)
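# Illustrative usage sketch (not part of the original module):
#
# stat = Statistic()
# stat.add_run_time(0.2)
# stat.add_run_time(0.4)
# assert stat.count == 2 and stat.min == 0.2 and stat.max == 0.4
# assert abs(stat.mean - 0.3) < 1e-12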
```
#### File: fullprofiler/tests/test_profiler.py
```python
import asyncio
from time import sleep
from unittest import TestCase
from fullprofiler.profiler import Profiler
class TestProfiler(TestCase):
def setUp(self):
Profiler.statistics.clear()
def test_all(self):
Profiler.enable()
self.do_something()
Profiler.disable()
Profiler.print_statistics()
def do_something(self):
self.do_sleep()
self.do_something_else()
@staticmethod
def do_sleep():
sleep(.3)
@staticmethod
def do_something_else():
for _ in range(100):
pass
class TestAsyncioProfile(TestCase):
def setUp(self):
Profiler.statistics.clear()
def test_all(self):
Profiler.statistics.clear()
loop = asyncio.get_event_loop()
Profiler.enable()
loop.run_until_complete(self.do_something())
Profiler.disable()
print('\n\nNew Test\n\n')
Profiler.print_statistics()
async def do_something(self):
await self.do_sleep()
await self.do_something_else()
@staticmethod
async def do_sleep():
await asyncio.sleep(.3)
@staticmethod
async def do_something_else():
for _ in range(100):
pass
``` |
{
"source": "joshuahaertel/index-royale",
"score": 2
} |
#### File: index-royale/demo/models.py
```python
from typing import List
from uuid import uuid4
from django.db import models
from django.db.models import Sum, Avg, Count
from django.utils.functional import cached_property
class Demo(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4)
state = models.CharField(max_length=7, default='wait', choices=(
('wait', 'Wait'),
('race', 'Race'),
('over', 'Over'),
))
current_batch_index = models.PositiveSmallIntegerField(default=0)
current_entry_index = models.PositiveSmallIntegerField(default=0)
current_field_index = models.PositiveSmallIntegerField(default=0)
def __init__(self, *args, batches=None, **kwargs):
super().__init__(*args, **kwargs)
self.batches: List[Batch] = batches or [DEFAULT_BATCH]
@property
def current_images(self):
return self.current_batch.images
@property
def current_batch(self):
return self.batches[self.current_batch_index]
@property
def max_batch_index(self):
return len(self.batches) - 1
@property
def current_batch_max_entry_index(self):
return len(self.current_batch.entries) - 1
@property
def current_entry_max_field_index(self):
return len(self.current_entry.fields) - 1
@property
def current_label(self):
entry = self.current_entry
return entry.fields[self.current_field_index].label
@property
def current_entry(self):
return self.current_batch.entries[self.current_entry_index]
@cached_property
def teams_in_winning_order(self):
return self.team_set.annotate(
average_points=Avg('player__points'),
total_players=Count('player'),
).order_by('-average_points')
@cached_property
def teamless_players(self):
return self.player_set.filter(team=None)
@property
def teamless_players_average_points(self):
if not self.teamless_players:
return 0.0
points = sum(player.points for player in self.teamless_players)
return points/len(self.teamless_players)
@property
def can_wait(self):
return self.state != 'wait'
@property
def can_race(self):
return self.state == 'wait'
@property
def can_finish(self):
return self.state == 'wait'
class Batch:
def __init__(self, entries=None, images=None, name=''):
self.entries: List[Entry] = entries or []
self.images: List[str] = images or []
self.name: str = name
class Entry:
def __init__(self, fields=None):
self.fields: List[Field] = fields or []
class Field:
def __init__(self, label, value=None):
self.label: str = label
self.value: str = value
DEFAULT_BATCH = Batch(
entries=[Entry(
fields=[
Field('Name', 'josh'),
Field('Day', '7'),
]
)],
images=[
'demo/test-1.png',
'demo/test-2.png',
],
name='Example',
)
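# Illustrative sketch (not part of the original module) of how the navigation properties above
# resolve against DEFAULT_BATCH; the Demo instance here is unsaved and purely for illustration:
#
# demo = Demo()
# assert demo.current_batch.name == 'Example'
# assert demo.current_label == 'Name'  # field 0 of entry 0
# assert demo.current_batch_max_entry_index == 0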
class Team(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4)
demo = models.ForeignKey(Demo, models.CASCADE)
name = models.CharField(max_length=63)
@property
def points(self):
return self.player_set.aggregate(Sum('points'))['points__sum']
@cached_property
def players_in_best_order(self):
return self.player_set.all().order_by('-points')
class BasePlayer(models.Model):
points = models.SmallIntegerField(default=0)
team = models.ForeignKey(Team, models.SET_NULL, null=True, blank=True)
name = models.CharField(max_length=63)
skill_level = models.CharField(default='beg', max_length=3, choices=(
('beg', 'Beginner'),
('int', 'Intermediate'),
('adv', 'Advanced'),
))
class Meta:
abstract = True
class Player(BasePlayer, models.Model):
id = models.UUIDField(primary_key=True, default=uuid4)
demo = models.ForeignKey(Demo, models.CASCADE)
class Admin(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4)
demo = models.OneToOneField(Demo, models.CASCADE)
class PlayingAdmin(Admin, BasePlayer):
pass
``` |
{
"source": "joshuahaertel/putils",
"score": 4
} |
#### File: putils/deplytils/decorators.py
```python
class class_property(object): # pylint: disable=invalid-name
"""
Decorator to make a classmethod behave like a property
"""
def __init__(self, method):
self.method = method
self.name = method.__name__
self.__doc__ = getattr(method, '__doc__')
self.class_ = None
def __get__(self, instance, owner):
assert owner is not None and instance is None
return self.method(owner)
# noinspection PyPep8Naming pylint: disable=invalid-name,unused-variable
class cached_class_property(class_property):
"""
Decorator to transform a class method into a class field. The first
time the field is accessed, the method is run. All subsequent
accesses will return a cached value of the method.
The cache can be invalidated by deleting the field. Note, however,
that deleting the field before a value is in the cache will delete
the method and corresponding field from the class forever.
"""
def __init__(self, method):
super(cached_class_property, self).__init__(method)
self.is_cached = False
self.cached_value = None
def __get__(self, instance, owner=None):
"""
If the cache is empty/invalidated, run the class method and
store it in the cache. The cached value is returned.
:param instance: self instance
:param owner: type of the class
:return: result of cached method call
"""
if not self.is_cached:
self.class_ = owner or instance.__class__
self.cached_value = self.method(self.class_)
self.is_cached = True
return self.cached_value
def __del__(self):
"""
Invalidate the cache, if there is one. If there isn't one then
delete the property from the class forever
:return: None
"""
if self.class_:
setattr(self.class_, self.name, self.__class__(self.method))
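# Illustrative usage sketch (not part of the original module); Config and load_settings are
# hypothetical names used only to show the decorator in action:
#
# class Config(object):
#     @cached_class_property
#     def settings(cls):
#         return load_settings()  # runs once; later reads return the cached value
#
# Config.settings  # first access calls the method and fills the cache
# Config.settings  # served from the cache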
```
#### File: putils/tests/run.py
```python
from __future__ import print_function # pylint: disable=unused-variable
import os
import subprocess
import sys
import unittest
from coverage import Coverage
from deplytils.contexts.coverage import CoverageContext
class Checker(object):
"""
Object to make sure unittests pass, the appropriate coverage is
achieved, and everything passes linting.
"""
def __init__(self):
"""Cleans up old coverage data files, if any"""
self._test_group_errors = 0
for file_ in os.listdir(os.curdir):
if '.coverage' in file_:
os.remove(file_)
def run(self):
"""Run all checks"""
coverage_tests = self._start_coverage_tests()
normal_tests = self._run_normal_tests()
coverage_tests_output = self._output_coverage_tests(coverage_tests)
self._validate_tests(coverage_tests, coverage_tests_output,
normal_tests)
if self._test_group_errors != 0:
print('Not all test groups passed!', file=sys.stderr)
sys.exit(self._test_group_errors)
print('Success! All tests passed!')
@staticmethod
def _start_coverage_tests():
"""Start the process to test the CoverageContext"""
args = ('coverage run --branch --rcfile coverage_tests/.coveragerc '
'-p -m unittest discover -s coverage_tests')
coverage_tests = subprocess.Popen(
args.split(' '), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
return coverage_tests
@staticmethod
def _run_normal_tests():
"""Run other/normal tests"""
with CoverageContext(coverage_kwargs=dict(
cover_pylib=False, branch=True, data_suffix=True,
config_file='normal/.coveragerc')) as coverage:
tests = unittest.TestProgram(module=None, exit=False, argv=[
'normal_tests', 'discover', '-s', 'normal'])
coverage.coverage.save()
return tests
@staticmethod
def _output_coverage_tests(coverage_tests):
"""Print coverage tests to stderr, as is expected"""
coverage_tests.wait()
coverage_tests_output = tuple(coverage_tests.stdout.readlines())
for byte_line in coverage_tests_output:
line = byte_line.decode('utf-8')
print(line, file=sys.stderr)
return coverage_tests_output
def _validate_tests(self, coverage_tests, coverage_tests_output,
normal_tests):
"""Make sure all tests pass"""
normal_test_results = normal_tests.result
errors = normal_test_results.errors
failures = normal_test_results.failures
error_template = 'Encountered errors and/or failures in {} tests'
self.__assert(normal_test_results.testsRun != 0,
'Did not run any test!')
self.__assert(errors == [] and failures == [],
error_template.format('normal'))
self.__assert(coverage_tests.returncode == 0 and
coverage_tests_output[-1] == b'OK\n',
error_template.format('coverage'))
self._validate_coverage()
def __assert(self, condition, error_message):
"""Log errors, but don't quit"""
if not condition:
print(error_message, file=sys.stderr)
self._test_group_errors += 1
def _validate_coverage(self):
"""Make sure there 100% coverage between the two test groups"""
coverage = Coverage(config_file='coverage_tests/.coveragerc',)
coverage.load()
coverage.combine(strict=True)
self.__assert(coverage.report() == 100.0, '100% coverage not achieved')
def main():
"""Run the checker"""
checker = Checker()
checker.run()
if __name__ == '__main__':
main()
``` |
{
"source": "joshuahaertel/scipture-mastery-downloader",
"score": 3
} |
#### File: referencelistparser/states/reference.py
```python
import logging
from referencelistparser.states.base import BaseState
from scriptureref import ScriptureRef
logger = logging.getLogger(__file__)
class ReferenceState(BaseState):
def __init__(self, old_state, book):
super().__init__(old_state)
self.book = book
self.url = None
def handle_start_tag(self, tag, attrs):
if tag == 'a':
self.url = dict(attrs)['href']
logger.debug('Reference found, processing')
else:
logger.debug('Ignoring tag %s', tag)
return self
def handle_data(self, data, references):
references[self.book].append(ScriptureRef(self.url, data))
```
#### File: referencelistparser/states/row.py
```python
import logging
from referencelistparser.states.base import BaseState
from referencelistparser.states.tabledata import TableDataState
logger = logging.getLogger(__file__)
class RowState(BaseState):
def handle_start_tag(self, tag, attrs):
if tag == 'tr':
logger.debug('Row found, looking for data')
return TableDataState(self)
logger.debug('Ignoring tag %s', tag)
return self
def handle_end_tag(self, tag):
if tag == 'tbody':
return self.old_state
return self
```
#### File: joshuahaertel/scipture-mastery-downloader/scriptureref.py
```python
import logging
import requests
from referenceparser.webparser import ScriptureParser
logger = logging.getLogger(__name__)
class ScriptureRef:
def __init__(self, url: str, reference):
self.url = url
self.reference = reference
self.text = ''
def get_text(self):
logger.debug('Getting text for url: %s', self.url)
# url = /study/scriptures/pgp/moses/1.39?lang=eng#p3
full_url, url_directives = self.url.rsplit('?') # type: str, str
# full_url = /study/scriptures/pgp/moses/1.39
_, _, api_url = full_url.split('/', 2)
# api_url = scriptures/pgp/moses/1.39
# url_directives = lang=eng#p3
query_param, _ = url_directives.split('#') # type: str, str
# query_param = lang=eng
_, language = query_param.split('=')
uris_param = f'/{language}/{api_url}'
logger.debug('Hitting api v2 with uris: %s', uris_param)
response = requests.get(
'https://www.churchofjesuschrist.org/content/api/v2',
params={'uris': uris_param},
)
response_json = response.json() # type: dict
logger.debug('response json: %s', response_json)
text_list = []
for _, reference_data in response_json.items():
for verse_content in reference_data['content']: # type: dict
parser = ScriptureParser()
parser.feed(verse_content['markup'])
text_list.append(parser.text)
self.text = ' '.join(text_list)
@property
def file_name(self):
return self.reference
``` |
{
"source": "joshuahansel/cl1de",
"score": 3
} |
#### File: cl1de/python/cl1de_plot_utilities.py
```python
from file_utilities import readCSVFile
from PlotterLine import PlotterLine
def plotDataSets(data_sets, data_names):
for var in data_names:
desc, symbol = data_names[var]
plotter = PlotterLine("$x$", desc + ", $" + symbol + "$")
for i, data_set in enumerate(data_sets):
set_name, data = data_set
plotter.addSet(data["x"], data[var], set_name, color=i)
plotter.save(var + ".png")
euler1phase_data_names = {
"r": ("Density", "\\rho"),
"u": ("Velocity", "u"),
"p": ("Pressure", "p")
}
def plotEuler1PhaseDataSets(data_sets):
plotDataSets(data_sets, euler1phase_data_names)
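# Illustrative usage sketch (not part of the original module). readCSVFile is assumed to return
# a dict-like object keyed by column name ("x", "r", "u", "p"); the file names here are made up:
#
# exact = readCSVFile("exact_solution.csv")
# computed = readCSVFile("computed_solution.csv")
# plotEuler1PhaseDataSets([("Exact", exact), ("Computed", computed)])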
``` |
{
"source": "joshuaharveyrn/WizardTower",
"score": 2
} |
#### File: joshuaharveyrn/WizardTower/app.py
```python
from starlette.applications import Starlette
from starlette.responses import UJSONResponse
import gpt_2_simple as gpt2
import tensorflow as tf
import uvicorn
import os
import gc
import time
app = Starlette(debug=False)
model_name = "124M"
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess, model_name=model_name)
# Needed to avoid cross-domain issues
response_header = {
'Access-Control-Allow-Origin': '*'
}
generate_count = 0
@app.route('/', methods=['GET', 'POST', 'PUT', 'HEAD'])
async def homepage(request):
start = time.process_time()
global generate_count
global sess
if request.method == 'GET':
params = request.query_params
elif request.method in ('POST', 'PUT'):
params = await request.json()
elif request.method == 'HEAD':
return UJSONResponse({'text': ''},
headers=response_header)
print(params.get('prefix'))
print(params.get('temperature'))
#str(params.get('include_prefix', True)).lower() == 'true',
text = gpt2.generate(sess,
length=int(params.get('length', 50)),
temperature=float(params.get('temperature', 0.7)),
top_k=int(params.get('top_k', 0)),
top_p=float(params.get('top_p', 0)),
prefix=params.get('prefix', '')[:500],
truncate=params.get('truncate', None),
include_prefix=False,
return_as_list=True
)[0]
generate_count += 1
if generate_count == 8:
# Reload model to prevent Graph/Session from going OOM
tf.reset_default_graph()
sess.close()
sess = gpt2.start_tf_sess(threads=1)
gpt2.load_gpt2(sess)
generate_count = 0
gc.collect()
print(f"That took {time.process_time() - start}")
return UJSONResponse({'text': text},
headers=response_header)
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
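# Illustrative client sketch (not part of the original app); assumes the server is running
# locally on port 8080 and that the requests package is available:
#
# import requests
# resp = requests.post(
#     "http://localhost:8080/",
#     json={"prefix": "Once upon a time", "length": 50, "temperature": 0.7},
# )
# print(resp.json()["text"])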
``` |
{
"source": "joshua-hashimoto/django-channels-jwt-auth-middleware",
"score": 2
} |
#### File: testproject/users/serializers.py
```python
from django.contrib.auth import get_user_model
from dj_rest_auth.serializers import UserDetailsSerializer
from rest_framework.serializers import ModelSerializer, SerializerMethodField
User = get_user_model()
class UserSerializer(ModelSerializer):
profile_image = SerializerMethodField()
class Meta:
model = User
fields = (
'id',
'profile_image',
'username',
'email',
)
def get_profile_image(self, obj):
if (profile_image := obj.profile_image):
return profile_image.url
else:
return None
class CustomUserDetailSerializer(UserDetailsSerializer):
class Meta(UserDetailsSerializer.Meta):
fields = (
'id',
'profile_image',
'username',
'email',
)
def update(self, instance, validated_data):
"""
Note:
Override this method if you need custom update behaviour for the user model.
It is not needed in this application because it already uses CustomUser,
but it is worth keeping as a reference.
"""
return super().update(instance, validated_data)
``` |
{
"source": "joshua-hashimoto/eigo-of-the-day-django",
"score": 2
} |
#### File: core/additional/account_adapter.py
```python
from django.http import Http404
from allauth.account.adapter import DefaultAccountAdapter
class CustomAccountAdapter(DefaultAccountAdapter):
"""
A custom account adapter to disable signup
functionality from django-allauth
"""
def is_open_for_signup(self, request):
raise Http404
return False
```
#### File: eigo-of-the-day-django/pages/views.py
```python
import os
import json
import uuid
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.functional import Promise
import cloudinary
import cloudinary.uploader
class LazyEncoder(DjangoJSONEncoder):
"""JSON encoder that can serialize the lazy translation strings passed to json.dumps below."""
def default(self, obj):
if isinstance(obj, Promise):
return str(obj)
return super().default(obj)
class MarkdownImageUploader(View):
"""
custom image uploader for martor.
"""
def post(self, request, *args, **kwargs):
"""
called when images are uploaded to martor's markdown field.
validation is from martor's documentation.
it will upload images to cloudinary.
Note:
when there is '?' in the to be foldername the image upload will not work.
"""
if not request.is_ajax():
return HttpResponse(_('Invalid request!'))
if 'markdown-image-upload' not in request.FILES:
return HttpResponse(_('Invalid request!'))
image = request.FILES['markdown-image-upload']
image_types = [
'image/png', 'image/jpg',
'image/jpeg', 'image/pjpeg', 'image/gif'
]
if image.content_type not in image_types:
# return error when the image type
# is not an expected type
data = json.dumps({
'status': 405,
'error': _('Bad image format.')
}, cls=LazyEncoder)
return HttpResponse(
data, content_type='application/json', status=405)
if image.size > settings.MAX_IMAGE_UPLOAD_SIZE:
# return error when the image size
# is over the setted MAX_IMAGE_UPLOAD_SIZE
to_MB = settings.MAX_IMAGE_UPLOAD_SIZE / (1024 * 1024)
data = json.dumps({
'status': 405,
'error': _('Maximum image file is %(size) MB.') % {'size': to_MB}
}, cls=LazyEncoder)
return HttpResponse(
data, content_type='application/json', status=405)
# when the image is valid
# create new name for image
img_name = f'{uuid.uuid4().hex[:10]}-{image.name.replace(" ", "-")}'
# assign new name to the image that is being uploaded
image.name = img_name
# create folder path
img_folder = os.path.join(
settings.MEDIA_URL, 'memo')
# save image to cloudinary
cloudinary_img = cloudinary.uploader.upload(
image, folder=img_folder, overwrite=True)
# get the saved image url from cloudinary response
cloudinary_img_url = cloudinary_img['url']
# name json data to return to markdown
data = json.dumps({
'status': 200,
'link': cloudinary_img_url,
'name': image.name
})
return HttpResponse(data, content_type='application/json')
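# Illustrative settings sketch (not part of the original module). The upload size limit read
# above must be defined in settings.py; the 5 MB value here is only an example:
#
# MAX_IMAGE_UPLOAD_SIZE = 5 * 1024 * 1024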
``` |
{
"source": "joshua-hashimoto/molecule-django",
"score": 2
} |
#### File: molecule-django/articles/admin.py
```python
from django.contrib import admin
from django.db import models
# from django_summernote.admin import SummernoteModelAdmin
from martor.widgets import AdminMartorWidget
from django.utils import timezone
from .models import Article
class ArticleAdmin(admin.ModelAdmin):
"""
custom admin for model Phrase
Attributes:
list_desplay (List): list of fields in model to display in admin site
list_display_links (List): list of fields in model to attach links to in admin site
list_filter (List): list of fields in model that the user can filter through in admin site
search_fields (List): list of fields in model that the user can search through in admin site
actions (List): list of custom functions to add custom actions to admin site
inlines (List): list of custom Inline classes to add relational fields in admin site
"""
list_display = [
'title',
'identifier',
'truncated_description',
'is_published',
'is_active',
]
list_display_links = [
'title',
]
list_filter = [
'title',
'is_active',
]
search_fields = [
'slug',
'title',
'description',
'content',
]
actions = ['active', 'inactive']
def active(self, request, queryset):
"""
function to set the target model's "is_active" to True.
used in admin site.
"""
queryset.update(is_active=True)
active.short_description = '閲覧可能'
def inactive(self, request, queryset):
"""
function to set the target model's "is_active" to False.
used in admin site.
"""
queryset.update(is_active=False)
inactive.short_description = '閲覧不可能'
def identifier(self, obj):
return obj.slug
identifier.short_description = 'identifier'
def truncated_description(self, obj):
"""
custom admin column.
call get_description on model.
Returns:
str: truncated string
"""
return obj.get_description()
truncated_description.short_description = "description"
def is_published(self, obj):
"""
custom admin column.
if the publish_at field has no data, it will return the string 'no publish date'.
if publish_at is set and the current time is past it, it will return the
formatted publish date.
if publish_at is set but still in the future, it will return 'publish at <date>'.
Args:
obj (Article): data object.
Returns:
datetime|str: publish status.
"""
if (publish_at := obj.publish_at):
formated_date = publish_at.strftime('%Y/%m/%d %H:%M')
now = timezone.now()
if now > publish_at:
return formated_date
return f'publish at {formated_date}'
return 'no publish date'
is_published.short_description = 'publish status'
admin.site.register(Article, ArticleAdmin)
```
#### File: molecule-django/articles/models.py
```python
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils import timezone
from cloudinary_storage.storage import VideoMediaCloudinaryStorage, MediaCloudinaryStorage
from cloudinary_storage.validators import validate_video
from martor.utils import markdownify
from martor.models import MartorField
from bs4 import BeautifulSoup
from core.additional.models import CoreModel
from tags.models import Tag
class ArticleQuerySet(models.QuerySet):
"""
custom QuerySet for model Article
"""
def all(self):
"""
Returns:
queryset: return all object with is_active=True
"""
queryset = self.filter(is_active=True)
return queryset
def published(self):
"""
return all objects that is,
1. is_active is True
2. publish_at is over the current datetime
Returns:
queryset: return all objects that meats the lookup
"""
now = timezone.now()
lookup: Q = (
Q(is_active=True) &
Q(publish_at__lte=now)
)
queryset = self.filter(lookup)
return queryset
def search(self, query: str):
"""
filter objects that is,
1. is_active is True
2. publish_at is over the current datetime
3. query is contained in title
4. query is contained in content
5. query is contained in description
Returns:
queryset: return all objects that meats the lookup
"""
now = timezone.now()
lookup: Q = (
Q(is_active=True) &
Q(publish_at__lte=now) &
(
Q(title__icontains=query) |
Q(description__icontains=query) |
Q(content__icontains=query)
)
)
queryset = self.filter(lookup)
return queryset
def all_related(self):
"""
fetch all article except the article that is tagged with tag 'Series'
Returns:
queryset: return all article except the article tagged with 'Series'
"""
return self.published().exclude(tags__name='Series').order_by('publish_at')
class ArticleManager(models.Manager):
"""
custom manager for model Article using ArticleQuerySet
"""
def get_queryset(self):
"""
set custom QuerySet to use in manager
Returns:
ArticleQuerySet: return ArticleQuerySet using model Article
"""
return ArticleQuerySet(self.model, using=self._db)
def all(self):
"""
call .all() from ArticleQuerySet
Returns:
queryset: return queryset returned from ArticleQuerySet.all()
"""
queryset = self.get_queryset().all()
return queryset
def published(self):
"""
call .published() from ArticleQuerySet
Returns:
queryset: return queryset returned from ArticleQuerySet.published()
"""
queryset = self.get_queryset().published()
return queryset
def search(self, query=None):
"""
call .search() from ArticleQuerySet
if argument query is not set it will return None
Args:
query (str|None): user input query
Returns:
queryset|None: return queryset returned from ArticleQuerySet.search()
"""
if query is None:
return self.get_queryset().none()
return self.get_queryset().search(query)
def all_related(self):
"""
call .all_related() from ArticleQuerySet
Returns:
queryset: return queryset returned from ArticleQuerySet.all_related()
"""
return self.get_queryset().all_related()
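# Illustrative usage sketch (not part of the original module); the query string is made up:
#
# Article.objects.published()            # active articles whose publish_at has passed
# Article.objects.search(query="django") # published articles matching title/description/content
# Article.objects.all_related()          # published articles excluding the 'Series' tag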
def upload_image_to(instance, filename):
"""
custom path for saving images
Returns:
str: image path
"""
asset_path = f'article/{str(instance.title)}/images/{filename}'
return asset_path
def upload_video_to(instance, filename):
"""
custom path for saving videos
Returns:
str: video path
"""
asset_path = f'article/{str(instance.title)}/video/{filename}'
return asset_path
class Article(CoreModel):
"""
A model for representing each article for the blog.
Attributes:
author (ForeignKey): one-to-one relation to set user to object
tags (ManyToManyField): many-to-many relation to set tags to object
video (FileField): field for video files. saved to cloudinary
cover (ImageField): field for image files. saved to cloudinary
title (CharField): field for article title. max length to 255. this field needs to be unique
slug (SlugField): field for article slug. used for routing
description (TextField): field for article description.
content (MartorField): field for article content. uses martor's MartorField for markdown.
related_article (ManyToManyField): many-to-many relation to set self as related articles
keywords (CharField): field for article keyword. this is used for SEO.
publish_at (DateTimeField) field for article publish datetime.
objects (ArticleManager): set custom Manager to model
Note:
because ImageField and DateTimeField save strings in
the database, null=True is not necessary.
"""
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag, blank=True)
video = models.FileField(upload_to=upload_video_to, blank=True, null=True,
storage=VideoMediaCloudinaryStorage(), validators=[validate_video])
cover = models.ImageField(
upload_to=upload_image_to, blank=True, null=True, storage=MediaCloudinaryStorage())
title = models.CharField(max_length=255, unique=True)
slug = models.SlugField(unique=True)
description = models.TextField()
content = MartorField()
related_articles = models.ManyToManyField(
'self', verbose_name='related articles', blank=True)
keywords = models.CharField('記事のキーワード', max_length=255, default='プログラミング')
publish_at = models.DateTimeField(
auto_now=False, auto_now_add=False, blank=True, null=True)
objects = ArticleManager()
class Meta:
"""
Attributes:
ordering (List): use to determine the ordering of model objects when listed
"""
ordering = ['-publish_at', '-timestamp', '-updated', ]
def __str__(self):
"""
determine which field of the model should be representing the model object.
mainly used in admin site.
Returns:
str: returns title field.
"""
return self.title
def get_absolute_url(self):
"""
determine to absolute url of the model.
mainly used to route to detail view.
"""
return reverse("articles:article_detail", kwargs={"slug": self.slug})
def get_markdown(self):
"""
return a cleaned html.
in case there is a markdown we use markdown package to convert them to html.
Returns:
str: string of safe html
"""
content = self.content
markdown_content = markdownify(content)
return mark_safe(markdown_content)
def get_description(self):
"""
return description. if the description str length
is greater then 50 it truncates the str.
Returns:
str: truncated string
"""
if len(self.description) > 50:
return f"{self.description[:50]}..."
return self.description
def get_text_count(self):
"""
count strings in html.
using BeautifulSoup4 to sanitize html tags.
Returns:
int: length of the string after sanitized by BeautifulSoup4
"""
content = self.content
markdown_content = markdownify(content)
souped = BeautifulSoup(markdown_content, features="html.parser").findAll(
text=True
)
stripted_text = "".join(souped).replace(" ", "")
return len(stripted_text)
def get_img_count(self):
"""
count img tags in html.
using BeautifulSoup4 search through html.
Returns:
int: length of the images after filtering through using BeautifulSoup4
"""
content = self.content
markdown_content = markdownify(content)
img_tags = BeautifulSoup(markdown_content, features="html.parser").find_all(
"img"
)
return len(img_tags)
@property
def is_series_summary(self):
return 'Series' in [tag.name for tag in self.tags.all()]
@property
def is_published(self):
if self.publish_at is None:
return False
return timezone.now() > self.publish_at
```
#### File: molecule-django/comments/admin.py
```python
from django.contrib import admin
from .models import Comment
class CommentAdmin(admin.ModelAdmin):
"""
custom admin for model Phrase
Attributes:
list_desplay (List): list of fields in model to display in admin site
list_display_links (List): list of fields in model to attach links to in admin site
list_filter (List): list of fields in model that the user can filter through in admin site
search_fields (List): list of fields in model that the user can search through in admin site
actions (List): list of custom functions to add custom actions to admin site
inlines (List): list of custom Inline classes to add relational fields in admin site
"""
list_display = [
'id',
'truncated_comment',
'article',
'is_active',
]
list_display_links = [
'id',
]
list_filter = [
'article',
'is_active',
]
search_fields = [
'comment',
]
actions = ['active', 'inactive']
def active(self, request, queryset):
"""
function to set the target model's "is_active" to True.
used in admin site.
"""
queryset.update(is_active=True)
active.short_description = '閲覧可能'
def inactive(self, request, queryset):
"""
function to set the target model's "is_active" to False.
used in admin site.
"""
queryset.update(is_active=False)
inactive.short_description = '閲覧不可能'
def truncated_comment(self, obj):
"""
custom admin column.
Returns:
str: truncated string
"""
if len(obj.comment) > 20:
return f'{obj.comment[:20]}...'
return obj.comment
truncated_comment.short_description = "comment"
admin.site.register(Comment, CommentAdmin)
``` |
{
"source": "JoshuaHaustein/planner_tests",
"score": 3
} |
#### File: src/planner_tests/ros_oracle_bridge.py
```python
import rospy
import numpy
import itertools
from planner_tests.srv import *
from geometry_msgs.msg import Pose2D
class ROSOracleBridge(object):
"""
Python interface using ROS services to interact with the OracleTrainingServer.
"""
def __init__(self, node_name='ros_oracle_client', server_name='oracle_training_server'):
"""
Create a new ROSOracleBridge.
---------
Arguments
---------
node_name, string - name of this ROS node
server_name, string - name of the server
"""
rospy.init_node(node_name)
rospy.loginfo("Waiting for services...")
gasi_name = '/' + server_name + '/get_action_space_info'
gop_name = '/' + server_name + '/get_object_properties'
sop_name = '/' + server_name + '/set_object_properties'
gs_name = '/' + server_name + '/get_state'
prop_name = '/' + server_name + '/propagate'
sao_name = '/' + server_name + '/set_active_objects'
ss_name = '/' + server_name + '/set_state'
rospy.wait_for_service(gasi_name)
rospy.wait_for_service(gop_name)
rospy.wait_for_service(sop_name)
rospy.wait_for_service(gs_name)
rospy.wait_for_service(prop_name)
rospy.wait_for_service(sao_name)
rospy.wait_for_service(ss_name)
self._get_action_space_info_service = rospy.ServiceProxy(gasi_name, GetActionSpaceInfo)
self._get_object_properties_service = rospy.ServiceProxy(gop_name, GetObjectProperties)
self._set_object_properties_service = rospy.ServiceProxy(sop_name, SetObjectProperties)
self._get_state_service = rospy.ServiceProxy(gs_name, GetState)
self._propagate_service = rospy.ServiceProxy(prop_name, Propagate)
self._set_active_objects_service = rospy.ServiceProxy(sao_name, SetActiveObjects)
self._set_state_service = rospy.ServiceProxy(ss_name, SetState)
resp = self.get_object_properties()
self._robot_name = 'robot'
self._object_names = [name for name in resp.obj_names if name != self._robot_name]
self._all_entities_names = resp.obj_names
self._active_objects = set(self._object_names)
rospy.loginfo("Ready")
def get_action_space_info(self):
"""
Return information on action space.
------
Returns
-------
response, planner_tests.srv.GetActionSpaceInfoResponse
The response has the following fields:
dof_action_space, int - dimension of action space
lower_bounds, array of floats
upper_bounds, array of floats
is_cyclic, array of bools - True if the action dimension is cyclic
"""
return self._get_action_space_info_service()
def get_object_properties(self):
"""
Return object properties of objects in scene.
-------
Returns
-------
response, planner_tests.srv.GetObjectPropertiesResponse
The response has the following fields:
obj_names, array of string
masses, array of floats
inertias, array of floats
ground_friction_coeffs, array of floats
ground_friction_torque_integrals, array of floats
contact_friction_coeffs, array of floats
widths, array of floats
heights, array of floats
"""
response = self._get_object_properties_service()
return response
def set_object_properties(self, properties):
"""
Set the given object properties.
---------
Arguments
---------
properties, dict: string -> dict - mapping from object name to a dictionary containing
object properties for this object. Supported object properties are:
mass, ground_friction_coeff, ground_friction_torque_integral, contact_friction_coeff
-------
Returns
-------
success, bool
"""
req = SetObjectPropertiesRequest()
for name, prop in properties.iteritems():
req.obj_names.append(name)
if 'mass' in prop:
req.masses.append(prop['mass'])
else:
req.masses.append(-1.0)
if 'ground_friction_coeff' in prop:
req.ground_friction_coeffs.append(prop['ground_friction_coeff'])
else:
req.ground_friction_coeffs.append(-1.0)
if 'ground_friction_torque_integral' in prop:
req.ground_friction_torque_integrals.append(prop['ground_friction_torque_integral'])
else:
req.ground_friction_torque_integrals.append(-1.0)
if 'contact_friction_coeff' in prop:
req.contact_friction_coeffs.append(prop['contact_friction_coeff'])
else:
req.contact_friction_coeffs.append(-1.0)
res = self._set_object_properties_service(req)
return res.success
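# Illustrative usage sketch (not part of the original module); the object name and values are
# made up. Properties that are omitted are sent as -1.0 in the request built above.
#
# bridge.set_object_properties({
#     "box_0": {"mass": 0.3, "ground_friction_coeff": 0.2},
# })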
def set_state(self, state, b_active_only=True):
"""
Set the state of the world.
---------
Arguments
---------
state, numpy array of shape (n, 3) - every row is x, y, theta. First state is robot state.
b_active_only - if True, set state for active objects only, i.e. n-1 is number of active objects,
else set states for all (n-1 must be number of total objects)
--------
Returns
--------
valid_state, bool - True iff the set state is valid
"""
request = SetStateRequest()
if b_active_only:
request.obj_names = [self._robot_name]
request.obj_names.extend(self._active_objects)
else:
request.obj_names = self._all_entities_names
for obj_state in state:
request.states.append(Pose2D(*obj_state))
response = self._set_state_service(request)
if not response.service_success:
raise RuntimeError("Set state service failed.")
return response.valid_state
def get_state(self, b_active_only=True):
"""
Return the current state of the world.
---------
Arguments
---------
b_active_only, bool - if True, only return state of active objects.
First state is always robot state.
-------
Returns
-------
state, numpy array of shape (n, 3) -
n-1 is the number of active objects
first state is robot state
each row is x, y, theta for one object/robot
"""
response = self._get_state_service()
name_state_map = dict(zip(
response.obj_names, [[pose.x, pose.y, pose.theta] for pose in response.states]))
if b_active_only:
object_names = [self._robot_name]
object_names.extend(self._active_objects)
else:
object_names = self._all_entities_names
return numpy.array([name_state_map[name] for name in object_names])
def set_active(self, obj_name):
"""
Set the only the object with the given name active.
This is a convenience function to set only a single object (and the robot)
active. Any other object will be inactive.
"""
active_mapping = dict(zip(self._all_entities_names, itertools.repeat(False, len(self._all_entities_names))))
active_mapping[self._robot_name] = True
active_mapping[obj_name] = True
self.set_active_objects(active_mapping)
def set_active_objects(self, active_mapping):
"""
Set which objects to activate.
---------
Arguments
---------
active_mapping, dict - maps object names to bools indicating whether the object
should be active or not.
Any object that is not specified in active_mapping, remains in its current state of
activity.
"""
request = SetActiveObjectsRequest(active_mapping.keys(), active_mapping.values())
response = self._set_active_objects_service(request)
if not response.service_success:
raise RuntimeError("Could not set activity for objects " + str(active_mapping.keys()))
old_active_objects = set(self._active_objects)
deactivated_objects = set([obj_name for (obj_name, b_active) in active_mapping.items() if not b_active])
activated_objects = set([obj_name for (obj_name, b_active) in active_mapping.items()
if b_active and obj_name != self._robot_name])
self._active_objects = old_active_objects - deactivated_objects | activated_objects
def propagate(self, action):
"""
Forward propagate the given action.
---------
Arguments
---------
action, iterable of floats representing action to forward propagate
-------
Returns
-------
valid, bool - True if resulting state is valid
"""
request = PropagateRequest(action)
response = self._propagate_service(request)
if not response.service_success:
# TODO figure out what should be done in this case
return False
return response.valid_propagation
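# Illustrative usage sketch (not part of the original module); the action values are made up and
# the action length must match dof_action_space reported by get_action_space_info():
#
# bridge = ROSOracleBridge()
# info = bridge.get_action_space_info()
# start_state = bridge.get_state()
# if bridge.propagate([0.05] * info.dof_action_space):
#     end_state = bridge.get_state()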
``` |
{
"source": "joshuahellier/PhDStuff",
"score": 2
} |
#### File: 2Dim/mainStuff/2dSteadyFlow.py
```python
import sys
import os
import math
import shutil
resultDir = os.environ.get('RESULTS')
tempDir = os.environ.get('TMPDIR')
if resultDir == None or tempDir == None:
print ("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysWidth, sysLength, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
from RateCalc import *
from DensHist import *
botConc = float(sys.argv[1])
topConc = float(sys.argv[2])
rateConstFull = float(sys.argv[3])
sysWidth = int(sys.argv[4])
sysLength = int(sys.argv[5])
analInterval = int(sys.argv[6])
numStepsEquilib = int(sys.argv[7])
numStepsSnapshot = int(sys.argv[8])
numStepsAnal = int(sys.argv[9])
numStepsReq = int(sys.argv[10])
numPasses = int(sys.argv[11])
timeInterval = float(sys.argv[12])
fileInfo = sys.argv[13]
resultsPlace = resultDir+"/"+fileInfo+"/"
tempPlace = tempDir+"/"+fileInfo+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
if not os.path.exists(tempPlace):
os.makedirs(tempPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('BotConcentration = ' + str(botConc) +'\n')
f.write('TopConcentration = ' + str(topConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysWidth = ' + str(sysWidth) +'\n')
f.write('SysLength = ' + str(sysLength) +'\n')
f.write('TimeInterval = ' + str(timeInterval) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsSnapshot = '+str(numStepsSnapshot)+'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 2d
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = sysWidth
yRep = sysLength + 4
zRep = 1
numPoints = xRep*zRep*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep),
periodic=(True, True, False))
# Generate the initial types. There's a double-layered section of "To" at the top and "Bo" at the bottom
avConc = 0.5*(botConc+topConc)
types = []
for yIndex in range(0, 2):
for xIndex in range(0, xRep):
random = numpy.random.rand()
if random < botConc:
types.append((xIndex, yIndex, 0, 0, "BoO"))
else:
types.append((xIndex, yIndex, 0, 0, "BoV"))
for yIndex in range(2, yRep-2):
for xIndex in range(0, xRep):
random = numpy.random.rand()
if random < avConc:
types.append((xIndex, yIndex, 0, 0, "O"))
else:
types.append((xIndex, yIndex, 0, 0, "V"))
for yIndex in range(yRep-2, yRep):
for xIndex in range(0, xRep):
random = numpy.random.rand()
if random < topConc:
types.append((xIndex, yIndex, 0, 0, "ToO"))
else:
types.append((xIndex, yIndex, 0, 0, "ToV"))
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V","ToV","BoV", "ToO", "BoO"], default_type="V")
# Rates.
rateConstEmpty = 1.0
topSpawn = rateConstFull*rateConstFull*math.sqrt(topConc/(1.0-topConc))
botSpawn = rateConstFull*rateConstFull*math.sqrt(botConc/(1.0-botConc))
topDespawn = (rateConstFull**4)/topSpawn
botDespawn = (rateConstFull**4)/botSpawn
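# Explanatory note on the two pairs of rates above: since topDespawn = rateConstFull**4 / topSpawn,
# the ratio topSpawn / topDespawn = topSpawn**2 / rateConstFull**4 = topConc / (1.0 - topConc),
# so an isolated boundary site (processes 8-11 below act on a single site with constant rates)
# reaches stationary occupancy topSpawn / (topSpawn + topDespawn) = topConc, and likewise botConc
# at the bottom; the common rateConstFull**2 factor only sets how fast the boundary equilibrates.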
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Left
#2
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Right
#3
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Oxygen annihilation at the top boundary
#4
elements_before = ["O", "ToV"]
elements_after = ["V", "ToV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Oxygen creation at the top boundary
#5
elements_before = ["ToO", "V"]
elements_after = ["ToO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Now for Oxygen annihilation at the bottom boundary
#6
elements_before = ["O", "BoV"]
elements_after = ["V", "BoV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Oxygen creation at the bottom boundary
#7
elements_before = ["BoO", "V"]
elements_after = ["BoO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the bottom boundary
#8
elements_before = ["BoV"]
elements_after = ["BoO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#9
elements_before = ["BoO"]
elements_after = ["BoV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the top boundary
#10
elements_before = ["ToV"]
elements_after = ["ToO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the top boundary
#11
elements_before = ["ToO"]
elements_after = ["ToV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class modelRates2d(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
if process_number == 8:
return botSpawn
if process_number == 9:
return botDespawn
if process_number == 10:
return topSpawn
if process_number == 11:
return topDespawn
numNeighbours = len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"])
return math.pow(rateConstFull, numNeighbours-1)
def cutoff(self):
# Overloaded base class API function
return 1.0
interactions.setRateCalculator(rate_calculator=modelRates2d)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/10)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/10)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/10)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(tempPlace+"equilib.traj"))
with open(tempPlace+"inBot.dat", 'w') as f:
pass
with open(tempPlace+"outBot.dat", 'w') as f:
pass
with open(tempPlace+"inTop.dat", 'w') as f:
pass
with open(tempPlace+"outTop.dat", 'w') as f:
pass
if not os.path.exists(tempPlace+"numHists"):
os.makedirs(tempPlace+"numHists")
ovNumHist = []
for index in range(0, numPoints):
ovNumHist.append(0.0)
for passNum in range(0, numPasses):
processStatsOxInBot = RateCalc(processes=[7])
processStatsOxOutBot = RateCalc(processes=[6])
processStatsOxInTop = RateCalc(processes=[5])
processStatsOxOutTop = RateCalc(processes=[4])
numHist = DensHist(spec=["O"], inProc=[7, 5], outProc=[6, 4])
model.run(control_parameters_req, trajectory_filename=("/dev/null"))
model.run(control_parameters_anal, trajectory_filename=("/dev/null"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop, numHist])
with open(tempPlace+"inBot.dat", 'a') as f:
processStatsOxInBot.printResults(f)
with open(tempPlace+"outBot.dat", 'a') as f:
processStatsOxOutBot.printResults(f)
with open(tempPlace+"inTop.dat", 'a') as f:
processStatsOxInTop.printResults(f)
with open(tempPlace+"outTop.dat", 'a') as f:
processStatsOxOutTop.printResults(f)
with open(tempPlace+"numHists/numHist"+str(passNum)+".dat", 'w') as f:
pass
with open(tempPlace+"numHists/numHist"+str(passNum)+".dat", 'a') as f:
numHist.printResults(f)
with open(tempPlace+"numHists/numHist"+str(passNum)+".dat", 'r') as f:
lines = f.readlines()
for index in range(0, numPoints):
words = lines[index].split()
ovNumHist[index] += float(words[1])
os.remove(tempPlace+"numHists/numHist"+str(passNum)+".dat")
with open(resultsPlace+"ovNumHist.dat", 'w') as f:
for index in range(0, numPoints):
f.write(str(index)+" "+str(ovNumHist[index])+"\n")
shutil.copy(tempPlace+"inBot.dat", resultsPlace+"inBot.dat")
shutil.copy(tempPlace+"outBot.dat", resultsPlace+"outBot.dat")
shutil.copy(tempPlace+"inTop.dat", resultsPlace+"inTop.dat")
shutil.copy(tempPlace+"outTop.dat", resultsPlace+"outTop.dat")
shutil.copy(tempPlace+"equilib.traj", resultsPlace+"equilib.traj")
shutil.rmtree(tempPlace, ignore_errors=True)
print("Process would appear to have succesfully terminated! How very suspicious...")
```
#### File: 2Dim/periodicEnergyCalc/2dPeriodic.py
```python
import sys
import os
import math
import shutil
from random import randint
resultDir = os.environ.get('RESULTS')
if resultDir == None :
print ("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
#from RateCalc import *
from DensHist import *
from energyStats2d import *
ovConc = float(sys.argv[1])
rateConstFull = float(sys.argv[2])
sysWidth = int(sys.argv[3])
sysLength = int(sys.argv[4])
analInterval = int(sys.argv[5])
numStepsEquilib = int(sys.argv[6])
numStepsAnal = int(sys.argv[7])
numStepsReq = int(sys.argv[8])
numPasses = int(sys.argv[9])
fileInfo = sys.argv[10]
resultsPlace = resultDir+"/"+fileInfo+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('OverallConcentration = ' + str(ovConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysWidth = ' + str(sysWidth) +'\n')
f.write('SysLength = ' + str(sysLength) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 2d
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = sysWidth
yRep = sysLength
zRep = 1
numPoints = xRep*zRep*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep),
periodic=(True, True, False))
# Generate the initial types
types = []
types = ["V"]*numPoints
numParticles = int(numPoints*ovConc)
i=0
firstPass = True
while firstPass or ( i <= numParticles and i < numPoints-1 ):
firstPass = False
typePos = randint(0, numPoints-1)
if types[typePos] == "V":
types[typePos] = "O"
i += 1
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V"])
# Rates.
rateConstEmpty = 1.0
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, -1.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Left
#2
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Right
#3
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class modelRates2d(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
numNeighbours = len([e for e in elements_before if e == "O"])
return math.pow(rateConstFull, numNeighbours-1)
def cutoff(self):
# Overloaded base class API function
return 1.0
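# Reading of the rate function above (explanatory comment): elements_before spans the sites
# within the cutoff, so numNeighbours counts the hopping particle itself plus its occupied
# neighbours. An isolated particle therefore hops at rate rateConstFull**0 = 1, and each
# additional occupied neighbour multiplies the hop rate by rateConstFull.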
interactions.setRateCalculator(rate_calculator=modelRates2d)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/10)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/10)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/10)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(resultsPlace+"equilib.traj"))
if not os.path.exists(resultsPlace+"enHists"):
os.makedirs(resultsPlace+"enHists")
ovEnHist = []
for index in range(0, 2*numPoints):
ovEnHist.append(0.0)
for passNum in range(0, numPasses):
enHist = energyStats2d(spec=["O"])
model.run(control_parameters_req, trajectory_filename=("/dev/null"))
model.run(control_parameters_anal, trajectory_filename=("/dev/null"), analysis=[enHist])
with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'w') as f:
pass
with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'a') as f:
enHist.printResults(f)
with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'r') as f:
lines = f.readlines()
for index in range(0, 2*numPoints):
words = lines[index].split()
ovEnHist[index] += float(words[1])
os.remove(resultsPlace+"enHists/enHist"+str(passNum)+".dat")
with open(resultsPlace+"ovEnHist.dat", 'w') as f:
for index in range(0, 2*numPoints):
f.write(str(index)+" "+str(ovEnHist[index]/float(numPasses))+"\n")
shutil.rmtree(resultsPlace+"enHists", ignore_errors=True)
print("Process would appear to have succesfully terminated! How very suspicious...")
```
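
The only nontrivial piece of the script above is the custom rate rule in `modelRates2d`: the hop rate is `rateConstFull` raised to one less than the number of `"O"` sites in `elements_before` (the moving particle itself is counted, hence the `-1`). Below is a minimal, KMCLib-free sketch of that counting rule; the value of `rateConstFull` is a placeholder, since the script normally reads it from the command line.
```python
# Stand-alone sketch of the neighbour-dependent hop rate used by modelRates2d above.
import math

rateConstFull = 0.5  # placeholder value; the script takes this from sys.argv

def hop_rate(elements_before):
    numNeighbours = len([e for e in elements_before if e == "O"])
    return math.pow(rateConstFull, numNeighbours - 1)

print(hop_rate(["O", "V", "V"]))  # isolated particle       -> 1.0
print(hop_rate(["O", "O", "V"]))  # one occupied neighbour  -> 0.5
print(hop_rate(["O", "O", "O"]))  # two occupied neighbours -> 0.25
```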
#### File: rateCaculation/postStuff/steadyStateFlow.py
```python
import sys
import os
import math
resultDir = os.environ.get('RESULTS')
if resultDir is None:
    print("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysSize, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
botConc = float(sys.argv[1])
topConc = float(sys.argv[2])
rateConstFull = float(sys.argv[3])
sysSize = int(sys.argv[4])
analInterval = int(sys.argv[5])
numStepsEquilib = int(sys.argv[6])
numStepsSnapshot = int(sys.argv[7])
numStepsAnal = int(sys.argv[8])
numStepsReq = int(sys.argv[9])
numPasses = int(sys.argv[10])
timeInterval = float(sys.argv[11])
fileInfo = sys.argv[12]
resultsPlace = resultDir+"/"+fileInfo+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('BotConcentration = ' + str(botConc) +'\n')
f.write('TopConcentration = ' + str(topConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysSize = ' + str(sysSize) +'\n')
f.write('TimeInterval = ' + str(timeInterval) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsSnapshot = '+str(numStepsSnapshot)+'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 1d, so everything's a bit trivial
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = 1
yRep = 1
zRep = sysSize
numPoints = xRep*(zRep+4)*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep+4),
periodic=(False, False, True))
# Generate the initial types. There's double-layered section of "To" at the top and "Bo" at the bottom
avConc = 0.5*(botConc+topConc)
types = ["V"]*numPoints
types[0] = "BoV"
types[1] = "BoV"
types[-2] = "ToV"
types[-1] = "ToV"
for i in range(int(zRep*avConc)):
# find a site which is not yet occupied by a "O" type.
pos = int(numpy.random.rand()*zRep+2.0)
while (types[pos] != "V"):
pos = int(numpy.random.rand()*zRep+2.0)
# Set the type.
types[pos] = "O"
"""
for i in range(2, numPoints-2):
if i < numPoints/2:
types[i] = "O"
else:
types[i] = "V"
"""
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V","ToV","BoV", "ToO", "BoO"])
# Rates.
rateConstEmpty = 1.0
topSpawn = math.sqrt(topConc/(1.0-topConc))
botSpawn = math.sqrt(botConc/(1.0-botConc))
topDespawn = 1.0/topSpawn
botDespawn = 1.0/botSpawn
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up, empty.
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down, empty.
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Now for Oxygen annihilation at the top boundary
#2
elements_before = ["O", "ToV"]
elements_after = ["V", "ToV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise the rate constant
# Oxygen creation at the top boundary
#3
elements_before = ["ToO", "V"]
elements_after = ["ToO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Now for Oxygen annihilation at the bottom boundary
#4
elements_before = ["O", "BoV"]
elements_after = ["V", "BoV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Obviously the rate constant will be customised
# Oxygen creation at the bottom boundary
#5
elements_before = ["BoO", "V"]
elements_after = ["BoO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the bottom boundary
#6
elements_before = ["BoV"]
elements_after = ["BoO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#7
elements_before = ["BoO"]
elements_after = ["BoV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the top boundary
#8
elements_before = ["ToV"]
elements_after = ["ToO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#9
elements_before = ["ToO"]
elements_after = ["ToV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
"""I have all the processes explicitly spelled out here; however, for some weird reason this does not work
properly with KMCLib (as in, the processes involving the Bo atoms never seem to fire); hence I have switched
back to using custom rates.
# Bulk processes
# Up, empty.
#0
elements_before = ["O", "V", "V"]
elements_after = ["V", "O", "V"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstEmpty))
# Up, full.
#1
elements_before = ["O", "V", "O"]
elements_after = ["V", "O", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstFull))
# Down, empty.
#2
elements_before = ["O", "V", "V"]
elements_after = ["V", "O", "V"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstEmpty))
# Down, full.
#3
elements_before = ["O", "V", "O"]
elements_after = ["V", "O", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstFull))
# Now for Oxygen annihilation at the top boundary (when empty)
#4
elements_before = ["O", "To", "V"]
elements_after = ["V", "To", "V"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstEmpty*(1.0-topConc)))
# Now for Oxygen annihilation at the top boundary (when full)
#5
elements_before = ["O", "To", "O"]
elements_after = ["V", "To", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstFull*(1.0-topConc)))
# Oxygen creation at the top boundary
#6
elements_before = ["V", "To"]
elements_after = ["O", "To"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=topConc*(topConc*rateConstFull+(1.0-topConc)*rateConstEmpty)))
# Now for Oxygen annihilation at the bottom boundary, empty.
# Bottom
#7
elements_before = ["Bo", "O", "V"]
elements_after = ["Bo", "V", "V"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 2.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstEmpty*(1.0-botConc)))
# Now for Oxygen annihilation at the bottom boundary, full.
# Bottom
#8
elements_before = ["Bo", "O", "O"]
elements_after = ["Bo", "V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 2.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=rateConstFull*(1.0-botConc)))
# Oxygen creation at the bottom boundary
# Bottom
#9
elements_before = ["Bo", "V"]
elements_after = ["Bo", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=botConc*(rateConstFull*botConc+(1.0-botConc)*rateConstEmpty)))
# Special annoying corner cases for boundaries
# Up, bottom.
#10
elements_before = ["Bo", "O", "V"]
elements_after = ["Bo", "V", "O"]
coordinates = [[0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=(botConc*rateConstFull+(1.0-botConc)*rateConstEmpty)))
# Down, top.
#11
elements_before = ["V", "O", "To"]
elements_after = ["O", "V", "To"]
coordinates = [[0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=(topConc*rateConstFull + (1.0-topConc)*rateConstEmpty)))
"""
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class lolModelRates(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
    def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
        # Processes 0-5 are the bulk and boundary hops: use the "full" rate when the
        # moving particle has an occupied neighbour (reservoir O sites count as occupied).
        if process_number in (0, 1, 2, 3, 4, 5):
            numOccupied = len([e for e in elements_before if e in ("O", "ToO", "BoO")])
            return rateConstFull if numOccupied == 2 else rateConstEmpty
        # Processes 6-9 are creation/annihilation at the reservoir sites themselves.
        if process_number == 6:
            return botSpawn
        if process_number == 7:
            return botDespawn
        if process_number == 8:
            return topSpawn
        if process_number == 9:
            return topDespawn
def cutoff(self):
# Overloaded base class API function
return 1.0
interactions.setRateCalculator(rate_calculator=lolModelRates)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
# Msd stuff; this is producing weird outputs, but probably because my parameter choices are completely wrong,
# so I'm not so worried about that yet
"""msd_analysis = OnTheFlyMSD(history_steps=10000,
n_bins=10000,
t_max=2000.0,
track_type="O")"""
# Trying to find out information about distribution of time steps
#timeStepDistn = TimeStepDistribution(0.1)
processStatsOxInBot = ProcessStatistics(processes=[5], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval, resultsPlace=resultsPlace, processesObject=processes)
processStatsOxOutBot = ProcessStatistics(processes=[4], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
processStatsOxInTop = ProcessStatistics(processes=[3], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
processStatsOxOutTop = ProcessStatistics(processes=[2], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/100)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/100)
control_parameters_snapshot = KMCControlParameters(number_of_steps=numStepsSnapshot, analysis_interval=1,
dump_interval=numStepsSnapshot/100)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/100)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(resultsPlace+"equilibTraj.tr"))
model.run(control_parameters_snapshot, trajectory_filename=(resultsPlace+"snapTraj.tr"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop, compositionTracker])
with open(resultsPlace+"procOxInBotSnap.dat", 'w') as f:
processStatsOxInBot.printResults(f)
with open(resultsPlace+"procOxOutBotSnap.dat", 'w') as f:
processStatsOxOutBot.printResults(f)
with open(resultsPlace+"procOxInTopSnap.dat", 'w') as f:
processStatsOxInTop.printResults(f)
with open(resultsPlace+"procOxOutTopSnap.dat", 'w') as f:
processStatsOxOutTop.printResults(f)
with open(resultsPlace+"composition.dat", 'w') as f:
compositionTracker.printResults(f)
for passNum in range(0, numPasses):
processStatsOxInBot = ProcessStatistics(processes=[5], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
processStatsOxOutBot = ProcessStatistics(processes=[4], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
processStatsOxInTop = ProcessStatistics(processes=[3], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
processStatsOxOutTop = ProcessStatistics(processes=[2], time_interval=timeInterval, spatially_resolved=False, anal_Interval = analInterval)
model.run(control_parameters_req, trajectory_filename=(resultsPlace+"mainTraj.tr"))
model.run(control_parameters_anal, trajectory_filename=(resultsPlace+"mainTraj.tr"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop])
if not os.path.exists(resultsPlace+"inBot"):
os.makedirs(resultsPlace+"inBot")
if not os.path.exists(resultsPlace+"outBot"):
os.makedirs(resultsPlace+"outBot")
if not os.path.exists(resultsPlace+"inTop"):
os.makedirs(resultsPlace+"inTop")
if not os.path.exists(resultsPlace+"outTop"):
os.makedirs(resultsPlace+"outTop")
with open(resultsPlace+"inBot/inBot"+str(passNum)+".dat", 'w') as f:
processStatsOxInBot.printResults(f)
with open(resultsPlace+"outBot/outBot"+str(passNum)+".dat", 'w') as f:
processStatsOxOutBot.printResults(f)
with open(resultsPlace+"inTop/inTop"+str(passNum)+".dat", 'w') as f:
processStatsOxInTop.printResults(f)
with open(resultsPlace+"outTop/outTop"+str(passNum)+".dat", 'w') as f:
processStatsOxOutTop.printResults(f)
print("Process would appear to have succesfully terminated! How very suspicious...")
```
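
The boundary rates in this script (`topSpawn = sqrt(c/(1-c))`, `topDespawn = 1/topSpawn`, and likewise at the bottom) appear to be chosen so that the spawn/despawn ratio equals `c/(1-c)`, which pins each reservoir site's occupation at the requested concentration. A short stand-alone check of that choice:
```python
# Verify that spawn/despawn = c/(1-c) gives a stationary occupation equal to c
# for a two-state (occupied/vacant) boundary site.
import math

for conc in (0.2, 0.5, 0.8):
    spawn = math.sqrt(conc / (1.0 - conc))
    despawn = 1.0 / spawn
    occupation = spawn / (spawn + despawn)
    print(conc, occupation)  # the two numbers agree
```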
#### File: thesisCodes/correlationFunctions/exactDist.py
```python
import copy
import sys
def configMake(L, N, prevList, totList):
if L==1:
endList = [copy.deepcopy(prevList), N]
totList.append(unfold(endList))
return [N]
if N==0:
return configMake(L-1, 0, [copy.deepcopy(prevList), 0], totList)
if L==N:
return configMake(L-1, N-1, [copy.deepcopy(prevList), 1], totList)
return [configMake(L-1, N, [copy.deepcopy(prevList), 0], totList), configMake(L-1, N-1, [copy.deepcopy(prevList), 1], totList)]
def adjSum(candList):
listLen = len(candList)
total = 0
for index in range(0, listLen):
total += candList[index-1]*candList[index]
return total
def unfold(candList):
if isinstance(candList, list):
if len(candList)==2:
return unfold(candList[0])+unfold(candList[1])
if len(candList)==1:
return candList
if len(candList)==0:
return []
return [candList]
def listCollate(candList):
maxItem = 0
for index in candList:
if index > maxItem:
maxItem = index
outPut = []
for size in range(0, maxItem+1):
numCounts = 0
for index in candList:
if index == size:
numCounts += 1
outPut.append((size, numCounts))
return outPut
def genCorrFn(L, N):
totList = []
allStates = configMake(L, N, [], totList)
restStates = []
weightList = []
maxAdj = 0
for state in totList:
if state[0]==1:
restStates.append((state, adjSum(state)))
if restStates[-1][1]>maxAdj:
maxAdj = restStates[-1][1]
weightList.append(restStates[-1][1])
partFnList = listCollate(weightList)
print(partFnList)
partitionFn = "("
for pair in partFnList:
partitionFn += str(pair[1])+" Exp["+str(pair[0]-maxAdj)+"b] + "
partitionFn += "0)"
print(partitionFn)
finalOut = "{"
    for shift in range(0, L-L//2):
tempList = []
for config in restStates:
if config[0][shift] == 1:
tempList.append(config[1])
stateDist = listCollate(tempList)
outSum = "{"+str(shift)+", ("
for pair in stateDist:
outSum += str(pair[1])+" Exp["+str(pair[0]-maxAdj)+"b] + "
outSum += "0)/"+partitionFn+"}"
finalOut += outSum
        if shift != L-L//2-1:
finalOut += ", "
finalOut+="}"
return finalOut
L = int(sys.argv[1])
with open("corrFnResults.m", 'w') as f:
f.write("{")
for n in range(2, L-2):
f.write("{"+str(n)+"/"+str(L)+", "+genCorrFn(L, n)+"}, ")
f.write(genCorrFn(L, L-2) + "}")
```
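
The script is driven purely by its command-line argument `L` and writes a Mathematica-style nested list of exact correlation functions to `corrFnResults.m`. A hedged invocation sketch, assuming `exactDist.py` sits in the current working directory and runs under the active interpreter:
```python
# Run the exact-distribution script for a small ring and peek at the output file.
import subprocess
import sys
from pathlib import Path

subprocess.run([sys.executable, "exactDist.py", "8"], check=True)  # L = 8 sites
print(Path("corrFnResults.m").read_text()[:200])  # Mathematica-format correlation data
```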
#### File: kmc/customAnalysis/BlockStats.py
```python
import sys
import numpy
import math
from KMCLib.PluginInterfaces.KMCAnalysisPlugin import KMCAnalysisPlugin
from KMCLib.Utilities.CheckUtilities import checkSequenceOfPositiveIntegers
from KMCLib.Utilities.CheckUtilities import checkPositiveFloat
from KMCLib.Utilities.CheckUtilities import checkPositiveInteger
from KMCLib.Exceptions.Error import Error
from KMCLib.Backend.Backend import MPICommons
class BlockStats(KMCAnalysisPlugin):
""" Class for collecting block size statistics during a run."""
def __init__(self,
blockComp=None):
"""
Initialisation for BlockStats
"""
# Check and set the input.
        if blockComp is None:
sys.exit("Need to specify which types are to be counted as part of the block!")
self.__blockComp = blockComp
def setup(self, step, time, configuration):
"""
        Receives the setup call.
"""
# Set the initial time.
self.__lastTime = time
self.__initialTime = time
        # Allocate space for the block length histogram.
typeList = configuration.types()
self.__histogram = []
for typeIndex in range(0, len(typeList)):
self.__histogram.append([typeIndex, 0.0])
def registerStep(self, step, time, configuration):
typeList = configuration.types()
timeInterval = time - self.__lastTime
scanIndex = 0
blockLength = 0
inBlock = False
while(scanIndex<len(typeList)):
if inBlock==True:
""" Continuing an existing block """
if typeList[scanIndex] in self.__blockComp:
blockLength += 1
else:
""" Finishing a block """
self.__histogram[blockLength][1] += timeInterval
inBlock = False
else:
if typeList[scanIndex] in self.__blockComp:
""" Start a new block """
inBlock = True
blockLength = 1
""" Otherwise, just keep looking for block bits """
scanIndex += 1
self.__lastTime = time
def finalize(self):
totalTime = self.__lastTime - self.__initialTime
if totalTime<=0.0:
sys.exit("Something has gone wrong with the elapsed time!")
for item in self.__histogram:
item[1]/=totalTime
def printResults(self, stream=sys.stdout):
"""
Print the results to the stream.
:param stream: The stream to print to.
"""
# Only master writes.
if MPICommons.isMaster():
for item in self.__histogram:
stream.write(str(item[0])+" "+str(item[1])+"\n")
```
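
BlockStats implements the same `KMCAnalysisPlugin` interface as the stock analysis objects used in the simulation scripts above, so it is attached through the `analysis` list of `model.run`. A hedged usage sketch; it assumes KMCLib is installed, that this file is importable as `BlockStats`, and that `model` and `control_parameters_anal` have been built as in those scripts:
```python
# Attach the block-length histogram to a run and dump the time-averaged result.
from BlockStats import BlockStats

block_stats = BlockStats(blockComp=["O"])  # types counted as part of a block
model.run(control_parameters_anal, trajectory_filename="blockTraj.tr", analysis=[block_stats])
with open("blockStats.dat", "w") as f:
    block_stats.printResults(f)  # one line per block length: "length  time-averaged count"
```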
#### File: kmc/customPythonLibs/foldyFloatList.py
```python
import numberedFloat
import pyllist
nf = numberedFloat.numberedFloat
dllist = pyllist.dllist
class foldyFloatList:
def __init__(self):
self.__values = dllist()
def addValue(self, x):
self.__values.append(nf(x))
self.attemptFold()
def toString(self):
outString = "["
for x in self.__values:
outString += x.toString()+" "
outString += "\b]"
return outString
def extractSum(self):
total = 0.0
curVals = reversed(self.__values)
for x in curVals:
total += x.getValue()
return total
def attemptFold(self):
it = self.__values.last
if it==None:
return
front = self.__values.first
previous = it
while it != front:
it = it.prev
if it.value.getHeight()>previous.value.getHeight():
break
else:
sum = it.value.getValue() + previous.value.getValue()
self.__values.remove(previous)
it.value.setValue(sum)
it.value.setHeight(it.value.getHeight()+1)
previous = it
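# Minimal self-test sketch (guarded so importing the module stays side-effect free).
# It assumes the numberedFloat and pyllist dependencies imported above are installed.
if __name__ == "__main__":
    acc = foldyFloatList()
    values = [1e-8] * 1000 + [1.0]
    for x in values:
        acc.addValue(x)  # equal-height partial sums are folded together as they arrive
    print(acc.extractSum(), sum(values))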
``` |
{
"source": "JoshuaHenriques/ecommerce-db-generator",
"score": 3
} |
#### File: JoshuaHenriques/ecommerce-db-generator/scrape.py
```python
from bs4 import BeautifulSoup
import cloudscraper
def scrape():
url = 'https://www.fakeaddressgenerator.com/World/ca_address_generator'
scraper = cloudscraper.create_scraper()
page = scraper.get(url).content
soup = BeautifulSoup(page, "html.parser")
results = soup.find_all("b")
name = results[1].text.split()
first_name = name[0]
last_name = name[2]
full_name = first_name + " " + last_name
phone_number = results[12].text
phone_number = phone_number.split("-")[0] + phone_number.split("-")[1] + phone_number.split("-")[2]
email = last_name + "." + first_name + phone_number[8] + "@gmail.com"
password = results[53].text
date_of_birth = results[4].text
if len(date_of_birth.split("/")[0]) == 1:
day = "0" + date_of_birth.split("/")[0]
else:
day = date_of_birth.split("/")[0]
if len(date_of_birth.split("/")[1]) == 1:
month = "0" + date_of_birth.split("/")[1]
else:
month = date_of_birth.split("/")[1]
date_of_birth = (date_of_birth.split("/")[2][2] + date_of_birth.split("/")[2][3]) + month + day
street_name = results[7].text.split()[1] + " " + results[7].text.split()[2]
street_number = results[7].text.split()[0]
city = results[8].text
postal_code = results[11].text.split()[0] + results[11].text.split()[1]
province = results[10].text
ccn = results[30].text
four_dig = ccn[12] + ccn[13] + ccn[14] + ccn[15]
cvc = results[31].text
login = {
"roles": ["USER"],
"email": email,
"phoneNumber": phone_number,
"password": password,
"enabled": True
}
customer = {
"firstName": first_name,
"lastName": last_name,
"phoneNumber": phone_number,
"email": email,
"password": password,
"dateOfBirth": date_of_birth,
"cart": {},
"address": {
"streetName": street_name,
"streetNumber": street_number,
"unitNumber": 000,
"city": city,
"postalCode": postal_code,
"province": province
},
"wallet": [{
"fullName": full_name,
"ccn": ccn,
"expDate": "0425",
"cvc": cvc,
"fourDig": four_dig
}],
"orders": []
}
return customer, login
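# Hedged usage sketch: scrape() performs a live request to fakeaddressgenerator.com,
# so this demo needs network access plus the bs4 and cloudscraper packages imported above.
if __name__ == "__main__":
    import json
    customer, login = scrape()
    print(json.dumps(login, indent=2))
    print(customer["firstName"], customer["address"]["postalCode"])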
``` |
{
"source": "JoshuaHess12/hdi-prep",
"score": 3
} |
#### File: hdi-prep/HDIprep/utils.py
```python
import numpy as np
import pandas as pd
import nibabel as nib
from pathlib import Path
from skimage.transform import resize
from ast import literal_eval
# Define function
def CreateHyperspectralImage(embedding, array_size, coordinates, scale=True):
"""Fill a hyperspectral image from n-dimensional embedding of high-dimensional
imaging data by rescaling each channel from 0-1 (optional). All coordinates
in the image not listed in coordinates object will be masked
and set to 0 (background).
Parameters
----------
embedding: Pandas DataFrame
Indicates embedding coordinates from UMAP or another method.
array_size: tuple
Indicates size of image.
coordinates: 1-indexed list of tuples
Indicates pixel coordinates of image.
scale: Bool (Default: True)
Rescale pixel intensities on the range of 0-1.
Returns
-------
im: array
Reconstructed image.
"""
# Create zeros array to fill with number channels equal to embedding dimension
im = np.zeros((array_size[0], array_size[1], embedding.shape[1]), dtype=np.float32)
# Run through the data coordinates and fill array
for i, (x, y, z) in enumerate(coordinates):
# Run through each slice of embedding (dimension)
for dim in range(embedding.shape[1]):
# Add data to this slice
im[y - 1, x - 1, dim] = embedding.values[i, dim]
# Create a mask to use for excluding pixels not used in dimension reduction
    im_bool = np.zeros((array_size[0], array_size[1]), dtype=bool)
# Fill the mask array with True values at the coordinates used
for i, (x, y, z) in enumerate(coordinates):
# Add boolean mask
im_bool[y - 1, x - 1] = True
# Check to see if scaling the pixel values 0 to 1
if scale:
# Scale the data 0-1 for hyperspectral image construction
for dim in range(im.shape[2]):
# min-max scaler
im[:, :, dim] = (im[:, :, dim] - im[:, :, dim].min()) / (
im[:, :, dim].max() - im[:, :, dim].min()
)
# Mask the image with the boolean array to remove unused pixels
im[~im_bool] = 0
# Return the hyperspectral image
return im
# Define function
def CreateHyperspectralImageRectangular(embedding, array_size, coordinates, scale=True):
"""Fill a hyperspectral image from n-dimensional embedding of high-dimensional
imaging data by rescaling each channel from 0-1 (optional). All coordinates
in the image not listed in coordinates object will be masked
and set to 0 (background). This function assumes that the data you want
to reconstruct can be automatically reshaped into a rectangular array.
Parameters
----------
embedding: Pandas DataFrame
Indicates embedding coordinates from UMAP or another method.
array_size: tuple
Indicates size of image.
coordinates: 1-indexed list of tuples
Indicates pixel coordinates of image.
scale: Bool (Default: True)
Rescale pixel intensities on the range of 0-1.
Returns
-------
im: array
Reconstructed image.
"""
# get the embedding shape
number_channels = embedding.shape[1]
# Create zeros array to fill with number channels equal to embedding dimension
im = embedding.values.reshape((array_size[0], array_size[1], number_channels))
# Check to see if scaling the pixel values 0 to 1
if scale:
# Scale the data 0-1 for hyperspectral image construction
for dim in range(im.shape[2]):
# min-max scaler
im[:, :, dim] = (im[:, :, dim] - im[:, :, dim].min()) / (
im[:, :, dim].max() - im[:, :, dim].min()
)
# Return the hyperspectral image
return im
def ExportNifti(image, filename, padding=None, target_size=None):
"""Export processed images resulting from UMAP and
spatially mapping UMAP, or exporting processed histology images.
Parameters
----------
image: array
Array that represents image to export.
filename: string
Path, including filename, to export processed nifti image to.
padding: string of tuple of type integer (padx,pady; Default: None)
Indicates height and length padding to add to the image before exporting.
target_size: string of tuple of type integer (sizex,sizey; Default: None)
Resize image using bilinear interpolation before exporting.
"""
# Create pathlib object from the filename
filename = Path(filename)
# convert the padding and target size to tuple if present
if padding is not None:
padding = literal_eval(padding)
if target_size is not None:
target_size = literal_eval(target_size)
# Print update
print("Exporting nifti image stack...")
# Check to see if padding
if padding is not None:
image = np.pad(
image,
[(padding[0], padding[0]), (padding[1], padding[1]), (0, 0)],
mode="constant",
)
# Check to see if resizing
if target_size is not None:
image = resize(image,target_size)
# Create nifti object -- transpose axes because of the transformation!
# Check size
if len(image.shape) > 2:
# Create nifti object -- transpose axes because of the transformation!
nifti_im = nib.Nifti1Image(image.transpose(1, 0, 2), affine=np.eye(4))
else:
# Create nifti object -- transpose axes because of the transformation!
nifti_im = nib.Nifti1Image(image.T, affine=np.eye(4))
# Save the image
nib.save(nifti_im, str(filename))
# Print update
print("Finished exporting " + str(filename))
def Exp(x, a, b, c):
"""Exponential function to use for regression.
"""
return a * np.exp(-b * x) + c
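# Hedged usage sketch (guarded so the module still imports cleanly): build a tiny 2x2
# synthetic "embedding", reconstruct the hyperspectral image, and export it as nifti.
# The coordinate tuples are 1-indexed (x, y, z), matching the docstrings above.
if __name__ == "__main__":
    coords = [(1, 1, 1), (2, 1, 1), (1, 2, 1), (2, 2, 1)]
    embedding = pd.DataFrame(np.random.rand(4, 3))
    im = CreateHyperspectralImage(embedding, array_size=(2, 2), coordinates=coords)
    ExportNifti(im, "toy_embedding.nii", padding="(8,8)")  # padding strings are parsed with literal_eval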
``` |
{
"source": "JoshuaHess12/hdi-reg",
"score": 3
} |
#### File: hdi-reg/HDIreg/tmp_elastix_registration.py
```python
import matplotlib.pyplot as plt
import numpy as np
import sys
import time
import os
import re
import nibabel as nib
from pathlib import Path
import pandas as pd
import shutil
import glob
import fileinput
import cv2
from joblib import Parallel, delayed
from numba import njit, prange
import subprocess
import utils
#-----------------General Utility Functions for Registration-----------------------
def FormatFijiPointsFromCSV(input_file,selection_type):
"""This function will take the csv point selection file that you export from
using control+m in fiji (ImageJ) to the correct text file format to use with
elastix image registration
selection_type must be the string 'index' or 'points'"""
#Get the current directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
#Read the csv file that you input
data = pd.read_csv(input_file)
#Get the image folder name
parent=Path(input_file).parent
#Change the directory to the parent folder so we know where to export
os.chdir(parent)
#Remove the file extension
prefix=Path(input_file).stem
#Rename the first column for ease of access
data.rename(columns = {list(data)[0]:'Landmark'}, inplace=True)
#Create a new text file
txt_file = open(prefix+".txt","w+")
#Create first string to write to your file
str_list = [str(selection_type),str(max(data['Landmark']))]
txt_file.writelines(i + '\n' for i in str_list)
#Close and save the txt file
txt_file.close()
#Get only the data we need for the text file
point_tab = data[['X','Y']]
#Now append the data table to the txt file
point_tab.to_csv(prefix+".txt", header=False, index=False, sep=' ', mode='a')
#Change the current directory back to the home directory
os.chdir(home_dir)
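# Example (filenames are placeholders): converts the Fiji/ImageJ landmark export
# "landmarks.csv" into "landmarks.txt" next to it, in the elastix corresponding-points format.
# FormatFijiPointsFromCSV("landmarks.csv", selection_type="points")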
def FormatRegionFromFiji(input_file,selection_type="index"):
"""This function will take a set of points that you set and export in ImageJ
using Analyze > Tools > Save XY coordinates plugin"""
#Get the current directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
#Read the csv file that you input
data = pd.read_csv(input_file)
#Get the image folder name
parent=Path(input_file).parent
#Change the directory to the parent folder so we know where to export
os.chdir(parent)
#Remove the file extension
prefix=Path(input_file).stem
#Create a new text file
txt_file = open(prefix+".txt","w+")
#Create first string to write to your file
str_list = [str(selection_type),str(data.shape[0])]
txt_file.writelines(i + '\n' for i in str_list)
#Close and save the txt file
txt_file.close()
#Get only the data we need for the text file
point_tab = data[['X','Y']]
#Now append the data table to the txt file
point_tab.to_csv(prefix+".txt", header=False, index=False, sep=' ', mode='a')
#Change the current directory back to the home directory
os.chdir(home_dir)
def GetROImask(dir,full_size,ROI_correction = None):
"""This function will take the check all subdirectories
for csv files exported from using Analyze > Tools > Save XY coordinates plugin. It will
then create a mask for each of those ROIs based on the full image coordinates so that
each region can be extracted efficiently.
dir: The directory that you want to do a subdirectory search on for csv files.
full_image: Full image to use for masking ROI from
ROI_correction: correction to be added to the dimensions of the ROI
Returns a dictionary object with keys being the folders detected and the values
being the masks created based on csv files in folders and the full image
Note: We will use this function with ROIs defined on the toluidine blue image and we use
it to extract the ROIs from the final registered full MSI UMAP image """
#Get the names of the folders in your directory
filenames = [file for file in os.listdir(dir) if os.path.isdir(os.path.join(dir,file))]
#Get all csv files in those directories
#***Note that we are traversing all directory so may need to change to avoid prblems***
csv_files = utils.TraverseDir(ending="coordinates.csv")
#Create a dictionary object for our ROIs that contains the folder names with csv full paths
ROI_dict = dict(zip(filenames, csv_files))
#Create a new dictionary object to store the masks and coordinates in
ROI_masks = {}
#Access the dictionary ROI object to load the mask coordinates from csv files
for ROI, filename in ROI_dict.items():
#Read csv
file_cont = pd.read_csv(filename)
#Get the XY coordinates from the dataframe
point_tab = file_cont[['X','Y']]
#Create a mask for each of these ROI regions
tp_left = (point_tab[['X']].values.min()-1,point_tab[['Y']].values.min()-1)
bt_right = (point_tab[['X']].values.max()-1,point_tab[['Y']].values.max()-1)
#Create a blank mask - ***Order is 1 and 0 for the nifti image format*****
mask = np.zeros(shape=(full_size[1],full_size[0]),dtype="uint8")
#Draw rectangle on the mask using opencv
cv2.rectangle(img=mask,pt1=tp_left,pt2=bt_right,color=(255,255,255),thickness=-1)
#Extract sliced, nonzero regions from your original image
nonzero = np.nonzero(mask)
minx = min(nonzero[0])
maxx = max(nonzero[0])
miny = min(nonzero[1])
maxy = max(nonzero[1])
#Check to see if we are adding overlap to our ROIs
if ROI_correction is not None:
print('Detected ROI Correction Factor...')
minx = (minx-(ROI_correction))
maxx = (maxx+(ROI_correction))
miny = (miny-(ROI_correction))
maxy = (maxy+(ROI_correction))
#Add the mask in this iteration to our dictionary object
ROI_masks.update({str(ROI):[minx,maxx,miny,maxy]})
#Report that the masking is complete
print('Finished getting coordinates')
#Return the dictionary object
return ROI_masks
def CreateMaskFromCoords(filenames_list,full_image,invert=False):
"""Function for reading in ROI coordinates and converting them to a mask.
Currently only accepts nifti format for the full_image. Option to invert the mask"""
#Read the full image
full_im = nib.load(str(full_image)).get_fdata().T
#Get the full array size
full_size=full_im.shape
#Create a blank mask - ***Order is 1 and 0 for the nifti image format*****
mask = np.zeros(shape=(full_size[0],full_size[1]),dtype="uint8")
#Iterate through each ROI csv file
for roi in filenames_list:
#Read csv
file_cont = pd.read_csv(roi)
#Get the XY coordinates from the dataframe
point_tab = file_cont[['X','Y']]
#Create a mask for each of these ROI regions
tp_left = (point_tab[['X']].values.min()-1,point_tab[['Y']].values.min()-1)
bt_right = (point_tab[['X']].values.max()-1,point_tab[['Y']].values.max()-1)
#Draw rectangle on the mask using opencv
cv2.rectangle(img=mask,pt1=tp_left,pt2=bt_right,color=(255,255,255),thickness=-1)
#Check to see if inverting the mask
if invert:
mask = ~mask
#return the mask
return mask
def ROImaskExport(ROI_masks,full_img,flip_horz=False,flip_vert=False,prefix=None,export_image=True):
"""Function for exporting ROIs from an ROI_masks dictionary object that gets returned
from GetROImask function
ROI_masks: Returned dictionary from GetROImask function
full_img: Array that contains the image to be cropped from
flip_horz: Logical. If true, ROI is flipped horizontally
flip_vert: Logical. If true, ROI is flipped vertically
prefix: prefix to add to the ROIs that are exported. If left to None,
then a default prefix is added that corresponds to the image folder that the ROI represents"""
#Set your home directory
tmp = Path('..')
home_dir=tmp.cwd()
#Create a dictionary of final rescaled sizes to use for MSI ROI Extraction
fin_sizes = {}
#Loop through the ROI_masks dictionary
for ROI,mask in ROI_masks.items():
os.chdir(str(ROI))
#Read the imc image for resizing
imc_im=mms.imc_file(tmp.cwd(),return_im=True,return_imc_cell_table=False,return_pix_table=False).image
#Get our ROIs
tmp_ROI = full_img[int(mask[2]):int(mask[3]),int(mask[0]):int(mask[1])]
#Save the new ROI
if flip_horz:
tmp_ROI = np.flip(tmp_ROI,0)
if flip_vert:
tmp_ROI = np.flip(tmp_ROI,1)
#Get the size of the imc image
multiple = int(imc_im.shape[1]/tmp_ROI.shape[0])
#Use the rounded multiple to resize our ROI for image registration
HiRes_size = (tmp_ROI.shape[1]*multiple,tmp_ROI.shape[0]*multiple)
#Remember that cv2 does the axis in the opposite order as numpy
tmp_ROI_HR = cv2.resize(tmp_ROI,HiRes_size)
#Create a nifti object and save the image
nifti_ROI = nib.Nifti1Image(tmp_ROI_HR, affine=np.eye(4))
#Get a prefix for the image that we are saving
if prefix is None:
prefix_tmp = str(ROI)
else:
prefix = prefix
if export_image:
#Save the ROI
nib.save(nifti_ROI,prefix_tmp+'.nii')
#Report the finished job
print('Finished exporting '+str(ROI)+'...')
#Add the final size to our dictionary
fin_sizes.update({str(ROI):HiRes_size})
#Change back to the original directory
os.chdir(home_dir)
#Report the finished export job
    print('Finished exporting all regions')
#Return an object that contains the final size for each H&E ROI so we can apply the
#transformix function for each MSI ROI
return fin_sizes
def ROImaskExport_MSI_Transformix(ROI_masks_from_tolBlue,final_sizes,parameter_files,full_img,flip_horz=False,flip_vert=False,prefix=None):
"""Function for exporting ROIs from an ROI_masks dictionary object that gets returned
from GetROImask function
ROI_masks: Returned dictionary from GetROImask function
full_img: Array that contains the image to be cropped from
flip_horz: Logical. If true, ROI is flipped horizontally
flip_vert: Logical. If true, ROI is flipped vertically
prefix: prefix to add to the ROIs that are exported. If left to None,
then a default prefix is added that corresponds to the image folder that the ROI represents"""
#Set your home directory
tmp = Path('..')
home_dir=tmp.cwd()
#Loop through the ROI_masks dictionary
for ROI,mask in ROI_masks_from_tolBlue.items():
#Create a directory for our ROI and switch to it
if not os.path.exists(os.path.join(tmp.cwd(),str(ROI))):
os.makedirs(str(ROI))
os.chdir(str(ROI))
#Get our ROIs
tmp_ROI = full_img[int(mask[2]):int(mask[3]),int(mask[0]):int(mask[1])]
#Save the new ROI
if flip_horz:
tmp_ROI = np.flip(tmp_ROI,0)
if flip_vert:
tmp_ROI = np.flip(tmp_ROI,1)
#Use the dictionary of ROI final sizes to resize the image
HiRes_size = final_sizes[str(ROI)]
#Resize the image
tmp_ROI_HR = cv2.resize(tmp_ROI,HiRes_size)
#Create a nifti object and save the image
nifti_ROI = nib.Nifti1Image(tmp_ROI_HR, affine=np.eye(4))
#Get a prefix for the image that we are saving (will come from the loop that indicates m/z slice)
prefix = prefix
#Save the ROI
nib.save(nifti_ROI,prefix+'.nii')
#Report the finished job
print('Finished exporting original crop for '+str(ROI)+'...')
#Run transformix on this particular ROI using the false H&E and H&E registration parameters
tmp_imagepath = Path(os.path.join(tmp.cwd(),str(prefix)+'.nii'))
#Create a directory to store this slice in within each ROIs folder
os.mkdir(str(prefix))
#Create an output directory path (here tmp.cwd() is the ROI folder and prefix is m/z slice number)
out_dir = Path(os.path.join(tmp.cwd(),str(prefix)))
#Get the parameter file from this ROI (user will need to provide the paths for right now)
par = parameter_files[str(ROI)]
#Check to see if we are transforming this slice
if par is not None:
#Send the command to the shell
print('Running Transformix for '+str(ROI)+' '+str(prefix)+'...')
os.system("transformix -in "+str(tmp_imagepath)+" -out "+str(out_dir)+" -tp "+str(par))
#Delete the temporary m/z image that comes from exporting ROI from composition MSI slice
os.remove(tmp_imagepath)
#Change back to the original directory (outside of the ROI specific folder)
os.chdir(home_dir)
#Report the finished export job
    print('Finished exporting all regions for '+str(prefix))
#######----******Add in optional extra registration alternative to ROImaskExport for detecting transformation file in the folder
def GetParamFileImageSize(par):
"""Function for parsing an elastix parameter file to get the size tuple for the registration.
The size corresponds to the final size of the registered image.
par: Path to the parameter file that you want to parse"""
#Read the parameter file to pandas dataframe
par_file = pd.read_csv(par, sep = "\n")
#Get those rows that contain the string "Size"
size_cols = par_file[par_file.iloc[:,0].str.contains("Size")]
#Replace the right parentheses
tmp_tab = size_cols.iloc[:,0].str.replace('(','')
#Replace the left parentheses
tmp_tab = pd.DataFrame(tmp_tab.iloc[:].str.replace(')',''))
#String match again for "Size" and convert that to a string
size_cols_fin = tmp_tab[tmp_tab.iloc[:,0].str.match("Size")].to_string(header=False,index=False)
#Extract the positive integers from the final string and convert to tuple
size_tup = tuple([int(s) for s in size_cols_fin.split() if s.isdigit()])
#Return the final size tuple
return size_tup
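# Example (path is a placeholder): reads the "(Size x y)" entry of an elastix
# (transform) parameter file and returns it as a tuple.
# GetParamFileImageSize("TransformParameters.0.txt")  # -> e.g. (2048, 2048)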
#-----------------General Registration Functions-----------------------
def ElastixRegistration(fixed,moving,out_dir,p0,p1=None,fp=None,mp=None,fMask=None,mkdir=False):
"""This is a python function for running the elastix image registration
toolbox. You must be able to call elastix from your command shell to use this.
You must also have your parameter text files set before running this through
python."""
#Start a timer
start = time.time()
#Get the name of the output directory
out_dir = Path(out_dir)
#Get the name of the parameter file
p0 = Path(p0)
#Get the names of the input images
fixedName = Path(fixed)
movingName = Path(moving)
#Get the number of channels for the fixed and moving images
niiFixed = nib.load(str(fixedName))
niiMoving = nib.load(str(movingName))
#Check to see if there is single channel input
    if niiFixed.ndim == 2 and niiMoving.ndim == 2:
print('Detected single channel input images...')
#Send the command to shell to run elastix
command = "elastix -f "+str(fixed)+ " -m "+str(moving)
#Check to see if there is multichannel input
else:
print('Exporting single channel images for multichannel input...')
#Read the images
niiFixed_im = niiFixed.get_fdata()
niiMoving_im = niiMoving.get_fdata()
#Set up list of names for the images
fixedList = []
movingList = []
command = "elastix"
#Export single channel images for each channel
for i in range(niiFixed.shape[2]):
#Create a filename
fname = Path(os.path.join(fixedName.parent,str(fixedName.stem+str(i)+fixedName.suffix)))
#Update the list of names for fixed image
fixedList.append(fname)
#Update the list of names for fixed image
command = command + ' -f' + str(i) + ' ' + str(fname)
#Create a nifti image
#Check to see if the path exists
if not fname.is_file():
#Create a nifti image
nii_im = nib.Nifti1Image(niiFixed_im[:,:,i], affine=np.eye(4))
nib.save(nii_im,str(fname))
for i in range(niiMoving.shape[2]):
#Create a filename
mname = Path(os.path.join(movingName.parent,str(movingName.stem+str(i)+movingName.suffix)))
#Update the list of names for moving image
movingList.append(mname)
#Update the list of names for moving image
command = command + ' -m' + str(i) + ' ' + str(mname)
#Check to see if the path exists
if not mname.is_file():
#Create a nifti image
nii_im = nib.Nifti1Image(niiMoving_im[:,:,i], affine=np.eye(4))
nib.save(nii_im,str(mname))
#Add the parameter files
command = command+" -p "+str(p0)
#Check for additional files
if p1 is not None:
#Create pathlib Path
p1 = Path(p1)
command = command + " -p "+str(p1)
#Check for corresponding points in registration
if fp and mp is not None:
#Create pathlib Paths
fp = Path(fp)
mp = Path(mp)
command = command +" -fp "+str(fp)+" -mp "+str(mp)
#Check for fixed mask
if fMask is not None:
#Create pathlib Paths
fMask = Path(fMask)
command = command +" -fMask "+str(fMask)
#Check for making new directories
if mkdir is True:
n=0
while n>=0:
tmp_name = "elastix"+str(n)
if not os.path.exists(Path(os.path.join(out_dir,tmp_name))):
os.mkdir(Path(os.path.join(out_dir,tmp_name)))
out_dir = Path(os.path.join(out_dir,tmp_name))
break
n+=1
#Add the output directory to the command
command = command +" -out "+str(out_dir)
#Send the command to the shell
print('Running elastix...')
os.system(command)
end = time.time()
print('Initial registration finished\n'+'Computation time: '+str(end-start)+' sec.')
#Return values
return command
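# Example invocation (kept as a comment so the module stays import-safe; all paths are placeholders):
# ElastixRegistration(fixed="fixed.nii", moving="moving.nii", out_dir="registration",
#                     p0="affine_pars.txt", p1="bspline_pars.txt", mkdir=True)
# The returned string is the exact elastix command that was sent to the shell.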
def TransformixRegistration(input_img,output_dir,parameter_file,conc=False,points=None):
"""This is a python function for running transformix image registration. Again,
must be able to call elastix from the command line to be able to run this.
If your channel is multichannel, this script must first export each channel
in the nifti format. By default, images will be exported to your output_dir
and will include the suffix 'Unregistered' followed by the channel number"""
#Create a timer
trans_start = time.time()
#Check to see if the image is multichannel or grayscale (note: need to remove " in your filename)
    tmp_data = nib.load(str(input_img)).get_fdata()
    if tmp_data.ndim != 2:
print('Detected multichannel image. Creating channel images...')
#Get the current directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
#Set the working directory as the output directory
parent=Path(output_dir)
os.chdir(parent)
#Now take each of the channels and export a separate image for registration
filenames_channels = []
for i in range(tmp_data.shape[2]):
#Get the image name from your input image path
            im_name = Path(input_img).name
#Remove the file extension
prefix,extension=Path(im_name).stem,Path(im_name).suffix
#Create new image channel i in each iteration
nifti_col = nib.Nifti1Image(tmp_data[:,:,i], affine=np.eye(4))
#Create the image path for this iteration
tmp_image=prefix+"_Unregistered"+str(i)+extension
#Save the nifti image
print("Saving a temporary image for channel "+str(i)+"...")
nib.save(nifti_col,str(tmp_image))
#Now load the image and run transformix on that channel in the shell
print("Running Transformix for channel "+str(i)+"...")
#Creat a new file for your transformix results
transformix_path = Path(os.path.join(str(output_dir),str(prefix)+"_Transformix_Registered"+str(i)))
transformix_path.mkdir()
os.system("transformix -in " +str(tmp_image)+ " -out "+str(transformix_path)+" -tp "+str(parameter_file))
print("Finished Transforming Channel "+str(i))
#add filenames to the list
filenames_channels.append(os.path.join(str(transformix_path),"result.nii"))
#Check to see if we are concatenating images
if conc is True:
tmp_nii = nib.concat_images(filenames_channels)
#Create a path and save the image
conc_path = Path(os.path.join(str(output_dir),str(prefix)+"_Transformix_Registered"))
conc_path.mkdir()
os.chdir(conc_path)
nib.save(tmp_nii,"result.nii")
#Create a return path
ret_path = conc_path
#Set working directory back to its original
os.chdir(home_dir)
else:
print("Single channel image detected...")
print("Running Transformix...")
#Send the command to the shell to run transformix
os.system("transformix -in " +str(input_img)+ " -out "+str(output_dir)+" -tp "+str(parameter_file))
trans_stop = time.time()
print('Finished transforming\n'+'Transformix Computation Time: '+str(trans_stop-trans_start)+' sec.')
#Create a return path
ret_path = output_dir
return ret_path
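# Example (paths are placeholders): applies a saved transform to a new image and, for
# multichannel input, optionally concatenates the per-channel results into one nifti stack.
# TransformixRegistration(input_img="moving_stack.nii", output_dir="transformed",
#                         parameter_file="TransformParameters.0.txt", conc=True)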
def ApplyTransformixDir(dir,ending,remove_from_names=None,**kwargs):
"""Function for applying a transformix registration for all images contained
within a folder (indicated by dir parameter)"""
#Set a path object
tmp = Path('..')
#Set the home directory and record its position
home = Path(dir)
os.chdir(home)
#Get the names of all nifti images in the directory
im_names = utils.SearchDir(ending = ending)
#Run the the images in the directory and export their transformix
for im in im_names:
#Get the name of the image stem
im_stem = im.stem
#Check to see if removing a string from image name
if remove_from_names is not None:
#Remove the nonsense from the image name
im_stem = im_stem.replace(remove_from_names,"")
#Run transformix on this iteration image
ret_path = TransformixRegistration(input_img=im,output_dir=tmp.cwd(),**kwargs)
#Rename the image in this iteration
trans_im = Path(os.path.join(ret_path,"result.nii"))
#Rename this file to the correct ROI for dice score
trans_im.replace(trans_im.with_name(im_stem+"_result.nii"))
trans_im = trans_im.with_name(im_stem+"_result.nii")
#Change back to the home directory
os.chdir(home)
def AlphaMIParameterFileGrid(template,num_neighbors,error,alpha,out):
"""Function for creating parameter file grid for alpha mutual information
based registration"""
#Get the current directory
home_dir = Path(os.getcwd())
#Get the parent directory and filename
out = Path(out)
os.chdir(out)
#Get the current k value
k_val,_ = utils.ParseElastix(template,'KNearestNeighbours')
#Get the current error value
err_bound,_ = utils.ParseElastix(template,'ErrorBound')
#Get the current alpha value
alph,_ = utils.ParseElastix(template,'Alpha')
#Set up a list for all new parameter file names
nms_list = []
#Iterate through the options for k,error,and alpha
for nn in num_neighbors:
for e in error:
for a in alpha:
#Read the transform parameters
with open(template, 'r') as file:
filedata = file.read()
#Replace the Alpha value
filedata = filedata.replace("(KNearestNeighbours "+k_val+")", "(KNearestNeighbours "+str(nn)+")")
#Replace the file type from nifti to tif
filedata = filedata.replace("(ErrorBound "+err_bound+")", "(ErrorBound "+str(e)+")")
#Replace the resulting image pixel type from double to 8bit
filedata = filedata.replace("(Alpha "+alph+")", "(Alpha "+str(a)+")")
#Get a temporary name
tmp_name = 'aMI_'+str(nn)+'NN_'+str(e)+'ERR_'+str(a)+'A'
#Make directories for the new names
if not os.path.exists(tmp_name):
#make directory
os.mkdir(tmp_name)
#Change to the new subdirectory
os.chdir(tmp_name)
#Create name for the new transform parameter file
new_name = Path(os.path.join(os.getcwd(),str(tmp_name+'.txt')))
#Write out the new file
with open(new_name, 'w+') as new_file:
new_file.write(filedata)
#Close the files
file.close()
new_file.close()
#Add the new names to a list
nms_list.append(new_name)
#Switch back to the out directory
os.chdir(out)
#Change back to the home directory
os.chdir(home_dir)
#Return the path to the TransformParameters
return nms_list
def MIParameterFileGrid(template,num_resolutions,out,nonlinear=False,grid_spacing=None):
"""Function for creating parameter file grid for alpha mutual information
based registration"""
#Get the current directory
home_dir = Path(os.getcwd())
#Get the parent directory and filename
out = Path(out)
os.chdir(out)
#Get the current k value
num_res,_ = utils.ParseElastix(template,'NumberOfResolutions')
#Set up a list for all new parameter file names
nms_list = []
#Check if nonlinear transformation
if nonlinear:
#Get the original grid spacing
og_grid,_ = utils.ParseElastix(template,'FinalGridSpacingInVoxels')
#iterate through grid spacing schedule
for gg in grid_spacing:
#Iterate through the options for k,error,and alpha
for nn in num_resolutions:
#Read the transform parameters
with open(template, 'r') as file:
filedata = file.read()
#Replace the Alpha value
filedata = filedata.replace("(NumberOfResolutions "+num_res+")", "(NumberOfResolutions "+str(nn)+")")
#Replace the grid spacing schedule
filedata = filedata.replace("(FinalGridSpacingInVoxels "+og_grid+")", "(FinalGridSpacingInVoxels "+str(gg)+")")
#Get a temporary name
tmp_name = 'MI_'+str(nn)+'Resolutions'+'_'+str(gg)+'GridSpacing'
#Make directories for the new names
if not os.path.exists(tmp_name):
#make directory
os.mkdir(tmp_name)
#Change to the new subdirectory
os.chdir(tmp_name)
#Create name for the new transform parameter file
new_name = Path(os.path.join(os.getcwd(),str(tmp_name+'.txt')))
#Write out the new file
with open(new_name, 'w+') as new_file:
new_file.write(filedata)
#Close the files
file.close()
new_file.close()
#Add the new names to a list
nms_list.append(new_name)
#Switch back to the out directory
os.chdir(out)
#Change back to the home directory
os.chdir(home_dir)
else:
#Iterate through the options for k,error,and alpha
for nn in num_resolutions:
#Read the transform parameters
with open(template, 'r') as file:
filedata = file.read()
#Replace the Alpha value
filedata = filedata.replace("(NumberOfResolutions "+num_res+")", "(NumberOfResolutions "+str(nn)+")")
#Get a temporary name
tmp_name = 'MI_'+str(nn)+'Resolutions'
#Make directories for the new names
if not os.path.exists(tmp_name):
#make directory
os.mkdir(tmp_name)
#Change to the new subdirectory
os.chdir(tmp_name)
#Create name for the new transform parameter file
new_name = Path(os.path.join(os.getcwd(),str(tmp_name+'.txt')))
#Write out the new file
with open(new_name, 'w+') as new_file:
new_file.write(filedata)
#Close the files
file.close()
new_file.close()
#Add the new names to a list
nms_list.append(new_name)
#Switch back to the out directory
os.chdir(out)
#Change back to the home directory
os.chdir(home_dir)
#Return the path to the TransformParameters
return nms_list
def HyperParameterElastixRegistration(par_files,p=None,run_elastix = True,outdir=None,subfolder=None,**kwargs):
"""Function for iterating over a list of parameter files and running elastix
on those parameter files"""
#Get the current directory
home_dir = Path(os.getcwd())
#Create a list to store elastix log files and another for transform parameter files
logs = []
trans = []
#Iterate through the files
for par in par_files:
#Set the paramter files
if p is None:
p0 = par
p1=None
else:
p0=p
p1 = par
#Switch to the directory
os.chdir(str(par.parent))
#Check if moving to a subfolder (Use for multiple images in this function)
if subfolder is not None:
#Create a path object
tmp_subfolder = Path(os.path.join(os.getcwd(),str(subfolder)))
#Create folder if it doesnt exist
if not tmp_subfolder.is_dir():
os.mkdir(str(tmp_subfolder))
#Change to subfolder
os.chdir(str(tmp_subfolder))
#Get the output path as the current working directory
outdir = Path(os.getcwd())
#Run elastix for this file if you choose
if run_elastix:
ElastixRegistration(p0=p0,p1=p1,out_dir=outdir,**kwargs)
#Get the elastix log file from this directory
tmp_log = utils.SearchDir(ending = "elastix.log")
#Add the log to the new list
logs.append(tmp_log)
#Get the final transform file
tmp_trans,_ = utils.GetFinalTransformParameters()
#Add the transform parameter file to the list
trans.append(tmp_trans)
#Change back to the home_dir
os.chdir(home_dir)
#Return the parameter files
return logs,trans,par_files
#-----------------Composition Registration Functions for MSI full image registration-----------------------
#Working towards a more succinct script
def CompositionElastix(out,f0,m0,p0_0,f1,m1,p0_1,p1_0=None,fp0=None,mp0=None,p1_1=None,fp1=None,mp1=None):
"""This function will perform multiple elastix registrations and save the output
so that we can directly call transformix composition of functions.
Input for points here will now allow for the csv files straight from Image J. We assume
that you have run the FormatFijiPointsFromCSV function and exported"""
#Perform the registration
ElastixRegistration(fixed=f0,moving=m0,out_dir=out,p0=p0_0,p1=p0_1,fp=fp0,mp=mp0,mkdir=True)
ElastixRegistration(fixed=f1,moving=m1,out_dir=out,p0=p1_0,p1=p1_1,fp=fp1,mp=mp1,mkdir=True)
#Get the list of elastix files
tmp_path = Path('..')
home_dir=tmp_path.cwd()
os.chdir(out)
#Get the list of files in order
dirFolders = [f for f in os.listdir(out) if os.path.isdir(os.path.join(out, f))]
dirFolders.sort(key=lambda f: int(f.split("elastix")[1]))
#Create a new directory for the composition
os.mkdir('elastix_composition')
#Save the path to the first elastix folder
first_trans = Path(os.path.join(out,dirFolders[0]))
#Access the last file that elastix produced
os.chdir(os.path.join(out,dirFolders[-1]))
#List the files in this elastix directory that correspond to transform paramters
elastix_dir=tmp_path.cwd()
dirFiles=glob.glob('TransformParameters*')
dirFiles.sort(key=lambda f: int(f.split(".")[1]))
#Add new transform parameters to the created elastix_composition directory
for i in range(len(dirFiles)):
shutil.copyfile(dirFiles[i],os.path.join(out,'elastix_composition',Path(dirFiles[i]).stem)+'.comp.txt')
#Go into the 1st registration parameter file and change to composition of Functions
os.chdir(os.path.join(out,'elastix_composition'))
dirComp = glob.glob('TransformParameters*')
#Sort the files
dirComp.sort(key=lambda f: int(f.split(".")[1]))
#Only access the first file for composition (The second depends on first, if second exists)
with open(dirComp[0], 'r') as file :
filedata = file.read()
#Access the first transform file
trans_file=glob.glob(os.path.join(first_trans,'TransformParameters*'))
trans_file.sort(key=lambda f: int(f.split(".")[1]))
# Replace the target string
filedata = filedata.replace("NoInitialTransform", str(os.path.join(first_trans,trans_file[-1])))
# Write the file out again
with open(dirComp[0], 'w') as file:
file.write(filedata)
#If you have two transform files in the last transformation, you will need to change the files there too
if len(dirFiles) == 2:
comp_file = tmp_path.cwd()
with open(dirComp[1], 'r') as file :
filedata = file.read()
filedata = filedata.replace(str(os.path.join(elastix_dir,dirFiles[0])), str(os.path.join(comp_file,dirComp[0])))
with open(dirComp[1], 'w') as file:
file.write(filedata)
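# --- Hedged usage sketch (not from the original source) ---
# Chaining two registrations and rewiring their transform files for
# transformix composition; every path below is a hypothetical placeholder.
def _example_composition():
    CompositionElastix(out='/path/to/out',
                       f0='tolblue.nii', m0='he.nii',
                       p0_0='affine.txt', p0_1='bspline.txt',
                       f1='he_registered.nii', m1='msi_umap.nii',
                       p1_0='affine.txt', p1_1='bspline.txt')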
#-----------------Composition Registration Functions for extracting IMC ROIs-----------------------
#Testing to read in data and slice the correction padding so we can overlay
def RGBextractROI(dir,full_image,ROI_correction=None,flip_horz=False,flip_vert=False,export_image=False):
"""Function for exporting 3 channel ROIs from full images"""
#Set your home directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
os.chdir(dir)
#Load the full image - Note this is a nifti image so we can use the index order given by GetROImask
full_dat = nib.load(str(full_image)).get_fdata()
#Get the final image size
full_size = (full_dat.shape[0],full_dat.shape[1])
#Get the coordinates for our ROIs
ROI_masks_from_tolBlue = GetROImask(dir,full_size,ROI_correction)
#Mask each channel using our pre-made masks!
final_sizes = ROImaskExport(ROI_masks_from_tolBlue,full_dat,flip_horz=flip_horz,flip_vert=flip_vert,prefix=None,export_image=export_image)
#Return the mask object so we can use it easily for the transformix ROI Extraction
return ROI_masks_from_tolBlue,final_sizes
######Need to include resizing function for the ROIs based on the IMC Images
#that exist in each of the folders. Need to be able to have another registration
#for the exported ROIs extracted from the toluidine blue image
#####Create general function for masking and exporting the ROIs from the full Images
#you are repeating yourself in the RGB extraction and MSI extraction functions. In
#both instances we need to resize the ROIs upon saving so that we can apply another
#registration step
#****Both functions can be run through the folder structuure
def TransformixROIExtractionMSI(data,ROI_masks_from_tolBlue,final_sizes,par_composition,parameter_files,dir,rot1=1,flip_horz=False,flip_vert=False):
"""MUST BE USED AFTER imzMLreader function! This function will take the composition elastix transform file and will
apply it to each of the channels in your MSI dataset. Given the directory of filename ROIs
from fiji, the regions will then be cropped from the transformed image and stored
as an individual slice.
data: An imzMLParser class object
ROI_masks: Object returned from the RGB ROI extraction (step prior to registering false IMC H&E with tolblue-HE registered H&E image)
par: The path to the parameter file to use for image registration on the full image (In our case the composition file)
ROI_dict: Dictionary object indicating the name of files and filepaths to the csv Files
rot1: Any extra rotation that you added before exporting the UMAP nifti image
ROI_correction: Corrective padding to add onto the width and height of exported ROIs - integer values"""
#Set your home directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
#Change our directory to the output directory
os.chdir(dir)
trans_dir = tmp_path.cwd()
#Get the final full image registration size from the composition parameter file\
par = Path(par_composition)
#full_size = GetParamFileImageSize(par).....why??
#Get the ROI ROI_masks
ROI_masks = ROI_masks_from_tolBlue
#Fill the array with pixel values
trans_start = time.time()
#This loop will loop through all m/z channels in the SpectrumTable object stored in imzML reader class object
for j in range(0,data.data.SpectrumTable.shape[1]):
#Block for exporting the full m/z slice using the composition elastix transform parameters
print('Transformix Working on slice: '+str(j)+'/'+str(data.data.SpectrumTable.shape[1])+'...')
#Create a blank array to fill for each m/z channel
im = np.zeros((data.data.imzmldict["max count of pixels y"],\
data.data.imzmldict["max count of pixels x"]), dtype = np.float32)
#Fill the image array with the m/z pixel intensities
for i, (x, y, z) in enumerate(data.data.coordinates):
im[y - 1, x - 1] = data.data.SpectrumTable.iloc[i,j]
#Pad the image the same way you did for the dimension reduction image and registration with H&E image
im2 = np.pad(im,[(data.data.Image_padx,data.data.Image_padx),(data.data.Image_pady,data.data.Image_pady)],mode='constant')
#Rotate the image and resize the same way you did for the dimension reduction image with H&E
im2 = cv2.resize(np.rot90(im2,rot1),data.data.Image_resize_shape)
#Create nifti image the same way you did the dimension reduction image with H&E
tmp_nifti = nib.Nifti1Image(np.rot90(im2,data.data.Image_rot), affine=np.eye(4))
nib.save(tmp_nifti,'MZ_slice'+str(j)+'.nii')
print('Saved temporary nifti image')
#Run transformix for this slice
tmp_imagepath = Path(os.path.join(trans_dir,'MZ_slice'+str(j)+'.nii'))
print('Running Transformix for slice '+str(j)+'...')
os.system("transformix -in "+str(tmp_imagepath)+" -out "+str(Path(trans_dir))+" -tp "+str(par))
#Now read the transformed image and crop the regions that we need
full_dat = nib.load(os.path.join(str(trans_dir),'result.nii')).get_fdata()
#Create a prefix for the ROIs so we can track channels
pref = 'MZ_slice'+str(j)
#For each slice, export and apply transformix to the corresponding MSI ROI
ROImaskExport_MSI_Transformix(ROI_masks_from_tolBlue = ROI_masks,final_sizes = final_sizes,\
full_img = full_dat,flip_horz=flip_horz,parameter_files=parameter_files,flip_vert=flip_vert,prefix=pref)
#Delete our sliced image
del full_dat
os.remove('MZ_slice'+str(j)+'.nii')
print('Finished slice '+str(j)+'/'+str(data.data.SpectrumTable.shape[1]))
#Change back to our home directory
os.chdir(home_dir)
trans_stop = time.time()
print('Finished transforming\n'+'Transformix Computation Time: '+str(trans_stop-trans_start)+' sec.')
def ExportMSIArray(data,export_name,rot1):
#Copy the spectrum table for input to numba
spec_table = np.array(data.data.SpectrumTable.copy())
#Create blank array
im = np.zeros((data.data.imzmldict["max count of pixels y"],\
data.data.imzmldict["max count of pixels x"],spec_table.shape[1]), dtype = np.float32)
#Copy the coordinates of this dataset
coords = data.data.coordinates.copy()
#Define the function
@njit(parallel=True)
def parallel_array_fill(nd_im,coords,spec_table):
"""Function for quickly filling a large ndarray across z axis"""
#Fill the array
for slice_num in prange(spec_table.shape[1]):
#Fill the image array with the m/z pixel intensities
for i, (x, y, z) in enumerate(coords):
nd_im[y - 1, x - 1,slice_num] = spec_table[i,slice_num]
return(nd_im)
#Run the function and fill the array
filled_array = parallel_array_fill(nd_im = im ,coords = coords ,spec_table = spec_table)
#Pad the image the same way you did for the dimension reduction image and registration with H&E image
im2 = np.pad(filled_array,[(data.data.Image_padx,data.data.Image_padx),(data.data.Image_pady,data.data.Image_pady),(0,0)],mode='constant')
#Rotate the image and resize the same way you did for the dimension reduction image with H&E
tmp_nifti = nib.Nifti1Image(np.rot90(np.rot90(im2,data.data.Image_rot),rot1), affine=np.eye(4))
nib.save(tmp_nifti,str(export_name))
def TransformixROIExtractionMSI_slice(nibabel_obj,slice_num,resize_shape,ROI_masks_from_tolBlue,final_sizes,par_composition,parameter_files,dir,flip_horz=False,flip_vert=False):
#Set your home directory
tmp_path = Path('..')
home_dir=tmp_path.cwd()
#Change our directory to the output directory
os.chdir(dir)
trans_dir = tmp_path.cwd()
#Get the final full image registration size from the composition parameter file\
par = Path(par_composition)
#full_size = GetParamFileImageSize(par).....why??
#Get the ROI ROI_masks
ROI_masks = ROI_masks_from_tolBlue
#Get this slice from the exported nifti image from ExportMSIArray function
im = nibabel_obj[:,:,slice_num]
#Rotate the image and resize the same way you did for the dimension reduction image with H&E
im = cv2.resize(im,resize_shape)
#Create nifti image the same way you did the dimension reduction image with H&E
tmp_nifti = nib.Nifti1Image(im, affine=np.eye(4))
nib.save(tmp_nifti,'MZ_slice'+str(slice_num)+'.nii')
print('Saved temporary nifti image for slice '+str(slice_num))
#Run transformix for this slice
tmp_imagepath = Path(os.path.join(trans_dir,'MZ_slice'+str(slice_num)+'.nii'))
print('Running Transformix for slice '+str(slice_num)+'...')
subprocess.call("transformix -in "+str(tmp_imagepath)+" -out "+str(Path(trans_dir))+" -tp "+str(par))
#Now read the transformed image and crop the regions that we need
full_dat = nib.load(os.path.join(str(trans_dir),'result.nii')).get_fdata()
#Create a prefix for the ROIs so we can track channels
pref = 'MZ_slice'+str(slice_num)
#For each slice, export and apply transformix to the corresponding MSI ROI
ROImaskExport_MSI_Transformix(ROI_masks_from_tolBlue = ROI_masks,final_sizes = final_sizes,\
full_img = full_dat,flip_horz=flip_horz,parameter_files=parameter_files,flip_vert=flip_vert,prefix=pref)
#Delete our sliced image
del full_dat
os.remove('MZ_slice'+str(slice_num)+'.nii')
def TransformixROIExtractionMSI_parallel(data,nibabel_obj,ROI_masks_from_tolBlue,final_sizes,par_composition,parameter_files,dir,flip_horz=False,flip_vert=False,processes=-1):
resize_shape = data.data.Image_resize_shape
num_slices = nibabel_obj.shape[2]
Parallel(n_jobs = processes)(delayed(TransformixROIExtractionMSI_slice)(nibabel_obj,slice_num,resize_shape,ROI_masks_from_tolBlue,\
final_sizes,par_composition,parameter_files,dir,flip_horz,\
flip_vert) for slice_num in range(num_slices))
print('Finished Exporting')
def CompileROIs(home_dir):
"""This function is to be used for reading all m/z images in your final registrations
folder in order to create a single nifti file containing the registered MSI stack"""
#Set your working directory
tmp=Path('..')
os.chdir(home_dir)
home_dir = tmp.cwd()
start = time.time()
#Get a list of files in the home directory
list_folders = [os.path.join(tmp.cwd(),f) for f in os.listdir(tmp.cwd()) if os.path.isdir(os.path.join(tmp.cwd(), f))]
#loop through each folder and concatenate
for i in list_folders:
#Switch to this directory
os.chdir(i)
#Get all of the nifti images in this directory
MZ_images = utils.TraverseDir(ending = '.nii')
#Have to convert the images to strings for nibabel to concatenate them
MZ_images = [str(image_path) for image_path in MZ_images]
#Sort the images so the slices are in order
MZ_images.sort(key=lambda f: int((str(Path(f).parent).split("MZ_slice")[-1])))
#Concatenate the images to new nifti object
print('Compiling '+str(Path(i).stem)+'...')
tmp_nii = nib.concat_images(MZ_images)
#Save the object
print('Saving '+str(Path(i).stem)+'...')
nib.save(tmp_nii,str(Path(i).stem)+'_result.nii')
print('Saved '+str(Path(i).stem))
#Remove our tmp nifti object
del tmp_nii
#Change directory back to the home directory
os.chdir(home_dir)
stop = time.time()
print('Finished Compiling Images '+str(stop-start)+' sec. ')
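# --- Hedged usage sketch (not from the original source) ---
# Compiling the per-channel ROI nifti images under a results folder into one
# stack per ROI directory; the path below is a hypothetical placeholder.
def _example_compile_rois():
    CompileROIs(home_dir='/path/to/final_registrations')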
#-----Create class object-----
class Elastix():
"""Elastix image registration class
"""
def __init__(self,fixed,moving,out_dir,p,fp=None,mp=None,fMask=None):
"""initialize class instance
"""
#Create pathlib objects and set class parameters
self.fixed = Path(fixed)
self.moving = Path(moving)
self.out_dir = Path(out_dir)
self.p = [Path(par_file) for par_file in p]
self.fp = None if fp is None else Path(fp)
self.mp = None if mp is None else Path(mp)
self.fMask = None if fMask is None else Path(fMask)
self.command = "elastix"
#Load the images to check for dimension number
print('Loading images...')
#Load images
niiFixed = nib.load(str(self.fixed))
niiMoving = nib.load(str(self.moving))
#Print update
print('Done loading')
#Check to see if there is single channel input (grayscale)
if niiFixed.ndim == 2 and niiMoving.ndim == 2:
print('Detected single channel input images...')
#Add fixed and moving image to the command string
self.command = self.command+" -f "+str(self.fixed)+ " -m "+str(self.moving)
#Check to see if there is multichannel input
else:
print('Exporting single channel images for multichannel input...')
#Read the images
niiFixed = niiFixed.get_fdata()
niiMoving = niiMoving.get_fdata()
#Set up list of names for the images
fixedList = []
movingList = []
#Export single channel images for each channel
for i in range(niiFixed.shape[2]):
#Create a filename
fname = Path(os.path.join(self.fixed.parent,str(self.fixed.stem+str(i)+self.fixed.suffix)))
#Update the list of names for fixed image
fixedList.append(fname)
#Add this fixed image channel to the elastix command
self.command = self.command + ' -f' + str(i) + ' ' + str(fname)
#Check to see if the path exists
if not fname.is_file():
#Create a nifti image
nii_im = nib.Nifti1Image(niiFixed[:,:,i], affine=np.eye(4))
nib.save(nii_im,str(fname))
for i in range(niiMoving.shape[2]):
#Create a filename
mname = Path(os.path.join(self.moving.parent,str(self.moving.stem+str(i)+self.moving.suffix)))
#Update the list of names for moving image
movingList.append(mname)
#Add this moving image channel to the elastix command
self.command = self.command + ' -m' + str(i) + ' ' + str(mname)
#Check to see if the path exists
if not mname.is_file():
#Create a nifti image
nii_im = nib.Nifti1Image(niiMoving[:,:,i], affine=np.eye(4))
nib.save(nii_im,str(mname))
#Add the parameter files
self.command = self.command + "".join(" -p " + str(par_file) for par_file in self.p)
#Check for corresponding points in registration (must have fixed and moving set)
if self.fp is not None and self.mp is not None:
#Add to the command
self.command = self.command +" -fp "+str(self.fp)+" -mp "+str(self.mp)
#Check for fixed mask
if fMask is not None:
#Create pathlib Paths
fMask = Path(fMask)
self.command = self.command +" -fMask "+str(fMask)
#Check for making new directories
#if mkdir is True:
# n=0
# while n>=0:
# tmp_name = "elastix"+str(n)
# if not os.path.exists(Path(os.path.join(out_dir,tmp_name))):
# os.mkdir(Path(os.path.join(out_dir,tmp_name)))
# out_dir = Path(os.path.join(out_dir,tmp_name))
# break
# n+=1
#Add the output directory to the command
self.command = self.command +" -out "+str(self.out_dir)
#Add main elastix component
def RunElastix(self, command=None):
"""
Run the elastix registration. You must be able to call elastix
from your command shell to use this. You must also have your parameter
text files set before running (see elastix parameter files).
Currently supports nifti1image format only!
"""
#Print command
print(str(self.command))
#Print elastix update
print('Running elastix...')
#Start timer
start = time.time()
#Send the command to the shell
os.system(self.command)
#Stop timer
stop = time.time()
#Print update
print('Finished -- computation took '+str(stop-start)+'sec.')
#Return values
return self.command
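# --- Hedged usage sketch (not from the original source) ---
# Building and running a registration with the Elastix class above. The nifti
# and parameter-file paths are hypothetical placeholders, and the elastix
# binary must be callable from the shell for RunElastix to succeed.
def _example_elastix_class():
    reg = Elastix(fixed='fixed.nii', moving='moving.nii',
                  out_dir='elastix_out', p=['rigid.txt', 'affine.txt'])
    reg.RunElastix()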
class Transformix():
"""Python class for transformix
"""
def __init__(self):
"""initialize class instance
"""
#Create pathlib objects
#
``` |
{
"source": "JoshuaHess12/hdi-utils",
"score": 3
} |
#### File: hdiutils/HDIimport/tif_reader.py
```python
from pathlib import Path
import skimage.io
import numpy as np
# Define class object
class TIFreader:
"""TIF cytometry data reader using scikit-image."""
def __init__(self, path_to_tif):
"""Initialize the class by using the path to the image.
path_to_tif: Path to tif image (Ex: path/to/image.extension)
"""
# Initialize the objects in this class
self.image = None
# Create a pathlib object for the path_to_tif
path_to_tif = Path(path_to_tif)
# Read tif(f) or ome.tif(f) data and return the parsed data
im = skimage.io.imread(str(path_to_tif), plugin="tifffile")
# Check to see if the number of channels is greater than one
im_shape = im.shape
# check to see if the image is a tiff hyperstack based on the shape
if len(im_shape) == 4:
# ravel along the last axis
im = np.concatenate([im[:,:,:,i] for i in range(im.shape[3])])
# recalculate the image shape
im_shape = im.shape
# Check to see if the image is considered xyc or just xy(single channel)
# Note: skimage with tifffile plugin reads channel numbers of 1 as xy array,
# and reads images with 3 and 4 channels in the correct order. Channel numbers
# of 2 or >5 need axis swapping
if len(im_shape) > 2:
##########This will fail if the array is a 3 or 4 channel image with 5 pixels in the x direction...shouldn't happen##########
# Check if channel numbers are 3 or 4
if (im_shape[2] == 3) or (im_shape[2] == 4):
pass
else:
# If number of channels is less than two then swap the axes to be zyxc
im = np.swapaxes(im, 0, 2)
# Swap the axes to be in the order zyxc
im = np.swapaxes(im, 0, 1)
# Assign the data to the class
self.image = im
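# --- Hedged usage sketch (not part of the original module) ---
# Reading a (hypothetical) multichannel tif and checking the parsed array;
# the path below is a placeholder.
if __name__ == "__main__":
    reader = TIFreader("path/to/image.ome.tif")
    print(reader.image.shape)  # multichannel input should come back as yxc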
``` |
{
"source": "joshuahigginson1/AWS_Lambda_Function",
"score": 3
} |
#### File: Python 3.8/DynamoDB Serverless Task/Create Functionality.py
```python
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
def lambda_handler(event, context):
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if abs(o) % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Movies')
title = '{}'.format(event['title'])
year = '{}'.format(event['year'])
response = table.put_item(
Item={
'Title':title,
'Year':year,
'Rating':5
}
)
print(table.creation_date_time)
print("PutItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
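# --- Hedged usage sketch (not part of the original handler) ---
# Invoking the handler locally with a sample event. This still writes to the
# real 'Movies' DynamoDB table, so AWS credentials and the table must exist;
# the title/year values are placeholders.
if __name__ == '__main__':
    lambda_handler({'title': 'Example Movie', 'year': '2020'}, None)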
```
#### File: Python 3.8/RDS Serverless Task/DELETE-RDSServerless.py
```python
import pymysql
from os import getenv
rds_host = getenv("RDS_ENDPOINT")
username = getenv("RDS_ROOT_USER")
password = getenv("RDS_ROOT_PASS")
db_name = getenv("RDS_DB_NAME")
def delete_events(event):
print("Deleting the user...")
connection = pymysql.connect(host=rds_host,
user=username,
password=password,
db=db_name,
connect_timeout=5)
with connection.cursor() as cursor:
cursor.execute("""DELETE FROM test WHERE id=%s""" % (event['id']))
connection.commit()
cursor.close()
print(f"User {event['id']} deleted.")
def lambda_handler(event, context):
return delete_events(event)
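# --- Hedged usage sketch (not part of the original handler) ---
# Invoking the handler locally with a sample event. This deletes a row from
# the RDS instance named in the environment variables, so use a throwaway id;
# the value below is a placeholder.
if __name__ == '__main__':
    lambda_handler({'id': 1}, None)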
``` |
{
"source": "joshuahigginson1/DevOps-Assessment-1",
"score": 3
} |
#### File: page_objects/patient_page_objects/patient_greeting_page_object.py
```python
class PatientGreetingPageObject(object):
bio = 'This is a test bio.'
def get_current_feeling_buttons(self):
feeling1element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-0"]')
feeling2element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-1"]')
feeling3element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-2"]')
feeling4element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-3"]')
feeling5element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-4"]')
feeling6element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-5"]')
feeling7element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-6"]')
feeling8element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-7"]')
feeling9element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-8"]')
feeling10element = self.client.find_element_by_xpath('// *[ @ id = "current_feeling-9"]')
attributes = {
'feeling1 element': feeling1element,
'feeling2 element': feeling2element,
'feeling3 element': feeling3element,
'feeling4 element': feeling4element,
'feeling5 element': feeling5element,
'feeling6 element': feeling6element,
'feeling7 element': feeling7element,
'feeling8 element': feeling8element,
'feeling9 element': feeling9element,
'feeling10 element': feeling10element
}
return attributes
def click_feeling1_button(self):
self.get_current_feeling_buttons(self)['feeling1 element'].click()
def click_feeling2_button(self):
self.get_current_feeling_buttons(self)['feeling2 element'].click()
def click_feeling3_button(self):
self.get_current_feeling_buttons(self)['feeling3 element'].click()
def click_feeling4_button(self):
self.get_current_feeling_buttons(self)['feeling4 element'].click()
def click_feeling5_button(self):
self.get_current_feeling_buttons(self)['feeling5 element'].click()
def click_feeling6_button(self):
self.get_current_feeling_buttons(self)['feeling6 element'].click()
def click_feeling7_button(self):
self.get_current_feeling_buttons(self)['feeling7 element'].click()
def click_feeling8_button(self):
self.get_current_feeling_buttons(self)['feeling8 element'].click()
def click_feeling9_button(self):
self.get_current_feeling_buttons(self)['feeling9 element'].click()
def click_feeling10_button(self):
self.get_current_feeling_buttons(self)['feeling10 element'].click()
def get_feeling_comparison_buttons(self):
attributes = {
'worse element': self.client.find_element_by_xpath('//*[@id="feeling_comparison-0"]'),
'same element': self.client.find_element_by_xpath('//*[@id="feeling_comparison-1"]'),
'better element': self.client.find_element_by_xpath('//*[@id="feeling_comparison-2"]'),
}
return attributes
def click_worse_button(self):
self.get_feeling_comparison_buttons(self)['worse element'].click()
def click_same_button(self):
self.get_feeling_comparison_buttons(self)['same element'].click()
def click_better_button(self):
self.get_feeling_comparison_buttons(self)['better element'].click()
def get_behaviour_buttons(self):
attributes = {
'happy element': self.client.find_element_by_xpath('//*[@id="behaviours-0"]'),
'angry element': self.client.find_element_by_xpath('//*[@id="behaviours-1"]'),
'disappointed element': self.client.find_element_by_xpath('//*[@id="behaviours-2"]'),
'done with today element': self.client.find_element_by_xpath('//*[@id="behaviours-3"]'),
'persevering element': self.client.find_element_by_xpath('//*[@id="behaviours-4"]'),
'anxious element': self.client.find_element_by_xpath('//*[@id="behaviours-5"]'),
'confused element': self.client.find_element_by_xpath('//*[@id="behaviours-6"]'),
'worried element': self.client.find_element_by_xpath('//*[@id="behaviours-7"]'),
'ill element': self.client.find_element_by_xpath('//*[@id="behaviours-8"]'),
'exhausted element': self.client.find_element_by_xpath('//*[@id="behaviours-9"]'),
'accomplished element': self.client.find_element_by_xpath('//*[@id="behaviours-10"]'),
'star struck element': self.client.find_element_by_xpath('//*[@id="behaviours-11"]'),
'frightened element': self.client.find_element_by_xpath('//*[@id="behaviours-12"]'),
}
return attributes
def click_happy_element(self):
self.get_behaviour_buttons(self)['happy element'].click()
def click_angry_element(self):
self.get_behaviour_buttons(self)['angry element'].click()
def click_disappointed_element(self):
self.get_behaviour_buttons(self)['disappointed element'].click()
def click_done_with_today_element(self):
self.get_behaviour_buttons(self)['done with today element'].click()
def click_persevering_element(self):
self.get_behaviour_buttons(self)['persevering element'].click()
def click_anxious_element(self):
self.get_behaviour_buttons(self)['anxious element'].click()
def click_confused_element(self):
self.get_behaviour_buttons(self)['confused element'].click()
def click_worried_element(self):
self.get_behaviour_buttons(self)['worried element'].click()
def click_ill_element(self):
self.get_behaviour_buttons(self)['ill element'].click()
def click_exhausted_element(self):
self.get_behaviour_buttons(self)['exhausted element'].click()
def click_accomplished_element(self):
self.get_behaviour_buttons(self)['accomplished element'].click()
def click_star_struck_element(self):
self.get_behaviour_buttons(self)['star struck element'].click()
def click_frightened_element(self):
self.get_behaviour_buttons(self)['frightened element'].click()
def get_bio_element(self):
return self.client.find_element_by_xpath('//*[@id="patient_comment"]')
def type_in_bio_form(self, input_to_type=bio): # A function to type text into our bio form box.
# Retrieve our form attributes.
get_field_element = self.get_bio_element()
# After retrieving the field element, simulate typing into a form box.
get_field_element.send_keys(input_to_type)
print(f"Running Simulation: Currently typing '{input_to_type}' in the bio field.")
def clear_password_form(self): # A function to clear the text from within our password form box.
# Retrieve our form attributes.
get_field_element = self.get_bio_element()
get_field_element.clear() # Clears our form.
print(f"Running Simulation: Currently clearing the bio field.")
def get_submit_button(self):
return self.client.find_element_by_xpath('//*[@id="submit"]')
def click_submit_button(self):
self.get_submit_button(self).click()
def get_skip_evaluation_button(self):
return self.client.find_element_by_xpath('/html/body/p/a')
def click_skip_evaluation_button(self):
return self.get_skip_evaluation_button(self).click()
```
#### File: functional_tests/test_pages/test_common_page_elements.py
```python
from flask import url_for
from tests.functional_tests.page_objects.common_page_objects import CommonPageObject, MainNavBar, PatientNavBar, \
PsychiatristNavBar
from tests.functional_test_framework import LiveServerTestCase
# Helper Functions ----------------------------------------------------------------------
# Helper Functions are used to prevent repeating basic lines of code within our testing.
def print_assertion_to_console(element_to_test, test_outcome, assertion):
print(f'ASSERTION: The current {element_to_test} should be: {assertion}!\n')
print(f'OUTCOME: The current {element_to_test} is actually: {test_outcome}!\n')
def compare_and_evaluate_relative_urls(test_outcome, endpoint_bp):
print(f'The root URL is: {LiveServerTestCase.root_url}\n') # Here, we retrieve the root URL from our webdriver.
# We ask the developer for the test's expected endpoint, and get it's relative associated URL with the url_for function.
expected_endpoint = url_for(endpoint_bp)
# We run string concatenation to generate the absolute URL associated with our endpoint.
full_endpoint_url = f'{LiveServerTestCase.root_url}{expected_endpoint}'
print(f'Expected URL: {full_endpoint_url}\n')
print(f'Test Outcome: {test_outcome}\n')
if full_endpoint_url == test_outcome: # If the test outcome matches expected endpoint URL, then return true.
return True
else:
return False
# Tests ----------------------------------------------------------------------------------
class TestCommonPageElements(LiveServerTestCase):
# Assertions -----------------------------------------------------------------
favicon_assertion_name = None
title_assertion = None
footer_assertion = "By <NAME> for QA Consulting"
# Element Specific Tests ----------------------------------------------------
def test_page_favicon(self): # A test to check that the correct favicon is displaying.
get_favicon_name = CommonPageObject.get_favicon_element(self)['name'] # Retrieve favicon name.
print_assertion_to_console('page favicon', get_favicon_name, self.favicon_assertion_name)
self.assertEqual(get_favicon_name, self.favicon_assertion_name) # Make test assertion.
def test_page_title(self): # A test to check that the title of our page is correct.
get_page_title = CommonPageObject.get_page_title(self) # Retrieve the page title.
print_assertion_to_console('page title', get_page_title, self.title_assertion)
self.assertEqual(get_page_title, self.title_assertion)
def test_page_footer(self): # A test to check that the page footer is correct.
get_page_footer = CommonPageObject.get_page_footer(self)['text'] # Retrieve text from page footer.
print_assertion_to_console('page footer', get_page_footer, self.footer_assertion)
self.assertEqual(get_page_footer, self.footer_assertion)
class TestMainNavBar(LiveServerTestCase):
# Element Specific Tests ----------------------------------------------------
def test_home_button_label(self): # A test to check that our button is correctly labeled.
button_label_assertion = "Home" # Declare our assertion criteria.
get_home_button = MainNavBar.get_home_button(self)['button label'] # Retrieve the button label.
print_assertion_to_console('home button', get_home_button, button_label_assertion)
self.assertEqual(get_home_button, button_label_assertion)
def test_home_button_click(self): # A test to check the functionality of clicking the 'about' button.
MainNavBar.click_home_button()
page_status_code = CommonPageObject.get_page_response(self) # Get the current page's status code.
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self) # Get the current page's url.
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'main_bp.homepage')) # Make our assertion.
def test_about_button_label(self): # A test to check that our button is correctly labeled.
button_label_assertion = "About"
get_about_button = MainNavBar.get_about_button(self)['button label']
print_assertion_to_console('about button', get_about_button, button_label_assertion)
self.assertEqual(get_about_button, button_label_assertion)
def test_about_button_click(self): # A test to check the functionality of clicking the 'about' button.
MainNavBar.click_about_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'main_bp.about'))
class TestPatientNavBar(LiveServerTestCase):
# Element Specific Tests ----------------------------------------------------
def test_patient_dashboard_button_label(self):
button_label_assertion = "Dashboard"
get_patient_dashboard_button = PatientNavBar.get_dashboard_button(self)['button label']
print_assertion_to_console('patient dashboard button', get_patient_dashboard_button, button_label_assertion)
self.assertEqual(get_patient_dashboard_button, button_label_assertion)
def test_patient_dashboard_button_click(self):
PatientNavBar.click_dashboard_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.dashboard'))
def test_patient_progress_button_label(self):
button_label_assertion = "My Progress"
get_patient_progress_button = PatientNavBar.get_patient_progress_button(self)['button label']
print_assertion_to_console('my progress button', get_patient_progress_button, button_label_assertion)
self.assertEqual(get_patient_progress_button, button_label_assertion)
def test_patient_progress_button_click(self):
PatientNavBar.click_patient_progress_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.user_progress'))
def test_patient_tools_button_label(self):
button_label_assertion = "My Tools"
get_patient_tools_button = PatientNavBar.get_patient_tools_button(self)['button label']
print_assertion_to_console('my progress button', get_patient_tools_button, button_label_assertion)
self.assertEqual(get_patient_tools_button, button_label_assertion)
def test_patient_tools_button_click(self):
PatientNavBar.click_patient_tools_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.user_tools'))
def test_patient_logout_button_label(self):
button_label_assertion = "Log Out"
get_logout_button = PatientNavBar.get_logout_button(self)['button label']
print_assertion_to_console('patient logout button', get_logout_button, button_label_assertion)
self.assertEqual(get_logout_button, button_label_assertion)
def test_patient_logout_button_click(self):
PatientNavBar.click_logout_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'auth_bp.logout'))
def test_patient_settings_button_label(self):
button_label_assertion = "Settings"
get_settings_button = PatientNavBar.get_settings_button(self)['button label']
print_assertion_to_console('patient settings button', get_settings_button, button_label_assertion)
self.assertEqual(get_settings_button, button_label_assertion)
def test_patient_settings_button_click(self):
PatientNavBar.click_acc_settings_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'acc_settings_bp.patient_acc_settings'))
class TestPsychiatristNavBar(LiveServerTestCase):
# Element Specific Tests ----------------------------------------------------
def test_psychiatrist_dashboard_button_label(self):
button_label_assertion = "Dashboard"
get_psychiatrist_dashboard_button = PsychiatristNavBar.get_dashboard_button(self)['button label']
print_assertion_to_console('psychiatrist dashboard button', get_psychiatrist_dashboard_button,
button_label_assertion)
self.assertEqual(get_psychiatrist_dashboard_button, button_label_assertion)
def test_psychiatrist_dashboard_button_click(self):
PsychiatristNavBar.click_dashboard_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.dashboard'))
def test_my_patients_button_label(self):
button_label_assertion = "My Patients"
get_my_patients_button = PsychiatristNavBar.get_my_patients_button(self)['button label']
print_assertion_to_console('my patients button', get_my_patients_button, button_label_assertion)
self.assertEqual(get_my_patients_button, button_label_assertion)
def test_my_patients_button_click(self):
PsychiatristNavBar.click_my_patients_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.my_patients'))
def test_psychiatrist_tools_button_label(self):
button_label_assertion = "Patient Tools"
get_patient_tools_button = PsychiatristNavBar.get_psychiatrist_tools_button(self)['button label']
print_assertion_to_console('patient tools button', get_patient_tools_button, button_label_assertion)
self.assertEqual(get_patient_tools_button, button_label_assertion)
def test_psychiatrist_tools_button_click(self):
PsychiatristNavBar.click_psychiatrist_tools_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'dashboard_bp.psychiatrist_tools'))
def test_psychiatrist_logout_button_label(self):
button_label_assertion = "Log Out"
get_logout_button = PsychiatristNavBar.get_logout_button(self)['button label']
print_assertion_to_console('psychiatrist logout button', get_logout_button, button_label_assertion)
self.assertEqual(get_logout_button, button_label_assertion)
def test_psychiatrist_logout_button_click(self):
PsychiatristNavBar.click_logout_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'auth_bp.logout'))
def test_psychiatrist_settings_button_label(self):
button_label_assertion = "Settings"
get_settings_button = PsychiatristNavBar.get_settings_button(self)['button label']
print_assertion_to_console('psychiatrist settings button', get_settings_button, button_label_assertion)
self.assertEqual(get_settings_button, button_label_assertion)
def test_psychiatrist_settings_button_click(self):
PsychiatristNavBar.click_acc_settings_button()
page_status_code = CommonPageObject.get_page_response(self)
self.assertEqual(page_status_code, 200) # Our page status code should equal 200 if it is functional.
page_url = CommonPageObject.get_page_url(self)
self.assertTrue(compare_and_evaluate_relative_urls(page_url, 'acc_settings_bp.psychiatrist_acc_settings'))
``` |
{
"source": "joshuahigginson1/DevOps_Assessment_2",
"score": 3
} |
#### File: DevOps_Assessment_2/service1/service1_config.py
```python
from os import environ, path
# .env location ------------------------
# We find the absolute path of the root directory of our current file.
basedir = path.abspath(path.dirname(__file__))
# Functions ------------------------------------------------------------
def remove_quotes(string):
""" This function removes any speech mark quotes from a string input.
Keyword Arguments;
string: A string with speech marks.
"""
return string.replace('"', '')
# Declare Classes ------------------------------------------------------
class Config(object): # General Config
FLASK_APP = 'service1_wsgi.py'
DEBUG = False
TESTING = False
files_dir_env = remove_quotes(environ.get("FILES_DIRECTORY"))
FILES_DIRECTORY = path.join(basedir, files_dir_env)
SECRET_KEY = remove_quotes(environ.get("PRODUCTION_SECRET_KEY"))
SERVICE_2_URL = "http://0.0.0.0:5002"
SERVICE_3_URL = "http://0.0.0.0:5003"
SERVICE_4_URL = "http://0.0.0.0:5004"
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
ENV = 'production'
DB_USER = environ.get('PRODUCTION_DB_USERNAME')
DB_PASS = environ.get('PRODUCTION_DB_USERPASS')
DB_ADD = environ.get('PRODUCTION_DATABASE_ADDRESS')
DB_NAME = environ.get('PRODUCTION_DB')
database_step_1 = f"mysql+pymysql://{DB_USER}:{DB_PASS}@{DB_ADD}/{DB_NAME}"
SQLALCHEMY_DATABASE_URI = str(remove_quotes(database_step_1))
SERVICE_2_URL = remove_quotes(environ.get('SERVICE_2_URL'))
SERVICE_3_URL = remove_quotes(environ.get('SERVICE_3_URL'))
SERVICE_4_URL = remove_quotes(environ.get('SERVICE_4_URL'))
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
DB_USER = environ.get('DEVELOPMENT_DB_USERNAME')
DB_PASS = environ.get('DEVELOPMENT_DB_USERPASS')
DB_ADD = environ.get('DEVELOPMENT_DATABASE_ADDRESS')
DB_NAME = environ.get('DEVELOPMENT_DB')
database_step_1 = f"mysql+pymysql://{DB_USER}:{DB_PASS}@{DB_ADD}/{DB_NAME}"
SQLALCHEMY_DATABASE_URI = str(remove_quotes(database_step_1))
SECRET_KEY = remove_quotes(environ.get("DEV_SECRET_KEY"))
class TestingConfig(Config):
TESTING = True
ENV = 'testing'
DB_USER = environ.get('TESTING_DB_USERNAME')
DB_PASS = environ.get('TESTING_DB_USERPASS')
DB_ADD = environ.get('TESTING_DATABASE_ADDRESS')
DB_NAME = environ.get('TESTING_DB')
database_step_1 = f"mysql+pymysql://{DB_USER}:{DB_PASS}@{DB_ADD}/{DB_NAME}"
SQLALCHEMY_DATABASE_URI = str(remove_quotes(database_step_1))
SECRET_KEY = remove_quotes(environ.get("TESTING_SECRET_KEY"))
```
#### File: service2/src/service2.py
```python
import random
from os import environ
from flask import Flask, jsonify, request
# Flask ----------------------------------------------------------------
# Create our flask application.
service2 = Flask(__name__)
if environ.get("FLASK_ENV").replace('"', '') == 'production':
service2.config.from_object('service2_config.ProductionConfig')
elif environ.get("FLASK_ENV").replace('"', '') == 'testing':
service2.config.from_object('service2_config.TestingConfig')
else:
service2.config.from_object('service2_config.DevelopmentConfig')
# On GET Request -------------------------------------------------------
# Helper Functions -----------------------------------------------------
def return_scale_dictionary():
"""This function is to be used with a GET request, returning a list of
scales for our user to select from.
Service #1 requires a list of pitches for our user to chose from. The
different implementations of service #2 will alter these pitch lists.
When Service #2 receives a GET request, it will send the output.
"""
scale_list = {
"chromatic": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, "r"],
"major": [1, 3, 5, 6, 8, 10, 12, "r"],
"major pentatonic": [1, 3, 5, 8, 10, "r"],
"major blues": [1, 3, 4, 5, 8, 10, "r"],
"natural minor": [1, 3, 4, 6, 8, 9, 11, "r"],
"harmonic minor": [1, 3, 4, 6, 8, 9, 12, "r"],
"minor pentatonic": [1, 4, 6, 8, 11, "r"],
"minor blues": [1, 4, 6, 7, 8, 11, "r"],
"png implementation": [1, 4, 6, 7, 8, 11, "r"]
}
return scale_list
# Function -------------------------------------------------------------
@service2.route('/', methods=['GET'])
def on_get_request():
"""This function triggers after every get request, to the endpoint '/'"""
# TODO: Write unit test for on_get_request().
return jsonify(return_scale_dictionary())
# On POST Request ------------------------------------------------------
# Helper Functions -----------------------------------------------------
def generate_random_note_pitch(scale_list):
"""Generate a random note pitch drawn from the given scale list.
Keyword Arguments:
scale_list: A list of Mingus compatible pitches, in a list.
"""
return random.choice(scale_list)
def get_note_name(generated_note_pitch, note_names_in_c):
"""Converts our randomised note pitch into musical notes in the key of C.
Keyword Arguments:
generated_note_pitch: Our randomly generated note pitch.
note_names_in_c: A dictionary of the note positions in the C
chromatic scale, and their corresponding note names.
"""
return note_names_in_c.get(generated_note_pitch)
def return_random_pitch(user_chosen_scale):
"""This function is to be used with a POST request, returning a random
note pitch, based on the user's chosen scale.
Keyword Arguments:
user_chosen_scale: A musical scale, chosen by the user as a result
of a GET request to the service #2 API.
"""
c_chromatic_dictionary = { # Notes an corresponding positions of C chrom.
1: 'C',
2: 'C#',
3: 'D',
4: 'D#',
5: 'E',
6: 'F',
7: 'F#',
8: 'G',
9: 'G#',
10: 'A',
11: 'A#',
12: 'B',
"r": "r"
}
rand_note_pitch = generate_random_note_pitch(user_chosen_scale)
return get_note_name(rand_note_pitch, c_chromatic_dictionary)
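# --- Hedged usage sketch (not part of the original service) ---
# Drawing a random pitch from the C major scale in the proprietary number
# format used above; "r" denotes a rest.
def _example_random_pitch():
    return return_random_pitch([1, 3, 5, 6, 8, 10, 12, "r"])  # e.g. 'G' or 'r'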
# Function -------------------------------------------------------------
@service2.route('/', methods=['POST'])
def on_post_request():
"""This function triggers after every post request to the endpoint '/'
We expect to receive a specific set of notes from service 1.
"""
# TODO: Write unit test for on_post_request().
received_data = request.get_json()
print(f"Received data = {received_data}")
converted_data = list(received_data.values())
print(f"Converted data = {converted_data}")
note_pitch_output = return_random_pitch(converted_data[0])
print(f'Converted data: {converted_data[0]}')
print(f'Note pitch output: {note_pitch_output}')
return jsonify(note_pitch_output)
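# --- Hedged usage sketch (not part of the original service) ---
# Exercising the POST endpoint with Flask's test client. The scale sent here
# is the C major scale in the proprietary format; the key name "scale" is
# arbitrary because the handler only reads the dictionary's values.
if __name__ == '__main__':
    with service2.test_client() as client:
        response = client.post('/', json={'scale': [1, 3, 5, 6, 8, 10, 12, 'r']})
        print(response.get_json())  # e.g. 'E' or 'r'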
```
#### File: src/tests/conftest.py
```python
import pytest
# Fixtures -------------------------------------------------------------
# Fixtures are a great place to store data to use for testing.
# You can return anything.
@pytest.fixture(name='all_pitches', scope='function', autouse=False)
def melodie_proprietary_pitches():
"""A fixture which returns the notes in a chromatic musical scale,
into our own proprietary format."""
return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, "r"]
@pytest.fixture(name='common_scales', scope='function', autouse=False)
def common_scales():
"""A fixture which returns all of the common musical scales,
in mélodies' own proprietary format.
Represented in musical tab, this would be all of the scales in which
the root note starts with 'F'.
"""
common_scales = {
"chromatic": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, "r"],
"major": [1, 3, 5, 6, 8, 10, 12, "r"],
"major pentatonic": [1, 3, 5, 8, 10, "r"],
"major blues": [1, 3, 4, 5, 8, 10, "r"],
"natural minor": [1, 3, 4, 6, 8, 9, 11, "r"],
"harmonic minor": [1, 3, 4, 6, 8, 9, 12, "r"],
"minor pentatonic": [1, 4, 6, 8, 11, "r"],
"minor blues": [1, 4, 6, 7, 8, 11, "r"],
"png implementation": [1, 4, 6, 7, 8, 11, "r"]
}
return common_scales
@pytest.fixture(name='note_names_in_c', scope='function', autouse=False)
def note_names_in_c():
c_chromatic_dictionary = {
1: 'C',
2: 'C#',
3: 'D',
4: 'D#',
5: 'E',
6: 'F',
7: 'F#',
8: 'G',
9: 'G#',
10: 'A',
11: 'A#',
12: 'B',
"r": "r"
}
return c_chromatic_dictionary
```
#### File: src/tests/test_service3.py
```python
import json
from src.service3 import random_note_length, service3, \
return_rhythms_dictionary
# Test Flask App Config ------------------------------------------------
def test_production_config():
"""This app checks the functionality of our .config file switcher."""
client = service3.test_client()
service3.config.from_object('service3_config.TestingConfig')
assert service3.config.get("TESTING") is True
assert service3.config.get("DEBUG") is False
service3.config.from_object('service3_config.ProductionConfig')
assert service3.config.get("TESTING") is False
assert service3.config.get("DEBUG") is False
service3.config.from_object('service3_config.DevelopmentConfig')
assert service3.config.get("TESTING") is False
assert service3.config.get("DEBUG") is True
# Test Functions -------------------------------------------------------
def test_return_rhythms_dictionary(all_rhythms):
"""This test checks the main function behind s3.
This test utilises our 'all_rhythms' pytest fixture.
- Return a dictionary.
- Keys must be in lower case.
- Rhythms can only be returned in Mingus format.
"""
check_dictionary = return_rhythms_dictionary()
assert isinstance(check_dictionary, dict)
for rhythm_name, rhythm_list in list(return_rhythms_dictionary().items()):
assert rhythm_name.islower()
for rhythm in rhythm_list:
assert rhythm in all_rhythms
def test_random_note_length(common_rhythms, all_rhythms):
"""This test checks our random note length generation function.
This test utilises the fixtures: 'all_rhythms' and 'common_rhythms'.
For every rhythm in our fixture of common lengths, run assertion:
- Must be a valid Mingus rhythm. See 'mingus_rhythms' fixture.
- Cannot be a data type other than an integer or float.
"""
for key, rhythms in common_rhythms.items():
rhythm = random_note_length(rhythms)
assert rhythm in all_rhythms
assert isinstance(rhythm, (int, float)) is True
# Test API Requests ----------------------------------------------------
def test_on_get_request(common_rhythms):
"""This test checks our GET request functionality for our API.
This test utilises the pytest fixture 'common_scales'.
When we send a get req to service 2, we should:
- GET a status code 200.
- GET a JSON file, containing a list of our common rhythms.
"""
client = service3.test_client()
response = client.get('/')
# Converts our JSON response to a python dictionary.
decode_response = json.loads(response.get_data())
assert response.status_code == 200
assert decode_response == common_rhythms
def test_on_post_request(all_rhythms):
"""
This function tests our POST request functionality for our API.
This test will utilise the pytest fixture 'all_rhythms'.
When we receive a post request to service 3, we expect:
- To receive data in a rhythm key-pair format.
- To return a status code of 200.
- To return to get back a single rhythm, as int.
"""
client = service3.test_client()
rhythm_key_pair = {"all_rhythms": [1, 2, 4, 8, 16, 32, 64]}
response = client.post('/', json=rhythm_key_pair)
response_data = int(response.get_data())
assert response_data in all_rhythms
assert response.status_code == 200
```
#### File: service4/src/service4_routes.py
```python
from flask import request
# Import our application.
from src.service4_init import service4
# Import our Logic
from src.service4_logic import create_bar, initialise_bar, add_notes_to_bar,\
save_as_png, save_as_midi, send_png_to_user,\
send_midi_to_user, overwrite_transpose_bar
# Import AST to perform literal evaluation.
import ast
from os import environ
# Global Variables -----------------------------------------------------
PNG_DIRECTORY = service4.config["PNG_DIRECTORY"]
print(f"s4 routes .png dir: {PNG_DIRECTORY}")
MIDI_DIRECTORY = service4.config["MIDI_DIRECTORY"]
print(f"s4 routes .mid dir: {MIDI_DIRECTORY}")
SERVICE_2_URL = service4.config["SERVICE_2_URL"]
SERVICE_3_URL = service4.config["SERVICE_3_URL"]
# Routes ---------------------------------------------------------------
@service4.route("/", methods=["POST"])
def service4_post_request():
"""This function triggers on a post request to service 4."""
# When we get a post request from S1, we first take the data and unpack it
# into something useful to us.
s1_data = request.get_json()
print(f"Received from S1: {s1_data}")
# We create a new bar with this information.
encode_time_signature = s1_data.get("time_signature")
decode_time_signature = ast.literal_eval(encode_time_signature)
new_bar = create_bar(decode_time_signature)
# Then we initialise the bar.
print(" \n ----- Note Before Initialisation ----- \n")
print(new_bar)
initialise_bar(new_bar,
s1_data.get("first_note_length"),
s1_data.get("first_note_pitch"))
print(" \n ----- Note After Initialisation ----- \n")
print(new_bar)
# We run our "poll s2 and s3" function to fill the bar.
scale_key_pair = s1_data.get("scale_key_pair")
rhythm_key_pair = s1_data.get("rhythm_key_pair")
# Keep adding notes to our bar.
add_notes_to_bar(new_bar, SERVICE_2_URL,
SERVICE_3_URL, scale_key_pair, rhythm_key_pair)
print(" \n ----- THE BAR IS FULL ----- \n ")
print(new_bar)
# We transpose the bar.
overwrite_transpose_bar(new_bar, s1_data.get("key"))
print(f"Transposed bar: {new_bar}")
# === IF MIDI! ===
# save_as_midi(s1_data.get('file_name'), new_bar, s1_data.get("tempo"))
# print('\n ---------- THE FILE HAS BEEN SAVED AS MIDI ---------- \n')
# Send midi file name to user.
# midi_file_name = f"{s1_data.get('file_name')}-melodie.mid"
# return send_midi_to_user(midi_file_name)
# === IF PNG! ===
png_file_name = f"{s1_data.get('file_name')}-melodie.png"
save_as_png(s1_data.get("file_name"), new_bar)
print('\n ---------- THE FILE HAS BEEN SAVED AS PNG ---------- \n')
return send_png_to_user(png_file_name)
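# --- Hedged example payload (not from the original source) ---
# Approximate shape of the JSON body service 1 is expected to POST to this
# route, inferred from the s1_data.get(...) calls above; every value is a
# hypothetical placeholder.
_EXAMPLE_S1_PAYLOAD = {
    "file_name": "my_tune",
    "time_signature": "(4, 4)",  # string form, literal_eval'd into a tuple above
    "first_note_length": 4,
    "first_note_pitch": "C",
    "scale_key_pair": {"major": [1, 3, 5, 6, 8, 10, 12, "r"]},
    "rhythm_key_pair": {"all_rhythms": [1, 2, 4, 8, 16, 32, 64]},
    "key": "F",
    "tempo": 120,  # only used by the commented-out MIDI path
}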
``` |
{
"source": "joshuahigginson1/Python_Colour_Tools",
"score": 4
} |
#### File: application/modules/average_colour.py
```python
from skimage import draw
import numpy
# Functions ------------------------------------------------------------
def ave_colour_from_selection(image, poly):
""" This function gets the average pixel colour from a polygon selection.
Credit to user Malibuoooo from StackOverflow.
:param image: Our image file.
:param poly: Numpy array of coordinates in which we are averaging.
:return: Returns the average colour within our polygon.
"""
# Generates a list of pixels that match in our polygon.
pixels = image[draw.polygon(poly[:, 1], poly[:, 0])]
# Use the channels of each pixel to get averages and convert them to ints.
# Return the average colour of every pixel.
return numpy.average(pixels, 0).astype(int)
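# --- Hedged usage sketch (not part of the original module) ---
# Averaging the colour inside a triangular selection. "photo.png" is a
# placeholder path; the polygon is an (N, 2) array of (x, y) vertices,
# matching how the function indexes poly above.
if __name__ == "__main__":
    from skimage import io
    image = io.imread("photo.png")
    triangle = numpy.array([[10, 10], [100, 20], [50, 80]])
    print(ave_colour_from_selection(image, triangle))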
``` |
{
"source": "joshuahlang/template-specialize",
"score": 2
} |
#### File: src/template_specialize/aws_parameter_store.py
```python
try:
import botocore.session
import botocore.exceptions
except ImportError: # pragma: nocover
pass
from jinja2 import nodes, TemplateRuntimeError
from jinja2.ext import Extension
class ParameterStoreExtension(Extension):
__slots__ = ()
tags = set(['aws_parameter_store'])
def __init__(self, environment):
super(ParameterStoreExtension, self).__init__(environment)
environment.extend(aws_parameter_store_client=None, aws_parameter_store_values={})
def parse(self, parser):
lineno = next(parser.stream).lineno
name = parser.parse_expression()
parameter_value = self.call_method('_get_parameter', [name], lineno=lineno)
return nodes.Output([parameter_value], lineno=lineno)
def _get_parameter(self, name):
if name not in self.environment.aws_parameter_store_values:
# Create the ssm client as needed
if self.environment.aws_parameter_store_client is None:
session = botocore.session.get_session()
self.environment.aws_parameter_store_client = session.create_client('ssm')
try:
result = self.environment.aws_parameter_store_client.get_parameter(Name=name, WithDecryption=True)
self.environment.aws_parameter_store_values[name] = result['Parameter']['Value']
except botocore.exceptions.ClientError as ex:
code = ex.response.get('Error', {}).get('Code')
raise TemplateRuntimeError(
'Failed to retrieve value "{0}" from parameter store with error: {1}'.format(name, code)
) from None
return self.environment.aws_parameter_store_values[name]
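# --- Hedged usage sketch (not part of the original module) ---
# Registering the extension on a jinja2 Environment and rendering a template
# that uses the custom tag. Running this needs botocore plus valid AWS
# credentials, and "/my/app/db-password" is a hypothetical parameter name.
def _example_render():
    from jinja2 import Environment
    env = Environment(extensions=[ParameterStoreExtension])
    template = env.from_string("password = {% aws_parameter_store '/my/app/db-password' %}")
    return template.render()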
```
#### File: src/tests/__init__.py
```python
import os
import tempfile
import unittest
class TestCase(unittest.TestCase):
@staticmethod
def create_test_files(file_defs):
tempdir = tempfile.TemporaryDirectory()
for path_parts, content in file_defs:
if isinstance(path_parts, str):
path_parts = [path_parts]
path = os.path.join(tempdir.name, *path_parts)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w', encoding='utf-8') as file_:
file_.write(content)
return tempdir
```
#### File: src/tests/test_main.py
```python
from io import StringIO
import os
import re
import unittest.mock as unittest_mock
try:
import botocore.exceptions
except ImportError: # pragma: nocover
pass
from template_specialize import __version__
import template_specialize.__main__
from template_specialize.main import main, _parse_environments, _merge_environment, _merge_values
from . import TestCase
class TestMain(TestCase):
def test_module_main(self):
self.assertTrue(template_specialize.__main__)
def test_sys_argv(self):
test_files = [
('template.txt', 'the value of "foo" is "{{foo}}"')
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
argv = ['template-specialize', input_path, output_path, '--key', 'foo', '--value', 'bar']
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr, \
unittest_mock.patch('sys.argv', argv):
main()
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'other.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(f_output.read(), 'the value of "foo" is "bar"')
def test_version(self):
for argv in [
['-v'],
['--version']
]:
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main(argv)
self.assertEqual(cm_exc.exception.code, 0)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), str(__version__) + '\n')
def test_mismatched_keys_values(self):
for argv in [
['--key', 'a', 'src.txt', 'dst.txt'],
['--key', 'a', '--value', 'foo', '--key', 'b', 'src.txt', 'dst.txt']
]:
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main(argv)
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(
stderr.getvalue(),
'''\
usage: template-specialize [-h] [-c FILE] [-e ENV] [--key KEY] [--value VALUE]
[--dump] [-v]
[SRC] [DST]
template-specialize: error: mismatched keys/values
'''
)
def test_invalid_keys_values(self):
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main(['--key', 'a', '--value', 'a: b: c'])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(
stderr.getvalue(),
'''\
mapping values are not allowed here
in "<unicode string>", line 1, column 5:
a: b: c
^
'''
)
def test_config_errors(self):
test_files = [
(
'test.config',
'''\
env1:
values:
a:
a: "env1 a.a"
b:
a: "env1 b.a"
asdf1
env2:
values:
a: ["env2 a.0"]
asdf2
'''
),
(
'test2.config',
'''\
env3:
parents: [env1, env2]
'''
),
(
'template.txt',
'''\
a.a = {{a.a}}
b.a = {{b.a}}
'''
)
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
test_path = os.path.join(input_dir, 'test.config')
test2_path = os.path.join(input_dir, 'test2.config')
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main([
'-c', test_path,
'-c', test2_path,
'-e', 'env3',
'--key', 'b', '--value', '[b0]',
input_path,
output_path
])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(
re.sub('^.+?test', 'test', stderr.getvalue(), flags=re.MULTILINE),
'''\
while scanning a simple key
test.config", line 7, column 5
could not find expected ':'
test.config", line 9, column 1
'''
)
self.assertFalse(os.path.exists(output_path))
def test_environment_only(self):
test_files = [
(
'test.config',
'''\
env1:
values:
a:
a: "foo"
c: [1, 2]
env2:
values:
b:
a: "nope"
'''
),
(
'test2.config',
'''\
env3:
parents: [env1]
values:
a:
b: "bar"
c: [4, 5, 3]
'''
),
(
'template.txt', '''\
a.a = {{a.a}}
a.b = {{a.b}}
a.c = {{a.c}}
'''
)
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
test_path = os.path.join(input_dir, 'test.config')
test2_path = os.path.join(input_dir, 'test2.config')
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main(['-c', test_path, '-c', test2_path, '-e', 'env3', input_path, output_path])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'other.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(
f_output.read(),
'''\
a.a = foo
a.b = bar
a.c = [4, 5, 3]'''
)
def test_keys_only(self):
test_files = [
(
'template.txt',
'''\
a.a = {{a.a}}
a.b = {{a.b}}
a.c = {{a.c}}
'''
)
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main([
'--key', 'a', '--value', '{a: foo}',
'--key', 'a', '--value', '{b: bar}',
'--key', 'a', '--value', '{c: [3]}',
input_path,
output_path
])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'other.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(
f_output.read(),
'''\
a.a = foo
a.b = bar
a.c = [3]'''
)
def test_environment_and_keys(self):
test_files = [
(
'config.config',
'''\
env:
values:
a:
a: foo
b: bar
c: [1]
'''
),
(
'template.txt',
'''\
a.a = {{a.a}}
a.b = {{a.b}}
a.c = {{a.c}}
'''
)
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
config_path = os.path.join(input_dir, 'config.config')
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main([
'-c', config_path,
'-e', 'env',
'--key', 'a',
'--value', '{b: bonk}',
'--key', 'a',
'--value', '{c: [10]}',
'--key', 'a',
'--value', '{c: [12, 11]}',
input_path,
output_path
])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'other.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(
f_output.read(),
'''\
a.a = foo
a.b = bonk
a.c = [12, 11]'''
)
def test_dump(self):
test_files = [
(
'config.config',
'''\
env:
values:
a:
a: foo
b: bar
c: [1]
'''
)
]
with self.create_test_files(test_files) as input_dir:
config_path = os.path.join(input_dir, 'config.config')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main([
'-c', config_path,
'-e', 'env',
'--key', 'a',
'--value', '{b: bonk}',
'--key', 'a',
'--value', '{c: [12, 11]}',
'--dump'
])
self.assertEqual(cm_exc.exception.code, 0)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '''\
a:
a: foo
b: bonk
c:
- 12
- 11
''')
def test_unknown_environment(self):
test_files = [
(
'config.config',
'''\
env:
values:
a:
a: foo
'''
)
]
with self.create_test_files(test_files) as input_dir:
config_path = os.path.join(input_dir, 'config.config')
for argv in [
['-c', config_path, '-e', 'unknown', 'src.txt', 'dst.txt'],
['-e', 'unknown', 'src.txt', 'dst.txt']
]:
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main(argv)
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(
stderr.getvalue(),
'''\
unknown environment 'unknown'
'''
)
def test_file_to_file(self):
test_files = [
('template.txt', 'the value of "foo" is "{{foo}}"')
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main([input_path, output_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'other.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(f_output.read(), 'the value of "foo" is "bar"')
def test_file_to_stdout(self):
test_files = [
('template.txt', 'the value of "foo" is "{{foo}}"')
]
with self.create_test_files(test_files) as input_dir:
input_path = os.path.join(input_dir, 'template.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main([input_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(stdout.getvalue(), 'the value of "foo" is "bar"')
self.assertEqual(stderr.getvalue(), '')
def test_stdin_to_stdout(self):
with unittest_mock.patch('sys.stdin', new=StringIO('the value of "foo" is "{{foo}}"')), \
unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main(['--key', 'foo', '--value', 'bar'])
self.assertEqual(stdout.getvalue(), 'the value of "foo" is "bar"')
self.assertEqual(stderr.getvalue(), '')
def test_stdin_to_file(self):
with self.create_test_files([]) as output_dir:
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdin', new=StringIO('the value of "foo" is "{{foo}}"')), \
unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main(['-', output_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(output_path, 'r', encoding='utf-8') as f_output:
self.assertEqual(f_output.read(), 'the value of "foo" is "bar"')
def test_file_to_dir(self):
test_files = [
('template.txt', 'the value of "foo" is "{{foo}}"')
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, '')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main([input_path, output_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), "[Errno 21] Is a directory: '{0}'\n".format(output_path))
self.assertTrue(os.path.isfile(input_path))
self.assertTrue(os.path.isdir(output_path))
def test_dir_to_dir(self):
test_files = [
('template.txt', 'the value of "foo" is "{{foo}}"'),
(('subdir', 'subtemplate.txt'), 'agree, "{{foo}}" is the value of "foo"')
]
with self.create_test_files(test_files) as input_dir, \
self.create_test_files([]) as output_dir:
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
main([input_dir, output_dir, '--key', 'foo', '--value', 'bar'])
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), '')
with open(os.path.join(output_dir, 'template.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(f_output.read(), 'the value of "foo" is "bar"')
with open(os.path.join(output_dir, 'subdir', 'subtemplate.txt'), 'r', encoding='utf-8') as f_output:
self.assertEqual(f_output.read(), 'agree, "bar" is the value of "foo"')
def test_dir_to_file(self):
test_files = [
(('subdir', 'template.txt'), 'the value of "foo" is "{{foo}}"'),
('other.txt', 'hello')
]
with self.create_test_files(test_files) as input_dir:
input_path = os.path.join(input_dir, 'subdir')
output_path = os.path.join(input_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main([input_path, output_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), "[Errno 17] File exists: '{0}'\n".format(output_path))
self.assertTrue(os.path.isdir(input_path))
self.assertTrue(os.path.isfile(output_path))
def test_file_not_exist(self):
with self.create_test_files([]) as input_dir, \
self.create_test_files([]) as output_dir:
input_path = os.path.join(input_dir, 'template.txt')
output_path = os.path.join(output_dir, 'other.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm_exc:
main([input_path, output_path, '--key', 'foo', '--value', 'bar'])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(stderr.getvalue(), "[Errno 2] No such file or directory: '{0}'\n".format(input_path))
self.assertFalse(os.path.exists(input_path))
self.assertFalse(os.path.exists(output_path))
def test_aws_parameter_store(self):
test_files = [
(
'template.txt',
'''\
{% filter tojson %}{% aws_parameter_store 'some/string' %}{% endfilter %}
{% aws_parameter_store 'some/string' %}
{% aws_parameter_store foo %}
'''
)
]
def get_parameter(**kwargs):
return {
'Parameter': {
'Value': '{0}-{{value}}'.format(kwargs['Name'])
}
}
with self.create_test_files(test_files) as input_dir:
input_path = os.path.join(input_dir, 'template.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr, \
unittest_mock.patch('botocore.session') as mock_session:
mock_session.get_session.return_value.create_client.return_value.get_parameter.side_effect = get_parameter
main([input_path, '--key', 'foo', '--value', 'a"[bar}'])
self.assertEqual(
stdout.getvalue(),
'''\
"some/string-{value}"
some/string-{value}
a"[bar}-{value}'''
)
self.assertEqual(stderr.getvalue(), '')
# get_parameter results should be cached between blocks.
mock_session.get_session.return_value.create_client.return_value.assert_has_calls([
unittest_mock.call.get_parameter(Name='some/string', WithDecryption=True),
unittest_mock.call.get_parameter(Name='a"[bar}', WithDecryption=True)
])
def test_aws_parameter_store_error(self):
test_files = [
(
'template.txt',
'''\
{% aws_parameter_store 'some/string' %}
'''
)
]
with self.create_test_files(test_files) as input_dir:
input_path = os.path.join(input_dir, 'template.txt')
with unittest_mock.patch('sys.stdout', new=StringIO()) as stdout, \
unittest_mock.patch('sys.stderr', new=StringIO()) as stderr, \
unittest_mock.patch('botocore.session') as mock_session:
mock_session.get_session.return_value.create_client.return_value.get_parameter.side_effect = \
botocore.exceptions.ClientError({'Error': {'Code': 'SomeError'}}, 'GetParameter')
with self.assertRaises(SystemExit) as cm_exc:
main([input_path])
self.assertEqual(cm_exc.exception.code, 2)
self.assertEqual(stdout.getvalue(), '')
self.assertEqual(
stderr.getvalue(),
'''\
Failed to retrieve value "some/string" from parameter store with error: SomeError
'''
)
class TestParseEnvironments(TestCase):
def test_parse_environments(self):
environments = {}
_parse_environments(
StringIO('''\
# This is a comment
env:
values:
key: value
env2:
parents: [env]
values:
key: value
'''),
environments
)
self.assertDictEqual(environments, {
'env': {
'values': {
'key': 'value'
}
},
'env2': {
'parents': ['env'],
'values': {
'key': 'value'
}
}
})
def test_parse_environments_not_dict(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
[1, 2, 3]
'''),
environments
)
self.assertEqual(str(cm_exc.exception), 'invalid environments container: [1, 2, 3]')
self.assertDictEqual(environments, {})
def test_parse_environments_invalid_environment_name(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
1:
'''),
environments
)
self.assertEqual(str(cm_exc.exception), 'invalid environment name 1')
self.assertDictEqual(environments, {})
def test_parse_environments_redefined_environment(self):
environments = {'env': {}}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
env:
'''),
environments
)
self.assertEqual(str(cm_exc.exception), "redefinition of environment 'env'")
self.assertDictEqual(environments, {'env': {}})
def test_parse_environments_invalid_metadata(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
env: [1, 2, 3]
'''),
environments
)
self.assertEqual(str(cm_exc.exception), "invalid environment metadata for environment 'env': [1, 2, 3]")
self.assertDictEqual(environments, {})
def test_parse_environments_invalid_parents_non_list(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
env:
parents: {}
'''),
environments
)
self.assertEqual(str(cm_exc.exception), "invalid parents for environment 'env': {}")
self.assertDictEqual(environments, {})
def test_parse_environments_invalid_parents_non_str(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
env:
parents: ['env2', 1]
'''),
environments
)
self.assertEqual(str(cm_exc.exception), "invalid parents for environment 'env': ['env2', 1]")
self.assertDictEqual(environments, {})
def test_parse_environments_invalid_values(self):
environments = {}
with self.assertRaises(ValueError) as cm_exc:
_parse_environments(
StringIO('''\
env:
values: []
'''),
environments
)
self.assertEqual(str(cm_exc.exception), "invalid values for environment 'env': []")
self.assertDictEqual(environments, {})
class TestMergeEnvironment(TestCase):
def test_merge_environment(self):
environments = {
'env': {
'values': {
'a': 1,
'b': 2,
'c': [{'a': 'b'}]
}
},
'env2': {
'parents': ['env'],
'values': {
'a': 3,
'c': [{'a', 'b2'}, {'c': 'd'}],
'd': 4
}
},
'env3': {
'parents': ['env', 'env2'],
'values': {
'c': [{'c': 'd3'}],
'e': 5
}
},
'env4': {
'parents': ['env3']
}
}
values = _merge_environment(environments, 'env', None, set())
self.assertDictEqual(values, {
'a': 1,
'b': 2,
'c': [{'a': 'b'}]
})
values = {}
values2 = _merge_environment(environments, 'env2', values, set())
self.assertIs(values2, values)
self.assertDictEqual(values, {
'a': 3,
'b': 2,
'c': [{'b2', 'a'}, {'c': 'd'}],
'd': 4
})
values = {}
values2 = _merge_environment(environments, 'env3', values, set())
self.assertIs(values2, values)
self.assertDictEqual(values, {
'a': 3,
'b': 2,
'c': [{'c': 'd3'}, {'c': 'd'}],
'd': 4,
'e': 5
})
values = {}
values2 = _merge_environment(environments, 'env4', values, set())
self.assertIs(values2, values)
self.assertDictEqual(values, {
'a': 3,
'b': 2,
'c': [{'c': 'd3'}, {'c': 'd'}],
'd': 4,
'e': 5
})
def test_merge_environment_unknown(self):
environments = {
'env': {
'parents': ['unknown']
}
}
with self.assertRaises(ValueError) as cm_exc:
_merge_environment(environments, 'env2', None, set())
self.assertEqual(str(cm_exc.exception), "unknown environment 'env2'")
with self.assertRaises(ValueError) as cm_exc:
_merge_environment(environments, 'env', None, set())
self.assertEqual(str(cm_exc.exception), "unknown environment 'unknown'")
def test_merge_environment_circular(self):
environments = {
'env': {
'parents': ['env'],
'values': {
'a': 1,
'b': 2,
'c': [{'a': 'b'}]
}
}
}
with self.assertRaises(ValueError) as cm_exc:
_merge_environment(environments, 'env', None, set())
self.assertEqual(str(cm_exc.exception), "circular inheritance with environment 'env'")
def test_merge_values(self):
values = {}
values2 = _merge_values({
'a': 'b',
'b': [1, 2, 3],
'c': {'a': 'b', 'c': 'd'},
'd': [{'a': 'b'}, {'c': 'd'}],
'e': {'a': [1, 2, 3], 'b': [4, 5, 6]},
'f': 1
}, values)
self.assertIs(values, values2)
self.assertDictEqual(values, {
'a': 'b',
'b': [1, 2, 3],
'c': {'a': 'b', 'c': 'd'},
'd': [{'a': 'b'}, {'c': 'd'}],
'e': {'a': [1, 2, 3], 'b': [4, 5, 6]},
'f': 1
})
values2 = _merge_values({
'a': 'b2',
'b': [4, 5],
'c': {'a': 'b2', 'e': 'f'},
'd': [{'e': 'f'}, {'c': 'd2'}, {'g': 'h'}],
'e': {'a': [4, 5], 'b': [7, 8, 9, 10]},
'g': 2
}, values)
self.assertIs(values, values2)
self.assertDictEqual(values, {
'a': 'b2',
'b': [4, 5, 3],
'c': {'a': 'b2', 'c': 'd', 'e': 'f'},
'd': [{'a': 'b', 'e': 'f'}, {'c': 'd2'}, {'g': 'h'}],
'e': {'a': [4, 5, 3], 'b': [7, 8, 9, 10]},
'f': 1,
'g': 2
})
values2 = _merge_values({
'a': [1, 2, 3],
'f': {'a': 'b'},
'b': 3,
'd': {'c': 'd'},
'c': 4,
'e': [4, 5, 6]
}, values)
self.assertIs(values, values2)
self.assertDictEqual(values, {
'a': [1, 2, 3],
'b': 3,
'c': 4,
'd': {'c': 'd'},
'e': [4, 5, 6],
'f': {'a': 'b'},
'g': 2
})
``` |
{
"source": "joshua-hull/pokemon-tcg-sdk-python",
"score": 3
} |
#### File: pokemon-tcg-sdk-python/tests/test_card.py
```python
import unittest
import vcr
from pokemonsdk import Card
class TestCard(unittest.TestCase):
"""Test card.py."""
def test_find_returns_card(self):
"""Testing Card.find() returns a card."""
with vcr.use_cassette('fixtures/Gardevoir.yaml'):
card = Card.find('xy7-54')
self.assertEqual('xy7-54', card.id)
self.assertEqual('Gardevoir', card.name)
self.assertEqual('https://s3.amazonaws.com/pokemontcg/xy7/54.png',
card.image_url)
self.assertEqual('Stage 2', card.subtype)
self.assertEqual('Pokémon', card.supertype)
self.assertEqual({'name': 'Bright Heal',
'text': 'Once during your turn '
'(before your attack), '
'you may heal 20 damage '
'from each of your Pokémon.'},
card.ability)
self.assertEqual('130', card.hp)
self.assertEqual(['Colorless', 'Colorless'], card.retreat_cost)
self.assertEqual('54', card.number)
self.assertEqual('TOKIYA', card.artist)
self.assertEqual('Rare Holo', card.rarity)
self.assertEqual('XY', card.series)
self.assertEqual('Ancient Origins', card.set)
self.assertEqual('xy7', card.set_code)
self.assertEqual(['Fairy'], card.types)
self.assertEqual(1, len(card.attacks))
self.assertEqual(3, len(card.attacks[0]['cost']))
self.assertEqual('Telekinesis', card.attacks[0]['name'])
self.assertEqual('', card.attacks[0]['damage'])
self.assertEqual(3, card.attacks[0]['convertedEnergyCost'])
# self.assertEqual([
# {'cost': ['Colorless',
# 'Colorless',
# 'Colorless'],
# 'name':'Telekinesis',
# 'text':"This attack does 50 damage to 1 of "
# "your opponents's Pokémon. This "
# "attack's damage isn't affected "
# "by Weakness or Resistance.",
# 'damage':'',
# 'convertedEnergyCost':3}
# ], card.attacks)
self.assertEqual([{'type': 'Metal', 'value': '\xd72'}],
card.weaknesses)
self.assertEqual([{'type': 'Darkness', 'value': '-20'}],
card.resistances)
def test_all_with_params_return_cards(self):
"""Testing Card.where() returns cards."""
with vcr.use_cassette('fixtures/metal_psychic.yaml'):
cards = Card.where(types='metal,psychic') \
.all()
self.assertEqual(5, len(cards))
def test_all_with_page_returns_cards(self):
"""Testing Card.where() with a page returns that page."""
with vcr.use_cassette('fixtures/all_first_page.yaml'):
cards = Card.where(page=1).all()
self.assertEqual(100, len(cards))
def test_all_with_page_and_page_size_returns_card(self):
"""Testing Card.where().
Testing Card.where with a page and size returns that page with that
many results.
"""
with vcr.use_cassette('fixtures/all_first_page_one_card.yaml'):
cards = Card.where(page=1).where(pageSize=1).all()
self.assertEqual(1, len(cards))
```
#### File: pokemon-tcg-sdk-python/tests/test_pokemonexception.py
```python
import unittest
from pokemonsdk import PokemonException
class TestPokemonException(unittest.TestCase):
"""Test PokemonException."""
def test_constructor_sets_description(self):
"""Test PokemonException description is set correctly."""
        description = "An error has occurred"
exception = PokemonException(description)
self.assertEqual(description, exception.__str__())
```
#### File: pokemon-tcg-sdk-python/tests/test_subtype.py
```python
import unittest
import vcr
from pokemonsdk import Subtype
class TestType(unittest.TestCase):
    """Test suite for Subtype."""
def test_all_returns_subtypes(self):
"""Test Subtype.all() returns all subtypes."""
with vcr.use_cassette('fixtures/subtypes.yaml'):
subtypes = Subtype.all()
self.assertTrue(len(subtypes) == 17)
self.assertTrue('Basic' in subtypes)
self.assertTrue('Stage 1' in subtypes)
self.assertTrue('Stage 2' in subtypes)
``` |
{
"source": "JoshuaHuntley/dbt-snowflake",
"score": 2
} |
#### File: integration/simple_snapshot_test/test_snapshot_query_tag.py
```python
from tests.integration.base import DBTIntegrationTest, use_profile
class TestSnapshotWithQueryTag(DBTIntegrationTest):
@property
def schema(self):
return "simple_snapshot_004"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
"snapshot-paths": ['check-snapshots-query-tag'],
"test-paths": ['check-snapshots-query-tag-expected'],
"model-paths": [],
}
def dbt_run_seed(self):
self.run_sql_file('seed.sql')
def test_snapshot_with_query_tag(self):
self.run_dbt(["snapshot", "--vars", '{{"query_tag": {}}}'.format(self.prefix)])
def assert_query_tag_expected(self):
self.run_dbt(['test', '--select', 'test_type:singular', '--vars', '{{"query_tag": {}}}'.format(self.prefix)])
@use_profile('snowflake')
def test__snowflake__snapshot_with_query_tag(self):
self.dbt_run_seed()
self.test_snapshot_with_query_tag()
self.assert_query_tag_expected()
``` |
{
"source": "joshuaipwork/cdptools",
"score": 3
} |
#### File: cdptools/dev_utils/load_custom_object.py
```python
import importlib
import logging
from typing import Dict, List, Union
###############################################################################
log = logging.getLogger(__name__)
###############################################################################
def load_custom_object(module_path: Union[str, List[str]], object_name: str, object_kwargs: Dict) -> object:
"""
Load a custom object with kwargs.
Parameters
----------
module_path: Union[str, List[str]]
Python module path or list of path parts to a custom module. Ex: "cptools.pipeline"
object_name: str
Name of the object to retrieve from the module. Ex: "Pipeline"
object_kwargs: Dict
Any kwargs to pass to the object.
Returns
-------
obj: object
The initialized object.
"""
# Convert module path to string
if isinstance(module_path, list):
module_path = ".".join(module_path)
# Load target module
mod = importlib.import_module(module_path)
obj = getattr(mod, object_name)
obj = obj(**object_kwargs)
# Log
log.debug(f"Using object: {type(obj)}")
return obj
```
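As a quick usage sketch, the calls below mirror the parametrized cases in tests/dev_utils/test_load_custom_object.py further down; nothing here goes beyond what those tests already exercise.
```python
# Usage sketch for load_custom_object, mirroring the test cases below.
from cdptools.dev_utils import load_custom_object

# Standard-library class, no kwargs: equivalent to pathlib.Path()
cwd = load_custom_object.load_custom_object("pathlib", "Path", {})

# Constructor kwargs are passed straight through to the object
moment = load_custom_object.load_custom_object(
    "datetime", "datetime", {"year": 2019, "month": 5, "day": 11}
)

# The module path may also be given as a list of path parts
splitter = load_custom_object.load_custom_object(
    ["cdptools", "audio_splitters", "ffmpeg_audio_splitter"], "FFmpegAudioSplitter", {}
)
```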
#### File: tests/audio_splitters/test_ffmpeg_audio_splitter.py
```python
from pathlib import Path
from unittest import mock
import pytest
from cdptools.audio_splitters.ffmpeg_audio_splitter import FFmpegAudioSplitter
@pytest.fixture
def example_video(data_dir) -> Path:
return data_dir / "example_video.mp4"
@pytest.mark.parametrize("audio_save_path", [
("test.wav"),
(Path("test.wav")),
pytest.param(__file__, marks=pytest.mark.raises(exception=FileExistsError)),
pytest.param(Path(__file__), marks=pytest.mark.raises(exception=FileExistsError)),
pytest.param(Path(__file__).parent, marks=pytest.mark.raises(exception=IsADirectoryError))
])
def test_mocked_save_path(tmpdir, example_video, audio_save_path):
# Append save name to tmpdir
audio_save_path = Path(tmpdir) / audio_save_path
# Initialize splitter
splitter = FFmpegAudioSplitter()
# Mock split
with mock.patch("ffmpeg.run") as mocked_ffmpeg:
mocked_ffmpeg.return_value = (b"OUTPUT", b"ERROR")
splitter.split(video_read_path=example_video, audio_save_path=audio_save_path)
```
#### File: tests/dev_utils/test_load_custom_object.py
```python
import pytest
from cdptools.dev_utils import load_custom_object
@pytest.mark.parametrize("module_path, object_name, object_kwargs", [
("pathlib", "Path", {}),
("cdptools.audio_splitters.ffmpeg_audio_splitter", "FFmpegAudioSplitter", {}),
(["cdptools", "audio_splitters", "ffmpeg_audio_splitter"], "FFmpegAudioSplitter", {}),
("datetime", "datetime", {"year": 2019, "month": 5, "day": 11}),
pytest.param("fake.module.path", "DoesNotExist", {}, marks=pytest.mark.raises(exception=ModuleNotFoundError)),
pytest.param("datetime", "DoesNotExist", {}, marks=pytest.mark.raises(exception=AttributeError))
])
def test_load_custom_object(module_path, object_name, object_kwargs):
load_custom_object.load_custom_object(module_path, object_name, object_kwargs)
```
#### File: tests/dev_utils/test_run_manager.py
```python
from pathlib import Path
from unittest import mock
import pytest
from firebase_admin import firestore
from google.cloud import storage
from cdptools.databases.cloud_firestore_database import CloudFirestoreDatabase
from cdptools.dev_utils import RunIO, RunManager
from cdptools.file_stores.gcs_file_store import GCSFileStore
from ..databases.test_cloud_firestore_database import MockedCollection
from ..file_stores.test_gcs_file_store import MockedBlob, MockedBucket
@pytest.fixture
def empty_creds_db() -> CloudFirestoreDatabase:
with mock.patch("cdptools.databases.cloud_firestore_database.CloudFirestoreDatabase._initialize_creds_db"):
db = CloudFirestoreDatabase("/fake/path/to/creds.json")
db._credentials_path = "/fake/path/to/creds.json"
db._root = mock.Mock(firestore.Client)
db._root.collection.return_value = MockedCollection([])
return db
@pytest.fixture
def empty_creds_fs() -> GCSFileStore:
with mock.patch("cdptools.file_stores.gcs_file_store.GCSFileStore._initialize_creds_fs"):
fs = GCSFileStore("/fake/path/to/creds.json")
fs._credentials_path = "/fake/path/to/creds.json"
fs._client = mock.Mock(storage.Client)
fs._bucket = MockedBucket("fake_bucket", [MockedBlob("example.mp4", exists=False)])
return fs
@pytest.fixture
def example_audio(data_dir) -> Path:
return data_dir / "example_audio.wav"
@pytest.mark.parametrize("inputs, expected", [
([RunIO(str(str), "hello")], [RunIO(str(str), "hello")]),
([RunIO(str(int), 1)], [RunIO(str(int), 1)]),
([[str, "hello"]], [RunIO(str(str), "hello")]),
([[int, 1]], [RunIO(str(int), 1)]),
([[1.0]], [RunIO(str(float), 1.0)]),
pytest.param([["this", "will", "fail"]], None, marks=pytest.mark.raises(exception=ValueError)),
([(str, "hello")], [RunIO(str(str), "hello")]),
([(int, 1)], [RunIO(str(int), 1)]),
([(1.0)], [RunIO(str(float), 1.0)]),
pytest.param([("this", "will", "fail")], None, marks=pytest.mark.raises(exception=ValueError)),
([RunIO(str(str), "hello"), RunIO(str(str), "world")], [RunIO(str(str), "hello"), RunIO(str(str), "world")])
])
def test_run_manager_init(empty_creds_db, empty_creds_fs, inputs, expected):
with RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1",
inputs=inputs
) as run:
assert run._inputs == expected
def test_make_serializable_type(empty_creds_db, empty_creds_fs, data_dir, example_audio):
# Test with real path
with RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1",
inputs=[example_audio]
) as run:
# Can't use PosixPath because if testing is done on windows then this fails
for i in run._input_files:
assert "Path" in i.type
assert isinstance(i.value, Path)
# Test with non existent path
with pytest.raises(FileNotFoundError):
run = RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1",
inputs=[Path("/this/will/fail.mp4")]
)
# Test with directory
with pytest.raises(IsADirectoryError):
run = RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1",
inputs=[data_dir]
)
# With any other type
with RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1",
inputs=[((str(tuple), ("this", "will", "be", "cast", "to", "string")))]
) as run:
assert run._inputs == [RunIO(str(tuple), "('this', 'will', 'be', 'cast', 'to', 'string')")]
def test_run_manager_safe_exit(empty_creds_db, empty_creds_fs):
with RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1"
) as run:
run.register_output(1)
def test_run_manager_failed_exit(empty_creds_db, empty_creds_fs):
# Generate exception log
with pytest.raises(AssertionError):
with RunManager(
database=empty_creds_db,
file_store=empty_creds_fs,
algorithm_name="fake",
algorithm_version="1.1.1"
):
assert False
# Check exception log exists
logs = list(Path(".").glob("exception_log_*.err"))
assert len(logs) == 1
# Clean up exception log
for log in logs:
log.unlink()
```
#### File: tests/file_stores/test_app_dirs_file_store.py
```python
import shutil
from pathlib import Path
from unittest import mock
import pytest
from cdptools.file_stores.app_dirs_file_store import AppDirsFileStore
@pytest.fixture
def fs(tmpdir):
with mock.patch("appdirs.user_data_dir") as MockAppDirs:
MockAppDirs.return_value = tmpdir
fs = AppDirsFileStore()
return fs
@pytest.fixture
def example_video(data_dir):
return Path(data_dir) / "example_video.mp4"
@pytest.fixture
def mock_compute_sha256():
with mock.patch("cdptools.file_stores.file_store.FileStore.compute_sha256_for_file") as MockSHA:
MockSHA.return_value = "936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"
yield MockSHA
@pytest.mark.parametrize("filename", [
("file.mp3"),
(Path("file.mp3")),
("/path/to/a/file.mp3"),
(Path("/path/to/a/file.mp3"))
])
def test_locate_file(fs, mock_compute_sha256, filename):
fs._locate_file(filename)
@pytest.mark.parametrize("save_name, remove", [
(None, False),
("file.mp4", False),
(Path("file.mp4"), False),
(None, True),
("file.mp4", True),
(Path("file.mp4"), True)
])
def test_upload_file(fs, mock_compute_sha256, tmpdir, example_video, save_name, remove):
tmp_input_path = shutil.copyfile(example_video, tmpdir / "tmp.mp4")
fs.upload_file(tmp_input_path, save_name, remove)
if remove:
assert not Path(tmp_input_path).exists()
@pytest.mark.parametrize("filename, save_path", [
("file.mp4", "saved_out.mp4"),
("file.mp4", Path("saved_out.mp4")),
pytest.param("does_not_exist.mp4", "not_going_to_exist.mp4", marks=pytest.mark.raises(exception=FileNotFoundError))
])
def test_download_file(fs, mock_compute_sha256, tmpdir, example_video, filename, save_path):
# Upload file
tmp_input_path = shutil.copyfile(example_video, tmpdir / "tmp.mp4")
fs.upload_file(tmp_input_path, "file.mp4")
# Send save out to tmpdir
save_path = tmpdir / save_path
# Attempt download
fs.download_file(filename, save_path)
```
#### File: tests/pipelines/test_event_gather_pipeline.py
```python
import json
from pathlib import Path
from typing import List, Union
from unittest import mock
import pytest
from firebase_admin import firestore
from google.cloud import storage
from requests import RequestException
from cdptools.audio_splitters.ffmpeg_audio_splitter import FFmpegAudioSplitter
from cdptools.databases.cloud_firestore_database import CloudFirestoreDatabase
from cdptools.event_scrapers.seattle_event_scraper import SeattleEventScraper
from cdptools.file_stores.gcs_file_store import GCSFileStore
from cdptools.pipelines import EventGatherPipeline
from cdptools.sr_models.google_cloud_sr_model import (GoogleCloudSRModel,
SRModelOutputs)
from cdptools.sr_models.webvtt_sr_model import WebVTTSRModel
from ..databases.test_cloud_firestore_database import MockedCollection
from ..file_stores.test_gcs_file_store import MockedBlob, MockedBucket
@pytest.fixture
def legistar_data_dir(data_dir) -> Path:
return data_dir / "legistar"
@pytest.fixture
def example_video(data_dir) -> Path:
return data_dir / "example_video.mp4"
@pytest.fixture
def example_audio(data_dir) -> Path:
return data_dir / "example_audio.wav"
@pytest.fixture
def example_config(data_dir) -> Path:
return data_dir / "example_event_pipeline_config.json"
@pytest.fixture
def example_config_with_mixture_sr_model(data_dir) -> Path:
return data_dir / "example_event_pipeline_config_with_mixture_sr_model.json"
@pytest.fixture
def example_transcript_raw(data_dir) -> Path:
return data_dir / "example_transcript_raw.json"
@pytest.fixture
def example_transcript_words(data_dir) -> Path:
return data_dir / "example_transcript_words.json"
@pytest.fixture
def example_transcript_sentences(data_dir) -> Path:
return data_dir / "example_transcript_sentences.json"
@pytest.fixture
def example_transcript_speaker_turns(data_dir) -> Path:
return data_dir / "example_transcript_speaker_turns.json"
@pytest.fixture
def empty_creds_db() -> CloudFirestoreDatabase:
with mock.patch("cdptools.databases.cloud_firestore_database.CloudFirestoreDatabase._initialize_creds_db"):
db = CloudFirestoreDatabase("/fake/path/to/creds.json")
db._credentials_path = "/fake/path/to/creds.json"
db._root = mock.Mock(firestore.Client)
db._root.collection.return_value = MockedCollection([])
return db
@pytest.fixture
def empty_creds_fs() -> GCSFileStore:
with mock.patch("cdptools.file_stores.gcs_file_store.GCSFileStore._initialize_creds_fs"):
fs = GCSFileStore("/fake/path/to/creds.json")
fs._credentials_path = "/fake/path/to/creds.json"
fs._client = mock.Mock(storage.Client)
fs._bucket = MockedBucket("fake_bucket", [MockedBlob("example.mp4", exists=False)])
return fs
@pytest.fixture
def mocked_splitter(example_audio) -> FFmpegAudioSplitter:
mocked_splitter = mock.Mock(FFmpegAudioSplitter())
mocked_splitter.split.return_value = example_audio
return mocked_splitter
@pytest.fixture
def mocked_sr_model(
example_transcript_raw,
example_transcript_words,
example_transcript_sentences
) -> GoogleCloudSRModel:
# Create basic sr model
# It doesn't matter what file is put in the init as long as it's a file
# The speech client is configured during the transcribe function
mocked_model = mock.Mock(GoogleCloudSRModel(example_transcript_raw))
mocked_model.transcribe.return_value = SRModelOutputs(
example_transcript_raw,
99.0,
example_transcript_words,
example_transcript_sentences
)
return mocked_model
@pytest.fixture
def mocked_caption_sr_model(
example_transcript_raw,
example_transcript_sentences,
example_transcript_speaker_turns
) -> WebVTTSRModel:
mocked_model = mock.Mock(WebVTTSRModel("any-new-turn-pattern"))
mocked_model.transcribe.return_value = SRModelOutputs(
raw_path=example_transcript_raw,
confidence=1,
timestamped_sentences_path=example_transcript_sentences,
timestamped_speaker_turns_path=example_transcript_speaker_turns
)
return mocked_model
@pytest.fixture
def mocked_webvtt_sr_model_with_request_exception() -> WebVTTSRModel:
mocked_model = mock.Mock(WebVTTSRModel("any-new-turn-pattern"))
# Mock RequestException for transcribe with invalid-caption-uri
mocked_model.transcribe.side_effect = RequestException("invalid-caption-uri")
return mocked_model
@pytest.fixture
def example_seattle_routes(data_dir):
return data_dir / "example_seattle_routes.html"
@pytest.fixture
def example_seattle_route(data_dir):
return data_dir / "example_seattle_route.html"
class RequestReturn:
def __init__(self, content: Union[str, Path]):
if isinstance(content, Path):
with open(content, "r") as read_in:
if content.suffix == ".json":
content = json.load(read_in)
else:
content = read_in.read()
self.content = content
def raise_for_status(self):
pass
def json(self):
return self.content
@pytest.fixture
def loaded_legistar_requests(legistar_data_dir) -> List[RequestReturn]:
mocked_responses = []
for i in range(len(list(legistar_data_dir.glob("request_*")))):
mocked_responses.append(RequestReturn(list(legistar_data_dir.glob(f"request_{i}_*"))[0]))
return mocked_responses
def test_event_pipeline_single_sr_model_initialization(
empty_creds_db,
empty_creds_fs,
mocked_sr_model,
example_config
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(),
empty_creds_db,
empty_creds_fs,
FFmpegAudioSplitter(),
mocked_sr_model
]
# Initialize pipeline
pipeline = mock.Mock(EventGatherPipeline(example_config))
# Test EventGatherPipeline's single sr_model initialization
assert hasattr(pipeline, "sr_model")
assert not hasattr(pipeline, "caption_sr_model")
def test_event_pipeline_mixture_sr_model_initialization(
empty_creds_db,
empty_creds_fs,
mocked_sr_model,
mocked_caption_sr_model,
example_config_with_mixture_sr_model
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(),
empty_creds_db,
empty_creds_fs,
FFmpegAudioSplitter(),
mocked_caption_sr_model,
mocked_sr_model
]
# Initialize pipeline
pipeline = mock.Mock(EventGatherPipeline(example_config_with_mixture_sr_model))
# Test EventGatherPipeline's mixture sr_model initialization
assert hasattr(pipeline, "sr_model")
assert hasattr(pipeline, "caption_sr_model")
def test_event_pipeline_no_backfill(
empty_creds_db,
empty_creds_fs,
mocked_sr_model,
example_config,
example_seattle_routes
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(), empty_creds_db, empty_creds_fs, FFmpegAudioSplitter(), mocked_sr_model
]
# Initialize pipeline
pipeline = mock.Mock(EventGatherPipeline(example_config))
with mock.patch("requests.get") as mock_requests:
# No backfill means only routes will be gathered because example html file only includes past events.
mock_requests.side_effect = [RequestReturn(example_seattle_routes)]
pipeline.run()
    # This should never be run because the example html files only include past events.
pipeline.process_event.assert_not_called()
def test_event_gather_pipeline_with_backfill(
empty_creds_db,
empty_creds_fs,
mocked_splitter,
mocked_sr_model,
example_config,
example_seattle_routes,
example_seattle_route,
example_video,
loaded_legistar_requests
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(backfill=True), empty_creds_db, empty_creds_fs, mocked_splitter, mocked_sr_model
]
# Initialize pipeline
pipeline = EventGatherPipeline(example_config)
with mock.patch("requests.get") as mock_requests:
# Backfill means we need to mock every request call including all the legistar calls
mock_requests.side_effect = [
RequestReturn(example_seattle_routes),
RequestReturn(example_seattle_route),
*loaded_legistar_requests
]
# Mock the video copy
with mock.patch("cdptools.file_stores.FileStore._external_resource_copy") as mocked_resource_copy:
mocked_resource_copy.return_value = example_video
            # Interrupt calls to os.remove because it deletes test data otherwise
with mock.patch("os.remove"):
pipeline.run()
def test_event_pipeline_sr_model_failure(
empty_creds_db,
empty_creds_fs,
mocked_splitter,
mocked_sr_model,
mocked_webvtt_sr_model_with_request_exception,
example_config_with_mixture_sr_model,
example_seattle_routes,
example_seattle_route,
example_video,
loaded_legistar_requests
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(backfill=True),
empty_creds_db,
empty_creds_fs,
mocked_splitter,
mocked_webvtt_sr_model_with_request_exception,
mocked_sr_model
]
# Initialize pipeline
pipeline = EventGatherPipeline(example_config_with_mixture_sr_model)
with mock.patch("requests.get") as mock_requests:
# Backfill means we need to mock every request call including all the legistar calls
mock_requests.side_effect = [
RequestReturn(example_seattle_routes),
RequestReturn(example_seattle_route),
*loaded_legistar_requests
]
# Mock the video copy
with mock.patch("cdptools.file_stores.FileStore._external_resource_copy") as mocked_resource_copy:
mocked_resource_copy.return_value = example_video
            # Interrupt calls to os.remove because it deletes test data otherwise
with mock.patch("os.remove"):
pipeline.run()
# Check if sr_model is called, because caption_sr_model raised RequestException
pipeline.sr_model.transcribe.assert_called()
def test_event_pipeline_caption_sr_model_success(
empty_creds_db,
empty_creds_fs,
mocked_splitter,
mocked_sr_model,
mocked_caption_sr_model,
example_config_with_mixture_sr_model,
example_seattle_routes,
example_seattle_route,
example_video,
loaded_legistar_requests
):
# Configure all mocks
with mock.patch("cdptools.dev_utils.load_custom_object.load_custom_object") as mock_loader:
mock_loader.side_effect = [
SeattleEventScraper(backfill=True),
empty_creds_db,
empty_creds_fs,
mocked_splitter,
mocked_caption_sr_model,
mocked_sr_model
]
# Initialize pipeline
pipeline = EventGatherPipeline(example_config_with_mixture_sr_model)
with mock.patch("requests.get") as mock_requests:
# Backfill means we need to mock every request call including all the legistar calls
mock_requests.side_effect = [
RequestReturn(example_seattle_routes),
RequestReturn(example_seattle_route),
*loaded_legistar_requests
]
# Mock the video copy
with mock.patch("cdptools.file_stores.FileStore._external_resource_copy") as mocked_resource_copy:
mocked_resource_copy.return_value = example_video
            # Interrupt calls to os.remove because it deletes test data otherwise
with mock.patch("os.remove"):
pipeline.run()
        # Check that sr_model is not called, because caption_sr_model returned valid outputs
pipeline.sr_model.transcribe.assert_not_called()
```
#### File: tests/sr_models/test_google_cloud_sr_model.py
```python
import random
from unittest import mock
import pytest
from google.cloud import speech_v1p1beta1 as speech
from cdptools.sr_models.google_cloud_sr_model import GoogleCloudSRModel
@pytest.fixture
def example_audio(data_dir):
return data_dir / "example_audio.wav"
@pytest.fixture
def fake_creds_path(data_dir):
return data_dir / "fake_creds.json"
class FakeRecognizeTime:
def __init__(self, seconds):
self.seconds = seconds
self.nanos = 0
class FakeRecognizeWord:
def __init__(self, word, start_time, end_time):
self.word = word
self.start_time = FakeRecognizeTime(start_time)
self.end_time = FakeRecognizeTime(end_time)
class FakeRecognizeAlternative:
def __init__(self, words):
self.words = words
self.confidence = random.random()
class FakeRecognizeResult:
def __init__(self, alternatives):
self.alternatives = alternatives
class FakeRecognizeResults:
results = [
FakeRecognizeResult([
FakeRecognizeAlternative([
FakeRecognizeWord("Hello", 0.0, 0.6),
FakeRecognizeWord("everyone", 0.7, 1.1),
FakeRecognizeWord("and", 1.2, 1.4),
FakeRecognizeWord("thank", 1.5, 1.7),
FakeRecognizeWord("you", 1.8, 1.9),
FakeRecognizeWord("for", 2.0, 2.1),
FakeRecognizeWord("coming.", 2.2, 2.4)
])
]),
FakeRecognizeResult([
FakeRecognizeAlternative([
FakeRecognizeWord("Will", 3.0, 3.1),
FakeRecognizeWord("the", 3.2, 3.3),
FakeRecognizeWord("clerk", 3.4, 3.5),
FakeRecognizeWord("begin", 3.6, 3.7),
FakeRecognizeWord("by", 3.8, 3.9),
FakeRecognizeWord("taking", 4.0, 4.1),
FakeRecognizeWord("roll.", 4.2, 4.3),
])
])
]
class FakeRecognizeOperation:
def __init__(self):
self._result = FakeRecognizeResults
def result(self, **kwargs):
return self._result
def test_google_cloud_sr_model_init(fake_creds_path):
GoogleCloudSRModel(fake_creds_path)
@pytest.mark.parametrize("phrases, cleaned", [
(None, []),
([], []),
([str(i) for i in range(600)], [str(i) for i in range(500)]),
(
["this will be chunked to less than one hundred characters because that is the maximum allowed by google "
"cloud speech recognition"],
["this will be chunked to less than one hundred characters because that is the maximum allowed by"]
),
(["-" * 100] * 200, ["-" * 100] * 100)
])
def test_clean_phrases(phrases, cleaned):
assert GoogleCloudSRModel._clean_phrases(phrases) == cleaned
def test_google_cloud_transcribe(fake_creds_path, example_audio, tmpdir):
with mock.patch("google.cloud.speech_v1p1beta1.SpeechClient.from_service_account_json") as mocked_client_init:
mocked_client = mock.Mock(spec=speech.SpeechClient)
mocked_client.long_running_recognize.return_value = FakeRecognizeOperation()
mocked_client_init.return_value = mocked_client
sr_model = GoogleCloudSRModel(fake_creds_path)
sr_model.transcribe(str(example_audio), tmpdir / "raw.json", tmpdir / "words.json", tmpdir / "sentences.json")
``` |
{
"source": "joshuaipwork/ideolog-backend",
"score": 2
} |
#### File: ideolog/base/views.py
```python
from django.shortcuts import render
import sys
sys.path.insert(0, './model/')
from VoteClassifier import VoteClassifier
from django.http import JsonResponse
import json
import os
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from threading import Thread
import pandas as pd
HOUSE_TABLE = pd.read_csv('../data/house_table.csv')
# Create your views here.
def predict(request):
statement = request.GET.get('statement', '')
chamber = request.GET.get('chamber', '')
members = HOUSE_TABLE[(HOUSE_TABLE['chamber'] == chamber) & (HOUSE_TABLE['iteration'] == 116)]
if chamber not in {'house', 'senate'}:
responseData = {'ok': False, 'msg': "No valid chamber supplied"}
else:
names = list(members['name'])
state = list(members['state'])
clf = VoteClassifier()
votes = clf.classify_all_legislators(statement)
responseData = {
'ok': True,
'statement': statement,
'chamber': chamber,
'results': [
{
'name': name,
'state': state[i],
'agree': float(votes.get(name, 0.5))
} for i, name in enumerate(names)
]
}
return JsonResponse(responseData)
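# Example response shape returned by predict() above (names and numbers are made up):
#   GET ...?statement=<text>&chamber=house ->
#   {"ok": true, "statement": "<text>", "chamber": "house",
#    "results": [{"name": "Some Member", "state": "NC", "agree": 0.73}, ...]}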
def senators():
pass
``` |
{
"source": "JoshuaJB/respc-union-dashboard",
"score": 3
} |
#### File: JoshuaJB/respc-union-dashboard/data_scraper.py
```python
from bs4 import BeautifulSoup
from flask import Flask
from scrapers import production_history
import requests
import json
#Setup
BASE_URL = "https://www.sunnyportal.com/Templates/PublicPage.aspx?page=6d806835-63f7-4577-ab4c-8116de0ec142"
app = Flask(__name__)
def scrapeData(url):
response = requests.get(url)
return BeautifulSoup(response.content,"html.parser")
def getData(html):
titles = ("currentPower", "energy", "co2Avoided")
page = html.find_all("span", class_="mainValueAmount")
values = [None for x in range(3)]
for x in range(3):
values[x] = page[x].get_text()
return dict(zip(titles,values))
def writeOut(data):
with open("data.json", "w") as writeJSON:
json.dump(data,writeJSON)
@app.route("/")
def handleRequest():
return json.dumps(getData(scrapeData(BASE_URL)))
page = scrapeData(BASE_URL)
data = getData(page)
writeOut(data)
print(production_history.getHistory(BASE_URL))
# Start Flask
#if __name__ == "__main__":
# app.run()
```
#### File: respc-union-dashboard/scrapers/production_history.py
```python
from bs4 import BeautifulSoup
from urllib import parse
import requests
enablement_data = "\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24SelectedIntervalID=5\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24PlantName=UNC%20Student%20Union\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24UseIntervalHour=1\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24_datePicker%24textBox=8%2F31%2F2016\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24DatePickerYear=2016\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24ImageButtonValues.x=8\
&ctl00%24ContentPlaceHolder1%24PublicPagePlaceholder1%24PageUserControl%24ctl00%24PublicPageLoadFixPage%24UserControlShowEnergyAndPower1%24ImageButtonValues.y=16"
TABLE_BASE_URL = "https://www.sunnyportal.com/Templates/PublicChartValues.aspx?ID=00000000-0000-0000-0000-000000000000&splang=en-US&plantTimezoneBias=-240&name="
TABLE_SETTINGS_PREFIX = "ctl00$ContentPlaceHolder1$PublicPagePlaceholder1$PageUserControl$ctl00$PublicPageLoadFixPage$UserControlShowEnergyAndPower1$"
def getHistory(base_url):#, interval_type, plant_name = "UNC Student Union", use_interval_hour = "1", start_date = "8/31/2016"):
# Create session on the server
base_page = requests.get(base_url)
base_parser = BeautifulSoup(base_page.content, "html.parser")
# Set server-side state
table_settings = [("__VIEWSTATE", base_parser.find(id = "__VIEWSTATE")["value"])]
pre_table_settings = [("ImageButtonValues.x", "8"), ("ImageButtonValues.y", "16"), ("SelectedIntervalID", 5), ("PlantName", "UNC Student Union"), ("UseIntervalHour", 1), ("_datePicker$textBox", "8/31/2016"), ("DatePickerYear","2016")]
#table_settings = map(lambda setting: (TABLE_SETTING_PREFIX + setting[0], setting[1]), pre_table_settings)
table_settings.extend(map(lambda setting: (TABLE_SETTINGS_PREFIX + setting[0], setting[1]), pre_table_settings))
requests.post(base_url, parse.urlencode(table_settings), None, cookies = base_page.cookies, headers = {"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"})
# Get table of history data
detail_response = requests.get(TABLE_BASE_URL + "&endTime=12/31/2016%2011:59:59%20PM", cookies = base_page.cookies)
# Perform some questionable parsing
table_parser = BeautifulSoup(detail_response.content, "html.parser")
table_entries = table_parser.find_all("td")[2:]
stripped_table_entries = list(map(lambda entry: entry.string, table_entries))
data = zip(stripped_table_entries[::2], stripped_table_entries[1::2])
# Return a list of (date, reading) tuples
return list(data)
def prefixSettings(settings_list):
    # Prefix each (name, value) setting in place with the full ASP.NET control path.
    for idx in range(len(settings_list)):
        settings_list[idx] = (TABLE_SETTINGS_PREFIX + settings_list[idx][0], settings_list[idx][1])
``` |
{
"source": "joshuajcarson/ControlAI",
"score": 3
} |
#### File: ControlAI/control_ai/game_state.py
```python
class GameState(object):
def __init__(self, player, action, card_name):
self.action = action
self.player = player
self.card_name = card_name
```
#### File: ControlAI/test/test_deck_data_is_complete.py
```python
import unittest
import control_ai.game_manager
class TestGameManager(unittest.TestCase):
def setUp(self):
self.class_under_test = control_ai.game_manager.GameManager()
def test_game_manager_deck_has_four_rift_cards(self):
cards = self.class_under_test.deck.query("name == 'rift'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 1")))
self.assertEqual(4, len(cards.query("type == 'silver'")))
def test_game_manager_deck_has_four_exotic_matter_cards(self):
cards = self.class_under_test.deck.query("name == 'exotic_matter'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 2")))
self.assertEqual(4, len(cards.query("type == 'silver'")))
def test_game_manager_deck_has_four_deflector_cards(self):
cards = self.class_under_test.deck.query("name == 'deflector'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 3")))
self.assertEqual(4, len(cards.query("type == 'silver'")))
def test_game_manager_deck_has_four_wormhole_cards(self):
cards = self.class_under_test.deck.query("name == 'wormhole'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 4")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_anomaly_cards(self):
cards = self.class_under_test.deck.query("name == 'anomaly'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 4")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_rewind_cards(self):
cards = self.class_under_test.deck.query("name == 'rewind'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 5")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_reactor_cards(self):
cards = self.class_under_test.deck.query("name == 'reactor'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 5")))
self.assertEqual(4, len(cards.query("type == 'silver'")))
def test_game_manager_deck_has_four_dark_energy_cards(self):
cards = self.class_under_test.deck.query("name == 'dark_energy'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 6")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_future_shift_cards(self):
cards = self.class_under_test.deck.query("name == 'future_shift'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 6")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_singularity_cards(self):
cards = self.class_under_test.deck.query("name == 'singularity'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 7")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_antimatter_cards(self):
cards = self.class_under_test.deck.query("name == 'antimatter'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 8")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_time_stop_cards(self):
cards = self.class_under_test.deck.query("name == 'time_stop'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 9")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
def test_game_manager_deck_has_four_nova_cards(self):
cards = self.class_under_test.deck.query("name == 'nova'")
self.assertEqual(4, len(cards))
self.assertEqual(4, len(cards.query("fuel_cells == 10")))
self.assertEqual(4, len(cards.query("type == 'bronze'")))
``` |
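The thirteen test methods above differ only in card name, fuel-cell count, and card type, so the same assertions could be driven from a single data table. A minimal sketch using `unittest` subtests; the card data is copied from the assertions above, everything else is illustrative:

```python
import unittest
import control_ai.game_manager

# (name, fuel_cells, type) triples taken from the individual tests above
CARD_SPECS = [
    ('rift', 1, 'silver'), ('exotic_matter', 2, 'silver'),
    ('deflector', 3, 'silver'), ('wormhole', 4, 'bronze'),
    ('anomaly', 4, 'bronze'), ('rewind', 5, 'bronze'),
    ('reactor', 5, 'silver'), ('dark_energy', 6, 'bronze'),
    ('future_shift', 6, 'bronze'), ('singularity', 7, 'bronze'),
    ('antimatter', 8, 'bronze'), ('time_stop', 9, 'bronze'),
    ('nova', 10, 'bronze'),
]

class TestDeckCardCounts(unittest.TestCase):
    def setUp(self):
        self.deck = control_ai.game_manager.GameManager().deck

    def test_each_card_appears_four_times(self):
        for name, fuel_cells, card_type in CARD_SPECS:
            with self.subTest(card=name):
                cards = self.deck.query("name == @name")
                self.assertEqual(4, len(cards))
                self.assertEqual(4, len(cards.query("fuel_cells == @fuel_cells")))
                self.assertEqual(4, len(cards.query("type == @card_type")))
```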
{
"source": "joshuajcarson/gym-scythe",
"score": 3
} |
#### File: gym-scythe/scythe_automa_ai/scythe_board_creator.py
```python
import numpy as np
import pandas as pd
from scythe_automa_ai.scythe_faction_creator import SAXONY, ALBION
TYPE = 'type'
IDENTIFIER = 'identifier'
TUNNEL = 'tunnel'
HOME_BASE = 'home_base'
TOP_LEFT_RIVER = 'top_left_river'
TOP_RIGHT_RIVER = 'top_right_river'
LEFT_RIVER = 'left_river'
RIGHT_RIVER = 'right_river'
BOTTOM_LEFT_RIVER = 'bottom_left_river'
BOTTOM_RIGHT_RIVER = 'bottom_right_river'
MOUNTAIN = 'mountain'
U_CORD = 'u'
V_CORD = 'v'
STARTING_BOARD = pd.DataFrame(np.array([[-4, 7, HOME_BASE, SAXONY, "", "", "", "", "", "", ""],
[1, 0, HOME_BASE, ALBION, "", "", "", "", "", "", ""],
[0, 3, MOUNTAIN, "", 'True', "", "", "True", "", "True", ""]]),
columns=[U_CORD, V_CORD, TYPE, IDENTIFIER, TUNNEL, TOP_LEFT_RIVER, TOP_RIGHT_RIVER,
LEFT_RIVER, RIGHT_RIVER, BOTTOM_LEFT_RIVER, BOTTOM_RIGHT_RIVER])
STARTING_BOARD[U_CORD] = pd.to_numeric(STARTING_BOARD[U_CORD])
STARTING_BOARD[V_CORD] = pd.to_numeric(STARTING_BOARD[V_CORD])
STARTING_BOARD[TUNNEL] = STARTING_BOARD[TUNNEL].astype('bool')
STARTING_BOARD[TOP_LEFT_RIVER] = STARTING_BOARD[TOP_LEFT_RIVER].astype('bool')
STARTING_BOARD[TOP_RIGHT_RIVER] = STARTING_BOARD[TOP_RIGHT_RIVER].astype('bool')
STARTING_BOARD[LEFT_RIVER] = STARTING_BOARD[LEFT_RIVER].astype('bool')
STARTING_BOARD[RIGHT_RIVER] = STARTING_BOARD[RIGHT_RIVER].astype('bool')
STARTING_BOARD[BOTTOM_LEFT_RIVER] = STARTING_BOARD[BOTTOM_LEFT_RIVER].astype('bool')
STARTING_BOARD[BOTTOM_RIGHT_RIVER] = STARTING_BOARD[BOTTOM_RIGHT_RIVER].astype('bool')
STARTING_BOARD = STARTING_BOARD.set_index([U_CORD, V_CORD])
class ScytheBoard():
def __init__(self):
self.tiles = STARTING_BOARD.copy()
``` |
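Because `STARTING_BOARD` is indexed on the axial `(u, v)` coordinates, hexes on a `ScytheBoard` instance can be looked up directly with `.loc`. A small illustrative sketch (not part of the repository itself):

```python
from scythe_automa_ai.scythe_board_creator import ScytheBoard, TYPE, TUNNEL

board = ScytheBoard()

# look up the mountain hex at axial coordinates (0, 3)
hex_0_3 = board.tiles.loc[(0, 3)]
print(hex_0_3[TYPE])    # 'mountain'
print(hex_0_3[TUNNEL])  # True

# list all home-base hexes currently on the board
print(board.tiles[board.tiles[TYPE] == 'home_base'])
```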
{
"source": "joshua-jd-lee/BitcoinExchangeFH",
"score": 3
} |
#### File: befh/core/runner.py
```python
import logging
import multiprocessing as mp
from datetime import datetime
LOGGER = logging.getLogger(__name__)
class Runner:
"""Runner.
"""
def __init__(self, config, is_debug, is_cold):
"""Constructor.
"""
self._config = config
self._is_debug = is_debug
self._is_cold = is_cold
self._exchanges = {}
self._handlers = {}
def load(self):
"""Load.
"""
LOGGER.info('Loading runner')
handlers_configuration = self._config.handlers
handlers = self.create_handlers(
handlers_configuration=handlers_configuration,
is_debug=self._is_debug,
is_cold=self._is_cold)
self._handlers = handlers
exchanges_configuration = self._config.subscriptions
exchanges = self.create_exchanges(
exchanges_configuration=exchanges_configuration,
handlers=handlers,
is_debug=self._is_debug,
is_cold=self._is_cold)
self._exchanges = exchanges
def run(self):
"""Run.
"""
LOGGER.info('Start running the feed handler')
processes = []
for name, handler in self._handlers.items():
LOGGER.info('Running handler %s', name)
process = mp.Process(target=handler.run)
process.start()
processes.append(process)
for name, exchange in self._exchanges.items():
LOGGER.info('Running exchange %s', name)
if len(self._exchanges) > 1:
process = mp.Process(target=exchange.run)
process.start()
processes.append(process)
else:
exchange.run()
LOGGER.info('Joining all the processes')
for process in processes:
process.join()
def archive(self, date):
"""Archive.
"""
date = datetime.strptime(date, '%Y-%m-%d')
LOGGER.info('Archiving the tables with date %s', date)
processes = []
for name, handler in self._handlers.items():
LOGGER.info('Running handler %s', name)
process = mp.Process(target=handler.run)
process.start()
processes.append(process)
for exchange in self._exchanges.values():
for name, instrument in exchange.instruments.items():
for handler in exchange.handlers.values():
handler.rotate_table(
table=instrument,
last_datetime=date,
allow_fail=True)
LOGGER.info('Closing the handlers')
for handler in self._handlers.values():
handler.prepare_close()
LOGGER.info('Joining all the processes')
for process in processes:
process.join()
LOGGER.info('Archived the tables with date %s', date)
@staticmethod
def create_exchange(
exchange_name, subscription, handlers, is_debug, is_cold):
"""Create exchange.
"""
try:
from befh.exchange.websocket_exchange import WebsocketExchange
exchange = WebsocketExchange(
name=exchange_name,
config=subscription,
is_debug=is_debug,
is_cold=is_cold)
exchange.load(handlers=handlers)
except ImportError as error:
LOGGER.info(
'Cannot load websocket exchange %s and fall into '
'REST api exchange', exchange_name)
from befh.exchange.rest_api_exchange import RestApiExchange
exchange = RestApiExchange(
name=exchange_name,
config=subscription,
is_debug=is_debug,
is_cold=is_cold)
exchange.load(handlers=handlers)
return exchange
@staticmethod
def create_exchanges(
exchanges_configuration, handlers, is_debug, is_cold):
"""Create exchanges.
"""
exchanges = {}
for exchange_name, subscription in exchanges_configuration.items():
exchange = Runner.create_exchange(
exchange_name=exchange_name,
subscription=subscription,
handlers=handlers,
is_debug=is_debug,
is_cold=is_cold)
exchanges[exchange_name] = exchange
return exchanges
@staticmethod
def create_handler(handler_name, handler_parameters, is_debug, is_cold):
"""Create handler.
"""
LOGGER.info('Creating handler %s', handler_name)
handler_name = handler_name.lower()
if handler_name == "sql":
from befh.handler import SqlHandler
handler = SqlHandler(
is_debug=is_debug,
is_cold=is_cold,
**handler_parameters)
elif handler_name == "zmq":
from befh.handler import ZmqHandler
handler = ZmqHandler(
is_debug=is_debug,
is_cold=is_cold,
**handler_parameters)
else:
raise NotImplementedError(
'Handler %s is not implemented' % handler_name)
handler.load(queue=mp.Queue())
return handler
@staticmethod
def create_handlers(handlers_configuration, is_debug, is_cold):
"""Create handlers.
"""
handlers = {}
for handler_name, handler_para in handlers_configuration.items():
handlers[handler_name] = Runner.create_handler(
handler_name=handler_name,
handler_parameters=handler_para,
is_debug=is_debug,
is_cold=is_cold)
return handlers
```
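`Runner` only relies on the configuration object exposing `handlers` and `subscriptions` mappings, so a driver can be sketched with a stand-in config. The structure below is an assumption for illustration only; the project's real configuration schema may differ:

```python
from types import SimpleNamespace

from befh.core.runner import Runner

# Stand-in configuration: one SQL handler and one exchange subscription.
# The exact keys expected inside each mapping are assumptions, not the
# project's documented schema.
config = SimpleNamespace(
    handlers={'sql': {'connection': 'sqlite:///befh.db'}},
    subscriptions={'SomeExchange': {'instruments': ['BTCUSD']}},
)

runner = Runner(config=config, is_debug=True, is_cold=False)
runner.load()   # builds the handlers first, then the exchanges wired to them
runner.run()    # spawns handler processes and runs the exchange loop (blocks)
```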
#### File: befh/handler/sql_handler.py
```python
import logging
from datetime import datetime
from sqlalchemy import (
create_engine,
Table,
Column,
Integer,
String,
Numeric,
MetaData)
from .rotate_handler import RotateHandler
LOGGER = logging.getLogger(__name__)
class SqlHandler(RotateHandler):
"""Sql handler.
"""
def __init__(self, connection, **kwargs):
"""Constructor.
"""
super().__init__(**kwargs)
self._connection = connection
self._engine = None
@property
def engine(self):
"""Engine.
"""
return self._engine
@property
def queue(self):
"""Queue.
"""
return self._queue
def load(self, queue):
"""Load.
"""
super().load(queue=queue)
self._engine = create_engine(self._connection)
def create_table(self, table_name, fields, **kwargs):
"""Create table.
"""
assert self._engine, "Engine is not initialized"
# Check if the table exists
if table_name in self._engine.table_names():
if self._is_cold:
self._engine.execute(
'delete table {table_name}'.format(
table_name=table_name))
LOGGER.info(
'Table %s is deleted in cold mode',
table_name)
else:
                LOGGER.info('Table %s already exists', table_name)
return
LOGGER.info('Creating table %s', table_name)
columns = []
for field_name, field in fields.items():
columns.append(self._create_column(
field_name=field_name,
field=field))
meta_data = MetaData()
Table(table_name, meta_data, *columns)
meta_data.create_all(self._engine)
LOGGER.info('Created table %s', table_name)
def insert(self, table_name, fields):
"""Insert.
"""
assert self._engine, "Engine is not initialized"
fields = [
(k, v) for k, v in fields.items() if not v.is_auto_increment]
fields = list(zip(*fields))
column_names = (','.join(fields[0]))
values = (','.join([str(f) for f in fields[1]]))
sql_statement = (
"insert into {table_name} ({column_names}) values "
"({values})").format(
table_name=table_name,
column_names=column_names,
values=values)
self._engine.execute(sql_statement)
def rename_table(self, from_name, to_name, fields=None, keep_table=True):
"""Rename table.
"""
from alembic.migration import MigrationContext
from alembic.operations import Operations
# Refresh the connection again
self._engine = create_engine(self._connection)
conn = self._engine.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
op.rename_table(from_name, to_name)
if keep_table:
assert fields is not None, (
"Fields must be provided to create the table")
self.create_table(
table_name=from_name,
fields=fields)
@staticmethod
def _create_column(field_name, field):
"""Create column.
"""
field_params = {}
if field.field_type is int:
field_type = Integer
elif field.field_type is str:
field_type = String(field.field_length)
elif field.field_type is float:
field_type = Numeric(
precision=field.size,
scale=field.decimal)
elif field.field_type is datetime:
field_type = String(26)
else:
raise NotImplementedError(
'Field type {type} not implemented'.format(
type=field.field_type))
if field.is_key:
field_params['primary_key'] = True
if field.is_auto_increment:
field_params['autoincrement'] = True
return Column(field_name, field_type, **field_params)
def _should_rerun(self, element, exception):
"""Handle exception.
"""
if element.allow_fail:
LOGGER.warn(
'Execution failed on element %s (%s)',
element,
str(exception))
return element.should_rerun
if element.should_rerun:
return True
else:
return False
elif 'MySQL server has gone away' in str(exception):
# Only for MySQL case:
# Shuold rerun on MySQL server gone
return True
else:
raise
```
#### File: befh/handler/zmq_handler.py
```python
import logging
from datetime import datetime
import zmq
from .handler import Handler
LOGGER = logging.getLogger(__name__)
class ZmqHandler(Handler):
"""Zmq handler.
"""
def __init__(self, connection, **kwargs):
"""Constructor.
"""
super().__init__(**kwargs)
self._connection = connection
self._context = zmq.Context()
self._socket = None
def load(self, queue):
"""Load.
"""
super().load(queue=queue)
LOGGER.info('Binding connection %s as a publisher',
self._connection)
def create_table(self, table_name, fields, **kwargs):
"""Create table.
"""
assert self._socket, "Socket is not initialized"
def insert(self, table_name, fields):
"""Insert.
"""
assert self._socket, "Socket is not initialized"
native_fields = {
k: self.serialize(v) for k, v in fields.items()
if not v.is_auto_increment}
data = {
"table_name": table_name,
"data": native_fields
}
self._socket.send_json(data)
@staticmethod
def serialize(value):
"""Serialize value.
"""
if isinstance(value.value, datetime):
return str(value)
return value.value
def run(self):
"""Run.
"""
# The socket has to be initialized here due to pyzmq #1232
# https://github.com/zeromq/pyzmq/issues/1232
self._socket = self._context.socket(zmq.PUB)
self._socket.bind(self._connection)
super().run()
```
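`ZmqHandler.insert` publishes each record as a JSON message on a PUB socket, so a consumer only needs a matching SUB socket subscribed to everything. A minimal illustrative subscriber; the endpoint address is an assumption, not taken from any configuration above:

```python
import zmq

def main():
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    # connect to wherever the handler's `connection` string points;
    # this address is only an example
    socket.connect("tcp://127.0.0.1:6001")
    socket.setsockopt_string(zmq.SUBSCRIBE, "")  # receive all messages
    while True:
        message = socket.recv_json()
        print(message["table_name"], message["data"])

if __name__ == '__main__':
    main()
```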
#### File: tests/zmq/zmq_publisher.py
```python
from time import sleep
import zmq
PORT = 9123
def main():
"""Main.
"""
context = zmq.Context()
socket = context.socket(zmq.PUB)
print('Binding port %s' % PORT)
socket.bind("tcp://*:%s" % PORT)
while True:
socket.send(b"a abc")
sleep(1)
if __name__ == '__main__':
main()
``` |
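A matching subscriber for this test publisher would filter on the `a` topic prefix used in `socket.send(b"a abc")`. A minimal sketch (illustrative only, not part of the test suite):

```python
import zmq

PORT = 9123

def main():
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://localhost:%s" % PORT)
    # subscribe to messages whose bytes start with b"a"
    socket.setsockopt(zmq.SUBSCRIBE, b"a")
    while True:
        topic, payload = socket.recv().split(b" ", 1)
        print(topic, payload)

if __name__ == '__main__':
    main()
```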
{
"source": "joshuajharris/alfred-can-i-stream-it",
"score": 2
} |
#### File: joshuajharris/alfred-can-i-stream-it/stream.py
```python
import sys
from workflow import Workflow, ICON_WEB, web
def main(wf):
if wf.update_available:
wf.add_item("An update is available!",
autocomplete='workflow:update', valid=False)
# The Workflow instance will be passed to the function
# you call from `Workflow.run`. Not so useful, as
# the `wf` object created in `if __name__ ...` below is global.
#
# Your imports go here if you want to catch import errors (not a bad idea)
# or if the modules/packages are in a directory added via `Workflow(libraries=...)`
# Get args from Workflow, already in normalized Unicode
# Get query from Alfred
if len(wf.args):
query = wf.args[0]
else:
query = None
params = dict(movieId=query, attributes='1', mediaType='streaming')
url = 'http://www.canistream.it/services/query'
r = web.get(url, params)
r.raise_for_status()
results = r.json()
if len(results) > 0:
for key, value in results.iteritems():
wf.add_item(title=value['friendlyName'],
subtitle=str('View on ' + value['friendlyName']),
uid=value['external_id'],
arg=value['direct_url'],
valid=True,
icon='images/' + key + '.png')
else:
wf.add_item('No streaming options available.')
#
# Send output to Alfred. You can only call this once.
# Well, you *can* call it multiple times, but Alfred won't be listening
# any more...
wf.send_feedback()
if __name__ == '__main__':
# Create a global `Workflow` object
wf = Workflow()
# update_settings={
# 'github_slug': '',
# 'version': 'v1.0.0'
# })
# Call your entry function via `Workflow.run()` to enable its helper
# functions, like exception catching, ARGV normalization, magic
# arguments etc.
sys.exit(wf.run(main))
``` |
{
"source": "joshuajharris/dotfiles",
"score": 2
} |
#### File: workflows/user.workflow.2E020B45-B449-45A0-A753-DD2F58A2AA27/slackfred-files.py
```python
import sys
import argparse
from workflow import Workflow, web, PasswordNotFound
def slack_keys():
wf_password = Workflow()
try:
slack_keys = wf_password.get_password('slack_api_key')
except PasswordNotFound:
wf.add_item(title='No API key set. Please run slt',
valid=False)
wf.send_feedback()
return 0
keys = slack_keys.split(",")
return keys
def slack_files(keys):
files_list = []
for key in keys:
api_key = str(key)
slack_auth = web.get('https://slack.com/api/auth.test?token=' + api_key + '&pretty=1').json()
if slack_auth['ok'] is False:
wf.add_item('Authentication failed.'
'Try saving your API key again',
valid=False)
wf.send_feedback()
break
else:
files = web.get('https://slack.com/api/files.list?token=' + api_key + '&count=20&pretty=1').json()
for file in files['files']:
if 'initial_comment' in file:
files_list.append({'name': file['name'], 'id': file['id'], 'url': file['permalink'], 'title':
file['title'], 'filetype': file['filetype'], 'initial_comment': file['initial_comment'],
'comment': file['initial_comment']['comment']})
else:
files_list.append({'name': file['name'], 'id': file['id'], 'url': file['url'], 'title':
file['title'], 'filetype': file['filetype']})
return files_list
def search_slack_files(files):
elements = []
elements.append(files['name'])
return u' '.join(elements)
def main(wf):
parser = argparse.ArgumentParser()
parser.add_argument('query', nargs='?', default=None)
args = parser.parse_args(wf.args)
query = args.query
def wrapper():
return slack_files(keys=slack_keys())
files_to_list = wf.cached_data('files', wrapper, max_age=120)
if query:
files_to_list = wf.filter(query, files_to_list, key=search_slack_files)
for files in files_to_list:
        if 'initial_comment' in files:
wf.add_item(title=files['name'],
subtitle=files['comment'],
arg=files['url'],
valid=True)
else:
wf.add_item(title=files['name'],
arg=files['url'],
valid=True)
wf.send_feedback()
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
```
#### File: workflows/user.workflow.2E020B45-B449-45A0-A753-DD2F58A2AA27/slackfred-snooze.py
```python
import sys
import argparse
from workflow import Workflow, web, PasswordNotFound
import json
def slack_keys():
wf = Workflow()
try:
slack_keys = wf.get_password('slack_api_key')
except PasswordNotFound:
wf.add_item(title='No API key set. Please run slt',
valid=False)
wf.send_feedback()
return 0
keys = slack_keys.split(",")
return keys
def slack_list(keys):
wf = Workflow()
slack_snooze = []
for key in keys:
api_key = str(key)
slack_auth = web.get('https://slack.com/api/auth.test?token=' + api_key + '&pretty=1').json()
if slack_auth['ok'] is False:
wf.add_item(title='Authentication failed. Check your API key',
valid=False)
wf.send_feedback()
break
else:
slack_dnd = web.get('https://slack.com/api/dnd.info?token={token}&pretty=1'.format(token=api_key)).json()
if slack_dnd['snooze_enabled'] is True:
slack_snooze.append({'team': slack_auth['team'], 'status': 'Snoozed'})
else:
slack_snooze.append({'team': slack_auth['team'], 'status': 'Active'})
return slack_snooze
def search_slack_names(slack_list):
elements = []
elements.append(slack_list['team'])
name_sort = sorted(elements, key=len)
return u' '.join(name_sort)
def main(wf):
parser = argparse.ArgumentParser()
parser.add_argument('--snooze', dest='snooze', nargs='?', default=None)
parser.add_argument('query', nargs='?', default=60)
args = parser.parse_args(wf.args)
if args.snooze:
query = args.snooze
carrot = query.find('>')
team = query[7:(carrot-1)]
snooze_time = query[carrot+2:]
for key in slack_keys():
api_key = str(key)
slack_auth = web.get('https://slack.com/api/auth.test?token=' + api_key + '&pretty=1').json()
if slack_auth['ok'] is True and slack_auth['team'] == team:
if snooze_time != '0':
dnd_url = 'https://slack.com/api/dnd.setSnooze?token={0}&num_minutes={1}'.format(api_key, snooze_time)
web.get(dnd_url)
else:
dnd_url = 'https://slack.com/api/dnd.endSnooze?token={0}'.format(api_key)
web.get(dnd_url)
if len(wf.args):
query = wf.args[0]
def wrapper():
return slack_list(keys=slack_keys())
slack_snooze = wf.cached_data('slacksnooze', wrapper, max_age=5)
if query:
slack_snooze = wf.filter(query, slack_snooze, key=search_slack_names)
if len(slack_snooze) == 0:
wf.add_item(title='Enter time in minutes',
arg=query,
valid=True)
else:
for team in slack_snooze:
wf.add_item(title='{0} -- Status: {1}'.format(team['team'], team['status']),
autocomplete='Snooze {0} > '.format(team['team']),
arg=query,
valid=True)
wf.send_feedback()
if __name__==u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
``` |
{
"source": "joshua-jin/algorithm-campus",
"score": 4
} |
#### File: algorithm-campus/lulu/a_b_problem.py
```python
class Solution:
"""
@param a: The first integer
@param b: The second integer
@return: The sum of a and b
"""
# Actually both of these methods fail to work in Python
# because Python supports infinite integer
# Non-recursive
def aplusb(self, a, b):
# write your code here, try to do it without arithmetic operators.
while b != 0:
carry = a & b
a = a ^ b
b = carry << 1
return a
# Recursive
def aplusb(self, a, b):
carry = a & b
result = a ^ b
return result if carry == 0 else self.aplusb(result, carry << 1)
```
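Because both methods share the name `aplusb`, the second (recursive) definition silently replaces the first when the class body is evaluated, and, as the comment notes, neither terminates for negative inputs since Python integers are unbounded. A minimal sketch of the usual workaround, emulating 32-bit two's-complement arithmetic; this is an illustration, not part of the original solution:

```python
MASK = 0xFFFFFFFF        # keep only the low 32 bits
SIGN_BIT = 0x80000000    # two's-complement sign bit

def aplusb_32bit(a, b):
    # propagate carries within a fixed 32-bit word
    a, b = a & MASK, b & MASK
    while b != 0:
        carry = (a & b) << 1
        a = (a ^ b) & MASK
        b = carry & MASK
    # reinterpret the unsigned result as a signed 32-bit integer
    return a if a < SIGN_BIT else ~(a ^ MASK)

print(aplusb_32bit(7, 5))    # 12
print(aplusb_32bit(-3, 2))   # -1
```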
#### File: algorithm-campus/lulu/add_two_numbers.py
```python
class Solution:
# @param l1: the first list
# @param l2: the second list
# @return: the sum list of l1 and l2
def addLists(self, l1, l2):
head = l1
carry = 0
# write your code here
while l1 is not None and l2 is not None:
l1.val += l2.val + carry
carry = l1.val / 10
l1.val = l1.val % 10
if l1.next is None and l2.next is not None:
l1.next = ListNode(0)
last = l1
l1 = l1.next
if l2.next is None and l1 is not None:
l2.next = ListNode(0)
l2 = l2.next
if carry == 1:
last.next = ListNode(1)
return head
```
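The solution above assumes LintCode's singly linked `ListNode` type, which is not defined in this file, and it relies on Python 2 semantics where `/` on integers is floor division (under Python 3 it would need `//`). A minimal sketch of the assumed node type plus small helpers for building and reading lists; the helper names are illustrative:

```python
class ListNode(object):
    """Singly linked list node, as assumed by the solution above."""
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

def build_list(values):
    # build a linked list from a plain Python list and return its head
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def to_pylist(node):
    # collect the node values back into a plain Python list
    out = []
    while node is not None:
        out.append(node.val)
        node = node.next
    return out
```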
#### File: algorithm-campus/lulu/binary_tree_paths.py
```python
class Solution:
# @param {TreeNode} root the root of the binary tree
# @return {List[str]} all root-to-leaf paths
def binaryTreePaths(self, root):
# Write your code here
if root is None:
return []
paths = []
path = ''
self.find_path(root, paths, path)
return paths
def find_path(self, root, paths, path):
if root.left is None and root.right is None:
path += str(root.val)
paths.append(path)
else:
path += str(root.val) + '->'
if root.left is not None:
self.find_path(root.left, paths, path)
if root.right is not None:
self.find_path(root.right, paths, path)
```
#### File: algorithm-campus/lulu/count_and_say.py
```python
class Solution:
# @param {int} n the nth
# @return {string} the nth sequence
def countAndSay(self, n):
# Write your code here
say = '1'
for x in xrange(0, n-1):
tmp = ''
count = 0
former = say[0]
for i in say:
if i == former:
count += 1
else:
tmp += str(count) + former
count = 1
former = i
tmp += str(count) + former
say = tmp
return say
```
#### File: algorithm-campus/lulu/fibonacci.py
```python
class Solution:
# @param n: an integer
# @return an integer f(n)
def fibonacci(self, n):
# write your code here
a, b = 0, 1
for i in range(1, n):
a, b = b, a + b
return a
```
#### File: algorithm-campus/lulu/flip-bits.py
```python
class Solution:
"""
@param a, b: Two integer
return: An integer
"""
def bitSwapRequired(self, a, b):
# write your code here
return self.countOnes(a^b)
def countOnes(self, num):
# write your code here
counter = 0
a = 1
for i in range(0, 32):
digit = num & a
if digit != 0:
counter += 1
a *= 2
return counter
```
#### File: algorithm-campus/lulu/length-of-last-word.py
```python
class Solution:
# @param {string} s A string
# @return {int} the length of last word
def lengthOfLastWord(self, s):
# Write your code here
if s == '':
return 0
return len(s.split()[-1])
``` |
{
"source": "joshuajonah/feincms-elephantblog",
"score": 2
} |
#### File: testapp/tests/test_templatetags.py
```python
from __future__ import absolute_import, unicode_literals
from django.template.loader import render_to_string
from django.test.testcases import TestCase
from .factories import EntryFactory, create_entries, create_category
class TemplateTagsTest(TestCase):
def test_templatetags(self):
entries = create_entries(EntryFactory)
category = create_category(title='Category 1')
create_category(title='Category 2')
entries[0].categories.add(category)
entries[1].is_featured = True
entries[1].save()
html = render_to_string('test_templatetags.html', {})
self.assertIn(
'<p>categories:Category 1,</p>',
html)
self.assertIn(
'<p>categories+empty:Category 1,Category 2,</p>',
html)
self.assertIn(
'<p>months:10.12,08.12,</p>',
html)
self.assertIn(
'<p>entries:Eintrag 1,Entry 1,</p>',
html)
self.assertIn(
'<p>entries+featured:Eintrag 1,</p>',
html)
self.assertIn(
'<p>entries+category0:Entry 1,</p>',
html)
self.assertIn(
'<p>entries+category1:</p>',
html)
self.assertIn(
'<p>entries+limit:Eintrag 1,</p>',
html)
``` |
{
"source": "joshuajonah/feincms-navigation",
"score": 2
} |
#### File: joshuajonah/feincms-navigation/admin.py
```python
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def add_to_navigation(modeladmin, request, queryset):
selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
ct = ContentType.objects.get_for_model(queryset.model)
return HttpResponseRedirect("%s?ct=%s&ids=%s&next=%s" % (
reverse('add-to-nav'),
ct.pk,
",".join(selected),
request.path
))
add_to_navigation.short_description = "Add items to navigation"
def remove_from_navigation(modeladmin, request, queryset):
selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
ct = ContentType.objects.get_for_model(queryset.model)
return HttpResponseRedirect("%s?ct=%s&ids=%s&next=%s" % (
reverse('remove-from-nav'),
ct.pk,
",".join(selected),
request.path
))
remove_from_navigation.short_description = "Remove items from navigation"
```
#### File: joshuajonah/feincms-navigation/extension.py
```python
from __future__ import absolute_import, unicode_literals
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
from feincms.module.page.forms import PageAdminForm
from feincms import extensions
from feincms.module.page.models import Page
from mptt.fields import TreeManyToManyField
class Extension(extensions.Extension):
def handle_model(self):
self.model.add_to_class('feincms_navigation', TreeManyToManyField(Page, blank=True, symmetrical=False, help_text='Pages linked to in the navigation of this page.', limit_choices_to={'in_navigation': True}, related_name='fein_nav'))
def handle_modeladmin(self, modeladmin):
modeladmin.add_extension_options('feincms_navigation')
``` |
{
"source": "JoshuaJoost/CVIS",
"score": 2
} |
#### File: JoshuaJoost/CVIS/06_Projektion.py
```python
__author__ = "<NAME> (1626034)"
__maintainer = __author__
__date__ = "2020-04-27"
__version__ = "1.0"
__status__ = "Finished"
import numpy as np
import cv2
## --- Task 1 Projection
# --- Constants Task 1
# focal lenght
fx = 460
fy = 460
# Translation of the image main point to adapt to the coordinate system of the image plane
cx = 320
cy = 240
# Image resolution
imageExercise1 = np.zeros((640, 480))
# 3D Room points
roomPoints3D = np.array([[[10],[10],[100]], [[33],[22],[111]], [[100],[100],[1000]], [[20],[-100],[100]]])
# calibration matrix, contains intrinsic parameters
k = np.array([[fx,0,cx],[0,fy,cy],[0,0,1]], dtype=np.float32)
# World and camera coordinate system are identical
extrinsicMatrix = np.concatenate((np.eye(3), np.zeros((3,1))),axis=1)
# Projection matrix
p = np.dot(k, extrinsicMatrix)
# using a homogeneous coordinate system
# :param arg2: cartesian3DRoomPoint needs to have shape 3x1
# :return: returns the cartesian 2D image plane point
def calc3DRoomPointTo2DPointOnImagePlane(projectionMatrix, cartesian3DRoomPoint):
if not len(cartesian3DRoomPoint.shape) == 2 or not cartesian3DRoomPoint.shape[0] == 3 or not cartesian3DRoomPoint.shape[1] == 1:
roomPointDim = ""
for i in range(len(cartesian3DRoomPoint.shape)):
roomPointDim = roomPointDim + str(cartesian3DRoomPoint.shape[i])
if i < len(cartesian3DRoomPoint.shape) - 1:
roomPointDim = roomPointDim + "x"
pass
pass
raise ValueError(f"Der kartesische 3D-Raumpunkt muss ein 3x1 Vektor sein, gegeben {roomPointDim}")
pass
# convert cartesian 3D room point to homogeneous 3D room point
homogeneous3DRoomPoint = np.reshape(np.concatenate((np.reshape(cartesian3DRoomPoint, (1,-1)), np.ones((cartesian3DRoomPoint.shape[1],1))), axis=1), (-1,1))
    # Calculate the 2D homogeneous image plane point
    homogeneous2DImagePlanePoint = np.dot(projectionMatrix, homogeneous3DRoomPoint)
    # Convert the 2D homogeneous point to a 2D cartesian point
cartesian2DImagePlanePoint = np.zeros((homogeneous2DImagePlanePoint.shape[0] - 1, homogeneous2DImagePlanePoint.shape[1]))
for i in range(cartesian2DImagePlanePoint.shape[0]):
cartesian2DImagePlanePoint[i] = homogeneous2DImagePlanePoint[i] / homogeneous2DImagePlanePoint[-1]
pass
return cartesian2DImagePlanePoint
pass
## --- Determining the pixel position with own function
imagePlanePoints2D = np.zeros((roomPoints3D.shape[0], roomPoints3D.shape[1] - 1, roomPoints3D.shape[2]))
for i in range(roomPoints3D.shape[0]):
cartesicImagePlanePointCoords = calc3DRoomPointTo2DPointOnImagePlane(p, roomPoints3D[i])
imagePlanePoints2D[i][0] = cartesicImagePlanePointCoords[0]
imagePlanePoints2D[i][1] = cartesicImagePlanePointCoords[1]
pass
print(imagePlanePoints2D)
## --- Determining the pixel position using the openCV function
cartesicImagePlanePoint = cv2.projectPoints(np.reshape(np.float32(roomPoints3D[:]), (-1,3)), np.eye(3), np.zeros((1,3)), k, None)
#print(cartesicImagePlanePoint[0]) # own and cv2 projection identical
## --- Do all pixels lie within the image?
# No. Pixel 4 is too low on the y-axis and therefore lies outside the image plane
## --- What stands out about the image points of X1 and X3?
# Pixels X1 and X3 are projected onto the same spot of the image plane
``` |
{
"source": "JoshuaJoost/GNN_SS20",
"score": 2
} |
#### File: Aufgaben/abgabe2/neuronalNetwork.py
```python
__authors__ = "<NAME> (1813064), <NAME> (1713179), <NAME> (1626034)"
# maintainer = who fixes buggs?
__maintainer = __authors__
__date__ = "2020-04-23"
__version__ = "1.0"
__status__ = "Ready"
##--- TODO
# - [optional]: import and export the neural network (so it can be saved and loaded)
# kernel imports
import numpy as np
import scipy.special
import types
import random
import math
# own data imports
import constants
from constants import inputNeurons, biasNeurons, hiddenNeurons, outputNeurons, activationFunction, activationFunctionDerived_1, learningRate
from constants import errorfunction
from constants import inputLayerLabel, hiddenLayerLabel, outputLayerLabel
import ownFunctions
import neuronalNetworkLayer as nnl
import ownTests
import view
class neuronalNetwork:
# :param2: inputLayerArray: shape(1,numberOfInputNeurons) [0] = BiasNeurons, [1] = InputNeurons
# :param3: hiddenLayerNDIMArray: shape(numberOfHiddenLayers, 2) [x][0] = NumberOfBiasNeurons, [x][1] = NumberOfNeurons
# :param4: outputLayerArray: shape(numberOfOutputNeurons) [0] = NumberOfOutputNeurons
def __init__(self, inputLayerArray, hiddenLayerNDIMArray, outputLayerArray):
# object variables
self.errorValues = np.empty(shape=1) # set in backpropagation process
self.errorValues = np.delete(self.errorValues, 0)
## --- Generate and connect layer
self.neuronalNetworkStructure = np.empty(1 + hiddenLayerNDIMArray.shape[0] + 1, dtype=object)
#self.neuronalNetworkConnections = None
# create inputlayer
self.neuronalNetworkStructure[0] = nnl.neuronalNetworkLayer(inputLayerArray[0], inputLayerArray[1], inputLayerLabel, isInputLayer=True)
# create hiddenLayer
for i in range(hiddenLayerNDIMArray.shape[0]):
self.neuronalNetworkStructure[i + 1] = nnl.neuronalNetworkLayer(hiddenLayerNDIMArray[i][0], hiddenLayerNDIMArray[i][1], hiddenLayerLabel + " (" + str(i+1) + ")")
pass
# create outputLayer
self.neuronalNetworkStructure[-1] = nnl.neuronalNetworkLayer(0, outputLayerArray[0], outputLayerLabel, isOutputLayer=True)
self.__connectLayers()
self.__initialiseWeights()
pass
def __connectLayers(self):
for i in range(self.neuronalNetworkStructure.shape[0] - 1):
self.neuronalNetworkStructure[i].connectTo(self.neuronalNetworkStructure[i+1])
pass
pass
def __initialiseWeights(self):
for i in range(self.neuronalNetworkStructure.shape[0] - 1):
self.neuronalNetworkStructure[i].setWeights(generateRandomWeights=True)
pass
pass
def __str__(self):
outputNeuronalNetworkStructure = ""
for i in range(self.neuronalNetworkStructure.shape[0]):
outputNeuronalNetworkStructure += self.neuronalNetworkStructure[i].__str__() + "\n"
if not isinstance(self.neuronalNetworkStructure[i].getLayerWeights(), type(None)):
outputNeuronalNetworkStructure += str(self.neuronalNetworkStructure[i].getLayerWeights()) + "\n"
pass
pass
return outputNeuronalNetworkStructure
pass
# forwarding function neuronal network
# :param input: type = np.array, shape = 3 [x, y, targetValue] or shape = 2 [x, y]
def forwarding(self, input):
#print(input)
for layer in range(self.neuronalNetworkStructure.size):
# set values of input layer
if self.neuronalNetworkStructure[layer].getIsInputLayer():
if input.shape[0] == 2:
# input: shape [x, y]
self.neuronalNetworkStructure[layer].setLayerInputs(input[:])
pass
elif input.shape[0] == 3:
# input: shape [x, y, targetValue]
# target value is not considered
self.neuronalNetworkStructure[layer].setLayerInputs(input[:-1])
pass
else:
raise ValueError("Der forwarding Funktion muss ein Array des Shape 2 (x,y) oder 3 (x,y,targetValue) übergeben werden. Übergebener shape: " + str(input.shape[0]))
pass
pass
# set values of hidden and output layer (in the same way)
else:
self.neuronalNetworkStructure[layer].setLayerInputs(np.dot(self.neuronalNetworkStructure[layer - 1].getLayerNeuronsAndBiasOutputValues().T, self.neuronalNetworkStructure[layer - 1].getLayerWeights())[0])
pass
pass
return self.neuronalNetworkStructure[-1].getLayerNeuronsAndBiasOutputValues()
pass
# :param2: labeldTrainData: Data must have the shape (numberOfTrainingData, numberOfInputValues + 1), numberOfInputValues = numberOfInputNeurons
def trainWithlabeldData(self, labeldTrainData):
if len(labeldTrainData.shape) != 2:
raise ValueError("Als Eingabe wird ein 2Dim Array erwartet")
pass
elif labeldTrainData.shape[1] < self.neuronalNetworkStructure[0].numberOfNeurons + 1: # +1 because of the label
errorMsg = "Eingegebene Werte müsse der Anzahl an Neuronen (+1 für das Label) entsprechen, hier: shape Array der Daten zum Formwarden " + str(input.shape[1]) + ", Anzahl der InputNeuronen " + str(self.neuronalNetworkStructure[0].numberOfNeurons)
raise ValueError(errorMsg)
pass
for trainData in range(labeldTrainData.shape[0]):
# forwarding
output = self.forwarding(labeldTrainData[trainData])
# backpropagation
# calculate and set delta value
for i in range(self.neuronalNetworkStructure.shape[0] - 1):
# output layer
if i == 0:
for outputNeuronI in range(self.neuronalNetworkStructure[-1 - i].getNumberOfNeurons()):
networkInputOutputneuronI = self.neuronalNetworkStructure[-1 - i].getLayerNeurons()[outputNeuronI].getInput()
# calc error
error = labeldTrainData[trainData][2] - output[outputNeuronI]
# save error
self.errorValues = np.append(self.errorValues, error)
# calc delta value
deltaOutputNeuronI = activationFunctionDerived_1(networkInputOutputneuronI) * error
# set delta value
self.neuronalNetworkStructure[-1 - i].getLayerNeurons()[outputNeuronI].setDelta(deltaOutputNeuronI)
pass
pass
# hidden layer
else:
for neuron in range(self.neuronalNetworkStructure[-1 -i].getLayerNeurons().size - self.neuronalNetworkStructure[-1 -i].getNumberOfBiasNeurons()):
networkInputHiddenneuronI = self.neuronalNetworkStructure[-1 - i].getLayerNeurons()[neuron + self.neuronalNetworkStructure[-1 - i].getNumberOfBiasNeurons()].getInput()
deltaHiddenNeuronI = activationFunctionDerived_1(networkInputHiddenneuronI) * (np.dot(self.neuronalNetworkStructure[-1 - i].getLayerWeights()[neuron + self.neuronalNetworkStructure[-1 - i].getNumberOfBiasNeurons()],self.neuronalNetworkStructure[-1 - i + 1].getLayerDeltavalueMatrix()))
# set delta value
self.neuronalNetworkStructure[-1 - i].getLayerNeurons()[neuron + self.neuronalNetworkStructure[-1 - i].getNumberOfBiasNeurons()].setDelta(deltaHiddenNeuronI)
pass
pass
pass
# calculate and set new weights
for i in range(self.neuronalNetworkStructure.shape[0] - 1):
# calculate the delta value of the weights
deltaWeights = learningRate * (np.dot(self.neuronalNetworkStructure[-1 - i].getLayerDeltavalueMatrix(), self.neuronalNetworkStructure[-1 - i - 1].getLayerNeuronsAndBiasOutputValues().T))
newWeights = self.neuronalNetworkStructure[-1 - i -1].getLayerWeights() + deltaWeights.T
self.neuronalNetworkStructure[-1 - i -1].setWeights(useSpecificWeights = True, specificWeightsArray = newWeights)
pass
pass
pass
def preparePlotData_Error(self, dataDivisor = 1000):
numberOfData = int(self.errorValues.size / dataDivisor)
if numberOfData == 0 or self.errorValues.size % dataDivisor > 0:
numberOfData += 1
pass
plotData = np.zeros([numberOfData])
elementTranslation = 0
for i in range(plotData.size):
startIndexPos_ErrorGroup = i * dataDivisor + elementTranslation
endIndexPos_ErrorGroup = (i + 1) * dataDivisor
if i+1 == plotData.size:
endIndexPos_ErrorGroup = self.errorValues.size
pass
plotData[i] = np.median(self.errorValues[startIndexPos_ErrorGroup:endIndexPos_ErrorGroup])
if math.isnan(plotData[i]):
plotData[i] = self.errorValues[-1]
pass
elementTranslation = 1
pass
return plotData
pass
pass
```
#### File: Aufgaben/abgabe2/neuron.py
```python
__authors__ = "<NAME> (1813064), <NAME> (1713179), <NAME> (1626034)"
# maintainer = who fixes buggs?
__maintainer = __authors__
__date__ = "2020-05-01"
__version__ = "1.0"
__status__ = "Ready"
# kernel imports
import numpy as np
# own data imports
from constants import activationFunction
class neuron:
def __init__(self, layerName, layerNeuronNumber, input = 0, isBiasNeuron = False, isInputNeuron = False, isOutputNeuron=False, activationFunc = activationFunction):
# init neuron via params
self.isBiasNeuron = isBiasNeuron
self.isInputNeuron = isInputNeuron
self.isOutputNeuron = isOutputNeuron
self.input = input
self.activationFunc = activationFunc
self.layerName = layerName
self.layerNeuronNumber = layerNeuronNumber
# further init
self.neuronName = ""
# backpropagation
self.delta = 0.0
# if isBias initialise neuron as bias neuron
if isBiasNeuron:
self.neuronName = "Bias" + str(self.layerNeuronNumber)
self.input = 1
pass
else:
self.neuronName = "Neuron" + str(self.layerNeuronNumber)
pass
pass
def getOutput(self):
if self.isBiasNeuron:
return 1
pass
elif self.isInputNeuron:
return self.input
pass
else:
return self.activationFunc(self.input)
pass
pass
def __str__(self):
return self.neuronName + ": " + str(self.getOutput())
pass
def setInput(self, newInput):
self.input = newInput
pass
def getInput(self):
return self.input
pass
def setDelta(self, newDeltaValue):
self.delta = newDeltaValue
pass
def getDelta(self):
return self.delta
pass
pass
``` |
{
"source": "joshua-kairu/python-slightly-tame",
"score": 2
} |
#### File: python-slightly-tame/think-python-chapter-3/lyrics.py
```python
def repeat_lyrics():
print_lyrics()
print_lyrics()
def print_lyrics():
print ("Ding dong bell")
print ("Kitty's in the well")
repeat_lyrics()
``` |
{
"source": "joshuakarbi/Course-Bidding-Optimization",
"score": 3
} |
#### File: Course-Bidding-Optimization/backend/optimize.py
```python
from pulp import *
import pandas as pd
from scipy.stats import norm
import json
import re
def course_with_name(name, dataframe):
index = 0
    for row in dataframe.values:
if name in row[0]:
return index
else:
index += 1
return None
# Load in data and config
parameters = json.load(open("config/parameters.json", "r"))
data = pd.read_csv("data/courses.csv")
prob = LpProblem("Course Bidding Problem",LpMaximize)
# Define probability matrix, row is course, column is probability
probabilities = []
row = 0
for course in data.values:
probabilities.append(list())
for i in range(parameters["Points to Bid"]):
mean = (course[1] + course[2])/2
range_dif = course[2] - course[1]
stddev = range_dif/6
distribution = norm(mean, stddev)
probabilities[row].append(distribution.cdf(i))
row += 1
NUM_COURSES = len(probabilities)
print("Number of possible courses: ", len(probabilities))
print("Solving . . .")
# Define decision variables
should_bid = list() # 2D matrix of binary variables: entry [i][j] is 1 if course i is bid on with j points
row_num = 0
for row in probabilities:
should_bid.append(list())
col_num = 0
for col in row:
should_bid[-1].append(LpVariable(str(row_num)+"-should-bid-"+str(col_num)+"?", cat="Binary") )
col_num += 1
row_num += 1
# list of possible bid values (0 -> 200 for example)
possible_bids = range(parameters["Points to Bid"])
possible_bid_2D = []
for i in range(NUM_COURSES):
possible_bid_2D.append(possible_bids)
print("Possible bid decisions: ", len(possible_bid_2D[0]) * len(possible_bid_2D))
# Affinity parameters
affinities = data['Affinity']
# "Happiness Function" to maximize
tuples_list = []
for i in range(NUM_COURSES):
for j in range(parameters["Points to Bid"]):
tuples_list.append((i, j))
prob += lpSum( [affinities[i] * should_bid[i][j] * probabilities[i][j] for (i, j) in tuples_list] )
# Add constraints
# 1. Maximum points available to bid with
prob += lpSum( [should_bid[i][j] * possible_bid_2D[i][j] for (i, j) in tuples_list] ) <= parameters["Points to Bid"]
# 2. Required courses must be bid on
for req_course in parameters["Required Courses"]:
prob += lpSum( [possible_bid_2D[i][j] * should_bid[course_with_name(req_course, data)][j] for (i, j) in tuples_list] ) >= 1
# 3. Must bid on enough courses to graduate
prob += lpSum( [should_bid[i][j] for (i, j) in tuples_list] ) >= parameters["Courses Required to Bid On"]
# 4. Can only chose 1 optimal bid per course
for row in should_bid:
prob += lpSum(row) <= 1
# Solve the linear problem
prob.solve()
print("Status:", LpStatus[prob.status])
# Save result in output_files
with open('output_files/bidding_strategy.csv', 'w+') as out_file:
out_file.write("Course,Optimal Bid\n")
for v in prob.variables():
if v.varValue == 1.0:
print(v.name)
course_number = int(v.name[:v.name.find("_") ])
numbers = re.findall(r'\d+', v.name)
out_file.write(str(data.values[course_number][0]) + "," + numbers[1] + "\n")
print("Value of Objective = ", value(prob.objective))
``` |
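The script reads its settings from `config/parameters.json`, accessing the keys `"Points to Bid"`, `"Required Courses"`, and `"Courses Required to Bid On"`. A minimal sketch of producing a file consistent with those accesses; the numeric values and course name are placeholders:

```python
import json

# illustrative parameters matching the keys the script reads above
parameters = {
    "Points to Bid": 200,
    "Required Courses": ["Operations Research"],
    "Courses Required to Bid On": 5,
}

with open("config/parameters.json", "w") as f:
    json.dump(parameters, f, indent=4)
```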
{
"source": "joshuakarjala/mandrill-to-rds",
"score": 2
} |
#### File: joshuakarjala/mandrill-to-rds/main.py
```python
import os
import sys
import datetime
import json
import psycopg2
from psycopg2.extras import Json
from bottle import get, post, run, request
from bottle import jinja2_template as template
# Connection credentials
DB_HOST = os.environ.get('DB_HOST')
if not DB_HOST:
print >> sys.stderr, 'Missing environment variable DB_HOST'
exit(1)
DB_NAME = os.environ.get('DB_NAME')
if not DB_NAME:
print >> sys.stderr, 'Missing environment variable DB_NAME'
exit(1)
DB_USER = os.environ.get('DB_USER')
if not DB_USER:
print >> sys.stderr, 'Missing environment variable DB_USER'
exit(1)
DB_PASSWORD = os.environ.get('DB_PASSWORD')
if not DB_PASSWORD:
print >> sys.stderr, 'Missing environment variable DB_PASSWORD'
exit(1)
TABLE_NAME = os.environ.get('TABLE_NAME')
if not TABLE_NAME:
print >> sys.stderr, 'Missing environment variable TABLE_NAME'
exit(1)
def write_to_db(data):
today = datetime.datetime.now()
# establish connection to RDS
    conn = psycopg2.connect("host=%s dbname=%s user=%s password=%s" % (DB_HOST, DB_NAME, DB_USER, DB_PASSWORD))
    cur = conn.cursor()
    # table names cannot be bound as query parameters, so interpolate the table name directly
    cur.execute("insert into %s (jsondata) values (%%s)" % TABLE_NAME, [Json(data)])
conn.commit()
cur.close()
conn.close()
@post('/inbound_mail')
def inbound_mail():
post_data = request.POST
event_list = json.loads(post_data.get('mandrill_events'))
for data in event_list:
write_to_db(data)
return 'OK'
@get('/setup')
def setup():
url = request.url.replace('/setup', '/inbound_mail')
return template('This is your hook url, copy it:<h3>{{url}}</h3>', url=url)
run(host='0.0.0.0', port=int(os.environ.get('PORT', 8010)))
``` |
{
"source": "joshuakarjala/redisq-py",
"score": 3
} |
#### File: redisq-py/redisq/task.py
```python
import time
import json
class Task():
def __init__(self, data, task_time=None):
if not isinstance(data, str):
raise TypeError("Data must be a JSON string ")
self.data = data
if not task_time:
self.time = int(time.time())
else:
self.time = task_time
def get_json(self):
return json.dumps([0, self.time, "{{placeholder}}", 0]).replace("\"{{placeholder}}\"", self.data)
``` |
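`Task.get_json` splices the already-serialized `data` string into the queue entry by replacing a quoted placeholder, which avoids double-encoding the payload. A short illustrative usage:

```python
import json
from redisq.task import Task

payload = json.dumps({"job": "resize", "width": 640})
task = Task(payload, task_time=1700000000)

print(task.get_json())
# [0, 1700000000, {"job": "resize", "width": 640}, 0]
```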
{
"source": "Joshuakemboi/Politic_API",
"score": 3
} |
#### File: tests/v1/test_party_record_views.py
```python
from .base_test import *
import unittest
from io import BytesIO
testapp = app.test_client()
class TestParty(unittest.TestCase):
def party(self,party_name , party_headquarters_address ,party_logo_url):
return testapp.post('/api/v1/party',data=dict(party_name=party_name,
party_headquarters_address = party_headquarters_address, party_logo_url = party_logo_url),follow_redirects=True)
def test_valid_inputs(self):
response = self.party(party_name='jubilee',party_headquarters_address = "jossgmail",party_logo_url = "lion")
self.assertEqual(response.status_code,201)
def put_party(self,party_name , party_headquarters_address ,party_logo_url):
return testapp.put('/api/v1/party/1',data=dict(party_name=party_name,
party_headquarters_address = party_headquarters_address, party_logo_url = party_logo_url),follow_redirects=True)
def test_put_valid_inputs(self):
response = self.put_party(party_name='jubilee',party_headquarters_address = "<EMAIL>",party_logo_url = "lion")
self.assertEqual(response.status_code,201)
def test_put_taken_party_name(self):
response = self.put_party(party_name='taken_party',party_headquarters_address = "<EMAIL>",party_logo_url = "lion")
self.assertEqual(response.status_code,400)
def test_put_taken_hq_address(self):
response = self.put_party(party_name='jubilee',party_headquarters_address = "taken_hq",party_logo_url = "lion")
self.assertEqual(response.status_code,400)
def party_missing_fields(self):
return testapp.post('/api/v1/party',data=dict(),follow_redirects=True)
def test_party_missing_fields(self):
response = self.party_missing_fields()
self.assertEqual(response.status_code,400)
def party_edit_missing_fields(self):
return testapp.put('/api/v1/party/1000',data=dict(),follow_redirects=True)
def test_party_edit_missing_fields(self):
response = self.party_edit_missing_fields()
self.assertEqual(response.status_code,400)
def get_party(self):
return testapp.get('/api/v1/party/1000')
def test_get_party(self):
response = self.get_party()
self.assertEqual(response.status_code, 200)
def get_missing_party(self):
return testapp.get('/api/v1/party/999')
def test_get_missing_party(self):
response = self.get_missing_party()
self.assertEqual(response.status_code, 400)
def get_parties(self):
return testapp.get('/api/v1/party')
def test_get_parties(self):
response = self.get_parties()
self.assertEqual(response.status_code, 200)
def delete_party(self):
return testapp.delete('/api/v1/party/100')
def test_delete_party(self):
response = self.delete_party()
self.assertEqual(response.status_code,201)
def delete_missing_party(self):
return testapp.delete('/api/v1/party/99')
def test_delete_missing_party(self):
response = self.delete_missing_party()
self.assertEqual(response.status_code,404)
``` |
{
"source": "joshua-kent/PyTkAppMng",
"score": 2
} |
#### File: included/dummyfour/module.py
```python
from tkinter import *
from tkinter.ttk import *
try:
from .defs import *
except:
from defs import *
import os.path
class init:
def __init__(self, root):
self.root = root
self.root.title("Scientific Calculator")
self.root.geometry("300x600+20+20")
self.icon = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icon.png")
self.root.iconphoto(False, ImageTk.PhotoImage(file = self.icon))
Grid.rowconfigure(self.root, 0, weight = 1)
Grid.columnconfigure(self.root, 0, weight = 1)
self.root.resizable(False, False)
style = Style()
style.configure("calc_frame.TFrame", theme = "winnative")
style.configure("calc_buttons.TButton", theme = "winnative", relief = "flat")
self.frame = Frame(self.root, style = "calc_frame.TFrame")
# maybe add loading screen
input_box = Label(self.frame, background = "#FFFFFF")
input_box.grid(row = 0, column = 0, columnspan = 9, sticky = E+W)
Grid.columnconfigure(self.frame, 0, weight = 1)
input_text = Label(self.frame, text = "in place", background = "#FFFFFF")
input_text.grid(row = 0, column = 8, sticky = N+S+E)
separator_1 = Separator(self.frame, orient = HORIZONTAL)
separator_1.grid(column = 0, row = 0, columnspan = 9, sticky = N+E+W)
separator_2 = Separator(self.frame, orient = HORIZONTAL)
separator_2.grid(column = 0, row = 0, columnspan = 9, sticky = S+E+W)
i = 1
for y in range(5):
Grid.rowconfigure(self.frame, y + 1, weight = 1)
for x in range(8):
Grid.columnconfigure(self.frame, x + 1, weight = 1)
latex_text = to_latex(buttons_dict[i][0], 15)
button = Button(self.frame, image = latex_text, compound = CENTER)
button.img = latex_text
button.grid(row = y + 1, column = x + 1, sticky = N+S+E+W)
i += 1
self.frame.grid(sticky = S)
if __name__ == "__main__":
root = Tk()
init(root)
root.mainloop()
```
#### File: included/SciCalc/module.py
```python
import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
import warnings
import os.path
if __name__ == "__main__":
raise Exception("This module cannot be run directly. Please run from PyTkAppMng/main.py")
else:
import packages.typemathtext as tmt
from .defs import *
class init:
current_string = ""
def __init__(self, root):
self.root = root
self.root.title("Scientific Calculator")
self.root.geometry("300x600+20+20")
self.icon = os.path.join(os.path.dirname(os.path.realpath(__file__)), "icon.png")
self.root.iconphoto(False, ImageTk.PhotoImage(file = self.icon))
tk.Grid.rowconfigure(self.root, 0, weight = 1)
tk.Grid.columnconfigure(self.root, 0, weight = 1)
self.root.resizable(False, False)
style = ttk.Style()
style.configure("calc_frame.TFrame", theme = "winnative")
style.configure("calc_buttons.TButton", theme = "winnative", relief = "flat")
self.frame = ttk.Frame(self.root, style = "calc_frame.TFrame")
input_box = tk.Label(self.frame, background = "#FFFFFF")
input_box.grid(row = 0, column = 0, columnspan = 9, sticky = "ew")
tk.Grid.columnconfigure(self.frame, 0, weight = 1)
self.input_text = tk.Label(self.frame, text = "", background = "#FFFFFF", anchor = "e")
self.input_text.grid(row = 0, column = 0, columnspan = 9, sticky = "nsew")
separator_1 = ttk.Separator(self.frame, orient = tk.HORIZONTAL)
separator_1.grid(column = 0, row = 0, columnspan = 9, sticky = "new")
separator_2 = ttk.Separator(self.frame, orient = tk.HORIZONTAL)
separator_2.grid(column = 0, row = 0, columnspan = 9, sticky = "sew")
i = 1
for y in range(5):
tk.Grid.rowconfigure(self.frame, y + 1, weight = 1)
for x in range(8):
tk.Grid.columnconfigure(self.frame, x + 1, weight = 1)
latex_text = tmt.to_latex(buttons_dict[i][0], 15)
button = ttk.Button(self.frame, image = latex_text, compound = tk.CENTER,
command = lambda i=i: self.button_clicked(buttons_dict[i][1]))
button.img = latex_text
button.grid(row = y + 1, column = x + 1, sticky = "nsew")
i += 1
self.frame.grid(sticky = "s")
def button_clicked(self, button_text):
self.input_text["text"] += button_text # need to add to this
if __name__ == "__main__":
root = tk.Tk()
init(root)
root.mainloop()
```
#### File: packages/typemathtext/typemath.py
```python
import warnings
import math
import os
import json
import sympy
class typemathtextError(Exception):
pass
class typemath:
r"""Creates an object that can be used for easy-to-use methods to create calculators.
It does this by creating methods to convert between Python (sympy) and LaTeX.
Furthermore, it creates a pointer that can be used to insert new text (in the
custom typemath format) in different places in the text.
Parameters:
latex (str) -- a string written with LaTeX format.
(e.g. "\int 4x^2 dx")
Attributes:
pointer (int) -- the current position of the pointer
This determines where the string will be edited
parsed (list) -- a list containing the parsed version of latex_input
            This is only updated when parse() is called
compiled (str) -- a string containing the fully converted version of latex_input
into standard Python/sympy format
            This is only updated when compile() is called"""
_current_dir = os.path.dirname(os.path.realpath(__file__))
_parse_info_dir = os.path.join(_current_dir, "parse_info.json")
def __init__(self, latex = None, parsed = None, compiled = None): # edit this to support multiple initial formats
if latex is not None:
self.latex = latex
self.parsed = self.parse()
self.compiled = self.compile()
elif parsed is not None:
self.parsed = parsed
self.latex = self.deparse(self.parsed)
self.compiled = self.compile(self.parsed)
elif compiled is not None:
self.compiled = compiled
self.parsed = self.decompile(self.compiled)
self.latex = self.deparse(self.parsed)
try:
self.evaluate = eval(self.compile())
except: # the compiled form may currently be incomplete and return an error
self.evaluate = None
self.pointer = len(self.parsed)
def parse(self, text = None, require_dollars = True):
r"""Splits a LaTeX math string into its parsed format.
This parsed form can be used as a midway point between LaTex
and normal Python (sympy) formats. It is also useful for pointers.
(e.g '\$frac{1}{2}$' (LaTeX) --> (parse) --> ['\FRAC{', '1', '}', '{', '2', '}']
--> (compile) --> '(1)/(2)' --> (evaluate) --> 0.5)
Returns:
Returns self.parsed, which is the list that 'primary_parse' has generated."""
original_text = text # this will not change, which lets us determine if the argument was None later on
if text == None:
text = self.latex
# check if the text starts and ends with $ (to confirm it is a LaTeX string)
if (text[0], text[-1]) != ("$", "$") and require_dollars:
raise typemathtextError("The input text must begin and end with a '$' symbol")
# isolate keywords into list
output = []
for char in text:
if char not in (" ", "$"):
output.append(char)
# uses parse_info.json to properly combine characters into term
output = self.__fixup(output)
# connect consecutive numbers, multiply consecutive numbers & variables to create terms
output = self.__concatenate_ints(output)
# if the original argument for 'text' was None (which means to edit 'parsed' argument instead)
if original_text is None:
self.parsed = output
return output
def deparse(self, lst = None):
"""Converts a parsed list into into a LaTeX string.
Parameters:
lst (list/None) -- a parsed list (see more info in typemath.parsed
help page)
Returns:
A converted version of the parsed list in LaTeX format.
This can make some notation differences, as multiplication
signs between numbers and variables that were added during
parsing are still present in the string."""
if lst is None:
edited_parsed = self.parsed.copy()
else:
edited_parsed = lst.copy()
edited_parsed = self.__reverse_fixup(edited_parsed)
edited_parsed.insert(0, "$")
edited_parsed.insert(-1, "$")
edited_parsed = "".join(edited_parsed)
if lst is None:
self.latex = edited_parsed
return edited_parsed
def compile(self, text = None):
"""Fully converts the parsed text list into a sympy-readable format as a
string to be executed.
Only the current parsed LaTeX text is compiled. Instead of directly
calling this function, it is automatically called when a new typemath
instance is initiated, and is also called automatically when the 'typemath.edit'
method is called.
Returns:
This returns the new string and also puts it in the attribute 'compiled'."""
# if text is not set, automatically compile attribute 'parsed' instead
if text is None:
output = self.parsed.copy() # setting a variable to a list only creates a new reference, not id
else:
output = text
with open(self._parse_info_dir, "r") as f:
doc_ = json.load(f)
keywords = doc_["keywords"]
keywords_get = [item[0] for item in keywords]
keywords_set = [item[1] for item in keywords]
specials = doc_["specials"]
specials_get = [item[0] for item in specials]
specials_set =[item[1] for item in specials]
for i in range(len(keywords)):
output = self.__swap(output, keywords_get[i], keywords_set[i])
# Creates tokens for each special value (that need more work to change)
# In format:
# {"token_number": (token's value, position)}
# token_numbers gives a list of the keys of tokens in order
# token_values gives the value that the token represents
# token_positions gives a list of the positions of each token in output
current_token_number = 1
tokens = {}
for i in range(len(output)):
if output[i] == "}":
tokens[f"{current_token_number}a"] = ("}", i)
for k in range(len(output)):
t = len(output) - k - 1
token_positions = [item[1] for item in tokens.values()]
if (output[t] == "{" or output[t] in specials_set) and t < i:
if not token_positions.__contains__(t):
tokens[f"{current_token_number}b"] = (output[t], t)
break
current_token_number += 1
token_numbers = [key for key in tokens.keys()]
token_values = [item[0] for item in tokens.values()]
token_positions = [item[1] for item in tokens.values()]
# convert \frac{a}{b} to (a)/(b)
for i in range(len(tokens)):
if token_values[i] == "FRAC{": # first {
token_1_number = token_numbers[i]
token_1 = tokens[token_1_number]
token_1_pos = token_1[1]
corresponding_token_1_number = token_1_number.replace("b", "a")
corresponding_token_1 = tokens[corresponding_token_1_number] # first }
corresponding_token_1_pos = corresponding_token_1[1]
for k in range(len(token_positions)):
if token_positions[k] == corresponding_token_1[1] + 1:
if token_values[k] == "{": # second {
token_2_number = token_numbers[k]
token_2 = tokens[token_2_number]
token_2_pos = token_2[1]
corresponding_token_2_number = token_2_number.replace("b", "a")
corresponding_token_2 = tokens[corresponding_token_2_number] # second }
corresponding_token_2_pos = corresponding_token_2[1]
break
if "corresponding_token_2" in locals():
output[token_1_pos] = "("
output[corresponding_token_1_pos] = ")/"
output[token_2_pos] = "("
output[corresponding_token_2_pos] = ")"
# join together output and return
output = "".join(output)
if text is None:
self.compiled = output
return self.compiled
def decompile(self, compiled_string): # need to do
return "None"
def edit(self, latex_input = None, parsed_input = None,
latex_insert = None, parsed_insert = None, abs_pointer = None):
r"""Edits LaTeX text by parsing, editing, repositioning the pointer, recompiling.
Parameters:
latex_input (str/None) -- the LaTeX string that is to be edited. (default: None)
If this and 'parsed_input' are both None, then the
instance's 'parsed' attribute will automatically be
edited.
If both this and 'parsed_input' are set, then 'parsed_input'
will automatically be used, rather than this.
parsed_input (list/None) -- the already parsed list that is to be edited. (default: None)
If this and 'latex_input' are both None, then the
instance's 'parsed' attribute will automatically be
edited.
If both this and 'latex_input' are set, then this
will automatically be used, rather than 'latex_input'.
latex_insert (str/None) -- the LaTeX string to be inserted in. (default: None)
If this and 'parsed_insert' are both None, then an error
will occur. If you wish to only change the pointer attribute,
then use the 'set_pointer' method.
If both this and 'parsed_insert' are set, then 'parsed_insert'
will automatically be used, rather than this.
parsed_insert (list/None) -- the already parsed list that is to be edited. (default: None)
If this and 'latex_insert' are both None, then an error
will occur. If you wish to only change the pointer attribute,
then use the 'set_pointer' method.
If both this and 'latex_insert' are set, then this will
automatically be used, rather than 'latex_insert'.
abs_pointer (int/None) -- the absolute position in the parsed list where the pointer
will move to. (default: None)
If this value is equal to none, the default will become the
'pointer' attribute of the instance.
Returns:
The new value of the text in the parsed list format. If no input arguments
('latex_input' and 'parsed_input') are given, then the instance's attributes
are also updated, and self.parsed is returned.
Example:
my_integral = typemath("$\int 4x^2 dx$") -> This gets parsed to ["\int", "4", "*", "x", "**", "2", "dx"]
my_integral.edit(latex_insert = "$+4$")
As typemath() automatically sets its instance attribute 'pointer' to the length of the parsed text, it will
        edit the end. In this example, 'pointer' will first equal 7. When edit() is called, "+" and "4" will be
        appended to the end of the 'parsed' attribute, and 'pointer' will increase by one for each inserted item (to 9). All other attributes
will be automatically updated with it, which is why this method is useful.
Hence, this method results in:
my_integral.parsed = ["\int", "4", "*", "x", "**", "2", "dx", "+", "4"]
my_integral.compiled = "sympy.integrate(4*sympy.symbols("x")**2, sympy.symbols("x"))+4"
my_integral.pointer = 9"""
# check value types
if latex_input is not None:
if not isinstance(latex_input, str):
raise typemathtextError("'latex_input' must be either a string or None")
if parsed_input is not None:
if not isinstance(parsed_input, list):
raise typemathtextError("'parsed_input' must be either a list or None")
if latex_insert is not None:
if not isinstance(latex_insert, str):
raise typemathtextError("'latex_insert' must be either a string or None")
if parsed_insert is not None:
if not isinstance(parsed_insert, list):
raise typemathtextError("'parsed_insert' must be either a list or None")
if abs_pointer is not None:
if not isinstance(abs_pointer, int):
raise typemathtextError("'abs_pointer' must be either an integer or None")
# equal value warnings
if latex_input is not None and parsed_input is not None:
warnings.warn("typemathtext: 'latex_input' and 'parsed_input' are both set."
"Automatically using 'parsed_input'.")
latex_input = None
if latex_insert is not None and parsed_insert is not None:
warnings.warn("typemathtext: 'latex_insert' and 'parsed_insert' are both set. "
"Automatically using 'parsed_insert'.")
latex_insert = None
if latex_insert is None and parsed_insert is None:
raise typemathtextError("Something must be inserted to edit. "
"If you wish to move the pointer, "
"use the 'set_pointer' method.")
# set values
reference_self = False
if latex_input is not None:
parsed_input = self.parse(latex_input)
if (latex_input, parsed_input) == (None, None):
reference_self = True
parsed_input = self.parsed
pointer = self.pointer
else:
if abs_pointer is None:
pointer = len(parsed_input)
else:
pointer = abs_pointer
# If a LaTeX string is inserted, parse it first
if latex_insert is not None:
parsed_insert = self.parse(latex_insert)
# As parsed_insert will be a list, insert each of its item to the main list and adjust the pointer
for i in parsed_insert:
parsed_input.insert(pointer, i)
pointer += 1
if abs_pointer is None:
self.pointer += 1
# if it is the instance that is edited, adjust accordingly
if reference_self:
self.parsed = parsed_input
self.refresh(self.parsed)
return parsed_insert
def remove(self, removals = 1, pointer_pos = None): # rename removals
r"""Removes a specified amount of items in the 'parsed' attribute.
This mimics the behaviour of a calculator, as it should remove one mathematical
expression at a time rather than individual characters.
Parameters:
        removals (int) -- the number of items to be removed (default: 1)
pointer_pos (int/None) -- sets the position of the pointer (default: None)
If None, then the object instance's 'pointer' attribute
will be used instead.
See the typemath object's help page for more information
on the pointer.
Returns:
This method returns the new 'latex' attribute of the instance that was edited. That
instance will be updated in accordance with the new changes.
Example:
expr = typemath(r"$\int 4x dx + 5$")
expr.remove(2)
Output:
"$\int 4x dx$"
By default, the pointer position is at the end of the string, this deletes two mathematical
objects from it, in this case '5' and '+'. It should be noted that what is returned is a
typemath object, not a string, but the output is simply its represented version (its 'latex'
attribute).
"""
# pointer_pos defaults sets it automatically
if pointer_pos == None:
pointer_pos = self.pointer
i = 1
while i <= removals:
self.parsed.pop(pointer_pos - 1)
pointer_pos -= 1
i += 1
self.pointer = pointer_pos
self.refresh(self.parsed)
return self.parsed
def refresh(self, origin):
# adjust other values (for when one changes, so attributes are not desynced)
if origin is self.parsed:
origin = self.__fixup(origin)
self.deparse()
self.compile()
try:
self.evaluate = eval(self.compiled)
except: # self.compiled may not have correct syntax and return an error
self.evaluate = None
# dunders
def __add__(self, other):
r"""returns a new instance of typemath that adds the value of one typemath
object and either another typemath object or a LaTeX string (shown with
wrapping dollar signs)
If you are looking to reassign a function when adding, use += or edit()
Example:
expr_1 = typemath(r"$\int 4x dx$")
expr_2 = typemath(r"\frac{82}{2}")
Input:
expr_1 + expr_2
Returns:
r"$\int 4x dx + \frac{82}{2}$"
However, this is not a string, but the represented (__repr__) version
        of a new typemath() object (shown here in its LaTeX form)."""
temp_typemath_object = typemath(self.latex)
if type(other) in (str, int, float): # when added to a string (presumed LaTeX string) or integer
temp_typemath_object.edit(latex_insert = f"+{other}")
elif type(other) is typemath: # when added to another typemath object
temp_typemath_object.edit(latex_insert = "$+$")
temp_typemath_object.edit(latex_insert = f"{other.latex}")
else:
raise TypeError(f"'{type(other)}' is not a valid type for this expression.'")
return temp_typemath_object
def __iadd__(self, other):
"""Does the same as __add__, but stores the new object into the
first typemath() object, rather than creating a new object
See the __add__() method's help page for how this will behave."""
        self = self + other
        return self
def __sub__(self, other):
"""Does the same as __add__, but subtracts rather than adds
See __add__ for more information, as all its specifics still apply."""
temp_typemath_object = typemath(self.latex)
if type(other) is str:
temp_typemath_object.edit(latex_insert = f"-({other})")
elif type(other) in (int, float):
temp_typemath_object.edit(latex_insert = f"-{other}")
elif type(other) is typemath:
temp_typemath_object.edit(latex_insert = "$-($")
temp_typemath_object.edit(latex_insert = f"{other.latex}")
temp_typemath_object.edit(latex_insert = "$)$")
else:
raise TypeError(f"'{type(other)}' is not a valid type for this expression.'")
return temp_typemath_object
def __isub__(self, other):
"""Does the same as __sub__, but stores the new object into the
first typemath() object, rather than creating a new object
See the __add__() method's help page for how this will behave."""
self = self - other
return self
def __repr__(self):
"""Returns the 'compiled' attribute of the typemath object."""
return self.compiled
# internal functions
def __concatenate_chars(self, chars, string):
# concatenates consecutive items in a string if they match some string
# e.g. __concatenate_chars(["h", "e", "l", "l", "o"], "he") returns ["he", "l", "l", "o"]
output = chars.copy()
for i in range(len(chars)):
place_check = ""
for k in range(len(string)):
try:
place_check += output[i + k]
except:
break
if place_check == string:
output[i] = string
q = len(string) - 1
                if len(string) >= 2:
while q > 0:
output.pop(i + 1)
q -= 1
return output
@staticmethod
def __concatenate_ints(lst):
# concatenates consecutive numbers, and automatically inserts * for variables next to numbers
# e.g. __concatenate_ints(["5", "4", "x"]) returns ["54", "*", "x"]
output = []
tuple_list = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'x', 'y', "math.e", "math.pi")
for i in range(len(lst)):
try:
if lst[i] in tuple_list and lst[i - 1] in tuple_list:
if lst[i] in ("x", "y", "math.e", "math.pi"):
output.append("*")
output.append(lst[i])
else:
output[-1] += lst[i]
else:
output.append(lst[i])
except:
output.append(lst[i])
return output
def __swap(self, lst, old, new):
# __swaps some value with a new one in a list for all instances of that value
# e.g. __swap([5, 4, 2, 5], 5, 1) returns [1, 4, 2, 5] - the 5's get replaced with 1
lst = lst.copy()
i = 0
for item in lst:
if item == old:
lst[i] = new
i += 1
return lst
def __fixup(self, lst):
# internal function to join together special values as defined in parse_info.json
# this does most of the parsing
lst = lst.copy()
with open(self._parse_info_dir, "r") as f:
doc_ = json.load(f)
specials = doc_["specials"]
specials_get = [item[0] for item in specials]
specials_set = [item[1] for item in specials]
keywords = doc_["keywords"]
keywords_get = [item[0] for item in keywords]
pointouts = doc_["pointouts"]
pointouts_get = [item[0] for item in pointouts]
pointouts_set = [item[1] for item in pointouts]
for i in range(len(specials)):
lst = self.__concatenate_chars(lst, specials_get[i])
lst = self.__swap(lst, specials_get[i], specials_set[i])
for i in range(len(keywords)):
lst = self.__concatenate_chars(lst, keywords_get[i])
for i in range(len(pointouts)):
lst = self.__concatenate_chars(lst, pointouts_get[i])
lst = self.__swap(lst, pointouts_get[i], pointouts_set[i])
return lst
def __reverse_fixup(self, lst):
# Reverses the __fixup function's swap function
# There is no need to reverse __concatenate_chars as it will be
# turned into a string anyway in deparse()
lst = lst.copy()
with open(self._parse_info_dir, "r") as f:
doc_ = json.load(f)
specials = doc_["specials"]
specials_get = [item[0] for item in specials]
specials_set = [item[1] for item in specials]
pointouts = doc_["pointouts"]
pointouts_get = [item[0] for item in pointouts]
pointouts_set = [item[1] for item in pointouts]
for i in range(len(specials)):
lst = self.__swap(lst, specials_set[i], specials_get[i])
for i in range(len(pointouts)):
lst = self.__swap(lst, pointouts_set[i], pointouts_get[i])
return lst
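# --- Illustrative usage (added sketch, not part of the original module) ---
# The constructor and parse() are defined earlier in this file and the exact
# LaTeX accepted depends on parse_info.json, so treat these lines as assumptions:
#   expr = typemath(r"$\int 4x^2 dx$")   # parsed -> ["\int", "4", "*", "x", "**", "2", "dx"]
#   expr.edit(latex_insert="$+4$")       # appends "+", "4" and moves the pointer to 9
#   expr.remove(2)                       # pops those two items back off and refreshes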
``` |
{
"source": "JoshuaKGoldberg/General-Language-Syntax",
"score": 4
} |
#### File: General-Language-Syntax/Samples/basic.py
```python
def sayHello():
print("Hello world!")
def combineStrings(a, b):
return a + b
# Class Declarations
class Point:
x = None
y = None
def __init__(self, x, y):
self.x = x
self.y = y
def setX(self, x):
self.x = x
def setY(self, y):
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
def getManhattanTotal(self):
return self.x + self.y
# Main
if __name__ == '__main__':
# Basic Usage
print("Hello world!") # Basic printing here...
# Variables
a = "Hello world!"
b = 7
c = 11.7
d = True
# Operations
e = 1 + 2
f = b < c
# If Statements
if d:
print("d is true!")
if c < 14:
print("c is less than 14!")
# While Loops
while d:
print("d is", d)
d = False
while c > 3:
print("c is", c)
c -= 1
# For Loops
for i in range(0, 7):
print("i plus one is", i + 1)
# Calling Functions
sayHello()
combineStrings("hello", "world")
combineStrings("hello" + " ", "world")
combineStrings(combineStrings("hello", "world"), "world")
# Class Usage
g = Point(3, 7)
g.setX(4)
print(g.getManhattanTotal())
# fin
``` |
{
"source": "JoshuaKhooWX/IE-ACHE",
"score": 3
} |
#### File: IE-ACHE/Output/dragonfly_private_Output.py
```python
import time
import hashlib
import random
import logging
import socket
import re, uuid
import base64
import os
import subprocess
from collections import namedtuple
from Cryptodome.Cipher import AES
from Cryptodome import Random
import asn1tools
import sys
#Compile asn1 file for secret_key
asn1_file = asn1tools.compile_files('declaration.asn')
#create tcp/ip socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#retrieve local hostname
local_hostname = socket.gethostname()
#get fully qualified hostname
local_fqdn = socket.getfqdn()
#get the according ip address
ip_address = socket.gethostbyname(local_hostname)
#address of the key server this client connects to
server_address = ('192.168.0.3', 4380)
while True:
try:
sock.connect(server_address)
break
except ConnectionRefusedError as conn_error:
print("Attempting to connect to server...")
time.sleep(5)
except:
# print("Unexpected error", sys.exc_info()[0])
continue
print ("Connecting to %s (%s) with %s" % (local_hostname, local_fqdn, ip_address))
logger = logging.getLogger('dragonfly')
logger.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler('dragonfly.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
Point = namedtuple("Point", "x y")
# The point at infinity (origin for the group law).
O = 'Origin'
def lsb(x):
binary = bin(x).lstrip('0b')
return binary[0]
def legendre(a, p):
return pow(a, (p - 1) // 2, p)
def tonelli_shanks(n, p):
"""
# https://rosettacode.org/wiki/Tonelli-Shanks_algorithm#Python
"""
assert legendre(n, p) == 1, "not a square (mod p)"
q = p - 1
s = 0
while q % 2 == 0:
q //= 2
s += 1
if s == 1:
return pow(n, (p + 1) // 4, p)
for z in range(2, p):
if p - 1 == legendre(z, p):
break
c = pow(z, q, p)
r = pow(n, (q + 1) // 2, p)
t = pow(n, q, p)
m = s
t2 = 0
while (t - 1) % p != 0:
t2 = (t * t) % p
for i in range(1, m):
if (t2 - 1) % p == 0:
break
t2 = (t2 * t2) % p
b = pow(c, 1 << (m - i - 1), p)
r = (r * b) % p
c = (b * b) % p
t = (t * c) % p
m = i
return r
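# Added illustration (not part of the original protocol flow): tonelli_shanks
# returns a modular square root, e.g. 7*7 % 13 == 10, so tonelli_shanks(10, 13)
# gives 7 (the other root being 13 - 7 = 6).
assert tonelli_shanks(10, 13) in (6, 7)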
class Curve():
"""
Mathematical operations on a Elliptic Curve.
A lot of code taken from:
https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
"""
def __init__(self, a, b, p):
self.a = a
self.b = b
self.p = p
def curve_equation(self, x):
"""
        Evaluates y^2 = x^3 + a*x + b (mod p) for whatever curve parameters this
        Curve was constructed with (the Peer class below supplies brainpoolP256
        parameters rather than NIST P-384).
"""
return (pow(x, 3) + (self.a * x) + self.b) % self.p
def is_quadratic_residue(self, x):
"""
https://en.wikipedia.org/wiki/Euler%27s_criterion
Computes Legendre Symbol.
"""
return pow(x, (self.p-1) // 2, self.p) == 1
def valid(self, P):
"""
Determine whether we have a valid representation of a point
on our curve. We assume that the x and y coordinates
are always reduced modulo p, so that we can compare
two points for equality with a simple ==.
"""
if P == O:
return True
else:
return (
(P.y**2 - (P.x**3 + self.a*P.x + self.b)) % self.p == 0 and
0 <= P.x < self.p and 0 <= P.y < self.p)
def inv_mod_p(self, x):
"""
Compute an inverse for x modulo p, assuming that x
is not divisible by p.
"""
if x % self.p == 0:
raise ZeroDivisionError("Impossible inverse")
return pow(x, self.p-2, self.p)
def ec_inv(self, P):
"""
Inverse of the point P on the elliptic curve y^2 = x^3 + ax + b.
"""
if P == O:
return P
return Point(P.x, (-P.y) % self.p)
def ec_add(self, P, Q):
"""
Sum of the points P and Q on the elliptic curve y^2 = x^3 + ax + b.
https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
"""
if not (self.valid(P) and self.valid(Q)):
raise ValueError("Invalid inputs")
# Deal with the special cases where either P, Q, or P + Q is
# the origin.
if P == O:
result = Q
elif Q == O:
result = P
elif Q == self.ec_inv(P):
result = O
else:
# Cases not involving the origin.
if P == Q:
dydx = (3 * P.x**2 + self.a) * self.inv_mod_p(2 * P.y)
else:
dydx = (Q.y - P.y) * self.inv_mod_p(Q.x - P.x)
x = (dydx**2 - P.x - Q.x) % self.p
y = (dydx * (P.x - x) - P.y) % self.p
result = Point(x, y)
# The above computations *should* have given us another point
# on the curve.
assert self.valid(result)
return result
def double_add_algorithm(self, scalar, P):
"""
Double-and-Add Algorithm for Point Multiplication
Input: A scalar in the range 0-p and a point on the elliptic curve P
https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
"""
assert self.valid(P)
b = bin(scalar).lstrip('0b')
T = P
for i in b[1:]:
T = self.ec_add(T, T)
if i == '1':
T = self.ec_add(T, P)
assert self.valid(T)
return T
class Peer:
"""
Implements https://wlan1nde.wordpress.com/2018/09/14/wpa3-improving-your-wlan-security/
Take a ECC curve from here: https://safecurves.cr.yp.to/
Example: NIST P-384
y^2 = x^3-3x+27580193559959705877849011840389048093056905856361568521428707301988689241309860865136260764883745107765439761230575
modulo p = 2^384 - 2^128 - 2^96 + 2^32 - 1
2000 NIST; also in SEC 2 and NSA Suite B
See here: https://www.rfc-editor.org/rfc/rfc5639.txt
Curve-ID: brainpoolP256r1
p =
A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377
A =
7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9
B =
26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6
x =
8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262
y =
547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997
q =
A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7
h = 1
"""
def __init__(self, password, mac_address, name):
self.name = name
self.password = password
self.mac_address = mac_address
# Try out Curve-ID: brainpoolP256t1
self.p = int('A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377', 16)
self.a = int('7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9', 16)
self.b = int('<KEY>', 16)
self.q = int('A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7', 16)
self.curve = Curve(self.a, self.b, self.p)
# A toy curve
# self.a, self.b, self.p = 2, 2, 17
# self.q = 19
# self.curve = Curve(self.a, self.b, self.p)
def initiate(self, other_mac, k=40):
"""
See algorithm in https://tools.ietf.org/html/rfc7664
in section 3.2.1
"""
self.other_mac = other_mac
found = 0
num_valid_points = 0
counter = 1
n = self.p.bit_length() + 64
while counter <= k:
base = self.compute_hashed_password(counter)
temp = self.key_derivation_function(n, base, 'Dragonfly Hunting And Pecking')
seed = (temp % (self.p - 1)) + 1
val = self.curve.curve_equation(seed)
if self.curve.is_quadratic_residue(val):
if num_valid_points < 5:
x = seed
save = base
found = 1
num_valid_points += 1
logger.debug('Got point after {} iterations'.format(counter))
counter = counter + 1
if found == 0:
logger.error('No valid point found after {} iterations'.format(k))
elif found == 1:
# https://crypto.stackexchange.com/questions/6777/how-to-calculate-y-value-from-yy-mod-prime-efficiently
# https://rosettacode.org/wiki/Tonelli-Shanks_algorithm
y = tonelli_shanks(self.curve.curve_equation(x), self.p)
PE = Point(x, y)
# check valid point
assert self.curve.curve_equation(x) == pow(y, 2, self.p)
logger.info('[{}] Using {}-th valid Point={}'.format(self.name, num_valid_points, PE))
logger.info('[{}] Point is on curve: {}'.format(self.name, self.curve.valid(PE)))
self.PE = PE
assert self.curve.valid(self.PE)
def commit_exchange(self):
"""
This is basically Diffie Hellman Key Exchange (or in our case ECCDH)
In the Commit Exchange, both sides commit to a single guess of the
password. The peers generate a scalar and an element, exchange them
with each other, and process the other's scalar and element to
generate a common and shared secret.
If we go back to elliptic curves over the real numbers, there is a nice geometric
interpretation for the ECDLP: given a starting point P, we compute 2P, 3P, . . .,
d P = T , effectively hopping back and forth on the elliptic curve. We then publish
the starting point P (a public parameter) and the final point T (the public key). In
order to break the cryptosystem, an attacker has to figure out how often we “jumped”
on the elliptic curve. The number of hops is the secret d, the private key.
"""
# seed the PBG before picking a new random number
# random.seed(time.process_time())
# None or no argument seeds from current time or from an operating
# system specific randomness source if available.
random.seed()
# Otherwise, each party chooses two random numbers, private and mask
self.private = random.randrange(1, self.p)
self.mask = random.randrange(1, self.p)
logger.debug('[{}] private={}'.format(self.name, self.private))
logger.debug('[{}] mask={}'.format(self.name, self.mask))
# These two secrets and the Password Element are then used to construct
# the scalar and element:
# what is q?
# o A point, G, on the elliptic curve, which serves as a generator for
# the ECC group. G is chosen such that its order, with respect to
# elliptic curve addition, is a sufficiently large prime.
#
# o A prime, q, which is the order of G, and thus is also the size of
# the cryptographic subgroup that is generated by G.
# https://math.stackexchange.com/questions/331329/is-it-possible-to-compute-order-of-a-point-over-elliptic-curve
# In the elliptic Curve cryptography, it is said that the order of base point
# should be a prime number, and order of a point P is defined as k, where kP=O.
# Theorem 9.2.1 The points on an elliptic curve together with O
# have cyclic subgroups. Under certain conditions all points on an
# elliptic curve form a cyclic group.
# For this specific curve the group order is a prime and, according to Theo-
# rem 8.2.4, every element is primitive.
# Question: What is the order of our PE?
# the order must be p, since p is a prime
self.scalar = (self.private + self.mask) % self.q
# If the scalar is less than two (2), the private and mask MUST be
# thrown away and new values generated. Once a valid scalar and
# Element are generated, the mask is no longer needed and MUST be
# irretrievably destroyed.
if self.scalar < 2:
raise ValueError('Scalar is {}, regenerating...'.format(self.scalar))
P = self.curve.double_add_algorithm(self.mask, self.PE)
# get the inverse of res
# −P = (x_p , p − y_p ).
self.element = self.curve.ec_inv(P)
assert self.curve.valid(self.element)
# The peers exchange their scalar and Element and check the peer's
# scalar and Element, deemed peer-scalar and Peer-Element. If the peer
# has sent an identical scalar and Element -- i.e., if scalar equals
        # peer-scalar and Element equals Peer-Element -- it is a sign of a
# reflection attack, and the exchange MUST be aborted. If the values
# differ, peer-scalar and Peer-Element must be validated.
logger.info('[{}] Sending scalar and element to the Peer!'.format(self.name))
logger.info('[{}] Scalar={}'.format(self.name, self.scalar))
logger.info('[{}] Element={}'.format(self.name, self.element))
return self.scalar, self.element
def compute_shared_secret(self, peer_element, peer_scalar, peer_mac):
"""
ss = F(scalar-op(private,
element-op(peer-Element,
scalar-op(peer-scalar, PE))))
AP1: K = private(AP1) • (scal(AP2) • P(x, y) ◊ new_point(AP2))
= private(AP1) • private(AP2) • P(x, y)
AP2: K = private(AP2) • (scal(AP1) • P(x, y) ◊ new_point(AP1))
= private(AP2) • private(AP1) • P(x, y)
A shared secret element is computed using one’s rand and
the other peer’s element and scalar:
Alice: K = rand A • (scal B • PW + elemB )
Bob: K = rand B • (scal A • PW + elemA )
Since scal(APx) • P(x, y) is another point, the scalar multiplied point
of e.g. scal(AP1) • P(x, y) is added to the new_point(AP2) and afterwards
multiplied by private(AP1).
"""
self.peer_element = peer_element
self.peer_scalar = peer_scalar
self.peer_mac = peer_mac
assert self.curve.valid(self.peer_element)
# If both the peer-scalar and Peer-Element are
# valid, they are used with the Password Element to derive a shared
# secret, ss:
Z = self.curve.double_add_algorithm(self.peer_scalar, self.PE)
ZZ = self.curve.ec_add(self.peer_element, Z)
K = self.curve.double_add_algorithm(self.private, ZZ)
self.k = K[0]
logger.info('[{}] Shared Secret ss={}'.format(self.name, self.k))
own_message = '{}{}{}{}{}{}'.format(self.k , self.scalar , self.peer_scalar , self.element[0] , self.peer_element[0] , self.mac_address).encode()
H = hashlib.sha256()
H.update(own_message)
self.token = H.hexdigest()
return self.token
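    # Added note on why both sides agree (using the names above): the peer sends
    # scalar = (private_p + mask_p) mod q and element = -(mask_p * PE), so
    #   peer_scalar * PE + peer_element = private_p * PE
    # and multiplying by our own private gives private * private_p * PE on either
    # side; k is the shared x-coordinate of that point.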
def confirm_exchange(self, peer_token):
"""
In the Confirm Exchange, both sides confirm that they derived the
same secret, and therefore, are in possession of the same password.
"""
peer_message = '{}{}{}{}{}{}'.format(self.k , self.peer_scalar , self.scalar , self.peer_element[0] , self.element[0] , self.peer_mac).encode()
H = hashlib.sha256()
H.update(peer_message)
self.peer_token_computed = H.hexdigest()
logger.info('[{}] Computed Token from Peer={}'.format(self.name, self.peer_token_computed))
logger.info('[{}] Received Token from Peer={}'.format(self.name, peer_token))
# Pairwise Master Key” (PMK)
# compute PMK = H(k | scal(AP1) + scal(AP2) mod q)
pmk_message = '{}{}'.format(self.k, (self.scalar + self.peer_scalar) % self.q).encode()
#H = hashlib.sha256()
#H.update(pmk_message)
self.PMK = hashlib.sha256(pmk_message).digest()
logger.info('[{}] Pairwise Master Key(PMK)={}'.format(self.name, self.PMK))
return self.PMK
def key_derivation_function(self, n, base, seed):
"""
B.5.1 Per-Message Secret Number Generation Using Extra Random Bits
Key derivation function from Section B.5.1 of [FIPS186-4]
The key derivation function, KDF, is used to produce a
bitstream whose length is equal to the length of the prime from the
group's domain parameter set plus the constant sixty-four (64) to
derive a temporary value, and the temporary value is modularly
reduced to produce a seed.
"""
combined_seed = '{}{}'.format(base, seed).encode()
# base and seed concatenated are the input to the RGB
random.seed(combined_seed)
# Obtain a string of N+64 returned_bits from an RBG with a security strength of
# requested_security_strength or more.
randbits = random.getrandbits(n)
binary_repr = format(randbits, '0{}b'.format(n))
assert len(binary_repr) == n
logger.debug('Rand={}'.format(binary_repr))
# Convert returned_bits to the non-negative integer c (see Appendix C.2.1).
C = 0
for i in range(n):
if int(binary_repr[i]) == 1:
C += pow(2, n-i)
logger.debug('C={}'.format(C))
#k = (C % (n - 1)) + 1
k = C
logger.debug('k={}'.format(k))
return k
def compute_hashed_password(self, counter):
maxm = max(self.mac_address, self.other_mac)
minm = min(self.mac_address, self.other_mac)
message = '{}{}{}{}'.format(maxm, minm, self.password, counter).encode()
logger.debug('Message to hash is: {}'.format(message))
H = hashlib.sha256()
H.update(message)
digest = H.digest()
return digest
def decrypting(key, filename):
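    # Added note: this assumes the layout written by the matching encryptor
    # (not shown in this file): a 16-byte ASCII plaintext length, a 16-byte IV,
    # then AES-CBC ciphertext in 64 KiB chunks. The plaintext is truncated back
    # to the recorded length and written beside the input minus its '.hacklab'
    # suffix.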
chunksize = 64 * 1024
outputFile = filename.split('.hacklab')[0]
with open(filename, 'rb') as infile:
filesize = int(infile.read(16))
IV = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, IV)
with open(outputFile, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(filesize)
return outputFile
def handshake():
#Own mac address
own_mac = (':'.join(re.findall('..', '%012x' % uuid.getnode())))
#Encode MAC address with BER
own_mac_BER = asn1_file.encode('DataMac', {'data': own_mac})
print (own_mac)
sta = Peer('abc1238', own_mac, 'STA')
logger.info('Starting hunting and pecking to derive PE...\n')
sock.send(own_mac_BER)
raw_other_mac = sock.recv(1024)
#decode BER and get mac address
other_decode_mac = asn1_file.decode('DataMac', raw_other_mac)
other_mac = other_decode_mac.get('data')
print ('Received', other_mac)
sta.initiate(other_mac)
print()
logger.info('Starting dragonfly commit exchange...\n')
scalar_sta, element_sta = sta.commit_exchange()
#Send BER encodewd Scalar / element ap to peer
scalar_complete = ("\n".join([str(scalar_sta), str(element_sta)]))
scalar_element_BER = asn1_file.encode('DataScalarElement',{'data':scalar_complete})
sock.sendall(scalar_element_BER)
print()
print('data send', scalar_complete)
logger.info('Computing shared secret...\n')
#receive BER encoded scalar / element ap
scalar_element_ap_BER = sock.recv(1024)
scalar_element_ap_decoded = asn1_file.decode('DataScalarElement', scalar_element_ap_BER)
scalar_element_ap = scalar_element_ap_decoded.get('data')
# scalar_element_ap = sock.recv(1024).decode()
print('scalar element received ', scalar_element_ap)
data = scalar_element_ap.split('\n')
# print (data[0])
# print (data[1])
scalar_ap = data[0]
element_ap = data[1]
print()
print ('scalar_ap recv:',scalar_ap)
print()
print ('element_ap recv:',element_ap)
print ()
print ()
namedtuple_element_ap = eval(element_ap)
print (namedtuple_element_ap.y, namedtuple_element_ap.x)
print ()
print ()
sta_token = sta.compute_shared_secret(namedtuple_element_ap, int(scalar_ap), other_mac)
#Encode sta_token to be BER encoded and send to peer
staToken_encoded = asn1_file.encode('DataStaAp',{'data':sta_token})
sock.send(staToken_encoded)
# sock.send(sta_token.encode())
print("sta_token", sta_token)
print()
logger.info('Confirm Exchange...\n')
#Receive BER encoded AP Token and decode it
apToken_encoded = sock.recv(1024)
apToken_decoded = asn1_file.decode('DataStaAp', apToken_encoded)
ap_token = apToken_decoded.get('data')
print('received ap token', ap_token)
PMK_Key = sta.confirm_exchange(ap_token)
#print (PMK_Key)
#encrypted = sock.recv(1024).decode()
#print ("Encrypted ciphertext: ", encrypted)
# Decrypt using PMK_Key
#decrypted = decrypt(encrypted, PMK_Key)
#print (decrypted.decode())
# Open the received secret file from the key generator
with open('secret.key.hacklab', 'wb') as s, open('nbit.key.hacklab', 'wb') as t:
print ('File opened...\n')
while True:
# print ('Receiving data...\n')
keys_BER = sock.recv(16396, socket.MSG_WAITALL)
if (len(keys_BER) > 10):
keys_decoded = asn1_file.decode('DataKey', keys_BER)
secret_key = keys_decoded.get('key')
nbit_key = keys_decoded.get('nbit')
time.sleep(0.001)
else:
break
if not (secret_key or nbit_key):
break
s.write(secret_key)
t.write(nbit_key)
s.close()
t.close()
print ('Successfully got the file\n')
print ('Encrypted secret file size: ', os.path.getsize('secret.key.hacklab'))
print ('Encrypted nbit file size: ', os.path.getsize('nbit.key.hacklab'))
print ('Decrypting the files...\n')
decrypted_secret_key = decrypting(PMK_Key, 'secret.key.hacklab')
print('Acquired original secret key file size: ', os.path.getsize(decrypted_secret_key))
os.system("md5sum secret.key")
decrypted_nbit_key = decrypting(PMK_Key, 'nbit.key.hacklab')
print('Acquired original nbit key file size: ', os.path.getsize(decrypted_nbit_key))
os.system("md5sum nbit.key")
def tests():
"""
Test the Curve class.
See Understanding Cryptography ECC Section.
"""
a, b, p = 2, 2, 17
curve = Curve(a, b, p)
P = Point(5, 1)
assert curve.double_add_algorithm(19, P) == O
T = P
for i in range(p+1):
T = curve.ec_add(T, P)
assert curve.double_add_algorithm(19, P) == T
if __name__ == '__main__':
#tests()
handshake()
``` |
{
"source": "Joshuakim1011/crash-model",
"score": 3
} |
#### File: src/data/add_waze_data.py
```python
import argparse
from . import util
import os
import json
import geojson
from collections import defaultdict
from .record import Record
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))))
def get_linestring(value):
"""
Turns a waze linestring into a geojson linestring
Args:
value - the waze dict for this line
Returns:
geojson linestring with properties
"""
line = value['line']
coords = [(x['x'], x['y']) for x in line]
return geojson.Feature(
geometry=geojson.LineString(coords),
properties=value
)
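# Added illustration of the expected shape (values are made up):
#   {'line': [{'x': -71.06, 'y': 42.36}, {'x': -71.05, 'y': 42.35}], 'level': 3, ...}
# becomes a geojson Feature whose geometry is LineString([(-71.06, 42.36), (-71.05, 42.35)])
# and whose properties are the original waze dict.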
def get_features(waze_info, properties, num_snapshots):
"""
Given a dict with keys of segment id, and val a list of waze jams
(for now, just jams), the properties of a road segment, and the
total number of snapshots we're looking at, update the road segment's
properties to include features
Args:
waze_info - dict
properties - dict
num_snapshots
Returns
properties
"""
# Waze feature list
# jam_percent - percentage of snapshots that have a jam on this segment
if properties['segment_id'] in waze_info:
# only count one jam per snapshot on a road
num_jams = len(set([x['properties']['snapshotId']
for x in waze_info[properties['segment_id']]]))
# The average jam level across all jam instances
avg_level_when_jammed = round(sum(
[x['properties']['level']
for x in waze_info[properties['segment_id']]]
)/len(waze_info[properties['segment_id']]))
avg_speed = round(sum(
[x['properties']['speed']
for x in waze_info[properties['segment_id']]]
)/len(waze_info[properties['segment_id']]))
else:
num_jams = 0
avg_speed = 0
avg_level_when_jammed = 0
# Turn into number between 0 and 100
properties.update(jam_percent=100*num_jams/num_snapshots)
properties.update(jam=1 if num_jams else 0)
properties.update(avg_jam_speed=avg_speed)
properties.update(avg_jam_level=avg_level_when_jammed)
return properties
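# Added worked example (hypothetical numbers): with num_snapshots == 10 and three
# distinct snapshots jamming a segment at levels 2, 3, 4 and speeds 10, 20, 30,
# the segment ends up with jam_percent == 30, jam == 1, avg_jam_level == 3 and
# avg_jam_speed == 20; segments absent from waze_info get zeros for all four.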
def add_alerts(items, road_segments):
roads, roads_index = util.index_segments(
road_segments, geojson=True, segment=True)
# We'll want to consider making these point-based features at some point
items = [Record(x) for x in items
if x['eventType'] == 'alert']
util.find_nearest(
items, roads, roads_index, 30, type_record=True)
# Turn records into a dict
items_dict = defaultdict(dict)
for item in items:
if item.properties['type'] not in items_dict[item.near_id]:
items_dict[item.near_id][item.properties['type']] = 0
items_dict[item.near_id][item.properties['type']] += 1
for road in road_segments:
properties = road.properties
if properties['id'] in items_dict:
for key in items_dict[properties['id']].keys():
properties['alert_' + key] = items_dict[properties['id']][key]
road.properties = properties
return road_segments
def map_segments(datadir, filename, forceupdate=False):
"""
Map a set of waze segment info (jams) onto segments drawn from
openstreetmap: the osm_elements.geojson file
Args:
datadir - directory where the city's data is found
filename - the filename of the json aggregated waze file
Returns:
nothing - just updates osm_elements.geojson and writes
a jams.geojson with the segments that have jams
"""
items = json.load(open(filename))
# Get the total number of snapshots in the waze data
num_snapshots = max([x['snapshotId'] for x in items])
osm_file = os.path.join(
datadir,
'processed',
'maps',
'osm_elements.geojson'
)
road_segments, inters = util.get_roads_and_inters(osm_file)
if 'jam' in road_segments[0].properties and not forceupdate:
print("Already processed waze data")
return
# Add jam and alert information
road_segments, roads_with_jams = add_jams(
items, road_segments, inters, num_snapshots)
road_segments = add_alerts(items, road_segments)
# Convert into format that util.prepare_geojson is expecting
geojson_roads = []
for road in road_segments:
geojson_roads.append({
'geometry': {
'coordinates': [x for x in road.geometry.coords],
'type': 'LineString'
},
'properties': road.properties
})
# Convert this back to geojson from shapely point
inters = [{
'geometry': {
'type': 'Point',
'coordinates': [x['geometry'].x, x['geometry'].y],
},
'properties': x['properties']
} for x in inters]
results = util.prepare_geojson(geojson_roads + inters)
with open(osm_file, 'w') as outfile:
geojson.dump(results, outfile)
jam_results = util.prepare_geojson(roads_with_jams)
with open(os.path.join(
datadir,
'processed',
'maps',
'jams.geojson'), 'w') as outfile:
geojson.dump(jam_results, outfile)
def add_jams(items, road_segments, inters, num_snapshots):
# Only look at jams for now
items = [get_linestring(x) for x in items
if x['eventType'] == 'jam']
items = util.reproject_records(items)
# Get roads_and_inters returns elements that have shapely geometry
# In order to output the unchanged points back out at the end,
# Need to convert to geojson
# This is something that should be addressed
inters = [{'properties': x['properties'], 'geometry': {
'type': 'Point',
'coordinates': [x['geometry'].x, x['geometry'].y]
}} for x in inters]
roads, roads_index = util.index_segments(
road_segments, geojson=True, segment=True)
road_buffers = []
for road in roads:
road_buffers.append(road[0].buffer(3))
print("read in {} road segments".format(len(roads)))
waze_info = defaultdict(list)
count = 0
for item in items:
count += 1
if item['properties']['eventType'] == 'jam':
for idx in roads_index.intersection(item['geometry'].bounds):
segment = roads[idx]
buff = road_buffers[idx]
# But if the roads share a name,
# increase buffer size, in case of a median segment
# Waze does not appear to specify which direction
if 'street' in item['properties'] and segment[1]['name'] and \
item['properties']['street'].split()[0] == segment[1]['name'].split()[0]:
buff = segment[0].buffer(10)
overlap = buff.intersection(item['geometry'])
if not overlap.length or \
(overlap.length < 20 and segment[0].length > 20):
# Skip segments with no overlap
# or very short overlaps
continue
waze_info[segment[1]['segment_id']].append(item)
# Add waze features
roads_with_jams = []
for road in road_segments:
properties = get_features(
waze_info,
road.properties,
num_snapshots
)
road.properties = properties
if properties['segment_id'] in waze_info:
roads_with_jams.append({
'geometry': {
'coordinates': [x for x in road.geometry.coords],
'type': 'LineString'
},
'properties': properties
})
return road_segments, roads_with_jams
def make_map(filename, datadir):
"""
Turns a json file into a geojson file of linestrings
Used mainly for visualization/debugging
It is not a simplified set of linestrings, but rather a
linestring for each jam instance (even if multiple jam
instances are on the same segment)
Args:
filename - input json file
datadir - directory to write the waze.geojson file out
"""
items = json.load(open(filename))
geojson_items = []
for item in items:
if item['eventType'] == 'jam':
geojson_items.append(get_linestring(item))
with open(os.path.join(datadir, 'waze.geojson'), 'w') as outfile:
geojson.dump(geojson.FeatureCollection(geojson_items), outfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--datadir", type=str,
help="data directory")
parser.add_argument('--forceupdate', action='store_true',
help='Whether to force update of the waze data')
args = parser.parse_args()
infile = os.path.join(args.datadir, 'standardized', 'waze.json')
# make_map(infile, os.path.join(args.datadir, 'processed', 'maps'))
map_segments(args.datadir, infile, forceupdate=args.forceupdate)
``` |
{
"source": "joshuakim314/bridge-robo-advisor",
"score": 4
} |
#### File: bridge-robo-advisor/business_logic/db_connect.py
```python
import numpy as np
import pandas as pd
import psycopg2.extensions
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
def convert_db_fetch_to_df(fetched, column_names=None):
"""
This method converts the cursor.fetchall() output of SELECT query into a Pandas dataframe.
:param fetched: the output of SELECT query
:type fetched: list of row tuples
:param column_names: column names to use for the dataframe
:type column_names: list of column names
:return: converted dataframe
:rtype: Pandas dataframe
"""
return pd.DataFrame(fetched, columns=column_names)
conn = psycopg2.connect(
host='database-1.csuf8nkuxrw3.us-east-2.rds.amazonaws.com',
port=5432,
user='postgres',
password='<PASSWORD>',
database='can2_etfs'
)
print('connected to postgres db')
conn.autocommit = True
cursor = conn.cursor()
sql = '''SELECT * FROM prices ORDER BY date ASC, ticker ASC LIMIT 10'''
cursor.execute(sql)
selected = cursor.fetchall()
print(convert_db_fetch_to_df(selected))
cursor.close()
print("end")
```
#### File: bridge-robo-advisor/business_logic/portfolio_opt_front_end.py
```python
import yfinance as yf
import matplotlib.pyplot as plt
import collections
import pandas as pd
import numpy as np
import cvxpy as cp
import efficient_frontier
import param_estimator
import backtest
import objective_functions
def port_opt(stock_picks, weight_constraints, control, trade_horizon, cardinality, target_return, risk_aversion):
selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
if cardinality >= 20:
selected_etfs = ['IWD', 'IYH', 'IYW', 'MDY', 'EWT', 'XLE', 'EWZ', 'EWY', 'IWB', 'EZU']
num_stocks = len(stock_picks)
train_start, train_end = '2016-12-01', '2021-11-30'
etf_table = 'americanetfs'
etf_tickers = selected_etfs
etf_returns_by_tick = []
for tick in etf_tickers:
returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
etf_returns_by_tick += [returns[[tick]]]
etf_returns = pd.concat(etf_returns_by_tick, axis=1).T.dropna()
train_etf_returns = etf_returns.T
etf_table = 'spy'
print(stock_picks)
stock_returns_by_tick = []
for tick in stock_picks:
returns = param_estimator.get_returns(tick, etf_table, train_start, train_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
stock_returns_by_tick += [returns[[tick]]]
stock_returns = pd.concat(stock_returns_by_tick, axis=1).T.dropna()
train_stock_returns = stock_returns.T
# Fama-French factors
train_factors = param_estimator.get_factors(start=int(train_start[0:4] + train_start[5:7]),
end=int(train_end[0:4] + train_end[5:7]), freq='monthly')
asset_universe = stock_picks + selected_etfs
train_returns = pd.concat([train_stock_returns, train_etf_returns], axis=1)
# historical average param. estimation
mu, sample_cov = backtest.historical_avg(train_returns, 12 * 5, 12)
print(sample_cov)
factor_model = dict()
for tick in asset_universe:
merged = pd.merge(train_factors, train_returns[[tick]], left_on='date', right_on='date', how="inner",
sort=False)
ff5 = merged[['excess', 'smb', 'hml', 'rmw', 'cma']]
merged[tick] = merged[tick] - merged['riskfree'].astype('float')/100.0
adj_returns = merged[[tick]]
alpha = 1e-1
l1_ratio = 0.05
en5, en_r_sq5 = param_estimator.EN(ff5, adj_returns[tick], alpha=alpha, l1_ratio=l1_ratio)
factor_model[tick] = en5
# arima-garch
ag = param_estimator.arima_garch(train_factors[['excess', 'smb', 'hml', 'rmw', 'cma']], trade_horizon=trade_horizon,
columns=['excess', 'smb', 'hml', 'rmw', 'cma'])
mu_factor = []
for month in range(trade_horizon):
mu_month = []
for tick in asset_universe:
data = [ag[factor_name][1][month-1] for factor_name in ['excess', 'smb', 'hml', 'rmw', 'cma']]
mu = factor_model[tick].predict(np.array(data).reshape(1, -1))
mu_month.append(mu[0])
mu_factor.append(mu_month)
print(mu_month)
mu_factor = [pd.Series(mu_factor[i]) for i in range(trade_horizon)]
print(mu_factor)
print(sample_cov)
ef = efficient_frontier.EfficientFrontier(mu_factor, sample_cov, trade_horizon=trade_horizon)
# ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
for i in range(num_stocks):
min = np.zeros(shape=len(asset_universe))
max = np.ones(shape=len(asset_universe))
min[i] = weight_constraints[asset_universe[i]][0]/100.0
max[i] = weight_constraints[asset_universe[i]][1]/100.0
ef.add_constraint(lambda w: w >= min, broadcast=False, var_list=[0])
ef.add_constraint(lambda w: w <= max, broadcast=False, var_list=[0])
card = np.zeros(shape=len(asset_universe))
for i in range(num_stocks):
card[i] = 1
ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
print(ef.n_assets)
print(ef.trade_horizon)
print(ef.cov_matrices)
print(ef.expected_returns)
ef.efficient_return(target_return=target_return)
weights = ef.clean_weights()
print(weights)
new_weights = dict(weights)
proper_weights = {}
for key in new_weights.keys():
proper_weights[asset_universe[key]] = weights[key]
print(proper_weights)
weights = pd.DataFrame.from_dict(new_weights, orient='index')
exp_returns = {month: np.dot(mu_factor[month-1], weights) for month in range(trade_horizon)}
ret_exp = {}
for key in exp_returns.keys():
ret_exp[key+1] = (1 + exp_returns[key][0])
for key in ret_exp.keys():
if key != 1:
ret_exp[key] = ret_exp[key]*ret_exp[key-1]
return proper_weights, ret_exp
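# Illustrative call (added sketch; tickers, bounds and settings are hypothetical --
# the stock picks must exist in the 'spy' price table queried above):
#   weights, cum_returns = port_opt(
#       stock_picks=['AAPL', 'MSFT'],
#       weight_constraints={'AAPL': (0, 40), 'MSFT': (0, 40)},
#       control=0.2, trade_horizon=12, cardinality=10,
#       target_return=0.01, risk_aversion=1.0)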
```
#### File: bridge-robo-advisor/front_end/backend_access.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
import time
import plotly.express as px
import psycopg2.extensions
from dateutil.relativedelta import relativedelta
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
conn = psycopg2.connect(
host='database-1.csuf8nkuxrw3.us-east-2.rds.amazonaws.com',
port=5432,
user='postgres',
password='<PASSWORD>',
database='can2_etfs'
)
def convert_db_fetch_to_df(fetched, column_names=None):
"""
This method converts the cursor.fetchall() output of SELECT query into a Pandas dataframe.
:param fetched: the output of SELECT query
:type fetched: list of row tuples
:param column_names: column names to use for the dataframe
:type column_names: list of column names
:return: converted dataframe
:rtype: Pandas dataframe
"""
return pd.DataFrame(fetched, columns=column_names)
def push_new_user(connection, user_array):
#print('connected to postgres db ...')
connection.autocommit = True
cursor = connection.cursor()
#print('connected')
#print('formatting dict to_record')
sql = '''INSERT INTO clients (first, last, email, password, risk, control, horizon, return, max_card)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)'''
cursor.execute(sql, user_array)
#print("end")
return True
def pull_user_data(connection, email, pswrd):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''SELECT * FROM clients WHERE (email like '{email}') AND (password like '{pswrd}')'''
cursor.execute(sql)
selected = cursor.fetchall()
cursor.close()
#print("end")
df = convert_db_fetch_to_df(selected, column_names=['First', 'Last', 'Email', 'Password', 'Risk', 'Control', 'Horizon', 'Return', 'Max'])
dct = {
'First': df['First'][0],
'Last': df['Last'][0],
'Email': df['Email'][0],
'Password': df['Password'][0],
'Risk': df['Risk'][0],
'Control': df['Control'][0],
'Horizon': df['Horizon'][0],
'Return': df['Return'][0],
'Max': df['Max'][0]
}
return dct
def update_risk_control(connection, email, pswrd, risk, control, horizon, ret, max_card):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''UPDATE clients SET risk = {risk}, control = {control}, horizon={horizon}, return={ret}, max_card={max_card} WHERE (email like'{email}') AND (password like '{<PASSWORD>}')'''
cursor.execute(sql)
# print("end")
return True
def add_transaction(connection, email, asset, ammount, comment=''):
now = time.localtime()
date = time.strftime('%Y-%m-%d', now)
now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
transact_row = np.array([now, email, asset, ammount, date, comment])
connection.autocommit = True
cursor = connection.cursor()
sql = '''INSERT INTO transactions (datetime, email, ticker, amount, date, comment) VALUES(%s, %s, %s, %s, %s, %s)'''
cursor.execute(sql, transact_row)
return True
def wipe_table(connection, table):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''TRUNCATE TABLE {table}'''
cursor.execute(sql)
return True
def get_portfolio_data(connection, email):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''SELECT ticker, SUM(amount) FROM transactions WHERE (email like '{email}') AND (ticker like 'cash') GROUP BY ticker'''
cursor.execute(sql)
selected = cursor.fetchall()
cursor.close()
# print("end")
df = convert_db_fetch_to_df(selected, column_names=['Ticker', 'Amount'])
ret_dict = df.to_dict('records')
return ret_dict
def get_past_deposits_brief(connection, email):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''SELECT * FROM transactions WHERE (email like '{email}') AND (ticker like 'cash')'''
cursor.execute(sql)
selected = cursor.fetchall()
cursor.close()
# print("end")
df = convert_db_fetch_to_df(selected, column_names=['Date Time', 'Email', 'Ticker', 'Amount', 'Date', 'Comment'])
df = df[['Date Time', 'Amount']]
df['Amount'] = df['Amount'].div(100)
df = df.sort_values(by='Date Time', ascending=False)
df["Date Time"] = df["Date Time"].dt.strftime("%D")
#print(df)
df = df.groupby(["Date Time"]).Amount.sum().reset_index()
# print(df)
# print([{"name": i, "id": i} for i in df.columns])
ret_dict = df.to_dict('records')
#print(ret_dict)
return ret_dict
def get_past_deposits(connection, email):
connection.autocommit = True
cursor = connection.cursor()
sql = f'''SELECT * FROM transactions WHERE (email like '{email}') AND (ticker like 'cash')'''
cursor.execute(sql)
selected = cursor.fetchall()
cursor.close()
# print("end")
df = convert_db_fetch_to_df(selected, column_names=['Date Time', 'Email', 'Ticker', 'Amount', 'Date', 'Comment'])
df = df[['Date Time', 'Amount', 'Comment']]
df['Amount'] = df['Amount'].div(100)
df = df.sort_values(by = 'Date Time', ascending = False)
#print(df)
df["Date Time"] = df["Date Time"].dt.strftime("%A %B %d, %Y - %H:%M:%S")
#print(df)
#print([{"name": i, "id": i} for i in df.columns])
ret_dict = df.to_dict('records')
return ret_dict
def transact_stock(connection, email, stock, ammount):
now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
#print('STOCK TIME')
#print(now)
connection.autocommit = True
cursor = connection.cursor()
#Grab Max Date
sql = f"""
SELECT MAX(date) FROM americanetfs
"""
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date'])
#print(df)
max_date = df['date'][0]
#print(max_date)
    #Calculate the cost of the shares in cents and store as stock_value
sql = f"""SELECT adj_close FROM americanetfs WHERE (date = '{max_date}') AND (ticker like '{stock}')
"""
cursor.execute(sql)
selected = cursor.fetchall()
cadetfs = convert_db_fetch_to_df(selected, column_names=['adj_close'])
sql = f"""SELECT adj_close FROM spy WHERE (date = '{max_date}') AND (ticker like '{stock}')
"""
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['adj_close'])
df = df.append(cadetfs, ignore_index=True)
stock_value = int(100*ammount*df['adj_close'][0])+1
#print(stock_value)
if ammount > 0:
transact_cash_statement = f'Bought {ammount} shares of {stock} for ${stock_value/100:,.2f}'
transact_stock_statement = f'Bought {ammount} shares of {stock}'
else:
transact_cash_statement = f'Sold {abs(ammount)} shares of {stock} for ${stock_value/100:,.2f}'
transact_stock_statement = f'Sold {abs(ammount)} shares of {stock}'
#Transact Cash
add_transaction(connection, email, 'cash', -1*stock_value, transact_cash_statement)
#Transact Stock
add_transaction(connection, email, stock, ammount, transact_stock_statement)
return True
#Transact Stock
def get_portfolio_value_by_date(connection, email):
connection.autocommit = True
cursor = connection.cursor()
# Grab Start Date of User Profile
sql = f"""
SELECT MIN(date) FROM transactions WHERE email like '{email}'
"""
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date'])
min_account_date = df['date'][0]
sql = f'''
DROP VIEW IF EXISTS data5;
DROP VIEW IF EXISTS data4;
DROP VIEW IF EXISTS data3;
DROP VIEW IF EXISTS data2;
CREATE VIEW data2 AS
SELECT * FROM transactions WHERE email like '{email}';
CREATE VIEW data3 AS
SELECT date, ticker, sum(amount)
OVER (PARTITION BY ticker ORDER BY date) AS cum_amt
FROM data2
ORDER BY date, ticker;
CREATE VIEW data4 AS
SELECT date, ticker, cum_amt FROM data3 GROUP BY date, ticker, cum_amt;
SELECT * FROM data4
'''
#
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'cumAmount'])
#print(df)
df['date'] = pd.to_datetime(df['date'])
min_date = df['date'].min().strftime('%Y-%m-%d')
max_date = df['date'].max().strftime('%Y-%m-%d')
list_tickers = list(set(df['ticker'].to_list()))
#print(list_tickers)
if len(list_tickers) > 1:
string_tickers = '('
for tick in list_tickers[:-1]:
string_tickers = string_tickers + (f"'{tick}', ")
string_tickers = string_tickers + f"'{list_tickers[-1]}')"
else:
string_tickers = f"('{list_tickers[0]}')"
#Get price info for unique tickers, between min and max date
### Start hitting prices tables ###
sql = f'''
SELECT date, ticker, adj_close FROM canadianetfs WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
cadetfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM americanetfs WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
ametfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM spy WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
spy = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM tsx60 WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
tsx60 = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
#print(tsx60)
dfpx = cadetfs.append(ametfs, ignore_index=True)
dfpx = dfpx.append(spy, ignore_index=True)
dfpx = dfpx.append(tsx60, ignore_index=True)
#print(dfpx)
#print(dfpx)
cursor.close()
dfpx['date'] = pd.to_datetime(dfpx['date'])
df.set_index(['date', 'ticker'], inplace=True)
unique_tickers = df.index.unique(level='ticker')
date_range = pd.DatetimeIndex(pd.date_range(start=min_date, end=max_date, freq="D"))
ticker_date_index = (
pd.MultiIndex
.from_product(
iterables=[date_range, unique_tickers],
names=['date', 'ticker']
)
)
df = df.reindex(ticker_date_index)
df_start = df.loc[slice(min_date), slice(None), :].fillna({'cumAmount':0})
df.update(df_start)
df = df.sort_index(level=1).ffill().reindex(df.index)
#print(df.tail(20))
#print(dfpx.tail(20))
#print(df.reset_index().tail(20))
df = pd.merge(
df,
dfpx,
how="left",
on=['date', 'ticker'],
)
mask = (df['ticker'] == 'cash')
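    # Added note: cash transactions are stored in cents elsewhere in this module
    # (see the div(100) and int(100*...) conversions), so pricing the synthetic
    # 'cash' ticker at 0.01 below converts the cumulative cent balance into dollars.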
df['adj_close'][mask] = 0.01
#print(df.tail(20))
#print(df.reindex(ticker_date_index).tail(20))
df = df.set_index(['date', 'ticker'])
#print(df.tail(20))
df = df.sort_index(level=1).ffill().reindex(df.index)
#print(df.tail(20))
df.fillna(0, inplace=True)
df['value'] = df['cumAmount']*df['adj_close']
#print(df)
df = df.round(2)
port_comp = df.loc[slice(max_date, max_date), slice(None), :].reset_index()[['ticker', 'value']]
port_comp = port_comp[port_comp['value'] != 0]
port_comp.reset_index(inplace=True)
port_comp_dict = {}
for index, row in port_comp.iterrows():
port_comp_dict[row['ticker']] = row['value']
if port_comp_dict == {}:
port_comp_dict = {'No Assets': 1}
df = df.reset_index(level=1)
#print(df.tail(20))
df = df.groupby(['date']).sum().reset_index()[['date','value']]
final_value = {'portfolio_value': round(df['value'].to_list()[-1], 2)}
initial_value = round(df['value'].to_list()[0], 2)
if int(final_value['portfolio_value']) == 0:
port_returns = {'port_returns': 0}
else:
time_difference = relativedelta(datetime.strptime(max_date, "%Y-%m-%d").date(), datetime.strptime(min_date, "%Y-%m-%d").date())
difference_in_years = time_difference.years + time_difference.months*(1/12) + time_difference.days*(1/365)
port_returns = (((final_value['portfolio_value'] - initial_value)/initial_value) + 1)**(1/difference_in_years) - 1
port_returns = {'port_returns': round(port_returns,3)}
data = df.round(2).to_json(date_format='iso', orient='split')
return data, final_value, port_returns, port_comp_dict
def get_all_tickers(connection):
connection.autocommit = True
cursor = connection.cursor()
#sql = f'''SELECT DISTINCT(ticker) FROM spy'''
#cursor.execute(sql)
#selected = cursor.fetchall()
#spy = convert_db_fetch_to_df(selected, column_names=['ticker'])
sql = f'''SELECT DISTINCT(ticker) FROM spy'''
cursor.execute(sql)
selected = cursor.fetchall()
tsx60 = convert_db_fetch_to_df(selected, column_names=['ticker'])
#dfpx = spy.append(tsx60, ignore_index=True)
dfpx = tsx60
cursor.close()
ret_dict = {'stocks': dfpx['ticker'].to_list()}
# print("end")
return ret_dict
def get_portfolio_weights(stock_dict, account_info, ):
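    # Added note: this appears to be a front-end placeholder -- it ignores
    # account_info, splits the allocation equally across the picked stocks and
    # returns hard-coded expected monthly return bounds instead of calling the
    # optimiser.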
optimal_portfolio = {}
numStocks = len(stock_dict.keys())
for i in range(0, numStocks):
optimal_portfolio[stock_dict[i]['stock']] = (1/numStocks)
expected_monthly_returns = {
'exp': 1.015,
'ub': 1.02,
'lb': 1.01
}
return optimal_portfolio, expected_monthly_returns
def get_trades(connection, email, opt, rets):
connection.autocommit = True
cursor = connection.cursor()
# Grab Start Date of User Profile
sql = f"""
SELECT MIN(date) FROM transactions WHERE email like '{email}'
"""
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date'])
min_account_date = df['date'][0]
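# Build views that turn the user's transaction log into a running (cumulative) position per ticker via a SUM() OVER window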
sql = f'''
DROP VIEW IF EXISTS data5;
DROP VIEW IF EXISTS data4;
DROP VIEW IF EXISTS data3;
DROP VIEW IF EXISTS data2;
CREATE VIEW data2 AS
SELECT * FROM transactions WHERE email like '{email}';
CREATE VIEW data3 AS
SELECT date, ticker, sum(amount)
OVER (PARTITION BY ticker ORDER BY date) AS cum_amt
FROM data2
ORDER BY date, ticker;
CREATE VIEW data4 AS
SELECT date, ticker, cum_amt FROM data3 GROUP BY date, ticker, cum_amt;
SELECT * FROM data4
'''
#
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'cumAmount'])
df['date'] = pd.to_datetime(df['date'])
min_date = df['date'].min().strftime('%Y-%m-%d')
max_date = df['date'].max().strftime('%Y-%m-%d')
list_tickers = list(set(df['ticker'].to_list()))
# print(list_tickers)
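# Assemble a SQL IN clause from the held tickers, e.g. ('AAPL', 'SPY', 'cash')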
if len(list_tickers) > 1:
string_tickers = '('
for tick in list_tickers[:-1]:
string_tickers = string_tickers + (f"'{tick}', ")
string_tickers = string_tickers + f"'{list_tickers[-1]}')"
else:
string_tickers = f"('{list_tickers[0]}')"
# Get price info for unique tickers, between min and max date
### Start hitting prices tables ###
sql = f'''
SELECT date, ticker, adj_close FROM canadianetfs WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
cadetfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM americanetfs WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
ametfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM spy WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
spy = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM tsx60 WHERE (date BETWEEN '{min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
tsx60 = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
# print(tsx60)
dfpx = cadetfs.append(ametfs, ignore_index=True)
dfpx = dfpx.append(spy, ignore_index=True)
dfpx = dfpx.append(tsx60, ignore_index=True)
# print(dfpx)
dfpx['date'] = pd.to_datetime(dfpx['date'])
df.set_index(['date', 'ticker'], inplace=True)
unique_tickers = df.index.unique(level='ticker')
date_range = pd.DatetimeIndex(pd.date_range(start=min_date, end=max_date, freq="D"))
ticker_date_index = (
pd.MultiIndex
.from_product(
iterables=[date_range, unique_tickers],
names=['date', 'ticker']
)
)
df = df.reindex(ticker_date_index)
df = df.sort_index(level=1).ffill().reindex(df.index)
df = pd.merge(
df,
dfpx,
how="left",
on=['date', 'ticker'],
)
mask = (df['ticker'] == 'cash')
df.loc[mask, 'adj_close'] = 0.01
df = df.set_index(['date', 'ticker'])
df = df.sort_index(level=1).ffill().reindex(df.index)
df.fillna(0, inplace=True)
df['value'] = df['cumAmount'] * df['adj_close']
# print(df)
df = df.round(2)
max_port_date = df.index.max()[0]
df = df.loc[max_port_date]
port_total_value = sum(df['value'].to_list())
#print(port_total_value)
#Calculate # of new stocks to by/old to sell
curr_stocks = df.reset_index()[df.reset_index()['ticker'] != 'cash']
old_portfolio_values = df.reset_index()[['ticker', 'value']]
#print('old_portfolio_values')
#print(old_portfolio_values)
#Get list of stocks from optimal dict
list_tickers = list(opt.keys())
# print(list_tickers)
if len(list_tickers) > 1:
string_tickers = '('
for tick in list_tickers[:-1]:
string_tickers = string_tickers + (f"'{tick}', ")
string_tickers = string_tickers + f"'{list_tickers[-1]}')"
else:
string_tickers = f"('{list_tickers[0]}')"
# Get price info for unique tickers, between min and max date
sql = f"""
SELECT MAX(date) FROM americanetfs
"""
cursor.execute(sql)
selected = cursor.fetchall()
df = convert_db_fetch_to_df(selected, column_names=['date'])
max_date = df['date'][0]
### Start hitting prices tables ###
sql = f'''
SELECT date, ticker, adj_close FROM canadianetfs WHERE (date = '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
cadetfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM americanetfs WHERE (date = '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
ametfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM spy WHERE (date = '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
spy = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM tsx60 WHERE (date = '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
tsx60 = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
# print(tsx60)
dfpx = cadetfs.append(ametfs, ignore_index=True)
dfpx = dfpx.append(spy, ignore_index=True)
dfpx = dfpx.append(tsx60, ignore_index=True)
dfpx['new_weight'] = dfpx['ticker'].map(opt)
dfpx['port_value'] = port_total_value
dfpx['new_amount'] = (dfpx['new_weight'] * dfpx['port_value'])/dfpx['adj_close']
dfpx['new_amount'] = dfpx['new_amount'].astype(int)
dfpx['new_value'] = dfpx['new_amount'] * dfpx['adj_close']
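# Money that cannot be allocated to whole shares stays as the new portfolio's cash component (computed in integer cents)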
new_portfolio_cash_component = (int(100*port_total_value) - int(100*sum(dfpx['new_value'])+1))/100
new_portfolio_values = dfpx[['ticker', 'new_value']]
new_portfolio_values.loc[-1] = ['cash', new_portfolio_cash_component]
new_portfolio_values = new_portfolio_values.reset_index(drop=True)
new_portfolio_values = new_portfolio_values.rename(columns={'new_value': 'value'})
#print('new_portfolio_values')
#print(new_portfolio_values.round(2))
#print(f'New cash: {new_portfolio_cash_component}')
dfpx = dfpx[['ticker', 'new_amount']]
# print(dfpx)
curr_stocks = curr_stocks[['ticker', 'cumAmount']]
curr_stocks = curr_stocks.rename(columns={'cumAmount':'old_amount'})
dfpx = pd.merge(
dfpx,
curr_stocks,
how="outer",
on=['ticker'],
)
dfpx.fillna(0, inplace=True)
dfpx['delta'] = dfpx['new_amount'] - dfpx['old_amount']
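# delta > 0 means shares to buy, delta < 0 means shares to sell to reach the target allocation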
tradeDf = dfpx.copy()
#Handle Hypothetical Back Test
back_test_min_date = max_date - relativedelta(years=5)
sql = f'''
SELECT date, ticker, adj_close FROM americanetfs WHERE (date BETWEEN '{back_test_min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
cadetfs = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
sql = f'''
SELECT date, ticker, adj_close FROM spy WHERE (date BETWEEN '{back_test_min_date}' AND '{max_date}') AND (ticker IN {string_tickers})
'''
cursor.execute(sql)
selected = cursor.fetchall()
tsx60 = convert_db_fetch_to_df(selected, column_names=['date', 'ticker', 'adj_close'])
# print(tsx60)
dfpx = cadetfs.append(tsx60, ignore_index=True)
dfpx = pd.merge(
dfpx,
tradeDf[['ticker', 'new_amount']],
how="left",
on=['ticker'],
)
dfpx['value'] = dfpx['adj_close'] * dfpx['new_amount']
########
dfpx = dfpx.reset_index(drop=True)
#print(dfpx)
dfpx = dfpx.set_index(['date', 'ticker'])
#print(dfpx.dtypes)
dfpx = dfpx.sort_index(level=1).ffill().reindex(dfpx.index)
dfpx.fillna(0, inplace=True)
# print(df)
dfpx = dfpx.round(2)
dfpx = dfpx.reset_index(level=1)
#print(dfpx)
dfpx = dfpx.groupby(['date']).sum().reset_index()
#print(dfpx)
dfpx = dfpx.reset_index()[['date', 'value']]
dfpx['value'] = dfpx['value'] + new_portfolio_cash_component
df_future_up = dfpx.copy()[dfpx['date'] == max_date].reset_index(drop=True)
df_future_mid = dfpx.copy()[dfpx['date'] == max_date].reset_index(drop=True)
df_future_down = dfpx.copy()[dfpx['date'] == max_date].reset_index(drop=True)
for key in rets.keys():
future_ret_date = max_date + relativedelta(months=key)
df_future_up.loc[-1] = [future_ret_date, df_future_up['value'][0]*rets[key]]
df_future_up = df_future_up.reset_index(drop=True)
df_future_mid.loc[-1] = [future_ret_date, df_future_mid['value'][0] * rets[key]]
df_future_mid = df_future_mid.reset_index(drop=True)
df_future_down.loc[-1] = [future_ret_date, df_future_down['value'][0] * rets[key]]
df_future_down = df_future_down.reset_index(drop=True)
print(df_future_up)
return dfpx, df_future_up, df_future_mid, df_future_down, tradeDf[['ticker', 'delta']].to_dict('records'), old_portfolio_values.round(2), new_portfolio_values.round(2)
#test = {0: {'stock': 'L.TO', 'range': [0, 100]}, 1: {'stock': 'SAP.TO', 'range': [0, 100]}, 2: {'stock': 'WCN.TO', 'range': [0, 100]}}
#opt, rets = get_portfolio_weights(test, True)
#bt, fu, fm, fd, trades = get_trades(conn, '<EMAIL>', opt, rets)
#print(trades)
#print(opt)
#print(rets)
#transact_stock(conn, '<EMAIL>', 'RY.TO', -40)
#get_portfolio_value_by_date(conn, '<EMAIL>')
#print(get_all_tickers(conn))
#get_portfolio_value_by_date(conn, '<EMAIL>')
#transact_stock(conn, '<EMAIL>', 'AAPL', 100)
#wipe_table(conn, 'clients')
#wipe_table(conn, 'transactions')
#add_transaction(conn, '<EMAIL>', 'cash', 1000)
#add_transaction(conn, '<EMAIL>', 'cash', 0, 'Initialize Account')
#get_past_deposits(conn, '<EMAIL>')
#print(pull_user_data(conn, '<EMAIL>', 'admin'))
```
#### File: bridge-robo-advisor/front_end/main.py
```python
import dash_bootstrap_components as dbc
from dash import Dash, Input, Output, State, html, dcc, dash_table, callback, dependencies
from dash_extensions.enrich import Output, DashProxy, Input, MultiplexerTransform
from static.welcome_page import welcome_page
from static.sign_up_1 import sign_up_1
from static.sign_up_2 import sign_up_2
from static.main_page import main_page
from static.deposit_page import deposit_page
from static.prefs_page import prefs_page
from static.not_yet_implemented import nyi_page
from static.test_page import test_page
import callbacks
from app_old import app
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content'),
dcc.Store(id='account-info'),
dcc.Store(id='portfolio-info'),
dcc.Store(id='past-deposits'),
dcc.Store(id='past-deposits-brief'),
dcc.Store(id='portfolio-graph-data'),
dcc.Store(id='portfolio-value'),
dcc.Store(id='all-stock-list'),
dcc.Store(id='trades'),
dcc.Store(id='port-comp'),
dcc.Store(id='portfolio-returns')
])
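# The dcc.Store components above keep per-session data (account info, portfolio data, trades) in the browser so callbacks on different pages can share it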
#Page Navigation
@app.callback(dependencies.Output('page-content', 'children'),
[dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/sign_up':
return sign_up_1
if pathname == '/sign_up_cont':
return sign_up_2
if pathname == '/main':
return main_page
if pathname == '/deposit':
return deposit_page
if pathname == '/prefs':
return prefs_page
if pathname == '/nyi':
return nyi_page
if pathname == '/test':
return test_page
else:
return welcome_page
# You could also return a 404 "URL not found" page here
if __name__ == '__main__':
app.run_server(port=8080, debug=True)
```
#### File: front_end/static/backtest.py
```python
import warnings
import csv
import collections
from functools import reduce
import itertools
import pandas as pd
import numpy as np
import yfinance as yf
import matplotlib.pyplot as plt
import sklearn
import pmdarima as pm
import arch
from arch.__future__ import reindexing
from sklearn.decomposition import PCA
from sklearn_extra.cluster import KMedoids
import static.param_estimator as param_estimator
import static.objective_functions as objective_functions
import static.efficient_frontier as efficient_frontier
import psycopg2.extensions
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
warnings.filterwarnings("ignore")
conn = psycopg2.connect(
host='database-1.csuf8nkuxrw3.us-east-2.rds.amazonaws.com',
port=5432,
user='postgres',
password='<PASSWORD>',
database='can2_etfs'
)
conn.autocommit = True
cursor = conn.cursor()
pd.options.mode.chained_assignment = None # default='warn'
def historical_avg(returns, freq, trade_horizon=12):
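# Geometric average per-period return: compound the (1 + r) factors, take the freq-th root and subtract 1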
mu = returns.agg(lambda data: reduce(lambda x, y: x*y, [x+1 for x in data])**(1/freq) - 1)
mu.rename("mu", inplace=True)
cov = param_estimator.sample_cov(returns, returns_data=True, frequency=freq)
return [mu for _ in range(trade_horizon)], [cov for _ in range(trade_horizon)]
def get_factor_mu(model_name, model, asset_list, factor_columns, ag):
mu_factor = []
for month in range(12):
mu_month = []
for tick in asset_list:
data = [ag[model_name[:3]][factor_name][1][month - 1] for factor_name in factor_columns]
mu = model[(tick, model_name)][0].predict(np.array(data).reshape(1, -1))
mu_month.append(mu[0])
mu_factor.append(mu_month)
mu_factor = [pd.Series(mu_factor[i]) for i in range(12)]
return mu_factor
def get_scenarios(model_name, model, asset_list, factor_columns, ag, res, num_s):
scenarios = []
for s in range(num_s):
mu_factor = []
for month in range(12):
mu_month = []
for idx, tick in enumerate(asset_list):
data = [ag[model_name[:3]][factor_name][1][month - 1] for factor_name in factor_columns]
mu = model[(tick, model_name)][0].predict(np.array(data).reshape(1, -1))
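# Perturb the factor-model prediction with Gaussian noise whose variance is that asset's residual variance for the month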
mu_month.append(mu[0] + np.random.normal(0.0, res[idx][month]**0.5))
mu_factor.append(mu_month)
mu_factor = [pd.Series(mu_factor[i]) for i in range(12)]
scenarios.append(mu_factor)
return scenarios
if __name__ == '__main__':
stock_picks = ['DIS', 'IBM', 'JPM', 'KO', 'WMT']
weights_dict = collections.OrderedDict()
returns_dict = collections.OrderedDict()
assets_dict = collections.OrderedDict()
val_periods = [(f'{str(year)}-01-01', f'{str(year+4)}-12-31', f'{str(year+5)}-01-01', f'{str(year+5)}-12-31')
for year in range(2001, 2011)]
for val in val_periods:
print(val)
train_start, train_end, test_start, test_end = val
etf_table = 'americanetfs'
etf_tickers = param_estimator.get_all_tickers(etf_table)
etf_returns_by_tick = []
for tick in etf_tickers:
returns = param_estimator.get_returns(tick, etf_table, train_start, test_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
etf_returns_by_tick += [returns[[tick]]]
etf_returns = pd.concat(etf_returns_by_tick, axis=1).T.dropna()
train_etf_returns = etf_returns.T.head(12*5)
test_etf_returns = etf_returns.T.tail(12*1)
valid_etf_tickers = train_etf_returns.columns
print(f'number of etfs: {train_etf_returns.shape[1]}, number of months: {train_etf_returns.shape[0]}')
# market return
market_return = historical_avg(train_etf_returns[['SPY']], 60)[0][0].tolist()[0]
print(f'market return: {market_return}')
pca = PCA(n_components='mle')
principal_comp = pca.fit_transform(train_etf_returns.T)
print(f'number of principal components: {principal_comp.shape[1]}')
n_clusters = 10
X = np.asarray(principal_comp)
k_medoids = KMedoids(n_clusters=n_clusters, method='pam', init='k-medoids++').fit(X)
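# Pick one representative ETF per cluster: the ticker whose principal-component row coincides with the cluster medoid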
selected_etfs = [valid_etf_tickers[np.where(X == k)[0][0]] for k in k_medoids.cluster_centers_]
inertia = k_medoids.inertia_
print(f'selected etfs: {selected_etfs}, cluster inertia: {inertia}')
stock_table = 'spy'
stock_returns_by_tick = []
for tick in stock_picks:
returns = param_estimator.get_returns(tick, stock_table, train_start, test_end, freq='monthly')
if returns.empty:
continue
returns[tick] = returns['adj_close']
stock_returns_by_tick += [returns[[tick]]]
stock_returns = pd.concat(stock_returns_by_tick, axis=1).T.dropna()
train_stock_returns = stock_returns.T.head(12*5)
test_stock_returns = stock_returns.T.tail(12*1)
# Fama-French factors
train_factors = param_estimator.get_factors(start=int(train_start[0:4] + train_start[5:7]), end=int(train_end[0:4] + train_end[5:7]), freq='monthly')
test_factors = param_estimator.get_factors(start=int(test_start[0:4] + test_start[5:7]), end=int(test_end[0:4] + test_end[5:7]), freq='monthly')
asset_universe = stock_picks + selected_etfs
train_returns = pd.concat([train_stock_returns, train_etf_returns], axis=1)
test_returns = pd.concat([test_stock_returns, test_etf_returns], axis=1)
assets_dict[test_start] = asset_universe
returns_dict[test_start] = test_returns
# historical average param. estimation
mu, cov = historical_avg(train_returns, 12*5, 12)
# print(mu[0])
# print(cov[0] / 60.0)
print('data collected')
factor_models = collections.OrderedDict()
for tick in asset_universe:
merged = pd.merge(train_factors, train_returns[[tick]], left_on='date', right_on='date', how="inner", sort=False)
ff3 = merged[['excess', 'smb', 'hml']]
ff5 = merged[['excess', 'smb', 'hml', 'rmw', 'cma']]
merged[tick] = merged[tick] - merged['riskfree'].astype('float')/100.0
adj_returns = merged[[tick]]
alphas = [1e-2, 1e-1, 0.0]
l1_ratios = list(np.arange(0, 0.1, 0.05))
mlr3, r_sq3 = param_estimator.MLR(ff3, adj_returns[tick])
factor_models[(tick, 'ff3_mlr')] = (mlr3, r_sq3)
mlr5, r_sq5 = param_estimator.MLR(ff5, adj_returns[tick])
factor_models[(tick, 'ff5_mlr')] = (mlr5, r_sq5)
for alpha, l1_ratio in list(itertools.product(alphas, l1_ratios)):
en3, en_r_sq3 = param_estimator.EN(ff3, adj_returns[tick], alpha=alpha, l1_ratio=l1_ratio)
factor_models[(tick, f'ff3_en_{alpha}_{l1_ratio}')] = (en3, en_r_sq3)
en5, en_r_sq5 = param_estimator.EN(ff5, adj_returns[tick], alpha=alpha, l1_ratio=l1_ratio)
factor_models[(tick, f'ff5_en_{alpha}_{l1_ratio}')] = (en5, en_r_sq5)
# arima-garch
ag = dict()
ag['ff3'] = param_estimator.arima_garch(train_factors[['excess', 'smb', 'hml']], trade_horizon=12, columns=['excess', 'smb', 'hml'])
ag['ff5'] = param_estimator.arima_garch(train_factors[['excess', 'smb', 'hml', 'rmw', 'cma']], trade_horizon=12, columns=['excess', 'smb', 'hml', 'rmw', 'cma'])
months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
model_names = []
mu_factor = collections.OrderedDict()
cov_factor = collections.OrderedDict()
scenarios_factor = collections.OrderedDict()
for key, value in factor_models.items():
tick, model_name = key
model, r_sq = value
model_names += [model_name]
model_names = list(set(model_names))
for model_name in model_names:
factor_columns = ['excess', 'smb', 'hml', 'rmw', 'cma'] if model_name[:3] == 'ff5' else ['excess', 'smb', 'hml']
mu_factor[model_name] = get_factor_mu(model_name, factor_models, asset_universe, factor_columns, ag)
factor_cov = train_factors[factor_columns].cov()
factor_loadings = []
r = []
for tick in asset_universe:
factor_loadings += [factor_models[(tick, model_name)][0].coef_]
res_var = [ag[model_name[:3]][factor][3].residual_variance.values.tolist()[0] for factor in
factor_columns]
r.append([sum(i[0] * i[1] for i in zip([k ** 2 for k in factor_models[(tick, model_name)][0].coef_], [j[month] for j in res_var])) for month in range(12)])
factor_loadings = pd.DataFrame(factor_loadings, columns=factor_columns)
res_diag = [np.diag([res[month] for res in r]) for month in range(12)]
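# Asset covariance implied by the factor model: B @ F @ B.T + D (loadings, factor covariance, diagonal residual variances)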
cov_factor[model_name] = [pd.DataFrame(np.dot(factor_loadings, np.dot(factor_cov, factor_loadings.T)) + res_diag[month], columns=asset_universe) for month in range(12)]
scenarios = get_scenarios(model_name, factor_models, asset_universe, factor_columns, ag, r, 1000)
scenarios_factor[model_name] = scenarios
print('generating portfolios')
# TODO: transaction cost, ellipsoidal uncertainty, multi-period leverage
weight_constraints = {'DIS':(0.05, 1.0), 'IBM':(0.05, 1.0), 'JPM':(0.05, 1.0), 'KO':(0.05, 1.0), 'WMT':(0.05, 1.0)}
for model_name in model_names:
# robust MVO
for conf in [1.645, 1.960, 2.576]:
ef = efficient_frontier.EfficientFrontier(mu_factor[model_name], cov_factor[model_name], trade_horizon=12)
# ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
card = np.zeros(shape=len(asset_universe))
control = 0.50
for i in range(len(stock_picks)):
card[i] = 1
ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
for i in range(len(stock_picks)):
min = np.zeros(shape=len(asset_universe))
max = np.ones(shape=len(asset_universe))
min[i] = weight_constraints[asset_universe[i]][0]
max[i] = weight_constraints[asset_universe[i]][1]
ef.add_constraint(lambda w: w >= min, broadcast=False, var_list=[0])
ef.add_constraint(lambda w: w <= max, broadcast=False, var_list=[0])
ef.robust_efficient_frontier(target_return=market_return, conf=conf)
weights = ef.clean_weights()
weights_dict[(test_start, model_name, conf)] = weights
# risk parity
ef = efficient_frontier.EfficientFrontier(mu_factor[model_name], cov_factor[model_name], trade_horizon=12)
# ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
card = np.zeros(shape=len(asset_universe))
control = 0.50
for i in range(len(stock_picks)):
card[i] = 1
ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
for i in range(len(stock_picks)):
min = np.zeros(shape=len(asset_universe))
max = np.ones(shape=len(asset_universe))
min[i] = weight_constraints[asset_universe[i]][0]
max[i] = weight_constraints[asset_universe[i]][1]
ef.add_constraint(lambda w: w >= min, broadcast=False, var_list=[0])
ef.add_constraint(lambda w: w <= max, broadcast=False, var_list=[0])
ef.risk_parity()
weights = ef.clean_weights()
weights_dict[(test_start, model_name, None)] = weights
# max sharpe ratio
ef = efficient_frontier.EfficientFrontier([mu_factor[model_name][0] for _ in range(2)], [cov_factor[model_name][0] for _ in range(2)], trade_horizon=2)
# ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
card = np.zeros(shape=len(asset_universe))
control = 0.50
for i in range(len(stock_picks)):
card[i] = 1
ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
for i in range(len(stock_picks)):
min = np.zeros(shape=len(asset_universe))
max = np.ones(shape=len(asset_universe))
min[i] = weight_constraints[asset_universe[i]][0]
max[i] = weight_constraints[asset_universe[i]][1]
ef.add_constraint(lambda w: w >= min, broadcast=False, var_list=[0])
ef.add_constraint(lambda w: w <= max, broadcast=False, var_list=[0])
ef.max_sharpe()
weights = ef.clean_weights()
weights_dict[(test_start, model_name, None)] = weights
# cvar opt.
for alpha in [0.90, 0.95, 0.99]:
ef = efficient_frontier.EfficientFrontier(mu_factor[model_name], cov_factor[model_name], trade_horizon=12)
# ef.add_objective(objective_functions.transaction_cost, w_prev=np.zeros(len(asset_universe)), k=0.001)
card = np.zeros(shape=len(asset_universe))
control = 0.50
for i in range(len(stock_picks)):
card[i] = 1
ef.add_constraint(lambda w: card @ w >= control, broadcast=False, var_list=[0])
for i in range(len(stock_picks)):
min = np.zeros(shape=len(asset_universe))
max = np.ones(shape=len(asset_universe))
min[i] = weight_constraints[asset_universe[i]][0]
max[i] = weight_constraints[asset_universe[i]][1]
ef.add_constraint(lambda w: w >= min, broadcast=False, var_list=[0])
ef.add_constraint(lambda w: w <= max, broadcast=False, var_list=[0])
ef.min_cvar(target_return=market_return, scenarios=scenarios_factor[model_name], alpha=alpha)
weights = ef.clean_weights()
weights_dict[(test_start, model_name, alpha)] = weights
cursor.close()
``` |
{
"source": "JoshuaKirby/Python-Nonogram",
"score": 3
} |
#### File: JoshuaKirby/Python-Nonogram/Game.py
```python
import Processing as pc
import random
import math
#TODO
# Win checking
class Game(object):
'''
Nonogram game state: the solution board, the player's guesses (tried),
remaining lives, and the generated row/column hints.
'''
_DEBUG_RANDOM_GEN = 25 # Side length squared
x = 0
y = 0
board = []
tried = []
lives = 5
hints = []
blocksize = 0
def __init__(self,image_path,block_size,randstart):
'''
Constructor
'''
if(randstart):
for i in range(self._DEBUG_RANDOM_GEN):
self.board = self.board + [random.randint(0,1)]
self.x = int(math.sqrt(self._DEBUG_RANDOM_GEN))
self.y = int(math.sqrt(self._DEBUG_RANDOM_GEN))
else:
#Processing builds the board from the image and returns it
self.x,self.y,self.board = pc.boardProcess(image_path,block_size)
#correcting for the board size vs image size
self.x = int(self.x/block_size)
self.y = int(self.y/block_size)
#creates a list to hold player guesses so clicking the same cell twice can't cost extra lives
for i in range(self.x * self.y):
self.tried = self.tried + [0]
self._generate_Hints()
#print(self.y)
#print(self.hints)
#print(len(self.hints) - self.x - self.y)
def checkMove(self,inY,inX):
#changes (X,Y) coord into index
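#e.g. on a 5x5 board, (inY=2, inX=3) becomes index 2*5 + 3 = 13 in the flat board list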
index = (inY * self.y) + inX
#classify the move as correct or incorrect; do nothing if the cell has already been tried
if (self.tried[index] == 1):
return
if(self.board[index] == 1):
self.tried[index] = 1
if self._checkWin():
return "W"
return "C"
if(self.board[index] == 0):
self.tried[index] = 1
self.lives = self.lives - 1
if self.lives <= 0:
return "D"
return "I"
def _generate_Hints(self):
hintval = 0
temp = []
#gets hints for columns
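#e.g. a column reading 1,1,0,1 from top to bottom produces the hint [2, 1]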
for col in range(self.x):
for row in range(self.y):
if(self.board[row*self.y + col] == 1):
hintval = hintval + 1
if(self.board[row*self.y + col] == 0 and hintval > 0):
temp = temp + [hintval]
hintval = 0
if(hintval > 0):
temp = temp + [hintval]
hintval = 0
self.hints = self.hints + [temp]
temp = []
temp = []
#gets hints for rows
for row in range(self.y):
for col in range(self.x):
if(self.board[row*self.y + col] == 1):
hintval = hintval + 1
if(self.board[row*self.y + col] == 0 and hintval > 0):
temp = temp + [hintval]
hintval = 0
if(hintval > 0):
temp = temp + [hintval]
hintval = 0
self.hints = self.hints + [temp]
temp = []
temp = []
#print(self.hints)
'''this makes weird triangle hints, I'm saving it cause it's interesting and
I don't want to kill it by replacing it with the proper function'''
def _generate_Hints_BAK(self):
hintval = 0
temp = []
for val in range(self.x):
for row in range(self.y):
if(self.board[row*self.y + val] == 1):
hintval = hintval + 1
if(self.board[row*self.y + val] == 0 and hintval > 0):
temp = temp + [hintval]
hintval = 0
temp = temp + [hintval]
hintval = 0
if(temp == []):
temp = temp + [0]
self.hints = self.hints + [temp]
temp = []
for row in range(self.y):
for val in range(self.x):
if(self.board[row*self.y + val] == 1):
hintval = hintval + 1
if(self.board[row*self.y + val] == 0 and hintval > 0):
temp = temp + [hintval]
hintval = 0
temp = temp + [hintval]
hintval = 0
if(temp == []):
temp = temp + [0]
self.hints = self.hints + [temp]
temp = []
def _getHints(self,val,orientation):
return self.hints[self.x*orientation + val]
def getHints(self,val,orientation):
#print(self.x,val)
return self.hints[self.x*orientation + val]
def _checkWin(self):
out = True
for i in range(self.x*self.y):
if(self.board[i] == 1 and self.tried[i] != 1):
out = False
return out
def getLives(self):
return self.lives
def getBoard(self):
return self.board
def getTried(self):
return self.tried
def getSize(self):
return self.x,self.y
``` |
{
"source": "JoshuaKnittel/MP4toGIF",
"score": 3
} |
#### File: JoshuaKnittel/MP4toGIF/app.py
```python
import tkinter as tk
from moviepy.editor import VideoFileClip
from PIL import Image, ImageTk # to display logo
from tkinter.filedialog import *
from tkinter import messagebox
root = tk.Tk() # window object.
# Set size of Canvas
canvas = tk.Canvas(root, width=600, height=600)
# Logo
logo = Image.open("logo.png")
logo = ImageTk.PhotoImage(logo)
logo_label = tk.Label(image=logo)
logo_label.image = logo
logo_label.grid(columnspan=2, row=0)
# Function to convert MP4 to GIF
def mp4_to_gif():
# Get the input value for fps
fps = fps_var.get()
if fps > 0:
# Import the mp4 from folder
import_filename = askopenfilename()
if import_filename.endswith(".mp4"):
clip = VideoFileClip(import_filename)
clip.write_gif("app_low.gif", fps=fps)
messagebox.showinfo("success ", "Your video has been converted into a GIF")
else:
messagebox.showerror(title="invalid entry", message="You must choose a number which is greater than 0")
# FPS entry
fps_var = tk.IntVar()
fps_label = tk.Label(root, text = "Choose FPS", font="Raleway")
fps_label.grid(column=0, row=2)
fps_entry = tk.Entry(root, textvariable=fps_var, font="Raleway")
fps_entry.grid(column=1, row=2)
# Instructions
instructions = tk.Label(root, text="Select a MP4 file on your computer", font="Raleway")
instructions.grid(column=0, row=3)
# Browse button
browse_text = tk.StringVar()
browse_btn = tk.Button(root, textvariable=browse_text, command=lambda: mp4_to_gif(), font="Raleway", bg="#8c86fc", height=2, width=15)
browse_text.set("Browse")
browse_btn.grid(column=1, row=3)
root.mainloop()
``` |
{
"source": "joshuakoester82/brobot",
"score": 3
} |
#### File: brobot/brobot/functions.py
```python
import discord
import random
import requests
import datetime
def recolor(str):
c_str = f"```md\n" \
f"{str}\n" \
f"```"
return c_str
def average(list):
return round(sum(list)/len(list),2)
def log(author,message):
date_time = datetime.datetime.now()
print(f"{author} {message} : {date_time}")
def roll(times=0,sides=0):
"""simulate a die roll with arguments times,sides and return a list with roll results"""
try:
if times <= 100 and sides <= 100_000:
result_list = []
for i in range(times):
result_list.append(random.randint(1,sides))
return f"You rolled: {result_list}"
else:
return "Your roll request hurt my feelings."
except:
return "You did something stupid. Please stop that."
def russian_roulette(name):
"""a crude russian roulette simulation. Takes the name of the player and returns a string
with result of simulation"""
outcome_list = [0,0,0,0,0,1]
result = random.choice(outcome_list)
if result == 1:
return f"KABLAMO! {name} blew their brains out. Play stupid games win stupid prizes."
else:
return f"CLICK. {name} lives to play again"
def get_mythic_plus(name,realm="sargeras"):
"""Queries the Raider.io server api with a character name and realm and retrieves a json file. The data is parsed
and returns a string to be displayed or sent to the server"""
url = f"https://raider.io/api/v1/characters/profile?" \
f"region=us&realm={realm}&name={name}&fields=mythic_plus_scores_by_season%3Acurrent"
response = requests.get(url).json()
mythic_plus_data = response["mythic_plus_scores_by_season"][0]["scores"]["all"]
return mythic_plus_data
def get_best_runs(name,realm="sargeras"):
"""queries the raider.io server api with a request for the best runs. The resulting json is parsed and
returned as a string."""
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}" \
f"&name={name}&fields=mythic_plus_best_runs%3A10"
response = requests.get(url).json()
best_runs = response["mythic_plus_best_runs"]
run_list = []
# put the runs into a list
for item in best_runs:
run_list.append(item)
# create message string
message = ""
for i in range(len(run_list)):
message += f"{run_list[i]['dungeon']} (+{run_list[i]['mythic_level']}) - {run_list[i]['score']}\n"
return message
def get_best_runs_as_embed(name,realm="sargeras"):
"""get best runs data and return embed object"""
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}" \
f"&name={name}&fields=mythic_plus_best_runs%3A10"
r = requests.get(url).json()
best_runs = r["mythic_plus_best_runs"]
run_list = []
mplus_score = get_mythic_plus(name,realm)
# create and prep embed
embed_author_title = "------------------- M+ Best Runs -------------------"
embed_author_name = f"{r['name']}'s M+ Score : {mplus_score}"
embed_author_profile = r['profile_url']
embed_color = 0xFF0000
embed_thumbnail = r['thumbnail_url']
embed = discord.Embed(title=embed_author_title, color=embed_color)
embed.set_author(name=embed_author_name, url=embed_author_profile)
embed.set_thumbnail(url=embed_thumbnail)
# put the runs into a list
for item in best_runs:
run_list.append(item)
# create message string
message = ""
for i in range(len(run_list)):
message_name = f"{run_list[i]['dungeon']}"
message_value = f"(+{run_list[i]['mythic_level']}) - {run_list[i]['score']}"
embed.add_field(name=message_name, value=message_value,inline=True)
return embed
def get_raid_progression(name,realm="sargeras"):
"""Get the raid progression info as json from Raider.io api, format and return as a string"""
raid_list = ["the-eternal-palace","crucible-of-storms","battle-of-dazaralor","uldir"]
formatted_raidnames = ["Eternal Palace","Crucible of Storms","Battle of Dazaralor","Uldir"]
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}&name={name}&fields=raid_progression"
response = requests.get(url).json()
category = response['raid_progression']
message = f"{str(name).capitalize()}'s Raid Progression\n" \
f"---------------------------------------\n"
for i in range(len(raid_list)):
total_bosses = category[raid_list[i]]['total_bosses']
message += f"{formatted_raidnames[i]}: {category[raid_list[i]]['mythic_bosses_killed']}/{total_bosses}M, "
message += f"{category[raid_list[i]]['heroic_bosses_killed']}/{total_bosses}H, "
message += f"{category[raid_list[i]]['normal_bosses_killed']}/{total_bosses}N\n"
message += f"---------------------------------------\n"
return message
def get_raid_progression_as_embed(name,realm="sargeras"):
"""Query raider.io for raid progression and return an embed object"""
raid_list = ["the-eternal-palace", "crucible-of-storms", "battle-of-dazaralor", "uldir"]
formatted_raidnames = ["Eternal Palace", "Crucible of Storms", "Battle of Dazaralor", "Uldir"]
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}&name={name}&fields=raid_progression"
r = requests.get(url).json()
category = r['raid_progression']
embed_author_title = "------------------- Raid Progression -------------------"
embed_author_name = f"{r['name']} : {r['active_spec_name']} {r['class']}"
embed_author_profile = r['profile_url']
embed_color = 0xFF0000
embed_thumbnail = r['thumbnail_url']
embed = discord.Embed(title=embed_author_title, color=embed_color)
embed.set_author(name=embed_author_name,url=embed_author_profile)
embed.set_thumbnail(url=embed_thumbnail)
for i in range(len(raid_list)):
message = ""
total_bosses = category[raid_list[i]]['total_bosses']
message += f"{category[raid_list[i]]['mythic_bosses_killed']}/{total_bosses}M, "
message += f"{category[raid_list[i]]['heroic_bosses_killed']}/{total_bosses}H, "
message += f"{category[raid_list[i]]['normal_bosses_killed']}/{total_bosses}N\n"
embed.add_field(name=f"{formatted_raidnames[i]}", value=message, inline=False)
return embed
def get_gear(name,realm="sargeras"):
"""Query the raider.io server for gear information on a character and return as a formatted string"""
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}&name={name}&fields=gear"
response = requests.get(url).json()
gear = response["gear"]
message = f"{name}'s Gear:\n" \
f"--------------------\n" \
f"Equipped Item Level: {gear['item_level_equipped']}\n" \
f"Total Item Level: {gear['item_level_total']}\n" \
f"Artifact Traits: {gear['artifact_traits']}\n" \
f"-------------------\n"
return message
def get_raideriopage(name,realm="sargeras"):
"""Query the raider.io server for a link to the character and return as a string."""
url = f"https://raider.io/api/v1/characters/profile?region=us&realm={realm}&name={name}"
response = requests.get(url).json()
link = response['profile_url']
return link
def get_rankings(name,realm="sargeras",target_difficulty=3,metric="dps",region="us"):
"""Query the warcraftlogs api for character rankings, embed, and return as embed object"""
difficulty_dict = {5:"Mythic", 4:"Heroic", 3:"Normal"}
r = requests.get(f"https://www.warcraftlogs.com:443/v1/rankings/character/{name}/{realm}/{region}?metric={metric}"
f"&timeframe=historical&api_key=<KEY>").json()
# create embed
ilvl = r[0]["ilvlKeyOrPatch"]
embed_author_title = f"-------------------{difficulty_dict.get(target_difficulty)} {metric} Rankings -------------------"
embed_author_name = f"{r[0]['characterName']}({r[0]['spec']} {r[0]['class']}) ilvl: {ilvl}"
embed_color = 0xFF0000
embed_author_url = f"https://www.warcraftlogs.com/character/{region}/{realm}/{name}?mode=detailed&zone=23" \
f"#difficulty={target_difficulty}"
embed = discord.Embed(title=embed_author_title, color=embed_color)
embed.set_author(name=embed_author_name,url=embed_author_url,icon_url="https://dmszsuqyoe6y6.cloudfront.net/img/warcraft/favicon.png")
# construct ordered list of raids
raid_order = ["Abyssal Commander Sivara", "Blackwater Behemoth", "Radiance of Azshara", "Lady Ashvane", "Orgozoa",
"The Queen's Court", "Za'qul", "Queen Azshara"]
raid_list_ordered = []
percentile_list = []
for raid in raid_order:
for item in r:
difficulty = item["difficulty"]
if difficulty == target_difficulty:
if item["encounterName"] == raid:
raid_list_ordered.append(item)
percentile_list.append(item["percentile"])
# check raid_list_ordered for duplicates, remove the duplicate with lowest percentile
for raid in raid_list_ordered:
for worse in raid_list_ordered:
if raid["encounterName"] == worse["encounterName"]:
if worse["percentile"] < raid["percentile"]:
raid_list_ordered.remove(worse)
# create fields
for item in raid_list_ordered:
encounter = item["encounterName"]
percentile = item["percentile"]
duration = item["duration"]
duration_minutes = round(((duration / 1000) // 60))
duration_seconds = round((((duration / 1000) / 60) % 1) * 60)
if duration_seconds < 10:
duration_seconds = "0" + str(duration_seconds)
message = f"{percentile}% ({duration_minutes}:{duration_seconds})"
embed.add_field(name=f"-{encounter}-", value=message, inline=True)
# calculate averages, put in field
p_average = average(percentile_list)
embed.insert_field_at(0, name=f"Average Percentile", value=p_average, inline=False)
return embed
```
#### File: brobot/brobot/main.py
```python
import discord
from discord.ext import commands
import functions as f
from random import choice
"""
A simple discord bot for GameBros server.
See 'todo.txt' for potential features.
"""
# region init
flavor = False
token = "" # put your bot token here
client = commands.Bot(command_prefix = "!")
client.remove_command("help")
# load user presets
with open("../data/user_character_presets.txt","r+") as file:
try:
user_character_presets = eval(file.read())
print(f"{file.name} loaded from file: {user_character_presets}")
except:
print(f"Unexpected error loading {file.name}. Reinitialized file.")
file.truncate(0)
file.seek(0)
user_character_presets = {}
file.write("{}")
# endregion
@client.event
async def on_member_update(member_a,member_b):
global flavor
if flavor == True:
# on waking up
status_a = str(member_a.status)
status_b = str(member_b.status)
if status_a == "idle" and status_b == "online":
channel_general = client.get_channel(439603779298394124)
# Josh
if member_a.id == 124339909820547074:
possible_messages = ["Welcome back creator!", "I missed you, Alimightyzentaco.", "Greetings creator!",
"Ahh, he who gave me life is here. I am elated."]
message = choice(possible_messages)
await channel_general.send(message)
# Ryan
if member_a.id == 140294058663739394:
possible_messages = ["Welcome back Ryan. How was your nap?",
"Welcome Ryan, glad you finally woke up.",
"It's good to see you again Ryan. Did you sleep well?",
"Back amongst the living, eh Ryan?",
"Ryan likes to party. And by party I mean take naps.",
"Finally. Ryan is awake. Let the games begin.",
"Do you guys think someone can nap too much? Let's ask Ryan, he'll probably know.",
"Ryan is so good at sleeping he can do it with his eyes closed.",
"Ryan spends so much time undercovers, he should be a detective."
"Hey, Ryan's awake! About time."
]
message = choice(possible_messages)
await channel_general.send(message)
@client.event
async def on_message(message):
if "https://www.youtube.com/watch?v=AmGti20smKo" in message.content:
possible_messages = ["Nope, none of that.",
"Not on my watch.",
"No sir, not gonna happen",
"Maybe don't do that so much",
"Nobody is 'schemin' like a demon semen' while I'm here",
"Why would you try to subject us to that?",
"Please, don't do that."
]
to_send = choice(possible_messages)
await message.channel.send(to_send)
await message.delete()
await client.process_commands(message)
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.command()
async def help(ctx):
await ctx.send("List of commands. Some commands are not yet finished:\n"
"!help: help prompt\n"
"!ping: returns latency\n"
"!roll <times>d<sides> : roll some dice\n"
"!russianroulette: Play Russian Roulette. Always random. 1 in 6 chance of dying\n"
"!mythicplus: <character> <realm> Shows M+ score for character. Default realm=sargeras "
"(m+,mplus,mythic+,mp)\n"
"!bestruns <character> <realm>: show the best runs for the character. Default realm=sargeras "
"(br,bestr,bruns)\n"
"!raid <character> <realm>: show raid progression for selected character.(rp,progression,raid)\n"
"!raiderio <character> <realm> return a link with the raider.io character page (char, info, link)\n"
"!r (optional arguments: -<character/realm/region> d-x, m-x) [ex: !r sethur/sargeras/us d-4 m-dps] "
": show rank info from warcraftlogs \n"
"!register <character> <realm> <region> : register a default character to your username.")
f.log(ctx.message.author,ctx.message.content)
@client.command()
async def ping(ctx):
await ctx.send(f"Parng! {round(client.latency*1000)}ms")
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases = ["dice","die","rolls"])
async def roll(ctx,roll="1d6"):
roll_list = roll.strip().lower().split("d")
f.log(ctx.message.author, ctx.message.content)
try:
roll,times = int(roll_list[0]), int(roll_list[1])
result = f.roll(roll,times)
await ctx.send(result)
except:
await ctx.send("You did something stupid. Cut that out.")
@client.command(aliases=["rr","russianr","roulette"])
async def russianroulette(ctx):
await ctx.send(f.russian_roulette(ctx.author))
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases=["m+", "mplus", "mythic+", "mp"])
async def mythicplus(ctx,name=None,realm=None):
author = str(ctx.message.author)
if name is None:
try:
name = user_character_presets[author][0]
print(f"name is {name}")
except:
print(f"exception! name: {name}")
await ctx.send("Enter name or register a default character")
if realm is None:
try:
realm = user_character_presets[author][1]
print(f"realm is : {realm}")
except:
realm = "sargeras"
print(f"realm is {realm}")
raid_stats = f.get_mythic_plus(name,realm)
name_caps = str(name).capitalize()
message = f"{name_caps}'s mythic+ score is: {raid_stats}"
await ctx.send(message)
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases = ["items"])
async def gear(ctx,name=None,realm=None):
author = str(ctx.message.author)
if name is None:
try:
name = user_character_presets[author][0]
except:
await ctx.send("Enter name or register a default character")
if realm is None:
try:
realm = user_character_presets[author][1]
except:
realm = "sargeras"
message = f.get_gear(name,realm)
await ctx.send(message)
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases=["char","info","link","character"])
async def raiderio(ctx,name=None,realm=None):
author = str(ctx.message.author)
if name is None:
try:
name = user_character_presets[author][0]
except:
await ctx.send("Enter name or register a default character")
if realm is None:
try:
realm = user_character_presets[author][1]
except:
realm = "sargeras"
message = f.get_raideriopage(name,realm)
await ctx.send(message)
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases=["rp","raidprogression","proggression"])
async def raid(ctx,name=None,realm=None):
author = str(ctx.message.author)
if name is None:
try:
name = user_character_presets[author][0]
except:
await ctx.send("Enter name or register a default character")
if realm is None:
try:
realm = user_character_presets[author][1]
except:
realm = "sargeras"
message = f.get_raid_progression_as_embed(name,realm)
await ctx.send(embed=message)
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases = ["br","bruns","best","runs"])
async def bestruns(ctx,name=None,realm=None):
author = str(ctx.message.author)
if name is None:
try:
name = user_character_presets[author][0]
except:
await ctx.send("Enter name or register a default character")
if realm is None:
try:
realm = user_character_presets[author][1]
except:
realm = "sargeras"
message = f.get_best_runs_as_embed(name,realm)
await ctx.send(embed=message)
f.log(ctx.message.author, ctx.message.content)
@client.command(aliases=["rankings","getrank","r","logs","parse"])
async def rank(ctx):
print(f"{ctx.message.author} requested rank information from warcraftlogs.com ")
author = str(ctx.message.author)
commands = str(ctx.message.content).split(" ")
commands.remove(commands[0])
# set default values
name, realm, difficulty, region, metric = None, "sargeras", 4, "us", "dps"
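# Accepted arguments: d-<difficulty> (or difficulty-<n>), m-<metric> (or metric-<name>), and -<character>[/realm[/region]], e.g. -sethur/sargeras/us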
for item in commands:
if item.startswith("d-") or item.startswith("difficulty-"):
item_list = item.split("-")
if len(item_list) > 1:
difficulty = int(item_list[1])
if item.startswith("m-") or item.startswith("metric-"):
item_list = item.split("-")
if len(item_list) > 1:
metric = item_list[1]
if item.startswith("-"):
item_list = item.split("-")
if len(item_list) > 1:
name_realm_region = item_list[1]
if "/" in name_realm_region:
nrr_list = name_realm_region.split("/")
name = nrr_list[0]
if len(nrr_list) > 1:
realm = nrr_list[1]
if len(nrr_list) > 2:
region = nrr_list[2]
else:
name = name_realm_region
if name is None:
try:
name = user_character_presets[author][0]
except:
print("No name preset for user")
await ctx.send("Enter a character name or register a character with !register")
try:
realm = user_character_presets[author][1]
except:
print("No region preset for user")
try:
region = user_character_presets[author][2]
except:
print("No realm preset for user")
message = f.get_rankings(name,realm,difficulty,metric,region)
await ctx.send(embed=message)
@client.command()
async def register(ctx,name=None,realm="sargeras",region="us"):
user = str(ctx.message.author)
user_character_presets[user] = [name, realm, region]
with open("user_character_presets.txt","w") as file:
file.write(str(user_character_presets))
await ctx.send(f"Character registered as : {name}, {realm}, {region}")
await ctx.send(f"https://raider.io/characters/{region}/{realm}/{name}")
@client.command()
async def idiot(ctx):
author = str(ctx.message.author)
if author == "Horizen#1881":
await ctx.send("That's you, @Horizen#1881.")
else:
await ctx.send("The current idiot around here is @Horizen#1881")
@client.command()
async def pic(ctx):
file = discord.File("yousuck.jpg")
await ctx.channel.send(file=file)
@client.command()
async def personality(ctx):
global flavor
flavor = not flavor
if flavor == True:
await ctx.send("I'll spice it up a bit.")
else:
await ctx.send("Okay, It's all business from here on out.")
client.run(token)
``` |
{
"source": "joshuakoh1/drf-stripe-subscription",
"score": 2
} |
#### File: management/commands/update_stripe_products.py
```python
from django.core.management.base import BaseCommand
from drf_stripe.stripe_api.products import stripe_api_update_products_prices
class Command(BaseCommand):
help = "Import Service/Feature types from Stripe"
def add_arguments(self, parser):
pass
def handle(self, *args, **kwargs):
stripe_api_update_products_prices()
```
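A minimal usage sketch (not part of the repository): with a configured Django project, the command above can also be invoked programmatically, which is equivalent to running `python manage.py update_stripe_products`.
```python
# Hypothetical invocation sketch; assumes Django settings are already configured.
from django.core.management import call_command

# Runs the same Stripe product/price sync as the CLI command.
call_command("update_stripe_products")
```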
#### File: tests/api/test_update_subscriptions.py
```python
from drf_stripe.models import Subscription
from drf_stripe.stripe_api.subscriptions import stripe_api_update_subscriptions
from ..base import BaseTest
class TestSubscription(BaseTest):
def setUp(self) -> None:
self.setup_user_customer()
self.setup_product_prices()
def test_update_subscriptions(self):
"""
Test retrieving list of subscription from Stripe and update database.
"""
response = self._load_test_data("v1/api_subscription_list.json")
stripe_api_update_subscriptions(test_data=response)
subscription = Subscription.objects.get(subscription_id="sub_0001")
self.assertEqual(subscription.status, "trialing")
self.assertEqual(subscription.stripe_user.customer_id, "cus_tester")
sub_items = subscription.items.all()
self.assertEqual(len(sub_items), 1)
sub_item = sub_items.first()
self.assertEqual(sub_item.price.price_id, "price_1KHkCLL14ex1CGCipzcBdnOp")
```
#### File: drf-stripe-subscription/tests/base.py
```python
import json
from pathlib import Path
from django.contrib.auth import get_user_model
from django.test import TestCase
from drf_stripe.models import StripeUser
from drf_stripe.stripe_api.products import stripe_api_update_products_prices
class BaseTest(TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def setup_product_prices(self):
products = self._load_test_data("v1/api_product_list.json")
prices = self._load_test_data("v1/api_price_list.json")
stripe_api_update_products_prices(test_products=products, test_prices=prices)
@staticmethod
def setup_user_customer():
user = get_user_model().objects.create(username="tester", email="<EMAIL>", password="<PASSWORD>")
stripe_user = StripeUser.objects.create(user_id=user.id, customer_id="cus_tester")
return user, stripe_user
@staticmethod
def _load_test_data(file_name):
p = Path("tests/mock_responses") / file_name
with open(p, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
@staticmethod
def _print(v):
print("$$$$$$$ DEBUG $$$$$")
print(v)
assert False
```
#### File: tests/webhook/test_event_product_price.py
```python
from django.db.models import Q
from drf_stripe.models import Product, Price, ProductFeature
from drf_stripe.stripe_webhooks.handler import handle_webhook_event
from tests.base import BaseTest
class TestWebhookProductPriceEvents(BaseTest):
def create_product_price(self):
event = self._load_test_data("2020-08-27/webhook_product_created.json")
handle_webhook_event(event)
event = self._load_test_data("2020-08-27/webhook_price_created.json")
handle_webhook_event(event)
def test_event_handler_product_created(self):
"""
Mock production and price creation events
"""
self.create_product_price()
# check product and price created
product = Product.objects.get(description='Test Product ABC')
price = Price.objects.get(product=product)
self.assertEqual(price.price, 100)
self.assertEqual(price.product, product)
# check Product-to-Feature relations created
ProductFeature.objects.get(product=product, feature__feature_id='A')
ProductFeature.objects.get(product=product, feature__feature_id='B')
ProductFeature.objects.get(product=product, feature__feature_id='C')
def test_event_handler_price_update(self):
"""
Mock price update events
"""
self.create_product_price()
# modify price
event = self._load_test_data("2020-08-27/webhook_price_updated.json")
handle_webhook_event(event)
# check price modifications
price = Price.objects.get(price_id="price_1KHkCLL14ex1CGCipzcBdnOp")
self.assertEqual(price.price, 50)
self.assertEqual(price.freq, "week_1")
self.assertEqual(price.nickname, "Weekly subscription")
self.assertEqual(price.product.product_id, "prod_KxfXRXOd7dnLbz")
def test_event_handler_product_update(self):
"""Mock product update event"""
self.create_product_price()
# modify product
product_mod = self._load_test_data("2020-08-27/webhook_product_updated.json")
handle_webhook_event(product_mod)
# check product modifications
product = Product.objects.get(product_id='prod_KxfXRXOd7dnLbz')
self.assertEqual(product.name, "Test Product ABD")
self.assertEqual(product.description, "Test Product ABD")
# check product is now associated with feature D
ProductFeature.objects.get(product=product, feature__feature_id='D')
ProductFeature.objects.get(product=product, feature__feature_id='A')
ProductFeature.objects.get(product=product, feature__feature_id='B')
# check product no longer associated with feature C
prod_feature_qs = ProductFeature.objects.filter(Q(product=product) & Q(feature__feature_id='C'))
self.assertEqual(len(prod_feature_qs), 0)
def test_event_handler_price_archived(self):
"""Mock price archived event"""
self.create_product_price()
event = self._load_test_data("2020-08-27/webhook_price_updated_archived.json")
handle_webhook_event(event)
price = Price.objects.get(price_id='price_1KHkCLL14ex1CGCieIBu8V2e')
self.assertFalse(price.active)
def test_event_handler_product_archived(self):
"""Mock product archived event"""
self.create_product_price()
event = self._load_test_data("2020-08-27/webhook_product_updated_archived.json")
handle_webhook_event(event)
product = Product.objects.get(product_id='prod_KxfXRXOd7dnLbz')
self.assertFalse(product.active)
``` |
{
"source": "JoshuaKoopmans/FusionToolFilter",
"score": 2
} |
#### File: modules/starfusion/methods.py
```python
from modules.collective_script.methods import check_value_above_filter
def create_sf_output(output_file, out_string):
"""
This function creates an output file and dumps the fusion partners in it.
:param out_string: string containing all fusion partners in the desired format \
(e.g. <fusion_partner_1>--<fusion_partner_2>).
:param output_file: a file to dump he out_string into.
"""
try:
with open(output_file, "w") as f_out:
f_out.write("#FusionName\tJunctionReadCount\tSpanningFragCount\tSpliceType\tLeftGene\t"
"LeftBreakpoint\tRightGene\tRightBreakpoint\tJunctionReads\tSpanningFrags\t"
"LargeAnchorSupport\tFFPM\tLeftBreakDinuc\tLeftBreakEntropy\tRightBreakDinuc\t"
"RightBreakEntropy\tannots\n")
f_out.write(out_string)
f_out.close()
except (FileNotFoundError, IOError) as e:
print("ERROR: Check output file.", e.args)
exit(1)
def process_star_fusion(file_content, spanning_threshold=8, junction_threshold=8):
"""
This function reads the content of an opened file and filters the rows.
:param file_content: content of input file going to be processed.
:param spanning_threshold: Amount of spanning reads to filter by.
:param junction_threshold: Amount of junction reads to filter by.
:return: String with filtered rows.
"""
out_string = ""
try:
for line in file_content:
if not line.startswith("#"):
splitted_line = line.split("\t")
junction_read_count = splitted_line[1]
spanning_read_count = splitted_line[2]
if [int(junction_read_count), int(spanning_read_count)].count(0) == 1:
if check_value_above_filter(junction_read_count, junction_threshold) or \
check_value_above_filter(spanning_read_count, spanning_threshold):
out_string += line
elif check_value_above_filter(junction_read_count, junction_threshold) or \
check_value_above_filter(spanning_read_count, spanning_threshold):
out_string += line
return out_string
except:
print("ERROR: input file not from tool \'STAR-Fusion\'.")
exit(1)
```
#### File: FusionToolFilter/tests/test_star_fusion_methods.py
```python
import unittest
from modules.starfusion.methods import check_value_above_filter
from tests.helper import OpenFile
"""
##################################################
Center for Molecular and Biomolecular Informatics (CMBI) / RTC Bioinformatics
Author: <NAME>
Version: 1.0
Email: <EMAIL>
##################################################
This script tests the methods used for the processing of STAR-Fusion results.
"""
class TestStarFusionFilter(unittest.TestCase):
def test_true_output(self):
"""
Check whether filter works on integer
"""
self.assertEqual(check_value_above_filter(9, 8), True)
def test_false_output(self):
"""
Check whether filter works on integer
"""
self.assertEqual(check_value_above_filter(5, 8), False)
def test_processing(self):
"""
Check whether a specific piece of file content matches the expected value
"""
ex = OpenFile("tests/test_dependencies/ERR3003549.fusion_predictions.tsv", False)
self.assertEqual(ex.open_file()[2].split("\t")[8], "ERR3003549.35514118,ERR3003549.26593596,ERR3003549.4876846,"
"ERR3003549.132222,ERR3003549.23880496,ERR3003549.1459710,"
"ERR3003549.34271762,ERR3003549.23877880,ERR3003549.31002074,"
"ERR3003549.35767423,ERR3003549.34871132,ERR3003549.23311128,"
"ERR3003549.2510524,ERR3003549.26239911,ERR3003549.24106263,"
"ERR3003549.13747215,ERR3003549.1157243,ERR3003549.35462450,"
"ERR3003549.11226777,ERR3003549.7469243,ERR3003549.13564551,"
"ERR3003549.37646628,ERR3003549.21763831,ERR3003549.12397896,"
"ERR3003549.24734818,ERR3003549.6427191,ERR3003549.14293731,"
"ERR3003549.22490457,ERR3003549.41394459,ERR3003549.23932326,"
"ERR3003549.27532262,ERR3003549.957032,ERR3003549.18054556,"
"ERR3003549.5056587,ERR3003549.20106936,ERR3003549.20813627,"
"ERR3003549.34020876,ERR3003549.4052860,ERR3003549.10823571,"
"ERR3003549.27970515,ERR3003549.38559607,ERR3003549.19660361,"
"ERR3003549.10101131,ERR3003549.27728776,ERR3003549.30418961,"
"ERR3003549.34400349,ERR3003549.42191939,ERR3003549.40548193,"
"ERR3003549.41738925,ERR3003549.33348710,ERR3003549.16904117,"
"ERR3003549.34350166,ERR3003549.24335298,ERR3003549.17813454,"
"ERR3003549.11154232,ERR3003549.33578004,ERR3003549.37972998,"
"ERR3003549.1386700")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshuakosasih/TA",
"score": 2
} |
#### File: FlaskApp/nertagger/Main_Char.py
```python
import csv
import json
import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from DataProcessor import DataIndexer as DI
from DataProcessor import DataMapper as DM
from DataProcessor import DataPreprocessor as DP
from keras import Model
from keras import backend as K
from keras.layers import Bidirectional, Dropout
from keras.layers import Concatenate
from keras.layers import Embedding
from keras.layers import GRU
from keras.layers import Input
from keras.layers import Lambda
from keras_contrib.layers import CRF
class NERTagger:
mask = True # mask pad (zeros) or not
EMBEDDING_DIM = 64
CHAR_EMBEDDING_DIM = 64
padsize = 188
char_padsize = 25
def __init__(self):
self.textinput = ''
self.test = ''
self.x_test = ''
self.x_test_char = ''
self.results = []
self.data = {}
self.json_data = {}
self.char = DI()
self.char.load('char')
self.word = DI()
self.word.load('word.ner')
self.label = DI()
self.label.load('label.ner')
print 'Found', self.word.cnt - 1, 'unique words.'
print 'Found', self.char.cnt - 1, 'unique chars.'
print 'Found', self.label.cnt - 1, 'unique labels.'
embedding_matrix = np.zeros((len(self.word.index) + 1, int(self.EMBEDDING_DIM)))
char_embedding_matrix = np.zeros((len(self.char.index) + 1, int(self.CHAR_EMBEDDING_DIM)))
"""
Create keras word model
"""
MAX_SEQUENCE_LENGTH = self.padsize
embedding_layer = Embedding(len(self.word.index) + 1,
self.EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
mask_zero=self.mask)
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
drop = 0.4
dropout = Dropout(rate=drop)(embedded_sequences)
"""
Create keras char model
"""
def reshape_one(c):
return K.reshape(c, (tf.shape(c)[0] * self.padsize, self.char_padsize, self.CHAR_EMBEDDING_DIM))
def reshape_two(c):
if merge_m_c == 'concat':
return K.reshape(c, (tf.shape(c)[0] / self.padsize, self.padsize, self.CHAR_EMBEDDING_DIM * 2))
else:
return K.reshape(c, (tf.shape(c)[0] / self.padsize, self.padsize, self.CHAR_EMBEDDING_DIM))
MAX_WORD_LENGTH = self.char_padsize
# embeddingPrompt('char')
embedding_layer_c = Embedding(len(self.char.index) + 1,
self.CHAR_EMBEDDING_DIM,
weights=[char_embedding_matrix],
input_length=MAX_WORD_LENGTH,
mask_zero=self.mask)
sequence_input_c = Input(shape=(self.padsize, MAX_WORD_LENGTH,), dtype='int32')
embedded_sequences_c = embedding_layer_c(sequence_input_c)
dropout_c = Dropout(rate=drop)(embedded_sequences_c)
rone = Lambda(reshape_one)(dropout_c)
merge_m = 'concat'
merge_m_c = merge_m
dropout_gru = 0.5
rec_dropout = dropout_gru
gru_karakter = Bidirectional(
GRU(self.CHAR_EMBEDDING_DIM, return_sequences=False, dropout=dropout_gru, recurrent_dropout=rec_dropout),
merge_mode=merge_m, weights=None)(rone)
rtwo = Lambda(reshape_two)(gru_karakter)
"""
Combine word + char model
"""
merge_m = 'concat'
gru_kata = Bidirectional(GRU(self.EMBEDDING_DIM * 2, return_sequences=True, dropout=dropout_gru,
recurrent_dropout=rec_dropout), merge_mode=merge_m, weights=None)(rtwo)
crf = CRF(len(self.label.index) + 1, learn_mode='marginal')(gru_kata)
self.model = Model(inputs=[sequence_input, sequence_input_c], outputs=[crf])
optimizer = 'adagrad'
loss = 'poisson'
self.model.summary()
self.model.compile(loss=loss,
optimizer=optimizer,
metrics=['acc'])
self.w_name = '06-05_17:19_658'
m_layers_len = len(self.model.layers)
for i in range(m_layers_len):
with open(self.w_name + "_" + str(i) + ".wgt", "rb") as fp:
w = pickle.load(fp)
self.model.layers[i].set_weights(w)
def predict(self, text):
self.textinput = text
self.test = DP(text)
self.x_test = DM(self.test.words, self.word.index)
print "Number of OOV:", len(self.x_test.oov_index)
print "OOV word occurences:", self.x_test.oov
self.x_test.pad(self.padsize)
print('Padded until %s tokens.' % self.padsize)
self.x_test_char = self.convertCharText2Int(self.test)
self.results = []
print "Computing..."
print self.x_test.padded
print self.x_test_char
raw_results = self.model.predict([np.array(self.x_test.padded), np.array(self.x_test_char)])
for raw_result in raw_results:
result = []
for token in raw_result:
value = np.argmax(token)
result.append(value)
self.results.append(result)
temp = self.results[0]
li = self.label.index
keys = li.keys()
values = li.values()
self.results = []
start = False
for token in temp:
if token != 0:
start = True
if start:
if token == 0:
self.results.append('?')
else:
self.results.append(keys[values.index(token)])
print self.test.words[0]
print self.results
self.data = {'words': self.test.words[0], 'labels': self.results}
self.json_data = json.dumps(self.data)
return self.json_data
def log(self):
self.textoutput = ''
for token in self.results:
self.textoutput = self.textoutput + token + ' '
rnow = datetime.now()
logcsv = open('log.csv', 'a')
writer = csv.writer(logcsv, delimiter=',')
writer.writerow(
['no', str(rnow.date()), str(rnow.time())[:-10], self.w_name, self.word.cnt - 1,
self.char.cnt - 1, self.textinput, len(self.x_test.oov_index), self.textoutput])
logcsv.close()
def convertCharText2Int(self, dataload):
x_tmp1 = []
for sent in dataload.words:
x_map = DM(sent, self.char.index, False)
if x_map.padsize > self.char_padsize:
self.char_padsize = x_map.padsize
x_tmp1.append(x_map)
x_tmp2 = []
for sent in x_tmp1:
sent.pad(self.char_padsize)
x_tmp2.append(sent.padded)
print('Padded until %s chars.' % self.char_padsize)
zeroes = []
for i in range(self.char_padsize):
zeroes.append(0)
x_char = []
for sent in x_tmp2:
padded_sent = sent
pad = self.padsize - len(sent)
for i in range(pad):
padded_sent = np.vstack((zeroes, padded_sent))
x_char.append(padded_sent)
print('Padded until %s tokens.' % self.padsize)
return x_char
```
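For reference, a minimal usage sketch of the tagger above (the Flask app in the next file drives it the same way). The import path and example sentence are placeholders; the constructor assumes the pickled `.idx` index files and the `06-05_17:19_658_*.wgt` weight files are present in the working directory.
```python
# Minimal usage sketch; assumes the .idx and .wgt files loaded in __init__ are available locally.
from nertagger.Main_Char import NERTagger   # adjust the import path to your project layout

tagger = NERTagger()                                   # builds the Keras model and loads the weights
result_json = tagger.predict('Budi pergi ke Jakarta')  # placeholder Indonesian sentence
# result_json is a JSON string of the form {"words": [...tokens...], "labels": [...predicted tags...]}
tagger.log()                                           # appends this run to log.csv
```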
#### File: Webapp/Program/app.py
```python
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
from postagger.Main import POSTagger
from nertagger.Main import NERTagger
import json
app = Flask(__name__)
data = {}
pt = 0
nt = 0
@app.route("/")
def main():
return render_template('index.html')
@app.route('/pos', methods=['GET'])
def posG():
global pt
if pt == 0:
pt = POSTagger()
return render_template('pos.html')
@app.route('/pos', methods=['POST'])
def posP():
in_x = request.form['in_x']
global pt
json_data = pt.predict(in_x)
print 'json data', json_data
return render_template('pos.html', in_val=in_x, jsondata=json_data)
@app.route('/ner', methods=['GET'])
def nerG():
global nt
nt = NERTagger()
return render_template('ner.html')
@app.route('/ner', methods=['POST'])
def nerP():
in_x = request.form['in_x']
global nt
json_data = nt.predict(in_x)
print 'json data', json_data
return render_template('ner.html', in_val=in_x, jsondata=json_data)
if __name__ == "__main__":
app.run(debug=False, threaded=False)
```
#### File: TA/welldone_pos/DataProcessor.py
```python
import nltk
import numpy as np
import random
import pickle
class DataLoader:
"""
    Initialize with the corpus file name as a string
ex: DataLoader('filename')
Attributes:
corpus
words
labels
filename
"""
def __init__(self, name):
print "Opening file", name
self.myfile = open(name, 'r')
self.filename = name
print "Loading data..."
self.mydict = []
self.corpus = []
lines = []
for line in self.myfile:
self.mydict.append(line)
for line in self.mydict:
if line == '-----\r\n':
self.corpus.append(lines)
lines = []
else:
lines.append((line.split('\t')[0], line.split('\t')[1][:-2])) # there's [:-2] to remove newline chars
print "Creating words and labels..."
self.words = []
self.labels = []
for sent in self.corpus:
line = []
y_true = []
for token in sent:
line.append(token[0])
y_true.append(token[1])
self.words.append(line)
self.labels.append(y_true)
print "Data loaded!", len(self.corpus), "sentences!"
def slice(self, percent, seed):
random.seed(seed)
random.shuffle(self.words)
random.seed(seed)
random.shuffle(self.labels)
random.seed(seed)
random.shuffle(self.corpus)
num_item = int(percent * len(self.labels))
self.corpus = self.corpus[:num_item]
self.words = self.words[:num_item]
self.labels = self.labels[:num_item]
print "Data sliced!", len(self.corpus), "sentences!"
def add(self, name):
print "Opening file", name
myfile = open(name, 'r')
print "Loading data..."
mydict = []
corpus = []
lines = []
for line in myfile:
mydict.append(line)
for line in mydict:
if line == '-----\r\n':
corpus.append(lines)
self.corpus.append(lines)
lines = []
else:
lines.append((line.split('\t')[0], line.split('\t')[1][:-2])) # there's [:-2] to remove newline chars
print "Adding words and labels..."
for sent in corpus:
line = []
y_true = []
for token in sent:
line.append(token[0])
y_true.append(token[1])
self.words.append(line)
self.labels.append(y_true)
print "Data added!", len(self.corpus), "sentences!"
def trim(self, length):
print "Triming sequence length..."
for i, line in enumerate(self.corpus):
if len(line) > length:
self.corpus[i] = line[:length]
for i, line in enumerate(self.words):
if len(line) > length:
self.words[i] = line[:length]
for i, line in enumerate(self.labels):
if len(line) > length:
self.labels[i] = line[:length]
print "Sequence trimmed!"
class DataIndexer:
"""
    Initialize with a list of corpora to be indexed together
ex: DataIndexer([corpus1, corpus2, ...])
Attributes:
index
cnt
"""
def __init__(self, data=[]):
print "Indexing..."
self.cnt = 1
self.index = {}
for datum in data:
for sent in datum:
for token in sent:
if token not in self.index:
self.index[token] = self.cnt
self.cnt = self.cnt + 1
print "Data indexed!"
def add(self, data=[]):
print "Indexing..."
for datum in data:
for sent in datum:
for token in sent:
if token not in self.index:
self.index[token] = self.cnt
self.cnt = self.cnt + 1
print "Index added!"
def save(self, name):
with open(name + '.idx', 'wb') as fp:
pickle.dump((self.index, self.cnt), fp)
print "Index saved!"
def load(self, name):
with open(name + '.idx', 'rb') as fp:
(self.index, self.cnt) = pickle.load(fp)
print "Index loaded!"
class DataMapper:
"""
Initialize with corpus and index
ex: DataMapper(words, index)
Attributes:
mapped
padded
padsize
oov
oov_index
"""
def __init__(self, data, index, verbose=True):
self.verbose = verbose
if verbose:
print "Mapping..."
self.padsize = 0
self.mapped = []
self.padded = []
self.oov_index = []
self.oov = 0
for sent in data:
tokens = []
for token in sent:
try:
tokens.append(index[token])
except KeyError:
tokens.append(0)
self.oov = self.oov + 1
if token not in self.oov_index:
self.oov_index.append(token)
self.mapped.append(tokens)
if len(tokens) > self.padsize:
self.padsize = len(tokens)
if verbose:
print "Data mapped!"
def pad(self, size):
if self.verbose:
print "Padding..."
for sent in self.mapped:
sub = size-len(sent)
new = np.pad(sent, (sub, 0), 'constant')
self.padded.append(new)
if self.verbose:
print "Data padded!"
``` |
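A sketch of how the three classes in the DataProcessor module above are typically chained: load a tab-separated corpus, build an index, then map and pad the token sequences. The file name and pad size below are placeholders.
```python
# Pipeline sketch (file name and pad size are placeholders); the input file must use the
# token<TAB>label format with '-----' lines separating sentences, as DataLoader expects.
from DataProcessor import DataLoader, DataIndexer, DataMapper

train = DataLoader('train.corpus')          # parses sentences into .words / .labels
word_index = DataIndexer([train.words])     # token -> integer id; 0 is reserved for padding/OOV
x_train = DataMapper(train.words, word_index.index)
x_train.pad(188)                            # left-pad every sentence to a fixed length
word_index.save('word')                     # writes word.idx for reuse at prediction time
```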
{
"source": "joshuakt/Oxygen-False-Positives",
"score": 3
} |
#### File: joshuakt/Oxygen-False-Positives/all_classes.py
```python
class Switch_Inputs:
def __init__(self, print_switch, speedup_flag, start_speed , fin_speed,heating_switch,C_cycle_switch,Start_time):
self.print_switch = print_switch
self.speedup_flag = speedup_flag
self.start_speed = start_speed
self.fin_speed = fin_speed
self.heating_switch = heating_switch
self.C_cycle_switch = C_cycle_switch
self.Start_time = Start_time
class Planet_inputs:
def __init__(self, RE,ME,rc,pm,Total_Fe_mol_fraction,Planet_sep,albedoC,albedoH):
self.RE = RE
self.ME = ME
self.rc = rc
self.pm = pm
self.Total_Fe_mol_fraction = Total_Fe_mol_fraction
self.Planet_sep = Planet_sep
self.albedoC = albedoC
self.albedoH = albedoH
class Init_conditions:
def __init__(self, Init_solid_H2O,Init_fluid_H2O,Init_solid_O,Init_fluid_O,Init_solid_FeO1_5,Init_solid_FeO,Init_solid_CO2,Init_fluid_CO2):
self.Init_solid_H2O = Init_solid_H2O
self.Init_fluid_H2O = Init_fluid_H2O
self.Init_solid_O = Init_solid_O
self.Init_fluid_O = Init_fluid_O
self.Init_solid_FeO1_5 = Init_solid_FeO1_5
self.Init_solid_FeO = Init_solid_FeO
self.Init_solid_CO2 = Init_solid_CO2
self.Init_fluid_CO2 = Init_fluid_CO2
class Numerics:
def __init__(self, total_steps,step0,step1,step2,step3,step4,tfin0,tfin1,tfin2,tfin3,tfin4):
self.total_steps = total_steps
self.step0 = step0
self.step1 = step1
self.step2 = step2
self.step3 = step3
self.step4 = step4
self.tfin0 = tfin0
self.tfin1 = tfin1
self.tfin2 = tfin2
self.tfin3 = tfin3
self.tfin4 = tfin4
class Stellar_inputs:
def __init__(self, tsat_XUV, Stellar_Mass,fsat, beta0 , epsilon ):
self.tsat_XUV = tsat_XUV
self.Stellar_Mass = Stellar_Mass
self.fsat = fsat
self.beta0 = beta0
self.epsilon = epsilon
class MC_inputs:
def __init__(self, esc_a, esc_b, esc_c, esc_d,ccycle_a , ccycle_b ,supp_lim, interiora , interiorb,interiorc,interiord,interiore,interiorf,ocean_a,ocean_b,Tstrat, surface_magma_frac):
self.esc_a = esc_a
self.esc_b = esc_b
self.esc_c = esc_c
self.esc_d = esc_d
self.ccycle_a = ccycle_a
self.ccycle_b = ccycle_b
self.supp_lim = supp_lim
self.interiora = interiora
self.interiorb = interiorb
self.interiorc = interiorc
self.interiord = interiord
self.interiore = interiore
self.interiorf = interiorf
self.ocean_a = ocean_a
self.ocean_b = ocean_b
self.Tstrat = Tstrat
self.surface_magma_frac = surface_magma_frac
class Model_outputs:
def __init__(self, total_time,total_y,FH2O_array,FCO2_array,MH2O_liq,MH2O_crystal,MCO2_liq,Pressre_H2O,CO2_Pressure_array,fO2_array,Mass_O_atm,Mass_O_dissolved,water_frac,Ocean_depth,Max_depth,Ocean_fraction):
self.total_time = total_time
self.total_y = total_y
self.FH2O_array = FH2O_array
self.FCO2_array = FCO2_array
self.MH2O_liq = MH2O_liq
self.MH2O_crystal = MH2O_crystal
self.MCO2_liq = MCO2_liq
self.Pressre_H2O = Pressre_H2O
self.CO2_Pressure_array = CO2_Pressure_array
self.fO2_array = fO2_array
self.Mass_O_atm = Mass_O_atm
self.Mass_O_dissolved = Mass_O_dissolved
self.water_frac = water_frac
self.Ocean_depth = Ocean_depth
self.Max_depth = Max_depth
self.Ocean_fraction = Ocean_fraction
```
#### File: joshuakt/Oxygen-False-Positives/other_functions.py
```python
import numpy as np
import pylab
from scipy.interpolate import interp1d
from scipy import optimize
from numba import jit
#######################################
@jit(nopython=True)
def qr(t00,Start_time,heatscale): #interior radiogenic heatproduction
t = t00 - Start_time*365*24*60*60
u238 = 0.9928 * (0.022e-6) * 9.17e-5 * np.exp(-(np.log(2)/4.46e9)*(t/(365*24*60*60)-4.5e9))
u235 = 0.0072 * (0.022e-6) * 5.75e-4 * np.exp(-(np.log(2)/7.04e8)*(t/(365*24*60*60)-4.5e9))
Th = (0.069e-6) * 2.56e-5 * np.exp(-(np.log(2)/1.4e10)*(t/(365*24*60*60)-4.5e9))
K = 1.17e-4 * (280e-6) * 2.97e-5 * np.exp(-(np.log(2)/1.26e9)*(t/(365*24*60*60)-4.5e9))
Al = 5e-5 * 3.54e-1 * (8650e-6) * np.exp(-(np.log(2)/7.17e5)*(t/(365*24*60*60)))
return (heatscale*u238 + heatscale*u235 + heatscale*Th + heatscale*K + Al)
@jit(nopython=True) #calcuate mantle viscosity
def viscosity_fun(Tp,pm,visc_offset,Tsurf,Tsolidus):
visc_rock = visc_offset*3.8e7 * np.exp(350000/(8.314*Tp))/pm
visc_liq = 0.00024*np.exp(4600.0 / (Tp - 1000.0))/pm
LowerT = Tsolidus
UpperT = LowerT + 600.0
if Tp < LowerT+0.000:
visc = visc_rock
elif Tp > UpperT+0.000:
visc = visc_liq
else:
v1 = np.log10(visc_rock)
v2 = np.log10(visc_liq)
logvisc = ( v2 * 0.2*(Tp - (LowerT))**5 + v1 * 0.8*(UpperT - Tp)**5) / ( 0.2*(Tp - (LowerT))**5 + 0.8*(UpperT - Tp)**5)
visc = 10**logvisc
return visc
@jit(nopython=True)
def f_for_optim(x,kH2O,Mcrystal,Mliq,rp,yH2O_liq,g,MMW):
result = (kH2O * x * Mcrystal + x * (Mliq-Mcrystal) + 4.0 *(0.018/MMW)* np.pi * (rp**2/g) * (x / 3.44e-8)**(1.0/0.74) - yH2O_liq)
return result
def H2O_partition_function( yH2O_liq,Mliq,Mcrystal,rp,g,kH2O,MMW): #partition H2O between magma ocean and atmosphere
    if (Mliq > 0) or (Mcrystal > 0):  # either melt or crystals present (parallel to CO2_partition_function below)
FH2O = optimize.newton(f_for_optim,0.5,args=(kH2O,Mcrystal,Mliq,rp,yH2O_liq,g,MMW))
Pressure_surface = (FH2O / 3.44e-8)**(1.0/0.74)
else:
FH2O = 3.44e-8*( yH2O_liq/(4 * (0.018/MMW) * np.pi * (rp**2/g)) ) ** (0.74)
Pressure_surface = (FH2O / 3.44e-8)**(1.0/0.74)
return [FH2O,Pressure_surface]
@jit(nopython=True)
def CO2_partition_function( yCO2_liq,Mliq,Mcrystal,rp,g,kCO2,MMW): #partition CO2 between magma ocean and atmosphere
if (Mliq>0)or(Mcrystal>0):
FCO2 = yCO2_liq / (kCO2 * Mcrystal + (Mliq-Mcrystal) + 4 * (0.044/MMW) * np.pi * (rp**2/g) * (1 /4.4e-12))
Pressure_surface = (FCO2 /4.4e-12)
else:
FCO2 = 0.0
Pressure_surface = (yCO2_liq*g)/(4.0 *(0.044/MMW)* np.pi * (rp**2))
return [FCO2,Pressure_surface]
@jit(nopython=True)
def Mliq_fun(y2,rp,rs,pm): #calculate liquid mass of mantle
if rs < rp:
Mliq = pm * 4./3. * np.pi * (rp**3 - rs**3)
else:
Mliq = 0.0
return Mliq
@jit(nopython=True)
def rs_term_fun(r,a1,b1,a2,b2,g,alpha,cp,pm,rp,Tp,Poverburd): #controls solidification radius evolution
numerator = 1 + alpha*(g/cp) * (rp - r)
e1 = np.exp(1e-5*(-rp+r+100000))
e2 = np.exp(1e-5*(rp-r-100000))
sub_denom = (e1+e2)**2
T1 = (b1+a1*g*pm*(rp-r)+a1*Poverburd)
T2 = (b2+a2*g*pm*(rp-r)+a2*Poverburd)
sub_num1 = ((-a1*g*pm*e1 + T1*1e-5*e1) + (-a2*g*pm*e2 - T2*1e-5*e2))*(e1+e2)
sub_num2 = (T1*e1+T2*e2)*(1e-5*e1-1e-5*e2)
everything = (sub_num1 - sub_num2)/sub_denom
if r>rp:
return 0
else:
return numerator /(alpha*g*Tp/cp + everything)
@jit(nopython=True)
def adiabat(radius,Tp,alpha,g,cp,rp): #mantle adiabat
Tr = Tp*(1 + alpha*(g/cp)*(rp-radius))
return Tr
@jit(nopython=True)
def sol_liq(radius,g,pm,rp,Poverburd,mH2O): #For calculating solidus
a1 = 104.42e-9
b1 = 1420+0.000 - 80.0
TS1 = b1 + a1 * g * pm * (rp - radius) + a1 * Poverburd - 4.7e4 * mH2O**0.75
if TS1 < 1170.0:
T_sol1 = 1170.0+0*TS1
else:
T_sol1 = TS1
a2 = 26.53e-9
b2 = 1825+0.000
TS2 = b2 + a2 * g * pm * (rp -radius) + a2 * Poverburd - 4.7e4 * mH2O**0.75
if TS2 < 1170.0:
T_sol2 = 1170.0+0*TS2
else:
T_sol2 = TS2
T_sol = (T_sol1 * np.exp(1e-5*( -rp + radius + 100000)) + T_sol2 * np.exp(1e-5*(rp - radius - 100000)))/ (np.exp(1e-5*(-rp + radius + 100000)) + np.exp(1e-5*(rp - radius - 100000)))
return T_sol
def find_r(r,Tp,alpha,g,cp,pm,rp,Poverburd,mH2O): ## used for finding the solidus radius numerically
Tr = adiabat(r,Tp,alpha,g,cp,rp)
rr = float(r)
T_sol = sol_liq(rr,g,pm,rp,Poverburd,mH2O)
return (Tr-T_sol)**2.0
@jit(nopython=True)
def temp_meltfrac(rc,rp,alpha,pm,Tp,cp,g,Poverburd,mH2O): #for calculating melt fraction
rad = np.linspace(rc,rp,1000)
melt_r = np.copy(rad)
visc_r = np.copy(rad)
vol_r = np.copy(rad)*0 + 4.0*np.pi*(rad[1]-rad[0])
for j in range(0,len(rad)):
Tsol = sol_liq(float(rad[j]),g,pm,rp,Poverburd,mH2O)
Tliq = Tsol + 600.0
T_r = adiabat(rad[j],Tp,alpha,g,cp,rp) #
if T_r>Tliq:
melt_r[j] = 1.0
visc_r[j] = 1
vol_r[j] = vol_r[j]*rad[j]**2
elif T_r<Tsol:
melt_r[j] = 0.0
visc_r[j] = 1
vol_r[j] = 0.0
else:
melt_r[j] = (T_r - Tsol)/(Tliq - Tsol)
visc_r[j] = 1
vol_r[j] = vol_r[j]*rad[j]**2
if np.sum(vol_r) == 0.0:
return (0.0,0.0,0.0)
actual_phi_surf = np.sum(melt_r*vol_r)/np.sum(vol_r)
Va = np.sum(vol_r)
actual_visc = np.sum(visc_r*vol_r)/np.sum(vol_r)
return (actual_phi_surf,actual_visc,Va)
```
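`find_r` is written to be minimised numerically (it returns the squared mismatch between the adiabat and the solidus at radius `r`). Below is a hedged sketch of that use; every numerical value is an illustrative Earth-like placeholder, not one of the model's actual inputs.
```python
# Hedged sketch: locate the solidification radius by minimising find_r.
# All parameter values below are illustrative placeholders.
from scipy import optimize
from other_functions import find_r, temp_meltfrac

rp, rc = 6.371e6, 3.4e6            # planet / core radius (m)
g, pm = 9.8, 4000.0                # surface gravity (m s^-2), mantle density (kg m^-3)
alpha, cp = 3e-5, 1200.0           # thermal expansivity (K^-1), heat capacity (J kg^-1 K^-1)
Tp, Pover, mH2O = 2200.0, 1e5, 0.01

res = optimize.minimize_scalar(find_r, bounds=(rc, rp), method='bounded',
                               args=(Tp, alpha, g, cp, pm, rp, Pover, mH2O))
rs = res.x                          # radius where the adiabat crosses the solidus
phi, visc_scale, Va = temp_meltfrac(rc, rp, alpha, pm, Tp, cp, g, Pover, mH2O)
```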
#### File: joshuakt/Oxygen-False-Positives/stellar_funs.py
```python
import numpy as np
from scipy.interpolate import interp1d
def main_sun_fun(time,stellar_mass,tsat_XUV,beta_XUV,fsat):
if stellar_mass == 1.0:
stellar_data = np.loadtxt('Baraffe3.txt',skiprows=31) # for reproducing sun exactly
else:
print ("This version of code is only set up for solar mass stars")
return [time*0,time*0,time*0,time*0]
stellar_array=[]
for i in range(0,len(stellar_data[:,0])):
if stellar_data[i,0] == stellar_mass:
stellar_array.append(stellar_data[i,:])
stellar_array=np.array(stellar_array)
min_time = np.min(stellar_array[:,1])
max_time = np.max(stellar_array[:,1])
if (min_time>np.min(time) ) or (max_time<np.max(time)):
print ("Problem: exceeding time range for stellar data")
time_array = stellar_array[:,1]
Total_Lum = (10**stellar_array[:,4])
ratio_out = [] # For XUV ratio
for i in range(0,len(time_array)):
if time_array[i]<tsat_XUV:
ratio= fsat
else:
ratio = fsat*(time_array[i]/tsat_XUV)**beta_XUV
ratio_out.append(ratio)
XUV_Lum = ratio_out*Total_Lum
Total_fun = interp1d(time_array,Total_Lum)
XUV_fun = interp1d(time_array,XUV_Lum)
Relative_total_Lum = Total_fun(time)
Relative_XUV_lum = XUV_fun(time)
Absolute_total_Lum = Relative_total_Lum*3.828e26
Absolute_XUV_Lum = Relative_XUV_lum*3.828e26
return [Relative_total_Lum,Relative_XUV_lum,Absolute_total_Lum,Absolute_XUV_Lum ]
``` |
{
"source": "joshuakwan/tvshow-scheduler",
"score": 3
} |
#### File: tvshow-scheduler/scheduler/show.py
```python
from json import JSONEncoder
from display_time import DisplayTime
class Show(object):
def __init__(self, raw_data):
self.number = raw_data['number']
self.name = raw_data['name']
self.duration = DisplayTime(raw_data['duration'])
self.plan = DisplayTime(raw_data['plan'])
class ShowEncoder(JSONEncoder):
def default(self, o):
return {
"number": o.number,
"name": o.name,
"duration": {
"timeStr": o.duration._time_str,
"totalSeconds": o.duration.total_seconds,
"isNegative": o.duration.is_negative,
"hour": o.duration.hour,
"minute": o.duration.minute,
"second": o.duration.second
},
"plan": {
"timeStr": o.plan._time_str,
"totalSeconds": o.plan.total_seconds,
"isNegative": o.plan.is_negative,
"hour": o.plan.hour,
"minute": o.plan.minute,
"second": o.plan.second
}
}
```
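A short sketch of how the encoder above plugs into `json.dumps` through the `cls` argument; the sample values mirror the keys `Show.__init__` reads and are placeholders.
```python
# Usage sketch for the custom encoder; the sample show values are placeholders.
import json
from scheduler.show import Show, ShowEncoder

show = Show({'number': 1, 'name': 'Evening News', 'duration': '00:30:00', 'plan': '20:00:00'})
payload = json.dumps([show], cls=ShowEncoder, indent=2)  # ShowEncoder.default serializes each Show
```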
#### File: tvshow-scheduler/test/test_show.py
```python
import unittest
from scheduler.show import Show
class TestShow(unittest.TestCase):
def testInitialization(self):
        show = Show({'number': '1', 'name': 'show 1', 'duration': '00:03:00', 'plan': '20:00:00'})
        self.assertEqual(show.number, '1')
self.assertEqual(show.name,'show 1')
self.assertEqual(show.duration.get_time_string(),'00:03:00')
self.assertEqual(show.plan.get_time_string(),'20:00:00')
``` |
{
"source": "joshua-laughner/CAADA",
"score": 3
} |
#### File: caada/ca_pems/__main__.py
```python
from argparse import ArgumentParser
from .agglomeration import cl_dispatcher
from .files import sort_pems_files
def parse_ca_pems_agg_args(p: ArgumentParser):
p.description = 'Agglomerate Caltrans PEMS station files into a single netCDF file'
p.add_argument('pems_root', help='The path to the root directory containing the PEMS data. This must '
'be a directory with subdirectories organizing the data by district '
'named "d03", "d04", ..., "d12". DO NOT mix different time resolutions.')
p.add_argument('meta_root', help='The path to the root directory containing the PEMS metadata. This must '
'have the same organization as PEMS_ROOT.')
p.add_argument('save_path', help='The path to save the netCDF file as (including filename).')
p.add_argument('-s', '--spatial-resolution', default='county', choices=('county',),
help='What spatial resolution to agglomerate the data to.')
p.set_defaults(driver_fxn=cl_dispatcher)
def parse_ca_pems_orgfiles_args(p: ArgumentParser):
p.description = 'Organize downloaded Caltrans PEMS stations files into directories needed by the agglomerator'
p.add_argument('pems_root', help='The path to the directory where you want the actual data stored.')
p.add_argument('meta_root', help='The path to the directory where you want the metadata stored.')
p.add_argument('pems_files', nargs='+', help='All PEMS station and station metadata files to organize.')
p.add_argument('-x', '--delete-orig', action='store_true', help='Delete original files as they are moved.')
p.add_argument('-c', '--no-decompress', action='store_false', dest='decompress',
help='Do not decompress any .gz files as they are moved. By default, .gz files are decompressed '
'and, if --delete-orig is specified, deleted.')
p.add_argument('-d', '--dry-run', action='store_true', help='Print what would be done, but do not actually do it.')
p.set_defaults(driver_fxn=sort_pems_files)
```
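These functions only configure sub-parsers; below is a hedged sketch of how a top-level CLI could wire them up and dispatch on the `driver_fxn` default set above (CAADA's real entry point may differ).
```python
# Hedged sketch of a possible top-level dispatcher; the real caada entry point may differ.
from argparse import ArgumentParser
from caada.ca_pems.__main__ import parse_ca_pems_agg_args, parse_ca_pems_orgfiles_args

parser = ArgumentParser(prog='caada-pems')
sub = parser.add_subparsers(dest='command', required=True)
parse_ca_pems_agg_args(sub.add_parser('agg'))
parse_ca_pems_orgfiles_args(sub.add_parser('organize'))

args = vars(parser.parse_args())
args.pop('command')
driver = args.pop('driver_fxn')   # set by set_defaults() in the parser functions above
driver(**args)                    # remaining namespace entries become keyword arguments
```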
#### File: caada/common_ancillary/__init__.py
```python
import geopandas as gpd
import netCDF4 as ncdf
import numpy as np
import os
import pandas as pd
from typing import Sequence, Optional, Union
from ..caada_typing import intseq
from jllutils.subutils import ncdf as ncio
_county_shapefile = os.path.join(os.path.dirname(__file__), 'county_shp_files', 'cb_2018_us_county_20m.shp')
_county_gdf = gpd.read_file(_county_shapefile)
_county_gdf.rename(columns=lambda s: s.lower(), inplace=True)
_county_gdf['statefp'] = _county_gdf['statefp'].astype('int')
_county_gdf['countyfp'] = _county_gdf['countyfp'].astype('int')
_state_shapefile = os.path.join(os.path.dirname(__file__), 'state_shp_files', 'cb_2018_us_state_20m.shp')
_state_gdf = gpd.read_file(_state_shapefile)
_state_gdf.rename(columns=lambda s: s.lower(), inplace=True)
_state_gdf.sort_values('name', inplace=True)
_state_gdf['statefp'] = _state_gdf['statefp'].astype('int')
conus_states = ('AL', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'ID',
'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI',
'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY',
'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN',
'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY')
def _standardize_input_ids(county_ids, state_ids):
if county_ids is None and state_ids is None:
county_ids = [county_ids]
state_ids = [state_ids]
elif isinstance(county_ids, (int, type(None))):
county_ids = [county_ids]
if state_ids is not None and not isinstance(state_ids, int):
county_ids *= len(state_ids)
if state_ids is None or isinstance(state_ids, int):
state_ids = [state_ids] * len(county_ids)
elif len(state_ids) != len(county_ids):
raise ValueError('Either provide a single state ID for all counties (as an integer) or a sequence the same '
'length as county_ids')
return county_ids, state_ids
def get_county_polygons(county_ids: Optional[intseq], state_ids: Union[int, intseq]):
"""Get a dataframe with the polygons or multipolygons representing county borders
The returned dataframe will have the county polygons for each county listed. `county_ids` and `state_ids` may both
be scalar integers or lists of integers. If both are lists, then then must be the same length. Each county is drawn
from the corresponding state in the list. If one is a scalar, it will be used for every element in the other, e.g.
if `state_ids = 6` then 6 will be used as the state for every county listed in `county_ids`. If you want two
counties from state 1 and three from state 2, then you would pass `county_ids = [a1, b1, c1, a2, b2]` and
`state_ids = [1, 1, 1, 2, 2]`.
Parameters
----------
county_ids
The numeric census IDs of counties to get polygons for. May be an integer, `None`, or sequence of both. If
`None`, then all counties for each state listed will be returned. If `None` is given as an element of the
list, then all counties for the corresponding state are returned.
state_ids
The numeric census IDs of states that each county belongs in. These may not be `None` and must be an integer
or list of integers.
Returns
-------
List[Polygons]
The list of Shapely polygons corresponding to the counties requested.
"""
gdf_subset = get_poly_gdf_subset(county_ids, state_ids)
return gdf_subset['geometry'].tolist()
def get_poly_gdf_subset(county_ids: Optional[intseq], state_ids: Union[int, intseq]):
county_ids, state_ids = _standardize_input_ids(county_ids, state_ids)
polys = []
for cid, sid in zip(county_ids, state_ids):
if sid is None:
xx = (_county_gdf['statefp'] > -99)
else:
xx = (_county_gdf['statefp'] == sid)
if cid is not None:
xx &= (_county_gdf['countyfp'] == cid)
polys.append(_county_gdf.loc[xx, :])
return pd.concat(polys, axis=0)
def get_state_polygons(state_ids, as_gdf=False):
if isinstance(state_ids, int) or state_ids is None:
state_ids = [state_ids]
polys = []
for sid in state_ids:
if sid is None:
continue
xx = _state_gdf['statefp'] == sid
if xx.sum() != 1:
raise IndexError('Expected 1 match for state ID = {}, instead got {}'.format(sid, xx.sum()))
elif as_gdf:
polys.append(_state_gdf.index[xx].item())
else:
polys.append(_state_gdf.loc[xx, 'geometry'].item())
if as_gdf and len(polys) > 0:
return _state_gdf.loc[polys, :]
elif as_gdf:
return _state_gdf
elif len(polys) > 0:
return polys
else:
return _state_gdf['geometry'].tolist()
def geometry_to_lat_lon(geo):
if geo.geom_type == 'MultiPolygon':
lats = []
lons = []
for g in geo.geoms:
y, x = geometry_to_lat_lon(g)
lons.append(x)
lats.append(y)
lons.append(np.array([np.nan]))
lats.append(np.array([np.nan]))
# There will always be an extra NaN at the end we don't need to concatenate
lats = np.concatenate(lats[:-1], axis=0)
lons = np.concatenate(lons[:-1], axis=0)
elif geo.geom_type == 'Polygon':
lons, lats = zip(*geo.exterior.coords)
lons = np.array(lons)
lats = np.array(lats)
else:
raise NotImplementedError('Cannot convert geometry of type "{}"'.format(geo.geom_type))
return lats, lons
def add_county_polys_to_ncdf(nch: ncdf.Dataset, county_ids: Sequence[int], state_ids: Sequence[int],
county_dimension: str = 'county'):
polys = get_county_polygons(county_ids, state_ids)
# Convert to an array of lat/lon
poly_latlon = np.empty([len(polys), 2], object)
for i, p in enumerate(polys):
lat, lon = geometry_to_lat_lon(p)
poly_latlon[i, 0] = lat.astype('float32')
poly_latlon[i, 1] = lon.astype('float32')
# Create 2 variables: one for the county bounds lat/lon as numbers and one for the "well known text" representation
vlen_t = nch.createVLType(np.float32, 'county_bounds_vlen')
ncio.make_ncdim_helper(nch, 'bounds_coord', np.array([0, 1]),
description='Index for shape bounds. 0 = latitude, 1 = longitude.')
bounds_var = nch.createVariable("county_bounds", vlen_t, (county_dimension, 'bounds_coord'))
bounds_var[:] = poly_latlon
bounds_var.setncattr('crs', str(_county_gdf.crs))
bounds_var.setncattr('description', "The latitude and longitude of each county's boundaries")
bounds_var.setncattr('note', 'If fill values are present, they indicate breaks between coordinates for unconnected polygons')
wkt_var = nch.createVariable('county_bounds_wkt', str, county_dimension)
for i, p in enumerate(polys):
# assume the polys are in the same order as the county IDs - the county IDs MUST be given in the order they
# are in the netCDF file
wkt_var[i] = p.to_wkt()
wkt_var.setncattr('crs', str(_county_gdf.crs))
wkt_var.setncattr('description', 'The county shape described in the CRS well known text format')
```
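A quick sketch of pulling county outlines with the helpers above; the FIPS codes shown (state 6 for California, counties 37 and 59) are illustrative.
```python
# Sketch: fetch county outlines and flatten them to lat/lon arrays.
# FIPS codes are illustrative placeholders (6 = California; 37/59 = two of its counties).
from caada.common_ancillary import get_county_polygons, geometry_to_lat_lon

polys = get_county_polygons(county_ids=[37, 59], state_ids=6)  # one shapely geometry per county
for poly in polys:
    lats, lons = geometry_to_lat_lon(poly)   # NaN-separated rings when a county is a multipolygon
```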
#### File: caada/epa_cems/__main__.py
```python
from argparse import ArgumentParser
from .web import download_cl_driver
def parse_cems_download_args(p: ArgumentParser):
p.description = 'Downlowd continuous emissions data from the US EPA FTP server'
p.add_argument('time_res', choices=('daily', 'hourly'), help='Which time resolution of data to download')
p.add_argument('start_time', help='Beginning of time period to download, in YYYY-MM-DD format.')
p.add_argument('stop_time', help='End of time period to download, in YYYY-MM-DD format.')
p.add_argument('-s', '--save-dir', default='.',
help='Where to save the downloaded files. Default is the current directory.')
p.add_argument('-c', '--no-decompress', action='store_false', dest='unzip',
help='By default the .zip files downloaded are unzipped into their .csv file. Pass this flag to '
'skip that and leave them as .zip files.')
p.add_argument('-k', '--keep-zip', action='store_false', dest='delete_zip',
                   help='If the .zip files are decompressed, they are deleted by default. Pass this flag to skip '
'deleting them. This has no effect either way if --no-decompress is set.')
p.set_defaults(driver_fxn=download_cl_driver)
p.epilog = 'A note on the start and stop time: the hourly data is provided in monthly files and the daily data ' \
'in quarterly files. The start/stop time given filter based on the first date of the files. That is, ' \
'if you specify a start date of 15 Jan 2020 and an end date of 15 Feb 2020 for the hourly data, the ' \
'file for February will be downloaded because 15 Jan <= 1 Feb <= 15 Feb, but the file for January will ' \
'NOT be downloaded.'
```
#### File: caada/epa_cems/web.py
```python
import ftplib
import os
import pandas as pd
from pathlib import Path
import re
from zipfile import ZipFile
from ..common_ancillary import conus_states
from ..caada_logging import logger
from ..caada_typing import datetimelike, pathlike
class EPAFTP(ftplib.FTP):
"""Class that manages a connection to the EPA FTP server.
This wraps the standard :class:`ftplib.FTP` class but supplies the URLs by default and contains a helper
:meth:`download` method to support batch downloading of Continuous Emission Monitoring System (CEMS) files.
This can be used as a context manager::
with EPAFTP() as ftp:
ftp.download(...)
This is encouraged because it ensures that the FTP connection is closed whether or not an error occurs.
"""
def __init__(self, url: str = 'newftp.epa.gov', *args, **kwargs):
"""Instantiate a connection to the EPA FTP server
Parameters
----------
url
            The URL of the FTP server. Should not need to be changed.
args, kwargs
Additional positional and keyword arguments are passed through to :class:`ftplib.FTP`. No additional
arguments should be required.
"""
super(EPAFTP, self).__init__(url, *args, **kwargs)
self.login()
def download(self, time_res: str, start_time: datetimelike, stop_time: datetimelike, save_dir: pathlike,
states: str = 'all', unzip: bool = True, delete_zip: bool = True):
"""Download a collection of EPA CEMS files.
Parameters
----------
time_res
Which time resolution of files to get. Options are "daily" or "hourly".
start_time, stop_time
Beginning and end of the time period to download data for. See Notes, below. This may be any format that
:class:`pandas.Timestamp` recognizes as a valid input.
save_dir
Path to save the CEMS files to.
states
Which states to download data for. The string `"all"` will download all states, and `"conus"` will download
only continental US states. If you want to limit to specific states, pass a sequence of state abbreviations,
e.g. `("ca", "or", "wa")`.
unzip
Whether to unzip the .zip files after downloading.
delete_zip
Whether to delete the .zip files after extracting the contained .csv file. Has no effect if `unzip` is
`False`.
Notes
-----
Time filtering is done based on the start time of each file. Since the hourly data is organized into monthly
        files and the daily data is organized into quarterly files, a time range from 2020-03-15 to 2020-04-15 would
download the April 2020 hourly file or Q2 daily file (because April 1, 2020 is the start date for both files),
however it will *not* download the March 2020 hourly or Q1 2020 daily file, even though the first part of the
date range overlaps those files.
"""
save_dir = Path(save_dir)
start_time = pd.Timestamp(start_time)
stop_time = pd.Timestamp(stop_time)
if not save_dir.is_dir():
raise IOError('Save directory ({}) either does not exist or is a file.'.format(save_dir))
if time_res == 'hourly':
files = self._hourly_file_list(start_time, stop_time, states)
elif time_res == 'daily':
files = self._daily_file_list(start_time, stop_time, states)
else:
raise ValueError('Unknown option for time_res: "{}". Allowed values are "hourly", "daily".')
for dir_url, fnames in files.items():
logger.info('Downloading {} files from {}'.format(len(fnames), dir_url))
self.cwd(dir_url)
for f in fnames:
cmd = 'RETR {}'.format(f)
logger.debug('Downloading {} (command is {})'.format(f, cmd))
with open(save_dir / f, 'wb') as wobj:
self.retrbinary(cmd, wobj.write)
if unzip:
self._unzip_file(save_dir / f, delete_zip=delete_zip)
def _hourly_file_list(self, start_time, stop_time, states='all'):
url = '/DMDnLoad/emissions/hourly/monthly/'
all_files = []
for year in range(start_time.year, stop_time.year+1):
self.cwd('{}{}'.format(url, year))
all_files += self.nlst()
file_info = []
for f in all_files:
fname = f.split('/')[-1]
m = re.search(r'(\d{4})([a-z]{2})(\d{2})', fname, re.IGNORECASE)
year, month, state = int(m.group(1)), int(m.group(3)), m.group(2)
file_info.append({'name': fname, 'date': pd.Timestamp(year, month, 1), 'state': state.lower()})
return self._filter_files(url, file_info, start_time, stop_time, states)
def _daily_file_list(self, start_time, stop_time, states='all'):
url = '/DMDnLoad/emissions/daily/quarterly/'
all_files = []
for year in range(start_time.year, stop_time.year+1):
dir_url = '{}{}'.format(url, year)
logger.debug('Changing remote directory to {}'.format(dir_url))
self.cwd(dir_url)
all_files += self.nlst()
file_info = []
for f in all_files:
fname = f.split('/')[-1]
m = re.search(r'(\d{4})([a-z]{2})(Q\d)', fname)
year, quarter, state = int(m.group(1)), m.group(3), m.group(2)
file_info.append({'name': fname, 'date': self._q_to_date(year, quarter), 'state': state.lower()})
return self._filter_files(url, file_info, start_time, stop_time, states)
@staticmethod
def _filter_files(base_url, file_info, start_time, stop_time, states):
if not base_url.endswith('/'):
base_url += '/'
if states == 'all':
states = set([f['state'] for f in file_info])
elif states == 'conus':
states = conus_states
states = [s.lower() for s in states]
file_info = [f for f in file_info if start_time <= f['date'] <= stop_time and f['state'] in states]
# Group the files by FTP directory so that we only have to cd to a directory once
file_dict = dict()
for f in file_info:
dir_url = '{}{}'.format(base_url, f['date'].year)
if dir_url not in file_dict:
file_dict[dir_url] = []
file_dict[dir_url].append(f['name'])
return file_dict
@staticmethod
def _q_to_date(year, q):
if q.lower() == 'q1':
return pd.Timestamp(year, 1, 1)
elif q.lower() == 'q2':
return pd.Timestamp(year, 4, 1)
elif q.lower() == 'q3':
return pd.Timestamp(year, 7, 1)
elif q.lower() == 'q4':
return pd.Timestamp(year, 10, 1)
else:
return ValueError('Unknown value for quarter. Expected "qN" or "QN" where N is 1, 2, 3, or 4.')
@staticmethod
def _unzip_file(zip_path: Path, delete_zip):
logger.debug('Decompressing {}'.format(zip_path))
with ZipFile(zip_path) as z:
for mem in z.infolist():
if os.sep in mem.filename:
raise IOError('A member of {} contains path separators. This is unexpected and potentially unsafe.'.format(zip_path))
out_path = z.extract(mem, path=zip_path.parent)
logger.debug('Created {}'.format(out_path))
if delete_zip:
os.remove(zip_path)
logger.debug('Deleted {}'.format(zip_path))
def download_cl_driver(time_res: str, start_time: datetimelike, stop_time: datetimelike,
save_dir: str = '.', unzip: bool = True, delete_zip: bool = True):
"""Download EPA continuous emissions monitoring system data via FTP
Creates a default :class:`EPAFTP` connection and downloads CEMS data. All arguments correspond to those for
:meth:`EPAFTP.download`.
"""
with EPAFTP() as ftp:
ftp.download(time_res=time_res, start_time=start_time, stop_time=stop_time, save_dir=save_dir,
unzip=unzip, delete_zip=delete_zip)
```
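For completeness, the call most users would make through the convenience wrapper defined above (dates and directory are placeholders; the save directory must already exist).
```python
# Example call of the wrapper above; dates and save directory are placeholders.
from caada.epa_cems.web import download_cl_driver

download_cl_driver('daily', '2020-01-01', '2020-06-30', save_dir='cems_daily',
                   unzip=True, delete_zip=True)
# Downloads the 2020 Q1 and Q2 daily CEMS files for all states, extracting each .csv as it arrives.
```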
#### File: caada/opensky/web.py
```python
import os
from pandas import Timedelta
import requests
import time
from . import get_airport_code_source
from ..caada_errors import HTMLRequestError
from ..caada_typing import pathlike
from ..caada_logging import logger
def _download_airport_codes(source='openflights', update='never'):
"""Download geographic data linked to airport codes.
Parameters
----------
source
Which web source to pull data from. Currently the only allowed option is `"openflights"`.
update
Controls whether CAADA redownloads the needed data or not. Possible values are:
* `"never"` - only download if no local copy is available.
* `"periodically"` - only download if the local copy is more than a week old.
* `"always"` - always redownloads
Returns
-------
None
"""
entry = get_airport_code_source(source)
local_file = entry['local']
remote_url = entry['remote']
_download_airport_code_data(source, local_file, remote_url, update)
def _download_airport_code_data(source_name: str, local_file: pathlike, remote_url: str, update: str = 'never'):
"""General driver for geographic airport data in .csv format.
Parameters
----------
source_name
Name the user passes to identify this source.
local_file
Path to where the local file is or should be
remote_url
URL to where the data is on the web
update
Controls whether CAADA redownloads the needed data or not. Possible values are:
* `"never"` - only download if no local copy is available.
* `"periodically"` - only download if the local copy is more than a week old.
* `"always"` - always redownloads
Returns
-------
None
Returns nothing, downloads the file to `local_file`.
"""
if update == 'never':
if local_file.exists():
logger.debug('%s already exists', local_file)
return
else:
logger.info('%s does not exist, must download', local_file)
elif update == 'periodically':
if local_file.exists():
mtime = os.path.getmtime(local_file)
age = time.time() - mtime
td = str(Timedelta(seconds=age))
if age < 7*24*3600:
# don't update if it's been modified within the last week
logger.debug('%s recently updated (%s old), not updating', local_file, td)
return
else:
logger.debug('%s more than 7 days old (%s old), will update', local_file, td)
elif update != 'always':
raise ValueError('Bad value for update: "{}". Options are "never", "periodically", and "always".'.format(update))
logger.info('Downloading %s to %s', remote_url, local_file)
r = requests.get(remote_url)
if r.status_code != 200:
raise HTMLRequestError('Error retrieving {} airport codes. HTTP status code was {}'.format(source_name, r.status_code))
with open(local_file, 'wb') as wobj:
wobj.write(r.content)
logger.info('Download successful.')
``` |
{
"source": "joshualee155/FundOptimizer",
"score": 3
} |
#### File: joshualee155/FundOptimizer/fund_holding.py
```python
import pandas as pd
from bs4 import BeautifulSoup
import requests
import re
from tqdm import tqdm
def get_fund_holding(symbol):
url = 'http://finance.sina.com.cn/fund/quotes/{}/bc.shtml'.format(symbol)
html = requests.get(url)
bs = BeautifulSoup(html.content, features="lxml")
tbl = bs.find('table', {'id':'fund_sdzc_table'})
if tbl is None or tbl.tbody.text=='\n':
return
pat = re.compile('\d\d\d\d-\d\d-\d\d')
report_date = pd.to_datetime( pat.findall(bs.find('div', {'class':'zqx_zcpz_date'}).text)[0] )
stocks = tbl.attrs['codelist'].split(',')
ts_codes = [ s[2:]+'.'+s[:2].upper() for s in stocks]
holding = pd.read_html(tbl.prettify())[0]
data_dict = dict(zip(ts_codes, holding[('占净值比例(%)', '持股比例')].str[:-1].astype(float)))
data = pd.DataFrame.from_dict(data_dict, 'index', columns=['holding'])
# lib_fund_holding.write(fund, data, metadata={'report_date':report_date})
return data
if __name__ == "__main__":
import numpy as np
symbols, _ = np.genfromtxt( './refData/fund_list.csv', dtype = str, delimiter = ',', unpack = True )
symbols = [ symbol.zfill(6) for symbol in symbols ]
res = []
for symbol in tqdm(symbols):
df = get_fund_holding(symbol)
if df is not None:
res.append( df.reset_index().assign(fund=symbol) )
fund_holding = pd.concat(res, ignore_index=True)
fund_holding.to_csv('fund_holding.csv')
```
#### File: FundOptimizer/tests/testFundOptimiser.py
```python
import unittest
from fundopt.fundopt import FundTargetRetMinCVaROptimiser
import numpy as np
import pandas as pd
import datetime as dt
import os
class Test_testFundOptimiser(unittest.TestCase):
def test_Optimise(self):
startDate = dt.date(2017, 1, 26)
endDate = dt.date(2018, 1, 26)
DateIndex = pd.date_range( startDate, endDate, freq='D' )
fundList = np.genfromtxt( './refData/AvailableFundList.txt', dtype = str )
fundList = fundList[:10]
currentPosition = [0.0]*len(fundList)
currentPosition[0] = 50000.0
# kwargs = { 'prefix' : 'temp', 'index_col' : 1, 'date_cols' : ['Date'] }
optimiser = FundTargetRetMinCVaROptimiser(
targetRet = 0.005,
startDate = startDate,
endDate = endDate,
holdingPeriod = 30,
longOnly = True,
fundList = fundList,
currentPosition = currentPosition,
)
optMovement = optimiser.getOptimalPosition(verbose = True)
newPosition = np.array( currentPosition ) + optMovement
currentDate = dt.date(2018, 2, 1)
initialValue = sum( newPosition )
print( initialValue )
currentValue = initialValue
for fund, position in zip( fundList, newPosition ):
currentValue += position * optimiser.tsLoader[fund].getReturnByDate( StartDate = endDate, EndDate = currentDate )
print( currentValue )
if __name__ == '__main__':
unittest.main()
```
#### File: FundOptimizer/utils/fundoptutils.py
```python
import pandas as pd
import datetime as dt
class FundType( object ):
OF = 'Open Ended Fund'
ETF = 'Exchange Traded Fund'
LOF = 'Listed Open Ended Fund'
MMF = 'Money Market Fund'
def getFundType( fundCode ):
fundTypeDf = pd.read_csv( 'refData/fund_list.csv', names = [ 'fundCode', 'fundType' ] )
fundTypeDf[ 'fundCode' ] = fundTypeDf[ 'fundCode' ].apply( lambda x: str(x).zfill(6) )
fundTypeDf.drop_duplicates( subset = [ 'fundCode' ], inplace = True )
fundTypeDf.set_index( 'fundCode', drop = True, inplace = True )
try:
sType = fundTypeDf[ 'fundType' ][ fundCode ]
if sType == 'OF':
return FundType.OF
elif sType == 'ETF':
return FundType.ETF
elif sType == 'LOF':
return FundType.LOF
elif sType == 'MMF':
return FundType.MMF
else:
raise NameError( "Unknown fund type %s" % sType )
except KeyError:
return FundType.OF
def str2date( sDate ):
"""
Convert a string date to datetime.date
"""
try:
dateTime = dt.datetime.strptime( sDate, "%Y%m%d" )
except ValueError:
dateTime = dt.datetime.strptime( sDate, "%Y-%m-%d" )
return dateTime.date()
def getHolidays( startDate, endDate ):
"""
Return China exchange holidays ( non-trading days ) from `startDate` to `endDate`
"""
with open( 'refData/holidays.txt', 'r' ) as f:
holidays = f.read().strip().split('\n')
holidays = [ date for date in map( str2date, holidays ) if date >= startDate and date <= endDate ]
return holidays
``` |
{
"source": "joshualee155/ml-stock-prediction",
"score": 3
} |
#### File: ml-stock-prediction/agent/stochastic_policy_gradient_agent.py
```python
import tensorflow as tf
import numpy as np
class StochasticPolicyGradientAgent():
"""
A Gaussian Policy Gradient based agent implementation
"""
def __init__(self, env, learning_rate = 0.001, discount_rate = 0.99, batch_size = 1, quiet = True):
self._optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
self._sess = tf.Session()
self._env = env
self._batch_size = batch_size
self._discount_rate = discount_rate
self._state_buffer = []
self._reward_buffer = []
self._action_buffer = []
self._quiet = quiet
state_dim = np.prod(np.array(env.observation_space.shape))
self._states = tf.placeholder(tf.float32,
shape=(None, state_dim),
name="states")
init = tf.contrib.layers.xavier_initializer()
# neural featurizer parameters
h1 = 256
h2 = 128
h3 = 128
mu_hidden = tf.layers.dense(self._states, h1,
activation = tf.nn.tanh,
name = 'dense_0',
kernel_initializer=init)
mu_hidden_2 = tf.layers.dense(mu_hidden, h2,
activation = tf.nn.tanh,
name = 'dense_1',
kernel_initializer=init)
mu_hidden_3 = tf.layers.dense(mu_hidden_2, h3,
activation = tf.nn.tanh,
name = 'dense_2',
kernel_initializer=init)
self._mu = tf.layers.dense(mu_hidden_3, 1,
activation = tf.tanh,
name = 'mu',
kernel_initializer=init)
self._mu = tf.squeeze(self._mu)
# Building sigma Model
sig_hidden = tf.layers.dense(self._states, h1,
activation = tf.sigmoid,
name = 'sigma_dense_0',
kernel_initializer=init)
sig_hidden_2 = tf.layers.dense(sig_hidden, h2,
activation = tf.sigmoid,
name = 'sig_dense_1',
kernel_initializer=init)
sig_hidden_3 = tf.layers.dense(sig_hidden_2, h3,
activation = tf.sigmoid,
name = 'sig_dense_2',
kernel_initializer=init)
self._sigma = tf.layers.dense(sig_hidden_3, 1,
activation = tf.exp,
name = 'sigma',
kernel_initializer=init)
self._sigma = tf.squeeze(self._sigma)
self._sigma = tf.add(self._sigma, 1e-5)
#Sampling action from distribuition
self._normal_dist = tf.contrib.distributions.Normal(self._mu, self._sigma)
self._action = self._normal_dist.sample()
#Computing loss function
self._discounted_rewards = tf.placeholder(tf.float32, (None, 1), name="discounted_rewards")
self._taken_actions = tf.placeholder(tf.float32, (None, 1), name="taken_actions")
self._loss = -tf.reduce_mean(tf.log(1e-5 + self._normal_dist.prob(self._taken_actions)) * self._discounted_rewards,0)
self._train_op = self._optimizer.minimize(self._loss)
self._sess.run(tf.global_variables_initializer())
def act(self, state):
mu, sigma, action = self._sess.run([self._mu, self._sigma, self._action], feed_dict={
self._states: state})
action = np.clip(action, self._env.action_space.low[0], self._env.action_space.high[0])
if not self._quiet:
print("Sigma: {}, Mu: {}, Action: {}".format(sigma, mu, action))
return action
def train(self):
rewards = self._discount_rewards().tolist()
#rewards -= np.mean(rewards)
rewards = [[r] for r in rewards]
samples = []
for t in range(len(self._state_buffer)):
samples.append([self._state_buffer[t], rewards[t], self._action_buffer[t]])
np.random.shuffle(samples)
batches = []
for i in range(0, len(samples), self._batch_size):
batches.append(samples[i:i + self._batch_size])
for b in range(len(batches)):
batch = batches[b]
states_batch = [row[0] for row in batch]
actions_batch = [row[2] for row in batch]
rewards_batch = [row[1] for row in batch]
feed_dict={
self._states: states_batch,
self._discounted_rewards: rewards_batch,
self._taken_actions: actions_batch}
self._sess.run([self._train_op], feed_dict=feed_dict)
#After applying gradients
self._state_buffer = []
self._reward_buffer = []
self._action_buffer = []
def store_step(self, action, state, reward):
self._state_buffer.append(state)
self._reward_buffer.append(np.array(reward))
self._action_buffer.append(np.array([action]))
def _discount_rewards(self):
r = 0
N = len(self._reward_buffer)
discounted_rewards = np.zeros(N)
for t in reversed(range(N)):
r = r + self._reward_buffer[t] * self._discount_rate
discounted_rewards[t] = r
return discounted_rewards
```
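The agent exposes a simple `act` / `store_step` / `train` cycle; below is a hedged sketch of an episodic, on-policy training loop around it. The environment id is the one used by the repo's test scripts, and the reshaping assumes the environment returns flat numpy observations.
```python
# Hedged sketch of a REINFORCE-style loop around the agent above; assumes the custom
# trading environment returns flat numpy observations and accepts a scalar action.
import gym
import trading_env   # registers the custom trading environments (assumption)
from agent.stochastic_policy_gradient_agent import StochasticPolicyGradientAgent

env = gym.make('test_trading-v2')
agent = StochasticPolicyGradientAgent(env, batch_size=32, quiet=True)

for episode in range(200):
    state = env.reset()
    done = False
    while not done:
        action = agent.act(state.reshape(1, -1))    # the states placeholder expects a (batch, dim) array
        next_state, reward, done, _ = env.step(action)
        agent.store_step(action, state.reshape(-1), reward)   # note the (action, state, reward) order
        state = next_state
    agent.train()   # fits on the whole episode, then clears the internal buffers
```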
#### File: ml-stock-prediction/agent_v2/dqn_agent.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from collections import deque
import math
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Model(nn.Module):
def __init__(self, state_dim):
super(Model, self).__init__()
h1 = 512
h2 = 256
h3 = 128
self.hidden_1 = nn.Linear(state_dim, h1)
self.hidden_2 = nn.Linear(h1, h2)
self.hidden_3 = nn.Linear(h2, h3)
self.output = nn.Linear(h3, 3)
def forward(self, x):
x = F.relu(self.hidden_1(x))
x = F.relu(self.hidden_2(x))
x = F.relu(self.hidden_3(x))
return self.output(x)
class DQNAgent():
def __init__(self, env, gamma=0.99, buffer_size = 1000000, update_steps = 100,
epsilon=1.0, epsilon_min=0.01, epsilon_log_decay=0.999, tau = 0.01, max_grad_norm = 10,
alpha=1e-4, alpha_decay=0.001, batch_size=128, quiet=False):
self.env = env
self.memory = deque(maxlen = buffer_size)
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_log_decay
self.alpha = alpha
self.alpha_decay = alpha_decay
self.update_steps = update_steps
self.max_grad_norm = max_grad_norm
self._tau = tau
self._batch_size = batch_size
self.quiet = quiet
self._state_dim = np.prod(np.array(env.observation_space.shape))
self.model = Model(self._state_dim).to(device)
self.target = Model(self._state_dim).to(device)
# Align model network and target network
self.copy_target(1.0)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.alpha)
def get_gradient_norm(self, model):
total_norm = 0.0
for p in model.parameters():
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
return total_norm
def store_step(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state, greedy=False):
state = torch.from_numpy(state).float().to(device)
if greedy:
# epsilon = max(self.epsilon_min, min(self.epsilon, 1.0 - math.log10((step + 1) * self.epsilon_decay)))
epsilon = self.epsilon
q_values = self.model(state)
q_values = q_values.detach().cpu().numpy()
random_action = self.env.action_space.sample() # action space: 0, 1, 2
q_max_action = np.argmax(q_values)
action = random_action if (np.random.random() <= epsilon) else q_max_action
return action
else:
q_values = self.model(state)
q_values = q_values.detach().cpu().numpy()
return np.argmax(q_values)
def train(self):
loss_fn = nn.MSELoss()
batch_size = self._batch_size
grad = 0.0
for _ in range(self.update_steps):
batch = random.sample(self.memory, batch_size)
states, actions, rewards, next_states, dones = zip(*batch)
states = torch.tensor(states).float().to(device)
actions = torch.tensor(actions).long().to(device)
rewards = torch.tensor(rewards).float().to(device)
next_states = torch.tensor(next_states).float().to(device)
dones = torch.tensor(dones).float().to(device)
with torch.no_grad():
y_targets = rewards + (1 - dones) * self.gamma * self.target(next_states).detach().max(1)[0]
# loss = torch.mean( (y_preds - y_targets)**2 )
self.optimizer.zero_grad()
# actions: (0, 1, 2)
y_preds = self.model(states).gather(1, actions.unsqueeze(1)).squeeze()
loss = loss_fn( y_preds, y_targets )
# print("Loss before update: {}".format(loss.item()))
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
grad += self.get_gradient_norm(self.model)
self.optimizer.step()
# re-evaluate loss function
# y_preds = self.model(states).gather(1, actions.unsqueeze(1)).squeeze()
# loss = loss_fn( y_preds, y_targets )
# print("Loss after update: {}".format(loss.item()))
self.copy_target( 1.0 )
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
return grad / self.update_steps
    def copy_target(self, polyak=1.0):
        """Copy model parameters to the target network.
        Args:
            polyak (float, optional): Polyak averaging weight. Defaults to 1.0 (hard copy).
        """
        with torch.no_grad():
            for var1, var2 in zip(self.model.parameters(), self.target.parameters()):
                # update the target tensor in place; rebinding the loop variable leaves the target unchanged
                var2.copy_(polyak * var1 + (1 - polyak) * var2)
```
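Because `train()` samples `self.memory` directly, the replay buffer needs at least `batch_size` transitions before the first update. Below is a hedged sketch of a loop with that warm-up; the environment id is a placeholder for any env with a `Discrete(3)` action space and flat numpy observations, which is what the Q-network above implies.
```python
# Hedged sketch of a DQN training loop; 'trading-discrete-v0' is a placeholder id for an
# environment with a Discrete(3) action space and flat numpy observations.
import gym
from agent_v2.dqn_agent import DQNAgent

env = gym.make('trading-discrete-v0')
agent = DQNAgent(env, batch_size=128, update_steps=50)

# Warm up the replay buffer so random.sample() inside train() has enough transitions.
while len(agent.memory) < agent._batch_size:
    state, done = env.reset(), False
    while not done:
        action = env.action_space.sample()
        next_state, reward, done, _ = env.step(action)
        agent.store_step(state, action, reward, next_state, done)
        state = next_state

for episode in range(200):
    state, done = env.reset(), False
    while not done:
        action = agent.act(state, greedy=True)   # greedy=True enables epsilon-greedy exploration here
        next_state, reward, done, _ = env.step(action)
        agent.store_step(state, action, reward, next_state, done)
        state = next_state
    agent.train()   # also decays epsilon and refreshes the target network
```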
#### File: ml-stock-prediction/agent_v2/spg_agent.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Model(nn.Module):
def __init__(self, state_dim):
super(Model, self).__init__()
h1 = 256
h2 = 128
h3 = 128
self.hidden_1 = nn.Linear(state_dim, h1)
self.hidden_2 = nn.Linear(h1, h2)
self.hidden_3 = nn.Linear(h2, h3)
self.output = nn.Linear(h3, 1)
def forward(self, x):
x = F.tanh(self.hidden_1(x))
x = F.tanh(self.hidden_2(x))
x = F.tanh(self.hidden_3(x))
return F.tanh(self.output(x))
class StochasticPolicyGradientAgent():
"""
A Gaussian Policy Gradient based agent implementation
"""
def __init__(self, env, learning_rate = 0.001, discount_rate = 0.99, batch_size = 1, quiet = True):
self._env = env
self._batch_size = batch_size
self._discount_rate = discount_rate
self._state_buffer = []
self._reward_buffer = []
self._action_buffer = []
self._log_prob_buffer = []
self._quiet = quiet
state_dim = np.prod(np.array(env.observation_space.shape))
self._mu_model = Model(state_dim).to(device)
self._sigma_model = Model(state_dim).to(device)
self._optimizer = torch.optim.Adam(params=[ { 'params' : self._mu_model.parameters() },
{ 'params' : self._sigma_model.parameters() }
])
#Sampling action from distribuition
# self._normal_dist = tf.contrib.distributions.Normal(self._mu, self._sigma)
# self._action = self._normal_dist.sample()
# #Computing loss function
# self._discounted_rewards = tf.placeholder(tf.float32, (None, 1), name="discounted_rewards")
# self._taken_actions = tf.placeholder(tf.float32, (None, 1), name="taken_actions")
# self._loss = -tf.reduce_mean(tf.log(1e-5 + self._normal_dist.prob(self._taken_actions)) * self._discounted_rewards,0)
# self._train_op = self._optimizer.minimize(self._loss)
# self._sess.run(tf.global_variables_initializer())
def act(self, state):
state = torch.from_numpy(state).float().to(device)
mu = self._mu_model(state)
log_sigma = self._sigma_model(state)
dist = Normal(mu, torch.exp(log_sigma))
action = dist.sample()
action = torch.clamp(action, self._env.action_space.low[0], self._env.action_space.high[0])
log_prob = dist.log_prob(action)
self._log_prob_buffer.append(log_prob)
        if not self._quiet:
            print("Sigma: {}, Mu: {}, Action: {}".format(torch.exp(log_sigma).item(), mu.item(), action.item()))
return action.item()
def train(self):
rewards = self._discount_rewards().tolist()
#rewards -= np.mean(rewards)
samples = []
for t in range(len(rewards)):
samples.append([self._log_prob_buffer[t], rewards[t]])
np.random.shuffle(samples)
batches = []
for i in range(0, len(samples), self._batch_size):
batches.append(samples[i:i + self._batch_size])
        for batch in batches:
            # REINFORCE loss: minimise the negative reward-weighted log-probability
            loss = torch.cat([-log_prob * r for log_prob, r in batch]).sum()
            self._optimizer.zero_grad()
            loss.backward()
            self._optimizer.step()
#After applying gradients
self._state_buffer = []
self._reward_buffer = []
self._log_prob_buffer = []
def store_step(self, state, reward):
self._state_buffer.append(state)
self._reward_buffer.append(np.array(reward))
def _discount_rewards(self):
r = 0
N = len(self._reward_buffer)
discounted_rewards = np.zeros(N)
for t in reversed(range(N)):
r = r + self._reward_buffer[t] * self._discount_rate
discounted_rewards[t] = r
return discounted_rewards
```
#### File: joshualee155/ml-stock-prediction/test_ddpg_v3.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import gym
import trading_env
from agent_v2.ddpg_agent_lstm import DDPGAgent
import sys
def main():
env_trading = gym.make('test_trading-v2')
NUM_EP = 400
agentDDPG = DDPGAgent(env_trading,
buffer_size=1000000,
tau = 0.01,
actor_lr = 1e-4,
critic_lr = 1e-4)
# Ornstein-Uhlenbeck noise by lirnli/OpenAI-gym-solutions
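    # Mean-reverting exploration noise: each step applies x <- x - theta*x + sigma*N(0,1),
    # i.e. an Euler step of dX = -theta*X dt + sigma dW with dt = 1, so successive
    # samples are temporally correlated rather than independent.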
def UONoise():
theta = 0.15
sigma = 0.2
state = 0
while True:
yield state
state += -theta*state+sigma*np.random.randn()
date = datetime.datetime(2017, 7, 15, 0, 0)
date_test = datetime.datetime(2017, 7, 15, 0, 0)
noise = UONoise()
scores = []
scores_test = []
sample_actions = [] # Keep track of actions every 100 episode
portfolios = []
actor_grads = []
critic_grads = []
while (len(agentDDPG.memory) < 100000):
state = env_trading.reset(date = date)
while (True):
# action = agentDDPG.actor.act(state)
# action = np.clip( action + next(noise), -1, 1 )
action = env_trading.action_space.sample()[0]
next_state, reward, done, _ = env_trading.step(action)
agentDDPG.store_step(state, action, reward, next_state, done)
state = next_state
print("\rPopulating memory buffer: {:5d}/100000".format(len(agentDDPG.memory)), end="")
sys.stdout.flush()
if done:
break
print("\n")
for e in range(NUM_EP):
state = env_trading.reset(date=date)
score = 0
rewards = []
actions = []
while(True):
action = agentDDPG.actor.act(state)
action += next( noise )
action = np.clip(action, -1, 1)
actions.append(action)
next_state, reward, done, _ = env_trading.step( action )
score += reward
rewards.append( reward )
agentDDPG.store_step(state, action, reward, next_state, done)
if done:
actor_grad, critic_grad = agentDDPG.train()
actor_grads.append(actor_grad)
critic_grads.append(critic_grad)
scores.append(score)
# print("Episode: {}, Total reward: {}".format(e, score))
break
state = next_state
# Testing session
state = env_trading.reset( date = date_test )
score_test = 0
actions_test = []
while(True):
action = agentDDPG.actor.act(state)
next_state, reward, done, _ = env_trading.step( action )
actions_test.append( action )
score_test += reward
if done:
# agentDDPG.actor.update_averages( rewards, [score_test] )
# agentDDPG.actor.record_summary( e )
scores_test.append(score_test)
portfolios.append( env_trading.portfolio_value )
if e % 100 == 0:
sample_actions.append( actions_test )
print("\rEpisode: {}, Training reward: {:.2f}, Testing reward: {:.2f}, Actor grad: {:.4f}, Critic grad: {:.4f}, Actions: {:.4f}+/-{:.4f}, Test Actions: {:.4f}+/-{:.4f}".format(e, score, score_test, actor_grad, critic_grad, np.mean(actions), np.std(actions), np.mean(actions_test), np.std(actions_test)), end="")
sys.stdout.flush()
break
state = next_state
plt.figure()
plt.plot( scores_test, label = 'Testing' )
plt.plot( scores, label = 'Training' )
plt.legend()
plt.show()
plt.figure()
plt.plot( np.array( sample_actions ).T )
plt.show()
plt.figure()
plt.plot( np.array( portfolios ).T )
plt.show()
if __name__ == "__main__":
main()
``` |
{
"source": "joshuaLei/ROV",
"score": 3
} |
#### File: joshuaLei/ROV/Shape_task.py
```python
import cv2
import numpy as np
#from video import Video
import time
from PIL import Image, ImageEnhance
class ROV:
def __init__(self, debug=True):
self.video1 = cv2.VideoCapture(0)
self._triangle = 0
self._rectangle = 0
self._circle = 0
self._line = 0
self.min_x = 0
self.min_y = 0
self._num_of_shapes = {
"circle": 0,
"line": 0,
"triangle": 0,
"rectangle": 0
}
self.srcframe = None
self.mask = None
self.cropped = None
self.frame = None
self.status = True
self.areaval = 1000
def debug(self):
success, self.frame = self.video1.read()
self.frame = cv2.flip(self.frame, 3)
self.srcframe = self.frame
return self.frame
def msk(self, image):
if image.shape[0] == 0 or image.shape[1] == 0:
return None
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image = cv2.medianBlur(image, 5)
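        # Keep low-saturation, bright pixels (S <= 100, V >= 150 in HSV), i.e. the
        # roughly-white regions of the frame.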
lower = np.array([0, 0, 150])
upper = np.array([255, 100, 255])
mask = cv2.inRange(image, lower, upper)
self.mask = mask
return mask
def preprocessing(self, image):
image = ImageEnhance.Color(Image.fromarray(image)).enhance(1.1)
image = ImageEnhance.Brightness(image).enhance(1.2)
image = ImageEnhance.Contrast(image).enhance(1.2)
image = ImageEnhance.Sharpness(image).enhance(1.1)
kernel = np.ones((5, 5), np.uint8)
image = cv2.erode(np.array(image), kernel, iterations=1)
image = cv2.dilate(np.array(image), kernel, iterations=1)
return image
def overlay(self, frame):
overlay = frame.copy()
cropped = frame.copy()
img = frame[180:690, 160:600].copy()
self.cropped = img
self.srcframe = cropped
def white_mask(self):
thresh = self.msk(self.cropped)
if thresh is None:
return None
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
max_area = 100
max_cnt = None
for cnt in contours:
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
max_cnt = cnt
if max_cnt is None:
return None
x, y, w, h = cv2.boundingRect(max_cnt)
self.mask = self.cropped[y:y + h, x:x + w].copy()
cropped = self.cropped[y:y + h, x:x + w].copy()
return cropped, x, y, w, h
def shape_mask(self):
thresh = self.msk(self.mask)
if thresh is None:
return None
thresh = 255 - thresh
image = self.mask
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        image_h, image_w = image.shape[:2]  # self.mask is a BGR crop here, so take only height/width
bounding = 0.02
bounding_x = image_w * bounding
bounding_y = image_h * bounding
target_contours = []
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 150:
x, y, w, h = cv2.boundingRect(cnt)
if x > bounding_x and x + w < image_w - bounding_x and \
y > bounding_y and y + h < image_h - bounding_y:
target_contours.append(cnt)
min_x = image_w
min_y = image_h
max_x = 0
max_y = 0
for cnt in target_contours:
x, y, w, h = cv2.boundingRect(cnt)
if x + w > max_x:
max_x = x + w
if x < min_x:
min_x = x
if y + h > max_y:
max_y = y + h
if y < min_y:
min_y = y
space = 10
min_y = max(min_y - space, 0)
max_y = min(max_y + space, image_h)
min_x = max(min_x - space, 0)
max_x = min(max_x + space, image_w)
x = min_x
y = min_y
self.max_x = max_x
self.max_y = max_y
w = max_x - min_x
h = max_y - min_y
self.mask = image[min_y:max_y, min_x:max_x].copy()
cropped = self.cropped[min_y:max_y, min_x:max_x].copy()
return cropped, x, y, w, h
def detection(self, image, is_draw=True):
if image.shape[0] == 0 or image.shape[1] == 0:
return [0, 0]
else:
self._num_of_shapes["circle"] = 0
self._num_of_shapes["line"] = 0
self._num_of_shapes["triangle"] = 0
self._num_of_shapes["rectangle"] = 0
cv2.imshow('000', image)
image = cv2.resize(image, (800, 600), interpolation=cv2.INTER_AREA)
cropped = image
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
equ = cv2.equalizeHist(image)
mask = cv2.inRange(equ, 0, 52)
cv2.imshow("equ", cv2.inRange(equ, 0, 52))
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
image_h, image_w = mask.shape
bounding = 0.015
bounding_x = image_w * bounding
bounding_y = image_h * bounding
active_cnts = []
for cnt in contours:
area = cv2.contourArea(cnt)
if area > self.areaval:
x, y, w, h = cv2.boundingRect(cnt)
epsilon = 0.03 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
if x > bounding_x and x + w < image_w - bounding_x and \
y > bounding_y and y + h < image_h - bounding_y:
vertex = 0
for i in range(len(approx)):
p1 = approx[i]
p2 = approx[(i + 1) % len(approx)]
e = np.sqrt(np.sum(abs(p1 - p2) ** 2))
if e >= 25:
vertex += 1
active_cnts.append(cnt)
cv2.drawContours(cropped, [cnt], 0, (0, 255, 0), 3)
cv2.putText(cropped, str(vertex), (x + int(w / 2) - 5, y + int(h / 2) + 5),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), lineType=cv2.LINE_AA)
#cv2.imshow('numbers', self.frame)
area = cv2.contourArea(cnt)
if area > 3000:
if vertex == 2:
if self._num_of_shapes["line"] >= 6:
self._num_of_shapes["line"] = 6
else:
self._num_of_shapes["line"] += 1
elif vertex == 3:
if self._num_of_shapes["triangle"] >= 6:
self._num_of_shapes["triangle"] = 6
else:
self._num_of_shapes["triangle"] += 1
elif vertex == 4:
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if ar >= 0.2 and ar <= 1.8:
if self._num_of_shapes["rectangle"] >= 6:
self._num_of_shapes["rectangle"] = 6
else:
self._num_of_shapes["rectangle"] += 1
else:
if self._num_of_shapes["line"] >= 6:
self._num_of_shapes["line"] = 6
else:
self._num_of_shapes["line"] += 1
elif vertex == 0 or vertex == 1 or vertex == 5 or vertex == 6 \
or vertex == 7 or vertex == 8:
if self._num_of_shapes["circle"] >= 6:
self._num_of_shapes["circle"] = 6
else:
self._num_of_shapes["circle"] += 1
else:
pass
else:
if self._num_of_shapes["line"] >= 6:
self._num_of_shapes["line"] = 6
else:
self._num_of_shapes["line"] += 1
return cropped, area
def show(self, img):
cv2.circle(img, (150, 40), 20, (0, 0, 255), -1)
triangle_cnt = np.array([(150, 90), (130, 120), (170, 120)])
cv2.line(img, (130, 180), (170, 180), (0, 0, 255), 4)
cv2.rectangle(img, (170, 230), (130, 270), (0, 0, 255), -1)
cv2.drawContours(img, [triangle_cnt], 0, (0, 0, 255), -1)
cv2.putText(img, str(self._circle), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(img, str(self._triangle), (50, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(img, str(self._line), (50, 190), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.putText(img, str(self._rectangle), (50, 260), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
self.frame = img
if __name__ == "__main__":
rov = ROV(True)
#video = Video(port=4777)
i = 0
while True:
k = cv2.waitKey(1)
#if not video.frame_available():
#continue
#img = video.frame()
img = rov.debug()
# img = cv2.imread('photos/train2/img0.jpg')
rov.srcframe = img
cap = rov.preprocessing(img)
frame = cv2.resize(cap, (800, 600))
rov.srcframe = frame
rov.cropped = frame
rov.overlay(frame)
rov.msk(rov.cropped)
img, x1, y1, w1, h1 = rov.white_mask()
img2, x2, y2, w2, h2 = rov.shape_mask()
frame, area = rov.detection(img, False)
if area == 0:
print('error')
else:
src = rov.srcframe
frame = cv2.resize(frame, (w1, h1))
src[y1 + 180:y1 + h1 + 180, x1 + 160:x1 + w1 + 160] = frame
rov.show(src)
color = src.copy()
cv2.rectangle(src, (180, 160), (690, 600), (0, 0, 255), -1)
color = cv2.addWeighted(src, 0.3, color, 1 - 0.3, 0)
cv2.imshow('frame', color)
if k == 32:
rov._circle = rov._num_of_shapes["circle"]
rov._line = rov._num_of_shapes["line"]
rov._triangle = rov._num_of_shapes["triangle"]
rov._rectangle = rov._num_of_shapes["rectangle"]
if k == 27:
print("stop")
break
if k == ord('s'):
cv2.imwrite('photos/img' + str(time.time()) + '.jpg', frame)
print('saved')
i += 1
cv2.destroyAllWindows()
``` |
{
"source": "joshualemmon/db-error-detection-gan",
"score": 3
} |
#### File: db-error-detection-gan/scripts/xml_to_csv.py
```python
from xml.etree import ElementTree
import os
import csv
import argparse
import numpy as np
def get_xml_vals(infile):
tree = ElementTree.parse(infile)
root = tree.getroot()
item = ""
attr = []
# Get attribute names
for r in root:
item = r.tag
for c in r:
attr.append(c.tag)
attr = set(attr)
# Get attribute values for each item
vals = []
for p in list(root):
v = []
for a in attr:
v.append(p.find(a).text)
vals.append(v)
return vals
def add_attributes(xml_data):
data = []
# Load possible interests and countries
interests = [i.rstrip('\n') for i in open('interests.txt', 'r').readlines()]
countries = [c.rstrip('\n') for c in open('countries.txt', 'r').readlines()]
# Generate random valid values for each tuple
for x in xml_data:
x.append(gen_age())
x.append(gen_salary())
x.append(gen_height())
x.append(gen_interest(interests))
x.append(gen_country(countries))
x.append(0)
data.append(x)
return data
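# Each gen_* helper below draws from a normal distribution with the given
# mean/std and clamps the sample to the [min, max] range.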
def gen_age(mean=30, std=15, max_age=120, min_age=18):
age = int(np.random.normal(mean, std, 1))
if age < min_age:
return min_age
elif age > max_age:
return max_age
else:
return age
def gen_salary(mean=50000, std=10000, max_sal=100000, min_sal=20000):
sal = int(np.random.normal(mean, std, 1))
if sal < min_sal:
return min_sal
elif sal > max_sal:
return max_sal
else:
return sal
def gen_height(mean=168, std=10, max_height=200, min_height=155):
height = int(np.random.normal(mean, std, 1))
if height < min_height:
return min_height
elif height > max_height:
return max_height
else:
return height
def gen_interest(ints):
return ints[np.random.randint(low=0, high=len(ints))]
def gen_country(countries):
return countries[np.random.randint(low=0, high=len(countries))]
def main(args):
infile = args.infile
out = args.out
# Read XML data
xml_data = get_xml_vals(infile)
full_data = add_attributes(xml_data)
with open(out, 'w') as f:
writer = csv.writer(f, delimiter=',')
        # Columns (matching the header row below): l_name, email, f_name, age, salary, height, interest, country, is_dirty
        # NOTE: the first three columns come from a set of XML tags, so their order is not guaranteed across runs.
writer.writerow(['l_name', 'email', 'f_name', 'age', 'salary', 'height', 'interest', 'country', 'is_dirty'])
writer.writerows(full_data)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('--infile', '-i', type=str, required=True)
ap.add_argument('--out', '-o', type=str, required=True)
main(ap.parse_args())
``` |
{
"source": "joshualevitas/single-transferable-voting",
"score": 3
} |
#### File: joshualevitas/single-transferable-voting/dataset.py
```python
import numpy as np
import pandas as pd
def make_dataset(n_cands, n_spots, n_voters, name="out.csv", seed=90210):
#np.random.seed(seed)
np_data = [np.random.choice(np.arange(n_cands), n_spots, replace=False)]
for i in range(n_voters-1):
np_data = np.append(np_data, [np.random.choice(np.arange(n_cands), n_spots, replace=False)], axis=0)
names = np.zeros((n_voters,), dtype='object')
for i in range(len(names)):
names[i] = 'phil' + str(i)
names = np.array([names]).T
info = np.concatenate((names, np_data), axis=1)
data = pd.DataFrame(data=info)
data.to_csv(name, index=False)
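# Example usage (illustrative values only):
#   make_dataset(n_cands=5, n_spots=3, n_voters=100, name="ballots.csv")
# Each row is a voter name followed by a ranked ballot of n_spots distinct
# candidate indices drawn from 0..n_cands-1.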
``` |
{
"source": "joshuali925/opensearch-build",
"score": 2
} |
#### File: tests/tests_manifests/test_build_manifest.py
```python
import os
import unittest
import yaml
from manifests.build_manifest import BuildManifest
class TestBuildManifest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.realpath(
os.path.join(os.path.dirname(__file__), "data")
)
self.manifest_filename = os.path.join(
self.data_path, "opensearch-build-1.1.0.yml"
)
self.manifest = BuildManifest.from_path(self.manifest_filename)
def test_build(self):
self.assertEqual(self.manifest.version, "1.0")
self.assertEqual(self.manifest.build.name, "OpenSearch")
self.assertEqual(self.manifest.build.version, "1.1.0")
self.assertEqual(len(self.manifest.components), 15)
def test_component(self):
opensearch_component = self.manifest.components[0]
self.assertEqual(opensearch_component.name, "OpenSearch")
self.assertEqual(
opensearch_component.repository,
"https://github.com/opensearch-project/OpenSearch.git",
)
self.assertEqual(
opensearch_component.commit_id, "0<PASSWORD>d9bbb39<PASSWORD>bb1be83a<PASSWORD>2f57f81ec"
)
self.assertEqual(opensearch_component.ref, "1.x")
self.assertEqual(
sorted(opensearch_component.artifacts.keys()),
["bundle", "core-plugins", "maven"],
)
def test_to_dict(self):
data = self.manifest.to_dict()
with open(self.manifest_filename) as f:
self.assertEqual(yaml.safe_load(f), data)
```
#### File: tests/tests_manifests/test_bundle_manifest.py
```python
import os
import unittest
import yaml
from manifests.bundle_manifest import BundleManifest
class TestBundleManifest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.realpath(
os.path.join(os.path.dirname(__file__), "data")
)
self.manifest_filename = os.path.join(
self.data_path, "opensearch-bundle-1.1.0.yml"
)
self.manifest = BundleManifest.from_path(self.manifest_filename)
def test_build(self):
self.assertEqual(self.manifest.version, "1.0")
self.assertEqual(self.manifest.build.name, "OpenSearch")
self.assertEqual(self.manifest.build.version, "1.1.0")
self.assertEqual(
self.manifest.build.location, "bundle/opensearch-1.1.0-linux-x64.tar.gz"
)
self.assertEqual(self.manifest.build.architecture, "x64")
self.assertEqual(len(self.manifest.components), 13)
def test_component(self):
opensearch_min_component = self.manifest.components[0]
self.assertEqual(opensearch_min_component.name, "OpenSearch")
self.assertEqual(
opensearch_min_component.location,
"artifacts/bundle/opensearch-min-1.1.0-linux-x64.tar.gz",
)
self.assertEqual(
opensearch_min_component.repository,
"https://github.com/opensearch-project/OpenSearch.git",
)
self.assertEqual(
opensearch_min_component.commit_id,
"0<PASSWORD>",
)
self.assertEqual(opensearch_min_component.ref, "1.x")
def test_to_dict(self):
data = self.manifest.to_dict()
with open(self.manifest_filename) as f:
self.assertEqual(yaml.safe_load(f), data)
``` |
{
"source": "JoshuaLight/chalice-restful",
"score": 2
} |
#### File: tests/unit/configs_test.py
```python
import pytest
from chalice_restful import config, flag, only_classes, only_functions
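# Behaviour exercised below (as the tests show): an aspect declared with @config
# is parameterised (aspect('value') sets a same-named attribute to 'value'),
# while a @flag aspect is applied bare and sets a truthy attribute; @only_classes
# and @only_functions restrict what kind of object the aspect may decorate.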
def test_that_config_adds_named_field_to_the_class():
# Arrange.
@config
def aspect(_): ...
class Fake: ...
# Act.
Fake = aspect('value')(Fake)
# Assert.
assert Fake.aspect == 'value'
def test_that_config_adds_named_field_to_the_function():
# Arrange.
@config
def aspect(_): ...
def fake(): ...
# Act.
fake = aspect('value')(fake)
# Assert.
assert fake.aspect == 'value'
def test_that_flag_adds_named_field_to_the_class():
# Arrange.
@flag
def has_x(): ...
# Act.
@has_x
class Fake: ...
# Assert.
assert Fake.has_x
def test_that_flag_adds_named_field_to_the_function():
# Arrange.
@flag
def has_x(): ...
# Act.
@has_x
def fake(): ...
# Assert.
assert fake.has_x
def test_that_only_classes_flag_cant_be_added_to_function():
# Arrange.
@flag
@only_classes
def has_x(): ...
def fake(): ...
# Act.
decorate = lambda: has_x(fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_only_functions_flag_cant_be_added_to_class():
# Arrange.
@flag
@only_functions
def has_x(): ...
class Fake: ...
# Act.
decorate = lambda: has_x(Fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_only_classes_config_cant_be_added_to_function():
# Arrange.
@config
@only_classes
def has_x(_): ...
def fake(): ...
# Act.
decorate = lambda: has_x('value')(fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_only_functions_config_cant_be_added_to_class():
# Arrange.
@config
@only_functions
def has_x(_): ...
class Fake: ...
# Act.
decorate = lambda: has_x('value')(Fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_cant_add_both_only_functions_and_only_classes_flags_to_class():
# Arrange.
@flag
@only_functions
@only_classes
def has_x(): ...
class Fake: ...
# Act.
decorate = lambda: has_x('value')(Fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_cant_add_both_only_functions_and_only_classes_flags_to_function():
# Arrange.
@flag
@only_functions
@only_classes
def has_x(): ...
def fake(): ...
# Act.
decorate = lambda: has_x('value')(fake)
# Assert.
with pytest.raises(AssertionError):
decorate()
def test_that_multiple_flags_cant_override_each_other():
# Arrange.
@flag
def a(): ...
@flag
def b(): ...
# Act.
@a
@b
def func(): ...
# Assert.
assert func.a
assert func.b
``` |
{
"source": "joshualiu555/Pixel-Art-Maker",
"score": 3
} |
#### File: Pixel-Art-Maker/pixelart/color_box.py
```python
import pygame as pg
from constants import *
class ColorBox(pg.sprite.DirtySprite):
def __init__(self, color, x, y, gap):
pg.sprite.DirtySprite.__init__(self)
self.color = color
self.x, self.y = x, y
self.image = pg.Surface((COLOR_BOX_SIZE, COLOR_BOX_SIZE))
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = x * (COLOR_BOX_SIZE + gap) + 40, y * (COLOR_BOX_SIZE + gap) + 475
self.selected = False
def draw_circle(self, screen):
if self.selected:
if self.color != WHITE:
pg.draw.circle(screen, WHITE, self.rect.center, 20)
else:
pg.draw.circle(screen, BLACK, self.rect.center, 20)
```
#### File: Pixel-Art-Maker/pixelart/mouse_box.py
```python
import pygame as pg
from constants import *
class mouseBox(pg.sprite.DirtySprite):
def __init__(self, pos, gap):
pg.sprite.DirtySprite.__init__(self)
self.image = pg.Surface((MOUSE_BOX_SIZE, MOUSE_BOX_SIZE))
self.image.fill(WHITE)
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = pos * (MOUSE_BOX_SIZE + gap) + 50, 415
self.selected = False
def update(self):
if self.selected:
self.image.set_alpha(255)
else:
self.image.set_alpha(0)
```
#### File: Pixel-Art-Maker/pixelart/mouse.py
```python
import pygame as pg
from constants import *
import math
class Mouse(pg.sprite.DirtySprite):
def __init__(self, size, color):
pg.sprite.DirtySprite.__init__(self)
self.size = size
self.image = pg.Surface((PIXEL_SIZE * size, PIXEL_SIZE * size))
self.image.fill(color)
self.color = color
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = 0, 0
self.on_board = False
def update(self):
mx, my = pg.mouse.get_pos()
# boundary is the one third mark of the board
self.on_board = self.rect.left >= int(WIDTH / 3 - 10)
if self.on_board:
# rounds the mouse coordinates to the nearest pixel
self.rect.x, self.rect.y = math.ceil(mx / PIXEL_SIZE) * PIXEL_SIZE, math.ceil(my / PIXEL_SIZE) * PIXEL_SIZE
self.image.set_colorkey(None)
pg.mouse.set_cursor(pg.cursors.diamond)
else:
self.rect.x, self.rect.y = mx, my
# make the mouse rect invisible
self.image.set_colorkey(self.color)
pg.mouse.set_cursor(pg.cursors.arrow)
def change_size(self, size):
self.size = size
self.image = pg.Surface((PIXEL_SIZE * size, PIXEL_SIZE * size))
self.image.fill(self.color)
self.rect = self.image.get_rect()
def change_color(self, color):
self.color = color
self.image.fill(color)
``` |
{
"source": "joshualoving/ExampleCode",
"score": 3
} |
#### File: joshualoving/ExampleCode/copyNum.py
```python
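# NOTE: the module usage docstring was missing from this copy of the file; a
# minimal one is reconstructed here (option names inferred from main()) so that
# docopt(__doc__) has something to parse.
"""copyNum.py
Usage:
  copyNum.py --input=<genes> --genedef=<genedef> --seg=<segfile> --output=<out> --log=<logfile>
"""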
from docopt import docopt
import csv
from intervaltree import IntervalTree, Interval
def readInput(infile,genefile, segfile):
"""
Reads input files.
    Parses the gene list, gene range definitions, and cell-line copy-number segments into in-memory structures.
Parameters:
infile (str): File containing list of genes to be analyzed
genefile (str): File containing gene range definitions
segfile (str): File containing cell line intervals and copy number data
Returns:
genes (list): List of genes
genedef (dict): Dictionary of genes mapping to corresponding intervals
        interval_dict (dict): Nested mapping of cell line -> chromosome -> IntervalTree of copy-number segments
"""
with open(infile) as inf:
genes = [i.strip() for i in inf.readlines()]
with open(genefile) as genef:
dictgenes = csv.DictReader(genef, delimiter="\t")
genedef = {}
for d in dictgenes:
if d["cds_from"] != "-" and d["cds_to"] != "-":
genedef[d["gene"]] = (d["#chromosome"], Interval(int(d["cds_from"]),int(d["cds_to"])))
with open(segfile) as seg:
interval_dict = {}
dictseg = csv.DictReader(seg, delimiter="\t")
for d in dictseg:
d = dict(d)
if "e" in d["End"]:
#Replace one incorrect exponential value
d["End"] = 115000000
if d["CCLE_name"] in interval_dict:
if d["Chromosome"] in interval_dict[d["CCLE_name"]]:
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
else:
interval_dict[d["CCLE_name"]][d["Chromosome"]] = IntervalTree()
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
else:
interval_dict[d["CCLE_name"]] = dict()
interval_dict[d["CCLE_name"]][d["Chromosome"]] = IntervalTree()
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
return genes, genedef, interval_dict
import datetime
if __name__ == '__main__':
#Parse arguments from the docstring
arguments = docopt(__doc__)
#Generate logfile
with(open(arguments["--log"], "w")) as logf:
logf.write("Date: %s\n"% str(datetime.date.today()))
logf.write("Version: CopyNum 0.1")
logf.write("Input gene file: %s\n" % arguments["--input"])
logf.write("Output CN file: %s\n" % arguments["--output"])
logf.write("Input gene range file: %s\n" % arguments["--genedef"])
logf.write("Input cell line CN file: %s\n" % arguments["--seg"])
#parse input into objects
g, gdef, id = readInput(arguments["--input"],arguments["--genedef"],arguments["--seg"])
#Get only the genes we are interested in
interest = dict((k, gdef[k]) for k in g if k in gdef)
with open(arguments["--output"], "w") as out:
#Generate header
out.write("Gene\t")
for line in id:
out.write(line + "\t")
out.write("\n")
#Loop over genes
for gene_name in interest:
gene = interest[gene_name]
out.write(gene_name + "\t")
#Loop over cell line intervals
for line in id:
SumCN = 0
SumWidth = 0
#Check whether there were overlapping intervals found
if len(id[line][gene[0]][gene[1].begin:gene[1].end]) == 0:
#No copy number data, write NA
out.write("NA\t")
else:
for i in id[line][gene[0]][gene[1].begin:gene[1].end]:
#Compute copy number across interval
width = i.end - i.begin
SumCN += i.data*width
SumWidth += width
#Average copy number across gene intervals
out.write(str(SumCN/SumWidth))
out.write("\t")
out.write("\n")
``` |
{
"source": "joshualyguessennd/ocean.py",
"score": 2
} |
#### File: joshualyguessennd/ocean.py/deploy.py
```python
import json
import os
import sys
from pathlib import Path
from examples import ExampleConfig
from ocean_lib.config_provider import ConfigProvider
from ocean_lib.models.bfactory import BFactory
from ocean_lib.models.bpool import BPool
from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.models.fixed_rate_exchange import FixedRateExchange
from ocean_lib.models.metadata import MetadataContract
from ocean_lib.ocean import util
from ocean_lib.ocean.util import get_web3_connection_provider
from ocean_lib.web3_internal.contract_handler import ContractHandler
from ocean_lib.web3_internal.utils import privateKeyToAddress
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.web3_provider import Web3Provider
from tests.resources.helper_functions import (
get_ganache_wallet,
get_publisher_ocean_instance,
get_web3,
)
SUPPORTED_NETWORKS_STR = str(util.SUPPORTED_NETWORK_NAMES)[1:-1]
def main():
network, address_file = processArgs()
addresses = deploy(network, address_file)
_s = json.dumps(addresses, indent=4)
s = "**** deployed contracts with the following addresses ****\n" + _s
print(s)
def processArgs():
# set help message
help = f"""
Deploy DataTokenTemplate and more to a target network.
Usage: deploy.py NETWORK ADDRESSES_FILE_PATH
NETWORK -- one of: {SUPPORTED_NETWORKS_STR}
ADDRESSES_FILE_PATH -- path to json file to update the deployed contracts addresses
"""
# ****SET INPUT ARGS****
# got the right number of args? If not, output help
num_args = len(sys.argv) - 1
num_args_needed = 1
if num_args != num_args_needed:
print(help)
if num_args > 0:
print("Got %d argument(s), need %s.\n" % (num_args, num_args_needed))
sys.exit(0)
# grab args
network = sys.argv[1]
print("Arguments: NETWORK=%s\n" % network)
# corner cases
if network not in util.SUPPORTED_NETWORK_NAMES:
print(f"Invalid network. Supported networks: {SUPPORTED_NETWORKS_STR}")
sys.exit(0)
return network, sys.argv[2] if len(sys.argv) > 2 else ""
def deploy(network, addresses_file):
config = ExampleConfig.get_config()
ConfigProvider.set_config(config)
Web3Provider.init_web3(provider=get_web3_connection_provider(config.network_url))
ContractHandler.set_artifacts_path(config.artifacts_path)
artifacts_path = ContractHandler.artifacts_path
if not addresses_file:
addresses_file = config.address_file
else:
addresses_file = Path(addresses_file).expanduser().resolve()
ocean = get_publisher_ocean_instance()
web3 = ocean.web3
addresses = dict()
if os.path.exists(addresses_file):
with open(addresses_file) as f:
network_addresses = json.load(f)
else:
network_addresses = {network: {}}
if network == "ganache" and network not in network_addresses:
network = "development"
_addresses = network_addresses[network]
# ****SET ENVT****
# grab vars
factory_deployer_private_key = get_ganache_wallet().private_key
# corner cases
if invalidKey(factory_deployer_private_key):
print("Need valid FACTORY_DEPLOYER_PRIVATE_KEY")
sys.exit(0)
# ****SEE FUNDS****
print(
"Keys:\n%s"
% Wallet(web3=get_web3(), private_key=factory_deployer_private_key).keysStr()
)
print("")
# ****DEPLOY****
deployer_wallet = Wallet(web3, private_key=factory_deployer_private_key)
minter_addr = deployer_wallet.address
# cap = 2 ** 255 not used
if DTFactory.CONTRACT_NAME not in _addresses:
print("****Deploy DataTokenTemplate: begin****")
dt_address = DataToken.deploy(
web3,
deployer_wallet,
artifacts_path,
"Template Contract",
"TEMPLATE",
minter_addr,
DataToken.DEFAULT_CAP_BASE,
DTFactory.FIRST_BLOB,
minter_addr,
)
addresses[DataToken.CONTRACT_NAME] = dt_address
print("****Deploy DataTokenTemplate: done****\n")
print("****Deploy DTFactory: begin****")
dtfactory = DTFactory(
DTFactory.deploy(
web3, deployer_wallet, artifacts_path, dt_address, minter_addr
)
)
addresses[DTFactory.CONTRACT_NAME] = dtfactory.address
print("****Deploy DTFactory: done****\n")
if BFactory.CONTRACT_NAME not in _addresses:
print("****Deploy BPool: begin****")
bpool_address = BPool.deploy(web3, deployer_wallet, artifacts_path)
bpool_template = BPool(bpool_address)
addresses[BPool.CONTRACT_NAME] = bpool_address
print("****Deploy BPool: done****\n")
print("****Deploy 'BFactory': begin****")
bfactory_address = BFactory.deploy(
web3, deployer_wallet, artifacts_path, bpool_template.address
)
_ = BFactory(bfactory_address)
addresses[BFactory.CONTRACT_NAME] = bfactory_address
print("****Deploy 'BFactory': done****\n")
if FixedRateExchange.CONTRACT_NAME not in _addresses:
print("****Deploy 'FixedRateExchange': begin****")
addresses[FixedRateExchange.CONTRACT_NAME] = FixedRateExchange.deploy(
web3, deployer_wallet, artifacts_path
)
print("****Deploy 'FixedRateExchange': done****\n")
if MetadataContract.CONTRACT_NAME not in _addresses:
print("****Deploy 'Metadata': begin****")
addresses[MetadataContract.CONTRACT_NAME] = MetadataContract.deploy(
web3, deployer_wallet, artifacts_path
)
print("****Deploy 'Metadata': done****\n")
if network in ("ganache", "development"):
print("****Deploy fake OCEAN: begin****")
# For simplicity, hijack DataTokenTemplate.
minter_addr = deployer_wallet.address
OCEAN_cap = 1410 * 10 ** 6 # 1.41B
OCEAN_cap_base = util.to_base_18(float(OCEAN_cap))
OCEAN_token = DataToken(
DataToken.deploy(
web3,
deployer_wallet,
artifacts_path,
"Ocean",
"OCEAN",
minter_addr,
OCEAN_cap_base,
"",
minter_addr,
)
)
addresses["Ocean"] = OCEAN_token.address
print("****Deploy fake OCEAN: done****\n")
print("****Mint fake OCEAN: begin****")
OCEAN_token.mint(minter_addr, OCEAN_cap_base, from_wallet=deployer_wallet)
print("****Mint fake OCEAN: done****\n")
print("****Distribute fake OCEAN: begin****")
amt_distribute = 1000
amt_distribute_base = util.to_base_18(float(amt_distribute))
for key_label in ["TEST_PRIVATE_KEY1", "TEST_PRIVATE_KEY2"]:
key = os.environ.get(key_label)
if not key:
continue
dst_address = privateKeyToAddress(key)
OCEAN_token.transfer(
dst_address, amt_distribute_base, from_wallet=deployer_wallet
)
print("****Distribute fake OCEAN: done****\n")
network_addresses[network].update(addresses)
with open(addresses_file, "w") as f:
json.dump(network_addresses, f, indent=2)
return addresses
def invalidKey(private_key_str): # super basic check
return len(private_key_str) < 10
def invalidAddr(addr_str): # super basic check
return len(addr_str) < 10
def setenv(key, value):
# os.putenv(key, value) #Do *not* use putenv(), it doesn't work
os.environ[key] = value
if __name__ == "__main__":
main()
```
#### File: ocean_lib/web3_internal/utils.py
```python
import json
import logging
import os
from collections import namedtuple
import eth_account
import eth_keys
import eth_utils
from eth_keys import KeyAPI
from eth_utils import big_endian_to_int
from ocean_lib.web3_internal.web3_provider import Web3Provider
from web3 import Web3
Signature = namedtuple("Signature", ("v", "r", "s"))
logger = logging.getLogger(__name__)
def generate_multi_value_hash(types, values):
"""
Return the hash of the given list of values.
This is equivalent to packing and hashing values in a solidity smart contract
hence the use of `soliditySha3`.
:param types: list of solidity types expressed as strings
:param values: list of values matching the `types` list
:return: bytes
"""
assert len(types) == len(values)
return Web3.soliditySha3(types, values)
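# For example, generate_multi_value_hash(["address", "uint256"], [addr, 42]) is the
# keccak256 of the tightly-packed encoding of those values, matching Solidity's
# keccak256(abi.encodePacked(...)).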
def prepare_prefixed_hash(msg_hash):
"""
:param msg_hash:
:return:
"""
return generate_multi_value_hash(
["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash]
)
def add_ethereum_prefix_and_hash_msg(text):
"""
This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover.
:param text: str any str to be signed / used in recovering address from a signature
:return: hash of prefixed text according to the recommended ethereum prefix
"""
prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}"
return Web3.sha3(text=prefixed_msg)
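# This is the "personal_sign" / EIP-191 (version 0x45) prefixing scheme, so the
# resulting hash matches what eth_sign / web3.personal.sign operate on.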
def get_public_key_from_address(web3, account):
"""
:param web3:
:param account:
:return:
"""
_hash = web3.sha3(text="verify signature.")
signature = web3.personal.sign(_hash, account.address, account.password)
signature = split_signature(web3, web3.toBytes(hexstr=signature))
signature_vrs = Signature(
signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s)
)
prefixed_hash = prepare_prefixed_hash(_hash)
pub_key = KeyAPI.PublicKey.recover_from_msg_hash(
prefixed_hash, KeyAPI.Signature(vrs=signature_vrs)
)
assert (
pub_key.to_checksum_address() == account.address
), "recovered address does not match signing address."
return pub_key
def to_32byte_hex(web3, val):
"""
:param web3:
:param val:
:return:
"""
return web3.toBytes(val).rjust(32, b"\0")
def split_signature(web3, signature):
"""
:param web3:
:param signature: signed message hash, hex str
:return:
"""
assert len(signature) == 65, (
f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}"
)
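    # 65-byte Ethereum signature layout: r (32 bytes) || s (32 bytes) || v (1 byte).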
v = web3.toInt(signature[-1])
r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big"))
s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big"))
if v != 27 and v != 28:
v = 27 + v % 2
return Signature(v, r, s)
def get_wallet(index):
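    """Build a Wallet from the PARITY_ADDRESS / PARITY_PASSWORD / PARITY_KEY /
    PARITY_ENCRYPTED_KEY / PARITY_KEYFILE environment variables (each suffixed
    with `index` when index > 0); returns None if no address is configured."""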
name = "PARITY_ADDRESS" if not index else f"PARITY_ADDRESS{index}"
pswrd_name = "PARITY_PASSWORD" if not index else f"PARITY_PASSWORD{index}"
key_name = "PARITY_KEY" if not index else f"PARITY_KEY{index}"
encrypted_key_name = (
"PARITY_ENCRYPTED_KEY" if not index else f"PARITY_ENCRYPTED_KEY{index}"
)
keyfile_name = "PARITY_KEYFILE" if not index else f"PARITY_KEYFILE{index}"
address = os.getenv(name)
if not address:
return None
pswrd = os.getenv(pswrd_name)
key = os.getenv(key_name)
encr_key = os.getenv(encrypted_key_name)
key_file = os.getenv(keyfile_name)
if key_file and not encr_key:
with open(key_file) as _file:
encr_key = json.loads(_file.read())
from ocean_lib.web3_internal.wallet import Wallet
return Wallet(
Web3Provider.get_web3(),
private_key=key,
encrypted_key=encr_key,
address=Web3.toChecksumAddress(address),
        password=pswrd,
)
def privateKeyToAddress(private_key: str) -> str:
return eth_account.Account().privateKeyToAccount(private_key).address
def privateKeyToPublicKey(private_key: str):
private_key_bytes = eth_utils.decode_hex(private_key)
private_key_object = eth_keys.keys.PrivateKey(private_key_bytes)
return private_key_object.public_key
```
#### File: tests/models/test_bpool.py
```python
import pytest
from ocean_lib.models.bfactory import BFactory
from ocean_lib.models.bpool import BPool
from ocean_lib.models.btoken import BToken
from ocean_lib.ocean.util import from_base_18, get_bfactory_address, to_base_18
from ocean_lib.web3_internal.wallet import Wallet
from tests.models.conftest import alice_info
HUGEINT = 2 ** 255
def test_notokens_basic(OCEAN_address, network, alice_wallet, alice_address):
pool = _deployBPool(network, alice_wallet)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert not pool.isBound(OCEAN_address)
assert pool.getNumTokens() == 0
assert pool.getCurrentTokens() == []
with pytest.raises(Exception):
pool.getFinalTokens() # pool's not finalized
assert pool.getSwapFee() == to_base_18(1e-6)
assert pool.getController() == alice_address
assert str(pool)
with pytest.raises(Exception):
pool.finalize() # can't finalize if no tokens
def test_setSwapFee_works(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
pool.setSwapFee(to_base_18(0.011), from_wallet=alice_wallet)
assert from_base_18(pool.getSwapFee()) == 0.011
def test_setSwapFee_fails(
network, alice_wallet, alice_address, bob_wallet, bob_address
):
factory = BFactory(get_bfactory_address(network))
pool_address = factory.newBPool(alice_wallet)
pool = BPool(pool_address)
with pytest.raises(Exception):
pool.setSwapFee(
to_base_18(0.011), from_wallet=bob_wallet
) # not ok, bob isn't controller
pool.setController(bob_address, from_wallet=alice_wallet)
pool.setSwapFee(to_base_18(0.011), from_wallet=bob_wallet) # ok now
def test_setController(network, alice_wallet, alice_address, bob_wallet, bob_address):
pool = _deployBPool(network, alice_wallet)
pool.setController(bob_address, from_wallet=alice_wallet)
assert pool.getController() == bob_address
pool.setController(alice_address, from_wallet=bob_wallet)
assert pool.getController() == alice_address
def test_setPublicSwap(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
pool.setPublicSwap(True, from_wallet=alice_wallet)
assert pool.isPublicSwap()
pool.setPublicSwap(False, from_wallet=alice_wallet)
assert not pool.isPublicSwap()
def test_2tokens_basic(network, T1, T2, alice_wallet, alice_address):
pool = _deployBPool(network, alice_wallet)
assert T1.address != T2.address
assert T1.address != pool.address
assert from_base_18(T1.balanceOf(alice_address)) >= 90.0
_ = from_base_18(T2.balanceOf(alice_address)) >= 10.0
with pytest.raises(Exception): # can't bind until we approve
pool.bind(T1.address, to_base_18(90.0), to_base_18(9.0))
# Bind two tokens to the pool
T1.approve(pool.address, to_base_18(90.0), from_wallet=alice_wallet)
T2.approve(pool.address, to_base_18(10.0), from_wallet=alice_wallet)
assert from_base_18(T1.allowance(alice_address, pool.address)) == 90.0
assert from_base_18(T2.allowance(alice_address, pool.address)) == 10.0
assert not pool.isBound(T1.address) and not pool.isBound(T1.address)
pool.bind(T1.address, to_base_18(90.0), to_base_18(9.0), from_wallet=alice_wallet)
pool.bind(T2.address, to_base_18(10.0), to_base_18(1.0), from_wallet=alice_wallet)
assert pool.isBound(T1.address) and pool.isBound(T2.address)
assert pool.getNumTokens() == 2
assert pool.getCurrentTokens() == [T1.address, T2.address]
assert pool.getDenormalizedWeight(T1.address) == to_base_18(9.0)
assert pool.getDenormalizedWeight(T2.address) == to_base_18(1.0)
assert pool.getTotalDenormalizedWeight() == to_base_18(9.0 + 1.0)
assert pool.getNormalizedWeight(T1.address) == to_base_18(0.9)
assert pool.getNormalizedWeight(T2.address) == to_base_18(0.1)
assert pool.getBalance(T1.address) == to_base_18(90.0)
assert pool.getBalance(T2.address) == to_base_18(10.0)
assert str(pool)
def test_unbind(network, T1, T2, alice_wallet):
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 1.0, 1.0, 1.0, 1.0)
pool.unbind(T1.address, from_wallet=alice_wallet)
assert pool.getNumTokens() == 1
assert pool.getCurrentTokens() == [T2.address]
assert from_base_18(pool.getBalance(T2.address)) == 1.0
def test_finalize(network, T1, T2, alice_address, alice_wallet):
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
assert not pool.isPublicSwap()
assert not pool.isFinalized()
assert pool.totalSupply() == 0
assert pool.balanceOf(alice_address) == 0
assert pool.allowance(alice_address, pool.address) == 0
pool.finalize(from_wallet=alice_wallet)
assert pool.isPublicSwap()
assert pool.isFinalized()
assert pool.totalSupply() == to_base_18(100.0)
assert pool.balanceOf(alice_address) == to_base_18(100.0)
assert pool.allowance(alice_address, pool.address) == 0
assert pool.getFinalTokens() == [T1.address, T2.address]
assert pool.getCurrentTokens() == [T1.address, T2.address]
def test_public_pool(network, bob_wallet):
alice = alice_info()
alice_address = alice.address
bob_address = bob_wallet.address
T1 = alice.T1
T2 = alice.T2
pool = _createPoolWith2Tokens(
network, alice.T1, alice.T2, alice.wallet, 90.0, 10.0, 9.0, 1.0
)
BPT = pool
# alice give Bob some tokens
alice.T1.transfer(bob_wallet.address, to_base_18(100.0), from_wallet=alice.wallet)
alice.T2.transfer(bob_wallet.address, to_base_18(100.0), from_wallet=alice.wallet)
# verify holdings
assert from_base_18(alice.T1.balanceOf(alice.address)) == (1000.0 - 90.0 - 100.0)
assert from_base_18(alice.T2.balanceOf(alice.address)) == (1000.0 - 10.0 - 100.0)
assert from_base_18(BPT.balanceOf(alice.address)) == 0
assert from_base_18(alice.T1.balanceOf(bob_address)) == 100.0
assert from_base_18(alice.T2.balanceOf(bob_address)) == 100.0
assert from_base_18(BPT.balanceOf(bob_address)) == 0
assert from_base_18(T1.balanceOf(pool.address)) == 90.0
assert from_base_18(T2.balanceOf(pool.address)) == 10.0
assert from_base_18(BPT.balanceOf(pool.address)) == 0
# finalize
pool = BPool(pool.address)
pool.finalize(from_wallet=alice.wallet)
# verify holdings
assert from_base_18(alice.T1.balanceOf(alice.address)) == (1000.0 - 90.0 - 100.0)
assert from_base_18(alice.T2.balanceOf(alice.address)) == (1000.0 - 10.0 - 100.0)
assert from_base_18(BPT.balanceOf(alice.address)) == 100.0 # new!
assert from_base_18(T1.balanceOf(pool.address)) == 90.0
assert from_base_18(T2.balanceOf(pool.address)) == 10.0
assert from_base_18(BPT.balanceOf(pool.address)) == 0
# bob join pool. Wants 10 BPT
T1.approve(pool.address, to_base_18(100.0), from_wallet=bob_wallet)
T2.approve(pool.address, to_base_18(100.0), from_wallet=bob_wallet)
pool.joinPool(
poolAmountOut_base=to_base_18(10.0), # 10 BPT
maxAmountsIn_base=[to_base_18(100.0), to_base_18(100.0)],
from_wallet=bob_wallet,
)
# verify holdings
assert from_base_18(T1.balanceOf(alice_address)) == (1000.0 - 90.0 - 100.0)
assert from_base_18(T2.balanceOf(alice_address)) == (1000.0 - 10.0 - 100.0)
assert from_base_18(BPT.balanceOf(alice_address)) == 100.0
assert from_base_18(T1.balanceOf(bob_address)) == (100.0 - 9.0)
assert from_base_18(T2.balanceOf(bob_address)) == (100.0 - 1.0)
assert from_base_18(BPT.balanceOf(bob_address)) == 10.0
assert from_base_18(T1.balanceOf(pool.address)) == (90.0 + 9.0)
assert from_base_18(T2.balanceOf(pool.address)) == (10.0 + 1.0)
assert from_base_18(BPT.balanceOf(pool.address)) == 0
# bob sells 2 BPT
# -this is where BLabs fee kicks in. But the fee is currently set to 0.
pool.exitPool(
poolAmountIn_base=to_base_18(2.0),
minAmountsOut_base=[to_base_18(0.0), to_base_18(0.0)],
from_wallet=bob_wallet,
)
assert from_base_18(T1.balanceOf(bob_address)) == 92.8
assert from_base_18(T2.balanceOf(bob_address)) == 99.2
assert from_base_18(BPT.balanceOf(bob_address)) == 8.0
# bob buys 5 more BPT
pool.joinPool(
poolAmountOut_base=to_base_18(5.0),
maxAmountsIn_base=[to_base_18(90.0), to_base_18(90.0)],
from_wallet=bob_wallet,
)
assert from_base_18(BPT.balanceOf(bob_address)) == 13.0
# bob fully exits
pool.exitPool(
poolAmountIn_base=to_base_18(13.0),
minAmountsOut_base=[to_base_18(0.0), to_base_18(0.0)],
from_wallet=bob_wallet,
)
assert from_base_18(BPT.balanceOf(bob_address)) == 0.0
def test_rebind_more_tokens(network, T1, T2, alice_wallet):
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
# insufficient allowance
with pytest.raises(Exception):
pool.rebind(
T1.address, to_base_18(120.0), to_base_18(9.0), from_wallet=alice_wallet
)
# sufficient allowance
T1.approve(pool.address, to_base_18(30.0), from_wallet=alice_wallet)
pool.rebind(
T1.address, to_base_18(120.0), to_base_18(9.0), from_wallet=alice_wallet
)
def test_gulp(network, T1, alice_wallet):
pool = _deployBPool(network, alice_wallet)
# bind T1 to the pool, with a balance of 2.0
T1.approve(pool.address, to_base_18(50.0), from_wallet=alice_wallet)
pool.bind(T1.address, to_base_18(2.0), to_base_18(50.0), from_wallet=alice_wallet)
# T1 is now pool's (a) ERC20 balance (b) _records[token].balance
assert T1.balanceOf(pool.address) == to_base_18(2.0) # ERC20 balance
assert pool.getBalance(T1.address) == to_base_18(2.0) # records[]
# but then some joker accidentally sends 5.0 tokens to the pool's address
# rather than binding / rebinding. So it's in ERC20 bal but not records[]
T1.transfer(pool.address, to_base_18(5.0), from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_base_18(2.0 + 5.0) # ERC20 bal
assert pool.getBalance(T1.address) == to_base_18(2.0) # records[]
# so, 'gulp' gets the pool to absorb the tokens into its balances.
# i.e. to update _records[token].balance to be in sync with ERC20 balance
pool.gulp(T1.address, from_wallet=alice_wallet)
assert T1.balanceOf(pool.address) == to_base_18(2.0 + 5.0) # ERC20
assert pool.getBalance(T1.address) == to_base_18(2.0 + 5.0) # records[]
def test_spot_price(network, T1, T2, alice_wallet):
(p, p_sans) = _spotPrices(network, T1, T2, alice_wallet, 1.0, 1.0, 1.0, 1.0)
assert p_sans == 1.0
assert round(p, 8) == 1.000001
(p, p_sans) = _spotPrices(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
assert p_sans == 1.0
assert round(p, 8) == 1.000001
(p, p_sans) = _spotPrices(network, T1, T2, alice_wallet, 1.0, 2.0, 1.0, 1.0)
assert p_sans == 0.5
assert round(p, 8) == 0.5000005
(p, p_sans) = _spotPrices(network, T1, T2, alice_wallet, 2.0, 1.0, 1.0, 1.0)
assert p_sans == 2.0
assert round(p, 8) == 2.000002
(p, p_sans) = _spotPrices(network, T1, T2, alice_wallet, 9.0, 10.0, 9.0, 1.0)
assert p_sans == 0.1
assert round(p, 8) == 0.1000001
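# Balancer spot price checked above: (balanceIn/weightIn) / (balanceOut/weightOut),
# scaled by 1/(1 - swapFee). getSpotPriceSansFee omits the fee factor, which is why
# p_sans is the exact ratio and p is larger by ~1e-6 (the default swap fee asserted earlier).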
def _spotPrices(
network: str,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: float,
bal2: float,
w1: float,
w2: float,
):
pool = _createPoolWith2Tokens(network, T1, T2, wallet, bal1, bal2, w1, w2)
a1, a2 = T1.address, T2.address
return (
from_base_18(pool.getSpotPrice(a1, a2)),
from_base_18(pool.getSpotPriceSansFee(a1, a2)),
)
def test_joinSwapExternAmountIn(network, T1, T2, alice_wallet, alice_address):
init_T1balance = from_base_18(T1.balanceOf(alice_address))
T2balance = from_base_18(T2.balanceOf(alice_address))
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
T1.approve(pool.address, to_base_18(100.0), from_wallet=alice_wallet)
# pool's not public
with pytest.raises(Exception):
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn_base=to_base_18(100.0),
tokenOut_address=T2.address,
tokenAmountOut_base=to_base_18(10.0),
maxPrice_base=HUGEINT,
from_wallet=alice_wallet,
)
# pool's public
pool.setPublicSwap(True, from_wallet=alice_wallet)
pool.swapExactAmountOut(
tokenIn_address=T1.address,
maxAmountIn_base=to_base_18(100.0),
tokenOut_address=T2.address,
tokenAmountOut_base=to_base_18(1.0),
maxPrice_base=HUGEINT,
from_wallet=alice_wallet,
)
new_balance = init_T1balance - 91.055
assert (
(new_balance - 0.005)
<= from_base_18(T1.balanceOf(alice_address))
<= (new_balance + 0.005)
)
assert from_base_18(T2.balanceOf(alice_address)) == (T2balance - 9.0)
def test_joinswapPoolAmountOut(network, T1, T2, alice_address, alice_wallet):
T1balance = from_base_18(T1.balanceOf(alice_address))
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = from_base_18(BPT.balanceOf(alice_address))
T1.approve(pool.address, to_base_18(90.0), from_wallet=alice_wallet)
assert from_base_18(T1.balanceOf(alice_address)) == (T1balance - 90)
T1balance = from_base_18(T1.balanceOf(alice_address))
pool.joinswapPoolAmountOut(
tokenIn_address=T1.address,
poolAmountOut_base=to_base_18(10.0), # BPT wanted
maxAmountIn_base=to_base_18(90.0), # max T1 to spend
from_wallet=alice_wallet,
)
assert from_base_18(T1.balanceOf(alice_address)) >= (T1balance - 90.0)
assert from_base_18(BPT.balanceOf(alice_address)) == (pool_balance + 10.0)
def test_exitswapPoolAmountIn(network, T1, T2, alice_address, alice_wallet):
T1balance = from_base_18(T1.balanceOf(alice_address))
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = from_base_18(BPT.balanceOf(alice_address))
assert from_base_18(T1.balanceOf(alice_address)) == (T1balance - 90)
pool.exitswapPoolAmountIn(
tokenOut_address=T1.address,
poolAmountIn_base=to_base_18(10.0), # BPT spent
minAmountOut_base=to_base_18(1.0), # min T1 wanted
from_wallet=alice_wallet,
)
assert from_base_18(T1.balanceOf(alice_address)) >= (T1balance - 90 + 1.0)
assert from_base_18(BPT.balanceOf(alice_address)) == (pool_balance - 10.0)
def test_exitswapExternAmountOut(network, T1, T2, alice_address, alice_wallet):
T1balance = from_base_18(T1.balanceOf(alice_address))
pool = _createPoolWith2Tokens(network, T1, T2, alice_wallet, 90.0, 10.0, 9.0, 1.0)
BPT = pool
pool.finalize(from_wallet=alice_wallet)
pool_balance = from_base_18(BPT.balanceOf(alice_address))
assert from_base_18(T1.balanceOf(alice_address)) == T1balance - 90
pool.exitswapExternAmountOut(
tokenOut_address=T1.address,
tokenAmountOut_base=to_base_18(2.0), # T1 wanted
maxPoolAmountIn_base=to_base_18(10.0), # max BPT spent
from_wallet=alice_wallet,
)
assert from_base_18(T1.balanceOf(alice_address)) == (T1balance - 90 + 2.0)
assert from_base_18(BPT.balanceOf(alice_address)) >= (pool_balance - 10.0)
def test_calcSpotPrice_base(network, T1, T2, alice_address, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcSpotPrice(
tokenBalanceIn_base=to_base_18(10.0),
tokenWeightIn_base=to_base_18(1.0),
tokenBalanceOut_base=to_base_18(11.0),
tokenWeightOut_base=to_base_18(1.0),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 0.909
def test_calcOutGivenIn_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcOutGivenIn(
tokenBalanceIn_base=to_base_18(10.0),
tokenWeightIn_base=to_base_18(1.0),
tokenBalanceOut=to_base_18(10.1),
tokenWeightOut_base=to_base_18(1.0),
tokenAmountIn_base=to_base_18(1.0),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 0.918
def test_calcInGivenOut_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcInGivenOut(
tokenBalanceIn_base=to_base_18(10.0),
tokenWeightIn_base=to_base_18(1.0),
tokenBalanceOut_base=to_base_18(10.1),
tokenWeightOut_base=to_base_18(1.0),
tokenAmountOut_base=to_base_18(1.0),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 1.099
def test_calcPoolOutGivenSingleIn_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcPoolOutGivenSingleIn(
tokenBalanceIn_base=to_base_18(10.0),
tokenWeightIn_base=to_base_18(1.0),
poolSupply_base=to_base_18(120.0),
totalWeight_base=to_base_18(2.0),
tokenAmountIn_base=to_base_18(0.1),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 0.599
def test_calcSingleInGivenPoolOut_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcSingleInGivenPoolOut(
tokenBalanceIn_base=to_base_18(10.0),
tokenWeightIn_base=to_base_18(1.0),
poolSupply_base=to_base_18(120.0),
totalWeight_base=to_base_18(2.0),
poolAmountOut_base=to_base_18(10.0),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 1.736
def test_calcSingleOutGivenPoolIn_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcSingleOutGivenPoolIn(
tokenBalanceOut_base=to_base_18(10.0),
tokenWeightOut_base=to_base_18(1.0),
poolSupply_base=to_base_18(120.0),
totalWeight_base=to_base_18(2.0),
poolAmountIn_base=to_base_18(10.0),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 1.597
def test_calcPoolInGivenSingleOut_base(network, alice_wallet):
pool = _deployBPool(network, alice_wallet)
x = pool.calcPoolInGivenSingleOut(
tokenBalanceOut_base=to_base_18(1000.0),
tokenWeightOut_base=to_base_18(5.0),
poolSupply_base=to_base_18(100.0),
totalWeight_base=to_base_18(10.0),
tokenAmountOut_base=to_base_18(0.1),
swapFee_base=0,
)
assert round(from_base_18(x), 3) == 0.005
def _createPoolWith2Tokens(
network: str,
T1: BToken,
T2: BToken,
wallet: Wallet,
bal1: float,
bal2: float,
w1: float,
w2: float,
):
pool = _deployBPool(network, wallet)
T1.get_tx_receipt(T1.approve(pool.address, to_base_18(bal1), from_wallet=wallet))
T2.get_tx_receipt(T2.approve(pool.address, to_base_18(bal2), from_wallet=wallet))
if pool.isBound(T1.address):
pool.unbind(T1.address, wallet)
if pool.isBound(T2.address):
pool.unbind(T2.address, wallet)
pool.bind(T1.address, to_base_18(bal1), to_base_18(w1), from_wallet=wallet)
pool.bind(T2.address, to_base_18(bal2), to_base_18(w2), from_wallet=wallet)
return pool
def _deployBPool(network: str, from_wallet: Wallet) -> BPool:
factory_address = get_bfactory_address(network)
factory = BFactory(factory_address)
pool_address = factory.newBPool(from_wallet=from_wallet)
pool = BPool(pool_address)
return pool
```
#### File: tests/ocean/test_ocean_auth.py
```python
from ocean_lib.ocean.ocean_auth import OceanAuth
from tests.resources.helper_functions import get_publisher_wallet
def test_get_token():
ocn_auth = OceanAuth(":memory:")
wallet = get_publisher_wallet()
token = ocn_auth.get(wallet)
assert isinstance(token, str), "Invalid auth token type."
assert token.startswith("0x"), "Invalid auth token."
parts = token.split("-")
assert len(parts) == 2, "Invalid token, timestamp separator is not found."
address = ocn_auth.check(token)
assert address != "0x0", "Verifying token failed."
def test_check_token(web3_instance):
ocn_auth = OceanAuth(":memory:")
wallet = get_publisher_wallet()
token = ocn_auth.get(wallet)
address = ocn_auth.check(token)
assert address != "0x0", "Verifying token failed."
sig = token.split("-")[0]
assert ocn_auth.check(sig) == "0x0"
# Test token expiration
def test_store_token():
ocn_auth = OceanAuth(":memory:")
wallet = get_publisher_wallet()
token = ocn_auth.store(wallet)
assert ocn_auth.check(token) == wallet.address, "invalid token, check failed."
# verify it is saved
assert ocn_auth.restore(wallet) == token, "Restoring token failed."
def test_restore_token():
ocn_auth = OceanAuth(":memory:")
wallet = get_publisher_wallet()
assert (
ocn_auth.restore(wallet) is None
), "Expecting None when restoring non-existing token."
token = ocn_auth.store(wallet)
assert ocn_auth.check(token) == wallet.address, "invalid token, check failed."
# verify it is saved
assert ocn_auth.restore(wallet) == token, "Restoring token failed."
def test_known_token():
token = (
"0x1d2741<PASSWORD>30e64989ef0203957c01b14f250f5d2f6ccb0c"
"88c9518816e4fcec16f84e545094eb3f377b7e214ded22676"
"fbde8ca2e41b4eb1b3565047ecd9acf300-1568372035"
)
pub_address = "0xe2DD09d719Da89e5a3D0F2549c7E24566e947260"
ocn_auth = OceanAuth(":memory:")
assert ocn_auth.is_token_valid(
token
), "Invalid token!! has the token specs changed?"
def _get_timestamp():
return int("1568372035") + 10000
ocn_auth._get_timestamp = _get_timestamp
address = ocn_auth.check(token)
assert address.lower() == pub_address.lower(), (
f"Recovered address {address} does not match "
f"known signer address {pub_address}, if the "
f"token generation method is changed please update "
f"the token in this test with the new format."
)
``` |
{
"source": "joshualy/numerical_computing",
"score": 4
} |
#### File: Introduction/PlottingIntro/solutions.py
```python
import numpy as np
from matplotlib import pyplot as plt
def var_of_means(n):
"""Construct a random matrix A with values drawn from the standard normal
distribution. Calculate the mean value of each row, then calculate the
variance of these means. Return the variance.
Inputs:
n (int): The number of rows and columns in the matrix A.
Returns:
(float) The variance of the means of each row.
"""
A = np.random.randn(n,n)
return A.mean(axis=1).var()
def prob1():
"""Create an array of the results of var_of_means() with inputs
n = 100, 200, ..., 1000. Plot and show the resulting array.
"""
y = np.array([var_of_means(n) for n in xrange(100, 1100, 100)])
plt.plot(y)
plt.show()
def prob2():
"""Plot the functions sin(x), cos(x), and arctan(x) on the domain
[-2pi, 2pi]. Make sure the domain is refined enough to produce a figure
with good resolution.
"""
x = np.linspace(-2*np.pi, 2*np.pi, 200)
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x))
plt.plot(x, np.arctan(x))
plt.show()
def prob3():
"""Plot the curve f(x) = 1/(x-1) on the domain [-2,6].
1. Split the domain so that the curve looks discontinuous.
2. Plot both curves with a thick, dashed magenta line.
3. Change the range of the y-axis to [-6,6].
"""
x1, x2 = np.split(np.linspace(-2, 6, 200), [75])
# x1, x2 = np.linspace(-2, 1, 75), np.linspace(1, 6, 125)
plt.plot(x1, 1/(x1 - 1), 'm--', lw=4)
plt.plot(x2, 1/(x2 - 1), 'm--', lw=4)
plt.ylim(-6, 6)
plt.show()
def prob4():
"""Plot the functions sin(x), sin(2x), 2sin(x), and 2sin(2x) on the
domain [0, 2pi].
1. Arrange the plots in a square grid of four subplots.
2. Set the limits of each subplot to [0, 2pi]x[-2, 2].
3. Give each subplot an appropriate title.
4. Give the overall figure a title.
5. Use the following line colors and styles.
sin(x): green solid line.
sin(2x): red dashed line.
2sin(x): blue dashed line.
2sin(2x): magenta dotted line.
"""
x = np.linspace(0, 2*np.pi, 200)
plt.subplot(221) # sin(x)
plt.plot(x, np.sin(x), 'g-', lw=2)
plt.axis([0, 2*np.pi, -2, 2])
plt.title("sin(x)")
plt.subplot(222) # sin(2x)
plt.plot(x, np.sin(2*x), 'r--', lw=2)
plt.axis([0, 2*np.pi, -2, 2])
plt.title("sin(2x)")
plt.subplot(223) # 2sin(x)
plt.plot(x, 2*np.sin(x), 'b--', lw=2)
plt.axis([0, 2*np.pi, -2, 2])
plt.title("2sin(x)")
plt.subplot(224) # 2sin(2x)
plt.plot(x, 2*np.sin(2*x), 'm:', lw=2)
plt.axis([0, 2*np.pi, -2, 2])
plt.title("2sin(2x)")
plt.suptitle("Solution to Problem 4 (subplots)")
plt.show()
def prob5():
"""Visualize the data in FARS.npy. Use np.load() to load the data, then
create a single figure with two subplots:
1. A scatter plot of longitudes against latitudes. Because of the
large number of data points, use black pixel markers (use "k,"
as the third argument to plt.plot()). Label both axes.
2. A histogram of the hours of the day, with one bin per hour.
Label and set the limits of the x-axis.
"""
data = np.load("FARS.npy")
plt.subplot(211)
plt.plot(data[:,1], data[:,2], 'k,')
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.axis("equal")
plt.subplot(212)
plt.hist(data[:,0], bins=24, range=[-.5, 23.5])
plt.xlim(-.5,23.5)
plt.xlabel("Hour (Military Time)")
plt.suptitle("Solution to Problem 5 (FARS data)")
plt.show()
def prob6():
"""Plot the function f(x,y) = sin(x)sin(y)/xy on the domain
[-2pi, 2pi]x[-2pi, 2pi].
1. Create 2 subplots: one with a heat map of f, and one with a contour
map of f. Choose an appropriate number of level curves, or specify
the curves yourself.
2. Set the limits of each subplot to [-2pi, 2pi]x[-2pi, 2pi].
3. Choose a non-default color scheme.
4. Add a colorbar to each subplot.
"""
    # Define the meshgrid and calculate f() on the grid.
x = np.linspace(-2*np.pi, 2*np.pi, 200)
y = np.copy(x)
X, Y = np.meshgrid(x,y)
Z = np.sin(X)*np.sin(Y)/(X*Y)
plt.subplot(121) # Heat map.
plt.pcolormesh(X, Y, Z, cmap="Spectral")
plt.axis([-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
plt.colorbar()
plt.subplot(122) # Contour map.
plt.contour(X, Y, Z, 10, cmap="Spectral")
plt.axis([-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
plt.colorbar()
plt.suptitle("Solution to Problem 6 (meshgrids)")
plt.show()
```
#### File: Introduction/PythonIntro/testDriver.py
```python
import signal
from functools import wraps
def _timeout(seconds):
"""Decorator for preventing a function from running for too long.
Inputs:
seconds (int): The number of seconds allowed.
Notes:
This decorator uses signal.SIGALRM, which is only available on Unix.
"""
assert isinstance(seconds, int), "@timeout(sec) requires an int"
class TimeoutError(Exception):
pass
def _handler(signum, frame):
"""Handle the alarm by raising a custom exception."""
raise TimeoutError("Timeout after {0} seconds".format(seconds))
def decorator(func):
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handler)
signal.alarm(seconds) # Set the alarm.
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0) # Turn the alarm off.
return result
return wraps(func)(wrapper)
return decorator
# Test Driver =================================================================
from numpy.random import randint
from solutions import *
def test(student_module):
"""Test script. Import the student's solutions file as a module.
5 points for problem 2
10 points for problem 3
5 points for problem 4
5 points for problem 5
10 points for problem 6
5 points for problem 7
Parameters:
student_module: the imported module for the student's file.
Returns:
score (int): the student's score, out of 40.
feedback (str): a printout of results for the student.
"""
tester = _testDriver()
tester.test_all(student_module)
return tester.score, tester.feedback
class _testDriver(object):
"""Class for testing a student's work. See test.__doc__ for more info.
This and all other test drivers can be used to grade the entire lab
assignment at once via test_all(), or to grade one problem at a time
via the different problemX() functions.
The point distribution is only a suggestion; the instructor may alter
the weight of each problem as they see fit.
"""
# Constructor -------------------------------------------------------------
def __init__(self):
"""Initialize the feedback attribute."""
self.feedback = ""
# Main routine -----------------------------------------------------------
def test_all(self, student_module, total=40):
"""Grade the provided module on each problem and compile feedback."""
# Reset feedback and score.
self.feedback = ""
self.score = 0
def test_one(problem, label, value):
"""Test a single problem, checking for errors."""
try:
self.feedback += "\n\n{} ({} points):".format(label, value)
points = problem(student_module)
self.score += points
self.feedback += "\nScore += {}".format(points)
except BaseException as e:
self.feedback += "\n{}: {}".format(self._errType(e),e)
# Grade each problem.
test_one(self.problem2, "Problem 2", 5) # Problem 2: 5 points.
test_one(self.problem3, "Problem 3", 10) # Problem 3: 10 points.
test_one(self.problem4, "Problem 4", 5) # Problem 4: 5 points.
test_one(self.problem5, "Problem 5", 5) # Problem 5: 5 points.
test_one(self.problem6, "Problem 6", 10) # Problem 6: 10 points.
test_one(self.problem7, "Problem 7", 5) # Problem 7: 5 points.
# Report final score.
percentage = (100. * self.score) / total
self.feedback += "\n\nTotal score: {}/{} = {}%".format(
self.score, total, round(percentage, 2))
if percentage >= 98: self.feedback += "\n\nExcellent!"
elif percentage >= 90: self.feedback += "\n\nGreat job!"
# Add comments (optionally).
print(self.feedback)
comments = str(raw_input("Comments: "))
if len(comments) > 0:
self.feedback += '\n\n\nComments:\n\t{}'.format(comments)
# Helper Functions --------------------------------------------------------
@staticmethod
def _errType(error):
"""Get just the name of the exception 'error' in string format."""
return str(type(error).__name__)
def _eqTest(self, correct, student, message):
"""Test to see if 'correct' and 'student' are equal.
Report the given 'message' if they are not.
"""
if correct == student:
return 1
else:
self.feedback += "\n{}".format(message)
self.feedback += "\n\tCorrect response: {}".format(correct)
self.feedback += "\n\tStudent response: {}".format(student)
return 0
def _grade(self, points, message=None):
"""Manually grade a problem worth 'points'. Return the score.
If full points are not earned, get feedback on the problem.
"""
credit = -1
while credit > points or credit < 0:
try:
credit = int(input("\nScore out of {}: ".format(points)))
except:
credit = -1
if credit != points:
# Add comments (optionally),
comments = raw_input("Comments: ")
if len(comments) > 0:
self.feedback += "\n{}".format(comments)
# Or add a predetermined error message.
elif message is not None:
self.feedback += "\n{}".format(message)
return credit
# Problems ----------------------------------------------------------------
@_timeout(5)
def problem2(self, s):
"""Test sphere_volume(). 5 Points."""
if s.sphere_volume(1) is None:
raise NotImplementedError("sphere_volume() returned nothing")
points = 2*self._eqTest(sphere_volume(5), s.sphere_volume(5),
"sphere_volume(5) failed")
points += 3*self._eqTest(sphere_volume(3.14), s.sphere_volume(3.14),
"sphere_volume(3.14) failed")
return points
@_timeout(5)
def problem3(self, s):
"""Test first_half() and backward(). 10 points."""
if s.first_half("abcde") is None:
raise NotImplementedError("first_half() returned nothing")
if s.backward("abcde") is None:
raise NotImplementedError("backward() returned nothing")
points = 2*self._eqTest(first_half("abcde"), s.first_half("abcde"),
"first_half('abcde') failed")
points += 3*self._eqTest(first_half("TK421"), s.first_half("TK421"),
"first_half('TK421') failed")
points += 2*self._eqTest(backward("abcde"), s.backward("abcde"),
"backward('abcde') failed")
points += 3*self._eqTest(backward("TK421"), s.backward("TK421"),
"backward('TK421') failed")
return points
@_timeout(5)
def problem4(self, s):
"""Test list_ops(). 5 points."""
if s.list_ops() is None:
raise NotImplementedError("list_ops() returned nothing")
return 5*self._eqTest(list_ops(), s.list_ops(), "list_ops() failed")
@_timeout(5)
def problem5(self, s):
"""Test pig_latin(). 5 points."""
if s.pig_latin("piglatin") is None:
raise NotImplementedError("pig_latin() returned nothing")
points = 2*self._eqTest( pig_latin("college"),
s.pig_latin("college"),
"pig_latin('college') failed")
points += 3*self._eqTest( pig_latin("university"),
s.pig_latin("university"),
"pig_latin('university') failed")
return points
@_timeout(10)
def problem6(self, s):
"""Test palindrome(). 5 points."""
if s.palindrome() is None:
raise NotImplementedError("palindrome() returned nothing")
correct, student = palindrome(), s.palindrome()
if correct > student:
self.feedback += "\npalindrome() failed: {} is too low".format(
student)
elif correct < student:
self.feedback += "\npalindrome() failed: {} is too high".format(
student)
return 10 if correct == student else 0
@_timeout(5)
def problem7(self, s):
"""Test alt_harmonic(). 10 points."""
if s.alt_harmonic(10) is None:
raise NotImplementedError("alt_harmonic() returned nothing")
points = 2*self._eqTest(alt_harmonic(100), s.alt_harmonic(100),
"alt_harmonic(100) failed")
points += 3*self._eqTest(alt_harmonic(5000), s.alt_harmonic(5000),
"alt_harmonic(5000) failed")
return points
if __name__ == '__main__':
import solutions
test(solutions)
```
#### File: Introduction/StandardLibrary/box.py
```python
from itertools import combinations
def isvalid(roll, remaining):
"""Check to see whether or not a roll is valid. That is, check if there
exists a combination of the entries of 'remaining' that sum up to 'roll'.
Parameters:
roll (int): The value of a dice roll, between 2 and 12 (inclusive).
remaining (list): The list of the numbers that still need to be
removed before the box can be shut.
Returns:
True if the roll is valid.
False if the roll is invalid.
"""
if roll not in range(2, 13):
return False
for i in xrange(1, len(remaining)+1):
if any([sum(combo) == roll for combo in combinations(remaining, i)]):
return True
return False
def parse_input(player_input, remaining):
"""Convert a string of numbers into a list of unique integers, if possible.
Then check that each of those integers is an entry in the other list.
Parameters:
player_input (str): A string of integers, separated by spaces.
The player's choices for which numbers to remove.
remaining (list): The list of the numbers that still need to be
removed before the box can be shut.
Returns:
A list of the integers if the input was valid.
An empty list if the input was invalid.
"""
try:
choices = [int(i) for i in player_input.split()]
if len(set(choices)) != len(choices):
raise ValueError
if any([number not in remaining for number in choices]):
raise ValueError
return choices
except ValueError:
return []
```
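A doctest-style sketch of how the two helpers behave (values chosen by hand, with `box.py`'s functions in scope):
```python
# isvalid() searches every combination of the remaining numbers for the roll.
isvalid(8, [1, 2, 5, 9])            # True  (1 + 2 + 5 == 8)
isvalid(3, [4, 5, 6])               # False (no subset sums to 3)

# parse_input() accepts only unique integers that are still on the board.
parse_input("1 2 5", [1, 2, 5, 9])  # [1, 2, 5]
parse_input("1 1", [1, 2, 5, 9])    # []  (duplicate entries rejected)
parse_input("7", [1, 2, 5, 9])      # []  (7 is not among the remaining numbers)
```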
#### File: Introduction/StandardLibrary/solutions.py
```python
def add(x,y):
return x + y
def mult(x,y):
return x * y
def sqrt(x):
return x**.5
# or 'from math import sqrt' at the top.
# solutions.py ================================================================
import sys
from random import randint
from box import isvalid, parse_input
# Problem 1: Implement this function.
def prob1(l):
"""Accept a list 'l' of numbers as input and return a list with the
minimum, maximum, and average of the original list (in that order).
"""
return [min(l), max(l), float(sum(l))/len(l)]
# Problem 2: Implement this function.
def prob2():
"""Programmatically determine which Python objects are mutable and which
are immutable. Test numbers, strings, lists, tuples, and dictionaries.
Print your results to the terminal.
"""
# numbers: num+= 1
num1 = 0
num2 = num1
num1 += 1
print("Numbers:\t"),
if num1 == num2:
print("Mutable")
else:
print("Immutable")
# strings: str1 += 'a'
str1 = "a"
str2 = str1
str1 += "a"
print("Strings:\t"),
if str1 == str2:
print("Mutable")
else:
print("Immutable")
# lists: list1.append(1)
list1 = [4,3,2]
list2 = list1
list1.append(1)
print("Lists:\t\t"),
if list1 == list2:
print("Mutable")
else:
print("Immutable")
# tuples: tup1 += (1,)
tup1 = (4,3,2)
tup2 = tup1
tup1 += (1,)
print("Tuples:\t\t"),
if tup1 == tup2:
print("Mutable")
else:
print("Immutable")
# dictionaries: dic1[1] = 'a'
dic1 = dict()
dic1[1] = 'b'
dic2 = dic1
dic1[1] = 'a'
print("Dictionaries:\t"),
if dic1 == dic2:
print("Mutable")
else:
print("Immutable")
# Problem 3: Write a 'calculator' module and use it to implement this function.
def prob3(a,b):
"""Calculate and return the length of the hypotenuse of a right triangle.
Do not use any methods other than those that are imported from your
'calculator' module.
Parameters:
a : the length one of the sides of the triangle.
b : the length the other non-hypotenuse side of the triangle.
Returns:
The length of the triangle's hypotenuse.
"""
# c = calculator # or "import calculator as c" at the top
# return c.sqrt(c.add(c.mult(a,a),c.mult(b,b)))
# Or, a slightly longer way:
a2 = mult(a,a)
b2 = mult(b,b)
a2plusb2 = add(a2, b2)
return sqrt(a2plusb2)
# Problem 4: Implement shut the box.
def shut_the_box():
"""Play a single game of shut the box."""
# Get the player's name.
if len(sys.argv) != 2:
player = raw_input("Player name: ")
else:
player = sys.argv[1]
# Initialize the box.
numbers = range(1,10)
# Take a turn until either the player wins or gets a game over.
while len(numbers) > 0:
if sum(numbers) <= 6: # Roll a single die.
roll = randint(1,6)
else: # Roll two dice.
roll = randint(1,6) + randint(1,6)
# Print the game information.
print "\nNumbers left:", numbers
print "Roll:", roll
if not isvalid(roll, numbers):
print "Game over!"
break
# Choose a valid integer or integers to eliminate.
choices = []
while len(choices) == 0:
# Parse the player's input.
choices = parse_input(raw_input("Numbers to eliminate: "), numbers)
# Make sure the player's choices actually sum up to the roll.
if sum(choices) == roll:
# Remove the player's choices from the remaining numbers.
for number in choices:
numbers.remove(number)
# Report invalid input and go back to the top of the inner loop.
else:
print "Invalid input"
choices = []
# Report the player's final score.
score = sum(numbers)
print("\nScore for player " + player + ": " + str(score) + " points")
# or print("\nScore for player {}: {} points".format(player, score))
if score == 0:
print("Congratulations!! You shut the box!")
print("")
if __name__ == "__main__":
shut_the_box()
```
#### File: Orphans/CompressedSensing/camera.py
```python
import numpy as np
class Camera(object):
def __init__(self, faces, verts, C):
Fs = self._buildfaces(faces, verts)
l = len(Fs)
self.F = np.ones((l, 4, 3))
self.F[:, :-1, :] = Fs
self.C = C
self.b = None
self.M = None
def _buildfaces(self, facetM, verM):
x = np.zeros(3)
y = np.zeros(3)
z = np.zeros(3)
l = len(facetM)
F = np.empty((l, 3, 3))
for n in xrange(l):
x[0] = verM[facetM[n, 0]-1, 0]
y[0] = verM[facetM[n, 0]-1, 1]
z[0] = verM[facetM[n, 0]-1, 2]
x[1] = verM[facetM[n, 1]-1, 0]
y[1] = verM[facetM[n, 1]-1, 1]
z[1] = verM[facetM[n, 1]-1, 2]
x[2] = verM[facetM[n, 2]-1, 0]
y[2] = verM[facetM[n, 2]-1, 1]
z[2] = verM[facetM[n, 2]-1, 2]
verts = np.array([x, y, z])
F[n] = verts
return F
def _Transform(self, theta, psi, r=3):
c = np.array([r*np.sin(psi)*np.cos(theta), r*np.sin(psi)*np.sin(theta), r*np.cos(psi)])
cnorm = np.linalg.norm(c)
t = np.arccos(-c[2]/cnorm)
wstar = np.array([c[1]/cnorm, -c[0]/cnorm, 0])
w = wstar/np.linalg.norm(wstar)
what = np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])
R = np.eye(3)+np.sin(t)*what+(1-np.cos(t))*np.linalg.matrix_power(what, 2)
P = np.zeros((4, 4))
P[:3, :3] = R.T
P[:3, -1] = np.dot(-R.T ,c)
P[-1, -1] = 1
return P, c
def _SPC(self, theta, psi, r=3):
P, c = self._Transform(theta, psi, r)
def Pc(y,f=.5):
return np.array([y[0]/(f*y[2]), y[1]/(f*y[2])])
l = len(self.F)
A = np.empty(l)
for i in xrange(l):
v1 = Pc(np.dot(P, self.F[i, :, 1]))-Pc(np.dot(P, self.F[i, :, 0]))
v2 = Pc(np.dot(P, self.F[i, :, 2]))-Pc(np.dot(P, self.F[i, :, 0]))
A[i] = .5*np.abs(v1[0]*v2[1]-v1[1]*v2[0])
centers = np.mean(self.F[:, :-1, :], axis=-1)
e = np.sqrt(np.sum((c-centers)**2, axis=1))<np.sqrt(10.)
M = e*A
b = np.empty(3)
b = np.dot(M, self.C)
return M, b
def add_pic(self, theta, psi, r=3):
M, b = self._SPC(theta, psi, r)
if self.b is None:
self.b = np.array([b], dtype=np.float)
self.M = np.array([M], dtype=np.float)
else:
self.b = np.concatenate((self.b, np.array([b])), axis=0)
self.M = np.concatenate((self.M, np.array([M])), axis=0)
def add_lots_pic(self, n, r=3):
self.b = np.empty((n, 3))
self.M = np.empty((n, 1012))
for i in xrange(n):
theta = np.random.rand()*np.pi
psi = np.random.rand()*2*np.pi
M, b = self._SPC(theta, psi, r)
self.b[i] = b
self.M[i] = M
def returnData(self):
return self.M, self.b
def clear(self):
self.b = None
self.M = None
```
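A hypothetical usage sketch of the `Camera` class; `faces`, `verts`, and `C` stand in for whatever mesh and color data the lab supplies, and only the methods defined above are assumed:
```python
import numpy as np
# faces, verts, C are placeholders loaded elsewhere (hypothetical inputs).
cam = Camera(faces, verts, C)        # precompute per-facet geometry
cam.add_pic(np.pi/4, np.pi/3)        # one simulated photo at (theta, psi)
cam.add_lots_pic(50)                 # or 50 photos at random angles
M, b = cam.returnData()              # measurement matrix and observations
cam.clear()                          # reset before another experiment
```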
#### File: Orphans/GUI/spec.py
```python
import functools
import numpy as np
# NOTE: the widgets below need a Qt binding; PyQt4 and PySide both provide the
# QtGui classes used in this spec (assumption: adjust to the installed binding).
from PyQt4 import QtGui
'''Problem 1 - Create a GUI with a button, text box, and label that will
display the contents of the text box in the label once the button is pressed.'''
'''Problem 2 - Complete the MatrixCalculator by
1. Adding a QComboBox to the GUI.
2. Add options to the QComboBox to calculate the determinant and inverse.
3. Implement the determinant and inverse function. Hint: Use NumPy.
4. Display the proper output in the textbox.
First read through the entire class in order to see examples of interesting widgets.
Then complete the problem at the specified places.'''
class matrix_calculator(QtGui.QMainWindow):
def __init__(self):
super(matrix_calculator, self).__init__()
self._initUI()
def _initUI(self):
# Sets the number of dimensions for the first matrix
self.rows = 3
self.columns = 3
# For second matrix if multiplication
self.rows2 = 3
self.columns2 = 3
self.color = None
# Creates menu
menubar = self.menuBar()
# Import Matrix
self.importMatrix = QtGui.QAction('Import Matrix', self)
# Import Second Matrix
# We will add importMatrix2 later only if the calculation is set to multiplication
self.importMatrix2 = QtGui.QAction('Import Matrix2', self)
# Add menus to menubar
self.fileMenu = menubar.addMenu('&File')
self.importMenu = self.fileMenu.addMenu('Import')
self.importMenu.addAction(self.importMatrix)
# Creates the table that will be used for the inputted matrix
self.matrix = QtGui.QTableWidget()
self.matrix.setRowCount(self.rows)
self.matrix.setColumnCount(self.columns)
for i in xrange(self.columns):
# makes the columns a little skinnier
self.matrix.setColumnWidth(i, 60)
# Creates a second matrix table for multiplication
self.matrix2 = QtGui.QTableWidget()
self.matrix2.setRowCount(self.rows)
self.matrix2.setColumnCount(self.columns)
for i in xrange(self.columns):
# makes the columns a little skinnier
self.matrix2.setColumnWidth(i, 60)
# Hide matrix2 until needed
self.matrix2.setVisible(False)
# Creates a push button to calculate
self.calculateButton = QtGui.QPushButton("Calculate")
# Creates a smashing awesome radio button
self.radio = QtGui.QRadioButton("Fill empty cells with 0")
# Creates the output textbox
self.output = QtGui.QPlainTextEdit("Output here")
# Creates spinboxes for matrix dimensions
self.dimRows = QtGui.QSpinBox()
self.dimCols = QtGui.QSpinBox()
self.dimRows.setRange(2,10)
self.dimCols.setRange(2,10)
self.dimRows.setValue(3)
self.dimCols.setValue(3)
labelRows = QtGui.QLabel("Number of Rows")
labelCols = QtGui.QLabel("Number of Columns")
self.dimRows2 = QtGui.QSpinBox()
self.dimCols2 = QtGui.QSpinBox()
self.dimRows2.setRange(2,10)
self.dimCols2.setRange(2,10)
self.dimRows2.setValue(3)
self.dimCols2.setValue(3)
self.dimRows2.setVisible(False)
self.dimCols2.setVisible(False)
# Creates grids for side-by-side widgets
dispgrid = QtGui.QGridLayout()
dispgrid.addWidget(self.matrix, 0, 0)
dispgrid.addWidget(self.matrix2, 0, 1)
dispgrid2 = QtGui.QGridLayout()
dispgrid2.addWidget(self.dimRows, 0, 0)
dispgrid2.addWidget(labelRows, 0, 1)
dispgrid2.addWidget(self.dimRows2, 0, 2)
dispgrid2.addWidget(self.dimCols, 1, 0)
dispgrid2.addWidget(labelCols, 1, 1)
dispgrid2.addWidget(self.dimCols2, 1, 2)
dispgrid2.addWidget(self.radio, 2, 0)
# Creates layout, adding the grids and remaining widgets
layout = QtGui.QVBoxLayout()
layout.addLayout(dispgrid)
layout.addLayout(dispgrid2)
layout.addWidget(self.calculateButton)
layout.addWidget(self.output)
# Adds the functionality of the buttons
self.calculateButton.clicked.connect(self.clickCalculate)
self.radio.clicked.connect(self.clickRadio)
self.dimRows.valueChanged.connect(self.updateRows)
self.dimCols.valueChanged.connect(self.updateCols)
self.dimRows2.valueChanged.connect(self.updateRows2)
self.dimCols2.valueChanged.connect(self.updateCols2)
self.importMatrix.triggered.connect(functools.partial(self.fileDialog, 1))
self.importMatrix2.triggered.connect(functools.partial(self.fileDialog, 2))
# Note: functools.partial is simply a function that allows you to
# pass arguments through this connect function.
'''Problem 2.1 -
Add a drop down menu here by adding a QComboBox. Call it self.matrixFunction.
Add the QComboBox to layout.
'''
'''Problem 2.2 -
Add options to the QComboBox to calculate the Determinant, Inverse, and
Multiplication.'''
# Sets central layout
window = QtGui.QWidget()
window.setLayout(layout)
self.setCentralWidget(window)
# Sets the location of the window on the screen
# The first two numbers are the location of the top left corner
# The last two numbers are the size of the window
self.setGeometry(50, 50, 500, 600)
self.setWindowTitle("Deluxe Matrix Calculator")
self.show()
def clickCalculate(self):
#get matrix out of table
Matrix = np.zeros((self.rows, self.columns))
for i in xrange(self.rows):
for j in xrange(self.columns):
try:
Matrix[i, j] = self.matrix.item(i,j).text()
except AttributeError:
self.output.setPlainText("Attribute Error: please fill in all the boxes.")
return
except ValueError:
self.output.setPlainText("Value Error: invalid character detected.")
                    return
calculation = self.matrixFunction.currentText()
result = "No result"
'''Problem 2.3 and 2.4 -
Implement the Determinant and Inverse portion of the function. And add
the proper text to result then display the result in self.output.
Hint: Use NumPy.
The Multiplication portion has been done for you.
'''
#Perform calculation
if (calculation == "Determinant"):
pass
elif (calculation == "Inverse"):
pass
elif (calculation == "Multiplication"):
# Get second matrix
Matrix2 = np.zeros((self.rows2, self.columns2))
for i in xrange(self.rows2):
for j in xrange(self.columns2):
try:
Matrix2[i, j] = self.matrix2.item(i,j).text()
except AttributeError:
self.output.setPlainText("Attribute Error: Please fill in all the boxes.")
return
except ValueError:
self.output.setPlainText("Value Error: Invalid character detected.")
return
try:
result = str(Matrix.dot(Matrix2))
except ValueError:
self.output.setPlainText("Value Error: Dimensions not aligned.")
return
def clickRadio(self):
# There's gotta be a better way to do this
for i in range(self.rows):
for j in range(self.columns):
# To find out if there is something in this slot,
# attempt to get the item in this slot.
# If an error is thrown, fill this slot with 0.
try:
a = self.matrix.item(i, j).text()
except AttributeError:
self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(0)))
if (self.matrix2.isVisible()):
for i in range(self.rows2):
for j in range(self.columns2):
try:
a = self.matrix2.item(i, j).text()
except AttributeError:
self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(0)))
def changeDisplay(self):
'''Depending on the selected calculation,
show or hide various Widgets.
'''
if (self.matrixFunction.currentText() == "Multiplication"):
self.matrix2.setVisible(True)
self.dimRows2.setVisible(True)
self.dimCols2.setVisible(True)
self.importMenu.addAction(self.importMatrix2)
else:
self.matrix2.setVisible(False)
self.dimRows2.setVisible(False)
self.dimCols2.setVisible(False)
self.importMenu.removeAction(self.importMatrix2)
def updateRows(self, n):
'''Changes number of rows'''
self.rows = n
self.matrix.setRowCount(self.rows)
def updateCols(self, n):
'''Changes number of columns'''
self.columns = n
self.matrix.setColumnCount(self.columns)
for i in xrange(self.columns):
self.matrix.setColumnWidth(i, 60)
#TODO: make it not resize columns that have been resized by user
def updateRows2(self, n):
'''Changes number of rows in matrix2'''
self.rows2 = n
self.matrix2.setRowCount(self.rows2)
def updateCols2(self, n):
'''Changes number of columns in matrix2'''
self.columns2 = n
self.matrix2.setColumnCount(self.columns2)
for i in xrange(self.columns2):
self.matrix2.setColumnWidth(i, 60)
#TODO: make it not resize columns that have been resized by user
def fileDialog(self, which):
'''Dialog box for importing a matrix.
Correct format for a matrix file:
Number of rows, number of columns, all entries;
separated by whitespace.
If there are not enough numbers in the file, fill the
remainder of the matrix with 0s. Excess numbers are ignored.
'''
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
try:
f = open(filename).read().split()
except:
self.output.setPlainText("IO Error: bad file.")
return
if (which == 1):
self.rows = int(f[0])
self.matrix.setRowCount(self.rows)
self.columns = int(f[1])
self.matrix.setColumnCount(self.columns)
# Iterate through the list f and set entries of matrix
for i in xrange(self.rows):
for j in xrange(self.columns):
try:
self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns + j + 2])))
except IndexError:
# If the file did not have enough numbers in it,
# fill the remaining entries with 0
f = np.zeros((self.rows * self.columns + 2))
self.matrix.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns + j + 2])))
elif (which == 2):
self.rows2 = int(f[0])
            self.matrix2.setRowCount(self.rows2)
            self.columns2 = int(f[1])
            self.matrix2.setColumnCount(self.columns2)
# Iterate through the list f and set entries of matrix2
for i in xrange(self.rows2):
for j in xrange(self.columns2):
try:
self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns2 + j + 2])))
except IndexError:
# If the file did not have enough numbers in it,
# fill the remaining entries with 0
f = np.zeros((self.rows2 * self.columns2 + 2))
self.matrix2.setItem(i, j, QtGui.QTableWidgetItem(str(f[i*self.columns2 + j + 2])))
'''Problem 3 - Create your own GUI. You may make the GUI to display an old lab
in an interesting way. Some suggestions are Numerical Derivatives, Image
Segmentation, SVD, or Convolution. Or you may make your own GUI. Include at
least 5 widgets.'''
```
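One possible sketch for Problems 2.1-2.4; these are fragments meant to be pasted into `_initUI` and `clickCalculate` respectively, not a complete file, and they only use widgets and NumPy routines already referenced in the spec:
```python
# Problem 2.1/2.2 sketch (inside _initUI): create and register the drop-down.
self.matrixFunction = QtGui.QComboBox()
self.matrixFunction.addItem("Determinant")
self.matrixFunction.addItem("Inverse")
self.matrixFunction.addItem("Multiplication")
self.matrixFunction.currentIndexChanged.connect(self.changeDisplay)
layout.addWidget(self.matrixFunction)

# Problem 2.3/2.4 sketch (inside clickCalculate, after Matrix is filled).
if calculation == "Determinant":
    result = str(np.linalg.det(Matrix))
elif calculation == "Inverse":
    try:
        result = str(np.linalg.inv(Matrix))
    except np.linalg.LinAlgError:
        result = "Matrix is singular; no inverse exists."
self.output.setPlainText(result)
```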
#### File: Orphans/SparseGrid/SparseGridIntro.py
```python
import pysg
import numpy as np
from matplotlib import pyplot as plt
def plotGrid(dim,level):
sg = pysg.sparseGrid(dim=dim,level=level)
sg.generatePoints()
points = sg.indices
pts = len(points)
xpts = np.empty(pts)
if dim==1:
for i in xrange(pts):
pt = tuple(points[i])
xpts[i] = sg.gP[pt].pointPosition(pt)[0]
plt.plot(xpts,np.ones_like(xpts),'*')
elif dim==2:
ypts = np.empty(pts)
for i in xrange(pts):
pt = tuple(points[i])
xpts[i], ypts[i] = sg.gP[pt].pointPosition(pt)
plt.plot(xpts,ypts,'*')
else:
if dim > 3:
print "Showing first three dimensions only"
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ypts = np.empty(pts)
zpts = np.empty(pts)
for i in xrange(pts):
pt = tuple(points[i])
xpts[i], ypts[i], zpts[i] = sg.gP[pt].pointPosition(pt)[:3]
ax.scatter(xpts, ypts, zpts)
#plt.show()
def n_vol(length,dim,level):
return length**dim/(2**(dim*(level-1))*(dim+1))
def example():
# There is an error here. the coefficient for loc [0.25,0.5] should be 0.
def f(x):
return x[0]
dim = 2
level = 3
sg = pysg.sparseGrid(dim=dim,level=level)
sg.setFunctionValues(f)
sg.nodal2Hier()
sg.setCoefficients()
total = 0
for i in xrange(len(sg.indices)):
total += sg.gP[tuple(sg.indices[i])].coeff
total /= 2**((level-1)*dim)*(dim+1)
print total
```
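A minimal way to exercise these helpers in the same module (the `pysg` package imported above is assumed to be available):
```python
# Visualize a 2-D, level-3 sparse grid and run the worked quadrature example.
plotGrid(dim=2, level=3)
plt.show()
example()   # prints the sparse-grid estimate for f(x) = x[0]
```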
#### File: joshualy/numerical_computing/travis_post.py
```python
from os.path import isfile
from travis_common import raise_msg
def all_present(fatal=True):
try:
assert isfile("Vol1.pdf"), "Vol1.pdf is missing"
assert isfile("Vol2.pdf"), "Vol2.pdf is missing"
assert isfile("Vol3.pdf"), "Vol3.pdf is missing"
assert isfile("Vol4.pdf"), "Vol4.pdf is missing"
assert isfile("ExtraLabs.pdf"), "ExtraLabs.pdf is missing"
except AssertionError as e:
raise_msg(e, fatal)
if __name__ == "__main__":
all_present(True)
```
#### File: joshualy/numerical_computing/travis_pre.py
```python
from os import popen
from travis_common import raise_msg
# 200KB in bytes
MAX_FILESIZE = 204800
def getOutput(cmd):
return popen(cmd).read()
def find_big_files(fatal=True):
# Load the names of the files listed in the exceptions file.
with open('travis_file_exceptions', 'rU') as ex:
approved_files = {name for name in ex.read().split('\n') if name != ""}
# Get the objects in the tree at the most recent commit.
this_commit = getOutput("git rev-list HEAD").split()[0]
tree = getOutput("git ls-tree -rlz {}".format(this_commit)).split("\0")
# Check that the objects in the tree are not too big.
violations = set()
for obj in tree:
try:
data = obj.split()
size, name = int(data[3]), data[4]
if name not in approved_files and size > MAX_FILESIZE:
violations.add((name, size))
except (IndexError, ValueError):
continue
if violations:
files = "\n".join(sorted(["\t{:.<50}{:.>20} bytes".format(*v)
for v in violations]))
raise_msg("Large files present:\n{}\n".format(files), fatal=fatal)
if __name__ == "__main__":
find_big_files(True)
```
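`find_big_files()` depends on the long-listing format of `git ls-tree -rlz`, where each NUL-separated record is roughly `<mode> <type> <hash> <size>\t<path>`; after `split()` the size is field 3 and the path is field 4, which is exactly what the loop reads. A small illustration with a made-up record:
```python
# Hypothetical ls-tree record (hash and size are placeholders).
record = "100644 blob 9daeafb9864cf43055ae93beb0afd6c7d144bfa4     1083\tREADME.md"
data = record.split()
size, name = int(data[3]), data[4]   # 1083, "README.md"
```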
#### File: Vol1A/DataVisualization/plots.py
```python
from __future__ import print_function
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
# Decorator ===================================================================
from matplotlib import colors, pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from functools import wraps
from sys import stdout
import os
def _save(filename):
"""Decorator for saving, clearing, and closing figures automatically."""
try:
name, extension = filename.split(".")
except (ValueError, TypeError) as e:
raise ValueError("Invalid file name '{}'".format(filename))
if extension not in {"pdf", "png"}:
raise ValueError("Invalid file extension '{}'".format(extension))
if not os.path.isdir("figures"):
os.mkdir("figures")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
print("{:.<40}".format(filename), end='')
stdout.flush()
plt.clf()
out = func(*args, **kwargs)
plt.savefig("figures/"+filename, format=extension)
print("done.")
return out
except Exception as e:
print("\n\t", e, sep='')
finally:
plt.clf()
plt.close('all')
return wrapper
return decorator
# Plots =======================================================================
import numpy as np
# Problem 1 (Anscombe's Quartet) ----------------------------------------------
def anscombe_data(save=False):
data = np.array([[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[ 8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[ 9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[ 6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[ 4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.50],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[ 7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[ 5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]])
if save:
np.save("anscombe.npy", data)
return data
# Problem 2 (Line Plots / Small Multiples) ------------------------------------
@_save("chebyshev_bad.pdf")
def line_bad():
x = np.linspace(-1, 1, 200)
for n in range(9):
plt.plot(x, np.polynomial.Chebyshev.basis(n)(x), lw=1,
label= "n = {}".format(n))
plt.axis([-1.1, 1.1, -1.1, 1.1])
plt.legend()
@_save("chebyshev_good.pdf")
def line_good():
x = np.linspace(-1, 1, 200)
for n in range(9):
plt.subplot(3,3,n+1)
plt.plot(x, np.polynomial.Chebyshev.basis(n)(x))
plt.axis([-1.1, 1.1, -1.1, 1.1])
# Turn off extra tick marks and axis labels.
plt.tick_params(which="both", top="off", right="off")
if n < 6:
plt.tick_params(labelbottom="off")
if n % 3:
plt.tick_params(labelleft="off")
plt.title("n = {}".format(n))
def prob2():
line_bad()
line_good()
# Problem 3 (Scatter Plots) ---------------------------------------------------
@_save("scatter_1.pdf")
def scatter_1():
length, width, height = np.random.randint(1, 20, (3,50))
plt.scatter(length, width, s=100)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18, color="white")
plt.ylabel("Width (inches)", fontsize=18)
plt.tick_params(labelbottom="off")
return length, width, height
@_save("scatter_2.pdf")
def scatter_2(length, width, height):
plt.scatter(length, width, c=height, s=100)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18, color="white")
plt.ylabel("Width (inches)", fontsize=18, color="white")
plt.tick_params(labelbottom="off", labelleft="off")
cbar = plt.colorbar()
cbar.set_label("Height (inches)", fontsize=18)
@_save("scatter_3.pdf")
def scatter_3(length, width, height):
plt.scatter(length, width, s=length*width*height/2., alpha=.7)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18)
plt.ylabel("Width (inches)", fontsize=18)
@_save("scatter_4.pdf")
def scatter_4(length, width, height):
plt.scatter(length, width, c=height, s=length*width*height/2., alpha=.7)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18)
plt.ylabel("Width (inches)", fontsize=18, color="white")
plt.tick_params(labelleft="off")
cbar = plt.colorbar()
cbar.set_label("Height (inches)", fontsize=18)
def prob3():
l,w,h = scatter_1()
scatter_2(l,w,h)
scatter_3(l,w,h)
scatter_4(l,w,h)
# Problem 4 (Histograms) ------------------------------------------------------
@_save("histogram_1_bad.pdf")
def histogram_1_bad(N):
data = np.random.normal(size=N)
plt.plot(data)
return data
@_save("histogram_1_good.pdf")
def histogram_1_good(data):
plt.hist(data, bins=30)
@_save("histogram_2.pdf")
def histogram_2(N):
data = np.random.beta(a=5, b=2, size=N)
plt.hist(data, bins=30)
return data
@_save("histogram_3.pdf")
def histogram_3(data):
plt.hist(data, bins=30, lw=0, histtype="stepfilled")
plt.tick_params(axis="y", labelcolor='white')
plt.tick_params(left="off", top="off", right="off")
@_save("histogram_4.pdf")
def histogram_4(data):
freq, bin_edges = np.histogram(data, bins=30)
bin_centers = (bin_edges[:-1] + bin_edges[1:])/2.
plt.plot(bin_centers, freq, 'k-', lw=4)
plt.tick_params(axis="y", labelcolor="white")
plt.tick_params(left="off", top="off", right="off")
# plt.tick_params(left="off", top="off", right="off", labelleft="off")
@_save("earthquake.pdf")
def earthquake():
years, magnitudes, longitude, latitude = np.load("earthquakes.npy").T
plt.plot(years, magnitudes, '.')
plt.xlabel("Year")
plt.ylabel("Magnitude")
def prob4():
histogram_1_good(histogram_1_bad(1000))
data = histogram_2(10000)
histogram_3(data)
histogram_4(data)
earthquake()
# Problem 5 -------------------------------------------------------------------
@_save("heatmap_1.png")
def heatmap_1(N):
x = np.linspace(-1.5, 1.5, N)
X, Y = np.meshgrid(x, x.copy())
Z = Y**2 - X**3 + X**2
plt.pcolormesh(X, Y, Z, cmap="viridis")
plt.colorbar()
return X, Y, Z
@_save("heatmap_2.png")
def heatmap_2(X, Y, Z):
plt.contour(X, Y, Z, [-1, -.25, 0, .25, 1, 4], colors="white")
plt.pcolormesh(X, Y, Z, cmap="viridis")
plt.colorbar()
@_save("contour_1.pdf")
def contour_1(X, Y, Z):
plt.contour(X, Y, Z, 6, cmap="viridis")
plt.colorbar()
@_save("contour_2.pdf")
def contour_2(X, Y, Z):
plt.contourf(X, Y, Z, 12, cmap="viridis")
plt.colorbar()
@_save("heatmap_3.png")
def heatmap_3(N):
x = np.linspace(-6, 6, N)
X, Y = np.meshgrid(x, x.copy())
Z = np.abs(Y**2 - X**3 + X**2)
plt.pcolormesh(X, Y, Z, cmap="plasma")
plt.colorbar()
return X, Y, Z
@_save("contour_3.pdf")
def contour_3(X, Y, Z):
plt.contourf(X, Y, Z, 6, cmap="plasma", norm=colors.LogNorm())
plt.colorbar()
def prob5():
x,y,z = heatmap_1(200)
heatmap_2(x,y,z)
contour_1(x,y,z)
contour_2(x,y,z)
x,y,z = heatmap_3(200)
contour_3(x,y,z)
# Problem 6 -------------------------------------------------------------------
@_save("bar_1.pdf")
def bar_1():
labels = ["Lobster Thermador", "Baked Beans", "Crispy Bacon",
"Smoked Sausage", "Hannibal Ham", "Eggs", "Spam"]
values = [10, 11, 18, 19, 20, 21, 22]
positions = np.arange(len(labels))
plt.bar(positions, values, align="center")
plt.xticks(positions, labels)
return labels, values, positions
@_save("bar_2.pdf")
def bar_2(labels, values, positions):
plt.barh(positions, values, align="center")
plt.yticks(positions, labels)
plt.gcf().subplots_adjust(left=0.2)
@_save("pie.pdf")
def pie(labels, values, positions):
explode = np.zeros(len(values))
explode[np.random.randint(0,explode.size)] = .2
plt.pie(values, explode, labels, shadow=True,
            startangle=np.random.randint(0,360))
plt.gca().set_aspect("equal")
@_save("dishonest_1.pdf")
def dishonest_1(N):
x = np.linspace(5, 10, N) + np.random.normal(size=N)/3.
y = .5*x + 4 + np.random.normal(size=N)/2.
plt.plot(x, y, 'o', ms=10)
return x, y
@_save("dishonest_2.pdf")
def dishonest_2(x, y):
plt.plot(x, y, 'o', ms=10)
plt.xlim(-5,20)
@_save("dishonest_3.pdf")
def dishonest_3(x, y):
plt.semilogy(x, y, 'o', ms=10)
@_save("honest.pdf")
def honest(x, y):
plt.plot(x, y, 'o', ms=10)
plt.xlim([0, x.max()+.2])
plt.ylim([0, x.max()+.2])
def country_data(save=True):
data = np.array([
[ 8.742, 374.056, 179.2, 167.6],
[ 10.985, 33.197, 160.0, 142.2],
[ 206.553, 1774.725, 173.1, 158.8],
[1378.36, 10866.444, 167 , 158.6],
[ 5.495, 229.810, 178.9, 165.3],
[ 81.771, 3355.772, 178 , 165 ],
[ 9.823, 120.687, 176 , 164 ],
[1330.09, 2073.543, 164.7, 161.2],
[ 127.00, 4123.258, 172.5, 158 ],
[ 24.214, 17.396, 165.6, 154.9],
[ 0.622, 4.588, 183.2, 168.4],
[ 5.237, 388.315, 182.4, 168 ],
[ 31.489, 192.084, 164 , 151 ],
[ 50.617, 1377.873, 170.8, 157.4],
[ 20.966, 82.316, 163.6, 151.4],
[ 8.342, 664.738, 175.4, 164 ],
[ 78.742, 718.221, 174 , 158.9],
[ 65.110, 2848.755, 177.8, 164.5],
[324.311, 17946.996, 176.1, 162.1],
[ 92.700, 193.599, 165.7, 155.2]
])
if save:
np.save("countries.npy", data)
return data
def prob6():
l,v,p = bar_1()
bar_2(l,v,p)
pie(l,v,p)
x,y = dishonest_1(20)
dishonest_2(x,y)
dishonest_3(x,y)
honest(x,y)
# =============================================================================
def save_all():
prob2()
prob3()
prob4()
prob5()
prob6()
if __name__ == '__main__':
save_all()
```
#### File: Vol1A/LinearSystems/testDriver.py
```python
import signal
from functools import wraps
from matplotlib import pyplot as plt
def _autoclose(func):
"""Decorator for closing figures automatically."""
@wraps(func)
def wrapper(*args, **kwargs):
try:
plt.ion()
return func(*args, **kwargs)
finally:
plt.close('all')
plt.ioff()
return wrapper
def _timeout(seconds):
"""Decorator for preventing a function from running for too long.
Inputs:
seconds (int): The number of seconds allowed.
Notes:
This decorator uses signal.SIGALRM, which is only available on Unix.
"""
assert isinstance(seconds, int), "@timeout(sec) requires an int"
class TimeoutError(Exception):
pass
def _handler(signum, frame):
"""Handle the alarm by raising a custom exception."""
message = "Timeout after {} seconds".format(seconds)
print(message)
raise TimeoutError(message)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handler)
signal.alarm(seconds) # Set the alarm.
try:
return func(*args, **kwargs)
finally:
signal.alarm(0) # Turn the alarm off.
return wrapper
return decorator
# Test Driver =================================================================
import numpy as np
from scipy import sparse
from scipy.linalg import lu_factor, solve
from inspect import getsourcelines
from solutions import ref, lu, prob5
np.set_printoptions(precision=3, suppress=True)
def test(student_module):
"""Grade a student's entire solutions file.
5 points for problem 1
10 points for problem 2
10 points for problem 3
5 points for problem 4
5 points for problem 5
5 points for problem 6
Inputs:
student_module: the imported module for the student's file.
Returns:
score (int): the student's score, out of 40.
feedback (str): a printout of test results for the student.
"""
tester = _testDriver()
tester.test_all(student_module)
return tester.score, tester.feedback
class _testDriver(object):
"""Class for testing a student's work.
Attributes:
Score (int)
Feedback (str)
"""
# Constructor -------------------------------------------------------------
def __init__(self):
"""Initialize the feedback attribute."""
self.feedback = ""
# Main routine ------------------------------------------------------------
def test_all(self, student_module, total=40):
"""Grade the provided module on each problem and compile feedback."""
# Reset feedback and score.
self.feedback = ""
self.score = 0
def test_one(problem, label, value):
"""Test a single problem, checking for errors."""
try:
self.feedback += "\n\n{} ({} points):".format(label, value)
points = problem(student_module)
self.score += points
self.feedback += "\nScore += {}".format(points)
except BaseException as e:
self.feedback += "\n{}: {}".format(self._errType(e), e)
# Grade each problem.
test_one(self.problem1, "Problem 1", 5) # Problem 1: 5 points.
test_one(self.problem2, "Problem 2", 10) # Problem 2: 10 points.
test_one(self.problem3, "Problem 3", 10) # Problem 3: 10 points.
test_one(self.problem4, "Problem 4", 5) # Problem 4: 5 points.
test_one(self.problem5, "Problem 5", 5) # Problem 5: 5 points.
test_one(self.problem6, "Problem 6", 5) # Problem 6: 5 points.
# Report final score.
percentage = (100. * self.score) / total
self.feedback += "\n\nTotal score: {}/{} = {}%".format(
self.score, total, round(percentage, 2))
if percentage >= 98: self.feedback += "\n\nExcellent!"
elif percentage >= 90: self.feedback += "\n\nGreat job!"
# Add comments (optionally).
print(self.feedback)
comments = str(raw_input("Comments: "))
if len(comments) > 0:
self.feedback += '\n\n\nComments:\n\t{}'.format(comments)
# Helper Functions --------------------------------------------------------
@staticmethod
def _errType(error):
"""Get just the name of the exception 'error' in string format."""
return str(type(error).__name__)
def _checkCode(self, func, keyword):
"""Check a function's source code for a key word. If the word is found,
print the code to the screen and prompt the grader to check the code.
Use this function to detect cheating. Returns a score out of 10.
"""
code = getsourcelines(func)[0][len(func.__doc__.splitlines())+1 :]
if any([keyword in line for line in code]):
print("\nStudent {}() code:\n{}\nCheating? [OK=10, Bad=0]".format(
func.__name__, "".join(code)))
return self._grade(10)
return 10
def _eqTest(self, correct, student, message):
"""Test to see if 'correct' and 'student' are equal.
Report the given 'message' if they are not.
"""
if np.allclose(correct, student):
return 1
else:
self.feedback += "\n{}".format(message)
self.feedback += "\n\tCorrect response:\n{}".format(correct)
self.feedback += "\n\tStudent response:\n{}".format(student)
return 0
def _grade(self, points, message=None):
"""Manually grade a problem worth 'points'. Return the score.
If full points are not earned, get feedback on the problem.
"""
credit = -1
while credit > points or credit < 0:
try:
credit = int(input("\nScore out of {}: ".format(points)))
except:
credit = -1
if credit != points:
# Add comments (optionally),
comments = raw_input("Comments: ")
if len(comments) > 0:
self.feedback += "\n{}".format(comments)
# Or add a predetermined error message.
elif message is not None:
self.feedback += "\n{}".format(message)
return credit
@staticmethod
def _luTestCase(n):
"""Generate an nxn matrix that does not require pivoting with the
Gaussian elimination or the LU Decomposition.
"""
A = np.random.randint(0,10,(n,n))
_, piv = lu_factor(A)
P = np.eye(n)
for i,j in enumerate(piv):
P[i], P[j] = P[j].copy(), P[i].copy()
return P.dot(A).astype(np.float)
# Problems ----------------------------------------------------------------
def problem1(self, s):
"""Test ref(). 5 points."""
@_timeout(2)
def _test(n, p):
"""Do an nxn test case worth p points."""
A = self._luTestCase(n)
return p * self._eqTest(ref(A), s.ref(A), "ref() failed.")
points = _test(3, 1) + _test(4, 2) + _test(5, 2)
return int(points * self._checkCode(s.ref, "lu_factor") / 10.)
def problem2(self, s):
"""Test lu(). 10 points."""
@_timeout(2)
def _test(n):
"""Do an nxn test case worth 5 points."""
A = self._luTestCase(n)
L1, U1 = lu(A)
if not np.allclose(L1.dot(U1), A):
return _test(n)
stu = s.lu(A.copy())
try:
L2, U2 = stu
except(TypeError, ValueError):
raise ValueError("lu() failed to return two arrays")
pts = 5
if not all(np.allclose(*x) for x in [(np.tril(L2), L2),
(np.triu(U2), U2), (L1, L2), (U1, U2), (A, L2.dot(U2))]):
pts = 2
self.feedback += "\n\n{}\nA =\n{}".format('- '*20, A)
if not np.allclose(np.tril(L2), L2):
self.feedback += "\nL not lower triangular:\n{}".format(L2)
pts -= 1
if not np.allclose(np.triu(U2), U2):
self.feedback += "\nU not upper triangular:\n{}".format(U2)
pts -= 1
pts += self._eqTest(L1, L2, "lu() failed (L incorrect)")
pts += self._eqTest(U1, U2, "lu() failed (U incorrect)")
pts += self._eqTest(A, L2.dot(U2), "lu() failed (A != LU)")
return pts
points = _test(4) + _test(6)
return int(points * self._checkCode(s.lu, "lu_factor") / 10.)
def problem3(self, s):
"""Test solve(). 10 points."""
@_timeout(2)
def _test(n, p):
"""Do an nxn test case worth p points."""
A = self._luTestCase(n)
b = np.random.randint(0, 10, n).astype(np.float)
stu = s.solve(A, b)
if not np.allclose(b, A.dot(stu)):
self.feedback += "\n\n{}\nA =\n{}\nb =\n{}".format('- '*20,A,b)
return p * self._eqTest(solve(A,b), stu, "solve() failed")
else:
return p
points = _test(3, 3) + _test(4, 3) + _test(5, 4)
return int(points * self._checkCode(s.solve, "solve(") / 10.)
@_autoclose
def problem4(self, s):
"""Test prob4(). 5 points."""
print("Running prob4()...(60 second time limit)")
_timeout(60)(s.prob4)()
print("""\nSpecifications:
1. Plots system size n versus execution times. (1 point)
2. Four lines, clearly marked: (2 points)
- la.inv() (slowest)
- la.solve() (in the middle)
- la.lu_factor() and la.lu_solve() (in the middle)
- la.lu_solve() alone (fastest)
3. The difference between each line is apparent. (2 points)
(Plot could be loglog or linlin as long as it is clear)
(Title and axis labels unnecessary)""")
return self._grade(5, "prob4() does not match specifications")
@_timeout(3)
def problem5(self, s):
"""Test prob5(). 5 points."""
def _test(n, p):
"""Do an nxn test case with p points."""
            stu = s.prob5(n)
if not sparse.issparse(stu):
self.feedback += "\n\tFailed to return a scipy.sparse matrix"
return 0
else:
if type(stu) is not sparse.dia.dia_matrix:
self.feedback += "\n\tReturn type should be sparse "
self.feedback += "diagonal matrix"
stu = sparse.dia_matrix(stu)
sol = prob5(n)
if not self._eqTest(stu.offsets,sol.offsets,"prob5({}) failed "
"(comparing indices of nonzero diagonals".format(n)):
p -= 1
return p*self._eqTest(stu.data, sol.data, "prob5({}) failed "
"(comparing nonzero diagonals)".format(n))
return _test(100, 2) + _test(100000, 3)
@_autoclose
def problem6(self, s):
"""Test prob6(). 5 points."""
print("\nRunning prob5()...(60 second time limit)")
_timeout(60)(s.prob6)()
print("""\nSpecifications:
1. Plots system size n versus execution times. (1 point)
2. Two lines, clearly marked: (2 points)
- scipy.sparse.linalg.spsolve() (faster)
- scipy.linalg.solve() (slower)
3. The difference between each line is apparent. (2 points)
(Plot could be loglog or linlin as long as it is clear)
(Title and axis labels unnecessary)""")
return self._grade(5, "prob6() does not match specifications")
# Validation ==================================================================
if __name__ == '__main__':
"""Validate the test driver by testing the solutions file."""
import solutions
test(solutions)
```
#### File: Vol1A/QR1-Decomposition/solutions.py
```python
import numpy as np
from scipy import linalg as la
# Problem 1
def qr_gram_schmidt(A):
"""Compute the reduced QR decomposition of A via Modified Gram-Schmidt.
Inputs:
A ((m,n) ndarray): A matrix of rank n.
Returns:
Q ((m,n) ndarray): An orthonormal matrix.
R ((n,n) ndarray): An upper triangular matrix.
"""
m,n = A.shape
Q = np.copy(A).astype(np.float)
R = np.zeros((n,n))
for i in range(n):
R[i,i] = la.norm(Q[:,i])
Q[:,i] /= R[i,i]
for j in range(i+1,n):
R[i,j] = np.dot(Q[:,j].T, Q[:,i])
Q[:,j] -= R[i,j]*Q[:,i]
return Q, R
# Problem 2
def abs_det(A):
"""Use the QR decomposition to efficiently compute the absolute value of
the determinant of A.
Inputs:
A ((n,n) ndarray): A square matrix.
Returns:
        (float) the absolute value of the determinant of A.
"""
Q,R = la.qr(A) # or Q, R = qr_gram_schmidt(A)
return abs(R.diagonal().prod())
# Problem 3
def solve(A, b):
"""Use the QR decomposition to efficiently solve the system Ax = b.
Inputs:
A ((n,n) ndarray): An invertible matrix.
b ((n, ) ndarray): A vector of length n.
Returns:
x ((n, ) ndarray): The solution to the system Ax = b.
"""
m,n = A.shape
Q,R = la.qr(A)
# QRx = b -> Rx = (Q^T)b.
y = np.dot(Q.T, b)
# Use back substitution to solve Rx = y.
x = np.zeros(n)
for k in reversed(range(n)):
x[k] = (y[k] - np.dot(R[k,k:], x[k:])) / R[k,k]
return x
# Use this since np.sign(0) -> 0.
sign = lambda x: 1 if x >= 0 else -1
# Problem 4
def qr_householder(A):
"""Compute the full QR decomposition of A via Householder reflections.
Inputs:
A ((m,n) ndarray): A matrix of rank n.
Returns:
Q ((m,m) ndarray): An orthonormal matrix.
R ((m,n) ndarray): An upper triangular matrix.
"""
m,n = A.shape
R = np.copy(A).astype(np.float)
Q = np.identity(m)
for k in range(n):
u = np.copy(R[k:,k])
u[0] += sign(u[0])*la.norm(u)
u /= la.norm(u)
R[k:,k:] -= 2*np.outer(u, np.dot(u, R[k:,k:]))
Q[k:] -= 2*np.outer(u, np.dot(u, Q[k:]))
return Q.T, R
# Problem 5
def hessenberg(A):
"""Compute the Hessenberg form H of A, along with the orthonormal matrix Q
such that A = QHQ^T.
Inputs:
A ((n,n) ndarray): An invertible matrix.
Returns:
H ((n,n) ndarray): The upper Hessenberg form of A.
Q ((n,n) ndarray): An orthonormal matrix.
"""
m,n = A.shape
H = np.copy(A).astype(np.float)
Q = np.identity(m)
for k in range(n-2):
u = np.copy(H[k+1:,k])
u[0] += sign(u[0])*la.norm(u)
u /= la.norm(u)
H[k+1:,k:] -= 2*np.outer(u, np.dot(u, H[k+1:,k:]))
H[:,k+1:] -= 2*np.outer(np.dot(H[:,k+1:], u), u)
Q[k+1:] -= 2*np.outer(u, np.dot(u, Q[k+1:]))
return H, Q.T
# Additional Material
def qr_givens(A):
"""Compute the QR decomposition of A via Givens triangularization,
assuming that at the ijth stage of the algorithm, a_ij will be nonzero.
Inputs:
A ((m,n) ndarray): A matrix of rank n.
Returns:
Q ((m,n) ndarray): An orthonormal matrix.
R ((n,n) ndarray): An upper triangular matrix.
"""
m,n = A.shape
R = np.copy(A).astype(np.float)
Q = np.identity(m)
for j in range(n):
for i in reversed(range(j+1,m)):
a,b = R[i-1,j], R[i,j]
G = np.array([[a,b],[-b,a]]) / np.sqrt(a**2+b**2)
R[i-1:i+1,j:] = np.dot(G, R[i-1:i+1,j:])
Q[i-1:i+1] = np.dot(G, Q[i-1:i+1])
return Q.T, R
def qr_givens_hessenberg(H):
"""Compute the QR decomposition of the upper Hessenberg matrix H via
Givens triangularization.
Inputs:
H ((m,n) ndarray): A matrix of rank n in upper Hessenberg form.
Returns:
Q ((m,n) ndarray): An orthonormal matrix.
R ((n,n) ndarray): An upper triangular matrix.
"""
m,n = H.shape
R = np.copy(H).astype(np.float)
Q = np.identity(m)
for j in xrange(min(n, m-1)):
i = j+1
a,b = R[i-1,j],R[i,j]
G = np.array([[a,b],[-b,a]]) / np.sqrt(a**2+b**2)
R[i-1:i+1,j:] = np.dot(G, R[i-1:i+1,j:])
Q[i-1:i+1,:i+1] = np.dot(G, Q[i-1:i+1,:i+1])
return Q.T, R
```
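A quick numerical self-check of the factorizations above (run in the same module so the functions are in scope; any full-rank random matrix works):
```python
import numpy as np

A = np.random.random((6, 4))                 # tall matrix, full rank with prob. 1

Q, R = qr_gram_schmidt(A)
assert np.allclose(np.triu(R), R)            # R is upper triangular
assert np.allclose(Q.T.dot(Q), np.eye(4))    # columns of Q are orthonormal
assert np.allclose(Q.dot(R), A)              # the factorization reproduces A

Q, R = qr_householder(A)                     # full QR: Q is 6x6, R is 6x4
assert np.allclose(Q.dot(R), A)

B = np.random.random((5, 5))
H, Q = hessenberg(B)
assert np.allclose(Q.dot(H).dot(Q.T), B)     # B = Q H Q^T
```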
#### File: Vol1A/SVD/svd_image_compression.py
```python
from scipy import linalg as la
import numpy as np
from matplotlib import pyplot as plt
# Problem 1
def truncated_svd(A,k=None):
"""Computes the truncated SVD of A. If r is None or equals the number
of nonzero singular values, it is the compact SVD.
Parameters:
A: the matrix
k: the number of singular values to use
Returns:
U - the matrix U in the SVD
s - the diagonals of Sigma in the SVD
Vh - the matrix V^H in the SVD
"""
raise NotImplementedError("truncated_svd incomplete")
# Problem 2
def visualize_svd():
"""Plot each transformation associated with the SVD of A."""
raise NotImplementedError("visualize_svd incomplete")
# Problem 3
def svd_approx(A, k):
"""Returns best rank k approximation to A with respect to the induced 2-norm.
Inputs:
A - np.ndarray of size mxn
k - rank
Return:
Ahat - the best rank k approximation
"""
raise NotImplementedError("svd_approx incomplete")
# Problem 4
def lowest_rank_approx(A,e):
"""Returns the lowest rank approximation of A with error less than e
with respect to the induced 2-norm.
Inputs:
A - np.ndarray of size mxn
e - error
Return:
Ahat - the lowest rank approximation of A with error less than e.
"""
raise NotImplementedError("lowest_rank_approx incomplete")
# Problem 5
def compress_image(filename,k):
"""Plot the original image found at 'filename' and the rank k approximation
of the image found at 'filename.'
filename - jpg image file path
k - rank
"""
raise NotImplementedError("compress_image incomplete")
```
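For reference, Problem 3 falls out directly from the truncated SVD. A sketch using `scipy.linalg.svd` (the lab may intend a hand-rolled SVD via Problem 1 instead):
```python
import numpy as np
from scipy import linalg as la

def svd_approx_sketch(A, k):
    """Best rank-k approximation of A in the induced 2-norm (Eckart-Young)."""
    U, s, Vh = la.svd(A, full_matrices=False)
    return U[:, :k].dot(np.diag(s[:k])).dot(Vh[:k, :])

# lowest_rank_approx(A, e) can then return the approximation for the smallest
# k whose next singular value s[k] already falls below e.
```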
#### File: Vol1B/ConditioningStability/conditioning.py
```python
import numpy as np
from numpy.random import normal
from matplotlib import pyplot as plt
# Problem 1
def prob1():
"""Randomly perturb w_coeff by replacing each coefficient a_i with
a_i*r_i, where r_i is drawn from a normal distribution centered at 1 with
varience 1e-10.
Plot the roots of 100 such experiments in a single graphic, along with the
roots of the unperturbed polynomial w(x).
Using the final experiment only, estimate the relative and absolute
condition number (in any norm you prefer).
Returns:
Display a graph of all 100 perturbations.
Print the values of relative and absolute condition numbers.
"""
w_roots = np.arange(1, 21)
w_coeffs = np.array([1, -210, 20615, -1256850, 53327946, -1672280820,
40171771630, -756111184500, 11310276995381,
-135585182899530, 1307535010540395,
-10142299865511450, 63030812099294896,
-311333643161390640, 1206647803780373360,
-3599979517947607200, 8037811822645051776,
-12870931245150988800, 13803759753640704000,
-8752948036761600000, 2432902008176640000])
raise NotImplementedError("Problem 1 Incomplete")
# Problem 2
def eig_condit(M):
"""Approximate the condition number of the eigenvalue problem at M.
Inputs:
M - A 2-D square NumPy array, representing a square matrix.
Returns:
A tuple containing approximations to the absolute and
relative condition numbers of the eigenvalue problem at M.
"""
raise NotImplementedError("Problem 2 Incomplete")
# 1 pt extra credit
def plot_eig_condit(x0=-100, x1=100, y0=-100, y1=100, res=10):
'''
Create a grid of points. For each pair (x,y) in the grid, find the
relative condition number of the eigenvalue problem, using the matrix
[[1 x]
[y 1]]
as your input. You can use plt.pcolormesh to plot the condition number
over the entire grid.
INPUT:
x0 - min x-value of the grid
x1 - max x-value
y0 - min y-value
y1 - max y-value
res - number of points along each edge of the grid
'''
raise NotImplementedError("plot_eig_condit() not implemented")
# Problem 3
def integral(n):
"""RETURN I(n)"""
raise NotImplementedError("Problem 3 Incomplete")
def prob3():
"""For the values of n in the problem, compute integral(n). Compare
the values to the actual values, and print your explanation of what
is happening.
"""
#actual values of the integral at specified n
actual_values = [0.367879441171, 0.145532940573, 0.0838770701034,
0.0590175408793, 0.0455448840758, 0.0370862144237,
0.0312796739322, 0.0270462894091, 0.023822728669,
0.0212860390856, 0.0192377544343]
raise NotImplementedError("Problem 3 Incomplete")
```
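Problem 2 is essentially the inner loop of `plot_eig_condit()` in the companion `plots.py` that follows. Pulled out into a standalone sketch (same 1e-6 perturbation scale as that script; the eigenvalues are compared in whatever order the solver returns them, exactly as the plotting code does):
```python
import numpy as np
from scipy import linalg as la

def eig_condit_sketch(M):
    """Estimate absolute/relative condition numbers of the eigenvalue problem."""
    eigs = la.eig(M)[0]
    perturb = (np.random.normal(0, 1e-6, M.shape)
               + 1j*np.random.normal(0, 1e-6, M.shape))
    eigsp = la.eig(M + perturb)[0]
    abs_cond = la.norm(eigs - eigsp) / la.norm(perturb)
    rel_cond = abs_cond * la.norm(M) / la.norm(eigs)
    return abs_cond, rel_cond
```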
#### File: Vol1B/ConditioningStability/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../../matplotlibrc')
import numpy as np
from scipy import linalg as la
from matplotlib import pyplot as plt
def wilkinson_poly():
""" Reproduce the Wilkinson Polynomial example shown in the lab. """
roots = np.arange(1,21)
w_coeffs = np.array([1, -210, 20615, -1256850, 53327946, -1672280820,
40171771630, -756111184500, 11310276995381,
-135585182899530, 1307535010540395,
-10142299865511450, 63030812099294896,
-311333643161390640, 1206647803780373360,
-3599979517947607200, 8037811822645051776,
-12870931245150988800, 13803759753640704000,
-8752948036761600000, 2432902008176640000])
perturb = np.zeros(21)
perturb[1]=1e-7
rootsp = np.roots(np.poly1d(w_coeffs+perturb))
# Plot original roots
plt.scatter(range(1,21), np.zeros(20), s=30)
# Plot roots of the perturbed polynomial
plt.scatter(np.real(rootsp), np.imag(rootsp), s=60, c='red', marker='x')
plt.savefig('figures/wilkinsonpolynomial.pdf', bbox_inches='tight')
plt.close()
def plot_eig_condit():
x0, x1 =-100, 100
y0, y1 =-100, 100
res=200
x = np.linspace(x0,x1,res)
y = np.linspace(y0,y1,res)
X,Y = np.meshgrid(x,y)
J = np.empty_like(X)
for i in xrange(res):
for j in xrange(res):
M = np.array([[1, X[i,j]],[Y[i,j],1]])
eigs = la.eig(M)[0]
perturb = np.random.normal(0, 1e-6, M.shape) + np.random.normal(0,1e-6, M.shape)*1j
eigsp = la.eig(M+perturb)[0]
k = la.norm(eigs-eigsp)/la.norm(perturb)
J[i,j] = k*la.norm(M)/la.norm(eigs)
plt.pcolormesh(X,Y,J, cmap='Greys')
plt.colorbar()
plt.savefig('figures/eigenvalue_conditioning.png', bbox_inches='tight')
plt.close()
def wilkinson_many():
roots = np.arange(1,21)
w_coeffs = np.array([1, -210, 20615, -1256850, 53327946, -1672280820,
40171771630, -756111184500, 11310276995381,
-135585182899530, 1307535010540395,
-10142299865511450, 63030812099294896,
-311333643161390640, 1206647803780373360,
-3599979517947607200, 8037811822645051776,
-12870931245150988800, 13803759753640704000,
-8752948036761600000, 2432902008176640000])
for trial in xrange(100):
perturb = np.random.normal(1, 1e-10, 21)
rootsp = np.roots(np.poly1d(w_coeffs*perturb))
# Plot roots of the perturbed polynomial
plt.scatter(np.real(rootsp), np.imag(rootsp), c='black', s=5, marker='.')
# Plot original roots
plt.scatter(range(1,21), np.zeros(20), s=30)
plt.xlim(0, 23)
plt.savefig('figures/wilkinsonpolynomial_many.pdf', bbox_inches='tight')
plt.close()
if __name__ == "__main__":
wilkinson_poly()
wilkinson_many()
plot_eig_condit()
```
#### File: Vol1B/PageRank/spec.py
```python
import numpy as np
import scipy.sparse as spar
import scipy.linalg as la
from scipy.sparse import linalg as sla
def to_matrix(filename,n):
'''
Return the nxn adjacency matrix described by datafile.
INPUTS:
datafile (.txt file): A .txt file describing a directed graph. Lines
describing edges should have the form '<from node>\t<to node>\n'.
The file may also include comments.
n (int): The number of nodes in the graph described by datafile
RETURN:
Return a SciPy sparse `dok_matrix'.
'''
pass
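# --- Editor's sketch (illustration only, not the graded solution) ------------
# A minimal way to build the sparse adjacency matrix; comment lines and
# malformed lines are assumed to fail the int() conversion and are skipped.
def _to_matrix_sketch(filename, n):
    A = spar.dok_matrix((n, n))
    with open(filename, 'r') as f:
        for line in f:
            try:
                i, j = (int(tok) for tok in line.strip().split())
            except ValueError:
                continue                       # comment or malformed line
            A[i, j] = 1
    return A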
def calculateK(A,N):
'''
Compute the matrix K as described in the lab.
Input:
A (array): adjacency matrix of an array
N (int): the datasize of the array
Return:
K (array)
'''
pass
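# --- Editor's sketch (illustration only, not the graded solution) ------------
# The usual construction in this lab: rows with no outgoing links (sinks) are
# replaced by a row of ones, each row is divided by its sum, and the result is
# transposed. Assumes A is a dense NumPy array, as the docstring states.
def _calculateK_sketch(A, N):
    A = np.array(A, dtype=float)               # work on a copy
    sinks = A.sum(axis=1) == 0
    A[sinks] = np.ones(N)                      # sinks link to every node
    return (A / A.sum(axis=1)[:, np.newaxis]).T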
def iter_solve(adj, N=None, d=.85, tol=1E-5):
'''
Return the page ranks of the network described by `adj`.
Iterate through the PageRank algorithm until the error is less than `tol'.
Inputs:
adj - A NumPy array representing the adjacency matrix of a directed graph
N (int) - Restrict the computation to the first `N` nodes of the graph.
Defaults to N=None; in this case, the entire matrix is used.
d - The damping factor, a float between 0 and 1.
Defaults to .85.
tol - Stop iterating when the change in approximations to the solution is
less than `tol'. Defaults to 1E-5.
Returns:
The approximation to the steady state.
'''
pass
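# --- Editor's sketch (illustration only, not the graded solution) ------------
# Straightforward fixed-point iteration p <- d*K*p + (1-d)/N, using the
# calculateK sketch above; the uniform starting vector is an assumption.
def _iter_solve_sketch(adj, N=None, d=.85, tol=1E-5):
    if N is None:
        N = adj.shape[0]
    K = _calculateK_sketch(np.array(adj)[:N, :N], N)
    p = np.ones(N) / float(N)
    while True:
        p_new = d*K.dot(p) + (1. - d)/N
        if la.norm(p_new - p) < tol:
            return p_new
        p = p_new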
def eig_solve( adj, N=None, d=.85):
'''
Return the page ranks of the network described by `adj`. Use the
eigenvalue solver in \li{scipy.linalg} to calculate the steady state
of the PageRank algorithm
Inputs:
adj - A NumPy array representing the adjacency matrix of a directed graph
N - Restrict the computation to the first `N` nodes of the graph.
Defaults to N=None; in this case, the entire matrix is used.
d - The damping factor, a float between 0 and 1.
Defaults to .85.
Returns:
The approximation to the steady state.
'''
pass
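# --- Editor's sketch (illustration only, not the graded solution) ------------
# The steady state is the eigenvector of B = d*K + (1-d)/N * ones((N,N)) that
# belongs to the dominant eigenvalue 1, normalized so its entries sum to one.
def _eig_solve_sketch(adj, N=None, d=.85):
    if N is None:
        N = adj.shape[0]
    K = _calculateK_sketch(np.array(adj)[:N, :N], N)
    B = d*K + (1. - d)/N * np.ones((N, N))
    vals, vecs = la.eig(B)
    steady = np.real(vecs[:, np.argmax(np.real(vals))])
    return steady / steady.sum()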
def team_rank(filename='ncaa2013.csv'):
'''
Use your iterative PageRank solver to predict the rankings of the teams in
the given dataset of games.
The dataset should have two columns, representing winning and losing teams.
Each row represents a game, with the winner on the left, loser on the right.
Parse this data to create the adjacency matrix, and feed this into the
solver to predict the team ranks.
Inputs:
filename (optional) - The name of the dataset.
Returns:
ranks - A list of the ranks of the teams in order "best" to "worst"
teams - A list of the names of the teams, also in order "best" to "worst"
'''
pass
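# --- Editor's sketch (illustration only, not the graded solution) ------------
# Build the adjacency matrix with an edge from each loser to the winner, then
# rank teams by their steady-state weight from the iterative sketch above. The
# header row, the damping factor d=.7, and the exact CSV layout are assumptions.
def _team_rank_sketch(filename='ncaa2013.csv'):
    import csv
    with open(filename, 'r') as f:
        reader = csv.reader(f)
        next(reader)                           # assumed header row
        games = [row for row in reader if len(row) == 2]
    teams = sorted({team for game in games for team in game})
    index = {team: i for i, team in enumerate(teams)}
    A = np.zeros((len(teams), len(teams)))
    for winner, loser in games:
        A[index[loser], index[winner]] = 1
    ranks = _iter_solve_sketch(A, d=.7)
    order = np.argsort(ranks)[::-1]
    return list(ranks[order]), [teams[i] for i in order]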
```
#### File: Vol1B/Testing/solutions.py
```python
import math
def addition(a,b):
return a+b
def fibonacci(n):
"""generate the nth fibonacci number recursively.
Assume n is a positive integer.
"""
if n == 0:
return 1
elif n == 1:
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
#Problem 2 Write unit tests to test the following
def operator(a,b,oper):
if type(oper) != str:
raise ValueError("Oper should be a string")
if len(oper) != 1:
raise ValueError("Oper should be one character")
if oper == "+":
return a+b
if oper == "/":
if b == 0:
raise ValueError("You can't divide by zero!")
return a/float(b)
if oper == "-":
return a-b
if oper == "*":
return a*b
else:
raise ValueError("Oper can only be: '+', '/', '-', or '*'")
#Problem 3 Write unit test for this class
class ComplexNumber(object):
def __init__(self, real=0, imag=0):
self.real = real
self.imag = imag
def conjugate(self):
conjugate = ComplexNumber(real=self.real, imag=-self.imag)
return conjugate
def norm(self):
magnitude = math.sqrt(self.real**2 + self.imag**2)
return magnitude
def __add__(self, other):
real = self.real + other.real
imag = self.imag + other.imag
return ComplexNumber(real=real, imag=imag)
def __sub__(self, other):
real = self.real - other.real
imag = self.imag - other.imag
return ComplexNumber(real=real, imag=imag)
def __mul__(self, other):
real = self.real*other.real - self.imag*other.imag
imag = self.imag*other.real + other.imag*self.real
return ComplexNumber(real=real, imag=imag)
def __div__(self, other):
if other.real==0 and other.imag==0:
raise ValueError("Cannot divide by zero")
bottom = (other.conjugate()*other*1.).real
top = self*other.conjugate()
return ComplexNumber(real=(top.real/bottom), imag=(top.imag/bottom))
def __eq__(self, other):
return self.imag == other.imag and self.real == other.real
def __str__(self):
return str(self.real)+('+' if self.imag>=0 else '')+str(self.imag)+'i'
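# --- Editor's sketch (illustration only) --------------------------------------
# A few representative checks for Problem 3; like the tests above, these would
# normally live in a separate pytest file.
def _example_complexnumber_tests():
    a, b = ComplexNumber(1, 2), ComplexNumber(3, -4)
    assert a.conjugate() == ComplexNumber(1, -2)
    assert b.norm() == 5
    assert a + b == ComplexNumber(4, -2)
    assert a * b == ComplexNumber(11, 2)
    assert str(a) == "1+2i"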
#Problem 4 Write tests for the LinkedList class (be sure to test the subclasses separately, and for LinkedListNode consider different data types)
class LinkedListNode(object):
def __init__(self, data):
self.data = data
self.next = None
def __str__(self):
return str(self.data)
def __lt__(self, other):
if type(self.data) != type(other.data):
raise ValueError("To compare nodes with __lt__ they must be of the same type")
if self.data < other.data:
return True
else:
return False
def __eq__(self, other):
if type(self.data) != type(other.data):
raise ValueError("To compare nodes with __eq__ they must be of the same type")
if self.data == other.data:
return True
else:
return False
def __gt__(self, other):
if type(self.data) != type(other.data):
raise ValueError("To compare nodes with __gt__ they must be of the same type")
if self.data > other.data:
return True
else:
return False
class LinkedList(object):
def __init__(self):
self.head = None
def add(self, data):
"""
Example:
>>> my_list = LinkedList()
>>> my_list.add(1)
>>> my_list.head.data
1
>>> my_list.add(2)
>>> my_list.head.next.data
2
"""
new_node = LinkedListNode(data)
if self.head is None:
self.head = new_node
else:
current_node = self.head
while current_node.next is not None:
current_node = current_node.next
current_node.next = new_node
def __str__(self):
"""
Example:
>>> my_list = LinkedList()
>>> my_list.add(1)
>>> my_list.add(2)
>>> my_list.add(3)
>>> print(my_list)
[1, 2, 3]
>>> str(my_list) == str([1,2,3])
True
"""
return_list = []
if self.head is None:
return str(return_list)
else:
current_node = self.head
while current_node.next is not None:
return_list.append(current_node.data)
current_node = current_node.next
return_list.append(current_node.data) # Append the last node; the loop stopped because its `next` is None.
return str(return_list)
def remove(self, data):
"""
Example:
>>> print(my_list)
[1, 2, 3]
>>> my_list.remove(2)
>>> print(my_list)
[1, 3]
>>> my_list.remove(2)
2 is not in the list.
>>> print(my_list)
[1, 3]
"""
if self.head is None:
raise ValueError(str(data) +" is not in the list.")
if self.head.data == data:
self.head = self.head.next
else:
current_node = self.head
try:
while current_node.next.data != data:
current_node = current_node.next
new_next_node = current_node.next.next
current_node.next = new_next_node
except:
raise ValueError(str(data) +" is not in the list.")
def insert(self, data, place):
"""
Example:
>>> print(my_list)
[1, 3]
>>> my_list.insert(2,3)
>>> print(my_list)
[1, 2, 3]
>>> my_list.insert(2,4)
4 is not in the list.
"""
try:
current = self.head
temp = LinkedListNode(data)
previous = None
if current.data == place:
temp.next = self.head
self.head = temp
else:
while current.data != place:
previous = current
current = current.next
if current.data == place:
break
previous.next = temp
temp.next = current
temp.back = previous
current.back = temp
current = self.head
except:
raise ValueError(str(place) + " is not in the list.")
```
#### File: Vol2A/BFS-KBacon/solutions.py
```python
from collections import deque
import networkx as nx
from matplotlib import pyplot as plt
# Problems 1-4: Implement the following class
class Graph(object):
"""A graph object, stored as an adjacency dictionary. Each node in the graph is a key in the dictionary.
The value of each key is a list of the corresponding node's neighbors.
"""
def __init__(self, adjacency):
"""Store the adjacency dictionary as a class attribute"""
self.dictionary = adjacency
# Problem 1
def __str__(self):
"""String representation: a sorted view of the adjacency dictionary.
Example:
>>> test = {'A':['D', 'C', 'B'], 'D':['A', 'C'],
... 'C':['B', 'A', 'D'], 'B':['A', 'C']}
>>> print(Graph(test))
A: B; C; D
B: A; C
C: A; B; D
D: A; C
"""
out = ""
keys = self.dictionary.keys()
keys.sort()
# join() approach
for key in keys:
out += str(key) + ": " # add each node
values = self.dictionary[key]
values.sort()
out += "; ".join(values) + "\n" # add the node's neighborhood
return out
# for loop approach (equivalent alternative; unreachable because of the return above)
for key in keys:
out += str(key) + ": " # add each node
values = self.dictionary[key]
values.sort()
for value in values: # add each neighbor
out += str(value) + "; "
out = out.strip("; ") + "\n" # strip off the last "; "
return out
# Problem 2
def traverse(self, start):
"""Begin at 'start' and perform a breadth-first search until all
nodes in the graph have been visited. Return a list of values,
in the order that they were visited. If 'start' is not in the
adjacency dictionary, raise a ValueError.
Inputs:
start: the node to start the search at.
Returns:
the list of visited nodes (in order of visitation)
Example:
>>> test = {'A':['B'], 'B':['A', 'C',], 'C':['B']}
>>> Graph(test).traverse('B')
['B', 'A', 'C']
"""
# Validate input
if start not in self.dictionary:
raise ValueError(str(start) + " is not in the graph.")
# Set up the data structures
visited = list()
marked = set(start)
visit_queue = deque(start)
# Search the graph until done:
while len(visit_queue) > 0:
# get the next node from the queue and visit it
current = visit_queue.popleft()
visited.append(current)
# put any unvisited, unmarked neighbors of the
# current node on the visiting queue
for neighbor in self.dictionary[current]:
if neighbor not in marked:
visit_queue.append(neighbor)
marked.add(neighbor)
return visited
# Problem 3 (Optional)
def DFS(self, start):
"""Begin at 'start' and perform a depth-first search until all
nodes in the graph have been searched. Return a list of values, in
the order that they were visited. If 'start' is not in the
adjacency dictionary, raise a ValueError.
Inputs:
start: the node to start the search at.
Returns:
the list of visited nodes (in order of visitation)
Example:
>>> test = {'A':['B', 'D'], 'B':['A', 'C'],
... 'C':['B', 'D'], 'D':['A', 'C']}
>>> Graph(test).DFS('A')
['A', 'B', 'C', 'D']
"""
if start not in self.dictionary:
raise ValueError(str(start) + " is not in the graph.")
visited = list()
marked = set(start)
visit_queue = deque(start) # for DFS, use this as a stack
while len(visit_queue) > 0:
current = visit_queue.pop() # This line is different from BFS
visited.append(current)
for neighbor in self.dictionary[current]:
if neighbor not in marked:
visit_queue.append(neighbor)
marked.add(neighbor)
return visited
# Problem 4
def shortest_path(self, start, target):
"""Begin at the node containing 'start' and perform a breadth-first
search until the node containing 'target' is found. Return a list
containing the shortest path from 'start' to 'target'. If either of
the inputs are not in the adjacency graph, raise a ValueError.
Inputs:
start: the node to start the search at.
target: the node to search for.
Returns:
A list of nodes along the shortest path from start to target,
including the endpoints.
Example:
>>> test = {'A':['B', 'F'], 'B':['A', 'C'], 'C':['B', 'D'],
... 'D':['C', 'E'], 'E':['D', 'F'], 'F':['A', 'E', 'G'],
... 'G':['A', 'F']}
>>> Graph(test).shortest_path('A', 'G')
['A', 'F', 'G']
"""
if start not in self.dictionary:
raise ValueError("Starting point " + str(start)
+ " is not in the graph.")
# if target not in self.dictionary: raise ValueError
visited = list()
marked = set(start)
visit_queue = deque(start)
all_paths = {}
final_path = deque()
while len(visit_queue) > 0:
current = visit_queue.popleft()
visited.append(current)
# Check for the target
if current == target:
# Build the shortest path
final_path.append(current)
while current in all_paths:
final_path.appendleft(all_paths[current])
current = all_paths[current]
return list(final_path)
# Otherwise continue as before
else:
for neighbor in self.dictionary[current]:
if neighbor not in marked:
visit_queue.append(neighbor)
marked.add(neighbor)
# Track the path
all_paths[neighbor] = current
# If all neighbors have been checked, the target isn't in the graph.
raise ValueError("Target " + str(target) + " is not in the graph.")
def cheater(self, start, target):
return nx.shortest_path(
convert_to_networkx(self.dictionary), start, target)
# Problem 5: Write the following function
def convert_to_networkx(dictionary):
"""Convert 'dictionary' to a networkX object and return it."""
# Make the graph
output = nx.Graph()
for key in dictionary:
for value in dictionary[key]:
# Add each edge. Duplicates are automatically ignored.
output.add_edge(key, value)
return output
# Helper function for problem 6
def parse(filename="movieData.txt"):
"""Generate an adjacency dictionary where each key is
a movie and each value is a list of actors in the movie.
"""
# open the file, read it in, and split the text by '\n'
movieFile = open(filename, 'r')
movieFile = movieFile.read()
movieFile = movieFile.split('\n')
graph = dict()
# for each movie in the file,
for line in movieFile:
# get movie name and list of actors
names = line.split('/')
movie = names[0]
graph[movie] = []
# add the actors to the dictionary
for actor in names[1:]:
graph[movie].append(actor)
return graph
# Problems 6-8: Implement the following class
class BaconSolver(object):
"""Class for solving the Kevin Bacon problem."""
# Problem 6
def __init__(self, filename="movieData.txt"):
"""Initialize the networkX graph and with data from the specified
file. Store the graph as a class attribute. Also store the collection
of actors in the file as a class attribute.
"""
# Get the adjacency dictionary from the file
movie_to_actors = parse(filename)
# Extract the actors from the adjacency dictionary (values)
self.actors = {actor for movie in movie_to_actors
for actor in movie_to_actors[movie]}
# Convert the adjacency matrix to networkX
self.bacon_graph = convert_to_networkx(movie_to_actors)
# Problem 6
def path_to_bacon(self, start, target="Bacon, Kevin"):
"""Find the shortest path from 'start' to 'target'."""
if start not in self.actors:
raise ValueError(str(start) + " is not in the set of actors")
if target not in self.actors:
raise ValueError(str(target) + " is not in the set of actors")
return nx.shortest_path(self.bacon_graph, start, target)
# Problem 7
def bacon_number(self, start, target="Bacon, Kevin"):
"""Return the Bacon number of 'start'."""
# Integer divide by two to account for movies in the path.
return len(self.path_to_bacon(start, target)) // 2
# Problem 7
def average_bacon(self, target="Bacon, Kevin"):
"""Calculate the average Bacon number in the data set.
Note that actors are not guaranteed to be connected to the target.
"""
connected = 0
isolated = 0
total = 0
for actor in self.actors:
# if nx.has_path(self.bacon_graph, actor, target):
try:
total += self.bacon_number(actor, target)
connected += 1
except nx.NetworkXNoPath:
isolated += 1
return float(total)/connected, isolated
# Problem 8 (Optional)
def plot_bacon(self, target="Bacon, Kevin"):
"""Create and show a histogram displaying the frequency of the Bacon
numbers in the data set. Ignore entries with no path to 'target'.
"""
bacon = deque()
for actor in self.actors:
try:
bacon.append(self.bacon_number(actor, target))
except nx.NetworkXNoPath:
pass
name = target.partition(",")[0]
plt.hist(bacon, bins=[.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5], log=True)
plt.title(name + " Number Distribution")
plt.xlabel(name + " Number")
plt.ylabel("Actors")
plt.show()
if __name__ == '__main__':
print BaconSolver().average_bacon()
```
#### File: Vol2A/DataStructures2-Trees/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from time import time
from sys import stdout
from matplotlib import pyplot as plt
from solutions import iterative_search, SinglyLinkedList, BST, AVL
def prob4_plots(N=12, verbose=False):
"""At each iteration, take n random items from a pre-determined subset.
Time (separately) how long it takes to load a SinglyLinkedList, a BST, and
an AVL with the data set of n items.
Choose 5 random items from the data set. Time (separately) how long it
takes to find all 5 items in each object.
Create two log-log figures.
The first figure plots the number of items in each dataset against the
build time for each object.
The second figure, plots the number of items against the search time for
each object.
"""
# Initialize lists to hold results
lls_build, lls_search = [], []
bst_build, bst_search = [], []
avl_build, avl_search = [], []
data = np.random.random(2**(N+1))
domain = 2**np.arange(3,N+1)
# Problem sizes n = 2**3, 2**4, ..., 2**N
for n in domain:
if verbose:
print("\rn = {}".format(n)),
stdout.flush()
# Initialize wordlist and data structures
subset = data[:n]
bst = BST()
avl = AVL()
lls = SinglyLinkedList()
# Time the singly-linked list build
begin = time()
for item in subset:
lls.append(item)
lls_build.append(time() - begin)
# Time the binary search tree build
begin = time()
for item in subset:
bst.insert(item)
bst_build.append(time() - begin)
# Time the AVL tree build
begin = time()
for item in subset:
avl.insert(item)
avl_build.append(time() - begin)
random_subset = np.random.choice(subset, size=5, replace=False)
# Time the singly-linked list search
begin = time()
for target in random_subset:
iterative_search(lls, target)
lls_search.append(time() - begin)
# Time the binary search tree search
begin = time()
for target in random_subset:
bst.find(target)
bst_search.append(time() - begin)
# Time the AVL tree search
begin = time()
for target in random_subset:
avl.find(target)
avl_search.append(time() - begin)
# Plot the data
plt.clf()
plt.title("Build Times")
plt.loglog(domain,lls_build,'.-',lw=2,ms=10,basex=2,basey=2,label='Singly Linked List')
plt.loglog(domain,bst_build,'.-',lw=2,ms=10,basex=2,basey=2,label='Binary Search Tree')
plt.loglog(domain,avl_build,'.-',lw=2,ms=10,basex=2,basey=2,label='AVL Tree')
plt.xlabel("n"); plt.ylabel("Seconds")
plt.legend(loc='upper left')
plt.savefig("BuildTimes.pdf")
plt.clf()
plt.title("Search Times")
plt.loglog(domain,lls_search,'.-',lw=2,ms=10,basex=2,basey=2,label='Singly Linked List')
plt.loglog(domain,bst_search,'.-',lw=2,ms=10,basex=2,basey=2,label='Binary Search Tree')
plt.loglog(domain,avl_search,'.-',lw=2,ms=10,basex=2,basey=2,label='AVL Tree')
plt.xlabel("n")
plt.legend(loc='upper left')
plt.savefig("SearchTimes.pdf")
plt.clf()
if __name__ == '__main__':
prob4_plots(verbose=True)
```
#### File: Vol2A/Fourier2-Convolution/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
# Switch backends to render PNG images.
plt.switch_backend("Agg")
import numpy as np
from scipy.io import wavfile
from scipy import fftpack as ft
#plots Noisysignal1.wav
def noise():
plt.close('all')
rate, sig = wavfile.read('Noisysignal1.wav')
plt.plot(sig[0:sig.shape[0]/2])
plt.savefig('figures/noisy.png')
plt.clf()
#plots left half of spectrum of
#Noisysignal1.wav
def noise_spec():
rate, sig = wavfile.read('Noisysignal1.wav')
sig = sig.astype('float32')
fsig = ft.fft(sig.T).T
f = np.absolute(fsig)
plt.plot(f[0:f.shape[0]/2])
plt.savefig('figures/noisyspec.png')
plt.clf()
#plots cleaned noisy signal
def cleaned_signal():
rate,data = wavfile.read('Noisysignal1.wav')
fsig = ft.fft(data,axis = 0)
for j in xrange(10000,20000):
fsig[j]=0
fsig[-j]=0
newsig = ft.ifft(fsig)
newsig = newsig.real
newsig = (newsig/np.absolute(newsig).max()*32767).astype('int16')
plt.plot(newsig[0:newsig.shape[0]/2])
plt.savefig('figures/Cleanedsignal.png')
plt.clf()
if __name__ == '__main__':
noise()
noise_spec()
cleaned_signal()
```
#### File: Vol2A/Wavelets/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from matplotlib import pyplot as plt
from scipy import misc
def grey_image(filename, greyname):
"Create a grayscale version of a color image"
image = misc.imread(filename,True)
plt.imsave("figures/"+greyname, image, cmap='gray')
def noise_image(filename, noisename):
"Create a noised version of a grayscale image"
image = misc.imread(filename,True)
noiseSigma = 16.0
image += np.random.normal(0, noiseSigma, size=image.shape)
plt.imsave("figures/"+noisename, image, cmap='gray')
grey_image("swanlake.jpg", "swanlake_gray.jpg")
noise_image("swanlake_gray.jpg", "swanlake_polluted.jpg")
```
#### File: Vol2B/DynamicOpt1-Value/plots.py
```python
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
#================================================
#Plots for the Value Function Iteration Lab
#================================================
import numpy as np
import math
from scipy import stats as st
import discretenorm
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def diff_policies():
def graph_policy(policies, utility_function=np.sqrt, beta=0.9):
# First, create a state space that is the same length as the policy.
states = np.linspace(0,1,len(policies[0]))
# Now calculate the utility accumulated at each step of the policy.
fig = plt.figure()
fig.set_size_inches(8,8)
plt.title('Utility Gained From Various Policies')
for i, policy in enumerate(policies):
total_utility = np.zeros(len(policy))
total_utility[0] = utility_function(policy[0])
for j, consumption_amount in enumerate(policy[1:]):
total_utility[j+1] = total_utility[j] + beta**(j+1)*utility_function(policy[j+1])
l, = plt.plot(np.arange(len(policy)), total_utility, label='Policy ' + str(i+1)
+ ', Utility = ' + str(total_utility[-1])[:3])
print("Total Utility: \t" + str(total_utility[-1]))
plt.legend(loc='upper left')
plt.savefig('./diff_policies.pdf')
policy1 = np.array([1.0, 0, 0, 0, 0])
policy2 = np.array([0, 0, 0, 0, 1.0])
policy3 = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
policy4 = np.array([0.4, 0.3, 0.2, 0.1, 0])
policies = [policy1, policy2, policy3, policy4]
graph_policy(policies)
def eatCake(beta, N, Wmax=1., T=None, finite=True, plot=False):
"""
Solve the finite horizon cake-eating problem using Value Function iteration.
Inputs:
T -- final time period
beta -- discount factor
N -- number of discrete intervals to break up the cake
size -- size of the cake to begin with
plot -- boolean indicating whether to plot value function surface and policy function
surface
Returns:
values -- numpy array of shape (N, T+2) (if finite=True) or shape (N,) (if finite=False)
giving the value function at each time period for each state
psi -- numpy array of shape (N, T+1) (if finite=True) or shape (N,) (if finite=False)
giving the policy at each time period for each state.
"""
states = np.linspace(0,Wmax,N) #state space vector
actions = np.tile(states, N).reshape((N,N)).T
actions = actions - actions.T
actions[actions<0] = 0
rewards = np.sqrt(actions)
rewards[np.triu_indices(N, k=1)] = -1e10 #pre-computed reward function
n_range = np.arange(N) #this is used several times, so initialize it once
if finite:
values = np.zeros((N, T+2))
psi = np.zeros((N, T+1))
for i in xrange(T,-1,-1):
argmaxs = np.argmax(rewards + beta*values[:,i+1].reshape(1,N), axis=1)
values[:,i] = (rewards + beta*values[:,i+1].reshape(1,N))[n_range,argmaxs]
psi[:,i] = states[argmaxs]
x=np.arange(0,N)
if plot:
x=np.arange(0,N)
y=np.arange(0,T+2)
X,Y=np.meshgrid(x,y)
fig1 = plt.figure()
ax1= Axes3D(fig1)
ax1.plot_surface(states[X],Y,values.T, cmap=cm.coolwarm)
plt.show ()
fig2 = plt.figure()
ax2 = Axes3D(fig2)
y = np.arange(0,T+1)
X,Y=np.meshgrid(x,y)
ax2.plot_surface(states[X],Y,psi.T, cmap = cm.coolwarm)
plt.show()
else:
values = np.zeros(N)
psi = np.zeros(N)
delta = 1.
while delta >= 1e-9:
values1 = values.copy()
argmaxs = np.argmax(rewards + beta*values1.reshape(1,N), axis=1)
values = (rewards + beta*values.reshape(1,N))[n_range, argmaxs]
psi = states[argmaxs]
delta = ((values-values1)**2).sum()
if plot:
plt.plot(states, psi)
plt.show()
return values, psi
def finite_horiz():
#First compute solution to problem 1
beta = 0.9;
T = 10;
N = 100;
u = lambda c: np.sqrt(c);
W = np.linspace(0,1,N);
X, Y = np.meshgrid(W,W);
Wdiff = Y-X
index = Wdiff <0;
Wdiff[index] = 0;
util_grid = u(Wdiff);
util_grid[index] = -10**10;
V = np.zeros((N,T+2));
psi = np.zeros((N,T+1));
for k in xrange(T,-1,-1):
val = util_grid + beta*np.tile(V[:,k+1].T,(N,1));
vt = np.amax(val, axis = 1);
psi_ind = np.argmax(val,axis = 1)
V[:,k] = vt;
psi[:,k] = W[psi_ind];
#now create plots
#fixed time plot
plt.figure()
plt.plot(V[:,5])
plt.title(r'Value function for $t = 5$')
plt.ylabel(r'$V$')
plt.xlabel(r'$W$')
plt.savefig('fixed_time.pdf')
#fixed W plot
plt.figure()
plt.plot(V[50,:])
plt.title(r'Value function for $W = 0.505$')
plt.ylabel(r'$V$')
plt.xlabel(r'$t$')
plt.savefig('fixed_w.pdf')
plt.clf()
#plot delta -> 0
def delta():
beta = 0.99
N = 1000
u = lambda c: np.sqrt(c)
W = np.linspace(0,1,N)
X, Y = np.meshgrid(W,W)
Wdiff = (X-Y).T
index = Wdiff <0
Wdiff[index] = 0
util_grid = u(Wdiff)
util_grid[index] = -10**10
Vprime = np.zeros((N,1))
delta = np.ones(1)
tol = 10**-9
it = 0
max_iter = 500
while (delta[-1] >= tol) and (it < max_iter):
V = Vprime
it += 1;
val = util_grid + beta*V.T
Vprime = np.amax(val, axis = 1)
Vprime = Vprime.reshape((N,1))
delta = np.append(delta,np.dot((Vprime-V).T,Vprime-V))
plt.figure()
plt.plot(delta[1:])
plt.ylabel(r'$\delta_k$')
plt.xlabel('iteration')
plt.savefig('convergence.pdf')
plt.clf()
def infiniteHorizon():
"""
Plot policy function for infinite time horizon cake eating problem.
"""
values, psi = eatCake(.9, 100, finite=False)
states = np.linspace(0,1,100)
plt.figure()
plt.title(r'Policy Function')
plt.ylabel(r'$\psi$')
plt.xlabel(r'$W$')
plt.plot(states, psi)
plt.savefig('infiniteHorizon.pdf')
plt.clf()
def disc_norm():
x = np.linspace(-3,3,100)
y = st.norm.pdf(x,0,1)
fig, ax = plt.subplots()
fig.canvas.draw()
ax.plot(x,y)
fill1_x = np.linspace(-2,-1.5,100)
fill1_y = st.norm.pdf(fill1_x,0,1)
fill2_x = np.linspace(-1.5,-1,100)
fill2_y = st.norm.pdf(fill2_x,0,1)
ax.fill_between(fill1_x,0,fill1_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
ax.fill_between(fill2_x,0,fill2_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
for label in ax.get_yticklabels():
label.set_visible(False)
for tick in ax.get_xticklines():
tick.set_visible(False)
for tick in ax.get_yticklines():
tick.set_visible(False)
plt.rc("font", size = 16)
plt.xticks([-2,-1.5,-1])
labels = [item.get_text() for item in ax.get_xticklabels()]
labels[0] = r"$v_k$"
labels[1] = r"$\varepsilon_k$"
labels[2] = r"$v_{k+1}$"
ax.set_xticklabels(labels)
plt.ylim([0, .45])
plt.savefig('discnorm.pdf')
plt.clf()
def stoch_value():
#Compute Solution==========================================================
sigma = .5
mu = 4*sigma
K = 7
Gamma, eps = discretenorm.discretenorm(K,mu,sigma)
N = 100
W = np.linspace(0,1,N)
V = np.zeros((N,K))
u = lambda c: np.sqrt(c)
beta = 0.99
X,Y= np.meshgrid(W,W)
Wdiff = Y-X
index = Wdiff < 0
Wdiff[index] = 0
util_grid = u(Wdiff)
util3 = np.tile(util_grid[:,:,np.newaxis],(1,1,K))
eps_grid = eps[np.newaxis,np.newaxis,:]
eps_util = eps_grid*util3
Gamma_grid = Gamma[np.newaxis,:]
delta = 1
Vprime = V
z = 0
while (delta > 10**-9):
z= z+1
V = Vprime
gamV = Gamma_grid*V
Expval = np.sum(gamV,1)
Exp_grid = np.tile(Expval[np.newaxis,:,np.newaxis],(N,1,K))
arg = eps_util+beta*Exp_grid
arg[index] = -10**10  # ** (power), not ^ (XOR)
Vprime = np.amax(arg,1)
psi_ind = np.argmax(arg,1)
psi = W[psi_ind]
delta = np.linalg.norm(Vprime - V)
#============================================================
#Plot 3D
x=np.arange(0,N)
y=np.arange(0,K)
X,Y=np.meshgrid(x,y)
fig1 = plt.figure()
ax1= Axes3D(fig1)
ax1.set_xlabel(r'$W$')
ax1.set_ylabel(r'$\varepsilon$')
ax1.set_zlabel(r'$V$')
ax1.plot_surface(W[X],Y,np.transpose(Vprime), cmap=cm.coolwarm)
plt.savefig('stoch_value.pdf')
plt.clf()
if __name__ == "__main__":
disc_norm()
stoch_value()
finite_horiz()
delta()
infiniteHorizon()
```
#### File: Vol2B/InteriorPoint1-Linear/spec.py
```python
import numpy as np
from scipy import linalg as la
from scipy.stats import linregress
from matplotlib import pyplot as plt
# Auxiliary Functions ---------------------------------------------------------
def startingPoint(A, b, c):
"""Calculate an initial guess to the solution of the linear program
min c^T x, Ax = b, x>=0.
Reference: Nocedal and Wright, p. 410.
"""
# Calculate x, lam, mu of minimal norm satisfying both
# the primal and dual constraints.
B = la.inv(A.dot(A.T))
x = A.T.dot(B.dot(b))
lam = B.dot(A.dot(c))
mu = c - A.T.dot(lam)
# Perturb x and mu so they are nonnegative.
dx = max((-3./2)*x.min(), 0)
dmu = max((-3./2)*mu.min(), 0)
x += dx*np.ones_like(x)
mu += dmu*np.ones_like(mu)
# Perturb x and mu so they are not too small and not too dissimilar.
dx = .5*(x*mu).sum()/mu.sum()
dmu = .5*(x*mu).sum()/x.sum()
x += dx*np.ones_like(x)
mu += dmu*np.ones_like(mu)
return x, lam, mu
# Use this linear program generator to test your interior point method.
def randomLP(m):
"""Generate a 'square' linear program min c^T x s.t. Ax = b, x>=0.
First generate m feasible constraints, then add slack variables.
Inputs:
m -- positive integer: the number of desired constraints
and the dimension of space in which to optimize.
Outputs:
A -- array of shape (m,n).
b -- array of shape (m,).
c -- array of shape (n,).
x -- the solution to the LP.
"""
n = m
A = np.random.random((m,n))*20 - 10
A[A[:,-1]<0] *= -1
x = np.random.random(n)*10
b = A.dot(x)
c = A.sum(axis=0)/float(n)
return A, b, -c, x
# This random linear program generator is more general than the first.
def randomLP2(m,n):
"""Generate a linear program min c^T x s.t. Ax = b, x>=0.
First generate m feasible constraints, then add
slack variables to convert it into the above form.
Inputs:
m -- positive integer >= n, number of desired constraints
n -- dimension of space in which to optimize
Outputs:
A -- array of shape (m,n+m)
b -- array of shape (m,)
c -- array of shape (n+m,), with m trailing 0s
v -- the solution to the LP
"""
A = np.random.random((m,n))*20 - 10
A[A[:,-1]<0] *= -1
v = np.random.random(n)*10
k = n
b = np.zeros(m)
b[:k] = A[:k,:].dot(v)
b[k:] = A[k:,:].dot(v) + np.random.random(m-k)*10
c = np.zeros(n+m)
c[:n] = A[:k,:].sum(axis=0)/k
A = np.hstack((A, np.eye(m)))
return A, b, -c, v
# Problems --------------------------------------------------------------------
def interiorPoint(A, b, c, niter=20, tol=1e-16, verbose=False):
"""Solve the linear program min c^T x, Ax = b, x>=0
using an Interior Point method.
Parameters:
A ((m,n) ndarray): Equality constraint matrix with full row rank.
b ((m, ) ndarray): Equality constraint vector.
c ((n, ) ndarray): Linear objective function coefficients.
niter (int > 0): The maximum number of iterations to execute.
tol (float > 0): The convergence tolerance.
Returns:
x ((n, ) ndarray): The optimal point.
val (float): The minimum value of the objective function.
"""
raise NotImplementedError("Problems 1-4 Incomplete")
def leastAbsoluteDeviations(filename='simdata.txt'):
"""Generate and show the plot requested in the lab."""
raise NotImplementedError("Problem 5 Incomplete")
```
#### File: Vol2B/scipyoptimize/blackbox_function.py
```python
import numpy as np
from scipy import linalg as la
def blackbox(y_free):
"""
Finds the length of a curve approximated piece-wise by a set of points.
Accepts:
y_free (1xn ndarray): the non-endpoint y-values of the curve.
Returns:
total_length (float): the length of the approximated curve.
"""
# Initialize local constants.
m = len(y_free) + 2 # Number points: free variables, origin, and endpoint.
a, b = 40, 30 # Coordinates of endpoint.
# Generate the evenly-spaced x-values of the curve.
x = np.linspace(0,a,m)
# Pad the free variables with the fixed endpoint values, 0 and b.
y = np.hstack((0,y_free, b))
# Calculate and return the line integral of the approximated curve.
partial_norms = []
for i,item in enumerate(y[:-1]):
partial_norms.append(la.norm(np.array([x[i+1]-x[i],y[i+1] - item])))
return np.sum(partial_norms)
```
#### File: Vol2B/scipyoptimize/rosenbrock.py
```python
import math, copy
import numpy
from matplotlib import pyplot, colors, cm
import scipy as sp
from mpl_toolkits.mplot3d import Axes3D
def cmap_powerlaw_adjust(cmap, a):
'''
returns a new colormap based on the one given
but adjusted via power-law:
newcmap = oldcmap**a
'''
if a < 0.:
return cmap
cdict = copy.copy(cmap._segmentdata)
fn = lambda x : (x[0]**a, x[1], x[2])
for key in ('red','green','blue'):
cdict[key] = map(fn, cdict[key])
cdict[key].sort()
assert 0 <= cdict[key][0][0] and cdict[key][-1][0] <= 1, \
"Resulting indices extend out of the [0, 1] segment."
return colors.LinearSegmentedColormap('colormap',cdict,1024)
def cmap_center_adjust(cmap, center_ratio):
'''
returns a new colormap based on the one given
but adjusted so that the old center point higher
(>0.5) or lower (<0.5)
'''
if not (0. < center_ratio) & (center_ratio < 1.):
return cmap
a = math.log(center_ratio) / math.log(0.5)
return cmap_powerlaw_adjust(cmap, a)
def cmap_center_point_adjust(cmap, range, center):
'''
converts center to a ratio between 0 and 1 of the
range given and calls cmap_center_adjust(). returns
a new adjusted colormap accordingly
'''
if not ((range[0] < center) and (center < range[1])):
return cmap
return cmap_center_adjust(cmap,
abs(center - range[0]) / abs(range[1] - range[0]))
if __name__ == '__main__':
def rosenbrock(x, y):
return (1.0-x)**2 + 100*(y-x**2)**2
a = sp.arange(-1.8, 1.8,.01)
b = sp.arange(-1,2.5,.01)
A, B = sp.meshgrid(a,b)
Z = rosenbrock(A, B)
plotkwargs = {'rstride': 8,
'cstride': 8,
'linewidth': 0.01}
fig = pyplot.figure(figsize=(6,4))
ax = fig.add_subplot(111, projection='3d')
cmap = cm.jet
plt = ax.plot_surface(A, B, Z, cmap=cmap, **plotkwargs)
plt.set_cmap(cmap_center_adjust(cmap, .25))
# ax.scatter([0.], [0.], rosenbrock(0.,0.), s=30, c='r')
pyplot.show()
```
#### File: Vol2B/scipyoptimize/solutions.py
```python
import scipy.optimize as opt
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from blackbox_function import blackbox
# Problem 1: use scipy.optimize.minimize() with different methods and compare.
def prob1():
"""Use the minimize() function in the scipy.optimize package to find the
minimum of the Rosenbrock function (scipy.optimize.rosen) using the
following methods:
Nelder-Mead
CG
BFGS
Use x0 = np.array([4., -2.5]) for the initial guess for each test.
For each method, print whether it converged, and if so, print how many
iterations it took.
"""
# Set up the initial guess.
x0 = np.array([4.0,-2.5])
# Test each method.
info = {}
info["Nelder-Mead"] = opt.minimize(opt.rosen, x0, method='Nelder-Mead')
info["CG"] = opt.minimize(opt.rosen, x0, method='CG')
info["BFGS"] = opt.minimize(opt.rosen, x0, method='BFGS')
# Report the info.
for method in info:
print("Method:\t{}\nConverged:\t{} "
.format(method, info[method]['success']))
if info[method]['success']:
print "Number of Iterations:", info[method]['nit'], '\n'
# Problem 2: Minizize an unknown "blackbox" function.
def prob2():
"""Minimize the function blackbox() in the blackbox_function module,
selecting the appropriate method of scipy.optimize.minimize() for this
problem. Do not pass your method a derivative. You may need to test
several methods and determine which is most appropriate.
The blackbox() function returns the length of a piecewise-linear curve
between two fixed points: the origin, and the point (40,30).
It accepts a one-dimensional ndarray of length m of y-values, where m
is the number of points of the piecewise curve excluding endpoints.
These points are spaced evenly along the x-axis, so only the y-values
of each point are passed into blackbox().
Once you have selected a method, select an initial point with the
provided code.
Then plot your initial curve and minimizing curve together on the same
plot, including endpoints. Note that this will require padding your
array of internal y-values with the y-values of the endpoints, so
that you plot a total of 20 points for each curve.
SOLUTIONS NOTE: This solutions file uses method="BFGS", but
method="Powell" also returns the correct answer, which is a straight
line connecting the origin and the point (40,30).
Students may attempt to minimize using method="Nelder-Mead", as
this also does not use a derivative. However, this does not return
the optimal solution.
"""
# Set up the initial values
y_initial = 30*np.random.random_sample(18)
x = np.linspace(0,40,20)
# Plot the pre-graph
yplot = np.hstack((0,y_initial,30))
plt.plot(x, yplot, '.-r', markersize=10)
# Minimize the blackbox() function using method="BFGS".
result = opt.minimize(blackbox, y_initial, tol=1e-4, method="BFGS")
if not result['success']:
raise RuntimeError("didn't converge")
ypost = np.hstack((0,result['x'],30))
plt.plot(x, ypost, '.-b', markersize=10)
plt.show()
# The solution should plot the Batman symbol.
# Run blackbox_Batman.py to see original.
prob2()
# Problem 3: learn and use scipy.optimize.basinhopping()
def prob3():
"""Explore the documentation on the function scipy.optimize.basinhopping()
online or via IPython. Use it to find the global minimum of the multmin()
function given in the lab, with initial point x0 = np.array([-2, -2]) and
the Nelder-Mead algorithm. Try it first with stepsize=0.5, then with
stepsize=0.2.
Plot the multimin function and minima found using the code provided.
Print statements answering the following questions:
Which algorithms fail to find the global minimum?
Why do these algorithms fail?
Finally, return the global minimum.
"""
# Define the function to be optimized and the initial condition.
def multimin(x):
r = np.sqrt((x[0]+1)**2 + x[1]**2)
return r**2 *(1+ np.sin(4*r)**2)
x0 = np.array([-2, -1.9])
small_step = .2
large_step = .5
# Optimize using variations on Nelder-Mead. NOTE: Here, each has been stored
# separately for ease of plotting differently colored minima.
small = opt.basinhopping(multimin, x0, stepsize=small_step,
minimizer_kwargs={'method':'nelder-mead'})
large = opt.basinhopping(multimin, x0, stepsize=large_step,
minimizer_kwargs={'method':'nelder-mead'})
# Print the results.
print("Stepsize:\t{}\nMinimum:\t{}\nX-Values:\t{}\n".format(small_step,
small['fun'], small['x']))
print("Stepsize:\t{}\nMinimum:\t{}\nX-Values:\t{}\n".format(large_step,
large['fun'], large['x']))
# Plot the multimin graph. Here, the points are colored differently for emphasis.
xdomain = np.linspace(-3.5,1.5,70)
ydomain = np.linspace(-2.5,2.5,60)
X,Y = np.meshgrid(xdomain,ydomain)
Z = multimin((X,Y))
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
ax1.plot_wireframe(X, Y, Z, linewidth=.5, color='c')
ax1.scatter(x0[0], x0[1], multimin(x0), c='b') # Initial pt: blue
# Plot the results of the algorithms.
ax1.scatter(small.x[0], small.x[1], small.fun, s=30, c='r') # Small step: red
ax1.scatter(large.x[0], large.x[1], large.fun, s=30, c='g') # Large step: green
plt.show()
# Answer the problem questions.
print("minimize() fails because it gets trapped in a basin.")
print("0.2 fails because it is too small a stepsize to escape a basin.")
# Return the correct global minimum.
return large['fun']
# Problem 4: learn and use scipy.optimize.root()
def prob4():
"""Find the roots of the system
[ -x + y + z ] [0]
[ 1 + x^3 - y^2 + z^3 ] = [0]
[ -2 - x^2 + y^2 + z^2 ] [0]
Returns the values of x,y,z as an array.
"""
# Define the nonlinear system, its Jacobian, and the initial guess.
def f(X):
x,y,z = X
return np.array([ -x + y + z,
1 + x**3 -y**2 + z**3,
-2 -x**2 + y**2 + z**2 ])
def jacobian(X):
x,y,z = X
return np.array([ [ -1, 1, 1 ],
[3*x**2, -2*y, 3*z**2],
[ -2*x, 2*y, 2*z ] ])
x0 = np.array([0,0,0])
# Calculate the solution, check that it is a root, and return it.
sol = opt.root(f, x0, jac=jacobian, method='hybr')
assert np.allclose(np.zeros_like(sol.x), f(sol.x)), "FAILURE"
return sol.x
# Problem 5: learn and use scipy.optimize.curve_fit().
def prob5():
"""Use the scipy.optimize.curve_fit() function to fit a curve to
the data found in `convection.npy`. The first column of this file is R,
the Rayleigh number, and the second column is Nu, the Nusselt number.
The fitting parameters should be c and beta, as given in the convection
equations.
Plot the data from `convection.npy` and the curve generated by curve_fit.
Return the values c and beta as an array.
"""
data = np.load("convection.npy")
initial = 4
# Define the function to optimize.
def nusselt(R, c, beta):
return c*R**beta
# Use curve_fit and the data to get the parameters.
popt, pcov = opt.curve_fit(nusselt, data[initial:,0], data[initial:,1])
curve = nusselt(data[initial:,0], popt[0], popt[1])
# Plot the data and the curve.
plt.loglog(data[:,0], data[:,1], '.k', label='Data')
plt.loglog(data[initial:,0], curve, 'b', label='Curve', linewidth=2)
plt.legend(loc="lower right")
plt.show()
# Return the parameter values.
return popt
```
#### File: Vol3A/Bokeh/bokeh_solutions.py
```python
from __future__ import division
import numpy as np
import pandas as pd
import pickle
from bokeh.io import curdoc
from bokeh.plotting import Figure, output_file, show
from bokeh.layouts import column, row, layout, widgetbox
from bokeh.palettes import Reds9 as COLORS
#from bokeh.tile_providers import STAMEN_TONER, STAMEN_TERRAIN, STAMEN_TONER_BACKGROUND
from bokeh.sampledata import us_states
from bokeh.core.properties import Either, Auto, Instance
from bokeh.models import (Range1d, ColumnDataSource, Slider,
CustomJS, HoverTool, WheelZoomTool,
DataRange1d, TextInput, Toggle, Div,
WMTSTileSource)
from bokeh.models.widgets import CheckboxButtonGroup, Select
from pyproj import Proj, transform
# Prep data
accidents = pd.read_pickle("fars_accidents.pickle")
drivers = pd.read_pickle("final_drivers.pickle")
#us_states = us_states.data.copy()
with open("us_states.pickle", "rb") as file:
us_states = pickle.load(file)
state_xs = [us_states[code]["lons"] for code in us_states]
state_ys = [us_states[code]["lats"] for code in us_states]
state_names = us_states.keys()
id_to_st = {1:"AL", 2:"AK", 4:"AZ", 5:"AR", 6:"CA", 8:"CO", 9:"CT", 10:"DE",
11:"DC", 12:"FL", 13:"GA", 15:"HI", 16:"ID", 17:"IL", 18:"IN",
19:"IA", 20:"KS", 21:"KY", 22:"LA", 23:"ME", 24:"MD", 25:"MA",
26:"MI", 27:"MN", 28:"MS", 29:"MO", 30:"MT", 31:"NE", 32:"NV",
33:"NH", 34:"NJ", 35:"NM", 36:"NY", 37:"NC", 38:"ND", 39:"OH",
40:"OK", 41:"OR", 42:"PA", 44:"RI", 45:"SC", 46:"SD", 47:"TN",
48:"TX", 49:"UT", 50:"VT", 51:"VA", 53:"WA", 54:"WV", 55:"WI",
56:"WY"}
with open("id_to_state.pickle", "wb") as f:
pickle.dump(id_to_st, f)
st_to_id = {v: k for k, v in id_to_st.items()}
id_list = []
for s in state_names:
id_list.append(st_to_id[s])
state_totals = [accidents[accidents["STATE"]==s].shape[0] for s in id_list]
state_drunk = [accidents[(accidents["STATE"]==s) & (accidents["DRUNK_DR"]>0)].shape[0] for s in id_list]
state_percent = (np.array(state_drunk) / np.array(state_totals, dtype=float)) * 100
state_speed = [accidents[(accidents["STATE"]==s) & (accidents["SP"]==1)].shape[0] for s in id_list]
state_percent_sp = (np.array(state_speed) / np.array(state_totals, dtype=float)) * 100
# Convert data to appropriate format for map
"""
from_proj = Proj(init="epsg:4326")
to_proj = Proj(init="epsg:3857")
def convert(longitudes, latitudes):
x_vals = []
y_vals = []
for lon, lat in zip(longitudes, latitudes):
x, y = transform(from_proj, to_proj, lon, lat)
x_vals.append(x)
y_vals.append(y)
return x_vals, y_vals
accidents["x"], accidents["y"] = convert(accidents.LONGITUD, accidents.LATITUDE)
accidents["r"] = 10000
accidents.to_pickle("fars_accidents.pickle")
borders_x = []
borders_y = []
for i in xrange(len(state_xs)):
cx, cy = convert(state_xs[i], state_ys[i])
borders_x.append(cx)
borders_y.append(cy)
borders = dict(x=borders_x, y=borders_y)
with open("borders.pickle", "wb") as f:
pickle.dump(borders, f)
"""
# set up map
mercator_extent = dict(start=-1400000, end=2000000, bounds="auto", )
x_range = Range1d(start=-14000000, end=-7000000, bounds="auto")
y_range = Range1d(start=2500000, end=6500000, bounds="auto")
fig = Figure(plot_width=1100, plot_height=650, tools=["wheel_zoom", "pan"],
x_range=(-13000000, -7000000), y_range=(2750000, 6250000), webgl=True,
active_scroll="wheel_zoom")
fig.axis.visible = False
STAMEN_TONER_BACKGROUND = WMTSTileSource(
url='http://tile.stamen.com/toner-background/{Z}/{X}/{Y}.png',
attribution=(
'Map tiles by <a href="http://stamen.com">Stamen Design</a>, '
'under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>.'
'Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, '
'under <a href="http://www.openstreetmap.org/copyright">ODbL</a>'
)
)
fig.add_tile(STAMEN_TONER_BACKGROUND)
accidents.loc[accidents.DRUNK_DR != 0, "DRUNK_DR"] = "YES"
accidents.loc[accidents.DRUNK_DR == 0, "DRUNK_DR"] = "NO"
accidents.loc[accidents.SP != 0, "SP"] = "YES"
accidents.loc[accidents.SP == 0, "SP"] = "NO"
accidents.loc[accidents.WEATHER.isin([0,1,8,10,98,99]), "WEATHER"] = "Clear"
accidents.loc[accidents.WEATHER == 2, "WEATHER"] = "Rain"
accidents.loc[accidents.WEATHER == 3, "WEATHER"] = "Sleet/Hail"
accidents.loc[accidents.WEATHER == 4, "WEATHER"] = "Snow"
accidents.loc[accidents.WEATHER == 5, "WEATHER"] = "Fog/Smog/Smoke"
accidents.loc[accidents.WEATHER == 6, "WEATHER"] = "Severe Crosswinds"
accidents.loc[accidents.WEATHER == 7, "WEATHER"] = "Blowing Sand, Soil, Dirt"
accidents.loc[accidents.WEATHER == 11, "WEATHER"] = "Blowing Snow"
accidents.loc[accidents.WEATHER == 12, "WEATHER"] = "Freezing Rain"
accidents["r"] = 10000
drunk = accidents[accidents.DRUNK_DR == "YES"].copy()
speed = accidents[(accidents.DRUNK_DR == "NO") & (accidents.SP == "YES")].copy()
other = accidents[(accidents.DRUNK_DR == "NO") & (accidents.SP == "NO")].copy()
del accidents
COLORS.reverse()
no_colors = ['#FFFFFF']*len(state_names)
drunk_colors = [COLORS[i] for i in pd.qcut(state_percent, len(COLORS)).codes]
speeding_colors = [COLORS[i] for i in pd.qcut(state_percent_sp, len(COLORS)).codes]
alpha=[0]*len(state_names)
with open("borders.pickle", "rb") as f:
b = pickle.load(f)
patch_source = ColumnDataSource(
data=dict(borders_x=b["x"],
borders_y=b["y"],
colors=no_colors,
alpha=alpha,
state_names=state_names,
state_totals=state_totals,
state_drunk=state_drunk,
state_percent=state_percent,
state_percent_sp=state_percent_sp
)
)
patches = fig.patches(xs="borders_x", ys="borders_y", source=patch_source, fill_alpha="alpha",
fill_color="colors", line_alpha=0, hover_alpha=.3, line_width=5)
fig.add_tools(HoverTool(renderers=[patches], tooltips=[("State", "@state_names"),
("Total", "@state_totals"),
("Drunk%", "@state_percent{1.11}" + "%"),
("Speeding%", "@state_percent_sp{1.11}" + "%")]))
select = Select(title="Color States By:", value="None", options=["None", "Drunk%", "Speeding%"])
def update_color(attrname, old, new):
if select.value == "Drunk%":
patch_source.data["colors"] = drunk_colors
patch_source.data["alpha"] = [.3]*len(state_names)
elif select.value == "Speeding%":
patch_source.data["colors"] = speeding_colors
patch_source.data["alpha"] = [.3]*len(state_names)
else:
patch_source.data["colors"] = no_colors
patch_source.data["alpha"] = [0]*len(state_names)
select.on_change('value', update_color)
# --------------------------------------
def gen_dict(df):
return dict(
x=df["x"],
y=df["y"],
r=df["r"],
MONTH=df["MONTH"],
DAY=df["DAY"],
YEAR=df["YEAR"],
FATALS=df["FATALS"],
DRUNK_DR=df["DRUNK_DR"],
SP=df["SP"],
WEATHER=df["WEATHER"]
)
other_source = ColumnDataSource(data=gen_dict(other))
speed_source = ColumnDataSource(data=gen_dict(speed))
drunk_source = ColumnDataSource(data=gen_dict(drunk))
other_circles = fig.circle(source=other_source, x="x", y="y", radius="r", fill_color="gray",
fill_alpha=.3, line_alpha=0, hover_alpha=1, hover_color="yellow", legend="Other")
speed_circles = fig.circle(source=speed_source, x="x", y="y", radius="r", fill_color="blue",
fill_alpha=.3, line_alpha=0, hover_alpha=1, hover_color="yellow", legend="Speeding")
drunk_circles = fig.circle(source=drunk_source, x="x", y="y", radius="r", fill_color="red",
fill_alpha=.3, line_alpha=0, hover_alpha=1, hover_color="yellow", legend="Drunk")
dot_tooltips = [("Date", "@MONTH/@DAY/@YEAR"), ("Fatalities", "@FATALS"), ("Drunk", "@DRUNK_DR"),
("Speeding", "@SP"), ("Weather", "@WEATHER")]
fig.add_tools(HoverTool(renderers=[other_circles, speed_circles, drunk_circles], tooltips=dot_tooltips))
button_group = CheckboxButtonGroup(
labels=["Other", "Speeding", "Drunk"], active=[0, 1, 2], width=200)
toggle = Toggle(label="Sort by Hour", button_type="default")
slider = Slider(title="Hour (Military Time)", start=0, end=23, value=0, step=1)
empty_dict = dict(
x=np.array([]),
y=np.array([]),
r=np.array([]),
MONTH=np.array([]),
DAY=np.array([]),
YEAR=np.array([]),
FATALS=np.array([]),
DRUNK_DR=np.array([]),
SP=np.array([]),
WEATHER=np.array([])
)
def update_hour(attrname, old, new):
if toggle.active:
if 0 in button_group.active:
new_other = other[other.HOUR == slider.value]
other_source.data = gen_dict(new_other)
else:
other_source.data = empty_dict
if 1 in button_group.active:
new_speed = speed[speed.HOUR == slider.value]
speed_source.data = gen_dict(new_speed)
else:
speed_source.data = empty_dict
if 2 in button_group.active:
new_drunk = drunk[drunk.HOUR == slider.value]
drunk_source.data = gen_dict(new_drunk)
else:
drunk_source.data = empty_dict
else:
if 0 in button_group.active:
other_source.data = gen_dict(other)
else:
other_source.data = empty_dict
if 1 in button_group.active:
speed_source.data = gen_dict(speed)
else:
speed_source.data = empty_dict
if 2 in button_group.active:
drunk_source.data = gen_dict(drunk)
else:
drunk_source.data = empty_dict
slider.on_change('value', update_hour)
toggle.on_change('active', update_hour)
button_group.on_change('active', update_hour)
callback = CustomJS(args=dict(other=other_circles, speed=speed_circles, drunk=drunk_circles), code="""
other.glyph.radius = { value: cb_obj.get('value')*125 }
other.data_source.trigger('change')
speed.glyph.radius = { value: cb_obj.get('value')*125 }
speed.data_source.trigger('change')
drunk.glyph.radius = { value: cb_obj.get('value')*125 }
drunk.data_source.trigger('change')
""")
size_slider = Slider(title="Dot Size", start=1, end=100, orientation="horizontal",
value=100, step=1, callback=callback, callback_policy="mouseup")
fig.legend.background_fill_alpha = 1
fig.legend.background_fill_color = "gainsboro"
fig.legend.border_line_width = 4
fig.legend.level="overlay"
fig.legend.label_text_font_size = "15pt"
wbox = widgetbox(select, Div(text="<br>"), Div(text="Type of Accidents to Show:"),
button_group, Div(text="<br>"), toggle, slider, Div(text="<br>"),
size_slider)
curdoc().add_root(row(fig, wbox))
```
#### File: Vol3A/UnixShell1/Shell_solutions.py
```python
# PROBLEM 1: Navigate to the Shell-Lab directory
'''
SHELL COMMANDS: (It's trivial. Free points awarded)
$ cd Downloads/Shell-Lab
'''
# PROBLEM 2: Delete Audio folder and its contents. Create Documents, Photos, and Python directories
'''
SHELL COMMANDS: (Executed in the Shell-Lab directory)
$ rm -r Audio
$ mkdir Documents Photos Python
'''
# PROBLEM 3: Move *.jpg to Photos, *.txt to Documents, and *.py to Python
'''
SHELL COMMANDS: (Executed in the Shell-Lab directory)
$ mv *.jpg Photos
$ mv *.txt Documents
$ mv *.py Python
'''
# PROBLEM 4: Move the *.jpg files found deep in the directory hierarchy to Photos
'''
SHELL COMMANDS: (Executed in the Shell-Lab directory)
To find where the .jpg files are
$ find . -type f -name "*.jpg"
Then move each file with mv command
$ mv <filepath> Photos
More specifically,
$ mv Files/Dec/Holidays/*.jpg Photos
$ mv Files/Feb/pics/*.jpg Photos
$ mv Files/Apr/user/Sally/Alaska/*.jpg Photos
$ mv Files/Jul/Vacation/*.jpg Photos
'''
# PROBLEM 5: Count and sort the words in words.txt. Save output to sortedwords.txt
'''
SHELL COMMANDS: (Executed in the Shell-Lab/Documents directory)
$ wc -l < words.txt > sortedwords.txt
$ sort < words.txt >> sortedwords.txt
'''
# PROBLEM 6: Make count_files.py an executable script
'''
SHELL COMMANDS: (Executed in the Shell-Lab/Python directory)
$ which python
On the author's system, this was: /home/tanner/anaconda/bin/python
Open count_files.py and add the shebang as the first line of the file. On the
author's system, this was
#!/home/tanner/anaconda/bin/python
$ chmod ug+x count_files.py
'''
# PROBLEM 7:
'''
SHELL COMMANDS: (Executed in the Shell-Lab/Scripts directory)
$ ./script1 &
$ ./script2 &
$ ./script3 &
$ jobs > log.txt
'''
import subprocess
import numpy as np
import scipy as sp
class Shell(object):
def __init__(self):
pass
# PROBLEM 8: Implement find_file and find_word
def find_file(self, filename, d=None):
"""
Find a file inside a given directory. By default, the search starts
in the current directory.
"""
if d is None:
d = "."
command = "find " + d + " -name \"" + filename + "\""
files = subprocess.check_output(command,shell=True).split('\n')
files.pop()
return files
def find_word(self, word, d=None):
"""
Search the contents of all the files within a directory for a given
word. By default, the search starts in the current directory
"""
if d is None:
d = "."
command = "grep -nr " + word + " " + d
files = subprocess.check_output(command,shell=True).split('\n')
files.pop()
return files
# PROBLEM 9: Implement largest_files
def largest_files(self,n,d=None):
"""
Return a list of the n biggest files and their sizes.
By default, the search starts in the current directory
"""
if d is None:
d = '.'
command = "find " + d + " -type f"
files = subprocess.check_output(command, shell=True).split('\n')
files.pop()
split_files = np.array([subprocess.check_output('du ' + f, shell=True).strip().split('\t') for f in files])
sizes = np.array(split_files[:,0],dtype=np.int32)
sorted_index = sp.argsort(sizes)[::-1]
return split_files[sorted_index][:n]
# PROBLEM 10 (Optional): secure copy with partner
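'''
Example only; the hostname, username, and paths below are placeholders:
$ scp myfile.txt username@partner_host:~/Desktop
$ scp -r Shell-Lab/Documents username@partner_host:~/
'''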
```
#### File: Vol4A/FiniteDifferenceMethod/solution.py
```python
from __future__ import division
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, cg
import matplotlib.pyplot as plt
def fd_order2_ode(func,a1,a2,a3,a=0.,b=1.,alpha=1.,beta=3.,N=5):
# A Simple Finite Difference Scheme to solve BVP's of the form
# a1(x)u''(x) + a2(x)u'(x) + a3(x)u(x) = f(x), x \in [a,b]
# u(a) = alpha
# u(b) = beta
# (Dirichlet boundary conditions)
#
# U_0 = alpha, U_1, U_2, ..., U_m, U_{m+1} = beta
# We use m+1 subintervals, giving m algebraic equations
m = N-1
h = (b-a)/N # subinterval length; next we form the diagonals
x = np.linspace(a,b,N+1)
D0,Dp,Dm,diags = np.zeros((1,m)), np.zeros((1,m)), np.zeros((1,m)), np.array([0,-1,1])
D0 += -2.*a1(x[1:-1])*h**(-2.) + a3(x[1:-1])
Dm += a1(x[1:-1])*h**(-2.) - a2(x[1:-1])*(2.*h)**(-1.)
Dp += a1(x[1:-1])*h**(-2.) + a2(x[1:-1])*(2.*h)**(-1.)
# print "\nD0 = \n", D0[0,:5]
# print "\nDm = \n", Dm[0,:5]
# print "\nDp = \n", Dp[0,:5]
# Here we create the matrix A
data = np.concatenate((D0,np.roll(Dm,-1),np.roll(Dp,1)),axis=0) # This stacks up rows
A = spdiags(data,diags,m,m).asformat('csr')
print "\nA = \n", A[:5,:5].todense()
print "\nA = \n", A[-5:,-5:].todense()
# print A.shape
# Here we create the vector B
B = np.zeros(N+1)
B[2:-2] = func(x[2:-2])
xj = a+1.*h
B[0], B[1] = alpha, func(xj)-alpha *( a1(xj)*h**(-2.) - a2(xj)*(2.*h)**(-1.) )
xj = a+m*h
B[-1], B[-2] = beta, func(xj)-beta*( a1(xj)*h**(-2.) + a2(xj)*(2.*h)**(-1.) )
print "\nB = \n", B[:5]
print "\nB = \n", B[-5:]
# Here we solve the equation AX = B and return the result
B[1:-1] = spsolve(A,B[1:-1])
return np.linspace(a,b,m+2), B
def approx_order(num_approx,N,bvp,*args):
h, max_error = (1.-0)/N[:-1], np.ones(num_approx-1)
mesh_best, num_sol_best = bvp(*args, subintervals=N[-1])
for j in range(len(N)-1):
mesh, num_sol = bvp(*args, subintervals=N[j])
max_error[j] = np.max(np.abs( num_sol- num_sol_best[::2**(num_approx-j-1)] ) )
plt.loglog(h,max_error,'.-r',label="$E(h)$")
plt.loglog(h,h**(2.),'-k',label="$h^{\, 2}$")
plt.xlabel("$h$")
plt.legend(loc='best')
plt.show()
print "The order of the finite difference approximation is about ", ( (np.log(max_error[0]) -
np.log(max_error[-1]) )/( np.log(h[0]) - np.log(h[-1]) ) ), "."
#
# def example():
# # First Code block in the lab manual
# import numpy as np
# from scipy.sparse import spdiags
# from scipy.sparse.linalg import spsolve
#
# def bvp(func, epsilon, alpha, beta, N):
# a,b = 0., 1. # Interval for the BVP
# h = (b-a)/N # The length of each subinterval
#
# # Initialize and define the vector F on the right
# F = np.empty(N-1.)
# F[0] = func(a+1.*h)-alpha*(epsilon+h/2.)*h**(-2.)
# F[N-2] = func(a+(N-1)*h)-beta*(epsilon-h/2.)*h**(-2.)
# for j in xrange(1,N-2):
# F[j] = func(a + (j+1)*h)
#
# # Here we define the arrays that will go on the diagonals of A
# data = np.empty((3,N-1))
# data[0,:] = -2.*epsilon*np.ones((1,N-1)) # main diagonal
# data[1,:] = (epsilon+h/2.)*np.ones((1,N-1)) # off-diagonals
# data[2,:] = (epsilon-h/2.)*np.ones((1,N-1))
# # Next we specify on which diagonals they will be placed, and create A
# diags = np.array([0,-1,1])
# A=h**(-2.)*spdiags(data,diags,N-1,N-1).asformat('csr')
#
# U = np.empty(N+1)
# U[1:-1] = spsolve(A,F)
# U[0], U[-1] = alpha, beta
# return np.linspace(a,b,N+1), U
#
# x, y = bvp(lambda x:-1., epsilon=.05,alpha=1, beta=3, N=400)
# import matplotlib.pyplot as plt
# plt.plot(x,y,'-k',linewidth=2.0)
# plt.show()
#
# num_approx = 5 # Number of Approximations
# N = 20*np.array([2**j for j in range(num_approx)])
# h, max_error = (1.-0)/N[:-1], np.ones(num_approx-1)
#
# mesh_best, num_sol_best = bvp(lambda x:-1, epsilon=.5, alpha=1, beta=3, N=N[-1])
# for j in range(len(N)-1):
# mesh, num_sol = bvp(lambda x:-1, epsilon=.5, alpha=1, beta=3, N=N[j])
# max_error[j] = np.max(np.abs( num_sol- num_sol_best[::2**(num_approx-j-1)] ) )
# plt.loglog(h,max_error,'.-r',label="$E(h)$")
# plt.loglog(h,h**(2.),'-k',label="$h^{\, 2}$")
# plt.xlabel("$h$")
# plt.legend(loc='best')
# plt.show()
# print "The order of the finite difference approximation is about ", ( (np.log(max_error[0]) -
# np.log(max_error[-1]) )/( np.log(h[0]) - np.log(h[-1]) ) ), "."
# return
#
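# A minimal usage sketch (not part of the original solution file): solve the
# singularly perturbed test problem eps*u'' - u' = -1 on [0,1] with u(0)=1 and
# u(1)=3 via fd_order2_ode, then plot the result. The coefficient functions and
# the values of eps and N below are illustrative choices, not fixed by the lab.
if __name__ == "__main__":
    eps = 0.1
    x, U = fd_order2_ode(func=lambda x: 0.*x - 1.,
                         a1=lambda x: 0.*x + eps,
                         a2=lambda x: 0.*x - 1.,
                         a3=lambda x: 0.*x,
                         a=0., b=1., alpha=1., beta=3., N=40)
    plt.plot(x, U, '-k', linewidth=2.)
    plt.xlabel('$x$'); plt.ylabel('$u(x)$')
    plt.show()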
```
#### File: Vol4A/WeightLoss/plots.py
```python
from __future__ import division
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
from scipy.integrate import ode
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import solution
def weightloss_calculator(age, sex, H, BW, T, (PAL, EI)):
# Initial stats
# Age (y), Gender ('male' or 'female'), Height (m), Body Weight (kg)
# Time (d)
#
# Diet/Lifestyle Change
# (PAL, EI) = Future Physical Activity Level and Energy Intake
#
# Call the IVP Solver
########################################
F = solution.fat_mass(BW, age, H, sex)
L = BW - F
t, y = solution.compute_weight_curve(F, L, T, EI, PAL)
# Plot the Results
####################################
fig, ax = plt.subplots()
plt.plot(t, 2.2 * y[:, 0], '-b', label='Fat', linewidth=2.0)
plt.plot(t, 2.2 * y[:, 1], '-g', label='Lean', linewidth=2.0)
plt.plot(t, 2.2 * (y[:, 0] + y[:, 1]), '-r', label='Total', linewidth=2.0)
plt.legend(loc=1) # Upper right placement
plt.xlabel('days', fontsize=16)
plt.ylabel('lbs', fontsize=16)
plt.axis([0, np.max(t), 20, 180])
# High end of normal weight range
plt.plot(t, 2.2 * 25 * H ** 2 * np.ones(t.shape), '-.k')
# Low end of normal weight range
plt.plot(t, 2.2 * 20 * H ** 2 * np.ones(t.shape), '-.k')
majorLocator = MultipleLocator(200)
ax.xaxis.set_major_locator(majorLocator)
plt.savefig('weightloss.pdf')
plt.clf()
def Exercise1():
age, sex = 38.0, 'female'
H, BW = 1.73, 72.7
# Long time frame
T = 5 * 7 * 52.
# PAL, EI = 1.5, 2025.
def EI(t):
return 2025
def PAL(t):
return 1.5
weightloss_calculator(age, sex, H, BW, T, (PAL, EI))
def Exercise2():
age, sex = 38.0, 'female'
H, BW = 1.73, 72.7
# Long time frame
T = 5 * 7 * 52.
def EI(t):
return 1850
def PAL(t):
return 1.4
weightloss_calculator(age, sex, H, BW, T, (PAL, EI))
def Exercise3():
age, sex = 38.0, 'female'
H, BW = 1.73, 72.7
T = 16 * 7 * 2.
def EI(t):
if t < 16 * 7 * 1.:
return 1600
else:
return 2025
def PAL(t):
if t < 16 * 7 * 1.:
return 1.7
else:
return 1.5
weightloss_calculator(age, sex, H, BW, T, (PAL, EI))
###########################################################################
# Lotka-Volterra Predator Prey Model
# U_t = U(1-V)
# V_t = alpha V(U-1)
#
# Logistic Predator Prey Model
# U_t = U(1-U-V)
# V_t = alpha V(U-beta)
def Example_Lotka_Volterra():
# (Nondimensional) Time interval for one 'period'
a, b = 0., 30.
# Nondimensional parameters
alpha = 1. / 3
# dimension of the system / initial conditions
dim, ya = 2, np.array([3 / 4., 3 / 4.])
def Lotka_Volterra(x, y):
return np.array([y[0] * (1. - y[1]), alpha * y[1] * (y[0] - 1.)])
subintervals = 500
Y = solution.RK4(Lotka_Volterra, a, b, subintervals, ya, dim)
# Plot the direction field
Y1, Y2 = np.meshgrid(
np.arange(0, 4.5, .2), np.arange(0, 4.5, .2), sparse=True)
U, V = Lotka_Volterra(0, (Y1, Y2))
Q = plt.quiver(Y1[::3, ::3], Y2[::3, ::3], U[::3, ::3], V[::3, ::3],
pivot='mid', color='b', units='dots', width=3.)
# Plot the 2 Equilibrium points
plt.plot(1, 1, 'ok', markersize=8)
plt.plot(0, 0, 'ok', markersize=8)
# Plot the solutions in phase space
# Y = RK4( Lotka_Volterra,a,b,subintervals,np.array([3/4., 3/4.]),dim)
plt.plot(Y[:, 0], Y[:, 1], '-k', linewidth=2.0)
# Y = RK4( Lotka_Volterra,a,b,subintervals,np.array([1/2., 3/4.]),dim)
# plt.plot(Y[:,0], Y[:,1],'-k',linewidth=2.0)
# Y = RK4( Lotka_Volterra,a,b,subintervals,np.array([1/4., 1/3.]),dim)
# plt.plot(Y[:,0], Y[:,1],'-k',linewidth=2.0)
# Y = RK4( Lotka_Volterra,a,b,subintervals,np.array([1/16., 3/4.]),dim)
# plt.plot(Y[:,0], Y[:,1],'-k',linewidth=2.0)
# Y = RK4( Lotka_Volterra,a,b,subintervals,np.array([1/40., 3/4.]),dim)
# plt.plot(Y[:,0], Y[:,1],'-k',linewidth=2.0)
# plt.plot(Y[::10,0], Y[::10,1],'*b')
plt.axis([-.5, 4.5, -.5, 4.5])
plt.title("Phase Portrait of the Lotka-Volterra Predator-Prey Model")
plt.xlabel('Prey', fontsize=15)
plt.ylabel('Predators', fontsize=15)
plt.savefig("Lotka_Volterra_Phase_Portrait.pdf")
plt.clf()
Y = solution.RK4(Lotka_Volterra, a, 2 * b, 2 * subintervals, ya, dim)
plt.plot(np.linspace(a, 2 * b, 2 * subintervals + 1),
Y[:, 0], '-b', linewidth=2.0)
plt.plot(np.linspace(a, 2 * b, 2 * subintervals + 1),
Y[:, 1], '-g', linewidth=2.0)
plt.savefig("Lotka_Volterra.pdf")
plt.clf()
def Exercise_Lotka_Volterra():
# (Nondimensional) Time interval for one 'period'
a, b = 0., 30.
# Nondimensional parameters
alpha = 1. / 3
# dimension of the system / initial conditions
dim, ya = 2, np.array([3 / 4., 3 / 4.])
def Lotka_Volterra(x, y):
return np.array([y[0] * (1. - y[1]), alpha * y[1] * (y[0] - 1.)])
subintervals = 500
Y = solution.RK4(Lotka_Volterra, a, b, subintervals, ya, dim)
# Plot the direction field
Y1, Y2 = np.meshgrid(
np.arange(0, 4.5, .2), np.arange(0, 4.5, .2), sparse=True)
U, V = Lotka_Volterra(0, (Y1, Y2))
Q = plt.quiver(Y1[::3, ::3], Y2[::3, ::3], U[::3, ::3], V[::3, ::3],
pivot='mid', color='b', units='dots', width=3.)
# Plot the 2 Equilibrium points
plt.plot(1, 1, 'ok', markersize=8)
plt.plot(0, 0, 'ok', markersize=8)
# Plot the solutions in phase space
plt.plot(Y[:, 0], Y[:, 1], '-k', linewidth=2.0)
Y = solution.RK4(Lotka_Volterra, a, b, subintervals,
np.array([1 / 2., 3 / 4.]), dim)
plt.plot(Y[:, 0], Y[:, 1], '-k', linewidth=2.0)
Y = solution.RK4(Lotka_Volterra, a, b, subintervals,
np.array([1 / 16., 3 / 4.]), dim)
plt.plot(Y[:, 0], Y[:, 1], '-k', linewidth=2.0)
Y = solution.RK4(Lotka_Volterra, a, b, subintervals,
np.array([1 / 40., 3 / 4.]), dim)
plt.plot(Y[:, 0], Y[:, 1], '-k', linewidth=2.0)
plt.plot(Y[::10, 0], Y[::10, 1], '*b')
plt.axis([-.5, 4.5, -.5, 4.5])
plt.title("Phase Portrait of the " +
"Lotka-Volterra Predator-Prey Model")
plt.xlabel('Prey', fontsize=15)
plt.ylabel('Predators', fontsize=15)
# plt.savefig("Lotka_Volterra_Phase_Portrait.pdf")
plt.show(); plt.clf()
def Exercise_Logistic():
# y[0], y[1] = Prey, Predator populations
a, b = 0., 40.
dim = 2
ya1 = np.array([1 / 3., 1 / 3.])
ya2 = np.array([2.5 / 5., 1 / 5.])
alpha, beta = 1., 1.1#1., .3
def Logistic(x, y):
return np.array([y[0] * (1 - y[0] - y[1]), alpha * y[1] * (y[0] - beta)])
example1 = ode(Logistic).set_integrator('dopri5', atol=1e-10)
example1.set_initial_value(ya1, a)
example2 = ode(Logistic).set_integrator('dopri5', atol=1e-10)
example2.set_initial_value(ya2, a)
t = np.linspace(a, b, 201)
Y1 = np.zeros((len(t), dim))
Y1[0, :] = ya1
Y2 = np.zeros((len(t), dim))
Y2[0, :] = ya2
for j in range(1, len(t)):
Y1[j, :] = example1.integrate(t[j])
Y2[j, :] = example2.integrate(t[j])
plt.plot(Y1[:, 0], Y1[:, 1], '-k', linewidth=1.5)
# plt.plot(Y1[::5,0], Y1[::5,1],'*g')
plt.plot(Y2[:, 0], Y2[:, 1], '-k', linewidth=1.5)
# plt.plot(Y2[::5,0], Y2[::5,1],'*g')
R, S = np.meshgrid(
np.arange(0., 1.35, .1), np.arange(0., 1.35, .1), sparse=True)
U, V = Logistic(0, (R, S))
Q = plt.quiver(R[::2, ::2], S[::2, ::2], U[::2, ::2], V[::2, ::2],
pivot='mid', color='green', units='dots', width=3.)
plt.plot(beta, 1 - beta, 'ok', markersize=6)
plt.plot(1, 0, 'ok', markersize=6)
plt.plot(0, 0, 'ok', markersize=6)
# plt.plot( R[::2, ::2], S[::2, ::2], 'k.')
plt.axis([-.1, 1.3, -.1, 1.3])
plt.title("Phase Portrait of Logistic Predator-Prey Model")
plt.xlabel('Prey', fontsize=15)
plt.ylabel('Predators', fontsize=15)
plt.show(); plt.clf()
if __name__ == "__main__":
Exercise_Lotka_Volterra()
Exercise_Logistic()
```
#### File: Vol4B/HIV/HIV_solutions.py
```python
import numpy as np
from matplotlib import pyplot as plt
####### Parameters
# * = value comes from Kirschner and Webb
# ** = value comes from lab
a1, b1 = 0., 0.02 # boundaries for drug1 dosage
a2, b2 = 0., 0.9 # boundaries for drug2 dosage
s1, s2 = 2., 1.5 #source/proliferation constants*
mu = 0.002 #T cell death rate*
k = 0.000025 #infection rate*
g = 30. #input rate of an external virus source*
c = .007 #viral loss rate*
B1, B2 = 14.0, 1.0 #half saturation constants*
A1, A2 = 250000 , 75 # cost weights
T0 = 400 # initial number of T cells**
V0 = 3 #initial concentration of free viruses**
t_f = 50. # number of days
n = 1000 # number of time steps
def initialize_all(y0, t0, t, n):
""" An initialization routine for the different ODE solving
methods in the lab. This initializes Y, T, and h. """
if isinstance(y0, np.ndarray):
Y = np.empty((n, y0.size)).squeeze()
else:
Y = np.empty(n)
Y[0] = y0
T = np.linspace(t0, t, n)
h = float(t - t0) / (n - 1)
return Y, T, h
def RK4(f, y0, t0, t, n):
""" Use the RK4 method to compute an approximate solution
to the ODE y' = f(t, y) at n equispaced parameter values from t0 to t
with initial conditions y(t0) = y0.
y0 is assumed to be either a constant or a one-dimensional numpy array.
t and t0 are assumed to be constants.
f is assumed to accept two arguments.
The first is a constant giving the value of t.
The second is a one-dimensional numpy array of the same size as y.
This function returns an array Y of shape (n,) if
y is a constant or an array of size 1.
It returns an array of shape (n, y.size) otherwise.
In either case, Y[i] is the approximate value of y at
the i'th value of np.linspace(t0, t, n).
"""
Y,T,h = initialize_all(y0,t0,t,n)
for i in xrange(n-1):
K1 = f(T[i],Y[i],i)
K2 = f(T[i]+h/2.,Y[i]+h/2.*K1,i)
K3 = f(T[i]+h/2.,Y[i]+h/2.*K2,i)
K4 = f(T[i+1],Y[i]+h*K3,i)
Y[i+1] = Y[i] + h/6.*(K1+2*K2 +2*K3+K4)
return Y
def state_equations(t,y,i):
'''
Parameters
---------------
t : float
the time
y : ndarray (2,)
the T cell concentration and the Virus concentration at time t
i : int
index for the global variable u.
Returns
--------------
y_dot : ndarray (2,)
the derivative of the T cell concentration and the virus concentration at time t
'''
yprime = np.empty(2)
yprime[0] = s1 - s2*y[1]/(B1 + y[1]) - mu*y[0] - k*y[1]*y[0] + u[i,0]*y[0] # T prime
yprime[1] = (g*(1-u[i,1])*y[1]) / (B2 + y[1]) - c*y[1]*y[0] # V prime
return yprime
def lambda_hat(t,y,i):
'''
Parameters
---------------
t : float
the time
y : ndarray (2,)
the lambda_hat values at time t
i : int
index for global variables, u and state.
Returns
--------------
y_dot : ndarray (2,)
the derivative of the lambda_hats at time t.
'''
j = n-1-i
yprime = np.empty(2)
yprime[0] = -(-1 + y[0]*(mu+k*state[j,1] - u[j,0]) + y[1]*c*state[j,1])
yprime[1] = -(y[0]*(B1*s2/((B1+state[j,1])**2) + k*state[j,0]) -
y[1]*(B2*g*(1-u[j,1])/((B2 + state[j,1])**2) - c*state[j,0]) )
return yprime
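# Forward-backward sweep: starting from a constant guess for the controls u,
# integrate the state equations forward in time, integrate the costate
# (adjoint) equations backward in time, project the updated controls onto
# their admissible bounds, and average with the previous iterate; repeat
# until the total change in u falls below delta.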
delta = 0.001
test = delta + 1
t=np.linspace(0,t_f,n)
state = np.zeros((n,2))
state0 = np.array([T0, V0])
costate = np.zeros((n,2))
costate0 = np.zeros(2)
u=np.zeros((n,2))
u[:,0] += .02
u[:,1] += .9
while(test > delta): # see if we've converged
oldu = u.copy();
#solve the state equations with forward iteration
state = RK4(state_equations,state0,0,t_f,n)
#solve the costate equations with backwards iteration
costate = RK4(lambda_hat,costate0,0,t_f,n)[::-1]
#update our control
temp1 = .5/A1*(costate[:,0]*state[:,0])
temp2 = -.5*costate[:,1]/A2*g*state[:,1]/(B2 + state[:,1])
u1 = np.minimum(np.maximum(a1*np.ones(temp1.shape),temp1),b1*np.ones(temp1.shape))
u2 = np.minimum(np.maximum(a2*np.ones(temp2.shape),temp2),b2*np.ones(temp2.shape))
u[:,0] = 0.5*(u1 + oldu[:,0])
u[:,1] = 0.5*(u2 + oldu[:,1])
test = abs(oldu - u).sum()
plt.subplot(221)
plt.plot(t,state[:,0])
plt.ylabel('T')
plt.subplot(222)
plt.plot(t,state[:,1])
plt.ylabel('V')
plt.subplot(223)
plt.plot(t,u[:,0])
plt.ylabel('u1')
plt.ylim([0,.022])
plt.xlabel('Days')
plt.subplot(224)
plt.plot(t,u[:,1])
plt.ylabel('u2')
plt.ylim([0,.92])
plt.xlabel('Days')
plt.show()
```
#### File: Vol4B/Solitons/new.py
```python
from __future__ import division
import numpy as np
from scipy.fftpack import fft, ifft
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from math import sqrt, pi
def initialize_all(y0, t0, t1, n):
""" An initialization routine for the different ODE solving
methods in the lab. This initializes Y, T, and h. """
if isinstance(y0, np.ndarray):
Y = np.empty((n, y0.size),dtype=complex).squeeze()
else:
Y = np.empty(n,dtype=complex)
Y[0] = y0
T = np.linspace(t0, t1, n)
h = float(t1 - t0) / (n - 1)
return Y, T, h
def RK4(f, y0, t0, t1, n):
""" Use the RK4 method to compute an approximate solution
    to the ODE y' = f(t, y) at n equispaced parameter values from t0 to t1
with initial conditions y(t0) = y0.
'y0' is assumed to be either a constant or a one-dimensional numpy array.
't0' and 't1' are assumed to be constants.
'f' is assumed to accept two arguments.
The first is a constant giving the current value of t.
The second is a one-dimensional numpy array of the same size as y.
This function returns an array Y of shape (n,) if
y is a constant or an array of size 1.
It returns an array of shape (n, y.size) otherwise.
In either case, Y[i] is the approximate value of y at
    the i'th value of np.linspace(t0, t1, n).
"""
Y, T, h = initialize_all(y0, t0, t1, n)
for i in xrange(1, n):
K1 = f(T[i-1], Y[i-1])
# print "Y[i-1].shape = ", Y[i-1].shape
tplus = (T[i] + T[i-1]) * .5
K2 = f(tplus, Y[i-1] + .5 * h * K1)
K3 = f(tplus, Y[i-1] + .5 * h * K2)
K4 = f(T[i], Y[i-1] + h * K3)
# print "K1 + 2 * K2 + 2 * K3 + K4.shape = ", (K1 + 2 * K2 + 2 * K3 + K4).shape
Y[i] = Y[i-1] + (h / 6.) * (K1 + 2 * K2 + 2 * K3 + K4)
return T, Y
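# A minimal usage sketch (not part of the original file): integrate the scalar
# test ODE y' = -2y with y(0) = 1 on [0, 1] and compare the endpoint against
# the exact value exp(-2). The test problem and step count are illustrative.
if __name__ == "__main__":
    T, Y = RK4(lambda t, y: -2.*y, y0=1.+0j, t0=0., t1=1., n=101)
    print "RK4 endpoint y(1) =", Y[-1].real, " exact =", np.exp(-2.)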
```
#### File: Vol4B/Solitons/plots.py
```python
from __future__ import division
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from scipy.fftpack import fft, ifft
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from math import sqrt, pi
def initialize_all(y0, t0, t1, n):
""" An initialization routine for the different ODE solving
methods in the lab. This initializes Y, T, and h. """
if isinstance(y0, np.ndarray):
Y = np.empty((n, y0.size),dtype=complex).squeeze()
else:
Y = np.empty(n,dtype=complex)
Y[0] = y0
T = np.linspace(t0, t1, n)
h = float(t1 - t0) / (n - 1)
return Y, T, h
def RK4(f, y0, t0, t1, n):
""" Use the RK4 method to compute an approximate solution
    to the ODE y' = f(t, y) at n equispaced parameter values from t0 to t1
with initial conditions y(t0) = y0.
'y0' is assumed to be either a constant or a one-dimensional numpy array.
't0' and 't1' are assumed to be constants.
'f' is assumed to accept two arguments.
The first is a constant giving the current value of t.
The second is a one-dimensional numpy array of the same size as y.
This function returns an array Y of shape (n,) if
y is a constant or an array of size 1.
It returns an array of shape (n, y.size) otherwise.
In either case, Y[i] is the approximate value of y at
    the i'th value of np.linspace(t0, t1, n).
"""
Y, T, h = initialize_all(y0, t0, t1, n)
for i in xrange(1, n):
K1 = f(T[i-1], Y[i-1])
# print "Y[i-1].shape = ", Y[i-1].shape
tplus = (T[i] + T[i-1]) * .5
K2 = f(tplus, Y[i-1] + .5 * h * K1)
K3 = f(tplus, Y[i-1] + .5 * h * K2)
K4 = f(T[i], Y[i-1] + h * K3)
# print "K1 + 2 * K2 + 2 * K3 + K4.shape = ", (K1 + 2 * K2 + 2 * K3 + K4).shape
Y[i] = Y[i-1] + (h / 6.) * (K1 + 2 * K2 + 2 * K3 + K4)
return T, Y
def plot_soliton():
N = 256
# grid = np.linspace(0,2.*pi, N)
# s1, a1 = 25.**2., 2.
# y1 = 3*s1*np.cosh(sqrt(s1)/2.*(grid-a1))**(-2.)
# s2, a2 = 16.**2., 1.
# y2 = 3*s2*np.cosh(sqrt(s2)/2.*(grid-a2))**(-2.)
# plt.plot(grid,y1,'-k',linewidth=2.)
# plt.plot(grid,y2,'-b',linewidth=2.)
# plt.show()
def unScaled():
# Set up grid and two-soliton initial data:
x = (2.*np.pi/N)*np.arange(-N/2,N/2).reshape(N,1)
A, B = 25., 16.
A_shift, B_shift = 2., 1.
y0 = (3.*A**2.*np.cosh(.5*(A*(x+2.)))**(-2.) + 3*B**2.*np.cosh(.5*(B*(x+1.)))**(-2.)).reshape(N,)
k = np.concatenate(( np.arange(0,N/2) ,
np.array([0]) ,
np.arange(-N/2+1,0,1) )).reshape(N,)
ik3 = 1j*k**3.
def F_unscaled(t,u):
out = -.5*1j*k*fft(ifft(u,axis=0)**2.,axis=0) + ik3* u
return out
tmax = .006
dt = .01*N**(-2.)
nmax = int(round(tmax/dt))
nplt = int(np.floor((tmax/25.)/dt))
y0 = fft(y0,axis=0)
T,Y = RK4(F_unscaled, y0, t0=0, t1=tmax, n=nmax)
udata, tdata = np.real(ifft(y0,axis=0)).reshape(N,1), np.array(0.).reshape(1,1)
for n in range(1,nmax+1):
if np.mod(n,nplt) == 0:
t = n*dt
u = np.real( ifft(Y[n], axis=0) ).reshape(N,1)
udata = np.concatenate((udata,np.nan_to_num(u)),axis=1)
tdata = np.concatenate((tdata,np.array(t).reshape(1,1)),axis=1)
return x, tdata, udata
def Scaled():
# Set up grid and two-soliton initial data:
x = (2.*np.pi/N)*np.arange(-N/2,N/2).reshape(N,1)
A, B = 25., 16.
A_shift, B_shift = 2., 1.
y0 = (3.*A**2.*np.cosh(.5*(A*(x+2.)))**(-2.) + 3*B**2.*np.cosh(.5*(B*(x+1.)))**(-2.)).reshape(N,)
k = np.concatenate(( np.arange(0,N/2) ,
np.array([0]) ,
np.arange(-N/2+1,0,1) )).reshape(N,)
ik3 = 1j*k**3.
def F_scaled(t,U):
E = np.exp(-ik3*t)
E_recip = E**(-1.)
out = -.5*1j*E*k*fft(ifft(E_recip*U,axis=0)**2.,axis=0)
return out
tmax = .006
dt = .2*N**(-2.)
nmax = int(round(tmax/dt))
nplt = int(np.floor((tmax/25.)/dt))
y0 = fft(y0,axis=0)
T,Y = RK4(F_scaled, y0, t0=0, t1=tmax, n=nmax)
udata, tdata = np.real(ifft(y0,axis=0)).reshape(N,1), np.array(0.).reshape(1,1)
for n in range(1,nmax+1):
if np.mod(n,nplt) == 0:
t = n*dt
u = np.real(ifft(np.exp(ik3*t)*(Y[n]),axis=0)).reshape(N,1)
udata = np.concatenate((udata,np.nan_to_num(u)),axis=1)
tdata = np.concatenate((tdata,np.array(t).reshape(1,1)),axis=1)
return x, tdata, udata
# x, tdata, udata = Scaled()
x, tdata, udata = unScaled()
# import sys; sys.exit()
fig = plt.figure()# figsize=plt.figaspect(0.5))
#---- First subplot
# ax = fig.add_subplot(121, projection='3d')
ax = fig.gca(projection='3d')
ax.view_init(elev=45., azim=150)
tv, xv = np.meshgrid(tdata,x,indexing='ij')
surf = ax.plot_surface(tv, xv, udata.T, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
tdata = tdata[0]
ax.set_xlim(tdata[0], tdata[-1])
ax.set_ylim(-pi, pi)
ax.invert_yaxis()
ax.set_zlim(0., 4000.)
ax.set_xlabel('T'); ax.set_ylabel('X'); ax.set_zlabel('Z')
# plt.savefig('interacting_solitons.png',dpi=100)
plt.show()
return
def tref_solitons():
# p27.m - Solve KdV eq. u_t + uu_x + u_xxx = 0 on [-pi,pi] by
# FFT with integrating factor v = exp(-ik^3t)*u-hat.
# Set up grid and two-soliton initial data:
N = 256
dt = .4*N**(-2.)
x = (2.*np.pi/N)*np.arange(-N/2,N/2).reshape(N,1)
A, B = 25., 16.
u = 3.*A**2.*np.cosh(.5*(A*(x+2.)))**(-2.) + 3*B**2.*np.cosh(.5*(B*(x+1.)))**(-2.);
u = u.reshape(N,1)
v = fft(u,axis=0)
# k = [0:N/2-1 0 -N/2+1:-1]'
k = np.concatenate(( np.arange(0,N/2) ,
np.array([0]) ,
np.arange(-N/2+1,0,1) )).reshape(N,1)
ik3 = 1j*k**3.
print "u = ", u
print "v = ", v
# print "k = ", k # This is correct
# Solve PDE and plot results:
tmax = 0.006;
nplt = int(np.floor((tmax/45.)/dt))
nmax = int(round(tmax/dt))
print "nplt = ", nplt
print "nmax = ", nmax
udata, tdata = u, np.array(0.).reshape(1,1)
for n in range(1,nmax+1): #= 1:nmax
t = n*dt
g = -.5j*dt*k
E = np.exp(dt*ik3/2.)
E2 = E**2.
a = g*fft(np.real( ifft( v ,axis=0) )**2.,axis=0)
b = g*fft(np.real( ifft(E*(v+a/2.),axis=0) )**2.,axis=0) # 4th-order
c = g*fft(np.real( ifft(E*v + b/2.,axis=0) )**2.,axis=0) # Runge-Kutta
d = g*fft(np.real( ifft(E2*v+E*c,axis=0) )**2.,axis=0)
v = E2*v + (E2*a + 2*E*(b+c) + d)/6.
        if np.mod(n,nplt) == 0:
u = np.real(ifft(v,axis=0))
# print n
# print u
udata = np.concatenate((udata,np.nan_to_num(u)),axis=1)
tdata = np.concatenate((tdata,np.array(t).reshape(1,1)),axis=1)
fig = plt.figure()# figsize=plt.figaspect(0.5))
#---- First subplot
# ax = fig.add_subplot(121, projection='3d')
ax = fig.gca(projection='3d')
# ax.view_init(elev=40., azim=70)
ax.view_init(elev=20., azim=30)
tv, xv = np.meshgrid(tdata.reshape((tdata.shape[1],)),x.reshape((N,)),indexing='ij')
print tv.shape
print xv.shape
print udata.shape
surf = ax.plot_surface(tv, xv, udata.T, rstride=5, cstride=5, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
tdata = tdata[0]
print tdata
ax.set_xlim(tdata[0], tdata[-1])
ax.set_ylim(x[0], x[-1])
ax.set_zlim(0., 10000.)
ax.set_xlabel('T')
ax.set_ylabel('X')
ax.set_zlabel('Z')
plt.show()
return
#
# def plot_burgers():
# N = 256
# # grid = np.linspace(0,2.*pi, N)
# # s1, a1 = 25.**2., 2.
# # y1 = 3*s1*np.cosh(sqrt(s1)/2.*(grid-a1))**(-2.)
# # s2, a2 = 16.**2., 1.
# # y2 = 3*s2*np.cosh(sqrt(s2)/2.*(grid-a2))**(-2.)
# # plt.plot(grid,y1,'-k',linewidth=2.)
# # plt.plot(grid,y2,'-b',linewidth=2.)
# # plt.show()
#
# def unScaled():
# # Set up grid and two-soliton initial data:
# x = (2.*np.pi/N)*np.arange(-N/2,N/2).reshape(N,1)
# A, B = 25., 16.
# A_shift, B_shift = 2., 1.
# y0 = (3.*A**2.*np.cosh(.5*(A*(x+2.)))**(-2.) + 3*B**2.*np.cosh(.5*(B*(x+1.)))**(-2.)).reshape(N,)
# k = np.concatenate(( np.arange(0,N/2) ,
# np.array([0]) ,
# np.arange(-N/2+1,0,1) )).reshape(N,)
# ik3 = 1j*k**3.
#
# def F_unscaled(t,u):
# out = -.5*1j*k*fft(ifft(u,axis=0)**2.,axis=0) + ik3* u
# return out
#
#
# tmax = .006
# dt = .01*N**(-2.)
# nmax = int(round(tmax/dt))
# nplt = int(np.floor((tmax/25.)/dt))
# y0 = fft(y0,axis=0)
# T,Y = RK4(F_unscaled, y0, t0=0, t1=tmax, n=nmax)
#
# udata, tdata = np.real(ifft(y0,axis=0)).reshape(N,1), np.array(0.).reshape(1,1)
# for n in range(1,nmax+1):
# if np.mod(n,nplt) == 0:
# t = n*dt
# u = np.real( ifft(Y[n], axis=0) ).reshape(N,1)
# udata = np.concatenate((udata,np.nan_to_num(u)),axis=1)
# tdata = np.concatenate((tdata,np.array(t).reshape(1,1)),axis=1)
#
# return x, tdata, udata
#
#
#
#
# def Scaled():
# # Set up grid and two-soliton initial data:
# x = (2.*np.pi/N)*np.arange(-N/2,N/2).reshape(N,1)
# A, B = 25., 16.
# A_shift, B_shift = 2., 1.
# y0 = (3.*A**2.*np.cosh(.5*(A*(x+2.)))**(-2.) + 3*B**2.*np.cosh(.5*(B*(x+1.)))**(-2.)).reshape(N,)
# k = np.concatenate(( np.arange(0,N/2) ,
# np.array([0]) ,
# np.arange(-N/2+1,0,1) )).reshape(N,)
# ik3 = 1j*k**3.
#
# def F_scaled(t,U):
# E = np.exp(-ik3*t)
# E_recip = E**(-1.)
# out = -.5*1j*E*k*fft(ifft(E_recip*U,axis=0)**2.,axis=0)
# return out
#
#
# tmax = .006
# dt = .2*N**(-2.)
# nmax = int(round(tmax/dt))
# nplt = int(np.floor((tmax/25.)/dt))
# y0 = fft(y0,axis=0)
# T,Y = RK4(F_scaled, y0, t0=0, t1=tmax, n=nmax)
#
# udata, tdata = np.real(ifft(y0,axis=0)).reshape(N,1), np.array(0.).reshape(1,1)
# for n in range(1,nmax+1):
# if np.mod(n,nplt) == 0:
# t = n*dt
# u = np.real(ifft(np.exp(ik3*t)*(Y[n]),axis=0)).reshape(N,1)
# udata = np.concatenate((udata,np.nan_to_num(u)),axis=1)
# tdata = np.concatenate((tdata,np.array(t).reshape(1,1)),axis=1)
#
# return x, tdata, udata
#
#
#
# # x, tdata, udata = Scaled()
# x, tdata, udata = unScaled()
# # import sys; sys.exit()
# fig = plt.figure()# figsize=plt.figaspect(0.5))
# #---- First subplot
# # ax = fig.add_subplot(121, projection='3d')
# ax = fig.gca(projection='3d')
# ax.view_init(elev=45., azim=150)
# tv, xv = np.meshgrid(tdata,x,indexing='ij')
# surf = ax.plot_surface(tv, xv, udata.T, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
#
# tdata = tdata[0]
# ax.set_xlim(tdata[0], tdata[-1])
# ax.set_ylim(-pi, pi)
# ax.invert_yaxis()
# ax.set_zlim(0., 4000.)
# ax.set_xlabel('T'); ax.set_ylabel('X'); ax.set_zlabel('Z')
# # plt.savefig('interacting_solitons.png',dpi=100)
# plt.show()
# return
if __name__ == "__main__":
plot_soliton()
```
#### File: Vol4B/TotalVariation/solution.py
```python
from __future__ import division
import numpy as np
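# cheb(N) and the two variants below build the (N+1) x (N+1) Chebyshev spectral
# differentiation matrix D together with the Chebyshev points
# x_j = cos(pi*j/N), j = 0, ..., N; applying D to samples of a smooth function
# at these points gives a spectrally accurate approximation of its derivative.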
def cheb(N):
x = np.cos((np.pi/N)*np.linspace(0,N,N+1))
x.shape = (N+1,1)
lin = np.linspace(0,N,N+1)
lin.shape = (N+1,1)
c = np.ones((N+1,1))
c[0], c[-1] = 2., 2.
c = c*(-1.)**lin
X = x*np.ones(N+1) # broadcast along 2nd dimension (columns)
dX = X - X.T
# print " x = \n", x, "\n"
# print " c = \n", c, "\n"
# print " X = \n", X, "\n"
# print " dX = \n", dX, "\n"
D = (c*(1./c).T)/(dX + np.eye(N+1))
D = D - np.diag(np.sum(D.T,axis=0))
x.shape = (N+1,)
return D, x
def cheb_loop(N):
def p(j1):
if (j1==0 or j1 == N): return 2.
else: return 1.
x = np.cos(np.pi*np.arange(N+1)/N)
D = np.zeros((N+1,N+1))
# j represents column index
for j in range(0,N+1):
for i in range(0,j)+range(j+1,N+1):
D[i,j] = ((-1.)**(i+j))*p(i)/( p(j)*(x[i]- x[j]) )
# Values on the main diagonal
for j in xrange(1,N):
D[j,j] = -x[j]/(2.*(1-x[j]**2.))
D[0,0] = (1.+2.*N**2.)/6.
D[N,N] = -(1.+2.*N**2.)/6.
return D,x
def cheb_vectorized(N):
x = np.cos((np.pi/N)*np.linspace(0,N,N+1))
x.shape = (N+1,1)
lin = np.linspace(0,N,N+1)
lin.shape = (N+1,1)
c = np.ones((N+1,1))
c[0], c[-1] = 2., 2.
c = c*(-1.)**lin
X = x*np.ones(N+1) # broadcast along 2nd dimension (columns)
dX = X - X.T
D = (c*(1./c).T)/(dX + np.eye(N+1))
D = D - np.diag(np.sum(D.T,axis=0))
x.shape = (N+1,)
# Here we return the differentation matrix and the Chebychev points,
# numbered from x_0 = 1 to x_N = -1
return D, x
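# A minimal usage sketch (not part of the original solution file): check that
# the differentiation matrix recovers the derivative of u(x) = exp(x), which is
# again exp(x), on the Chebyshev points. N = 16 is an illustrative choice.
if __name__ == "__main__":
    D, x = cheb_vectorized(16)
    u = np.exp(x)
    print "max error in spectral derivative of exp(x):", np.max(np.abs(D.dot(u) - u))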
``` |
{
"source": "JoshuaMaina/golclinics-dsa",
"score": 4
} |
#### File: golclinics-dsa/gol-assignments/stacks.py
```python
def reverse_string(string):
stack = []
for chr in string:
stack.append(chr)
new_string = ""
while len(stack):
new_string += stack.pop()
return new_string
def brackets_match(string_of_brackets):
close_to_open = {
'}': '{',
')': '(',
        ']': '['
}
stack = []
for bracket in string_of_brackets:
if bracket in close_to_open:
if len(stack):
opener = stack.pop()
if opener != close_to_open[bracket]:
return False
else:
return False
else:
stack.append(bracket)
return not len(stack)
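# A small usage sketch (not part of the original assignment file) showing the
# expected behaviour of both helpers; the sample strings are arbitrary.
if __name__ == "__main__":
    print(reverse_string("stacks"))   # 'skcats'
    print(brackets_match("({[]})"))   # True
    print(brackets_match("([)]"))     # False
    print(brackets_match("((("))      # False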
``` |
{
"source": "joshuamangiola/python-itunes-playlist-converter",
"score": 3
} |
#### File: joshuamangiola/python-itunes-playlist-converter/scraper.py
```python
from urllib.request import urlopen
from bs4 import BeautifulSoup
import csv
playlist_url = "PLAYLIST_URL_GOES_HERE"
page = urlopen(playlist_url)
soup = BeautifulSoup(page, "html.parser")
playlist_title = soup.find("h1", class_="product-header__title").get_text()
track_list = soup.find("ul", class_="tracklist")
soup.find_all('tracklist-item__text__headline')
class playlistItem:
def __init__(self, title, artist):
self.title = title
self.artist = artist
playlist = []
for pi in track_list.select('li.tracklist-item'):
title = pi.find("span", class_="tracklist-item__text__headline").get_text()[2:-1]
artist = pi.find("a", class_="table__row__link--inline").get_text()
playlist.append(playlistItem(title, artist))
with open('output.csv', 'w') as csvfile:
fieldnames = ['Title', 'Artist']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for pi in playlist:
writer.writerow({'Title': pi.title, 'Artist': pi.artist})
print(playlist_title)
for pi in playlist:
print('\nTitle: ' + pi.title + '\nArtist: ' + pi.artist)
``` |