{
"source": "JeffreyAsuncion/PyGame_TWT",
"score": 3
}
#### File: PyGame_TWT/TWT/Lesson04.py
```python
import pygame
pygame.init()
win = pygame.display.set_mode((500,480))
pygame.display.set_caption("Change the name")
walkRight = [pygame.image.load('R1.png'), pygame.image.load('R2.png'), pygame.image.load('R3.png'),
pygame.image.load('R4.png'), pygame.image.load('R5.png'), pygame.image.load('R6.png'),
pygame.image.load('R7.png'), pygame.image.load('R8.png'), pygame.image.load('R9.png')]
walkLeft = [pygame.image.load('L1.png'), pygame.image.load('L2.png'), pygame.image.load('L3.png'),
pygame.image.load('L4.png'), pygame.image.load('L5.png'), pygame.image.load('L6.png'),
pygame.image.load('L7.png'), pygame.image.load('L8.png'), pygame.image.load('L9.png')]
bg = pygame.image.load('bg.jpg')
char = pygame.image.load('standing.png')
clock = pygame.time.Clock()
# Create a class with all attributes for our player
class player(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.vel = 5
self.isJump = False
self.jumpCount = 10
self.left = False
self.right = False
self.walkCount = 0
def draw(self, win):
if self.walkCount + 1 >= 27:
self.walkCount = 0
if self.left:
win.blit(walkLeft[self.walkCount//3], (self.x, self.y))
self.walkCount += 1
elif self.right:
win.blit(walkRight[self.walkCount//3], (self.x, self.y))
self.walkCount += 1
else:
win.blit(char, (self.x, self.y))
            self.walkCount = 0  # reset the instance attribute (a bare walkCount would only create a local variable)
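        # Animation timing: clock.tick(27) caps the loop at 27 ticks per second and
        # walkCount wraps back to 0 once it reaches 27, so walkCount // 3 steps through
        # the nine walk sprites, holding each frame for three ticks.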
# Global Variables to the class player
# x = 50
# y = 400
# width = 40
# height = 60
# vel = 5
# isJump = False
# jumpCount = 10
# left = False
# right = False
# walkCount = 0
def redrawGameWindow():
# global walkCount ### this is in player class now
win.blit(bg, (0,0))
# step 2 clean up
# move this as a method of class player
man.draw(win)
# if walkCount + 1 >= 27:
# walkCount = 0
# if left:
# win.blit(walkLeft[walkCount//3], (x,y))
# walkCount += 1
# elif right:
# win.blit(walkRight[walkCount//3], (x,y))
# walkCount += 1
# else:
# win.blit(char, (x, y))
# walkCount = 0
pygame.display.update()
# a lot of the modifications just add "man." in front of the player attributes to link them to the player class
# step 1 clean up
# mainloop
# spawn our player
man = player(300, 410, 64, 64) # sprite size is 64 x 64
run = True
while run:
clock.tick(27)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT] and man.x > man.vel:
man.x -= man.vel
man.left = True
man.right = False
elif keys[pygame.K_RIGHT] and man.x < 500 - man.vel - man.width:
man.x += man.vel
man.left = False
man.right = True
else:
man.left = False
man.right = False
man.walkCount = 0
if not(man.isJump):
if keys[pygame.K_SPACE]:
man.isJump = True
man.left = False
man.right = False
man.walkCount = 0
else:
if man.jumpCount >= -10:
man.y -= (man.jumpCount * abs(man.jumpCount)) * 0.5
man.jumpCount -= 1
else:
man.jumpCount = 10
man.isJump = False
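        # Jump arc: jumpCount runs from 10 down to -10. While it is positive,
        # y -= 0.5 * jumpCount * |jumpCount| moves the player up; once it goes negative
        # the same expression becomes negative, so the player falls back down along a
        # symmetric parabola, and at -10 the counter resets and isJump is cleared.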
redrawGameWindow()
pygame.quit()
```
{
"source": "JeffreyAsuncion/PythonEssentialTraining",
"score": 3
}
#### File: PythonEssentialTraining/Chapter07/function-working copy.py
```python
def main():
x = kitten()
print(type(x), x)
def kitten():
print("Meow.")
return 42
if __name__ == '__main__':
main()
```
#### File: PythonEssentialTraining/Chapter08/mixed_working.py
```python
dlevel = 0 # manage nesting level
def main():
r = range(11)
l = [ 1, 'two', 3, {'4' : 'four'}, 5 ]
t = ( 'one', 'two', None, 'four', 'five' )
s = set("It's a bird! It's a plane! It's Superman!")
d = dict( one = r, two = l, three = s)
mixed = [ l, r, s, d, t]
disp(mixed)
def disp(o):
global dlevel
dlevel += 1
if isinstance(o, list): print_list(o)
elif isinstance(o, range): print_list(o)
elif isinstance(o, tuple): print_tuple(o)
elif isinstance(o, set): print_set(o)
elif isinstance(o, dict): print_dict(o)
elif o is None: print('Nada', end=' ', flush=True)
else: print(repr(o), end=' ', flush=True)
dlevel -= 1
if dlevel <= 1: print() # newline after outer
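# disp() is the single dispatcher: each print_* helper calls disp() back for every element,
# so arbitrarily nested containers are rendered inline, and dlevel tracks the recursion
# depth so the trailing newline is only printed once control returns to the top level.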
def print_list(o):
print('[', end=' ')
for x in o: disp(x)
print(']', end=' ', flush=True)
def print_tuple(o):
print('(', end=' ')
for x in o: disp(x)
print(')', end=' ', flush=True)
def print_set(o):
print('{', end=' ')
for x in sorted(o): disp(x)
print('}', end=' ', flush=True)
def print_dict(o):
print('{', end=' ')
for k, v in o.items():
print(k, end=': ' )
disp(v)
print('}', end=' ', flush=True)
if __name__ == '__main__': main()
```
#### File: PythonEssentialTraining/Chapter08/tuple-working.py
```python
def main():
game = ('Rock', 'Paper', 'Scissors', 'Lizard', 'Spock')
# game.append('Computer') # tuples are immutable
print_list(game)
def print_list(o):
for i in o:
print(i, end=' ', flush=True)
print()
if __name__ == '__main__':
main()
```
#### File: PythonEssentialTraining/Chapter09/class-working.py
```python
class Duck:
sound = 'Quack quack.'
movement = 'Walks like a duck.'
def quack(self):
print(self.sound)
def move(self):
print(self.movement)
def main():
donald = Duck()
donald.quack()
donald.move()
if __name__ == '__main__':
main()
```
#### File: PythonEssentialTraining/Chapter09/variables-working.py
```python
class Animal:
# class variable
x = [1,2,3]
def __init__(self, **kwargs):
self._type = kwargs['type'] if 'type' in kwargs else 'kitten'
self._name = kwargs['name'] if 'name' in kwargs else 'fluffy'
self._sound = kwargs['sound'] if 'sound' in kwargs else 'meow'
def type(self, t=None):
if t:
self._type = t
return self._type
def name(self, n=None):
if n:
self._name = n
return self._name
def sound(self, s=None):
if s:
self._sound = s
return self._sound
def __str__(self):
return f'The {self.type()} is named "{self.name()}" and says "{self.sound()}"!!!'
def main():
a0 = Animal(type='kitten', name='fluffy', sound='rwar')
a1 = Animal(type='duck', name='Donald', sound='quack')
print(a0)
print(a0.x)
a0.x[0] = 3 # this changes in a1 because x is a class variable
print(a1)
print(a1.x)
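    # a0.x and a1.x are the same list object on the Animal class, so mutating it through
    # one instance is visible through the other; assigning a0.x = [...] instead would
    # create an instance attribute that shadows the shared class variable.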
if __name__ == '__main__':
main()
```
#### File: PythonEssentialTraining/Chapter10/exceptions-working.py
```python
class inclusive_range:
def __init__(self, *args):
numargs = len(args)
self._start = 0
self._step = 1
if numargs < 1:
raise TypeError(f'expected at least 1 argument, got {numargs}')
elif numargs == 1:
self._stop = args[0]
elif numargs == 2:
(self._start, self._stop) = args
elif numargs == 3:
(self._start, self._stop, self._step) = args
else:
raise TypeError(f'expected at most 3 arguments, got {numargs}')
self._next = self._start
def __iter__(self):
return self
# like a next in a linked list
def __next__(self):
if self._next > self._stop:
raise StopIteration
else:
_r = self._next
self._next += self._step
return _r
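    # Because the test is self._next > self._stop, the stop value itself is yielded when
    # the step lands on it: inclusive_range(5) gives 0 1 2 3 4 5 and
    # inclusive_range(1, 10, 2) gives 1 3 5 7 9.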
def main():
try:
for n in inclusive_range(25,4,4,4,4,4):
print(n, end=' ')
print()
except TypeError as e:
print(f'range error: {e}')
if __name__ == '__main__':
main()
```
#### File: PythonEssentialTraining/Chapter12/copy-bin-working.py
```python
def main():
    infile = open('berlin.jpg', 'rb') # read binary
    outfile = open('berlin-copy.jpg', 'wb') # write binary
while True:
buf = infile.read(10240)
if buf:
outfile.write(buf)
print('.', end='', flush=True)
else:
break
outfile.close()
print('\ndone.')
if __name__ == '__main__':
main()
```
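The same chunked copy can be written with context managers so both files are closed even if the copy fails partway; a minimal sketch, assuming the same filenames and chunk size as above:
```python
def copy_binary(src='berlin.jpg', dst='berlin-copy.jpg', chunk=10240):
    # 'rb' / 'wb' open the files in binary mode; read() returns b'' at end of file
    with open(src, 'rb') as infile, open(dst, 'wb') as outfile:
        while True:
            buf = infile.read(chunk)
            if not buf:
                break
            outfile.write(buf)
            print('.', end='', flush=True)
    print('\ndone.')

if __name__ == '__main__':
    copy_binary()
```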
#### File: PythonEssentialTraining/Chapter14/modules-working.py
```python
import sys
import os
import random
import datetime
def main():
v = sys.version_info
    print('Python version {}.{}.{}'.format(*v)) # *v unpacks the version_info tuple
v = sys.platform
print(v)
v = os.name
print(v)
v = os.getenv('PATH')
print(v)
v = os.getcwd() # Current Working Directory
print(v)
    v = os.urandom(25) # bytes object
print(v)
x = random.randint(1, 1000)
print(x)
x = list(range(25))
print(x)
random.shuffle(x)
print(x)
now = datetime.datetime.now()
print(now, now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond)
if __name__ == '__main__': main()
```
{
"source": "JeffreyAsuncion/WebScraping_BeautifulSoup",
"score": 3
}
#### File: JeffreyAsuncion/WebScraping_BeautifulSoup/working003.py
```python
import requests
from bs4 import BeautifulSoup
def extract(page):
#google 'my user agent'
my_user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
headers = {'User-Agent' : my_user_agent}
url = f'https://www.indeed.com/jobs?q=python+developer&l=New+York%2C+NY&start={page}'
    r = requests.get(url, headers=headers)  # pass headers as a keyword; positionally they would be sent as query params
soup = BeautifulSoup(r.content, 'html.parser')
return soup
def transform(soup):
divs = soup.find_all('div', class_ = 'jobsearch-SerpJobCard')
for item in divs:
title = item.find('a').text.strip()
company = item.find('span', class_ = 'company').text.strip()
# not every post has salary
try:
salary = item.find('span', class_ = 'salaryText').text.strip()
except:
salary = 'Not Available'
summary = item.find('div', {'class' : 'summary'}).text.strip().replace('\n', '')
job = {
'title' : title,
'company' : company,
'salary' : salary,
'summary' : summary
}
joblist.append(job)
return
# 16:30
joblist = []
c = extract(0)
transform(c)
print(len(joblist))
print(joblist)
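# To scrape more result pages, the start parameter in the URL can be advanced
# (Indeed paginated in steps of 10 at the time of this tutorial), e.g.:
# for page in range(0, 40, 10):
#     transform(extract(page))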
```
{
"source": "jeffreybarrick/barricklab-tacobot",
"score": 2
}
#### File: jeffreybarrick/barricklab-tacobot/tacobot.py
```python
import os
import sys
import time
import csv
import pprint
import re
import pickle
import time
from slackclient import SlackClient
# tacobot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "do"
# instantiate Slack client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
TACO_FILE_PATH = os.path.dirname(os.path.abspath(sys.argv[0])) + "/" + "taco.csv"
TACO_USER_PICKLE_PATH = os.path.dirname(os.path.abspath(sys.argv[0])) + "/" + "taco_users.pkl"
TACO_HEADER_LIST = ['user_id', 'current_tacos', 'all_time_tacos']
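# taco.csv holds one row per user with these columns, e.g. (values are illustrative):
#   U024BE7LH,2,11   ->  user_id, current_tacos, all_time_tacos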
TACO_AUTHORIZED_USER_NAMES = ['simonvdalton', 'jbarrick']
NUM_TACOS_TO_REDEEM = 3
USER_INFO = {}
USER_INFO_LAST_SLACK_LOAD_TIME = 0
USER_INFO_SLACK_RELOAD_TIME = 600 #10 minutes
BOT_USER_NAME = "tacobot"
#Looks up info about a specific user.
#Reloads info from Slack if a certain amount of time has passed
#Otherwise uses a pickled version
def load_user_info():
global USER_INFO
global USER_INFO_LAST_SLACK_LOAD_TIME
reload = 0
if not os.path.isfile(TACO_FILE_PATH) or (time.time() - USER_INFO_LAST_SLACK_LOAD_TIME > USER_INFO_SLACK_RELOAD_TIME):
USER_INFO_LAST_SLACK_LOAD_TIME = time.time()
reload = 1
#undefined, missing
if reload==1:
print "***Reloading user list from Slack\n"
r = slack_client.api_call(
"users.list",
)
pickle.dump( r['members'], open( TACO_USER_PICKLE_PATH, "wb" ) )
else:
print "***Using cached user list\n"
print "*Loading user list from file\n"
USER_INFO = pickle.load( open( TACO_USER_PICKLE_PATH, "rb" ) )
#pprint.pprint(USER_INFO)
def read_tacos():
#does it exist?
if not os.path.isfile(TACO_FILE_PATH):
return {}
tacos = {}
with open(TACO_FILE_PATH, 'r') as csvfile:
csv_reader = csv.DictReader(csvfile)
for row in csv_reader:
print(row)
tacos[row['user_id']] = row
return tacos
def write_tacos(tacos):
#does it exist?
with open(TACO_FILE_PATH, 'w') as csvfile:
csv_writer = csv.DictWriter(csvfile, ['user_id', 'current_tacos', 'all_time_tacos'])
csv_writer.writeheader()
for key in tacos:
taco_line = tacos[key]
taco_line['user_id'] = key
csv_writer.writerow(taco_line)
def get_name_to_user_id_lookup_table():
global USER_INFO
load_user_info()
name_to_user_id_lookup_table = {}
for member in USER_INFO:
name_to_user_id_lookup_table[member['name']] = member['id']
return name_to_user_id_lookup_table
def get_user_id_to_name_lookup_table():
global USER_INFO
load_user_info()
user_id_to_name_lookup_table = {}
for member in USER_INFO:
user_id_to_name_lookup_table[member['id']] = member['name']
#pprint.pprint(user_id_to_name_lookup_table)
return user_id_to_name_lookup_table
def handle_command(command, channel, user):
global BOT_USER_NAME
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
user_id_to_name_lookup_table = get_user_id_to_name_lookup_table()
print command
user_id = ""
pattern = re.compile(r'<@(\S+)>')
user_id_match = re.search(pattern, command)
if user_id_match:
user_id = user_id_match.group(1).upper()
print user_id
if (command.count("standings")):
tacos = read_tacos()
message = "user current [all-time]\n"
for user_id in tacos:
entry = tacos[user_id]
#pprint.pprint(entry)
message += user_id_to_name_lookup_table[entry['user_id']] + " " + entry['current_tacos'] + " [" + entry['all_time_tacos'] + "]\n"
slack_client.api_call(
"chat.postMessage",
icon_emoji=":trophy:",
username="Taco Point Standings",
channel=channel,
text= message,
as_user=False
)
return
user_id_to_name_lookup_table = get_user_id_to_name_lookup_table()
calling_user_name = user_id_to_name_lookup_table[user]
if not calling_user_name in TACO_AUTHORIZED_USER_NAMES:
if calling_user_name=="spleonard1":
slack_client.api_call(
"chat.postMessage",
channel=channel,
text= "Not you again! AAAAAAHHHHH!",
as_user=True
)
else:
slack_client.api_call(
"chat.postMessage",
channel=channel,
text= "Nice try! You are not authorized to give/redeem taco points.",
as_user=True
)
return
# Did we find a valid username?
# The first one in the line is the one we will give tacos to
if user_id:
num_tacos = command.count(":taco:")
if num_tacos > 0:
tacos = read_tacos()
if (not user_id in tacos):
tacos[user_id] = {'user_id' : user_id, 'current_tacos' : 0, 'all_time_tacos' : 0}
num_redeem = command.count("redeem")
if num_redeem > 0:
if int(tacos[user_id]['current_tacos']) < 3:
slack_client.api_call(
"chat.postMessage",
channel=channel,
text= ("You need " + str(NUM_TACOS_TO_REDEEM) + " taco points to redeem!\n" +
"(" + tacos[user_id]['current_tacos'] + " in the bank and " + tacos[user_id]['all_time_tacos'] + " all-time.)"),
as_user=True
)
return
else:
tacos[user_id]['current_tacos'] = str(int(tacos[user_id]['current_tacos']) - NUM_TACOS_TO_REDEEM)
slack_client.api_call(
"chat.postMessage",
channel=channel,
                        text= (":taco: CONG :taco: RATU :taco: <@" + user_id + "> :taco: LAT :taco: IONS! :taco:\n" +
"(" + tacos[user_id]['current_tacos'] + " in the bank and " + tacos[user_id]['all_time_tacos'] + " all-time.)"),
as_user=True
)
write_tacos(tacos)
else:
if (not user_id in tacos):
tacos[user_id] = {'user_id' : user_id, 'current_tacos' : 0, 'all_time_tacos' : 0}
tacos[user_id]['current_tacos'] = str(int(tacos[user_id]['current_tacos']) + num_tacos)
tacos[user_id]['all_time_tacos'] = str(int(tacos[user_id]['all_time_tacos']) + num_tacos)
write_tacos(tacos)
slack_client.api_call(
"chat.postMessage",
channel=channel,
text= ("<@" + user_id + "> earned " + str(num_tacos) + " taco point" + ( "s" if (num_tacos>1) else "") + "!\n" +
"(" + tacos[user_id]['current_tacos'] + " in the bank and " + tacos[user_id]['all_time_tacos'] + " all-time.)"),
as_user=True
)
#for (user_id) in re.findall(pattern, command):
# user_id = user_id.upper()
#slack_client.api_call(
# "chat.postMessage",
# channel=channel,
# text= user_id + " = " + user_id_to_name_lookup_table[user_id],
# as_user=True
#)
slack_client.api_call(
"chat.postMessage",
channel=channel,
text="Hello from :taco: land!",
as_user=True
)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
#pprint.pprint(output)
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel'], output['user']
return None, None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("tacobot connected and running!")
while True:
command, channel, user = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel, user)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
```
{
"source": "jeffreyblair/ML",
"score": 3
}
#### File: ML/cross_val/cross_validation.py
```python
import numpy as np
def cross_validation(data, num_folds, lambda_seq):
"""
Cross validation function
:param data: data
:param num_folds: number of partitions
    :param lambda_seq: sequence of regularization (lambda) values to evaluate
:return: vector of errors
"""
data_shf = shuffle_data(data)
er = []
for i in range(len(lambda_seq)):
lambd = lambda_seq[i]
cv_loss_lmd = 0
for fold in range(1, num_folds + 1):
val_cv, train_cv = split_data(data_shf, num_folds, fold-1)
model = train_model(train_cv, lambd)
cv_loss_lmd += loss(val_cv, model)
er.append(cv_loss_lmd/num_folds)
return er
def split_data(data, num_folds, fold):
"""
Partitions and returns selected partition
:param data: data
:param num_folds: num folds to make
:param fold: selected fold
:return: data_fold, data_rest
"""
X = data['X']
t = data['t']
xJoinT = np.c_[X, t]
rows = np.size(xJoinT, 0)
ends = rows%num_folds
removed = []
if ends != 0:
for i in range(ends):
removed.append(xJoinT[i])
xJoinT = np.delete(xJoinT, i, axis=0)
folds = np.split(xJoinT, num_folds)
for r in removed:
folds[i] = np.vstack((folds[i], r))
data_fold = folds.pop(fold)
data_rest = folds[0]
for i in range(1, len(folds)):
data_rest = np.vstack((data_rest, folds[i]))
last_fold = np.shape(data_fold)[1] - 1
last_rest = np.shape(data_rest)[1] - 1
t_fold = data_fold[:,last_fold].flatten()
t_rest = data_rest[:,last_rest].flatten()
X_fold = np.delete(data_fold, last_fold, 1)
X_rest = np.delete(data_rest, last_rest, 1)
df = {'X': X_fold, 't': t_fold}
dr = {'X': X_rest, 't': t_rest}
return df, dr
def predict(data, model):
"""
Fitted values for data
:param data: X vals
:param model: fitted regression param
:return: fitted y vals
"""
return np.dot(data['X'], model)
def loss(data, model):
"""
Computes Squared Error Loss of data on weights in model
:param data: Data
:param model: vector of weights
:return: squared error loss
"""
w = model
X = data['X']
t = data['t']
n = np.size(t)
return (np.linalg.norm(t - predict(data, model)) ** 2)/n
def train_model(data, lambd):
"""
Fits a model to the data based on lambda
:param data: data to be fitted
    :param lambd: regularization strength (lambda)
:return: weight vector
"""
X = data['X']
t = data['t']
XTX = np.linalg.inv((np.dot(np.transpose(X), X) + lambd*np.identity(np.shape(X)[1])))
Xt = np.dot(np.transpose(X), t)
return np.dot(XTX, Xt)
def shuffle_data(data):
"""
Shuffle data keeping pairs in place
:param data: dict{'X': data 't': targets}
:return: shuffled data
"""
X = data['X']
t = data['t']
xJoinT = np.c_[X, t]
shuffled = np.random.permutation(xJoinT)
last_col = np.shape(shuffled)[1] - 1
t = shuffled[:,last_col]
X = np.delete(shuffled, last_col, 1)
return {'X': X, 't': t}
```
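A minimal usage sketch for the functions above, assuming `data` is a dict with an N x D array under `'X'` and an N-vector under `'t'` as the docstrings describe; the synthetic data, module import, and lambda grid are illustrative:
```python
import numpy as np
# from cross_validation import cross_validation   # assuming this module name

np.random.seed(0)
X = np.random.randn(100, 5)                       # 100 examples, 5 features
w_true = np.array([1.0, -2.0, 0.5, 0.0, 3.0])     # illustrative "true" weights
t = X @ w_true + 0.1 * np.random.randn(100)       # targets with a little noise

data = {'X': X, 't': t}
lambda_seq = np.linspace(0.01, 5.0, 10)           # illustrative regularization grid
errors = cross_validation(data, num_folds=5, lambda_seq=lambda_seq)
print('CV errors:', errors)
print('best lambda:', lambda_seq[int(np.argmin(errors))])
```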
#### File: ML/logistic_regression/logistic.py
```python
import numpy as np
from utils import sigmoid
def logistic_predict(weights, data):
"""
Compute the probabilities predicted by the logistic classifier.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to the bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
"""
ones = np.array([[1] for i in range(data.shape[0])])
n_data = np.c_[data, ones]
y = n_data @ weights
return sigmoid(y)
def evaluate(targets, y):
"""
Compute evaluation metrics.
Inputs:
targets : N x 1 vector of targets.
y : N x 1 vector of probabilities.
Outputs:
ce : (scalar) Cross entropy. CE(p, q) = E_p[-log q]. Here we want to compute CE(targets, y)
frac_correct : (scalar) Fraction of inputs classified correctly.
"""
ce = -np.sum(targets * np.log(y) + (1-targets) * np.log(1-y))/len(targets)
predictions = [1 if y_i > 0.5 else 0 for y_i in y]
correct = [1 if predictions[i] == targets[i] else 0 for i in range(len(predictions))]
frac_correct = sum(correct)/len(correct)
return ce, frac_correct
def logistic(weights, data, targets, hyperparameters):
"""
Calculate negative log likelihood and its derivatives with respect to weights.
Also return the predictions.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
        targets: N x 1 vector of target class probabilities.
hyperparameters: The hyperparameters dictionary.
Outputs:
f: The sum of the loss over all data points. This is the objective that we want to minimize.
df: (M+1) x 1 vector of derivative of f w.r.t. weights.
y: N x 1 vector of probabilities.
"""
y = logistic_predict(weights, data)
f, frac = evaluate(targets, y)
d = data.T @ (y - targets)
db = np.array([sum((y - targets))])
zero = np.array([[0]])
df = np.r_[d, db]
return f, df, y
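# Note on the gradient: data.T @ (y - targets) is the derivative of the summed
# cross-entropy with respect to the M weights, and sum(y - targets) is the derivative
# with respect to the bias; np.r_ stacks them so df lines up with the (M+1) x 1 weights.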
def logistic_pen(weights, data, targets, hyperparameters):
"""
Calculate negative log likelihood and its derivatives with respect to weights.
Also return the predictions.
Note: N is the number of examples and
M is the number of features per example.
Inputs:
weights: (M+1) x 1 vector of weights, where the last element
corresponds to bias (intercepts).
data: N x M data matrix where each row corresponds
to one data point.
        targets: N x 1 vector of target class probabilities.
hyperparameters: The hyperparameters dictionary.
Outputs:
f: The sum of the loss over all data points. This is the objective that we want to minimize.
        df: (M+1) x 1 vector of derivative of f w.r.t. weights.
        y: N x 1 vector of probabilities.
"""
lambd = hyperparameters['weight_regularization']
f, dwb, y = logistic(weights, data, targets, hyperparameters)
regularizer = hyperparameters['weight_regularization'] * weights
regularizer[-1] = 0 # do not penalize bias
df = dwb + regularizer
return f, df, y
```
{
"source": "JeffreyBradley772/Maze",
"score": 4
}
#### File: JeffreyBradley772/Maze/grid.py
```python
class Grid(object):
def __init__(self, width, height, defaultFill):
self._grid = [[defaultFill for x in range(width)] for y in range(height)]
def __getitem__(self, index):
return self._grid[index]
def getHeight(self):
return len(self._grid)
def getWidth(self):
return len(self._grid[0])
def __str__(self):
return "\n".join(["".join(x) for x in self._grid])
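    # The grid is stored row-major: _grid holds `height` rows of `width` cells, so it is
    # indexed as g[row][col]; getHeight() is the number of rows and getWidth() the row length.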
if __name__ == "__main__":
g = Grid(10, 20, "*")
print(g)
```
{
"source": "JeffreyBradley772/ML-Work",
"score": 3
}
#### File: JeffreyBradley772/ML-Work/LogisticRegModel.py
```python
from tensorflow.keras.datasets import fashion_mnist
from sklearn.model_selection import train_test_split
import tensorflow as tf
#10 classes (of clothing) -> 0-9
#784 features
#weight matrix so that each feature is a percentage of each class
# Load in dataset
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
#print(x_train, y_train)
print(y_train.shape)
print(x_train.shape)
x_train = x_train/255 # normalize dataset
#y_train = y_train/255
x_test = x_test/255
#y_test = y_test/255
# split data into 80% training and 20% validation
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.20)
##
##
##
x_train = tf.reshape(x_train,[-1,784])# reshape train input to be a 1-D vector
#tf.reshape(y_train,[-1,784])
x_test = tf.reshape(x_test,[-1,784])
#tf.reshape(y_test,[-1,784]) # reshape test input to be a 1-D vector
#print(x_train.shape)
##
##
#dtype = tf.float64
#weight matrix should be 784x10
#bias works at the class level:
##one bias per class, so 10 biases
##shape (10,)
w0 = tf.Variable(tf.random.normal([784,10], dtype = tf.float64)) # Randomly initialize weight matrix
bias = tf.Variable(tf.random.normal([10,], dtype = tf.float64)) # Randomly initialize bias vector
#print(w0.shape)
#print(type(bias))
##
##
def logistic_regression(x_values):
log_r = tf.matmul(x_values,w0) + bias
#log_r = tf.add(tf.matmul(x_values,w0), bias) # Matrix multiply x values with weight matrix and add bias
return log_r
##
def cross_entropy(y_values, y_pred):
#predictions come from logistic regression
y_values = tf.one_hot(y_values,10) # One-hot encoding vector of y values
#loss = tf.reduce_mean(tf.square(y_values - v_pred)) # calculate the loss
loss = tf.nn.softmax_cross_entropy_with_logits(y_values, y_pred) #measures cross entropy between y-predictions and y trues
#type of loss function
#true y's are one hot encoded
    #y_pred here are raw class scores (logits), not yet probabilities
    #labels -> the true, one-hot encoded classes
    #logits -> the unnormalized scores from the linear model; softmax is applied internally, so pass the raw log_r output
return tf.reduce_mean(loss) # return the average loss
##
def accuracy(y_values, y_pred):
    #argmax picks the class with the highest score for each example and checks
    #whether that predicted class matches the true class
# values have to be casted as a 32 bit integer
y_values = tf.cast(y_values, dtype=tf.int32)
predictions = tf.cast(tf.argmax(y_pred, axis=1),dtype=tf.int32)
predictions = tf.equal(y_values,predictions) # how many y values equal their predictions
return tf.reduce_mean(tf.cast(predictions,dtype=tf.float32)) # return the average correct predictions
##
def gradient(x_values, y_values):
with tf.GradientTape() as t: # initialize your gradient tape
#yhat = x_values * w0 + bias# obtain your prediction values
yhat = logistic_regression(x_values)
loss = cross_entropy(y_values, yhat) # calculate your loss
return t.gradient(loss,[w0,bias]) # return the gradient calculation with the losses and parameter list
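# t.gradient(loss, [w0, bias]) returns the gradients in the same order as the variable
# list, which is why the training loop zips them back with [w0, bias] before calling
# apply_gradients.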
##
batches = 10000
learning_rate = 0.01
batch_size = 128
##
###slicing and shuffling the batches so that our model is not biased by the order of the data
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat().shuffle(x_train.shape[0]).batch(batch_size)
##
optimizer1 = tf.optimizers.SGD(learning_rate) # use stochastic gradient descent optimizer
##
for batch_number, (batch_x, batch_y) in enumerate(dataset.take(batches), 1):
gradient1 = gradient(batch_x, batch_y) # find the gradient using your function
optimizer1.apply_gradients(zip(gradient1,[w0,bias])) # apply the gradients to your parameters, use zip to pair gradient with parameters
yhat = logistic_regression(batch_x) # obtain predictions for your logistic regression
loss = cross_entropy(batch_y, yhat) # calculate the loss
accuracy1 = accuracy(batch_y, yhat) # calculate the accuracy
print("Batch number: %i, loss: %f, accuracy: %f" % (batch_number, loss, accuracy1))
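# A held-out check could follow training; x_test was already normalized and reshaped
# above, so something like this (illustrative, not part of the original script) would
# report test-set accuracy:
# test_logits = logistic_regression(x_test)
# print("Test accuracy:", accuracy(y_test, test_logits).numpy())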
##
```
{
"source": "jeffreybreen/h2o",
"score": 2
}
#### File: py/new/h2o_cloud.py
```python
import time, os, stat, json, signal, tempfile, shutil, datetime, inspect, threading, getpass
import requests, argparse, sys, unittest, glob
import urlparse, logging, random
import psutil, requests
import h2o_sandbox
# used in shutil.rmtree permission hack for windows
import errno
# For checking ports in use, using netstat thru a subprocess.
from subprocess import Popen, PIPE
def verboseprint(*args, **kwargs):
if verbose:
for x in args: # so you don't have to create a single string
print x,
for x in kwargs: # so you don't have to create a single string
print x,
print
# so we can see problems when hung?
sys.stdout.flush()
# The cloud is uniquely named per user (only)
# Fine to uniquely identify the flatfile by name only also?
# Both are the user that runs the test. The config might have a different username on the
# remote machine (0xdiag, say, or hduser)
def flatfile_name():
return ('pytest_flatfile-%s' % getpass.getuser())
# only usable after you've built a cloud (junit, watch out)
def cloud_name():
return nodes[0].cloud_name
def __drain(src, dst):
for l in src:
if type(dst) == type(0):
os.write(dst, l)
else:
dst.write(l)
dst.flush()
src.close()
if type(dst) == type(0):
os.close(dst)
def drain(src, dst):
t = threading.Thread(target=__drain, args=(src, dst))
t.daemon = True
t.start()
# Hackery: find the ip address that gets you to Google's DNS
# Trickiness because you might have multiple IP addresses (Virtualbox), or Windows.
# we used to not like giving ip 127.0.0.1 to h2o?
def get_ip_address():
if ipaddr_from_cmd_line:
verboseprint("get_ip case 1:", ipaddr_from_cmd_line)
return ipaddr_from_cmd_line
import socket
ip = '127.0.0.1'
socket.setdefaulttimeout(0.5)
hostname = socket.gethostname()
# this method doesn't work if vpn is enabled..it gets the vpn ip
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
ip = s.getsockname()[0]
verboseprint("get_ip case 2:", ip)
except:
pass
if ip.startswith('127'):
# drills down into family
ip = socket.getaddrinfo(hostname, None)[0][4][0]
verboseprint("get_ip case 3:", ip)
ipa = None
try:
# Translate a host name to IPv4 address format, extended interface.
# Return a triple (hostname, aliaslist, ipaddrlist)
# where hostname is the primary host name responding to the given ip_address,
# aliaslist is a (possibly empty) list of alternative host names for the same address, and
# ipaddrlist is a list of IPv4 addresses for the same interface on the same host
ghbx = socket.gethostbyname_ex(hostname)
for ips in ghbx[2]:
# only take the first
if ipa is None and not ips.startswith("127."):
ipa = ips[:]
verboseprint("get_ip case 4:", ipa)
if ip != ipa:
print "\nAssuming", ip, "is the ip address h2o will use but", ipa, "is probably the real ip?"
print "You might have a vpn active. Best to use '-ip " + ipa + "' to get python and h2o the same."
except:
pass
# print "Timeout during socket.gethostbyname_ex(hostname)"
verboseprint("get_ip_address:", ip)
# set it back to default higher timeout (None would be no timeout?)
socket.setdefaulttimeout(5)
return ip
def get_sandbox_name():
if os.environ.has_key("H2O_SANDBOX_NAME"):
return os.environ["H2O_SANDBOX_NAME"]
else:
return "sandbox"
def unit_main():
global python_test_name, python_cmd_args, python_cmd_line, python_cmd_ip, python_username
# python_test_name = inspect.stack()[1][1]
python_test_name = ""
python_cmd_args = " ".join(sys.argv[1:])
python_cmd_line = "python %s %s" % (python_test_name, python_cmd_args)
python_username = getpass.getuser()
print "\nTest: %s command line: %s" % (python_test_name, python_cmd_line)
parse_our_args()
unittest.main()
verbose = False
ipaddr_from_cmd_line = None
config_json = None
debugger = False
random_seed = None
beta_features = True
abort_after_import = False
debug_rest = False
# jenkins gets this assign, but not the unit_main one?
# python_test_name = inspect.stack()[1][1]
python_test_name = ""
# trust what the user says!
if ipaddr_from_cmd_line:
python_cmd_ip = ipaddr_from_cmd_line
else:
python_cmd_ip = get_ip_address()
# no command line args if run with just nose
python_cmd_args = ""
# don't really know what it is if nosetests did some stuff. Should be just the test with no args
python_cmd_line = ""
python_username = getpass.getuser()
def parse_our_args():
parser = argparse.ArgumentParser()
# can add more here
parser.add_argument('-v', '--verbose', help='increased output', action='store_true')
parser.add_argument('-ip', '--ip', type=str, help='IP address to use for single host H2O with psutil control')
parser.add_argument('-cj', '--config_json',
help='Use this json format file to provide multi-host defaults. Overrides the default file pytest_config-<username>.json. These are used only if you do build_cloud_with_hosts()')
parser.add_argument('-dbg', '--debugger', help='Launch java processes with java debug attach mechanisms',
action='store_true')
parser.add_argument('-s', '--random_seed', type=int, help='initialize SEED (64-bit integer) for random generators')
parser.add_argument('-bf', '--beta_features', help='enable or switch to beta features (import2/parse2)',
action='store_true')
parser.add_argument('-debug_rest', '--debug_rest', help='Print REST API interactions to rest.log',
action='store_true')
parser.add_argument('unittest_args', nargs='*')
args = parser.parse_args()
global verbose, ipaddr_from_cmd_line, config_json, debugger
global random_seed, beta_features, debug_rest
verbose = args.verbose
ipaddr_from_cmd_line = args.ip
config_json = args.config_json
debugger = args.debugger
random_seed = args.random_seed
debug_rest = args.debug_rest
    # Set sys.argv to the unittest args (leave sys.argv[0] as is)
# FIX! this isn't working to grab the args we don't care about
# Pass "--failfast" to stop on first error to unittest. and -v
# won't get this for jenkins, since it doesn't do parse_our_args
sys.argv[1:] = ['-v', "--failfast"] + args.unittest_args
# sys.argv[1:] = args.unittest_args
def find_file(base):
f = base
if not os.path.exists(f): f = '../' + base
if not os.path.exists(f): f = '../../' + base
if not os.path.exists(f): f = 'py/' + base
# these 2 are for finding from h2o-perf
if not os.path.exists(f): f = '../h2o/' + base
if not os.path.exists(f): f = '../../h2o/' + base
if not os.path.exists(f):
raise Exception("unable to find file %s" % base)
return f
# shutil.rmtree doesn't work on windows if the files are read only.
# On unix the parent dir has to not be readonly too.
# May still be issues with owner being different, like if 'system' is the guy running?
# Apparently this escape function on errors is the way shutil.rmtree can
# handle the permission issue. (do chmod here)
# But we shouldn't have read-only files. So don't try to handle that case.
def handleRemoveError(func, path, exc):
# If there was an error, it could be due to windows holding onto files.
# Wait a bit before retrying. Ignore errors on the retry. Just leave files.
# Ex. if we're in the looping cloud test deleting sandbox.
excvalue = exc[1]
print "Retrying shutil.rmtree of sandbox (2 sec delay). Will ignore errors. Exception was", excvalue.errno
time.sleep(2)
try:
func(path)
except OSError:
pass
LOG_DIR = get_sandbox_name()
def clean_sandbox():
if os.path.exists(LOG_DIR):
# shutil.rmtree hangs if symlinks in the dir? (in syn_datasets for multifile parse)
# use os.remove() first
for f in glob.glob(LOG_DIR + '/syn_datasets/*'):
verboseprint("cleaning", f)
os.remove(f)
# shutil.rmtree fails to delete very long filenames on Windoze
#shutil.rmtree(LOG_DIR)
# was this on 3/5/13. This seems reliable on windows+cygwin
### os.system("rm -rf "+LOG_DIR)
shutil.rmtree(LOG_DIR, ignore_errors=False, onerror=handleRemoveError)
# it should have been removed, but on error it might still be there
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
# who knows if this one is ok with windows...doesn't rm dir, just
# the stdout/stderr files
def clean_sandbox_stdout_stderr():
if os.path.exists(LOG_DIR):
files = []
# glob.glob returns an iterator
for f in glob.glob(LOG_DIR + '/*stdout*'):
verboseprint("cleaning", f)
os.remove(f)
for f in glob.glob(LOG_DIR + '/*stderr*'):
verboseprint("cleaning", f)
os.remove(f)
def tmp_file(prefix='', suffix='', tmp_dir=None):
if not tmp_dir:
tmpdir = LOG_DIR
else:
tmpdir = tmp_dir
fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=tmpdir)
# make sure the file now exists
# os.open(path, 'a').close()
    # give everyone permission to read it (jenkins runs as 0xcustomer
    # and needs to archive the file as jenkins)
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(path, permissions)
return (fd, path)
def tmp_dir(prefix='', suffix=''):
return tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=LOG_DIR)
def log(cmd, comment=None):
filename = LOG_DIR + '/commands.log'
# everyone can read
with open(filename, 'a') as f:
f.write(str(datetime.datetime.now()) + ' -- ')
# what got sent to h2o
# f.write(cmd)
# let's try saving the unencoded url instead..human readable
if cmd:
f.write(urlparse.unquote(cmd))
if comment:
f.write(' #')
f.write(comment)
f.write("\n")
elif comment: # for comment-only
f.write(comment + "\n")
# jenkins runs as 0xcustomer, and the file wants to be archived by jenkins who isn't in his group
permissions = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod(filename, permissions)
def make_syn_dir():
# move under sandbox
# the LOG_DIR must have been created for commands.log before any datasets would be created
SYNDATASETS_DIR = LOG_DIR + '/syn_datasets'
if os.path.exists(SYNDATASETS_DIR):
shutil.rmtree(SYNDATASETS_DIR)
os.mkdir(SYNDATASETS_DIR)
return SYNDATASETS_DIR
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
# can't have a list of cmds, because cmd is a list
# cmdBefore gets executed first, and we wait for it to complete
def spawn_cmd(name, cmd, capture_output=True, **kwargs):
if capture_output:
outfd, outpath = tmp_file(name + '.stdout.', '.log')
errfd, errpath = tmp_file(name + '.stderr.', '.log')
# everyone can read
ps = psutil.Popen(cmd, stdin=None, stdout=outfd, stderr=errfd, **kwargs)
else:
outpath = '<stdout>'
errpath = '<stderr>'
ps = psutil.Popen(cmd, **kwargs)
comment = 'PID %d, stdout %s, stderr %s' % (
ps.pid, os.path.basename(outpath), os.path.basename(errpath))
log(' '.join(cmd), comment=comment)
return (ps, outpath, errpath)
def spawn_wait(ps, stdout, stderr, capture_output=True, timeout=None):
rc = ps.wait(timeout)
if capture_output:
out = file(stdout).read()
err = file(stderr).read()
else:
out = 'stdout not captured'
err = 'stderr not captured'
if rc is None:
ps.terminate()
raise Exception("%s %s timed out after %d\nstdout:\n%s\n\nstderr:\n%s" %
(ps.name, ps.cmdline, timeout or 0, out, err))
elif rc != 0:
raise Exception("%s %s failed.\nstdout:\n%s\n\nstderr:\n%s" %
(ps.name, ps.cmdline, out, err))
return rc
def spawn_cmd_and_wait(name, cmd, capture_output=True, timeout=None, **kwargs):
(ps, stdout, stderr) = spawn_cmd(name, cmd, capture_output, **kwargs)
spawn_wait(ps, stdout, stderr, capture_output, timeout)
global nodes
nodes = []
# I suppose we could shuffle the flatfile order!
# but it uses hosts, so if that got shuffled, we got it covered?
# the i in xrange part is not shuffled. maybe create the list first, for possible random shuffle
# FIX! default to random_shuffle for now..then switch to not.
def write_flatfile(node_count=2, base_port=54321, hosts=None):
# always create the flatfile.
ports_per_node = 2
pff = open(flatfile_name(), "w+")
# doing this list outside the loops so we can shuffle for better test variation
hostPortList = []
if hosts is None:
ip = python_cmd_ip
for i in range(node_count):
hostPortList.append(ip + ":" + str(base_port + ports_per_node * i))
else:
for h in hosts:
for i in range(node_count):
# removed leading "/"
hostPortList.append(h.addr + ":" + str(base_port + ports_per_node * i))
for hp in hostPortList:
pff.write(hp + "\n")
pff.close()
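# The flatfile ends up with one "ip:port" line per JVM, e.g. for two local nodes
# (addresses illustrative): 192.168.1.20:54321 and 192.168.1.20:54323, since ports are
# spaced ports_per_node=2 apart starting at base_port.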
def check_h2o_version():
# assumes you want to know about 3 ports starting at base_port
command1Split = ['java', '-jar', find_file('target/h2o.jar'), '--version']
command2Split = ['egrep', '-v', '( Java | started)']
print "Running h2o to get java version"
p1 = Popen(command1Split, stdout=PIPE)
p2 = Popen(command2Split, stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
print output
def default_hosts_file():
if os.environ.has_key("H2O_HOSTS_FILE"):
return os.environ["H2O_HOSTS_FILE"]
return 'pytest_config-{0}.json'.format(getpass.getuser())
# node_count is per host if hosts is specified.
def build_cloud(node_count=1, base_port=54321, hosts=None,
timeoutSecs=30, retryDelaySecs=1, cleanup=True,
conservative=False, **kwargs):
clean_sandbox()
log("#*********************************************************************")
log("Starting new test: " + python_test_name + " at build_cloud()")
log("#*********************************************************************")
# start up h2o to report the java version (once). output to python stdout
# only do this for regression testing
if getpass.getuser() == 'jenkins':
check_h2o_version()
# keep this param in kwargs, because we pass it to the H2O node build, so state
# is created that polling and other normal things can check, to decide to dump
# info to benchmark.log
if kwargs.setdefault('enable_benchmark_log', False):
setup_benchmark_log()
ports_per_node = 2
nodeList = []
try:
# if no hosts list, use psutil method on local host.
totalNodes = 0
# doing this list outside the loops so we can shuffle for better test variation
# this jvm startup shuffle is independent from the flatfile shuffle
portList = [base_port + ports_per_node * i for i in range(node_count)]
if hosts is None:
# if use_flatfile, we should create it,
# because tests will just call build_cloud with use_flatfile=True
# best to just create it all the time..may or may not be used
write_flatfile(node_count=node_count, base_port=base_port)
hostCount = 1
for p in portList:
                verboseprint("psutil starting node", totalNodes)
newNode = LocalH2O(port=p, node_id=totalNodes, **kwargs)
nodeList.append(newNode)
totalNodes += 1
else:
# if hosts, the flatfile was created and uploaded to hosts already
# I guess don't recreate it, don't overwrite the one that was copied beforehand.
# we don't always use the flatfile (use_flatfile=False)
            # Suppose we could dispatch from the flatfile to match its contents
# but sometimes we want to test with a bad/different flatfile then we invoke h2o?
hostCount = len(hosts)
hostPortList = []
for h in hosts:
for port in portList:
hostPortList.append((h, port))
for (h, p) in hostPortList:
verboseprint('ssh starting node', totalNodes, 'via', h)
newNode = h.remote_h2o(port=p, node_id=totalNodes, **kwargs)
nodeList.append(newNode)
totalNodes += 1
verboseprint("Attempting Cloud stabilize of", totalNodes, "nodes on", hostCount, "hosts")
start = time.time()
# UPDATE: best to stabilize on the last node!
stabilize_cloud(nodeList[0], len(nodeList),
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, noExtraErrorCheck=True)
verboseprint(len(nodeList), "Last added node stabilized in ", time.time() - start, " secs")
verboseprint("Built cloud: %d nodes on %d hosts, in %d s" % (len(nodeList),
hostCount, (time.time() - start)))
print "Built cloud:", nodeList[0].java_heap_GB, "GB java heap(s) with", len(nodeList), "total nodes"
# FIX! using "consensus" in node[-1] should mean this is unnecessary?
# maybe there's a bug. For now do this. long term: don't want?
# UPDATE: do it for all cases now 2/14/13
if conservative: # still needed?
for n in nodeList:
stabilize_cloud(n, len(nodeList), timeoutSecs=timeoutSecs, noExtraErrorCheck=True)
# this does some extra checking now
verify_cloud_size(nodeList)
# best to check for any errors due to cloud building right away?
check_sandbox_for_errors(python_test_name=python_test_name)
except:
if cleanup:
for n in nodeList: n.terminate()
else:
nodes[:] = nodeList
check_sandbox_for_errors(python_test_name=python_test_name)
raise
# this is just in case they don't assign the return to the nodes global?
nodes[:] = nodeList
print len(nodeList), "total jvms in H2O cloud"
# put the test start message in the h2o log, to create a marker
nodes[0].h2o_log_msg()
if config_json:
# like cp -p. Save the config file, to sandbox
print "Saving the ", config_json, "we used to", LOG_DIR
shutil.copy(config_json, LOG_DIR + "/" + os.path.basename(config_json))
# Figure out some stuff about how this test was run
cs_time = str(datetime.datetime.now())
cs_cwd = os.getcwd()
cs_python_cmd_line = "python %s %s" % (python_test_name, python_cmd_args)
cs_python_test_name = python_test_name
if config_json:
cs_config_json = os.path.abspath(config_json)
else:
cs_config_json = None
cs_username = python_username
cs_ip = python_cmd_ip
return nodeList
def upload_jar_to_remote_hosts(hosts, slow_connection=False):
def prog(sofar, total):
# output is bad for jenkins.
username = getpass.getuser()
if username != 'jenkins':
p = int(10.0 * sofar / total)
sys.stdout.write('\rUploading jar [%s%s] %02d%%' % ('#' * p, ' ' * (10 - p), 100 * sofar / total))
sys.stdout.flush()
if not slow_connection:
for h in hosts:
f = find_file('target/h2o.jar')
h.upload_file(f, progress=prog)
# skipping progress indicator for the flatfile
h.upload_file(flatfile_name())
else:
f = find_file('target/h2o.jar')
hosts[0].upload_file(f, progress=prog)
hosts[0].push_file_to_remotes(f, hosts[1:])
f = find_file(flatfile_name())
hosts[0].upload_file(f, progress=prog)
hosts[0].push_file_to_remotes(f, hosts[1:])
def check_sandbox_for_errors(cloudShutdownIsError=False, sandboxIgnoreErrors=False, python_test_name=''):
    # don't have both tearDown and tearDownClass report the same found error
# only need the first
if nodes and nodes[0].sandbox_error_report(): # gets current state
return
# Can build a cloud that ignores all sandbox things that normally fatal the test
# Kludge, test will set this directly if it wants, rather than thru build_cloud parameter.
# we need the sandbox_ignore_errors, for the test teardown_cloud..the state disappears!
ignore = sandboxIgnoreErrors or (nodes and nodes[0].sandbox_ignore_errors)
errorFound = h2o_sandbox.check_sandbox_for_errors(
LOG_DIR=LOG_DIR,
sandboxIgnoreErrors=ignore,
cloudShutdownIsError=cloudShutdownIsError,
python_test_name=python_test_name)
if errorFound and nodes:
nodes[0].sandbox_error_report(True) # sets
def tear_down_cloud(nodeList=None, sandboxIgnoreErrors=False):
if not nodeList: nodeList = nodes
try:
for n in nodeList:
n.terminate()
verboseprint("tear_down_cloud n:", n)
finally:
check_sandbox_for_errors(sandboxIgnoreErrors=sandboxIgnoreErrors, python_test_name=python_test_name)
nodeList[:] = []
# timeoutSecs is per individual node get_cloud()
def verify_cloud_size(nodeList=None, verbose=False, timeoutSecs=10, ignoreHealth=False):
if not nodeList: nodeList = nodes
expectedSize = len(nodeList)
# cloud size and consensus have to reflect a single grab of information from a node.
cloudStatus = [n.get_cloud(timeoutSecs=timeoutSecs) for n in nodeList]
cloudSizes = [c['cloud_size'] for c in cloudStatus]
cloudConsensus = [c['consensus'] for c in cloudStatus]
cloudHealthy = [c['cloud_healthy'] for c in cloudStatus]
if not all(cloudHealthy):
msg = "Some node reported cloud_healthy not true: %s" % cloudHealthy
if not ignoreHealth:
raise Exception(msg)
# gather up all the node_healthy status too
for i, c in enumerate(cloudStatus):
nodesHealthy = [n['node_healthy'] for n in c['nodes']]
if not all(nodesHealthy):
print "node %s cloud status: %s" % (i, dump_json(c))
msg = "node %s says some node is not reporting node_healthy: %s" % (c['node_name'], nodesHealthy)
if not ignoreHealth:
raise Exception(msg)
if expectedSize == 0 or len(cloudSizes) == 0 or len(cloudConsensus) == 0:
print "\nexpectedSize:", expectedSize
print "cloudSizes:", cloudSizes
print "cloudConsensus:", cloudConsensus
raise Exception("Nothing in cloud. Can't verify size")
for s in cloudSizes:
consensusStr = (",".join(map(str, cloudConsensus)))
sizeStr = (",".join(map(str, cloudSizes)))
if (s != expectedSize):
raise Exception("Inconsistent cloud size." +
"nodeList report size: %s consensus: %s instead of %d." % \
(sizeStr, consensusStr, expectedSize))
return (sizeStr, consensusStr, expectedSize)
def stabilize_cloud(node, node_count, timeoutSecs=14.0, retryDelaySecs=0.25, noExtraErrorCheck=False):
node.wait_for_node_to_accept_connections(timeoutSecs, noExtraErrorCheck=noExtraErrorCheck)
# want node saying cloud = expected size, plus thinking everyone agrees with that.
def test(n, tries=None):
c = n.get_cloud(noExtraErrorCheck=True)
# don't want to check everything. But this will check that the keys are returned!
consensus = c['consensus']
locked = c['locked']
cloud_size = c['cloud_size']
cloud_name = c['cloud_name']
node_name = c['node_name']
if 'nodes' not in c:
emsg = "\nH2O didn't include a list of nodes in get_cloud response after initial cloud build"
raise Exception(emsg)
# only print it when you get consensus
if cloud_size != node_count:
verboseprint("\nNodes in cloud while building:")
for ci in c['nodes']:
verboseprint(ci['name'])
if (cloud_size > node_count):
emsg = (
"\n\nERROR: cloud_size: %d reported via json is bigger than we expect: %d" % (cloud_size, node_count) +
"\nYou likely have zombie(s) with the same cloud name on the network, that's forming up with you." +
"\nLook at the cloud IP's in 'grep Paxos sandbox/*stdout*' for some IP's you didn't expect." +
"\n\nYou probably don't have to do anything, as the cloud shutdown in this test should" +
"\nhave sent a Shutdown.json to all in that cloud (you'll see a kill -2 in the *stdout*)." +
"\nIf you try again, and it still fails, go to those IPs and kill the zombie h2o's." +
"\nIf you think you really have an intermittent cloud build, report it." +
"\n" +
"\nUPDATE: building cloud size of 2 with 127.0.0.1 may temporarily report 3 incorrectly, with no zombie?"
)
raise Exception(emsg)
a = (cloud_size == node_count) and consensus
if a:
verboseprint("\tLocked won't happen until after keys are written")
verboseprint("\nNodes in final cloud:")
for ci in c['nodes']:
verboseprint(ci['name'])
return a
node.stabilize(test, error=('A cloud of size %d' % node_count),
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs)
def log_rest(s):
if not debug_rest:
return
rest_log_file = open(os.path.join(LOG_DIR, "rest.log"), "a")
rest_log_file.write(s)
rest_log_file.write("\n")
rest_log_file.close()
class H2O(object):
def __url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
return u
def __do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, returnFast=False,
cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, **kwargs):
        # if url param is used, use it as the full url. otherwise create it from the jsonRequest
if fullUrl:
url = fullUrl
else:
url = self.__url(jsonRequest)
# remove any params that are 'None'
# need to copy dictionary, since can't delete while iterating
if params is not None:
params2 = params.copy()
for k in params2:
if params2[k] is None:
del params[k]
paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
if extraComment:
log('Start ' + url + paramsStr, comment=extraComment)
else:
log('Start ' + url + paramsStr)
log_rest("")
log_rest("----------------------------------------------------------------------\n")
if extraComment:
log_rest("# Extra comment info about this request: " + extraComment)
if cmd == 'get':
log_rest("GET")
else:
log_rest("POST")
log_rest(url + paramsStr)
# file get passed thru kwargs here
try:
if cmd == 'post':
r = requests.post(url, timeout=timeout, params=params, **kwargs)
else:
r = requests.get(url, timeout=timeout, params=params, **kwargs)
except Exception, e:
# rethrow the exception after we've checked for stack trace from h2o
# out of memory errors maybe don't show up right away? so we should wait for h2o
# to get it out to h2o stdout. We don't want to rely on cloud teardown to check
# because there's no delay, and we don't want to delay all cloud teardowns by waiting.
# (this is new/experimental)
exc_info = sys.exc_info()
# use this to ignore the initial connection errors during build cloud when h2o is coming up
if not noExtraErrorCheck:
print "ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr)
time.sleep(2)
check_sandbox_for_errors(python_test_name=python_test_name);
log_rest("")
log_rest("EXCEPTION CAUGHT DOING REQUEST: " + str(e.message))
raise exc_info[1], None, exc_info[2]
log_rest("")
try:
if r is None:
log_rest("r is None")
else:
log_rest("HTTP status code: " + str(r.status_code))
if hasattr(r, 'text'):
if r.text is None:
log_rest("r.text is None")
else:
log_rest(r.text)
else:
log_rest("r does not have attr text")
except Exception, e:
# Paranoid exception catch.
# Ignore logging exceptions in the case that the above error checking isn't sufficient.
pass
# fatal if no response
if not r:
raise Exception("Maybe bad url? no r in __do_json_request in %s:" % inspect.stack()[1][3])
rjson = None
if returnFast:
return
try:
rjson = r.json()
except:
print dump_json(r.text)
if not isinstance(r, (list, dict)):
raise Exception("h2o json responses should always be lists or dicts, see previous for text")
raise Exception("Could not decode any json from the request.")
# TODO: we should really only look in the response object. This check
# prevents us from having a field called "error" (e.g., for a scoring result).
for e in ['error', 'Error', 'errors', 'Errors']:
# error can be null (python None). This happens in exec2
if e in rjson and rjson[e]:
print "rjson:", dump_json(rjson)
emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
if ignoreH2oError:
# well, we print it..so not totally ignore. test can look at rjson returned
print emsg
else:
print emsg
raise Exception(emsg)
for w in ['warning', 'Warning', 'warnings', 'Warnings']:
# warning can be null (python None).
if w in rjson and rjson[w]:
verboseprint(dump_json(rjson))
print 'rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w])
return rjson
def get_cloud(self, noExtraErrorCheck=False, timeoutSecs=10):
# hardwire it to allow a 60 second timeout
a = self.__do_json_request('Cloud.json', noExtraErrorCheck=noExtraErrorCheck, timeout=timeoutSecs)
consensus = a['consensus']
locked = a['locked']
cloud_size = a['cloud_size']
cloud_name = a['cloud_name']
node_name = a['node_name']
node_id = self.node_id
verboseprint('%s%s %s%s %s%s %s%s' % (
"\tnode_id: ", node_id,
"\tcloud_size: ", cloud_size,
"\tconsensus: ", consensus,
"\tlocked: ", locked,
))
return a
def h2o_log_msg(self, message=None):
if 1 == 0:
return
if not message:
message = "\n"
message += "\n#***********************"
message += "\npython_test_name: " + python_test_name
message += "\n#***********************"
params = {'message': message}
self.__do_json_request('2/LogAndEcho', params=params)
# Shutdown url is like a reset button. Doesn't send a response before it kills stuff
# safer if random things are wedged, rather than requiring response
# so request library might retry and get exception. allow that.
def shutdown_all(self):
try:
self.__do_json_request('Shutdown.json', noExtraErrorCheck=True)
except:
pass
time.sleep(1) # a little delay needed?
return (True)
def put_value(self, value, key=None, repl=None):
return self.__do_json_request(
'PutValue.json',
params={"value": value, "key": key, "replication_factor": repl},
extraComment=str(value) + "," + str(key) + "," + str(repl))
# noise is a 2-tuple ("StoreView", none) for url plus args for doing during poll to create noise
# so we can create noise with different urls!, and different parms to that url
# no noise if None
def poll_url(self, response,
timeoutSecs=10, retryDelaySecs=0.5, initialDelaySecs=0, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, reuseFirstPollUrl=False, noPrint=False):
### print "poll_url: pollTimeoutSecs", pollTimeoutSecs
verboseprint('poll_url input: response:', dump_json(response))
print "at top of poll_url, timeoutSec: ", timeoutSecs
def get_redirect_url(response):
url = None
params = None
# StoreView has old style, while beta_features
if 'response_info' in response:
response_info = response['response_info']
if 'redirect_url' not in response_info:
raise Exception("Response during polling must have 'redirect_url'\n%s" % dump_json(response))
if response_info['status'] != 'done':
redirect_url = response_info['redirect_url']
if redirect_url:
url = self.__url(redirect_url)
params = None
else:
if response_info['status'] != 'done':
raise Exception(
"'redirect_url' during polling is null but status!='done': \n%s" % dump_json(response))
else:
if 'response' not in response:
raise Exception("'response' not in response.\n%s" % dump_json(response))
if response['response']['status'] != 'done':
if 'redirect_request' not in response['response']:
raise Exception("'redirect_request' not in response. \n%s" % dump_json(response))
url = self.__url(response['response']['redirect_request'])
params = response['response']['redirect_request_args']
return (url, params)
# if we never poll
msgUsed = None
if 'response_info' in response: # trigger v2 for GBM always?
status = response['response_info']['status']
progress = response.get('progress', "")
else:
r = response['response']
status = r['status']
progress = r.get('progress', "")
doFirstPoll = status != 'done'
(url, params) = get_redirect_url(response)
# no need to recreate the string for messaging, in the loop..
if params:
paramsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
# FIX! don't do JStack noise for tests that ask for it. JStack seems to have problems
noise_enable = noise and noise != ("JStack", None)
if noise_enable:
print "Using noise during poll_url:", noise
# noise_json should be like "Storeview"
(noise_json, noiseParams) = noise
noiseUrl = self.__url(noise_json + ".json")
if noiseParams is None:
noiseParamsStr = ""
else:
noiseParamsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in noiseParams.items()])
start = time.time()
count = 0
if initialDelaySecs:
time.sleep(initialDelaySecs)
# can end with status = 'redirect' or 'done'
# Update: on DRF2, the first RF redirects to progress. So we should follow that, and follow any redirect to view?
# so for v2, we'll always follow redirects?
# For v1, we're not forcing the first status to be 'poll' now..so it could be redirect or done?(NN score? if blocking)
# Don't follow the Parse redirect to Inspect, because we want parseResult['destination_key'] to be the end.
# note this doesn't affect polling with Inspect? (since it doesn't redirect ?
while status == 'poll' or doFirstPoll or (status == 'redirect' and 'Inspect' not in url):
count += 1
if ((time.time() - start) > timeoutSecs):
# show what we're polling with
emsg = "Exceeded timeoutSecs: %d secs while polling." % timeoutSecs + \
"status: %s, url: %s?%s" % (status, urlUsed, paramsUsedStr)
raise Exception(emsg)
if benchmarkLogging:
cloudPerfH2O.get_log_save(benchmarkLogging)
# every other one?
create_noise = noise_enable and ((count % 2) == 0)
if create_noise:
urlUsed = noiseUrl
paramsUsed = noiseParams
paramsUsedStr = noiseParamsStr
msgUsed = "\nNoise during polling with"
else:
urlUsed = url
paramsUsed = params
paramsUsedStr = paramsStr
msgUsed = "\nPolling with"
print status, progress, urlUsed
time.sleep(retryDelaySecs)
response = self.__do_json_request(fullUrl=urlUsed, timeout=pollTimeoutSecs, params=paramsUsed)
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
if ((count % 6) == 0):
check_sandbox_for_errors(python_test_name=python_test_name)
if (create_noise):
# this guarantees the loop is done, so we don't need to worry about
# a 'return r' being interpreted from a noise response
status = 'poll'
progress = ''
else:
doFirstPoll = False
status = response['response_info']['status']
progress = response.get('progress', "")
# get the redirect url
if not reuseFirstPollUrl: # reuse url for all v1 stuff
(url, params) = get_redirect_url(response)
if noPoll:
return response
# won't print if we didn't poll
if msgUsed:
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
return response
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
'''Repeatedly test a function waiting for it to return True.
Arguments:
test_func -- A function that will be run repeatedly
error -- A function that will be run to produce an error message
it will be called with (node, timeTakenSecs, numberOfRetries)
OR
-- A string that will be interpolated with a dictionary of
{ 'timeTakenSecs', 'numberOfRetries' }
timeoutSecs -- How long in seconds to keep trying before declaring a failure
retryDelaySecs -- How long to wait between retry attempts
'''
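        # Example usage (see wait_for_node_to_accept_connections below): a test_func
        # that returns True once get_cloud() succeeds, with the string error
        # 'Cloud accepting connections'.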
start = time.time()
numberOfRetries = 0
while time.time() - start < timeoutSecs:
if test_func(self, tries=numberOfRetries):
break
time.sleep(retryDelaySecs)
numberOfRetries += 1
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
# to find the badness?. can check_sandbox_for_errors at any time
if ((numberOfRetries % 50) == 0):
check_sandbox_for_errors(python_test_name=python_test_name)
else:
timeTakenSecs = time.time() - start
if isinstance(error, type('')):
raise Exception('%s failed after %.2f seconds having retried %d times' % (
error, timeTakenSecs, numberOfRetries))
else:
msg = error(self, timeTakenSecs, numberOfRetries)
raise Exception(msg)
def wait_for_node_to_accept_connections(self, timeoutSecs=15, noExtraErrorCheck=False):
verboseprint("wait_for_node_to_accept_connections")
def test(n, tries=None):
try:
n.get_cloud(noExtraErrorCheck=noExtraErrorCheck)
return True
except requests.ConnectionError, e:
# Now using: requests 1.1.0 (easy_install --upgrade requests) 2/5/13
# Now: assume all requests.ConnectionErrors are H2O legal connection errors.
# Have trouble finding where the errno is, fine to assume all are good ones.
# Timeout check will kick in if continued H2O badness.
return False
self.stabilize(test, 'Cloud accepting connections',
timeoutSecs=timeoutSecs, # with cold cache's this can be quite slow
retryDelaySecs=0.1) # but normally it is very fast
def sandbox_error_report(self, done=None):
# not clearable..just or in new value
if done:
self.sandbox_error_was_reported = True
return (self.sandbox_error_was_reported)
def get_args(self):
args = ['java']
# I guess it doesn't matter if we use flatfile for both now
# defaults to not specifying
# FIX! we need to check that it's not outside the limits of the dram of the machine it's running on?
if self.java_heap_GB is not None:
if not (1 <= self.java_heap_GB <= 256):
raise Exception('java_heap_GB <1 or >256 (GB): %s' % (self.java_heap_GB))
args += ['-Xms%dG' % self.java_heap_GB]
args += ['-Xmx%dG' % self.java_heap_GB]
if self.java_heap_MB is not None:
if not (1 <= self.java_heap_MB <= 256000):
raise Exception('java_heap_MB <1 or >256000 (MB): %s' % (self.java_heap_MB))
args += ['-Xms%dm' % self.java_heap_MB]
args += ['-Xmx%dm' % self.java_heap_MB]
if self.java_extra_args is not None:
args += ['%s' % self.java_extra_args]
args += ["-ea"]
if self.use_maprfs:
args += ["-Djava.library.path=/opt/mapr/lib"]
if self.classpath:
entries = [find_file('build/classes'), find_file('lib/javassist.jar')]
entries += glob.glob(find_file('lib') + '/*/*.jar')
entries += glob.glob(find_file('lib') + '/*/*/*.jar')
args += ['-classpath', os.pathsep.join(entries), 'water.Boot']
else:
args += ["-jar", self.get_h2o_jar()]
if 1==1:
if self.hdfs_config:
args += [
'-hdfs_config=' + self.hdfs_config
]
if beta_features:
args += ["-beta"]
# H2O should figure it out, if not specified
# DON"T EVER USE on multi-machine...h2o should always get it right, to be able to run on hadoop
# where it's not told
if (self.addr is not None) and (not self.remoteH2O):
args += [
'--ip=%s' % self.addr,
]
# Need to specify port, since there can be multiple ports for an ip in the flatfile
if self.port is not None:
args += [
"--port=%d" % self.port,
]
if self.use_debugger:
# currently hardwire the base port for debugger to 8000
# increment by one for every node we add
            # since this order is different from the h2o cluster order, print out the ip and port for the user
# we could save debugger_port state per node, but not really necessary (but would be more consistent)
debuggerBasePort = 8000
if self.node_id is None:
debuggerPort = debuggerBasePort
else:
debuggerPort = debuggerBasePort + self.node_id
if self.http_addr:
a = self.http_addr
else:
a = "localhost"
if self.port:
b = str(self.port)
else:
b = "h2o determined"
# I guess we always specify port?
print "You can attach debugger at port %s for jvm at %s:%s" % (debuggerPort, a, b)
args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=%s' % debuggerPort]
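            # e.g. (illustrative, not part of the framework): attach with
            #   jdb -connect com.sun.jdi.SocketAttach:hostname=<that ip>,port=<debuggerPort>
            # or point an IDE remote-debug configuration at the same host/port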
if self.use_flatfile:
args += [
'--flatfile=' + self.flatfile,
]
args += [
'--ice_root=%s' % self.get_ice_dir(),
            # if I have multiple jenkins projects running different h2o clouds, I need
            # different ports and a different cloud name.
            # Does a different cloud name prevent them from joining up
            # (even if they use the same multicast ports?)
            # I suppose I can force a base address, or run on another machine?
]
args += [
'--name=' + self.cloud_name
]
# ignore the other -hdfs args if the config is used?
if 1==0:
if self.hdfs_config:
args += [
'-hdfs_config=' + self.hdfs_config
]
if self.use_hdfs:
args += [
# it's fine if hdfs_name has a ":9000" port or something too
'-hdfs hdfs://' + self.hdfs_name_node,
'-hdfs_version=' + self.hdfs_version,
]
if self.use_maprfs:
args += [
# 3 slashes?
'-hdfs maprfs:///' + self.hdfs_name_node,
'-hdfs_version=' + self.hdfs_version,
]
if self.aws_credentials:
args += ['--aws_credentials=' + self.aws_credentials]
if self.disable_h2o_log:
args += ['--nolog']
# disable logging of requests, as some contain "error", which fails the test
## FIXED. better escape in check_sandbox_for_errors
## args += ['--no_requests_log']
return args
def __init__(self,
use_this_ip_addr=None, port=54321, capture_output=True,
use_debugger=None, classpath=None,
use_hdfs=False, use_maprfs=False,
# hdfs_version="cdh4", hdfs_name_node="172.16.2.151",
# hdfs_version="cdh4", hdfs_name_node="172.16.2.176",
hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
aws_credentials=None,
use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
use_home_for_ice=False, node_id=None, username=None,
disable_h2o_log=False,
enable_benchmark_log=False,
h2o_remote_buckets_root=None,
delete_keys_at_teardown=False,
cloud_name=None,
):
if use_hdfs:
# see if we can touch a 0xdata machine
try:
# long timeout in ec2...bad
a = requests.get('http://172.16.2.176:80', timeout=1)
hdfs_0xdata_visible = True
except:
hdfs_0xdata_visible = False
# different defaults, depending on where we're running
if hdfs_name_node is None:
if hdfs_0xdata_visible:
hdfs_name_node = "172.16.2.176"
else: # ec2
hdfs_name_node = "10.78.14.235:9000"
if hdfs_version is None:
if hdfs_0xdata_visible:
hdfs_version = "cdh4"
else: # ec2
hdfs_version = "0.20.2"
self.aws_credentials = aws_credentials
self.port = port
# None is legal for self.addr.
# means we won't give an ip to the jar when we start.
# Or we can say use use_this_ip_addr=127.0.0.1, or the known address
# if use_this_addr is None, use 127.0.0.1 for urls and json
# Command line arg 'ipaddr_from_cmd_line' dominates:
if ipaddr_from_cmd_line:
self.addr = ipaddr_from_cmd_line
else:
self.addr = use_this_ip_addr
if self.addr is not None:
self.http_addr = self.addr
else:
self.http_addr = get_ip_address()
# command line should always dominate for enabling
if debugger: use_debugger = True
self.use_debugger = use_debugger
self.classpath = classpath
self.capture_output = capture_output
self.use_hdfs = use_hdfs
self.use_maprfs = use_maprfs
self.hdfs_name_node = hdfs_name_node
self.hdfs_version = hdfs_version
self.hdfs_config = hdfs_config
self.use_flatfile = use_flatfile
self.java_heap_GB = java_heap_GB
self.java_heap_MB = java_heap_MB
self.java_extra_args = java_extra_args
self.use_home_for_ice = use_home_for_ice
self.node_id = node_id
if username:
self.username = username
else:
self.username = getpass.getuser()
# don't want multiple reports from tearDown and tearDownClass
# have nodes[0] remember (0 always exists)
self.sandbox_error_was_reported = False
self.sandbox_ignore_errors = False
self.disable_h2o_log = disable_h2o_log
# this dumps stats from tests, and perf stats while polling to benchmark.log
self.enable_benchmark_log = enable_benchmark_log
self.h2o_remote_buckets_root = h2o_remote_buckets_root
self.delete_keys_at_teardown = delete_keys_at_teardown
if cloud_name:
self.cloud_name = cloud_name
else:
self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
#*****************************************************************
class LocalH2O(H2O):
'''An H2O instance launched by the python framework on the local host using psutil'''
def __init__(self, *args, **kwargs):
super(LocalH2O, self).__init__(*args, **kwargs)
self.rc = None
# FIX! no option for local /home/username ..always the sandbox (LOG_DIR)
self.ice = tmp_dir('ice.')
self.flatfile = flatfile_name()
self.remoteH2O = False # so we can tell if we're remote or local
if self.node_id is not None:
logPrefix = 'local-h2o-' + str(self.node_id)
else:
logPrefix = 'local-h2o'
spawn = spawn_cmd(logPrefix, cmd=self.get_args(), capture_output=self.capture_output)
self.ps = spawn[0]
def get_h2o_jar(self):
return find_file('target/h2o.jar')
def get_flatfile(self):
return self.flatfile
# return find_file(flatfile_name())
def get_ice_dir(self):
return self.ice
def is_alive(self):
verboseprint("Doing is_alive check for LocalH2O", self.wait(0))
return self.wait(0) is None
def terminate_self_only(self):
try:
if self.is_alive(): self.ps.kill()
if self.is_alive(): self.ps.terminate()
return self.wait(0.5)
except psutil.NoSuchProcess:
return -1
def terminate(self):
# send a shutdown request first.
# since local is used for a lot of buggy new code, also do the ps kill.
# try/except inside shutdown_all now
self.shutdown_all()
if self.is_alive():
print "\nShutdown didn't work for local node? : %s. Will kill though" % self
self.terminate_self_only()
def wait(self, timeout=0):
if self.rc is not None:
return self.rc
try:
self.rc = self.ps.wait(timeout)
return self.rc
except psutil.TimeoutExpired:
return None
def stack_dump(self):
self.ps.send_signal(signal.SIGQUIT)
```
#### File: py/testdir_multi_jvm/test_KMeans2_sphere5_bad_inits.py
```python
import unittest, time, sys, random, math
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_hosts, h2o_import as h2i
from operator import itemgetter
# want named tuples
import collections
# a truly uniform sphere
# http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
# While the author prefers the discarding method for spheres, for completeness
# he offers the exact solution: http://stackoverflow.com/questions/918736/random-number-generator-that-produces-a-power-law-distribution/918782#918782
# In spherical coordinates, taking advantage of the sampling rule:
# http://stackoverflow.com/questions/2106503/pseudorandom-number-generator-exponential-distribution/2106568#2106568
CLUSTERS = 5
SPHERE_PTS = 100000
# BAD_SEED = None
# BAD_SEED = 5010213207974401134
BAD_SEED = 815071896901582303
MAX_ITER = 1000
TRIALS = 1
INIT='Furthest'
# INIT='PlusPlus'
# random doesn't seem to get good answer?
# INIT=''
# since the init is using unnormalized values for sum of squares calcs,
# biasing the count for large numbers for some spheres will mess it up
NOT_SO_BAD = False
# NOT_SO_BAD = False
def get_xyz_sphere(R):
phi = random.uniform(0, 2 * math.pi)
costheta = random.uniform(-1,1)
u = random.random() # 0 to 1
theta = math.acos(costheta)
r = R * (u ** (1.0/3))
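    # u**(1/3) inverts the CDF of the radius (density ~ r^2 inside a ball), so the
    # points come out uniform in volume instead of clustering near the center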
# now you have a (r, theta, phi) group which can be transformed to (x, y, z)
# in the usual way
x = r * math.sin(theta) * math.cos(phi)
y = r * math.sin(theta) * math.sin(phi)
z = r * math.cos(theta)
    ### print [x,y,z]
return [x,y,z]
def write_spheres_dataset(csvPathname, CLUSTERS, n):
dsf = open(csvPathname, "w+")
# going to do a bunch of spheres, with differing # of pts and R
# R is radius of the spheres
# separate them by 3 * the previous R
# keep track of the centers so we compare to a sorted result from H2O
expectedCenters = []
currentCenter = None
totalRows = 0
print ""
for sphereCnt in range(CLUSTERS):
R = 10 * (sphereCnt+1)
newOffset = [3*R,3*R,3*R]
# figure out the next center
if currentCenter is None:
currentCenter = [0,0,0]
else:
currentCenter = [a+b for a,b in zip(currentCenter, newOffset)]
expectedCenters.append(currentCenter)
# build a sphere at that center
# pick a random # of points, from .5n to 1.5n
if NOT_SO_BAD:
numPts = random.randint(int(.5*n), int(1.5*n))
else:
numPts = n
print "currentCenter:", currentCenter, "R:", R, "numPts", numPts
for i in range(numPts):
xyz = get_xyz_sphere(R)
xyzShifted = [a+b for a,b in zip(xyz,currentCenter)]
dsf.write(",".join(map(str,xyzShifted))+"\n")
totalRows += 1
dsf.close()
print "Spheres created:", len(expectedCenters), "totalRows:", totalRows
return expectedCenters
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
# use the known bad seed if it's set. otherwise should be None
SEED = h2o.setup_random_seed(seed=BAD_SEED)
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans2_sphere5_bad_inits(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = 'syn_spheres100.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
expectedCenters = write_spheres_dataset(csvPathname, CLUSTERS, SPHERE_PTS)
print "\nStarting", csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=csvFilename + ".hex")
# try 5 times, to see if all inits by h2o are good
savedResults = []
Result = collections.namedtuple('Result',
'trial clusters size cluster_variances error iterations normalized max_iter clustersSorted')
# save the best for comparison. Print messages when we update best
sameAsBest = 1
# big number? to init
bestResult = Result(None, None, None, None, None, None, None, None, None)
for trial in range(TRIALS):
# pass SEED so it's repeatable
kwargs = {
'normalize': 0,
'k': CLUSTERS,
'max_iter': MAX_ITER,
'initialization': INIT,
# 'initialization': 'PlusPlus',
'destination_key': 'syn_spheres100.hex',
'seed': SEED
}
timeoutSecs = 30
start = time.time()
kmeansResult = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.',\
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
# see if we took the full limit to get an answer
# inspect of model doesn't work
# kmeansResult = h2o_cmd.runInspect(key='syn_spheres100.hex')
### print h2o.dump_json(kmeans)
### print h2o.dump_json(kmeansResult)
h2o_kmeans.simpleCheckKMeans(self, kmeansResult, **kwargs)
model = kmeansResult['model']
clusters = model["centers"]
size = model["size"]
cluster_variances = model["within_cluster_variances"]
# round to int to avoid fp error when saying "same"
error = int(model["total_within_SS"])
iterations = model["iterations"]
normalized = model["normalized"]
max_iter = model["max_iter"]
# clustersSorted = sorted(clusters, key=itemgetter(0))
clustersSorted = sorted(clusters)
r = Result (
trial,
clusters,
size,
cluster_variances,
error,
iterations,
normalized,
max_iter,
clustersSorted,
)
savedResults.append(r)
if iterations >= (max_iter-1): # h2o hits the limit at max_iter-1..shouldn't hit it
raise Exception("KMeans unexpectedly took %s iterations..which was the full amount allowed by max_iter %s",
(iterations, max_iter))
print "iterations", iterations
### print clustersSorted
# For now, just analyze the one with the lowest error
            # we could analyze how many are not best, and how many are best (maybe just look at error)
print "savedResults, error"
print r.error
if bestResult.error and r.error <= bestResult.error:
sameAsBest += 1
# we can check that if it has the same error, the sizes should be the same (integer) and reflects centers?
# should
if sorted(r.size)!=sorted(bestResult.size):
raise Exception("Would expect that if two trials got the same error (rounded to int), the cluster sizes would likely be the same? %s %s" %
(r.size, bestResult.size))
if not bestResult.error: # init case
bestResult = r
elif r.error < bestResult.error:
print "Trial", r.trial, "has a lower error", r.error, "than current lowest error", bestResult.error
print "Using it for best now"
bestResult = r
print "Trial #", trial, "completed"
print "\nApparently, %s out of %s trials, got the same best error: %s (lowest) " % (sameAsBest, TRIALS, bestResult.error)
print "\nh2o best result was from trial %s, centers sorted:" % bestResult.trial
print bestResult.clustersSorted
print "\ngenerated centers for comparison"
print expectedCenters
for i,center in enumerate(expectedCenters):
a = center
bb = bestResult.clustersSorted
print "bb:", bb
b = bb[i]
print "\nexpected:", a
print "h2o:", b # h2o result
aStr = ",".join(map(str,a))
bStr = ",".join(map(str,b))
iStr = str(i)
self.assertAlmostEqual(a[0], b[0], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; x not correct.")
self.assertAlmostEqual(a[1], b[1], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; y not correct.")
self.assertAlmostEqual(a[2], b[2], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; z not correct.")
# fix: should check size too. Really should format expected into the tuple that the h2o_kmeans checker uses
# the c5 testdir_release stuff has a checker..for centers, size, error?
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_multi_jvm/test_multi_with_a_browser.py
```python
import unittest
import time,sys
sys.path.extend(['.','..','py'])
import h2o_cmd, h2o, h2o_hosts
import h2o_browse as h2b
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
h2o.beta_features = True
if (localhost):
h2o.build_cloud(3, java_heap_GB=1, use_hdfs=True)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_multi_with_a_browser(self):
h2b.browseTheCloud()
if not h2o.browse_disable:
time.sleep(500000)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_multi_jvm/test_rf_stedo_fvec.py
```python
import unittest, os, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_stedo_fvec(self):
h2o.beta_features = True
csvPathname = 'stego/stego_training.data'
# Prediction class is the second column => class=1
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
h2o_cmd.runRF(parseResult=parseResult, ntrees=10, timeoutSecs=300, response=1)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_multi_jvm/test_summary2_with_x_libsvm.py
```python
import unittest
import random, sys, time, os
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
def write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE):
# we can do all sorts of methods off the r object
r = random.Random(SEEDPERFILE)
def addValToRowStuff(colNumber, val, rowData, synColSumDict):
if val!=0:
rowData.append(str(colNumber) + ":" + str(val)) # f should always return string
if colNumber in synColSumDict:
synColSumDict[colNumber] += val # sum of column (dict)
else:
synColSumDict[colNumber] = val # sum of column (dict)
dsf = open(csvPathname, "w+")
synColSumDict = {0: 0} # guaranteed to have col 0 for output
for i in range(rowCount):
rowData = []
if i==(rowCount-1): # last row
val = 1
colNumber = colCount # max
colNumberMax = colNumber
addValToRowStuff(colNumber, val, rowData, synColSumDict)
# add output
val = 0
rowData.insert(0, val)
synColSumDict[0] += val # sum of column (dict)
rowDataCsv = " ".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
return (colNumberMax, synColSumDict)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary_with_x_libsvm (self):
h2o.beta_features = True
print "Empty rows except for the last, with all zeros for class. Single col at max"
h2b.browseTheCloud()
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 100, 'cA', 300),
(100000, 100, 'cB', 300),
(100, 1000, 'cC', 300),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = "syn_%s_%s_%s.csv" % (SEEDPERFILE, rowCount, colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
# dict of col sums for comparison to exec col sums below
(colNumberMax, synColSumDict) = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=timeoutSecs,
doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], max_column_display=colNumberMax+1,
timeoutSecs=timeoutSecs)
numCols = inspect['numCols']
numRows = inspect['numRows']
self.assertEqual(colNumberMax+1, numCols,
msg="generated %s cols (including output). parsed to %s cols" % (colNumberMax+1, numCols))
self.assertEqual(rowCount, numRows,
msg="generated %s rows, parsed to %s rows" % (rowCount, numRows))
for x in range(numCols):
print "Doing summary with x=%s" % x
summaryResult = h2o_cmd.runSummary(key=hex_key, cols=x, timeoutSecs=timeoutSecs)
# skip the infoFromSummary check
colName = "C" + str(x+1)
print "Doing summary with col name x=%s" % colName
summaryResult = h2o_cmd.runSummary(key=hex_key, cols=colName, timeoutSecs=timeoutSecs)
# do a final one with all columns for the current check below
# FIX! we should update the check to check each individual summary result
print "Doing and checking summary with no x=%s" % x
summaryResult = h2o_cmd.runSummary(key=hex_key, max_ncols=colNumberMax+1, timeoutSecs=timeoutSecs)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
# FIX! add some result checking
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/notest_exec2_fast_locks_overlap.py
```python
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_exec as h2e, h2o_jobs
print "overlap the parse (not the putfile) of the next one, with the exec of the last one"
print ""
print "Was getting a failure trying to write lock iris2_1.hex during the exec for iris2_2.hex"
print "Was it the GLM's locks on iris2_1.hex (prior) or the prior exec on iris2_1.hex. Dunno"
print "Focus on just fast back to back execs here..get rid of the glm"
print "maybe the putfile/parse? leave that in also"
print "maybe change to one upload, and don't delete the source file, so just reparse"
# avoid using delete_on_done = 0 because of h2o assertion error
AVOID_BUG = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud()
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_fast_locks_overlap(self):
csvPathname = 'iris/iris2.csv'
src_key='iris.csv'
if not AVOID_BUG:
# need the key name (pattern) to feed to parse)
(importResult, importPattern) = h2i.import_only(bucket='smalldata', path=csvPathname, schema='put',
src_key=src_key, timeoutSecs=10)
# just as a reminder of what these returns look like
print "importResult:", h2o.dump_json(importResult)
print "importPattern:", h2o.dump_json(importPattern)
y = 4
lastHexKey = None
for trial in range (1, 100):
if AVOID_BUG:
# need the key name (pattern) to feed to parse)
(importResult, importPattern) = h2i.import_only(bucket='smalldata', path=csvPathname, schema='put',
src_key=src_key, timeoutSecs=10)
# just as a reminder of what these returns look like
print "importResult:", h2o.dump_json(importResult)
print "importPattern:", h2o.dump_json(importPattern)
# make sure each parse is unique dest key (not in use)
hex_key = "iris2_" + str(trial) + ".hex"
# what if we kicked off another parse without waiting for it? I think the src key gets locked
# so we'd get lock issues on the src_key
parseResult = h2i.parse_only(pattern=src_key, hex_key=hex_key, noPoll=True,
delete_on_done=1 if AVOID_BUG else 0, timeoutSecs=10)
# wait until iteration 2, when lastHexKey is available, so you can operate on that
if lastHexKey:
execExpr="%s[,%s]=(%s[,%s]==%s)" % (lastHexKey, y+1, lastHexKey, y+1, 1)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=10)
lastHexKey = hex_key
# since we are using the same source file, and potentially re-uploading if AVOID_BUG
# we have to synchronize here. I guess we have to make sure the parse is done too, since we're going to
# use it next iteration
h2o_jobs.pollWaitJobs(timeoutSecs=10)
# just show the jobs still going. Shouldn't be any
a = h2o.nodes[0].jobs_admin()
h2o.verboseprint("jobs_admin():", h2o.dump_json(a))
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_exec2_append_cols.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i, h2o_cmd
print "exec: adding col that's not +1 of last col, causes assertion error"
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(java_heap_GB=4)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_append_cols(self):
h2o.beta_features = True
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard/covtype.data'
hexKey = 'r.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey)
inspect = h2o_cmd.runInspect(key='r.hex')
print "\nr.hex" \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
numRows = inspect['numRows']
numCols = inspect['numCols']
        execExpr = 's.hex = r.hex[,1]'
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey='s.hex', timeoutSecs=10)
for i in range(1,10):
            execExpr = 's.hex[,%s] = r.hex[,%s]' % (i, i)
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey='s.hex', timeoutSecs=10)
inspect = h2o_cmd.runInspect(key='s.hex')
# check the names on all cols is correct
cols = inspect['cols']
print "cols:", h2o.dump_json(cols)
for i,c in enumerate(cols):
actual = c['name']
expected = 'C' + str(i+1)
self.assertEqual(actual, expected,
msg="actual col name: %s expected col name %s" % (actual, expected))
# make it fail with this one (skip)
        execExpr = 's.hex[,%s] = r.hex[,%s]' % (2, 1)
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey='s.hex', timeoutSecs=10)
inspect = h2o_cmd.runInspect(key='s.hex')
print "\ns.hex" \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_exec2_filter_slice.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
exprList = [
# "rTest=randomFilter(<keyX>,58101,12345)",
"a=runif(c.hex[,1], -1); rTrain=<keyX>[a<0.8,]",
# doesn't work yet
# "r2=c.hex[1:100,]",
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_filter_slice(self):
h2o.beta_features = True
timeoutSecs = 10
csvFilename = "covtype.data"
csvPathname = 'standard/covtype.data'
hex_key = "c.hex"
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=20)
print "Parse result['desination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
for trial in range(10):
print "Doing the execs in order, to feed filters into slices"
nodeX = 0
for exprTemplate in exprList:
execExpr = h2e.fill_in_expr_template(exprTemplate, colX=0, n=0, row=1, keyX=hex_key, m=2)
execResultInspect, min_value = h2e.exec_expr(h2o.nodes[nodeX], execExpr,
resultKey=None, timeoutSecs=10)
print "min_value:", min_value, "execExpr:", execExpr
h2o.verboseprint("min: ", min_value, "trial:", trial)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_exec2_xorsum2.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i, h2o_cmd, h2o_util
# new ...ability to reference cols
# src[ src$age<17 && src$zip=95120 && ... , ]
# can specify values for enums ..values are 0 thru n-1 for n enums
exprList = [
'h=c(1); h = xorsum(r1[,1])',
]
ROWS = 2000000
STOP_ON_ERROR = True
DO_BUG = True
DO_NEGATIVE = True
RANDOM_E_FP_FORMATS = True
CHUNKING_CNT = 100000
# subtracting hex numbers below and using this for max delta
ALLOWED_DELTA= 0x7fff
# interesting lsb diff in right format, leads to larger mantissa error on left
# mask -0x7ff from mantissa for mostly-right comparison (theoreticaly can flip lots of mantissa and exp)
# we actually do a sub and look at delta below (not mask) (I guess the hex gets interpreted differently than IEEE but that's okay)
# alright if we're mostly right..looking to catch catastrophic error in h2o
# ullResult (0.16x): 0x01faad3090cdb9d8 3.98339643735e-299
# expectedUllSum (0.16x): 0x01faad3090cd88d7 3.98339643734e-299
# expectedFpSum (0.16x): 0x48428c76f4d986cf 1.26235843623e+40
# http://babbage.cs.qc.cuny.edu/IEEE-754.old/64bit.html
# 10 digits of precision on the right..They both agree
# But the left is what the ieee fp representation is inside h2o (64-bit real)
#
# 0x0d9435c98b2bc489 2.9598664472e-243
# 0x0d9435c98b2bd40b 2.9598664472e-243
#
#
# With more precision you can see the difference
#
# hex: 0d9435c98b2bc489
# is: 2.9598664471952256e-243
#
# hex: 0d9435c98b2bd40b
# is: 2.9598664471972913e-243
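#
# Worked example of the ALLOWED_DELTA check below, using the pair above:
# 0x0d9435c98b2bd40b - 0x0d9435c98b2bc489 = 0xf82, which is well under 0x7fff,
# so those two results would be treated as "close enough".
#
# Hedged sketch (assumption; not called by the test): one way to view the 64-bit
# IEEE-754 bit pattern of a Python float, similar to what
# h2o_util.doubleToUnsignedLongLong is assumed to do here.
def _double_bits_example(fp=2.9598664471952256e-243):
    import struct
    # reinterpret the double's 8 bytes as an unsigned 64-bit integer and show as hex
    return "0x%0.16x" % struct.unpack('<Q', struct.pack('<d', fp))[0]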
#********************************************************************************
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE, sel):
# this only does the sum stuff for single cols right now
if colCount!=1:
raise Exception("only support colCount == 1 here right now %s", colCount)
NUM_CASES = h2o_util.fp_format()
if sel and (sel<0 or sel>=NUM_CASES):
raise Exception("sel used to select from possible fp formats is out of range: %s %s", (sel, NUM_CASES))
dsf = open(csvPathname, 'w')
expectedRange = (expectedMax - expectedMin)
expectedFpSum = float(0)
expectedUllSum = int(0)
for row in range(rowCount):
rowData = []
for j in range(colCount):
# Be Nasty!. We know fp compression varies per chunk
# so...adjust the random fp data, depending on what rows your are at
# i.e. cluster results per chunk, smaller variance within chunk, larger variance outside of chunk
# Actually: generate "different" data depending on where you are in the rows
method = row % CHUNKING_CNT
if method==1:
value = expectedMin + (random.random() * expectedRange)
elif method==2:
value = random.randint(1,1e6)
elif method==3:
value = 5555555555555 + row
else: # method == 0 and > 3
# value = row * 2
# bad sum
# value = 5555555555555 + row
# bad
# value = 555555555555 + row
# value = 55555555555 + row
# fail
# value = 5555555555 + row
# exp = random.randint(0,120)
# 50 bad?
# constrain the dynamic range of the numbers to be within IEEE-754 support
# without loss of precision when adding. Why do we care though?
# could h2o compress if values are outside that kind of dynamic range ?
# we want a big exponent?
# was
# exp = random.randint(40,71)
exp = random.randint(0,120)
# skip over the current bug around int boundaries?
# have a fixed base
value = random.random() + (2 ** exp)
# value = -1 * value
# value = 2e9 + row
# value = 3 * row
r = random.randint(0,4)
# 20% negative
if DO_NEGATIVE and r==0:
value = -1 * value
# print "%30s" % "expectedUllSum (0.16x):", "0x%0.16x" % expectedUllSum
# Now that you know how many decimals you want,
# say, 15, just use a rstrip("0") to get rid of the unnecessary 0s:
            # old bug was: can't rstrip if .16e is used because trailing +00 becomes +, which causes NA
# use a random fp format (string). use sel to force one you like
# only keeps it to formats with "e"
if RANDOM_E_FP_FORMATS:
# s = h2o_util.fp_format(value, sel=sel) # this is e/f/g formats for a particular sel within each group
# s = h2o_util.fp_format(value, sel=None) # this would be random
s = h2o_util.fp_format(value, sel=None, only='e') # this would be random, within 'e' only
else:
s = h2o_util.fp_format(value, sel=sel, only='e') # use same format for all numbers
# FIX! strip the trailing zeroes for now because they trigger a bug
if DO_BUG:
pass
else:
s = s.rstrip("0")
# now our string formatting will lead to different values when we parse and use it
# so we move the expected value generation down here..i.e after we've formatted the string
# we'll suck it back in as a fp number
# get the expected patterns from python
fpResult = float(s)
expectedUllSum ^= h2o_util.doubleToUnsignedLongLong(fpResult)
expectedFpSum += fpResult
# s = ("%.16e" % value)
rowData.append(s)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
# zero the upper 4 bits of xorsum like h2o does to prevent inf/nan
# print hex(~(0xf << 60))
expectedUllSum &= (~(0xf << 60))
return (expectedUllSum, expectedFpSum)
#********************************************************************************
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(java_heap_GB=4)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_xorsum2(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(ROWS, 1, 'r1', 0, 10, None),
]
for trial in range(3):
ullResultList = []
NUM_FORMAT_CASES = h2o_util.fp_format()
for (rowCount, colCount, hex_key, expectedMin, expectedMax, expected) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
# dynamic range of the data may be useful for estimating error
maxDelta = expectedMax - expectedMin
csvFilename = 'syn_real_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
print "Creating random", csvPathname
sel = random.randint(0, NUM_FORMAT_CASES-1)
(expectedUllSum, expectedFpSum) = write_syn_dataset(csvPathname,
rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE, sel)
expectedUllSumAsDouble = h2o_util.unsignedLongLongToDouble(expectedUllSum)
expectedFpSumAsLongLong = h2o_util.doubleToUnsignedLongLong(expectedFpSum)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=3000, retryDelaySecs=2)
inspect = h2o_cmd.runInspect(key=hex_key)
print "numRows:", inspect['numRows']
print "numCols:", inspect['numCols']
inspect = h2o_cmd.runInspect(key=hex_key, offset=-1)
print "inspect offset = -1:", h2o.dump_json(inspect)
# looking at the 8 bytes of bits for the h2o doubles
# xorsum will zero out the sign and exponent
for execExpr in exprList:
for repeate in range(3):
start = time.time()
(execResult, fpResult) = h2e.exec_expr(h2o.nodes[0], execExpr,
resultKey=None, timeoutSecs=300)
print 'exec took', time.time() - start, 'seconds'
print "execResult:", h2o.dump_json(execResult)
ullResult = h2o_util.doubleToUnsignedLongLong(fpResult)
ullResultList.append((ullResult, fpResult))
print "%30s" % "ullResult (0.16x):", "0x%0.16x %s" % (ullResult, fpResult)
print "%30s" % "expectedUllSum (0.16x):", "0x%0.16x %s" % (expectedUllSum, expectedUllSumAsDouble)
print "%30s" % "expectedFpSum (0.16x):", "0x%0.16x %s" % (expectedFpSumAsLongLong, expectedFpSum)
# allow diff of the lsb..either way. needed when integers are parsed
                        # okay for a couple of lsbs to be wrong, due to conversion from string
# ullResult (0.16x): 0x02c1a21f923cee96 2.15698793923e-295
# expectedUllSum (0.16x): 0x02c1a21f923cee97 2.15698793923e-295
# expectedFpSum (0.16x): 0x42f054af32b3c408 2.87294442126e+14
# ullResult and expectedUllSum are Q ints, (64-bit) so can subtract them.
# I guess we don't even care about sign, since we zero the first 4 bits (xorsum) to avoid nan/inf issues
if ullResult!=expectedUllSum and (abs(ullResult-expectedUllSum)>ALLOWED_DELTA):
emsg = "h2o didn't get the same xorsum as python. 0x%0.16x 0x%0.16x" % (ullResult, expectedUllSum)
if STOP_ON_ERROR:
raise Exception(emsg)
else:
print emsg
# print "%30s" % "hex(bitResult):", hex(ullResult)
h2o.check_sandbox_for_errors()
print "first result was from a sum. others are xorsum"
print "ullResultList:"
for ullResult, fpResult in ullResultList:
print "%30s" % "ullResult (0.16x):", "0x%0.16x %s" % (ullResult, fpResult)
print "%30s" % "expectedUllSum (0.16x):", "0x%0.16x %s" % (expectedUllSum, expectedUllSumAsDouble)
print "%30s" % "expectedFpSum (0.16x):", "0x%0.16x %s" % (expectedFpSumAsLongLong, expectedFpSum)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_GLM2_covtype_1.py
```python
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_exec as h2e
DO_POLL = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(java_heap_GB=4)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_1(self):
h2o.beta_features = True
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
hex_key = "covtype.hex"
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, schema='local', timeoutSecs=20)
print "Gratuitous use of frame splitting. result not used"
fs = h2o.nodes[0].frame_split(source=hex_key, ratios=0.75)
split0_key = fs['split_keys'][0]
split1_key = fs['split_keys'][1]
split0_row = fs['split_rows'][0]
split1_row = fs['split_rows'][1]
split0_ratio = fs['split_ratios'][0]
split1_ratio = fs['split_ratios'][1]
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = 54
modelKey = "GLMModel"
kwargs = {
# 'cols': x, # for 2
'response': 'C' + str(y+1), # for 2
'family': 'binomial',
# 'link': 'logit', # 2 doesn't support
'n_folds': 2,
'max_iter': max_iter,
'beta_epsilon': 1e-3,
'destination_key': modelKey
}
# maybe go back to simpler exec here. this was from when Exec failed unless this was used
execExpr="A.hex=%s" % parseResult['destination_key']
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
# class 1=1, all else 0
execExpr="A.hex[,%s]=(A.hex[,%s]>%s)" % (y+1, y+1, 1)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
aHack = {'destination_key': 'A.hex'}
timeoutSecs = 120
# L2
start = time.time()
kwargs.update({'alpha': 0, 'lambda': 0})
def completionHack(jobKey, modelKey):
if DO_POLL: # not needed
pass
else:
h2o_jobs.pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=300, retryDelaySecs=5)
# print "FIX! how do we get the GLM result"
params = {'_modelKey': modelKey}
a = h2o.nodes[0].completion_redirect(jsonRequest="2/GLMModelView.json", params=params)
# print "GLM result from completion_redirect:", h2o.dump_json(a)
glmFirstResult = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, noPoll=not DO_POLL, **kwargs)
completionHack(glmFirstResult['job_key'], modelKey)
print "glm (L2) end on ", csvPathname, 'took', time.time() - start, 'seconds'
## h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glmFirstResult = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, noPoll=not DO_POLL, **kwargs)
completionHack(glmFirstResult['job_key'], modelKey)
print "glm (Elastic) end on ", csvPathname, 'took', time.time() - start, 'seconds'
## h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
# L1
kwargs.update({'alpha': 1, 'lambda': 1e-4})
start = time.time()
glmFirstResult = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, noPoll=not DO_POLL, **kwargs)
completionHack(glmFirstResult['job_key'], modelKey)
print "glm (L1) end on ", csvPathname, 'took', time.time() - start, 'seconds'
## h2o_glm.simpleCheckGLM(self, glm, 13, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_GLM2_hhp107_01_browse.py
```python
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_import as h2i
import h2o_browse as h2b
argcaseList = [
{
'response': 106,
'family': 'gaussian',
'lambda': 1.0E-5,
'max_iter': 50,
'n_folds': 0,
'alpha': 1,
'beta_epsilon': 1.0E-4
},
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_hhp_107_01_browse(self):
h2o.beta_features = True
csvPathname = 'hhp_107_01.data.gz'
print "\n" + csvPathname
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put',
hex_key="hhp_107_01.data.hex", timeoutSecs=15, doSummary=False)
# pop open a browser on the cloud
# h2b.browseTheCloud()
trial = 0
for argcase in argcaseList:
print "\nTrial #", trial, "start"
kwargs = argcase
print 'response:', kwargs['response']
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, browseAlso=True, timeoutSecs=200, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "\nTrial #", trial
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_GLM2_lambda_search.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_import as h2i, h2o_exec as h2e
def define_params():
paramDict = {
'standardize': [None, 0,1],
'beta_epsilon': [None, 0.0001],
'family': [None, 'gaussian', 'binomial', 'poisson'],
'lambda': [0,1e-8,1e-4,1e-3],
'alpha': [0,0.8,0.75],
'ignored_cols': [1,'C1','1,2','C1,C2'],
'max_iter': [None, 10],
'higher_accuracy': [None, 0, 1],
'use_all_factor_levels': [None, 0, 1],
'lambda_search': [None, 0], # FIX! what if lambda is set when lambda_search=1
'tweedie_variance_power': [None, 0, 1],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_lambda_search(self):
h2o.beta_features = True
csvPathname = 'covtype/covtype.20k.data'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', hex_key="covtype.20k")
CLASS = 1
# make a binomial version
execExpr="B.hex=%s; B.hex[,%s]=(B.hex[,%s]==%s)" % ('covtype.20k', 54+1, 54+1, CLASS)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
paramDict = define_params()
for trial in range(8):
params = {}
colX = h2o_glm.pickRandGlmParams(paramDict, params)
# override choices with these
params = {
'response': 54,
'alpha': 0.1,
'max_iter': 8,
# 'lambda': 1e-4,
# 'lambda': 0,
'lambda': None,
'lambda_search': 1,
'n_folds': 1,
}
kwargs = params.copy()
if 'family' not in kwargs or kwargs['family']=='binomial':
bHack = {'destination_key': 'B.hex'}
else:
bHack = parseResult
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=bHack, **kwargs)
# pass the kwargs with all the params, so we know what we asked for!
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
h2o.check_sandbox_for_errors()
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_GLM2_many_cols_libsvm.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri = r1.randint(0,1)
if ri!=0: # don't include 0's
colNumber = j + 1
rowData.append(str(colNumber) + ":" + str(ri))
ri = r1.randint(0,1)
# output class goes first
rowData.insert(0, str(ri))
rowDataCsv = " ".join(rowData) # already all strings
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=10)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_GLM2_many_cols_libsvm(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 3000, 'cA', 300),
(100, 5000, 'cB', 500),
# too slow!
# (100, 10000, 'cC', 800),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.svm'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random libsvm:", csvPathname
write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
kwargs = {'response': y, 'max_iter': 2, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_KMeans_basic_fvec.py
```python
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_hosts, h2o_import as h2i, h2o_jobs
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud()
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_kmeans_benign(self):
h2o.beta_features = True # fvec
importFolderPath = "logreg"
csvFilename = "benign.csv"
hex_key = "benign.hex"
csvPathname = importFolderPath + "/" + csvFilename
# FIX! hex_key isn't working with Parse2 ? parseResult['destination_key'] not right?
print "\nStarting", csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, header=1,
timeoutSecs=180, doSummary=False)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\nStarting", csvFilename
expected = [
([8.86, 2.43, 35.53, 0.31, 13.22, 1.47, 1.33, 20.06, 13.08, 0.53, 2.12, 128.61, 35.33, 1.57], 49, None),
([33.47, 2.29, 50.92, 0.34, 12.82, 1.33, 1.36, 21.43, 13.30, 0.37, 2.52, 125.40, 43.91, 1.79], 87, None),
([27.64, 2.87, 48.11, 0.09, 11.80, 0.98, 1.51, 21.02, 12.53, 0.58, 2.89, 171.27, 42.73, 1.53], 55, None),
([26.00, 2.67, 46.67, 0.00, 13.00, 1.33, 1.67, 21.56, 11.44, 0.22, 2.89, 234.56, 39.22, 1.56], 9, None),
]
# all are multipliers of expected tuple value
allowedDelta = (0.01, 0.01, 0.01, 0.01)
# loop, to see if we get same centers
for trial in range(1):
kmeansSeed = random.randint(0, sys.maxint)
# kmeansSeed = 6655548259421773879
kwargs = {
'k': 4,
'initialization': 'PlusPlus',
'destination_key': 'benign_k.hex',
# 'seed': 265211114317615310,
'max_iter': 50,
'seed': kmeansSeed,
}
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=5, **kwargs)
## h2o.verboseprint("kmeans result:", h2o.dump_json(kmeans))
modelView = h2o.nodes[0].kmeans_view(model='benign_k.hex')
h2o.verboseprint("KMeans2ModelView:", h2o.dump_json(modelView))
model = modelView['model']
clusters = model['centers']
within_cluster_variances = model['within_cluster_variances']
total_within_SS = model['total_within_SS']
print "within_cluster_variances:", within_cluster_variances
print "total_within_SS:", total_within_SS
# make this fvec legal?
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
# h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, trial=0)
def test_kmeans_prostate(self):
h2o.beta_features = True # fvec
importFolderPath = "logreg"
csvFilename = "prostate.csv"
hex_key = "prostate.hex"
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, header=1, timeoutSecs=180)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\nStarting", csvFilename
# loop, to see if we get same centers
expected = [
([0.37,65.77,1.07,2.23,1.11,10.49,4.24,6.31], 215, 36955),
([0.36,66.44,1.09,2.21,1.06,10.84,34.16,6.31], 136, 46045),
([0.83,66.17,1.21,2.86,1.34,73.30,15.57,7.31], 29, 33412),
]
# all are multipliers of expected tuple value
allowedDelta = (0.01, 0.01, 0.01)
for trial in range(1):
# kmeansSeed = random.randint(0, sys.maxint)
# actually can get a slightly better error sum with a different seed
# this seed gets the same result as scikit
kmeansSeed = 6655548259421773879
kwargs = {
'ignored_cols': 'ID',
'k': 3,
# 'initialization': 'Furthest',
'initialization': 'PlusPlus',
'destination_key': 'prostate_k.hex',
'max_iter': 500,
'seed': kmeansSeed,
                # reuse the same seed, to get deterministic results (otherwise it sometimes fails)
# 'seed': 265211114317615310}
}
# for fvec only?
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=5, **kwargs)
# FIX! how do I get the kmeans result?
### print "kmeans result:", h2o.dump_json(kmeans)
# can't do this
# inspect = h2o_cmd.runInspect(key='prostate_k.hex')
modelView = h2o.nodes[0].kmeans_view(model='prostate_k.hex')
h2o.verboseprint("KMeans2ModelView:", h2o.dump_json(modelView))
model = modelView['model']
clusters = model['centers']
within_cluster_variances = model['within_cluster_variances']
total_within_SS = model['total_within_SS']
print "within_cluster_variances:", within_cluster_variances
print "total_within_SS:", total_within_SS
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, trial=trial)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_nothing.py
```python
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud()
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_nothing(self):
pass
# just want to see how long
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_single_jvm/test_parse_multi_header_rand_fvec.py
```python
import unittest, time, sys, random, os
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_browse as h2b, h2o_util
# ord('a') gives 97. Use that when you pass it as url param to h2o
# str(unichr(97)) gives 'a'
print "Data rows in header_from_file are ignored unless that file was part of the parse pattern"
print "Tab in header is not auto-detected separator. comma and space are"
print "Hmmm..if there's a header in the data files, it needs to have the same separator as the data?? otherwise parse chaos"
print "Maybe only if header_from_file is also used"
paramsDict = {
# don't worry about these variants in this test (just parse normal)
# 'parser_type': [None, 'AUTO', 'XLS', 'XLSX', 'SVMLight'],
# I suppose, libsvm could have strangeness!..but there is no header with libsvm?
# 'parser_type': [None, 'AUTO'],
# add the hive 1 separator a choice
'separator': ['\t', ',', ' ', str(unichr(1))],
'preview': [1],
# 'separator': ['\t', ',', ' '],
'hdr_separator': [' ', ',', 'same'],
'header': [None, 0,1],
# we can point to the 'wrong' file!
# assume this is always used, otherwise we sum data rows without knowing if we'll use the header file?
# always point to the header file..again, if we switch it around, the counts are off
# 'header_from_file': [None, 'header', 'data'],
# FIX! always specify it from 'header' for now
'header_from_file': ['header'],
}
# ability to selectively comment the first line (which may be data or header)
# Does not write an extra line as the comment first in that case
# (header can be picked from the comment line)
# H2O always just strips the comment from the first line? and continues parsing?
# Don't write headerString if None (for non-header files)
# Don't write data if rowCount is None
def write_syn_dataset(csvPathname, rowCount, headerString, rList, commentFirst=False, sepChar=','):
dsf = open(csvPathname, "w+")
if commentFirst:
h = "# random comment junk because we don't support commented headers yet"
dsf.write(h + "\n")
# UPDATE: comments are always ignored. So a commented header, commented data, is ignored.
headerRowsDone = 0
if headerString is not None:
# FIX: for now, always put the header in even if we're told to!
h = headerString
headerRowsDone += 1
dsf.write(h + "\n")
if rowCount is not None:
for i in range(rowCount):
# two choices on the input. Make output choices random
# do the two choices, in order to be able to do RF (cols have to have varying values)
r = rList[random.randint(0,1)] + sepChar + str(random.randint(0,7))
dsf.write(r + "\n")
dsf.close()
return (headerRowsDone, rowCount) # rows done
else:
return (headerRowsDone, 0) # rows done
def rand_rowData(colCount, sepChar):
# do one fp, one enum, the rest int
# assume colCount is at least 3
if colCount < 3:
raise Exception("Test expects desired colCount to be 3 or more", colCount)
randomReal = "%0.2f" % (random.random() * float(random.randint(1,1000)))
randomEnum = random.choice(['hello', 'there', 'how', 'are', 'you'])
rowData = [randomReal, randomEnum] + [random.randint(0,7) for i in range(colCount-2)]
rowData1= sepChar.join(map(str,rowData))
randomReal = "%0.2f" % (random.random() * float(random.randint(1,10000)))
randomEnum = random.choice(['hello2', '2there', '2how', '2are', '2you'])
rowData = [randomReal, randomEnum] + [random.randint(0,7) for i in range(colCount-2)]
rowData2= sepChar.join(map(str,rowData))
# RF will complain if all inputs are the same
r = [rowData1, rowData2]
return r
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(java_heap_GB=4,use_flatfile=True)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_multi_header_rand_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = "syn_ints.csv"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
allowedLetters = 'abcdeABCDE01234[]'
headerChoices = []
for n in range(500): # max # of cols below is 500
done = False
while not done:
l = random.randint(1,64) # random length headers
headerName = ''.join([random.choice(allowedLetters) for _ in range(l)])
# we keep trying if we already have that header name. Has to be unique.
done = headerName not in headerChoices
headerChoices.append(headerName)
tryList = [
(3, 5, 9, 'cA', 60, 0),
# (3, 5, 25, 'cA', 60, 0),
# (10, 100, 500, 'cA', 60, 0),
]
for trial in range(20):
(fileNum, rowCount, colCount, hex_key, timeoutSecs, dataRowsWithHeader) = random.choice(tryList)
print fileNum, rowCount, colCount, hex_key, timeoutSecs, dataRowsWithHeader
# FIX! should we add a header to them randomly???
print "Wait while", fileNum, "synthetic files are created in", SYNDATASETS_DIR
rowxcol = str(rowCount) + 'x' + str(colCount)
totalCols = colCount + 1 # 1 extra for output
totalDataRows = 0
totalHeaderRows = 0
# random selection of parse param choices
# HEADER_HAS_HDR_ROW = random.randint(0,1)
HEADER_HAS_HDR_ROW = 1
DATA_HAS_HDR_ROW = random.randint(0,1)
PARSE_PATTERN_INCLUDES_HEADER = random.randint(0,1)
# DATA_FIRST_IS_COMMENT = random.randint(0,1)
# HEADER_FIRST_IS_COMMENT = random.randint(0,1)
# FIX! doesn't seem to like just comment in the header file
DATA_FIRST_IS_COMMENT = 0
HEADER_FIRST_IS_COMMENT = 0
GZIP_DATA = random.randint(0,1)
GZIP_HEADER = random.randint(0,1)
SEP_CHAR_GEN = random.choice(paramsDict['separator'])
HEADER_SEP_CHAR_GEN = random.choice(paramsDict['hdr_separator'])
if HEADER_SEP_CHAR_GEN == 'same':
HEADER_SEP_CHAR_GEN = SEP_CHAR_GEN
# don't put a header in a data file with a different separator?
if DATA_HAS_HDR_ROW and HEADER_HAS_HDR_ROW:
HEADER_SEP_CHAR_GEN = SEP_CHAR_GEN
# Hack: if both data and header files have a header, then, just in case
# the header and data files should have the same separator
# if they don't, make header match data
if DATA_HAS_HDR_ROW and HEADER_HAS_HDR_ROW:
HEADER_SEP_CHAR_GEN = SEP_CHAR_GEN
# New for fvec? if separators are not the same, then the header separator needs to be comma
if HEADER_SEP_CHAR_GEN != SEP_CHAR_GEN:
HEADER_SEP_CHAR_GEN = ','
# screw it. make them always match
HEADER_SEP_CHAR_GEN = SEP_CHAR_GEN
if HEADER_SEP_CHAR_GEN in (',', ' '):
pass
# extra spaces? Don't add any
# if random.randint(0,1):
# HEADER_SEP_CHAR_GEN = " " + HEADER_SEP_CHAR_GEN
# if random.randint(0,1):
# HEADER_SEP_CHAR_GEN = HEADER_SEP_CHAR_GEN + " "
kwargs = {}
for k,v in paramsDict.items():
kwargs[k] = random.choice(v)
kwargs['separator'] = SEP_CHAR_GEN
# parse doesn't auto-detect tab. will autodetect space and comma
if SEP_CHAR_GEN==" " or SEP_CHAR_GEN==",":
del kwargs['separator']
else:
kwargs['separator'] = ord(SEP_CHAR_GEN)
# randomly add leading and trailing white space
# we have to do this after we save the single char HEADER_SEP_CHAR_GEN
if SEP_CHAR_GEN in (',', ' '):
if random.randint(0,1):
SEP_CHAR_GEN = " " + SEP_CHAR_GEN
if random.randint(0,1):
SEP_CHAR_GEN = SEP_CHAR_GEN + " "
print '\nHEADER_HAS_HDR_ROW:', HEADER_HAS_HDR_ROW
print 'DATA_HAS_HDR_ROW:', DATA_HAS_HDR_ROW
print 'PARSE_PATTERN_INCLUDES_HEADER', PARSE_PATTERN_INCLUDES_HEADER
print 'DATA_FIRST_IS_COMMENT:', DATA_FIRST_IS_COMMENT
print 'HEADER_FIRST_IS_COMMENT:', HEADER_FIRST_IS_COMMENT
print 'SEP_CHAR_GEN:', "->" + SEP_CHAR_GEN + "<-"
print 'HEADER_SEP_CHAR_GEN:', "->" + HEADER_SEP_CHAR_GEN + "<-"
print 'GZIP_DATA:', GZIP_DATA
print 'GZIP_HEADER:', GZIP_HEADER
# they need to both use the same separator (h2o rule)
# can't have duplicates
hfhList = random.sample(headerChoices, colCount) + ["output"]
# UPDATE: always use comma or space for header separator?? it should work no matter what
# separator the data uses?
headerForHeader = HEADER_SEP_CHAR_GEN.join(hfhList)
print "headerForHeader:", headerForHeader
# make these different
# hfdList = [random.choice(headerChoices) for h in range(colCount)] + ["output"]
# FIX! keep them the same for now to avoid some odd cases on what header gets used to RF
hfdList = hfhList
headerForData = SEP_CHAR_GEN.join(hfdList)
# create data files
for fileN in range(fileNum):
csvFilenameSuffix = str(fileN) + "_" + str(SEED) + "_" + str(trial) + "_" + rowxcol + '_csv'
csvFilename = 'syn_data_' + csvFilenameSuffix
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
rList = rand_rowData(colCount, sepChar=SEP_CHAR_GEN)
(headerRowsDone, dataRowsDone) = write_syn_dataset(csvPathname, rowCount,
headerString=(headerForData if DATA_HAS_HDR_ROW else None), rList=rList,
commentFirst=DATA_FIRST_IS_COMMENT, sepChar=SEP_CHAR_GEN)
totalDataRows += dataRowsDone
totalHeaderRows += headerRowsDone
if GZIP_DATA:
csvPathnamegz = csvPathname + ".gz"
print "gzipping to", csvPathnamegz
h2o_util.file_gzip(csvPathname, csvPathnamegz)
os.rename(csvPathname, SYNDATASETS_DIR + "/not_used_data_" + csvFilenameSuffix)
# pattern match should find the right key with csvPathname
# create the header file
hdrFilenameSuffix = str(SEED) + "_" + str(trial) + "_" + rowxcol + '_csv'
hdrFilename = 'syn_header_' + hdrFilenameSuffix
hdrPathname = SYNDATASETS_DIR + '/' + hdrFilename
# dataRowsWithHeader = 0 # temp hack
(headerRowsDone, dataRowsDone) = write_syn_dataset(hdrPathname, dataRowsWithHeader,
headerString=(headerForHeader if HEADER_HAS_HDR_ROW else None), rList=rList,
commentFirst=HEADER_FIRST_IS_COMMENT, sepChar=SEP_CHAR_GEN)
# only include header file data rows if the parse pattern includes it
if PARSE_PATTERN_INCLUDES_HEADER:
totalDataRows += dataRowsDone
totalHeaderRows += headerRowsDone
if GZIP_HEADER:
hdrPathnamegz = hdrPathname + ".gz"
print "gzipping to", hdrPathnamegz
h2o_util.file_gzip(hdrPathname, hdrPathnamegz)
os.rename(hdrPathname, SYNDATASETS_DIR + "/not_used_header_" + hdrFilenameSuffix)
# pattern match should find the right key with hdrPathnameh
# make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
hex_key = "syn_dst" + str(trial) + ".hex"
# DON"T get redirected to S3! (EC2 hack in config, remember!)
# use it at the node level directly (because we gen'ed the files.
# I suppose we could force the redirect state bits in h2o.nodes[0] to False, instead?:w
# put them, rather than using import files, so this works if remote h2o is used
# and python creates the files locally
fileList = os.listdir(SYNDATASETS_DIR)
for f in fileList:
h2i.import_only(path=SYNDATASETS_DIR + "/" + f, schema='put', noPrint=True)
h2o_cmd.runStoreView()
headerKey = h2i.find_key(hdrFilename)
dataKey = h2i.find_key(csvFilename)
# use regex. the only files in the dir will be the ones we just created
# with *fileN* match
print "Header Key =", headerKey
# put the right name in
if kwargs['header_from_file'] == 'header':
# do we need to add the .hex suffix we know h2o will append
kwargs['header_from_file'] = headerKey
# use one of the data files?
elif kwargs['header_from_file'] == 'data':
# do we need to add the .hex suffix we know h2o will append
kwargs['header_from_file'] = dataKey
# if there's no header in the header file, turn off the header_from_file
if not HEADER_HAS_HDR_ROW:
kwargs['header_from_file'] = None
if HEADER_HAS_HDR_ROW and (kwargs['header_from_file'] == headerKey):
ignoreForRf = hfhList[0]
elif DATA_HAS_HDR_ROW:
ignoreForRf = hfdList[0]
else:
ignoreForRf = None
print "If header_from_file= , required to force header=1 for h2o"
if kwargs['header_from_file']:
kwargs['header'] = 1
# if we have a header in a data file, tell h2o (for now)
elif DATA_HAS_HDR_ROW:
kwargs['header'] = 1
else:
kwargs['header'] = 0
# may have error if h2o doesn't get anything!
start = time.time()
if PARSE_PATTERN_INCLUDES_HEADER and HEADER_HAS_HDR_ROW:
pattern = 'syn_*'+str(trial)+"_"+rowxcol+'*'
else:
pattern = 'syn_data_*'+str(trial)+"_"+rowxcol+'*'
# don't pass to parse
kwargs.pop('hdr_separator', None)
parseResult = h2i.parse_only(pattern=pattern, hex_key=hex_key, timeoutSecs=timeoutSecs, **kwargs)
print "parseResult['destination_key']: " + parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# more reporting: (we can error here if extra col in header,
# causes all NA for missing col of data)
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
# should match # of cols in header or ??
self.assertEqual(inspect['numCols'], totalCols, \
"parse created result with the wrong number of cols %s %s" % (inspect['numCols'], totalCols))
# do we end up parsing one data rows as a header because of mismatch in gen/param
h2oLosesOneData = (headerRowsDone==0) and (kwargs['header']==1) and not DATA_HAS_HDR_ROW
# header in data file gets treated as data
h2oGainsOneData = (headerRowsDone!=0) and (kwargs['header']==1) and \
DATA_HAS_HDR_ROW and (kwargs['header_from_file'] is not None)
h2oGainsOneData = False
print "h2oLosesOneData:", h2oLosesOneData
print "h2oGainsOneData:", h2oGainsOneData
if h2oLosesOneData:
totalDataRows -= 1
if h2oGainsOneData:
totalDataRows += 1
if 1==0: # FIX! don't check for now
self.assertEqual(inspect['numRows'], totalDataRows,
"parse created result with the wrong number of rows h2o %s gen'ed: %s" % \
(inspect['numRows'], totalDataRows))
# put in an ignore param, that will fail unless headers were parsed correctly
# doesn't matter if the header got a comment, should see it
kwargs = {'sample': 100, 'depth': 25, 'ntree': 2, 'ignore': ignoreForRf}
start = time.time()
# h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=10, **kwargs)
elapsed = time.time() - start
print "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
print "trial #", trial, "totalDataRows:", totalDataRows, "parse end on ", csvFilename, \
'took', time.time() - start, 'seconds'
h2o.check_sandbox_for_errors()
h2i.delete_keys_at_all_nodes(pattern='syn_datasets')
if __name__ == '__main__':
h2o.unit_main()
```
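The test above exercises H2O's `header_from_file` path: column names live in a separate header file and are attached to headerless data files at parse time. A minimal sketch of the same idea with pandas (tiny made-up files; pandas has no `header_from_file` argument, so the header file is read by hand):

```python
import pandas as pd

# Miniature stand-ins for the generated header and data files.
with open('syn_header.csv', 'w') as f:
    f.write('colA,colB,output\n')
with open('syn_data.csv', 'w') as f:
    f.write('0.42,hello,5\n0.17,there,1\n')

with open('syn_header.csv') as f:
    column_names = f.readline().strip().split(',')

# header=None: the data file carries no header row of its own;
# names= supplies the columns read from the separate header file.
df = pd.read_csv('syn_data.csv', sep=',', header=None, names=column_names)
print(df.shape, list(df.columns))
```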
#### File: py/testdir_single_jvm/test_rf_enum_mappings_fvec.py
```python
import unittest, random, sys, time, re, math
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_rf, h2o_util, h2o_gbm
SPEEDRF = False
MULTINOMIAL = 2
DO_WITH_INT = True
ENUMS = 3
ENUMLIST = ['bacaa', 'cbcbcacd', 'dccdbda', 'efg', 'hij', 'jkl']
# use randChars for the random chars to use
def random_enum(randChars, maxEnumSize):
choiceStr = randChars
r = ''.join(random.choice(choiceStr) for x in range(maxEnumSize))
return r
def create_enum_list(randChars="abcd", maxEnumSize=8, listSize=10):
if DO_WITH_INT:
enumList = range(listSize)
else:
if ENUMLIST:
enumList = ENUMLIST
else:
enumList = [random_enum(randChars, random.randint(2,maxEnumSize)) for i in range(listSize)]
return enumList
def write_syn_dataset(csvPathname, enumList, rowCount, colCount=1,
colSepChar=",", rowSepChar="\n", SEED=12345678):
# always re-init with the same seed. that way the sequence of random choices from the enum list should stay the same
# for each call? But the enum list is randomized
robj = random.Random(SEED)
dsf = open(csvPathname, "w+")
for row in range(rowCount):
rowData = []
# keep a list of the indices used..return that for comparing multiple datasets
rowIndex = []
# keep a sum of all the index mappings for the enum chosen (for the features in a row)
riIndexSum = 0
for col in range(colCount):
riIndex = robj.randint(0, len(enumList)-1)
rowData.append(enumList[riIndex])
rowIndex.append(riIndex)
riIndexSum += riIndex
# output column
# make the output column match odd/even row mappings.
# change...make it 1 if the sum of the enumList indices used is odd
ri = riIndexSum % MULTINOMIAL
rowData.append(ri)
rowDataCsv = colSepChar.join(map(str,rowData)) + rowSepChar
dsf.write(rowDataCsv)
dsf.close()
rowIndexCsv = colSepChar.join(map(str,rowIndex)) + rowSepChar
return rowIndexCsv # last line as index
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_rf_enums_mappings_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
n = 3000
tryList = [
# (n, 1, 'cD', 300),
# (n, 2, 'cE', 300),
# (n, 3, 'cF', 300),
# (n, 4, 'cG', 300),
# (n, 5, 'cH', 300),
# (n, 6, 'cI', 300),
(n, 3, 'cI', 300),
(n, 3, 'cI', 300),
(n, 3, 'cI', 300),
]
# SEED_FOR_TRAIN = random.randint(0, sys.maxint)
SEED_FOR_TRAIN = 1234567890
SEED_FOR_SCORE = 9876543210
errorHistory = []
enumHistory = []
lastcolsTrainHistory = []
lastcolsScoreHistory = []
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
enumList = create_enum_list(listSize=ENUMS)
# reverse the list
enumList.reverse()
# using the comma is nice to ensure no craziness
colSepHexString = '2c' # comma
colSepChar = colSepHexString.decode('hex')
colSepInt = int(colSepHexString, base=16)
print "colSepChar:", colSepChar
rowSepHexString = '0a' # newline
rowSepChar = rowSepHexString.decode('hex')
print "rowSepChar:", rowSepChar
csvFilename = 'syn_enums_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
csvScoreFilename = 'syn_enums_score_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvScorePathname = SYNDATASETS_DIR + '/' + csvScoreFilename
# use same enum List
enumListForScore = enumList
print "Creating random", csvPathname, "for rf model building"
lastcols = write_syn_dataset(csvPathname, enumList, rowCount, colCount,
colSepChar=colSepChar, rowSepChar=rowSepChar, SEED=SEED_FOR_TRAIN)
lastcolsTrainHistory.append(lastcols)
print "Creating random", csvScorePathname, "for rf scoring with prior model (using same enum list)"
# same enum list/mapping, but different dataset?
lastcols = write_syn_dataset(csvScorePathname, enumListForScore, rowCount, colCount,
colSepChar=colSepChar, rowSepChar=rowSepChar, SEED=SEED_FOR_SCORE)
lastcolsScoreHistory.append(lastcols)
scoreDataKey = "score_" + hex_key
parseResult = h2i.import_parse(path=csvScorePathname, schema='put', hex_key=scoreDataKey,
timeoutSecs=30, separator=colSepInt)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=30, separator=colSepInt)
print "Parse result['destination_key']:", parseResult['destination_key']
print "\n" + csvFilename
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=True)
y = colCount
modelKey = 'enums'
# limit depth and number of trees to accentuate the issue with categorical split decisions
if SPEEDRF:
kwargs = {
'destination_key': modelKey,
'response': y,
'ntrees': 1,
'max_depth': 100,
'oobee': 1,
'seed': 123456789,
}
else:
kwargs = {
'destination_key': modelKey,
'response': y,
'classification': 1,
'ntrees': 1,
'max_depth': 100,
'min_rows': 1,
'validation': scoreDataKey,
'seed': 123456789,
}
for r in range(4):
start = time.time()
if SPEEDRF:
rfResult = h2o_cmd.runSpeeDRF(parseResult=parseResult,
timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
else:
rfResult = h2o_cmd.runRF(parseResult=parseResult,
timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
print "rf end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds'
# print h2o.dump_json(rfResult)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfResult)
h2o_cmd.runScore(dataKey=scoreDataKey, modelKey=modelKey, vactual=y, vpredict=1, doAUC=not MULTINOMIAL) # , expectedAuc=0.5)
errorHistory.append(classification_error)
enumHistory.append(enumList)
print "error from all runs on this dataset (with different enum mappings)"
print errorHistory
for e in enumHistory:
print e
print "last row from all train datasets, as integer"
for l in lastcolsTrainHistory:
print l
print "last row from all score datasets, as integer"
for l in lastcolsScoreHistory:
print l
if __name__ == '__main__':
h2o.unit_main()
```
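The test above generates the same synthetic categorical data several times, reverses the enum list between runs, and records the one-tree random forest error for each mapping; the different integer codes assigned to the categories should not change the result. A rough scikit-learn analogue is sketched below. Note that scikit-learn treats the codes as ordinary numbers, so this only illustrates the relabelling bookkeeping, not H2O's categorical split handling; all parameters here are made up.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
n, cols, n_levels = 3000, 3, 3

# Category indices and a label that depends only on their sum, as in the test above.
idx = rng.randint(0, n_levels, size=(n, cols))
y = idx.sum(axis=1) % 2

def error_with_mapping(mapping):
    # Re-code each category index through the given mapping before training.
    X = np.vectorize(mapping.get)(idx)
    clf = RandomForestClassifier(n_estimators=1, max_depth=100, random_state=123)
    clf.fit(X, y)
    return 1.0 - clf.score(X, y)

print(error_with_mapping({0: 0, 1: 1, 2: 2}))   # forward mapping
print(error_with_mapping({0: 2, 1: 1, 2: 0}))   # reversed mapping
```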
#### File: py/testdir_single_jvm/test_speedrf_grid2.py
```python
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_jobs
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def notest_RF_iris2(self):
h2o.beta_features = True
trees = ",".join(map(str,range(1,4)))
timeoutSecs = 20
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
h2o_cmd.runSpeeDRF(parseResult=parseResult, ntrees=trees, timeoutSecs=timeoutSecs)
def test_RF_poker100(self):
MISSING_RESPONSE = False
DO_MODEL_INSPECT = False
trees = ",".join(map(str,range(10,50,2)))
timeoutSecs = 20
csvPathname = 'poker/poker100'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
jobs = []
for i in range(1):
if MISSING_RESPONSE:
rfResult = h2o_cmd.runSpeeDRF(parseResult=parseResult, ntrees=trees, timeoutSecs=timeoutSecs)
else:
rfResult = h2o_cmd.runSpeeDRF(parseResult=parseResult, response='C11', ntrees=trees, timeoutSecs=timeoutSecs)
job_key = rfResult['job_key']
model_key = rfResult['destination_key']
jobs.append( (job_key, model_key) )
h2o_jobs.pollWaitJobs(timeoutSecs=300)
for job_key, model_key in jobs:
gridResult = h2o.nodes[0].speedrf_grid_view(job_key=job_key, destination_key=model_key)
print "speedrf grid result for %s:", h2o.dump_json(gridResult)
print "speedrf grid result errors:", gridResult['prediction_errors']
for i,j in enumerate(gridResult['jobs']):
if DO_MODEL_INSPECT:
print "\nspeedrf result %s:" % i, h2o.dump_json(h2o_cmd.runInspect(key=j['destination_key']))
else:
# model = h2o.nodes[0].speedrf_view(modelKey=j['destination_key'])
model = h2o.nodes[0].speedrf_view(modelKey=j['destination_key'])
print "model:", h2o.dump_json(model)
# h2o_rf.showRFGridResults(GBMResult, 15)
def notest_GenParity1(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
parityPl = h2o.find_file('syn_scripts/parity.pl')
# two row dataset gets this. Avoiding it for now
# java.lang.ArrayIndexOutOfBoundsException: 1
# at hex.rf.Data.sample_fair(Data.java:149)
# always match the run below!
print "\nAssuming two row dataset is illegal. avoiding"
for x in xrange(10,20,10):
shCmdString = "perl " + parityPl + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split())
# algorithm for creating the path and filename is hardwired in parity.pl.
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
trees = "1,2,3,4,5,6"
timeoutSecs = 20
# always match the gen above!
# FIX! we fail if min is 3
for x in xrange(10,20,10):
sys.stdout.write('.')
sys.stdout.flush()
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='put')
h2o_cmd.runSpeeDRF(parseResult=parseResult, response=8, ntrees=trees, timeoutSecs=timeoutSecs)
timeoutSecs += 2
if __name__ == '__main__':
h2o.unit_main()
```
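`test_RF_poker100` above asks SpeeDRF to sweep `ntrees` over `range(10, 50, 2)` in a single grid job and then walks the per-job prediction errors in the grid view. Written out longhand against scikit-learn, the same sweep might look like the sketch below (the dataset is a random stand-in, not the poker data):

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X = rng.randint(0, 13, size=(100, 10))
y = rng.randint(0, 10, size=100)

errors = {}
for ntrees in range(10, 50, 2):
    clf = RandomForestClassifier(n_estimators=ntrees, random_state=0).fit(X, y)
    errors[ntrees] = 1.0 - clf.score(X, y)

# Analogous to reading gridResult['prediction_errors'] for each grid job.
for ntrees, err in sorted(errors.items()):
    print(ntrees, round(err, 3))
```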
#### File: py/testdir_single_jvm/test_summary2_uniform_int.py
```python
import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_util, h2o_print as h2p
import h2o_summ
print "Like test_summary_uniform, but with integers only"
DO_MEDIAN = False
DO_TRY_SCIPY = False
if getpass.getuser()=='kevin' or getpass.getuser()=='jenkins':
DO_TRY_SCIPY = True
MAX_QBINS = 1
MAX_QBINS = 1000000
DO_REAL = False
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
expectedRange = (expectedMax - expectedMin)
for i in range(rowCount):
rowData = []
if DO_REAL:
ri = expectedMin + (random.random() * expectedRange)
else:
ri = random.randint(expectedMin,expectedMax)
for j in range(colCount):
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud()
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_uniform_int(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# colname, (min, 25th, 50th, 75th, max)
(1000000, 1, 'B.hex', 1, 1000, ('C1', 1.0, 250.0, 500.0, 750.0, 1000.0)),
(1000000, 1, 'x.hex', 0, 1000000000, ('C1', 0.0, 250000000.0, 500000000.0, 750000000.0, 1000000000.0)),
(1000000, 1, 'x.hex', 1, 20000, ('C1', 1.0, 5000.0, 10000.0, 15000.0, 20000.0)),
(1000000, 1, 'x.hex', -5000, 0, ('C1', -5000.00, -3750.0, -2500.0, -1250.0, 0)),
(1000000, 1, 'x.hex', -100000, 100000, ('C1', -100000.0, -50000.0, 0, 50000.0, 100000.0)),
# (1000000, 1, 'A.hex', 1, 101, ('C1', 1.0, 26.00, 51.00, 76.00, 101.0)),
# (1000000, 1, 'A.hex', -99, 99, ('C1', -99, -49.0, 0, 49.00, 99)),
(1000000, 1, 'B.hex', 1, 10000, ('C1', 1.0, 2501.0, 5001.0, 7501.0, 10000.0)),
(1000000, 1, 'B.hex', -100, 100, ('C1', -100.0, -50.0, 0.0, 50.0, 100.0)),
(1000000, 1, 'C.hex', 1, 100000, ('C1', 1.0, 25001.0, 50001.0, 75001.0, 100000.0)),
# (1000000, 1, 'C.hex', -101, 101, ('C1', -101, -51, -1, 49.0, 100.0)),
]
if not DO_REAL:
# only 3 integer values!
tryList.append(\
(1000000, 1, 'x.hex', -1, 1, ('C1', -1.0, -1, 0.000, 1, 1.00)) \
)
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
timeoutSecs = 60
for (rowCount, colCount, hex_key, expectedMin, expectedMax, expected) in tryList:
# max error = half the bin size?
maxDelta = ((expectedMax - expectedMin)/(MAX_QBINS + 0.0))
# add 5% for fp errors?
maxDelta = 1.05 * maxDelta
# also need to add some variance due to random distribution?
# maybe a percentage of the mean
distMean = (expectedMax - expectedMin) / 2
maxShift = distMean * .01
maxDelta = maxDelta + maxShift
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE)
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=60, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
# only one column
column = summaryResult['summaries'][0]
colname = column['colname']
self.assertEqual(colname, expected[0])
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
# FIX! we should compare mean and sd to expected?
mean = stats['mean']
sd = stats['sd']
print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
h2o_util.assertApproxEqual(mins[0], expected[1], tol=maxDelta, msg='min is not approx. expected')
h2o_util.assertApproxEqual(maxs[0], expected[5], tol=maxDelta, msg='max is not approx. expected')
pct = stats['pct']
# the thresholds h2o used, should match what we expected
expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]
pctile = stats['pctile']
h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxDelta, msg='25th percentile is not approx. expected')
h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxDelta, msg='50th percentile (median) is not approx. expected')
h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxDelta, msg='75th percentile is not approx. expected')
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
print "pct:", pct
print "hcnt:", hcnt
print "len(hcnt)", len(hcnt)
# don't check the last bin
for b in hcnt[1:-1]:
# should we be able to check for a uniform distribution in the files?
e = numRows/len(hcnt) # expect 21 thresholds, so 20 bins. each 5% of rows (uniform distribution)
# don't check the edge bins
self.assertAlmostEqual(b, rowCount/len(hcnt), delta=.01*rowCount,
msg="Bins not right. b: %s e: %s" % (b, e))
pt = h2o_util.twoDecimals(pctile)
mx = h2o_util.twoDecimals(maxs)
mn = h2o_util.twoDecimals(mins)
print "colname:", colname, "pctile (2 places):", pt
print "colname:", colname, "maxs: (2 places):", mx
print "colname:", colname, "mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
compareActual = mn[0], pt[3], pt[5], pt[7], mx[0]
h2p.green_print("min/25/50/75/max colname:", colname, "(2 places):", compareActual)
print "maxs colname:", colname, "(2 places):", mx
print "mins colname:", colname, "(2 places):", mn
trial += 1
scipyCol = 0
if colname!='' and expected[scipyCol]:
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
# h2oQuantilesApprox=qresult_single,
# h2oQuantilesExact=qresult,
)
if __name__ == '__main__':
h2o.unit_main()
```
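The tolerance used above (`maxDelta`, roughly one quantile bin widened by 5% plus a 1% shift of half the range) is the fiddly part of this test. The same check, reduced to numpy on the first `tryList` row, looks like this; the expected quantiles are the ones listed in the test, not values taken from an H2O run:

```python
import numpy as np

MAX_QBINS = 1000000
expected_min, expected_max = 1, 1000              # first tryList row
expected_pctiles = {25: 250.0, 50: 500.0, 75: 750.0}

rng = np.random.RandomState(12345678)
data = rng.randint(expected_min, expected_max + 1, size=1000000)

# Same tolerance construction as the test above.
max_delta = 1.05 * (expected_max - expected_min) / MAX_QBINS
max_delta += 0.01 * (expected_max - expected_min) / 2

for q, expected in expected_pctiles.items():
    actual = np.percentile(data, q)
    assert abs(actual - expected) <= max_delta, (q, actual, expected)
print("quantiles within tolerance")
```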
|
{
"source": "JeffreyCA/Skribbl-RNN",
"score": 2
}
|
#### File: JeffreyCA/Skribbl-RNN/sketch_rnn_train.py
```python
import json
import os
import time
import zipfile
import model as sketch_rnn_model
import utils
import numpy as np
import requests
import six
from six.moves.urllib.request import urlretrieve
import tensorflow.compat.v1 as tf
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'data_dir',
'https://github.com/hardmaru/sketch-rnn-datasets/raw/master/aaron_sheep',
'The directory in which to find the dataset specified in model hparams. '
'If data_dir starts with "http://" or "https://", the file will be fetched '
'remotely.')
tf.app.flags.DEFINE_string(
'log_root', '/tmp/sketch_rnn/models/default',
'Directory to store model checkpoints, tensorboard.')
tf.app.flags.DEFINE_boolean('resume_training', False,
'Set to true to load previous checkpoint')
tf.app.flags.DEFINE_string(
'hparams', '', 'Pass in comma-separated key=value pairs such as '
'\'save_every=40,decay_rate=0.99\' '
'(no whitespace) to be read into the HParams object defined in model.py')
PRETRAINED_MODELS_URL = ('http://download.magenta.tensorflow.org/models/'
'sketch_rnn.zip')
def reset_graph():
"""Closes the current default session and resets the graph."""
sess = tf.get_default_session()
if sess:
sess.close()
tf.reset_default_graph()
def load_env(data_dir, model_dir):
"""Loads environment for inference mode, used in jupyter notebook."""
model_params = sketch_rnn_model.get_default_hparams()
with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
model_params.parse_json(f.read())
return load_dataset(data_dir, model_params, inference_mode=True)
def load_model(model_dir):
"""Loads model for inference mode, used in jupyter notebook."""
model_params = sketch_rnn_model.get_default_hparams()
with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:
model_params.parse_json(f.read())
model_params.batch_size = 1 # only sample one at a time
eval_model_params = sketch_rnn_model.copy_hparams(model_params)
eval_model_params.use_input_dropout = 0
eval_model_params.use_recurrent_dropout = 0
eval_model_params.use_output_dropout = 0
eval_model_params.is_training = 0
sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)
sample_model_params.max_seq_len = 1 # sample one point at a time
return [model_params, eval_model_params, sample_model_params]
def download_pretrained_models(models_root_dir='/tmp/sketch_rnn/models',
pretrained_models_url=PRETRAINED_MODELS_URL):
"""Download pretrained models to a temporary directory."""
tf.gfile.MakeDirs(models_root_dir)
zip_path = os.path.join(models_root_dir,
os.path.basename(pretrained_models_url))
if os.path.isfile(zip_path):
tf.logging.info('%s already exists, using cached copy', zip_path)
else:
tf.logging.info('Downloading pretrained models from %s...',
pretrained_models_url)
urlretrieve(pretrained_models_url, zip_path)
tf.logging.info('Download complete.')
tf.logging.info('Unzipping %s...', zip_path)
with zipfile.ZipFile(zip_path) as models_zip:
models_zip.extractall(models_root_dir)
tf.logging.info('Unzipping complete.')
def load_dataset(data_dir, model_params, inference_mode=False):
"""Loads the .npz file, and splits the set into train/valid/test."""
# normalizes the x and y columns using the training set.
# applies same scaling factor to valid and test set.
if isinstance(model_params.data_set, list):
datasets = model_params.data_set
else:
datasets = [model_params.data_set]
train_strokes = None
valid_strokes = None
test_strokes = None
for dataset in datasets:
if data_dir.startswith('http://') or data_dir.startswith('https://'):
data_filepath = '/'.join([data_dir, dataset])
tf.logging.info('Downloading %s', data_filepath)
response = requests.get(data_filepath)
data = np.load(six.BytesIO(response.content), encoding='latin1')
else:
data_filepath = os.path.join(data_dir, dataset)
data = np.load(data_filepath, encoding='latin1', allow_pickle=True)
tf.logging.info('Loaded {}/{}/{} from {}'.format(
len(data['train']), len(data['valid']), len(data['test']),
dataset))
if train_strokes is None:
train_strokes = data['train']
valid_strokes = data['valid']
test_strokes = data['test']
else:
train_strokes = np.concatenate((train_strokes, data['train']))
valid_strokes = np.concatenate((valid_strokes, data['valid']))
test_strokes = np.concatenate((test_strokes, data['test']))
all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))
num_points = 0
for stroke in all_strokes:
num_points += len(stroke)
avg_len = num_points / len(all_strokes)
tf.logging.info('Dataset combined: {} ({}/{}/{}), avg len {}'.format(
len(all_strokes), len(train_strokes), len(valid_strokes),
len(test_strokes), int(avg_len)))
# calculate the max strokes we need.
max_seq_len = utils.get_max_len(all_strokes)
# overwrite the hps with this calculation.
model_params.max_seq_len = max_seq_len
tf.logging.info('model_params.max_seq_len %i.', model_params.max_seq_len)
eval_model_params = sketch_rnn_model.copy_hparams(model_params)
eval_model_params.use_input_dropout = 0
eval_model_params.use_recurrent_dropout = 0
eval_model_params.use_output_dropout = 0
eval_model_params.is_training = 1
if inference_mode:
eval_model_params.batch_size = 1
eval_model_params.is_training = 0
sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)
sample_model_params.batch_size = 1 # only sample one at a time
sample_model_params.max_seq_len = 1 # sample one point at a time
train_set = utils.DataLoader(
train_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=model_params.random_scale_factor,
augment_stroke_prob=model_params.augment_stroke_prob)
normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()
train_set.normalize(normalizing_scale_factor)
valid_set = utils.DataLoader(valid_strokes,
eval_model_params.batch_size,
max_seq_length=eval_model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
valid_set.normalize(normalizing_scale_factor)
test_set = utils.DataLoader(test_strokes,
eval_model_params.batch_size,
max_seq_length=eval_model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
test_set.normalize(normalizing_scale_factor)
tf.logging.info('normalizing_scale_factor %4.4f.',
normalizing_scale_factor)
result = [
train_set, valid_set, test_set, model_params, eval_model_params,
sample_model_params
]
return result
def evaluate_model(sess, model, data_set):
"""Returns the average weighted cost, reconstruction cost and KL cost."""
total_cost = 0.0
total_r_cost = 0.0
total_kl_cost = 0.0
for batch in range(data_set.num_batches):
unused_orig_x, x, s = data_set.get_batch(batch)
feed = {model.input_data: x, model.sequence_lengths: s}
(cost, r_cost,
kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)
total_cost += cost
total_r_cost += r_cost
total_kl_cost += kl_cost
total_cost /= (data_set.num_batches)
total_r_cost /= (data_set.num_batches)
total_kl_cost /= (data_set.num_batches)
return (total_cost, total_r_cost, total_kl_cost)
def load_checkpoint(sess, checkpoint_path):
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
tf.logging.info('Loading model %s.', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
def save_model(sess, model_save_path, global_step):
saver = tf.train.Saver(tf.global_variables())
checkpoint_path = os.path.join(model_save_path, 'vector')
tf.logging.info('saving model %s.', checkpoint_path)
tf.logging.info('global_step %i.', global_step)
saver.save(sess, checkpoint_path, global_step=global_step)
def train(sess, model, eval_model, train_set, valid_set, test_set):
"""Train a sketch-rnn model."""
# Setup summary writer.
summary_writer = tf.summary.FileWriter(FLAGS.log_root)
# Calculate trainable params.
t_vars = tf.trainable_variables()
count_t_vars = 0
for var in t_vars:
num_param = np.prod(var.get_shape().as_list())
count_t_vars += num_param
tf.logging.info('%s %s %i', var.name, str(var.get_shape()), num_param)
tf.logging.info('Total trainable variables %i.', count_t_vars)
model_summ = tf.summary.Summary()
model_summ.value.add(tag='Num_Trainable_Params',
simple_value=float(count_t_vars))
summary_writer.add_summary(model_summ, 0)
summary_writer.flush()
# setup eval stats
best_valid_cost = 100000000.0 # set a large init value
valid_cost = 0.0
# main train loop
hps = model.hps
start = time.time()
for _ in range(hps.num_steps):
step = sess.run(model.global_step)
curr_learning_rate = ((hps.learning_rate - hps.min_learning_rate) *
(hps.decay_rate)**step + hps.min_learning_rate)
curr_kl_weight = (hps.kl_weight -
(hps.kl_weight - hps.kl_weight_start) *
(hps.kl_decay_rate)**step)
_, x, s = train_set.random_batch()
feed = {
model.input_data: x,
model.sequence_lengths: s,
model.lr: curr_learning_rate,
model.kl_weight: curr_kl_weight
}
(train_cost, r_cost, kl_cost, _, train_step, _) = sess.run([
model.cost, model.r_cost, model.kl_cost, model.final_state,
model.global_step, model.train_op
], feed)
if step % 20 == 0 and step > 0:
end = time.time()
time_taken = end - start
cost_summ = tf.summary.Summary()
cost_summ.value.add(tag='Train_Cost',
simple_value=float(train_cost))
reconstr_summ = tf.summary.Summary()
reconstr_summ.value.add(tag='Train_Reconstr_Cost',
simple_value=float(r_cost))
kl_summ = tf.summary.Summary()
kl_summ.value.add(tag='Train_KL_Cost', simple_value=float(kl_cost))
lr_summ = tf.summary.Summary()
lr_summ.value.add(tag='Learning_Rate',
simple_value=float(curr_learning_rate))
kl_weight_summ = tf.summary.Summary()
kl_weight_summ.value.add(tag='KL_Weight',
simple_value=float(curr_kl_weight))
time_summ = tf.summary.Summary()
time_summ.value.add(tag='Time_Taken_Train',
simple_value=float(time_taken))
output_format = ('step: %d, lr: %.6f, klw: %0.4f, cost: %.4f, '
'recon: %.4f, kl: %.4f, train_time_taken: %.4f')
output_values = (step, curr_learning_rate, curr_kl_weight,
train_cost, r_cost, kl_cost, time_taken)
output_log = output_format % output_values
tf.logging.info(output_log)
summary_writer.add_summary(cost_summ, train_step)
summary_writer.add_summary(reconstr_summ, train_step)
summary_writer.add_summary(kl_summ, train_step)
summary_writer.add_summary(lr_summ, train_step)
summary_writer.add_summary(kl_weight_summ, train_step)
summary_writer.add_summary(time_summ, train_step)
summary_writer.flush()
start = time.time()
if step % hps.save_every == 0 and step > 0:
(valid_cost, valid_r_cost,
valid_kl_cost) = evaluate_model(sess, eval_model, valid_set)
end = time.time()
time_taken_valid = end - start
start = time.time()
valid_cost_summ = tf.summary.Summary()
valid_cost_summ.value.add(tag='Valid_Cost',
simple_value=float(valid_cost))
valid_reconstr_summ = tf.summary.Summary()
valid_reconstr_summ.value.add(tag='Valid_Reconstr_Cost',
simple_value=float(valid_r_cost))
valid_kl_summ = tf.summary.Summary()
valid_kl_summ.value.add(tag='Valid_KL_Cost',
simple_value=float(valid_kl_cost))
valid_time_summ = tf.summary.Summary()
valid_time_summ.value.add(tag='Time_Taken_Valid',
simple_value=float(time_taken_valid))
output_format = (
'best_valid_cost: %0.4f, valid_cost: %.4f, valid_recon: '
'%.4f, valid_kl: %.4f, valid_time_taken: %.4f')
output_values = (min(best_valid_cost, valid_cost), valid_cost,
valid_r_cost, valid_kl_cost, time_taken_valid)
output_log = output_format % output_values
tf.logging.info(output_log)
summary_writer.add_summary(valid_cost_summ, train_step)
summary_writer.add_summary(valid_reconstr_summ, train_step)
summary_writer.add_summary(valid_kl_summ, train_step)
summary_writer.add_summary(valid_time_summ, train_step)
summary_writer.flush()
if valid_cost < best_valid_cost:
best_valid_cost = valid_cost
save_model(sess, FLAGS.log_root, step)
end = time.time()
time_taken_save = end - start
start = time.time()
tf.logging.info('time_taken_save %4.4f.', time_taken_save)
best_valid_cost_summ = tf.summary.Summary()
best_valid_cost_summ.value.add(
tag='Best_Valid_Cost', simple_value=float(best_valid_cost))
summary_writer.add_summary(best_valid_cost_summ, train_step)
summary_writer.flush()
(eval_cost, eval_r_cost,
eval_kl_cost) = evaluate_model(sess, eval_model, test_set)
end = time.time()
time_taken_eval = end - start
start = time.time()
eval_cost_summ = tf.summary.Summary()
eval_cost_summ.value.add(tag='Eval_Cost',
simple_value=float(eval_cost))
eval_reconstr_summ = tf.summary.Summary()
eval_reconstr_summ.value.add(tag='Eval_Reconstr_Cost',
simple_value=float(eval_r_cost))
eval_kl_summ = tf.summary.Summary()
eval_kl_summ.value.add(tag='Eval_KL_Cost',
simple_value=float(eval_kl_cost))
eval_time_summ = tf.summary.Summary()
eval_time_summ.value.add(tag='Time_Taken_Eval',
simple_value=float(time_taken_eval))
output_format = ('eval_cost: %.4f, eval_recon: %.4f, '
'eval_kl: %.4f, eval_time_taken: %.4f')
output_values = (eval_cost, eval_r_cost, eval_kl_cost,
time_taken_eval)
output_log = output_format % output_values
tf.logging.info(output_log)
summary_writer.add_summary(eval_cost_summ, train_step)
summary_writer.add_summary(eval_reconstr_summ, train_step)
summary_writer.add_summary(eval_kl_summ, train_step)
summary_writer.add_summary(eval_time_summ, train_step)
summary_writer.flush()
def trainer(model_params):
"""Train a sketch-rnn model."""
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
tf.logging.info('sketch-rnn')
tf.logging.info('Hyperparams:')
tf.logging.info('Loading data files.')
datasets = load_dataset(FLAGS.data_dir, model_params)
train_set = datasets[0]
valid_set = datasets[1]
test_set = datasets[2]
model_params = datasets[3]
eval_model_params = datasets[4]
reset_graph()
model = sketch_rnn_model.Model(model_params)
eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
if FLAGS.resume_training:
load_checkpoint(sess, FLAGS.log_root)
# Write config file to json file.
tf.gfile.MakeDirs(FLAGS.log_root)
with tf.gfile.Open(os.path.join(FLAGS.log_root, 'model_config.json'),
'w') as f:
json.dump(list(model_params.values()), f, indent=True)
train(sess, model, eval_model, train_set, valid_set, test_set)
def main(unused_argv):
"""Load model params, save config file and start trainer."""
model_params = sketch_rnn_model.get_default_hparams()
if FLAGS.hparams:
model_params.parse(FLAGS.hparams)
trainer(model_params)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
```
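Two schedules drive the training loop above: the learning rate decays exponentially from `learning_rate` toward `min_learning_rate`, and the KL weight anneals from `kl_weight_start` up toward `kl_weight`. Isolated from the session code, the two formulas behave as in the sketch below; the hyperparameter values are illustrative stand-ins, not read out of `model.py`:

```python
# Illustrative hyperparameters in the same ballpark as sketch-rnn defaults (assumed, not verified).
learning_rate = 0.001
min_learning_rate = 0.00001
decay_rate = 0.9999
kl_weight = 0.5
kl_weight_start = 0.01
kl_decay_rate = 0.99995

def schedules(step):
    # The same two expressions used inside train() above.
    lr = (learning_rate - min_learning_rate) * decay_rate ** step + min_learning_rate
    klw = kl_weight - (kl_weight - kl_weight_start) * kl_decay_rate ** step
    return lr, klw

for step in (0, 1000, 10000, 100000):
    lr, klw = schedules(step)
    print('step %6d  lr %.6f  kl_weight %.4f' % (step, lr, klw))
```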
|
{
"source": "JeffreyCA/spleeter-web",
"score": 2
}
|
#### File: spleeter-web/api/models.py
```python
import os
import uuid
from io import BytesIO
import requests
from django.conf import settings
from django.db import models
import mutagen
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3NoHeaderError
from .validators import *
from .youtubedl import get_meta_info
from picklefield.fields import PickledObjectField
"""
This module defines Django models.
"""
def source_file_path(instance, filename):
"""
Get path to source file, using instance ID as subdirectory.
:param instance: SourceFile instance
:param filename: File name
:return: Path to source file
"""
return os.path.join(settings.UPLOAD_DIR, str(instance.id), filename)
def mix_track_path(instance, filename):
"""
Get path to mix track file, using instance ID as subdirectory.
:param instance: StaticMix/DynamicMix instance
:param filename: File name
:return: Path to mix track file
"""
return os.path.join(settings.SEPARATE_DIR, str(instance.id), filename)
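# Illustration (not in the original module): with instance IDs being UUID4 values, these
# helpers produce paths such as
#   <UPLOAD_DIR>/1b9d6bcd-.../my_song.mp3      (source_file_path)
#   <SEPARATE_DIR>/1b9d6bcd-.../vocals.mp3     (mix_track_path)
# where UPLOAD_DIR and SEPARATE_DIR come from Django settings; the literal directory
# names are deployment-specific, so none are assumed here.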
SPLEETER = 'spleeter'
D3NET = 'd3net'
DEMUCS = 'demucs'
DEMUCS_HQ = 'demucs48_hq'
DEMUCS_EXTRA = 'demucs_extra'
DEMUCS_QUANTIZED = 'demucs_quantized'
TASNET = 'tasnet'
TASNET_EXTRA = 'tasnet_extra'
XUMX = 'xumx'
# Deprecated
DEMUCS_LIGHT = 'light'
DEMUCS_LIGHT_EXTRA = 'light_extra'
DEMUCS_FAMILY = [DEMUCS, DEMUCS_HQ, DEMUCS_EXTRA, DEMUCS_QUANTIZED, TASNET, TASNET_EXTRA, DEMUCS_LIGHT, DEMUCS_LIGHT_EXTRA]
SEP_CHOICES = [
(SPLEETER, 'Spleeter'),
(D3NET, 'D3Net'),
(
'demucs',
(
(DEMUCS, 'Demucs'),
(DEMUCS_HQ, 'Demucs HQ'),
(DEMUCS_EXTRA, 'Demucs Extra'),
(DEMUCS_QUANTIZED, 'Demucs Quantized'),
(TASNET, 'Tasnet'),
(TASNET_EXTRA, 'Tasnet Extra'),
# Deprecated
(DEMUCS_LIGHT, 'Demucs Light'),
(DEMUCS_LIGHT_EXTRA, 'Demucs Light Extra'))),
(XUMX, 'X-UMX')
]
class TaskStatus(models.IntegerChoices):
"""
Enum for status of a task.
"""
QUEUED = 0, 'Queued'
IN_PROGRESS = 1, 'In Progress'
DONE = 2, 'Done'
ERROR = -1, 'Error'
class Bitrate(models.IntegerChoices):
"""
Enum for MP3 bitrates.
"""
MP3_192 = 192
MP3_256 = 256
MP3_320 = 320
class YTAudioDownloadTask(models.Model):
"""Model representing the status of a task to fetch audio from YouTube link."""
# UUID to uniquely identify task
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# ID of the associated Celery task
celery_id = models.UUIDField(default=None, null=True, blank=True)
# Status of task
status = models.IntegerField(choices=TaskStatus.choices,
default=TaskStatus.QUEUED)
# Error message in case of error
error = models.TextField(blank=True)
class SourceFile(models.Model):
"""
Model representing the file of a source/original audio track.
If a user uploads the audio file but then aborts the operation, the SourceFile and the file
on disk are deleted.
"""
# UUID to uniquely identify file
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# File object
file = models.FileField(upload_to=source_file_path,
blank=True,
null=True,
max_length=255,
validators=[is_valid_size, is_valid_audio_file])
# Whether the audio track is from a YouTube link import
is_youtube = models.BooleanField(default=False)
# The original YouTube link, if source is from YouTube
youtube_link = models.URLField(unique=True,
blank=True,
null=True,
validators=[is_valid_youtube])
# If file is from a YouTube link import, then this field refers to the task executed to download the audio file.
youtube_fetch_task = models.OneToOneField(YTAudioDownloadTask,
on_delete=models.DO_NOTHING,
null=True,
blank=True)
def metadata(self):
"""Extract artist and title information from audio
:return: Dict containing 'artist' and 'title' fields associated with the track
"""
artist = ''
title = ''
if self.youtube_link:
try:
info = get_meta_info(self.youtube_link)
except:
print('Getting metadata failed')
info = None
if not info:
artist = ''
title = ''
elif info['embedded_artist'] and info['embedded_title']:
artist = info['embedded_artist']
title = info['embedded_title']
elif info['parsed_artist'] and info['parsed_title']:
artist = info['parsed_artist']
title = info['parsed_title']
else:
artist = info['uploader']
title = info['title']
else:
try:
if settings.DEFAULT_FILE_STORAGE == 'api.storage.FileSystemStorage':
audio = EasyID3(self.file.path) if self.file.path.endswith('mp3') else mutagen.File(self.file.path)
else:
r = requests.get(self.file.url)
file = BytesIO(r.content)
audio = EasyID3(file) if self.file.url.endswith('mp3') else mutagen.File(file)
if 'artist' in audio:
artist = audio['artist'][0]
if 'title' in audio:
title = audio['title'][0]
except ID3NoHeaderError:
pass
except:
pass
return (artist, title)
def __str__(self):
if self.file and self.file.name:
return os.path.basename(self.file.name)
elif self.youtube_link:
return self.youtube_link
else:
return self.id
class SourceTrack(models.Model):
"""
Model representing the source song itself. SourceTrack differs from SourceFile as SourceTrack
contains additional metadata such as artist, title, and date created info.
SourceTrack contains a reference to SourceFile. The reasoning why they're separate is because the
user first uploads an audio file to the server, then confirms the artist and title information,
then completes the process of adding a new audio track.
If a user uploads the audio file but then aborts the operation, the SourceFile and the file
on disk are deleted.
TODO: Refactor SourceFile and SourceTrack in a cleaner way.
"""
# UUID to uniquely identify the source song
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# Corresponding SourceFile (id)
source_file = models.OneToOneField(SourceFile,
on_delete=models.DO_NOTHING,
unique=True)
# Artist name
artist = models.CharField(max_length=200)
# Title
title = models.CharField(max_length=200)
# DateTime when user added the song
date_created = models.DateTimeField(auto_now_add=True)
def url(self):
"""Get the URL of the source file."""
if self.source_file.file:
return self.source_file.file.url
return ''
def youtube_link(self):
"""Get the YouTube link of the source file (may return None)."""
return self.source_file.youtube_link
def youtube_fetch_task(self):
"""Get the ID of the YouTube fetch task associated with the track."""
return self.source_file.youtube_fetch_task.id
def __str__(self):
"""String representation."""
return self.artist + ' - ' + self.title
# pylint: disable=unsubscriptable-object
class StaticMix(models.Model):
"""Model representing a statically mixed track (certain parts are excluded)."""
# UUID to uniquely identify track
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# ID of the associated Celery task
celery_id = models.UUIDField(default=None, null=True, blank=True)
# Separation model
separator = models.CharField(max_length=20,
choices=SEP_CHOICES,
default=SPLEETER)
# Separator-specific args
separator_args = PickledObjectField(default=dict)
# Bitrate
bitrate = models.IntegerField(choices=Bitrate.choices,
default=Bitrate.MP3_256)
# Source track on which it is based
source_track = models.ForeignKey(SourceTrack,
related_name='static',
on_delete=models.CASCADE)
# Whether track contains vocals
vocals = models.BooleanField()
# Whether track contains drums
drums = models.BooleanField()
# Whether track contains bass
bass = models.BooleanField()
# Whether track contains accompaniment ('other' is the term used by Spleeter API)
other = models.BooleanField()
# Status of source separation task
status = models.IntegerField(choices=TaskStatus.choices,
default=TaskStatus.QUEUED)
# Underlying file
file = models.FileField(upload_to=mix_track_path,
max_length=255,
blank=True)
# Error message
error = models.TextField(blank=True)
# DateTime when source separation task was started
date_created = models.DateTimeField(auto_now_add=True)
def artist(self):
"""Get the artist name."""
return self.source_track.artist
def title(self):
"""Get the title."""
return self.source_track.title
def url(self):
"""Get the file URL"""
if self.file:
return self.file.url
return ''
def formatted_name(self):
"""
Produce a string with the format like:
"Artist - Title (vocals, drums, bass, other)"
"""
prefix_lst = [self.source_track.artist, ' - ', self.source_track.title]
parts_lst = []
if self.vocals:
parts_lst.append('vocals')
if self.drums:
parts_lst.append('drums')
if self.bass:
parts_lst.append('bass')
if self.other:
parts_lst.append('other')
prefix = ''.join(prefix_lst)
parts = ','.join(parts_lst)
suffix = f'{self.bitrate} kbps,{self.separator}'
if self.separator in DEMUCS_FAMILY:
random_shifts = self.separator_args['random_shifts']
suffix += f',{random_shifts} shifts'
elif self.separator == XUMX:
iterations = self.separator_args['iterations']
softmask = self.separator_args['softmask']
# Replace decimal point with underscore
alpha = str(self.separator_args['alpha']).replace('.', '_')
suffix += f',{iterations} iter'
if softmask:
suffix += f',softmask {alpha}'
return f'{prefix} ({parts}) [{suffix}]'
def source_path(self):
"""Get the path to the source file."""
return self.source_track.source_file.file.path
def source_url(self):
"""Get the URL of the source file."""
return self.source_track.source_file.file.url
def get_extra_info(self):
"""Get extra information about the mix"""
if self.separator == SPLEETER:
return [f'{self.bitrate} kbps', '4 stems (16 kHz)']
elif self.separator == D3NET:
return [f'{self.bitrate} kbps']
elif self.separator in DEMUCS_FAMILY:
return [
f'{self.bitrate} kbps', f'Random shifts: {self.separator_args["random_shifts"]}'
]
else:
info_arr = [
f'{self.bitrate} kbps',
f'Iterations: {self.separator_args["iterations"]}',
f'Softmask: {self.separator_args["softmask"]}',
]
if self.separator_args["softmask"]:
info_arr.append(f'Alpha: {self.separator_args["alpha"]}')
return info_arr
class Meta:
unique_together = [[
'source_track', 'separator', 'separator_args', 'bitrate', 'vocals', 'drums',
'bass', 'other'
]]
# pylint: disable=unsubscriptable-object
class DynamicMix(models.Model):
"""Model representing a track that has been split into individually components."""
# UUID to uniquely identify track
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
# ID of the associated Celery task
celery_id = models.UUIDField(default=None, null=True, blank=True)
# Separation model
separator = models.CharField(max_length=20,
choices=SEP_CHOICES,
default=SPLEETER)
# Separator-specific args
separator_args = PickledObjectField(default=dict)
# Bitrate
bitrate = models.IntegerField(choices=Bitrate.choices,
default=Bitrate.MP3_256)
# Source track on which it is based
source_track = models.ForeignKey(SourceTrack,
related_name='dynamic',
on_delete=models.CASCADE)
# Path to vocals file
vocals_file = models.FileField(upload_to=mix_track_path,
max_length=255,
blank=True)
# Path to accompaniment file
other_file = models.FileField(upload_to=mix_track_path,
max_length=255,
blank=True)
# Path to bass file
bass_file = models.FileField(upload_to=mix_track_path,
max_length=255,
blank=True)
# Path to drums file
drums_file = models.FileField(upload_to=mix_track_path,
max_length=255,
blank=True)
# Status of source separation task
status = models.IntegerField(choices=TaskStatus.choices,
default=TaskStatus.QUEUED)
# Error message
error = models.TextField(blank=True)
# DateTime when source separation task was started
date_created = models.DateTimeField(auto_now_add=True)
def artist(self):
"""Get the artist name."""
return self.source_track.artist
def title(self):
"""Get the title."""
return self.source_track.title
def formatted_prefix(self):
"""
Produce a string with the format like:
"Artist - Title"
"""
return f'{self.source_track.artist} - {self.source_track.title}'
def formatted_suffix(self):
"""
Produce a string describing the bitrate, separator model, and model-specific arguments, e.g.:
"[256 kbps,demucs,5 shifts]"
"""
if self.separator == SPLEETER:
return f'[{self.bitrate} kbps,{self.separator}]'
elif self.separator == D3NET:
return f'[{self.bitrate} kbps]'
elif self.separator in DEMUCS_FAMILY:
random_shifts = self.separator_args['random_shifts']
return f'[{self.bitrate} kbps,{self.separator},{random_shifts} shifts]'
else:
iterations = self.separator_args['iterations']
softmask = self.separator_args['softmask']
# Replace decimal point with underscore
alpha = str(self.separator_args['alpha']).replace('.', '_')
suffix = f'[{self.bitrate} kbps,{self.separator},{iterations} iter'
if softmask:
suffix += f',softmask {alpha}'
suffix += ']'
return suffix
def vocals_url(self):
"""Get the URL of the vocals file."""
if self.vocals_file:
return self.vocals_file.url
return ''
def other_url(self):
"""Get the URL of the accompaniment file."""
if self.other_file:
return self.other_file.url
return ''
def bass_url(self):
"""Get the URL of the bass file."""
if self.bass_file:
return self.bass_file.url
return ''
def drums_url(self):
"""Get the URL of the drums file."""
if self.drums_file:
return self.drums_file.url
return ''
def source_path(self):
"""Get the path to the source file."""
return self.source_track.source_file.file.path
def source_url(self):
"""Get the URL of the source file."""
return self.source_track.source_file.file.url
def get_extra_info(self):
"""Get extra information about the mix"""
if self.separator == SPLEETER:
return [f'{self.bitrate} kbps', '4 stems (16 kHz)']
elif self.separator == D3NET:
return [f'{self.bitrate} kbps']
elif self.separator in DEMUCS_FAMILY:
random_shifts = self.separator_args['random_shifts']
return [f'{self.bitrate} kbps', f'Random shifts: {random_shifts}']
else:
info_arr = [
f'{self.bitrate} kbps',
f'Iterations: {self.separator_args["iterations"]}',
f'Softmask: {self.separator_args["softmask"]}',
]
if self.separator_args["softmask"]:
info_arr.append(f'Alpha: {self.separator_args["alpha"]}')
return info_arr
class Meta:
unique_together = [[
'source_track',
'separator',
'separator_args',
'bitrate'
]]
```
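The naming helpers above only concatenate model fields, so their behaviour can be sketched without any Django setup. This is a minimal sketch; the `SPLEETER` value and the `FakeMix` attribute values are stand-ins chosen for illustration, not part of the original app.
```python
# Minimal sketch of how formatted_prefix/formatted_suffix compose (hypothetical values).
SPLEETER = 'spleeter'  # assumed value of the SPLEETER constant
class FakeMix:
    separator = SPLEETER
    bitrate = 256
    artist = 'Some Artist'
    title = 'Some Title'
    def formatted_prefix(self):
        return f'{self.artist} - {self.title}'
    def formatted_suffix(self):
        return f'[{self.bitrate} kbps,{self.separator}]'
mix = FakeMix()
print(f'{mix.formatted_prefix()} {mix.formatted_suffix()}')
# Some Artist - Some Title [256 kbps,spleeter]
```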
#### File: api/separators/x_umx_separator.py
```python
import os
import warnings
from pathlib import Path
import nnabla as nn
import numpy as np
from api.separators.util import download_and_verify
from billiard.pool import Pool
from nnabla.ext_utils import get_extension_context
from spleeter.audio.adapter import AudioAdapter
from tqdm import trange
from xumx.test import separate
MODEL_URL = 'https://nnabla.org/pretrained-models/ai-research-code/x-umx/x-umx.h5'
class XUMXSeparator:
"""Performs source separation using X-UMX API."""
def __init__(
self,
cpu_separation: bool,
bitrate=256,
softmask=False,
alpha=1.0,
iterations=1
):
"""Default constructor.
:param config: Separator config, defaults to None
"""
self.model_file = 'x-umx.h5'
self.model_dir = Path('pretrained_models')
self.model_file_path = self.model_dir / self.model_file
self.context = 'cpu' if cpu_separation else 'cudnn'
self.softmask = softmask
self.alpha = alpha
self.iterations = iterations
self.bitrate = bitrate
self.sample_rate = 44100
self.residual_model = False
self.audio_adapter = AudioAdapter.default()
self.chunk_duration = 30
def get_estimates(self, input_path: str):
ctx = get_extension_context(self.context)
nn.set_default_context(ctx)
nn.set_auto_forward(True)
audio, _ = self.audio_adapter.load(input_path,
sample_rate=self.sample_rate)
if audio.shape[1] > 2:
warnings.warn('Channel count > 2! '
'Only the first two channels will be processed!')
audio = audio[:, :2]
if audio.shape[1] == 1:
print('received mono file, so duplicate channels')
audio = np.repeat(audio, 2, axis=1)
# Split and separate sources using moving window protocol for each chunk of audio
# chunk duration must be lower for machines with low memory
chunk_size = self.sample_rate * self.chunk_duration
if (audio.shape[0] % chunk_size) == 0:
nchunks = (audio.shape[0] // chunk_size)
else:
nchunks = (audio.shape[0] // chunk_size) + 1
print('Separating...')
estimates = {}
for chunk_idx in trange(nchunks):
cur_chunk = audio[chunk_idx *
chunk_size:min((chunk_idx + 1) *
chunk_size, audio.shape[0]), :]
cur_estimates = separate(cur_chunk,
model_path=str(self.model_file_path),
niter=self.iterations,
alpha=self.alpha,
softmask=self.softmask,
residual_model=self.residual_model)
            if not estimates:
estimates = cur_estimates
else:
for key in cur_estimates:
estimates[key] = np.concatenate(
(estimates[key], cur_estimates[key]), axis=0)
return estimates
def create_static_mix(self, parts, input_path: str, output_path: Path):
download_and_verify(MODEL_URL, self.model_dir, self.model_file_path)
estimates = self.get_estimates(input_path)
final_source = None
for name, source in estimates.items():
if not parts[name]:
continue
final_source = source if final_source is None else final_source + source
print('Writing to MP3...')
self.audio_adapter.save(output_path, final_source, self.sample_rate, 'mp3', self.bitrate)
def separate_into_parts(self, input_path: str, output_path: Path):
download_and_verify(MODEL_URL, self.model_dir, self.model_file_path)
estimates = self.get_estimates(input_path)
# Export all source MP3s in parallel
pool = Pool()
tasks = []
output_path = Path(output_path)
for name, estimate in estimates.items():
filename = f'{name}.mp3'
print(f'Exporting {name} MP3...')
task = pool.apply_async(self.audio_adapter.save, (output_path / filename, estimate, self.sample_rate, 'mp3', self.bitrate))
tasks.append(task)
pool.close()
pool.join()
```
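The moving-window logic in `get_estimates` boils down to a ceiling division of the sample count by the chunk size. A self-contained sketch of that arithmetic follows; the track length used is a made-up number.
```python
# Chunk-count arithmetic mirrored from get_estimates (illustrative sample count).
sample_rate = 44100
chunk_duration = 30                       # seconds per chunk; lower this on low-memory machines
chunk_size = sample_rate * chunk_duration
n_samples = 10_000_000                    # hypothetical track length in samples
if n_samples % chunk_size == 0:
    nchunks = n_samples // chunk_size
else:
    nchunks = n_samples // chunk_size + 1
print(nchunks)  # 8 chunks of at most 30 seconds each
```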
|
{
"source": "jeffrey-clark/academic_journal_scraper",
"score": 3
}
|
#### File: jeffrey-clark/academic_journal_scraper/oxford_academic_crawler.py
```python
import Functions.AdminFunctions as AF
from Functions.ScrapeFunctions import *
import pandas as pd
import os, sys
import time
import random
def crawl_issues(journal, from_volume, from_issue, to_volume, to_issue, continuous_issue_inc=False):
    '''
    Crawl and save the tables of contents and article HTML for a journal.
    :param journal: short journal code, e.g. "qje" or "oxrep"
    :param from_volume: first volume to crawl
    :param from_issue: first issue of the first volume to crawl
    :param to_volume: last volume to crawl
    :param to_issue: last issue of the last volume to crawl
    :param continuous_issue_inc: if True, issue numbers keep incrementing across volumes instead of resetting to 1
    :return: None; writes article_references.xlsx to the journal data directory
    '''
# 1. CONFIRM JOURNAL DATA DIRECTORY
journal_dir = AF.confirm_journal_data_directories(journal)
# 2. LOAD TABLES OF CONTENTS AND ARTICLES
tocs_dir = f"{journal_dir}/Tables_of_Contents"
tocs = os.listdir(tocs_dir)
article_dir = f"{journal_dir}/Articles"
articles = os.listdir(article_dir)
    # 3. PREPARE THE FN-LINK-DOI CONVERSION DATAFRAME AS DICT
fn_link_doi_dic = {'journal': [], 'year': [], 'volume': [], 'issue': [],
'article': [], 'filename': [], 'link': [], 'doi': []}
    # 4. CRAWL FROM THE FROM_VOLUME TO THE TO_VOLUME
year, volume, issue = None, from_volume, from_issue
while True:
# search for the html TOC
toc_fp = None
for toc in tocs:
fp = f"{tocs_dir}/{toc}"
m = re.search(r"(\d{4})_(\d+)_([-\d]+)_Contents.html", toc)
y = int(m.group(1))
v = int(m.group(2))
issue_list = [int(x) for x in m.group(3).split("-")]
i = issue_list[-1]
if volume == v and issue in issue_list:
# if we have the TOC saved in HTML
toc_fp = fp
year = y
break
# if we do not have the TOC in html, scrape it
if toc_fp == None:
try:
toc_fp = scrape_and_save_toc_html(journal, volume, issue)
except:
if issue == 1:
issue = "1-2"
toc_fp = scrape_and_save_toc_html(journal, volume, issue)
else:
raise ValueError("Invalid issue link.")
m = re.search(r"(\d{4})_(\d+)_([-\d]+)_Contents.html", toc_fp)
year = m.group(1)
volume = int(m.group(2))
issue_list = [int(x) for x in m.group(3).split("-")]
issue = issue_list[-1]
print(f"scraping table of contents: volume {volume}, issue {issue}, year {year}")
# load the TOC soup
soup = load_soup(toc_fp)
# download all of the articles
article_refs = extract_article_refs(journal, soup)
article_links = [x['link'] for x in article_refs]
article_dois = [x['doi'] for x in article_refs]
article_links_reduced = [x['reduced_link'] for x in article_refs]
for i in range(0, len(article_links)):
a_id = i+1
a_link = article_links[i]
a_link_red = article_links_reduced[i]
a_doi = article_dois[i]
a_fp = generate_article_fp(journal, year, a_link)
a_fn = re.search(r"Articles/(.+)$", a_fp).group(1)
# reduced filenames for cases where issue is e.g. 1-2
a_fp_red = generate_article_fp(journal, year, a_link_red)
a_fn_red = re.search(r"Articles/(.+)$", a_fp_red).group(1)
fn_link_doi_dic['journal'].append(journal)
fn_link_doi_dic['year'].append(year)
fn_link_doi_dic['volume'].append(volume)
fn_link_doi_dic['issue'].append(issue)
fn_link_doi_dic['article'].append(a_id)
fn_link_doi_dic['filename'].append(a_fn)
fn_link_doi_dic['link'].append(a_link)
fn_link_doi_dic['doi'].append(a_doi)
if a_fn_red not in articles:
print(f"scraping article: {a_fn_red}")
scrape_and_save_article_html(a_link, a_fp_red)
time.sleep(random.randrange(5, 10))
# load in the volume details
details = get_volume_details_from_toc(soup)
# break if we have scraped the to_issue
if (issue == to_issue) and (volume == to_volume):
break
# Increment issue, and increment volume if reached issue max
if issue == details['end_issue']:
if not continuous_issue_inc:
issue = 0
volume += 1
issue += 1
continue
# 5. SAVE THE FN-LINK-DOI DF AS XLSX
fn_link_doi_dic_df = pd.DataFrame(fn_link_doi_dic)
fp = f'{journal_dir}/article_references.xlsx'
fn_link_doi_dic_df.to_excel(fp, index=False)
# writer = pd.ExcelWriter(fp, engine='xlsxwriter')
# fn_link_doi_dic_df.to_excel(writer, sheet_name="Articles", index=False)
# writer.save()
print(f"COMPLETED scrape of all articles in the interval for {journal}!")
def c_crawl_issues(journal, from_volume, from_issue, to_volume, to_issue, continuous_issue_inc=False):
while True:
try:
crawl_issues(journal, from_volume, from_issue, to_volume, to_issue, continuous_issue_inc=continuous_issue_inc)
break
except:
print("Failure. Sleeping then restart...")
time.sleep(60)
if __name__ == "__main__":
#c_crawl_issues("ej", 113, 484, 131, 640, True) #2003
#c_crawl_issues("ectj", 5, 1, 25, 1, False) # 2002
#c_crawl_issues("qje", 126, 1, 137, 1, False) # 2011
#c_crawl_issues("restud", 71, 1, 89, 1, False) #2004
c_crawl_issues("oxrep", 24, 1, 37, 4, False) # 2007
#c_crawl_issues("cje", 28, 1, 45, 6, False) #2004
```
|
{
"source": "jeffrey-clark/gender_in_academia",
"score": 2
}
|
#### File: jeffrey-clark/gender_in_academia/docAPImatcher.py
```python
import datetime
from Models.initialize import *
import Models.ImportModel as IM
import Models.LocalAPIModel as Local_API
import docxtractor as dxtr
import pandas
def match(analysis_filename, core, total_cores, id_overwrite = None):
id_extraction = re.search(r'_(\d+)_(\d+).xlsx', analysis_filename)
start_id = id_extraction.group(1)
end_id = id_extraction.group(2)
# IMPORT THE ANALYSIS SPREADSHEET AS DATAFRAMES
analysis_filepath = project_root + "/Spreadsheets/new_docx_analysis/threaded/" + analysis_filename
raw = IM.Spreadsheet('Raw', analysis_filepath, 'Raw')
pub = IM.Spreadsheet('Publications', analysis_filepath, 'Publications')
researcher = IM.Spreadsheet("Researchers", analysis_filepath, "Researchers")
# CREATE A DOC_DATA OBJECT FROM THE DFs
doc_data = dxtr.Doc_data()
doc_data.import_dfs(raw.df, pub.df, researcher.df)
# create LAPI instance
LAPI = Local_API.data()
i = 0
while i < len(doc_data.pubs):
print("i is", i)
if doc_data.pubs[i] == []:
i = i + 1
continue
print(i, doc_data.pubs[i][0].app_id, doc_data.researchers[i].app_id)
if doc_data.pubs[i][0].app_id != doc_data.researchers[i].app_id:
if doc_data.researchers[i].extracted_pubs == None:
doc_data.pubs = doc_data.pubs[:i] + [[]] + doc_data.pubs[i:]
i = i - 1
else:
if int(doc_data.researchers[i].extracted_pubs) == 0:
doc_data.pubs = doc_data.pubs[:i] + [[]] + doc_data.pubs[i:]
i = i - 1
else:
raise ValueError("MISMATCH IN RESEARCHER AND PUBS IN THE MATCHING API PROCESS")
# to resolve, investigate the symmetry of the df.researchers and df.pubs lists
i = i + 1
# print("PUBS ARE FINALLY:")
# print(doc_data.pubs)
r_start = math.floor(len(doc_data.researchers) / total_cores) * (int(core) - 1)
r_end = math.floor(len(doc_data.researchers) / total_cores) * (int(core))
if int(core) == int(total_cores):
r_end = len(doc_data.researchers)
if id_overwrite != None:
r_start = int(id_overwrite['start']) - int(start_id)
r_end = int(id_overwrite['end']) - int(start_id) + 1
print("START OVERWRITE IS", r_start)
print("END OVERWRITE IS", r_end)
subsample = doc_data.researchers[r_start:r_end]
id = int(start_id) + int(r_start)
r_id = int(r_start)
for r in subsample:
# here r is the researcher row in the Scrape6 spreadsheet
print(id, ":", r.app_id, r.name)
# Fetch df with API all found API data
r.ID = id
r.r_ID = r_id
LAPI.load_data(r)
api_pubs = LAPI.pub_data
# now we will match publications with API data
doc_data.match_publications(api_pubs, r)
export_fp = "api_matched/" + analysis_filename[:-5] + "_core_" + str(core) + ".xlsx"
doc_data.export(export_fp)
        id += 1
        r_id += 1
if __name__ == "__main__":
starttime = datetime.datetime.now()
try:
filename = sys.argv[1]
core = int(sys.argv[2])
total_cores = int(sys.argv[3])
except:
start_id = 15001
end_id = 20000
filename = "analysis_" + str(start_id) + "_" + str(end_id) + ".xlsx"
core = 1
total_cores = 1
match(filename, core, total_cores)
#match(filename, 1, 1, {'start': 377, 'end': 378})
print("start time", starttime)
print("end time", datetime.datetime.now())
```
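The `r_start`/`r_end` bounds in `match()` split the researcher list evenly across cores, with the last core absorbing the remainder. A standalone sketch of that partitioning arithmetic follows; 103 researchers over 8 cores is an arbitrary example.
```python
# Per-core partitioning arithmetic mirrored from match() (illustrative sizes).
import math
n_researchers = 103
total_cores = 8
for core in range(1, total_cores + 1):
    r_start = math.floor(n_researchers / total_cores) * (core - 1)
    r_end = math.floor(n_researchers / total_cores) * core
    if core == total_cores:
        r_end = n_researchers       # the last core picks up the remainder
    print(core, r_start, r_end)
# cores 1-7 each get 12 researchers; core 8 gets the remaining 19 (indices 84-103)
```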
#### File: jeffrey-clark/gender_in_academia/docAPImatcher_server.py
```python
import json
import wsgiref.simple_server
import subprocess
import threading
import docAPImatcher_exe as exe
from docAPImatcher import *
def get_id():
# read file
with open('server_id.json', 'r') as file:
data = file.read()
# parse file
obj = json.loads(data)
# show values
return int(obj["id"])
def get_file():
server_id = get_id()
detailed = []
files = os.listdir(project_root + "/Spreadsheets/new_docx_analysis/threaded")
for f in files:
start_index = re.search(r"_(\d+)_", f).group(1)
detailed.append({'filename': f, 'start_index': start_index})
    # sort numerically so that e.g. 10001 does not sort before 5001
    detailed = sorted(detailed, key=lambda i: int(i['start_index']))
files = [x['filename'] for x in detailed]
file = files[(server_id-1)]
return file
#def call_exe_server(filename, process, ftr):
# command = "nohup python3 " + project_root + "/docAPImatcher_exe.py " + filename + " " + str(process) + " 8" + " " + ftr + " &"
# subprocess.run(command)
def call_exe(filename, process):
command = "python " + project_root + "\docAPImatcher_exe.py " + filename + " " + str(process) + " " + "4"
print([command])
x = subprocess.run(command, capture_output=True)
print(x)
def main_local():
filename = get_file()
threads = []
for process in range(1, 9):
# for local computer test
t = threading.Thread(target=call_exe, args=(filename, process))
t.daemon = True
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def main(from_id, to_id):
id = int(from_id)
files_to_run = []
while True:
plus_fvhundred = id + 499
filename = "analysis_" + str(id) + "_" + str(plus_fvhundred) + ".xlsx"
files_to_run.append(filename)
id = plus_fvhundred + 1
if id > int(to_id):
break
filename = files_to_run[0]
threads = []
for core in range(1, 9):
#exe.main(filename, core, 8, files_to_run)
t = threading.Thread(target=exe.main, args=(filename, core, 8, files_to_run))
t.daemon = True
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
try:
from_id = sys.argv[1]
to_id = sys.argv[2]
except:
from_id = 5001
to_id = 10000
main(from_id, to_id)
```
#### File: jeffrey-clark/gender_in_academia/extract_publications_from_docx.py
```python
from Models.initialize import *
import Models.MsWordModel as WM
import Models.ImportModel as IM
directory = project_root + '/docx_files'
filenames = os.listdir(directory)
org_sheet = IM.original()
possible_types = {'Peer-reviewed publication':
['peer-reviewed publications', 'bedömda artiklar', 'peer-reviewed articles',
'Peer-Reviewed Journals', 'Peer reviewed articles', 'peer reviewed original articles',
'peer reviewed original articles','Peer-reviewed scientific articles',
'Peer-reviewed original articles',
'Referee-reviewed published articles', 'peer-reviewed papers',
'peer reviewed journals', 'fackgranskade originalartiklar'],
'Publication': ['publications', 'publikationer'],
'Monograph':
['monographs', 'monograph', 'monografier', 'manuscripts', 'manuskript'],
'Patent':
['Patents', 'Patenter', 'patent'],
'Peer-reviewed conference contribution':
['Peer-reviewed conference contributions', 'bedömda konferensbidrag', 'conference contributions',
'Conference presentations' ],
'Presentation':
['Presentation', 'Presentations'],
'Computer software':
['Egenutvecklade allmänt tillgängliga datorprogram',
'Open access computer programs', 'Publicly available computer programs', 'computer programs'],
'Popular science':
['Popular science article', 'popular science', 'Popular-scientific articles',
'Popular-Science Articles', 'Populärvetenskapliga artiklar' ],
'Review articles, book chapters, books':
['Review articles, book chapters, books', 'Reviews, book chapters, books', 'Book chapters'],
'Supervision':
['supervision']
}
peer_review_flags = ['peer-reviewed', 'peer reviewed', 'referee-reviewed', 'referee reviewed',
'fackgranskade', 'bedömda', 'referee', 'referred', 'refereed',
'referee-granskad', 'referee-granskad', 'referee-bedömda', 'reviewed', 'journal']
possible_types2 = {
'Publication':
[r'article', r'articles', r'artikel', r'artiklar',
         r'publication', r'publications', r'publikation', r'publikationer',
r'paper', r'papers'],
'Monograph':
[r'monograph', r'monographs', r'monograf', r'monografier', r'manuscripts', r'manuskript'],
'Patent':
[r'patent', r'patents', r'patenter'],
'Conference contribution':
[r'conference contribution', r'conference contributions',
r'conference presentation', r'conference presentations',
r'bedömda konferensbidrag', r'conference paper', r'conference papers',
r'conference publication'],
'Presentation':
[r'presentation', r'presentations', r'presentationer'],
'Computer software':
[r'computer programs', r'datorprogram', r'dator program'],
'Popular science':
[r'popular[\s]?science', r'popular[\-\s]?scientific', r'populärvetenskaplig'],
'Books, book chapter':
[r'book', r'chapter', r'kapitel', r'bok', r'böcker', r'bokkapitel', r'bookchapter'],
'Abstract':
[r'abstract', r'abstracts'],
'Online':
[r'on[-]?line publication'],
'Review':
[r"referee-granskad review", r"reviews", r"peer[\s-]?review\b"]
}
# regex is allowed
# these we skip if there is no type determined prior, or if there
skip_flags = [r'number of', r'mostly cited', r'most cited', r'most important']
# these we skip for sure
strict_skip_flags = [ r'citations.*from.*google scholar', r'appendix c', r'citation index',
r'database.*google scholar', r'document.imagefile', r'docx.table',
r'\*\s?:\s?relevant']
# string goes in.
# each keyword in possible type is looped
# as soon as match, we break and return the type
def identify_type(string):
p_match = False
type = None
# lets check for the peer reviewed flag
for p_word in peer_review_flags:
p_match = re.search(re.escape(p_word), string.lower())
if p_match:
p_match = True
# now we will check for other structural dividers
matched_word = None
for t in list(possible_types2.keys()):
for word in possible_types2[t]:
match = re.search(word, string.lower())
if match != None:
matched_word = match.group(0)
# adjustment for "conference paper" overwriting "paper", basically all publication words can be
# overwritten if they match at a later instance of the possible_types dictionary
if type in ["Publication", "Peer-reviewed publication"] or type == None:
if p_match:
type = 'Peer-reviewed ' + str(t).lower()
else:
type = t
break
else:
type = "ERROR "
break
return type
# old version
# def identify_type(string):
# type = None
# for t in list(possible_types.keys()):
# for word in possible_types[t]:
# match = re.search(word.lower(), string.lower())
# if match:
# type = t
# break
# return type
# old function that we used to get index of document
def index_document_df(df):
dic = {}
for i, row in df.iterrows():
index = i
value = row['para_text']
#print("Index:", index)
#print("Value:", value)
type = identify_type(value)
#print("TYPE:", type)
if type != None:
dic[type] = i
return dic
def analyze(obj, app_id, fullname, name_list, surname_list, WordDocument):
col = []
# make a boolean indicating if there ever has existed a type
never_type = True
df = WordDocument.compressed_df
# check for personnummer in the header
personnummer = None
try:
personnummer = re.search(r'\b([\d]{2}[0|1][\d][0-3][\d]-[\d]{4})\b', WordDocument.header_text).group(1)
except:
pass
for i, row in df.iterrows():
blank = None
reason = None
index = i
value = row['para_text']
f_strict_end = None
# first we get the personnummer
if personnummer == None:
try:
personnummer = re.search(r'\b([\d]{2}[0|1][\d][0-3][\d]-[\d]{4})\b', value).group(1)
except:
personnummer = None
# count the number of quotation marks
num_quotes = len(re.findall(r'([\"\'])', value))
if num_quotes > 2:
f_title = "x"
else:
f_title = None
# look for indications of a reference ending
pagenumbers = re.findall(r"p.\s*\d+\s*\-*\s*\d*", value.lower())
if pagenumbers == []:
pagenumbers = re.findall(r"s.\s*\d+\s*\-*\s*\d*", value.lower())
volumenumbers = re.findall(r"[\d:;-]{6,}", value.lower())
volumenumbers_weak = re.findall(r"[\d\:\;\-\s\,]{6,}", value.lower())
        if personnummer in volumenumbers:
            volumenumbers.remove(personnummer)
        if personnummer in volumenumbers_weak:
            volumenumbers_weak.remove(personnummer)
#print("pagenumbers", pagenumbers)
#print("volumenumbers", volumenumbers)
end_digit = re.findall(r"\d$", value.lower())
if pagenumbers != [] or volumenumbers != [] or value[-3:] == "(*)" or end_digit != [] or volumenumbers_weak != []:
f_end = "x"
else:
f_end = None
other_endings = [r"in press", r"submitted to .{,25}$", r"\*$", r'forthcoming']
for e in other_endings:
match = re.search(e, value.lower())
if match != None:
f_end = "x"
f_strict_end = "x"
break
# lets get the number of citations
citation_patterns = [r"citations\s?[:=]\s*(\d+)", r"citation number\s*=\s*(\d+)",
r"citations\s?[:=]\s*\(([\d,\w\s\/]+)\)",
r"gsh:\s?(\d+)",
r"citations\s?n\s?[=:\s]?(\d+)",
r"\d+\scitations"]
citations = None
for pattern in citation_patterns:
try:
citations = re.search(pattern, value.lower()).group(1)
if ", " in citations:
cit_list = citations.split(", ")
int_cit_list = []
for x in cit_list:
try:
int_cit_list.append(int(x))
except:
pass
if int_cit_list != []:
citations = max(int_cit_list)
except:
pass
if citations != None:
break
# lets compute the impact factor
impact_factor_patters = [r"impact factor\s?[:=\s]\s*([\d\,\.]*\d)", r"if\s?[:=\s]\s*([\d\,\.]*\d)"]
impact_factor = None
for pattern in impact_factor_patters:
try:
impact_factor = re.search(pattern, value.lower()).group(1)
impact_factor = int(re.sub(r"\,", ".", impact_factor))
except:
pass
if impact_factor != None:
break
# lets look for a citations ending or google scholar
citation_end = re.search(r"citations:\s*\(?\s*[\d,\s]+\s*\)?.{,5}$", value.lower())
if citation_end == None:
citation_end = re.search(r"citations:\s*\d+\s*\(*.{,20}\)*.{,5}", value.lower())
if citation_end != None:
f_end = "x"
name_flag = None
for s in surname_list:
try:
name_flag = re.search(s, value)
if name_flag:
name_flag = "x"
except:
pass
# this is to deal with name-order-discrepancies
if name_flag == None:
for n in name_list:
try:
name_flag = re.search(n, value)
if name_flag:
name_flag = "x"
except:
pass
year = re.search(r'\b(19[\d]{2}|20[\d]{2})\b', value)
if year:
year_index = year.span()[0]
location = 'begining'
if year_index >= len(value) * 0.5:
location = 'end'
year = "YEAR " + location
if value in nonelist:
blank = "x"
type = identify_type(value)
try:
if type[0:5] == 'ERROR':
reason = "TYPE " + str(type)
type = None
except:
pass
# Do a parenthesis check (to check for false positives
if type != None:
# remove possible leading "peer-reviewed" from e.g. "Peer-reviewed publication"
stripped_type_name = re.sub(r"Peer-reviewed", "", type).strip().capitalize()
for word in possible_types2[stripped_type_name]:
parenthesis_match = re.search(r"\(\b.{,30}" + re.escape(word.lower()) + r".{,30}\b\)", value.lower())
if parenthesis_match != None and (name_flag != None or year != None):
type = None
reason = "Type removal"
# controlling for possiblity that the type trigger-word might be part of a title
if name_flag != None and year != None and f_end != None:
type = None
if type != None:
s_match = None
s_match_strict = None
for s in skip_flags:
s_match = re.search(s.lower(), value.lower())
if s_match != None:
break
if s_match != None and never_type == False:
structure = None
blank = "x"
reason = s_match
else:
structure = type
else:
structure = None
# Now do strict flags, but for all rows
for s in strict_skip_flags:
s_match_strict = re.search(s.lower(), value.lower())
if s_match_strict != None:
structure = None
blank = "x"
reason = s_match_strict
type = None
break
else:
structure = type
if type != None:
never_type = False
obj.rows.append(IM.Row([('app_id', app_id),('fullname', fullname),('text', value), ('blank', blank),
('structure', structure), ('f_year', year),
('f_name', name_flag), ('f_end', f_end), ("f_strict_end", f_strict_end), ('f_personnummer', personnummer),
('citations', citations), ('impact_factor', impact_factor), ('reason', reason)]))
def read_docs(export_filepath, reduced=None):
if reduced != None:
start = reduced['start']
if reduced['end'] == None:
end = len(filenames)
else:
end = reduced['end']
filenames_to_run = filenames[start:end]
else:
filenames_to_run = filenames
start = 0
end = len(filenames)
sheet = IM.Spreadsheet('analysis')
print("Reading documents")
i = 0
for filename in filenames_to_run:
i += 1
progress_bar(i, (end-start+1))
# first we identify the app_id from the file name
app_id = re.search(r'(.*)\.docx', filename).group(1)
#print(app_id)
# now we get the name of the researcher
application = org_sheet.get_rows([('app_id', app_id)])[0]
name = str(application.name)
surname = str(application.surname)
fullname = name + " " + surname
# generate a surname list for doc identification
surname_list = surname.split(" ")
# generate a name list to deal with name order discrepancies
name_list = name.split(" ")
# import the word document
filepath = directory + "/" + filename
d = WM.Docx_df(filepath)
# here we do the analysis
analyze(sheet, app_id, fullname, name_list, surname_list, d)
IM.export([sheet],export_filepath )
def extract_pubs(import_filepath, export_filepath):
docsheet = IM.Spreadsheet('docsheet', import_filepath, 0)
pubsheet = IM.Spreadsheet('cv_publications')
# get the personnummer
personnummer_rows = docsheet.filter_rows(['f_personnummer'])
pnums = Counter()
for r in personnummer_rows:
pnums.add(r.f_personnummer, r.app_id)
pnum_registry = {}
for key in list(pnums.ref.keys()):
pnum_registry[pnums.ref[key][0]] = key
type = None
merge_happening = False
do_append = False
pnum = None
c = {'text': None, 'year': None, 'name': None, 'end':None, 'citations': None , 'impact_factor': None}
for i in range(0, len(docsheet.rows)):
row = docsheet.rows[i]
if row.text in nonelist:
continue
#skip a row before even considering a merge
# note that if there is a blank row in the middle of an entry, the entry will be split
if row.blank != None:
continue
# here we set the structure
if row.structure in ["Peer-reviewed publication", "Publication", "Monograph"]:
type = row.structure
continue
elif row.structure != None:
type = None
do_append = False
#overwrite merge_happening if there is a structrual thing
merge_happening = False
if merge_happening:
c['text'] = c['text'] + " " + str(row.text).strip()
# update any potentially incomplete keys
if c['year'] == None:
c['year'] = row.f_year
if c['name'] == None:
c['name'] = row.f_name
if c['end'] == None:
c['end'] = row.f_end
if c['citations'] == None:
c['citations'] = row.citations
if c['impact_factor'] == None:
c['impact_factor'] = row.impact_factor
else:
c = {'text': str(row.text), 'year': row.f_year, 'name': row.f_name, 'end':row.f_end,
'citations': row.citations, 'impact_factor': row.impact_factor}
# make sure that the next row is valid
try:
j = 0
valid_next_row = True
while True:
j += 1
next_row = docsheet.rows[i+j]
if next_row.blank != None:
continue
if next_row.structure != None:
valid_next_row = False
if next_row.app_id != row.app_id:
valid_next_row = False
if str(next_row.text) in nonelist:
valid_next_row = False
break
except:
next_row = None
valid_next_row = False
print(row)
print(next_row)
print("valid next row", valid_next_row)
print('\n')
# if a row meets our conditions that it is a monograph or a peer reviewed paper
if type != None:
# get the personnummer from the registry
try:
pnum = pnum_registry[row.app_id]
except:
pnum = None
# if we have a strict ending, we just append right away
if row.f_strict_end != None:
do_append = True
merge_happening = False
else:
# if we have a perfect row, we append it
if c['year'] != None and c['name'] != None:
do_append = True
if valid_next_row:
# check if the next row is just some weird ending
if (next_row.f_year == None) and (next_row.f_name == None) and \
(next_row.f_end != None) and (len(str(next_row.text)) <51):
# this is a sign that we should wait and merge
do_append = False
merge_happening = True
continue
# sometimes the year is part of the weird ending
elif next_row.f_year != None and next_row.f_name == None and next_row.f_end != None:
try:
current_year = re.findall(r"[12][901]\d{2}", c['text'])[0]
ending_numberstrings = re.findall(r"[\d:-]*[12][901]\d{2}[\d:-]*", next_row.text)
print("r_years are", ending_numberstrings)
overlap = re.findall(current_year, ending_numberstrings[0])
print("overlap is", overlap)
if (overlap != []) and (len(next_row.text) < 51) and (next_row.f_end != None):
do_append = False
merge_happening = True
continue
except:
pass
# if we only have a name flag
if c['year'] == None and c['name'] != None:
if valid_next_row:
# check if we have an intermediary or a weird ending
if next_row.f_year == None and next_row.f_name == None:
do_append = False
merge_happening = True
continue
# check if we have a complementary entry
elif next_row.f_year != None and next_row.f_name == None:
do_append = False
merge_happening = True
continue
                        # however if we have a substitute entry (name flag in next row), we append
elif next_row.f_name != None:
do_append = True
else:
do_append = True
elif c['year'] != None and c['name'] == None:
if valid_next_row:
# check if we have an intermediary or a weird ending
if next_row.f_year == None and next_row.f_name == None:
do_append = False
merge_happening = True
continue
# check if we have a complementary entry
elif next_row.f_year == None and next_row.f_name != None:
do_append = False
merge_happening = True
continue
                        # however if we have a substitute entry (name flag in next row), we append
elif next_row.f_year != None:
# but if the next row has an ending, like volume info, containing the year,
# and if the text length is <= 50, we accept is as an ending and merge
try:
current_year = re.findall(r"[12][901]\d{2}", c['text'])
except:
current_year = []
try:
ending_numberstrings = re.findall(r"[\d:-]*[12][901]\d{2}[\d:;-]*", str(next_row.text))
print("r_years are", ending_numberstrings)
overlap = re.findall(re.escape(current_year[0]), ending_numberstrings[0])
except:
overlap = []
print("overlap is", overlap)
if (overlap != []) and (len(next_row.text) < 51) and (next_row.f_end != None):
do_append = False
merge_happening = True
continue
else:
do_append = True
else:
do_append = True
else:
do_append = True
# If we have no year and no author, we proceed with merge
elif c['year'] == None and c['name'] == None and c['text'] != None and row.structure == None:
if valid_next_row:
do_append = False
merge_happening = True
continue
else:
do_append = True
else:
pass
# here we append an approved publication row
if do_append:
try:
c['citations'] = int(c['citations'])
except:
pass
try:
c['impact_factor'] = float(c['impact_factor'])
except:
pass
pubsheet.rows.append(IM.Row([("app_id", row.app_id),
("fullname", row.fullname),
("personnummer", pnum),
("type", type),
("publication", c['text']),
("citations", c['citations']),
("impact_factor", c['impact_factor'])
]))
do_append = False
merge_happening = False
if next_row != None:
if row.app_id != next_row.app_id:
type = None
IM.export([pubsheet], export_filepath )
qoo = [
[{'start': 0, 'end': 5000}, 1],
[{'start': 5000, 'end': 10000}, 2],
[{'start': 10000, 'end': 15000}, 3],
[{'start': 15000, 'end': 20000}, 4],
[{'start': 20000, 'end': 25000}, 5],
[{'start': 25000, 'end': None}, 6]
]
qoo = [ [{'start': 25000, 'end': None}, 6] ]
for q in qoo:
#filename of document analysis
da_filepath = project_root + '/Spreadsheets/new_docx_analysis/doc_analysis_test' + str(q[1]) + '.xlsx'
#filename of publications
pub_filepath = project_root + '/Spreadsheets/new_docx_analysis/publications_test' + str(q[1]) + '.xlsx'
# reduce to a subset of researcher from VR.xlsx
subset = q[0]
read_docs(da_filepath, subset)
extract_pubs(da_filepath, pub_filepath)
```
#### File: gender_in_academia/Functions/DXT_functions.py
```python
import re
def blanks_separate(doc):
blank_count = 0
for i in range(0, len(doc)):
row = doc[i]
if row.blank == True:
blank_count = blank_count + 1
blank_percentage = blank_count / len(doc)
if blank_percentage > 0.2:
return True
else:
return False
# def blanks_separate_section():
def check_citation_bragger(row):
# strong triggers will skip even if author name is present
strong_triggers = [
r"asterisk.*denotes"
]
weak_triggers = [
r"\d+\s+citations*", # Cited until March 2016 (CT). In total: 503 citations.
r"according.+google\s*scholar",
]
for tp in strong_triggers:
trigger_match = re.search(tp, row.text.lower())
if trigger_match != None:
return "strong"
# if no match on strong triggers check the weak triggers
for tp in weak_triggers:
trigger_match = re.search(tp, row.text.lower())
if trigger_match != None:
return "weak"
return None
```
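`blanks_separate` treats blank rows as section separators once they exceed 20% of the document. Below is a small stand-alone illustration of that heuristic; `Row` here is a hypothetical stand-in for the project's row objects.
```python
# Illustration of the 20%-blank heuristic; Row is a made-up stand-in type.
from collections import namedtuple
Row = namedtuple('Row', ['blank', 'text'])
doc = [Row(blank=False, text='entry')] * 7 + [Row(blank=True, text='')] * 3
blank_percentage = sum(1 for r in doc if r.blank) / len(doc)
print(blank_percentage > 0.2)  # True: 30% of rows are blank, so blanks are treated as separators
```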
|
{
"source": "JeffreyCNL/hidden_markov",
"score": 3
}
|
#### File: hidden_markov/tests/small_test.py
```python
from unittest import TestCase
from hidden_markov import hmm
import numpy as np
states = ('s', 't')
#list of possible observations
possible_observation = ('A','B' )
# The observations that we observe and feed to the model
observations = ('A', 'B','B','A')
obs4 = ('B', 'A','B')
# Tuple of observations
observation_tuple = []
observation_tuple.extend( [observations,obs4] )
quantities_observations = [10, 20]
# Numpy arrays of the data
start_probability = np.matrix( '0.5 0.5')
transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ')
emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' )
class TestHmm(TestCase):
########## Training HMM ####################
def test_forward(self):
#Declare Class object
test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
# Forward algorithm
forw_prob = (test.forward_algo(observations))
forw_prob = round(forw_prob, 5)
self.assertEqual(0.05153, forw_prob)
def test_viterbi(self):
#Declare Class object
test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
# Viterbi algorithm
vit_out = (test.viterbi(observations))
self.assertEqual(['t','t','t','t'] , vit_out)
def test_log(self):
#Declare Class object
test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
# Log prob function
prob = test.log_prob(observation_tuple, quantities_observations)
prob = round(prob, 3)
self.assertEqual(-67.920, prob)
def test_train_hmm(self):
#Declare Class object
test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability)
# Baum welch Algorithm
num_iter=1000
e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations)
```
|
{
"source": "jeffreycoen/prog_mb",
"score": 4
}
|
#### File: jeffreycoen/prog_mb/ch04_timer_03.py
```python
from microbit import *
mins = 1
def pluralize(word, x):
if x == 1:
return word
else:
return word + "s"
def display_mins(m):
display.scroll(str(m) + pluralize("min", m))
while True:
if button_a.was_pressed():
mins += 1
if mins > 10:
mins = 1
display_mins(mins)
```
#### File: jeffreycoen/prog_mb/ch06_timer_04.py
```python
from microbit import *
SET, RUN, ALARM = range(3)
state = SET
mins = 1
def display_mins(m):
message = str(m)
if m == 1:
message += " min"
else:
message += " mins"
display.scroll(message, wait=False, loop=True)
def handle_set_state():
global state, mins
if button_a.was_pressed():
mins += 1
if mins > 10:
mins = 1
display_mins(mins)
if button_b.was_pressed():
state = RUN
def handle_run_state():
global state
display.show("R")
if button_b.was_pressed():
state = SET
display_mins(mins)
def handle_alarm_state():
pass
display_mins(mins)
while True:
if state == SET:
handle_set_state()
elif state == RUN:
handle_run_state()
elif state == ALARM:
handle_alarm_state()
```
#### File: jeffreycoen/prog_mb/ch06_timer_06.py
```python
from microbit import *
SET, RUN, ALARM = range(3)
state = SET
mins = 1
secs = 0
last_tick_time = 0
def display_mins(m):
message = str(m)
if m == 1:
message += " min"
else:
message += " mins"
display.scroll(message, wait=False, loop=True)
def display_time(m, s):
message = str(m)
if s < 10:
message += ":0" + str(s)
else:
message += ":" + str(s)
display.scroll(message, wait=False)
def handle_set_state():
global state, mins
if button_a.was_pressed():
mins += 1
if mins > 10:
mins = 1
display_mins(mins)
if button_b.was_pressed():
display_time(mins, secs)
state = RUN
def handle_run_state():
global state, mins, secs, last_tick_time
if button_b.was_pressed():
state = SET
display_mins(mins)
time_now = running_time()
if time_now > last_tick_time + 5000:
last_tick_time = time_now
secs -= 5
if secs < 0:
secs = 55
mins -= 1
display_time(mins, secs)
if mins == 0 and secs == 0:
state = ALARM
display.show(Image.HAPPY)
def handle_alarm_state():
global state, mins
if button_b.was_pressed():
state = SET
mins = 1
display_mins(mins)
display_mins(mins)
while True:
if state == SET:
handle_set_state()
elif state == RUN:
handle_run_state()
elif state == ALARM:
handle_alarm_state()
```
#### File: jeffreycoen/prog_mb/ch06_timer.py
```python
from microbit import *
SET, RUN, ALARM = range(3)
state = SET
def handle_set_state():
    pass
def handle_run_state():
    pass
def handle_alarm_state():
    pass
while True:
if state == SET:
handle_set_state()
elif state == RUN:
handle_run_state()
elif state == ALARM:
handle_alarm_state()
```
|
{
"source": "JeffreyCowley/blog-post-challenge",
"score": 3
}
|
#### File: JeffreyCowley/blog-post-challenge/BlogPostAPI.py
```python
from flask import Flask, request
from flask_restful import Api, Resource, abort
import BlogPost
import configparser
import json
app = Flask(__name__)
api = Api(app)
blog_post_app = BlogPost.BlogPostApp()
class GetEntries(Resource):
def get(self):
result = blog_post_app.get_all_entries()
result_dict = {'entries': []}
for item in result:
result_dict['entries'].append({'post_id': item[0],'title': item[1],'body': item[2]})
json_result = json.dumps(result_dict)
return json_result
class AddEntry(Resource):
def post(self):
if not request.json:
abort(400)
try:
blog_post_app.set_entry(request.json['title'], request.json['body'])
except KeyError as error:
abort(400)
api.add_resource(GetEntries, "/posts")
api.add_resource(AddEntry, "/post")
if __name__ == '__main__':
config = configparser.ConfigParser(strict=False, interpolation=None)
config.read('BlogPost.ini')
database_file = config['Database']['file']
logging_file = config['Logging']['file']
logging_format = config['Logging']['format']
logging_level = config['Logging']['level']
api_port = config.get('API','port', fallback='5000')
blog_post_app.setup_logging(logging_file, logging_level, logging_format)
blog_post_app.connect_database(database_file)
app.run(debug=True, port=api_port)
```
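A hedged usage sketch of the two endpoints using the third-party `requests` package, assuming the server above is running locally on the fallback port 5000; the payload values are purely illustrative.
```python
# Exercise the API locally (assumes BlogPostAPI.py is running on port 5000 and requests is installed).
import requests
requests.post('http://127.0.0.1:5000/post',
              json={'title': 'First post', 'body': 'Hello, world'})
print(requests.get('http://127.0.0.1:5000/posts').text)
```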
|
{
"source": "jeffreyc/rerss",
"score": 2
}
|
#### File: rerss/feeds/harvester.py
```python
import datetime
import logging
import traceback
from django.conf import settings
import feedparser
from google.appengine.ext import db
import webapp2
from feeds import models
def datetime_from_parsed(parsed):
    # parsed is a time.struct_time; only its first six fields map onto datetime
    # (index 6 is the weekday, not microseconds)
    return datetime.datetime(*parsed[:6])
class Harvester(webapp2.RequestHandler):
def get(self):
self.purge_unattached_feeds_and_items()
feeds = models.Feed.all()
for feed in feeds:
try:
logging.debug('Updating "%s"' % feed.link)
d = feedparser.parse(feed.link)
if not d.feed:
logging.info('"%s" not found, skipping' % feed.link)
continue
if d.feed.has_key('updated_parsed'):
dt = datetime_from_parsed(d.feed.updated_parsed)
elif d.feed.has_key('published_parsed'):
dt = datetime_from_parsed(d.feed.published_parsed)
else:
dt = datetime.datetime.now()
if feed.pubdate is None or dt > feed.pubdate:
self.update_datastore(feed, d, dt)
else:
logging.info('"%s" < "%s", skipping' % (dt, feed.pubdate))
except Exception:
logging.error('Exception processing "%s":\n%r' %
(feed.link, traceback.format_exc()))
pass
self.purge_old_items()
def purge_old_items(self):
before = datetime.datetime.now() - \
datetime.timedelta(days=settings.KEEP_FOR)
db.delete(models.Item.all(keys_only=True).filter('pubdate <=', before))
def purge_unattached_feeds_and_items(self):
before = datetime.datetime.now() - \
datetime.timedelta(minutes=settings.PURGE_DELAY)
f = models.Feed.all(keys_only=True)
f.filter('usercount =', 0).filter('modified <=', before)
for feed in f.run():
db.delete(models.Item.all(keys_only=True).filter('feed =', feed))
db.delete(f)
def update_datastore(self, feed, d, dt):
logging.debug('Updating %s' % feed)
if d.feed.has_key('description') and d.feed.description:
feed.description = d.feed.description
if d.feed.title:
feed.title = d.feed.title
feed.pubdate = dt
for entry in d.entries:
logging.debug('Adding/updating %s' % entry.title)
if entry.has_key('updated_parsed'):
et = datetime_from_parsed(entry.updated_parsed)
elif entry.has_key('published_parsed'):
et = datetime_from_parsed(entry.published_parsed)
else:
et = datetime.datetime.now()
i = db.Query(models.Item)
i.filter('feed =', feed)
i.filter('link =', entry.link)
i.ancestor(feed.key())
if i.count() == 0:
item = models.Item(feed=feed, link=entry.link,
title=entry.title,
description=entry.description, pubdate=et)
item.put()
else:
item = i.get()
if et > item.pubdate:
if entry.title:
item.title = entry.title
if entry.description:
item.description = entry.description
item.pubdate = et
item.put()
feed.put()
application = webapp2.WSGIApplication([('/harvester', Harvester)])
```
|
{
"source": "jeffreycshelton/ghp-challenges",
"score": 3
}
|
#### File: daily-problems/Day2/problem1_tests.py
```python
import unittest
from problem1 import findPoisonedDuration
class TestProblem1(unittest.TestCase):
def tests(self):
self.assertEqual(findPoisonedDuration([1,4], 2), 4)
self.assertEqual(findPoisonedDuration([1,2], 2), 3)
self.assertEqual(findPoisonedDuration([1,4,5,10], 3), 10)
self.assertEqual(findPoisonedDuration([1,8,12,13,15,17], 6), 21)
unittest.main()
```
#### File: daily-problems/Day2/problem2_tests.py
```python
import unittest
from problem2 import nextGreaterElements
class TestProblem2(unittest.TestCase):
def tests(self):
self.assertEqual(nextGreaterElements([1, 2, 1]), [2, -1, 2])
self.assertEqual(nextGreaterElements([1, 2, 3, 4, 3]), [2, 3, 4, -1, 4])
self.assertEqual(nextGreaterElements([13, 4, 9, 3, 2]), [-1, 9, 13, 13, 13])
self.assertEqual(nextGreaterElements([1, 1, 1, 1]), [-1, -1, -1, -1])
unittest.main()
```
#### File: daily-problems/Day3/problem1_tests.py
```python
import unittest
from problem1 import numJewelsInStones
class TestProblem1(unittest.TestCase):
def tests(self):
self.assertEqual(numJewelsInStones("a", "aaaa"), 4)
self.assertEqual(numJewelsInStones("a0b", "bbce3"), 2)
self.assertEqual(numJewelsInStones("aAz", "zzzmneAefeijefz321A"), 6)
self.assertEqual(numJewelsInStones("rgb", "25502553gr44asdb"), 3)
unittest.main()
```
|
{
"source": "jeffreycshelton/jdx-python",
"score": 3
}
|
#### File: jdx-python/jdx/dataset.py
```python
from __future__ import annotations
from io import BufferedReader, BufferedWriter
from typing import Tuple, Union
from .header import Header
import numpy as np
import zlib
_LABEL_BYTES = 2
class Dataset:
def __eq__(self, other: Dataset) -> bool:
return (
self.header == other.header
and np.array_equal(self._raw_data, other._raw_data)
)
def __init__(self, header: Header, raw_data: bytes):
if len(raw_data) % (header.image_size() + _LABEL_BYTES) != 0:
raise ValueError
self.header = header
self._raw_data = np.frombuffer(raw_data, dtype=np.uint8)
def __iter__(self) -> DatasetIterator:
return DatasetIterator(self)
def get_label_str(self, index) -> str:
return self.header.labels[index]
@staticmethod
def read_from(input: Union[str, BufferedReader]) -> Dataset:
if type(input) == str:
file = open(input, "rb")
elif isinstance(input, BufferedReader):
file = input
else:
raise TypeError
header = Header.read_from(file)
body_size = int.from_bytes(file.read(8), "little")
compressed_body = file.read(body_size)
decompressed_body = zlib.decompress(compressed_body, wbits=-15) # wbits parameter allows it to not have a zlib header & trailer
if type(input) == str:
file.close()
return Dataset(header, decompressed_body)
def write_to(self, output: Union[str, BufferedWriter]):
if type(output) == str:
file = open(output, "wb")
elif isinstance(output, BufferedWriter):
file = output
else:
raise TypeError
self.header.write_to(file)
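        # Slice off the 2-byte zlib header and 4-byte Adler-32 trailer so only the raw
        # DEFLATE stream is stored, matching the wbits=-15 decompression in read_from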
compressed_body = zlib.compress(self._raw_data, 9)[2:-4]
file.write(len(compressed_body).to_bytes(8, "little"))
file.write(compressed_body)
if type(output) == str:
file.close()
else:
file.flush()
class DatasetIterator:
def __init__(self, dataset: Dataset):
self._raw_data = dataset._raw_data
self._offset = 0
self._image_shape = (dataset.header.image_height, dataset.header.image_width, dataset.header.bit_depth // 8)
self._image_size = dataset.header.image_size()
def __next__(self) -> Tuple[np.ndarray, int]:
start_block = self._offset
end_image = start_block + self._image_size
end_label = end_image + _LABEL_BYTES
if end_label > len(self._raw_data):
raise StopIteration
self._offset = end_label
return (
np.reshape(self._raw_data[start_block:end_image], self._image_shape),
int.from_bytes(self._raw_data[end_image:end_label], "little")
)
```
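A short usage sketch tying `Dataset` and `DatasetIterator` together. The path mirrors the one used in the package's test suite, and `get_label_str` assumes the header carries the `labels` list referenced above.
```python
# Read a .jdx file and walk its (image, label) pairs (path borrowed from the tests).
from jdx.dataset import Dataset
dataset = Dataset.read_from("../res/example.jdx")
for image, label in dataset:
    print(image.shape, dataset.get_label_str(label))
```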
#### File: jdx-python/tests/test_header.py
```python
from jdx import Version
from jdx import Header
import unittest
class TestHeader(unittest.TestCase):
def test_read(self):
header = Header.read_from("../res/example.jdx")
self.assertTrue(header.version.is_compatible_with(Version.current()))
self.assertEqual(header.image_width, 52)
self.assertEqual(header.image_height, 52)
self.assertEqual(header.bit_depth, 24)
self.assertEqual(header.image_count, 8)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jeffreycunn/toy-atmospheric-model-python",
"score": 2
}
|
#### File: jeffreycunn/toy-atmospheric-model-python/CMM_driver.py
```python
import user_settings as us
# Define Constants
import constants as cs
from griddef import griddef
from base import base
from cmm_init import cmm_init
from integ import integ
def CMM_driver():
# Define Grid
# test = "running CMM_driver"
print("Running griddef...")
zu, zw, xu, xs = griddef(us.nz, us.nx, us.dz, us.dx, cs.zu, cs.zw, cs.xu, cs.xs)
print("griddef ran")
# Initialize the Base State
tb, qb, qbs, tbv, pisfc, pib, rhou, rhow, rhb, ub, um, u = base(us.profile_method, us.nx,
us.nz, us.dz, us.psurf, us.qsurf, us.q4km, us.ztr,
us.temptr,
us.ttr, us.tsurf, cs.p0, cs.cp, cs.g, cs.rd, cs.xk,
cs.c_v, cs.zu, cs.rhow, cs.rhou, cs.tb,
cs.tbv, cs.qb, cs.qbs, cs.rhb, cs.pib, cs.ub, cs.um,
cs.u, cs.up)
# Set Initial Conditions
print("running cmm_init")
th, thm, pim, pic, pprt = cmm_init(cs.xs, cs.g, us.nx, us.nz, cs.zu, us.dx, us.dz, us.zcnt, us.xcnt, us.radz,
us.radx, cs.trigpi, cs.cp, cs.rhou, cs.tb, cs.qb, us.thermamp, cs.th, cs.thm,
cs.pim, cs.pic, cs.pprt, us.bubble_switch)
print("cmm_init ran...")
# Integrate the Model
print("Running integ...")
integ(cs.tbv, cs.pib, cs.p0, cs.lv, cs.rd, cs.ub, cs.g, cs.cp, us.c_sound, cs.rhow, cs.rhou, cs.tb, cs.zu, cs.zw,
cs.xu, cs.xs, us.x_dist_in, us.latdmpcoef, us.raydmpcoef, us.raydmpz, cs.trigpi, cs.qb, cs.um, cs.u, cs.up,
cs.wm,
cs.w, cs.wp, cs.thm, cs.th, cs.thp, cs.pim, cs.pic, cs.pip, cs.pprt, cs.qvtot, cs.qvm, cs.qv, cs.qvp, cs.qcm,
cs.qc, cs.qcp,
cs.qrainm, cs.qrain, cs.qrainp, us.dx, us.dz, us.nt, us.nz, us.nx, us.dt, us.asscoef, us.cmixh, us.cmixv,
us.qc0, us.k1,
us.k2, cs.thvm, cs.thv, cs.thvp, 'blank', 25)
print("integ ran")
# Save Model Data to Output File
# save (modeloutputpathandfilename)
return zu, zw, xu, xs, u, th
```
#### File: jeffreycunn/toy-atmospheric-model-python/griddef.py
```python
from google.cloud import logging
import numpy as np
# Instantiates a client
# logging_client = logging.Client()
#
# # The name of the log to write to
# log_name = "cmm-python-griddef"
#
# # Selects the log to write to
# logger2 = logging_client.logger(log_name)
def griddef(nz, nx, dz, dx, zu, zw, xu, xs):
# Assign height values to u-scalar heights
print("running griddef")
# logger2.log_text("griddef log....")
# for k in zu:
for k in range(1, nz):
zu[0, k] = (k - 1.5) * dz
# logger2.log_text(zu[0, k])
# Assign height values to w heights
for k in range(1, nz):
zw[0, k] = dz * (k - 2)
# Assign x-dist values to u on the x grid-pt
for i in range(1, nx):
xu[0, i] = dx * (i - 2)
#
# # Assign x-dist values to scalars on the x grid-pt
#
for i in range(1, nx):
xs[0, i] = (i - 1.5) * dx
return zu, zw, xu, xs
```
|
{
"source": "JeffreyDF/Lasagne",
"score": 2
}
|
#### File: tests/layers/test_merge.py
```python
from mock import Mock
import numpy
import pytest
import theano
class TestAutocrop:
# Test internal helper methods of MergeCropLayer
def test_autocrop_array_shapes(self):
from lasagne.layers.merge import autocrop_array_shapes
crop0 = None
crop1 = [None, 'lower', 'center', 'upper']
# Too few crop modes; should get padded with None
crop2 = ['lower', 'upper']
# Invalid crop modes
crop_bad = ['lower', 'upper', 'bad', 'worse']
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop0) == \
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)]
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop1) == \
[(1, 2, 3, 2), (5, 2, 3, 2), (5, 2, 3, 2)]
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop2) == \
[(1, 2, 3, 4), (1, 2, 7, 8), (1, 2, 3, 2)]
with pytest.raises(ValueError):
autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop_bad)
# Inconsistent dimensionality
with pytest.raises(ValueError):
autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7), (5, 4, 3, 2, 10)], crop1)
def test_crop_inputs(self):
from lasagne.layers.merge import autocrop
from numpy.testing import assert_array_equal
crop_0 = None
crop_1 = [None, 'lower', 'center', 'upper']
crop_l = ['lower', 'lower', 'lower', 'lower']
crop_c = ['center', 'center', 'center', 'center']
crop_u = ['upper', 'upper', 'upper', 'upper']
crop_x = ['lower', 'lower']
crop_bad = ['lower', 'lower', 'bad', 'worse']
x0 = numpy.random.random((2, 3, 5, 7))
x1 = numpy.random.random((1, 2, 3, 4))
x2 = numpy.random.random((6, 3, 4, 2))
def crop_test(cropping, inputs, expected):
inputs = [theano.shared(x) for x in inputs]
outs = autocrop(inputs, cropping)
outs = [o.eval() for o in outs]
assert len(outs) == len(expected)
for o, e in zip(outs, expected):
assert_array_equal(o, e)
crop_test(crop_0, [x0, x1],
[x0, x1])
crop_test(crop_1, [x0, x1],
[x0[:, :2, 1:4, 3:], x1[:, :, :, :]])
crop_test(crop_l, [x0, x1],
[x0[:1, :2, :3, :4], x1[:, :, :, :]])
crop_test(crop_c, [x0, x1],
[x0[:1, :2, 1:4, 1:5], x1[:, :, :, :]])
crop_test(crop_u, [x0, x1],
[x0[1:, 1:, 2:, 3:], x1[:, :, :, :]])
crop_test(crop_0, [x0, x2],
[x0, x2])
crop_test(crop_1, [x0, x2],
[x0[:, :, :4, 5:], x2[:, :, :, :]])
crop_test(crop_l, [x0, x2],
[x0[:, :, :4, :2], x2[:2, :, :, :]])
crop_test(crop_c, [x0, x2],
[x0[:, :, :4, 2:4], x2[2:4, :, :, :]])
crop_test(crop_u, [x0, x2],
[x0[:, :, 1:, 5:], x2[4:, :, :, :]])
crop_test(crop_0, [x0, x1, x2],
[x0, x1, x2])
crop_test(crop_1, [x0, x1, x2],
[x0[:, :2, 1:4, 5:], x1[:, :, :, 2:], x2[:, :2, :3, :]])
crop_test(crop_l, [x0, x1, x2],
[x0[:1, :2, :3, :2], x1[:, :, :, :2], x2[:1, :2, :3, :]])
crop_test(crop_c, [x0, x1, x2],
[x0[:1, :2, 1:4, 2:4], x1[:, :, :, 1:3], x2[2:3, :2, :3, :]])
crop_test(crop_u, [x0, x1, x2],
[x0[1:, 1:, 2:, 5:], x1[:, :, :, 2:], x2[5:, 1:, 1:, :]])
crop_test(crop_x, [x0, x1, x2],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
with pytest.raises(ValueError):
crop_test(crop_bad, [x0, x1, x2],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
# Inconsistent dimensionality
with pytest.raises(ValueError):
crop_test(crop_bad, [x0[:, :, :, 0], x1, x2[:, :, :, :, None]],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
class TestConcatLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=1)
@pytest.fixture
def crop_layer_0(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=0,
cropping=['lower'] * 2)
@pytest.fixture
def crop_layer_1(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=1,
cropping=['lower'] * 2)
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for([(3, 2), (3, 5)]) == (3, 7)
assert layer.get_output_shape_for([(3, 2), (3, None)]) == (3, None)
assert layer.get_output_shape_for([(None, 2), (3, 5)]) == (3, 7)
assert layer.get_output_shape_for([(None, 2), (None, 5)]) == (None, 7)
with pytest.raises(ValueError):
layer.get_output_shape_for([(4, None), (3, 5)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, 2), (4, None)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(None, 2), (3, 5), (4, 5)])
def test_get_output_shape_for_cropped(self, crop_layer_0, crop_layer_1):
input_shapes = [(3, 2), (4, 5)]
result_0 = crop_layer_0.get_output_shape_for(input_shapes)
result_1 = crop_layer_1.get_output_shape_for(input_shapes)
assert result_0 == (7, 2)
assert result_1 == (3, 7)
def test_get_output_for(self, layer):
inputs = [theano.shared(numpy.ones((3, 3))),
theano.shared(numpy.ones((3, 2)))]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = numpy.hstack([input.get_value() for input in inputs])
assert (result_eval == desired_result).all()
def test_get_output_for_cropped(self, crop_layer_0, crop_layer_1):
x0 = numpy.random.random((5, 3))
x1 = numpy.random.random((4, 2))
inputs = [theano.shared(x0),
theano.shared(x1)]
result_0 = crop_layer_0.get_output_for(inputs).eval()
result_1 = crop_layer_1.get_output_for(inputs).eval()
desired_result_0 = numpy.concatenate([x0[:, :2], x1[:, :2]], axis=0)
desired_result_1 = numpy.concatenate([x0[:4, :], x1[:4, :]], axis=1)
assert (result_0 == desired_result_0).all()
assert (result_1 == desired_result_1).all()
class TestElemwiseSumLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.merge import ElemwiseSumLayer
return ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, -1])
@pytest.fixture
def crop_layer(self):
from lasagne.layers.merge import ElemwiseSumLayer
return ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, -1],
cropping=['lower'] * 2)
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for([(3, 2), (3, 2)]) == (3, 2)
assert layer.get_output_shape_for([(3, 2), (3, None)]) == (3, 2)
assert layer.get_output_shape_for([(None, 2), (3, 2)]) == (3, 2)
assert layer.get_output_shape_for([(None, 2), (None, 2)]) == (None, 2)
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, None), (4, 2)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, 2), (4, None)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(None, 2), (3, 2), (4, 2)])
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = 2*a - b
assert (result_eval == desired_result).all()
def test_get_output_for_cropped(self, crop_layer):
from numpy.testing import assert_array_almost_equal as aeq
x0 = numpy.random.random((5, 3))
x1 = numpy.random.random((4, 2))
inputs = [theano.shared(x0),
theano.shared(x1)]
result = crop_layer.get_output_for(inputs).eval()
desired_result = 2*x0[:4, :2] - x1[:4, :2]
aeq(result, desired_result)
def test_bad_coeffs_fails(self, layer):
from lasagne.layers.merge import ElemwiseSumLayer
with pytest.raises(ValueError):
ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, 3, -1])
class TestElemwiseMergeLayerMul:
@pytest.fixture
def layer(self):
import theano.tensor as T
from lasagne.layers.merge import ElemwiseMergeLayer
return ElemwiseMergeLayer([Mock(), Mock()], merge_function=T.mul)
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = a*b
assert (result_eval == desired_result).all()
class TestElemwiseMergeLayerMaximum:
@pytest.fixture
def layer(self):
import theano.tensor as T
from lasagne.layers.merge import ElemwiseMergeLayer
return ElemwiseMergeLayer([Mock(), Mock()], merge_function=T.maximum)
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = numpy.maximum(a, b)
assert (result_eval == desired_result).all()
```
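The cropping modes exercised in these tests follow a simple per-axis slicing rule, which can be reproduced with plain numpy; the array and target size below are arbitrary.
```python
# Plain-numpy sketch of the 'lower'/'center'/'upper' cropping used in the tests above.
import numpy as np
a = np.arange(7)
smallest = 4                              # smallest extent among the inputs on this axis
lower = a[:smallest]                      # [0 1 2 3]
upper = a[-smallest:]                     # [3 4 5 6]
offset = (len(a) - smallest) // 2
center = a[offset:offset + smallest]      # [1 2 3 4]
print(lower, center, upper)
```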
|
{
"source": "JeffreyDin/S3Migration",
"score": 2
}
|
#### File: ConvenientMigrationTool/BceBOS/initialization.py
```python
import os
from baidubce.bce_client_configuration import BceClientConfiguration
from baidubce.auth.bce_credentials import BceCredentials
from baidubce.services.sts.sts_client import StsClient
from baidubce.exception import BceError
from baidubce.services.bos.bos_client import BosClient
from baidubce.services.bos import storage_class
from common import get_logger, get_config
class BceAuthentication:
"""
config = {
'BCE_ACCESS_KEY_ID': BCE_ACCESS_KEY_ID,
'BCE_SECRET_ACCESS_KEY': BCE_SECRET_ACCESS_KEY,
'BCE_BOS_HOST': BCE_BOS_HOST,
'BCE_STS_HOST': BCE_STS_HOST,
'BOS_SRC_BUCKET': BOS_SRC_BUCKET,
'BOS_STORAGE_CLASS': BOS_STORAGE_CLASS,
'BOS_DES_DIR': BOS_DES_DIR,
'LOGGING_LEVEL': LOGGING_LEVEL
}
self.logger.debug('DEBUG')
self.logger.info('INFO')
self.logger.warning('WARNING')
self.logger.error('ERROR')
self.logger.critical('CRITICAL')
"""
def __init__(self):
self.logger = get_logger(__name__)
self.config = get_config()
self.bce_access_key_id = self.config['BCE_ACCESS_KEY_ID']
self.bce_secret_access_key = self.config['BCE_SECRET_ACCESS_KEY']
self.bce_bos_host = self.config['BCE_BOS_HOST']
self.bce_sts_host = self.config['BCE_STS_HOST']
self.bos_src_bucket = self.config['BOS_SRC_BUCKET']
self.bos_storage_class = self.config['BOS_STORAGE_CLASS']
self.bos_des_dir = self.config['BOS_DES_DIR']
def _bce_access_key_id(self):
bce_access_key_id = self.config['BCE_ACCESS_KEY_ID']
return bce_access_key_id
def _bce_secret_access_key(self):
bce_secret_access_key = self.config['BCE_SECRET_ACCESS_KEY']
return bce_secret_access_key
def _bce_bos_host(self):
bce_bos_host = self.config['BCE_BOS_HOST']
return bce_bos_host
def _bce_sts_host(self):
bce_sts_host = self.config['BCE_STS_HOST']
return bce_sts_host
def _bos_src_bucket(self):
bos_src_bucket = self.config['BOS_SRC_BUCKET']
return bos_src_bucket
def _bos_storage_class(self):
bos_storage_class = self.config['BOS_STORAGE_CLASS']
return bos_storage_class
def _bos_des_dir(self):
bos_des_dir = self.config['BOS_DES_DIR']
return bos_des_dir
def _bce_init_connection(self):
try:
bce_config = BceClientConfiguration(
credentials=BceCredentials(
access_key_id=self.bce_access_key_id,
secret_access_key=self.bce_secret_access_key),
endpoint=self.bce_bos_host)
bos_client = BosClient(bce_config)
return bos_client
except BceError as e:
self.logger.error('Error while connecting with the current BCE credentials {}'.format(e))
return []
except Exception as e:
self.logger.exception('Unexpected exception while connecting with the current BCE credentials {}'.format(e))
return []
def _bce_init_connection_sts(self):
try:
bce_config = BceClientConfiguration(
credentials=BceCredentials(
access_key_id=self.bce_access_key_id,
secret_access_key=self.bce_secret_access_key),
endpoint=self.bce_sts_host)
sts_client = StsClient(bce_config)
access_dict = {}
duration_seconds = 3600
access_dict["service"] = "bce:bos"
access_dict["region"] = "bj"
access_dict["effect"] = "Allow"
resource = ["*"]
access_dict["resource"] = resource
permission = ["*"]
access_dict["permission"] = permission
access_control_dict = {"accessControlList": [access_dict]}
response = sts_client.get_session_token(acl=access_control_dict, duration_seconds=duration_seconds)
config = BceClientConfiguration(
credentials=BceCredentials(str(response.access_key_id), str(response.secret_access_key)),
endpoint=self.bce_bos_host,
security_token=response.session_token)
bos_client = BosClient(config)
return bos_client
except BceError as e:
self.logger.error('Error while connecting with the current BCE session token {}'.format(e))
return []
except Exception as e:
self.logger.exception('Unexpected exception while connecting with the current BCE session token {}'.format(e))
return []
def get_bce_connection(self):
if self.bce_access_key_id is not None and self.bce_sts_host is not None:
bos_client = self._bce_init_connection_sts()
else:
bos_client = self._bce_init_connection()
return bos_client
```
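A minimal usage sketch of the helper above. It assumes `get_config` supplies the keys listed in the class docstring, that the configured source bucket exists, and that `list_objects` with a `contents` result list is available on the baidubce `BosClient`; treat the attribute names as assumptions rather than a verified interface.
```python
# Sketch only: obtain a BOS client via BceAuthentication and list the source bucket.
from initialization import BceAuthentication

auth = BceAuthentication()
bos_client = auth.get_bce_connection()  # STS-backed client if BCE_STS_HOST is set, plain client otherwise
if bos_client:  # the connection helpers return [] on errors
    response = bos_client.list_objects(auth.bos_src_bucket)
    for obj in response.contents:
        print(obj.key, obj.size)
```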
#### File: ConvenientMigrationTool/BceBOS/test.py
```python
import datetime
file_time0 = datetime.datetime.now().isoformat()
file_time = datetime.datetime.now().isoformat().replace(':', '-')[:19]
print(file_time0)
print(file_time)
```
|
{
"source": "JeffreyDrJ/myfirstblog",
"score": 2
}
|
#### File: myfirstblog/home/views.py
```python
from django.shortcuts import render
# Define the index (home page) view:
from django.views import View
# ------------------------
from home.models import ArticleCategory
from home.models import Article
from django.http.response import HttpResponseNotFound
from django.core.paginator import Paginator, EmptyPage
class IndexView(View):
def get(self, request):
# 1. Fetch all category information from the database
categories = ArticleCategory.objects.all()
# 2. Receive the category id the user clicked from the page
cat_id = request.GET.get('cat_id', 1) # default to 1 if not passed in the query string
# 4. Get pagination parameters from the request
page_num = request.GET.get('page_num', 1) # current page number, not the total page count
page_size = request.GET.get('page_size', 10) # number of objects per page
# 3. Look up the category by the id the user clicked
try:
selected_category = ArticleCategory.objects.get(id=cat_id)
except ArticleCategory.DoesNotExist:
return HttpResponseNotFound('ArticleCategory not found in the database!')
# 5. Filter articles by the selected category
filtered_articles = Article.objects.filter(category=selected_category) # note: this is a QuerySet of Article objects
# 6. Create the paginator
# (self, object_list, per_page, orphans=0,allow_empty_first_page=True)
paginator = Paginator(filtered_articles, per_page=page_size)
# 7. Paginate
try:
page_articles = paginator.page(page_num)
except EmptyPage:
return HttpResponseNotFound('empty page!')
# 8. Get the total number of pages
total_page_num = paginator.num_pages
# 9. Assemble the context passed to the template
context = {
# category parameters
'categories': categories,
'selected_category': selected_category,
# pagination parameters
'articles': page_articles,
'page_size': page_size,
'page_num': page_num,
'total_page': total_page_num
}
return render(request, 'index.html', context=context)
# Define the detail page view (display, recommendations and comments):
from home.models import Article, ArticleCategory
from django.shortcuts import redirect # for redirects
from django.urls import reverse # for building redirect URLs
from home.models import Comment # for saving comments to the database
# -------------------------
class DetailView(View):
def get(self, request):
# Business logic for displaying the article, recommendations, comments and page numbers:
# 1. Receive the article id appended as ?article_id={{article.id}} when jumping from index to detail
show_article_id = request.GET.get('article_id')
# 2. Query the article by its id
try:
selected_article = Article.objects.get(id=show_article_id)
except Article.DoesNotExist:
return render(request, '404.html') # if the article id does not exist, fall back to the 404 page
else:
# increment the view count
selected_article.total_views += 1
selected_article.save()
# 3. Query all categories (used to highlight and link the top navigation bar)
categories = ArticleCategory.objects.all()
# 4. Query the 10 most-viewed articles
# sort with .order_by; the leading '-' means descending
popular_articles = Article.objects.order_by('-total_views')[0:10]
# a. Get pagination parameters
page_size = request.GET.get('page_size',10)
page_num = request.GET.get('page_num',1)
# b. Query comments for this article
comments = Comment.objects.filter(target_article=selected_article).order_by('-created_time')
# c. Get the total number of comments (TODO: why not use selected_article.total_comments?)
comment_count = comments.count()
# d. Create the paginator
from django.core.paginator import Paginator,EmptyPage
paginator = Paginator(comments,page_size)
# e. Paginate
try:
page_comments = paginator.page(page_num)
except EmptyPage:
return HttpResponseNotFound('Comment pagination error: empty page')
# f. Get the total number of pages
total_page = paginator.num_pages
# 5. Assemble the template context
context = {
'categories': categories, # all categories
'category': selected_article.category, # category of the current article
'article': selected_article, # the article being viewed
'popular_articles': popular_articles, # the 10 recommended articles
'comment_count':comment_count, # total number of comments
'comments':page_comments, # comments on this page
'page_size':page_size, # page size
'total_page':total_page, # total number of pages
'page_num':page_num # current page number
}
return render(request, 'detail.html', context=context)
def post(self, request):
# Business logic:
# 1. Get the current user
user = request.user
# 2. Check that the user exists and is logged in
if user and user.is_authenticated:
# 4. For logged-in users, read the posted form data:
target_id = request.POST.get('hidden_id') # note: this comes from a hidden field in the HTML form (but why a hidden field?)
content = request.POST.get('content')
# 5. Verify the article id exists
try:
target_article = Article.objects.get(id=target_id)
except Article.DoesNotExist:
return HttpResponseNotFound('Article not found.')
# 6. Save the comment to the database
Comment.objects.create(
content=content,
target_article=target_article,
user=user
)
# 7. Update the comment count
target_article.total_comments += 1
target_article.save()
# 8. Redirect back to the current detail page (detail path + article id)
path = reverse('home:detail') + '?article_id={}'.format(target_id)
return redirect(path)
else:
# 3. Users who are not logged in are redirected to the login page
return redirect(reverse('users:login'))
```
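For reference, a hypothetical `home/urls.py` matching the `reverse('home:detail')` and index usage in the views above. Only the `home` namespace and the `detail` view name are taken from the source; the URL paths themselves are assumptions.
```python
# Hypothetical routing for the views above (paths are illustrative).
from django.urls import path
from home.views import IndexView, DetailView

app_name = 'home'  # required for the 'home:' namespace used in reverse()

urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('detail/', DetailView.as_view(), name='detail'),
]
```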
|
{
"source": "Jeffrey-Ede/Adaptive-Partial-STEM",
"score": 2
}
|
#### File: Adaptive-Partial-STEM/128/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers.python.layers import initializers
from dnc import dnc
import numpy as np
import cv2
from scipy import ndimage as nd
from PIL import Image, ImageDraw
import os, sys
import time
from utility import alrc, spiral_generator
experiment_number = 128
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("hidden_size", 64, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_size", 20, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 32, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_integer("clip_value", 0, "Maximum absolute value of controller and dnc outputs.")
tf.flags.DEFINE_bool("use_batch_norm", True, "Use batch normalization in generator.")
tf.flags.DEFINE_string("model", "DNC", "LSTM or DNC.")
tf.flags.DEFINE_integer("projection_size", 0, "Size of projection layer. Zero for no projection.")
tf.flags.DEFINE_bool("is_input_embedder", False, "Embed inputs before they are input.")
tf.flags.DEFINE_bool("is_spirals", False, "True to use actions that describe spirals.")
# Optimizer parameters.
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("replay_size", 25000, "Maximum examples in ring buffer.")
tf.flags.DEFINE_integer("avg_replays", 4, "Mean frequency each experience is used.")
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10, "Epsilon used for RMSProp optimizer.")
tf.flags.DEFINE_float("L2_norm", 1.e-5, "Decay rate for L2 regularization. 0 for no regularization.")
# Task parameters
tf.flags.DEFINE_integer("img_side", 96, "Number of image pixels for square image")
tf.flags.DEFINE_integer("num_steps", 80, "Number of image pixels for square image")
tf.flags.DEFINE_integer("step_size", 5, "Distance STEM probe moves at each step (in px).")
tf.flags.DEFINE_integer("num_actions", 2, "Number of parameters to describe actions.")
tf.flags.DEFINE_integer("shuffle_size", 2000, "Size of moving buffer to sample data from.")
tf.flags.DEFINE_integer("prefetch_size", 10, "Number of batches to prepare in advance.")
# Training options.
tf.flags.DEFINE_float("actor_lr", 0.0007, "Actor learning rate.")
tf.flags.DEFINE_float("critic_lr", 0.001, "Critic learning rate.")
tf.flags.DEFINE_float("generator_lr", 0.003, "Generator learning rate.")
tf.flags.DEFINE_float("gamma", 0.99, "Reward/loss decay.")
tf.flags.DEFINE_bool("is_advantage_actor_critic", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_bool("is_cyclic_generator_learning_rate", True, "True for sawtooth oscillations.")
tf.flags.DEFINE_bool("is_decaying_generator_learning_rate", True, "True for decay envelope for sawtooth oscillations.")
tf.flags.DEFINE_integer("supervision_iters", 100_000, "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_start", 1., "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_end", 0., "Starting value for supeversion.")
if FLAGS.supervision_iters:
#Flag will not be used
tf.flags.DEFINE_float("supervision", 0.5, "Weighting for known discounted future reward.")
else:
#Flag will be used
tf.flags.DEFINE_float("supervision", 0.0, "Weighting for known discounted future reward.")
tf.flags.DEFINE_bool("is_target_actor", True and FLAGS.supervision != 1, "True to use target actor.")
tf.flags.DEFINE_bool("is_target_critic", True and FLAGS.supervision != 1, "True to use target critic.")
tf.flags.DEFINE_bool("is_target_generator", False, "True to use target generator.")
tf.flags.DEFINE_integer("update_frequency", 0, "Frequency of hard target network updates. Zero for soft updates.")
tf.flags.DEFINE_float("target_decay", 0.9997, "Decay rate for target network soft updates.")
tf.flags.DEFINE_bool("is_generator_batch_norm_tracked", False, "True to track generator batch normalization.")
tf.flags.DEFINE_bool("is_positive_qs", True, "Whether to clip qs to be positive.")
tf.flags.DEFINE_bool("is_infilled", False, "True to use infilling rather than generator.")
tf.flags.DEFINE_bool("is_prev_position_input", True, "True to input previous positions.")
tf.flags.DEFINE_bool("is_ornstein_uhlenbeck", True, "True for O-U exploration noise.")
tf.flags.DEFINE_bool("is_noise_decay", True, "Decay noise if true.")
tf.flags.DEFINE_float("ou_theta", 0.1, "Drift back to mean.")
tf.flags.DEFINE_float("ou_sigma", 0.2, "Size of random process.")
tf.flags.DEFINE_bool("is_rel_to_truth", False, "True to normalize losses using expected losses.")
tf.flags.DEFINE_bool("is_clipped_reward", True, "True to clip rewards.")
tf.flags.DEFINE_bool("is_clipped_critic", False, "True to clip critic predictions for actor training.")
tf.flags.DEFINE_float("over_edge_penalty", 0.05, "Penalty for action going over edge of image.")
tf.flags.DEFINE_bool("is_prioritized_replay", False, "True to prioritize the replay of difficult experiences.")
tf.flags.DEFINE_bool("is_biased_prioritized_replay", False, "Priority sampling without bias correction.")
tf.flags.DEFINE_bool("is_relative_to_spirals", False, "True to compare generator losses against losses for spirals.")
tf.flags.DEFINE_bool("is_self_competition", False, "Oh it is on. True to compete against past versions of itself.")
tf.flags.DEFINE_float("norm_generator_losses_decay", 0.997, "Divide generator losses by their running mean. Zero for no normalization.")
tf.flags.DEFINE_bool("is_minmax_reward", False, "True to use highest losses for actor loss.")
tf.flags.DEFINE_integer("start_iter", 320_880, "Starting iteration")
tf.flags.DEFINE_integer("train_iters", 1_000_000, "Training iterations")
tf.flags.DEFINE_integer("val_examples", 20_000, "Number of validation examples")
tf.flags.DEFINE_float("style_loss", 0., "Weighting of style loss. Zero for no style loss.")
tf.flags.DEFINE_float("step_incr", np.sqrt(2), "Number of pixels per step.")
tf.flags.DEFINE_string("model_dir",
f"//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/recurrent_conv-1/{experiment_number}/",
"Working directory.")
tf.flags.DEFINE_string("data_file",
"//Desktop-sa1evjv/h/96x96_stem_crops.npy",
"Datafile containing 19769 96x96 downsampled STEM crops.")
tf.flags.DEFINE_integer("report_freq", 10, "How often to print losses to the console.")
os.chdir(FLAGS.model_dir)
sys.path.insert(0, FLAGS.model_dir)
def norm_img(img, min=None, max=None, get_min_and_max=False):
if min == None:
min = np.min(img)
if max == None:
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
if get_min_and_max:
return img.astype(np.float32), (min, max)
else:
return img.astype(np.float32)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def disp(img):
#if len(img.shape) == 3:
# img = np.sum(img, axis=2)
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
def run_model(input_sequence, output_size):
"""Runs model on input sequence."""
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
}
clip_value = FLAGS.clip_value
dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)
initial_state = dnc_core.initial_state(FLAGS.batch_size)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=dnc_core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
return output_sequence
class RingBuffer(object):
def __init__(
self,
action_shape,
observation_shape,
full_scan_shape,
batch_size,
buffer_size=1000,
num_past_losses=None,
):
self.buffer_size = buffer_size
self.actions = np.zeros([buffer_size]+list(action_shape)[1:])
self.observations = np.zeros([buffer_size]+list(observation_shape)[1:])
self.full_scans = np.zeros([buffer_size]+list(full_scan_shape)[1:])
self.position = 0
self._batch_size = batch_size
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
self.priorities = np.zeros([buffer_size])
self.indices = np.arange(buffer_size)
if FLAGS.is_self_competition:
self.past_losses = np.zeros([num_past_losses])
self.labels = np.zeros([buffer_size], np.int32)
def add(self, actions, observations, full_scans, labels=None):
i0 = self.position % self.buffer_size
num_before_cycle = min(self.buffer_size-i0, self._batch_size)
self.actions[i0:i0+num_before_cycle] = actions[:num_before_cycle]
self.observations[i0:i0+num_before_cycle] = observations[:num_before_cycle]
self.full_scans[i0:i0+num_before_cycle] = full_scans[:num_before_cycle]
num_remaining = self._batch_size - num_before_cycle
if num_remaining > 0:
self.actions[0:num_remaining] = actions[num_before_cycle:]
self.observations[:num_remaining] = observations[num_before_cycle:]
self.full_scans[:num_remaining] = full_scans[num_before_cycle:]
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
if self.position:
mean_priority = np.sum(self.priorities) / min(self.position, self.buffer_size)
else:
mean_priority = 0.3
self.priorities[i0:i0+num_before_cycle] = mean_priority*np.ones([num_before_cycle])
if num_before_cycle < self._batch_size:
self.priorities[0:num_remaining] = mean_priority*np.ones([self._batch_size - num_before_cycle])
if FLAGS.is_self_competition:
self.labels[i0:i0+num_before_cycle] = labels[:num_before_cycle]
if num_remaining > 0:
self.labels[0:num_remaining] = labels[num_before_cycle:]
self.position += self._batch_size
def get(self):
limit = min(self.position, self.buffer_size)
if FLAGS.is_prioritized_replay:
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=self.priorities/np.sum(self.priorities)
) #alpha=1
beta = 0.5 + 0.5*(FLAGS.train_iters - self.position)/FLAGS.train_iters
sampled_priority_weights = self.priorities[sample_idxs]**( -beta )
sampled_priority_weights /= np.max(sampled_priority_weights)
elif FLAGS.is_biased_prioritized_replay:
alpha = (FLAGS.train_iters - self.position)/FLAGS.train_iters
priorities = self.priorities**alpha
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=priorities/np.sum(priorities)
)
else:
sample_idxs = np.random.randint(0, limit, size=self._batch_size)
sampled_actions = np.stack([self.actions[i] for i in sample_idxs])
sampled_observations = np.stack([self.observations[i] for i in sample_idxs])
sampled_full_scans = np.stack([self.full_scans[i] for i in sample_idxs])
if FLAGS.is_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs, sampled_priority_weights
elif FLAGS.is_biased_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs
elif FLAGS.is_self_competition:
sampled_labels = np.stack([self.labels[i] for i in sample_idxs])
sampled_past_losses = np.stack([self.past_losses[i] for i in sampled_labels])
return sampled_actions, sampled_observations, sampled_full_scans, sampled_labels, sampled_past_losses
else:
return sampled_actions, sampled_observations, sampled_full_scans
def update_priorities(self, idxs, priorities):
"""For prioritized experience replay"""
self.priorities[idxs] = priorities
def update_past_losses(self, idxs, losses):
self.past_losses[idxs] = losses
class Agent(snt.AbstractModule):
def __init__(
self,
num_outputs,
name,
is_new=False,
noise_decay=None,
is_double_critic=False,
sampled_full_scans=None,
val_full_scans=None
):
super(Agent, self).__init__(name=name)
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
"projection_size": FLAGS.projection_size or None,
}
clip_value = FLAGS.clip_value
with self._enter_variable_scope():
components = dnc.Components(access_config, controller_config, num_outputs)
self._dnc_core = dnc.DNC(components, num_outputs, clip_value, is_new=False, is_double_critic=is_double_critic)
if is_new:
self._dnc_core_new = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
noise_decay=noise_decay,
sampled_full_scans=sampled_full_scans,
is_noise=True
)
if not val_full_scans is None:
self._dnc_core_val = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
sampled_full_scans=val_full_scans
)
self._initial_state = self._dnc_core.initial_state(FLAGS.batch_size)
#self._action_embedder = snt.Linear(output_size=64)
#self._observation_embedder = snt.Linear(output_size=64)
def _build(self, observations, actions):
#Tiling here is a hack to make inputs the same size
num_tiles = 2 // (actions.get_shape().as_list()[-1] // FLAGS.num_actions)
tiled_actions = tf.tile(actions, [1, 1, num_tiles])
input_sequence = tf.concat([observations, tiled_actions], axis=-1)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core,
inputs=input_sequence,
time_major=False,
initial_state=self._initial_state
)
return output_sequence
def get_new_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_new,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def get_val_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_val,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
@property
def variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name
)
@property
def trainable_variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=tf.get_variable_scope().name
)
def spectral_norm(w, iteration=1, in_place_updates=False):
"""Spectral normalization. It imposes Lipschitz continuity by constraining the
spectral norm (maximum singular value) of weight matrices.
Inputs:
w: Weight matrix to spectrally normalize.
iteration: Number of times to apply the power iteration method to
enforce spectral norm.
Returns:
Weight matrix with spectral normalization control dependencies.
"""
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable(auto_name("u"),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
if in_place_updates:
#In-place control dependencies bottleneck training
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
else:
#Execute control dependency in parallel with other update ops
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u.assign(u_hat))
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
def spectral_norm_conv(
inputs,
num_outputs,
stride=1,
kernel_size=3,
padding='VALID',
biases_initializer=tf.zeros_initializer()
):
"""Convolutional layer with spectrally normalized weights."""
w = tf.get_variable(auto_name("kernel"), shape=[kernel_size, kernel_size, inputs.get_shape()[-1], num_outputs])
x = tf.nn.conv2d(input=inputs, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding=padding)
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
def conv(
inputs,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
data_format="NHWC",
actv_fn=tf.nn.relu,
is_batch_norm=True,
is_spectral_norm=False,
is_depthwise_sep=False,
extra_batch_norm=False,
biases_initializer=tf.zeros_initializer,
weights_initializer=initializers.xavier_initializer,
transpose=False,
is_training=True
):
"""Convenience function for a strided convolutional or transpositional
convolutional layer.
Intro: https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1.
The order is: Activation (Optional) -> Batch Normalization (optional) -> Convolutions.
Inputs:
inputs: Tensor of shape `[batch_size, height, width, channels]` to apply
convolutions to.
num_outputs: Number of feature channels to output.
kernel_size: Side length of square convolutional kernels.
stride: Distance between convolutional kernel applications.
padding: 'SAME' for zero padding where kernels go over the edge.
'VALID' to discard features where kernels go over the edge.
actv_fn: non-linearity to apply after summing convolutions.
is_batch_norm: If True, add batch normalization after activation.
is_spectral_norm: If True, spectrally normalize weights.
is_depthwise_sep: If True, depthwise separate convolutions into depthwise
spatial convolutions, then 1x1 pointwise convolutions.
extra_batch_norm: If True and convolutions are depthwise separable, implement
batch normalization between depthwise and pointwise convolutions.
biases_initializer: Function to initialize biases with. None for no biases.
weights_initializer: Function to initialize weights with. None for no weights.
transpose: If True, apply convolutional layer transpositionally to the
described convolutional layer.
is_training: If True, use training specific operations e.g. batch normalization
update ops.
Returns:
Output of convolutional layer.
"""
x = inputs
num_spatial_dims = len(x.get_shape().as_list()) - 2
if biases_initializer == None:
biases_initializer = lambda: None
if weights_initializer == None:
weights_initializer = lambda: None
if not is_spectral_norm:
#Convolutional layer without spectral normalization
if transpose:
stride0 = 1
if type(stride) == list or is_depthwise_sep or stride % 1:
#Apparently there is no implementation of transpositional
#depthwise separable convolutions, so bilinearly upsample then
#depthwise separably convolve
if kernel_size != 1:
x = tf.image.resize_bilinear(
images=x,
size=stride if type(stride) == list else \
[int(stride*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
stride0 = stride
stride = 1
if type(stride0) == list and not is_depthwise_sep:
layer = tf.contrib.layers.conv2d
elif is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d_transpose
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
if type(stride0) != list:
if (is_depthwise_sep or stride0 % 1) and kernel_size == 1:
x = tf.image.resize_bilinear(
images=x,
size=[int(stride0*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
else:
if num_spatial_dims == 1:
layer = tf.contrib.layers.conv1d
elif num_spatial_dims == 2:
if is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
else:
#Weights are spectrally normalized
x = spectral_norm_conv(
inputs=x,
num_outputs=num_outputs,
stride=stride,
kernel_size=kernel_size,
padding=padding,
biases_initializer=biases_initializer())
if actv_fn:
x = actv_fn(x)
if is_batch_norm and FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
def residual_block(inputs, skip=3, is_training=True):
"""Residual block whre the input is added to the signal after skipping some
layers. This architecture is good for learning purturbative transformations.
The skipped layers are standard convolutional layers.
Deep residual learning: https://arxiv.org/abs/1512.03385.
Inputs:
inputs: Tensor to apply residual block to. Outputs of every layer will
have the same shape.
skip: Number of layers to skip before adding input to layer output.
layer: Layer to apply in residual block. Defaults to convolutional
layer. Custom layers must support `inputs`, `num_outputs` and `is_training`
arguments.
Returns:
Final output of residual block.
"""
x = x0 = inputs
def layer(inputs, num_outputs, is_training, is_batch_norm, actv_fn):
x = conv(
inputs=inputs,
num_outputs=num_outputs,
is_training=is_training,
actv_fn=actv_fn
)
return x
for i in range(skip):
x = layer(
inputs=x,
num_outputs=x.get_shape()[-1],
is_training=is_training,
is_batch_norm=i < skip - 1,
actv_fn=tf.nn.relu
)
x += x0
if FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
class Generator(snt.AbstractModule):
def __init__(self,
name,
is_training
):
super(Generator, self).__init__(name=name)
self._is_training = is_training
def _build(self, inputs):
x = inputs
std_actv = tf.nn.relu#lambda x: tf.nn.leaky_relu(x, alpha=0.1)
is_training = self._is_training
is_depthwise_sep = False
base_size = 32
#x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x = conv(
x,
num_outputs=32,
is_training=is_training,
actv_fn=std_actv
)
#Encoder
for i in range(1, 3):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
actv_fn=std_actv
)
if i == 2:
low_level = x
#Residual blocks
for _ in range(5): #Number of blocks
x = residual_block(
x,
skip=3,
is_training=is_training
)
#Decoder
for i in range(1, -1, -1):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
transpose=True,
actv_fn=std_actv
)
x = conv(
x,
num_outputs=base_size,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training
)
#Project features onto output image
x = conv(
x,
num_outputs=1,
biases_initializer=None,
actv_fn=None,
is_batch_norm=False,
is_training=is_training
)
return x
@property
def variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name
)
@property
def trainable_variables(self):
with self._enter_variable_scope():
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=tf.get_variable_scope().name
)
def construct_partial_scans(actions, observations):
"""
actions: [batch_size, num_steps, 2]
observations: [batch_size, num_steps, 10]
"""
#Last action unused and the first action is always the same
actions = np.concatenate((np.ones([FLAGS.batch_size, 1, 2]), actions[:,:-1,:]), axis=1)
starts = 0.5*FLAGS.img_side + FLAGS.step_size*(np.cumsum(actions, axis=1) - actions)
#starts = np.zeros(actions.shape)
#starts[:,0,:] = actions[:,0,:]
#for i in range(1, FLAGS.num_steps):
# starts[:,i,:] = actions[:,i,:] + starts[:,i-1,:]
#starts -= actions
#starts *= FLAGS.step_size
#starts += 0.5*FLAGS.img_side
positions = np.stack([starts + i*actions for i in range(FLAGS.step_size)], axis=-2)
x = np.minimum(np.maximum(positions, 0), FLAGS.img_side-1)
indices = []
for j in range(FLAGS.batch_size):
for k in range(FLAGS.num_steps):
for i in range(FLAGS.step_size):
indices.append( [j, int(x[j,k,i,0]), int(x[j,k,i,1])] )
indices = np.array(indices)
indices = tuple([indices[:,i] for i in range(3)])
partial_scans = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
masks = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
partial_scans[indices] = observations.reshape([-1])
masks[indices] = 1
partial_scans /= np.maximum(masks, 1)
masks = np.minimum(masks, 1)
partial_scans = np.stack([partial_scans, masks], axis=-1)
return partial_scans
def target_update_ops(target_network, network, decay=FLAGS.target_decay, l2_norm=False):
t_vars = target_network.variables
v_vars = network.variables
update_ops = []
for t, v in zip(t_vars, v_vars):
if FLAGS.is_generator_batch_norm_tracked or not "BatchNorm" in t.name: #Don't track batch normalization
if l2_norm:
v_new = (1-FLAGS.L2_norm)*v
op = v.assign(v_new)
update_ops.append(op)
op = t.assign(decay*t + (1-decay)*v_new)
update_ops.append(op)
else:
op = t.assign(decay*t + (1-decay)*v)
update_ops.append(op)
print(t.name.replace("target_", "") == v.name, t.name.replace("target_", ""), v.name)
return update_ops
def load_data(shape):
data_ph = tf.placeholder(tf.float32, shape=list(shape))
ds = tf.data.Dataset.from_tensor_slices(tuple([data_ph]))
if FLAGS.is_self_competition:
labels = tf.data.Dataset.range(0, list(shape)[0])
ds = tf.data.Dataset.zip((ds, labels))
ds = ds.shuffle(buffer_size=FLAGS.shuffle_size)
ds = ds.repeat()
ds = ds.batch(FLAGS.batch_size)
ds = ds.prefetch(FLAGS.prefetch_size)
iterator = ds.make_initializable_iterator()
return data_ph, iterator
@tf.custom_gradient
def overwrite_grads(x, y):
print("OG", x, y)
def grad(dy):
return y, None
return x, grad
def infill(data, mask):
return data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
#def infill(data, mask):
# x = np.zeros(data.shape)
# c = (cv2.GaussianBlur(mask.astype(np.float32), (7, 7), 3.5, None, 3.5) > 0).astype(np.float32)
# truth = data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
# x = (truth*c).astype(np.float32)
# return x
def fill(input):
return np.expand_dims(np.stack([infill(img, mask) for img, mask in zip(input[:,:,:,0], input[:,:,:,1])]), -1)
def flip_rotate(img, choice):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
if choice == 0:
return img
elif choice == 1:
return np.rot90(img, 1)
elif choice == 2:
return np.rot90(img, 2)
elif choice == 3:
return np.rot90(img, 3)
elif choice == 4:
return np.flip(img, 0)
elif choice == 5:
return np.flip(img, 1)
elif choice == 6:
return np.flip(np.rot90(img, 1), 0)
else:
return np.flip(np.rot90(img, 1), 1)
def draw_spiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
coverage: Average amount of time spent at a random pixel
side: Sidelength of square image that the motion is
inscribed on.
Returns:
A spiral
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
return img
def average_filter(image):
kernel = tf.ones([5,5,1,1])
filtered_image = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding="VALID")
return filtered_image
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def blur(image):
gauss_kernel = gaussian_kernel( 2, 0., 2.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (2,2))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
def calc_generator_losses(img1, img2):
if FLAGS.data_file == "//Desktop-sa1evjv/h/96x96_stem_crops.npy":
img2 = blur(img2) #Gaussian blur
generator_losses = 10*tf.reduce_mean( (img1 - img2)**2, axis=[1,2,3] )
losses = generator_losses
if FLAGS.style_loss:
edges1 = tf.image.sobel_edges(img1)
edges2 = tf.image.sobel_edges(img2)
print("Edges:", edges1)
generator_losses += FLAGS.style_loss*tf.reduce_mean( (edges1 - edges2)**2, axis=[1,2,3,4] )
return generator_losses, losses
def main(unused_argv):
"""Trains the DNC and periodically reports the loss."""
graph = tf.get_default_graph()
action_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions]
observation_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size]
full_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1]
partial_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 2]
images = np.load(FLAGS.data_file)
images[np.logical_not(np.isfinite(images))] = 0
images = np.stack([norm_img(x) for x in images])
train_images = images[:int(0.8*len(images))]
val_images = images[int(0.8*len(images)):]
train_data_ph, train_iterator = load_data(train_images.shape)
val_data_ph, val_iterator = load_data(val_images.shape)
if FLAGS.is_self_competition:
(full_scans, labels) = train_iterator.get_next()
(val_full_scans, val_labels) = val_iterator.get_next()
full_scans = full_scans[0]
val_full_scans = val_full_scans[0]
else:
(full_scans, ) = train_iterator.get_next()
(val_full_scans, ) = val_iterator.get_next()
if hasattr(tf, 'ensure_shape'):
full_scans = tf.ensure_shape(full_scans, full_scan_shape)
val_full_scans = tf.ensure_shape(val_full_scans, full_scan_shape)
else:
full_scans = tf.reshape(full_scans, full_scan_shape)
val_full_scans = tf.reshape(val_full_scans, full_scan_shape)
replay = RingBuffer(
action_shape=action_shape,
observation_shape=observation_shape,
full_scan_shape=full_scan_shape,
batch_size=FLAGS.batch_size,
buffer_size=FLAGS.replay_size,
num_past_losses=train_images.shape[0],
)
replay_actions_ph = tf.placeholder(tf.float32, shape=action_shape, name="replay_action")
replay_observations_ph = tf.placeholder(tf.float32, shape=observation_shape, name="replay_observation")
replay_full_scans_ph = tf.placeholder(tf.float32, shape=full_scan_shape, name="replay_full_scan")
partial_scans_ph = tf.placeholder(tf.float32, shape=partial_scan_shape, name="replay_partial_scan")
is_training_ph = tf.placeholder(tf.bool, name="is_training")
if FLAGS.is_noise_decay:
noise_decay_ph = tf.placeholder(tf.float32, shape=(), name="noise_decay")
else:
noise_decay_ph = None
if FLAGS.supervision_iters:
supervision_ph = tf.placeholder(tf.float32, name="supervision")
else:
supervision_ph = FLAGS.supervision
if FLAGS.is_prioritized_replay:
priority_weights_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="priority_weights")
if FLAGS.is_self_competition:
past_losses_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="past_losses")
batch_size = FLAGS.batch_size
if FLAGS.is_relative_to_spirals:
coverage = FLAGS.num_steps*FLAGS.step_size/FLAGS.img_side**2
spiral = draw_spiral(coverage=coverage, side=FLAGS.img_side)
ys = [1/i**2 for i in range(9, 2, -1)]
xs = [np.sum(draw_spiral(coverage=c, side=FLAGS.img_side)) / FLAGS.img_side**2 for c in ys]
ub_idx = next(i for i, x in enumerate(xs) if x > coverage)
lb = xs[ub_idx-1]
ub = xs[ub_idx]
input_coverage = ( (coverage - lb)*X + (ub - coverage)*Y ) / (lb - ub)
if not FLAGS.is_spirals:
actor = Agent(
num_outputs=FLAGS.num_actions,
is_new=True,
noise_decay=noise_decay_ph,
sampled_full_scans=full_scans,
val_full_scans=val_full_scans,
name="actor"
)
target_actor = Agent(num_outputs=FLAGS.num_actions, name="target_actor")
critic = Agent(num_outputs=1, is_double_critic=True, name="critic")
target_critic = Agent(num_outputs=1, is_double_critic=True, name="target_critic")
new_observations, new_actions = actor.get_new_experience()
val_observations, val_actions = actor.get_val_experience()
else:
new_observations, new_actions = spiral_generator(full_scans)
val_observations, val_actions = spiral_generator(val_full_scans)
#Last actions are unused
replay_observations = replay_observations_ph[:,:-1,:]
replay_actions = replay_actions_ph[:,:-1,:]
#First action must be added for actors (not critics)
start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
started_replay_actions = tf.concat([start_actions, replay_actions[:,:-1,:]], axis=1)
if not FLAGS.is_spirals:
actions = actor(replay_observations, started_replay_actions)
if FLAGS.is_target_actor:
target_actions = target_actor(replay_observations, started_replay_actions)
elif FLAGS.supervision != 1:
target_actions = tf.stop_gradient(actions)
#The last action is never used, and the first action is diagonally north-east
#Shifting because network expect actions from previous steps to be inputted
#start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
#actions = tf.concat([start_actions, actions[:, :-1, :]], axis=1)
#target_actions = tf.concat([start_actions, target_actions[:, :-1, :]], axis=1)
actor_actions = tf.concat([replay_actions, actions], axis=-1)
qs = critic(replay_observations, actor_actions)
critic_qs = qs[:,:,:1]
actor_qs = qs[:,:,1:]
if FLAGS.is_target_critic:
target_actor_actions = tf.concat([replay_actions, target_actions], axis=-1)
target_actor_qs = target_critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
elif FLAGS.supervision != 1:
target_actor_qs = actor_qs#critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
if not FLAGS.is_infilled:
generator = Generator(name="generator", is_training=is_training_ph)
generation = generator(partial_scans_ph)
else:
generation = tf.py_func(fill, [partial_scans_ph], tf.float32)
if hasattr(tf, 'ensure_shape'):
generation = tf.ensure_shape(generation, full_scan_shape)
else:
generation = tf.reshape(generation, full_scan_shape)
generator_losses, losses = calc_generator_losses(generation, replay_full_scans_ph)
if FLAGS.is_target_generator and not FLAGS.is_infilled:
target_generator = Generator(name="target_generator", is_training=is_training_ph)
target_generation = target_generator(partial_scans_ph)
if FLAGS.is_minmax_reward:
errors = (target_generation - replay_full_scans_ph)**2
losses = tf.reduce_max( average_filter(errors), reduction_indices=[1,2,3] )
else:
target_generator_losses, losses = calc_generator_losses(target_generation, replay_full_scans_ph)
losses = target_generator_losses #For RL
else:
if FLAGS.is_minmax_reward:
errors = (generation - replay_full_scans_ph)**2
losses = tf.reduce_max( average_filter(errors), reduction_indices=[1,2,3] )
unclipped_losses = losses
if FLAGS.is_positive_qs and (FLAGS.is_target_critic or FLAGS.supervision != 1) and not FLAGS.is_spirals:
target_actor_qs = tf.nn.relu(target_actor_qs)
if FLAGS.norm_generator_losses_decay:
mu = tf.get_variable(name="loss_mean", initializer=tf.constant(1., dtype=tf.float32))
mu_op = mu.assign(FLAGS.norm_generator_losses_decay*mu+(1-FLAGS.norm_generator_losses_decay)*tf.reduce_mean(losses))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mu_op)
losses /= tf.stop_gradient(mu)
if not FLAGS.is_spirals:
if FLAGS.is_clipped_reward:
losses = alrc(losses)
if FLAGS.is_self_competition:
self_competition_losses = tf.where(
past_losses_ph > unclipped_losses,
tf.ones([FLAGS.batch_size]),
tf.zeros([FLAGS.batch_size])
)
losses += self_competition_losses
if FLAGS.over_edge_penalty:
positions = (
0.5 + #middle of image
FLAGS.step_size/(np.sqrt(2)*FLAGS.img_side) + #First step
(FLAGS.step_size/FLAGS.img_side)*tf.cumsum(replay_actions_ph[:,:-1,:], axis=1) # Actions
)
#new_positions = (
# positions - replay_actions_ph[:,:-1,:] + #Go back one action
# (FLAGS.step_size/FLAGS.img_side)*actions #New actions
# )
is_over_edge = tf.logical_or(tf.greater(positions, 1), tf.less(positions, 0))
is_over_edge = tf.logical_or(is_over_edge[:,:,0], is_over_edge[:,:,1])
over_edge_losses = tf.where(
is_over_edge,
FLAGS.over_edge_penalty*tf.ones(is_over_edge.get_shape()),
tf.zeros(is_over_edge.get_shape())
)
over_edge_losses = tf.cumsum(over_edge_losses, axis=1)
if FLAGS.supervision > 0 or FLAGS.is_advantage_actor_critic:
supervised_losses = []
for i in reversed(range(FLAGS.num_steps-1)):
if i == FLAGS.num_steps-1 - 1: #Extra -1 as idxs start from 0
step_loss = tf.expand_dims(losses, axis=-1)
else:
step_loss = FLAGS.gamma*step_loss
if FLAGS.over_edge_penalty:
step_loss += over_edge_losses[:,i:i+1]
supervised_losses.append(step_loss)
supervised_losses = tf.concat(supervised_losses, axis=-1)
if FLAGS.supervision < 1:
bellman_losses = tf.concat(
[FLAGS.gamma*target_actor_qs[:,1:,0], tf.expand_dims(losses, axis=-1)],
axis=-1
)
if FLAGS.over_edge_penalty:
bellman_losses += over_edge_losses
bellman_losses = supervision_ph * supervised_losses + (1 - supervision_ph) * bellman_losses
else:
bellman_losses = supervised_losses
if FLAGS.is_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
critic_losses = tf.reduce_mean( priority_weights_ph*unweighted_critic_losses )
else:
critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2 )
if FLAGS.is_biased_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
if FLAGS.is_clipped_critic:
actor_qs = alrc(actor_qs)
if FLAGS.is_advantage_actor_critic:
actor_losses = tf.reduce_mean( supervised_losses - actor_qs[:,:,0] )
else:
actor_losses = tf.reduce_mean( actor_qs )
#critic_losses /= FLAGS.num_steps
#actor_losses /= FLAGS.num_steps
#Outputs to provide feedback for the developer
info = {
"generator_losses": tf.reduce_mean(unclipped_losses)
}
if not FLAGS.is_spirals:
info.update( {"actor_losses": actor_losses, "critic_losses": critic_losses} )
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
info.update( {"priority_weights": unweighted_critic_losses} )
if FLAGS.is_self_competition:
info.update( {"unclipped_losses": unclipped_losses} )
outputs = {
"generation": generation[0,:,:,0],
"truth": replay_full_scans_ph[0,:,:,0],
"input": partial_scans_ph[0,:,:,0]
}
history_op = {
"actions": new_actions,
"observations": new_observations,
"full_scans": full_scans
}
if FLAGS.is_self_competition:
history_op.update( {"labels": labels} )
##Modify actor gradients
#[actor_grads] = tf.gradients(actor_losses, replay_actions_ph)
#actor_losses = overwrite_grads(actions, actor_grads)
start_iter = FLAGS.start_iter
train_iters = FLAGS.train_iters
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #Only use required GPU memory
#config.gpu_options.force_gpu_compatible = True
model_dir = FLAGS.model_dir
log_filepath = model_dir + "log.txt"
save_period = 1; save_period *= 3600
log_file = open(log_filepath, "a")
with tf.Session(config=config) as sess:
if FLAGS.is_target_actor and not FLAGS.is_spirals:
if FLAGS.update_frequency <= 1:
update_target_critic_op = target_update_ops(target_actor, actor)
else:
update_target_critic_op = []
initial_update_target_critic_op = target_update_ops(target_actor, actor, decay=0)
else:
update_target_critic_op = []
initial_update_target_critic_op = []
if FLAGS.is_target_critic and not FLAGS.is_spirals:
if FLAGS.update_frequency <= 1:
update_target_actor_op = target_update_ops(target_critic, critic)
else:
update_target_actor_op = []
initial_update_target_actor_op = target_update_ops(target_critic, critic, decay=0)
else:
update_target_actor_op = []
initial_update_target_actor_op = []
if FLAGS.is_target_generator and not FLAGS.is_infilled:
if FLAGS.update_frequency <= 1:
update_target_generator_op = target_update_ops(target_generator, generator, l2_norm=FLAGS.L2_norm)
else:
update_target_generator_op = []
initial_update_target_generator_op = target_update_ops(target_generator, generator, decay=0)
else:
update_target_generator_op = []
initial_update_target_generator_op = []
initial_update_target_network_ops = (
initial_update_target_actor_op +
initial_update_target_critic_op +
initial_update_target_generator_op
)
actor_lr = FLAGS.actor_lr
critic_lr = FLAGS.critic_lr
if FLAGS.is_cyclic_generator_learning_rate and not FLAGS.is_infilled:
generator_lr = tf.placeholder(tf.float32, name="generator_lr")
else:
generator_lr = FLAGS.generator_lr
#critic_rep = (critic_qs[:,:,0] - bellman_losses)**2
#ps = [critic_qs[0,:,0], target_actor_qs[0,:,0], bellman_losses[0], critic_rep[0]]
#ps = [critic.trainable_variables[0], target_critic.trainable_variables[0]]
ps = []
#p = bellman_losses[0]
#p = generation[0,:,:,0]
train_op_dependencies = [tf.print(p) for p in ps] + tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if not FLAGS.update_frequency:
update_target_network_ops = (
update_target_actor_op +
update_target_critic_op +
update_target_generator_op
)
train_op_dependencies += update_target_network_ops
train_ops = []
with tf.control_dependencies(train_op_dependencies):
if not FLAGS.is_spirals:
actor_train_op = tf.train.AdamOptimizer(learning_rate=actor_lr).minimize(
loss=actor_losses, var_list=actor.trainable_variables)
critic_train_op = tf.train.AdamOptimizer(learning_rate=critic_lr).minimize(
loss=critic_losses, var_list=critic.trainable_variables)
train_ops += [actor_train_op, critic_train_op]
if not FLAGS.is_infilled:
generator_train_op = tf.train.AdamOptimizer(learning_rate=generator_lr).minimize(
loss=generator_losses, var_list=generator.trainable_variables)
train_ops.append(generator_train_op)
else:
generator_train_op = tf.no_op()
feed_dict = {}
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
saver = tf.train.Saver(max_to_keep=1)
noteable_saver = tf.train.Saver(max_to_keep=2)
if start_iter:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
else:
if len(initial_update_target_network_ops):
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
sess.run(train_iterator.initializer, feed_dict={train_data_ph: train_images})
sess.run(val_iterator.initializer, feed_dict={val_data_ph: val_images})
time0 = time.time()
for iter in range(start_iter, train_iters):
if iter < FLAGS.replay_size or not iter % FLAGS.avg_replays:
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
if FLAGS.is_noise_decay:
noise_decay = np.float32( (train_iters - iter)/train_iters )
feed_dict.update( {noise_decay_ph: noise_decay} )
history = sess.run(
history_op,
feed_dict=feed_dict)
replay.add(**history)
#Sample experiences from the replay
if FLAGS.is_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs, sampled_priority_weights = replay.get()
elif FLAGS.is_biased_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs = replay.get()
elif FLAGS.is_self_competition:
sampled_actions, sampled_observations, replay_sampled_full_scans, sampled_labels, sampled_past_losses = replay.get()
else:
sampled_actions, sampled_observations, replay_sampled_full_scans = replay.get()
replay_partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
if not FLAGS.is_infilled:
sampled_full_scans = []
partial_scans = []
spiral_scans = []
for sampled_full_scan, partial_scan in zip(replay_sampled_full_scans, replay_partial_scans):
c = np.random.randint(0, 8)
sampled_full_scans.append( flip_rotate(sampled_full_scan, c) )
partial_scans.append( flip_rotate(partial_scan, c) )
if FLAGS.is_relative_to_spirals:
spiral_scan = spiral * sampled_full_scan
spiral_scans.append( flip_rotate(spiral_scan, c) )
sampled_full_scans = np.stack( sampled_full_scans )
partial_scans = np.stack( partial_scans )
else:
sampled_full_scans = replay_sampled_full_scans
partial_scans = replay_partial_scans
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(True)
}
if FLAGS.is_prioritized_replay:
feed_dict.update({priority_weights_ph: sampled_priority_weights})
if FLAGS.supervision_iters:
supervision = FLAGS.supervision_start + min(iter, FLAGS.supervision_iters)*(FLAGS.supervision_end-FLAGS.supervision_start) / FLAGS.supervision_iters
feed_dict.update( {supervision_ph: supervision } )
if FLAGS.is_self_competition:
feed_dict.update( {past_losses_ph: sampled_past_losses} )
if FLAGS.is_cyclic_generator_learning_rate and not FLAGS.is_infilled:
if FLAGS.is_decaying_generator_learning_rate:
envelope = FLAGS.generator_lr * 0.75**(iter/(train_iters//5))
else:
envelope = FLAGS.generator_lr
cycle_half = train_iters//(10 - 1)
cycle_full = 2*cycle_half
cyclic_sawtooth = 1 - (min(iter%cycle_full, cycle_half) - min(iter%cycle_full - cycle_half, 0))/cycle_half
cyclic_lr = envelope*(0.2 + 0.8*cyclic_sawtooth)
feed_dict.update( {generator_lr: np.float32(cyclic_lr)} )
#Train
if iter in [0, 100, 500] or not iter % 25_000 or (0 <= iter < 10_000 and not iter % 1000) or iter == start_iter:
_, step_info, step_outputs = sess.run([train_ops, info, outputs], feed_dict=feed_dict)
for k in step_outputs:
save_loc = FLAGS.model_dir + k + str(iter)+".tif"
Image.fromarray( (0.5*step_outputs[k]+0.5).astype(np.float32) ).save( save_loc )
else:
_, step_info = sess.run([train_ops, info], feed_dict=feed_dict)
if FLAGS.update_frequency and not iter % FLAGS.update_frequency:
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
if FLAGS.is_prioritized_replay:
replay.update_priorities(sample_idxs, step_info["priority_weights"])
if FLAGS.is_self_competition:
replay.update_past_losses(sampled_labels, step_info["unclipped_losses"])
output = f"Iter: {iter}"
for k in step_info:
if k not in ["priority_weights", "unclipped_losses"]:
output += f", {k}: {step_info[k]}"
if not iter % FLAGS.report_freq:
print(output)
#if "nan" in output:
# saver.restore(
# sess,
# tf.train.latest_checkpoint(model_dir+"model/")
# )
try:
log_file.write(output)
except:
while True:
print("Issue writing log.")
time.sleep(1)
log_file = open(log_filepath, "a")
try:
log_file.write(output)
break
except:
continue
if iter in [train_iters//2-1, train_iters-1]:
noteable_saver.save(sess, save_path=model_dir+"noteable_ckpt/model", global_step=iter)
time0 = time.time()
start_iter = iter
elif time.time() >= time0 + save_period:
saver.save(sess, save_path=model_dir+"model/model", global_step=iter)
time0 = time.time()
val_losses_list = []
for iter in range(0, FLAGS.val_examples//FLAGS.batch_size):
#Generate validation experiences
feed_dict = {is_training_ph: np.bool(True)}
sampled_actions, sampled_observations, sampled_full_scans = sess.run(
[val_actions, val_observations, val_full_scans],
feed_dict=feed_dict
)
partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(False)
}
val_losses = sess.run( unclipped_losses, feed_dict=feed_dict )
val_losses_list.append( val_losses )
val_losses = np.concatenate(tuple(val_losses_list), axis=0)
np.save(model_dir + "val_losses.npy", val_losses)
if __name__ == "__main__":
tf.app.run()
```
#### File: Adaptive-Partial-STEM/128/utility.py
```python
import tensorflow as tf
import itertools
import numpy as np
FLAGS = tf.flags.FLAGS
def stepped_spiral_actions(theta_incr=np.pi/180):
coverage = FLAGS.num_steps*FLAGS.step_size/FLAGS.img_side**2
start_theta = np.pi/4
start_r = np.sqrt(2)*FLAGS.step_size
start_position = np.ones([2])/2
alpha = 3.4
theta0 = -start_r/alpha
actions = []
positions = [start_position]
for _ in range(0, FLAGS.num_steps):
for i in itertools.count(start=1):
theta = start_theta + i*theta_incr
r = alpha*(theta - theta0)
if np.sqrt( (r*np.cos(theta) - start_r*np.cos(start_theta))**2 +
(r*np.sin(theta) - start_r*np.sin(start_theta))**2 ) >= np.sqrt(2)*FLAGS.step_size:
vect = np.array([r*np.cos(theta) - start_r*np.cos(start_theta),
r*np.sin(theta) - start_r*np.sin(start_theta)])
vect /= np.sum(np.sqrt(vect**2))
vect *= np.sqrt(2)
start_position += FLAGS.step_size*vect/FLAGS.img_side
actions.append( vect )
positions.append( start_position )
start_theta = theta
start_r = r
break
actions.append( np.ones([2]) ) #Discarded
actions = np.stack(actions)
actions = np.stack([actions]*FLAGS.batch_size).astype(np.float32)
positions = np.stack(positions)
positions = np.stack([positions]*FLAGS.batch_size).astype(np.float32)
return actions, positions
def make_observations(actions, starts, full_scans):
x = np.minimum(np.maximum(np.stack([starts + i*actions for i in range(FLAGS.step_size)]), 0), FLAGS.img_side-1)
indices = []
for j in range(FLAGS.batch_size):
for i in range(FLAGS.step_size):
indices.append( [j, int(x[i][j][0]), int(x[i][j][1]), 0] )
indices = tuple([np.array(indices)[:,i] for i in range(4)])
observations = full_scans[indices].reshape([-1, FLAGS.step_size])
return observations
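#Usage sketch (illustrative, not part of the original file; assumes FLAGS.batch_size,
#FLAGS.step_size, FLAGS.num_steps and FLAGS.img_side are set): stepped_spiral_actions()
#returns per-step direction vectors and probe positions along an Archimedean spiral from
#the image centre, and make_observations(actions[:, i, :], positions[:, i, :], full_scans)
#gathers the FLAGS.step_size pixel values recorded along the i-th step, after positions
#have been scaled by FLAGS.img_side as in spiral_generator below.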
def spiral_generator(scans):
actions0, positions = stepped_spiral_actions()
actor_actions = tf.convert_to_tensor(actions0[:,:-1], dtype=tf.float32)
positions *= FLAGS.img_side
def py_spiral_generator(imgs):
actions = np.concatenate([np.ones([FLAGS.batch_size, 1, 2]), actions0[:,1:]], axis=1)
observations = [make_observations(actions[:,i,:], positions[:,i,:], imgs) for i in range(FLAGS.num_steps)]
observations = np.stack(observations, axis=1)
return observations
observations = tf.py_func(py_spiral_generator, [scans], tf.float32)
observations = tf.reshape(observations, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size])
return observations, actor_actions
def auto_name(name):
"""Append number to variable name to make it unique.
Inputs:
name: Start of variable name.
Returns:
Full variable name with number afterwards to make it unique.
"""
scope = tf.contrib.framework.get_name_scope()
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
names = [v.name for v in vars]
#Increment variable number until unused name is found
for i in itertools.count():
short_name = name + "_" + str(i)
sep = "/" if scope != "" else ""
full_name = scope + sep + short_name
if not full_name in [n[:len(full_name)] for n in names]:
return short_name
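#Usage sketch (not in the original file): the first tf.get_variable(auto_name("kernel"), ...)
#call in a scope creates "kernel_0", the next creates "kernel_1", and so on, so repeated
#graph construction does not raise duplicate-variable errors.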
def alrc(
loss,
num_stddev=3,
decay=0.999,
mu1_start=2,
mu2_start=3**2,
in_place_updates=False
):
"""Adaptive learning rate clipping (ALRC) of outlier losses.
Inputs:
loss: Loss function to limit outlier losses of.
        num_stddev: Number of standard deviations above the loss mean to limit
            it to.
decay: Decay rate for exponential moving averages used to track the first
two raw moments of the loss.
mu1_start: Initial estimate for the first raw moment of the loss.
mu2_start: Initial estimate for the second raw moment of the loss.
in_place_updates: If False, add control dependencies for moment tracking
to tf.GraphKeys.UPDATE_OPS. This allows the control dependencies to be
executed in parallel with other dependencies later.
    Returns:
Loss function with control dependencies for ALRC.
"""
    #Variables to track the first two raw moments of the loss
mu = tf.get_variable(
auto_name("mu1"),
initializer=tf.constant(mu1_start, dtype=tf.float32))
mu2 = tf.get_variable(
auto_name("mu2"),
initializer=tf.constant(mu2_start, dtype=tf.float32))
#Use capped loss for moment updates to limit the effect of outlier losses on the threshold
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
loss = tf.where(loss < mu+num_stddev*sigma,
loss,
loss/tf.stop_gradient(loss/(mu+num_stddev*sigma)))
#Update moment moving averages
mean_loss = tf.reduce_mean(loss)
mean_loss2 = tf.reduce_mean(loss**2)
update_ops = [mu.assign(decay*mu+(1-decay)*mean_loss),
mu2.assign(decay*mu2+(1-decay)*mean_loss2)]
if in_place_updates:
with tf.control_dependencies(update_ops):
loss = tf.identity(loss)
else:
#Control dependencies that can be executed in parallel with other update
#ops. Often, these dependencies are added to train ops e.g. alongside
#batch normalization update ops.
for update_op in update_ops:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op)
return loss
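#Example usage (a sketch, not part of the original file; outputs and targets are
#placeholder tensors): apply ALRC to a per-example loss before reduction so rare
#outliers are limited to roughly mu + num_stddev*sigma while typical losses pass
#through unchanged.
#  per_example_mse = tf.reduce_mean((outputs - targets)**2, axis=[1, 2, 3])
#  train_loss = tf.reduce_mean(alrc(per_example_mse))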
if __name__ == "__main__":
pass
```
#### File: Jeffrey-Ede/Adaptive-Partial-STEM/examples_half.py
```python
import numpy as np
from scipy.misc import imread
from scipy.stats import entropy
import matplotlib as mpl
#mpl.use('pdf')
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['savefig.dpi'] = 50
fontsize = 11
mpl.rcParams['axes.titlesize'] = fontsize
mpl.rcParams['axes.labelsize'] = fontsize
mpl.rcParams['xtick.labelsize'] = fontsize
mpl.rcParams['ytick.labelsize'] = fontsize
mpl.rcParams['legend.fontsize'] = fontsize
import matplotlib.mlab as mlab
import cv2
from PIL import Image
from PIL import ImageDraw
columns = 6
rows = 4
parent = "Z:/Jeffrey-Ede/models/recurrent_conv-1/125/"
prependings = ["final_input", "final_truth", "final_generation", "final_truth", "final_generation", "final_truth", "final_generation"]
#image_nums = [0+i for i in range(2*rows)]
image_nums = [94, 22, 61, 77, 33, 78, 27, 71]
imgs = []
for i in image_nums:
for j, prepending in enumerate(prependings[:3]):
filename = parent + prepending + f"{i}.tif"
img = imread(filename, mode="F")
imgs.append(img)
x_titles = [
"Partial Scan",
"Target Output",
"Generated Output",
"Partial Scan",
"Target Output",
"Generated Output"
]
def scale0to1(img):
min = np.min(img)
max = np.max(img)
print(min, max)
if min == max:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def block_resize(img, new_size):
x = np.zeros(new_size)
dx = int(new_size[0]/img.shape[0])
dy = int(new_size[1]/img.shape[1])
for i in range(img.shape[0]):
for j in range(img.shape[1]):
px = img[i,j]
for u in range(dx):
for v in range(dy):
x[i*dx+u, j*dy+v] = px
return x
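#Illustrative example: block_resize(np.eye(2), (4, 4)) copies each input pixel over a
#2x2 block, i.e. nearest-neighbour upsampling by an integer factor.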
#Width as measured in inkscape
scale = 4
width = scale * 2.2
height = 1.15*scale* (width / 1.618) / 2.2 / 1.96
w = h = 224
subplot_cropsize = 64
subplot_prop_of_size = 0.625
subplot_side = int(subplot_prop_of_size*w)
subplot_prop_outside = 0.25
out_len = int(subplot_prop_outside*subplot_side)
side = w+out_len
print(imgs[1])
f=plt.figure(figsize=(rows, columns))
#spiral = inspiral(1/20, int(512*0.6*512/64))
#spiral_crop = spiral[:subplot_side, :subplot_side]
for i in range(rows):
for j in range(1, columns+1):
img = imgs[columns*i+j-1]
k = i*columns+j
ax = f.add_subplot(rows, columns, k)
plt.imshow(img, cmap='gray')
plt.xticks([])
plt.yticks([])
ax.set_frame_on(False)
if not i:
ax.set_title(x_titles[j-1])
f.subplots_adjust(wspace=-0.01, hspace=0.04)
f.subplots_adjust(left=.00, bottom=.00, right=1., top=1.)
f.set_size_inches(width, height)
#plt.show()
f.savefig(parent+'examples.png', bbox_inches='tight')
```
|
{
"source": "Jeffrey-Ede/adaptive-scans",
"score": 4
}
|
#### File: adaptive-scans/misc/ou_trial.py
```python
import numpy as np
def ornstein_uhlenbeck(input, theta=0.1, sigma=0.2):
"""Ornstein-Uhlembeck perturbation. Using Gaussian Wiener process."""
noise_perturb = -theta*input + sigma*np.random.normal()
return input + noise_perturb
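#Each call applies the discretised update x_{t+1} = x_t - theta*x_t + sigma*N(0, 1):
#theta pulls the value back towards zero and sigma scales each random Wiener increment.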
noise = 0
for _ in range(20):
noise = ornstein_uhlenbeck(noise)
print(noise/(np.pi))
```
|
{
"source": "Jeffrey-Ede/ALRC",
"score": 2
}
|
#### File: quartic/training_quartic/cifar10_train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', 'tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 1,
"""How often to log results to the console.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
unadjusted_loss = tf.reduce_mean(logits)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs([loss, unadjusted_loss]) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results[0]
unadjusted_loss = run_values.results[1]
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.5f, unadjusted = %.5f, (%.1f examples/sec; %.3f '
'sec/batch)')
output = ( format_str % (datetime.now(), self._step, loss_value, unadjusted_loss,
examples_per_sec, sec_per_batch) )
print(output)
with open(FLAGS.log_file, mode='a') as log:
log.write(output)
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
```
|
{
"source": "Jeffrey-Ede/datasets",
"score": 3
}
|
#### File: Jeffrey-Ede/datasets/create_visualization_files.py
```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def preprocess(img):
try:
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
except:
img = np.zeros([96,96,1])
return img
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
if img.shape[-1] != 1:
img = np.sqrt(np.sum(img**2, axis=-1, keepdims=True))
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def batch_PCA(data, n_components):
data = np.stack([np.ndarray.flatten(x) for x in data])
pc = PCA(n_components=n_components).fit_transform(data)
print(pc.shape)
return pc
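#Usage sketch (assumption, not in the original file): batch_PCA flattens each image and
#returns an (N, n_components) matrix, e.g.
#  pc = batch_PCA(np.random.rand(100, 96, 96, 1), n_components=50)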
if __name__ == "__main__":
import matplotlib.pyplot as plt
SAVE_LOC = "//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/visualize_data/"
DATASET_FILES = [
"//Desktop-sa1evjv/h/96x96_stem_crops.npy",
"//Desktop-sa1evjv/h/small_scans/96x96.npy",
"//Desktop-sa1evjv/h/small_scans-tem/96x96-tem.npy",
"//Desktop-sa1evjv/h/wavefunctions_96x96/wavefunctions_n=3.npy",
"//Desktop-sa1evjv/h/wavefunctions_96x96/wavefunctions_restricted_n=3.npy",
"//Desktop-sa1evjv/h/wavefunctions_96x96/wavefunctions_single_n=3.npy",
]
DATASET_NAMES = [
"stem_crops_96x96",
"stem_downsampled_96x96",
"tem_downsampled_96x96",
"wavefunctions_n=3",
"wavefunctions_restricted_n=3",
"wavefunctions_single_n=3",
]
PCA_DIMENSIONS = [
50,
50,
50,
50,
50,
50,
]
IS_SCALE0TO1S = [
True,
True,
True,
True,
True,
True,
]
for i, (dataset_file, dataset_name, pca_dimension, is_scale0to1) in enumerate(zip(
DATASET_FILES, DATASET_NAMES, PCA_DIMENSIONS, IS_SCALE0TO1S)):
if i <= 2:
continue
dataset = np.load(dataset_file)
print(dataset.shape)
if is_scale0to1 and i <= 2:
dataset = np.stack([scale0to1(preprocess(x)) for x in dataset])
pc = batch_PCA(dataset, pca_dimension)
if i <= 2:
perplexity = int(np.sqrt(pc.shape[0]))
else:
perplexity = int(5*np.sqrt(pc.shape[0]))
tsne = TSNE(n_components=2, verbose=1, perplexity=perplexity, n_iter=10000).fit_transform(pc)
np.save(SAVE_LOC+"pca_"+dataset_name+".npy", pc)
np.save(SAVE_LOC+"tsne_"+dataset_name+".npy", tsne)
```
#### File: datasets/vaegan/conditional_tsne_to_uniform.py
```python
import numpy as np
from scipy import interpolate
BASE = r"Y:/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/visualize_data/"
NAME = "stem_crops_96x96"
TSNE_POINTS_FILE = BASE + r"vaegan/vae_tsne_" + NAME + ".npy"
SAVE_FILE = BASE + r"vaegan/vae_tsne_" + NAME + "_uniform.npy"
GAMMA = 0.3
GRID_SIZE_X = GRID_SIZE_Y = 25
TOL = 1e-4 # Stop iteration after maximum change is below this proportion of point support
MAX_ITER = 100
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
img = (img - min)/(max - min)
return img.astype(np.float32)
tsne = np.load(TSNE_POINTS_FILE)
x = tsne[:,0]
y = tsne[:,1]
x = scale0to1(x)
y = scale0to1(y)
full_idxs = np.array([i for i in range(tsne.shape[0])])
x_probs = []
y_probs_for_x = []
for i in range(GRID_SIZE_X):
_min = i/GRID_SIZE_X
_max = (i+1)/GRID_SIZE_X
select_x = (x > _min)*(x <= _max)
num_x = np.sum(select_x)
x_probs.append( num_x )
if num_x: #If points in this column
y_probs = []
for j in range(GRID_SIZE_Y):
_min = j/GRID_SIZE_Y
_max = (j+1)/GRID_SIZE_Y
select_y = select_x*(y > _min)*(y <= _max)
num_y = np.sum(select_y)
y_probs.append( num_y )
y_probs = np.cumsum(y_probs) / num_x
else:
y_probs = np.zeros([GRID_SIZE_Y])
y_probs_for_x.append(y_probs)
#Compute cumulative probabilities
x_probs = np.cumsum(x_probs) / tsne.shape[0]
print(x_probs)
#Create map from grid to distribution
grid_to_map = np.zeros([GRID_SIZE_X, GRID_SIZE_Y, 2])
for i in range(GRID_SIZE_X):
for j in range(GRID_SIZE_Y):
idx_x = next((idx for idx, p in enumerate(x_probs) if (i + 0.5)/GRID_SIZE_X <= p ))
idx_y = next((idx for idx, p in enumerate(y_probs_for_x[idx_x]) if (j + 0.5)/GRID_SIZE_Y <= p ))
grid_to_map[i, j, 0] = (idx_x+0.5)/GRID_SIZE_X
grid_to_map[i, j, 1] = (idx_y+0.5)/GRID_SIZE_Y
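#In effect this is 2D histogram equalisation: grid_to_map[i, j] stores the point in the
#embedding whose cumulative density matches the centre of uniform grid cell (i, j), and
#the griddata interpolation below inverts that correspondence to push every t-SNE point
#towards a uniform layout.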
##Interpolate map locations at edges of cells
#lin_x = np.linspace(0.5, GRID_SIZE_X - 0.5, GRID_SIZE_X)
#lin_y = np.linspace(0.5, GRID_SIZE_Y - 0.5, GRID_SIZE_Y)
#f0 = interpolate.interp2d(x, y, z[:,:,0], kind='cubic')
#f1 = interpolate.interp2d(x, y, z[:,:,1], kind='cubic')
#lin_x = np.linspace(0.0, GRID_SIZE_X, GRID_SIZE_X+1)
#lin_y = np.linspace(0.0, GRID_SIZE_Y, GRID_SIZE_Y+1)
#full_grid_to_map_x = f0(lin_x, lin_y)
#full_grid_to_map_y = f1(lin_x, lin_y)
#grid_x = np.zeros(x.shape)
#grid_y = np.zeros(y.shape)
#for i in range(GRID_SIZE_X):
# for i in range(GRID_SIZE_Y):
# select = (x > full_grid_to_map_x[i])*(x <= full_grid_to_map_x[i+1]) * \
# (y > full_grid_to_map_y[i])*(y <= full_grid_to_map_y[i+1])
# #Distances from cell corners
# d_ll = np.sqrt( (x-full_grid_to_map_x[i])**2 + (y-full_grid_to_map_y[i])**2 )
# d_lu = np.sqrt( (x-full_grid_to_map_x[i])**2 + (y-full_grid_to_map_y[i+1])**2 )
# d_ul = np.sqrt( (x-full_grid_to_map_x[i+1])**2 + (y-full_grid_to_map_y[i])**2 )
# d_uu = np.sqrt( (x-full_grid_to_map_x[i+1])**2 + (y-full_grid_to_map_y[i+1])**2 )
# grid_x[select] =
# for _x, _y in zip(x[select], y[select]):
#Interpolate map locations at edges of cells
lin_x = np.linspace(0.5, GRID_SIZE_X - 0.5, GRID_SIZE_X) / GRID_SIZE_X
lin_y = np.linspace(0.5, GRID_SIZE_Y - 0.5, GRID_SIZE_Y) / GRID_SIZE_Y
xx, yy = np.meshgrid(lin_x, lin_y)
tsne = np.stack([x, y], axis=-1)
x = interpolate.griddata(grid_to_map.reshape(-1, 2), xx.reshape(-1), tsne, method='cubic')
y = interpolate.griddata(grid_to_map.reshape(-1, 2), yy.reshape(-1), tsne, method='cubic')
tsne = np.stack([x, y], axis=-1)
np.save(SAVE_FILE, tsne)
```
#### File: datasets/vaegan/create_visualization_files_pca.py
```python
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
#from tsne import tsne
from bhtsne.bhtsne import run_bh_tsne
from bhtsne_unmodified.bhtsne import run_bh_tsne as run_bh_tsne_unmodified
import cv2
def preprocess(img):
try:
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
except:
img = np.zeros([96,96,1])
return img
def to_sobel(img):
g1 = cv2.Scharr(img, cv2.CV_32F, 0, 1)
g2 = cv2.Scharr(img, cv2.CV_32F, 1, 0)
x = np.sqrt(g1**2 + g2**2)
return x
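#to_sobel returns the image gradient magnitude computed with Scharr derivative kernels
#(a rotationally more accurate variant of the Sobel operator).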
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
if img.shape[-1] != 1:
img = np.sqrt(np.sum(img**2, axis=-1, keepdims=True))
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def batch_PCA(data, n_components):
data = np.stack([np.ndarray.flatten(x) for x in data])
pc = PCA(n_components=n_components).fit_transform(data)
print(pc.shape)
return pc
if __name__ == "__main__":
import matplotlib.pyplot as plt
SAVE_LOC = "Y:/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/visualize_data/"
DATASET_FILES = [
r"Y:\HCSS6\Shared305\Microscopy\Jeffrey-Ede\models\visualize_data\vaegan\1",
r"Y:\HCSS6\Shared305\Microscopy\Jeffrey-Ede\models\visualize_data\vaegan\4",
r"Y:\HCSS6\Shared305\Microscopy\Jeffrey-Ede\models\visualize_data\vaegan\2",
r"Y:\HCSS6\Shared305\Microscopy\Jeffrey-Ede\models\visualize_data\vaegan\3",
r"Y:\HCSS6\Shared305\Microscopy\Jeffrey-Ede\models\visualize_data\vaegan\5",
"//Desktop-sa1evjv/h/wavefunctions_96x96/wavefunctions_single_n=3.npy",
]
DATASET_NAMES = [
"pca_stem_crops_96x96",
"stem_downsampled_96x96",
"tem_downsampled_96x96",
"stem_crops_96x96_no_sobel",
"stem_crops_96x96_no_regul",
"wavefunctions_single_n=3",
]
SOBEL = False
if SOBEL:
DATASET_NAMES = [n+"_sobel" for n in DATASET_NAMES]
PCA_DIMENSIONS = [
50,
50,
50,
50,
50,
50,
]
IS_SCALE0TO1S = [
True,
True,
True,
True,
True,
True,
]
IS_CONSIDERING_ERRORS = True
for i, (dataset_file, dataset_name, pca_dimension, is_scale0to1) in enumerate(zip(
DATASET_FILES, DATASET_NAMES, PCA_DIMENSIONS, IS_SCALE0TO1S)):
if not i in [0]:
continue
embeddings_file = dataset_file + r"\vae_embeddings.npy"
embeddings = np.load(embeddings_file)
if IS_CONSIDERING_ERRORS:
sigma_file = dataset_file + r"\vae_errors.npy"
sigma = np.load(sigma_file)
dataset_name += "_we"
else:
sigma = np.array([])
perplexity = int(np.sqrt(embeddings.shape[0]))
if IS_CONSIDERING_ERRORS:
tsne_points = run_bh_tsne(embeddings, sigma, no_dims=2, perplexity=perplexity, theta=0.5, randseed=-1, verbose=True,
initial_dims=embeddings.shape[-1], use_pca=False, max_iter=10_000)
tsne_points = tsne_points[:tsne_points.shape[0]//2]
else:
tsne_points = run_bh_tsne_unmodified(embeddings, no_dims=2, perplexity=perplexity, theta=0.5, randseed=-1, verbose=True,
initial_dims=embeddings.shape[-1], use_pca=False, max_iter=10_000)
#tsne_points = TSNE(n_components=2, verbose=1, perplexity=perplexity, n_iter=10_000).fit_transform(embeddings)
#tsne_points = tsne(X=embeddings, no_dims=2, perplexity=perplexity, sigma=sigma, max_iter=10_000)
np.save(SAVE_LOC+"tsne_"+dataset_name+".npy", tsne_points)
```
|
{
"source": "Jeffrey-Ede/Denoising-Kernels-MLPs-Autoencoders",
"score": 2
}
|
#### File: Jeffrey-Ede/Denoising-Kernels-MLPs-Autoencoders/autoencoder_train-val-test.py
```python
from apply_autoencoders import Micrograph_Autoencoder
from scipy.misc import imread
from PIL import Image
import os
import numpy as np
import cv2  #needed by the disp debugging helper below
cropsize = 20
ckpt_loc = 'G:/noise-removal-kernels-TEM+STEM/autoencoder/16/model/'
nn = Micrograph_Autoencoder(checkpoint_loc=ckpt_loc,
visible_cuda='1',
encoding_features=16)
data_loc1 = "G:/unaltered_TEM_crops-171x171/"
data_loc2 = "G:/unaltered_STEM_crops-171x171/"
save_loc0 = "G:/noise-removal-kernels-TEM+STEM/data/orig/"
save_loc = "G:/noise-removal-kernels-TEM+STEM/data/16/"
files = ([data_loc1+f for f in os.listdir(data_loc1)[:6077//2]] +
         [data_loc2+f for f in os.listdir(data_loc2)[:(6077//2+6077%2)]])
num_files = len(files)
print("Num files: {}".format(num_files))
def disp(img):
    """Debugging helper: display an image rescaled to [0, 1] in an OpenCV window."""
    img = (img - np.min(img)) / (np.max(img) - np.min(img) + 1e-8)
    cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
    cv2.imshow('CV_Window', img)
    cv2.waitKey(0)
    return
counter = 1
for k, file in enumerate(files):
print("Train file {} of {}".format(k, num_files))
try:
img = imread(file, mode="F")
img = img[:160, :160]
nn_img = nn.denoise_crop(img)
c = np.min(img)
m = np.mean(img)-c
img = (img - c) / m
c = np.min(nn_img)
m = np.mean(nn_img)-c
nn_img = (nn_img - c) / m
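        #Both the raw and denoised crops are shifted to a minimum of 0 and scaled to a
        #mean of 1 so that saved training pairs share a common intensity scale.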
if img.shape[0] >= cropsize and img.shape[1] >= cropsize:
#for i in range(0, img.shape[0]-cropsize+1, cropsize):
# for j in range(0, img.shape[1]-cropsize+1, cropsize):
i = np.random.randint(20, 160-20-20)
j = np.random.randint(20, 160-20-20)
Image.fromarray(nn_img[i:(i+cropsize), j:(j+cropsize)]).save( save_loc+str(counter)+".tif" )
Image.fromarray(img[i:(i+cropsize), j:(j+cropsize)]).save( save_loc0+str(counter)+".tif" )
counter += 1
except:
print('error')
if counter >= 2*6077:
break
```
#### File: Jeffrey-Ede/Denoising-Kernels-MLPs-Autoencoders/noise_removal_kernels.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import argparse
import numpy as np
import tensorflow as tf
import cv2
from scipy.misc import imread
import time
import os, random
from PIL import Image
import functools
import itertools
import collections
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training import device_setter
from tensorflow.contrib.learn.python.learn import run_config
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.DEBUG)
data_dir1 = "G:/noise-removal-kernels-TEM+STEM/data/orig/"
data_dir2 = "G:/noise-removal-kernels-TEM+STEM/data/16/"
#data_dir = "E:/stills_hq-mini/"
modelSavePeriod = 4 #Train timestep in hours
modelSavePeriod *= 3600 #Convert to s
#model_dir = "G:/noise-removal-kernels-TEM/depth1/"
model_dir = "G:/noise-removal-kernels-TEM+STEM/results/1/"
shuffle_buffer_size = 5000
num_parallel_calls = 4
num_parallel_readers = 4
prefetch_buffer_size = 5
batch_size = 1
num_gpus = 1
#batch_size = 8 #Batch size to use during training
num_epochs = 1000000 #Dataset repeats indefinitely
logDir = "C:/dump/train/"
log_file = model_dir+"log.txt"
val_log_file = model_dir+"val_log.txt"
variables_file = model_dir+"variables.txt"
log_every = 1 #Log every _ examples
numMeans = 64 // batch_size
scaleMean = 4 #Each means array index increment corresponds to this increase in the mean
numDynamicGrad = 1 #Number of gradients to calculate for each possible mean when dynamically updating training
lossSmoothingBoxcarSize = 5
channels = 1 #Greyscale input image
#Sidelength of images to feed the neural network
cropsize = 20
generator_input_size = cropsize
height_crop = width_crop = cropsize
#hparams = experiment_hparams(train_batch_size=batch_size, eval_batch_size=16)
num_workers = 1
increase_batch_size_by_factor = 1
effective_batch_size = increase_batch_size_by_factor*batch_size
val_skip_n = 10
depths = [1]
widths = [3, 5, 7, 11, 15]
num_filters = len(depths)*len(widths)
def architectures(inputs, inputs_truth):
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def make_layer(size, type):
if type == 'biases':
init = np.array([0.], dtype=np.float32)
if type == 'weights':
init = np.array([1./(size*size)], dtype=np.float32)
#print("Init: {}".format(init))
printij = False #Set to true to debug
if printij:
print("\nStart:")
variables = [[None for _ in range(size)] for _ in range(size)]
if printij:
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
#for i in range(size):
# for j in range(size):
# variables[i][j] = tf.get_variable('i-{}_j-{}'.format(i,j), dtype=tf.float32, initializer=init, trainable=True)
offset = size//2
for x in range(size//2+1):
for y in range(x+1):
with tf.variable_scope("var_x-{}_y-{}".format(x, y), reuse=False) as scope:
i, j = offset+x, offset+y
variables[i][j] = tf.get_variable('v', dtype=tf.float32, initializer=init, trainable=True)
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
if x > 0:
if y == 0:
i, j = offset-x, offset
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset, offset+x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset, offset-x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
elif y == x:
i, j = offset+x, offset-y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset-x, offset+y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset-x, offset-y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
elif y != x:
i, j = offset-x, offset+y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset+x, offset-y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset-x, offset-y
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset+y, offset+x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset-y, offset+x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset+y, offset-x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
i, j = offset-y, offset-x
scope.reuse_variables()
variables[i][j] = tf.get_variable(name='v')
if printij:
print(i,j,x,y,variables[i][j].name)
for i in range(3):
for j in range(3):
if variables[i][j]:
print(i, j, variables[i][j].name)
else:
print(i,j)
print("\n")
#print(len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="depth-1_size-3/var_x-{}_y-{}".format(x, y))))
#print(variables)
if printij:
for i in range(3):
for j in range(3):
print(i, j, variables[i][j].name)
concats = []
for i in range(size):
concats.append(tf.concat(variables[i][:], axis=0))
kernel = tf.stack(concats, axis=1)
kernel = tf.expand_dims(kernel, axis=0)
kernel = tf.expand_dims(kernel, axis=3)
#kernel = tf.reshape(kernel, [-1, size, size, 1])
#print(kernel)
return kernel
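    #make_layer constructs a symmetric kernel: one tf variable is created per offset in a
    #single octant and reused for every position related to it by reflection, so a
    #size x size kernel has roughly size*size/8 free parameters.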
#depths = [1]
#widths = [3]
#depths = [i for i in range(1, 6)]
#widths = [3, 5, 7, 9, 13, 17]
filters = []
filter_scopes = []
filter_depths = []
filter_widths = []
outputs = []
losses = []
ps = []
for depth in depths:
print("Depth: {}".format(depth))
for width in widths:
print("Width: {}".format(width))
default_scope = "depth-{}_size-{}".format(depth, width)
#Filter creation
def filter_fn(input):
with tf.variable_scope('w0'):
filter = make_layer(width, 'weights')*input
for i in range(1, depth):
with tf.variable_scope('b'+str(i)):
filter += make_layer(width, 'biases')
filter = tf.sigmoid(filter)
filter = tf.contrib.layers.fully_connected(
inputs=filter,
num_outputs=1,
activation_fn=None,
weights_initializer=None,
biases_initializer=None)
with tf.variable_scope('w'+str(i)):
filter = make_layer(width, 'weights')*filter
output = tf.reduce_sum(tf.reduce_sum(tf.reduce_sum(filter, axis=1), axis=1), axis=1)
return output
filters.append(filter_fn)
filter_scopes.append(default_scope)
filter_depths.append(depth)
filter_widths.append(width)
padded = pad(inputs, (0, 0))
#Generate outputs
output = [[None for _ in range(cropsize-width+1)] for _ in range(cropsize-width+1)]
for x in range(cropsize-width+1):
for y in range(cropsize-width+1):
if not x+y:
with tf.variable_scope(default_scope) as filter_scope:
_x = x+width
_y = y+width
output[x][y] = filter_fn(padded[:, x:_x, y:_y, :])
else:
with tf.variable_scope(filter_scope, reuse=True) as filter_scope:
_x = x+width
_y = y+width
output[x][y] = filter_fn(padded[:, x:_x, y:_y, :])
#print(len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="depth-1_size-3")))
concats = []
for i in range(cropsize-width+1):
concats.append(tf.stack(output[i][:], axis=1))
output = tf.stack(concats, axis=2)
output = tf.expand_dims(output, 3)
padded_truth = pad(inputs_truth, (0, 0))
p = padded_truth[:, (width//2):(cropsize-width//2), (width//2):(cropsize-width//2), :]
loss = tf.losses.mean_squared_error(output, p)#tf.reduce_mean(tf.abs(output-p))
loss = tf.cond(loss > 1., lambda: tf.sqrt(loss), lambda: loss)
outputs.append(output)
losses.append(loss)
return filters, filter_scopes, filter_depths, filter_widths, outputs, losses, p, padded_truth
def experiment(img, img_truth, learning_rate_ph):
filters, filter_scopes, filter_depths, filter_widths, \
outputs, losses, truth, padded_truth = architectures(img, img_truth)
train_ops = []
for i in range(len(losses)):
optimizer = tf.train.AdamOptimizer(learning_rate_ph[0], beta1 = 0.5)
train_op = optimizer.minimize(losses[i])
train_ops.append(train_op)
return {'filters': filters, 'filter_scopes': filter_scopes, 'filter_depths': filter_depths,
'filter_widths': filter_widths, 'outputs': outputs, 'train_ops': train_ops,
'losses': losses, 'truth': [truth], 'padded_truth': [padded_truth]}
def flip_rotate(img):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
choice = 0#np.random.randint(0, 8)
if choice == 0:
return img
if choice == 1:
return np.rot90(img, 1)
if choice == 2:
return np.rot90(img, 2)
if choice == 3:
return np.rot90(img, 3)
if choice == 4:
return np.flip(img, 0)
if choice == 5:
return np.flip(img, 1)
if choice == 6:
return np.flip(np.rot90(img, 1), 0)
if choice == 7:
return np.flip(np.rot90(img, 1), 1)
def load_image(addr, resize_size=None, img_type=np.float32):
"""Read an image and make sure it is of the correct type. Optionally resize it"""
try:
img = imread(addr, mode='F')
#x = np.random.randint(0, img.shape[0]-cropsize)
#y = np.random.randint(0, img.shape[1]-cropsize)
#img = img[x:(x+cropsize),y:(y+cropsize)]
except:
img = 0.5*np.ones((cropsize,cropsize))
print("Image read failed")
return img.astype(img_type)
def scale0to1(img):
"""Rescale image between 0 and 1"""
min = np.min(img)
max = np.max(img)
if min == max:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def norm_img(img):
min = np.min(img)
max = np.max(img)
if min == max:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
return img.astype(np.float32)
def preprocess(img):
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
img = scale0to1(img)
img /= np.mean(img)
return img.astype(np.float32)
def record_parser(record1, record2):
#print(record1, record2)
img1 = load_image(record1)
img2 = load_image(record2)
img1 = img1[:cropsize,:cropsize]
img2 = img2[:cropsize,:cropsize]
if (np.sum(np.isfinite(img1)) != cropsize**2) or (np.sum(np.isfinite(img2)) != cropsize**2):
img1 = 0.5*np.ones((cropsize, cropsize), dtype=np.float32)
img2 = 0.5*np.ones((cropsize, cropsize), dtype=np.float32)
return img1, img2
def reshaper(img1, img2):
img1 = tf.reshape(img1, [cropsize, cropsize, channels])
img2 = tf.reshape(img2, [cropsize, cropsize, channels])
return img1, img2
def input_fn(dir, subset, batch_size):
"""Create a dataset from a list of filenames and shard batches from it"""
with tf.device('/cpu:0'):
dataset1 = tf.data.Dataset.list_files(data_dir1+"*.tif", shuffle=False) #dir+subset+"/"+"*.tif"
#dataset1 = dataset1.take(6076)
dataset2 = tf.data.Dataset.list_files(data_dir2+"*.tif", shuffle=False) #dir+subset+"/"+"*.tif"
#dataset2 = dataset2.take(6076)
dataset = tf.data.Dataset.zip((dataset1, dataset2))
#dataset1_1 = tf.data.Dataset.list_files(data_dir1+"*.tif", shuffle=False)
#dataset1_2 = tf.data.Dataset.list_files(data_dir1+"*.tif", shuffle=False)
#dataset2_1 = tf.data.Dataset.list_files(data_dir2+"*.tif", shuffle=False)
#dataset2_2 = tf.data.Dataset.list_files(data_dir2+"*.tif", shuffle=False)
#dataset1 = tf.data.Dataset.zip((dataset1_1, dataset1_2))
#dataset2 = tf.data.Dataset.zip((dataset2_1, dataset2_2))
#dataset = dataset1.concatenate(dataset2)
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.map(
lambda file1, file2: tf.py_func(record_parser, [file1, file2], [tf.float32, tf.float32]),
num_parallel_calls=num_parallel_calls)
dataset = dataset.map(reshaper, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
iter = dataset.make_one_shot_iterator()
img1_batch, img2_batch = iter.get_next()
return img1_batch, img2_batch
def disp(img):
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
class RunConfig(tf.contrib.learn.RunConfig):
def uid(self, whitelist=None):
"""
Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
in one session use, but should not rely on the implementation details, which
is subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properties user allowes to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = run_config._DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instance without __repr__, some special cares are required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(), key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
def main():
print("Initializing")
tf.reset_default_graph()
temp = set(tf.all_variables())
with open(log_file, 'a') as log:
log.flush()
with open(val_log_file, 'a') as val_log:
val_log.flush()
# The env variable is on deprecation path, default is set to off.
#os.environ['TF_SYNC_ON_FINISH'] = '0'
#os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #For batch normalisation windows
with tf.control_dependencies(update_ops):
# Session configuration.
log_device_placement = False #Once placement is correct, this fills up too much of the cmd window...
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=log_device_placement,
intra_op_parallelism_threads=1,
gpu_options=tf.GPUOptions(force_gpu_compatible=True))
config = RunConfig(session_config=sess_config, model_dir=model_dir)
img, img_truth = input_fn(data_dir1, '', batch_size=batch_size)
#img_truth = input_fn(data_dir1, '', batch_size=batch_size)
#img_val = input_fn(data_dir, 'val', batch_size=batch_size)
with tf.Session(config=sess_config) as sess:
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
temp = set(tf.all_variables())
__img, __img_truth = sess.run([img, img_truth])
#disp(__img[0])
#disp(__img_truth[0])
img_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img')
for i in __img]
img_truth_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_truth')
for i in __img_truth]
del __img, __img_truth
learning_rate_ph = tf.placeholder(tf.float32, name='learning_rate')
exp_dict = experiment(img_ph, img_truth_ph, learning_rate_ph)
#assign_ops = []
#for i in range(num_filters):
# vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, exp_dict['filter_scopes'][i])
# sum = vars[0]+4.*tf.reduce_sum(vars[1:])
# factor = 1./sum
# assign_ops += [v.assign(v*factor) for v in vars]
#########################################################################################
sess.run( tf.initialize_variables(set(tf.all_variables()) - temp) )
train_writer = tf.summary.FileWriter( logDir, sess.graph )
saver = tf.train.Saver()
#saver.restore(sess, tf.train.latest_checkpoint(model_dir+"model/"))
#with open(variables_file, 'a') as variables:
# variables.flush()
# for i in range(num_filters):
# vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, exp_dict['filter_scopes'][i])
# v = sess.run(vars)
# variables.write(str(v))
# variables.write("\r\n")
#quit()
counter = 0
save_counter = counter
counter_init = counter+1
print("Session started")
while counter < 10000:
counter += 1
lr = np.array([.01*(1.-counter/10001)])
base_dict = {learning_rate_ph: lr}
_img, _img_truth = sess.run([img, img_truth])
#disp(_img[0])
#disp(_img_truth[0])
feed_dict = base_dict.copy()
feed_dict.update({ph: img for ph, img in zip(img_ph, _img)})
feed_dict.update({ph: img.reshape((cropsize, cropsize)).T.reshape(
(cropsize, cropsize, 1)) for ph, img in zip(img_truth_ph, _img_truth)})
results = sess.run( exp_dict['train_ops']+exp_dict['losses'], feed_dict=feed_dict )
losses = results[num_filters:]
print("Iter: {}, Losses: {}".format(counter, losses))
try:
log.write("Iter: {}, {}".format(counter, losses))
except:
print("Write to discr pred file failed")
#if not counter % val_skip_n:
# _img = sess.run(img_val)
# feed_dict = base_dict.copy()
# feed_dict.update({ph: img for ph, img in zip(img_ph, _img)})
# losses = sess.run( exp_dict['losses'], feed_dict=feed_dict )
# print("Iter: {}, Val losses: {}".format(counter, losses))
# try:
# val_log.write("Iter: {}, {}".format(counter, losses))
# except:
# print("Write to val log file failed")
if counter > 50:
#sess.run(assign_ops)
vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, exp_dict['filter_scopes'][0])
vs = sess.run(vars)
print(vs)
#filtered_img, truth, padded_truth = sess.run([exp_dict['outputs'],
# exp_dict['truth'],
# exp_dict['padded_truth']],
# feed_dict=feed_dict)
#disp(_img[0])
##disp(_img_truth[0])
#disp(filtered_img[0][0].reshape((cropsize-2,cropsize-2)).T)
#disp(truth[0][0].reshape((cropsize-2,cropsize-2)).T)
#disp(padded_truth[0][0].reshape((cropsize,cropsize)).T)
# os.system("pause")
#Save the model
if not counter % 5000:
saver.save(sess, save_path=model_dir+"model/", global_step=counter)
#Save the model
saver.save(sess, save_path=model_dir+"model/", global_step=counter)
with open(variables_file, 'w') as variables:
variables.flush()
for i in range(num_filters):
vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, exp_dict['filter_scopes'][i])
names = [v.name for v in vars]
print(names)
for n in names:
variables.write(n)
variables.write("\r\n")
v = sess.run(vars)
names = sess.run(names)
variables.write(str(v))
variables.write("\r\n")
return
if __name__ == '__main__':
main()
```
|
{
"source": "Jeffrey-Ede/Electron-Micrograph-Denoiser",
"score": 2
}
|
#### File: Electron-Micrograph-Denoiser/misc/err_hist_maker.py
```python
import numpy as np
import matplotlib as mpl
#mpl.use('pdf')
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
fontsize = 10
mpl.rcParams['axes.labelsize'] = fontsize
mpl.rcParams['xtick.labelsize'] = fontsize
mpl.rcParams['ytick.labelsize'] = fontsize
mpl.rcParams['legend.fontsize'] = fontsize
mpl.rcParams['axes.titlepad'] = 10
mpl.rcParams['savefig.dpi'] = 600
import matplotlib.mlab as mlab
import scipy.stats as stats
# width as measured in inkscape
scale = 1.0
ratio = 1.3 # 1.618
width = scale * 2.2 * 3.487
height = 2.2*(width / ratio) / 2.2
num_data_to_use = 20000
num_hist_bins = 200
mse_x_to = 0.012
labels = ["Unfiltered", "Gaussian", "Bilateral", "Median", "Wiener",
"Wavelet", "Chambolle TV", "Bregman TV", "NL Means", "Neural Network"]
num = len(labels)
data = np.load('//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/train-losses.npy')
data2 = np.load('//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/test-losses-ssim3.npy')
data_nn = np.load('//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/test-losses-ssim-nn.npy')
data_wiener = np.load('//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/test-losses-ssim-nn-wiener.npy')
codes = [(num, 2, x+1) for x in range(2*num)]
data_general = np.load(r'//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/denoiser-13-general-stats/test-losses-ssim-nn.npy')
datasets = []
datasets_general = []
means = []
means_general = []
for comp_idx in range(2):
for metric_idx in range(7):
if metric_idx != 4:
dataset = data[:num_data_to_use,metric_idx,comp_idx]
else:
dataset = data_wiener[:num_data_to_use,0,comp_idx]
mean = np.mean(dataset[np.isfinite(dataset)])
dataset[np.logical_not(np.isfinite(dataset))] = mean
if comp_idx == 0:
dataset[dataset > mse_x_to] = mse_x_to
elif comp_idx == 1:
dataset = dataset.clip(0.,1.)
means.append(mean)
datasets.append(dataset)
for comp_idx in range(2):
for metric_idx in range(2):
dataset = data2[:num_data_to_use,metric_idx,comp_idx]
mean = np.mean(dataset[np.isfinite(dataset)])
dataset[np.logical_not(np.isfinite(dataset))] = mean
if comp_idx == 0:
dataset[dataset > mse_x_to] = mse_x_to
elif comp_idx == 1:
dataset = dataset.clip(0.,1.)
means.append(mean)
datasets.append(dataset)
for comp_idx in range(2):
for metric_idx in range(1):
dataset = data_nn[:num_data_to_use,metric_idx,comp_idx]
mean = np.mean(dataset[np.isfinite(dataset)])
dataset[np.logical_not(np.isfinite(dataset))] = mean
if comp_idx == 0:
dataset[dataset > mse_x_to] = mse_x_to
elif comp_idx == 1:
dataset = dataset.clip(0.,1.)
means.append(mean)
datasets.append(dataset)
for comp_idx in range(2):
for metric_idx in range(10):
dataset = data_general[:num_data_to_use,metric_idx,comp_idx]
mean = np.mean(dataset[np.isfinite(dataset)])
dataset[np.logical_not(np.isfinite(dataset))] = mean
if comp_idx == 0:
dataset[dataset > mse_x_to] = mse_x_to
elif comp_idx == 1:
dataset = dataset.clip(0.,1.)
means_general.append(mean)
datasets_general.append(dataset)
#Rearrange positions of data
data_tmp = datasets_general[8]
datasets_general[8] = datasets_general[7]
datasets_general[7] = data_tmp
data_tmp = datasets_general[16]
datasets_general[16] = datasets_general[17]
datasets_general[17] = data_tmp
del data_tmp
mean_tmp = means_general[8]
means_general[8] = means_general[7]
means_general[7] = mean_tmp
mean_tmp = means_general[16]
means_general[16] = means_general[17]
means_general[17] = mean_tmp
del mean_tmp
datasets = (datasets[:7] + datasets[14:16] + datasets[18:19] +
datasets[7:14] +datasets[16:18] + datasets[19:20])
datasets.extend(datasets_general)
means.extend(means_general)
f, big_axes = plt.subplots( figsize=(15.0, 15.0),nrows=2, ncols=1, sharey=True)
titles = ["Low Dose, << 300 counts ppx", "Ordinary Dose, 200-2500 counts ppx"]
for row, big_ax in enumerate(big_axes):
big_ax.set_title(titles[row], fontsize=fontsize)
# Turn off axis lines and ticks of the big subplot
    # note: alpha is 0 in the RGBA tuple!
big_ax.tick_params(labelcolor=(1.,1.,1., 0.0), top='off', bottom='off', left='off', right='off')
# removes the white frame
big_ax._frameon = False
#f.set_facecolor('w')
print(np.min(datasets[12]), np.max(datasets[12]))
print(np.min(datasets[13]), np.max(datasets[13]))
print(np.min(datasets[14]), np.max(datasets[14]))
print(np.min(datasets[15]), np.max(datasets[15]))
print(np.min(datasets[16]), np.max(datasets[16]))
print(np.min(datasets[17]), np.max(datasets[17]))
def subplot_creator(loc, data):
plt.subplot(loc[0], loc[1], loc[2])
# the histogram of the data
n, bins, patches = plt.hist(data, 30, normed=1, facecolor='grey', edgecolor='black', alpha=0.75, linewidth=1)
# add a 'best fit' line
#y = mlab.normpdf( bins, mu, sigma)
#l = plt.plot(bins, y, 'r--', linewidth=1)
#plt.xlabel('Smarts')
#plt.ylabel('Probability')
#plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
#plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
plt.rc('font', family='serif', serif='Times')
plt.rc('text', usetex=False)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
bins_set = []
density_set = []
for i in range(len(datasets)):
density_set.append(stats.gaussian_kde(datasets[i]))
n, bins, patches = plt.hist(np.asarray(datasets[i]).T, num_hist_bins, normed=1, histtype='step')
bins_set.append(bins)
#plt.clf()
integs = []
maxs = [0., 0., 0., 0.]
for i in range(num):
dens = density_set[i](bins_set[i])
dens = [den for _, den in sorted(zip(bins_set[i], dens))]
bins = sorted(bins_set[i])
integ = np.trapz(dens, bins)
max = np.max(dens/integ)
if max > maxs[0]:
maxs[0] = max
integs.append(integ)
for i in range(num, 2*num):
dens = density_set[i](bins_set[i])
dens = [den for _, den in sorted(zip(bins_set[i], dens))]
bins = sorted(bins_set[i])
integ = np.trapz(dens, bins)
max = np.max(dens/integ)
if max > maxs[1]:
maxs[1] = max
integs.append(integ)
for i in range(2*num, 3*num):
dens = density_set[i](bins_set[i])
dens = [den for _, den in sorted(zip(bins_set[i], dens))]
bins = sorted(bins_set[i])
integ = np.trapz(dens, bins)
max = np.max(dens/integ)
if max > maxs[2]:
maxs[2] = max
integs.append(integ)
for i in range(3*num, 4*num):
dens = density_set[i](bins_set[i])
dens = [den for _, den in sorted(zip(bins_set[i], dens))]
bins = sorted(bins_set[i])
integ = np.trapz(dens, bins)
max = np.max(dens/integ)
if max > maxs[3]:
maxs[3] = max
integs.append(integ)
print("Maxs: ", maxs)
ax = f.add_subplot(2,2,1)
for i in range(num):
dens = density_set[i](bins_set[i])
dens /= integs[i]
print(np.sum(dens))
dens /= maxs[0]
#bins_to_use = bins_set[i] < 0.006
#bins_not_to_use = np.logical_not(bins_to_use)
#bins = np.append(bins_set[i][bins_to_use], 0.008)
#dens = np.append(dens[bins_to_use], np.sum(dens[bins_not_to_use]))
select = bins_set[i] < 0.0045
lw = 1 if not i%num == num-1 else 2
ls = '--' if not i%num else '-'
plt.plot(bins_set[i][select], dens[select], linewidth=lw, label=labels[i],linestyle=ls)
plt.xlabel('Mean Squared Error')
plt.ylabel('Relative PDF')
plt.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
#ax.grid()
#plt.rc('font', family='serif', serif=['Times'])
#plt.rc('text', usetex=False)
#plt.rc('xtick', labelsize=8)
#plt.rc('ytick', labelsize=8)
#plt.rc('axes', labelsize=8)
plt.legend(loc='upper right', frameon=False)
ax = f.add_subplot(2,2,2)
for i in range(num, 2*num):
dens = density_set[i](bins_set[i])
dens /= integs[i]
print(np.sum(dens))
print(1. / maxs[1])
dens /= maxs[1]
lw = 1 if not i%num == num-1 else 2
ls = '--' if not i%num else '-'
plt.plot(bins_set[i], dens, linewidth=lw, linestyle=ls)
plt.xlabel('Structural Similarity Index')
plt.ylabel('Relative PDF')
plt.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
#ax.grid()
plt.tick_params()
##plt.rc('font', family='serif', serif=['Times'])
#plt.rc('text', usetex=False)
#plt.rc('xtick', labelsize=8)
#plt.rc('ytick', labelsize=8)
#plt.rc('axes', labelsize=8)
ax = f.add_subplot(2,2,3)
for i in range(2*num, 3*num):
dens = density_set[i](bins_set[i])
dens /= integs[i]
print(np.sum(dens))
print(1. / maxs[2])
dens /= maxs[2]
select = bins_set[i] < 0.0012
lw = 1 if not i%num == num-1 else 2
ls = '--' if not i%num else '-'
plt.plot(bins_set[i][select], dens[select], linewidth=lw,linestyle=ls)
plt.xlabel('Mean Squared Error')
plt.ylabel('Relative PDF')
plt.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
#ax.grid()
plt.tick_params()
ax = f.add_subplot(2,2,4)
for i in range(3*num, 4*num):
dens = density_set[i](bins_set[i])
dens /= integs[i]
print(np.sum(dens))
print(1. / maxs[3])
dens /= maxs[3]
lw = 1 if not i%num == num-1 else 2
ls = '--' if not i%num else '-'
plt.plot(bins_set[i], dens, linewidth=lw,linestyle=ls)
plt.xlabel('Structural Similarity Index')
plt.ylabel('Relative PDF')
plt.minorticks_on()
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
#ax.grid()
plt.tick_params()
#plt.show()
#for code, data in zip(codes, datasets):
# subplot_creator(code, data)
f.subplots_adjust(wspace=0.18, hspace=0.26)
f.subplots_adjust(left=.00, bottom=.00, right=1., top=1.)
#ax.set_ylabel('Some Metric (in unit)')
#ax.set_xlabel('Something (in unit)')
#ax.set_xlim(0, 3*np.pi)
f.set_size_inches(width, height)
#plt.show()
f.savefig('plot.png', bbox_inches='tight', )
```
|
{
"source": "jeffreyegan/VSB_Power_Line_Fault_Detection",
"score": 3
}
|
#### File: jeffreyegan/VSB_Power_Line_Fault_Detection/vsb_make_submission.py
```python
import os
import time
import numpy as np
import pandas as pd
import lightgbm as lgb
from datetime import datetime
from matplotlib import pyplot as plt
def split_data(features, labels, random_state_value=1):
from sklearn.model_selection import train_test_split
    # Using an 85-15 training-to-testing split and a fixed random_state for repeatability
x_train, x_test, y_train, y_test = train_test_split(features, labels, train_size=0.85, test_size=0.15, random_state=random_state_value)
return x_train, x_test, y_train, y_test
def classification_light_gbm_model(df_train):
print('Loading data...')
#features = ["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks"]
features = ["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks", "hi_count", "lo_count", "low_high_ratio", "hi_true", "lo_true", "low_high_ratio_true"]
features = ["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks", "low_high_ratio", "hi_true", "lo_true", "low_high_ratio_true"]
target = ["fault"]
x_train, x_test, y_train, y_test = split_data(df_train[features], df_train[target], 189) # Split Data
print("preparing validation datasets")
xgdata = lgb.Dataset(df_train[features], df_train[target])
xgtrain = lgb.Dataset(x_train, y_train)
xgtest = lgb.Dataset(x_test, y_test)
evals_results = {}
metrics = 'binary_logloss'
lgb_params = {
'objective': 'binary',
'metric': metrics,
'learning_rate': 0.025,
        #'is_unbalance': 'true', #because training data is unbalanced (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
        'min_child_samples': 20, # Minimum number of samples needed in a child (min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
        'subsample_freq': 0, # frequency of subsampling, <=0 disables it
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
        'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split control regularization
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
'verbose': 0,
'scale_pos_weight':99, # because training data is extremely unbalanced
'boosting_type': 'gbdt',
'boost_from_average': False
}
print("Training...")
classifier = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgtest],
valid_names=['train','test'],
evals_result=evals_results,
num_boost_round=1000,
early_stopping_rounds=50,
verbose_eval=True,
feval=None)
n_estimators = classifier.best_iteration
y_pred_probs = classifier.predict(x_test, n_estimators)
blues = ["#66D7EB", "#51ACC5", "#3E849E", "#2C5F78", "#1C3D52", "#0E1E2B"]
fig=plt.figure(figsize=(14, 8), dpi= 120, facecolor='w', edgecolor='k')
plt.hist(y_pred_probs, bins=100, color=blues[1])
plt.ylabel("Occurrences")
plt.xlabel("Probability of Assigning Fault")
plt.savefig("plots/fault_probs_lgbm_submit.png", bbox_inches='tight')
print("\nModel Report")
print("n_estimators : ", n_estimators)
print(metrics+":", evals_results['test'][metrics][n_estimators-1])
return classifier
def predict_light_gbm_model(classifier, df_test, threshold):
print('Predicting...')
test_features = ["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks", "hi_count", "lo_count", "low_high_ratio", "hi_true", "lo_true", "low_high_ratio_true"]
test_features = ["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks", "low_high_ratio", "hi_true", "lo_true", "low_high_ratio_true"]
n_estimators = classifier.best_iteration
y_pred_probs = classifier.predict(df_test[test_features], n_estimators)
y_predicted = []
for y in y_pred_probs: # use probabilities to assign binary classification
if y >= threshold:
y_predicted.append(1)
else:
y_predicted.append(0)
return y_predicted
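# Note on the threshold (illustrative): with fault_detection_threshold = 0.85, only signals
# whose predicted fault probability reaches 0.85 are labelled 1; lowering the threshold
# trades precision for recall on this heavily imbalanced dataset.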
# Train Model with Full Set, Return Classifier Model
dwt = "db4"
peak_thresh = "4.5"
training_data = "/home/jeffrey/repos/VSB_Power_Line_Fault_Detection/extracted_features/train_featuresHiLo_thresh_"+peak_thresh+"_"+dwt+".csv"
df_train = pd.read_csv(training_data)
classifier = classification_light_gbm_model(df_train) # Light GBM
# Make Predictions
test_data = "/home/jeffrey/repos/VSB_Power_Line_Fault_Detection/extracted_features/test_featuresHiLo_thresh_"+peak_thresh+"_"+dwt+".csv"
df_test = pd.read_csv(test_data).drop(['Unnamed: 0'],axis=1)
fault_detection_threshold = 0.85
predicted_faults = predict_light_gbm_model(classifier, df_test, fault_detection_threshold)
df_test["fault"] = predicted_faults
# Make Submission File
submission_filename = "submissions/prediction_submissionHiLoBLL_"+peak_thresh+"_"+dwt+"_"+str(fault_detection_threshold)+"fdt_.csv"
f_o = open(submission_filename, "w+")
f_o.write("signal_id,target\n")
for idx in range(len(df_test)):
signal_id = df_test["signal_id"][idx]
fault = df_test["fault"][idx]
f_o.write(str(signal_id)+","+str(fault)+"\n")
f_o.close()
```
|
{
"source": "Jeffrey-Gadenne/ENG103_GUI_IOT_DATABASE_HEART",
"score": 3
}
|
#### File: Jeffrey-Gadenne/ENG103_GUI_IOT_DATABASE_HEART/heartrate_monitor.py
```python
import argparse
#from PIL import Image, ImageTk
import sqlite3
from max30102 import MAX30102
import hrcalc
import threading
import time
import numpy as np
import Adafruit_DHT
dht_sensor = Adafruit_DHT.DHT11
pin = 5
humidity, temperature = Adafruit_DHT.read_retry(dht_sensor, pin)
class HeartRateMonitor(object):
"""
A class that encapsulates the max30102 device into a thread
"""
LOOP_TIME = 0.01
def __init__(self, print_raw=False, print_result=False):
self.bpm = 0
if print_raw is True:
print('IR, Red')
self.print_raw = print_raw
self.print_result = print_result
def run_sensor(self):
sensor = MAX30102()
ir_data = []
red_data = []
bpms = []
# run until told to stop
while not self._thread.stopped:
# check if any data is available
num_bytes = sensor.get_data_present()
if num_bytes > 0:
# grab all the data and stash it into arrays
while num_bytes > 0:
red, ir = sensor.read_fifo()
num_bytes -= 1
ir_data.append(ir)
red_data.append(red)
if self.print_raw:
print("{0}, {1}".format(ir, red))
while len(ir_data) > 100:
ir_data.pop(0)
red_data.pop(0)
if len(ir_data) == 100:
bpm, valid_bpm, spo2, valid_spo2 = hrcalc.calc_hr_and_spo2(ir_data, red_data)
if valid_bpm:
bpms.append(bpm)
while len(bpms) > 4:
bpms.pop(0)
self.bpm = np.mean(bpms)
if (np.mean(ir_data) < 50000 and np.mean(red_data) < 50000):
self.bpm = 0
if self.print_result:
print("Finger not detected")
if self.print_result:
print("BPM: {0}, SpO2: {1}".format(self.bpm, spo2))
time.sleep(self.LOOP_TIME)
sensor.shutdown()
def start_sensor(self):
self._thread = threading.Thread(target=self.run_sensor)
self._thread.stopped = False
self._thread.start()
def stop_sensor(self, timeout=2.0):
self._thread.stopped = True
self.bpm = 0
self._thread.join(timeout)
```
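A minimal usage sketch for the class above, assuming the file is saved as `heartrate_monitor.py` on a Raspberry Pi with a MAX30102 attached; the module name and the 30-second window are assumptions, not part of the original repository.
```python
# Hypothetical driver script; HeartRateMonitor, start_sensor, bpm and
# stop_sensor all come from the module above.
import time
from heartrate_monitor import HeartRateMonitor

monitor = HeartRateMonitor(print_raw=False, print_result=True)
monitor.start_sensor()          # spawns the background sampling thread
try:
    time.sleep(30)              # let readings accumulate for ~30 seconds
    print("Latest BPM:", monitor.bpm)
finally:
    monitor.stop_sensor()       # signals the thread to stop and joins it
```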
|
{
"source": "jeffreygrover/Cirq",
"score": 2
}
|
#### File: quirk/cells/cell_test.py
```python
import pytest
import cirq
from cirq.contrib.quirk.cells.cell import Cell, ExplicitOperationsCell
def test_cell_defaults():
class BasicCell(Cell):
def with_line_qubits_mapped_to(self, qubits):
raise NotImplementedError()
def gate_count(self) -> int:
raise NotImplementedError()
c = BasicCell()
assert c.operations() == ()
assert c.basis_change() == ()
assert c.controlled_by(cirq.LineQubit(0)) is c
x = []
c.modify_column(x)
assert x == []
def test_cell_replace_utils():
a, b, c = cirq.NamedQubit.range(3, prefix='q')
assert Cell._replace_qubit(cirq.LineQubit(1), [a, b, c]) == b
with pytest.raises(ValueError, match='only map from line qubits'):
_ = Cell._replace_qubit(cirq.GridQubit(0, 0), [a, b, c])
with pytest.raises(ValueError, match='not in range'):
_ = Cell._replace_qubit(cirq.LineQubit(-1), [a, b, c])
with pytest.raises(ValueError, match='not in range'):
_ = Cell._replace_qubit(cirq.LineQubit(999), [a, b, c])
def test_explicit_operations_cell_equality():
a = cirq.LineQubit(0)
eq = cirq.testing.EqualsTester()
eq.add_equality_group(ExplicitOperationsCell([], []),
ExplicitOperationsCell([]))
eq.add_equality_group(ExplicitOperationsCell([cirq.X(a)], []))
eq.add_equality_group(ExplicitOperationsCell([], [cirq.Y(a)]))
def test_explicit_operations_cell():
a, b = cirq.LineQubit.range(2)
v = ExplicitOperationsCell([cirq.X(a)], [cirq.S(a)])
assert v.operations() == (cirq.X(a),)
assert v.basis_change() == (cirq.S(a),)
assert v.controlled_by(b) == ExplicitOperationsCell(
[cirq.X(a).controlled_by(b)], [cirq.S(a)])
```
#### File: cirq/google/op_deserializer.py
```python
from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional, Sequence, TYPE_CHECKING
import sympy
from google.protobuf import json_format
from cirq.api.google import v2
from cirq.google.api import v2 as api_v2
from cirq.google import arg_func_langs
if TYPE_CHECKING:
import cirq
@dataclass(frozen=True)
class DeserializingArg:
"""Specification of the arguments to deserialize an argument to a gate.
Args:
serialized_name: The serialized name of the gate that is being
deserialized.
constructor_arg_name: The name of the argument in the constructor of
the gate corresponding to this serialized argument.
        value_func: Sometimes a value from the serialized proto needs to be
            converted to an appropriate type or form. This function takes the
serialized value and returns the appropriate type. Defaults to
None.
required: Whether a value must be specified when constructing the
deserialized gate. Defaults to True.
"""
serialized_name: str
constructor_arg_name: str
value_func: Optional[Callable[[arg_func_langs.ArgValue], Any]] = None
required: bool = True
class GateOpDeserializer:
"""Describes how to deserialize a proto to a given Gate type.
Attributes:
serialized_gate_id: The id used when serializing the gate.
"""
def __init__(self,
serialized_gate_id: str,
gate_constructor: Callable,
args: Sequence[DeserializingArg],
num_qubits_param: Optional[str] = None):
"""Constructs a deserializer.
Args:
serialized_gate_id: The serialized id of the gate that is being
deserialized.
gate_constructor: A function that produces the deserialized gate
given arguments from args.
args: A list of the arguments to be read from the serialized
gate and the information required to use this to construct
the gate using the gate_constructor above.
num_qubits_param: Some gate constructors require that the number
of qubits be passed to their constructor. This is the name
of the parameter in the constructor for this value. If None,
no number of qubits is passed to the constructor.
"""
self.serialized_gate_id = serialized_gate_id
self.gate_constructor = gate_constructor
self.args = args
self.num_qubits_param = num_qubits_param
def from_proto_dict(self, proto: Dict) -> 'cirq.GateOperation':
"""Turns a cirq.api.google.v2.Operation proto into a GateOperation."""
msg = v2.program_pb2.Operation()
json_format.ParseDict(proto, msg)
return self.from_proto(msg)
def from_proto(self,
proto: v2.program_pb2.Operation) -> 'cirq.GateOperation':
"""Turns a cirq.api.google.v2.Operation proto into a GateOperation."""
qubits = [api_v2.grid_qubit_from_proto_id(q.id) for q in proto.qubits]
args = self._args_from_proto(proto)
if self.num_qubits_param is not None:
args[self.num_qubits_param] = len(qubits)
gate = self.gate_constructor(**args)
return gate.on(*qubits)
def _args_from_proto(self, proto: v2.program_pb2.Operation
) -> Dict[str, arg_func_langs.ArgValue]:
return_args = {}
for arg in self.args:
if arg.serialized_name not in proto.args and arg.required:
raise ValueError(
'Argument {} not in deserializing args, but is required.'.
format(arg.serialized_name))
value = None # type: Optional[arg_func_langs.ArgValue]
if arg.serialized_name in proto.args:
arg_proto = proto.args[arg.serialized_name]
which = arg_proto.WhichOneof('arg')
if which == 'arg_value':
arg_value = arg_proto.arg_value
which_val = arg_value.WhichOneof('arg_value')
if which_val == 'float_value':
value = float(arg_value.float_value)
elif which_val == 'bool_values':
value = arg_value.bool_values.values
elif which_val == 'string_value':
value = str(arg_value.string_value)
elif which == 'symbol':
value = sympy.Symbol(arg_proto.symbol)
if value is None and arg.required:
raise ValueError(
'Could not get arg {} from arg_proto {}'.format(
arg.serialized_name, proto.args))
if arg.value_func is not None:
value = arg.value_func(value)
if value is not None:
return_args[arg.constructor_arg_name] = value
return return_args
```
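A hedged illustration of how `GateOpDeserializer` and `DeserializingArg` fit together; the gate id and the serialized argument name below are invented for the example and do not come from Cirq's actual serializer tables.
```python
# Hypothetical deserializer: rebuild an XPowGate whose exponent was serialized
# under the (assumed) name 'half_turns'.
import cirq

x_pow_deserializer = GateOpDeserializer(
    serialized_gate_id='xp',  # illustrative id only
    gate_constructor=cirq.XPowGate,
    args=[
        DeserializingArg(serialized_name='half_turns',
                         constructor_arg_name='exponent')
    ])
# x_pow_deserializer.from_proto_dict(proto_dict) would then return a
# GateOperation acting on the qubits listed in the proto.
```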
#### File: cirq/ops/moment.py
```python
from typing import (Any, Callable, Iterable, Sequence, TypeVar, Union, Tuple,
FrozenSet, TYPE_CHECKING, Iterator)
from cirq import protocols
from cirq.ops import raw_types
if TYPE_CHECKING:
import cirq
TSelf_Moment = TypeVar('TSelf_Moment', bound='Moment')
class Moment:
"""A simplified time-slice of operations within a sequenced circuit.
Note that grouping sequenced circuits into moments is an abstraction that
may not carry over directly to the scheduling on the hardware or simulator.
Operations in the same moment may or may not actually end up scheduled to
occur at the same time. However the topological quantum circuit ordering
will be preserved, and many schedulers or consumers will attempt to
maximize the moment representation.
"""
def __init__(self, operations: Iterable[raw_types.Operation] = ()) -> None:
"""Constructs a moment with the given operations.
Args:
operations: The operations applied within the moment.
Will be frozen into a tuple before storing.
Raises:
ValueError: A qubit appears more than once.
"""
self._operations = tuple(operations)
# Check that operations don't overlap.
affected_qubits = [q for op in self.operations for q in op.qubits]
self._qubits = frozenset(affected_qubits)
if len(affected_qubits) != len(self._qubits):
raise ValueError(
'Overlapping operations: {}'.format(self.operations))
@property
def operations(self) -> Tuple[raw_types.Operation, ...]:
return self._operations
@property
def qubits(self) -> FrozenSet[raw_types.Qid]:
return self._qubits
def operates_on_single_qubit(self, qubit: raw_types.Qid) -> bool:
"""Determines if the moment has operations touching the given qubit.
Args:
qubit: The qubit that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubit.
"""
return qubit in self.qubits
def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:
"""Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits.
"""
return bool(set(qubits) & self.qubits)
def with_operation(self, operation: raw_types.Operation):
"""Returns an equal moment, but with the given op added.
Args:
operation: The operation to append.
Returns:
The new moment.
"""
if any(q in self._qubits for q in operation.qubits):
raise ValueError('Overlapping operations: {}'.format(operation))
# Use private variables to facilitate a quick copy
m = Moment()
m._operations = self.operations + (operation,)
m._qubits = frozenset(self._qubits.union(set(operation.qubits)))
return m
def without_operations_touching(self, qubits: Iterable[raw_types.Qid]):
"""Returns an equal moment, but without ops on the given qubits.
Args:
qubits: Operations that touch these will be removed.
Returns:
The new moment.
"""
qubits = frozenset(qubits)
if not self.operates_on(qubits):
return self
return Moment(
operation for operation in self.operations
if qubits.isdisjoint(frozenset(operation.qubits)))
def __copy__(self):
return type(self)(self.operations)
def __bool__(self) -> bool:
return bool(self.operations)
def __eq__(self, other) -> bool:
if not isinstance(other, type(self)):
return NotImplemented
return (sorted(self.operations, key=lambda op: op.qubits) == sorted(
other.operations, key=lambda op: op.qubits))
def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool:
"""See `cirq.protocols.SupportsApproximateEquality`."""
if not isinstance(other, type(self)):
return NotImplemented
return protocols.approx_eq(sorted(self.operations,
key=lambda op: op.qubits),
sorted(other.operations,
key=lambda op: op.qubits),
atol=atol)
def __ne__(self, other) -> bool:
return not self == other
def __hash__(self):
return hash(
(Moment, tuple(sorted(self.operations, key=lambda op: op.qubits))))
def __iter__(self) -> Iterator['cirq.Operation']:
return iter(self.operations)
def __pow__(self, power):
if power == 1:
return self
new_ops = []
for op in self.operations:
new_op = protocols.pow(op, power, default=None)
if new_op is None:
return NotImplemented
new_ops.append(new_op)
return Moment(new_ops)
def __len__(self):
return len(self.operations)
def __repr__(self):
if not self.operations:
return 'cirq.Moment()'
return 'cirq.Moment(operations={})'.format(
_list_repr_with_indented_item_lines(self.operations))
def __str__(self):
return ' and '.join(str(op) for op in self.operations)
def transform_qubits(self: TSelf_Moment,
func: Callable[[raw_types.Qid], raw_types.Qid]
) -> TSelf_Moment:
"""Returns the same moment, but with different qubits.
Args:
func: The function to use to turn each current qubit into a desired
new qubit.
Returns:
The receiving moment but with qubits transformed by the given
function.
"""
return self.__class__(op.transform_qubits(func)
for op in self.operations)
def _json_dict_(self):
return protocols.obj_to_dict_helper(self, ['operations'])
def _list_repr_with_indented_item_lines(items: Sequence[Any]) -> str:
block = '\n'.join([repr(op) + ',' for op in items])
indented = ' ' + '\n '.join(block.split('\n'))
return '[\n{}\n]'.format(indented)
```
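A quick sketch of the invariant the constructor above enforces: a `Moment` may not contain two operations that touch the same qubit.
```python
# Illustrative only; uses the public cirq API that this class backs.
import cirq

a, b, c = cirq.LineQubit.range(3)
m = cirq.Moment([cirq.X(a), cirq.Y(b)])      # fine: disjoint qubit sets
m2 = m.with_operation(cirq.Z(c))             # still fine, returns a new Moment
# m.with_operation(cirq.Z(a)) would raise ValueError('Overlapping operations: ...')
print(m2.qubits)                             # frozenset of the three LineQubits
```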
#### File: Cirq/examples/direct_fidelity_estimation.py
```python
import itertools
from typing import cast
from typing import List
from typing import Optional
from typing import Tuple
import numpy as np
import cirq
def build_circuit():
    # Builds an arbitrary circuit to test. The circuit is non-Clifford to show
# the use of simulators.
qubits = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.Z(qubits[0])**0.25, # T-Gate, non Clifford.
cirq.X(qubits[1])**0.123,
cirq.X(qubits[2])**0.456)
return circuit, qubits
def compute_characteristic_function(circuit: cirq.Circuit,
P_i: Tuple[cirq.Gate, ...],
qubits: List[cirq.Qid],
noise: Optional[cirq.NoiseModel]):
n_qubits = len(P_i)
d = 2**n_qubits
simulator = cirq.DensityMatrixSimulator()
# rho or sigma in https://arxiv.org/pdf/1104.3835.pdf
density_matrix = cast(cirq.DensityMatrixTrialResult,
simulator.simulate(circuit)).final_density_matrix
pauli_string = cirq.PauliString(dict(zip(qubits, P_i)))
qubit_map = dict(zip(qubits, range(n_qubits)))
# rho_i or sigma_i in https://arxiv.org/pdf/1104.3835.pdf
trace = pauli_string.expectation_from_density_matrix(
density_matrix, qubit_map)
assert np.isclose(trace.imag, 0.0, atol=1e-6)
trace = trace.real
prob = trace * trace / d # Pr(i) in https://arxiv.org/pdf/1104.3835.pdf
return trace, prob
def direct_fidelity_estimation(circuit: cirq.Circuit, qubits: List[cirq.Qid],
noise: cirq.NoiseModel, n_trials: int):
# n_trials is upper-case N in https://arxiv.org/pdf/1104.3835.pdf
# Number of qubits, lower-case n in https://arxiv.org/pdf/1104.3835.pdf
n_qubits = len(qubits)
# Computes for every \hat{P_i} of https://arxiv.org/pdf/1104.3835.pdf,
# estimate rho_i and Pr(i). We then collect tuples (rho_i, Pr(i), \hat{Pi})
# inside the variable 'pauli_traces'.
pauli_traces = []
for P_i in itertools.product([cirq.I, cirq.X, cirq.Y, cirq.Z],
repeat=n_qubits):
rho_i, Pr_i = compute_characteristic_function(circuit,
P_i,
qubits,
noise=None)
pauli_traces.append({'P_i': P_i, 'rho_i': rho_i, 'Pr_i': Pr_i})
assert len(pauli_traces) == 4**n_qubits
p = [x['Pr_i'] for x in pauli_traces]
assert np.isclose(sum(p), 1.0, atol=1e-6)
    # np.random.choice() is quite sensitive to probabilities that do not sum
    # to exactly 1.0. Even an absolute difference below 1e-6 (as checked just
    # above) bothers it, so we re-normalize the probs.
inv_sum_p = 1 / sum(p)
norm_p = [x * inv_sum_p for x in p]
fidelity = 0.0
for _ in range(n_trials):
# Randomly sample as per probability.
i = np.random.choice(range(4**n_qubits), p=norm_p)
Pr_i = pauli_traces[i]['Pr_i']
P_i = pauli_traces[i]['P_i']
rho_i = pauli_traces[i]['rho_i']
sigma_i, _ = compute_characteristic_function(circuit, P_i, qubits,
noise)
fidelity += Pr_i * sigma_i / rho_i
return fidelity / n_trials
def main():
circuit, qubits = build_circuit()
circuit.append(cirq.measure(*qubits, key='y'))
noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
estimated_fidelity = direct_fidelity_estimation(circuit,
qubits,
noise,
n_trials=10)
print(estimated_fidelity)
if __name__ == '__main__':
main()
```
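A small variation sketch on the example above (not part of the original file): re-run the estimator with a weaker depolarizing channel to see the fidelity move; the 0.01 error rate and the trial count are arbitrary.
```python
# Assumes the functions defined in direct_fidelity_estimation.py are in scope.
import cirq

circuit, qubits = build_circuit()
weak_noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.01))
print(direct_fidelity_estimation(circuit, qubits, weak_noise, n_trials=100))
```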
|
{
"source": "JeffreyHayes/openthread",
"score": 2
}
|
#### File: scripts/thread-cert/debug.py
```python
from builtins import input
class Inspector:
"""This class provides a way to inspect node status of a test case.
USAGE:
`#` mode
This is selection mode. You may select the node to inspect here.
`list` - list available nodes.
`exit` - end inspecting, continue running test case.
        <number> - select the node with id <number>. This will result in entering `>` mode.
`>` mode
This is node mode. You may run OpenThread CLI here.
`exit` - go back to `#` mode.
EXAMPLE:
#
# 1
> state
leader
> exit
# 2
> panid
face
> exit
# exit
"""
def __init__(self, test_case):
""" Initialize a inspector.
Args:
test_case: The test case to inspect
"""
self.test_case = test_case
def inspect_node(self, nodeid):
""" Inspect the node with the given nodeid.
Args:
nodeid: key in self.test_case.nodes.
"""
node = self.test_case.nodes[nodeid]
while True:
line = input('> ')
if not line:
continue
if line == 'exit':
break
else:
node.send_command(line)
node._expect('Done')
def inspect(self):
""" Start inspecting.
"""
while True:
line = input('# ')
if not line:
continue
if line.isdigit():
self.inspect_node(int(line))
elif line == 'list':
print(self.test_case.nodes.keys())
elif line == 'exit':
break
```
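A hedged example of how the inspector might be dropped into a thread-cert test; the surrounding test case is assumed to expose a `nodes` dict as described in the docstring above.
```python
# Hypothetical breakpoint helper; Inspector comes from the module above.
from debug import Inspector

def pause_and_inspect(test_case):
    """Suspend the test here and poke at the nodes interactively."""
    Inspector(test_case).inspect()
```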
#### File: scripts/thread-cert/sniffer.py
```python
import collections
import io
import logging
import os
import pcap
import threading
import traceback
try:
import Queue
except ImportError:
import queue as Queue
import message
import sniffer_transport
class Sniffer:
""" Class representing the Sniffing node, whose main task is listening
and logging message exchange performed by other nodes.
"""
logger = logging.getLogger("sniffer.Sniffer")
RECV_BUFFER_SIZE = 4096
def __init__(self, message_factory):
"""
Args:
message_factory (MessageFactory): Class producing messages from data bytes.
"""
self._message_factory = message_factory
self._pcap = pcap.PcapCodec(os.getenv('TEST_NAME', 'current'))
# Create transport
transport_factory = sniffer_transport.SnifferTransportFactory()
self._transport = transport_factory.create_transport()
self._thread = None
self._thread_alive = threading.Event()
self._thread_alive.clear()
self._buckets = collections.defaultdict(Queue.Queue)
def _sniffer_main_loop(self):
""" Sniffer main loop. """
self.logger.debug("Sniffer started.")
while self._thread_alive.is_set():
data, nodeid = self._transport.recv(self.RECV_BUFFER_SIZE)
self._pcap.append(data)
# Ignore any exceptions
try:
messages = self._message_factory.create(io.BytesIO(data))
self.logger.debug("Received messages: {}".format(messages))
for msg in messages:
self._buckets[nodeid].put(msg)
except Exception as e:
# Just print the exception to the console
print("EXCEPTION: %s" % e)
traceback.print_exc()
self.logger.debug("Sniffer stopped.")
def start(self):
""" Start sniffing. """
self._thread = threading.Thread(target=self._sniffer_main_loop)
self._thread.daemon = True
self._transport.open()
self._thread_alive.set()
self._thread.start()
def stop(self):
""" Stop sniffing. """
self._thread_alive.clear()
self._transport.close()
self._thread.join()
self._thread = None
def set_lowpan_context(self, cid, prefix):
self._message_factory.set_lowpan_context(cid, prefix)
def get_messages_sent_by(self, nodeid):
""" Get sniffed messages.
Note! This method flushes the message queue so calling this
method again will return only the newly logged messages.
Args:
nodeid (int): node id
Returns:
MessagesSet: a set with received messages.
"""
bucket = self._buckets[nodeid]
messages = []
while not bucket.empty():
messages.append(bucket.get_nowait())
return message.MessagesSet(messages)
```
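A minimal usage sketch of the class above, assuming a `MessageFactory` instance from the thread-cert harness is available; the node id is illustrative.
```python
# Hypothetical capture session built from the methods defined above.
sniffer = Sniffer(message_factory)       # message_factory assumed to exist
sniffer.start()
# ... drive the test scenario here ...
msgs = sniffer.get_messages_sent_by(1)   # flushes node 1's queue
sniffer.stop()
```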
#### File: scripts/thread-cert/test_common.py
```python
import random
import struct
import unittest
import ipaddress
import common
def any_eui64():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16_int():
return random.getrandbits(16)
def any_rloc16_bytearray():
return bytearray([random.getrandbits(8) for _ in range(2)])
def any_ipv6_address():
return bytearray([random.getrandbits(8) for _ in range(16)])
class TestMessageInfo(unittest.TestCase):
def test_should_return_source_ipv6_value_when_source_ipv6_property_is_called(
self):
# GIVEN
source_ipv6 = any_ipv6_address()
message_info = common.MessageInfo()
message_info.source_ipv6 = source_ipv6
# WHEN
actual_source_ipv6 = message_info.source_ipv6
# THEN
self.assertEqual(
ipaddress.ip_address(bytes(source_ipv6)), actual_source_ipv6
)
def test_should_return_destination_ipv6_value_when_destination_ipv6_property_is_called(
self):
# GIVEN
destination_ipv6 = any_ipv6_address()
message_info = common.MessageInfo()
message_info.destination_ipv6 = destination_ipv6
# WHEN
actual_destination_ipv6 = message_info.destination_ipv6
# THEN
self.assertEqual(
ipaddress.ip_address(bytes(destination_ipv6)),
actual_destination_ipv6,
)
def test_should_return_source_eui64_value_when_source_eui64_property_is_called(
self):
# GIVEN
source_mac_address = any_eui64()
message_info = common.MessageInfo()
message_info.source_mac_address = source_mac_address
# WHEN
actual_source_mac_address = message_info.source_mac_address
# THEN
self.assertEqual(source_mac_address, actual_source_mac_address)
def test_should_return_destination_eui64_value_when_destination_eui64_property_is_called(
self):
# GIVEN
destination_mac_address = any_eui64()
message_info = common.MessageInfo()
message_info.destination_mac_address = destination_mac_address
# WHEN
actual_destination_mac_address = message_info.destination_mac_address
# THEN
self.assertEqual(
destination_mac_address, actual_destination_mac_address
)
class TestMacAddress(unittest.TestCase):
def test_should_create_MacAddress_from_eui64_when_from_eui64_classmethod_is_called(
self):
# GIVEN
eui64 = any_eui64()
# WHEN
mac_address = common.MacAddress.from_eui64(eui64)
# THEN
self.assertEqual(common.MacAddressType.LONG, mac_address.type)
self.assertEqual(eui64, mac_address.mac_address)
def test_should_create_MacAddress_from_rloc16_int_when_from_rloc16_classmethod_is_called(
self):
# GIVEN
rloc16 = any_rloc16_int()
# WHEN
mac_address = common.MacAddress.from_rloc16(int(rloc16))
# THEN
self.assertEqual(common.MacAddressType.SHORT, mac_address.type)
self.assertEqual(struct.pack(">H", rloc16), mac_address.mac_address)
def test_should_create_MacAddress_from_rloc16_bytearray_when_from_rloc16_classmethod_is_called(
self):
# GIVEN
rloc16 = any_rloc16_bytearray()
# WHEN
mac_address = common.MacAddress.from_rloc16(rloc16)
# THEN
self.assertEqual(common.MacAddressType.SHORT, mac_address.type)
self.assertEqual(rloc16, mac_address.mac_address)
def test_should_convert_short_MacAddress_to_iid_when_convert_method_is_called(
self):
# GIVEN
rloc16 = any_rloc16_bytearray()
mac_address = common.MacAddress.from_rloc16(rloc16)
# WHEN
iid = mac_address.convert_to_iid()
# THEN
self.assertEqual(
bytearray([0x00, 0x00, 0x00, 0xff, 0xfe, 0x00]) + rloc16, iid
)
def test_should_convert_eui64_MacAddress_to_iid_when_convert_method_is_called(
self):
# GIVEN
eui64 = any_eui64()
mac_address = common.MacAddress.from_eui64(eui64)
# WHEN
iid = mac_address.convert_to_iid()
# THEN
self.assertEqual(bytearray([eui64[0] ^ 0x02]) + eui64[1:], iid)
if __name__ == "__main__":
unittest.main()
```
#### File: harness-automation/autothreadharness/open_thread_controller.py
```python
import logging
import re
import socket
import threading
import time
import serial
from . import settings
__all__ = ['OpenThreadController']
logger = logging.getLogger(__name__)
linesepx = re.compile(r'\r\n|\n')
class OpenThreadController(threading.Thread):
"""This is an simple wrapper to communicate with openthread"""
_lock = threading.Lock()
viewing = False
def __init__(self, port, log=False):
"""Initialize the controller
Args:
port (str): serial port's path or name(windows)
"""
super(OpenThreadController, self).__init__()
self.port = port
self.handle = None
self.lines = []
self._log = log
self._is_net = False
self._init()
def _init(self):
self._connect()
if not self._log:
return
self.start()
def __del__(self):
self.close()
def close(self):
if self.is_alive():
self.viewing = False
self.join()
self._close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _close(self):
if self.handle:
self.handle.close()
self.handle = None
def _connect(self):
logger.debug('My port is %s', self.port)
if self.port.startswith('NET'):
portnum = settings.SER2NET_PORTBASE + int(
self.port.split('NET')[1]
)
logger.debug('My port num is %d', portnum)
address = (settings.SER2NET_HOSTNAME, portnum)
self.handle = socket.create_connection(address)
self.handle.setblocking(0)
self._is_net = True
elif ':' in self.port:
host, port = self.port.split(':')
self.handle = socket.create_connection((host, port))
self.handle.setblocking(0)
self._is_net = True
else:
self.handle = serial.Serial(
self.port, 115200, timeout=0, xonxoff=True
)
self._is_net = False
def _read(self, size=512):
if self._is_net:
return self.handle.recv(size)
else:
return self.handle.read(size)
def _write(self, data):
if self._is_net:
self.handle.sendall(data)
else:
self.handle.write(data)
def _expect(self, expected, times=50):
"""Find the `expected` line within `times` trials.
Args:
expected str: the expected string
times int: number of trials
"""
logger.debug('[%s] Expecting [%s]', self.port, expected)
retry_times = 10
while times:
if not retry_times:
break
line = self._readline()
if line == expected:
return
if not line:
retry_times -= 1
time.sleep(0.1)
times -= 1
raise Exception('failed to find expected string[%s]' % expected)
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _sendline(self, line):
"""Send exactly one line to the device
Args:
            line str: data to send to the device
"""
self.lines = []
try:
self._read()
except socket.error:
logging.debug('Nothing cleared')
logger.debug('sending [%s]', line)
self._write(line + '\r\n')
# wait for write to complete
time.sleep(0.5)
def _req(self, req):
"""Send command and wait for response.
        The command will be repeated at most 3 times in case of data loss on the serial port.
Args:
req (str): Command to send, please do not include new line in the end.
Returns:
[str]: The output lines
"""
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
res = []
while True:
line = self._readline()
logger.debug('Got line %s', line)
if line == 'Done':
break
if line:
res.append(line)
break
except BaseException:
logger.exception('Failed to send command')
self.close()
self._init()
self._log and self.resume()
return res
def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except BaseException:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0)
def is_started(self):
"""check if openthread is started
Returns:
bool: started or not
"""
state = self._req('state')[0]
return state != 'disabled'
def start(self):
"""Start openthread
"""
self._req('ifconfig up')
self._req('thread start')
def stop(self):
"""Stop openthread
"""
self._req('thread stop')
self._req('ifconfig down')
def reset(self):
"""Reset openthread device, not equivalent to stop and start
"""
logger.debug('DUT> reset')
self._log and self.pause()
self._sendline('reset')
self._read()
self._log and self.resume()
def resume(self):
"""Start dumping logs"""
self._lock.release()
def pause(self):
"""Start dumping logs"""
self._lock.acquire()
@property
def networkname(self):
"""str: Thread network name."""
return self._req('networkname')[0]
@networkname.setter
def networkname(self, value):
self._req('networkname %s' % value)
@property
def mode(self):
"""str: Thread mode."""
return self._req('mode')[0]
@mode.setter
def mode(self, value):
self._req('mode %s' % value)
@property
def mac(self):
"""str: MAC address of the device"""
return self._req('extaddr')[0]
@property
def addrs(self):
"""[str]: IP addresses of the devices"""
return self._req('ipaddr')
@property
def short_addr(self):
"""str: Short address"""
return self._req('rloc16')[0]
@property
def channel(self):
"""int: Channel number of openthread"""
return int(self._req('channel')[0])
@channel.setter
def channel(self, value):
self._req('channel %d' % value)
@property
def panid(self):
"""str: Thread panid"""
return self._req('panid')[0]
@panid.setter
def panid(self, value):
self._req('panid %s' % value)
@property
def extpanid(self):
"""str: Thread extpanid"""
return self._req('extpanid')[0]
@extpanid.setter
def extpanid(self, value):
self._req('extpanid %s' % value)
@property
def child_timeout(self):
"""str: Thread child timeout in seconds"""
return self._req('childtimeout')[0]
@child_timeout.setter
def child_timeout(self, value):
self._req('childtimeout %d' % value)
@property
def version(self):
"""str: Open thread version"""
return self._req('version')[0]
def add_prefix(self, prefix, flags, prf):
"""Add network prefix.
Args:
prefix (str): network prefix.
            flags (str): network prefix flags; please refer to the Thread documentation for details
            prf (str): network prf (preference); please refer to the Thread documentation for details
"""
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister')
def remove_prefix(self, prefix):
"""Remove network prefix.
"""
self._req('prefix remove %s' % prefix)
time.sleep(1)
self._req('netdataregister')
def enable_blacklist(self):
"""Enable blacklist feature"""
self._req('blacklist enable')
def add_blacklist(self, mac):
"""Add a mac address to blacklist"""
self._req('blacklist add %s' % mac)
```
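A hypothetical interactive session with the controller above; the serial port name and parameter values are placeholders.
```python
# Uses only attributes and methods defined in OpenThreadController.
with OpenThreadController('/dev/ttyUSB0') as otc:
    otc.channel = 11
    otc.panid = 'face'
    otc.start()            # 'ifconfig up' + 'thread start'
    print(otc.is_started())
    print(otc.addrs)       # IPv6 addresses reported by the CLI
    otc.stop()
```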
|
{
"source": "jeffrey-hokanson/ExperimentDesigns",
"score": 2
}
|
#### File: ExperimentDesigns/tests/test_design.py
```python
import numpy as np
import psdr
import json
import pytest
import sys, os
import re
import urllib.request
from functools import lru_cache
try:
check = os.environ['EXP_DESIGN_CHECK']
assert check in ['all', 'novel']
except KeyError:
check = 'novel'
except AssertionError as e:
print(" 'EXP_DESIGN_CHECK' should either be 'all' or 'novel' ")
raise e
try:
MAINDIR = os.environ['EXP_DESIGN_MAINDIR']
except:
MAINDIR = '.'
def get_current_design(fname):
origin_master = "https://raw.githubusercontent.com/jeffrey-hokanson/ExperimentDesigns/master/"
path = origin_master + fname
with urllib.request.urlopen(path) as response:
design = json.loads(response.read())
return design
def get_new_design(fname):
with open( os.path.join(MAINDIR,fname), 'r') as f:
design = json.load(f)
return design
def list_designs(root):
# Generate list of designs
design_files = []
for r, d, f in os.walk(os.path.join(MAINDIR, 'designs/',root)):
for fname in f:
if fname.endswith('.json'):
design_files.append(os.path.join(r, fname))
else:
raise AssertionError("Invalid format for a design")
return design_files
@lru_cache()
def check_designs(root, check):
design_files = list_designs(root)
if check == 'all':
return design_files
assert check == 'novel'
filtered_design_files = []
for df in design_files:
try:
old_design = get_current_design(df)
new_design = get_new_design(df)
if new_design != old_design:
filtered_design_files.append(df)
except urllib.error.HTTPError:
# If we don't have an existing file, we check it
filtered_design_files.append(df)
return filtered_design_files
@pytest.mark.parametrize("fname", check_designs("minimax/l2", check) )
def test_minimax_l2_design(fname):
r""" This checks the design for consistency.
    It does not check whether there is an improvement.
"""
print(f"Loading design '{fname}'")
design = get_new_design(fname)
if design['domain'] == 'square':
domain = psdr.BoxDomain(np.zeros(2), np.ones(2))
else:
raise AssertionError('domain type "%s" not recognized' % design['domain'])
assert design['metric'] == 'l2', "Expected metric 'l2', got '%s'" % design['metric']
assert design['objective'] == 'minimax', "Expected objective 'minimax', got '%s'" % design['objective']
X = np.array(design['X'])
M = int(re.search(r'_(.*?).json', fname).group(1))
assert X.shape[0] == M, f"Number of points does not match the file name: name suggests {M}, files has {X.shape[0]}"
assert X.shape[1] == len(domain), "Points are in a different dimensional space than the domain"
assert np.all(domain.isinside(X)), "All points must be inside the domain"
# Check the objective value
V = psdr.voronoi_vertex(domain, X)
D = psdr.cdist(X, V)
radius = np.max(np.min(D, axis= 0))
print("Measured radius", '%20.15e' % radius)
print("Reported radius", '%20.15e' % design['radius'])
assert np.isclose(radius, design['radius'], rtol = 1e-10, atol = 1e-10)
@pytest.mark.parametrize("fname", check_designs("minimax/l2", check) )
def test_minimax_l2_design_improvement(fname):
new_design = get_new_design(fname)
try:
old_design = get_current_design(fname)
print(f"Old design radius {old_design['radius']:20.15e}")
print(f"New design radius {new_design['radius']:20.15e}")
        assert new_design['radius'] <= old_design['radius'], "The new design does not decrease the radius of the balls covering the domain"
except urllib.error.HTTPError:
print("No existing design found")
if __name__ == '__main__':
print("Checking designs: check=", check)
for d in check_designs('minimax/l2', check):
print(d)
```
|
{
"source": "jeffrey-hsu/mids_capstone",
"score": 3
}
|
#### File: vaxxfacts/data_load/processing_helper.py
```python
import xlrd, csv
import itertools
import re
from operator import itemgetter
import os, json, getpass, imp
import pandas as pd
def get_cell_range(sh, start_col, start_row, end_col, end_row):
sh_list = [sh.row_slice(row, start_colx=start_col,
end_colx=end_col+1) for row in range(start_row, end_row+1)]
sh_val_list = []
for i, row in enumerate(sh_list):
sh_val_list.append([])
for cell in row:
sh_val_list[i].append(cell.value)
return sh_val_list
def check_col_dict(fname, col_dict):
"""
Arg:
- fname: file name
- col_dict: {year: {ftype: {tab_num:#, start_row:#, end_row:#, cols:[]}}}
Return:
- ftype
- tab_num
- start_row
- end_row
- cols: list of column indexes
"""
year = fname[:7]
year = year[:5] + '20' + year[-2:]
if "ChildCareData" in fname:
ftype = "ChildCareData"
elif "KindergartenData" in fname:
ftype = "KindergartenData"
elif "7thGradeData" in fname:
ftype = "7thGradeData"
else:
raise ValueError("not-supported file type")
tab_num = col_dict[year][ftype]['tab_num']
start_row = col_dict[year][ftype]['start_row']
end_row = col_dict[year][ftype]['end_row']
cols = col_dict[year][ftype]['cols']
return ftype, tab_num, start_row, end_row, cols
def str_int_check(s):
try:
int(s)
return True
except ValueError:
return False
x = '''
def cols2take(fname, sh, col_dict):
"""
Args:
- fname: input file name
- sh: excel sheet in nested list format
- col_dict: dict of the col indexes to subset and corresponding col names
"""
new_sh = []
if "ChildCareData" in fname:
col_index = list(col_dict["ChildCareData"].keys())[:-1]
col_def = list(col_dict["ChildCareData"].values())
for i, row in enumerate(sh):
ct = list(itemgetter(*col_index)(row))
if str_int_check(ct[0]):
new_sh.append(ct)
new_sh[i].append("childcare")
elif "KindergartenData" in fname:
col_index = list(col_dict["KindergartenData"].keys())[:-1]
col_def = list(col_dict["KindergartenData"].values())
for i, row in enumerate(sh):
ct = list(itemgetter(*col_index)(row))
if str_int_check(ct[0]):
new_sh.append(ct)
new_sh[i].append("kindergarten")
elif "7thGradeData" in fname:
col_index = list(col_dict["7thGradeData"].keys())[:-1]
col_def = list(col_dict["7thGradeData"].values())
for i, row in enumerate(sh):
ct = list(itemgetter(*col_index)(row))
if str_int_check(ct[0]):
new_sh.append(ct)
new_sh[i].append("7th_grade")
else:
raise ValueError("not-supported file type")
return new_sh, col_def
'''
def take_cols(sh, col_index, ftype):
subset_sh = []
missing_index = [i for i, num in enumerate(col_index) if num == -1]
col_index = [num for i, num in enumerate(col_index) if num != -1]
for i, row in enumerate(sh):
line = list(itemgetter(*col_index)(row))
for j in missing_index:
line = line[:j] + [""] + line[j:]
line = line + [ftype]
#if str_int_check(line[0]):
subset_sh.append(line)
return subset_sh
def xlsx2csv(input_file_path, output_file_path, col_dict):
wb = xlrd.open_workbook(input_file_path)
shs = wb.sheet_names()
fname = input_file_path.split("/")[-1]
ftype, tab_num, start_row, end_row, col_index = check_col_dict(fname = fname, col_dict = col_dict)
print("processing input file: ", fname, "\ntab: ", shs[tab_num])
sh = wb.sheet_by_name(shs[tab_num])
m = sh.ncols
print(m, " columns and ", end_row, "rows")
## get all the value cells in defined range
sh_list = get_cell_range(sh, 0, start_row, m-1, end_row)
## subset only the wanted columns
subset_sh = take_cols(sh = sh_list, col_index = col_index, ftype = ftype)
## write to csv file
with open(output_file_path, "w") as csv_file:
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
for row in subset_sh:
wr.writerow(row)
return subset_sh
def merge_csv(input_files, output_file_path, cols):
"""
Arg:
- input_files : [input_file_path]
- output_file_path : string path
- cols : [column definition]
"""
final_df = pd.DataFrame([])
for i, input_file in enumerate(input_files):
#print("processing file: ", input_file)
df = pd.read_csv(input_file, skipinitialspace = True, sep = ",", names = cols)
final_df = final_df.append(df)
final_df.to_csv(output_file_path, sep=',')
return final_df
```
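A hedged driver sketch for the helpers above, assuming the `col_dict` layout documented in `check_col_dict`; the file name, tab number, row range, and column indices are placeholders (note that `-1` marks a column missing from that year's workbook, as handled in `take_cols`).
```python
# Hypothetical one-file run; xlsx2csv and merge_csv are defined above.
col_dict = {
    "2015-2016": {
        "KindergartenData": {
            "tab_num": 0,
            "start_row": 3,
            "end_row": 500,
            "cols": [0, 1, 2, 3, -1, 5],   # -1 -> column absent this year
        }
    }
}
rows = xlsx2csv("2015-16KindergartenData.xlsx",
                "kindergarten_2015_16.csv",
                col_dict)
```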
|
{
"source": "jeffreyiacono/recrypto",
"score": 3
}
|
#### File: jeffreyiacono/recrypto/market_buyer.py
```python
import gdax
import yaml
import sys
import os
import argparse
from time import sleep
def to_usd(val):
return '${:,.2f} USD'.format(float(val))
parser = argparse.ArgumentParser()
parser.add_argument('--account', required=True, help='Specify which account to use')
parser.add_argument('--coin', choices=['btc', 'bch', 'ltc', 'eth'], required=True, help='Specify which coin to buy')
args = parser.parse_args()
account = yaml.load(open(os.path.dirname(os.path.abspath(__file__)) + '/accounts/' + args.account + '.yaml', 'r'),
Loader=yaml.BaseLoader)
coin = args.coin.lower()
try:
coin_buy_amount_usd = account['trading'][coin]['buy_amount_usd']
except KeyError:
print("{} is not specified in accounts/{}.yaml.".format(coin, args.account))
print("Exiting without buying anything.")
sys.exit()
gdax_credentials = account['gdax']
auth_client = gdax.AuthenticatedClient(gdax_credentials['key'],
gdax_credentials['secret'],
gdax_credentials['passphrase'])
usd_account = auth_client.get_account(account_id=account['gdax']['accounts']['usd_id'])
if 'message' in usd_account.keys():
print("Coinbase Pro authentication hit an error: {}".format(usd_account['message']))
print("Exiting without buying anything.")
sys.exit()
if float(usd_account['available']) >= float(coin_buy_amount_usd):
print("Sufficient funds: want to buy {} of {}, have {} available.".format(to_usd(coin_buy_amount_usd),
coin,
to_usd(usd_account['available'])))
else:
print("Insufficient funds: want to buy {} of {}, but only have {} available.".format(to_usd(coin_buy_amount_usd),
coin,
to_usd(usd_account['available'])))
print("Exiting without buying anything.")
sys.exit()
order = auth_client.buy(type='market',
funds=str(coin_buy_amount_usd),
product_id="{}-USD".format(coin.upper()))
if 'id' in order.keys():
print("Order {} submitted for {} of {}, status is {}".format(order['id'],
to_usd(order['specified_funds']),
coin,
order['status']))
order_done = False
seconds_to_wait_between_checks = 5
times_to_check = 5
times_checked = 0
while not order_done and times_checked < times_to_check:
sleep(seconds_to_wait_between_checks)
print("Checking the status of order {} ...".format(order['id']))
updated_order = auth_client.get_order(order['id'])
times_checked += 1
if updated_order['status'] == 'done':
print("Order {} settled and done. {} of {} bought ({} fees paid).".format(order['id'],
to_usd(float(updated_order['funds'])),
order['product_id'],
to_usd(float(updated_order['specified_funds']) - float(updated_order['funds']))))
order_done = True
elif updated_order['status'] == 'pending':
print("Order is {}. Let's check again in {} seconds. I'll try this {} more time{}".format(updated_order['status'],
seconds_to_wait_between_checks,
times_to_check - times_checked,
'' if times_to_check - times_checked == 1 else 's'))
if order_done:
print("All done. Have a nice day.")
else:
print("Could not determine if the order was completed or not. Please check manually.")
else:
print("Something went wrong.")
```
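For reference, the account file read above needs at least the keys shown here; the structure is inferred from the lookups in the script, and every value below is a placeholder.
```python
# Python-dict equivalent of a hypothetical accounts/<name>.yaml file.
example_account = {
    "gdax": {
        "key": "YOUR_API_KEY",
        "secret": "YOUR_API_SECRET",
        "passphrase": "YOUR_API_PASSPHRASE",
        "accounts": {"usd_id": "YOUR_USD_ACCOUNT_ID"},
    },
    "trading": {
        "btc": {"buy_amount_usd": 25},   # one entry per coin you trade
    },
}
```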
|
{
"source": "JeffreyILipton/ME480",
"score": 4
}
|
#### File: ME480/Local/sumZeta.py
```python
from zeta import *
from itertools import islice
import math
def sumZeta(N):
print(N)
ZP2 = Zeta_part(2)
Zeta_2 = sum(islice(ZP2,0,N))
diff = (math.pi**2)/6-Zeta_2
return(Zeta_2,diff)
#print(f'Zeta_2 from n=1 to n={n} is approximately {Zeta_2}, and is off by {diff} from the true value')
if __name__ == "__main__":
print(sumZeta(50))
```
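The imported `zeta` module is not shown in this snapshot; below is a guess at a compatible `Zeta_part` generator, assuming it yields the terms 1/n**s for n = 1, 2, 3, ..., so the partial sums converge to pi**2/6 as the script expects.
```python
# Hypothetical stand-in for zeta.Zeta_part; the real module may differ.
def Zeta_part(s):
    n = 1
    while True:
        yield 1 / n**s
        n += 1
```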
|
{
"source": "jeffreyjeanpierre/serverscripts",
"score": 4
}
|
#### File: serverscripts/listdispatch/listdispatcher.py
```python
names_list = []
def ask():
    '''This function will take input for
    the number of names to add to the
    names_list list.'''
global names_list
counter = 1
while counter:
try:
count = int(raw_input("How many names: "))
break
except:
print "Must be a number."
while count > 0:
placeholder = raw_input("Enter number %s's name: " % count)
names_list.append(placeholder)
count -= 1
def names():
'''This function will format the names list.
Please run the run()
function instead of using this.'''
if len(names_list) > 1:
for name in names_list:
if name != names_list[-1]: print name +",",
print "and " + name +"."
else:
for name in names_list: print name +"."
def run():
'''Run this after running ask().'''
print("hello"),
names()
```
|
{
"source": "jeffreyjgong/cpsc447-project",
"score": 3
}
|
#### File: cpsc447-project/src/evaluator.py
```python
from starter import MY_TROTTER_STEPS, my_trotter # feel free to replace to your module
"""
Recommended development procedure:
1. use noiseless simulator to verify your trotterization is correct: as `trotter_steps` goes high, fidelity should approach 100%
2. use jakarta simulator to see how it performs on noisy simulator, observe what's the bottleneck
3. use real jakarta to test fidelity. Since the hardware resource is limited, you may wait for a long time before it executes
"""
USE_NOISELESS_SIMULATOR = True
USE_REAL_JAKARTA = False
if USE_NOISELESS_SIMULATOR:
print("[backend] noiseless simulator")
print("\033[93m[warning] this result is not for grading\033[0m")
else:
from qiskit import IBMQ
assert False, "please fill in your token from IBM-Q"
# IBMQ.save_account(TOKEN) # replace TOKEN with your API token string (https://quantum-computing.ibm.com/lab/docs/iql/manage/account/ibmq)
IBMQ.load_account()
if USE_REAL_JAKARTA:
print("[backend] jakarta")
else:
print("[backend] jakarta simulator")
"""
Here we provide you some useful evaluation functions
"""
def main():
""" evaluate fidelity for a single case, [warning] this is NOT your grading fidelity """
# print(evaluate_fidelity(MY_TROTTER_STEPS))
""" This will scan over some range of trotter_steps, for you to find the optimal value of trotter_steps """
# scan_trotter_steps(4, 13)
"""
[grading fidelity]
This is what we do when grading: we'll not only test the final state, but the state after every trotter step
    If you do trotterization correctly, the fidelity should go lower with more trotter steps (but it can fluctuate if your fidelity is high): noise accumulates
    If you observe an extremely low fidelity in the middle, you may need to debug whether your trotterization is correct (basically we want to prevent a solution that simply recovers the final state)
"""
do_grading(MY_TROTTER_STEPS)
def scan_trotter_steps(start, end, step=1):
fidelities = []
for trotter_steps in range(start, end, step):
fidelity, fidelity_stderr = evaluate_fidelity(trotter_steps)
print(f"trotter_steps {trotter_steps}: fidelity = {fidelity} stderr = {fidelity_stderr}")
fidelities.append((trotter_steps, fidelity, fidelity_stderr))
return fidelities
def do_grading(trotter_steps):
# evaluate every fidelity after each trotter step
fidelities = []
for intermediate_trottor in range(1, trotter_steps+1):
fidelity, fidelity_stderr = evaluate_fidelity(trotter_steps, intermediate_trottor)
print(f"intermediate_trottor {intermediate_trottor}: fidelity = {fidelity} stderr = {fidelity_stderr}")
fidelities.append((intermediate_trottor, fidelity, fidelity_stderr))
# find the minimum fidelity in every case
    min_fidelity = fidelities[0][1]  # fidelity of the first trotter step
for intermediate_trottor, fidelity, fidelity_stderr in fidelities:
if fidelity < min_fidelity:
min_fidelity = fidelity
print(f"[grade] fidelity = {min_fidelity}")
return min_fidelity, fidelities
"""
Library code
"""
import numpy as np
from contextlib import redirect_stdout
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, QuantumRegister, execute, transpile
from qiskit.providers.aer import QasmSimulator
from qiskit.tools.monitor import job_monitor
from qiskit.circuit import Parameter
# Import state tomography modules
from qiskit.ignis.verification.tomography import state_tomography_circuits, StateTomographyFitter
from qiskit.quantum_info import state_fidelity
from qiskit.opflow import Zero, One, I, X, Y, Z
backend = QasmSimulator()
if not USE_NOISELESS_SIMULATOR:
provider = IBMQ.get_provider(hub='ibm-q-education', group='yale-uni-2', project='cpsc647-quantum')
jakarta = provider.get_backend('ibmq_jakarta')
if USE_REAL_JAKARTA:
backend = jakarta
else:
backend = QasmSimulator.from_backend(jakarta)
def H_heis3():
XXs = (I^X^X) + (X^X^I)
YYs = (I^Y^Y) + (Y^Y^I)
ZZs = (I^Z^Z) + (Z^Z^I)
H = XXs + YYs + ZZs
return H
def U_heis3(t):
H = H_heis3()
return (t * H).exp_i()
def evaluate_fidelity(trotter_steps, intermediate_trottor=None, reps=8, shots=8192):
    # By default, only evaluate the state after the last trotter step.
    # But in order to verify that one indeed did trotterization, we will also evaluate the state in the middle, by specifying `intermediate_trottor` in the range [1, trotter_steps]
if intermediate_trottor is None:
intermediate_trottor = trotter_steps
target_time = np.pi * intermediate_trottor / trotter_steps
# The expected final state; necessary to determine state tomography fidelity
target_state = (U_heis3(target_time) @ (One^One^Zero)).eval().to_matrix() # DO NOT MODIFY (|q_5,q_3,q_1> = |110>)
Trot_gate, target_qubits = my_trotter(trotter_steps)
# Initialize quantum circuit for 3 qubits
qr = QuantumRegister(7)
qc = QuantumCircuit(qr)
# Prepare initial state (remember we are only evolving 3 of the 7 qubits on jakarta qubits (q_5, q_3, q_1) corresponding to the state |110>)
qc.x([3,5]) # DO NOT MODIFY (|q_5,q_3,q_1> = |110>)
# Simulate time evolution under H_heis3 Hamiltonian
for _ in range(intermediate_trottor):
qc.append(Trot_gate, [qr[i] for i in target_qubits])
# Generate state tomography circuits to evaluate fidelity of simulation
st_qcs = state_tomography_circuits(qc, [qr[1], qr[3], qr[5]])
jobs = []
for _ in range(reps):
job = execute(st_qcs, backend, shots=shots)
jobs.append(job)
for job in jobs:
job_monitor(job, quiet=True)
try:
if job.error_message() is not None:
print(f"[error: trotter_steps={trotter_steps}]", job.error_message())
except:
pass
# Compute the state tomography based on the st_qcs quantum circuits and the results from those ciricuits
def state_tomo(result, st_qcs):
# Fit state tomography results
tomo_fitter = StateTomographyFitter(result, st_qcs)
rho_fit = tomo_fitter.fit(method='lstsq')
# Compute fidelity
fid = state_fidelity(rho_fit, target_state)
return fid
# Compute tomography fidelities for each repetition
fids = []
for job in jobs:
fid = state_tomo(job.result(), st_qcs)
fids.append(fid)
# print('state tomography fidelity = {:.4f} \u00B1 {:.4f}'.format(np.mean(fids), np.std(fids)))
fidelity = np.mean(fids)
fidelity_stderr = np.std(fids)
return (fidelity, fidelity_stderr)
if __name__ == "__main__":
main()
```
|
{
"source": "jeffreyjgong/qtcodes",
"score": 2
}
|
#### File: qtcodes/circuits/base.py
```python
from abc import abstractmethod, ABCMeta
from typing import TypeVar, Tuple, Dict, List, Generic, Optional, Type, Any
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.circuit.quantumregister import Qubit
from qtcodes.common.constants import DH, DW
TQubit = TypeVar("TQubit")
class LatticeError(Exception):
"""
Lattice Inconsistency Errors
"""
class _Stabilizer(metaclass=ABCMeta):
"""
A blueprint for stabilizer classes, such as plaquettes for surface codes.
"""
def __init__(self, circ: QuantumCircuit, qubit_indices: List[List[Qubit]]):
self.circ = circ
self.qubit_indices = qubit_indices
@abstractmethod
def entangle(self):
"""
Entangles qubits to form a plaquette
"""
class _TopologicalLattice(Generic[TQubit], metaclass=ABCMeta):
"""
This abstract class contains a blueprint for lattice construction.
"""
# Constants
@property
def H(self) -> int:
"""Constant for lattice height"""
return DH
@property
def W(self) -> int:
"""Constant for lattice width"""
return DW
@property
def SYNX(self) -> int:
"""Constant for X syndromes index"""
return 0
@property
def SYNZ(self) -> int:
"""Constant for Z syndromes index"""
return 1
def __init__(
self, params: Dict[str, Any], name: str, circ: QuantumCircuit,
):
"""
Initializes this Topological Lattice class.
Args:
params (Dict[str,int or Tuple(int, int)]):
Contains params such as d, where d is the number of
physical "data" qubits lining rows / columns of the lattice.
name (str):
Useful when combining multiple TopologicalQubits together.
Prepended to all registers.
circ (QuantumCircuit):
QuantumCircuit on top of which the topological qubit is built.
This is often shared amongst multiple TQubits.
"""
self.name = name
self.circ = circ
self.params: Dict[str, Any] = params
self._params_validate_and_generate()
self.qregisters: Dict[str, QuantumRegister] = {} # quantum
self.cregisters: Dict[str, ClassicalRegister] = {} # classical
self._gen_registers()
assert "data" in self.qregisters, "There should be a data qubits register."
# add quantum/classical registers to circuit
registers = list(self.qregisters.values()) + list(self.cregisters.values())
self.circ.add_register(*registers)
self.qubit_indices, self.stabilizers = self._gen_qubit_indices_and_stabilizers()
@abstractmethod
def _params_validate_and_generate(self) -> None:
"""
Validate and generate params.
E.g.
self.params["num_data"] = self.params["d"][self.H] * self.params["d"][self.W]
"""
@abstractmethod
def _gen_registers(self) -> None:
"""
Implement this method to create quantum and classical registers.
E.g.
qregisters["data"] = QuantumRegister(params["num_data"], name=name + "_data")
"""
@abstractmethod
def _gen_qubit_indices_and_stabilizers(
self,
) -> Tuple[List[List[Qubit]], List[Type[Any]]]:
"""
Generates lattice blueprint for rotated surface code lattice with our
chosen layout and numbering.
Returns:
qubit_indices (List[List[Qubit]]):
List of lists of Qubits that comprise each plaquette.
stabilizers (List[_Stabilizer]):
List of stabilizers for each plaquette.
"""
def entangle(
self,
qubit_indices: Optional[List[List[Qubit]]] = None,
stabilizers: Optional[List[Type[_Stabilizer]]] = None,
) -> None:
"""
Entangles plaquettes as per the instruction set stored in qubit_indices
and stabilizers and generated by _gen_qubit_indices_and_stabilizers
Args:
qubit_indices (Optional[List[List[Qubit]]]):
List of lists of Qubits that comprise each plaquette.
This is optional, and will be used instead of self.qubit_indices if provided.
stabilizers (Optional[List[_Stabilizer]]):
List of stabilizers for each plaquette.
This is optional, and will be used instead of self.stabilizers if provided.
"""
qubit_indices = qubit_indices if qubit_indices else self.qubit_indices
stabilizers = stabilizers if stabilizers else self.stabilizers
for i, stabilizer_cls in enumerate(stabilizers):
stabilizer = stabilizer_cls(self.circ, qubit_indices[i])
stabilizer.entangle()
self.circ.barrier()
@abstractmethod
def reset_x(self) -> None:
"""
Initialize/reset to a logical |x+> state.
"""
@abstractmethod
def reset_z(self) -> None:
"""
Initialize/reset to a logical |z+> state.
"""
@abstractmethod
def x(self) -> None:
"""
Logical X operator on the topological qubit.
"""
@abstractmethod
def z(self) -> None:
"""
Logical Z operator on the topological qubit.
"""
@abstractmethod
def x_c_if(self, classical: ClassicalRegister, val: int) -> None:
"""
Classically conditioned logical X operator on the topological qubit.
"""
@abstractmethod
def z_c_if(self, classical: ClassicalRegister, val: int) -> None:
"""
Classically conditioned logical Z operator on the topological qubit.
"""
@abstractmethod
def cx(self, control: Optional[Qubit] = None, target: Optional[Qubit] = None):
"""
Logical CX Gate
Args:
control (Optional[Qubit]): If provided, then this gate will implement
a logical x gate on this tqubit conditioned on source
target (Optional[Qubit]): If provided, then this gate will implement
a logical x gate on target conditioned on this tqubit
"""
@abstractmethod
def readout_x(self, readout_creg: Optional[ClassicalRegister] = None) -> None:
"""
Convenience method to read-out the logical-X projection.
"""
@abstractmethod
def readout_z(self, readout_creg: Optional[ClassicalRegister] = None) -> None:
"""
Convenience method to read-out the logical-Z projection.
"""
@abstractmethod
def lattice_readout_x(self) -> None:
"""
Readout all data qubits that constitute the lattice.
        This readout can be used to extract a final round of stabilizer measurements,
as well as a logical X readout.
"""
@abstractmethod
def lattice_readout_z(self) -> None:
"""
Readout all data qubits that constitute the lattice.
        This readout can be used to extract a final round of stabilizer measurements,
as well as a logical Z readout.
"""
@abstractmethod
def parse_readout(
self, readout_string: str, readout_type: Optional[str] = None
) -> Tuple[int, Dict[str, List[TQubit]]]:
"""
Helper method to turn a result string (e.g. 1 10100000 10010000) into an
appropriate logical readout value and XOR-ed syndrome locations
according to our grid coordinate convention.
The implementation varies with different topological qubits,
but here's an example from the rotated surface code:
Args:
readout_string (str):
Readout of the form "0 00000000 00000000" (logical_readout syndrome_1 syndrome_0)
or of the form "000000000 00000000 00000000" (lattice_readout syndrome_1 syndrome_0)
Returns:
logical_readout (int):
logical readout value
syndromes (Dict[str, List[TQubit]]]):
key: syndrome type
value: (time, row, col) of parsed syndrome hits (changes between consecutive rounds)
"""
class TopologicalQubit(Generic[TQubit], metaclass=ABCMeta):
"""
A single topological code logical qubit.
This stores a QuantumCircuit object onto which the topological circuit is built.
This abstract class contains a list of abstract methods
that should be implemented by subclasses.
"""
@property
@abstractmethod
def lattice_type(self):
"""
Subclass of _TopologicalLattice
"""
def __init__(
self,
params: Optional[Dict[str, int]] = None,
name: str = "tq",
circ: Optional[QuantumCircuit] = None,
) -> None:
"""
Initializes this Topological Qubit class.
Args:
params (Dict[str,int]):
Contains params such as d, where d is the number of
physical "data" qubits lining a row or column of the lattice.
name (str):
Useful when combining multiple TopologicalQubits together.
Prepended to all registers.
circ (Optional[QuantumCircuit]):
QuantumCircuit on top of which the topological qubit is built.
This is often shared amongst multiple TQubits.
If none is provided, then a new QuantumCircuit is initialized and stored.
"""
# == None is necessary, as `not QuantumCircuit()` is True
circ = QuantumCircuit() if circ is None else circ
params = params if params else {}
self.lattice: _TopologicalLattice = self.lattice_type(params, name, circ)
self.name = name
self.circ = circ
def draw(self, **kwargs) -> None:
"""
Convenience method to draw quantum circuit.
"""
return self.circ.draw(**kwargs)
def __str__(self) -> str:
return self.circ.__str__()
@abstractmethod
def stabilize(self) -> None:
"""
Run a single round of stabilization (entangle and measure).
"""
def id(self) -> None:
"""
Inserts an identity on the data and syndrome qubits.
This allows us to create an isolated noise model by inserting errors only on identity gates.
"""
for register in self.lattice.qregisters.values():
self.circ.id(register)
self.circ.barrier()
def id_data(self) -> None:
"""
Inserts an identity on the data qubits only.
This allows us to create an isolated noise model by inserting errors only on identity gates.
"""
self.circ.id(self.lattice.qregisters["data"])
self.circ.barrier()
def reset_x(self) -> None:
"""
Initialize/reset to a logical |x+> state.
"""
self.lattice.reset_x()
def reset_z(self) -> None:
"""
Initialize/reset to a logical |z+> state.
"""
self.lattice.reset_z()
def x(self) -> None:
"""
Logical X operator on the topological qubit.
"""
self.lattice.x()
def z(self) -> None:
"""
Logical Z operator on the topological qubit.
"""
self.lattice.z()
def x_c_if(self, classical: ClassicalRegister, val: int) -> None:
"""
        Classically conditioned logical X operator on the topological qubit.
"""
self.lattice.x_c_if(classical, val)
def z_c_if(self, classical: ClassicalRegister, val: int) -> None:
"""
        Classically conditioned logical Z operator on the topological qubit.
"""
self.lattice.z_c_if(classical, val)
def cx(self, control: Optional[Qubit] = None, target: Optional[Qubit] = None):
"""
Logical CX Gate
Args:
control (Optional[Qubit]): If provided, then this gate will implement
a logical x gate on this tqubit conditioned on source
target (Optional[Qubit]): If provided, then this gate will implement
a logical x gate on target conditioned on this tqubit
Additional Information:
Exactly one of control or target must be provided.
"""
if not (bool(control) ^ bool(target)):
raise ValueError("Please specify exactly one of source or target")
self.lattice.cx(control, target)
def readout_x(self, readout_creg: Optional[ClassicalRegister] = None) -> None:
"""
Convenience method to read-out the logical-X projection.
"""
self.lattice.readout_x(readout_creg=readout_creg)
def readout_z(self, readout_creg: Optional[ClassicalRegister] = None) -> None:
"""
Convenience method to read-out the logical-Z projection.
"""
self.lattice.readout_z(readout_creg=readout_creg)
def lattice_readout_x(self) -> None:
"""
Readout all data qubits that constitute the lattice.
This readout can be used to extract a final round of X stabilizer measurements,
as well as a logical X readout.
"""
self.lattice.lattice_readout_x()
def lattice_readout_z(self) -> None:
"""
Readout all data qubits that constitute the lattice.
This readout can be used to extract a final round of Z stabilizer measurements,
as well as a logical Z readout.
"""
self.lattice.lattice_readout_z()
def parse_readout(
self, readout_string: str, readout_type: Optional[str] = None
) -> Tuple[int, Dict[str, List[TQubit]]]:
"""
Wrapper on helper method to turn a result string (e.g. 1 10100000 10010000) into an
appropriate logical readout value and XOR-ed syndrome locations
according to our grid coordinate convention.
"""
return self.lattice.parse_readout(readout_string, readout_type)
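# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module).
# It assumes a concrete subclass such as qtcodes' XXZZQubit and a distance
# d=3 chosen purely for illustration.
#
#   from qtcodes import XXZZQubit
#   qubit = XXZZQubit({"d": 3})
#   qubit.reset_z()                  # prepare a logical |0>
#   qubit.stabilize()                # one round of syndrome extraction
#   qubit.x()                        # logical X flip
#   qubit.stabilize()
#   qubit.readout_z()                # measure the logical Z value
#   print(qubit.draw(output="text"))
# ---------------------------------------------------------------------------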
```
#### File: qtcodes/circuits/circ.py
```python
from typing import Union, Dict, cast, Optional, Any, Tuple, List, Type
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.circuit.classicalregister import ClassicalRegister
from qtcodes.circuits.base import TopologicalQubit
from qtcodes.common.constants import *
from qtcodes.circuits.qubit_types import str2qtype
class TopologicalRegister:
"""
A blueprint for a TopologicalRegister that stores topological qubit(s)
"""
def __init__(
self,
circ: Optional[QuantumCircuit] = None,
ctypes: Optional[List[str]] = None,
params: Optional[List[Dict[str, int]]] = None,
name: str = "treg",
):
"""
Args:
circ (QuantumCircuit):
QuantumCircuit on top of which the topological qubit is built.
This is often shared amongst multiple TQubits.
If none is provided, then a new QuantumCircuit is initialized and stored.
ctypes (List[str]):
Specifies the types of the TQubits being added
params (List[Dict[str,int]]):
Contains a list of params such as d, where d is the number of
physical "data" qubits lining a row or column of the lattice.
name (str):
Useful when combining multiple TopologicalQubits together.
Prepended to all registers.
"""
ctypes = [] if ctypes is None else ctypes
params = [] if params is None else params
self.params = []
self.name = name
self.n = 0
# == None is necessary, as "not circ" is true for circ=QuantumCircuit()
self.circ = QuantumCircuit() if circ is None else circ
self.tqubits: Dict[str, Dict[int, Type[Any]]] = {}
self.add_tqubits("data", ctypes, params)
def add_tqubits(
self,
sub_register: str,
ctypes: Optional[List[str]] = None,
params: Optional[List[Dict[str, int]]] = None
) -> None:
"""
Args:
sub_register (str):
Specifies the subregister
ctypes (List[str]):
Specifies the types of the TQubits being added
params (List[Dict[str, int]]):
Contains a list of params for each TQubit being added
"""
params = [] if params is None else params
ctypes = [] if ctypes is None else ctypes
if len(params) != len(ctypes):
raise ValueError(
"Please match the number of params with the number of Topological Qubits added: Current number of params - " + str(len(params)) + ", Current number of Topological Qubits: " + str(len(ctypes))
)
for i in range(len(ctypes)):
if ctypes[i] not in str2qtype:
raise ValueError(
"Please choose a Topological Qubit type from: "
+ str(list(str2qtype.keys()))
)
for i in range(len(ctypes)):
self.add_tqubit(sub_register, ctypes[i], params[i])
def add_tqubit(self, sub_register: str, ctype: str, params: Dict[str, int]):
"""
Args:
sub_register (str):
Specifies the subregister
ctype (str):
Specifies the type of TQubit being added
params (Dict[str, int]):
Specifies the params for the Tqubit being added
"""
if sub_register not in self.tqubits:
self.tqubits[sub_register] = {}
self.tqubits[sub_register][self.n] = str2qtype[ctype](
params, name=self.name + "_" + str(self.n), circ=self.circ
)
self.params.append(params)
self.n += 1
def __getitem__(self, key: Union[str, int]):
"""
Return the sub-register stored under a string key, or the key-th TopologicalQubit across all sub-registers for an integer key.
"""
if isinstance(key, str):
return self.tqubits[key]
elif isinstance(key, int):
for _, sub_reg in self.tqubits.items():
if key in sub_reg:
return sub_reg[key]
raise ValueError("Key not found!")
class TopologicalCircuit:
"""
TopologicalCircuit is like a QuantumCircuit built on Topological Qubits.
Shares the same QuantumCircuit object created in TopologicalRegister.
"""
def __init__(self, treg: TopologicalRegister):
self.treg = treg
self.qreg: Dict[str, QuantumRegister] = {}
self.creg: Dict[str, ClassicalRegister] = {}
self.circ = treg.circ
def add_creg(self, size=None, name=None, bits=None, override: bool = False) -> None:
if name in self.creg and not override:
return
creg = ClassicalRegister(size=size, name=name, bits=bits)
self.creg[name] = creg
self.circ.add_register(creg)
def add_qreg(self, size=None, name=None, bits=None, override: bool = False) -> None:
if name in self.qreg and not override:
return
qreg = QuantumRegister(size=size, name=name, bits=bits)
self.qreg[name] = qreg
self.circ.add_register(qreg)
def _get_index(self, tqubit: Union[TopologicalQubit, int]) -> TopologicalQubit:
"""
Takes in either a TopologicalQubit or an int, and returns a TopologicalQubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
Returns:
tqubit (TopologicalQubit):
Returns the corresponding TopologicalQubit from treg
"""
if isinstance(tqubit, int):
tqubit = cast(int, tqubit)
tqubit = self.treg[tqubit]
tqubit = cast(TopologicalQubit, tqubit)
return tqubit
def stabilize(self, tqubit: Union[TopologicalQubit, int]):
"""
Run a single round of stabilization (entangle and measure) on the tqubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.stabilize()
def id(self, tqubit: Union[TopologicalQubit, int]) -> None:
"""
Inserts an identity on the data and syndrome qubits.
This allows us to create an isolated noise model by inserting errors only on identity gates.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.id()
def id_data(self, tqubit: Union[TopologicalQubit, int]) -> None:
"""
Inserts an identity on the data qubits only.
This allows us to create an isolated noise model by inserting errors only on identity gates.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.id_data()
def reset_x(self, tqubit: Union[TopologicalQubit, int]) -> None:
"""
Initialize/reset to a logical |x+> state on the tqubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.reset_x()
def reset_z(self, tqubit: Union[TopologicalQubit, int]):
"""
Initialize/reset to a logical |z+> state on the tqubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.reset_z()
def x(self, tqubit: Union[TopologicalQubit, int]):
"""
Logical X operator on the topological qubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.x()
def z(self, tqubit: Union[TopologicalQubit, int]):
"""
Logical Z operator on the topological qubit.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.z()
def cx(
self,
control: Union[TopologicalQubit, int],
target: Union[TopologicalQubit, int],
ancilla_ctype: Optional[str] = None,
ancilla_params: Optional[Dict[str, int]] = None
):
"""
CNOT operator on control and target topological qubit
Args:
control (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
target (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
ancilla_ctype (Optional[str]):
Specifies the logical type of ancilla bit
ancilla_params (Optional[Dict[str, int]]):
Specifies the parameters of the ancilla bit
"""
#default ctype and params
if (ancilla_ctype is not None) ^ (ancilla_params is not None):
raise ValueError("Please provide both a ctype and params or neither to use the control qubit ctype and params by default.")
elif ancilla_ctype is None:
control_q = self._get_index(control)
ancilla_ctype = type(control_q).__name__.replace('Qubit', '')
ancilla_params = control_q.lattice.params
# get qubits
control = self._get_index(control)
target = self._get_index(target)
if "ancilla" not in self.treg.tqubits:
self.treg.add_tqubit("ancilla", ancilla_ctype, ancilla_params)
ancilla = cast(TopologicalQubit, list(self.treg["ancilla"].values())[-1])
# prepare bits
self.add_creg(1, "m1")
self.add_creg(1, "m2")
self.add_creg(1, "m3")
# prepare ancilla
ancilla.reset_x()
# Z (control) x Z (ancilla)
self.add_qreg(1, "cnot_readout")
readout = self.qreg["cnot_readout"][0]
# Z x Z
self.circ.reset(readout)
control.cx(target=readout)
ancilla.cx(target=readout)
self.circ.measure(readout, self.creg["m1"][0])
# X x X
self.circ.reset(readout)
self.circ.h(readout)
target.cx(control=readout)
ancilla.cx(control=readout)
self.circ.h(readout)
self.circ.measure(readout, self.creg["m2"][0])
# Z
ancilla.readout_z(readout_creg=self.creg["m3"])
# classical conditioned
control.z_c_if(self.creg["m2"], 1)
target.x_c_if(self.creg["m1"], 1)
target.x_c_if(self.creg["m3"], 1)
def measure_x(
self,
tqubit: Union[TopologicalQubit, int],
readout_creg: Optional[ClassicalRegister] = None,
):
"""
Convenience method to read-out the logical-X projection.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.readout_x(readout_creg=readout_creg)
def measure_z(
self,
tqubit: Union[TopologicalQubit, int],
readout_creg: Optional[ClassicalRegister] = None,
):
"""
Convenience method to read-out the logical-Z projection.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.readout_z(readout_creg=readout_creg)
def measure_lattice_x(self, tqubit: Union[TopologicalQubit, int]):
"""
Readout all data qubits that constitute the lattice.
This readout can be used to extract a final round of stabilizer measurements,
as well as a logical X readout.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.lattice_readout_x()
def measure_lattice_z(self, tqubit: Union[TopologicalQubit, int]):
"""
Readout all data qubits that constitute the lattice.
This readout can be used to extract a final round of stabilizer measurements,
as well as a logical Z readout.
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
"""
tqubit = self._get_index(tqubit)
tqubit.lattice_readout_z()
def parse_readout(
self,
tqubit: Union[TopologicalQubit, int],
readout_string: str,
readout_type: Optional[str] = "Z",
) -> Tuple[int, Dict[str, List[Any]]]:
"""
Helper method to turn a result string (e.g. 1 10100000 10010000) into an
appropriate logical readout value and XOR-ed syndrome locations
according to our grid coordinate convention, based on the topological qubit of choice.
The implementation varies with different topological qubits,
but here's an example from the rotated surface code:
Args:
tqubit (Union[TopologicalQubit, int]):
Either already a TopologicalQubit or an int index in treg
readout_string (str):
Readout of the form "0 00000000 00000000" (logical_readout syndrome_1 syndrome_0)
or of the form "000000000 00000000 00000000" (lattice_readout syndrome_1 syndrome_0)
Returns:
logical_readout (int):
logical readout value
syndromes (Dict[str, List[TQubit]]):
key: syndrome type
value: (time, row, col) of parsed syndrome hits (changes between consecutive rounds)
"""
tqubit = self._get_index(tqubit)
return tqubit.parse_readout(readout_string, readout_type)
def draw(self, **kwargs):
"""
Convenience method to draw underlying quantum circuit.
"""
return self.circ.draw(**kwargs)
def __str__(self):
return self.circ.__str__()
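# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module).
# The ctype/params values are assumptions; the calls mirror the API above.
#
#   treg = TopologicalRegister(ctypes=["XXZZ", "XXZZ"], params=[{"d": 3}, {"d": 3}])
#   tcirc = TopologicalCircuit(treg)
#   tcirc.reset_z(0)
#   tcirc.reset_z(1)
#   tcirc.stabilize(0)
#   tcirc.x(0)
#   tcirc.cx(0, 1)        # logical CNOT mediated by an ancilla logical qubit
#   tcirc.measure_z(0)
#   tcirc.measure_z(1)
#   print(tcirc.draw(output="text"))
# ---------------------------------------------------------------------------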
```
#### File: qtcodes/tests/rotated_surface.py
```python
from abc import ABCMeta
import sys
import unittest
from qtcodes.common import constants
from tests.base import TestBase
sys.path.insert(0, "../")
from qtcodes import XXZZQubit, XZZXQubit, RotatedDecoder
class TestSquareXXZZ(TestBase, unittest.TestCase):
"""
Unit tests for the XXZZ (CSS) Rotated Surface Code
"""
encoder_type = XXZZQubit
def setUp(self):
self.params = {"d": (5, 5)}
self.params["T"] = 1
self.decoder = RotatedDecoder(self.params)
class TestRectangularXXZZ(TestBase, unittest.TestCase):
"""
Unit tests for the XXZZ (CSS) Rotated Surface Code
"""
encoder_type = XXZZQubit
def setUp(self):
self.params = {"d": (3, 5)}
self.params["T"] = 1
self.decoder = RotatedDecoder(self.params)
class Test1DXXZZ(TestBase, unittest.TestCase):
"""
Unit tests for the XXZZ (CSS) Rotated Surface Code
"""
encoder_type = XXZZQubit
def setUp(self):
self.params = {"d": (5, 1)}
self.params["T"] = 1
self.decoder = RotatedDecoder(self.params)
class TestXZZX(TestBase, metaclass=ABCMeta):
encoder_type = XZZXQubit
def get_neighbors(self, indx: int, error_type: str):
"""
Returns the syndrome node positions given some data qubit index
and error_type on that data qubit.
Args:
indx (int): index of data qubit
error_type (str): either "x" or "z" error on data qubit
Returns:
neighbors (List[Tuple[float]]):
List of neighboring syndrome nodes
that would be set off by the specified error
on the specified data qubit.
"""
d = self.params["d"]
dw = d[constants.DW]
row = indx // dw
col = indx % dw
valid_syndrome = lambda x: self.decoder._valid_syndrome(
x, "X"
) or self.decoder._valid_syndrome(x, "Z")
if error_type == "x":
neighbors = [
(0.0, row - 0.5, col + 0.5),
(0.0, row + 0.5, col - 0.5),
]
return [x for x in neighbors if valid_syndrome(x[1:])]
elif error_type == "z":
neighbors = [
(0.0, row - 0.5, col - 0.5),
(0.0, row + 0.5, col + 0.5),
]
return [x for x in neighbors if valid_syndrome(x[1:])]
return []
class TestSquareXZZX(TestXZZX, unittest.TestCase):
"""
Unit tests for the XZZX Rotated Surface Code
"""
def setUp(self):
self.params = {"d": (5, 5)}
self.params["T"] = 1
self.decoder = RotatedDecoder(self.params)
class TestRectangularXZZX(TestXZZX, unittest.TestCase):
"""
Unit tests for the XZZX Rotated Surface Code
"""
def setUp(self):
self.params = {"d": (3, 5)}
self.params["T"] = 1
self.decoder = RotatedDecoder(self.params)
# %%
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jeffreyjohnens/style_rank",
"score": 2
}
|
#### File: src/style_rank/api.py
```python
import os
import csv
import json
import numpy as np
import warnings
from scipy.stats import rankdata
from subprocess import call
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics.pairwise import cosine_distances
from sklearn.preprocessing import OneHotEncoder
# import c++ code
from ._style_rank import get_features_internal, get_feature_names_internal
def get_feature_names(tag="ORIGINAL"):
return get_feature_names_internal(tag)
# checking arguments ...
def validate_argument(x, name):
class domain:
def __init__(self,lb=None,ub=None,dom=None):
if lb is not None:
assert lb <= ub
self.lb=lb
self.ub=ub
self.dom=dom
def check(self,x):
if self.lb is not None:
return self.lb <= x <= self.ub
return x in self.dom
def __repr__(self):
if self.lb is not None:
return "[%d,%d]" % (self.lb, self.ub)
return str(self.dom)
TOTAL_UPPER_BOUND = 100000
recommend = {
"upper_bound" : domain(lb=100, ub=500),
"resolution" : domain(dom=[0,4,8,16]),
"n_estimators" : domain(lb=50,ub=500),
"max_depth" : domain(dom=[2,3]),
}
valid = {
"upper_bound" : domain(lb=1, ub=TOTAL_UPPER_BOUND),
"resolution" : domain(lb=0,ub=TOTAL_UPPER_BOUND),
"n_estimators" : domain(lb=1,ub=TOTAL_UPPER_BOUND),
"max_depth" : domain(lb=1,ub=TOTAL_UPPER_BOUND)
}
if not recommend[name].check(x):
args = (name, str(x), str(recommend[name]))
warnings.warn('%s=%s is outside of recommended range %s' % args)
if not valid[name].check(x):
args = (name, str(x), str(valid[name]))
raise ValueError('%s=%s is outside valid range %s' % args)
def validate_labels(labels):
"""ensure that labels are well formed.
Args:
labels (list): a list of integers in {0, 1}, where 0 marks the rank_set and 1 the style_set.
"""
if (labels==0).sum() == 0:
raise Exception("All rank_set were corrupt")
if (labels==1).sum() == 0:
raise Exception("All style_set were corrupt")
def validate_features(paths, labels, features):
"""ensure that the features are well formed with respect to the paths and labels.
Args:
paths (list): a list of midi filepaths.
labels (list): a list of integers in {0, 1}.
features (dict): a dictionary containing one or more features.
"""
if not type(features) == dict:
raise TypeError('Provided features are not a dictionary')
if len(features) == 0:
raise Exception('Provided features are empty')
is_numpy = np.all([type(v) == np.ndarray for v in features.values()])
is_size = np.all([len(v) == len(paths) for v in features.values()])
if not is_numpy or not is_size:
raise Exception('Each feature must be a matrix of size (len(paths),d)')
def validate_paths(paths, list_name=None):
"""ensure that paths are well formed.
Args:
paths (list): a list of midi filepaths
list_name (str): an identifier for the paths list
"""
valid_paths = []
indices = []
for i,path in enumerate(np.atleast_1d(paths)):
if not os.path.exists(path):
warnings.warn('{} does not exist.'.format(path))
elif not path.endswith(".mid"):
warnings.warn('{} is not a MIDI file (.mid).'.format(path))
else:
valid_paths.append(path)
indices.append(i)
if len(valid_paths) == 0:
if list_name is not None:
raise Exception('No valid filepaths provided in {}'.format(list_name))
raise Exception('No valid filepaths provided')
return np.array(valid_paths), np.array(indices)
def get_features(paths, upper_bound=500, feature_names=[], resolution=0, include_offsets=False):
"""extract features for a list of midis
Args:
paths (list): a list of midi filepaths.
upper_bound (int): the maximum cardinality of each categorical distribution.
feature_names (list): a list of features to extract
resolution (int): the number of divisions per beat for the quantization of time-based values. If resolution=0, no quantization will take place.
include_offsets (int): a boolean flag indicating if offsets will be considered for chord segment boundaries.
Returns:
fs (dict): a dictionary of categorical distributions (np.ndarray) indexed by feature name.
domains (dict): a dictionary of categorical domains (np.ndarray) indexed by feature name.
path_indices (np.ndarray): an integer array indexing the filepaths from which features were successfully extracted.
"""
validate_argument(upper_bound, "upper_bound")
validate_argument(resolution, "resolution")
paths, path_indices = validate_paths(paths)
feature_names = [f for f in feature_names if f in get_feature_names("ALL")]
(fs, domains, indices) = get_features_internal(paths, feature_names, upper_bound, resolution, include_offsets)
fs = {k : np.array(v).reshape(-1,len(domains[k])+1) for k,v in fs.items()}
domains = {k : np.array(v) for k,v in domains.items()}
path_indices = path_indices[np.array(indices)]
return fs, domains, path_indices
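# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative; the MIDI paths are placeholders).
# For each feature name, fs[name] is an (n_valid_paths, len(domains[name]) + 1)
# count matrix; the trailing column is the "remain" bucket for values that fell
# outside the capped categorical domain.
#
#   fs, domains, path_indices = get_features(["a.mid", "b.mid"], upper_bound=500)
#   for name, counts in fs.items():
#       print(name, counts.shape, len(domains[name]))
# ---------------------------------------------------------------------------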
def get_feature_csv(paths, output_dir, upper_bound=500, feature_names=[], resolution=0, include_offsets=False):
"""extract features for a list of midis and output to csv's
Args:
paths (list): a list of midi filepaths.
output_dir (str): a directory to store the feature .csv's
upper_bound (int): the maximum cardinality of each categorical distribution.
feature_names (list): a list of features to extract.
resolution (int): the number of divisions per beat for the quantization of time-based values. If resolution=0, no quantization will take place.
include_offsets (int): a boolean flag indicating if offsets will be considered for chord segment boundaries.
"""
data, domains, indices = get_features(
paths, upper_bound=upper_bound, feature_names=feature_names, resolution=resolution, include_offsets=include_offsets)
call(["mkdir", "-p", output_dir])
for k,v in data.items():
with open(os.path.join(output_dir, k) + ".csv", "w") as f:
w = csv.writer(f)
w.writerow(["filepath"] + list(domains[k]) + ["remain"])
for path, vv in zip(np.array(paths)[indices], v):
w.writerow([path] + list(vv))
def rf_embed(feature, labels, n_estimators=100, max_depth=3):
"""construct an embedding using a random forest.
Args:
feature (np.ndarray): a matrix of shape (len(labels),D) with D>0.
labels (list): a list of integers in {0, 1}
n_estimators (int): the number of trees in the random forest
max_depth (int): the maximum depth of each tree
Returns:
np.ndarray: a matrix containing all pairwise similarities for a single categorical distribution (feature).
"""
clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, bootstrap=True, criterion='entropy', class_weight='balanced')
clf.fit(feature, labels)
leaves = clf.apply(feature)
embedded = np.array(
OneHotEncoder(categories='auto').fit_transform(leaves).todense())
return 1. - cosine_distances(embedded)
def get_similarity_matrix(rank_set, style_set, raw_features=None, upper_bound=500, n_estimators=100, max_depth=3, return_paths_and_labels=False, resolution=0, include_offsets=False, feature_names=[]):
"""construct a similarity matrix
Args:
rank_set (list/np.ndarray): a list/array of midis to be ranked.
style_set (list/np.ndarray): a list/array of midis to define the style.
raw_features (dict): a dictionary of categorical distributions (np.ndarray) indexed by feature name.
upper_bound (int): the maximum cardinality of each categorical distribution.
n_estimators (int): the number of trees in the random forest.
max_depth (int): the maximum depth of each tree.
return_paths_and_labels (int): a boolean flag indicating whether these items should be returned or not
resolution (int): the number of divisions per beat for the quantization of time-based values. If resolution=0, no quantization will take place.
include_offsets (int): a boolean flag indicating if offsets will be considered for chord segment boundaries.
feature_names (list): a list of features to extract. if feature_names=[] all features will be used.
Returns:
sim_mat (np.ndarray): a matrix containing all pairwise similarities.
paths (np.ndarray) : an array of midi filepaths corresponding to each row/col in the similarity matrix.
labels (np.ndarray): an array of labels corresponding to each row/col in the similarity matrix.
"""
validate_argument(n_estimators, "n_estimators")
validate_argument(max_depth, "max_depth")
# create paths and labels
rank_set,_ = validate_paths(rank_set, list_name="rank_set")
style_set,_ = validate_paths(style_set, list_name="style_set")
paths = np.hstack([rank_set, style_set])
labels = np.array([0] * len(rank_set) + [1] * len(style_set))
# extract features
if raw_features is None:
features, _, indices = get_features(paths, upper_bound=upper_bound, resolution=resolution, include_offsets=include_offsets, feature_names=feature_names)
labels = labels[indices]
else:
validate_features(paths, labels, raw_features)
indices = np.arange(len(paths))
features = raw_features
# ensure style_set and rank_set were parsed
validate_labels(labels)
# create embedding via trained random forests
sim_mat = np.zeros((len(labels), len(labels)))
for _, feature in features.items():
sim_mat += rf_embed(feature, labels, n_estimators=n_estimators, max_depth=max_depth)
sim_mat /= len(features)
if return_paths_and_labels:
return sim_mat, paths[indices], labels
return sim_mat
def rank(rank_set, style_set, raw_features=None, upper_bound=500, n_estimators=100, max_depth=3, return_similarity=False, resolution=0, include_offsets=False, feature_names=[], json_path=None):
"""construct a similarity matrix
Args:
rank_set (list/np.ndarray): a list/array of midis to be ranked.
style_set (list/np.ndarray): a list/array of midis to define the style.
raw_features (dict): a dictionary of categorical distributions (np.ndarray) indexed by feature name; if None, features are extracted from the provided MIDI files.
upper_bound (int): the maximum cardinality of each categorical distribution.
n_estimators (int): the number of trees in the random forest.
max_depth (int): the maximum depth of each tree.
return_similarity (bool) : return the cosine similarity to the corpus for each ranked MIDI.
resolution (int): the number of divisions per beat for the quantization of time-based values. If resolution=0, no quantization will take place.
include_offsets (int): a boolean flag indicating if offsets will be considered for chord segment boundaries.
feature_names (list): a list of features to extract. if feature_names=[] all features will be used.
json_path (str): if not None, the ranks will be written to a .json file.
Returns:
paths (np.ndarray): an array containing the rank_set sorted from most to least stylistically similar to the corpus.
"""
sim_mat,paths,labels = get_similarity_matrix(rank_set, style_set, upper_bound=upper_bound, n_estimators=n_estimators, max_depth=max_depth, return_paths_and_labels=True, raw_features=raw_features, resolution=resolution, include_offsets=include_offsets, feature_names=feature_names)
sims = sim_mat[labels==0][:,labels==1].sum(1)
order = np.argsort(sims)[::-1]
output = list(zip(paths[order], sims[order]))
if json_path is not None:
with open(json_path, "w") as f:
f.write(json.dumps(
{str(p):float(d) for p,d in sorted(output,key=lambda x:x[1])},
indent=4))
if return_similarity:
return output
return paths[order]
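# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative; the file names are placeholders).
# `rank` orders the rank_set from most to least stylistically similar to the
# style_set corpus, optionally dumping the similarity scores to JSON.
#
#   ranked = rank(["candidate_a.mid", "candidate_b.mid"],
#                 ["corpus_1.mid", "corpus_2.mid", "corpus_3.mid"],
#                 json_path="ranks.json")
#   print(ranked)
# ---------------------------------------------------------------------------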
```
|
{
"source": "jeffreykennethli/ludwig",
"score": 2
}
|
#### File: tests/integration_tests/test_visualization_api.py
```python
import glob
import logging
import os
from tempfile import TemporaryDirectory
import numpy as np
import pandas as pd
import pytest
from ludwig import visualize
from ludwig.api import LudwigModel
from ludwig.constants import NAME, PREDICTIONS, PROBABILITIES, PROBABILITY, SPLIT, TRAINER
from ludwig.data.preprocessing import get_split
from ludwig.utils.data_utils import read_csv, split_dataset_ttv
from tests.integration_tests.utils import (
bag_feature,
binary_feature,
category_feature,
generate_data,
number_feature,
sequence_feature,
set_feature,
text_feature,
)
def run_api_experiment(input_features, output_features):
"""Helper method to avoid code repetition in running an experiment.
:param input_features: input schema
:param output_features: output schema
:return: None
"""
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat", "output_size": 14},
TRAINER: {"epochs": 2},
}
model = LudwigModel(config)
return model
@pytest.fixture(scope="module")
def experiment_to_use():
with TemporaryDirectory() as tmpdir:
experiment = Experiment("data_for_test.csv", tmpdir)
return experiment
class Experiment:
"""Helper class to create model test data, setup and run experiment.
Contain the needed model experiment statistics as class attributes.
"""
def __init__(self, csv_filename, tmpdir):
self.tmpdir = tmpdir
self.csv_file = os.path.join(tmpdir, csv_filename)
self.input_features = [category_feature(vocab_size=10)]
self.output_features = [category_feature(vocab_size=2, reduce_input="sum")]
data_csv = generate_data(self.input_features, self.output_features, self.csv_file)
self.model = self._create_model()
test_df, train_df, val_df = obtain_df_splits(data_csv)
(self.train_stats, self.preprocessed_data, self.output_dir) = self.model.train(
training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpdir, "results")
)
self.test_stats_full, predictions, self.output_dir = self.model.evaluate(
dataset=test_df,
collect_overall_stats=True,
collect_predictions=True,
output_directory=self.output_dir,
return_type="dict",
)
self.output_feature_name = self.output_features[0][NAME]
self.ground_truth_metadata = self.preprocessed_data[3]
self.ground_truth = test_df[self.output_feature_name]
# probabilities need to be list of lists containing each row data
# from the probability columns
# ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate - Return
self.probability = predictions[self.output_feature_name][PROBABILITY]
self.probabilities = predictions[self.output_feature_name][PROBABILITIES]
self.predictions = predictions[self.output_feature_name][PREDICTIONS]
# numeric encoded values required for some visualizations
of_metadata = self.ground_truth_metadata[self.output_feature_name]
self.predictions_num = [of_metadata["str2idx"][x] for x in self.predictions]
def _create_model(self):
"""Configure and setup test model."""
config = {
"input_features": self.input_features,
"output_features": self.output_features,
"combiner": {"type": "concat", "output_size": 14},
TRAINER: {"epochs": 2},
}
return LudwigModel(config, logging_level=logging.WARN)
def obtain_df_splits(data_csv):
"""Split input data csv file in to train, validation and test dataframes.
:param data_csv: Input data CSV file.
:return test_df, train_df, val_df: Train, validation and test dataframe
splits
"""
data_df = read_csv(data_csv)
# Obtain data split array mapping data rows to split type
# 0-train, 1-validation, 2-test
data_df[SPLIT] = get_split(data_df)
train_split, test_split, val_split = split_dataset_ttv(data_df, SPLIT)
# Splits are Python dictionaries, not DataFrames, so they need to be converted.
test_df = pd.DataFrame(test_split)
train_df = pd.DataFrame(train_split)
val_df = pd.DataFrame(val_split)
return test_df, train_df, val_df
def test_learning_curves_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.learning_curves(
[experiment.train_stats], output_feature_name=None, output_directory=tmpvizdir, file_format=viz_output
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 3 == len(figure_cnt)
def test_compare_performance_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
# extract test stats only
test_stats = experiment.test_stats_full
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_performance(
[test_stats, test_stats],
output_feature_name=None,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifier_performance_from_prob_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probability = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_performance_from_prob(
[probability, probability],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[0],
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifier_performance_from_pred_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
prediction = experiment.predictions
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_performance_from_pred(
[prediction, prediction],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifiers_performance_subset_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_performance_subset(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[6],
labels_limit=0,
subset="ground_truth",
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifiers_performance_changing_k_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_performance_changing_k(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_k=3,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifiers_multiclass_multimetric_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
# extract test stats only
test_stats = experiment.test_stats_full
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_multiclass_multimetric(
[test_stats, test_stats],
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[6],
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 4 == len(figure_cnt)
def test_compare_classifiers_predictions_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
predictions = experiment.predictions
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_predictions(
[predictions, predictions],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_compare_classifiers_predictions_distribution_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
predictions = experiment.predictions_num
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.compare_classifiers_predictions_distribution(
[predictions, predictions],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_confidence_thresholding_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.confidence_thresholding(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_confidence_thresholding_data_vs_acc_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.confidence_thresholding_data_vs_acc(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_confidence_thresholding_data_vs_acc_subset_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.confidence_thresholding_data_vs_acc_subset(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[3],
labels_limit=0,
subset="ground_truth",
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_confidence_thresholding_data_vs_acc_subset_per_class_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.confidence_thresholding_data_vs_acc_subset_per_class(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[3],
labels_limit=0,
subset="ground_truth",
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
# 3 figures should be saved because experiment setting top_n_classes = 3
# hence one figure per class
assert 3 == len(figure_cnt)
def test_confidence_thresholding_2thresholds_2d_vis_api(csv_filename):
"""Ensure pdf and png figures can be saved via visualization API call.
:param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename
:return: None
"""
input_features = [
text_feature(vocab_size=10, min_len=1, encoder="stacked_cnn"),
number_feature(),
category_feature(vocab_size=10, embedding_size=5),
set_feature(),
sequence_feature(vocab_size=10, max_len=10, encoder="embed"),
]
output_features = [
category_feature(vocab_size=2, reduce_input="sum"),
category_feature(vocab_size=2, reduce_input="sum"),
]
encoder = "parallel_cnn"
with TemporaryDirectory() as tmpvizdir:
# Generate test data
data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))
input_features[0]["encoder"] = encoder
model = run_api_experiment(input_features, output_features)
test_df, train_df, val_df = obtain_df_splits(data_csv)
_, _, output_dir = model.train(
training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpvizdir, "results")
)
test_stats, predictions, _ = model.evaluate(dataset=test_df, collect_predictions=True, output_directory=output_dir)
output_feature_name1 = output_features[0]["name"]
output_feature_name2 = output_features[1]["name"]
ground_truth_metadata = model.training_set_metadata
feature1_cols = [
f"{output_feature_name1}_probabilities_{label}"
for label in ground_truth_metadata[output_feature_name1]["idx2str"]
]
feature2_cols = [
f"{output_feature_name2}_probabilities_{label}"
for label in ground_truth_metadata[output_feature_name2]["idx2str"]
]
# probabilities need to be list of lists containing each row data from the
# probability columns ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate
probability1 = predictions.loc[:, feature1_cols].values
probability2 = predictions.loc[:, feature2_cols].values
target_predictions1 = test_df[output_feature_name1]
target_predictions2 = test_df[output_feature_name2]
ground_truth1 = np.asarray(
[ground_truth_metadata[output_feature_name1]["str2idx"][prediction] for prediction in target_predictions1]
)
ground_truth2 = np.asarray(
[ground_truth_metadata[output_feature_name2]["str2idx"][prediction] for prediction in target_predictions2]
)
viz_outputs = ("pdf", "png")
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(output_dir, "*.{}").format(viz_output)
visualize.confidence_thresholding_2thresholds_2d(
[probability1, probability2],
[ground_truth1, ground_truth2],
model.training_set_metadata,
[output_feature_name1, output_feature_name2],
labels_limit=0,
model_names=["Model1"],
output_directory=output_dir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 3 == len(figure_cnt)
def test_confidence_thresholding_2thresholds_3d_vis_api(csv_filename):
"""Ensure pdf and png figures can be saved via visualization API call.
:param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename
:return: None
"""
input_features = [
text_feature(vocab_size=10, min_len=1, encoder="stacked_cnn"),
number_feature(),
category_feature(vocab_size=10, embedding_size=5),
set_feature(),
sequence_feature(vocab_size=10, max_len=10, encoder="embed"),
]
output_features = [
category_feature(vocab_size=2, reduce_input="sum"),
category_feature(vocab_size=2, reduce_input="sum"),
]
encoder = "parallel_cnn"
with TemporaryDirectory() as tmpvizdir:
# Generate test data
data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))
input_features[0]["encoder"] = encoder
model = run_api_experiment(input_features, output_features)
test_df, train_df, val_df = obtain_df_splits(data_csv)
_, _, output_dir = model.train(
training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpvizdir, "results")
)
test_stats, predictions, _ = model.evaluate(
dataset=test_df, collect_predictions=True, output_directory=output_dir
)
output_feature_name1 = output_features[0]["name"]
output_feature_name2 = output_features[1]["name"]
ground_truth_metadata = model.training_set_metadata
feature1_cols = [
f"{output_feature_name1}_probabilities_{label}"
for label in ground_truth_metadata[output_feature_name1]["idx2str"]
]
feature2_cols = [
f"{output_feature_name2}_probabilities_{label}"
for label in ground_truth_metadata[output_feature_name2]["idx2str"]
]
# probabilities need to be list of lists containing each row data from the
# probability columns ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate
probability1 = predictions.loc[:, feature1_cols].values
probability2 = predictions.loc[:, feature2_cols].values
target_predictions1 = test_df[output_feature_name1]
target_predictions2 = test_df[output_feature_name2]
ground_truth1 = np.asarray(
[ground_truth_metadata[output_feature_name1]["str2idx"][prediction] for prediction in target_predictions1]
)
ground_truth2 = np.asarray(
[ground_truth_metadata[output_feature_name2]["str2idx"][prediction] for prediction in target_predictions2]
)
viz_outputs = ("pdf", "png")
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(output_dir, f"*.{viz_output}")
visualize.confidence_thresholding_2thresholds_3d(
[probability1, probability2],
[ground_truth1, ground_truth2],
model.training_set_metadata,
[output_feature_name1, output_feature_name2],
labels_limit=0,
output_directory=output_dir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_binary_threshold_vs_metric_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
metrics = ["accuracy"]
positive_label = 2
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.binary_threshold_vs_metric(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
metrics,
positive_label,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_roc_curves_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
positive_label = 2
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.roc_curves(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
positive_label,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_roc_curves_from_test_statistics_vis_api(csv_filename):
"""Ensure pdf and png figures can be saved via visualization API call.
:param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename
:return: None
"""
input_features = [binary_feature(), bag_feature()]
output_features = [binary_feature()]
with TemporaryDirectory() as tmpvizdir:
# Generate test data
data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))
output_feature_name = output_features[0]["name"]
model = run_api_experiment(input_features, output_features)
data_df = read_csv(data_csv)
_, _, output_dir = model.train(dataset=data_df, output_directory=os.path.join(tmpvizdir, "results"))
# extract test metrics
test_stats, _, _ = model.evaluate(dataset=data_df, collect_overall_stats=True, output_directory=output_dir)
viz_outputs = ("pdf", "png")
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(output_dir, f"*.{viz_output}")
visualize.roc_curves_from_test_statistics(
[test_stats, test_stats],
output_feature_name,
model_names=["Model1", "Model2"],
output_directory=output_dir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 1 == len(figure_cnt)
def test_calibration_1_vs_all_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = os.path.join(tmpvizdir, f"*.{viz_output}")
visualize.calibration_1_vs_all(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[6],
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 7 == len(figure_cnt)
def test_calibration_multiclass_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
probabilities = experiment.probabilities
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.calibration_multiclass(
[probabilities, probabilities],
experiment.ground_truth,
experiment.ground_truth_metadata,
experiment.output_feature_name,
labels_limit=0,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 2 == len(figure_cnt)
def test_confusion_matrix_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
# extract test stats only
test_stats = experiment.test_stats_full
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.confusion_matrix(
[test_stats, test_stats],
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[0],
normalize=False,
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 4 == len(figure_cnt)
def test_frequency_vs_f1_vis_api(experiment_to_use):
"""Ensure pdf and png figures can be saved via visualization API call.
:param experiment_to_use: Object containing trained model and results to
test visualization
:return: None
"""
experiment = experiment_to_use
# extract test stats
test_stats = experiment.test_stats_full
viz_outputs = ("pdf", "png")
with TemporaryDirectory() as tmpvizdir:
for viz_output in viz_outputs:
vis_output_pattern_pdf = tmpvizdir + f"/*.{viz_output}"
visualize.frequency_vs_f1(
[test_stats, test_stats],
experiment.ground_truth_metadata,
experiment.output_feature_name,
top_n_classes=[0],
model_names=["Model1", "Model2"],
output_directory=tmpvizdir,
file_format=viz_output,
)
figure_cnt = glob.glob(vis_output_pattern_pdf)
assert 2 == len(figure_cnt)
def test_hyperopt_report_vis_api(hyperopt_results):
with TemporaryDirectory() as tmpdir:
vis_dir = os.path.join(tmpdir, "visualizations")
visualize.hyperopt_report(os.path.join(hyperopt_results, "hyperopt_statistics.json"), output_directory=vis_dir)
# test for creation of output directory
assert os.path.isdir(vis_dir)
figure_cnt = glob.glob(os.path.join(vis_dir, "*"))
assert 4 == len(figure_cnt)
def test_hyperopt_hiplot_vis_api(hyperopt_results):
with TemporaryDirectory() as tmpdir:
vis_dir = os.path.join(tmpdir, "visualizations")
visualize.hyperopt_hiplot(os.path.join(hyperopt_results, "hyperopt_statistics.json"), output_directory=vis_dir)
# test for creation of output directory
assert os.path.isdir(vis_dir)
# test for generated html page
assert os.path.isfile(os.path.join(vis_dir, "hyperopt_hiplot.html"))
```
|
{
"source": "jeffreykennethli/modin",
"score": 2
}
|
#### File: pyarrow_on_ray/partitioning/partition.py
```python
import ray
import pyarrow
from modin.core.execution.ray.implementations.pandas_on_ray.partitioning.partition import (
PandasOnRayDataframePartition,
)
class PyarrowOnRayDataframePartition(PandasOnRayDataframePartition):
"""
Class provides partition interface specific for PyArrow storage format and Ray engine.
Inherits functionality from the ``PandasOnRayDataframePartition`` class.
Parameters
----------
object_id : ray.ObjectRef
A reference to ``pyarrow.Table`` that needs to be wrapped with this class.
length : ray.ObjectRef or int, optional
Length or reference to it of wrapped ``pyarrow.Table``.
width : ray.ObjectRef or int, optional
Width or reference to it of wrapped ``pyarrow.Table``.
ip : ray.ObjectRef or str, optional
Node IP address or reference to it that holds wrapped ``pyarrow.Table``.
call_queue : list, optional
Call queue that needs to be executed on wrapped ``pyarrow.Table``.
"""
@classmethod
def put(cls, obj):
"""
Put an object in the Plasma store and wrap it in this object.
Parameters
----------
obj : object
The object to be put.
Returns
-------
PyarrowOnRayDataframePartition
A ``PyarrowOnRayDataframePartition`` object.
"""
return PyarrowOnRayDataframePartition(ray.put(pyarrow.Table.from_pandas(obj)))
@classmethod
def _length_extraction_fn(cls):
"""
Return the callable that extracts the number of rows from the given ``pyarrow.Table``.
Returns
-------
callable
"""
return lambda table: table.num_rows
@classmethod
def _width_extraction_fn(cls):
"""
Return the callable that extracts the number of columns from the given ``pyarrow.Table``.
Returns
-------
callable
"""
return lambda table: table.num_columns - (1 if "index" in table.columns else 0)
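# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module).
# It assumes a Ray runtime has already been started (e.g. via ray.init()); `put`
# converts a pandas DataFrame to a pyarrow.Table and stores it in the Ray
# object store, while the extraction callables above are what the inherited
# length/width machinery presumably uses to size the wrapped table lazily.
#
#   import pandas
#   part = PyarrowOnRayDataframePartition.put(pandas.DataFrame({"a": [1, 2, 3]}))
# ---------------------------------------------------------------------------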
```
|
{
"source": "jeffreykrodgers/jr_boggle_solver",
"score": 3
}
|
#### File: jeffreykrodgers/jr_boggle_solver/jr_tester.py
```python
import sys
from jr_boggler import solve
def comp(list1, list2):
"""Return True if any word in list1 also appears in list2."""
for val in list1:
if val in list2:
return True
return False
puzzle = open("boards/boggle_board_10x10.txt").read()
key = open("solutions/boggle_board_10x10.txt").read().lower()
solution = solve(puzzle)
winner = comp(solution, key)
for word in solution:
sys.stdout.write(word + "\n")
if winner:
sys.stdout.write("This Boggler is working correctly.\n")
give_jeff_a_job = True
else:
sys.stdout.write("Please check the Boggler, something is wrong!\n")
```
|
{
"source": "jeffreykthomas/capstone-emotion-recognition",
"score": 3
}
|
#### File: capstone-emotion-recognition/src/conditional_gan.py
```python
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# from tensorflow_docs.vis import embed
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
import imageio
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--mode", help="save/train")
mode = ap.parse_args().mode
"""
## Constants and hyperparameters
"""
batch_size = 32
num_channels = 3
num_classes = 5
image_size = 64
latent_dim = 128
num_epoch = 400
"""
## Loading the AffectNet dataset and preprocessing it
"""
train_datagen = ImageDataGenerator(
rescale=1. / 255,
)
val_datagen = ImageDataGenerator(rescale=1. / 255)
df_train = pd.read_pickle('df_train.pkl')
df_val = pd.read_pickle('df_val.pkl')
df = pd.concat([df_val, df_train])
df = df[(df['y_col'] != 'Contempt') & (df['y_col'] != 'Disgust') & (df['y_col'] != 'Fear')]
train_generator = train_datagen.flow_from_dataframe(
df,
x_col='x_col',
y_col='y_col',
target_size=(64, 64),
batch_size=batch_size,
color_mode="rgb",
class_mode='categorical'
)
num_train = len(df)
dataset = tf.data.Dataset.from_generator(
lambda: train_generator,
output_types=(tf.float32, tf.float32),
output_shapes=([None, image_size, image_size, num_channels], [None, num_classes])
)
# dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
"""
## Calculating the number of input channel for the generator and discriminator
In a regular (unconditional) GAN, we start by sampling noise (of some fixed
dimension) from a normal distribution. In our case, we also need to account
for the class labels. We will have to add the number of classes to
the input channels of the generator (noise input) as well as the discriminator
(generated image input).
"""
generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
print(generator_in_channels, discriminator_in_channels)
# Create the discriminator.
discriminator = keras.Sequential(
[
keras.Input(shape=(64, 64, discriminator_in_channels)),
layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.25),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.BatchNormalization(momentum=0.8),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.25),
layers.Conv2D(256, kernel_size=3, strides=1, padding="same"),
layers.BatchNormalization(momentum=0.8),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(1, activation="sigmoid"),
],
name="discriminator",
)
# Create the generator.
generator = keras.Sequential(
[
keras.Input(shape=(generator_in_channels,)),
layers.Dense(8 * 8 * generator_in_channels),
layers.Reshape((8, 8, generator_in_channels)),
layers.UpSampling2D(),
layers.Conv2D(128, kernel_size=3, padding="same"),
layers.BatchNormalization(momentum=0.8),
layers.ReLU(),
layers.UpSampling2D(),
layers.Conv2D(256, kernel_size=3, padding="same"),
layers.BatchNormalization(momentum=0.8),
layers.ReLU(),
layers.UpSampling2D(),
layers.Conv2D(512, kernel_size=3, padding="same"),
layers.BatchNormalization(momentum=0.8),
layers.ReLU(),
layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid")
],
name="generator",
)
"""
## Creating a `ConditionalGAN` model
"""
class ConditionalGAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super(ConditionalGAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.gen_loss_tracker = keras.metrics.Mean(name="generator_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="discriminator_loss")
@property
def metrics(self):
return [self.gen_loss_tracker, self.disc_loss_tracker]
def compile(self, d_optimizer, g_optimizer, loss_fn):
super(ConditionalGAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, data):
# Unpack the data.
real_images, one_hot_labels = data
# Add dummy dimensions to the labels so that they can be concatenated with
# the images. This is for the discriminator.
image_one_hot_labels = one_hot_labels[:, :, None, None]
image_one_hot_labels = tf.repeat(
image_one_hot_labels, repeats=[image_size * image_size]
)
image_one_hot_labels = tf.reshape(
image_one_hot_labels, (-1, image_size, image_size, num_classes)
)
# Sample random points in the latent space and concatenate the labels.
# This is for the generator.
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
random_vector_labels = tf.concat(
[random_latent_vectors, one_hot_labels], axis=1
)
# Decode the noise (guided by labels) to fake images.
generated_images = self.generator(random_vector_labels)
# Combine them with real images. Note that we are concatenating the labels
# with these images here.
fake_image_and_labels = tf.concat([generated_images, image_one_hot_labels], -1)
real_image_and_labels = tf.concat([real_images, image_one_hot_labels], -1)
combined_images = tf.concat(
[fake_image_and_labels, real_image_and_labels], axis=0
)
# Assemble labels discriminating real from fake images.
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Train the discriminator.
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space.
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
random_vector_labels = tf.concat(
[random_latent_vectors, one_hot_labels], axis=1
)
# Assemble labels that say "all real images".
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
fake_images = self.generator(random_vector_labels)
fake_image_and_labels = tf.concat([fake_images, image_one_hot_labels], -1)
predictions = self.discriminator(fake_image_and_labels)
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Monitor loss.
self.gen_loss_tracker.update_state(g_loss)
self.disc_loss_tracker.update_state(d_loss)
return {
"g_loss": self.gen_loss_tracker.result(),
"d_loss": self.disc_loss_tracker.result(),
}
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=3, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
def on_epoch_end(self, epoch, logs=None):
num_rows = 4
num_cols = 5
label_mapper = {v: k for k, v in train_generator.class_indices.items()}
# label_array = [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]
label_array = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
plt.figure(figsize=(num_cols * 2.0, num_rows * 2.0))
for row in range(num_rows):
for col in range(num_cols):
index = row * num_cols + col
plt.subplot(num_rows, num_cols, index + 1)
label = keras.utils.to_categorical([label_array[index]], num_classes)
label = tf.cast(label, tf.float32)
noise = tf.random.normal(shape=(1, latent_dim))
noise_and_label = tf.concat([noise, label], 1)
generated_image = self.model.generator(noise_and_label)
plt.gca().set_title(label_mapper[label_array[index]])
plt.imshow(generated_image[0])
plt.axis("off")
plt.tight_layout()
plt.savefig('conditional_gan/5cat/images/generated_img_%d.png' % (epoch + 44))
plt.close()
filepath = 'conditional_gan/5cat/checkpoints/model_checkpoint_{epoch:02d}'
epochCheckpoint = keras.callbacks.ModelCheckpoint(
filepath,
monitor='g_loss',
verbose=1,
save_weights_only=True,
save_best_only=False,
mode='min'
)
"""
## Training the Conditional GAN
"""
cond_gan = ConditionalGAN(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
loss_fn=keras.losses.BinaryCrossentropy(),
)
"""
## Interpolating between classes with the trained generator
"""
def test():
# We first extract the trained generator from our Conditional GAN.
trained_gen = cond_gan.generator
num_rows = 4
num_cols = 6
label_mapper = {v: k for k, v in train_generator.class_indices.items()}
label_array = [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]
plt.figure(figsize=(num_cols * 2.0, num_rows * 2.0))
for row in range(num_rows):
for col in range(num_cols):
index = row * num_cols + col
plt.subplot(num_rows, num_cols, index + 1)
label = keras.utils.to_categorical([label_array[index]], num_classes)
label = tf.cast(label, tf.float32)
noise = tf.random.normal(shape=(1, latent_dim))
noise_and_label = tf.concat([noise, label], 1)
generated_image = trained_gen(noise_and_label)
plt.gca().set_title(label_mapper[label_array[index]])
image = keras.preprocessing.image.array_to_img(generated_image[0])
plt.imshow(image)
plt.axis("off")
plt.tight_layout()
plt.savefig('conditional_gan/images/generated_img_%d.png' % 25)
plt.close()
# Choose the number of intermediate images that would be generated in
# between the interpolation + 2 (start and last images).
# num_interpolation = 9 # @param {type:"integer"}
#
# # Sample noise for the interpolation.
# interpolation_noise = tf.random.normal(shape=(1, latent_dim))
# interpolation_noise = tf.repeat(interpolation_noise, repeats=num_interpolation)
# interpolation_noise = tf.reshape(interpolation_noise, (num_interpolation, latent_dim))
#
# def interpolate_class(first_number, second_number):
# # Convert the start and end labels to one-hot encoded vectors.
# first_label = keras.utils.to_categorical([first_number], num_classes)
# second_label = keras.utils.to_categorical([second_number], num_classes)
# first_label = tf.cast(first_label, tf.float32)
# second_label = tf.cast(second_label, tf.float32)
#
# # Calculate the interpolation vector between the two labels.
# percent_second_label = tf.linspace(0, 1, num_interpolation)[:, None]
# percent_second_label = tf.cast(percent_second_label, tf.float32)
# interpolation_labels = (
# first_label * (1 - percent_second_label) + second_label * percent_second_label
# )
#
# # Combine the noise and the labels and run inference with the generator.
# noise_and_labels = tf.concat([interpolation_noise, interpolation_labels], 1)
# fake = trained_gen.predict(noise_and_labels)
# return fake
#
# start_class = 1 # @param {type:"slider", min:0, max:9, step:1}
# end_class = 5 # @param {type:"slider", min:0, max:9, step:1}
#
# fake_images = interpolate_class(start_class, end_class)
#
# """
# Here, we first sample noise from a normal distribution and then we repeat that for
# `num_interpolation` times and reshape the result accordingly.
# We then distribute it uniformly for `num_interpolation`
# with the label indentities being present in some proportion.
# """
#
# fake_images *= 255.0
# converted_images = fake_images.astype(np.uint8)
# converted_images = tf.image.resize(converted_images, (96, 96)).numpy().astype(np.uint8)
# imageio.mimsave("conditional_gan/images/animation.gif", converted_images, fps=1)
# for i in range(converted_images):
# img = keras.preprocessing.image.array_to_img(converted_images[i])
# img.save("conditional_gan/images/generated_img_%d.png" % i)
def train_gan():
cond_gan.fit(
dataset,
epochs=num_epoch,
steps_per_epoch=num_train // batch_size,
callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim), epochCheckpoint]
)
if mode == 'train':
train_gan()
elif mode == 'grow':
cond_gan.load_weights('conditional_gan/checkpoints/model_checkpoint_08')
train_gan()
elif mode == 'create':
label_mapper = {v: k for k, v in test_generator.class_indices.items()}
label_array = [0, 1, 2, 3, 4, 5, 6, 7]
num_images = 800
array = []
for n in range(num_images):
label = keras.utils.to_categorical([label_array[n % 8]], num_classes)
label = tf.cast(label, tf.float32)
noise = tf.random.normal(shape=(1, latent_dim))
noise_and_label = tf.concat([noise, label], 1)
generated_image = model.generator(noise_and_label, training=False)
image = keras.preprocessing.image.array_to_img(generated_image[0])
prediction = fer_model.predict(generated_image)[0]
predicted_class = np.argmax(prediction, axis=1)
if predicted_class == (n % 8):
filepath = 'data/gen_images/1/generated_img_%d.jpeg' % n
image.save(filepath)
label_name = label_mapper[label_array[n % 8]]
array.append([filepath, label_name])
df_generated = pd.DataFrame(data=array, columns=['x_col', 'y_col'])
df_generated.to_pickle('df_generated_1.pkl')
else:
cond_gan.load_weights('conditional_gan/checkpoints/model_checkpoint_19')
test()
```
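As a quick illustration of the conditioning scheme used above: the generator's input is a latent vector with the one-hot class label concatenated onto it, which is why `generator_in_channels = latent_dim + num_classes`. A minimal sketch of assembling that input (runnable on its own; it does not need the trained `cond_gan`):
```python
import tensorflow as tf
from tensorflow import keras

latent_dim, num_classes = 128, 5  # same constants as the script above

label = keras.utils.to_categorical([2], num_classes)  # one-hot class 2, shape (1, 5)
label = tf.cast(label, tf.float32)
noise = tf.random.normal(shape=(1, latent_dim))        # shape (1, 128)
noise_and_label = tf.concat([noise, label], axis=1)

print(noise_and_label.shape)  # (1, 133) == (1, latent_dim + num_classes)
# A trained generator is then called as cond_gan.generator(noise_and_label).
```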
#### File: capstone-emotion-recognition/src/resize_fer.py
```python
import tensorflow as tf
import os
import math
import numpy as np
import cv2
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import array_to_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing import image_dataset_from_directory
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import PIL
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--mode", help="train/save")
mode = ap.parse_args().mode
root_dir = os.path.join("data/BSR", "BSDS500/data")
crop_size = 300
upscale_factor = 4
input_size = crop_size // upscale_factor
batch_size = 8
train_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="training",
seed=1337,
label_mode=None,
)
valid_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="validation",
seed=1337,
label_mode=None,
)
def scaling(input_image):
input_image = input_image / 255.0
return input_image
# Scale from (0, 255) to (0, 1)
train_ds = train_ds.map(scaling)
valid_ds = valid_ds.map(scaling)
dataset = os.path.join(root_dir, "images")
test_path = os.path.join(dataset, "test")
test_img_paths = sorted(
[
os.path.join(test_path, fname)
for fname in os.listdir(test_path)
if fname.endswith(".jpg")
]
)
fer_path = os.path.join('data/fer2013/images')
fer_img_paths = sorted([
os.path.join(fer_path, fname)
for fname in os.listdir(fer_path)
if fname.endswith(".png")
]
)
def process_input(input, input_size, upscale_factor):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return tf.image.resize(y, [input_size, input_size], method="area")
def process_target(input):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return y
train_ds = train_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
train_ds = train_ds.prefetch(buffer_size=32)
valid_ds = valid_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
valid_ds = valid_ds.prefetch(buffer_size=32)
def get_model(upscale_factor=4.667, channels=1):
conv_args = {
"activation": "relu",
"kernel_initializer": "Orthogonal",
"padding": "same",
}
inputs = keras.Input(shape=(None, None, channels))
x = layers.Conv2D(64, 5, **conv_args)(inputs)
x = layers.Conv2D(64, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(channels * (upscale_factor ** 2), 3, **conv_args)(x)
outputs = tf.nn.depth_to_space(x, upscale_factor)
return keras.Model(inputs, outputs)
def plot_results(img, prefix, title):
"""Plot the result with zoom-in area."""
img_array = img_to_array(img)
img_array = img_array.astype("float32") / 255.0
# Create a new figure with a default 111 subplot.
fig, ax = plt.subplots()
im = ax.imshow(img_array[::-1], origin="lower")
plt.title(title)
# zoom-factor: 2.0, location: upper-left
axins = zoomed_inset_axes(ax, 2, loc=2)
axins.imshow(img_array[::-1], origin="lower")
# Specify the limits.
x1, x2, y1, y2 = 200, 300, 100, 200
# Apply the x-limits.
axins.set_xlim(x1, x2)
# Apply the y-limits.
axins.set_ylim(y1, y2)
plt.yticks(visible=False)
plt.xticks(visible=False)
# Make the line.
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue")
plt.savefig(str(prefix) + "-" + title + ".png")
def get_lowres_image(img, upscale_factor):
"""Return low-resolution image to use as model input."""
return img.resize(
(img.size[0] // upscale_factor, img.size[1] // upscale_factor),
PIL.Image.BICUBIC,
)
def upscale_image(model, img):
"""Predict the result based on input image and restore the image as RGB."""
ycbcr = img.convert("YCbCr")
y, cb, cr = ycbcr.split()
y = img_to_array(y)
y = y.astype("float32") / 255.0
input = np.expand_dims(y, axis=0)
out = model.predict(input)
out_img_y = out[0]
out_img_y *= 255.0
# Restore the image in RGB color space.
out_img_y = out_img_y.clip(0, 255)
out_img_y = out_img_y.reshape((np.shape(out_img_y)[0], np.shape(out_img_y)[1]))
out_img_y = PIL.Image.fromarray(np.uint8(out_img_y), mode="L")
out_img_cb = cb.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img = PIL.Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert(
"RGB"
)
return out_img
class ESPCNCallback(keras.callbacks.Callback):
def __init__(self):
super(ESPCNCallback, self).__init__()
self.test_img = get_lowres_image(load_img(test_img_paths[0]), upscale_factor)
# Store PSNR value in each epoch.
def on_epoch_begin(self, epoch, logs=None):
self.psnr = []
def on_epoch_end(self, epoch, logs=None):
print("Mean PSNR for epoch: %.2f" % (np.mean(self.psnr)))
if epoch % 20 == 0:
prediction = upscale_image(self.model, self.test_img)
plot_results(prediction, "epoch-" + str(epoch), "prediction")
def on_test_batch_end(self, batch, logs=None):
self.psnr.append(10 * math.log10(1 / logs["loss"]))
early_stopping_callback = keras.callbacks.EarlyStopping(monitor="loss", patience=10)
checkpoint_filepath = "resize_checkpoint/"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor="loss",
mode="min",
save_best_only=True,
)
model = get_model(upscale_factor=upscale_factor, channels=1)
model.summary()
callbacks = [ESPCNCallback(), early_stopping_callback, model_checkpoint_callback]
loss_fn = keras.losses.MeanSquaredError()
optimizer = keras.optimizers.Adam(learning_rate=0.001)
if mode == 'train':
epochs = 100
model.compile(
optimizer=optimizer, loss=loss_fn
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=valid_ds, verbose=2
)
else:
model.load_weights(checkpoint_filepath)
total_bicubic_psnr = 0.0
total_test_psnr = 0.0
for index, test_img_path in enumerate(fer_img_paths):
img_name = test_img_path.split('/')[-1]
print(index)
img = load_img(test_img_path)
w = img.size[0] * upscale_factor
h = img.size[1] * upscale_factor
prediction = upscale_image(model, img)
dim = (224, 224)
pred_array = img_to_array(prediction)
resized = cv2.resize(pred_array, dim, interpolation=cv2.INTER_AREA)
cv2.imwrite('data/fer2013/up_scaled/' + img_name, resized)
```
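The actual upscaling in `get_model` is done by `tf.nn.depth_to_space`: the last convolution emits `channels * upscale_factor**2` feature maps, which are then rearranged into a spatially larger image. A minimal shape-only sketch:
```python
import tensorflow as tf

upscale_factor, channels = 4, 1
# Pretend this is the output of the last Conv2D layer: 16 maps of size 8x8.
x = tf.zeros((1, 8, 8, channels * upscale_factor ** 2))
y = tf.nn.depth_to_space(x, upscale_factor)
print(y.shape)  # (1, 32, 32, 1) -- each group of 16 channels becomes a 4x4 pixel patch
```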
|
{
"source": "jeffreykuang/mmocr-1",
"score": 2
}
|
#### File: mmocr/apis/inference.py
```python
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmdet.datasets.pipelines import Compose
def model_inference(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        img (str): Image file path.
Returns:
result (dict): Detection results.
"""
assert isinstance(img, str)
cfg = model.cfg
device = next(model.parameters()).device # model device
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
# process img_metas
data['img_metas'] = data['img_metas'][0].data
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)[0]
return result
```
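A typical call sequence for `model_inference`, mirroring the test further down in this repo (the config and checkpoint paths are placeholders; any MMOCR model config works the same way):
```python
from mmdet.apis import init_detector
from mmocr.apis.inference import model_inference

# Placeholder paths -- substitute a real MMOCR config and its checkpoint.
config_file = 'configs/textrecog/sar/sar_r31_parallel_decoder_academic.py'
checkpoint_file = 'checkpoints/sar_r31_parallel_decoder_academic-dba3a4a3.pth'

model = init_detector(config_file, checkpoint=checkpoint_file, device='cpu')
result = model_inference(model, 'demo/demo_text_recog.jpg')
print(result)
```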
#### File: mmocr/core/mask.py
```python
import cv2
import numpy as np
import mmocr.utils as utils
def points2boundary(points, text_repr_type, text_score=None, min_width=-1):
"""Convert a text mask represented by point coordinates sequence into a
text boundary.
Args:
points (ndarray): Mask index of size (n, 2).
text_repr_type (str): Text instance encoding type
('quad' for quadrangle or 'poly' for polygon).
        text_score (float): Text score.
        min_width (int): Minimum short side of a valid quadrangle boundary;
            only used when ``text_repr_type`` is 'quad'. Defaults to -1.
Returns:
boundary (list[float]): The text boundary point coordinates (x, y)
list. Return None if no text boundary found.
"""
assert isinstance(points, np.ndarray)
assert points.shape[1] == 2
assert text_repr_type in ['quad', 'poly']
assert text_score is None or 0 <= text_score <= 1
if text_repr_type == 'quad':
rect = cv2.minAreaRect(points)
vertices = cv2.boxPoints(rect)
boundary = []
if min(rect[1]) > min_width:
boundary = [p for p in vertices.flatten().tolist()]
elif text_repr_type == 'poly':
height = np.max(points[:, 1]) + 10
width = np.max(points[:, 0]) + 10
mask = np.zeros((height, width), np.uint8)
mask[points[:, 1], points[:, 0]] = 255
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
boundary = list(contours[0].flatten().tolist())
if text_score is not None:
boundary = boundary + [text_score]
if len(boundary) < 8:
return None
return boundary
def seg2boundary(seg, text_repr_type, text_score=None):
"""Convert a segmentation mask to a text boundary.
Args:
seg (ndarray): The segmentation mask.
text_repr_type (str): Text instance encoding type
('quad' for quadrangle or 'poly' for polygon).
text_score (float): The text score.
Returns:
boundary (list): The text boundary. Return None if no text found.
"""
assert isinstance(seg, np.ndarray)
assert isinstance(text_repr_type, str)
assert text_score is None or 0 <= text_score <= 1
points = np.where(seg)
# x, y order
points = np.concatenate([points[1], points[0]]).reshape(2, -1).transpose()
boundary = None
if len(points) != 0:
boundary = points2boundary(points, text_repr_type, text_score)
return boundary
def extract_boundary(result):
"""Extract boundaries and their scores from result.
Args:
result (dict): The detection result with the key 'boundary_result'
of one image.
Returns:
boundaries_with_scores (list[list[float]]): The boundary and score
list.
boundaries (list[list[float]]): The boundary list.
scores (list[float]): The boundary score list.
"""
assert isinstance(result, dict)
assert 'boundary_result' in result.keys()
boundaries_with_scores = result['boundary_result']
assert utils.is_2dlist(boundaries_with_scores)
boundaries = [b[:-1] for b in boundaries_with_scores]
scores = [b[-1] for b in boundaries_with_scores]
return (boundaries_with_scores, boundaries, scores)
```
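The 'quad' branch of `points2boundary` is essentially OpenCV's minimum-area rectangle. A standalone sketch of that core step, independent of MMOCR:
```python
import cv2
import numpy as np

# (x, y) coordinates of some foreground pixels.
points = np.array([[2, 2], [10, 2], [10, 6], [2, 6], [6, 4]], dtype=np.float32)

rect = cv2.minAreaRect(points)          # ((cx, cy), (w, h), angle)
vertices = cv2.boxPoints(rect)          # the 4 corners of the minimum-area rectangle
boundary = vertices.flatten().tolist()  # [x1, y1, ..., x4, y4], as points2boundary returns
print(boundary)
```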
#### File: datasets/pipelines/crop.py
```python
import cv2
import numpy as np
from shapely.geometry import LineString, Point
import mmocr.utils as utils
from .box_utils import sort_vertex
def box_jitter(points_x, points_y, jitter_ratio_x=0.5, jitter_ratio_y=0.1):
"""Jitter on the coordinates of bounding box.
Args:
        points_x (list[float | int]): List of x for four vertices.
        points_y (list[float | int]): List of y for four vertices.
jitter_ratio_x (float): Horizontal jitter ratio relative to the height.
jitter_ratio_y (float): Vertical jitter ratio relative to the height.
"""
assert len(points_x) == 4
assert len(points_y) == 4
assert isinstance(jitter_ratio_x, float)
assert isinstance(jitter_ratio_y, float)
assert 0 <= jitter_ratio_x < 1
assert 0 <= jitter_ratio_y < 1
points = [Point(points_x[i], points_y[i]) for i in range(4)]
line_list = [
LineString([points[i], points[i + 1 if i < 3 else 0]])
for i in range(4)
]
tmp_h = max(line_list[1].length, line_list[3].length)
for i in range(4):
jitter_pixel_x = (np.random.rand() - 0.5) * 2 * jitter_ratio_x * tmp_h
jitter_pixel_y = (np.random.rand() - 0.5) * 2 * jitter_ratio_y * tmp_h
points_x[i] += jitter_pixel_x
points_y[i] += jitter_pixel_y
def warp_img(src_img,
box,
jitter_flag=False,
jitter_ratio_x=0.5,
jitter_ratio_y=0.1):
"""Crop box area from image using opencv warpPerspective w/o box jitter.
Args:
src_img (np.array): Image before cropping.
box (list[float | int]): Coordinates of quadrangle.
"""
assert utils.is_type_list(box, float) or utils.is_type_list(box, int)
assert len(box) == 8
h, w = src_img.shape[:2]
points_x = [min(max(x, 0), w) for x in box[0:8:2]]
points_y = [min(max(y, 0), h) for y in box[1:9:2]]
points_x, points_y = sort_vertex(points_x, points_y)
if jitter_flag:
box_jitter(
points_x,
points_y,
jitter_ratio_x=jitter_ratio_x,
jitter_ratio_y=jitter_ratio_y)
points = [Point(points_x[i], points_y[i]) for i in range(4)]
edges = [
LineString([points[i], points[i + 1 if i < 3 else 0]])
for i in range(4)
]
pts1 = np.float32([[points[i].x, points[i].y] for i in range(4)])
box_width = max(edges[0].length, edges[2].length)
box_height = max(edges[1].length, edges[3].length)
pts2 = np.float32([[0, 0], [box_width, 0], [box_width, box_height],
[0, box_height]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst_img = cv2.warpPerspective(src_img, M,
(int(box_width), int(box_height)))
return dst_img
def crop_img(src_img, box):
"""Crop box area to rectangle.
Args:
src_img (np.array): Image before crop.
box (list[float | int]): Points of quadrangle.
"""
assert utils.is_type_list(box, float) or utils.is_type_list(box, int)
assert len(box) == 8
h, w = src_img.shape[:2]
points_x = [min(max(x, 0), w) for x in box[0:8:2]]
points_y = [min(max(y, 0), h) for y in box[1:9:2]]
left = int(min(points_x))
top = int(min(points_y))
right = int(max(points_x))
bottom = int(max(points_y))
dst_img = src_img[top:bottom, left:right]
return dst_img
```
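The heart of `warp_img` is a four-point perspective transform. A minimal standalone version of that step, using a dummy image and a hard-coded quadrangle:
```python
import cv2
import numpy as np

src_img = np.zeros((100, 100, 3), dtype=np.uint8)  # dummy image

# Source quadrangle corners (clockwise) and the upright target rectangle.
pts1 = np.float32([[10, 20], [80, 10], [90, 60], [15, 70]])
box_width, box_height = 80, 55
pts2 = np.float32([[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])

M = cv2.getPerspectiveTransform(pts1, pts2)
dst_img = cv2.warpPerspective(src_img, M, (box_width, box_height))
print(dst_img.shape)  # (55, 80, 3)
```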
#### File: pipelines/textdet_targets/base_textdet_targets.py
```python
import sys
import cv2
import numpy as np
import Polygon as plg
import pyclipper
from mmcv.utils import print_log
import mmocr.utils.check_argument as check_argument
class BaseTextDetTargets:
"""Generate text detector ground truths."""
def __init__(self):
pass
def point2line(self, xs, ys, point_1, point_2):
"""Compute the distance from point to a line. This is adapted from
https://github.com/MhLiao/DB.
Args:
xs (ndarray): The x coordinates of size hxw.
ys (ndarray): The y coordinates of size hxw.
point_1 (ndarray): The first point with shape 1x2.
point_2 (ndarray): The second point with shape 1x2.
Returns:
result (ndarray): The distance matrix of size hxw.
"""
# suppose a triangle with three edge abc with c=point_1 point_2
# a^2
a_square = np.square(xs - point_1[0]) + np.square(ys - point_1[1])
# b^2
b_square = np.square(xs - point_2[0]) + np.square(ys - point_2[1])
# c^2
c_square = np.square(point_1[0] - point_2[0]) + np.square(point_1[1] -
point_2[1])
# -cosC=(c^2-a^2-b^2)/2(ab)
neg_cos_c = (
(c_square - a_square - b_square) /
(np.finfo(np.float32).eps + 2 * np.sqrt(a_square * b_square)))
# sinC^2=1-cosC^2
square_sin = 1 - np.square(neg_cos_c)
square_sin = np.nan_to_num(square_sin)
# distance=a*b*sinC/c=a*h/c=2*area/c
result = np.sqrt(a_square * b_square * square_sin /
(np.finfo(np.float32).eps + c_square))
# set result to minimum edge if C<pi/2
result[neg_cos_c < 0] = np.sqrt(np.fmin(a_square,
b_square))[neg_cos_c < 0]
return result
def polygon_area(self, polygon):
"""Compute the polygon area. Please refer to Green's theorem.
https://en.wikipedia.org/wiki/Green%27s_theorem. This is adapted from
https://github.com/MhLiao/DB.
Args:
polygon (ndarray): The polygon boundary points.
"""
polygon = polygon.reshape(-1, 2)
edge = 0
for i in range(polygon.shape[0]):
next_index = (i + 1) % polygon.shape[0]
edge += (polygon[next_index, 0] - polygon[i, 0]) * (
polygon[next_index, 1] + polygon[i, 1])
return edge / 2.
def polygon_size(self, polygon):
"""Estimate the height and width of the minimum bounding box of the
polygon.
Args:
polygon (ndarray): The polygon point sequence.
Returns:
size (tuple): The height and width of the minimum bounding box.
"""
poly = polygon.reshape(-1, 2)
rect = cv2.minAreaRect(poly.astype(np.int32))
size = rect[1]
return size
def generate_kernels(self,
img_size,
text_polys,
shrink_ratio,
max_shrink=sys.maxsize,
ignore_tags=None):
"""Generate text instance kernels for one shrink ratio.
Args:
img_size (tuple(int, int)): The image size of (height, width).
        text_polys (list[list[ndarray]]): The list of text polygons.
shrink_ratio (float): The shrink ratio of kernel.
Returns:
text_kernel (ndarray): The text kernel mask of (height, width).
"""
assert isinstance(img_size, tuple)
assert check_argument.is_2dlist(text_polys)
assert isinstance(shrink_ratio, float)
h, w = img_size
text_kernel = np.zeros((h, w), dtype=np.float32)
for text_ind, poly in enumerate(text_polys):
instance = poly[0].reshape(-1, 2).astype(np.int32)
area = plg.Polygon(instance).area()
peri = cv2.arcLength(instance, True)
distance = min(
int(area * (1 - shrink_ratio * shrink_ratio) / (peri + 0.001) +
0.5), max_shrink)
pco = pyclipper.PyclipperOffset()
pco.AddPath(instance, pyclipper.JT_ROUND,
pyclipper.ET_CLOSEDPOLYGON)
shrinked = np.array(pco.Execute(-distance))
# check shrinked == [] or empty ndarray
if len(shrinked) == 0 or shrinked.size == 0:
if ignore_tags is not None:
ignore_tags[text_ind] = True
continue
try:
shrinked = np.array(shrinked[0]).reshape(-1, 2)
except Exception as e:
print_log(f'{shrinked} with error {e}')
if ignore_tags is not None:
ignore_tags[text_ind] = True
continue
cv2.fillPoly(text_kernel, [shrinked.astype(np.int32)],
text_ind + 1)
return text_kernel, ignore_tags
def generate_effective_mask(self, mask_size: tuple, polygons_ignore):
"""Generate effective mask by setting the ineffective regions to 0 and
effective regions to 1.
Args:
mask_size (tuple): The mask size.
        polygons_ignore (list[list[ndarray]]): The list of ignored text
polygons.
Returns:
mask (ndarray): The effective mask of (height, width).
"""
assert check_argument.is_2dlist(polygons_ignore)
mask = np.ones(mask_size, dtype=np.uint8)
for poly in polygons_ignore:
instance = poly[0].reshape(-1,
2).astype(np.int32).reshape(1, -1, 2)
cv2.fillPoly(mask, instance, 0)
return mask
def generate_targets(self, results):
raise NotImplementedError
def __call__(self, results):
results = self.generate_targets(results)
return results
```
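To make the law-of-cosines steps in `point2line` concrete, here is the same computation worked for a single point against the segment (0, 0)-(4, 0). The point (2, 1) forms an obtuse angle with the two endpoints, so the formula applies (for an acute angle the method instead falls back to the distance to the nearer endpoint) and yields the perpendicular distance 1:
```python
import numpy as np

p1, p2 = np.array([0.0, 0.0]), np.array([4.0, 0.0])
x, y = 2.0, 1.0

a_sq = (x - p1[0]) ** 2 + (y - p1[1]) ** 2          # 5.0
b_sq = (x - p2[0]) ** 2 + (y - p2[1]) ** 2          # 5.0
c_sq = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2  # 16.0

neg_cos_c = (c_sq - a_sq - b_sq) / (2 * np.sqrt(a_sq * b_sq))  # 0.6 > 0, so angle C is obtuse
square_sin = 1 - neg_cos_c ** 2                                # 0.64
distance = np.sqrt(a_sq * b_sq * square_sin / c_sq)            # 1.0
print(distance)
```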
#### File: mmocr/models/builder.py
```python
from mmcv.utils import Registry, build_from_cfg
RECOGNIZERS = Registry('recognizer')
CONVERTORS = Registry('convertor')
ENCODERS = Registry('encoder')
DECODERS = Registry('decoder')
PREPROCESSOR = Registry('preprocessor')
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
"""Build recognizer."""
return build_from_cfg(cfg, RECOGNIZERS,
dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_convertor(cfg):
"""Build label convertor for scene text recognizer."""
return build_from_cfg(cfg, CONVERTORS)
def build_encoder(cfg):
"""Build encoder for scene text recognizer."""
return build_from_cfg(cfg, ENCODERS)
def build_decoder(cfg):
"""Build decoder for scene text recognizer."""
return build_from_cfg(cfg, DECODERS)
def build_preprocessor(cfg):
"""Build preprocessor for scene text recognizer."""
return build_from_cfg(cfg, PREPROCESSOR)
```
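All of these builders follow mmcv's registry pattern: a class decorated with `register_module()` can later be instantiated from a config dict whose `type` field names it. A self-contained sketch using a throwaway registry (not one of the registries above), assuming an mmcv version where `register_module()` works as a decorator factory:
```python
from mmcv.utils import Registry, build_from_cfg

TOYS = Registry('toy')  # throwaway registry, purely for illustration

@TOYS.register_module()
class ToyConvertor:
    def __init__(self, lowercase=True):
        self.lowercase = lowercase

cfg = dict(type='ToyConvertor', lowercase=False)
convertor = build_from_cfg(cfg, TOYS)
print(type(convertor).__name__, convertor.lowercase)  # ToyConvertor False
```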
#### File: textdet/dense_heads/textsnake_head.py
```python
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.models.builder import HEADS, build_loss
from . import HeadMixin
@HEADS.register_module()
class TextSnakeHead(HeadMixin, nn.Module):
"""The class for TextSnake head: TextSnake: A Flexible Representation for
Detecting Text of Arbitrary Shapes.
[https://arxiv.org/abs/1807.01544]
"""
def __init__(self,
in_channels,
decoding_type='textsnake',
text_repr_type='poly',
loss=dict(type='TextSnakeLoss'),
train_cfg=None,
test_cfg=None):
super().__init__()
assert isinstance(in_channels, int)
self.in_channels = in_channels
self.out_channels = 5
self.downsample_ratio = 1.0
self.decoding_type = decoding_type
self.text_repr_type = text_repr_type
self.loss_module = build_loss(loss)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.out_conv = nn.Conv2d(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=1,
stride=1,
padding=0)
self.init_weights()
def init_weights(self):
normal_init(self.out_conv, mean=0, std=0.01)
def forward(self, inputs):
outputs = self.out_conv(inputs)
return outputs
```
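Shape-wise the head is a single 1x1 convolution mapping `in_channels` feature maps to 5 TextSnake geometry maps per pixel. A small sketch (the import path is an assumption based on the file location, and it requires mmocr to be installed so that `TextSnakeLoss` is registered):
```python
import torch
from mmocr.models.textdet.dense_heads import TextSnakeHead  # import path assumed from the file location

head = TextSnakeHead(in_channels=32)
feats = torch.rand(1, 32, 64, 64)
out = head(feats)
print(out.shape)  # torch.Size([1, 5, 64, 64])
```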
#### File: tests/test_apis/test_model_inference.py
```python
import os
import shutil
import urllib.request
import pytest
from mmdet.apis import init_detector
from mmocr.apis.inference import model_inference
def test_model_inference():
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
print(project_dir)
config_file = os.path.join(
project_dir,
'../configs/textrecog/sar/sar_r31_parallel_decoder_academic.py')
checkpoint_file = os.path.join(
project_dir,
'../checkpoints/sar_r31_parallel_decoder_academic-dba3a4a3.pth')
if not os.path.exists(checkpoint_file):
url = ('https://download.openmmlab.com/mmocr'
'/textrecog/sar/'
'sar_r31_parallel_decoder_academic-dba3a4a3.pth')
print(f'Downloading {url} ...')
local_filename, _ = urllib.request.urlretrieve(url)
os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
shutil.move(local_filename, checkpoint_file)
print(f'Saved as {checkpoint_file}')
else:
print(f'Using existing checkpoint {checkpoint_file}')
device = 'cpu'
model = init_detector(
config_file, checkpoint=checkpoint_file, device=device)
if model.cfg.data.test['type'] == 'ConcatDataset':
model.cfg.data.test.pipeline = model.cfg.data.test['datasets'][
0].pipeline
img = os.path.join(project_dir, '../demo/demo_text_recog.jpg')
with pytest.raises(AssertionError):
model_inference(model, 1)
model_inference(model, img)
```
#### File: tests/test_models/test_loss.py
```python
import numpy as np
import torch
import mmocr.models.textdet.losses as losses
from mmdet.core import BitmapMasks
def test_panloss():
panloss = losses.PANLoss()
# test bitmasks2tensor
mask = [[1, 0, 1], [1, 1, 1], [0, 0, 1]]
target = [[1, 0, 1, 0, 0], [1, 1, 1, 0, 0], [0, 0, 1, 0, 0],
[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
masks = [np.array(mask)]
bitmasks = BitmapMasks(masks, 3, 3)
target_sz = (6, 5)
results = panloss.bitmasks2tensor([bitmasks], target_sz)
assert len(results) == 1
assert torch.sum(torch.abs(results[0].float() -
torch.Tensor(target))).item() == 0
def test_textsnakeloss():
textsnakeloss = losses.TextSnakeLoss()
# test balanced_bce_loss
pred = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=torch.float)
target = torch.tensor([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=torch.long)
mask = torch.tensor([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=torch.long)
bce_loss = textsnakeloss.balanced_bce_loss(pred, target, mask).item()
assert np.allclose(bce_loss, 0)
```
#### File: tests/test_models/test_ocr_head.py
```python
import pytest
import torch
from mmocr.models.textrecog import SegHead
def test_seg_head():
with pytest.raises(AssertionError):
SegHead(num_classes='100')
with pytest.raises(AssertionError):
SegHead(num_classes=-1)
seg_head = SegHead(num_classes=37)
out_neck = (torch.rand(1, 128, 32, 32), )
out_head = seg_head(out_neck)
assert out_head.shape == torch.Size([1, 37, 32, 32])
```
#### File: tests/test_models/test_recog_config.py
```python
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _demo_mm_inputs(num_kernels=0, input_shape=(1, 3, 300, 300),
num_items=None): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): Input batch dimensions.
num_items (None | list[int]): Specifies the number of boxes
for each batch item.
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'text': 'hello',
'valid_ratio': 1.0,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas
}
return mm_inputs
def _demo_gt_kernel_inputs(num_kernels=3, input_shape=(1, 3, 300, 300),
num_items=None): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple): Input batch dimensions.
num_items (None | list[int]): Specifies the number of boxes
for each batch item.
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
gt_kernels = []
for batch_idx in range(N):
kernels = []
for kernel_inx in range(num_kernels):
kernel = np.random.rand(H, W)
kernels.append(kernel)
gt_kernels.append(BitmapMasks(kernels, H, W))
return gt_kernels
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmocr repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmocr
repo_dpath = dirname(dirname(mmocr.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
@pytest.mark.parametrize('cfg_file', [
'textrecog/sar/sar_r31_parallel_decoder_academic.py',
'textrecog/crnn/crnn_academic_dataset.py',
'textrecog/nrtr/nrtr_r31_1by16_1by8_academic.py',
'textrecog/robust_scanner/robustscanner_r31_academic.py',
'textrecog/seg/seg_r31_1by16_fpnocr_academic.py'
])
def test_encoder_decoder_pipeline(cfg_file):
model = _get_detector_cfg(cfg_file)
model['pretrained'] = None
from mmocr.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 32, 160)
if 'crnn' in cfg_file:
input_shape = (1, 1, 32, 160)
mm_inputs = _demo_mm_inputs(0, input_shape)
gt_kernels = None
if 'seg' in cfg_file:
gt_kernels = _demo_gt_kernel_inputs(3, input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
if 'seg' in cfg_file:
losses = detector.forward(imgs, img_metas, gt_kernels=gt_kernels)
else:
losses = detector.forward(imgs, img_metas)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
# Test show_result
results = {'text': 'hello', 'score': 1.0}
img = np.random.rand(5, 5, 3)
detector.show_result(img, results)
```
|
{
"source": "JeffreyLambert/COVID-19-Data-Pipeline",
"score": 2
}
|
#### File: COVID-19-Data-Pipeline/tests/test_etl.py
```python
import pytest
import pandas as pd
from covid_pipeline.etl import Pipeline
from pandas.api.types import is_datetime64_dtype
@pytest.fixture
def create_object():
return Pipeline()
def test_retrieve_current(create_object):
etl = create_object
etl.retrieve_current()
assert etl.resp is not None
assert etl.resp.status_code == 200
def test_retrieve_historical(create_object):
etl = create_object
etl.retrieve_historical()
assert etl.resp is not None
assert etl.resp.status_code == 200
def test_retrieve_status(create_object):
etl = create_object
etl.retrieve_status()
assert etl.resp is not None
assert etl.resp.status_code == 200
def test_clean_dates(create_object):
etl = create_object
etl.df = pd.DataFrame({'dates': [20201009, 20201019]})
etl.clean_dates()
assert is_datetime64_dtype(etl.df.dates)
```
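The last test only asserts that `clean_dates` leaves a datetime64 column behind. The conversion it expects can be reproduced with pandas directly; this is a sketch of the idea, not necessarily the pipeline's exact implementation:
```python
import pandas as pd
from pandas.api.types import is_datetime64_dtype

df = pd.DataFrame({'dates': [20201009, 20201019]})
# The integers are YYYYMMDD, so render them as strings before parsing.
df['dates'] = pd.to_datetime(df['dates'].astype(str), format='%Y%m%d')
print(is_datetime64_dtype(df['dates']))  # True
```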
|
{
"source": "jeffreyleeon/code-analyst",
"score": 2
}
|
#### File: lib/common/test_constants.py
```python
import os
import sys
# Import 3rd party libraries
import pytest
# Import custom libraries
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
import lib.common.constants as constants
def test_CODE_ANALYST_OPT_TYPES():
assert constants.CODE_ANALYST_OPT_TYPES == {
'ALL': 'all',
'COMMIT_MSG': 'commit_msg'
}
def test_GITHUB_BASE_URL():
assert constants.GITHUB_BASE_URL == 'https://github.com/'
```
#### File: lib/common/test_type_validator.py
```python
import os
import sys
# Import 3rd party libraries
import pytest
# Import custom libraries
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
import lib.common.type_validator as type_validator
class TestBaseValidator(object):
def test_base_validator_is_none(self):
assert type_validator.BaseValidator.is_none(None) == True
assert type_validator.BaseValidator.is_none(False) == False
assert type_validator.BaseValidator.is_none(1) == False
assert type_validator.BaseValidator.is_none('str') == False
assert type_validator.BaseValidator.is_none([]) == False
assert type_validator.BaseValidator.is_none({}) == False
def test_base_validator_is_not_none(self):
assert type_validator.BaseValidator.is_not_none(None) == False
assert type_validator.BaseValidator.is_not_none(False) == True
assert type_validator.BaseValidator.is_not_none(1) == True
assert type_validator.BaseValidator.is_not_none('str') == True
assert type_validator.BaseValidator.is_not_none([]) == True
assert type_validator.BaseValidator.is_not_none({}) == True
class TestArrayValidator(object):
def test_array_validator_is_array(self):
assert type_validator.ArrayValidator.is_array(None) == False
assert type_validator.ArrayValidator.is_array(False) == False
assert type_validator.ArrayValidator.is_array(1) == False
assert type_validator.ArrayValidator.is_array('str') == False
assert type_validator.ArrayValidator.is_array([]) == True
assert type_validator.ArrayValidator.is_array(['a', 'b']) == True
assert type_validator.ArrayValidator.is_array({}) == False
def test_array_validator_is_empty_array(self):
assert type_validator.ArrayValidator.is_empty_array(None) == False
assert type_validator.ArrayValidator.is_empty_array(False) == False
assert type_validator.ArrayValidator.is_empty_array(1) == False
assert type_validator.ArrayValidator.is_empty_array('str') == False
assert type_validator.ArrayValidator.is_empty_array([]) == True
assert type_validator.ArrayValidator.is_empty_array(['a', 'b']) == False
assert type_validator.ArrayValidator.is_empty_array({}) == False
    def test_array_validator_is_non_empty_array(self):
assert type_validator.ArrayValidator.is_non_empty_array(None) == False
assert type_validator.ArrayValidator.is_non_empty_array(False) == False
assert type_validator.ArrayValidator.is_non_empty_array(1) == False
assert type_validator.ArrayValidator.is_non_empty_array('str') == False
assert type_validator.ArrayValidator.is_non_empty_array([]) == False
assert type_validator.ArrayValidator.is_non_empty_array(['a', 'b']) == True
assert type_validator.ArrayValidator.is_non_empty_array({}) == False
```
|
{
"source": "jeffreylee-zingbox/python-o365",
"score": 3
}
|
#### File: python-o365/O365/inbox.py
```python
from O365.message import Message
import logging
import json
import requests
log = logging.getLogger(__name__)
class Inbox( object ):
'''
Wrapper class for an inbox which mostly holds a list of messages.
Methods:
getMessages -- downloads messages to local memory.
Variables:
inbox_url -- url used for fetching emails.
'''
#url for fetching emails. Takes a flag for whether they are read or not.
inbox_url = 'https://outlook.office365.com/api/v1.0/me/messages'
def __init__(self, auth, getNow=True):
'''
Creates a new inbox wrapper. Send email and password for authentication.
        Set getNow to False if you don't want to immediately download new messages.
'''
log.debug('creating inbox for the email %s',auth[0])
self.auth = auth
self.messages = []
self.filters = ''
if getNow:
self.filters = 'IsRead eq false'
self.getMessages()
def getMessages(self, number = 10):
'''
Downloads messages to local memory.
You create an inbox to be the container class for messages, this method
then pulls those messages down to the local disk. This is called in the
init method, so it's kind of pointless for you. Unless you think new
messages have come in.
You can filter only certain emails by setting filters. See the set and
get filters methods for more information.
'''
log.debug('fetching messages.')
response = requests.get(self.inbox_url,auth=self.auth,params={'$filter':self.filters, '$top':number})
log.info('Response from O365: %s', str(response))
for message in response.json()['value']:
try:
duplicate = False
for i,m in enumerate(self.messages):
if message['Id'] == m.json['Id']:
self.messages[i] = Message(message,self.auth)
duplicate = True
break
if not duplicate:
self.messages.append(Message(message,self.auth))
log.debug('appended message: %s',message['Subject'])
except Exception as e:
                log.info('failed to append message: %s', str(e))
log.debug('all messages retrieved and put in to the list.')
return True
def getFilter(self):
'''get the value set for a specific filter, if exists, else None'''
return self.filters
def setFilter(self,f_string):
'''
Set the value of a filter. More information on what filters are available
can be found here:
https://msdn.microsoft.com/office/office365/APi/complex-types-for-mail-contacts-calendar#RESTAPIResourcesMessage
        I may in the future add the ability to set these yourself, but right now that is too complicated.
Arguments:
f_string -- The string that represents the filters you want to enact.
should be something like: (HasAttachments eq true) and (IsRead eq false)
or just: IsRead eq false
        test your filter string here: https://outlook.office365.com/api/v1.0/me/messages?$filter=
if that accepts it then you know it works.
'''
self.filters = f_string
return True
#To the King!
```
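A short usage sketch for the class above (the credentials and filter string are placeholders; `Message` objects expose the raw Office 365 payload on `.json`, as `getMessages` relies on):
```python
from O365.inbox import Inbox

auth = ('user@example.com', 'password')   # placeholder credentials
inbox = Inbox(auth, getNow=False)         # skip the fetch in the constructor
inbox.setFilter('(HasAttachments eq true) and (IsRead eq false)')
inbox.getMessages(number=5)

for message in inbox.messages:
    print(message.json['Subject'])
```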
|
{
"source": "jeffreyleifer/draw_rect",
"score": 3
}
|
#### File: jeffreyleifer/draw_rect/rectangle.py
```python
from PySide2.QtCore import QRect
from PySide2.QtCore import QPoint
"""
Rectangle
:param begin: QPoint object for top left coordinate of rectangle
:param end: QPoint object for bottom right coordinate of rectangle
* Rectangle class derived from QRect
* Includes rotation functions and helper function for points
"""
class Rectangle(QRect):
def __init__(self,begin,end):
super().__init__(begin,end)
"""
flip_h
* Flip Coordinates of the rectangle horizontally
"""
def flip_h(self):
tl = self.topLeft()
tr = self.topRight()
bl = self.bottomLeft()
br = self.bottomRight()
self.setTopLeft(tr)
self.setTopRight(tl)
self.setBottomRight(bl)
self.setBottomLeft(br)
"""
flip_v
* Flip Coordinates of the rectangle vertically
"""
def flip_v(self):
tl = self.topLeft()
tr = self.topRight()
bl = self.bottomLeft()
br = self.bottomRight()
self.setTopLeft(bl)
self.setTopRight(br)
self.setBottomRight(tr)
self.setBottomLeft(tl)
"""
refl
    * Calls flip_h and flip_v to create a reflection of the original rectangle
"""
def refl(self):
        self.flip_h()
        self.flip_v()
```
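A quick sketch of the flip helpers (requires PySide2; run from the repo root so `rectangle` is importable):
```python
from PySide2.QtCore import QPoint
from rectangle import Rectangle

rect = Rectangle(QPoint(0, 0), QPoint(10, 5))
print(rect.topLeft(), rect.bottomRight())   # (0, 0) and (10, 5)

rect.flip_h()  # mirror horizontally: the left and right edges swap
print(rect.topLeft(), rect.bottomRight())   # (10, 0) and (0, 5)
```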
#### File: jeffreyleifer/draw_rect/rect_creator.py
```python
from PySide2.QtWidgets import QWidget
from PySide2.QtCore import Qt
from PySide2.QtCore import QPoint
from PySide2.QtWidgets import QWidget, QLabel
from PySide2.QtGui import QPainter
from PySide2.QtGui import QPixmap
from PySide2.QtGui import QPen
from draw_utils import is_adjacent, is_contained,is_intersect
from rectangle import Rectangle
from constants import RECT_A,RECT_B,PEN_WIDTH
"""
Rectangle Creator:
* GUI to create rectangles
* Extended from QWidget
"""
class RectangleCreator(QWidget):
def __init__(self):
super().__init__()
""" Setup """
self.setMouseTracking(True)
self.begin = QPoint()
self.end = QPoint()
self.coord_list = []
self.rect_list = []
self.clicked = False
"""
Paint Event
* Paints Rectangles onto a Pixmap from a list of coordinates
* Stores created rectangles in a list
* Rectangle store is cleared and rebuild each iteration
"""
def paintEvent(self, event):
"""Create pallet"""
pixmap = QPixmap()
painter = QPainter(self)
painter.drawPixmap(self.rect(), pixmap)
pen = QPen()
pen.setWidth(PEN_WIDTH)
pen.setColor(Qt.black)
painter.setPen(pen)
"""Rebuild rectangle store"""
self.rect_list.clear()
for coord in self.coord_list:
rec = Rectangle(coord[RECT_A], coord[RECT_B])
self.rect_list.append(rec)
painter.drawRect(rec)
if not self.clicked:
return
"""Create new rectangle"""
rec = Rectangle(self.begin, self.end)
self.rect_list.append(rec)
painter.drawRect(rec)
"""
mousePressEvent
* Deletes oldest rectangle from the coordinate list
* Updates begin and end values
* Tracks click for use in display of rectangles
"""
def mousePressEvent(self, event):
"""Remove oldest"""
if len(self.coord_list) > 1:
self.coord_list.pop(0)
"""Update tracking variables"""
self.begin = event.pos()
self.end = event.pos()
self.clicked = True
self.update()
"""
mouseMoveEvent
* Updates endpoint
* Updates Coordinates on display
"""
def mouseMoveEvent(self, event):
self.end = event.pos()
self.setWindowTitle('Coordinates: ( x = %d : y = %d )' % (event.x(), event.y()))
self.update()
"""
mouseReleaseEvent
* Checks for position of start and end points of rectangle
* Transforms rectangle so start is topleft and end is bottom right
* Adds rectangle coordinates to the coordinates list
* If two rectangle exist:
* Runs test for Adjacent, contained and intersection
"""
def mouseReleaseEvent(self, event):
"""Needs horizontal flip?"""
if self.begin.x() > self.end.x() and self.begin.y() < self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.flip_hor(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.flip_hor(self.rect_list[RECT_B])
"""Needs vertical flip?"""
if self.begin.x() < self.end.x() and self.begin.y() > self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.flip_ver(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.flip_ver(self.rect_list[RECT_B])
"""Needs refection?"""
if self.begin.x() > self.end.x() and self.begin.y() > self.end.y():
if len(self.rect_list) == 1:
self.rect_list[RECT_A] = self.reflect(self.rect_list[RECT_A])
else:
self.rect_list[RECT_B] = self.reflect(self.rect_list[RECT_B])
self.clicked = False
self.update()
"""Add new coordinates to the coordinates list"""
self.coord_list.append([self.begin,self.end])
"""Run Tests"""
if len(self.coord_list) == 2:
is_adjacent(self.rect_list[RECT_A],self.rect_list[RECT_B],silent=False)
contained = is_contained(self.rect_list[RECT_A],self.rect_list[RECT_B])
if not contained:
contained = is_contained(self.rect_list[RECT_B],self.rect_list[RECT_A])
if not contained:
is_intersect(self.rect_list[RECT_A],self.rect_list[RECT_B])
print('------')
"""
flip_hor
* Call rectangle flip_h function
* Flip start and end points horizontal
"""
def flip_hor(self,rect):
rect.flip_h()
self.begin = rect.topLeft()
self.end = rect.bottomRight()
return rect
"""
flip_ver
* Calls rectangle flip_v function and
* Flip start and end points vertical
"""
def flip_ver(self,rect):
rect.flip_v()
self.begin = rect.topLeft()
self.end = rect.bottomRight()
return rect
"""
reflect
* Calls flip_hor then flip_ver to produce a reflection of the start and end points
* Same as above for the input rectangle coordinates
"""
def reflect(self,rect):
rect = self.flip_hor(rect)
rect = self.flip_ver(rect)
return rect
```
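The widget is a plain QWidget, so it can be launched with the usual Qt boilerplate. A sketch, assuming the repo's modules (`constants`, `draw_utils`, `rectangle`) are importable from the working directory:
```python
import sys
from PySide2.QtWidgets import QApplication
from rect_creator import RectangleCreator

app = QApplication(sys.argv)
widget = RectangleCreator()
widget.resize(800, 600)
widget.show()
sys.exit(app.exec_())
```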
|
{
"source": "JeffreyLin39/chess-engine-python",
"score": 3
}
|
#### File: JeffreyLin39/chess-engine-python/engine.py
```python
import math
from tables import *
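# switchPos moves the piece at pos1 to pos2, remembering any captured piece in
# game.initial so that a second call with the arguments reversed undoes the move.
# King positions (game.wK / game.bK) are kept in sync as well.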
def switchPos(pos1, pos2, game):
if game.board[pos1[0]][pos1[1]] == 'K':
game.wK = (pos2[0], pos2[1])
elif game.board[pos1[0]][pos1[1]] == 'k':
game.bK = (pos2[0], pos2[1])
if game.initial == '0':
game.initial = game.board[pos2[0]][pos2[1]]
game.board[pos2[0]][pos2[1]] = game.board[pos1[0]][pos1[1]]
game.board[pos1[0]][pos1[1]] = '0'
else:
game.board[pos2[0]][pos2[1]] = game.board[pos1[0]][pos1[1]]
game.board[pos1[0]][pos1[1]] = game.initial
game.initial = '0'
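# isBKingChecked / isWKingChecked scan outward from the king's square: the eight
# knight offsets, then each rank, file and diagonal ray until a piece blocks it,
# looking for an enemy piece that attacks along that line (including adjacent
# kings and, on the diagonals, pawns one step away).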
def isBKingChecked(game, side):
i, j = game.bK
if i > 1:
if j > 0 and game.board[i-2][j-1] == 'N':
return True
if j < 7 and game.board[i-2][j+1] == 'N':
return True
if i > 0:
if j > 1 and game.board[i-1][j-2] == 'N':
return True
if j < 6 and game.board[i-1][j+2] == 'N':
return True
if i < 5:
if j > 0 and game.board[i+2][j-1] == 'N':
return True
if j < 7 and game.board[i+2][j+1] == 'N':
return True
if i < 6:
if j > 1 and game.board[i+1][j-2] == 'N':
return True
if j < 6 and game.board[i+1][j+2] == 'N':
return True
for k in range (i-1, -1, -1):
if game.board[k][j] == 'R' or game.board[k][j] == 'Q' or (game.board[k][j] == 'K' and k == i-1):
return True
if not game.board[k][j].isdigit():
break
for k in range (i+1, 8):
if game.board[k][j] == 'R' or game.board[k][j] == 'Q' or (game.board[k][j] == 'K' and k == i+1):
return True
if not game.board[k][j].isdigit():
break
for k in range (j-1, -1, -1):
if game.board[i][k] == 'R' or game.board[i][k] == 'Q' or (game.board[i][k] == 'K' and k == j-1):
return True
if not game.board[i][k].isdigit():
break
for k in range (j+1, 8):
if game.board[i][k] == 'R' or game.board[i][k] == 'Q' or (game.board[i][k] == 'K' and k == j+1):
return True
if not game.board[i][k].isdigit():
break
for k in range (1, min(i, j)+1):
if game.board[i-k][j-k] == 'B' or game.board[i-k][j-k] == 'Q' or (game.board[i-k][j-k] == 'K' and k == 1):
return True
if not game.board[i-k][j-k].isdigit():
break
for k in range (1, min(i, (7-j))+1):
if game.board[i-k][j+k] == 'B' or game.board[i-k][j+k] == 'Q' or (game.board[i-k][j+k] == 'K' and k == 1):
return True
if not game.board[i-k][j+k].isdigit():
break
for k in range (1, min((7-i), j)+1):
if game.board[i+k][j-k] == 'B' or game.board[i+k][j-k] == 'Q' or ((game.board[i+k][j-k] == 'P' or game.board[i+k][j-k] == 'K') and k == 1):
return True
if not game.board[i+k][j-k].isdigit():
break
for k in range (1, min((7-i), (7-j))+1):
if game.board[i+k][j+k] == 'B' or game.board[i+k][j+k] == 'Q' or ((game.board[i+k][j+k] == 'P' or game.board[i+k][j+k] == 'K') and k == 1):
return True
if not game.board[i+k][j+k].isdigit():
break
return False
def isWKingChecked(game, side):
i, j = game.wK
if i > 1:
if j > 0 and game.board[i-2][j-1] =='n':
return True
if j < 7 and game.board[i-2][j+1] =='n':
return True
if i > 0:
if j > 1 and game.board[i-1][j-2] =='n':
return True
if j < 6 and game.board[i-1][j+2] =='n':
return True
if i < 5:
if j > 0 and game.board[i+2][j-1] =='n':
return True
if j < 7 and game.board[i+2][j+1] =='n':
return True
if i < 6:
if j > 1 and game.board[i+1][j-2] =='n':
return True
if j < 6 and game.board[i+1][j+2] =='n':
return True
for k in range (i-1, -1, -1):
if game.board[k][j] == 'r' or game.board[k][j] == 'q' or (game.board[k][j] == 'k' and k == i-1):
return True
if not game.board[k][j].isdigit():
break
for k in range (i+1, 8):
        if game.board[k][j] == 'r' or game.board[k][j] == 'q' or (game.board[k][j] == 'k' and k == i+1):
return True
if not game.board[k][j].isdigit():
break
for k in range (j-1, -1, -1):
if game.board[i][k] == 'r' or game.board[i][k] == 'q' or (game.board[i][k] == 'k' and k == j-1):
return True
if not game.board[i][k].isdigit():
break
for k in range (j+1, 8):
if game.board[i][k] == 'r' or game.board[i][k] == 'q' or (game.board[i][k] == 'k' and k == j+1):
return True
if not game.board[i][k].isdigit():
break
for k in range (1, min(i, j)+1):
if game.board[i-k][j-k] == 'b' or game.board[i-k][j-k] == 'q' or ((game.board[i-k][j-k] == 'p' or game.board[i-k][j-k] == 'k') and k == 1):
return True
if not game.board[i-k][j-k].isdigit():
break
for k in range (1, min(i, (7-j))+1):
if game.board[i-k][j+k] == 'b' or game.board[i-k][j+k] == 'q' or ((game.board[i-k][j+k] == 'p' or game.board[i-k][j+k] == 'k') and k == 1):
return True
if not game.board[i-k][j+k].isdigit():
break
for k in range (1, min((7-i), j)+1):
if game.board[i+k][j-k] == 'b' or game.board[i+k][j-k] == 'q' or (game.board[i+k][j-k] == 'k' and k == 1):
return True
if not game.board[i+k][j-k].isdigit():
break
for k in range (1, min((7-i), (7-j))+1):
if game.board[i+k][j+k] == 'b' or game.board[i+k][j+k] == 'q' or (game.board[i+k][j+k] == 'k' and k == 1):
return True
if not game.board[i+k][j+k].isdigit():
break
return False
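# Move generators such as getWP/getBP/getWB follow a try-and-undo pattern: each
# candidate move is played with switchPos, kept only if the mover's king is not
# left in check, and then reverted with a second switchPos call.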
def getWP(position, game):
validMoves = []
if position[0] > 0 and game.board[position[0]-1][position[1]] == '0':
switchPos((position[0], position[1]), (position[0]-1, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]))
if position[0] == 6 and game.board[position[0]-2][position[1]] == '0':
switchPos((position[0], position[1]), (position[0]-2, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-2, position[1]))
switchPos((position[0]-2, position[1]), (position[0], position[1]), game)
switchPos((position[0]-1, position[1]), (position[0], position[1]), game)
if position[0] > 0 and position[1] > 0 and game.board[position[0]-1][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]-1))
switchPos((position[0]-1, position[1]-1), (position[0], position[1]), game)
if position[0] > 0 and position[1] < 7 and game.board[position[0]-1][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]+1))
switchPos((position[0]-1, position[1]+1), (position[0], position[1]), game)
if not game.enpassant == (-1, -1, -1) and position[0] == game.enpassant[0] and (position[1] - 1 == game.enpassant[1] or position[1] + 1 == game.enpassant[1]) and game.enpassant[2] == 0:
switchPos((position[0], position[1]), (game.enpassant[0]-1, game.enpassant[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((game.enpassant[0]-1, game.enpassant[1]))
switchPos((game.enpassant[0]-1, game.enpassant[1]), (position[0], position[1]), game)
return validMoves
def getBP(position, game):
validMoves = []
if position[0] < 7 and game.board[position[0]+1][position[1]] == '0':
switchPos((position[0], position[1]), (position[0]+1, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]))
if position[0] == 1 and game.board[position[0]+2][position[1]] == '0':
            switchPos((position[0]+1, position[1]), (position[0]+2, position[1]), game)
            if not isBKingChecked(game, 1):
                validMoves.append((position[0]+2, position[1]))
            switchPos((position[0]+2, position[1]), (position[0]+1, position[1]), game)
switchPos((position[0]+1, position[1]), (position[0], position[1]), game)
if position[0] < 7 and position[1] > 0 and not game.board[position[0]+1][position[1]-1].islower() and not game.board[position[0]+1][position[1]-1].isdigit():
switchPos((position[0], position[1]), (position[0]+1, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]-1))
switchPos((position[0]+1, position[1]-1), (position[0], position[1]), game)
if position[0] < 7 and position[1] < 7 and not game.board[position[0]+1][position[1]+1].islower() and not game.board[position[0]+1][position[1]+1].isdigit():
switchPos((position[0], position[1]), (position[0]+1, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]+1))
switchPos((position[0]+1, position[1]+1), (position[0], position[1]), game)
if not game.enpassant == (-1, -1, -1) and position[0] == game.enpassant[0] and (position[1] - 1 == game.enpassant[1] or position[1] + 1 == game.enpassant[1]) and game.enpassant[2] == 1:
switchPos((position[0], position[1]), (game.enpassant[0]+1, game.enpassant[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((game.enpassant[0]+1, game.enpassant[1]))
switchPos((game.enpassant[0]+1, game.enpassant[1]), (position[0], position[1]), game)
return validMoves
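# Bishop moves (getWB/getBB): slide along the four diagonals until blocked; an enemy
# piece may be captured and ends the ray.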
def getWB(position, game):
validMoves = []
i, j = position[0], position[1]
for k in range (1, min(i, j)+1):
if not game.board[i-k][j-k].islower() and not game.board[i-k][j-k].isdigit():
break
switchPos((position[0], position[1]), (position[0]-k, position[1]-k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-k, position[1]-k))
switchPos((position[0]-k, position[1]-k), (position[0], position[1]), game)
if game.board[i-k][j-k].islower():
break
for k in range (1, min(i, (7-j))+1):
if not game.board[i-k][j+k].islower() and not game.board[i-k][j+k].isdigit():
break
switchPos((position[0], position[1]), (position[0]-k, position[1]+k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-k, position[1]+k))
switchPos((position[0]-k, position[1]+k), (position[0], position[1]), game)
if game.board[i-k][j+k].islower():
break
for k in range (1, min((7-i), j)+1):
if not game.board[i+k][j-k].islower() and not game.board[i+k][j-k].isdigit():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]-k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+k, position[1]-k))
switchPos((position[0]+k, position[1]-k), (position[0], position[1]), game)
if game.board[i+k][j-k].islower():
break
for k in range (1, min((7-i), (7-j))+1):
if not game.board[i+k][j+k].islower() and not game.board[i+k][j+k].isdigit():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]+k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+k, position[1]+k))
switchPos((position[0]+k, position[1]+k), (position[0], position[1]), game)
if game.board[i+k][j+k].islower():
break
return validMoves
def getBB(position, game):
validMoves = []
i, j = position[0], position[1]
for k in range (1, min(i, j)+1):
if game.board[i-k][j-k].islower():
break
switchPos((position[0], position[1]), (position[0]-k, position[1]-k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-k, position[1]-k))
switchPos((position[0]-k, position[1]-k), (position[0], position[1]), game)
if not game.board[i-k][j-k].islower() and not game.board[i-k][j-k].isdigit():
break
for k in range (1, min(i, (7-j))+1):
if game.board[i-k][j+k].islower():
break
switchPos((position[0], position[1]), (position[0]-k, position[1]+k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-k, position[1]+k))
switchPos((position[0]-k, position[1]+k), (position[0], position[1]), game)
if not game.board[i-k][j+k].isdigit() and not game.board[i-k][j+k].islower():
break
for k in range (1, min((7-i), j)+1):
if game.board[i+k][j-k].islower():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]-k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+k, position[1]-k))
switchPos((position[0]+k, position[1]-k), (position[0], position[1]), game)
if not game.board[i+k][j-k].isdigit() and not game.board[i+k][j-k].islower():
break
for k in range (1, min((7-i), (7-j))+1):
if game.board[i+k][j+k].islower():
break
switchPos((position[0], position[1]), (position[0]+k, position[1]+k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+k, position[1]+k))
switchPos((position[0]+k, position[1]+k), (position[0], position[1]), game)
if not game.board[i+k][j+k].isdigit() and not game.board[i+k][j+k].islower():
break
return validMoves
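# Knight moves (getWN/getBN): try the eight L-shaped jumps that stay on the board.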
def getWN(position, game):
validMoves = []
i,j = position[0], position[1]
if i > 1:
if j > 0 and(game.board[position[0]-2][position[1]-1].islower() or game.board[position[0]-2][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]-2, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-2, position[1]-1))
switchPos((position[0]-2, position[1]-1),(position[0], position[1]), game)
if j < 7 and(game.board[position[0]-2][position[1]+1].islower() or game.board[position[0]-2][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]-2, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-2, position[1]+1))
switchPos((position[0]-2, position[1]+1), (position[0], position[1]), game)
if i > 0:
if j > 1 and(game.board[position[0]-1][position[1]-2].islower() or game.board[position[0]-1][position[1]-2].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]-2))
switchPos((position[0]-1, position[1]-2), (position[0], position[1]), game)
if j < 6 and(game.board[position[0]-1][position[1]+2].islower() or game.board[position[0]-1][position[1]+2].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]+2))
switchPos((position[0]-1, position[1]+2), (position[0], position[1]), game)
if i < 6:
if j > 0 and(game.board[position[0]+2][position[1]-1].islower() or game.board[position[0]+2][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]+2, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+2, position[1]-1))
switchPos((position[0]+2, position[1]-1), (position[0], position[1]), game)
if j < 7 and(game.board[position[0]+2][position[1]+1].islower() or game.board[position[0]+2][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]+2, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+2, position[1]+1))
switchPos((position[0]+2, position[1]+1), (position[0], position[1]), game)
if i < 7:
if j > 1 and(game.board[position[0]+1][position[1]-2].islower() or game.board[position[0]+1][position[1]-2].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]-2))
switchPos((position[0]+1, position[1]-2), (position[0], position[1]), game)
if j < 6 and(game.board[position[0]+1][position[1]+2].islower() or game.board[position[0]+1][position[1]+2].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+2), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]+2))
switchPos((position[0]+1, position[1]+2), (position[0], position[1]), game)
return validMoves
def getBN(position, game):
validMoves = []
i,j = position[0], position[1]
if i > 1:
if j > 0 and (not game.board[position[0]-2][position[1]-1].islower()):
switchPos((position[0], position[1]), (position[0]-2, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-2, position[1]-1))
switchPos((position[0]-2, position[1]-1), (position[0], position[1]), game)
if j < 7 and (not game.board[position[0]-2][position[1]+1].islower()):
switchPos((position[0], position[1]), (position[0]-2, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-2, position[1]+1))
switchPos((position[0]-2, position[1]+1), (position[0], position[1]), game)
if i > 0:
if j > 1 and (not game.board[position[0]-1][position[1]-2].islower()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]-2))
switchPos((position[0]-1, position[1]-2), (position[0], position[1]), game)
if j < 6 and (not game.board[position[0]-1][position[1]+2].islower()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]+2))
switchPos((position[0]-1, position[1]+2), (position[0], position[1]), game)
if i < 6:
if j > 0 and (not game.board[position[0]+2][position[1]-1].islower()):
switchPos((position[0], position[1]), (position[0]+2, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+2, position[1]-1))
switchPos((position[0]+2, position[1]-1), (position[0], position[1]), game)
if j < 7 and (not game.board[position[0]+2][position[1]+1].islower()):
switchPos((position[0], position[1]), (position[0]+2, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+2, position[1]+1))
switchPos((position[0]+2, position[1]+1), (position[0], position[1]), game)
if i < 7:
if j > 1 and (not game.board[position[0]+1][position[1]-2].islower()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]-2))
switchPos((position[0]+1, position[1]-2), (position[0], position[1]), game)
if j < 6 and (not game.board[position[0]+1][position[1]+2].islower()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+2), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]+2))
switchPos((position[0]+1, position[1]+2), (position[0], position[1]), game)
return validMoves
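# Rook moves (getWR/getBR): slide along the rank and file until blocked; an enemy
# piece may be captured and ends the ray.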
def getWR(position, game):
validMoves = []
i,j = position[0], position[1]
for k in range (i-1, -1, -1):
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if game.board[k][j].islower():
break
for k in range (i+1, 8):
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if game.board[k][j].islower():
break
for k in range (j-1, -1, -1):
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if game.board[i][k].islower():
break
for k in range (j+1, 8):
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if game.board[i][k].islower():
break
return validMoves
def getBR(position, game):
validMoves = []
i,j = position[0], position[1]
for k in range (i-1, -1, -1):
if game.board[k][j].islower():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
for k in range (i+1, 8):
if game.board[k][j].islower():
break
switchPos((position[0], position[1]), (k, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((k, position[1]))
switchPos((k, position[1]), (position[0], position[1]), game)
if not game.board[k][j].islower() and not game.board[k][j].isdigit():
break
for k in range (j-1, -1, -1):
if game.board[i][k].islower():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
for k in range (j+1, 8):
if game.board[i][k].islower():
break
switchPos((position[0], position[1]), (position[0], k), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], k))
switchPos((position[0], k), (position[0], position[1]), game)
if not game.board[i][k].islower() and not game.board[i][k].isdigit():
break
return validMoves
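# Queen moves are the union of the bishop and rook move sets.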
def getWQ(position, game):
validMoves = []
validMoves.extend(getWB(position, game))
validMoves.extend(getWR(position, game))
return validMoves
def getBQ(position, game):
validMoves = []
validMoves.extend(getBB(position, game))
validMoves.extend(getBR(position, game))
return validMoves
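# King moves (getWK/getBK): the eight adjacent squares plus castling, which also checks
# that the king is not currently in check and does not pass through or land on an
# attacked square.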
def getWK(position, game):
validMoves = []
if position[0] > 0:
if game.board[position[0]-1][position[1]].islower() or game.board[position[0]-1][position[1]].isdigit():
switchPos((position[0], position[1]), (position[0]-1, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]))
switchPos((position[0]-1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and(game.board[position[0]-1][position[1]-1].islower() or game.board[position[0]-1][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]-1))
switchPos((position[0]-1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and(game.board[position[0]-1][position[1]+1].islower() or game.board[position[0]-1][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]-1, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]-1, position[1]+1))
switchPos((position[0]-1, position[1]+1), (position[0], position[1]), game)
if position[0] < 7:
if game.board[position[0]+1][position[1]].islower() or game.board[position[0]+1][position[1]].isdigit():
switchPos((position[0], position[1]), (position[0]+1, position[1]), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]))
switchPos((position[0]+1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and(game.board[position[0]+1][position[1]-1].islower() or game.board[position[0]+1][position[1]-1].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]-1))
switchPos((position[0]+1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and(game.board[position[0]+1][position[1]+1].islower() or game.board[position[0]+1][position[1]+1].isdigit()):
switchPos((position[0], position[1]), (position[0]+1, position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0]+1, position[1]+1))
switchPos((position[0]+1, position[1]+1), (position[0], position[1]), game)
if position[1] > 0:
if game.board[position[0]][position[1]-1].islower() or game.board[position[0]][position[1]-1].isdigit():
switchPos((position[0], position[1]), (position[0], position[1]-1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], position[1]-1))
switchPos((position[0], position[1]-1), (position[0], position[1]), game)
if position[1] < 7:
if game.board[position[0]][position[1]+1].islower() or game.board[position[0]][position[1]+1].isdigit():
switchPos((position[0], position[1]), (position[0], position[1]+1), game)
if not isWKingChecked(game, 0):
validMoves.append((position[0], position[1]+1))
switchPos((position[0], position[1]+1), (position[0], position[1]), game)
if not isWKingChecked(game, 0) and game.wK == (7, 4):
if game.wCastleL and game.board[7][3] == '0' and game.board[7][2] == '0' and game.board[7][1] == '0':
switchPos((7, 4), (7, 3), game)
if not isWKingChecked(game, 1):
switchPos((7, 3), (7, 2), game)
if not isWKingChecked(game, 0):
validMoves.append((7, 2))
switchPos((7, 2), (7, 4), game)
else:
switchPos((7, 3), (7, 4), game)
if game.wCastleR and game.board[7][5] == '0' and game.board[7][6] == '0':
switchPos((7, 4), (7, 5), game)
if not isWKingChecked(game, 0):
switchPos((7, 5), (7, 6), game)
if not isWKingChecked(game, 0):
validMoves.append((7, 6))
switchPos((7, 6), (7, 4), game)
else:
switchPos((7, 5), (7, 4), game)
return validMoves
def getBK(position, game):
validMoves = []
if position[0] > 0:
if not game.board[position[0]-1][position[1]].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]))
switchPos((position[0]-1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and not game.board[position[0]-1][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]-1))
switchPos((position[0]-1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and not game.board[position[0]-1][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0]-1, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]-1, position[1]+1))
switchPos((position[0]-1, position[1]+1), (position[0], position[1]), game)
if position[0] < 7:
if not game.board[position[0]+1][position[1]].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]))
switchPos((position[0]+1, position[1]), (position[0], position[1]), game)
if position[1] > 0 and not game.board[position[0]+1][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]-1))
switchPos((position[0]+1, position[1]-1), (position[0], position[1]), game)
if position[1] < 7 and not game.board[position[0]+1][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0]+1, position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0]+1, position[1]+1))
switchPos((position[0]+1, position[1]+1), (position[0], position[1]), game)
if position[1] > 0:
if not game.board[position[0]][position[1]-1].islower():
switchPos((position[0], position[1]), (position[0], position[1]-1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], position[1]-1))
switchPos((position[0], position[1]-1), (position[0], position[1]), game)
if position[1] < 7:
if not game.board[position[0]][position[1]+1].islower():
switchPos((position[0], position[1]), (position[0], position[1]+1), game)
if not isBKingChecked(game, 1):
validMoves.append((position[0], position[1]+1))
switchPos((position[0], position[1]+1), (position[0], position[1]), game)
if not isBKingChecked(game, 0) and game.bK == (0, 4):
if game.bCastleL and game.board[0][3] == '0' and game.board[0][2] == '0' and game.board[0][1] == '0':
switchPos((0, 4), (0, 3), game)
if not isBKingChecked(game, 0):
switchPos((0, 3), (0, 2), game)
if not isBKingChecked(game, 0):
validMoves.append((0, 2))
switchPos((0, 2), (0, 4), game)
else:
switchPos((0, 3), (0, 4), game)
if game.bCastleR and game.board[0][5] == '0' and game.board[0][6] == '0':
switchPos((0, 4), (0, 5), game)
if not isBKingChecked(game, 0):
switchPos((0, 5), (0, 6), game)
if not isBKingChecked(game, 0):
validMoves.append((0, 6))
switchPos((0, 6), (0, 4), game)
else:
switchPos((0, 5), (0, 4), game)
return validMoves
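# Dispatch on the piece letter (uppercase = white, lowercase = black) to the matching
# move generator.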
def getMoves(position, game):
tmp = game.board[position[0]][position[1]]
if tmp == 'p':
return getBP(position, game)
elif tmp == 'P':
return getWP(position, game)
elif tmp == 'n':
return getBN(position, game)
elif tmp == 'N':
return getWN(position, game)
elif tmp == 'b':
return getBB(position, game)
elif tmp == 'B':
return getWB(position, game)
elif tmp == 'r':
return getBR(position, game)
elif tmp == 'R':
return getWR(position, game)
elif tmp == 'q':
return getBQ(position, game)
elif tmp == 'Q':
return getWQ(position, game)
elif tmp == 'k':
return getBK(position, game)
elif tmp == 'K':
return getWK(position, game)
return
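# Static evaluation from White's point of view: material values plus piece-square
# tables; middlegame tables are used while both whiteScore and blackScore are below 14,
# endgame tables otherwise.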
def calculateBoard(game):
if isWKingChecked(game, 0):
return -100000
if isBKingChecked(game, 0):
return 100000
sum = 0
for i in range (8):
for j in range (8):
            if game.board[i][j] == 'P':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_value[0] + mid_pawn_table[i*8+j]
                else:
                    sum += end_value[0] + end_pawn_table[i*8+j]
            elif game.board[i][j] == 'N':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_value[1] + mid_knight_table[i*8+j]
                else:
                    sum += end_value[1] + end_knight_table[i*8+j]
            elif game.board[i][j] == 'B':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_value[2] + mid_bishop_table[i*8+j]
                else:
                    sum += end_value[2] + end_bishop_table[i*8+j]
            elif game.board[i][j] == 'R':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_value[3] + mid_rook_table[i*8+j]
                else:
                    sum += end_value[3] + end_rook_table[i*8+j]
            elif game.board[i][j] == 'Q':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_value[4] + mid_queen_table[i*8+j]
                else:
                    sum += end_value[4] + end_queen_table[i*8+j]
            elif game.board[i][j] == 'K':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum += mid_king_table[i*8+j]
                else:
                    sum += end_king_table[i*8+j]
            elif game.board[i][j] == 'p':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_value[0] - mid_pawn_table[flip[i*8+j]]
                else:
                    sum = sum - end_value[0] - end_pawn_table[flip[i*8+j]]
            elif game.board[i][j] == 'n':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_value[1] - mid_knight_table[flip[i*8+j]]
                else:
                    sum = sum - end_value[1] - end_knight_table[flip[i*8+j]]
            elif game.board[i][j] == 'b':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_value[2] - mid_bishop_table[flip[i*8+j]]
                else:
                    sum = sum - end_value[2] - end_bishop_table[flip[i*8+j]]
            elif game.board[i][j] == 'r':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_value[3] - mid_rook_table[flip[i*8+j]]
                else:
                    sum = sum - end_value[3] - end_rook_table[flip[i*8+j]]
            elif game.board[i][j] == 'q':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_value[4] - mid_queen_table[flip[i*8+j]]
                else:
                    sum = sum - end_value[4] - end_queen_table[flip[i*8+j]]
            elif game.board[i][j] == 'k':
                if game.whiteScore < 14 and game.blackScore < 14:
                    sum = sum - mid_king_table[flip[i*8+j]]
                else:
                    sum = sum - end_king_table[flip[i*8+j]]
return sum
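# Minimax search with alpha-beta pruning. The best move found at the top level is stored
# in game.bestMove as (from_row, from_col, to_row, to_col, promotion_piece_or_-1).
# main.py calls it as computeMove(game, 1, not game.turn, -10000, 10000).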
def computeMove(game, depth, isMaximizingPlayer, alpha, beta):
if depth == 0:
return calculateBoard(game)
if isMaximizingPlayer:
bestVal = -1000000
for i in range (8):
for j in range (8):
if not game.board[i][j] == '0' and not game.board[i][j].islower():
for move in getMoves( (i, j), game):
                        initial = game.board[move[0]][move[1]]
switchPos((i, j), move, game)
if game.board[move[0]][move[1]] == 'P' and move[0] == 0:
game.board[move[0]][move[1]] = 'N'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'N')
alpha = max( alpha, bestVal)
if beta <= alpha:
                                game.board[move[0]][move[1]] = 'P'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'B'
val = computeMove(game, depth - 1, False, alpha, beta)
                            if val > bestVal:
                                bestVal = val
                                game.bestMove = (i, j, move[0], move[1], 'B')
alpha = max( alpha, bestVal)
if beta <= alpha:
                                game.board[move[0]][move[1]] = 'P'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'R'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'R')
alpha = max( alpha, bestVal)
if beta <= alpha:
                                game.board[move[0]][move[1]] = 'P'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'Q'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'Q')
alpha = max( alpha, bestVal)
if beta <= alpha:
                                game.board[move[0]][move[1]] = 'P'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'P'
elif game.board[move[0]][move[1]] == 'p' and move[0] == 7:
game.board[move[0]][move[1]] = 'n'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'n')
alpha = max( alpha, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'b'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'b')
alpha = max( alpha, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'r'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'r')
alpha = max( alpha, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'q'
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'q')
alpha = max( alpha, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'p'
else:
val = computeMove(game, depth - 1, False, alpha, beta)
if val > bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], -1)
alpha = max( alpha, bestVal)
if beta <= alpha:
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
else:
bestVal = 1000000
for i in range (8):
for j in range (8):
if game.board[i][j].islower():
for move in getMoves( (i, j), game):
initial = game.board[move[0]][move[1]]
switchPos((i, j), move, game)
if game.board[move[0]][move[1]] == 'P' and move[0] == 0:
game.board[move[0]][move[1]] = 'N'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'N')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'B'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'B')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'R'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'R')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'Q'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'Q')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'P'
elif game.board[move[0]][move[1]] == 'p' and move[0] == 7:
game.board[move[0]][move[1]] = 'n'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'n')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'b'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'b')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'r'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'r')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'q'
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], 'q')
beta = min( beta, bestVal)
if beta <= alpha:
game.board[move[0]][move[1]] = 'p'
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
game.board[move[0]][move[1]] = 'p'
else:
                            val = computeMove(game, depth - 1, True, alpha, beta)
if val < bestVal:
bestVal = val
game.bestMove = (i, j, move[0], move[1], -1)
beta = min( beta, bestVal)
if beta <= alpha:
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
switchPos(move, (i, j), game)
game.board[move[0]][move[1]] = initial
return bestVal
```
#### File: JeffreyLin39/chess-engine-python/main.py
```python
import pygame
import random
from Board_class import Game
from GUI import draw_entire_board, printBoard, select, moveSelected, drawSquare
from engine import computeMove
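# True if the destination square is empty or holds a piece of the opposite colour to the
# currently selected piece.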
def isNotSame(game, selected, position):
if game.board[position[0]][position[1]].isdigit():
return True
if game.board[position[0]][position[1]].islower() == game.board[selected[0]][selected[1]].islower():
return False
return True
def switchTurn(game):
if game.turn:
game.turn = False
else:
game.turn = True
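# Left click (button 1): select a piece or move the current selection. Right click
# (button 3): highlight the clicked square.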
def controller(game, mouseButton, position):
if mouseButton == 1:
game.highlightMode = False
draw_entire_board(game)
if not game.selected == (-1,-1) and isNotSame(game, game.selected, position):
flag = moveSelected(game, game.selected, position)
game.selected = (-1, -1)
if flag:
switchTurn(game)
elif game.selected == position:
game.selected = (-1,-1)
elif game.board[position[0]][position[1]] != '0' and game.turn == game.board[position[0]][position[1]].islower():
game.selected = position
select(game, position)
if mouseButton == 3:
if not game.highlightMode:
draw_entire_board(game)
game.highlightMode = True
drawSquare(game, position[0], position[1], False, True, False)
pygame.display.update()
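# Reset all game state to the standard starting position.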
def refresh(game):
game.selected = (-1, -1)
game.turn = False
game.highlightMode = False
game.board = [
['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r'],
['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
['0', '0', '0', '0', '0', '0', '0', '0'],
['0', '0', '0', '0', '0', '0', '0', '0'],
['0', '0', '0', '0', '0', '0', '0', '0'],
['0', '0', '0', '0', '0', '0', '0', '0'],
['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R']
]
game.enpassant = (-1, -1, -1)
game.bK = (0, 4)
game.wK = (7, 4)
game.wCastleL = True
game.wCastleR = True
game.bCastleL = True
game.bCastleR = True
game.initial = '0'
game.moves = []
game.history = {}
game.fiftyMove = 0
game.blackScore = 0
game.whiteScore = 0
game.restart = False
game.flipped = False
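# Game loop: when it is the engine's turn, computeMove chooses a move and it is played
# through controller; human moves come from mouse clicks, with coordinates flipped when
# the board is flipped.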
def main(game):
running = True
while running:
if game.restart:
running = False
start()
if not game.turn == game.flipped:
computeMove(game, 1, not game.turn, -10000, 10000)
controller(game, 1, (game.bestMove[0], game.bestMove[1]))
controller(game, 1, (game.bestMove[2], game.bestMove[3]))
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if game.flipped:
controller(game, event.button, (7-(pygame.mouse.get_pos()[1] // 90), 7- (pygame.mouse.get_pos()[0] // 90)))
else:
controller(game, event.button, (pygame.mouse.get_pos()[1] // 90, pygame.mouse.get_pos()[0] // 90))
if event.type == pygame.QUIT:
running = False
pygame.time.Clock().tick(60)
def start():
game = Game()
refresh(game)
if random.randint(0,1) == 1:
game.flipped = True
draw_entire_board(game)
main(game)
start()
```
|
{
"source": "jeffreylovitz/grammarinator",
"score": 2
}
|
#### File: grammarinator/grammarinator/parse.py
```python
import importlib
import json
import os
import shutil
from argparse import ArgumentParser
from math import inf
from multiprocessing import Pool
from os.path import basename, exists, join
from antlr4 import CommonTokenStream, error, FileStream, ParserRuleContext, TerminalNode, Token
from .cli import add_antlr_argument, add_disable_cleanup_argument, add_jobs_argument, add_log_level_argument, add_sys_path_argument, add_sys_recursion_limit_argument, add_version_argument, logger, process_antlr_argument, process_log_level_argument, process_sys_path_argument, process_sys_recursion_limit_argument
from .parser_builder import build_grammars
from .pkgdata import default_antlr_path
from .runtime import Tree, UnlexerRule, UnparserRule
# Override ConsoleErrorListener to suppress parse issues in non-verbose mode.
class ConsoleListener(error.ErrorListener.ConsoleErrorListener):
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
logger.debug('line %d:%d %s', line, column, msg)
error.ErrorListener.ConsoleErrorListener.INSTANCE = ConsoleListener()
def import_entity(name):
steps = name.split('.')
return getattr(importlib.import_module('.'.join(steps[0:-1])), steps[-1])
class ParserFactory(object):
"""
Class to parse existing sources and create Grammarinator compatible tree representation
from them. These trees can be reused later by generation.
"""
def __init__(self, grammars, parser_dir,
hidden=None, transformers=None, antlr=default_antlr_path, max_depth=inf, cleanup=True):
self.max_depth = float(max_depth)
self.cleanup = cleanup in [True, 1, 'True', 'true']
transformers = transformers if isinstance(transformers, list) else json.loads(transformers) if transformers else []
self.transformers = [import_entity(transformer) if isinstance(transformer, str) else transformer for transformer in transformers]
self.hidden = hidden if isinstance(hidden, list) else json.loads(hidden) if hidden else []
self.parser_dir = parser_dir
os.makedirs(self.parser_dir, exist_ok=True)
grammars = grammars if isinstance(grammars, list) else json.loads(grammars)
for i, grammar in enumerate(grammars):
shutil.copy(grammar, self.parser_dir)
grammars[i] = basename(grammar)
self.lexer_cls, self.parser_cls, self.listener_cls = build_grammars(grammars, self.parser_dir, antlr)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.cleanup:
shutil.rmtree(self.parser_dir, ignore_errors=True)
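    # Convert an ANTLR parse tree into a Grammarinator tree of UnparserRule/UnlexerRule
    # nodes; labeled alternatives get a combined rule name, and hidden tokens listed in
    # self.hidden are attached next to the tokens they surround.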
def antlr_to_grammarinator_tree(self, antlr_node, parser, visited=None):
if visited is None:
visited = set()
if isinstance(antlr_node, ParserRuleContext):
rule_name = parser.ruleNames[antlr_node.getRuleIndex()]
class_name = antlr_node.__class__.__name__
# Check if the rule is a labeled alternative.
if not class_name.lower().startswith(rule_name.lower()):
alt_name = class_name[:-len('Context')] if class_name.endswith('Context') else class_name
rule_name = '{rule_name}_{alternative}'.format(
rule_name=rule_name,
alternative=alt_name[0].upper() + alt_name[1:])
node = UnparserRule(name=rule_name)
assert node.name, 'Node name of a parser rule is empty or None.'
for child in (antlr_node.children or []):
node += self.antlr_to_grammarinator_tree(child, parser, visited)
else:
assert isinstance(antlr_node, TerminalNode), 'An ANTLR node must either be a ParserRuleContext or a TerminalNode but {node_cls} was found.'.format(node_cls=antlr_node.__class__.__name__)
name, text = (parser.symbolicNames[antlr_node.symbol.type], antlr_node.symbol.text) if antlr_node.symbol.type != Token.EOF else ('EOF', '')
assert name, '{name} is None or empty'.format(name=name)
if not self.hidden:
node = UnlexerRule(name=name, src=text)
else:
hidden_tokens_to_left = parser.getTokenStream().getHiddenTokensToLeft(antlr_node.symbol.tokenIndex, -1) or []
node = []
for token in hidden_tokens_to_left:
if parser.symbolicNames[token.type] in self.hidden:
if token not in visited:
node.append(UnlexerRule(name=parser.symbolicNames[token.type], src=token.text))
visited.add(token)
node.append(UnlexerRule(name=name, src=text))
hidden_tokens_to_right = parser.getTokenStream().getHiddenTokensToRight(antlr_node.symbol.tokenIndex, -1) or []
for token in hidden_tokens_to_right:
if parser.symbolicNames[token.type] in self.hidden:
if token not in visited:
node.append(UnlexerRule(name=parser.symbolicNames[token.type], src=token.text))
visited.add(token)
return node
def create_tree(self, input_stream, rule, fn=None):
try:
parser = self.parser_cls(CommonTokenStream(self.lexer_cls(input_stream)))
rule = rule or self.parser_cls.ruleNames[0]
parse_tree_root = getattr(parser, rule)()
if not parser._syntaxErrors:
tree = Tree(self.antlr_to_grammarinator_tree(parse_tree_root, parser))
for transformer in self.transformers:
tree.root = transformer(tree.root)
return tree
logger.warning('%s syntax errors detected%s.', parser._syntaxErrors, ' in {fn}'.format(fn=fn) if fn else '')
except Exception as e:
logger.warning('Exception while parsing%s.', ' {fn}'.format(fn=fn) if fn else '', exc_info=e)
return None
def tree_from_file(self, fn, rule, out, encoding):
logger.info('Process file %s.', fn)
try:
tree = self.create_tree(FileStream(fn, encoding=encoding), rule, fn)
if tree is not None:
tree.save(join(out, basename(fn) + Tree.extension), max_depth=self.max_depth)
except Exception as e:
logger.warning('Exception while processing %s.', fn, exc_info=e)
def iterate_tests(files, rule, out, encoding):
for test in files:
yield (test, rule, out, encoding)
def execute():
parser = ArgumentParser(description='Grammarinator: Parser',
epilog="""
The tool parses files with ANTLR v4 grammars, builds Grammarinator-
compatible tree representations from them and saves them for further
reuse.
""")
parser.add_argument('grammar', metavar='FILE', nargs='+',
help='ANTLR grammar files describing the expected format of input to parse.')
parser.add_argument('-i', '--input', metavar='FILE', nargs='+', required=True,
help='input files to process.')
parser.add_argument('-r', '--rule', metavar='NAME',
help='name of the rule to start parsing with (default: first parser rule).')
parser.add_argument('-t', '--transformer', metavar='NAME', action='append', default=[],
help='reference to a transformer (in package.module.function format) to postprocess the parsed tree.')
parser.add_argument('--hidden', metavar='NAME', action='append', default=[],
help='list of hidden tokens to be built into the parsed tree.')
parser.add_argument('--encoding', metavar='ENC', default='utf-8',
help='input file encoding (default: %(default)s).')
parser.add_argument('--max-depth', type=int, default=inf,
                        help='maximum expected tree depth; deeper tests will be discarded (default: %(default)f).')
parser.add_argument('-o', '--out', metavar='DIR', default=os.getcwd(),
help='directory to save the trees (default: %(default)s).')
parser.add_argument('--parser-dir', metavar='DIR',
help='directory to save the parser grammars (default: <OUTDIR>/grammars).')
add_disable_cleanup_argument(parser)
add_jobs_argument(parser)
add_antlr_argument(parser)
add_sys_path_argument(parser)
add_sys_recursion_limit_argument(parser)
add_log_level_argument(parser)
add_version_argument(parser)
args = parser.parse_args()
for grammar in args.grammar:
if not exists(grammar):
parser.error('{grammar} does not exist.'.format(grammar=grammar))
if not args.parser_dir:
args.parser_dir = join(args.out, 'grammars')
process_log_level_argument(args)
process_sys_path_argument(args)
process_sys_recursion_limit_argument(args)
process_antlr_argument(args)
with ParserFactory(grammars=args.grammar, hidden=args.hidden, transformers=args.transformer, parser_dir=args.parser_dir, antlr=args.antlr,
max_depth=args.max_depth, cleanup=args.cleanup) as factory:
if args.jobs > 1:
with Pool(args.jobs) as pool:
pool.starmap(factory.tree_from_file, iterate_tests(args.input, args.rule, args.out, args.encoding))
else:
for create_args in iterate_tests(args.input, args.rule, args.out, args.encoding):
factory.tree_from_file(*create_args)
if __name__ == '__main__':
execute()
```
|
{
"source": "jeffreylovitz/RedisGraph",
"score": 2
}
|
#### File: tests/flow/test_path_filter.py
```python
import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
from collections import Counter
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from demo import QueryInfo
GRAPH_ID = "G"
redis_con = None
redis_graph = None
class testPathFilter(FlowTestsBase):
def __init__(self):
self.env = Env()
global redis_con
redis_con = self.env.getConnection()
def setUp(self):
global redis_graph
redis_graph = Graph(GRAPH_ID, redis_con)
self.env.flush()
def test00_simple_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0]]
query_info = QueryInfo(query = query, description="Tests simple path filter", expected_result = expected_results)
self._assert_resultset_equals_expected(result_set, query_info)
def test01_negated_simple_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE NOT (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node1]]
query_info = QueryInfo(query = query, description="Tests simple negated path filter", expected_result = expected_results)
self._assert_resultset_equals_expected(result_set, query_info)
def test02_test_path_filter_or_property_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR n.x=1 RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests OR condition with simple filter and path filter", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test03_path_filter_or_negated_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR NOT (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests OR condition with path and negated path filters", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test04_test_level_1_nesting_logical_operators_over_path_and_property_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND NOT (n)-[:R]->(:L)) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and negated path filter", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test05_test_level_2_nesting_logical_operators_over_path_and_property_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND (n.x = 2 OR NOT (n)-[:R]->(:L))) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test06_test_level_2_nesting_logical_operators_over_path_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
node2 = Node(node_id=2, label="L2")
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R2")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND ((n)-[:R2]->(:L2) OR (n)-[:R]->(:L))) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test07_test_edge_filters(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R", properties={'x': 1})
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R {x:1}]->() RETURN n.x"
result_set = redis_graph.query(query)
expected_results = [['a']]
query_info = QueryInfo(query = query, description="Tests pattern filter edge conditions", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test08_indexed_child_stream_resolution(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
# Create index.
query = "CREATE INDEX ON :L(x)"
result_set = redis_graph.query(query)
self.env.assertEquals(result_set.indices_created, 1)
# Issue a query in which the bound variable stream of the SemiApply op is an Index Scan.
query = "MATCH (n:L) WHERE (:L)<-[]-(n)<-[]-(:L {x: 'a'}) AND n.x = 'b' RETURN n.x"
result_set = redis_graph.query(query)
expected_results = [['b']]
self.env.assertEquals(result_set.result_set, expected_results)
def test09_no_invalid_expand_into(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
# Issue a query in which the match stream and the bound stream must both perform traversal.
query = "MATCH (n:L)-[]->(:L) WHERE ({x: 'a'})-[]->(n) RETURN n.x"
plan = redis_graph.execution_plan(query)
# Verify that the execution plan has no Expand Into and two traversals.
self.env.assertNotIn("Expand Into", plan)
self.env.assertEquals(2, plan.count("Conditional Traverse"))
result_set = redis_graph.query(query)
expected_results = [['b']]
self.env.assertEquals(result_set.result_set, expected_results)
def test10_verify_apply_results(self):
# Build a graph with 3 nodes and 3 edges, 2 of which have the same source.
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge02 = Edge(src_node=node0, dest_node=node2, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge02)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[]->() RETURN n.x ORDER BY n.x"
result_set = redis_graph.query(query)
# Each source node should be returned exactly once.
expected_results = [['a'], ['b']]
self.env.assertEquals(result_set.result_set, expected_results)
```
|
{
"source": "jeffreylu1/HQ-Trivia-Aid",
"score": 2
}
|
#### File: jeffreylu1/HQ-Trivia-Aid/googler.py
```python
import requests
from bs4 import BeautifulSoup
from random import choice
desktop_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0']
def random_headers():
return {'User-Agent': choice(desktop_agents),'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}
USER_AGENT = random_headers()
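# Scrapes a Google results page for a question and returns a list of
# {'keyword', 'rank', 'description'} dicts, e.g. google().search_google('example question').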
class google:
def __init__(self):
pass
def fetch_results(self, search_term, number_results, language_code):
assert isinstance(search_term, str), 'Search term must be a string'
assert isinstance(number_results, int), 'Number of results must be an integer'
self.escaped_search_term = search_term.replace(' ', '+')
self.google_url = 'https://www.google.com/search?q={}&num={}&hl={}'.format(self.escaped_search_term, number_results, language_code)
self.response = requests.get(self.google_url, headers=USER_AGENT)
self.response.raise_for_status()
return search_term, self.response.text
def parse_results(self, html, keyword):
self.soup = BeautifulSoup(html, 'html.parser')
self.found_results = []
self.rank = 1
self.result_block = self.soup.find_all('div', attrs={'class': 'g'})
for self.result in self.result_block:
self.link = self.result.find('a', href=True)
self.title = self.result.find('h3', attrs={'class': 'r'})
self.description = self.result.find('span', attrs={'class': 'st'})
if self.link and self.title:
self.link = self.link['href']
self.title = self.title.get_text()
if self.description:
self.description = self.description.get_text()
if self.link != '#':
self.found_results.append({'keyword': keyword, 'rank': self.rank, 'description': self.description})
self.rank += 1
return self.found_results
def search_google(self, question):
self.question = question # Question to Google
#print(self.question)
self.keyword, self.html = self.fetch_results(self.question, 400, 'en')
self.results = self.parse_results(self.html, self.keyword)
return self.results
```
|
{
"source": "jeffreymanzione/jeff-vm-lang-",
"score": 2
}
|
#### File: jeffreymanzione/jeff-vm-lang-/jeff_lang.bzl
```python
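# Starlark rules for Jeff VM sources: _jeff_vm_library compiles each .jv file into
# .ja/.jb outputs with the jvc compiler, and _jeff_vm_binary wraps the compiled main
# file, its deps, and the jvr runner in a generated shell script.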
def _jeff_vm_library_impl(ctx):
compiler_executable = ctx.attr.compiler.files_to_run.executable
compiler_executable_path = "./" + compiler_executable.short_path
src_files = [file for target in ctx.attr.srcs for file in target.files.to_list()]
out_files = []
for src in src_files:
a_out_file = ctx.actions.declare_file(src.basename.replace(".jv", ".ja"))
out_files.append(a_out_file)
b_out_file = ctx.actions.declare_file(src.basename.replace(".jv", ".jb"))
out_files.append(b_out_file)
out_dir = a_out_file.root.path + "/" + src.dirname
jvc_args = ["-a", "--assembly_out_dir=" + out_dir, "-b", "--binary_out_dir=" + out_dir, src.short_path]
ctx.actions.run(
outputs = [a_out_file, b_out_file],
inputs = [src],
executable = compiler_executable,
arguments = jvc_args,
mnemonic = "CompileJL",
progress_message = "Running command: %s %s" % (compiler_executable_path, " ".join(jvc_args)),
)
return [
DefaultInfo(
files = depset(out_files),
),
]
_jeff_vm_library = rule(
implementation = _jeff_vm_library_impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
doc = "Source files",
),
"deps": attr.label_list(),
"compiler": attr.label(
default = Label("//compile:jvc"),
executable = True,
allow_single_file = True,
cfg = "target",
),
},
)
def _prioritize_bin(file):
if file.extension.endswith("jb"):
return 0
else:
return 1
def jeff_vm_library(
name,
srcs):
return _jeff_vm_library(name = name, srcs = srcs)
def _jeff_vm_binary_impl(ctx):
runner_executable = ctx.attr.runner.files_to_run.executable
builtins = [file for target in ctx.attr.builtins for file in target.files.to_list()]
main_file = sorted(ctx.attr.main.files.to_list(), key = _prioritize_bin)[0]
input_files = [file for target in ctx.attr.deps for file in target.files.to_list() if file.path.endswith(".ja")]
input_files = [main_file] + input_files
jvr_command = "./%s %s" % (runner_executable.short_path, " ".join([file.short_path for file in input_files]))
run_sh = ctx.actions.declare_file(ctx.label.name + ".sh")
ctx.actions.write(
output = run_sh,
is_executable = True,
content = jvr_command,
)
return [
DefaultInfo(
files = depset(input_files + builtins + [runner_executable, run_sh]),
executable = run_sh,
default_runfiles = ctx.runfiles(files = input_files + [runner_executable, run_sh] + builtins),
),
]
_jeff_vm_binary = rule(
implementation = _jeff_vm_binary_impl,
attrs = {
"main": attr.label(
doc = "Main file",
),
"deps": attr.label_list(),
"runner": attr.label(
default = Label("//run:jvr"),
executable = True,
allow_single_file = True,
cfg = "target",
),
"builtins": attr.label_list(),
"executable_ext": attr.string(default = ".sh"),
},
executable = True,
)
def jeff_vm_binary(name, main, srcs = [], deps = []):
if main in srcs:
srcs.remove(main)
jeff_vm_library(
"%s_main" % name,
srcs = [main],
)
if len(srcs) > 0:
jeff_vm_library(
"%s_srcs" % name,
srcs = srcs,
)
deps = [":%s_srcs" % name] + deps
return _jeff_vm_binary(
name = name,
main = ":%s_main" % name,
deps = deps,
builtins = ["//lib"],
)
```
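A hypothetical BUILD file showing how the `jeff_vm_library` and `jeff_vm_binary` macros above might be invoked; the target names, `.jv` file names, and the `//:jeff_lang.bzl` load path are assumptions for illustration.
```python
# Hypothetical BUILD usage of the macros above; names are illustrative only.
load("//:jeff_lang.bzl", "jeff_vm_binary", "jeff_vm_library")

jeff_vm_library(
    name = "util",
    srcs = ["util.jv"],
)

jeff_vm_binary(
    name = "app",
    main = "main.jv",
    srcs = ["helpers.jv"],
    deps = [":util"],
)
```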
|
{
"source": "jeffreymanzione/jeff-vm",
"score": 2
}
|
#### File: jeff-vm/toolchain/cc_toolchain_config.bzl
```python
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
# NEW
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"artifact_name_pattern",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
def mingw_directories(mingw_version):
return [
"C:/MinGW/include",
"C:/MinGW/mingw32/include",
"C:/MinGW/lib/gcc/mingw32/%s/include-fixed" % mingw_version,
"C:/MinGW/lib/gcc/mingw32/%s/include" % mingw_version,
"C:/MinGW/lib/gcc/mingw32/%s" % mingw_version,
]
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
cxx_builtin_include_directories = ctx.attr.builtin_include_directories
tool_paths = [
tool_path(
name = "gcc",
path = "C:/MinGW/bin/gcc",
),
tool_path(
name = "ld",
path = "C:/MinGW/bin/ld",
),
tool_path(
name = "ar",
path = "C:/MinGW/bin/ar",
),
tool_path(
name = "cpp",
path = "C:/MinGW/bin/cpp",
),
tool_path(
name = "gcov",
path = "C:/MinGW/bin/gcov",
),
tool_path(
name = "nm",
path = "C:/MinGW/bin/nm",
),
tool_path(
name = "objdump",
path = "C:/MinGW/bin/objdump",
),
tool_path(
name = "strip",
path = "C:/MinGW/bin/strip",
),
]
features = [
feature(
name = "default_linker_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = ([
flag_group(
flags = [
"-lstdc++",
],
),
]),
),
],
),
]
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = "local",
host_system_name = "local",
target_system_name = "local",
target_cpu = "x64_windows",
target_libc = "unknown",
compiler = "gcc",
abi_version = "unknown",
abi_libc_version = "unknown",
tool_paths = tool_paths,
artifact_name_patterns = [
artifact_name_pattern(
category_name = "executable",
prefix = "",
extension = ".exe",
),
],
)
cc_toolchain_config = rule(
implementation = _impl,
attrs = {
"builtin_include_directories": attr.string_list(
doc = "Default include paths",
),
},
provides = [CcToolchainConfigInfo],
)
```
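A sketch of how this rule might be instantiated from a BUILD file in the same package, feeding it the MinGW include paths from the helper defined above; the target name and MinGW version string are assumptions.
```python
# Assumed BUILD usage: instantiate the toolchain config with the MinGW
# include-directory helper defined above. The version string is illustrative.
load(":cc_toolchain_config.bzl", "cc_toolchain_config", "mingw_directories")

cc_toolchain_config(
    name = "windows_mingw_toolchain_config",
    builtin_include_directories = mingw_directories("9.2.0"),
)
```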
|
{
"source": "jeffreymanzione/website",
"score": 3
}
|
#### File: server/api/elections.py
```python
import json
import logging
from flask import Response, request
from .api import Api
TABLE_NAME = 'ElectionPredictions'
LIST_QUERY = 'SELECT * FROM {table_name};'.format(table_name=TABLE_NAME)
class ElectionsApi(Api):
def __init__(self, app, database):
super().__init__(app, database)
self.register_method(self._list_election_predictions, 'list')
def _list_election_predictions(self):
return self.select(LIST_QUERY)
```
|
{
"source": "jeffreymcallister/oneAPI-spec",
"score": 2
}
|
#### File: oneDAL/dalapi/generator.py
```python
from typing import (List, Text)
from docutils.statemachine import ViewList
class RstBuilder(object):
def __init__(self, placeholder: ViewList, filename: Text, lineno: int):
self._rst_list = placeholder
self._filename = filename
self._lineno = lineno
def add_class(self, kind: str, declaration: str, namespace: str = None, level=0):
self._add_name(kind, declaration, namespace, level)
def add_typedef(self, declaration: str, namespace: str = None, level=0):
self._add_name('type', declaration, namespace, level)
def add_function(self, declaration: str, namespace: str = None, level=0):
self._add_name('function', declaration, namespace, level)
def add_param(self, tag: str, name: str, doc_text: str, level=0):
assert tag in ['param', 'tparam']
assert name
assert doc_text
formatted = self._format_text(doc_text)
self(f':{tag} {name}: {formatted}', level)
def add_member(self, declaration: str, level=0):
assert declaration
self(f'.. cpp:member:: {declaration}', level)
self.add_blank_like()
def add_doc(self, doc_text: str, level=0):
assert doc_text
self(self._format_text(doc_text), level)
self.add_blank_like()
def add_code_block(self, listing: List[Text], level=0):
assert listing is not None
self(f'.. code-block:: cpp', level)
self.add_blank_like()
for line in listing:
self(line, level + 1)
self.add_blank_like()
def add_blank_like(self):
self.add()
def add(self, string: str = '', level: int = 0):
self._rst_list.append(' ' * level * 3 + string, self._filename, self._lineno)
# TODO: Remove
def __call__(self, string: str = '', level:int = 0):
self._rst_list.append(' ' * level * 3 + string, self._filename, self._lineno)
def _add_name(self, tag: str, declaration: str, namespace: str = None, level=0):
assert declaration
if namespace:
self(f'.. cpp:namespace:: {namespace}', level)
self.add_blank_like()
self(f'.. cpp:{tag}:: {declaration}', level)
self.add_blank_like()
def _format_text(self, text):
text = text.strip()
if not (text.endswith('.') or text.endswith('|')):
text += '.'
return text
```
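A minimal sketch of driving `RstBuilder` outside of a Sphinx extension, assuming the class above is in scope; the filename, line number, and the C++ names passed in are placeholders, not values from a real documentation run.
```python
# Usage sketch, assuming RstBuilder (above) is importable; filename/lineno
# and the C++ declarations are placeholders.
from docutils.statemachine import ViewList

placeholder = ViewList()
rst = RstBuilder(placeholder, filename='example.rst', lineno=1)
rst.add_class('class', 'descriptor', namespace='oneapi::dal', level=0)
rst.add_doc('Base template for algorithm descriptors', level=1)
rst.add_param('tparam', 'Float', 'The floating-point type', level=1)

for line in placeholder:
    print(repr(line))
```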
|
{
"source": "jeffreymelvin-wf/aws-lambda-fsm-workflows",
"score": 2
}
|
#### File: examples/encrypt_s3/actions.py
```python
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.bucket import Bucket
# application imports
from aws_lambda_fsm.action import Action
class CheckIfFileExists(Action):
"""
Checks if the file exists.
"""
def execute(self, context, obj):
connection = S3Connection()
bucket = Bucket(connection=connection, name=context['bucket'])
key = Key(bucket=bucket, name=context['name'])
if key.exists():
return 'done'
else:
return 'missing'
class EncryptFile(Action):
"""
Downloads, Encrypts (with a no-op) and Uploads a file.
"""
def execute(self, context, obj):
connection = S3Connection()
bucket = Bucket(connection=connection, name=context['bucket'])
key1 = Key(bucket=bucket, name=context['name'])
key2 = Key(bucket=bucket, name=context['name'] + '.encrypted')
key2.set_contents_from_string(key1.get_contents_as_string())
return 'done'
class RemoveOldFile(Action):
"""
Removes the unencrypted file.
"""
def execute(self, context, obj):
connection = S3Connection()
bucket = Bucket(connection=connection, name=context['bucket'])
key = Key(bucket=bucket, name=context['name'])
key.delete()
return 'done'
```
|
{
"source": "jeffreymew/StarterCode",
"score": 3
}
|
#### File: StarterCode/api/api.py
```python
import os
from flask import Flask, request, render_template, jsonify
from api import app
from sqlalchemy.exc import IntegrityError
from api.task import Task
@app.route("/", methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<path:path>', methods=['GET'])
def any_root_path(path):
return render_template('index.html')
@app.route("/api/v1/get_all_tasks")
def get_all_tasks():
allTasks = Task.get_all_tasks()
return jsonify(tasks=allTasks)
@app.route("/api/v1/add_task", methods=['POST'])
def add_task():
incoming = request.get_json()
task = incoming.get('task')
try:
Task.add_task(task)
return jsonify({'success': True}), 200
except IntegrityError:
return jsonify({'success': False}), 403
```
|
{
"source": "JeffreyMFarley/Bailiwick",
"score": 3
}
|
#### File: griot/griot/fine_tune.py
```python
import sys
import os
import csv
import griot
from operator import itemgetter
PATH_INPUT = r'../data/fine_tune.txt'
class FineTune:
def __init__(self, inputFileName=PATH_INPUT):
with open(inputFileName, 'r') as f:
reader = csv.DictReader(f, dialect=csv.excel_tab)
self.tuning = {(x['w1'], x['c1_orig']): x['c1_new'] for x in reader}
def map(self, x):
tuple = (x['w1'], x['c1'])
if tuple not in self.tuning:
return x
x['c1'] = self.tuning[tuple]
return x
def __str__(self):
return 'Fine Tune Parts of Speech'
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
if __name__ == '__main__':
singleton = FineTune()
# process
result = list(map(singleton.map, griot.iterateSource()))
griot.save(result, griot.stagingFileName)
```
#### File: Bailiwick/pyliwick/find_simple_sentences.py
```python
import os
import sys
import json
class TestSimpleSyntax:
def __init__(self):
self.reset()
def reset(self):
self.result = True
self.hasAgent = False
self.hasAction = False
self.hasDeterminer = False
def process(self, tag):
generalTag = tag[0]
self.result = self.result and generalTag in ['A', 'D', 'E', 'I', 'P', 'N', 'V', 'X']
self.hasAgent = self.hasAgent or generalTag in ['E', 'P', 'N']
self.hasAction = self.hasAction or generalTag in ['V']
self.hasDeterminer = self.hasDeterminer or generalTag in ['A', 'D']
def isValid(self):
return self.result and self.hasAction and self.hasAgent and self.hasDeterminer
class FindSimpleSentences:
def __init__(self):
pass
#--------------------------------------------------------------------------
def isSimpleSentence(self, listOfTaggedWords):
test = TestSimpleSyntax()
for w0 in listOfTaggedWords:
if 'w' in w0:
test.process(w0['t'])
else:
for t in w0['ts']:
test.process(t)
return test.isValid()
def hasMissingVVG(self, listOfTaggedWords):
for w0 in listOfTaggedWords:
if 'w' in w0 and w0['t'] == 'VVG' and w0['w'].lower() in ['taking',
'saying']:
return True
return False
#--------------------------------------------------------------------------
def run(self, inputFileName, outputFileName):
# extract
print('Loading Source')
fullCorpus = {}
with open(inputFileName, 'r') as f:
fullCorpus = json.load(f)
# filter
self.subset = {k:v for k,v in fullCorpus.items()
if self.hasMissingVVG(v) }
# load
print('Saving Results')
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
INPUT = '../Tests/Training/BrownCorpusClaws.json'
OUTPUT = '../Tests/Training/SimpleSentences.json'
if __name__ == '__main__':
# where is this script?
thisScriptDir = os.path.dirname(__file__)
# get the absolute paths
inputFileName = os.path.join(thisScriptDir, INPUT)
outputFileName = os.path.join(thisScriptDir, OUTPUT)
# run the jewels
pipeline = FindSimpleSentences()
pipeline.run(inputFileName, outputFileName)
for k in sorted(pipeline.subset):
print('"'+k+'",')
```
|
{
"source": "JeffreyMFarley/ccdb5-api",
"score": 2
}
|
#### File: complaint_search/tests/test_views_document.py
```python
from django.core.cache import cache
import mock
from complaint_search.throttling import _CCDB_UI_URL, DocumentAnonRateThrottle
from elasticsearch import TransportError
from rest_framework import status
from rest_framework.test import APITestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class DocumentTests(APITestCase):
def setUp(self):
self.orig_document_anon_rate = DocumentAnonRateThrottle.rate
# Setting rates to something really big so it doesn't affect testing
DocumentAnonRateThrottle.rate = '2000/min'
def tearDown(self):
cache.clear()
DocumentAnonRateThrottle.rate = self.orig_document_anon_rate
@mock.patch('complaint_search.es_interface.document')
def test_document__valid(self, mock_esdocument):
"""
documenting with an ID
"""
url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
mock_esdocument.return_value = 'OK'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_esdocument.assert_called_once_with("123456")
self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.document')
def test_document_with_document_anon_rate_throttle(self, mock_esdocument):
url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
mock_esdocument.return_value = 'OK'
DocumentAnonRateThrottle.rate = self.orig_document_anon_rate
limit = int(self.orig_document_anon_rate.split('/')[0])
for i in range(limit):
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('OK', response.data)
response = self.client.get(url)
self.assertEqual(
response.status_code, status.HTTP_429_TOO_MANY_REQUESTS
)
self.assertIsNotNone(response.data.get('detail'))
self.assertIn("Request was throttled", response.data.get('detail'))
self.assertEqual(limit, mock_esdocument.call_count)
self.assertEqual(5, limit)
@mock.patch('complaint_search.es_interface.document')
def test_document_with_document_ui_rate_throttle(self, mock_esdocument):
url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
mock_esdocument.return_value = 'OK'
DocumentAnonRateThrottle.rate = self.orig_document_anon_rate
limit = int(self.orig_document_anon_rate.split('/')[0])
for _ in range(limit):
response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual('OK', response.data)
response = self.client.get(url, HTTP_REFERER=_CCDB_UI_URL)
self.assertEqual(response.status_code, 200)
self.assertEqual('OK', response.data)
self.assertEqual(limit + 1, mock_esdocument.call_count)
self.assertEqual(5, limit)
@mock.patch('complaint_search.es_interface.document')
def test_document__transport_error(self, mock_esdocument):
mock_esdocument.side_effect = TransportError('N/A', "Error")
url = reverse('complaint_search:complaint', kwargs={"id": "123456"})
response = self.client.get(url)
self.assertEqual(response.status_code, 424)
self.assertDictEqual(
{"error": "There was an error calling Elasticsearch"},
response.data
)
```
#### File: complaint_search/tests/test_view_search_renderers.py
```python
import mock
from rest_framework import status
from rest_framework.test import APITestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
DEFAULT_ACCEPT = (
'text/html,application/xhtml+xml,application/xml;q=0.9,'
'image/webp,image/apng,*/*;q=0.8'
)
class SearchRendererTests(APITestCase):
@mock.patch('complaint_search.es_interface.search')
def test_search_no_format_chrome_request(self, mock_essearch):
expected = {'foo': 'bar'}
mock_essearch.return_value = expected
url = reverse('complaint_search:search')
response = self.client.get(url, HTTP_ACCEPT=DEFAULT_ACCEPT)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(expected, response.data)
self.assertEqual(response['Content-Type'], 'application/json')
@mock.patch('complaint_search.es_interface.search')
def test_search_accept_html_only(self, mock_essearch):
expected = {'foo': 'bar'}
mock_essearch.return_value = expected
accept = 'text/html'
url = reverse('complaint_search:search')
response = self.client.get(url, HTTP_ACCEPT=accept)
self.assertEqual(response.status_code, 406)
self.assertEqual(response['Content-Type'], 'application/json')
```
#### File: complaint_search/tests/test_views_suggest.py
```python
from django.conf import settings
import mock
from elasticsearch import TransportError
from rest_framework import status
from rest_framework.test import APITestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class SuggestTests(APITestCase):
def setUp(self):
pass
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_no_param(self, mock_essuggest):
"""
Suggesting with no parameters
"""
url = reverse('complaint_search:suggest')
mock_essuggest.return_value = 'OK'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_essuggest.assert_called_once_with()
self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_text__valid(self, mock_essuggest):
"""
Suggesting with no parameters
"""
url = reverse('complaint_search:suggest')
param = {"text": "Mortgage"}
mock_essuggest.return_value = 'OK'
response = self.client.get(url, param)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_essuggest.assert_called_once_with(**param)
self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_size__valid(self, mock_essuggest):
"""
Suggesting with no parameters
"""
url = reverse('complaint_search:suggest')
param = {"size": 50}
mock_essuggest.return_value = 'OK'
response = self.client.get(url, param)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_essuggest.assert_called_once_with(**param)
self.assertEqual('OK', response.data)
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_with_size__invalid_smaller_than_min_number(
self, mock_essuggest
):
url = reverse('complaint_search:suggest')
params = {"size": 0}
mock_essuggest.return_value = 'OK'
response = self.client.get(url, params)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
mock_essuggest.assert_not_called()
self.assertDictEqual(
{"size": ["Ensure this value is greater than or equal to 1."]},
response.data)
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_size__invalid_exceed_number(self, mock_essuggest):
"""
Suggesting with no parameters
"""
url = reverse('complaint_search:suggest')
param = {"size": 100001}
mock_essuggest.return_value = 'OK'
response = self.client.get(url, param)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
mock_essuggest.assert_not_called()
self.assertDictEqual(
{"size": ["Ensure this value is less than or equal to 100000."]},
response.data)
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest_cors_headers(self, mock_essuggest):
"""
Make sure the response has CORS headers in debug mode
"""
settings.DEBUG = True
url = reverse('complaint_search:suggest')
mock_essuggest.return_value = 'OK'
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.has_header('Access-Control-Allow-Origin'))
@mock.patch('complaint_search.es_interface.suggest')
def test_suggest__transport_error(self, mock_essuggest):
mock_essuggest.side_effect = TransportError('N/A', "Error")
url = reverse('complaint_search:suggest')
param = {"text": "test"}
response = self.client.get(url, param)
self.assertEqual(response.status_code, 424)
self.assertDictEqual(
{"error": "There was an error calling Elasticsearch"},
response.data
)
```
#### File: complaint_search/tests/test_views_trends.py
```python
import copy
import mock
from complaint_search.defaults import PARAMS
from rest_framework import status
from rest_framework.test import APITestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class TrendsTests(APITestCase):
def setUp(self):
pass
def buildDefaultParams(self, overrides):
params = copy.deepcopy(PARAMS)
params.update(overrides)
return params
@mock.patch('complaint_search.es_interface.trends')
def test_trends_no_required_params__fails(self, mock_essearch):
"""
Searching with no parameters
"""
url = reverse('complaint_search:trends')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
mock_essearch.assert_not_called()
@mock.patch('complaint_search.es_interface.trends')
def test_trends_default_params__fails(self, mock_essearch):
"""
Searching with no required trends parameters
"""
url = reverse('complaint_search:trends')
response = self.client.get(url, **self.buildDefaultParams({}))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
mock_essearch.assert_not_called()
@mock.patch('complaint_search.es_interface.trends')
def test_trends_invalid_params__fails(self, mock_essearch):
"""
Searching with invalid required trends parameters
"""
url = reverse('complaint_search:trends')
params = {
'lens': 'foo'
}
response = self.client.get(url, params)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
mock_essearch.assert_not_called()
self.assertTrue('lens' in response.data.keys())
self.assertTrue('trend_interval' in response.data.keys())
@mock.patch('complaint_search.es_interface.trends')
def test_trends_default_params__passes(self, mock_essearch):
"""
Searching with default but valid required trends parameters
"""
url = reverse('complaint_search:trends')
params = {
'lens': 'overview',
'trend_interval': 'month'
}
mock_essearch.return_value = 'results'
response = self.client.get(url, params)
self.assertEqual(response.status_code, status.HTTP_200_OK)
mock_essearch.assert_called_once()
```
|
{
"source": "JeffreyMFarley/ccdb-data-pipeline",
"score": 3
}
|
#### File: ccdb-data-pipeline/common/csv2json.py
```python
import csv
import errno
import io
import json
import sys
from itertools import count
import configargparse
# -----------------------------------------------------------------------------
# Unicode CSV Iterator
# -----------------------------------------------------------------------------
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(unicode_csv_data, dialect=dialect, **kwargs)
for row in csv_reader: # pragma: no branch
yield row
# -----------------------------------------------------------------------------
# Formatter Functions
# -----------------------------------------------------------------------------
def saveNewlineDelimitedJson(options):
fileName = options.outfile
with io.open(fileName, 'w', encoding='utf-8', newline='') as f:
try:
for i in count(): # pragma: no branch
row = yield i
f.write(json.dumps(row))
f.write('\n')
finally:
pass
def saveStandardJson(options):
fileName = options.outfile
sep = '\n'
with io.open(fileName, 'w', encoding='utf-8', newline='') as f:
f.write('[')
try:
for i in count(): # pragma: no branch
row = yield i
f.write(sep)
f.write(json.dumps(row))
sep = ',\n'
finally:
f.write('\n]')
# -----------------------------------------------------------------------------
# Process
# -----------------------------------------------------------------------------
def run(options):
# Load the column names
ovr_columns = []
if options.fields:
with open(options.fields) as f:
ovr_columns = [line.strip() for line in f]
formatters = {
'JSON': saveStandardJson,
'NDJSON': saveNewlineDelimitedJson
}
formatter = formatters[options.jsonFormat](options)
i = next(formatter)
with io.open(options.infile, 'r', encoding='utf-8') as f:
parser = unicode_csv_reader(f)
src_columns = next(parser)
columns = src_columns
if len(ovr_columns):
if len(ovr_columns) == len(src_columns):
columns = ovr_columns
else:
sys.stderr.write(
'{} has {} fields. Expected {}\n'.format(
options.fields, len(ovr_columns), len(src_columns)
)
)
sys.stderr.flush()
sys.exit(errno.ENOENT)
for row in parser: # pragma: no branch
obj = dict(zip(columns, row))
i = formatter.send(obj)
if (i % options.heartbeat) == 0:
print(' {:,d} rows processed'.format(i))
if options.limit and i >= options.limit:
break
formatter.close()
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
def build_arg_parser():
p = configargparse.ArgParser(prog='csv2json',
description='converts a CSV to JSON',
ignore_unknown_config_file_keys=True)
p.add('--fields', dest='fields', default=None,
help='The columns names to use instead of the source names')
p.add('--limit', '-n', dest='limit', type=int, default=0,
help='Stop at this many records')
p.add('--json-format', dest='jsonFormat',
choices=['JSON', 'NDJSON'], default='JSON',
help='The output format')
p.add('--heartbeat', dest='heartbeat', type=int, default=10000,
help='Indicate rows are being processed every N records')
p.add('infile', help="The name of the CSV file")
p.add('outfile', help="The name of the JSON file to write")
return p
def main():
p = build_arg_parser()
cfg = p.parse_args()
run(cfg)
if __name__ == '__main__':
main()
```
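An example of invoking the converter programmatically rather than from the command line, assuming the module is importable as `common.csv2json` per the repo layout; both file names are placeholders.
```python
# Programmatic invocation sketch; file names are placeholders and the import
# path assumes the repo layout (common/csv2json.py).
from common.csv2json import build_arg_parser, run

options = build_arg_parser().parse_args([
    '--json-format', 'NDJSON',
    '--limit', '100',
    'complaints.csv',      # input CSV with a header row
    'complaints.ndjson',   # output file, one JSON object per line
])
run(options)
```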
#### File: common/tests/test_es_proxy.py
```python
import unittest
from unittest.mock import patch
import common.es_proxy as sut
from common.tests import make_configargs
# -------------------------------------------------------------------------
# Test Classes
class TestEdges(unittest.TestCase):
@patch('common.es_proxy.Elasticsearch')
def test_get_es_connection(self, mock_es):
options = make_configargs({
'es_host': 'www.example.org',
'es_port': '9222',
'es_username': 'king',
'es_password': '<PASSWORD>',
})
mock_es.return_value = 'passed'
actual = sut.get_es_connection(options)
self.assertEqual(actual, 'passed')
mock_es.assert_called_once_with(
'http://www.example.org:9222',
http_auth=('king', 'kong'),
timeout=1000,
user_ssl=True)
```
#### File: complaints/ccdb/build_metadata_javascript.py
```python
import errno
import io
import json
import sys
import configargparse
VALID_ATTR = ['metadata_timestamp', 'qas_timestamp', 'total_count']
# -----------------------------------------------------------------------------
# Process
# -----------------------------------------------------------------------------
def load_metadata(filename):
with io.open(filename, 'r', encoding='utf-8') as f:
try:
jo = json.load(f)
except Exception:
print("'{0}' is not a valid JSON document.".format(filename),
file=sys.stderr)
sys.exit(errno.ENOENT)
return jo
def save_javascript(metadata, filename):
try:
with io.open(filename, 'w', encoding='utf-8') as f:
f.write('var complaint_public_metadata = ')
json.dump(metadata, f, indent=2, sort_keys=True)
except Exception:
print("Unable to write '{0}'".format(filename), file=sys.stderr)
sys.exit(errno.EIO)
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
def build_arg_parser():
p = configargparse.ArgParser(
prog='build_metadata_javascript',
description='makes metadata that can be statically loaded in a JS app'
)
p.add('infile', help="The name of the public metadata file")
p.add('outfile', help="The name of the Javascript file")
return p
def main():
p = build_arg_parser()
options = p.parse_args()
save_javascript(load_metadata(options.infile), options.outfile)
if __name__ == '__main__':
main()
```
#### File: ccdb/tests/test_index_ccdb.py
```python
import unittest
from unittest.mock import ANY, Mock, patch
import complaints.ccdb.index_ccdb as sut
from common.tests import build_argv, captured_output
from freezegun import freeze_time
def toAbsolute(relative):
import os
# where is _this_ file?
thisScriptDir = os.path.dirname(__file__)
return os.path.join(thisScriptDir, relative)
# ------------------------------------------------------------------------------
# Classes
# ------------------------------------------------------------------------------
class TestIndexCCDB(unittest.TestCase):
def setUp(self):
self.logger = Mock()
def test_update_indexes_in_alias__happy_path(self):
es = Mock()
es.indices.exists_alias.side_effect = [True, True]
expected = {
'actions': [
{'remove': {'index': 'baz', 'alias': 'foo'}},
{'add': {'index': 'bar', 'alias': 'foo'}}
]
}
sut.update_indexes_in_alias(es, self.logger, 'foo', 'baz', 'bar')
es.indices.update_aliases.assert_called_once_with(body=expected)
def test_update_indexes_in_alias__no_aliases(self):
es = Mock()
es.indices.exists_alias.side_effect = [False]
sut.update_indexes_in_alias(es, self.logger, 'foo', 'baz', 'bar')
es.indices.put_alias.assert_called_once_with(name='foo', index='bar')
def test_update_indexes_in_alias__no_backup(self):
es = Mock()
es.indices.exists_alias.side_effect = [True, False]
expected = {
'actions': [
{'add': {'index': 'bar', 'alias': 'foo'}}
]
}
sut.update_indexes_in_alias(es, self.logger, 'foo', 'baz', 'bar')
es.indices.update_aliases.assert_called_once_with(body=expected)
# -------------------------------------------------------------------------
def test_swap_backup_index__happy_path(self):
es = Mock()
es.indices.exists_alias.side_effect = [True, True]
a, b = sut.swap_backup_index(es, self.logger, 'foo', 'bar', 'baz')
self.assertEqual(a, 'baz')
self.assertEqual(b, 'bar')
def test_swap_backup_index__no_alias(self):
es = Mock()
es.indices.exists_alias.side_effect = [False]
a, b = sut.swap_backup_index(es, self.logger, 'foo', 'bar', 'baz')
self.assertEqual(a, 'bar')
self.assertEqual(b, 'baz')
def test_swap_backup_index__no_primary(self):
es = Mock()
es.indices.exists_alias.side_effect = [True, False]
a, b = sut.swap_backup_index(es, self.logger, 'foo', 'bar', 'baz')
self.assertEqual(a, 'bar')
self.assertEqual(b, 'baz')
# -------------------------------------------------------------------------
def test_load_json_happy(self):
fileName = toAbsolute('../../settings.json')
actual = sut.load_json(self.logger, fileName)
self.assertIsNotNone(actual)
def test_load_json_fail(self):
fileName = toAbsolute('../../tests/__fixtures__/ccdb.ndjson')
with self.assertRaises(SystemExit):
sut.load_json(self.logger, fileName)
# -------------------------------------------------------------------------
def test_yield_chunked_docs(self):
def mock_data_fn(data):
for x in data:
yield x
data = ['foo', 'bar', 'baz', 'qaz', 'quux']
gen = sut.yield_chunked_docs(mock_data_fn, data, 2)
actual = next(gen)
self.assertEqual(actual, ['foo', 'bar'])
actual = next(gen)
self.assertEqual(actual, ['baz', 'qaz'])
rest = [x for x in gen]
self.assertEqual(rest, [['quux']])
class TestMain(unittest.TestCase):
def setUp(self):
self.optional = [
'--es-host', 'www.example.com',
'--index-name', 'onion',
'--settings', toAbsolute('../../settings.json'),
'--mapping', toAbsolute('../ccdb_mapping.json'),
'--dataset', toAbsolute('__fixtures__/from_s3.ndjson')
]
self.actual_file = toAbsolute('__fixtures__/actual.json')
def tearDown(self):
import os
try:
os.remove(self.actual_file)
except Exception:
pass
def capture_actions(self, *args, **kwargs):
import io
import json
with io.open(self.actual_file, mode='w', encoding='utf-8') as f:
for action in kwargs['actions']:
f.write(json.dumps(action, ensure_ascii=False))
f.write('\n')
return (1001, 99)
def validate_actions(self, expected_file):
import io
import json
self.maxDiff = None
with io.open(self.actual_file, 'r', encoding='utf-8') as f:
actuals = [line for line in f]
with io.open(expected_file, 'r', encoding='utf-8') as f:
expecteds = [line for line in f]
assert len(actuals) == len(expecteds)
for i, act in enumerate(actuals):
actual = json.loads(act)
expected = json.loads(expecteds[i])
self.assertDictEqual(expected, actual)
# --------------------------------------------------------------------------
# Tests
@patch('complaints.ccdb.index_ccdb.bulk')
@patch('complaints.ccdb.index_ccdb.get_es_connection')
@patch('complaints.ccdb.index_ccdb.setup_logging')
def test_main_happy_path_socrata(self, logger_setup, es_conn, bulk):
logger = Mock()
logger_setup.return_value = logger
es = Mock()
es.indices.exists_alias.return_value = False
es_conn.return_value = es
bulk.side_effect = self.capture_actions
self.optional.insert(0, '--dump-config')
self.optional[-1] = toAbsolute('../../tests/__fixtures__/ccdb.ndjson')
argv = build_argv(self.optional)
with captured_output(argv) as (out, err):
sut.main()
# Expected index create calls
es.indices.create.assert_any_call(index='onion-v1', ignore=400)
es.indices.create.assert_any_call(index='onion-v2', ignore=400)
es.indices.create.assert_any_call(index='onion-v1', body=ANY)
# Expected index put_alias calls
es.indices.put_alias.assert_called_once_with(name='onion',
index='onion-v1')
# Expected index delete calls
es.indices.delete.assert_called_once_with(index='onion-v1')
# Bulk
bulk.assert_called_once_with(
es, actions=ANY, index='onion-v1', doc_type='complaint',
chunk_size=20000, refresh=True
)
self.validate_actions(toAbsolute('__fixtures__/exp_socrata.ndjson'))
logger.info.assert_any_call('Running index_ccdb with')
logger.info.assert_any_call('Deleting and recreating onion-v1')
logger.info.assert_any_call(
'Loading data into onion-v1 with doc_type complaint'
)
logger.info.assert_any_call('chunk retrieved, now bulk load')
logger.info.assert_any_call('1,001 records indexed, total = 1,001')
logger.info.assert_any_call('Adding alias onion for index onion-v1')
@freeze_time("2019-09-09")
@patch('complaints.ccdb.index_ccdb.format_timestamp_local')
@patch('complaints.ccdb.index_ccdb.bulk')
@patch('complaints.ccdb.index_ccdb.get_es_connection')
@patch('complaints.ccdb.index_ccdb.setup_logging')
def test_main_happy_path_s3(self, logger_setup, es_conn, bulk, local_time):
logger = Mock()
logger_setup.return_value = logger
es = Mock()
es.indices.exists_alias.return_value = False
es_conn.return_value = es
bulk.side_effect = self.capture_actions
# GMT: Monday, September 9, 2019 4:00:00 AM
# EDT: Monday, September 9, 2019 12:00:00 AM
local_time.return_value = 1568001600
self.optional[-1] = toAbsolute('__fixtures__/from_s3.ndjson')
self.optional.append('--metadata')
self.optional.append(toAbsolute('__fixtures__/metadata.json'))
argv = build_argv(self.optional)
with captured_output(argv) as (out, err):
sut.main()
# Expected index create calls
es.indices.create.assert_any_call(index='onion-v1', ignore=400)
es.indices.create.assert_any_call(index='onion-v2', ignore=400)
es.indices.create.assert_any_call(index='onion-v1', body=ANY)
# Expected index put_alias calls
es.indices.put_alias.assert_called_once_with(name='onion',
index='onion-v1')
# Expected index delete calls
es.indices.delete.assert_called_once_with(index='onion-v1')
# Bulk
bulk.assert_called_once_with(
es, actions=ANY, index='onion-v1', doc_type='complaint',
chunk_size=20000, refresh=True
)
self.validate_actions(toAbsolute('__fixtures__/exp_s3.ndjson'))
logger.info.assert_any_call('Deleting and recreating onion-v1')
logger.info.assert_any_call(
'Loading data into onion-v1 with doc_type complaint'
)
logger.info.assert_any_call('chunk retrieved, now bulk load')
logger.info.assert_any_call('1,001 records indexed, total = 1,001')
logger.info.assert_any_call('Adding alias onion for index onion-v1')
@patch('complaints.ccdb.index_ccdb.bulk')
@patch('complaints.ccdb.index_ccdb.get_es_connection')
@patch('complaints.ccdb.index_ccdb.setup_logging')
def test_main_transport_error(self, logger_setup, es_conn, bulk):
from elasticsearch import TransportError
logger = Mock()
logger_setup.return_value = logger
es = Mock()
es.indices.exists_alias.return_value = False
es_conn.return_value = es
bulk.side_effect = TransportError(404, 'oops')
argv = build_argv(self.optional)
with captured_output(argv) as (out, err):
with self.assertRaises(SystemExit):
sut.main()
# Rollback
es.indices.put_alias.assert_called_once_with(name='onion',
index='onion-v2')
self.assertEqual(logger.error.call_count, 1)
```
#### File: ccdb/tests/test_verify_s3.py
```python
import os
import unittest
from unittest.mock import Mock, patch
import complaints.ccdb.verify_s3 as sut
from common.tests import build_argv, captured_output, make_configargs
def toAbsolute(relative):
# where is _this_ file?
thisScriptDir = os.path.dirname(__file__)
return os.path.join(thisScriptDir, relative)
# ------------------------------------------------------------------------------
# Classes
# ------------------------------------------------------------------------------
class TestMain(unittest.TestCase):
def setUp(self):
self.optional = [
'--s3-bucket', 'foo',
'--s3-folder', 'bar'
]
self.json_size_file = toAbsolute('__fixtures__/prev_json_size.txt')
self.cache_size_file = toAbsolute('__fixtures__/prev_cache_size.txt')
self.positional = [
'json_data.json',
self.json_size_file,
self.cache_size_file
]
def tearDown(self):
with open(self.json_size_file, 'w+') as f:
f.write(str(0))
with open(self.cache_size_file, 'w+') as f:
f.write(str(0))
@patch('complaints.ccdb.verify_s3.boto3')
def test_verify_happy_path(self, boto3):
dataset = make_configargs({
'content_length': 180
})
bucket = Mock()
bucket.Object.return_value = dataset
s3 = Mock()
s3.Bucket.return_value = bucket
boto3.resource.return_value = s3
self.optional.insert(0, '--dump-config')
argv = build_argv(self.optional, self.positional)
with captured_output(argv) as (out, err):
sut.main()
# assert calls
boto3.resource.assert_called_once_with('s3')
s3.Bucket.assert_called_once_with('foo')
# assert file size update
with open(self.json_size_file, 'r') as f:
prev_json_size = int(f.read())
self.assertTrue(prev_json_size == 180)
# assert cache size update
with open(self.cache_size_file, 'r') as f:
prev_cache_size = int(f.read())
self.assertTrue(prev_cache_size != 0)
@patch('complaints.ccdb.verify_s3.boto3')
def test_verify_file_verify_failure(self, boto3):
dataset = make_configargs({
'content_length': -1
})
bucket = Mock()
bucket.Object.return_value = dataset
s3 = Mock()
s3.Bucket.return_value = bucket
boto3.resource.return_value = s3
with self.assertRaises(SystemExit) as ex:
argv = build_argv(self.optional, self.positional)
with captured_output(argv) as (out, err):
sut.main()
# assert calls
boto3.resource.assert_called_once_with('s3')
s3.Bucket.assert_called_once_with('foo')
# assert exit code
self.assertEqual(ex.exception.code, 2)
@patch('complaints.ccdb.verify_s3.boto3')
def test_verify_json_file_invalid(self, boto3):
invalid_count_file = \
toAbsolute('__fixtures__/prev_json_size_invalid.txt')
test_positional = [
'json_data.json',
invalid_count_file,
self.cache_size_file
]
dataset = make_configargs({
'content_length': 1
})
bucket = Mock()
bucket.Object.return_value = dataset
s3 = Mock()
s3.Bucket.return_value = bucket
boto3.resource.return_value = s3
argv = build_argv(self.optional, test_positional)
with captured_output(argv) as (out, err):
sut.main()
# assert json size update
with open(invalid_count_file, 'r') as f:
prev_json_size = int(f.read())
self.assertTrue(prev_json_size != 0)
# Clean up
with open(invalid_count_file, 'w+') as f:
f.write(str('Invalid'))
@patch('complaints.ccdb.verify_s3.boto3')
def test_verify_cache_verify_failure(self, boto3):
dataset = make_configargs({
'content_length': 1
})
bucket = Mock()
bucket.Object.return_value = dataset
# set up intentionally large cache size
with open(self.cache_size_file, 'w+') as f:
f.write(str(99999999))
s3 = Mock()
s3.Bucket.return_value = bucket
boto3.resource.return_value = s3
with self.assertRaises(SystemExit) as ex:
argv = build_argv(self.optional, self.positional)
with captured_output(argv) as (out, err):
sut.main()
# assert calls
boto3.resource.assert_called_once_with('s3')
s3.Bucket.assert_called_once_with('foo')
# assert exit code
self.assertEqual(ex.exception.code, 2)
@patch('complaints.ccdb.verify_s3.boto3')
def test_verify_cache_file_invalid(self, boto3):
invalid_count_file = \
toAbsolute('__fixtures__/prev_cache_size_invalid.txt')
test_positional = [
'json_data.json',
self.json_size_file,
invalid_count_file
]
dataset = make_configargs({
'content_length': 1
})
bucket = Mock()
bucket.Object.return_value = dataset
s3 = Mock()
s3.Bucket.return_value = bucket
boto3.resource.return_value = s3
argv = build_argv(self.optional, test_positional)
with captured_output(argv) as (out, err):
sut.main()
# assert json size update
with open(invalid_count_file, 'r') as f:
prev_cache_size = int(f.read())
self.assertTrue(prev_cache_size != 0)
# Clean up
with open(invalid_count_file, 'w+') as f:
f.write(str('Invalid'))
```
|
{
"source": "JeffreyMFarley/mereo",
"score": 3
}
|
#### File: mereo/mereo/inventory.py
```python
from mereo import ORDER
from mereo.part import Part
class Inventory(object):
"""implemented as a fluent interface"""
# -------------------------------------------------------------------------
# Customization Methods
def __init__(self):
from collections import defaultdict
self.inv = defaultdict(dict)
def __len__(self):
return len(self.inv)
def __iter__(self):
for key in ORDER:
if key in self.inv:
for part in self.inv[key].values():
yield key, part
# -------------------------------------------------------------------------
# Properties
@property
def xMajor(self):
return [
141, 245, 349, 452, 556, 660, 764, 868, 972, 1076, 1180, 1284,
1388, 1492, 1596, 1700, 1804, 1908
]
@property
def yMajor(self):
return [
100, 204, 308, 412, 516, 620, 724, 828, 932, 1036, 1140, 1244, 1348
]
# -------------------------------------------------------------------------
# Non-fluent methods
def addPart(self, key, part):
self.inv[key][part.ID] = part
# -------------------------------------------------------------------------
# I/O
@staticmethod
def load(filename):
instance = Inventory()
import json
try:
with open(filename, 'r') as f:
d = json.load(f)
for key, parts in d.items():
for pid, a in parts.items():
tokens = pid.split('_')
part = Part(a['d'])
part.ID = tokens
instance.addPart(key, part)
except IOError:
pass
return instance
def save(self, filename):
import json
with open(filename, 'w') as f:
json.dump(self.inv, f, sort_keys=True, indent=2,
separators=(',', ': '))
return self
# -------------------------------------------------------------------------
# SVG I/O
def updateFromSvg(self, filename):
from svgpathtools import svg2paths2
_, attributes, _ = svg2paths2(filename)
for i, a in enumerate(attributes):
if 'id' in a:
tokens = a['id'].split('_')
part = Part(a['d'])
part.ID = tokens[1:]
self.addPart(tokens[0], part)
return self
# -------------------------------------------------------------------------
# Predicate Logic
def select(self, fn):
clone = Inventory()
for key, part in self:
if fn(key, part):
clone.addPart(key, part)
return clone
def selectPose(self, partList):
clone = Inventory()
for key, part in self:
if key + part.ID in partList:
clone.addPart(key, part)
return clone
# -------------------------------------------------------------------------
# Actions
def merge(self, other):
for key, part in other:
self.addPart(key, part)
return self
def quantize(self):
for _, part in self:
part.quantize()
return self
def snap(self, threshold):
from hew import KDTree
tics = 5
xcells = list(self.xMajor)
for i in range(len(self.xMajor) - 1):
dx = float(self.xMajor[i+1] - self.xMajor[i]) / tics
for xx in range(1, tics):
xcells.append(xx * dx + self.xMajor[i])
ycells = list(self.yMajor)
for i in range(len(self.yMajor) - 1):
dy = float(self.yMajor[i+1] - self.yMajor[i]) / tics
for yy in range(1, tics):
ycells.append(yy * dy + self.yMajor[i])
pairs = []
for x in xcells:
for y in ycells:
pairs.append(([x, y], []))
tree = KDTree(pairs)
for key, part in self:
part.snap(tree, threshold)
return self
def toConsole(self):
for key in ORDER:
if key in self.inv:
print(key)
for part in self.inv[key].values():
part.toConsole()
return self
def translate(self, x, y):
for _, part in self:
part.translate(x, y)
return self
```
#### File: mereo/mereo/__main__.py
```python
import os
from mereo.in_betweens import generateInBetweens
from mereo.inventory import Inventory
from mereo.svg import Svg
# -----------------------------------------------------------------------------
# I/O
def fullPath(fileName):
return os.path.join('/Users/farleyj/Desktop/Bernard/', fileName)
# -----------------------------------------------------------------------------
# Main
pose_neutral = [
'foot-left',
'foot-right',
'hand-left_z270',
'hand-right_z90',
'hips',
'lower-arm-left_z270',
'lower-arm-right_z90',
'lower-leg-left',
'lower-leg-right',
'shoulder-left',
'shoulder-right',
'trunk',
'upper-arm-left_z270',
'upper-arm-right_z90',
'upper-leg-left',
'upper-leg-right'
]
pose_z0 = [
'foot-left',
'foot-right',
'hand-left_y90_splayed',
'hand-right_z90',
'hips',
'lower-arm-left_y90',
'lower-arm-right_z90',
'lower-leg-left',
'lower-leg-right',
'shoulder-left_y90',
'shoulder-right',
'trunk',
'upper-arm-left_y90',
'upper-arm-right_z90',
'upper-leg-left',
'upper-leg-right'
]
pose_z270 = [
'foot-left_z270',
'hips_z270',
'lower-arm-left_z180',
'lower-leg-left_z270',
'shoulder-left_z270',
'trunk_z270',
'upper-arm-left_z180',
'upper-leg-left_z270'
]
def anatomical(key, part):
if part['y'] == 0 and part['z'] == 0:
return True
return False
if __name__ == "__main__":
inventoryPath = fullPath('inventory.json')
inv = Inventory().load(inventoryPath).\
selectPose(pose_neutral + pose_z270)
parts = generateInBetweens(
inv.selectPose(pose_neutral), inv.selectPose(pose_z270), 1
)
inv.merge(parts)
svg = Svg(inv).showGrid().\
showParts().\
write(fullPath('foo.svg'))
# inv.snap(8)
# svg.clear().showGrid().\
# showParts().\
# write(fullPath('bar.svg'))
# inv.save(fullPath('tighten.json'))
```
#### File: mereo/mereo/svg.py
```python
from mereo import ORDER
from svgpathtools import wsvg, parse_path
def encodeColor(key, part):
rgb = [255, 24, 24]
rgb[0] -= ORDER.index(key) + 1
rgb[1] -= part['y'] // 15  # integer division keeps the channel an int for hex formatting
rgb[2] -= part['z'] // 15
asHex = '#{0:02x}{1:02x}{2:02x}'.format(*rgb)
return asHex
class Svg(object):
"""implemented as a fluent interface"""
# -------------------------------------------------------------------------
# Customization Methods
def __init__(self, inventory):
self.inventory = inventory
self.clear()
# -------------------------------------------------------------------------
# Non-fluent methods
def _add(self, pathAsString, attribute):
self.paths.append(parse_path(pathAsString))
self.attributes.append(attribute)
def _tryGetGrids(self):
return self.inventory.xMajor, self.inventory.yMajor
# -------------------------------------------------------------------------
# Fluent Methods
def clear(self):
self.paths = []
self.attributes = []
self.svg_attributes = {
'size': ('2000', '1800')
}
return self
def showGrid(self):
majorStyle = {
'stroke-width': 2,
'stroke': '#999999',
'fill': 'none',
'opacity': .3
}
minorStyle = {
'stroke-width': 1,
'stroke': '#999999',
'fill': 'none',
'opacity': .3
}
xMajor, yMajor = self._tryGetGrids()
tics = 5
s = ''
for x in xMajor:
s += 'M {0},{1} L{0},{2} '.format(x, yMajor[0], yMajor[-1])
for y in yMajor:
s += 'M {1},{0} L{2},{0} '.format(y, xMajor[0], xMajor[-1])
self._add(s, majorStyle)
s = ''
for i in range(len(xMajor) - 1):
dx = float(xMajor[i+1] - xMajor[i]) / tics
for xx in range(1, tics):
s += 'M {0},{1} L{0},{2} '.format(
xx * dx + xMajor[i], yMajor[0], yMajor[-1]
)
for i in range(len(yMajor) - 1):
dy = float(yMajor[i+1] - yMajor[i]) / tics
for yy in range(1, tics):
s += 'M {1},{0} L{2},{0} '.format(
yy * dy + yMajor[i], xMajor[0], xMajor[-1]
)
self._add(s, minorStyle)
return self
def showBoundingBoxes(self):
style = {
'stroke-width': 2,
'stroke': '#0000ff',
'fill': 'none',
'opacity': .5
}
for key, part in self.inventory:
path = parse_path(part['d'])
x0, x1, y0, y1 = path.bbox()
s = 'M{0},{1} L{0},{3} L{2},{3} L{2},{1} Z'.format(x0, y0, x1, y1)
self._add(s, style)
return self
def showParts(self):
for key, part in self.inventory:
att = {
'id': key + part.ID,
'stroke-width': 2,
'stroke': encodeColor(key, part),
'fill': 'none',
'opacity': 1
}
self._add(part['d'], att)
return self
def write(self, filename):
wsvg(self.paths,
attributes=self.attributes,
svg_attributes=self.svg_attributes,
filename=filename)
return self
```
|
{
"source": "JeffreyMFarley/wand",
"score": 3
}
|
#### File: JeffreyMFarley/wand/assertions.py
```python
import deep
from wand.normalizer import normalizeResponse
def assertRequestsEqual(expected, actual):
diff = deep.diff(actual, expected)
if diff:
diff.print_full()
raise AssertionError("Requests do not match")
def assertResponsesEqual(expected, actual):
e = normalizeResponse(expected)
a = normalizeResponse(actual)
diff = deep.diff(a, e)
if diff:
diff.print_full()
raise AssertionError("Responses do not match")
```
#### File: management/commands/passthrough.py
```python
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = '<test_path>'
help = 'Runs tests against Elasticsearch without validation'
def handle(self, *args, **options):
if(len(args) == 1):
settings.NOSE_ARGS = settings.NOSE_ARGS + [args[0]]
settings.ES_HOST = settings.DEV_ES_HOST
settings.WAND_CONFIGURATION['mode'] = 'passthrough'
call_command('test')
```
|
{
"source": "jeffreymkabot/adventofcode2018",
"score": 3
}
|
#### File: adventofcode2018/day12/part1.py
```python
import sys
import os.path
import re
from collections import defaultdict
from typing import List, DefaultDict, Iterator
initial_state_re = re.compile(r"initial state: ([.#]+)")
rule_re = re.compile(r"([.#]+) => ([.#])")
State = DefaultDict[int, str]
"""Represent a configuration of plants by mapping index to "#" if the pot has a plant, else ".".
"""
Rules = DefaultDict[str, str]
"""Represent the generation rules by mapping a sequence of five pots
to "#" if the center pot has a plant, else ".".
"""
def parse_initial_state(line: str) -> State:
initial_state: State = defaultdict(lambda: ".")
match = initial_state_re.match(line)
if not match:
raise ValueError
for idx, c in enumerate(match.group(1)):
initial_state[idx] = c
return initial_state
def parse_rules(lines: List[str]) -> Rules:
rules: Rules = defaultdict(lambda: ".")
for line in lines:
match = rule_re.search(line)
if not match:
raise ValueError
rules[match.group(1)] = match.group(2)
return rules
def simulate(initial_state: State, rules: Rules) -> Iterator[State]:
"""Produce an iterator yielding successive generations of plants.
"""
prev_state = initial_state.copy()
while True:
state = prev_state.copy()
left = min(k for k in state.keys() if state[k] == "#")
right = max(k for k in state.keys() if state[k] == "#")
for pot in range(left - 2, right + 3):
sequence = "".join(prev_state[p] for p in range(pot - 2, pot + 3))
state[pot] = rules[sequence]
yield state
prev_state = state
def sum_state(state: State) -> int:
return sum(p for p in state.keys() if state[p] == "#")
def main():
file_name = os.path.dirname(__file__) + "/input.txt"
if len(sys.argv) >= 2:
file_name = sys.argv[1]
with open(file_name, "rU") as f:
lines = f.read().strip().split("\n")
initial_state = parse_initial_state(lines[0])
rules = parse_rules(lines[2:])
sim = simulate(initial_state, rules)
for x in range(20):
gen = next(sim)
result = sum_state(gen)
print(result)
if __name__ == "__main__":
main()
```
#### File: adventofcode2018/day12/part2.py
```python
import sys
import os.path
from part1 import State, parse_initial_state, parse_rules, simulate, sum_state
from typing import List, Tuple
from statistics import mean, variance
Sample = Tuple[int, int]
def hash_state(state: State) -> str:
"""Create a string representing the pattern of plants
**starting at the first plant and ending at the last plant**.
"""
left = min(k for k in state.keys() if state[k] == "#")
right = max(k for k in state.keys() if state[k] == "#")
order = [p for p in sorted(state.keys()) if left <= p <= right]
return "".join(state[pot] for pot in order)
def linear_regression(samples: List[Sample]) -> Tuple[float, float]:
"""Linear regression
https://en.wikipedia.org/wiki/Simple_linear_regression
"""
xs = [s[0] for s in samples]
ys = [s[1] for s in samples]
x_mean = mean(xs)
y_mean = mean(ys)
cov = sum((x - x_mean) * (y - y_mean) for (x, y) in samples)
var = sum((x - x_mean) ** 2 for x in xs)
coeff = cov / var
intercept = y_mean - coeff * x_mean
return (coeff, intercept)
def main():
file_name = os.path.dirname(__file__) + "/input.txt"
if len(sys.argv) >= 2:
file_name = sys.argv[1]
with open(file_name, "rU") as f:
lines = f.read().strip().split("\n")
initial_state = parse_initial_state(lines[0])
rules = parse_rules(lines[2:])
# find where the plant pattern starts to repeat (though maybe not starting at the same index)
# sample the generation number (x) and the sum of plant pots (y)
# assume a linear relationship and extrapolate for the 50 billionth generation
found_cycle = False
hashes = []
samples = []
sim = simulate(initial_state, rules)
for x in range(1, 1000):
gen = next(sim)
h = hash_state(gen)
found_cycle = h in hashes
if not found_cycle:
hashes.append(h)
else:
samples.append((x, sum_state(gen)))
if not found_cycle:
print("No cycles")
return
coeff, intercept = linear_regression(samples)
result = int(coeff * 50_000_000_000 + intercept)
print(result)
if __name__ == "__main__":
main()
```
#### File: adventofcode2018/day7/part1.py
```python
import sys
import re
from typing import Dict, List, Tuple
edge_re = re.compile(r"Step (\w+) must be finished before step (\w+) can begin.")
def parse_edge(line: str) -> Tuple[str, str]:
match = edge_re.match(line)
if not match:
raise ValueError
return (match.group(1), match.group(2))
def is_root(vertex: str, graph: Dict[str, List[str]]):
"""A vertex is a root vertex if it has no incoming edges.
"""
return all(vertex not in x for x in graph.values())
def topological_sort(graph: Dict[str, List[str]]) -> List[str]:
"""https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
"""
out = []
roots = [A for A in graph.keys() if is_root(A, graph)]
while len(roots) > 0:
roots.sort()
A = roots.pop(0)
out.append(A)
children = graph.get(A, [])
for B in sorted(children):
children.remove(B)
if is_root(B, graph):
roots.append(B)
return out
def main():
file_name = "input.txt"
if len(sys.argv) >= 2:
file_name = sys.argv[1]
with open(file_name, "rU") as f:
lines = f.read().strip().split("\n")
edges = [parse_edge(line) for line in lines]
graph = {}
for (A, B) in edges:
# make sure there is an entry for each vertex, even if it has no children
graph[B] = graph.get(B, [])
graph[A] = graph.get(A, []) + [B]
order = topological_sort(graph)
result = "".join(order)
print(result)
if __name__ == "__main__":
main()
```
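A small check of `topological_sort`, assuming the functions above are in scope: on the example graph from the puzzle description it should yield the order `CABDFE`.
```python
# Worked example: the sample dependency graph from the puzzle description.
# Every vertex appears as a key, mirroring what main() guarantees.
sample_graph = {
    "C": ["A", "F"],
    "A": ["B", "D"],
    "B": ["E"],
    "D": ["E"],
    "F": ["E"],
    "E": [],
}
print("".join(topological_sort(sample_graph)))  # -> CABDFE
```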
#### File: adventofcode2018/day8/part1.py
```python
import sys
from collections import namedtuple
from typing import List
Node = namedtuple("Node", ["children", "metadata"])
def build_tree(data: List[int], cursor_start=0) -> Node:
cursor = cursor_start
def parse_node(data: List[int]) -> Node:
nonlocal cursor
node = Node(children=[], metadata=[])
n_children = data[cursor]
n_metadata = data[cursor + 1]
cursor = cursor + 2
for x in range(n_children):
child = parse_node(data)
node.children.append(child)
metadata = data[cursor : cursor + n_metadata]
for m in metadata:
node.metadata.append(m)
cursor = cursor + n_metadata
return node
return parse_node(data)
def sum_metadata(node: Node) -> int:
return sum(node.metadata) + sum(sum_metadata(child) for child in node.children)
def main():
file_name = "input.txt"
if len(sys.argv) >= 2:
file_name = sys.argv[1]
with open(file_name, "rU") as f:
data = [int(d) for d in f.read().strip().split(" ")]
root = build_tree(data)
result = sum_metadata(root)
print(result)
if __name__ == "__main__":
main()
```
|
{
"source": "jeffreymli/awesome_ml_utils",
"score": 3
}
|
#### File: awesome_ml_utils/src/pre_processing.py
```python
import pandas as pd
from scipy import stats
import numpy as np
import logging
def missing_values_table(df):
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
mis_val_table_ren_columns = mis_val_table.rename(
columns={0: 'Missing Values', 1: '% of Total Values'})
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values(
'% of Total Values', ascending=False).round(1)
print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_val_table_ren_columns.shape[0]) +
" columns that have missing values.")
return mis_val_table_ren_columns
class OutlierImputation:
def __init__(self, df, cont_cols, std_threshold):
self.df = df
self.std_threshold = std_threshold
self.outliers = None
if isinstance(cont_cols, str):
self.cont_cols = [cont_cols]
else:
self.cont_cols = cont_cols
def _get_outliers(self):
if isinstance(self.cont_cols, str):
self.cont_cols = [self.cont_cols]
assert set(
self.cont_cols).issubset(
self.df.columns), 'Specified columns do not exist in dataset'
self.cont_data = self.df[self.cont_cols].copy()
self.z = pd.DataFrame(np.abs(stats.zscore(self.cont_data)))
self.z.columns = self.cont_data.columns
self.outliers = self.z[self.z[self.cont_cols] > self.std_threshold]
def fit(self):
self._get_outliers()
def _calculate_transform(
self,
impute_type,
upper_bound,
lower_bound,
custom_value=None):
if upper_bound is None:
# Three standard deviations
upper_bound = 99.85
if lower_bound is None:
lower_bound = 0.15
for col in self.cont_cols:
original_df = self.df.copy()
lower = np.percentile(original_df[col], lower_bound)
upper = np.percentile(original_df[col], upper_bound)
col_name = str(col) + '_out_imp'
if impute_type == 'winsor':
print(
"Column name {0} is now called {1} with imputation".format(
str(col), col_name))
self.df.loc[:, col_name] = np.where(
self.df[col] > upper, upper, self.df[col])
self.df.loc[:, col_name] = np.where(
self.df[col_name] < lower, lower,self.df[col_name])
if impute_type == 'drop':
total_length = len(self.df[col])
self.df = self.df[self.df[col] <= upper].copy()
dropped_upper_counts = total_length - len(self.df[col].values)
print("Dropping {0} Upper Bound Values for column {1}".format(dropped_upper_counts, col))
self.df = self.df[self.df[col] >= lower].copy()
dropped_lower_counts = total_length - dropped_upper_counts - len(self.df[col])
print("Dropping {0} Lower Bound Values for column {1}".format(dropped_lower_counts, col))
print(
"We have dropped a total of {0} values from column {1} ".format(
str(dropped_lower_counts + dropped_upper_counts), col_name))
print('\n')
if impute_type == 'custom':
print(
"Column name {0} is now called {1} with imputation".format(
str(col), col_name))
self.df.loc[:, col_name] = np.where(
self.df[col] > upper, custom_value, self.df[col])
self.df.loc[:, col_name] = np.where(
self.df[col_name] < lower, custom_value,self.df[col_name])
def transform(
self,
impute_type,
custom_value=None,
upper_bound_perc=None,
lower_bound_perc=None,
**kwargs):
if impute_type == 'winsor' or impute_type is None:
print("Applying Winsor transformation.......")
if upper_bound_perc is None:
print(
"No upper bound percentile specified. Using default 3 standard deviations")
if lower_bound_perc is None:
print(
"No lower bound percentile specified. Using default 3 standard deviations")
self._calculate_transform(
impute_type, upper_bound_perc, lower_bound_perc)
if impute_type == 'drop':
print("Dropping all outliers........")
print('\n')
if len(self.cont_cols) > 1:
print("You specified more than one column.")
print("Be careful with this method. We will drop all rows containing outliers starting with the first column.....")
print("This means we may be dropping more values than we want")
print('\n')
if upper_bound_perc is None:
print(
"No upper bound percentile specified. Using default 3 standard deviations")
if lower_bound_perc is None:
print(
"No lower bound percentile specified. Using default 3 standard deviations")
self._calculate_transform(
impute_type, upper_bound_perc, lower_bound_perc)
if impute_type == 'custom':
print("Using a custom function")
if upper_bound_perc is None:
print(
"No upper bound percentile specified. Using default 3 standard deviations")
if lower_bound_perc is None:
print(
"No lower bound percentile specified. Using default 3 standard deviations")
self._calculate_transform(
impute_type,
upper_bound_perc,
lower_bound_perc,
custom_value,
**kwargs)
return self.df.copy()
def get_outlier_report(self):
report = pd.DataFrame(self.outliers.count())
report.loc[:, 'outliers_pct'] = self.outliers.count() / self.df.count()
report['mean'] = self.cont_data[self.outliers.notnull()].mean()
report['min'] = self.cont_data[self.outliers.notnull()].min()
report['max'] = self.cont_data[self.outliers.notnull()].max()
report.columns = [
'outlier_count',
'outliers_pct',
'outlier_mean',
'outlier_min',
'outlier_max']
return report
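# Illustrative usage sketch (not part of the original module); assumes a
# DataFrame `df` with a numeric column named 'price':
#   imputer = OutlierImputation(df, cont_cols='price', std_threshold=3)
#   imputer.fit()
#   df_capped = imputer.transform(impute_type='winsor')
#   report = imputer.get_outlier_report()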
```
|
{
"source": "JeffreyNederend/CFDPython",
"score": 4
}
|
#### File: lessons/personal/02_Step_2.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
return line,
# animation function. This is called sequentially
def animate(i):
x = np.linspace(0,2,gridPoints)
y = uMatrix[i,:]
line.set_data(x, y)
return line,
gridPoints = 41
dx = 2/(gridPoints-1) # Domain [0,2]
timeSteps = 20
dt = 0.025
u = np.ones(gridPoints)
u[int(0.5/dx):int(1/dx + 1)] = 2 #initial u: {u, u=2 for 0.5<=x<=1; else 1}
un = u.copy()
uMatrix = np.ones((timeSteps,gridPoints))
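# Time-march the non-linear convection equation u_t + u*u_x = 0 with the
# backward-difference (upwind) scheme:
#   u_i^{n+1} = u_i^n - u_i^n * (dt/dx) * (u_i^n - u_{i-1}^n)
# Each intermediate solution is stored in a row of uMatrix for the animation below.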
for n in range(timeSteps):
un = u.copy()
    uMatrix[n, :] = un
for i in range(1,gridPoints):
u[i] = un[i]*(1 - (dt/dx)*(un[i]-un[i-1]))
# plt.figure()
# plt.plot(np.linspace(0,2,gridPoints),u)
# plt.show()
fig = plt.figure()
ax = plt.axes(xlim=(0, 3), ylim=(0, 3))
line, = ax.plot([], [], lw=2)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=timeSteps, interval=40, blit=True)
# anim.save('02_Step_2.mp4', fps=10, extra_args=['-vcodec', 'libx264'])
plt.show()
print(uMatrix)
```
|
{
"source": "Jeffreyo3/AdventOfCode2020",
"score": 4
}
|
#### File: AdventOfCode2020/challenges/day14.py
```python
f = open("challenges\data\day14data.txt", "r")
def processData(file):
data = []
    for x in file:
        x = x.strip().replace('\n', '').split(" = ")
data.append((x[0], x[1]))
return data
# Function to convert Decimal number
# to Binary number
def decimalToBinary(n):
return bin(n).replace("0b", "")
def leadingZeros(length, bin_num):
leadingZeros = length - len(bin_num)
return "0"*leadingZeros + bin_num
def initialize(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryValue = decimalToBinary(int(c[1]))
binary36 = leadingZeros(36, binaryValue)
memory[address] = ""
for i in range(len(mask)):
if mask[i] == "X":
memory[address] += binary36[i]
else:
memory[address] += mask[i]
sum = 0
for val in memory.values():
sum += int("".join(val), 2)
return sum
"""
--- Part Two ---
For some reason, the sea port's computer system still can't communicate with your ferry's docking program. It must be using version 2 of the decoder chip!
A version 2 decoder chip doesn't modify the values being written at all. Instead, it acts as a memory address decoder. Immediately before a value is written to memory, each bit in the bitmask modifies the corresponding bit of the destination memory address in the following way:
If the bitmask bit is 0, the corresponding memory address bit is unchanged.
If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
If the bitmask bit is X, the corresponding memory address bit is floating.
A floating bit is not connected to anything and instead fluctuates unpredictably. In practice, this means the floating bits will take on all possible values, potentially causing many memory addresses to be written all at once!
For example, consider the following program:
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
When this program goes to write to memory address 42, it first applies the bitmask:
address: 000000000000000000000000000000101010 (decimal 42)
mask: 000000000000000000000000000000X1001X
result: 000000000000000000000000000000X1101X
After applying the mask, four bits are overwritten, three of which are different, and two of which are floating. Floating bits take on every possible combination of values; with two floating bits, four actual memory addresses are written:
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
000000000000000000000000000000111010 (decimal 58)
000000000000000000000000000000111011 (decimal 59)
Next, the program is about to write to memory address 26 with a different bitmask:
address: 000000000000000000000000000000011010 (decimal 26)
mask: 00000000000000000000000000000000X0XX
result: 00000000000000000000000000000001X0XX
This results in an address with three floating bits, causing writes to eight memory addresses:
000000000000000000000000000000010000 (decimal 16)
000000000000000000000000000000010001 (decimal 17)
000000000000000000000000000000010010 (decimal 18)
000000000000000000000000000000010011 (decimal 19)
000000000000000000000000000000011000 (decimal 24)
000000000000000000000000000000011001 (decimal 25)
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
The entire 36-bit address space still begins initialized to the value 0 at every address, and you still need the sum of all values left in memory at the end of the program. In this example, the sum is 208.
Execute the initialization program using an emulator for a version 2 decoder chip. What is the sum of all values left in memory after it completes?
"""
def calculateCombinations(bin_address):
combinations = []
# xCount = 0
xPositions = []
for i in range(len(bin_address)):
# find each X and add its idx to a list
if bin_address[i] == "X":
xPositions.append(i)
# xCount += 1
if len(xPositions) > 0:
for i in range(2**(len(xPositions))):
# need to generate all possible combos of 0s & 1s
# w/ leading 0s
possible = decimalToBinary(i)
while len(possible) < len(xPositions):
possible = "0"+possible
combinations.append(possible)
addresses = []
for c in combinations:
# need to insert combination[i] into binary number
# current combo associated idx is in xPositions[i]
newAddress = ""
currPos = 0
for i in range(len(bin_address)):
if currPos < len(xPositions) and i == xPositions[currPos]:
newAddress += c[currPos]
currPos += 1
else:
newAddress += bin_address[i]
addresses.append(newAddress)
return addresses
def initialize_v2(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryAddress = decimalToBinary(int(address))
binary36 = leadingZeros(36, binaryAddress)
newVal = ""
for i in range(len(mask)):
if mask[i] != "0":
newVal += mask[i]
else:
newVal += binary36[i]
addresses = calculateCombinations(newVal)
for a in addresses:
memory[a] = int(c[1])
sum = 0
for val in memory.values():
sum += val
# print(memory)
return sum
data = processData(f)
# [print(d) for d in data]
sumAllValues = initialize(data)
print("Part 1:", sumAllValues)
sumAllValuesV2 = initialize_v2(data)
print("Part 2:", sumAllValuesV2)
# binary = decimalToBinary(33323)
# binary = leadingZeros(36, binary)
# print(binary)
# combos = initialize_v2([("mask", "100X100X101011111X100000100X11010011"),
# ("mem[33323]", "349380")])
# print(combos)
```
#### File: AdventOfCode2020/challenges/day1.py
```python
import re
f = open("challenges\data\day1data.txt", "r")
def processData(file):
data = []
    for x in file:
        x = x.strip().replace('\n', '')
data.append(int(x))
return data
data = processData(f)
def findProductOfTwoNumsSummingToTarget(arr, target):
dictionary = {}
for num in arr:
difference = target - num
if difference in dictionary.keys():
return difference * num
dictionary[num] = difference
return None
result = findProductOfTwoNumsSummingToTarget(data, 2020)
print(result)
"""
Challenge 2:
The Elves in accounting are thankful for your help; one of them even offers you a starfish coin they had left over from a past vacation. They offer you a second one if you can find three numbers in your expense report that meet the same criteria.
Using the above example again, the three entries that sum to 2020 are 979, 366, and 675. Multiplying them together produces the answer, 241861950.
In your expense report, what is the product of the three entries that sum to 2020?
"""
def findProductOfThreeNumsSummingToTarget(arr, target):
arr.sort()
for i in range(0, len(arr) - 2):
first = i + 1
last = len(arr) - 1
while(first < last):
if ( arr[i] + arr[first] + arr[last] == target):
return arr[i] * arr[first] * arr[last]
elif( arr[i] + arr[first] + arr[last] < target):
first += 1
else:
last -= 1
return None
result = findProductOfThreeNumsSummingToTarget(data, 2020)
print(result)
```
#### File: AdventOfCode2020/challenges/day8.py
```python
import re
f = open("challenges\data\day8data.txt", "r")
def processData(file):
data = []
    for x in file:
        x = x.strip().replace('\n', '').split()
data.append(x)
return data
class Handheld:
def __init__(self, commandList):
self.commandList = commandList
self.accumulator = 0
def printList(self):
[print(item) for item in self.commandList]
def noOp(self):
print("No OPeration. Continuing to next command.")
def accumulate(self, value):
if value > 0:
print("Increasing accumulator to:", (self.accumulator + value))
elif value < 0:
print("Decreasing accumulator to:", (self.accumulator + value))
else:
print("Accumulator is not changing. Value:", self.accumulator)
def jump(self, idx, jmpInterval):
print("Jumping from command:", idx, "to command:", (idx + jmpInterval))
def executeCommands(self):
self.accumulator = 0
visited = set()
idx = 0
while idx < len(self.commandList):
if idx in visited:
# print("Error: Endless loop detected")
return False
visited.add(idx)
if self.commandList[idx][0] == "acc":
# self.accumulate(int(self.commandList[idx][1]))
self.accumulator += int(self.commandList[idx][1])
idx += 1
elif self.commandList[idx][0] == "nop":
# self.noOp()
idx += 1
elif self.commandList[idx][0] == "jmp":
# self.jump(idx, int(self.commandList[idx][1]))
idx += int(self.commandList[idx][1])
return True
def boot(self):
if not self.executeCommands():
print("Error: Endless loop detected")
return self.accumulator
def swapCommand(self, command):
if command == "jmp":
return "nop"
elif command == "nop":
return "jmp"
else:
return command
def fixEndlessLoop(self):
        # work on the command list in place; each swapped command is reverted below if the run still loops
copyCommands = self.commandList
for command in copyCommands:
if command[0] != "nop" and command[0] != "jmp":
continue
# swap a command
command[0] = self.swapCommand(command[0])
# update list
self.commandList = copyCommands
if (self.executeCommands()):
print("New list successfully terminates")
return self.accumulator
else:
# if unsuccessful, swap back for next iteration
command[0] = self.swapCommand(command[0])
return None
commandList = processData(f)
h = Handheld(commandList)
print("Part 1:", h.boot())
print("Part 2:", h.fixEndlessLoop())
```
#### File: AdventOfCode2020/challenges/day9.py
```python
import re
f = open("challenges\data\day9data.txt", "r")
def processData(file):
data = []
    for x in file:
        x = x.strip().replace('\n', '')
data.append(x)
return data
def findFirstInvalidNumber(data, preamble):
preambleSet = set()
for i in range(len(data)):
preambleSet.add(int(data[i]))
if len(preambleSet) <= preamble:
continue
inSet = False
for item in preambleSet:
if (int(data[i]) - item) in preambleSet:
inSet = True
if not inSet:
return int(data[i])
preambleSet.remove(int(data[i-(preamble)]))
"""
--- Part Two ---
The final step in breaking the XMAS encryption relies on the invalid number you just found: you must find a contiguous set of at least two numbers in your list which sum to the invalid number from step 1.
Again consider the above example:
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
In this list, adding up all of the numbers from 15 through 40 produces the invalid number from step 1, 127. (Of course, the contiguous set of numbers in your actual list might be much longer.)
To find the encryption weakness, add together the smallest and largest number in this contiguous range; in this example, these are 15 and 47, producing 62.
What is the encryption weakness in your XMAS-encrypted list of numbers?
"""
def findEncryptionWeakness(data, invalidNumber):
numbers = []
sumNumbers = 0
for num in data:
numbers.append(int(num))
sumNumbers += int(num)
    while sumNumbers > invalidNumber:
removed = numbers.pop(0)
sumNumbers -= removed
if sumNumbers == invalidNumber:
return numbers
return None
data = processData(f)
invalidNum = findFirstInvalidNumber(data, 25)
print("Part 1:", invalidNum)
encryptionWeakness = findEncryptionWeakness(data, invalidNum)
encryptionWeakness.sort()
print("Part 2:", encryptionWeakness[0] + encryptionWeakness[-1])
```
|
{
"source": "jeffreyoldham/data",
"score": 2
}
|
#### File: app/resource/system_run.py
```python
import enum
import flask_restful
from flask_restful import reqparse
from app.model import system_run_model
from app.service import system_run_database
from app.service import validation
from app import utils
_MODEL = system_run_model.SystemRunModel
class SystemRunStatus(enum.Enum):
"""Allowed status of a system run.
The status of a system run can only be one of these.
"""
CREATED = 'created'
SUCCEEDED = 'succeeded'
FAILED = 'failed'
SYSTEM_RUN_STATUS = frozenset(status.value for status in SystemRunStatus)
def set_system_run_default_values(system_run):
"""Sets default values for some fields of a system run.
import_attempts is set to an empty list.
status is set to 'created'.
time_created is set to the current time.
logs is set to an empty list.
Args:
system_run: System run as a dict.
Returns:
The same system run with the fields set, as a dict.
"""
system_run.setdefault(_MODEL.import_attempts, [])
system_run.setdefault(_MODEL.status, SystemRunStatus.CREATED.value)
system_run.setdefault(_MODEL.time_created, utils.utctime())
system_run.setdefault(_MODEL.logs, [])
return system_run
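# Illustrative example (field names assumed to match the model attributes):
#   set_system_run_default_values({}) would yield something like
#   {'import_attempts': [], 'status': 'created',
#    'time_created': '2020-06-30T04:28:53.717569+00:00', 'logs': []}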
class SystemRun(flask_restful.Resource):
"""Base class for a system run resource.
Attributes:
client: datastore Client object used to communicate with Datastore
database: SystemRunDatabase object for querying and storing
system runs using the client
"""
parser = reqparse.RequestParser()
optional_fields = ((_MODEL.run_id, str), (_MODEL.repo_name,),
(_MODEL.branch_name,), (_MODEL.pr_number, int),
(_MODEL.commit_sha,), (_MODEL.time_created,),
(_MODEL.time_completed,), (_MODEL.import_attempts, str,
'append'),
(_MODEL.logs, str, 'append'), (_MODEL.status,))
utils.add_fields(parser, optional_fields, required=False)
def __init__(self, client=None):
"""Constructs a SystemRun."""
if not client:
client = utils.create_datastore_client()
self.client = client
self.database = system_run_database.SystemRunDatabase(self.client)
class SystemRunByID(SystemRun):
"""API for managing system runs by run_id associated with the endpoint
'/system_runs/<string:run_id>'.
See SystemRun.
"""
def get(self, run_id):
"""Retrieves a system run by its run_id.
Args:
run_id: ID of the system run as a string
Returns:
The system run with the run_id if successful as a
datastore Entity object. Otherwise, (error message, error code),
where the error message is a string and the error code is an int.
"""
run = self.database.get(run_id)
if not run:
return validation.get_not_found_error(_MODEL.run_id, run_id)
return run
def patch(self, run_id):
"""Modifies the value of a field of an existing system run.
The run_id and import_attempts of an existing system run are
forbidden to be patched.
Args:
run_id: ID of the system run as a string
Returns:
The system run with the run_id if successful as a
datastore Entity object. Otherwise, (error message, error code),
where the error message is a string and the error code is an int.
"""
args = SystemRunByID.parser.parse_args()
if _MODEL.run_id in args or _MODEL.import_attempts in args:
return validation.get_patch_forbidden_error(
(_MODEL.run_id, _MODEL.import_attempts))
valid, err, code = validation.is_system_run_valid(args, run_id=run_id)
if not valid:
return err, code
with self.client.transaction():
run = self.database.get(run_id)
if not run:
return validation.get_not_found_error(_MODEL.run_id, run_id)
run.update(args)
return self.database.save(run)
```
#### File: progress-dashboard-rest/app/utils.py
```python
import datetime
import uuid
from google.cloud import logging
from google.cloud import datastore
from google.cloud import storage
from app import configs
def utctime():
"""Returns the current time string in ISO 8601 with timezone UTC+0, e.g.
'2020-06-30T04:28:53.717569+00:00'."""
return datetime.datetime.now(datetime.timezone.utc).isoformat()
def add_fields(parser, fields, required=True):
"""Adds a set of fields to the parser.
Args:
parser: A reqparse RequestParser.
fields: A set of fields to add as a list, tuple, or anything iterable.
Each field is represented as a tuple. The first element is the name
of field as a string. The second element, if present, is the data
type of the string. If absent, str is used. The third element, if
present, is the action the parser should take when encountering
the field. If absent, 'store' is used. See
https://flask-restful.readthedocs.io/en/latest/api.html?highlight=RequestParser#reqparse.Argument.
required: Whether the fields are required, as a boolean.
"""
for field in fields:
field_name = field[0]
data_type = field[1] if len(field) > 1 else str
action = field[2] if len(field) > 2 else 'store'
parser.add_argument(field_name,
type=data_type,
action=action,
store_missing=False,
required=required,
location='json')
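# Illustrative call (not from the original source), mirroring how the system
# run resource registers its fields: a bare name defaults to str and 'store',
# while ('logs', str, 'append') collects repeated values into a list.
#   add_fields(parser, (('run_id', str), ('pr_number', int), ('logs', str, 'append')), required=False)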
def setup_logging():
"""Connects the default logger to Google Cloud Logging.
Only logs at INFO level or higher will be captured.
"""
client = logging.Client()
client.get_default_handler()
client.setup_logging()
def create_storage_bucket(project=configs.PROJECT_ID,
bucket_name=configs.LOG_BUCKET_NAME):
"""Creates a Google Cloud Storage bucket.
Args:
project: ID of the Google Cloud project as a string.
bucket_name: Name of the bucket as a string.
"""
return storage.Client(project).bucket(bucket_name)
def create_datastore_client(project=configs.PROJECT_ID,
namespace=configs.DASHBOARD_NAMESPACE,
credentials=None):
"""
Args:
project: ID of the Google Cloud project as a string.
namespace: Namespace in which the import attempts will be stored
as a string.
credentials: Credentials to authenticate with Datastore.
"""
return datastore.Client(project=project,
namespace=namespace,
credentials=credentials)
def get_id():
"""Returns a random UUID as a hex string."""
return uuid.uuid4().hex
def list_to_str(a_list, sep=', '):
"""Converts a list to string.
Args:
a_list: The list to convert to string.
sep: Separator between elements.
Returns:
String representation of the list.
"""
return sep.join(a_list)
```
#### File: progress-dashboard-rest/test/progress_log_database_test.py
```python
import unittest
from unittest import mock
from google.cloud import exceptions
from test import utils
from app.model import progress_log_model
from app.service import progress_log_database
_MODEL = progress_log_model.ProgressLogModel
def setUpModule():
utils.EMULATOR.start_emulator()
class ProgressLogDatabaseTest(unittest.TestCase):
"""Tests for BaseDatabase."""
@mock.patch('app.utils.create_datastore_client',
utils.create_test_datastore_client)
@mock.patch('app.service.log_message_manager.LogMessageManager',
utils.LogMessageManagerMock)
def setUp(self):
"""Ingests several logs before every test."""
self.database = progress_log_database.ProgressLogDatabase()
logs = [self.database.get(make_new=True) for _ in range(4)]
logs[0].update({_MODEL.level: 'info', _MODEL.message: 'first'})
logs[1].update({_MODEL.level: 'warning', _MODEL.message: 'second'})
logs[2].update({_MODEL.level: 'warning', _MODEL.message: 'third'})
logs[3].update({_MODEL.level: 'severe', _MODEL.message: 'fourth'})
self.logs_save_content = [logs[0], logs[1]]
self.logs_not_save_content = [logs[2], logs[3]]
for i, log in enumerate(self.logs_save_content):
self.logs_save_content[i] = self.database.save(log,
save_content=True)
for i, log in enumerate(self.logs_not_save_content):
self.logs_not_save_content[i] = self.database.save(
log, save_content=False)
def test_load_content(self):
"""Tests that get with load_content=True loads the message."""
expected = ['first', 'second']
for i, message in enumerate(expected):
log = self.logs_save_content[i]
retrieved = self.database.get(entity_id=log[_MODEL.log_id],
load_content=True)
self.assertEqual(message, retrieved[_MODEL.message])
def test_not_load_content(self):
"""Tests that get with load_content=False does not load the message."""
for log in self.logs_save_content:
log_id = log[_MODEL.log_id]
retrieved = self.database.get(entity_id=log_id, load_content=False)
self.assertEqual(log_id, retrieved[_MODEL.message])
def test_not_save_content(self):
"""Tests that save with save_content=False does not save the message to
a bucket."""
expected = ['third', 'fourth']
for i, message in enumerate(expected):
log_id = self.logs_not_save_content[i][_MODEL.log_id]
retrieved = self.database.get(entity_id=log_id, load_content=False)
self.assertEqual(message, retrieved[_MODEL.message])
self.assertRaises(exceptions.NotFound,
self.database.get,
entity_id=log_id,
load_content=True)
def test_load_logs(self):
"""Tests that load_logs correctly loads log messages and throws
an exception when the messages have not been saved."""
loaded = self.database.load_logs(
[log[_MODEL.log_id] for log in self.logs_save_content])
messages = [log[_MODEL.message] for log in loaded]
self.assertEqual(['first', 'second'], messages)
self.assertRaises(
exceptions.NotFound, self.database.load_logs,
[log[_MODEL.log_id] for log in self.logs_not_save_content])
```
#### File: regional_demography/deaths/preprocess_csv.py
```python
import csv
import json
import pandas as pd
def multi_index_to_single_index(df):
columns = []
for column in df.columns:
column = list(column)
column[1] = str(column[1])
columns.append(''.join(column))
df.columns = columns
return df.reset_index()
df = pd.read_csv('REGION_DEMOGR_death_tl3.csv')
# First remove geos with names that we don't have mappings to dcid for.
name2dcid = dict(json.loads(open('../name2dcid.json').read()))
df = df[df['Region'].isin(name2dcid.keys())]
# Second, replace the names with dcids.
df.replace({'Region': name2dcid}, inplace=True)
df['Year'] = '"' + df['Year'].astype(str) + '"'
temp = df[['REG_ID', 'Region', 'VAR', 'SEX', 'Year', 'Value']]
temp_multi_index = temp.pivot_table(values='Value',
index=['REG_ID', 'Region', 'Year'],
columns=['VAR', 'SEX'])
df_cleaned = multi_index_to_single_index(temp_multi_index)
VAR_to_statsvars = {
'D_TT': 'Count_MortalityEvent',
'D_Y0_4T': 'Count_MortalityEvent_Upto4Years',
'D_Y5_9T': 'Count_MortalityEvent_5To9Years',
'D_Y10_14T': 'Count_MortalityEvent_10To14Years',
'D_Y15_19T': 'Count_MortalityEvent_15To19Years',
'D_Y20_24T': 'Count_MortalityEvent_20To24Years',
'D_Y25_29T': 'Count_MortalityEvent_25To29Years',
'D_Y30_34T': 'Count_MortalityEvent_30To34Years',
'D_Y35_39T': 'Count_MortalityEvent_35To39Years',
'D_Y40_44T': 'Count_MortalityEvent_40To44Years',
'D_Y45_49T': 'Count_MortalityEvent_45To49Years',
'D_Y50_54T': 'Count_MortalityEvent_50To54Years',
'D_Y55_59T': 'Count_MortalityEvent_55To59Years',
'D_Y60_64T': 'Count_MortalityEvent_60To64Years',
'D_Y65_69T': 'Count_MortalityEvent_65To69Years',
'D_Y70_74T': 'Count_MortalityEvent_70To74Years',
'D_Y75_79T': 'Count_MortalityEvent_75To79Years',
'D_Y80_MAXT': 'Count_MortalityEvent_80OrMoreYears',
'D_Y0_14T': 'Count_MortalityEvent_Upto14Years',
'D_Y15_64T': 'Count_MortalityEvent_15To64Years',
'D_Y65_MAXT': 'Count_MortalityEvent_65OrMoreYears',
'D_TM': 'Count_MortalityEvent_Male',
'D_Y0_4M': 'Count_MortalityEvent_Upto4Years_Male',
'D_Y5_9M': 'Count_MortalityEvent_5To9Years_Male',
'D_Y10_14M': 'Count_MortalityEvent_10To14Years_Male',
'D_Y15_19M': 'Count_MortalityEvent_15To19Years_Male',
'D_Y20_24M': 'Count_MortalityEvent_20To24Years_Male',
'D_Y25_29M': 'Count_MortalityEvent_25To29Years_Male',
'D_Y30_34M': 'Count_MortalityEvent_30To34Years_Male',
'D_Y35_39M': 'Count_MortalityEvent_35To39Years_Male',
'D_Y40_44M': 'Count_MortalityEvent_40To44Years_Male',
'D_Y45_49M': 'Count_MortalityEvent_45To49Years_Male',
'D_Y50_54M': 'Count_MortalityEvent_50To54Years_Male',
'D_Y55_59M': 'Count_MortalityEvent_55To59Years_Male',
'D_Y60_64M': 'Count_MortalityEvent_60To64Years_Male',
'D_Y65_69M': 'Count_MortalityEvent_65To69Years_Male',
'D_Y70_74M': 'Count_MortalityEvent_70To74Years_Male',
'D_Y75_79M': 'Count_MortalityEvent_75To79Years_Male',
'D_Y80_MAXM': 'Count_MortalityEvent_80OrMoreYears_Male',
'D_Y0_14M': 'Count_MortalityEvent_Upto14Years_Male',
'D_Y15_64M': 'Count_MortalityEvent_15To64Years_Male',
'D_Y65_MAXM': 'Count_MortalityEvent_65OrMoreYears_Male',
'D_TF': 'Count_MortalityEvent_Female',
'D_Y0_4F': 'Count_MortalityEvent_Upto4Years_Female',
'D_Y5_9F': 'Count_MortalityEvent_5To9Years_Female',
'D_Y10_14F': 'Count_MortalityEvent_10To14Years_Female',
'D_Y15_19F': 'Count_MortalityEvent_15To19Years_Female',
'D_Y20_24F': 'Count_MortalityEvent_20To24Years_Female',
'D_Y25_29F': 'Count_MortalityEvent_25To29Years_Female',
'D_Y30_34F': 'Count_MortalityEvent_30To34Years_Female',
'D_Y35_39F': 'Count_MortalityEvent_35To39Years_Female',
'D_Y40_44F': 'Count_MortalityEvent_40To44Years_Female',
'D_Y45_49F': 'Count_MortalityEvent_45To49Years_Female',
'D_Y50_54F': 'Count_MortalityEvent_50To54Years_Female',
'D_Y55_59F': 'Count_MortalityEvent_55To59Years_Female',
'D_Y60_64F': 'Count_MortalityEvent_60To64Years_Female',
'D_Y65_69F': 'Count_MortalityEvent_65To69Years_Female',
'D_Y70_74F': 'Count_MortalityEvent_70To74Years_Female',
'D_Y75_79F': 'Count_MortalityEvent_75To79Years_Female',
'D_Y80_MAXF': 'Count_MortalityEvent_80OrMoreYears_Female',
'D_Y0_14F': 'Count_MortalityEvent_Upto14Years_Female',
'D_Y15_64F': 'Count_MortalityEvent_15To64Years_Female',
'D_Y65_MAXF': 'Count_MortalityEvent_65OrMoreYears_Female',
}
df_cleaned.rename(columns=VAR_to_statsvars, inplace=True)
df_cleaned.to_csv('OECD_deaths_cleaned.csv',
index=False,
quoting=csv.QUOTE_NONE)
# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:OECD_deaths_cleaned->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:OECDRegionalStatistics
observationAbout: C:OECD_deaths_cleaned->Region
observationDate: C:OECD_deaths_cleaned->Year
observationPeriod: "P1Y"
value: C:OECD_deaths_cleaned->{stat_var}
"""
stat_vars = df_cleaned.columns[3:]
with open('OECD_deaths.tmcf', 'w', newline='') as f_out:
for i in range(len(stat_vars)):
f_out.write(
TEMPLATE_MCF_TEMPLATE.format_map({
'index': i + 1,
'stat_var': stat_vars[i]
}))
```
|
{
"source": "jeffreyparker/duo_client_python",
"score": 2
}
|
#### File: duo_client_python/tests/test_client.py
```python
from __future__ import absolute_import
import hashlib
import mock
import unittest
import six.moves.urllib
import duo_client.client
from . import util
import base64
import collections
import json
JSON_BODY = {
'data': 'abc123',
'alpha': ['a', 'b', 'c', 'd'],
'info': {
'test': 1,
'another': 2,
}
}
JSON_STRING = '{"alpha":["a","b","c","d"],"data":"abc123","info":{"another":2,"test":1}}'
class TestQueryParameters(unittest.TestCase):
"""
Tests for the proper canonicalization of query parameters for signing.
"""
def assert_canon_params(self, params, expected):
params = duo_client.client.normalize_params(params)
self.assertEqual(
duo_client.client.canon_params(params),
expected,
)
def test_zero_params(self):
self.assert_canon_params(
{},
'',
)
def test_one_param(self):
self.assert_canon_params(
{'realname': ['First Last']},
'realname=First%20Last',
)
def test_two_params(self):
self.assert_canon_params(
{'realname': ['First Last'], 'username': ['root']},
'realname=First%20Last&username=root')
def test_list_string(self):
""" A list and a string will both get converted. """
self.assert_canon_params(
{'realname': 'First Last', 'username': ['root']},
'realname=First%20Last&username=root')
def test_printable_ascii_characters(self):
self.assert_canon_params(
{
'digits': ['0123456789'],
'letters': ['abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'],
'punctuation': ['!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'],
'whitespace': ['\t\n\x0b\x0c\r '],
},
'digits=0123456789&letters=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ&punctuation=%21%22%23%24%25%26%27%28%29%2A%2B%2C-.%2F%3A%3B%3C%3D%3E%3F%40%5B%5C%5D%5E_%60%7B%7C%7D~&whitespace=%09%0A%0B%0C%0D%20'
)
def test_unicode_fuzz_values(self):
self.assert_canon_params(
{
u'bar': [u'\u2815\uaaa3\u37cf\u4bb7\u36e9\ucc05\u668e\u8162\uc2bd\ua1f1'],
u'baz': [u'\u0df3\u84bd\u5669\u9985\ub8a4\uac3a\u7be7\u6f69\u934a\ub91c'],
u'foo': [u'\ud4ce\ud6d6\u7938\u50c0\u8a20\u8f15\ufd0b\u8024\u5cb3\uc655'],
u'qux': [u'\u8b97\uc846-\u828e\u831a\uccca\ua2d4\u8c3e\ub8b2\u99be'],
},
'bar=%E2%A0%95%EA%AA%A3%E3%9F%8F%E4%AE%B7%E3%9B%A9%EC%B0%85%E6%9A%8E%E8%85%A2%EC%8A%BD%EA%87%B1&baz=%E0%B7%B3%E8%92%BD%E5%99%A9%E9%A6%85%EB%A2%A4%EA%B0%BA%E7%AF%A7%E6%BD%A9%E9%8D%8A%EB%A4%9C&foo=%ED%93%8E%ED%9B%96%E7%A4%B8%E5%83%80%E8%A8%A0%E8%BC%95%EF%B4%8B%E8%80%A4%E5%B2%B3%EC%99%95&qux=%E8%AE%97%EC%A1%86-%E8%8A%8E%E8%8C%9A%EC%B3%8A%EA%8B%94%E8%B0%BE%EB%A2%B2%E9%A6%BE',
)
def test_unicode_fuzz_keys_and_values(self):
self.assert_canon_params(
{
u'\u469a\u287b\u35d0\u8ef3\u6727\u502a\u0810\ud091\xc8\uc170': [u'\u0f45\u1a76\u341a\u654c\uc23f\u9b09\uabe2\u8343\u1b27\u60d0'],
u'\u7449\u7e4b\uccfb\u59ff\ufe5f\u83b7\uadcc\u900c\ucfd1\u7813': [u'\u8db7\u5022\u92d3\u42ef\u207d\u8730\uacfe\u5617\u0946\u4e30'],
u'\u7470\u9314\u901c\u9eae\u40d8\u4201\u82d8\u8c70\u1d31\ua042': [u'\u17d9\u0ba8\u9358\uaadf\ua42a\u48be\ufb96\u6fe9\ub7ff\u32f3'],
u'\uc2c5\u2c1d\u2620\u3617\u96b3F\u8605\u20e8\uac21\u5934': [u'\ufba9\u41aa\ubd83\u840b\u2615\u3e6e\u652d\ua8b5\ud56bU'],
},
'%E4%9A%9A%E2%A1%BB%E3%97%90%E8%BB%B3%E6%9C%A7%E5%80%AA%E0%A0%90%ED%82%91%C3%88%EC%85%B0=%E0%BD%85%E1%A9%B6%E3%90%9A%E6%95%8C%EC%88%BF%E9%AC%89%EA%AF%A2%E8%8D%83%E1%AC%A7%E6%83%90&%E7%91%89%E7%B9%8B%EC%B3%BB%E5%A7%BF%EF%B9%9F%E8%8E%B7%EA%B7%8C%E9%80%8C%EC%BF%91%E7%A0%93=%E8%B6%B7%E5%80%A2%E9%8B%93%E4%8B%AF%E2%81%BD%E8%9C%B0%EA%B3%BE%E5%98%97%E0%A5%86%E4%B8%B0&%E7%91%B0%E9%8C%94%E9%80%9C%E9%BA%AE%E4%83%98%E4%88%81%E8%8B%98%E8%B1%B0%E1%B4%B1%EA%81%82=%E1%9F%99%E0%AE%A8%E9%8D%98%EA%AB%9F%EA%90%AA%E4%A2%BE%EF%AE%96%E6%BF%A9%EB%9F%BF%E3%8B%B3&%EC%8B%85%E2%B0%9D%E2%98%A0%E3%98%97%E9%9A%B3F%E8%98%85%E2%83%A8%EA%B0%A1%E5%A4%B4=%EF%AE%A9%E4%86%AA%EB%B6%83%E8%90%8B%E2%98%95%E3%B9%AE%E6%94%AD%EA%A2%B5%ED%95%ABU',
)
def test_sort_order_with_common_prefix(self):
self.assert_canon_params(
{
'foo_bar': '2',
'foo': '1',
},
'foo=1&foo_bar=2',
)
class TestCanonicalize(unittest.TestCase):
"""
Tests of the canonicalization of request attributes and parameters
for signing.
"""
def test_v1(self):
test = {
'host': 'foO.BAr52.cOm',
'method': 'PoSt',
'params': {
u'\u469a\u287b\u35d0\u8ef3\u6727\u502a\u0810\ud091\xc8\uc170': [u'\u0f45\u1a76\u341a\u654c\uc23f\u9b09\uabe2\u8343\u1b27\u60d0'],
u'\u7449\u7e4b\uccfb\u59ff\ufe5f\u83b7\uadcc\u900c\ucfd1\u7813': [u'\u8db7\u5022\u92d3\u42ef\u207d\u8730\uacfe\u5617\u0946\u4e30'],
u'\u7470\u9314\u901c\u9eae\u40d8\u4201\u82d8\u8c70\u1d31\ua042': [u'\u17d9\u0ba8\u9358\uaadf\ua42a\u48be\ufb96\u6fe9\ub7ff\u32f3'],
u'\uc2c5\u2c1d\u2620\u3617\u96b3F\u8605\u20e8\uac21\u5934': [u'\ufba9\u41aa\ubd83\u840b\u2615\u3e6e\u652d\ua8b5\ud56bU'],
},
'uri': '/Foo/BaR2/qux',
}
test['params'] = duo_client.client.normalize_params(test['params'])
self.assertEqual(duo_client.client.canonicalize(sig_version=1,
date=None,
**test),
'POST\nfoo.bar52.com\n/Foo/BaR2/qux\n%E4%9A%9A%E2%A1%BB%E3%97%90%E8%BB%B3%E6%9C%A7%E5%80%AA%E0%A0%90%ED%82%91%C3%88%EC%85%B0=%E0%BD%85%E1%A9%B6%E3%90%9A%E6%95%8C%EC%88%BF%E9%AC%89%EA%AF%A2%E8%8D%83%E1%AC%A7%E6%83%90&%E7%91%89%E7%B9%8B%EC%B3%BB%E5%A7%BF%EF%B9%9F%E8%8E%B7%EA%B7%8C%E9%80%8C%EC%BF%91%E7%A0%93=%E8%B6%B7%E5%80%A2%E9%8B%93%E4%8B%AF%E2%81%BD%E8%9C%B0%EA%B3%BE%E5%98%97%E0%A5%86%E4%B8%B0&%E7%91%B0%E9%8C%94%E9%80%9C%E9%BA%AE%E4%83%98%E4%88%81%E8%8B%98%E8%B1%B0%E1%B4%B1%EA%81%82=%E1%9F%99%E0%AE%A8%E9%8D%98%EA%AB%9F%EA%90%AA%E4%A2%BE%EF%AE%96%E6%BF%A9%EB%9F%BF%E3%8B%B3&%EC%8B%85%E2%B0%9D%E2%98%A0%E3%98%97%E9%9A%B3F%E8%98%85%E2%83%A8%EA%B0%A1%E5%A4%B4=%EF%AE%A9%E4%86%AA%EB%B6%83%E8%90%8B%E2%98%95%E3%B9%AE%E6%94%AD%EA%A2%B5%ED%95%ABU')
def test_v2(self):
test = {
'date': 'Fri, 07 Dec 2012 17:18:00 -0000',
'host': 'foO.BAr52.cOm',
'method': 'PoSt',
'params': {u'\u469a\u287b\u35d0\u8ef3\u6727\u502a\u0810\ud091\xc8\uc170': [u'\u0f45\u1a76\u341a\u654c\uc23f\u9b09\uabe2\u8343\u1b27\u60d0'],
u'\u7449\u7e4b\uccfb\u59ff\ufe5f\u83b7\uadcc\u900c\ucfd1\u7813': [u'\u8db7\u5022\u92d3\u42ef\u207d\u8730\uacfe\u5617\u0946\u4e30'],
u'\u7470\u9314\u901c\u9eae\u40d8\u4201\u82d8\u8c70\u1d31\ua042': [u'\u17d9\u0ba8\u9358\uaadf\ua42a\u48be\ufb96\u6fe9\ub7ff\u32f3'],
u'\uc2c5\u2c1d\u2620\u3617\u96b3F\u8605\u20e8\uac21\u5934': [u'\ufba9\u41aa\ubd83\u840b\u2615\u3e6e\u652d\ua8b5\ud56bU']},
'uri': '/Foo/BaR2/qux',
}
test['params'] = duo_client.client.normalize_params(test['params'])
self.assertEqual(duo_client.client.canonicalize(sig_version=2,
**test),
'Fri, 07 Dec 2012 17:18:00 -0000\nPOST\nfoo.bar52.com\n/Foo/BaR2/qux\n%E4%9A%9A%E2%A1%BB%E3%97%90%E8%BB%B3%E6%9C%A7%E5%80%AA%E0%A0%90%ED%82%91%C3%88%EC%85%B0=%E0%BD%85%E1%A9%B6%E3%90%9A%E6%95%8C%EC%88%BF%E9%AC%89%EA%AF%A2%E8%8D%83%E1%AC%A7%E6%83%90&%E7%91%89%E7%B9%8B%EC%B3%BB%E5%A7%BF%EF%B9%9F%E8%8E%B7%EA%B7%8C%E9%80%8C%EC%BF%91%E7%A0%93=%E8%B6%B7%E5%80%A2%E9%8B%93%E4%8B%AF%E2%81%BD%E8%9C%B0%EA%B3%BE%E5%98%97%E0%A5%86%E4%B8%B0&%E7%91%B0%E9%8C%94%E9%80%9C%E9%BA%AE%E4%83%98%E4%88%81%E8%8B%98%E8%B1%B0%E1%B4%B1%EA%81%82=%E1%9F%99%E0%AE%A8%E9%8D%98%EA%AB%9F%EA%90%AA%E4%A2%BE%EF%AE%96%E6%BF%A9%EB%9F%BF%E3%8B%B3&%EC%8B%85%E2%B0%9D%E2%98%A0%E3%98%97%E9%9A%B3F%E8%98%85%E2%83%A8%EA%B0%A1%E5%A4%B4=%EF%AE%A9%E4%86%AA%EB%B6%83%E8%90%8B%E2%98%95%E3%B9%AE%E6%94%AD%EA%A2%B5%ED%95%ABU')
def test_v2_with_json(self):
expected = (
'Tue, 04 Jul 2017 14:12:00\n'
'POST\n'
'foo.bar52.com\n'
'/Foo/BaR2/qux\n'
'{"alpha":["a","b","c","d"],"data":"abc123","info":{"another":2,"test":1}}'
)
params = duo_client.client.Client.canon_json(JSON_BODY)
actual = duo_client.client.canonicalize(
'POST', 'foO.BaR52.cOm', '/Foo/BaR2/qux', params,
'Tue, 04 Jul 2017 14:12:00', sig_version=3)
self.assertEqual(actual, expected)
def test_v4_with_json(self):
hashed_body = hashlib.sha512(JSON_STRING.encode('utf-8')).hexdigest()
expected = (
'Tue, 04 Jul 2017 14:12:00\n'
'POST\n'
'foo.bar52.com\n'
'/Foo/BaR2/qux\n\n' + hashed_body)
params = duo_client.client.Client.canon_json(JSON_BODY)
actual = duo_client.client.canonicalize(
'POST', 'foO.BaR52.cOm', '/Foo/BaR2/qux', params,
'Tue, 04 Jul 2017 14:12:00', sig_version=4)
self.assertEqual(actual, expected)
def test_invalid_signature_version_raises(self):
params = duo_client.client.Client.canon_json(JSON_BODY)
with self.assertRaises(ValueError) as e:
duo_client.client.canonicalize(
'POST', 'foO.BaR52.cOm', '/Foo/BaR2/qux', params,
'Tue, 04 Jul 2017 14:12:00', sig_version=999)
self.assertEqual(
e.exception.args[0],
"Unknown signature version: {}".format(999))
class TestNormalizePageArgs(unittest.TestCase):
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com')
def test_normalize_page_args(self):
tests = [
(
{},
(None, '0')
),
(
{'offset': 9001},
(None, '9001'),
),
(
{'limit': 2},
('2', '0'),
),
(
{'limit': '3'},
('3', '0'),
),
(
{'limit': 5, 'offset': 9002},
('5', '9002')
)
]
for (input, expected) in tests:
output = self.client.normalize_paging_args(**input)
self.assertEqual(output, expected)
class TestSign(unittest.TestCase):
"""
Tests for proper signature creation for a request.
"""
def test_hmac_sha1(self):
test = {
'date': 'Fri, 07 Dec 2012 17:18:00 -0000',
'host': 'foO.BAr52.cOm',
'method': 'PoSt',
'params': {u'\u469a\u287b\u35d0\u8ef3\u6727\u502a\u0810\ud091\xc8\uc170': [u'\u0f45\u1a76\u341a\u654c\uc23f\u9b09\uabe2\u8343\u1b27\u60d0'],
u'\u7449\u7e4b\uccfb\u59ff\ufe5f\u83b7\uadcc\u900c\ucfd1\u7813': [u'\u8db7\u5022\u92d3\u42ef\u207d\u8730\uacfe\u5617\u0946\u4e30'],
u'\u7470\u9314\u901c\u9eae\u40d8\u4201\u82d8\u8c70\u1d31\ua042': [u'\u17d9\u0ba8\u9358\uaadf\ua42a\u48be\ufb96\u6fe9\ub7ff\u32f3'],
u'\uc2c5\u2c1d\u2620\u3617\u96b3F\u8605\u20e8\uac21\u5934': [u'\ufba9\u41aa\ubd83\u840b\u2615\u3e6e\u652d\ua8b5\ud56bU']},
'uri': '/Foo/BaR2/qux',
}
test['params'] = duo_client.client.normalize_params(test['params'])
ikey = 'test_ikey'
actual = duo_client.client.sign(
sig_version=2,
ikey=ikey,
skey='<KEY>',
**test
)
expected = 'f01811cbbf9561623ab45b893096267fd46a5178'
expected = ikey + ':' + expected
if isinstance(expected, six.text_type):
expected = expected.encode('utf-8')
expected = base64.b64encode(expected).strip()
if not isinstance(expected, six.text_type):
expected = expected.decode('utf-8')
expected = 'Basic ' + expected
self.assertEqual(actual,
expected)
def test_hmac_sha1_json(self):
ikey = 'test_ikey'
actual = duo_client.client.sign(
sig_version=3,
ikey=ikey,
skey='<KEY>2PLM2ODVTkvoT',
date='Tue, 04 Jul 2017 14:12:00',
host='foO.BAr52.cOm',
method='POST',
params=duo_client.client.Client.canon_json(JSON_BODY),
uri='/Foo/BaR2/qux'
)
sig = '7bf8cf95d689091cf7fdb72178f16d1c19ef92c1'
auth = '%s:%s' % (ikey, sig)
auth = auth.encode('utf-8')
b64 = base64.b64encode(auth)
b64 = b64.decode('utf-8')
expected = 'Basic %s' % b64
self.assertEqual(actual, expected)
class TestRequest(unittest.TestCase):
""" Tests for the request created by api_call and json_api_call. """
    # useful args for testing
args_in = {
'foo':['bar'],
'baz':['qux', 'quux=quuux', 'foobar=foobar&barbaz=barbaz']}
args_out = dict(
(key, [six.moves.urllib.parse.quote(v) for v in val])
for (key, val) in list(args_in.items()))
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com')
# monkeypatch client's _connect()
self.client._connect = lambda: util.MockHTTPConnection()
def test_api_call_get_no_params(self):
(response, dummy) = self.client.api_call('GET', '/foo/bar', {})
self.assertEqual(response.method, 'GET')
self.assertEqual(response.uri, '/foo/bar?')
def test_api_call_post_no_params(self):
(response, dummy) = self.client.api_call('POST', '/foo/bar', {})
self.assertEqual(response.method, 'POST')
self.assertEqual(response.uri, '/foo/bar')
self.assertEqual(response.body, '')
def test_api_call_get_params(self):
(response, dummy) = self.client.api_call(
'GET', '/foo/bar', self.args_in)
self.assertEqual(response.method, 'GET')
(uri, args) = response.uri.split('?')
self.assertEqual(uri, '/foo/bar')
self.assertEqual(util.params_to_dict(args), self.args_out)
def test_api_call_post_params(self):
(response, dummy) = self.client.api_call(
'POST', '/foo/bar', self.args_in)
self.assertEqual(response.method, 'POST')
self.assertEqual(response.uri, '/foo/bar')
self.assertEqual(util.params_to_dict(response.body), self.args_out)
def test_json_api_call_get_no_params(self):
response = self.client.json_api_call('GET', '/foo/bar', {})
self.assertEqual(response['method'], 'GET')
self.assertEqual(response['uri'], '/foo/bar?')
self.assertEqual(response['body'], None)
def test_json_api_call_post_no_params(self):
response = self.client.json_api_call('POST', '/foo/bar', {})
self.assertEqual(response['method'], 'POST')
self.assertEqual(response['uri'], '/foo/bar')
self.assertEqual(response['body'], '')
def test_json_api_call_get_params(self):
response = self.client.json_api_call(
'GET', '/foo/bar', self.args_in)
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/foo/bar')
self.assertEqual(util.params_to_dict(args), self.args_out)
def test_json_api_call_post_params(self):
response = self.client.json_api_call(
'POST', '/foo/bar', self.args_in)
self.assertEqual(response['method'], 'POST')
self.assertEqual(response['uri'], '/foo/bar')
self.assertEqual(util.params_to_dict(response['body']), self.args_out)
class TestPaging(unittest.TestCase):
def setUp(self):
self.client = util.CountingClient(
'test_ikey', 'test_akey', 'example.com', paging_limit=100)
self.objects = [util.MockJsonObject() for i in range(1000)]
self.client._connect = lambda: util.MockPagingHTTPConnection(self.objects)
def test_get_objects_paging(self):
response = self.client.json_paging_api_call(
'GET', '/admin/v1/objects', {})
self.assertEqual(len(self.objects), len(list(response)))
self.assertEqual(10, self.client.counter)
def test_get_no_objects_paging(self):
self.objects = []
self.client._connect = lambda: util.MockPagingHTTPConnection(self.objects)
response = self.client.json_paging_api_call(
'GET', '/admin/v1/objects', {})
self.assertEqual(len(self.objects), len(list(response)))
self.assertEqual(1, self.client.counter)
def test_get_objects_paging_limit(self):
response = self.client.json_paging_api_call(
'GET', '/admin/v1/objects', {'limit':'250'})
self.assertEqual(len(self.objects), len(list(response)))
self.assertEqual(4, self.client.counter)
def test_get_all_objects(self):
response = self.client.json_paging_api_call(
'GET', '/admin/v1/objects', {'limit':'1000'})
expected = [obj.to_json() for obj in self.objects]
self.assertListEqual(expected, list(response))
self.assertEqual(1, self.client.counter)
class TestJsonRequests(unittest.TestCase):
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com', sig_timezone='America/Detroit',
sig_version=3)
# monkeypatch client's _connect()
self.client._connect = lambda: util.MockHTTPConnection()
def test_json_post(self):
(response, dummy) = self.client.api_call('POST', '/foo/bar', JSON_BODY)
self.assertEqual(response.method, 'POST')
self.assertEqual(response.uri, '/foo/bar')
self.assertEqual(response.body, JSON_STRING)
self.assertIn('Content-type', response.headers)
self.assertEqual(response.headers['Content-type'], 'application/json')
self.assertIn('Authorization', response.headers)
def test_json_fails_with_bad_args(self):
with self.assertRaises(ValueError) as e:
(response, dummy) = self.client.api_call('POST', '/foo/bar', '')
self.assertEqual(e.exception.args[0], "JSON request must be an object.")
def test_json_put(self):
(response, dummy) = self.client.api_call('PUT', '/foo/bar', JSON_BODY)
self.assertEqual(response.method, 'PUT')
self.assertEqual(response.uri, '/foo/bar')
self.assertEqual(response.body, JSON_STRING)
self.assertIn('Content-type', response.headers)
self.assertEqual(response.headers['Content-type'], 'application/json')
self.assertIn('Authorization', response.headers)
def test_json_request(self):
client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com', sig_timezone='America/Detroit',
sig_version=3)
client._connect = lambda: util.MockHTTPConnection()
(response, dummy) = client.api_call(
'POST', '/foo/bar', JSON_BODY)
self.assertEqual(response.method, 'POST')
self.assertEqual(response.uri, '/foo/bar')
self.assertEqual(response.body, JSON_STRING)
self.assertIn('Content-type', response.headers)
self.assertEqual(response.headers['Content-type'], 'application/json')
self.assertIn('Authorization', response.headers)
class TestParseJsonResponse(unittest.TestCase):
APIResponse = collections.namedtuple('APIResponse', 'status reason')
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com', sig_timezone='America/Detroit',
sig_version=2)
def test_good_response(self):
api_res = self.APIResponse(200, '')
expected_data = {
'foo': 'bar'
}
res_body = {
'response': expected_data,
'stat': 'OK'
}
data = self.client.parse_json_response(api_res, json.dumps(res_body))
self.assertEqual(data, expected_data)
def test_response_contains_invalid_json(self):
api_res = self.APIResponse(200, 'Fake reason')
response = 'Bad JSON'
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response(api_res, response)
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, response)
def test_response_stat_isnot_OK(self):
api_res = self.APIResponse(200, 'Fake reason')
res_body = {
'response': {
'foo': 'bar'
},
'stat': 'FAIL'
}
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response(api_res, json.dumps(res_body))
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, res_body)
def test_response_is_http_error(self):
for code in range(201, 600):
api_res = self.APIResponse(code, 'fake reason')
res_body = {
'response': 'some message',
'stat': 'OK'
}
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response(api_res, json.dumps(res_body))
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, res_body)
class TestParseJsonResponseAndMetadata(unittest.TestCase):
APIResponse = collections.namedtuple('APIResponse', 'status reason')
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com', sig_timezone='America/Detroit',
sig_version=2)
def test_good_response(self):
api_res = self.APIResponse(200, '')
expected_data = {
'foo': 'bar'
}
res_body = {
'response': expected_data,
'stat': 'OK'
}
data, metadata = self.client.parse_json_response_and_metadata(api_res, json.dumps(res_body))
self.assertEqual(data, expected_data)
self.assertEqual(metadata, {})
def test_response_contains_invalid_json(self):
api_res = self.APIResponse(200, 'Fake reason')
response = 'Bad JSON'
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response_and_metadata(api_res, response)
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, response)
def test_response_stat_isnot_OK(self):
api_res = self.APIResponse(200, 'Fake reason')
res_body = {
'response': {
'foo': 'bar'
},
'stat': 'FAIL'
}
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response_and_metadata(api_res, json.dumps(res_body))
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, res_body)
def test_response_is_http_error(self):
for code in range(201, 600):
api_res = self.APIResponse(code, 'fake reason')
res_body = {
'response': 'some message',
'stat': 'OK'
}
with self.assertRaises(RuntimeError) as e:
self.client.parse_json_response_and_metadata(api_res, json.dumps(res_body))
self.assertEqual(e.exception.status, api_res.status)
self.assertEqual(e.exception.reason, api_res.reason)
self.assertEqual(e.exception.data, res_body)
@mock.patch('duo_client.client.sleep')
class TestRetryRequests(unittest.TestCase):
def setUp(self):
self.client = duo_client.client.Client(
'test_ikey', 'test_akey', 'example.com',
)
def test_non_limited_reponse(self, mock_sleep):
# monkeypatch client's _connect()
mock_connection = util.MockMultipleRequestHTTPConnection(
[200])
self.client._connect = lambda: mock_connection
(response, dummy) = self.client.api_call('GET', '/foo/bar', {})
mock_sleep.assert_not_called()
self.assertEqual(response.status, 200)
self.assertEqual(mock_connection.requests, 1)
@mock.patch('duo_client.client.random')
def test_single_limited_response(self, mock_random, mock_sleep):
mock_random.uniform.return_value = 0.123
# monkeypatch client's _connect()
mock_connection = util.MockMultipleRequestHTTPConnection(
[429, 200])
self.client._connect = lambda: mock_connection
(response, dummy) = self.client.api_call('GET', '/foo/bar', {})
mock_sleep.assert_called_once_with(1.123)
mock_random.uniform.assert_called_once()
self.assertEqual(response.status, 200)
self.assertEqual(mock_connection.requests, 2)
@mock.patch('duo_client.client.random')
def test_all_limited_responses(self, mock_random, mock_sleep):
mock_random.uniform.return_value = 0.123
# monkeypatch client's _connect()
mock_connection = util.MockMultipleRequestHTTPConnection(
[429, 429, 429, 429, 429, 429, 429])
self.client._connect = lambda: mock_connection
(response, data) = self.client.api_call('GET', '/foo/bar', {})
expected_sleep_calls = (
mock.call(1.123),
mock.call(2.123),
mock.call(4.123),
mock.call(8.123),
mock.call(16.123),
mock.call(32.123),
)
mock_sleep.assert_has_calls(expected_sleep_calls)
expected_random_calls = (
mock.call(0, 1),
mock.call(0, 1),
mock.call(0, 1),
mock.call(0, 1),
mock.call(0, 1),
mock.call(0, 1),
)
mock_random.uniform.assert_has_calls(expected_random_calls)
self.assertEqual(response.status, 429)
self.assertEqual(mock_connection.requests, 7)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeffreyparker/duo_universal_python",
"score": 3
}
|
#### File: duo_universal_python/tests/test_create_jwt_args.py
```python
from mock import MagicMock, patch
from duo_universal import client
import unittest
CLIENT_ID = "DIXXXXXXXXXXXXXXXXXX"
CLIENT_SECRET = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
HOST = "api-XXXXXXX.test.duosecurity.com"
REDIRECT_URI = "https://www.example.com"
ERROR_TIMEOUT = "Connection to api-xxxxxxx.test.duosecurity.com timed out."
ERROR_NETWORK_CONNECTION_FAILED = "Failed to establish a new connection"
EXPIRATION_TIME = 10 + client.FIVE_MINUTES_IN_SECONDS
RAND_ALPHANUMERIC_STR = "deadbeef"
SUCCESS_JWT_ARGS = {
'iss': CLIENT_ID,
'sub': CLIENT_ID,
'aud': client.OAUTH_V1_TOKEN_ENDPOINT,
'exp': EXPIRATION_TIME,
'jti': RAND_ALPHANUMERIC_STR
}
class TestCreateJwtArgs(unittest.TestCase):
def setUp(self):
self.client = client.Client(CLIENT_ID, CLIENT_SECRET, HOST, REDIRECT_URI)
@patch("time.time", MagicMock(return_value=10))
def test_create_jwt_args_success(self):
"""
Test that _create_jwt_args creates proper jwt arguments
"""
self.client._generate_rand_alphanumeric = MagicMock(return_value=RAND_ALPHANUMERIC_STR)
actual_jwt_args = self.client._create_jwt_args(client.OAUTH_V1_TOKEN_ENDPOINT)
self.assertEqual(SUCCESS_JWT_ARGS, actual_jwt_args)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeffreyparker/duo_unix",
"score": 2
}
|
#### File: duo_unix/tests/mockduo.py
```python
import BaseHTTPServer
import cgi
import bson
try:
from hashlib import sha1
except ImportError:
import sha as sha1
import hmac
import os
import ssl
import sys
import time
import urllib
import socket
IKEY = '<KEY>'
SKEY = '<KEY>'
# Used to check if the FQDN is set to either the ipv4 or ipv6 address
IPV6_LOOPBACK_ADDR = '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa'
IPV4_LOOPBACK_ADDR = '172.16.58.3.in-addr.arpa'
tx_msgs = {
'txPUSH1': [ '0:Pushed a login request to your phone.',
'1:Success. Logging you in...' ],
'txVOICE1': [ '0:Dialing XXX-XXX-1234...',
"1:Answered. Press '#' on your phone to log in.",
'1:Success. Logging you in...' ],
'txSMSREFRESH1': [ '0:New SMS passcodes sent' ],
'txVOICE2': [ '0:Dialing XXX-XXX-5678...',
"1:Answered. Press '#' on your phone to log in.",
'2:Authentication timed out.' ],
}
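# Each entry above is a 'delay_seconds:status message' string; _get_tx_response
# sleeps for the delay and, for async polling, pops the messages one at a time
# until the final allow/deny result is returned.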
class MockDuoHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = 'MockDuo/1.0'
protocol_version = 'HTTP/1.1'
def _verify_sig(self):
authz = self.headers['Authorization'].split()[1].decode('base64')
ikey, sig = authz.split(':')
if ikey != IKEY:
return False
canon = [ self.method,
self.headers['Host'].split(':')[0].lower(),
self.path ]
l = []
for k in sorted(self.args.keys()):
l.append('%s=%s' % (urllib.quote(k, '~'),
urllib.quote(self.args[k], '~')))
canon.append('&'.join(l))
h = hmac.new(SKEY, '\n'.join(canon), sha1)
return sig == h.hexdigest()
def _get_args(self):
if self.method == 'POST':
env = { 'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'] }
fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ=env)
args = {}
for k in fs.keys():
args[k] = fs[k].value
else:
args = dict(cgi.parse_qsl(self.qs))
print 'got %s %s args: %s' % (self.method, self.path, args)
return args
def _get_tx_response(self, txid, async):
last = True
if txid not in tx_msgs:
secs, msg = 0, 'Invalid passcode, please try again.'
elif async:
secs, msg = tx_msgs[txid].pop(0).split(':', 1)
last = not tx_msgs[txid]
else:
secs, msg = tx_msgs[txid][-1].split(':', 1)
if msg.startswith('Success'):
rsp = { 'result': 'allow', 'status': msg }
elif async and not last:
rsp = { 'status': msg }
else:
rsp = { 'result': 'deny', 'status': msg }
time.sleep(int(secs))
return rsp
def _send(self, code, buf=''):
self.send_response(code)
self.send_header("Content-length", str(len(buf)))
if buf:
self.send_header("Content-type", "application/bson")
self.end_headers()
self.wfile.write(buf)
else:
self.end_headers()
def do_GET(self):
self.method = 'GET'
self.path, self.qs = self.path.split('?', 1)
self.args = self._get_args()
if not self._verify_sig():
return self._send(401)
ret = { 'stat': 'OK' }
if self.path == '/rest/v1/status.bson':
ret['response'] = self._get_tx_response(self.args['txid'], 1)
buf = bson.dumps(ret)
return self._send(200, buf)
self._send(404)
def hostname_check(self, hostname):
domain_fqdn = socket.getfqdn().lower()
if hostname == domain_fqdn.lower() or hostname == socket.gethostname().lower():
return True
#Check if socket.getfqdn() is the loopback address for ipv4 or ipv6 then check the hostname of the machine
if domain_fqdn == IPV6_LOOPBACK_ADDR or domain_fqdn == IPV4_LOOPBACK_ADDR:
if hostname == socket.gethostbyaddr(socket.gethostname())[0].lower():
return True
return False
def do_POST(self):
self.method = 'POST'
self.args = self._get_args()
if not self._verify_sig():
return self._send(401)
try:
return self._send(int(self.args['user']))
except:
ret = { 'stat': 'OK' }
if self.path == '/rest/v1/preauth.bson':
if self.args['user'] == 'preauth-ok-missing_response':
pass
elif self.args['user'] == 'preauth-fail-missing_response':
ret['stat'] = 'FAIL'
elif self.args['user'] == 'preauth-bad-stat':
ret['stat'] = 'BAD_STATUS'
elif self.args['user'] == 'preauth-fail':
ret = { 'stat': 'FAIL', 'code': 1000, 'message': 'Pre-authentication failed' }
elif self.args['user'] == 'preauth-deny':
ret['response'] = { 'result': 'deny', 'status': 'preauth-denied' }
elif self.args['user'] == 'preauth-allow':
ret['response'] = { 'result': 'allow', 'status': 'preauth-allowed' }
elif self.args['user'] == 'preauth-allow-bad_response':
ret['response'] = { 'result': 'allow', 'xxx': 'preauth-allowed-bad-response' }
elif (self.args['user'] == 'hostname'):
if (self.hostname_check(self.args['hostname'].lower())):
ret['response'] = { 'result': 'deny', 'status': 'correct hostname' }
else:
                        response = "hostname received: " + self.args['hostname'] + " found: " + socket.getfqdn()
ret['response'] = { 'result': 'deny', 'status': response }
elif self.args['user'] == 'failopen':
if self.args['failmode'] == 'open':
ret['response'] = { 'result': 'deny', 'status': 'correct failmode' }
else:
ret['response'] = { 'result': 'deny', 'status': 'incorrect failmode' }
elif self.args['user'] == 'failclosed':
if self.args['failmode'] == 'closed':
ret['response'] = { 'result': 'deny', 'status': 'correct failmode' }
else:
ret['response'] = { 'result': 'deny', 'status': 'incorrect failmode' }
elif self.args['user'] == 'gecos_user_gecos_field6':
ret['response'] = { 'result': 'allow', 'status': 'gecos-user-gecos-field6-allowed' }
elif self.args['user'] == 'gecos_user_gecos_field3':
ret['response'] = { 'result': 'allow', 'status': 'gecos-user-gecos-field3-allowed' }
elif self.args['user'] == 'full_gecos_field':
ret['response'] = { 'result': 'allow', 'status': 'full-gecos-field' }
elif self.args['user'] == 'gecos/6':
ret['response'] = { 'result': 'allow', 'status': 'gecos/6' }
else:
ret['response'] = {
'result': 'auth',
'prompt': 'Duo login for %s\n\n' % self.args['user'] + \
'Choose or lose:\n\n' + \
' 1. Push 1\n 2. Phone 1\n' + \
' 3. SMS 1 (deny)\n 4. Phone 2 (deny)\n\n' + \
'Passcode or option (1-4): ',
'factors': {
'default': 'push1',
'1': 'push1',
'2': 'voice1',
'3': 'smsrefresh1',
'4': 'voice2',
}
}
elif self.path == '/rest/v1/auth.bson':
if self.args['factor'] == 'auto':
txid = 'tx' + self.args['auto'].upper()
if self.args['async'] == '1':
ret['response'] = { 'txid': txid }
else:
ret['response'] = self._get_tx_response(txid, 0)
else:
ret['response'] = { 'result': 'deny',
'status': 'no %s' % self.args['factor'] }
if (self.args['user'] == 'auth_timeout'):
return self._send(500)
else:
return self._send(404)
buf = bson.dumps(ret)
return self._send(200, buf)
def main():
port = 4443
host = 'localhost'
if len(sys.argv) == 1:
cafile = os.path.realpath('%s/certs/mockduo.pem' %
os.path.dirname(__file__))
elif len(sys.argv) == 2:
cafile = sys.argv[1]
else:
print >>sys.stderr, 'Usage: %s [certfile]\n' % sys.argv[0]
sys.exit(1)
httpd = BaseHTTPServer.HTTPServer((host, port), MockDuoHandler)
httpd.socket = ssl.wrap_socket(
httpd.socket,
certfile=cafile,
server_side=True
)
httpd.serve_forever()
if __name__ == '__main__':
main()
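# Example invocation (a sketch; the script name is hypothetical, but per the
# usage string above the only optional argument is a certificate file, otherwise
# certs/mockduo.pem next to this script is used and the server listens on
# https://localhost:4443/):
#   python mockduo.py
#   python mockduo.py /path/to/other_cert.pem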
```
|
{
"source": "jeffreypaul15/sunkit-image",
"score": 3
}
|
#### File: sunkit_image/utils/noise.py
```python
import numpy as np
from scipy.ndimage import correlate
from scipy.stats import gamma
from skimage.util import view_as_windows
__all__ = ["noise_estimation", "noiselevel", "conv2d_matrix", "weak_texture_mask"]
def noise_estimation(img, patchsize=7, decim=0, confidence=1 - 1e-6, iterations=3):
"""
Estimates the noise level of an image.
Additive white Gaussian noise (AWGN) is a basic noise model used in Information Theory
to mimic the effect of many random processes that occur in nature.
Parameters
----------
img: `numpy.ndarray`
Single Numpy image array.
patchsize : `int`, optional
Patch size, defaults to 7.
decim : `int`, optional
Decimation factor, defaults to 0.
        If a large number is used, the calculation is accelerated.
confidence : `float`, optional
Confidence interval to determine the threshold for the weak texture.
        In this algorithm, this value is usually set very close to one.
        Defaults to 1 - 1e-6.
iterations : `int`, optional
Number of iterations, defaults to 3.
Returns
-------
`dict`
A dictionary containing the estimated noise levels, `nlevel`; threshold to extract weak texture
patches at the last iteration, `thresh`; number of extracted weak texture patches `num` and the
weak texture mask, `mask`.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> noisy_image_array = np.random.randn(100, 100)
>>> estimate = noise_estimation(noisy_image_array, patchsize=11, iterations=10)
>>> estimate['mask'] # Prints mask
array([[1., 1., 1., ..., 1., 1., 0.],
[1., 1., 1., ..., 1., 1., 0.],
[1., 1., 1., ..., 1., 1., 0.],
...,
[1., 1., 1., ..., 1., 1., 0.],
[1., 1., 1., ..., 1., 1., 0.],
[0., 0., 0., ..., 0., 0., 0.]])
>>> estimate['nlevel'] # Prints nlevel
array([1.0014616])
>>> estimate['thresh'] # Prints thresh
array([173.61530607])
>>> estimate['num'] # Prints num
array([8100.])
References
----------
* <NAME>, <NAME> and <NAME>
Noise Level Estimation Using Weak Textured Patches of a Single Noisy Image
IEEE International Conference on Image Processing (ICIP), 2012.
DOI: 10.1109/ICIP.2012.6466947
* <NAME>, <NAME> and <NAME>
Single-Image Noise Level Estimation for Blind Denoising Noisy Image
IEEE Transactions on Image Processing, Vol.22, No.12, pp.5226-5237, December, 2013.
DOI: 10.1109/TIP.2013.2283400
"""
try:
img = np.array(img)
    except Exception:
raise TypeError("Input image should be a NumPy ndarray")
try:
patchsize = int(patchsize)
except ValueError:
raise TypeError("patchsize must be an integer, or int-compatible, variable")
try:
decim = int(decim)
except ValueError:
raise TypeError("decim must be an integer, or int-compatible, variable")
try:
confidence = float(confidence)
except ValueError:
raise TypeError("confidence must be a float, or float-compatible, value between 0 and 1")
if not (confidence >= 0 and confidence <= 1):
raise ValueError("confidence must be defined in the interval 0 <= confidence <= 1")
try:
iterations = int(iterations)
except ValueError:
raise TypeError("iterations must be an integer, or int-compatible, variable")
output = {}
nlevel, thresh, num = noiselevel(img, patchsize, decim, confidence, iterations)
mask = weak_texture_mask(img, patchsize, thresh)
output["nlevel"] = nlevel
output["thresh"] = thresh
output["num"] = num
output["mask"] = mask
return output
def noiselevel(img, patchsize, decim, confidence, iterations):
"""
Calculates the noise level of the input array.
Parameters
----------
img: `numpy.ndarray`
Single Numpy image array.
patchsize : `int`, optional
Patch size, defaults to 7.
decim : `int`, optional
Decimation factor, defaults to 0.
        If a large number is used, the calculation is accelerated.
confidence : `float`, optional
Confidence interval to determine the threshold for the weak texture.
        In this algorithm, this value is usually set very close to one.
iterations : `int`, optional
Number of iterations, defaults to 3.
Returns
-------
`tuple`
A tuple containing the estimated noise levels, threshold to extract weak texture
patches at the last iteration, and number of extracted weak texture patches.
"""
if len(img.shape) < 3:
img = np.expand_dims(img, 2)
nlevel = np.ndarray(img.shape[2])
thresh = np.ndarray(img.shape[2])
num = np.ndarray(img.shape[2])
kh = np.expand_dims(np.expand_dims(np.array([-0.5, 0, 0.5]), 0), 2)
imgh = correlate(img, kh, mode="nearest")
imgh = imgh[:, 1 : imgh.shape[1] - 1, :]
imgh = imgh * imgh
kv = np.expand_dims(np.vstack(np.array([-0.5, 0, 0.5])), 2)
imgv = correlate(img, kv, mode="nearest")
imgv = imgv[1 : imgv.shape[0] - 1, :, :]
imgv = imgv * imgv
Dh = conv2d_matrix(np.squeeze(kh, 2), patchsize, patchsize)
Dv = conv2d_matrix(np.squeeze(kv, 2), patchsize, patchsize)
DD = np.transpose(Dh) @ Dh + np.transpose(Dv) @ Dv
r = np.double(np.linalg.matrix_rank(DD))
Dtr = np.trace(DD)
tau0 = gamma.ppf(confidence, r / 2, scale=(2 * Dtr / r))
for cha in range(img.shape[2]):
X = view_as_windows(img[:, :, cha], (patchsize, patchsize))
X = X.reshape(int(X.size / patchsize ** 2), patchsize ** 2, order="F").transpose()
Xh = view_as_windows(imgh[:, :, cha], (patchsize, patchsize - 2))
Xh = Xh.reshape(
int(Xh.size / ((patchsize - 2) * patchsize)), ((patchsize - 2) * patchsize), order="F"
).transpose()
Xv = view_as_windows(imgv[:, :, cha], (patchsize - 2, patchsize))
Xv = Xv.reshape(
int(Xv.size / ((patchsize - 2) * patchsize)), ((patchsize - 2) * patchsize), order="F"
).transpose()
Xtr = np.expand_dims(np.sum(np.concatenate((Xh, Xv), axis=0), axis=0), 0)
if decim > 0:
XtrX = np.transpose(np.concatenate((Xtr, X), axis=0))
XtrX = np.transpose(
XtrX[
XtrX[:, 0].argsort(),
]
)
p = np.floor(XtrX.shape[1] / (decim + 1))
p = np.expand_dims(np.arange(0, p) * (decim + 1), 0)
Xtr = XtrX[0, p.astype("int")]
X = np.squeeze(XtrX[1 : XtrX.shape[1], p.astype("int")])
# noise level estimation
tau = np.inf
if X.shape[1] < X.shape[0]:
sig2 = 0
else:
cov = (X @ np.transpose(X)) / (X.shape[1] - 1)
d = np.flip(np.linalg.eig(cov)[0], axis=0)
sig2 = d[0]
for _ in range(1, iterations):
# weak texture selection
tau = sig2 * tau0
p = Xtr < tau
Xtr = Xtr[p]
X = X[:, np.squeeze(p)]
# noise level estimation
if X.shape[1] < X.shape[0]:
break
cov = (X @ np.transpose(X)) / (X.shape[1] - 1)
d = np.flip(np.linalg.eig(cov)[0], axis=0)
sig2 = d[0]
nlevel[cha] = np.sqrt(sig2)
thresh[cha] = tau
num[cha] = X.shape[1]
# clean up
img = np.squeeze(img)
return nlevel, thresh, num
def conv2d_matrix(H, rows, columns):
"""
Specialized 2D convolution matrix generation.
Parameters
----------
H : `numpy.ndarray`
Input matrix.
rows : `numpy.ndarray`
Rows in convolution matrix.
columns : `numpy.ndarray`
Columns in convolution matrix.
Returns
-------
T : `numpy.ndarray`
The new convoluted matrix.
"""
s = np.shape(H)
rows = int(rows)
columns = int(columns)
matr_row = rows - s[0] + 1
matr_column = columns - s[1] + 1
T = np.zeros([matr_row * matr_column, rows * columns])
k = 0
for i in range(matr_row):
for j in range(matr_column):
for p in range(s[0]):
start = (i + p) * columns + j
T[k, start : start + s[1]] = H[p, :]
k += 1
return T
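# A minimal sketch (not part of the original module) of how `conv2d_matrix` is
# used above: the returned matrix applies the kernel to a flattened patch, so a
# "valid" 2D correlation becomes a single matrix-vector product.
#
#   H = np.array([[-0.5, 0.0, 0.5]])        # horizontal derivative kernel used above
#   T = conv2d_matrix(H, 7, 7)              # shape (7 * 5, 49) for a 7x7 patch
#   patch = np.arange(49.0).reshape(7, 7)   # hypothetical image patch
#   response = T @ patch.flatten()          # filtered values of the patch interior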
def weak_texture_mask(img, patchsize, thresh):
"""
Calculates the weak texture mask.
Parameters
----------
img: `numpy.ndarray`
Single Numpy image array.
patchsize : `int`, optional
Patch size, defaults to 7.
thresh: `numpy.ndarray`
Threshold to extract weak texture patches at the last iteration.
Returns
-------
mask: `numpy.ndarray`
Weak-texture mask. 0 and 1 represent non-weak-texture and weak-texture regions, respectively.
"""
if img.ndim < 3:
img = np.expand_dims(img, 2)
kh = np.expand_dims(np.transpose(np.vstack(np.array([-0.5, 0, 0.5]))), 2)
imgh = correlate(img, kh, mode="nearest")
imgh = imgh[:, 1 : imgh.shape[1] - 1, :]
imgh = imgh * imgh
kv = np.expand_dims(np.vstack(np.array([-0.5, 0, 0.5])), 1)
imgv = correlate(img, kv, mode="nearest")
imgv = imgv[1 : imgv.shape[0] - 1, :, :]
imgv = imgv * imgv
s = img.shape
msk = np.zeros_like(img)
for cha in range(s[2]):
m = view_as_windows(img[:, :, cha], (patchsize, patchsize))
m = np.zeros_like(m.reshape(int(m.size / patchsize ** 2), patchsize ** 2, order="F").transpose())
Xh = view_as_windows(imgh[:, :, cha], (patchsize, patchsize - 2))
Xh = Xh.reshape(
int(Xh.size / ((patchsize - 2) * patchsize)), ((patchsize - 2) * patchsize), order="F"
).transpose()
Xv = view_as_windows(imgv[:, :, cha], (patchsize - 2, patchsize))
Xv = Xv.reshape(
int(Xv.size / ((patchsize - 2) * patchsize)), ((patchsize - 2) * patchsize), order="F"
).transpose()
Xtr = np.expand_dims(np.sum(np.concatenate((Xh, Xv), axis=0), axis=0), 0)
p = Xtr < thresh[cha]
ind = 0
for col in range(0, s[1] - patchsize + 1):
for row in range(0, s[0] - patchsize + 1):
if p[:, ind]:
msk[row : row + patchsize - 1, col : col + patchsize - 1, cha] = 1
ind = ind + 1
# clean up
img = np.squeeze(img)
return np.squeeze(msk)
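# A minimal sketch (not part of the original module) of the two-step pipeline
# that `noise_estimation` wraps: estimate the noise level first, then reuse the
# resulting threshold to extract the weak-texture mask.
#
#   nlevel, thresh, num = noiselevel(img, 7, 0, 1 - 1e-6, 3)
#   mask = weak_texture_mask(img, 7, thresh)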
```
|
{
"source": "jeffreypaul15/sunpy",
"score": 2
}
|
#### File: net/vso/vso.py
```python
import os
import cgi
import copy
import json
import socket
import inspect
import datetime
import itertools
from pathlib import Path
from functools import partial
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import zeep
from sunpy import config, log
from sunpy.net.attr import and_
from sunpy.net.base_client import BaseClient, QueryResponseRow
from sunpy.net.vso import attrs
from sunpy.net.vso.attrs import _walker as walker
from sunpy.util.exceptions import warn_user
from sunpy.util.net import slugify
from sunpy.util.parfive_helpers import Downloader, Results
from .. import _attrs as core_attrs
from .exceptions import (
DownloadFailed,
MissingInformation,
MultipleChoices,
NoData,
UnknownMethod,
UnknownStatus,
UnknownVersion,
)
from .legacy_response import QueryResponse
from .table_response import VSOQueryResponseTable
from .zeep_plugins import SunPyLoggingZeepPlugin
DEFAULT_URL_PORT = [{'url': 'http://docs.virtualsolar.org/WSDL/VSOi_rpc_literal.wsdl',
'port': 'nsoVSOi'},
{'url': 'https://sdac.virtualsolar.org/API/VSOi_rpc_literal.wsdl',
'port': 'sdacVSOi'}]
class _Str(str):
""" Subclass of string that contains a meta attribute for the
record_item associated with the file. """
meta = None
# ----------------------------------------
def check_connection(url):
try:
return urlopen(url).getcode() == 200
except (socket.error, socket.timeout, HTTPError, URLError) as e:
warn_user(f"Connection to {url} failed with error {e}. Retrying with different url and port.")
return None
def get_online_vso_url():
"""
Return the first VSO url and port combination that is online.
"""
for mirror in DEFAULT_URL_PORT:
if check_connection(mirror['url']):
return mirror
def build_client(url=None, port_name=None, **kwargs):
"""
Construct a `zeep.Client` object to connect to VSO.
Parameters
----------
url : `str`
The URL to connect to.
port_name : `str`
The "port" to use.
kwargs : `dict`
All extra keyword arguments are passed to `zeep.Client`.
Returns
-------
`zeep.Client`
"""
if url is None and port_name is None:
mirror = get_online_vso_url()
if mirror is None:
raise ConnectionError("No online VSO mirrors could be found.")
url = mirror['url']
port_name = mirror['port']
elif url and port_name:
if not check_connection(url):
raise ConnectionError(f"Can't connect to url {url}")
else:
raise ValueError("Both url and port_name must be specified if either is.")
if "plugins" not in kwargs:
kwargs["plugins"] = [SunPyLoggingZeepPlugin()]
client = zeep.Client(url, port_name=port_name, **kwargs)
client.set_ns_prefix('VSO', 'http://virtualsolar.org/VSO/VSOi')
return client
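# A minimal usage sketch (not part of the original module; requires network
# access to a VSO mirror):
#
#   client = build_client()  # picks the first online mirror from DEFAULT_URL_PORT
#   client = build_client(url=DEFAULT_URL_PORT[1]['url'],
#                         port_name=DEFAULT_URL_PORT[1]['port'])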
class VSOClient(BaseClient):
"""
Provides access to query and download from Virtual Solar Observatory (VSO).
Parameters
----------
url : `str`, optional
The VSO url to use. If not specified will use the first online known URL.
port : `str`, optional
The VSO port name to use. If not specified will use the first online known URL.
api : `zeep.Client`, optional
The `zeep.Client` instance to use for interacting with the VSO. If not
specified one will be created.
"""
method_order = [
'URL-FILE_Rice', 'URL-FILE', 'URL-packaged', 'URL-TAR_GZ', 'URL-ZIP', 'URL-TAR',
]
def __init__(self, url=None, port=None, api=None):
if not isinstance(api, zeep.Client):
api = build_client(url, port)
if api is None:
raise ConnectionError("Cannot find an online VSO mirror.")
self.api = api
def __deepcopy__(self, memo):
"""
Copy the client but don't copy the API object.
"""
memo[id(self.api)] = self.api
deepcopy_method = self.__deepcopy__
self.__deepcopy__ = None
cp = copy.deepcopy(self, memo)
self.__deepcopy__ = deepcopy_method
cp.__deepcopy__ = deepcopy_method
return cp
def make(self, atype, **kwargs):
"""
Create a new SOAP object.
"""
obj = self.api.get_type(f"VSO:{atype}")
return obj(**kwargs)
def search(self, *query, response_format=None):
"""
Query data from the VSO with the new API. Takes a variable number
of attributes as parameter, which are chained together using AND.
Parameters
----------
response_format: {``"legacy"``, ``"table"``}, optional
The response format from the search, this can be either
``"legacy"`` to return a list-like object of the zeep responses, or
``"table"`` to return the responses in a subclass of
`~astropy.table.QTable`.
Examples
--------
Query all data from eit or aia between 2010-01-01T00:00 and
2010-01-01T01:00.
>>> from datetime import datetime
>>> from sunpy.net import vso, attrs as a
>>> client = vso.VSOClient() # doctest: +REMOTE_DATA
>>> client.search(
... a.Time(datetime(2010, 1, 1), datetime(2010, 1, 1, 1)),
... a.Instrument.eit | a.Instrument.aia,
... response_format="table") # doctest: +REMOTE_DATA
<sunpy.net.vso.table_response.VSOQueryResponseTable object at ...>
Start Time End Time Source ... Extent Type Size
... Mibyte
----------------------- ----------------------- ------ ... ----------- -------
2010-01-01 00:00:08.000 2010-01-01 00:00:20.000 SOHO ... FULLDISK 2.01074
2010-01-01 00:12:08.000 2010-01-01 00:12:20.000 SOHO ... FULLDISK 2.01074
2010-01-01 00:24:10.000 2010-01-01 00:24:22.000 SOHO ... FULLDISK 2.01074
2010-01-01 00:36:08.000 2010-01-01 00:36:20.000 SOHO ... FULLDISK 2.01074
2010-01-01 00:48:09.000 2010-01-01 00:48:21.000 SOHO ... FULLDISK 2.01074
Returns
-------
out : `~sunpy.net.vso.table_response.VSOQueryResponseTable`
Matched items. Return value is of same type as the one of
:meth:`VSOClient.search`.
"""
if response_format is None:
response_format = "table"
query = and_(*query)
QueryRequest = self.api.get_type('VSO:QueryRequest')
VSOQueryResponse = self.api.get_type('VSO:QueryResponse')
responses = []
exceptions = []
for block in walker.create(query, self.api):
try:
query_response = self.api.service.Query(
QueryRequest(block=block)
)
for resp in query_response:
if resp["error"]:
warn_user(resp["error"])
responses.append(
VSOQueryResponse(query_response)
)
except Exception as ex:
exceptions.append(ex)
responses = self.merge(responses)
if response_format == "legacy":
response = QueryResponse.create(responses)
else:
response = VSOQueryResponseTable.from_zeep_response(responses, client=self)
for ex in exceptions:
response.add_error(ex)
return response
def merge(self, queryresponses):
""" Merge responses into one. """
if len(queryresponses) == 1:
return queryresponses[0]
fileids = set()
providers = {}
for queryresponse in queryresponses:
for provideritem in queryresponse.provideritem:
provider = provideritem.provider
if not hasattr(provideritem, 'record'):
continue
if not hasattr(provideritem.record, 'recorditem'):
continue
if provideritem.provider not in providers:
providers[provider] = provideritem
fileids |= {
record_item.fileid
for record_item in provideritem.record.recorditem
}
else:
for record_item in provideritem.record.recorditem:
if record_item.fileid not in fileids:
fileids.add(record_item.fileid)
providers[provider].record.recorditem.append(
record_item
)
providers[provider].no_of_records_found += 1
providers[provider].no_of_records_returned += 1
return self.make('QueryResponse',
provideritem=list(providers.values()))
@staticmethod
def mk_filename(pattern, queryresponserow, resp, url):
"""
Generate the best possible (or least-worse) filename for a VSO download.
* Use the ``content-disposition`` header.
* Use ``fileid`` to generate a file name if content-disposition fails
* If everything else fails use the last segment of the URL and hope.
"""
name = None
if resp:
cdheader = resp.headers.get("Content-Disposition", None)
if cdheader:
_, params = cgi.parse_header(cdheader)
name = params.get('filename', "")
# Work around https://github.com/sunpy/sunpy/issues/3372
if name.count('"') >= 2:
name = name.split('"')[1]
if name is None:
# Advice from the VSO is to fallback to providerid + fileid for a filename
# As it's possible multiple providers give the same fileid.
# However, I haven't implemented this yet as it would be a breaking
# change to the filenames we expect.
fileid = queryresponserow['fileid']
# Some providers make fileid a path
# Some also don't specify a file extension, but not a lot we can do
# about that.
name = fileid.split("/")[-1]
# If somehow we have got this far with an empty string, fallback to url segment
if not name:
name = url.split('/')[-1]
# Remove any not-filename appropriate characters
name = slugify(name)
# If absolutely everything else fails make a filename based on download time
if not name:
name = f"vso_file_{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}"
fname = pattern.format(file=name,
**queryresponserow.response_block_map)
return fname
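    # For example (an illustrative sketch, not from the original source), a
    # pattern such as "{source}/{instrument}/{file}" would be filled from the
    # query response row's response_block_map together with the name chosen above.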
def fetch(self, query_response, path=None, methods=None, site=None,
progress=True, overwrite=False, downloader=None, wait=True):
"""
Download data specified in the query_response.
Parameters
----------
query_response : sunpy.net.vso.VSOQueryResponseTable
QueryResponse containing the items to be downloaded.
path : str
Specify where the data is to be downloaded. Can refer to arbitrary
fields of the QueryResponseItem (instrument, source, time, ...) via
            string formatting; moreover, the name of the downloaded file can
            be referred to as ``file``, e.g.
"{source}/{instrument}/{time.start}/{file}".
methods : `list` of `str`
Download methods, defaults to URL-FILE_Rice then URL-FILE.
Methods are a concatenation of one PREFIX followed by any number of
SUFFIXES i.e. ``PREFIX-SUFFIX_SUFFIX2_SUFFIX3``.
The full list of
`PREFIXES <https://sdac.virtualsolar.org/cgi/show_details?keyword=METHOD_PREFIX>`_
and `SUFFIXES <https://sdac.virtualsolar.org/cgi/show_details?keyword=METHOD_SUFFIX>`_
are listed on the VSO site.
site : str
There are a number of caching mirrors for SDO and other
instruments, some available ones are listed below.
=============== ========================================================
NSO National Solar Observatory, Tucson (US)
            SAO (aka CFA)   Smithsonian Astronomical Observatory, Harvard U. (US)
SDAC (aka GSFC) Solar Data Analysis Center, NASA/GSFC (US)
ROB Royal Observatory of Belgium (Belgium)
MPS Max Planck Institute for Solar System Research (Germany)
UCLan University of Central Lancashire (UK)
IAS Institut Aeronautique et Spatial (France)
            KIS             Kiepenheuer-Institut fur Sonnenphysik (Germany)
NMSU New Mexico State University (US)
=============== ========================================================
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bars will be shown at all.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if ``'unique'`` the filename
will be modified to be unique.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if ``downloader`` is not `None`.
Returns
-------
out : `parfive.Results`
Object that supplies a list of filenames and any errors.
Examples
--------
>>> files = fetch(qr) # doctest:+SKIP
"""
if path is None:
path = Path(config.get('downloads', 'download_dir')) / '{file}'
elif isinstance(path, (str, os.PathLike)) and '{file}' not in str(path):
path = Path(path) / '{file}'
else:
path = Path(path)
path = path.expanduser()
dl_set = True
if not downloader:
dl_set = False
downloader = Downloader(progress=progress, overwrite=overwrite)
if isinstance(query_response, (QueryResponse, list)):
query_response = VSOQueryResponseTable.from_zeep_response(query_response,
client=self,
_sort=False)
if isinstance(query_response, QueryResponseRow):
query_response = query_response.as_table()
if not len(query_response):
return downloader.download() if wait else Results()
# Adding the site parameter to the info
info = {}
if site is not None:
info['site'] = site
VSOGetDataResponse = self.api.get_type("VSO:VSOGetDataResponse")
data_request = self.make_getdatarequest(query_response, methods, info)
data_response = VSOGetDataResponse(self.api.service.GetData(data_request))
err_results = self.download_all(data_response,
methods,
downloader,
str(path),
self.by_fileid(query_response))
if dl_set and not wait:
return err_results
results = downloader.download()
results += err_results
results._errors += err_results.errors
return results
def make_getdatarequest(self, response, methods=None, info=None):
""" Make datarequest with methods from response. """
if methods is None:
methods = self.method_order + ['URL']
return self.create_getdatarequest(
{g[0]['Provider']: list(g['fileid']) for g in response.group_by('Provider').groups},
methods, info
)
def create_getdatarequest(self, maps, methods, info=None):
""" Create datarequest from maps mapping data provider to
        fileids and methods. """
if info is None:
info = {}
if 'email' not in info:
info['email'] = 'sunpy'
# For the JSOC provider we need to make a DataRequestItem for each
# series, not just one for the whole provider.
# Remove JSOC provider items from the map
jsoc = maps.pop('JSOC', [])
# Make DRIs for everything that's not JSOC one per provider
dris = [self.make('DataRequestItem', provider=k, fileiditem={'fileid': v})
for k, v in maps.items()]
def series_func(x):
""" Extract the series from the fileid. """
return x.split(':')[0]
# Sort the JSOC fileids by series
# This is a precursor to groupby as recommended by the groupby docs
series_sorted = sorted(jsoc, key=series_func)
# Iterate over the series and make a DRI for each.
# groupby creates an iterator based on a key function, in this case
# based on the series (the part before the first ':')
for series, fileids in itertools.groupby(series_sorted, key=series_func):
dris.append(self.make('DataRequestItem',
provider='JSOC',
fileiditem={'fileid': list(fileids)}))
request = {'method': {'methodtype': methods},
'info': info,
'datacontainer': {'datarequestitem': dris}
}
return self.make('VSOGetDataRequest', request=request)
def download_all(self, response, methods, downloader, path, qr, info=None):
results = Results()
GET_VERSION = [
('0.8', (5, 8)),
('0.7', (1, 4)),
('0.6', (0, 3)),
]
for dresponse in response.getdataresponseitem:
for version, (from_, to) in GET_VERSION:
                if getattr(dresponse, 'version', '0.6') >= version:
break
else:
results.add_error('', UnknownVersion(dresponse))
continue
# If from_ and to are uninitialized, the else block of the loop
# continues the outer loop and thus this code is never reached.
code = (
dresponse.status[from_:to]
if getattr(dresponse, 'status', None) else '200'
)
if code == '200':
for dataitem in dresponse.getdataitem.dataitem:
try:
self.download(
dresponse.method.methodtype[0],
dataitem.url,
downloader,
path,
qr[dataitem.fileiditem.fileid[0]]
)
except NoData:
results.add_error('', '', DownloadFailed(dresponse))
continue
elif code == '300' or code == '412' or code == '405':
if code == '300':
try:
methods = self.multiple_choices(
dresponse.method.methodtype, dresponse
)
except NoData:
results.add_error('', '', MultipleChoices(dresponse))
continue
elif code == '412':
try:
info = self.missing_information(
info, dresponse.info
)
except NoData:
results.add_error('', '', MissingInformation(dresponse))
continue
elif code == '405':
try:
methods = self.unknown_method(dresponse)
except NoData:
results.add_error('', '', UnknownMethod(dresponse))
continue
files = []
for dataitem in dresponse.getdataitem.dataitem:
files.extend(dataitem.fileiditem.fileid)
request = self.create_getdatarequest(
{dresponse.provider: files}, methods, info
)
self.download_all(
self.api.service.GetData(request), methods, downloader, path,
qr, info
)
else:
results.add_error('', '', UnknownStatus(dresponse))
return results
def download(self, method, url, downloader, *args):
""" Enqueue a file to be downloaded, extra args are passed to ``mk_filename``"""
if method.startswith('URL'):
return downloader.enqueue_file(url, filename=partial(self.mk_filename, *args))
raise NoData
@staticmethod
def by_fileid(response):
"""
Returns a dictionary of fileids
corresponding to records in the response.
"""
return {
record['fileid']: record for record in response
}
def multiple_choices(self, choices, response):
""" Override to pick between multiple download choices. """
for elem in self.method_order:
if elem in choices:
return [elem]
raise NoData
def missing_information(self, info, field):
""" Override to provide missing information. """
raise NoData
def unknown_method(self, response):
""" Override to pick a new method if the current one is unknown. """
raise NoData
@classmethod
def _can_handle_query(cls, *query):
required = {core_attrs.Time}
# Get all classes in core_attrs and attrs
optional = {value for (name, value) in inspect.getmembers(core_attrs) if
name in core_attrs.__all__}
optional.update(value for (name, value) in inspect.getmembers(attrs) if
name in attrs.__all__)
return cls.check_attr_types_in_query(query, required, optional)
@classmethod
def _attrs_module(cls):
return 'vso', 'sunpy.net.vso.attrs'
def __del__(self):
"""
Attempt to close the connection, but if it fails, continue.
"""
try:
self.api.transport.session.close()
except Exception as e:
log.debug(f"Failed to close VSO API connection with: {e}")
@classmethod
def register_values(cls):
# We always use the local file for now.
return cls.load_vso_values()
@staticmethod
def load_vso_values():
"""
We take this list and register all the keywords as corresponding Attrs.
Returns
-------
dict
The constructed Attrs dictionary ready to be passed into Attr registry.
"""
from sunpy.net import attrs as a
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, 'data', 'attrs.json'), 'r') as attrs_file:
keyword_info = json.load(attrs_file)
# Now to traverse the saved dict and give them attr keys.
attrs = {}
for key, value in keyword_info.items():
attr = getattr(a, key.capitalize(), None)
if attr is None:
attr = getattr(a.vso, key.capitalize())
attrs[attr] = value
return attrs
@staticmethod
def create_parse_vso_values():
"""
Makes a network call to the VSO API that returns what keywords they support.
We take this list and register all the keywords as corresponding Attrs.
"""
here = os.path.dirname(os.path.realpath(__file__))
# Keywords we are after
keywords = ["+detector", "+instrument", "+source", "+provider", "+physobs", "+level"]
# Construct and format the request
keyword_info = {}
url = "https://vso1.nascom.nasa.gov/cgi-bin/registry_json.cgi"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
for keyword in keywords:
data = urlencode({'fields': f"['{keyword}']".replace("'", '"')}).encode('ascii')
req = Request(url=url, data=data, headers=headers)
response = urlopen(req)
keyword_info[keyword.replace("+", "")] = json.loads(response.read())
# Now to traverse the return and create attrs out of them.
attrs = {}
for key, value in keyword_info.items():
attrs[key] = []
for item in value:
if item:
if key == "level":
attrs[key].append((str(item[key]), str(item[key])))
else:
attrs[key].append((str(item[key]), str(item[key+"_long"])))
with open(os.path.join(here, 'data', 'attrs.json'), 'w') as attrs_file:
json.dump(attrs, attrs_file, indent=2)
```
|
{
"source": "jeffrey-phillips/nacc_ensemble",
"score": 2
}
|
#### File: jeffrey-phillips/nacc_ensemble/data_prep.py
```python
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
del fulldf
def table(a,b):
print(pd.crosstab(aut[a],aut[b],dropna=False,margins=True))
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPLEWY != 1),'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[aut.ADPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.LBPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.NPPFTLD == 1 & (aut.NPPVASC != 1),'VPath'] = 0
aut.loc[aut.TDPPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[aut.TauPath == 1 & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puertorican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guamchamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multiracial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
aut[v].mask(aut[v].isin(badval),inplace = True)
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find correlated variables and drop.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
Xnumimp.columns = X[numvar].columns
# Impute ordinal variables with the median.
ordvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Ordinal"])
imp_med = SimpleImputer(missing_values=np.nan, strategy='median')
imp_med.fit(X[ordvar])
Xordimp = imp_med.transform(X[ordvar])
Xordimp = pd.DataFrame(Xordimp)
Xordimp.columns = X[ordvar].columns
# Impute boolean variables with zero.
boolvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Boolean"])
boolenc = SimpleImputer(missing_values = np.nan, strategy = 'constant',
fill_value = 0)
boolenc.fit(X[boolvar])
Xbool = boolenc.transform(X[boolvar])
Xbool = pd.DataFrame(Xbool)
Xbool.columns = X[boolvar].columns
# One-hot encoding for nominal (not boolean, ordinal, or numeric) variables.
from sklearn.preprocessing import OneHotEncoder
nomvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Nominal"])
enc = OneHotEncoder(handle_unknown='ignore',sparse = False)
Xfull = X[nomvar].fillna(value = 0)
enc.fit(Xfull)
Xohe = enc.transform(Xfull)
Xohe = pd.DataFrame(Xohe)
Xohe.columns = enc.get_feature_names(Xfull.columns)
# Put it all together
X = X.drop(columns = boolvar)
X = X.drop(columns = numvar)
X = X.drop(columns = ordvar)
X = pd.concat([X,Xbool,Xnumimp,Xordimp,Xohe],axis = 1)
X = X.drop(columns = nomvar)
# Create 80/20 split between data for training and final testing.
# Do data split stratified by pathology class.
from sklearn.model_selection import train_test_split
classy = aut[['Class','SEX','EDUC']]
classy = classy.assign(HighEd = classy.EDUC > 12)
classy = classy.drop(columns = ['EDUC'])
classy = classy.assign(MasterClass = classy.astype(str).apply(lambda x: '_'.join(x),axis = 1))
uclass = np.unique(classy.MasterClass)
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=666, stratify=classy.MasterClass)
# Create a further split within the training dataset for CV and for validation.
classy2 = classy.iloc[X_train.index]
X_cv, X_val, y_cv, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=666, stratify=classy2.MasterClass)
X_cv.index = range(X_cv.shape[0])
y_cv.index = range(y_cv.shape[0])
X_val.index = range(X_val.shape[0])
y_val.index = range(y_val.shape[0])
X_test.index = range(X_test.shape[0])
y_test.index = range(y_test.shape[0])
#import pickle
#PIK = "nacc_train.pkl"
#data = [X_cv,y_cv,X_val,y_val]
#with open(PIK, "wb") as f:
# pickle.dump(data, f)
#with open(PIK, "rb") as f:
# pickle_list = pickle.load(f)
# Now load in classifier & classified data to do error analyses.
import pickle
pik = "weovr_classifier_og_data.pickle"
with open(pik, "rb") as f:
pickle_list = pickle.load(f)
# Here are the contents of the pickle:
#data = [weovr_clf, X_train, X_test, y_train, y_test, OG_X, OG_y, OG_weovr_pred]
wovr = pickle_list[0]
X_aug_train = pickle_list[1]
X_aug_val = pickle_list[2]
y_aug_train = pickle_list[3]
y_aug_val = pickle_list[4]
pikX = pd.DataFrame(pickle_list[5])
feat = pd.read_csv("selected_features.csv")
feat = list(feat.columns)
pikX.columns = feat
piky = pd.DataFrame(pickle_list[6])
wovr_pred = pd.Series(pickle_list[7])
#tmptrain = pd.read_csv("X_cv.csv")
#tmptest = pd.read_csv("X_val.csv")
#tmp = pd.concat([tmptrain,tmptest], axis = 0)
OG_X = pd.concat([X_cv, X_val], axis = 0)
OG_X['WOVR'] = wovr_pred
OG_y = pd.DataFrame(pd.concat([y_cv, y_val], axis = 0))
OG_y += -1
OG_y.columns = ["Class"]
OG_y.index = OG_X.index
#Xy = pd.concat([OG_X, OG_y], axis = 1)
addcol = [*['NACCID','VISITDATE','Class','ADPath','TauPath','TDPPath','LBPath','VPath'], *npvar.Variable.to_list()]
Xy = OG_X.merge(right = aut[addcol], how='inner', on=['NACCID','VISITDATE'],
indicator='Merge', validate="1:1")
Xy.Class = Xy.Class - 1
#Xy['WOVR'] = wovr_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(Xy.Class, Xy.WOVR, normalize=None)
# Code some additional neuropath measures.
Xy['Braak03'] = np.ceil(Xy.NACCBRAA/2)
Xy.loc[Xy.Braak03 > 3,'Braak03'] = np.nan
thal = [0, 1, 2, 3, 4, 5,-4, 8, 9]
ascore = [0, 1, 1, 2, 3, 3, np.nan, np.nan, np.nan]
adict = dict(zip(thal,ascore))
Xy['Ascore'] = [adict[a] for a in Xy['NPTHAL']]
Xy['Bscore'] = np.ceil(Xy.NACCBRAA/2)
Xy['Cscore'] = Xy.NACCNEUR
Xy.loc[Xy['Cscore'].isin([8,9]), 'Cscore'] = np.nan
Xy['ABC'] = 0
Xy.loc[(Xy['Ascore'] == 1) & (Xy['Cscore'] < 2),'ABC'] = 1
Xy.loc[(Xy['Ascore'] > 0) & (Xy['Bscore'] < 2),'ABC'] = 1
Xy.loc[(Xy['Ascore'] == 1) & (Xy['Bscore'] > 1) & (Xy['Cscore'] > 1) ,'ABC'] = 2
Xy.loc[(Xy['Ascore'] > 1) & (Xy['Bscore'] > 1),'ABC'] = 2
Xy.loc[(Xy['Ascore'] == 3) & (Xy['Bscore'] == 3) & (Xy['Cscore'] > 1) ,'ABC'] = 3
# AD false alarms: people with primary non-AD pathology who were called AD.
print("Distribution of ABC scores for primary non-AD cases who were classified as AD:")
adfa = Xy.loc[(Xy.WOVR == 0) & (Xy.Class != 0),:]
adfatab = pd.crosstab(adfa['Class'],adfa['ABC'])
adfatab.index = ['Tau', 'TDP', 'LB', 'Vasc']
adfatab.to_latex('adfatab.tex')
# Non-AD false alarms: people with primary AD pathology who were called non-AD.
print("Distribution of ABC scores for primary AD cases who were classified as non-AD:")
nadfa = Xy.loc[(Xy.WOVR != 0) & (Xy.Class == 0),:]
pd.crosstab(nadfa['Class'],nadfa['ABC'])
nadfa.loc[nadfa.NPFTDTAU == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCPICK == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCCBD == 1,'TauPath'] = 1
nadfa.loc[nadfa.NACCPROG == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT2 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT5 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT6 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT7 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFTDT9 == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPFRONT == 1,'TauPath'] = 1
nadfa.loc[nadfa.NPTAU == 1,'TauPath'] = 1
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
nadfa.loc[nadfa.NPLBOD.isin((2,3)),'LBPath'] = 1
nadfa.loc[nadfa.NPPLEWY == 1,'LBPath'] = 1
nadfa.loc[nadfa.NPPLEWY == 2,'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
nadfa.loc[nadfa.NPFTD == 1,'TDPPath'] = 1
nadfa.loc[nadfa.NPFTDTDP == 1,'TDPPath'] = 1
nadfa.loc[nadfa.NPALSMND == 1,'TDPPath'] = 1
# Code vascular disease based on relevant derived variables:
nadfa.loc[nadfa.NPINF == 1,'VPath'] = 1
nadfa.loc[nadfa.NACCMICR == 1,'VPath'] = 1
nadfa.loc[nadfa.NACCHEM == 1,'VPath'] = 1
nadfa.loc[nadfa.NPPATH == 1,'VPath'] = 1
nadfa.loc[nadfa.NPPVASC == 1,'VPath'] = 1
nadfatab = pd.DataFrame(np.stack([ nadfa.TauPath.value_counts(),
nadfa.TDPPath.value_counts(),
nadfa.LBPath.value_counts(),
nadfa.VPath.value_counts() ]))
nadfatab.index = ['Tau','TDP','LB','Vasc']
nadfatab.columns = ['No','Yes']
nadfatab.to_latex('nadfatab.tex')
# Vascular false alarms: people without primary vascular pathology who were called vascular.
print("Presence of vascular pathology in cases misclassified as primarily vascular:")
vfa = Xy.loc[(Xy.WOVR == 4) & (Xy.Class != 4),:]
vfa['NPINF'] = vfa['NPINF'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCMICR'] = vfa['NACCMICR'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCHEM'] = vfa['NACCHEM'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NACCMICR'] = vfa['NACCMICR'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NPPATH'] = vfa['NPPATH'].replace(to_replace = [-4,8,9], value = np.nan)
vfa['NPPVASC'] = vfa['NPPVASC'].replace(to_replace = [2], value = 0)
vfa['NPPVASC'] = vfa['NPPVASC'].replace(to_replace = [-4,8,9], value = np.nan)
vfa.loc[vfa.NPINF == 1,'VPath'] = 1
vfa.loc[vfa.NACCMICR == 1,'VPath'] = 1
vfa.loc[vfa.NACCHEM == 1,'VPath'] = 1
vfa.loc[vfa.NPPATH == 1,'VPath'] = 1
vfa.loc[vfa.NPPVASC == 1,'VPath'] = 1
vfatab = pd.DataFrame(np.stack([ vfa.NPPVASC.value_counts(),
vfa.NPINF.value_counts(),
vfa.NACCMICR.value_counts(),
vfa.NACCHEM.value_counts(),
vfa.NPPATH.value_counts() ]))
vfatab.index = ['Primary vascular','Old infarcts', 'Microinfarcts','Hemorrhages','Other']
vfatab.columns = ['No','Yes']
vfatab.to_latex('vfatab.tex')
```
|
{
"source": "jeffreypicard/lbry-sdk",
"score": 2
}
|
#### File: lbry-sdk/scripts/dht_node.py
```python
import asyncio
import argparse
import logging
from typing import Optional
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional[str]):
loop = asyncio.get_event_loop()
conf = Config()
storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
if bootstrap_node:
nodes = bootstrap_node.split(':')
nodes = [(nodes[0], int(nodes[1]))]
else:
nodes = conf.known_dht_nodes
await storage.open()
node = Node(
loop, PeerManager(loop), generate_id(), port, port, 3333, None,
storage=storage
)
node.start(host, nodes)
while True:
await asyncio.sleep(10)
log.info("Known peers: %d. Storing contact information for %d blobs from %d peers.",
len(node.protocol.routing_table.get_peers()), len(node.protocol.data_store),
len(node.protocol.data_store.get_storing_contacts()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Starts a single DHT node, which then can be used as a seed node or just a contributing node.")
parser.add_argument("--host", default='0.0.0.0', type=str, help="Host to listen for requests. Default: 0.0.0.0")
parser.add_argument("--port", default=4444, type=int, help="Port to listen for requests. Default: 4444")
parser.add_argument("--db_file", default='/tmp/dht.db', type=str, help="DB file to save peers. Default: /tmp/dht.db")
parser.add_argument("--bootstrap_node", default=None, type=str,
help="Node to connect for bootstraping this node. Leave unset to use the default ones. "
"Format: host:port Example: lbrynet1.lbry.com:4444")
args = parser.parse_args()
asyncio.run(main(args.host, args.port, args.db_file, args.bootstrap_node))
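# Example invocation (a sketch using the defaults documented in the argparse
# arguments above; the bootstrap node shown is the one given in the
# --bootstrap_node help text):
#   python dht_node.py --host 0.0.0.0 --port 4444 --db_file /tmp/dht.db \
#       --bootstrap_node lbrynet1.lbry.com:4444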
```
|
{
"source": "Jeffrey-P-McAteer/ntest",
"score": 3
}
|
#### File: ntest/nbuild/util.py
```python
import hashlib
import shutil
import os
def hash16(data):
"""
Return a hex string of the data's hash. Currently uses md5.
"""
hash_object = hashlib.md5(bytes(data, 'utf-8'))
return hash_object.hexdigest()
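# Example (a sketch, not part of the original module): md5 hex digests are
# always 32 lowercase hex characters, regardless of input length.
#
#   key = hash16("some cache key")
#   assert len(key) == 32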
def deflate_dir(dst_path):
"""
We move files up until there is more than 1 item at the root (dst_path)
This avoids messy issues where we extract to "ABC/" and get
"ABC/ABC-1.2.3/<actual stuff we wanted under ABC>"
"""
remaining_loops = 5
while len(os.listdir(dst_path)) < 2 and remaining_loops > 0:
remaining_loops -= 1
# Move everything in dst_path/<directory>/* into dst_path
child_dir = os.path.join(dst_path, os.listdir(dst_path)[0])
for child_f in os.listdir(child_dir):
shutil.move(os.path.join(child_dir, child_f), os.path.join(dst_path, child_f))
os.rmdir(child_dir)
```
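A short usage sketch of the two helpers above. The import path follows the `ntest/nbuild/util.py` location shown in the file header, and the extraction directory is a placeholder.
```python
# Illustrative only: hash a string and flatten a freshly extracted directory tree.
from nbuild.util import hash16, deflate_dir

digest = hash16('https://example.com/some-download.tar.gz')
print(digest)  # stable hex digest, usable as a cache key

extract_dir = '/tmp/ABC'  # placeholder: directory an archive was extracted into
deflate_dir(extract_dir)  # collapses /tmp/ABC/ABC-1.2.3/* up into /tmp/ABC/*
```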
|
{
"source": "Jeffrey-P-McAteer/ros_object_recognition",
"score": 2
}
|
#### File: whs_navigation/src/move_base_stump.py
```python
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction
def goal_cb(goal):
rospy.loginfo("I received:")
rospy.loginfo("\tHeader:")
rospy.loginfo("\t\tframe_id: %s" %goal.target_pose.header.frame_id)
rospy.loginfo("\t\tstamp: %s" %goal.target_pose.header.stamp)
rospy.loginfo("\t\tseq: %s" %goal.target_pose.header.seq)
rospy.loginfo("\tPose:")
rospy.loginfo("\t\tposition.x: %s" %goal.target_pose.pose.position.x)
rospy.loginfo("\t\tposition.y: %s" %goal.target_pose.pose.position.y)
rospy.loginfo("\t\tposition.z: %s" %goal.target_pose.pose.position.z)
rospy.loginfo("\t\torientation.x: %s" %goal.target_pose.pose.orientation.x)
rospy.loginfo("\t\torientation.y: %s" %goal.target_pose.pose.orientation.y)
rospy.loginfo("\t\torientation.z: %s" %goal.target_pose.pose.orientation.z)
rospy.loginfo("\t\torientation.w: %s" %goal.target_pose.pose.orientation.w)
rospy.sleep(1.0)
server.set_succeeded()
if __name__ == "__main__":
## Setup ==============================================
rospy.init_node("move_base_stump")
server = actionlib.SimpleActionServer("move_base", MoveBaseAction, goal_cb,
False)
server.start()
## Event loop ==============================================
rospy.spin()
```
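To exercise the stub above, a separate node can send a goal to the same `move_base` action server. The client below is only a sketch using the standard `actionlib.SimpleActionClient`; the node name, frame id and coordinates are arbitrary choices, not part of the original package.
```python
# Hypothetical client for the stump above: sends one goal and waits for the (always-successful) result.
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal

if __name__ == "__main__":
    rospy.init_node("move_base_stump_client")
    client = actionlib.SimpleActionClient("move_base", MoveBaseAction)
    client.wait_for_server()

    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = "map"       # arbitrary frame for the example
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose.position.x = 1.0
    goal.target_pose.pose.orientation.w = 1.0      # identity orientation

    client.send_goal(goal)
    client.wait_for_result()
```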
|
{
"source": "jeffreyp-perfectomobile-com/Reporting-Samples",
"score": 3
}
|
#### File: Python/export-api-sample/reporting-public-api.py
```python
import requests
import os
import time
import json
import shutil
# The Perfecto Continuous Quality Lab you work with
CQL_NAME = 'MY_CQL_NAME'
# The reporting Server address depends on the location of the lab. Please refer to the documentation at
# http://developers.perfectomobile.com/display/PD/Reporting#Reporting-ReportingserverAccessingthereports
# to find your relevant address
# For example the following is used for US:
REPORTING_SERVER_URL = 'https://' + CQL_NAME + '.reporting.perfectomobile.com'
# See http://developers.perfectomobile.com/display/PD/Using+the+Reporting+Public+API on how to obtain an Offline Token
# In this case the offline token is stored as a env variable
OFFLINE_TOKEN = os.environ['offline-token']
CQL_SERVER_URL = 'https://' + CQL_NAME + '.perfectomobile.com'
def retrieve_tests_executions():
"""
Retrieve a list of test executions within the last month
:return: JSON object contains the executions
"""
api_url = REPORTING_SERVER_URL + '/export/api/v1/test-executions'
# Optional parameters to be passed with our http request, In this example:
# retrieve test executions of the past month (result may contain tests of multiple driver executions)
current_time_millis = lambda: int(round(time.time() * 1000))
payload = {
'startExecutionTime[0]': current_time_millis() - (30 * 24 * 60 * 60 * 1000),
'endExecutionTime[0]': current_time_millis(),
}
# creates http get request with the url, given parameters (payload) and header (for authentication)
r = requests.get(api_url, params=payload, headers={'PERFECTO_AUTHORIZATION': OFFLINE_TOKEN})
return r.content
def retrieve_test_commands(test_id):
"""
retrieve commands of test with a given test id
:param test_id: a test id
:return: a JSON object contains the commands of the test with a given test id
"""
api_url = REPORTING_SERVER_URL + "/export/api/v1/test-executions/" + test_id + "/commands"
r = requests.get(api_url, headers={'PERFECTO_AUTHORIZATION': OFFLINE_TOKEN})
return r.content
def download_execution_summary_report(driver_execution_id):
"""
download execution report summary (pdf format)
:param driver_execution_id: execution id
"""
api_url = REPORTING_SERVER_URL + '/export/api/v1/test-executions/pdf'
payload = {
'externalId[0]': driver_execution_id
}
r = requests.get(api_url, params=payload, headers={'PERFECTO_AUTHORIZATION': OFFLINE_TOKEN}, stream=True)
download_file_attachment(r, driver_execution_id + '.pdf')
def download_test_report(test_id):
"""
download test report summary (pdf format)
:param test_id: test id
"""
api_url = REPORTING_SERVER_URL + "/export/api/v1/test-executions/pdf/" + test_id
r = requests.get(api_url, headers={'PERFECTO_AUTHORIZATION': OFFLINE_TOKEN}, stream=True)
download_file_attachment(r, test_id + '.pdf')
def download_video(test_execution):
"""
downloads a video for given test execution
:param test_execution: execution JSON object
"""
videos = test_execution['videos']
if len(videos) > 0:
video = videos[0]
download_url = video['downloadUrl'] # retrieve the video download url
video_format = '.' + video['format'] # set the video format
test_id = test_execution['id']
r = requests.get(download_url, stream=True)
download_file_attachment(r, test_id + video_format)
else:
print('No videos were found for the given test execution...')
def download_attachments(test_execution):
"""
downloads attachments for a given test execution
:param test_execution: test execution JSON object
"""
artifacts = test_execution['artifacts']
if len(artifacts) > 0:
for arti in artifacts:
type = arti['type']
if type == 'DEVICE_LOGS':
test_id = test_execution['id']
path = arti['path']
r = requests.get(path, stream=True)
download_file_attachment(r, test_id + '.zip')
else:
print('No artifacts found for the given test_execution')
def download_file_attachment(r, filename):
"""
Downloads attachment as pdf file from request object
:param r: request to handle
:param filename: name for the pdf file
"""
if r.status_code == 200:
with open(filename, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
        raise Exception('Request status code is not valid: ' + str(r.status_code))
if __name__ == '__main__':
# Retrieve a list of the test executions in your lab (as a json)
executions = retrieve_tests_executions()
# Loads JSON string into JSON object
executions = json.loads(executions)
resources = executions['resources']
if len(resources) == 0:
print('there are no test executions for that period of time')
else:
test_execution = resources[1] # retrieve a test execution
driver_execution_id = test_execution['externalId'] # retrieve the execution id
test_id = test_execution['id'] # retrieve a single test id
# retrieve the test commands
test_commands = retrieve_test_commands(test_id)
# download execution report in pdf format
download_execution_summary_report(driver_execution_id)
# downloads the test report in pdf format
download_test_report(test_id)
# downloads video
download_video(test_execution)
# Download attachments such as device logs, vitals or network files (relevant for Mobile tests only)
download_attachments(test_execution)
```
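The `__main__` block above only processes `resources[1]`. If every execution in the queried time window is wanted, the same helper functions can simply be looped; the sketch below reuses them unchanged.
```python
# Sketch: download the PDF report and video for every execution returned by the export API.
all_executions = json.loads(retrieve_tests_executions())
for execution in all_executions['resources']:
    download_test_report(execution['id'])
    download_video(execution)
```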
|
{
"source": "jeffreyquirino/backup_looksistem",
"score": 2
}
|
#### File: LookProjeto/catalog/models.py
```python
from django.db import models
from django.core.urlresolvers import reverse
class Category(models.Model):  # model that represents a table in the database
    name = models.CharField('Nome', max_length=100)  # CharField is like SQL's varchar
    slug = models.SlugField('Identificador', max_length=100)  # a kind of CharField, but unique in the database
    created = models.DateTimeField('Criado em', auto_now_add=True)  # set to the current date on creation and never changed
    modified = models.DateTimeField('Modificado em', auto_now=True)  # updated to the current date every time the object is saved
    class Meta:  # meta options accepted by this model
        verbose_name = 'Categoria'  # verbose_name is the human-readable description of the class
        verbose_name_plural = 'Categorias'
        ordering = ['name']  # keep results in alphabetical order
    def __str__(self):  # string representation of the object
        return self.name
    def get_absolute_url(self):
        return reverse('catalog:category', kwargs={'slug': self.slug})  # kwargs are named parameters; this is the standard way to build the object's URL
class Product(models.Model):  # product model
    name = models.CharField('Nome', max_length=100)
    slug = models.SlugField('Identificador', max_length=100)
    category = models.ForeignKey('catalog.Category', on_delete=models.PROTECT, verbose_name='Categoria')  # a category has one or more associated products
    description = models.TextField('Descrição', blank=True)  # optional field
price = models.DecimalField('Preço', decimal_places=2, max_digits=8)
created = models.DateTimeField('Criado em', auto_now_add=True)
modified = models.DateTimeField('Modificado em', auto_now=True)
class Meta:
verbose_name = 'Produto'
verbose_name_plural = 'Produtos'
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
        return reverse('catalog:product', kwargs={'slug': self.slug})  # kwargs are named parameters; this is the standard way to build the object's URL
```
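The `reverse('catalog:category', ...)` and `reverse('catalog:product', ...)` calls above assume a URLconf namespaced as `catalog` with slug-based routes. The snippet below is a hypothetical sketch of such a URLconf for the pre-2.0 Django implied by the `django.core.urlresolvers` import; the view names and URL prefixes are assumptions, not part of the original project.
```python
# catalog/urls.py -- hypothetical URLconf matching the reverse() calls in the models above.
# Assumed to be included in the project urls with namespace='catalog'.
from django.conf.urls import url
from . import views  # assumed module providing category and product views

urlpatterns = [
    url(r'^categoria/(?P<slug>[\w-]+)/$', views.category, name='category'),
    url(r'^produto/(?P<slug>[\w-]+)/$', views.product, name='product'),
]
```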
#### File: core/templatetags/pagination.py
```python
from django.template import Library
register = Library()  # register the template library
@register.inclusion_tag('pagination.html')  # inclusion tag: renders the named template with the returned context
def pagination(request, paginator, page_obj):  # this tag can then be used inside templates
context = {}
context['paginator'] = paginator
context['request'] = request
context['page_obj'] = page_obj
    getvars = request.GET.copy()  # querystring variables
    if 'page' in getvars:
        del getvars['page']  # drop the page parameter from the querystring
    if len(getvars) > 0:
        context['getvars'] = '&{0}'.format(getvars.urlencode())  # prefix the remaining parameters with & so the querystring is preserved
else:
context['getvars'] = ''
return context
```
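The inclusion tag above expects `request`, a `Paginator` and a page object in the template context; a template would use `{% load pagination %}` followed by `{% pagination request paginator page_obj %}`. The view below is a hypothetical sketch of how those objects could be produced; the view name, template name and page size are placeholders.
```python
# Hypothetical view producing the context variables the pagination tag consumes.
from django.core.paginator import Paginator
from django.shortcuts import render

from catalog.models import Product  # assumed import path


def product_list(request):
    products = Product.objects.all()
    paginator = Paginator(products, 12)  # 12 items per page, arbitrary
    page_obj = paginator.page(request.GET.get('page', 1))
    return render(request, 'product_list.html',
                  {'paginator': paginator, 'page_obj': page_obj})
```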
|
{
"source": "jeffreyrack/pre-commit",
"score": 3
}
|
#### File: pre_commit/commands/migrate_config.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import io
import re
import yaml
from aspy.yaml import ordered_load
def _indent(s):
lines = s.splitlines(True)
return ''.join(' ' * 4 + line if line.strip() else line for line in lines)
def _is_header_line(line):
return (line.startswith(('#', '---')) or not line.strip())
def _migrate_map(contents):
# Find the first non-header line
lines = contents.splitlines(True)
i = 0
while _is_header_line(lines[i]):
i += 1
header = ''.join(lines[:i])
rest = ''.join(lines[i:])
if isinstance(ordered_load(contents), list):
# If they are using the "default" flow style of yaml, this operation
# will yield a valid configuration
try:
trial_contents = header + 'repos:\n' + rest
ordered_load(trial_contents)
contents = trial_contents
except yaml.YAMLError:
contents = header + 'repos:\n' + _indent(rest)
return contents
def _migrate_sha_to_rev(contents):
reg = re.compile(r'(\n\s+)sha:')
return reg.sub(r'\1rev:', contents)
def migrate_config(runner, quiet=False):
with io.open(runner.config_file_path) as f:
orig_contents = contents = f.read()
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
if contents != orig_contents:
with io.open(runner.config_file_path, 'w') as f:
f.write(contents)
print('Configuration has been migrated.')
elif not quiet:
print('Configuration is already migrated.')
```
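For context, `_migrate_map` wraps a legacy top-level list under a `repos:` key and `_migrate_sha_to_rev` renames indented `sha:` keys to `rev:`. The snippet below only illustrates that transformation and assumes it runs in the same module so both helpers are in scope.
```python
# Illustrative input/output for the two migration helpers above.
legacy = (
    '-   repo: https://github.com/pre-commit/pre-commit-hooks\n'
    '    sha: v1.2.3\n'
    '    hooks:\n'
    '    -   id: trailing-whitespace\n'
)
migrated = _migrate_sha_to_rev(_migrate_map(legacy))
# migrated now begins with "repos:" and the nested "sha:" key has become "rev:".
```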
#### File: pre_commit/languages/fail.py
```python
from __future__ import unicode_literals
from pre_commit.languages import helpers
ENVIRONMENT_DIR = None
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
install_environment = helpers.no_install
def run_hook(prefix, hook, file_args):
out = hook['entry'].encode('UTF-8') + b'\n\n'
out += b'\n'.join(f.encode('UTF-8') for f in file_args) + b'\n'
return 1, out, b''
```
#### File: tests/commands/clean_test.py
```python
from __future__ import unicode_literals
import os.path
import mock
import pytest
from pre_commit.commands.clean import clean
@pytest.fixture(autouse=True)
def fake_old_dir(tempdir_factory):
fake_old_dir = tempdir_factory.get()
def _expanduser(path, *args, **kwargs):
assert path == '~/.pre-commit'
return fake_old_dir
with mock.patch.object(os.path, 'expanduser', side_effect=_expanduser):
yield fake_old_dir
def test_clean(store, fake_old_dir):
store.require_created()
assert os.path.exists(fake_old_dir)
assert os.path.exists(store.directory)
clean(store)
assert not os.path.exists(fake_old_dir)
assert not os.path.exists(store.directory)
def test_clean_idempotent(store):
assert not os.path.exists(store.directory)
clean(store)
assert not os.path.exists(store.directory)
```
#### File: pre-commit/tests/main_test.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os.path
import mock
import pytest
from pre_commit import main
from testing.auto_namedtuple import auto_namedtuple
from testing.util import cwd
FNS = (
'autoupdate', 'clean', 'install', 'install_hooks', 'migrate_config', 'run',
'sample_config', 'uninstall',
)
CMDS = tuple(fn.replace('_', '-') for fn in FNS)
@pytest.fixture
def mock_commands():
mcks = {fn: mock.patch.object(main, fn).start() for fn in FNS}
ret = auto_namedtuple(**mcks)
yield ret
for mck in ret:
mck.stop()
class CalledExit(Exception):
pass
@pytest.fixture
def argparse_exit_mock():
with mock.patch.object(
argparse.ArgumentParser, 'exit', side_effect=CalledExit,
) as exit_mock:
yield exit_mock
@pytest.fixture
def argparse_parse_args_spy():
parse_args_mock = mock.Mock()
original_parse_args = argparse.ArgumentParser.parse_args
def fake_parse_args(self, args):
# call our spy object
parse_args_mock(args)
return original_parse_args(self, args)
with mock.patch.object(
argparse.ArgumentParser, 'parse_args', fake_parse_args,
):
yield parse_args_mock
def assert_only_one_mock_called(mock_objs):
total_call_count = sum(mock_obj.call_count for mock_obj in mock_objs)
assert total_call_count == 1
def test_overall_help(mock_commands, argparse_exit_mock):
with pytest.raises(CalledExit):
main.main(['--help'])
def test_help_command(
mock_commands, argparse_exit_mock, argparse_parse_args_spy,
):
with pytest.raises(CalledExit):
main.main(['help'])
argparse_parse_args_spy.assert_has_calls([
mock.call(['help']),
mock.call(['--help']),
])
def test_help_other_command(
mock_commands, argparse_exit_mock, argparse_parse_args_spy,
):
with pytest.raises(CalledExit):
main.main(['help', 'run'])
argparse_parse_args_spy.assert_has_calls([
mock.call(['help', 'run']),
mock.call(['run', '--help']),
])
@pytest.mark.parametrize('command', CMDS)
def test_all_cmds(command, mock_commands, mock_store_dir):
main.main((command,))
assert getattr(mock_commands, command.replace('-', '_')).call_count == 1
assert_only_one_mock_called(mock_commands)
def test_try_repo(mock_store_dir):
with mock.patch.object(main, 'try_repo') as patch:
main.main(('try-repo', '.'))
assert patch.call_count == 1
def test_help_cmd_in_empty_directory(
mock_commands,
tempdir_factory,
argparse_exit_mock,
argparse_parse_args_spy,
):
path = tempdir_factory.get()
with cwd(path):
with pytest.raises(CalledExit):
main.main(['help', 'run'])
argparse_parse_args_spy.assert_has_calls([
mock.call(['help', 'run']),
mock.call(['run', '--help']),
])
def test_expected_fatal_error_no_git_repo(
tempdir_factory, cap_out, mock_store_dir,
):
with cwd(tempdir_factory.get()):
with pytest.raises(SystemExit):
main.main([])
log_file = os.path.join(mock_store_dir, 'pre-commit.log')
assert cap_out.get() == (
'An error has occurred: FatalError: git failed. '
'Is it installed, and are you in a Git repository directory?\n'
'Check the log at {}\n'.format(log_file)
)
def test_warning_on_tags_only(mock_commands, cap_out, mock_store_dir):
main.main(('autoupdate', '--tags-only'))
assert '--tags-only is the default' in cap_out.get()
```
#### File: pre-commit/tests/runner_test.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
import os.path
import pre_commit.constants as C
from pre_commit.runner import Runner
from testing.fixtures import git_dir
from testing.util import cwd
def test_init_has_no_side_effects(tmpdir):
current_wd = os.getcwd()
runner = Runner(tmpdir.strpath, C.CONFIG_FILE)
assert runner.git_root == tmpdir.strpath
assert os.getcwd() == current_wd
def test_create_sets_correct_directory(tempdir_factory):
path = git_dir(tempdir_factory)
with cwd(path):
runner = Runner.create(C.CONFIG_FILE)
assert os.path.normcase(runner.git_root) == os.path.normcase(path)
assert os.path.normcase(os.getcwd()) == os.path.normcase(path)
def test_create_changes_to_git_root(tempdir_factory):
path = git_dir(tempdir_factory)
with cwd(path):
# Change into some directory, create should set to root
foo_path = os.path.join(path, 'foo')
os.mkdir(foo_path)
os.chdir(foo_path)
assert os.getcwd() != path
runner = Runner.create(C.CONFIG_FILE)
assert os.path.normcase(runner.git_root) == os.path.normcase(path)
assert os.path.normcase(os.getcwd()) == os.path.normcase(path)
def test_config_file_path():
runner = Runner(os.path.join('foo', 'bar'), C.CONFIG_FILE)
expected_path = os.path.join('foo', 'bar', C.CONFIG_FILE)
assert runner.config_file_path == expected_path
```
|
{
"source": "jeffreyruffolo/evolocity",
"score": 2
}
|
#### File: evolocity/preprocessing/featurize_seqs.py
```python
from .. import logging as logg
from .utils import mkdir_p
from collections import Counter
from anndata import AnnData
import math
import numpy as np
import os
def get_model(model_name, model_path=None, vocab_file=None):
if model_name == 'esm1':
from ..tools.fb_model import FBModel
model = FBModel(
'esm1_t34_670M_UR50S',
repr_layer=[-1],
)
elif model_name == 'esm1b':
from ..tools.fb_model import FBModel
model = FBModel(
'esm1b_t33_650M_UR50S',
repr_layer=[-1],
)
elif model_name == 'tape':
from ..tools.tape_model import TAPEModel
model = TAPEModel('bert-base', )
elif 'BERT' in model_name:
from ..tools.bert_model import BertModel
model = BertModel(model_name, model_path, vocab_file)
else:
raise ValueError('Invalid model {}'.format(model_name))
return model
def embed_seqs(
model,
seqs,
namespace,
verbose=True,
):
if 'esm' in model.name_:
from ..tools.fb_semantics import embed_seqs_fb
seqs_fb = sorted([seq for seq in seqs])
embedded = embed_seqs_fb(
model.model_,
seqs_fb,
model.repr_layers_,
model.alphabet_,
use_cache=False,
verbose=verbose,
)
X_embed = np.array([embedded[seq][0]['embedding'] for seq in seqs_fb])
elif 'BERT' in model.name_:
import transformers
seq_strings = [" ".join(str(s)) for s in seqs.keys()]
feat_extract = transformers.pipeline("feature-extraction",
model=model.model_,
tokenizer=model.tokenizer_)
feats = feat_extract(seq_strings)
X_embed = [np.array(f) for f in feats]
else:
raise ValueError(
'Model {} not supported for sequence embedding'.format(
model.name_))
sorted_seqs = sorted(seqs)
for seq_idx, seq in enumerate(sorted_seqs):
for meta in seqs[seq]:
meta['embedding'] = X_embed[seq_idx]
return seqs
def populate_embedding(
model,
seqs,
namespace=None,
use_cache=False,
batch_size=3000,
verbose=True,
):
if namespace is None:
namespace = 'protein'
if use_cache:
mkdir_p('target/{}/embedding'.format(namespace))
embed_prefix = ('target/{}/embedding/{}_512'.format(
namespace,
model.name_,
))
sorted_seqs = np.array([str(s) for s in sorted(seqs.keys())])
n_batches = math.ceil(len(sorted_seqs) / float(batch_size))
for batchi in range(n_batches):
if verbose:
logg.info('Embedding sequence batch {} / {}'.format(
batchi + 1, n_batches))
# Identify the batch.
start = batchi * batch_size
end = (batchi + 1) * batch_size
sorted_seqs_batch = sorted_seqs[start:end]
seqs_batch = {seq: seqs[seq] for seq in sorted_seqs_batch}
# Load from cache if available.
if use_cache:
embed_fname = embed_prefix + '.{}.npy'.format(batchi)
if os.path.exists(embed_fname):
X_embed = np.load(embed_fname, allow_pickle=True)
if X_embed.shape[0] == len(sorted_seqs_batch):
for seq_idx, seq in enumerate(sorted_seqs_batch):
for meta in seqs[seq]:
meta['embedding'] = X_embed[seq_idx]
continue
# Embed the sequences.
seqs_batch = embed_seqs(
model,
seqs_batch,
namespace,
verbose=verbose,
)
if use_cache:
X_embed = []
for seq in sorted_seqs_batch:
for meta in seqs[seq]:
meta['embedding'] = seqs_batch[seq][0]['embedding'].mean(0)
if use_cache:
X_embed.append(seqs[seq][0]['embedding'].ravel())
del seqs_batch
if use_cache:
np.save(embed_fname, np.array(X_embed))
return seqs
def seqs_to_anndata(seqs):
X, obs = [], {}
obs['n_seq'] = []
obs['seq'] = []
obs['seq_len'] = []
for seq in seqs:
meta = seqs[seq][0]
X.append(meta['embedding'])
for key in meta:
if key == 'embedding':
continue
if key not in obs:
obs[key] = []
obs[key].append(
Counter([meta[key]
for meta in seqs[seq]]).most_common(1)[0][0])
obs['n_seq'].append(len(seqs[seq]))
obs['seq'].append(str(seq))
obs['seq_len'].append(len(seq))
X = np.array(X)
adata = AnnData(X)
for key in obs:
adata.obs[key] = obs[key]
return adata
def featurize_seqs(
seqs,
model_name='esm1b',
model_path=None,
vocab_file=None,
mkey='model',
embed_batch_size=3000,
use_cache=False,
cache_namespace='protein',
):
"""Embeds a list of sequences.
Takes a list of sequences and returns an :class:`~anndata.Anndata`
object with sequence embeddings in the `adata.X` matrix.
Arguments
---------
seqs: `list`
List of protein sequences.
model_name: `str` (default: `'esm1b'`)
Language model used to compute likelihoods.
mkey: `str` (default: `'model'`)
Name at which language model is stored.
embed_batch_size: `int` (default: `3000`)
Batch size to embed sequences. Lower to fit into GPU memory.
use_cache: `bool` (default: `False`)
Cache embeddings to disk for faster future loading.
cache_namespace: `str` (default: `'protein'`)
Namespace at which to store cache.
Returns
-------
Returns an :class:`~anndata.Anndata` object with the attributes
`.X`
Matrix where rows correspond to sequences and columns are
language model embedding dimensions
seq: `.obs`
Sequences corresponding to rows in `adata.X`
model: `.uns`
language model
"""
model = get_model(model_name, model_path, vocab_file)
seqs = {str(seq): [{}] for seq in seqs}
seqs = populate_embedding(
model,
seqs,
namespace=cache_namespace,
use_cache=use_cache,
batch_size=embed_batch_size,
)
adata = seqs_to_anndata(seqs)
adata.uns[f'featurize_{mkey}'] = model
adata.uns[f'{mkey}'] = model
return adata
def featurize_fasta(
fname,
model_name='esm1b',
model_path=None,
vocab_file=None,
mkey='model',
embed_batch_size=3000,
use_cache=True,
cache_namespace=None,
):
"""Embeds a FASTA file.
Takes a FASTA file containing sequences and returns an
:class:`~anndata.Anndata` object with sequence embeddings
in the `adata.X` matrix.
    Assumes metadata is stored in the FASTA record as `key=value`
pairs that are separated by vertical bar "|" characters.
Arguments
---------
fname: `str`
Path to FASTA file.
model_name: `str` (default: `'esm1b'`)
Language model used to compute likelihoods.
mkey: `str` (default: `'model'`)
Name at which language model is stored.
embed_batch_size: `int` (default: `3000`)
Batch size to embed sequences. Lower to fit into GPU memory.
    use_cache: `bool` (default: `True`)
Cache embeddings to disk for faster future loading.
cache_namespace: `str` (default: `'protein'`)
Namespace at which to store cache.
Returns
-------
Returns an :class:`~anndata.Anndata` object with the attributes
`.X`
Matrix where rows correspond to sequences and columns are
language model embedding dimensions
seq: `.obs`
Sequences corresponding to rows in `adata.X`
model: `.uns`
language model
"""
model = get_model(model_name, model_path, vocab_file)
# Parse fasta.
from Bio import SeqIO
seqs = {}
with open(fname, 'r') as f:
for record in SeqIO.parse(f, 'fasta'):
fields = record.id.split('|')
meta = {
field.split('=')[0]: field.split('=')[1]
for field in fields
}
seq = str(record.seq)
if seq not in seqs:
seqs[seq] = []
seqs[seq].append(meta)
seqs = populate_embedding(
model,
seqs,
namespace=cache_namespace,
use_cache=use_cache,
batch_size=embed_batch_size,
)
adata = seqs_to_anndata(seqs)
adata.uns[mkey] = model
adata.uns[f'featurize_{mkey}'] = model
return adata
```
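A minimal usage sketch of `featurize_seqs`, following its docstring. The import path mirrors this file's location, the sequences are toy examples, and downloading the ESM-1b weights is assumed to be handled by the model wrapper.
```python
# Sketch: embed two toy protein sequences into an AnnData object.
from evolocity.preprocessing.featurize_seqs import featurize_seqs  # path assumed from the file header

seqs = [
    'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ',
    'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVA',
]
adata = featurize_seqs(seqs, model_name='esm1b', embed_batch_size=500)
print(adata)  # one row per unique sequence, embedding dimensions as columns
print(adata.obs[['seq', 'seq_len', 'n_seq']])
```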
|
{
"source": "jeffreyruffolo/ProteinSuperfamPredictor",
"score": 2
}
|
#### File: psfpred/dataset/generate_h5_file.py
```python
from itertools import chain
import os
from glob import glob
from tqdm import tqdm
import torch
import numpy as np
import h5py
import matplotlib.pyplot as plt
from Bio.PDB import PDBParser
_aa_dict = {
'A': 0,
'C': 1,
'D': 2,
'E': 3,
'F': 4,
'G': 5,
'H': 6,
'I': 7,
'K': 8,
'L': 9,
'M': 10,
'N': 11,
'P': 12,
'Q': 13,
'R': 14,
'S': 15,
'T': 16,
'V': 17,
'W': 18,
'Y': 19,
'X': 20
}
_aa_3_1_dict = {
'ALA': 'A',
'CYS': 'C',
'ASP': 'D',
'GLU': 'E',
'PHE': 'F',
'GLY': 'G',
'HIS': 'H',
'ILE': 'I',
'LYS': 'K',
'LEU': 'L',
'MET': 'M',
'ASN': 'N',
'PRO': 'P',
'GLN': 'Q',
'ARG': 'R',
'SER': 'S',
'THR': 'T',
'VAL': 'V',
'TRP': 'W',
'TYR': 'Y'
}
def get_dataset_stats(data, max_protein_len=300):
num_proteins = 0
for pdb_id, chain_residue_range, superfam in tqdm(data):
pdb_file = os.path.join(pdb_dir, "{}.pdb".format(pdb_id))
if not os.path.exists(pdb_file):
continue
if len(chain_residue_range.split(":")) != 2:
continue
chain_id, residue_range = chain_residue_range.split(":")
residue_range = get_residue_range(residue_range)
if residue_range == None:
continue
_, sequence = get_residue_range_coords(pdb_file, pdb_id, chain_id,
residue_range)
if len(sequence) > max_protein_len:
continue
num_proteins += 1
return num_proteins
def get_ca_coord(residue):
if 'CA' in residue:
return residue['CA'].get_coord()
else:
return [0, 0, 0]
def get_residue_range(residue_range):
residue_range_split = residue_range.split("-")
try:
if len(residue_range_split) == 2:
return (int(residue_range_split[0])), int(residue_range_split[1])
elif len(residue_range_split) == 3:
return (-1 * int(residue_range_split[1])), int(
residue_range_split[2])
elif len(residue_range_split) == 4:
return (
-1 *
int(residue_range_split[1])), -1 * int(residue_range_split[3])
except:
print("Invalid residue range: {}".format(residue_range))
return None
def get_residue_range_coords(pdb_file, pdb_id, chain_id, residue_range):
p = PDBParser()
structure = p.get_structure(pdb_id, pdb_file)
for chain in structure.get_chains():
if chain.id == chain_id:
residues = list(chain.get_residues())
residues = [
r for r in residues if residue_range[0] <= r.id[1] <= residue_range[1]
]
ca_coords = torch.tensor([get_ca_coord(r) for r in residues])
sequence = "".join(
[_aa_3_1_dict.setdefault(r.get_resname(), "X") for r in residues])
return ca_coords, sequence
def calc_dist_mat(coords):
mat_shape = (len(coords), len(coords), 3)
a_coords = coords.unsqueeze(0).expand(mat_shape)
b_coords = coords.unsqueeze(1).expand(mat_shape)
dist_mat = (a_coords - b_coords).norm(dim=-1)
return dist_mat
pdb_dir = "data/pdb_files"
data = np.genfromtxt("data/scop-simplified.tsv", delimiter="\t",
dtype=str)[:500]
max_protein_len = 300
num_proteins = get_dataset_stats(data, max_protein_len)
print("\nCreating dataset of ")
out_file = "data/dataset.h5"
h5_file = h5py.File(out_file, 'w')
id_set = h5_file.create_dataset('id', (num_proteins, ),
compression='lzf',
dtype='S25',
maxshape=(None, ))
superfam_set = h5_file.create_dataset('superfam', (num_proteins, ),
compression='lzf',
dtype='S25',
maxshape=(None, ))
sequence_set = h5_file.create_dataset('sequence',
(num_proteins, max_protein_len),
compression='lzf',
dtype='uint8',
maxshape=(None, max_protein_len),
fillvalue=-1)
sequence_len_set = h5_file.create_dataset('sequence_len', (num_proteins, ),
compression='lzf',
dtype='uint16',
maxshape=(None, ),
fillvalue=-1)
dist_mat_set = h5_file.create_dataset(
'dist_mat', (num_proteins, max_protein_len, max_protein_len),
maxshape=(None, max_protein_len, max_protein_len),
compression='lzf',
dtype='float',
fillvalue=-1)
h5_index = 0
for pdb_id, chain_residue_range, superfam in data:
pdb_file = os.path.join(pdb_dir, "{}.pdb".format(pdb_id))
if not os.path.exists(pdb_file):
continue
if len(superfam) < 2:
print()
if len(chain_residue_range.split(":")) != 2:
continue
chain_id, residue_range = chain_residue_range.split(":")
residue_range = get_residue_range(residue_range)
ca_coords, sequence = get_residue_range_coords(pdb_file, pdb_id, chain_id,
residue_range)
if len(sequence) > max_protein_len:
continue
dist_mat = calc_dist_mat(ca_coords)
encoded_sequence = np.array([_aa_dict[s] for s in sequence])
protein_id = "{}_{}_{}_{}".format(pdb_id, chain_id, residue_range[0],
residue_range[1])
id_set[h5_index] = protein_id
superfam_set[h5_index] = superfam
sequence_set[h5_index, :len(sequence)] = encoded_sequence
sequence_len_set[h5_index] = len(encoded_sequence)
dist_mat_set[h5_index, :len(sequence), :len(sequence)] = dist_mat
h5_index += 1
```
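The script writes five datasets into `data/dataset.h5`. A sketch for reading them back (the dataset names and the -1 padding follow the writer code above):
```python
# Sketch: inspect the HDF5 file produced by the script above.
import h5py

with h5py.File('data/dataset.h5', 'r') as f:
    ids = f['id'][:]               # byte-string protein ids
    superfams = f['superfam'][:]   # byte-string superfamily labels
    seq_lens = f['sequence_len'][:]
    n = int(seq_lens[0])
    dist_mat = f['dist_mat'][0, :n, :n]  # crop away the -1 padding
    print(ids[0].decode(), superfams[0].decode(), n, dist_mat.shape)
```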
#### File: ProteinSuperfamPredictor/psfpred/train.py
```python
import os
from tqdm import tqdm
import torch
import matplotlib.pyplot as plt
import numpy as np
from psfpred.dataset import ProteinDataset
from psfpred.model import Model
device_type = 'cuda' if torch.cuda.is_available() else 'cpu'
dev = torch.device(device_type)
# To use TPU (not sure if this works)
# import torch_xla
# import torch_xla.core.xla_model as xm
# dev = xm.xla_device()
print("Using {} as device".format(dev))
out_dir = "/home-2/<EMAIL>/code/ProteinSuperfamPredictor/training_runs/adam_10_2d"
# out_dir = "/home-2/<EMAIL>ff<EMAIL>/code/ProteinSuperfamPredictor/training_runs/adam_6_2d"
# out_dir = "/home-2/<EMAIL>/code/ProteinSuperfamPredictor/training_runs/adam_2_2d"
# out_dir = "/home-2/<EMAIL>/code/ProteinSuperfamPredictor/training_runs/sgd_10_2d"
# out_dir = "/home-2/<EMAIL>/code/ProteinSuperfamPredictor/training_runs/adam_10_2d_noseq"
# out_dir = "/home-2/<EMAIL>/code/ProteinSuperfamPredictor/training_runs/adam_10_2d_nodist"
os.makedirs(out_dir, exist_ok=True)
max_seq_len = 100
num_classes = 51
batch_size = 32
dataset = ProteinDataset("data/dataset.h5",
crop_size=max_seq_len,
num_classes=num_classes)
validation_split = 0.1
test_split = 0.2
train_split = 1 - validation_split - test_split
validation_split_length = int(len(dataset) * validation_split)
test_split_length = int(len(dataset) * test_split)
train_split_length = len(dataset) - validation_split_length - test_split_length
torch.manual_seed(0)
train_dataset, validation_dataset, test_dataset = torch.utils.data.random_split(
dataset, [train_split_length, validation_split_length, test_split_length])
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
collate_fn=ProteinDataset.merge_samples_to_minibatch)
validation_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=batch_size,
collate_fn=ProteinDataset.merge_samples_to_minibatch)
# test_loader = torch.utils.data.DataLoader(
# validation_dataset,
# batch_size=batch_size,
# collate_fn=ProteinDataset.merge_samples_to_minibatch)
model = Model(max_seq_len=max_seq_len, num_classes=num_classes).to(dev)
# model = Model(max_seq_len=max_seq_len,
# num_classes=num_classes,
# ignore_seq=True).to(dev)
# model = Model(max_seq_len=max_seq_len,
# num_classes=num_classes,
# ignore_dist=True).to(dev)
optimizer = torch.optim.Adam(model.parameters())
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
loss_func = torch.nn.CrossEntropyLoss().to(dev)
epochs = 40
train_losses = []
validation_losses = []
for i in tqdm(range(epochs)):
model.train()
train_loss = 0
for (seq_inputs, dist_inputs), superfam_labels in tqdm(train_loader):
optimizer.zero_grad()
seq_inputs = seq_inputs.to(dev)
dist_inputs = dist_inputs.unsqueeze(1).to(dev)
def handle_batch():
outputs = model(seq_inputs, dist_inputs, dev=dev)
loss = loss_func(outputs, superfam_labels.to(dev))
loss.backward()
optimizer.step()
return loss.item()
train_loss += handle_batch()
validation_loss = 0
with torch.no_grad():
model.eval()
for (seq_inputs,
dist_inputs), superfam_labels in tqdm(validation_loader):
seq_inputs = seq_inputs.to(dev)
dist_inputs = dist_inputs.unsqueeze(1).to(dev)
def handle_batch():
outputs = model(seq_inputs, dist_inputs, dev=dev)
loss = loss_func(outputs, superfam_labels.to(dev))
return loss.item()
validation_loss += handle_batch()
scheduler.step(validation_loss / len(validation_loader))
train_losses.append(train_loss / len(train_loader))
validation_losses.append(validation_loss / len(validation_loader))
print("train", train_losses[-1])
print("validation", validation_losses[-1])
plt.figure(dpi=500)
plt.plot(train_losses, label="Train")
plt.plot(validation_losses, label="Validation")
plt.ylabel("CCE Loss")
plt.xlabel("Epoch")
plt.legend()
plt.savefig(os.path.join(out_dir, "loss.png"))
plt.close()
np.savetxt(os.path.join(out_dir, "loss_data.csv"),
np.array([
list(range(len(train_losses))), train_losses,
validation_losses
]).T,
delimiter=",")
torch.save(model, os.path.join(out_dir, "model.e{}.torch".format(i + 1)))
torch.save(model, os.path.join(out_dir, "model.torch"))
```
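Because `torch.save(model, ...)` above serializes the whole module, evaluation can reload it directly, provided the `psfpred` package is importable. The directory below is a placeholder for whichever `out_dir` was used during training.
```python
# Sketch: reload a saved checkpoint for evaluation on CPU.
import os
import torch

out_dir = 'training_runs/adam_10_2d'  # placeholder: the out_dir used during training
model = torch.load(os.path.join(out_dir, 'model.torch'), map_location='cpu')
model.eval()
```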
|
{
"source": "Jeffrey-Sardina/SignRecorder",
"score": 3
}
|
#### File: Jeffrey-Sardina/SignRecorder/SignRecorder.pyw
```python
import cv2
import tkinter as tk
from tkinter import filedialog
import threading
import traceback
import sys
import logging
import os
import time
from PIL import Image, ImageTk
import abc
import json
'''
Program data global variables
'''
#backend
logger = None
#experiment data
webcam_num = 1
study_name = ''
study_files = []
subject_id_entry_box = None
experiment = None
#recording
window = None
recorder = None
just_started = True
#files
out_dir = None
video_path = None
video_id = None
last_video_id = None
#tk ui
main_frame = None
pop_up_window = None
width = 0
height = 0
key_tracker = None
padding_x = 10
padding_y = 10
default_font = 20
#Settings
settings_dict_defaults = {'backcolor': '#000000',
'ui_element_color': '#888888',
'forecolor': '#000000',
'draggable_color0': '#888888',
'draggable_color1': '#aaaaaa'}
settings_dict = {}
'''
Initialization
'''
def main():
'''
First method called in this thread of program execution.
    Runs through a series of initialization steps and then loads the gui.
Once the gui is loaded, the gui takes over control of program execution from there onwards.
'''
init_logging()
load_config()
init_gui()
def init_logging():
'''
    Initializes the logging system. The logging system is intended to allow the program to save data about each run to disk,
and the logger itself will re-write any existing logs each time so as to conserve space and avoid cluttering the running
directory with files. This method also triggers the first log write.
Most methods in this program trigger a log call. For simplicity, the calls to logging are not mentioned in the method
descriptions in general.
'''
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler('SignRecorder.log', mode='w')
logger.addHandler(file_handler)
file_handler_format = logging.Formatter('%(levelname)s - %(asctime)s - %(message)s')
file_handler.setFormatter(file_handler_format)
logger.info('init_logging: Starting log')
def load_config():
'''
    Loads the user settings from the config.json file. If the file is not present or is corrupted, the program uses default values
    and writes a new config file, overwriting the old one if it is present.
'''
global settings_dict
logger.info('load_config:')
try:
with open('config.json', 'r') as config:
settings_dict = json.loads(config.read())
except Exception as err:
message = 'load_config: Could not read config file'
logger.error(message + ': ' + str(err))
recover_config_file()
def recover_config_file():
'''
This method should only be called if the config file is corrupted or missing. It re-writes the config data and replaces all
data with the default values, or the last loaded non-corrupt data value if such a data value is present.
    If this operation fails, the program will continue to run, but no config file will be generated.
'''
global settings_dict
logger.info('recover_config_file: loading default settings and attempting to recover the config file')
settings_dict = settings_dict_defaults
try:
with open('config.json', 'w') as config:
print(json.dumps(settings_dict_defaults), file=config)
except Exception as err:
message = 'Attempt to recover config file failed: Could not write new config file'
logger.critical('recover_config_file:' + message + ': ' + str(err))
pop_up(message)
def find_webcams(search_num):
'''
    Searches to see how many webcams are attached to the current system. This is done by attempting to open each webcam from
    number 0 (the default webcam) to number search_num, which is given to the method. If the webcam opens, then the program knows
    it has found a webcam; if not, the webcam either cannot be accessed by the program or is not present. All opened webcams are
    closed after searching, and no webcam input is recorded at this step.
Parameters:
search_num: The number of webcams for which to search
'''
global webcam_num
webcam_num = 0
for i in range(search_num):
webcam_i = cv2.VideoCapture(i)
if webcam_i.isOpened():
webcam_num += 1
webcam_i.release()
logger.info('find_webcams: ' + str(webcam_num) + ' webcams found for recording')
def init_gui():
'''
Initializes the gui. The gui is created in a maximized state and takes over main-thread program execution. Note that all gui
    operations should remain on the main thread, except where the gui allows (such as in triggered events). This method also sets
    up the key_tracker to manage keypress events and attempt to authenticate them (since some OS's will trigger a key press and /
    or release repeatedly when a key is held down).
    The gui also maps the default close event (~the red X) to an operation that cleans up the program state properly. This
should help to prevent memory leaks on an unexpected closure.
'''
global width, height, window, key_tracker, main_frame
logger.info('init_gui:')
#Master window
window = tk.Tk()
window.wm_title('Sign Recorder')
window.config(background = settings_dict['backcolor'])
width = window.winfo_screenwidth()
height = window.winfo_screenheight()
window.geometry("%dx%d+0+0" % (width, height))
#Main Frame in window
main_frame = MainFrame(window, background = settings_dict['backcolor'])
main_frame.prepare_display()
main_frame.pack(side="top", fill="both", expand=True)
#input
key_tracker = KeyTracker()
window.bind_all('<KeyPress>', key_tracker.report_key_press)
window.bind_all('<KeyRelease>', key_tracker.report_key_release)
key_tracker.track('space')
#Exitting
window.protocol("WM_DELETE_WINDOW", on_close)
#Show window
window.mainloop()
'''
Core backend program functionality
'''
def pop_up(message):
'''
Creates a pop-up window to display a message. Please only call this method for important errors such as files that fail
    to load--each pop up will take focus from the main window and thus disrupt the user.
'''
global pop_up_window
logger.info('pop_up: message=' + message)
pop_up_window = tk.Tk()
pop_up_window.wm_title('Message')
pop_up_window.config(background = settings_dict['backcolor'])
pop_up_text = tk.Text(pop_up_window, font = default_font, height = 5, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
pop_up_text.insert(tk.INSERT, message)
pop_up_text.config(state = 'disabled')
pop_up_text.grid(row = 0, column = 0, padx = padding_x, pady = padding_y)
select_files_button = tk.Button(pop_up_window, text ="Close", command = pop_up_window.destroy, font = default_font, height = 3, width = 10, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
select_files_button.grid(row=1, column=0)
def write_out(name, message):
'''
Writes out a meta-file that contains metadata about the recorded video. The data is:
stimulus_name: the name of the file used as a stimulus for this recording
display_time: the amount of time the stimulus was displayed before recording
recording_time: the time length of the recording
total_time: the total time for this step of the experiment, = display_time + recording_time
'''
logger.info('write_out: writing meta file at path=' + out_dir + ' with name=' + name)
file_name = os.path.join(out_dir, name + '.meta.csv')
try:
if os.path.exists(file_name):
message = 'Cannot overwrite existing meta file: '
logger.critical('write_out: ' + message + file_name)
pop_up(message + '\n' + file_name)
raise Exception(message + file_name)
with open(file_name, 'w') as meta:
print(message, file=meta)
except Exception as err:
message = 'Failed to write out file: ' + file_name
logger.critical('write_out: ' + message + ': ' + str(err))
pop_up(message)
raise Exception(message)
def on_close(close = True):
'''
Handles properly closing the program and its spawned resources. As much as possible, all close events should be routed to
this method rather than immediately calling an exit function.
Parameter:
close: Whether this method should close the program. If false, all program resources still will be cleared but the program
will not be closed. This should be done when a critical exception occurs so the exception can still be raised.
'''
logger.info('on_close: Cleaning resources and closing' if close else 'on_close: Cleaning resources to prepare for closing')
window.destroy()
try:
recorder.end()
except:
pass
if close:
sys.exit(0)
'''
Experiment and data collection
'''
def on_key_press(event):
'''
Called whenever a key press event is detected. This method should not be linked to key press events directly; the use of the
KeyTracker class is necessary to authenticate key presses as valid before attempting to respond to them.
If this program has just started, the program runs a different method to respond to the key press event to manage the yet
un-initialized variables.
'''
experiment.on_input_press(event.keysym)
def on_key_release(event):
'''
Called whenever a key release event is detected. This method should not be linked to key release events directly; the use
of the KeyTracker class is necessary to authenticate key releases as valid before attempting to respond to them.
'''
experiment.on_input_release(event.keysym)
class Experiment(abc.ABC):
@abc.abstractmethod
def on_input_press(self, input_key):
pass
@abc.abstractmethod
def on_input_release(self, input_key):
pass
class Naming_Experiment(Experiment):
#Experiment Parameters
stimuli = []
subject_id = None
stimulus_type = None
#Experiment Running
recording = False
last_video_id = None
video_id = None
keep_displaying = True
current_stimulus = 0
can_start_recording = True
data = ''
#Timing
display_timer = None
recording_timer = None
def __init__(self, data):
'''
        Creates a new Naming_Experiment, which is an experiment in which single stimuli are presented and the subject is asked to
        produce the sign for what is shown.
        Parameters:
            data: A dictionary containing experiment data. This should contain the following keys:
                stimulus_files, containing absolute paths to all of the stimuli to use;
                stimulus_type, which tells whether the stimulus is Image or Video
'''
self.stimuli = data['stimulus_files']
self.stimulus_type = data['stimulus_type']
self.display_timer = Timer()
self.recording_timer = Timer()
def on_input_press(self, input_key):
'''
This method should be called every time space is pressed (if that space-press has been authenticated). It proceeds
to the next stimulus and will begin recording, ending the current stimulus and recording. It also ends the recording_timer if
        it was running, and updates program state tracking variables to reflect the current state of the program.
If there are no more stimuli to run, the program displays a pop-up message stating that data collection is complete.
'''
logger.info('Naming_Experiment: on_input_press: current_stimulus=' + str(self.current_stimulus) + ' recording=' + str(self.recording))
self.recording = False
if self.recording_timer.active():
self.recording_timer.end()
self.data += str(self.current_stimulus) + '_' + '_recordingTime,' + str(self.recording_timer.timespan) + '\n'
if self.current_stimulus >= len(self.stimuli):
message = 'All data for ' + str(self.subject_id) + ' has been collected'
write_out(self.subject_id + '_timing.csv', self.data)
pop_up(message)
logger.info('Naming_Experiment: on_input_press: ' + message)
main_frame.select_start_experiment()
self.reset_for_next_subject()
else:
self.load_stimulus()
def on_input_release(self, input_key):
'''
This method should be called when space is released (if that release has been authenticated). It begins recording and starts
        the recording timer. It also updates program state tracking variables to reflect the current state of the program.
'''
if self.subject_id == None:
self.subject_id = subject_id_entry_box.get().strip()
if self.can_start_recording and self.current_stimulus < len(self.stimuli):
logger.info('Naming_Experiment: on_input_release: current_stimulus=' + str(self.current_stimulus) + '; recording starting')
self.last_video_id = self.video_id
self.video_id = os.path.basename(self.stimuli[self.current_stimulus].strip())
self.recording = True
self.keep_displaying = False
self.recording_timer.begin()
self.current_stimulus += 1
recorder = Recorder(self.subject_id + '-' + self.video_id, True)
recorder.begin()
else:
logger.warning('Naming_Experiment: on_input_release: can_start_recording is False, video must end before the signer may be recorded')
def load_stimulus(self):
'''
        Loads and displays the next stimulus for the current subject, but should not be used for the first stimulus of a subject.
        It resets the display timer, which measures the time that a stimulus is displayed before signing.
        Later, it will also write timer output to a meta file with the same name as the output file. Timer data is not yet verified,
        though, so it is not ready for use.
'''
global keep_displaying
logger.info('Naming_Experiment: load_stimulus: current_stimulus=' + str(self.current_stimulus) + ' stimulus type=' + str(self.stimulus_type))
keep_displaying = True
stimulus = self.stimuli[self.current_stimulus].strip()
if self.display_timer.active():
self.display_timer.end()
self.data += str(self.current_stimulus) + '_' + '_displayTime,' + str(self.display_timer.timespan) + '\n'
if self.stimulus_type == 'Image':
self.display_timer.begin()
Image_Displayer(stimulus).begin()
elif self.stimulus_type == 'Video':
self.display_timer.begin()
Video_Displayer(stimulus).begin()
def reset_for_next_subject(self):
'''
Resets the environment so that the next subject can begin the experiment.
'''
logger.info('Naming_Experiment: reset_for_next_subject: Resetting the environment for the next subject')
key_tracker.reset()
self.subject_id = None
self.recording = False
self.last_video_id = None
self.video_id = None
self.keep_displaying = True
self.current_stimulus = 0
self.can_start_recording = True
subject_id_entry_box.delete(0, last='end')
class Lexical_Priming_Experiment(Experiment):
#Experiment Parameters
stimuli_tuples = []
subject_id = None
stimulus_type = None
primer_type = None
primer_time = 5
#Experiment Running
recording = False
last_video_id = None
video_id = None
keep_displaying = True
current_round = 0
can_start_recording = True
display_primer = True
#Timing
display_timer = None
recording_timer = None
primer_timer = None
def __init__(self, data):
'''
Creates a new Lexical_Priming_Experiment, in which there is a primer image followed by a stimulus for signing. Recording begins
after the stimulus has been shown. Transition between the primer and stimulus is done using space, as is transition between the
stimulus and the recording, etc.
Parameters:
            data: A dictionary containing experiment data. This should contain the following keys:
                files, which contains a list of tuples: (absolute path of the primer, absolute path of the stimulus);
                stimulus_type, which tells whether the stimulus is Image or Video;
                primer_type, which tells whether the primer is Image or Video;
                primer_time, which contains the time to show the primer (in seconds, only needed if primer_type is Image)
'''
self.stimuli_tuples = data['files']
self.stimulus_type = data['stimulus_type']
self.primer_type = data['primer_type']
self.primer_time = data['primer_time'] if self.primer_type == 'Image' else 0
self.display_timer = Timer()
self.recording_timer = Timer()
        self.primer_timer = Timer()
def on_input_press(self, input_key):
'''
This method should be called every time space is pressed (if that press has been authenticated), except the first. It proceeds
to the next stimulus and will begin recording, ending the current stimulus and recording. It also ends the recording_timer if
        it was running, and updates program state tracking variables to reflect the current state of the program.
If there are no more stimuli to run, the program displays a pop-up message stating that data collection is complete.
'''
        logger.info('Lexical_Priming_Experiment: on_input_press: current_round=' + str(self.current_round) + ' recording=' + str(self.recording) + ', display_primer=' + str(self.display_primer))
if self.display_primer:
pass
else:
self.recording = False
if self.recording_timer.active():
self.recording_timer.end()
if self.current_round >= len(self.stimuli_tuples):
main_frame.select_start_experiment()
pop_up('All data for ' + self.subject_id + ' has been collected')
self.reset_for_next_subject()
else:
self.load_primer()
def on_input_release(self, input_key):
'''
This method should be called when space is released (if that release has been authenticated). It begins recording and starts
        the recording timer. It also updates program state tracking variables to reflect the current state of the program.
'''
if self.display_primer:
self.display_primer = False
#code here
else:
if self.subject_id == None:
self.subject_id = subject_id_entry_box.get().strip()
if self.can_start_recording and self.current_round < len(self.stimuli_tuples):
logger.info('Lexical_Priming_Experiment: on_input_release: current_round=' + str(self.current_round) + '; recording starting')
self.last_video_id = self.video_id
self.video_id = os.path.basename(self.stimuli_tuples[self.current_round][0].strip()) + '-' + os.path.basename(self.stimuli_tuples[self.current_round][1].strip())
self.recording = True
self.keep_displaying = False
self.recording_timer.begin()
self.current_round += 1
recorder = Recorder(self.subject_id + '-' + self.video_id, True)
recorder.begin()
else:
logger.warning('Naming_Experiment: on_input_release: can_start_recording is False, video must end before the signer may be recorded')
    def load_primer(self):  # What about stimuli?
'''
        Loads and displays the next stimulus for the current subject, but should not be used for the first stimulus of a subject.
        It resets the display timer, which measures the time that a stimulus is displayed before signing.
        Later, it will also write timer output to a meta file with the same name as the output file. Timer data is not yet verified,
        though, so it is not ready for use.
'''
global keep_displaying
        logger.info('Lexical_Priming_Experiment: load_primer: current_round=' + str(self.current_round) + ' stimulus type=' + str(self.stimulus_type))
keep_displaying = True
primer = self.stimuli_tuples[self.current_round][0].strip()
timer = None
if self.display_timer.active():
self.display_timer.end()
if self.primer_type == 'Image':
self.display_timer.begin()
Image_Displayer(primer).begin()
timer = threading.Timer(self.primer_time, self.on_primer_finished) #In seconds
elif self.primer_type == 'Video':
self.display_timer.begin()
Video_Displayer(primer).begin()
timer = threading.Timer(self.primer_time, self.on_primer_finished) #In seconds
timer.start()
def on_primer_finished(self):
stimulus = self.stimuli_tuples[self.current_round][1].strip()
if self.stimulus_type == 'Image':
self.display_timer.begin()
Image_Displayer(stimulus).begin()
elif self.stimulus_type == 'Video':
self.display_timer.begin()
Video_Displayer(stimulus).begin()
def reset_for_next_subject(self):
'''
Resets the environment so that the next subject can begin the experiment.
'''
logger.info('Lexical_Priming_Experiment: reset_for_next_subject: Resetting the environment for the next subject')
key_tracker.reset()
self.subject_id = None
self.recording = False
self.last_video_id = None
self.video_id = None
self.keep_displaying = True
        self.current_round = 0
self.can_start_recording = True
subject_id_entry_box.delete(0, last='end')
'''
Data and user-input
'''
class Recorder():
'''
This class handles all recording using the webcam, and writes recrodings to disk.
'''
name = ''
mirror = False
web_cam = None
video_writer = None
def __init__(self, name, mirror):
'''
Initializes the Recorder. Parameters:
            name: The name to give to the recording file
mirror: Whether the recording should be mirrored when saved to disk
'''
        logger.info('Recorder: __init__: name=' + name + ' mirror=' + str(mirror))
self.name = name
self.mirror = mirror
def begin(self):
'''
        Begins recording from the webcam. The recording will continue until end is called, or until 1 is pressed.
Note that pressing 1 to quit should be used for debug purposes only
'''
# Capturing video from webcam:
self.web_cam = cv2.VideoCapture(0)
fps = self.web_cam.get(cv2.CAP_PROP_FPS)
#get width and height of reading frame
width = int(self.web_cam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(self.web_cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Define the codec and
fourcc = cv2.VideoWriter_fourcc(*"XVID")
#create VideoWriter object
file_name = os.path.join(out_dir, self.name + '.avi')
if os.path.exists(file_name):
message = 'Cannot overwrite existing video file: '
logger.critical('Recorder: begin: ' + message + file_name)
pop_up(message + '\n' + file_name)
raise Exception(message + file_name)
else:
self.video_writer = cv2.VideoWriter(file_name, fourcc, fps, (width, height))
if not self.web_cam.isOpened():
message = 'Could not open webcam'
logger.warning('Recorder: begin: ' + message)
pop_up(message)
raise Exception(message)
logger.info('Recorder: begin: starting recording loop')
while self.web_cam.isOpened():
# Capture frame-by-frame
is_reading, frame = self.web_cam.read()
if is_reading and experiment.recording:
if self.mirror:
frame = cv2.flip(frame, 1)
self.video_writer.write(frame)
else:
break
if cv2.waitKey(1) & 0xFF == ord('1'): #quit on 1
experiment.on_input_release('space')
break
self.end()
def end(self):
'''
Ends the recording and releases resources.
'''
logger.info('Recorder: end: recording ended, releasing resources')
self.web_cam.release()
self.video_writer.release()
class Video_Displayer():
file_name = ''
video_input = None
fps = None
display = None
callback = None
def __init__(self, file_name, callback = None):
logger.info('Video_Displayer: __init__: file_name=' + file_name)
self.file_name = file_name
self.callback = callback
def begin(self):
experiment.can_start_recording = False
self.video_input = cv2.VideoCapture(self.file_name)
self.fps = int(self.video_input.get(cv2.CAP_PROP_FPS))
self.display = main_frame.page_show_stimuli.display_region
logger.info('Video_Displayer: begin: ' + self.file_name + ' running at fps=' + str(int(self.fps)))
if not self.video_input.isOpened():
message = 'Could not open video file for reading'
logger.warning('Video_Displayer: begin: ' + message)
pop_up(message)
raise Exception(message)
main_frame.select_show_stimuli()
self.run_frame()
def run_frame(self):
#Get the next frame
is_reading, frame = self.video_input.read()
if is_reading:
#Load the image for the current frame and convert to imagetk
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img_width, img_height = get_proper_resize_dimensions_for_fullscreen(img)
img = img.resize((int(img_width), int(img_height)))
imgtk = ImageTk.PhotoImage(image=img)
self.display.imgtk = imgtk
self.display.configure(image=imgtk)
if self.video_input.isOpened():
self.display.after(self.fps, self.run_frame)
else:
self.end('Video_Displayer: run_frame: display ended due to unexpected closure of video_input')
experiment.can_start_recording = True
if not self.callback == None:
self.callback()
else:
self.end('Video_Displayer: run_frame: display ended naturally')
experiment.can_start_recording = True
if not self.callback == None:
self.callback()
def end(self, message = 'Video_Displayer: end: run_frame ended'):
logger.info(message)
self.video_input.release()
if not self.callback == None:
self.callback()
class Image_Displayer():
file_name = ''
display = None
def __init__(self, file_name):
logger.info('Image_Displayer: __init__: ' + file_name)
self.file_name = file_name
def begin(self):
logger.info('Image_Displayer: begin:')
self.display = main_frame.page_show_stimuli.display_region
main_frame.select_show_stimuli()
#Load the image for the current frame and convert to imagetk
cv2image = cv2.imread(self.file_name)
b,g,r = cv2.split(cv2image)
cv2image = cv2.merge((r,g,b))
img = Image.fromarray(cv2image)
img_width, img_height = get_proper_resize_dimensions_for_fullscreen(img)
img = img.resize((int(img_width), int(img_height)))
imgtk = ImageTk.PhotoImage(image=img)
# Put it in the display window
self.display.imgtk = imgtk
self.display.configure(image=imgtk)
class KeyTracker():
key = ''
last_press_time = 0
last_release_time = 0
last_release_callback_time = 0
first_callback_call = True
last_event_was_press = False
def track(self, key):
logger.info('KeyTracker: track: key=' + key)
self.key = key
def reset(self):
logger.info('KeyTracker: reset: resetting')
self.last_press_time = 0
self.last_release_time = 0
self.last_release_callback_time = 0
self.first_callback_call = True
self.last_event_was_press = False
def is_pressed(self):
return time.time() - self.last_press_time < .01 #In seconds
def report_key_press(self, event):
if not self.last_event_was_press and event.keysym == self.key:
self.last_event_was_press = True
if not self.is_pressed():
logger.info('KeyTracker: report_key_press: valid keypress detected: key=' + self.key)
self.last_press_time = time.time()
on_key_press(event)
else:
self.last_press_time = time.time()
def report_key_release(self, event):
if self.last_event_was_press and event.keysym == self.key:
self.last_event_was_press = False
self.last_release_time = time.time()
timer = threading.Timer(.015, self.report_key_release_callback, args=[event]) #In seconds
timer.start()
def report_key_release_callback(self, event):
if self.first_callback_call:
self.last_release_callback_time = time.time()
self.first_callback_call = False
if not self.is_pressed():
self.last_release_callback_time = time.time()
logger.info('KeyTracker: report_key_release_callback: key=' + self.key + ', is released= ' + str((not self.is_pressed())))
if not self.is_pressed():
on_key_release(event)
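# Illustrative wiring sketch (not part of the original module): KeyTracker exists to debounce
# the auto-repeat press/release events tkinter fires while a key is held down. Assuming a
# module-level instance (here called key_tracker, a hypothetical name) and the on_key_press /
# on_key_release handlers referenced above, the hookup would look roughly like:
#
#     key_tracker.track('space')
#     window.bind('<KeyPress>', key_tracker.report_key_press)
#     window.bind('<KeyRelease>', key_tracker.report_key_release)
#
# A release is only forwarded to on_key_release() if no repeat keypress arrives before the
# ~15 ms timer fires and is_pressed() returns False, so holding the space bar yields a single
# logical press/release pair.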
class Timer():
start_time = 0
end_time = 0
timespan = 0
def begin(self):
self.start_time = time.time()
def end(self):
self.end_time = time.time()
self.timespan = self.end_time - self.start_time
def active(self):
return self.start_time > self.end_time
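# Minimal usage sketch (illustrative only, not part of the original module):
#
#     t = Timer()
#     t.begin()
#     run_trial()   # hypothetical workload being timed
#     t.end()
#     print(t.timespan)   # elapsed wall-clock time in seconds
#
# active() is True between begin() and end(), because start_time is then newer than end_time.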
'''
UI and layout management
'''
def arrange_header_in(page):
button_frame = page.button_frame
top_bar_buttons = []
#Place buttons in the top-level button frame
top_bar_buttons.append(tk.Button(button_frame, text="Main Menu", font=default_font, command=main_frame.select_main_menu, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor']))
top_bar_buttons.append(tk.Button(button_frame, text="Create Experiment", font=default_font, command=main_frame.select_create_experiment, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor']))
top_bar_buttons.append(tk.Button(button_frame, text="Start Experiment", font=default_font, command=main_frame.select_start_experiment, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor']))
#Grid buttons
col = 0
for button in top_bar_buttons:
button.grid(row=0, column=col, pady=10)
col += 1
button_frame.grid(row=0, column=0)
def get_proper_resize_dimensions_for_fullscreen(img):
'''
This method gets the largest-area resize of an image to be displayed on a fullscreen display without allowing any of the
image to overflow off-screen. It maintains the image aspect ratio.
'''
#Get image dimensions
original_width = img.width
original_height = img.height
#Get the scalars that transform the original size into the fullscreen size
width_scalar = width / original_width
height_scalar = height / original_height
#Our goal is to make the image as large as possible without going over the screen size.
#We also do not want to lose our aspect ratio. So let's see whether using the width_scalar
# or the height_scalar does that best
width_based_scaling_height = original_height * width_scalar
width_based_scaling_valid = True
if width_based_scaling_height > height:
width_based_scaling_valid = False
height_based_scaling_width = original_width * height_scalar
height_based_scaling_valid = True
if height_based_scaling_width > width:
height_based_scaling_valid = False
if width_based_scaling_valid and not height_based_scaling_valid:
return width, width_based_scaling_height
else:
return height_based_scaling_width, height
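# Worked example (illustrative, assuming a 1920x1080 fullscreen, i.e. width=1920 and height=1080):
# for a 640x480 image, width_scalar = 3.0 would scale the height to 1440 > 1080, so width-based
# scaling is rejected; height_scalar = 2.25 scales the width to 1440 <= 1920, so the function
# returns (1440, 1080), filling the screen height while keeping the 4:3 aspect ratio.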
class Widget_Drag_Controller():
'''
This class handles dragging widgets that belong to a common set in such a way as to change their positions in that set. So if the numbers
1, 2, 3 are shown on screen, its goal is to allow you to click on, say, the 3 and move it in front of the 1, and then to read back that
array in the new order.
'''
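# Illustrative example (not part of the original class): if the draggable labels read
# ['A', 'B', 'C'] and 'C' is dropped onto 'A', on_end() reinserts 'C' ahead of 'A' and the
# callback receives ['C', 'A', 'B'] -- the label texts in their new on-screen order.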
item = None
callback = None
move_frame = None
def __init__(self, item, widgets, callback):
'''
Parameters:
item: The specific item out of the set that can be dragged
widgets: The set of all widgets that are ordered on the screen and can be dragged
callback: the function to call after a drag and drop. It should accept a list of files in the new order as a parameter.
'''
self.item = item
self.widgets = widgets
self.callback = callback
#Bind clicks on the item itself
self.item.bind("<ButtonPress-1>", self.on_start)
self.item.bind("<B1-Motion>", self.on_move)
self.item.bind("<ButtonRelease-1>", self.on_end)
self.item.configure(cursor="hand1")
def on_start(self, event):
print('on_start')
self.last_seen = self.item
self.move_frame = tk.Frame()
tk.Label(self.move_frame, text = self.item.cget('text'), font = self.item.cget('font'), anchor = self.item.cget('anchor'), background = self.item.cget('background')).pack(side = 'top', fill = 'x')
def on_move(self, event):
print('on_move')
x, y = event.widget.winfo_pointerxy()
self.move_frame.place(x = x, y = int(y - (self.item.winfo_height() + self.item.winfo_height() / 2)))
def on_end(self, event):
print('on_end')
self.move_frame.destroy()
x, y = event.widget.winfo_pointerxy()
target = event.widget.winfo_containing(x, y)
move_to = self.widgets.index(target)
move_from = self.widgets.index(self.item)
if move_to > move_from:
self.widgets.insert(move_to + 1, self.item)
del self.widgets[move_from]
elif move_to < move_from:
self.widgets.insert(move_to, self.item)
del self.widgets[move_from + 1]
files = [widget.cget('text') for widget in self.widgets]
self.callback(files)
class File_Arrangement_Region():
canvas = None
display_frame = None
elements = []
width = 0
height = 0
owner = None
widget_drag_controllers = []
owner_update_callback = None
def __init__(self, owner, owner_update_callback, root, width, height, row, column, rowspan = 1, columnspan = 1):
self.owner_update_callback = owner_update_callback
self.width = width
self.height = height
self.owner = owner
outer_frame = tk.Frame(root, background = settings_dict['ui_element_color'])
outer_frame.grid(row = row, column = column, rowspan = rowspan, columnspan = columnspan, padx=padding_x, pady=padding_y)
self.canvas = tk.Canvas(outer_frame, background = settings_dict['ui_element_color'])
self.display_frame = tk.Frame(self.canvas, background = settings_dict['ui_element_color'])
scrollbar_y = tk.Scrollbar(outer_frame, orient = 'vertical',command = self.canvas.yview)
scrollbar_x = tk.Scrollbar(outer_frame, orient = 'horizontal',command = self.canvas.xview)
self.canvas.configure(yscrollcommand = scrollbar_y.set)
self.canvas.configure(xscrollcommand = scrollbar_x.set)
scrollbar_y.pack(side = 'right',fill = 'y')
scrollbar_x.pack(side = 'bottom', fill = 'x')
self.canvas.pack(side = 'left')
self.canvas.create_window((0, 0), window = self.display_frame, anchor = 'nw')
self.display_frame.bind('<Configure>', self.scroll_configure)
def scroll_configure(self, event):
self.canvas.configure(scrollregion = self.canvas.bbox('all'), width = self.width, height = self.height)
def set_elements(self, files):
#Remove old elements
self.widget_drag_controllers.clear()
for child in self.display_frame.winfo_children():
child.destroy()
#Add new elements (OLD)
for i in range(len(files)):
highlight = settings_dict['draggable_color' + str(i % 2)]
tk.Label(self.display_frame, text=files[i], font = default_font, anchor = 'w', background = highlight).pack(side = 'top', fill = 'x')
for label in self.display_frame.winfo_children():
print(label);
self.widget_drag_controllers.append(Widget_Drag_Controller(label, self.display_frame.winfo_children(), self.update_owner_data))
'''#Add new elements
for i in range(len(files)):
highlight = settings_dict['draggable_color' + str(i % 2)]
container = tk.Frame(self.display_frame, width = width, height = int(height / 1.25), background = highlight)
label = tk.Label(container, text=files[i], font = default_font, anchor = 'w', background = highlight)
label.pack(side = 'left')
remove_button = tk.Button(container, text ="-", command = self.on_button_remove, font = default_font, height = 1, width = 3, background = highlight, foreground = settings_dict['forecolor'])
remove_button.pack(side = 'right')
label.bindtags(("draggable",) + label.bindtags())
container.pack(side = 'top', fill = 'x')
for element in self.display_frame.winfo_children():
print(element);
self.widget_drag_controllers.append(Widget_Drag_Controller(element, self.display_frame.winfo_children(), self.update_owner_data))'''
def on_button_remove(self):
pass
def update_owner_data(self, files):
self.owner_update_callback(files)
class Page(tk.Frame):
button_frame = None
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.button_frame = tk.Frame(self, background = settings_dict['backcolor'])
def show(self):
print('Show')
print(self)
self.lift()
class Page_Main_Menu(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_elements()
def init_elements(self):
about_text = '''
Version: 0.2.1beta
Developer: <NAME>
SignRecorder is a simple program for recording and saving
video files for sign language data collection and
experiments. It is currently hosted on GitHub
(https://github.com/Jeffrey-Sardina/SignRecorder)
as an open-source project.
To get started, click on either 'Create Experiment' or
'Start Experiment'. The program will guide you through
loading stimuli and experimental methods. Once you have
made an experiment, save it so that you can load it later.
'''
arrange_header_in(self)
file_text = tk.Text(self, font = default_font, height = 15, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
file_text.insert(tk.INSERT, about_text)
file_text.config(state = 'disabled')
file_text.grid(row=1, column=0, padx=padding_x, pady=padding_y)
class Page_Naming_Paradigm(Page):
file_arrangement_region = None
stimulus_option_selected = None
num_rows = 0
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_elements()
def init_elements(self):
stimulus_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
stimulus_text.insert(tk.INSERT, '\nSelect Stimulus Type')
stimulus_text.tag_configure("center", justify='center')
stimulus_text.tag_add("center", 1.0, "end")
stimulus_text.config(state = 'disabled')
stimulus_text.grid(row=0, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
stimulus_options = ['Video', 'Image']
stimulus_default_option = stimulus_options[0]
self.stimulus_option_selected = tk.StringVar(self)
stimulus_option_menu = tk.OptionMenu(self, self.stimulus_option_selected, *stimulus_options)
self.stimulus_option_selected.set(stimulus_default_option)
stimulus_option_menu.config(background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'], height = 1, width = 30, font = default_font)
stimulus_option_menu.grid(row=1, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
select_files_button = tk.Button(self, text ="Select Stimulus Files", command = self.load_files, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
select_files_button.grid(row=2, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
self.file_arrangement_region = File_Arrangement_Region(self, self.change_files, self, width / 2, height / 1.5, 0, 1, 20)
self.file_arrangement_region.set_elements(['Selected Files:'])
def load_files(self):
logger.info('Page_Naming_Paradigm: load_files: loading files')
self.files = filedialog.askopenfilenames(parent=self, initialdir="/", title='Select Files' + self.stimulus_option_selected.get() + ' files')
display_strs = self.files
self.file_arrangement_region.set_elements(display_strs)
def change_files(self, files):
self.files = files
self.file_arrangement_region.set_elements(self.files)
def dict_data(self):
data = {}
data['paradigm'] = 'Naming'
data['stimulus_type'] = self.stimulus_option_selected.get()
data['stimulus_files'] = self.files
return data
class Page_Lexical_Priming(Page):
stimulus_file_arrangement_region = None
primer_file_arrangement_region = None
stimulus_option_selected = None
primer_option_selected = None
stimulus_files = []
primer_files = []
num_rows = 0
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_elements()
def init_elements(self):
stimulus_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
stimulus_text.insert(tk.INSERT, '\nSelect Stimulus Type')
stimulus_text.tag_configure("center", justify='center')
stimulus_text.tag_add("center", 1.0, "end")
stimulus_text.config(state = 'disabled')
stimulus_text.grid(row=0, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
stimulus_options = ['Video', 'Image']
stimulus_default_option = stimulus_options[0]
self.stimulus_option_selected = tk.StringVar(self)
stimulus_option_menu = tk.OptionMenu(self, self.stimulus_option_selected, *stimulus_options)
self.stimulus_option_selected.set(stimulus_default_option)
stimulus_option_menu.config(background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'], height = 1, width = 30, font = default_font)
stimulus_option_menu.grid(row=1, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
primer_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
primer_text.insert(tk.INSERT, '\nSelect Primer Type')
primer_text.tag_configure("center", justify='center')
primer_text.tag_add("center", 1.0, "end")
primer_text.config(state = 'disabled')
primer_text.grid(row=2, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
primer_options = ['Video', 'Image']
primer_default_option = primer_options[0]
self.primer_option_selected = tk.StringVar(self)
primer_option_menu = tk.OptionMenu(self, self.primer_option_selected, *primer_options)
self.primer_option_selected.set(primer_default_option)
primer_option_menu.config(background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'], height = 1, width = 30, font = default_font)
primer_option_menu.grid(row=3, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
stimulus_select_files_button = tk.Button(self, text ="Select Stimulus Files", command = self.load_stimulus_files, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
stimulus_select_files_button.grid(row=4, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
self.stimulus_file_arrangement_region = File_Arrangement_Region(self, self.change_stimulus_files, self, width / 4, height / 1.5, 0, 1, 20)
self.stimulus_file_arrangement_region.set_elements(['Selected Files:'])
primer_select_files_button = tk.Button(self, text ="Select Primer Files", command = self.load_primer_files, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
primer_select_files_button.grid(row=5, column=0, padx=padding_x, pady=padding_y)
self.num_rows += 1
self.primer_file_arrangement_region = File_Arrangement_Region(self, self.change_primer_files, self, width / 4, height / 1.5, 0, 2, 20)
self.primer_file_arrangement_region.set_elements(['Selected Files:'])
def load_stimulus_files(self):
self.load_files(True)
def load_primer_files(self):
self.load_files(False)
def load_files(self, is_stimulus):
logger.info('Page_Lexical_Priming: load_files: loading files, is_stimulus=' + str(is_stimulus))
if is_stimulus:
self.stimulus_files = filedialog.askopenfilenames(parent=self, initialdir="/", title='Select Stimulus Files' + self.stimulus_option_selected.get() + ' files')
self.stimulus_file_arrangement_region.set_elements(self.stimulus_files)
else:
self.primer_files = filedialog.askopenfilenames(parent=self, initialdir="/", title='Select Primer Files' + self.primer_option_selected.get() + ' files')
self.primer_file_arrangement_region.set_elements(self.primer_files)
def change_stimulus_files(self, files):
self.stimulus_files = files
self.stimulus_file_arrangement_region.set_elements(self.stimulus_files)
def change_primer_files(self, files):
self.primer_files = files
self.primer_file_arrangement_region.set_elements(self.primer_files)
def dict_data(self):
primers = self.primer_files
stimuli = self.stimulus_files
if len(primers) != len(stimuli):
message = 'Cannot write file: There must be a 1:1 mapping of primers to stimuli'
logger.warning('Page_Lexical_Priming: dict_data: ' + message)
pop_up(message)
return []
data = {}
data['paradigm'] = 'Lexical_Priming'
data['files'] = [(primer, stimulus) for primer, stimulus in zip(primers, stimuli)]
data['stimulus_type'] = self.stimulus_option_selected.get()
return data
class Page_Create_Experiment(Page):
files = []
option_selected = None
paradigm_option_selected = None
entry = None
selected_files_info_text = None
page_naming_paradigm = None
page_lexical_priming = None
create_experiment_button = None
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_elements()
def init_elements(self):
arrange_header_in(self)
self.page_naming_paradigm = Page_Naming_Paradigm(self, background = settings_dict['backcolor'])
self.page_lexical_priming = Page_Lexical_Priming(self, background = settings_dict['backcolor'])
paradigm_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
paradigm_text.insert(tk.INSERT, '\nSelect Experiment Paradigm')
paradigm_text.tag_configure("center", justify='center')
paradigm_text.tag_add("center", 1.0, "end")
paradigm_text.config(state = 'disabled')
paradigm_text.grid(row=1, column=0, padx=padding_x, pady=padding_y)
paradigm_options = ['Naming', 'Lexical Priming']
default_paradigm_option = paradigm_options[0]
self.paradigm_option_selected = tk.StringVar(self)
self.paradigm_option_selected.trace('w', self.paradigm_selected)
paradigm_option_menu = tk.OptionMenu(self, self.paradigm_option_selected, *paradigm_options)
self.paradigm_option_selected.set(default_paradigm_option)
paradigm_option_menu.config(background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'], height = 1, width = 30, font = default_font)
paradigm_option_menu.grid(row=2, column=0, padx=padding_x, pady=padding_y)
#Container
container = tk.Frame(self, width = width, height = int(height / 1.25), background = settings_dict['backcolor'])
container.grid(row=3, column=0, rowspan = 10, columnspan = 400, padx=0, pady=padding_y)
#Place pages in the container frame
self.page_naming_paradigm.place(in_=container)
self.page_lexical_priming.place(in_=container)
paradigm_option_string = self.paradigm_option_selected.get()
if paradigm_option_string == 'Naming':
self.page_naming_paradigm.show()
elif paradigm_option_string == 'Lexical Priming':
self.page_lexical_priming.show()
#Create Experiment Button
self.create_experiment_button = tk.Button(self, text ="Create Experiment", command = self.create_experiment, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
self.create_experiment_button.grid(row = 1, column = 1, padx = padding_x, pady = padding_y)
def create_experiment(self):
logger.info('Page_Create_Experiment: create_experiment: creating experiment')
paradigm_option_string = self.paradigm_option_selected.get()
exp_data = None
if paradigm_option_string == 'Naming':
exp_data = self.page_naming_paradigm.dict_data()
elif paradigm_option_string == 'Lexical Priming':
exp_data = self.page_lexical_priming.dict_data()
data = {}
data['paradigm'] = paradigm_option_string
data.update(exp_data)
try:
experiment_name = filedialog.asksaveasfilename(initialdir = "/", title = "Save file", filetypes = (("experiment files","*.exp"), ("all files","*.*")))
with open(experiment_name, 'w') as experiment_file:
print(json.dumps(data), file=experiment_file)
except Exception as err:
message = 'Error: Could not write experiment file'
logger.error('Page_Create_Experiment: create_experiment: ' + message + ': ' + str(err))
pop_up(message)
def paradigm_selected(self, name, index, mode):
paradigm_option_string = self.paradigm_option_selected.get()
logger.info('Page_Create_Experiment: paradigm_selected: paradigm_option_string=' + paradigm_option_string)
if paradigm_option_string == 'Naming':
pass
#self.page_naming_paradigm.lift()
#self.page_lexical_priming.lower()
elif paradigm_option_string == 'Lexical Priming':
pass
#self.page_lexical_priming.lift()
#self.page_naming_paradigm.lower()
class Page_Start_Experiment(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_elements()
def init_elements(self):
global subject_id_entry_box
arrange_header_in(self)
file_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
file_text.insert(tk.INSERT, '\nSelect an experiment file to load')
file_text.tag_configure("center", justify='center')
file_text.tag_add("center", 1.0, "end")
file_text.config(state = 'disabled')
file_text.grid(row=1, column=0, padx=padding_x, pady=padding_y)
select_file_button = tk.Button(self, text ="Choose file", command = self.load_experiment, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
select_file_button.grid(row=2, column=0, padx=padding_x, pady=padding_y)
dir_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
dir_text.insert(tk.INSERT, '\nSelect a folder in which to save the output')
dir_text.tag_configure("center", justify='center')
dir_text.tag_add("center", 1.0, "end")
dir_text.config(state = 'disabled')
dir_text.grid(row=3, column=0, padx=padding_x, pady=padding_y)
select_file_button = tk.Button(self, text ="Choose output folder", command = self.load_dir, font = default_font, height = 1, width = 30, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
select_file_button.grid(row=4, column=0, padx=padding_x, pady=padding_y)
entry_text = tk.Text(self, font = default_font, height = 3, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
entry_text.insert(tk.INSERT, '\nEnter subject ID')
entry_text.tag_configure("center", justify='center')
entry_text.tag_add("center", 1.0, "end")
entry_text.config(state = 'disabled')
entry_text.grid(row=5, column=0, padx=padding_x, pady=padding_y)
subject_id_entry_box = tk.Entry(self, font = default_font, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
subject_id_entry_box.grid(row=6, column=0, padx=padding_x, pady=padding_y)
how_to_string = '''
When you are ready to begin the experiment, press the space bar.
Once you are ready to sign based on what you see, press the space
bar to start recording.
Once you are done signing, press the space bar again. You will then
see the next prompt and the program will begin recording.
'''
how_to_text = tk.Text(self, font = default_font, height = 9, width = 70, background = settings_dict['ui_element_color'], foreground = settings_dict['forecolor'])
how_to_text.insert(tk.INSERT, how_to_string)
how_to_text.config(state = 'disabled')
how_to_text.grid(row=7, column=0, padx=padding_x, pady=padding_y)
def load_dir(self):
global out_dir
logger.info('Page_Start_Experiment: load_dir: loading save folder')
try:
out_dir = filedialog.askdirectory(parent = self, initialdir="/", title='Select Save Folder')
except Exception as err:
message = 'Could not load the selected directory'
logger.error('Page_Start_Experiment: load_dir: ' + message + ': ' + str(err))
pop_up(message)
def load_experiment(self):
global experiment
logger.info('Page_Start_Experiment: load_experiment: loading experiment')
experiment_file = filedialog.askopenfilename(parent=self, initialdir="/", title='Select Experiment')
try:
with open(experiment_file, 'r') as experiment_data:
data = json.loads(experiment_data.read())
if data['paradigm'] == 'Naming':
experiment = Naming_Experiment(data)
elif data['paradigm'] == 'Lexical Priming':
pass
except Exception as err:
message = 'Could not load experiment file'
logger.error('Page_Start_Experiment: load_experiment:' + message + ': ' + str(err))
pop_up(message)
class Page_Show_Stimuli(Page):
display_region = None
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.init_display_region()
def init_display_region(self):
self.display_region = tk.Label(self)
self.display_region.config(background = "#000000")
self.display_region.grid(row=0, column=0)
class MainFrame(tk.Frame):
page_main_menu = None
page_create_experiment = None
page_start_experiment = None
page_show_stimuli = None
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
#self.buttonframe = tk.Frame(main_frame, background = settings_dict['backcolor'])
self.config(background = settings_dict['backcolor'])
def prepare_display(self):
#Pages
self.page_main_menu = Page_Main_Menu(self, width = width, height = height, background = settings_dict['backcolor'])
self.page_create_experiment = Page_Create_Experiment(self, width = width, height = height, background = settings_dict['backcolor'])
self.page_start_experiment = Page_Start_Experiment(self, width = width, height = height, background = settings_dict['backcolor'])
self.page_show_stimuli = Page_Show_Stimuli(self, width = width, height = height, background = settings_dict['backcolor'])
#Container
container = tk.Frame(self, background = settings_dict['backcolor'])
container.pack(side="top", fill="both", expand=True)
#Place pages in the container frame
self.page_main_menu.place(in_=container)
self.page_create_experiment.place(in_=container)
self.page_start_experiment.place(in_=container)
self.page_show_stimuli.place(in_=container)
#Show the main menu
self.page_main_menu.show()
def select_main_menu(self):
self.set_fullscreen_exclusive(False)
self.page_main_menu.lift()
self.page_create_experiment.lower()
self.page_start_experiment.lower()
self.page_show_stimuli.lower()
def select_create_experiment(self):
self.set_fullscreen_exclusive(False)
self.page_create_experiment.lift()
self.page_main_menu.lower()
self.page_start_experiment.lower()
self.page_show_stimuli.lower()
def select_start_experiment(self):
self.set_fullscreen_exclusive(False)
self.page_start_experiment.lift()
self.page_create_experiment.lower()
self.page_main_menu.lower()
self.page_show_stimuli.lower()
def select_show_stimuli(self):
self.set_fullscreen_exclusive(True)
self.page_show_stimuli.lift()
self.page_main_menu.lower()
self.page_create_experiment.lower()
self.page_start_experiment.lower()
def set_fullscreen_exclusive(self, fullscreen_exclusive):
window.attributes('-fullscreen', fullscreen_exclusive)
'''
This code starts program execution. The entire program is run in a try-except statement so that, should any error occur:
The program can write out the error to the command line
The program can still run the on_close() function to try to clean up all resources
The logger is not used here since, among the possible errors that could cause a crash, is the logger not having write permissions,
and it is still important that the failure be printed to a readable output.
'''
try:
main()
except Exception as e:
trace = traceback.format_exc()
print('Something bad happened ' + str(e) + '\n' + str(trace))
on_close(False)
raise
```
|
{
"source": "Jeffrey-Sardina/twig-idgl",
"score": 3
}
|
#### File: twig-idgl/NAS_module/analyse.py
```python
import os
import matplotlib.pyplot as plt
import hpbandster.core.result as hpres
import hpbandster.visualization as hpvis
def correlation_analysis(config, save_to_disk=True):
log_dir = os.path.join(config["bohb_log_dir"], config['run_id'])
# load the example run from the log files
result = hpres.logged_results_to_HBS_result(log_dir)
# get all executed runs
all_runs = result.get_all_runs()
# get the 'dict' that translates config ids to the actual configurations
id2conf = result.get_id2config_mapping()
# Here is how you get the incumbent (best configuration)
inc_id = result.get_incumbent_id()
# let's grab the run on the highest budget
inc_runs = result.get_runs_by_id(inc_id)
inc_run = inc_runs[-1]
# We have access to all information: the config, the loss observed during
#optimization, and all the additional information
# inc_loss = inc_run.loss
# inc_config = id2conf[inc_id]['config']
# inc_test_loss = inc_run.info['test accuracy']
# print('Best found configuration:')
# print(inc_config)
# print('It achieved accuracies of %f (validation) and %f (test).'%(1-inc_loss, inc_test_loss))
# Let's plot the observed losses grouped by budget,
hpvis.losses_over_time(all_runs)
plt.show()
if save_to_disk:
plt.savefig(os.path.join(log_dir, config["run_id"] + "_losses_over_time.png"), bbox_inches="tight")
# the number of concurrent runs,
hpvis.concurrent_runs_over_time(all_runs)
plt.show()
if save_to_disk:
plt.savefig(os.path.join(log_dir, config["run_id"] + "_concurrent_runs_over_time.png"), bbox_inches="tight")
# and the number of finished runs.
hpvis.finished_runs_over_time(all_runs)
plt.show()
if save_to_disk:
plt.savefig(os.path.join(log_dir, config["run_id"] + "_finished_runs_over_time.png"), bbox_inches="tight")
# This one visualizes the spearman rank correlation coefficients of the losses
# between different budgets.
hpvis.correlation_across_budgets(result)
plt.show()
if save_to_disk:
plt.savefig(os.path.join(log_dir, config["run_id"] + "_correlation_across_budgets.png"), bbox_inches="tight")
# For model based optimizers, one might wonder how much the model actually helped.
# The next plot compares the performance of configs picked by the model vs. random ones
hpvis.performance_histogram_model_vs_random(all_runs, id2conf)
plt.show()
if save_to_disk:
plt.savefig(os.path.join(log_dir, config["run_id"] + "_performance_histogram_model_vs_random.png"), bbox_inches="tight")
def load_config(filename):
'''
Load the config file from disk
'''
import yaml
try:
with open(filename, 'r') as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
except:
raise ValueError("Could not load configuration for running Twig.", filename)
return config
if __name__ == "__main__":
import sys
config_file = sys.argv[1]
config = load_config(config_file)
correlation_analysis(config)
```
|
{
"source": "JeffreySarnoff/UsingSollya",
"score": 2
}
|
#### File: metalibm-master/metalibm_functions/fast_exp2i.py
```python
from metalibm_core.core.ml_operations import (Max, Min, ExponentInsertion, Return)
from metalibm_core.core.ml_formats import (ML_Int32, ML_Binary32)
from metalibm_core.core.precisions import ML_Faithful
from metalibm_core.core.simple_scalar_function import ScalarUnaryFunction
from metalibm_core.code_generation.generic_processor import GenericProcessor
from metalibm_core.utility.ml_template import (
DefaultArgTemplate, ML_NewArgTemplate)
from metalibm_core.utility.debug_utils import debug_multi
class FastExp2i(ScalarUnaryFunction):
""" Meta-implementation of fast exponentiation of integers to
floating-point values """
function_name = "fast_exp2i"
def __init__(self, args=DefaultArgTemplate):
# initializing base class
super(ScalarUnaryFunction, self).__init__(args)
def generate_scalar_scheme(self, vx):
output_precision = self.precision
input_precision = vx.get_precision()
bias = -output_precision.get_bias()
bound_exp = Max(
Min(vx, output_precision.get_emax(), precision=input_precision),
output_precision.get_emin_normal(), precision=input_precision) + bias
scheme = Return(
ExponentInsertion(bound_exp,
specifier=ExponentInsertion.NoOffset,
precision=self.precision), tag="result", debug=debug_multi)
return scheme
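# Rough sketch of the idea (illustrative commentary, not metalibm documentation): the integer
# input is clamped to the exponent range representable by the output format, the format bias is
# added, and ExponentInsertion builds a float with that exponent field and a zero mantissa, so
# the result is 2**input without any polynomial evaluation -- consistent with numeric_emulate below.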
def numeric_emulate(self, input_value):
return 2**input_value
@staticmethod
def get_default_args(**kw):
""" Return a structure containing the arguments for FastExp2i,
builtin from a default argument mapping overloaded with @p kw """
default_args_fast_exp2i = {
"output_file": "fast_expi.c",
"function_name": "fast_expi",
"input_precisions": [ML_Int32],
"precision": ML_Binary32,
"accuracy": ML_Faithful,
"target": GenericProcessor.get_target_instance()
}
default_args_fast_exp2i.update(kw)
return DefaultArgTemplate(**default_args_fast_exp2i)
if __name__ == "__main__":
# auto-test
ARG_TEMPLATE = ML_NewArgTemplate(default_arg=FastExp2i.get_default_args())
ARGS = ARG_TEMPLATE.arg_extraction()
ML_FAST_EXP_I = FastExp2i(ARGS)
ML_FAST_EXP_I.gen_implementation()
```
|
{
"source": "jeffreysfllo24/alpha-zero-general-chess-and-battlesnake",
"score": 3
}
|
#### File: old_code/deepChess/play_vs_MCTSBot.py
```python
import time
from deepChess.ChessUtils import MyChessEnv
from deepChess.RandomAgent import RandomBot
from deepChess.MCTSAgent import MCTSBot
def main():
game = MyChessEnv(debug=False)
# player1 =
player2 = MCTSBot(800,1.5)
while not game.done:
time.sleep(0.5)
# clear screen
print(chr(27) + "[2J")
if game.white_to_move:
move = input('-- ')
# game.step(human_move)
else:
move = player2.select_move(game)
game.step(move)
game.render()
print("Result winner: ", game.winner)
if __name__ == '__main__':
main()
```
|
{
"source": "jeffreyshen19/blog",
"score": 3
}
|
#### File: _code/police-militarization/get_category.py
```python
def get_category(name):
# Clean up str
name = name.replace("DESC=", "").lower()
category = ""
if "grenade" in name:
category = "grenade-launchers"
elif "night vision" in name:
category = "night-vision"
elif name == "rifle,5.56 millimeter" or name == "rifle,7.62 millimeter":
category = "assault-rifles"
elif name in ["truck,tank", "light armored vehicle", "grader,road,motorized,armored", "armored vehicle", "truck,armored","car,armored", "tractor semi armored","semitrailer,tank", "mine resistant vehicle", "zbv military trailer", "frame,armor,vehicular window","plate,armor,radiator", "armor,transparent,vehicular window"]:
category = "armored-vehicles" # Tanks, Trucks, and Cars and armored plating
elif name in ["aircraft, fixed wing", "aircraft, rotary wing", "helicopter,search and rescue", "helicopter,observation", "helicopter,utility", "helicopter,flight trainer th55a", "helicopter,medevac","airplane,flight t42a","airplane,cargo-transport","airplane,utility u8f"]:
category = "aircraft"
elif (("armor," in name and name != "kit assembly,4-door,mak armor,hmmwv,m1165") or "helmet," in name) or name in ["riot control shield", "helmet,riot", "faceshield,riot control", "riot gear", "riot shield", "faceshield,military,riot control", "helmet,battle,mk7"]:
category = "body-armor"
else:
category = "other"
return category
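# Illustrative examples (not part of the original function):
# get_category("DESC=RIFLE,5.56 MILLIMETER") -> "assault-rifles"
# get_category("DESC=NIGHT VISION GOGGLES") -> "night-vision"
# get_category("DESC=UNKNOWN WIDGET") -> "other"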
```
|
{
"source": "jeffreyshen19/Digital-Transcription",
"score": 4
}
|
#### File: Digital-Transcription/transcriber/spellCheck.py
```python
import json
import os
import re
from collections import Counter
class SpellCheck:
def __init__(self, corpus=[]):
self.WORDS = Counter(SpellCheck.words(open('./transcriber/corpus.txt').read().lower())) # Get the occurences of a large corpus of English texts
self.N = sum(self.WORDS.values()) # total number of words
self.custom_corpus = corpus
def check(self, str):
tokens = re.split(r'([^\w]+)', str) # Get all tokens, split into an array
output = ""
for token in tokens:
if re.match(r"[a-z]{3,}", token): # If it's a lowercase word (solely alphabetic) with at least 3 characters, spell-check it
output += self.correction(token)
else: # Otherwise, just append it to the output
output += token
return output
def P(self, word):
"Probability of `word`."
if word in self.custom_corpus:
return 1
return self.WORDS[word] / self.N
def correction(self, word):
"Most probable spelling correction for word."
return max(self.candidates(word), key=self.P)
def candidates(self, word):
"Generate possible spelling corrections for word."
return (self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or [word])
def known(self, words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if (w in self.WORDS or w in self.custom_corpus))
def edits1(self, word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if (R and len(L + R[1:]) >= 3)]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(self, word):
"All edits that are two edits away from `word`."
return (e2 for e1 in self.edits1(word) for e2 in self.edits1(e1))
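# Minimal usage sketch (illustrative; assumes ./transcriber/corpus.txt is present as required
# by __init__ and that the misspelling below is absent from it):
#
#     checker = SpellCheck(corpus=['ocr'])
#     checker.check('the speach was short')   # -> 'the speech was short' (most probable edit)
#
# Tokens shorter than three letters, capitalised words and punctuation pass through unchanged,
# and any word in the custom corpus is treated as correct because P() returns 1 for it.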
'''Helper Functions'''
def valid_languages(languages):
'''Check if the document is in a language spellchecker works on'''
excluded_languages = ["zh"] # Languages NOT to run spell checker on TODO: make this configurable
for language in languages:
if language in excluded_languages: return False
return True
def words(text):
'''Gets all the words within a string'''
return re.findall(r'\w+', text)
```
|
{
"source": "Jeffrey-shipping-it/Crypto-time-series",
"score": 3
}
|
#### File: Crypto-time-series/forecaster/utils.py
```python
import pickle5 as pickle
import os
def _save_model_dict(model_dict, save_dir='.'):
with open(os.path.join(save_dir, 'models.pickle'), 'wb') as handle:
pickle.dump(model_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def _load_model_dict():
with open('notebooks/models.pickle', 'rb') as handle:
model_dict = pickle.load(handle)
return model_dict
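# Usage sketch (illustrative only; note the two helpers use different paths -- the saver writes
# models.pickle under save_dir, while the loader reads the copy kept under notebooks/). A
# hypothetical round trip would be:
#
#     _save_model_dict({'prophet': fitted_model}, save_dir='notebooks')   # fitted_model is hypothetical
#     models = _load_model_dict()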
```
|
{
"source": "jeffreysimpson/calcium_triplet_metallicities",
"score": 2
}
|
#### File: calcium_triplet_metallicities/code/ESO280_common.py
```python
import astropy.units as u
import numpy as np
import pandas as pd
from astropy import uncertainty as unc
from astropy.coordinates import SkyCoord
from astropy import constants as const
import gaia_funcs
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
cluster_centre = SkyCoord(ra=272.27612983712135*u.degree,
dec=-46.422586369434775*u.degree)
def load_table(DROP_DUPLICATES=True, PHOTOM_CORRECTION=True, TMASS=False):
"""Load the data table. By default this removes twice observed stars."""
if TMASS:
# Use the table with 2MASS photometry.
observed = pd.read_csv("../data/ESO280_100_unrefined_hb_tmass.out")
else:
observed = pd.read_csv("../data/ESO280_100_unrefined_hb.out")
if PHOTOM_CORRECTION:
observed['phot_g_mean_mag'] -= gaia_funcs.gaia_correction(observed)
if DROP_DUPLICATES:
return observed.sort_values('snr', ascending=False).drop_duplicates(
'source_id', keep='first')
else:
return observed
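# Illustrative usage (not part of the original module): by default the table is deduplicated to
# the highest-SNR visit per source_id and the Gaia G-band correction is applied, e.g.
#
#     observed = load_table()                                        # deduplicated, corrected photometry
#     observed_all = load_table(DROP_DUPLICATES=False, TMASS=True)   # every visit, with 2MASS columns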
def ESO280_params(PRINT=True):
params_dict = {"eso_280_m_M_V": 17.011,
"eso_280_e_m_M_V": 0.045,
"eso_280_ebv": 0.141,
"eso_280_e_ebv": 0.006,
"rv": 94.644,
"e_rv": 0.476,
"std_rv": 2.305,
"e_std_rv": 0.363,
"r_t": [0.14693*u.deg, 0.04126*u.deg],
"r_c": [0.00410*u.deg, 0.00009*u.deg]}
n_samples = 10000
eso_280_m_M_V_dist = unc.normal(params_dict['eso_280_m_M_V'],
std=params_dict['eso_280_e_m_M_V'],
n_samples=n_samples)
eso_280_ebv_dist = unc.normal(params_dict['eso_280_ebv'],
std=params_dict['eso_280_e_ebv'],
n_samples=n_samples)
eso_280_m_M_0_dist = eso_280_m_M_V_dist - 3.1*eso_280_ebv_dist
eso_280_dist_dist = unc.Distribution(
10**(1+eso_280_m_M_0_dist/5).distribution*u.pc)
# Hardcoded values. Calculated using velocity_estimate.py
rv_dist = unc.normal(params_dict['rv']*u.km/u.s,
std=params_dict['e_rv']*u.km/u.s,
n_samples=n_samples)
rv_std_dist = unc.normal(params_dict['std_rv']*u.km/u.s,
std=params_dict['e_std_rv']*u.km/u.s,
n_samples=10000)
# Size values from ASteCA
r_0_dist = unc.normal(params_dict['r_c'][0],
std=params_dict['r_c'][1],
n_samples=10000)
r_t_dist = unc.normal(params_dict['r_t'][0],
std=params_dict['r_t'][1],
n_samples=10000)
size_dist = (np.tan(r_0_dist) * eso_280_dist_dist)
tidal_dist = (np.tan(r_t_dist) * eso_280_dist_dist)
cluster_mass = ((7.5 * rv_std_dist**2 * 4/3*size_dist) / const.G)
sc_best = SkyCoord(
ra=cluster_centre.ra,
dec=cluster_centre.dec,
radial_velocity=rv_dist.pdf_mean(),
distance=eso_280_dist_dist.pdf_mean(),
pm_ra_cosdec=-0.548*u.mas/u.yr,
pm_dec=-2.688*u.mas/u.yr
)
eso_280_pmra_dist = unc.normal(sc_best.pm_ra_cosdec,
std=0.073*u.mas/u.yr,
n_samples=n_samples)
eso_280_pmdec_dist = unc.normal(sc_best.pm_dec,
std=0.052*u.mas/u.yr,
n_samples=n_samples)
sc_dist = SkyCoord(
ra=np.ones(eso_280_dist_dist.n_samples)*cluster_centre.ra,
dec=np.ones(eso_280_dist_dist.n_samples)*cluster_centre.dec,
radial_velocity=rv_dist.distribution,
distance=eso_280_dist_dist.distribution,
pm_ra_cosdec=eso_280_pmra_dist.distribution,
pm_dec=eso_280_pmdec_dist.distribution)
if PRINT:
print(
rf"$r_c$ & ${params_dict['r_c'][0].to(u.arcsec).value:0.2f}\pm{params_dict['r_c'][1].to(u.arcsec).value:0.2f}$~arcsec\\")
print(
rf"$r_t$ & ${params_dict['r_t'][0].to(u.arcmin).value:0.2f}\pm{params_dict['r_t'][1].to(u.arcmin).value:0.2f}$~arcmin\\")
print(
rf"$(m-M)_V$ & ${params_dict['eso_280_m_M_V']:0.2f}\pm{params_dict['eso_280_e_m_M_V']:0.2f}$\\")
print(
rf"$\ebv$ & ${params_dict['eso_280_ebv']:0.2f}\pm{params_dict['eso_280_e_ebv']:0.2f}$\\")
print(
rf"$(m-M)_0$ & ${eso_280_m_M_0_dist.pdf_mean:0.2f}\pm{eso_280_m_M_0_dist.pdf_std:0.2f}$\\")
print(
rf"$d_\odot$ & ${eso_280_dist_dist.pdf_mean.to(u.kpc).value:0.1f}\pm{eso_280_dist_dist.pdf_std.to(u.kpc).value:0.1f}$~kpc\\")
print(
rf"$r_c$ & ${size_dist.pdf_mean.to(u.pc).value:0.2f}\pm{size_dist.pdf_std.to(u.pc).value:0.2f}$~pc\\")
print(
rf"$r_t$ & ${tidal_dist.pdf_mean.to(u.pc).value:0.1f}\pm{tidal_dist.pdf_std.to(u.pc).value:0.1f}$~pc\\")
print(rf"Mass & $({cluster_mass.pdf_mean.to(u.solMass).value/1000:0.1f}\pm{cluster_mass.pdf_std.to(u.solMass).value/1000:0.1f})\times10^3$~M$_\odot$\\")
print(rf"$v_r$ & ${params_dict['rv']:0.2f}\pm{params_dict['e_rv']:0.2f}$\kms\\")
print(
rf"$\sigma_r$ & ${params_dict['std_rv']:0.2f}\pm{params_dict['e_std_rv']:0.2f}$\kms\\")
return params_dict, sc_best, sc_dist
def ESO280_idxs(observed):
"""Select for various groups of stars in ESO280."""
params_dict, *_ = ESO280_params(PRINT=False)
ang_distance = params_dict["r_t"][0]
c_observed = SkyCoord(ra=np.array(observed.ra)*u.degree,
dec=np.array(observed.dec)*u.degree)
close_observed_idx = c_observed.separation(cluster_centre) < ang_distance
# low_ew_idx = observed.sum_ew_med < 5.
rv_idx = ((observed.true_rv_med > 95-25.) &
(observed.true_rv_med < 95+25.))
pm_idx = np.sqrt((observed.pmra--0.548)**2 +
(observed.pmdec--2.688)**2) < 1.5
ew_G_idx = -0.5*observed.phot_g_mean_mag + 12.2 > observed.sum_ew_med
# color_select_idx = observed.bp_rp > 0.8
hb_idx = observed.num_good == 200
rgb_possible_idx = ew_G_idx & rv_idx & ~hb_idx & pm_idx
hb_possible_idx = rv_idx & hb_idx & pm_idx
members_not_hb_idx = rgb_possible_idx & close_observed_idx
et_not_hb_idx = rgb_possible_idx & ~close_observed_idx
members_hb_idx = hb_possible_idx & close_observed_idx
et_hb_idx = hb_possible_idx & ~close_observed_idx
# extra_tidal_idx = np.in1d(observed.source_id,
# [6719718983068881664,
# 6719556186624323456,
# # 6719616316170550528, # 6719903155571676544,
# 6719533204255711488,
# 6719866626860277760])
cn_star_idx = np.in1d(observed.source_id, [6719598900092253184])
ch_star_idx = np.in1d(observed.source_id, [6719599101938996864])
idx_dict = {"close_observed_idx": close_observed_idx,
# "low_ew_idx": low_ew_idx,
"rv_idx": rv_idx,
"members_not_hb_idx": members_not_hb_idx,
"members_hb_idx": members_hb_idx,
"et_not_hb_idx": et_not_hb_idx,
"et_hb_idx": et_hb_idx,
# "extra_tidal_idx": extra_tidal_idx,
"cn_star_idx": cn_star_idx,
"ch_star_idx": ch_star_idx,
"hb_idx": hb_idx}
plot_dict = [{"idx": (~(members_not_hb_idx | members_hb_idx |
hb_idx | et_not_hb_idx | et_hb_idx) &
~close_observed_idx),
"label": r"$>8.8$ arcmin field",
"kwargs": dict(alpha=0.3, ms=3, mfc=[0.6, 0.6, 0.6],
mec="None", fmt='.')},
{"idx": (~(members_not_hb_idx | members_hb_idx | hb_idx) &
close_observed_idx &
((observed.num_good == 200) |
(observed.sum_ew_p < 2.0))),
"label": r"$<8.8$ arcmin field",
"kwargs": dict(alpha=0.5, ms=4, mfc='k',
mec="None", fmt='.')},
{"idx": hb_idx & ~members_hb_idx,
"label": "__nolabel__",
"kwargs": dict(alpha=0.3, ms=3, mfc=[0.6, 0.6, 0.6],
mec="None", fmt='.')},
{"idx": members_not_hb_idx & ~(cn_star_idx),
"label": "Members",
"kwargs": dict(alpha=0.8, ms=5, mfc='C3',
mec="None", fmt='o')},
{"idx": members_hb_idx & ~(cn_star_idx),
"label": "__nolabel__",
"kwargs": dict(alpha=0.8, ms=5, mfc='C3',
mec="None", fmt='o')},
{"idx": cn_star_idx,
"label": "CN-strong star",
"kwargs": dict(alpha=0.8, ms=15, mfc='C0',
mec="None", fmt='*', zorder=1000)},
{"idx": et_not_hb_idx | et_hb_idx,
"label": "Extra-tidal stars",
"kwargs": dict(alpha=0.8, ms=7, mfc='C2',
mec="None", fmt='s')}]
return idx_dict, plot_dict
```
#### File: calcium_triplet_metallicities/code/gaia_funcs.py
```python
import numpy as np

__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def good_tmass_photom(table):
"""Requre stars to have A quality 2MASS J and Ks photometry."""
return [(ph_qual[0] == 'A') and (ph_qual[2] == 'A')
if type(ph_qual) is str else False for ph_qual in table.ph_qual]
def good_photom_idx(table, FITS=False):
"""Use the photometric quality criteria from Evans+2018."""
if FITS:
bp_rp_excess = table[1].data['phot_bp_rp_excess_factor']
bp_rp = table[1].data['bp_rp']
else:
bp_rp_excess = table.phot_bp_rp_excess_factor
bp_rp = table.bp_rp
return ((bp_rp_excess <
1.3 + 0.06*bp_rp**2) &
(bp_rp_excess >
1.0 + 0.015*bp_rp**2))
def good_astrom_idx(table, FITS=False):
"""Require the star to have good astrometry."""
if FITS:
ruwe = table[1].data['ruwe']
else:
ruwe = table.ruwe
return ruwe < 1.4
def k_calc(BP_RP, A_0, c_array):
"""Calculate the extinction coefficient.
Parameters
----------
BP_RP : array
The Gaia BP-RP colour of the target(s)
A_0 : float
The extinction coefficient
c_array : list
Parameters used to derive the Gaia extinction
coefficients as a function of colour and extinction
Returns
----------
The ratio of A_X/A_0 for a given BP-RP colour.
References
----------
Babusiaux et al (2018) 10.1051/0004-6361/201832843
"""
return (c_array[0] +
c_array[1]*BP_RP +
c_array[2]*BP_RP**2 +
c_array[3]*BP_RP**3 +
c_array[4]*A_0 +
c_array[5]*A_0**2 +
c_array[6]*BP_RP*A_0)
def m_red_correction(BP_RP, ebv, m_M_V, UNREDDENED=False):
"""Calculate the distance modulus and reddening in Gaia photometry.
Parameters
----------
BP_RP : array
The Gaia BP-RP colour of the target(s)
ebv : float
The E(B-V) to be converted to E(BP-RP)
m_M_V : float
The (m-M)_V to be converted to (m-M)_G
UNREDDENED : bool
Is the provided distance modulus m_M_V or m_M_0?
Returns
----------
ebr : array
The E(BP-RP) values for all the input values of BP_RP
m_M_G : array
The (m-M)_G value for all the input values of BP_RP
References
----------
Babusiaux et al (2018) 10.1051/0004-6361/201832843
"""
c_B = [1.1517, -0.0871, -0.0333, 0.0173, -0.0230, 0.0006, 0.0043]
c_R = [0.6104, -0.0170, -0.0026, -0.0017, -0.0078, 0.00005, 0.0006]
c_G = [0.9761, -0.1704, 0.0086, 0.0011, -0.0438, 0.0013, 0.0099]
A_0 = 3.1*ebv
# This checks if we are getting the (m-M)_0 or (m-M)_V
if not UNREDDENED:
m_M_0 = m_M_V - A_0
else:
m_M_0 = m_M_V
k_B = k_calc(BP_RP, A_0, c_B)
k_R = k_calc(BP_RP, A_0, c_R)
k_G = k_calc(BP_RP, A_0, c_G)
ebr = A_0 * (k_B - k_R)
A_G = A_0 * k_G
m_M_G = m_M_0 + A_G
return ebr, m_M_G, A_G
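# Worked sketch (illustrative; the input values are assumptions, not results from the paper):
#
#     ebr, m_M_G, A_G = m_red_correction(np.array([1.0]), 0.14, 17.0)
#
# returns the Gaia-band reddening E(BP-RP), the apparent G-band distance modulus
# (m-M)_G = (m-M)_0 + A_G, and the extinction A_G, using the colour- and extinction-dependent
# coefficients of Babusiaux et al. (2018) for stars with BP-RP ~ 1.0.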
def gaia_correction(table):
"""Correction to G magnitudes published in Gaia DR2."""
# https://www.cosmos.esa.int/web/gaia/dr2-known-issues
correction = np.ones(len(table)) * 0.032
idx_pairs = [[(table.phot_g_mean_mag > 6) & (table.phot_g_mean_mag <= 16),
0.0032*(table.phot_g_mean_mag-6)]]
for idx_pair in idx_pairs:
correction[idx_pair[0]] = idx_pair[1][idx_pair[0]]
return correction
```
#### File: calcium_triplet_metallicities/code/PseudoVoigt.py
```python
import math
import numpy as np
from astropy.modeling import Fittable1DModel, Parameter
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def tie_ca_lines_1(model):
"""Tie the second CaT line wavelength to the first."""
mean = 8542.09 - 8498.03 + model.x_0_0
return mean
def tie_ca_lines_2(model):
"""Tie the third CaT line wavelength to the first."""
mean = 8662.14 - 8498.03 + model.x_0_0
return mean
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
class PseudoVoigt1D(Fittable1DModel):
"""
One dimensional Pseudo-Voigt model.
Parameters
----------
amplitude : float
Amplitude of the Pseudo-Voigt.
x_0 : float
Mean of the Pseudo-Voigt.
gamma_L : float
Standard deviation of the Lorentzian.
gamma_G : float
Standard deviation of the Gaussian.
Notes
-----
Using function has defined by Thompson et al (1987)
DOI: 10.1107/S0021889887087090
"""
x_0 = Parameter(default=0)
# Ensure gamma_X makes sense if their bounds are not explicitly set.
# gamma_X must be non-zero and positive.
gamma_L = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
gamma_G = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
@staticmethod
def evaluate(x, x_0, gamma_L, gamma_G, amplitude):
"""Calculate the pseudo-Voigt function."""
Γ_G = 2*math.sqrt(math.log(2))*gamma_G  # FWHM of exp(-(x-x_0)^2/gamma_G^2) is 2*sqrt(ln 2)*gamma_G
Γ_L = 2*gamma_L
Γ_int = (Γ_G**5 +
2.69269 * Γ_G**4 * Γ_L**1 +
2.42843 * Γ_G**3 * Γ_L**2 +
4.47163 * Γ_G**2 * Γ_L**3 +
0.07842 * Γ_G**1 * Γ_L**4 +
Γ_L**5)
Γ = np.power(Γ_int, 1/5)
η = (1.36603 * (Γ_L/Γ) -
0.47719 * (Γ_L/Γ)**2 +
0.11116 * (Γ_L/Γ)**3)
G_x = ((1/(math.sqrt(np.pi)*gamma_G)) *
np.exp((-1*np.power(x-x_0, 2)) / (gamma_G**2)))
L_x = gamma_L / (np.pi * (np.power(x-x_0, 2) + gamma_L**2))
return amplitude*(η*L_x + (1-η)*G_x)
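# Fitting sketch (illustrative only; ThreePseudoVoigts is defined just below, and the compound
# model suffixes each submodel's parameters with its index, e.g. x_0_0 for the first line):
#
#     from astropy.modeling import fitting
#     cat_model = ThreePseudoVoigts(x_0_0=8498.03, x_0_1=8542.09, x_0_2=8662.14)
#     cat_model.x_0_1.tied = tie_ca_lines_1   # tie the line spacings to the first CaT line
#     cat_model.x_0_2.tied = tie_ca_lines_2
#     fitted = fitting.LevMarLSQFitter()(cat_model, wavelength, flux)   # hypothetical spectrum arrays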
class ThreePseudoVoigts(PseudoVoigt1D + PseudoVoigt1D + PseudoVoigt1D):
"""Evaluates the sum of three PseudoVoigt1D."""
```
|
{
"source": "jeffreysimpson/jeffreysimpson.github.io",
"score": 2
}
|
#### File: jeffreysimpson.github.io/_bibliography/create_papersbib.py
```python
import os
import requests
import pandas as pd
import numpy as np
free_access_keys = {"10.1093/mnras/stab2012": "https://academic.oup.com/mnras/article/507/1/43/6321839?guestAccessKey=bee13ddf-207b-4889-a894-5dcd9488d637",
"10.1093/mnras/stz3105": "https://academic.oup.com/mnras/article/491/3/3374/5613398?guestAccessKey=569ca73d-405e-482f-91f9-278fbf620525",
"10.1093/mnras/stz2611": "https://academic.oup.com/mnras/article/490/1/741/5570616?guestAccessKey=<KEY>",
"10.1093/mnras/sty3042": "https://academic.oup.com/mnras/article/482/4/5302/5173100?guestAccessKey=65582380-8b2d-4ef9-b027-428a4f52e95a",
"10.1093/mnras/sty1281": "https://academic.oup.com/mnras/article/478/4/4513/4996802?guestAccessKey=02d5df4d-0a31-47d8-ae4d-5d5d6de9e64c",
"10.1093/mnras/stz3479": "https://academic.oup.com/mnras/article/492/1/1370/5681406?guestAccessKey=1b2999f1-5e8c-44ee-9a29-6744ee9385b7"}
def get_config():
"""
Load ADS developer key from file and
and return the headers for the request
"""
if os.getenv('ADS_TOKEN') is None:
with open(os.path.expanduser("~/.ads/dev_key")) as f:
token = f.read().strip()
else:
token = os.getenv('ADS_TOKEN')
return {
"url": "https://api.adsabs.harvard.edu/v1/biblib",
"headers": {
"Authorization": "Bearer:{}".format(token),
"Content-Type": "application/json",
},
}
def get_bibcodes(library_id):
"""Get the bibcodes for all the papers in the library."""
start = 0
rows = 1000
config = get_config()
url = f"{config['url']}/libraries/{library_id}"
r = requests.get(url,
params={"start": start,
"rows": rows},
headers=config["headers"],
)
# Get all the documents that are inside the library
try:
bibcodes = r.json()["documents"]
except ValueError:
raise ValueError(r.text)
return bibcodes
def get_pub_df(library_id):
config = get_config()
bibcodes = get_bibcodes(library_id)
fields_wants = ["bibcode","title","year","bibstem","author_count","citation_count",
"volume","pub","page_range","issue","identifier","author","doi","date","doctype",
"abstract", "bibstem"]
r = requests.post("https://api.adsabs.harvard.edu/v1/search/bigquery",
params={"q": "*:*",
"fl": ",".join(fields_wants),
"rows": 2000},
headers={'Authorization': config['headers']['Authorization'],
'Content-Type': 'big-query/csv'},
data="bibcode\n" + "\n".join(bibcodes))
doc_dict = r.json()['response']['docs']
pub_df = pd.DataFrame(doc_dict)
pub_df.fillna(value=" ", inplace=True)
return pub_df
def reorder(a):
a = a.replace("Galah", "GALAH")
if "," not in a:
return a
return a #" ".join([a.split(", ")[1],a.split(", ")[0]])
def get_arxiv_str(pub):
arXiv_id = [i for i in pub['identifier'] if i.startswith("arXiv:")]
if len(arXiv_id) == 0:
return None
return f"{arXiv_id[0]}"
def fix_title(title):
things_to_fix = [[r"$\sim$", "~"],
[r"$R$", "*R*"],
[r"[$\alpha/\rm Fe]$", "[α/Fe]"],
[r"$\alpha$", "α"],
[r"∼", "~"],
[r"$< -0.75$", "< −0.75"],
[r"$\textit{TESS}$", "*TESS*"],
[r"$Gaia$", "*Gaia*"],
[r"${S}^5$", "S⁵"],
[r"$S^5$", "S⁵"],
["(S5)", "(S⁵)"],
["S <SUP>5</SUP>", "S⁵"],
["S<SUP>5</SUP>", "S⁵"],]
for thing_to_fix in things_to_fix:
title = title.replace(thing_to_fix[0], thing_to_fix[1])
return title
def write_bibtex(bibtex_file, pub_df):
with open(bibtex_file, 'w') as bibfile:
for *_, publication in pub_df.sort_values(['date', 'bibcode'], ascending=[False, False]).iterrows():
if publication.doctype in ignore_doctype:
continue
bibfile.write(f"@{publication.doctype}")
bibfile.write("{")
bibfile.write(f"{publication.bibcode},\n")
bibfile.write(f" year={{{publication.year}}},\n")
bibfile.write(f" title={{{fix_title(publication.title[0])}}},\n")
bibfile.write(f" author={{{' and '.join([reorder(a) for a in publication.author])}}},\n")
bibfile.write(f" journal={{{publication.bibstem[0]}}},\n")
if publication.volume != ' ':
bibfile.write(f" volume={{{publication.volume}}},\n")
if publication.issue != ' ':
bibfile.write(f" issue={{{publication.issue}}},\n")
if publication.page_range != ' ':
bibfile.write(f" pages={{{publication.page_range}}},\n")
if publication.doi != ' ':
bibfile.write(f" doi={{{publication.doi[0]}}},\n")
if get_arxiv_str(publication) is not None:
bibfile.write(f" arxiv={{{get_arxiv_str(publication)}}},\n")
bibfile.write(f" html={{https://ui.adsabs.harvard.edu/abs/{publication.bibcode}}},\n")
bibfile.write(f" abstract={{{publication.abstract}}},\n")
if publication.bibcode in selected_publications:
bibfile.write(f" selected={{true}},\n")
bibfile.write("}\n\n")
selected_publications = ["2021MNRAS.507...43S",
"2021MNRAS.505.5340M",
"2020MNRAS.495L.129K",
"2020MNRAS.491.2465K",
"2020MNRAS.491.3374S",
"2020Natur.583..768W",
"2019MNRAS.490..741S"]
ignore_doctype = ["catalog", "proposal", "inproceedings", "abstract"]
write_bibtex("assets/bibliography/projects.bib", get_pub_df("8YvxNAmnT1Kf09WBpxQ4Sg"))
pub_df = get_pub_df("XWjvILPkS_qyDvkCLtLOPw")
write_bibtex("_bibliography/papers.bib", pub_df)
h_index = sum(c >= i + 1 for i, c in enumerate(sorted(pub_df.citation_count, reverse=True)))
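# Worked example of the h-index expression above (illustrative numbers): for citation counts
# [10, 4, 3, 1] the sorted, enumerated comparison gives 10>=1, 4>=2, 3>=3, 1<4, so the sum of
# True values -- and hence the h-index -- is 3.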
with open("_pages/publications.md", 'w') as pub_md:
pub_md.write(f"""---
layout: page
permalink: /publications/
title: publications
description: all of my publications in reversed chronological order. generated by jekyll-scholar.
years: {sorted([int(y) for y in pub_df[~np.isin(pub_df.doctype,ignore_doctype)].year.unique()],reverse=True)}
nav: true
---
<div class="publications">
""")
pub_md.write(f"""Metrics are obviously problematic, but anyway, I have an h-index of {h_index}.""")
pub_md.write("""
{% for y in page.years %}
<h2 class="year">{{y}}</h2>
{% bibliography -f papers -q @*[year={{y}}]* %}
{% endfor %}
</div>
""")
```
|
{
"source": "jeffreysimpson/new-cv",
"score": 3
}
|
#### File: new-cv/code/write_tex.py
```python
from __future__ import division, print_function
import json
from datetime import date
# from operator import itemgetter
__all__ = ["format_pub"]
JOURNAL_MAP = {
"ArXiv e-prints": "ArXiv",
"Monthly Notices of the Royal Astronomical Society": "\\mnras",
"The Astrophysical Journal": "\\apj",
"The Astronomical Journal": "\\aj",
"Publications of the Astronomical Society of the Pacific": "\\pasp",
"IAU General Assembly": "IAU",
"American Astronomical Society Meeting Abstracts": "AAS",
}
def format_pub(args):
ind, pub = args
pub["title"] = pub["title"].replace("<SUP>5</SUP>", "$^5$")
pub["title"] = pub["title"].replace("<SUP>-1</SUP>", "$^{-1}$")
pub["title"] = pub["title"].replace("ω", "$\omega$")
fmt = "\\item[{{\\color{{numcolor}}\\scriptsize{0}}}] ".format(ind)
n = [i for i in range(len(pub["authors"]))
if "<NAME>" in pub["authors"][i]][0]
pub["authors"][n] = "\\textbf{<NAME>.}"
if len(pub["authors"]) > 4:
# print(pub["authors"])
# print(pub["authors"].index('\\textbf{<NAME>.}'))
fmt += ", ".join(pub["authors"][:3])
fmt += r", \etal"
if n >= 3:
fmt += "\\ (incl.\\ \\textbf{JDS})"
elif len(pub["authors"]) > 1:
fmt += ", ".join(pub["authors"][:-1])
fmt += ", \\& " + pub["authors"][-1]
else:
fmt += pub["authors"][0]
fmt += ", {0}".format(pub["year"])
if pub["doi"] is not None:
fmt += ", \\doi{{{0}}}{{{1}}}".format(pub["doi"], pub["title"])
else:
fmt += ", " + pub["title"]
if not pub["pub"] in [None, "ArXiv e-prints"]:
fmt += ", " + JOURNAL_MAP.get(pub["pub"].strip("0123456789# "),
pub["pub"])
if pub["volume"] is not None:
fmt += ", \\textbf{{{0}}}".format(pub["volume"])
if pub["page"] is not None:
fmt += ", {0}".format(pub["page"])
if (pub["doi"] is None) & (pub["arxiv"] is not None):
fmt += " (\\arxiv{{{0}}})".format(pub["arxiv"])
if pub["citations"] > 1:
fmt += " [\\href{{{0}}}{{{1}~citations}}]".format(pub["url"],
pub["citations"])
if pub["citations"] == 1:
fmt += " [\\href{{{0}}}{{{1}~citation}}]".format(pub["url"],
pub["citations"])
return fmt
if __name__ == "__main__":
with open("json/pubs.json", "r") as f:
pubs = json.load(f)
pubs = [p for p in pubs if p["doctype"] in ["article",
"eprint",
"inproceedings",
"abstract"]]
ref = [p for p in pubs if p["doctype"] == "article"]
inproceedings = [p for p in pubs if p["doctype"] in ["abstract",
"inproceedings"]]
unref = [p for p in pubs if p["doctype"] == "eprint"]
# Compute citation stats
npapers = len(ref)
nfirst = sum(1 for p in ref if "Simpson" in p["authors"][0])
cites = sorted((p["citations"] for p in pubs), reverse=True)
print(cites)
ncitations = sum(cites)
hindex = sum(c >= i + 1 for i, c in enumerate(cites))
summary = (
"""{1} refereed publications. {2} refeered publications as first author.
Total citations~=~{3}; h-index~=~{4} ({0})""".format(
date.today(), npapers, nfirst, ncitations, hindex))
with open("tex_files/pubs_summary.tex", "w") as f:
f.write(summary)
ref = list(map(format_pub, zip(range(len(ref), 0, -1), ref)))
unref = list(map(format_pub, zip(range(len(unref), 0, -1), unref)))
inproceedings = list(map(
format_pub, zip(range(len(inproceedings), 0, -1), inproceedings)))
with open("tex_files/pubs_ref.tex", "w") as f:
f.write("\n\n".join(ref))
with open("tex_files/pubs_unref.tex", "w") as f:
f.write("\n\n".join(unref))
with open("tex_files/pubs_inproceedings.tex", "w") as f:
f.write("\n\n".join(inproceedings))
```
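`format_pub` takes an `(index, publication)` pair, so a hypothetical record can be formatted directly; the field names below are taken from the function above, the values are invented, and the `"<NAME>"` author entry mirrors the anonymised placeholder the function searches for:

```python
# Hypothetical publication record; only the keys read by format_pub are filled in.
example_pub = {
    "title": "An example paper",
    "authors": ["<NAME>", "Other, A."],
    "year": 2020,
    "doi": None,
    "pub": "Monthly Notices of the Royal Astronomical Society",
    "volume": "500",
    "page": "123",
    "arxiv": "2001.00001",   # example identifier, not a real paper
    "citations": 2,
    "url": "https://ui.adsabs.harvard.edu/abs/...",
}
print(format_pub((1, example_pub)))  # LaTeX \item line for the CV
```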
|
{
"source": "jeffreysimpson/robot_galah",
"score": 3
}
|
#### File: jeffreysimpson/robot_galah/get_images.py
```python
import logging
import logging.config
import shutil
import sys
from pathlib import Path
import requests
from PIL import Image, ImageDraw, ImageFont
def download_image(survey_url, star_ra, star_dec, logger, base_image):
"""Downloads the HiPS image.
This research made use of hips2fits,
(https://alasky.u-strasbg.fr/hips-image-services/hips2fits)
a service provided by CDS."""
response = requests.get(
url="http://alasky.u-strasbg.fr/hips-image-services/hips2fits",
params={
"hips": survey_url,
"width": 1000,
"height": 1000,
"fov": 0.25,
"projection": "TAN",
"coordsys": "icrs",
"ra": star_ra,
"dec": star_dec,
"format": "jpg",
"stretch": "linear",
},
stream=True,
)
logger.debug("Tried %s", response.url)
if response.status_code < 400:
logger.debug("HTTP response: %s", response.status_code)
with open(base_image, "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
logger.info("Saved the image to %s", base_image)
del response
else:
logger.error("BAD HTTP response: %s", response.status_code)
logger.error("%s", response.json()["title"])
logger.error("Did not get the sky image. Quitting.")
sys.exit("Did not get the sky image. Quitting.")
def get_best_survey(avail_hips, wanted_surveys, star_dec):
"""This ranks the avaiable HIPS in order of preference."""
rankings = dict(zip(wanted_surveys, range(len(wanted_surveys))))
# The PanSTARRS MOC is wrong and you get blank images for stars south of -29.5.
# So make the PanSTARRS ranking really low for those stars.
if star_dec < -29.5:
rankings["CDS/P/PanSTARRS/DR1/color-z-zg-g"] = 999
best_survey_id = wanted_surveys[
min([rankings[avail_hip["ID"]] for avail_hip in avail_hips])
]
return list(filter(lambda x: x["ID"] == best_survey_id, avail_hips))[0]
def add_overlay(
base_image, secrets_dict, logger, tweet_content_dir, BEST_NAME, survey_name
):
# Necessary to force to a string here for the ImageFont bit.
font = ImageFont.truetype(
str(Path.joinpath(Path(secrets_dict["font_dir"]), "Roboto-Bold.ttf")), 40
)
try:
img_sky = Image.open(base_image)
except FileNotFoundError as e:
logger.error(e)
logger.error("Could not load the sky image. Quitting.")
sys.exit("Could not load the sky image. Quitting.")
logger.info("Adding the overlay")
draw = ImageDraw.Draw(img_sky, "RGBA")
draw.line([((500 - 80), 500), ((500 - 20), 500)], fill="white", width=5)
draw.line([(500, (500 + 80)), (500, (500 + 20))], fill="white", width=5)
draw.line(
[(815, (1000 - 70)), (815 + 1000 / 15 * 2, (1000 - 70))], fill="white", width=5
)
draw.text((30, 10), f"{BEST_NAME}", (255, 255, 255), font=font)
draw.text((30, (1000 - 60)), f"{survey_name}", (255, 255, 255), font=font)
draw.text((800, (1000 - 60)), "2 arcmin", (255, 255, 255), font=font)
overlayed_image = Path.joinpath(tweet_content_dir, "sky_image_overlay.jpg")
img_sky.save(overlayed_image)
logger.info("Saved overlayed image to %s", overlayed_image)
def get_hips_image(star_ra, star_dec, BEST_NAME, secrets_dict):
"""Main function to get a sky image for the given star."""
cwd = Path(__file__).parent
tweet_content_dir = Path.joinpath(cwd, "tweet_content")
config_file = Path.joinpath(cwd, "logging.conf")
logging.config.fileConfig(config_file)
# create logger
logger = logging.getLogger("get_images")
base_image = Path.joinpath(tweet_content_dir, "sky_image.jpg")
wanted_surveys = [
"CDS/P/DECaLS/DR5/color",
"cds/P/DES-DR1/ColorIRG",
"CDS/P/PanSTARRS/DR1/color-z-zg-g",
"CDS/P/SDSS9/color-alt",
"CDS/P/DSS2/color",
]
logger.info("Getting the list of useful HIPS")
response = requests.get(
url="http://alasky.unistra.fr/MocServer/query",
params={
"fmt": "json",
"RA": star_ra,
"DEC": star_dec,
"SR": 0.25,
"intersect": "enclosed",
# "dataproduct_subtype":"color",
"fields": ",".join(["ID", "hips_service_url", "obs_title"]),
"creator_did": ",".join([f"*{i}*" for i in wanted_surveys]),
},
)
if response.status_code < 400:
logger.debug("HTTP response: %s", response.status_code)
avail_hips = response.json()
for possible_survey in avail_hips:
logger.debug("Possible HIPS options: %s", possible_survey["ID"])
best_survey = get_best_survey(avail_hips, wanted_surveys, star_dec)
logger.info("The best ranking survey is: %s", best_survey["ID"])
download_image(
best_survey["hips_service_url"], star_ra, star_dec, logger, base_image
)
del response
else:
logger.error("BAD HTTP response: %s", response.status_code)
logger.error("%s", response.json()["title"])
logger.error("Did not get list of HIPS. Quitting.")
sys.exit("Did not get list of HIPS. Quitting.")
image_source = " ".join(best_survey["ID"].split("/")[2:])
add_overlay(
base_image, secrets_dict, logger, tweet_content_dir, BEST_NAME, image_source
)
return image_source
```
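For a quick check outside the bot, the same hips2fits endpoint used in `download_image` can be queried directly. A minimal sketch only: the survey ID is one of those listed in `wanted_surveys` above, and the coordinates are arbitrary example values.

```python
# Minimal sketch of a hips2fits cutout request, mirroring download_image above.
import requests

resp = requests.get(
    "http://alasky.u-strasbg.fr/hips-image-services/hips2fits",
    params={
        "hips": "CDS/P/DSS2/color",      # any HiPS ID; DSS2 covers the whole sky
        "width": 500, "height": 500,
        "fov": 0.25,                      # field of view in degrees
        "projection": "TAN",
        "coordsys": "icrs",
        "ra": 201.365, "dec": -43.019,    # example coordinates (arbitrary)
        "format": "jpg",
        "stretch": "linear",
    },
    stream=True,
)
if resp.status_code < 400:
    with open("cutout.jpg", "wb") as fh:
        for chunk in resp.iter_content(chunk_size=8192):
            fh.write(chunk)
```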
#### File: jeffreysimpson/robot_galah/plot_stellar_params.py
```python
import logging
import logging.config
import sys
from pathlib import Path
import galah_plotting
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
from matplotlib.colors import LogNorm
from matplotlib.offsetbox import AnchoredText
def plot_stellar_params(galah_dr3, the_star, BEST_NAME, basest_idx_galah):
rcParams["font.family"] = "sans-serif"
rcParams["font.sans-serif"] = ["Roboto"]
rcParams["figure.facecolor"] = "white"
plt.style.use("dark_background")
cwd = Path(__file__).parent
tweet_content_dir = Path.joinpath(cwd, "tweet_content")
config_file = Path.joinpath(cwd, "logging.conf")
logging.config.fileConfig(config_file)
# create logger
logger = logging.getLogger("plot_stellar_params")
plot_list_bases = [
[["teff", "logg"], ["fe_h", "alpha_fe"]],
[["L_Z", "Energy"], ["V_UVW", "U_UVW_W_UVW"]],
]
for plot_list_base in plot_list_bases:
logger.info(
"Creating the %s vs %s and %s vs %s plot",
plot_list_base[0][0],
plot_list_base[0][1],
plot_list_base[1][0],
plot_list_base[1][1],
)
fig, axes, redo_axes_list, *_ = galah_plotting.initialize_plots(
figsize=(2.0 * 1.15, 4 * 1.15),
things_to_plot=plot_list_base,
nrows=2,
ncols=1,
)
the_star_highlight = [
{
"idx": galah_dr3["sobject_id"] == the_star["sobject_id"],
"kwargs": dict(
s=50,
marker="*",
lw=0.4,
alpha=1.0,
c="C3",
zorder=100,
label="GES stars",
),
"errors": False,
},
]
for stars_to_highlight in [[], the_star_highlight]:
galah_plotting.plot_base_all(
plot_list_base,
stars_to_highlight,
basest_idx_galah,
axes,
table=galah_dr3,
SCATTER_DENSITY=True,
scatter_density_kwarg=dict(
cmap="viridis",
zorder=0,
alpha=1.0,
dpi=75,
norm=LogNorm(vmin=1, vmax=2000),
),
)
if plot_list_base[0][0] == "teff":
redo_axes_list["teff__logg"].update(
{
"xticks": np.arange(4500, 9000, 1000),
"yticks": np.arange(0, 6, 1),
"xlim": [8000, 4000],
# "xlabel": 'Effective temperature (K)',
# "ylabel": 'Surface gravity',
}
)
redo_axes_list["fe_h__alpha_fe"].update(
{
"xticks": np.arange(-3, 2, 1),
"yticks": np.arange(-1, 3, 1),
# "xlabel": '[Fe/H]',
# "ylabel": '[α/Fe]',
"xlim": [-2.7, 0.7],
"ylim": [-1.2, 1.5],
}
)
axes["teff__logg"].set_title(
f"GALAH DR3 stellar parameters of\n{BEST_NAME}"
)
if the_star["flag_alpha_fe"] != 0:
# axes['fe_h__alpha_fe'].axvline(the_star['fe_h'], c='C3', lw=2, alpha=0.5)
anchored_text = AnchoredText(
"[α/Fe] not measured for this star.",
loc="lower left",
frameon=False,
pad=0,
prop=dict(color="C3"),
)
axes["fe_h__alpha_fe"].add_artist(anchored_text)
if plot_list_base[0][0] == "L_Z":
redo_axes_list["L_Z__Energy"].update(
{
"xticks": np.arange(-4, 5, 2),
"yticks": np.arange(-4, 1, 1),
"xlim": [-2.5, 4.1],
"ylim": [-3.0, -0.8],
}
)
redo_axes_list["V_UVW__U_UVW_W_UVW"].update(
{
"xticks": np.arange(-400, 200, 200),
"yticks": np.arange(0, 500, 200),
"xlim": [-600, 100],
"ylim": [0, 500],
}
)
axes["L_Z__Energy"].set_title(
f"GALAH DR3 orbital properties of\n{BEST_NAME}"
)
galah_plotting.redo_plot_lims(axes, redo_axes_list)
# plt.show()
save_file_loc = Path.joinpath(
tweet_content_dir, f"stellar_params_{plot_list_base[0][0]}.png"
)
try:
fig.savefig(save_file_loc, bbox_inches="tight", dpi=500, transparent=False)
except TypeError as e:
logger.error(e)
logger.error("Did make stellar parameters plot. Quitting.")
sys.exit("Did make stellar parameters plot. Quitting.")
# fig.close()
logger.info("Saved plot to %s", save_file_loc)
plt.close(fig)
return 0
```
|
{
"source": "jeffreysmith-jrs/ccb",
"score": 3
}
|
#### File: ccb/ccb/_core.py
```python
import pandas as _pd
import multiprocessing as _mp
from psutil import virtual_memory as _vm
# get info on the cpu for setting memory/thread limits
_ncpu = _mp.cpu_count()
_mems = _vm.virtual_memory()
_memb = _mems.total
class maxent:
def __init__(self, samples=None, layers=None, outdir=None, projection=None):
"""
"""
# set up a bunch of the parameters for running maxent from the command line
# boolean for creating graphs showing predicted relative probabilities
self.response_curves = response_curves
# boolean for creaing output pictures of predicted spatial distributions
self.pictures = pictures
# boolean to measure variable importance
        self.jackknife = jackknife
# set the format for output data reporting
formats = ['cloglog', 'logistic', 'cumulative', 'raw']
formats_default = 'logistic'
if outformat.lower() in formats:
self.outformat = outformat
else:
print("[ ERROR! ]: incorrect output format specified: {}".format(outformat))
print("[ ERROR! ]: must be one of: {}".format(', '.join(formats)))
print("[ ERROR! ]: using default: {}".format(formats_default))
self.outformat = formats_default
# set the output file type
types = ['asc', 'bil', 'grd', 'mxe']
types_default = 'bil'
if outtype.lower() in types:
self.outtype = outtype
else:
print("[ ERROR! ]: incorrect output data type specified: {}".format(outtype))
print("[ ERROR! ]: must be one of: {}".format(', '.join(types)))
print("[ ERROR! ]: using default: {}".format(types_default))
self.outtype = types_default
# set the output directory
self.outdir = outdir
# set the directories or files for projection
self.projection = projection
        # set the sample file and environmental layers
        self.samples = samples
        self.layers = layers
# boolean for log-scaling output pictures
self.logscale = logscale
# booleans for displaying warnings/tooltips
self.warnings = warnings
self.tooltips = tooltips
# booleans for overwriting/skipping existing files
self.overwrite = overwrite
self.skip_exists = skip_exists
# boolean to remove duplicate data points in the same grid cell
self.remove_duplicates = remove_duplicates
# booleans to write certain outputs
self.write_clamp_grid = write_clamp_grid
self.write_mess = write_mess
self.write_plot = write_plot
self.write_grids = write_grids
self.write_plots = write_plots
# parameters for sampling test data
self.test_samples = test_sample_file
# set test percentage to an integer if a float is passed
test_pct_default = 30
if type(test_pct) is float:
self.test_pct = int(100 * test_pct)
else:
try:
self.test_pct = int(test_pct)
except:
print("[ ERROR! ]: incorrect test percent specified: {}".format(test_pct))
print("[ ERROR! ]: must be an integer between 0-100")
print("[ ERROR! ]: using default: {}".format(test_pct_default))
self.test_pct = test_pct_default
# set the beta multiplier
self.beta_multiplier = beta_multiplier
# the number of background points
self.n_background = n_background
# set replicate parameters
self.n_replicates = n_replicates
# sample bias file (should be raster with 0 < values < 100)
self.bias_file = bias_file
# set how replicates are handled
replicate_types = ['crossvalidate', 'bootstrap', 'subsample']
replicate_types_default = 'crossvalidate'
if replicate_type.lower() in replicate_types:
self.replicate_type = replicate_type
else:
print("[ ERROR! ]: incorrect replicate type specified: {}".format(replicate_type))
print("[ ERROR! ]: must be one of: {}".format(', '.join(replicate_types)))
print("[ ERROR! ]: using default: {}".format(replicate_types_default))
self.replicate_type = replicate_types_default
# booleans for writing additional output files
self.per_species_results = per_species_results
self.write_background = write_background
# set options for the features to use
features_types = ['linear', 'quadratic', 'product', 'threshold', 'hinge', 'auto']
features_default = ['hinge']
if features in features_types:
self.features = features
else:
print("[ ERROR! ]: incorrect features specified: {}".format(', '.join(features)))
print("[ ERROR! ]: must be one of: {}".format(', '.join(features_types)))
print("[ ERROR! ]: using default: {}".format(', '.join(features_default)))
# set options for adding background samples
self.add_samples_background = add_samples_background
self.add_all_background = add_all_background
# set clamping options
self.fade_by_clamping = fade_by_clamping
self.clamp = clamp
# enable extrapolation to novel conditions
        self.extrapolate = extrapolate
# set a dummy variable to state this object has not yet been initialized
# (i.e., the sample file parsed for species)
self.initialized = False
# finally, set the memory and threads for running maxent
self.memory = memory
self.threads = threads
def initialize(self):
"""
"""
# check that the bare minimum data have been set
if self.samples is None:
print("[ ERROR! ]: no sample file has been set. Unable to initialize.")
return -1
if self.layers is None:
print("[ ERROR! ]: no layers have been set. Unable to initialize.")
return -1
def set_layers(self, directory, layers=None):
"""
"""
def build_string(self):
"""
"""
def run(self):
"""
"""
# check that the object has been initialized to check on
if not self.initialized:
print("[ ERROR! ]: unable to run maxent. run {}.initialize() first".format(self.__name__))
return -1
        # first build the maxent command string
```
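The constructor above repeats the same validate-or-fall-back-to-default pattern for the output format, output type, replicate type, and features. A small helper capturing that pattern might look like this (a sketch only; `validate_option` is not part of the original module):

```python
# Sketch of the validate-or-default pattern repeated in maxent.__init__ above.
def validate_option(value, allowed, default, label):
    """Return value if it is an allowed option, otherwise warn and return the default."""
    if str(value).lower() in allowed:
        return str(value).lower()
    print("[ ERROR! ]: incorrect {} specified: {}".format(label, value))
    print("[ ERROR! ]: must be one of: {}".format(', '.join(allowed)))
    print("[ ERROR! ]: using default: {}".format(default))
    return default

# e.g. self.outformat = validate_option(outformat,
#          ['cloglog', 'logistic', 'cumulative', 'raw'], 'logistic', 'output format')
```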
|
{
"source": "jeffreysmith-jrs/general",
"score": 2
}
|
#### File: general/maxentProcessing/postMaxent.py
```python
import os, codecs
import numpy as np
import pandas as pd
from glob import glob
from math import exp
def postMaxent(outputDirectory, species):
    # Accept either a single species name or a list of names
    if isinstance(species, str):
        species = [species]
    species_list = species
    for t in range(len(species_list)):
        species = str(species_list[t])
#Read in maxent HTML
htmlFile = codecs.open(str(outputDirectory + species + '.html'), 'r')
maxentHTML = htmlFile.read()
htmlFile.close()
#Get the variables used in the Maxent Run
batchLineCommand = maxentHTML.split('\n')[-2]
batchLineCommand = batchLineCommand.replace('<br>', ' ') #End on a space
#Figure out where in the string things start and extract relevant information
startSamples = batchLineCommand.find('samplesfile') + 12
endSamples = batchLineCommand[startSamples:].find(' ') + startSamples
samplesFile = batchLineCommand[startSamples:endSamples]
#Get environmental layers used to make the maxent model
startEnv = batchLineCommand.find('environmentallayers') + 20
endEnv = batchLineCommand[startEnv:].find(' ') + startEnv
inputAsciis = batchLineCommand[startEnv:endEnv]
#Deal with replicated runs
startReplicates = batchLineCommand.find('replicates') + 11
if startReplicates != 10:
#This became an issue if replicates was the last thing listed in the command line
#So I had to add to search for either the next space or the line break
            endReplicates = batchLineCommand[startReplicates:].find(' ')
            if endReplicates > 0:
                endReplicates = endReplicates + startReplicates
            else:
                # replicates was the last option on the command line, so read to the end
                endReplicates = len(batchLineCommand)
replicates = int(batchLineCommand[startReplicates:endReplicates])
#Unreplicated runs
else:
replicates = 1
#Figure out which, if any asciis were excluded from analysis
toExclude = batchLineCommand
excludedLayers = []
while toExclude.find('-N') > 0:
startExclude = toExclude.find('-N') + 3
endExclude = toExclude[startExclude:].find(' ') + startExclude
excludeLayer = toExclude[startExclude:endExclude]
toExclude = toExclude[endExclude:]
excludedLayers = excludedLayers + [excludeLayer]
#Read in dataframes
#Make blank dictionary to hold them all
predictors = {}
environmentalLayers = glob(str(inputAsciis + '\\*.asc'))
for i in range(len(environmentalLayers)):
asciiName = environmentalLayers[i].split('\\')[-1].split('.')[0]
        #Make sure those asciis were not excluded from the analysis
if asciiName not in excludedLayers:
predictors[asciiName] = np.genfromtxt(environmentalLayers[i], skip_header = 6)
predictors[asciiName][predictors[asciiName] == -9999] = np.nan
replicatedRuns = np.zeros(shape = (replicates, len(predictors[asciiName]), len(predictors[asciiName][0])))
for k in range(replicates):
#Make blank basemap
fx = np.zeros_like(predictors[asciiName]) * predictors[asciiName]
fx = fx.astype(np.float64)
#Read in lambdas file from maxent
if replicates > 1:
fname = str(outputDirectory + species + '_' + str(k) + '.lambdas')
else:
fname = str(outputDirectory + species + '.lambdas')
            lambdas = pd.read_csv(fname, names = ['variable', 'lambda', 'min', 'max'], dtype = {'variable':str})
#Extract constants
linearPredictorNormalizer = float(lambdas.loc[lambdas['variable'] == 'linearPredictorNormalizer']['lambda'])
densityNormalizer = float(lambdas.loc[lambdas['variable'] == 'densityNormalizer']['lambda'])
numBackgroundPoints = float(lambdas.loc[lambdas['variable'] == 'numBackgroundPoints']['lambda'])
entropy = float(lambdas.loc[lambdas['variable'] == 'entropy']['lambda'])
#Drop rows with constants
lambdas = lambdas[:-4]
            #Check for categorical features:
catagoricalFeatures = lambdas[lambdas['variable'].str.contains('\=')]
lambdas.drop(catagoricalFeatures.index, inplace = True)
catagoricalFeatures = catagoricalFeatures.reset_index(drop = True)
if len(catagoricalFeatures) >= 1:
for i in range(len(catagoricalFeatures)):
layer, cutoff = catagoricalFeatures['variable'][i].strip('/(').strip('/)').split('=')
x = predictors[layer]
category = np.where(x == float(cutoff), catagoricalFeatures['lambda'][i], 0)
fx += category
#Check for threshold features:
thresholdFeatures = lambdas[lambdas['variable'].str.contains('\<')]
lambdas.drop(thresholdFeatures.index, inplace = True)
thresholdFeatures = thresholdFeatures.reset_index(drop = True)
if len(thresholdFeatures) >= 1:
for i in range(len(thresholdFeatures)):
cutoff, layer = thresholdFeatures['variable'][i].strip('/(').strip('/)').split('<')
x = predictors[layer]
threshold = np.where(x < float(cutoff), 0, thresholdFeatures['lambda'][i])
fx += threshold
#Check for reverse hinge features
reverseFeatures = lambdas[lambdas['variable'].str.contains('`')]
lambdas.drop(reverseFeatures.index, inplace = True)
reverseFeatures = reverseFeatures.reset_index(drop = True)
if len(reverseFeatures) >= 1:
for i in range(len(reverseFeatures)):
a = reverseFeatures['variable'][i].split('`')[1]
x = predictors[a]
hinge = np.where(x < reverseFeatures['max'][i], (reverseFeatures['lambda'][i] *
(reverseFeatures['max'][i] - x) /
(reverseFeatures['max'][i] - reverseFeatures['min'][i])),
0)
fx += hinge
            #Check for forward hinge features
forwardFeatures = lambdas[lambdas['variable'].str.contains("'")]
lambdas.drop(forwardFeatures.index, inplace = True)
forwardFeatures = forwardFeatures.reset_index(drop = True)
if len(forwardFeatures) >= 1:
for i in range(len(forwardFeatures)):
a = forwardFeatures['variable'][i].split("'")[1]
x = predictors[a]
hinge = np.where(x >= forwardFeatures['min'][i], (forwardFeatures['lambda'][i] *
(x - forwardFeatures['min'][i]) /
(forwardFeatures['max'][i] - forwardFeatures['min'][i])),
0)
fx += hinge
#Check for multiplicative features
multiplicativeFeatures = lambdas[lambdas['variable'].str.contains('\*')]
lambdas.drop(multiplicativeFeatures.index, inplace = True)
multiplicativeFeatures = multiplicativeFeatures.reset_index(drop = True)
#Calculate contribution of multiplicative features
if len(multiplicativeFeatures) >= 1:
for i in range(len(multiplicativeFeatures)):
a, b = multiplicativeFeatures['variable'][i].split('*')
x1 = predictors[a]
x2 = predictors[b]
fx += ( multiplicativeFeatures['lambda'][i] *
(x1 * x2 - multiplicativeFeatures['min'][i]) /
(multiplicativeFeatures['max'][i] - multiplicativeFeatures['min'][i]))
#Check for squared features
squareFeatures = lambdas[lambdas['variable'].str.contains('\^')]
lambdas.drop(squareFeatures.index, inplace = True)
squareFeatures = squareFeatures.reset_index(drop = True)
#Calculate contribution of squared features
if len(squareFeatures) >= 1:
for i in range(len(squareFeatures)):
a = squareFeatures['variable'][i].split('^')[0]
x = predictors[a]
fx += ( squareFeatures['lambda'][i] *
(x * x - squareFeatures['min'][i]) /
(squareFeatures['max'][i] - squareFeatures['min'][i]))
#Finally do linear features
lambdas = lambdas.reset_index(drop = True)
if len(lambdas) >= 1:
for i in range(len(lambdas)):
a = lambdas['variable'][i]
x = predictors[a]
fx += ( lambdas['lambda'][i] *
(x - lambdas['min'][i]) /
(lambdas['max'][i] - lambdas['min'][i]))
s = fx - linearPredictorNormalizer
qx = np.exp(s) / densityNormalizer
ls = (qx * np.exp(entropy) / (1 + qx * np.exp(entropy)))
replicatedRuns[k,:,:] = np.copy(ls)
return replicatedRuns
```
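The final few lines implement Maxent's logistic output transform. A toy sketch of the same arithmetic, with made-up constants standing in for the values normally read from the `.lambdas` file:

```python
# Toy illustration of the logistic transform applied at the end of postMaxent.
# The three constants are invented here; the real values come from the .lambdas file.
import numpy as np

fx = np.array([1.2, -0.3, 0.0])   # summed feature contributions (hypothetical)
linearPredictorNormalizer = 1.0
densityNormalizer = 50.0
entropy = 4.0

s = fx - linearPredictorNormalizer
qx = np.exp(s) / densityNormalizer
logistic = qx * np.exp(entropy) / (1 + qx * np.exp(entropy))
print(logistic)  # relative suitability values between 0 and 1
```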
|
{
"source": "JeffreyStrahm/Creer",
"score": 3
}
|
#### File: Creer/creer/input.py
```python
import glob
def validate(inputs):
validated_inputs = []
for input_dir in inputs:
dirs = glob.glob(input_dir)
if not dirs:
raise Exception("No directories matching {}".format(input_dir))
validated_inputs.extend(dirs)
for validated_input in validated_inputs:
print(">> Input Directory:", validated_input)
return validated_inputs
```
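`validate` simply expands each glob pattern and raises if a pattern matches nothing; a usage sketch (the patterns below are hypothetical and must point at real directories):

```python
# Hypothetical usage of validate(); the glob patterns are examples only.
input_dirs = validate(["./games/*", "./more_games"])
print(len(input_dirs), "input directories will be processed")
```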
|
{
"source": "jeffreysward/cmaqpy",
"score": 3
}
|
#### File: cmaqpy/cmaqpy/runcmaq.py
```python
import datetime
import os
import sys
import time
from . import utils
from .data.fetch_data import fetch_yaml
class CMAQModel:
"""
This class provides a framework for running the CMAQ Model.
    NOTE: eventually need to figure out how to link files for representative days
through the following Sunday. Right now, I do this manually by adding 1 to the
length of days.
Parameters
----------
:param start_datetime: string
Start date for the CMAQ simulation. I think you can technically
start CMAQ at any time, but it's probably best to simply start at 00:00 UTC.
:param end_datetime: string
Date after the last day for which you want to run CMAQ. E.g., if you list
August 4, 2016 the simulation will end August 4, 2016 00:00 UTC.
:param appl: string
Application name. Used primarily for directory and file naming.
:param coord_name: string
Coordinate name which must match the GRIDDESC file (e.g., LAM_40N100W).
:param grid_name: string
Grid name which must match the GRIDDESC file (e.g., 12OTC2).
:param chem_mech: string
CMAQ chemical mechanism. This defaults to "cb6r3_ae7_aq."
:param cctm_vrsn: string
Version identifier for CCTM, which is required for identifying the
executables.
:param setup_yaml: string
        Name of the yaml file containing your directory paths if located in this directory.
Otherwise, use the full file path.
:param compiler: string
Compiler identifier for use in naming.
:param compiler_vrsn: string
Compiler version number for use in naming.
:param new_mcip: bool
Option to run MCIP or use MCIP output from a previous run. Defaults to True.
:param new_icon: bool
Option to run ICON or use ICON output (or a CGRID file) from a previous run.
        Defaults to False.
:param icon_vrsn: string
ICON version number for use in naming.
:param icon_type: string
Method for creating initial conditions. Options are [profile, regrid].
:param new_bcon: bool
Option to run BCON or use BCON output from a previous run. Defaults to True.
:param bcon_vrsn: string
BCON version number for use in naming.
:param bcon_type: string
Method for creating boundary conditions. Options are [profile, regrid].
:param verbose: bool
        When True, additional information is printed to the screen about simulation progress.
See also
--------
SMOKEModel: setup and run the SMOKE model.
"""
def __init__(self, start_datetime, end_datetime, appl, coord_name, grid_name, chem_mech='cb6r3_ae7_aq', cctm_vrsn='v533', setup_yaml='dirpaths.yml', compiler='gcc', compiler_vrsn='9.3.1', new_mcip=True, new_icon=False, icon_vrsn='v532', icon_type='regrid', new_bcon=True, bcon_vrsn='v532', bcon_type='regrid', verbose=False):
self.appl = appl
self.coord_name = coord_name
self.grid_name = grid_name
self.chem_mech = chem_mech
self.cctm_vrsn = cctm_vrsn
self.compiler = compiler
self.compiler_vrsn = compiler_vrsn
self.new_icon = new_icon
self.icon_vrsn = icon_vrsn
self.icon_type = icon_type
self.new_bcon = new_bcon
self.bcon_vrsn = bcon_vrsn
self.bcon_type = bcon_type
self.verbose = verbose
self.cctm_runid = f'{self.cctm_vrsn}_{self.compiler}{self.compiler_vrsn}_{self.appl}'
if self.verbose:
print(f'Application name: {self.appl}\nCoordinate name: {self.coord_name}\nGrid name: {self.grid_name}')
print(f'CCTM RUNID: {self.cctm_runid}')
# Format the forecast start/end and determine the total time.
self.start_datetime = utils.format_date(start_datetime)
self.end_datetime = utils.format_date(end_datetime)
self.delt = self.end_datetime - self.start_datetime
if self.verbose:
print(f'CMAQ run starting on: {self.start_datetime}')
print(f'CMAQ run ending on: {self.end_datetime}')
# Define the domain windowing paramters for MCIP.
# Could perhaps move this to the run_mcip method.
if self.grid_name == '12OTC2':
self.mcip_btrim = -1
self.mcip_x0 = 141
self.mcip_y0 = 15
self.mcip_ncols = 273
self.mcip_nrows = 246
elif self.grid_name == '4OTC2':
self.mcip_btrim = -1
self.mcip_x0 = 87
self.mcip_y0 = 9
self.mcip_ncols = 126
self.mcip_nrows = 156
else:
# This will use the entire WRF domain
self.mcip_btrim = 0
self.mcip_x0 = 0
self.mcip_y0 = 0
self.mcip_ncols = 0
self.mcip_nrows = 0
# Set working and WRF model directory names
dirs = fetch_yaml(setup_yaml)
self.dirpaths = dirs.get('directory_paths')
self.filepaths = dirs.get('file_paths')
self.filenames = dirs.get('file_names')
self.CMAQ_HOME = self.dirpaths.get('CMAQ_HOME')
self.MCIP_SCRIPTS = f'{self.CMAQ_HOME}/PREP/mcip/scripts'
self.ICON_SCRIPTS = f'{self.CMAQ_HOME}/PREP/icon/scripts'
self.BCON_SCRIPTS = f'{self.CMAQ_HOME}/PREP/bcon/scripts'
self.CCTM_SCRIPTS = f'{self.CMAQ_HOME}/CCTM/scripts'
self.COMBINE_SCRIPTS = f'{self.CMAQ_HOME}/POST/combine/scripts'
self.CMAQ_DATA = self.dirpaths.get('CMAQ_DATA')
if new_mcip:
self.MCIP_OUT = f'{self.CMAQ_DATA}/{self.appl}/mcip'
else:
self.MCIP_OUT = self.dirpaths.get('LOC_MCIP')
self.CCTM_INPDIR = f'{self.CMAQ_DATA}/{self.appl}/input'
self.CCTM_OUTDIR = f'{self.CMAQ_DATA}/{self.appl}/output_CCTM_{self.cctm_runid}'
self.ICBC = f'{self.CCTM_INPDIR}/icbc'
self.CCTM_GRIDDED = f'{self.CCTM_INPDIR}/emis/gridded_area'
# self.CCTM_RWC = f'{self.CCTM_INPDIR}/emis/gridded_area/rwc'
# self.CCTM_BEIS = f'{self.CCTM_INPDIR}/emis/gridded_area/beis'
self.CCTM_PT = f'{self.CCTM_INPDIR}/emis/inln_point'
self.CCTM_LAND = f'{self.CCTM_INPDIR}/land'
self.POST = f'{self.CMAQ_DATA}/{self.appl}/post'
if new_icon:
self.LOC_IC = self.CCTM_OUTDIR
else:
self.LOC_IC = self.dirpaths.get('LOC_IC')
if new_bcon:
self.LOC_BC = f'{self.CMAQ_DATA}/{self.appl}/bcon'
else:
self.LOC_BC = self.dirpaths.get('LOC_BC')
self.LOC_GRIDDED_AREA = self.dirpaths.get('LOC_GRIDDED_AREA')
self.LOC_RWC = self.dirpaths.get('LOC_RWC')
self.LOC_BEIS = self.dirpaths.get('LOC_BEIS')
self.LOC_IN_PT = self.dirpaths.get('LOC_IN_PT')
self.LOC_ERTAC = self.dirpaths.get('LOC_ERTAC')
self.LOC_SMK_MERGE_DATES = self.dirpaths.get('LOC_SMK_MERGE_DATES')
self.LOC_LAND = self.dirpaths.get('LOC_LAND')
self.DIR_TEMPLATES = self.dirpaths.get('DIR_TEMPLATES')
self.InMetDir = self.dirpaths.get('InMetDir')
self.InGeoDir = self.dirpaths.get('InGeoDir')
# Define the locations for CMAQ inputs
self.GRIDDESC = self.filepaths.get('GRIDDESC')
self.SECTORLIST = self.filepaths.get('SECTORLIST')
# Define the names of the CMAQ output files
#### Maybe use this in the future ####
        # Define Linux command aliases
self.CMD_LN = 'ln -sf %s %s'
self.CMD_CP = 'cp %s %s'
self.CMD_MV = 'mv %s %s'
self.CMD_RM = 'rm %s'
self.CMD_GUNZIP = 'gunzip %s'
def run_mcip(self, mcip_start_datetime=None, mcip_end_datetime=None, metfile_list=[], geo_file='geo_em.d01.nc', t_step=60, run_hours=4, setup_only=False):
"""
Setup and run MCIP, which formats meteorological files (e.g. wrfout*.nc) for CMAQ.
Parameters
----------
:param mcip_start_datetime: string
Start date and time for MCIP. Defaults to None in which case the MCIP start
datetime will be assigned from the CMAQ start datetime.
:param mcip_end_datetime: string
End date and time for MCIP. Defaults to None in which case the MCIP end
datetime will be assigned from the CMAQ end datetime.
:param metfile_list: list
List of wrfout* files, which must be located in `self.InMetDir`, that will
be processed by MCIP.
:param geo_file: string
Name of geo_em* file associated with the wrfout* files you are processing.
:param t_step: int
Time step (MCIP INTVL parameter) of output data in minutes. Defaults to 60
min (1 hour).
:param run_hours: int
Number of hours to request from the scheduler.
        :param setup_only: bool
Option to setup the directories and write the scripts without running MCIP.
"""
## SETUP MCIP
if mcip_start_datetime is None:
mcip_start_datetime = self.start_datetime
else:
mcip_start_datetime = utils.format_date(mcip_start_datetime)
if mcip_end_datetime is None:
mcip_end_datetime = self.end_datetime
else:
mcip_end_datetime = utils.format_date(mcip_end_datetime)
# Set an 'MCIP APPL,' which will control file names
mcip_sdatestr = mcip_start_datetime.strftime("%y%m%d")
self.mcip_appl = f'{self.appl}_{mcip_sdatestr}'
# Remove existing log file
cmd = self.CMD_RM % (f'{self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.log')
os.system(cmd)
# Copy the template MCIP run script to the scripts directory
run_mcip_path = f'{self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_run_mcip.csh', run_mcip_path)
os.system(cmd)
# Write Slurm info
mcip_slurm = f'#SBATCH -J mcip_{self.appl} # Job name\n'
mcip_slurm += f'#SBATCH -o {self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.log\n'
mcip_slurm += f'#SBATCH --nodes=1 # Total number of nodes requested\n'
mcip_slurm += f'#SBATCH --ntasks=1 # Total number of tasks to be configured for.\n'
mcip_slurm += f'#SBATCH --tasks-per-node=1 # sets number of tasks to run on each node.\n'
mcip_slurm += f'#SBATCH --cpus-per-task=1 # sets number of cpus needed by each task.\n'
mcip_slurm += f'#SBATCH --get-user-env # tells sbatch to retrieve the users login environment.\n'
mcip_slurm += f'#SBATCH -t {run_hours}:00:00 # Run time (hh:mm:ss)\n'
mcip_slurm += f'#SBATCH --mem=20000M # memory required per node\n'
mcip_slurm += f'#SBATCH --partition=default_cpu # Which queue it should run on.\n'
utils.write_to_template(run_mcip_path, mcip_slurm, id='%SLURM%')
# Write IO info to the MCIP run script
mcip_io = f'source {self.CMAQ_HOME}/config_cmaq.csh {self.compiler} {self.compiler_vrsn}\n'
mcip_io += f'set APPL = {mcip_sdatestr}\n'
mcip_io += f'set CoordName = {self.coord_name}\n'
mcip_io += f'set GridName = {self.grid_name}\n'
mcip_io += f'set DataPath = {self.CMAQ_DATA}\n'
mcip_io += f'set InMetDir = {self.InMetDir}\n'
mcip_io += f'set InGeoDir = {self.InGeoDir}\n'
mcip_io += f'set OutDir = {self.MCIP_OUT}\n'
mcip_io += f'set ProgDir = $CMAQ_HOME/PREP/mcip/src\n'
mcip_io += f'set WorkDir = $OutDir\n'
utils.write_to_template(run_mcip_path, mcip_io, id='%IO%')
# Write met info to the MCIP run script
mcip_met = f'set InMetFiles = ( '
for ii, metfile in enumerate(metfile_list):
if ii < len(metfile_list) - 1:
mcip_met += f'$InMetDir/{metfile} \\\n'
else:
mcip_met += f'$InMetDir/{metfile} )\n'
mcip_met += f'set IfGeo = "T"\n'
mcip_met += f'set InGeoFile = {self.InGeoDir}/{geo_file}\n'
utils.write_to_template(run_mcip_path, mcip_met, id='%MET%')
# Write start/end info to MCIP run script
mcip_time = f'set MCIP_START = {mcip_start_datetime.strftime("%Y-%m-%d_%H:%M:%S.0000")}\n' # [UTC]
mcip_time += f'set MCIP_END = {mcip_end_datetime.strftime("%Y-%m-%d_%H:%M:%S.0000")}\n' # [UTC]
mcip_time += f'set INTVL = {t_step}\n' # [min]
utils.write_to_template(run_mcip_path, mcip_time, id='%TIME%')
# Write domain windowing parameters to MCIP run script
mcip_domain = f'set BTRIM = {self.mcip_btrim}\n'
mcip_domain += f'set X0 = {self.mcip_x0}\n'
mcip_domain += f'set Y0 = {self.mcip_y0}\n'
mcip_domain += f'set NCOLS = {self.mcip_ncols}\n'
mcip_domain += f'set NROWS = {self.mcip_nrows}\n'
utils.write_to_template(run_mcip_path, mcip_domain, id='%DOMAIN%')
if self.verbose:
print(f'Wrote MCIP run script to\n{run_mcip_path}')
## RUN MCIP
if not setup_only:
# Begin MCIP simulation clock
simstart = datetime.datetime.now()
if self.verbose:
print('Starting MCIP at: ' + str(simstart))
# sys.stdout.flush()
os.system(f'sbatch --requeue {self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.csh')
# Sleep until the run_mcip_{self.appl}.log file exists
while not os.path.exists(f'{self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.log'):
time.sleep(1)
mcip_sim = self.finish_check('mcip')
while mcip_sim != 'complete':
if mcip_sim == 'failed':
return False
else:
time.sleep(2)
mcip_sim = self.finish_check('mcip')
elapsed = datetime.datetime.now() - simstart
if self.verbose:
print(f'MCIP ran in: {utils.strfdelta(elapsed)}\n')
return True
def run_mcip_multiday(self, metfile_dir=None, metfile_list=[], geo_file='geo_em.d01.nc', t_step=60):
"""
Run MCIP over multiple days. Per CMAQ convention, daily MCIP files contain
25 hours each all the hours from the current day, and the first hour (00:00)
from the following day.
Parameters
----------
:param metfile_dir: string
Path to the directory where the wrfout* files are located.
:param metfile_list: list
List of wrfout* files, located in `metfile_dir`, that will be processed by MCIP.
:param geo_file: string
Name of geo_em* file associated with the wrfout* files you are processing.
:param t_step: int
Time step (MCIP INTVL parameter) of output data in minutes. Defaults to 60
min (1 hour).
"""
# Loop over each day
for day_no in range(self.delt.days):
success = False
# Set the start datetime, end datetime, and metfile list for the day
mcip_start_datetime = self.start_datetime + datetime.timedelta(day_no)
mcip_end_datetime = self.start_datetime + datetime.timedelta(day_no + 1)
if self.verbose:
print(f'--> Working on MCIP for {mcip_start_datetime}')
if metfile_dir is None:
# If all the met data is stored in the same file, pass that file in
# using metfile_list and set metfile_dir=None
metfile_list = metfile_list
else:
# Eventually, can add scripting here that assumes there's a different
# wrfout file produced every day and they are all located in metfile_dir.
pass
# run mcip for that day
self.run_mcip(mcip_start_datetime=mcip_start_datetime, mcip_end_datetime=mcip_end_datetime, metfile_list=metfile_list, geo_file=geo_file, t_step=t_step, setup_only=False)
def run_icon(self, coarse_grid_appl='coarse', run_hours=2, setup_only=False):
"""
Setup and run ICON, which produces initial conditions for CMAQ.
Parameters
----------
:param coarse_grid_appl: string
Application name for the coarse grid from which you are deriving initial conditions.
:param run_hours: int
Number of hours to request from the scheduler.
        :param setup_only: bool
Option to setup the directories and write the scripts without running ICON.
"""
## SETUP ICON
# Copy the template ICON run script to the scripts directory
run_icon_path = f'{self.ICON_SCRIPTS}/run_icon.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_run_icon.csh', run_icon_path)
os.system(cmd)
# Write Slurm info
icon_slurm = f'#SBATCH -J icon_{self.appl} # Job name\n'
icon_slurm += f'#SBATCH -o {self.ICON_SCRIPTS}/run_icon_{self.appl}.log\n'
icon_slurm += f'#SBATCH --nodes=1 # Total number of nodes requested\n'
icon_slurm += f'#SBATCH --ntasks=1 # Total number of tasks to be configured for.\n'
icon_slurm += f'#SBATCH --tasks-per-node=1 # sets number of tasks to run on each node.\n'
icon_slurm += f'#SBATCH --cpus-per-task=1 # sets number of cpus needed by each task.\n'
icon_slurm += f'#SBATCH --get-user-env # tells sbatch to retrieve the users login environment.\n'
icon_slurm += f'#SBATCH -t {run_hours}:00:00 # Run time (hh:mm:ss)\n'
icon_slurm += f'#SBATCH --mem=20000M # memory required per node\n'
icon_slurm += f'#SBATCH --partition=default_cpu # Which queue it should run on.\n'
utils.write_to_template(run_icon_path, icon_slurm, id='%SLURM%')
# Write ICON runtime info to the run script.
icon_runtime = f'#> Source the config_cmaq file to set the run environment\n'
icon_runtime += f'source {self.CMAQ_HOME}/config_cmaq.csh {self.compiler} {self.compiler_vrsn}\n'
#> Code Version
icon_runtime += f'set VRSN = {self.icon_vrsn}\n'
#> Application Name
icon_runtime += f'set APPL = {self.appl}\n'
#> Initial conditions type [profile|regrid]
        icon_runtime += f'set ICON_TYPE = {self.icon_type}\n'
#> check GRIDDESC file for GRID_NAME options
icon_runtime += f'setenv GRID_NAME {self.grid_name}\n'
#> grid description file path
icon_runtime += f'setenv GRIDDESC {self.CMAQ_DATA}/{self.appl}/mcip/GRIDDESC\n'
#> GCTP spheroid, use 20 for WRF-based modeling
icon_runtime += f'setenv IOAPI_ISPH 20\n'
#> turn on excess WRITE3 logging [ options: T | F ]
icon_runtime += f'setenv IOAPI_LOG_WRITE F\n'
#> support large timestep records (>2GB/timestep record) [ options: YES | NO ]
icon_runtime += f'setenv IOAPI_OFFSET_64 YES\n'
#> output file directory
        icon_runtime += f'set OUTDIR = {self.CMAQ_DATA}/{self.appl}/icon\n'
#> define the model execution id
icon_runtime += f'setenv EXECUTION_ID $EXEC\n'
utils.write_to_template(run_icon_path, icon_runtime, id='%RUNTIME%')
# Write input file info to the run script
icon_files = f' setenv SDATE {self.start_datetime.strftime("%Y%j")}\n'
icon_files += f' setenv STIME {self.start_datetime.strftime("%H%M%S")}\n'
icon_files += f'if ( $ICON_TYPE == regrid ) then\n'
icon_files += f' setenv CTM_CONC_1 {self.CMAQ_DATA}/{coarse_grid_appl}/output_{self.cctm_runid}/CCTM_CONC_{self.cctm_runid}_{self.start_datetime.strftime("%Y%m%d")}.nc\n'
icon_files += f' setenv MET_CRO_3D_CRS {self.CMAQ_DATA}/{coarse_grid_appl}/mcip/METCRO3D_{self.start_datetime.strftime("%y%m%d")}\n'
icon_files += f' setenv MET_CRO_3D_FIN {self.CMAQ_DATA}/{self.appl}/mcip/METCRO3D_{self.start_datetime.strftime("%y%m%d")}.nc\n'
icon_files += f' setenv INIT_CONC_1 "$OUTDIR/ICON_{self.icon_vrsn}_{self.appl}_{self.icon_type}_{self.start_datetime.strftime("%Y%m%d")} -v"\n'
icon_files += f'endif\n'
icon_files += f'if ( $ICON_TYPE == profile ) then\n'
icon_files += f' setenv IC_PROFILE $BLD/avprofile_cb6r3m_ae7_kmtbr_hemi2016_v53beta2_m3dry_col051_row068.csv\n'
icon_files += f' setenv MET_CRO_3D_FIN {self.CMAQ_DATA}/{self.appl}/mcip/METCRO3D_{self.start_datetime.strftime("%y%m%d")}.nc\n'
icon_files += f' setenv INIT_CONC_1 "$OUTDIR/ICON_{self.icon_vrsn}_{self.appl}_{self.icon_type}_{self.start_datetime.strftime("%Y%m%d")} -v"\n'
icon_files += f'endif\n'
utils.write_to_template(run_icon_path, icon_files, id='%INFILES%')
## RUN ICON
if not setup_only:
CMD_ICON = f'sbatch --requeue {run_icon_path}'
os.system(CMD_ICON)
# Sleep until the run_icon_{self.appl}.log file exists
while not os.path.exists(f'{self.ICON_SCRIPTS}/run_icon_{self.appl}.log'):
time.sleep(1)
# Begin ICON simulation clock
simstart = datetime.datetime.now()
if self.verbose:
print('Starting ICON at: ' + str(simstart))
sys.stdout.flush()
icon_sim = self.finish_check('icon')
while icon_sim != 'complete':
if icon_sim == 'failed':
return False
else:
time.sleep(2)
icon_sim = self.finish_check('icon')
elapsed = datetime.datetime.now() - simstart
if self.verbose:
print(f'ICON ran in: {utils.strfdelta(elapsed)}')
return True
def run_bcon(self, bcon_start_datetime=None, bcon_end_datetime=None, coarse_grid_appl='coarse',
run_hours=2, setup_only=False):
"""
Setup and run BCON, which produces boundary conditions for CMAQ.
Parameters
----------
:param bcon_start_datetime: string
Start date and time for BCON. Defaults to None in which case the BCON start
datetime will be assigned from the CMAQ start datetime.
:param bcon_end_datetime: string
End date and time for BCON. Defaults to None in which case the BCON end
datetime will be assigned from the CMAQ end datetime.
:param coarse_grid_appl: string
Application name for the coarse grid from which you are deriving boundary conditions.
:param run_hours: int
Number of hours to request from the scheduler.
        :param setup_only: bool
Option to setup the directories and write the scripts without running BCON.
"""
# Set the start and end dates
if bcon_start_datetime is None:
bcon_start_datetime = self.start_datetime
else:
bcon_start_datetime = utils.format_date(bcon_start_datetime)
if bcon_end_datetime is None:
bcon_end_datetime = self.end_datetime
else:
bcon_end_datetime = utils.format_date(bcon_end_datetime)
# Determine the length of the BCON run
bcon_delt = bcon_end_datetime - bcon_start_datetime
# Define the coarse grid runid
coarse_runid = f'{self.cctm_vrsn}_{self.compiler}{self.compiler_vrsn}_{coarse_grid_appl}'
## SETUP BCON
# Copy the template BCON run script to the scripts directory
run_bcon_path = f'{self.BCON_SCRIPTS}/run_bcon.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_run_bcon.csh', run_bcon_path)
os.system(cmd)
# Specify the BCON log
bcon_log_file = f'{self.BCON_SCRIPTS}/run_bcon_{self.appl}_{bcon_start_datetime.strftime("%Y%m%d")}.log'
# Write Slurm info
bcon_slurm = f'#SBATCH -J bcon_{self.appl} # Job name\n'
bcon_slurm += f'#SBATCH -o {bcon_log_file}\n'
bcon_slurm += f'#SBATCH --nodes=1 # Total number of nodes requested\n'
bcon_slurm += f'#SBATCH --ntasks=1 # Total number of tasks to be configured for.\n'
bcon_slurm += f'#SBATCH --tasks-per-node=1 # sets number of tasks to run on each node.\n'
bcon_slurm += f'#SBATCH --cpus-per-task=1 # sets number of cpus needed by each task.\n'
bcon_slurm += f'#SBATCH --get-user-env # tells sbatch to retrieve the users login environment.\n'
bcon_slurm += f'#SBATCH -t {run_hours}:00:00 # Run time (hh:mm:ss)\n'
bcon_slurm += f'#SBATCH --mem=20000M # memory required per node\n'
bcon_slurm += f'#SBATCH --partition=default_cpu # Which queue it should run on.\n'
utils.write_to_template(run_bcon_path, bcon_slurm, id='%SLURM%')
# Write BCON runtime info to the run script.
bcon_runtime = f'#> Source the config_cmaq file to set the run environment\n'
bcon_runtime += f'source {self.CMAQ_HOME}/config_cmaq.csh {self.compiler} {self.compiler_vrsn}\n'
bcon_runtime += f'#> Code Version\n'
bcon_runtime += f'set VRSN = {self.bcon_vrsn}\n'
bcon_runtime += f'#> Application Name\n'
bcon_runtime += f'set APPL = {self.appl}\n'
bcon_runtime += f'#> Boundary condition type [profile|regrid]\n'
        bcon_runtime += f'set BCON_TYPE = {self.bcon_type}\n'
bcon_runtime += f'#> check GRIDDESC file for GRID_NAME options\n'
bcon_runtime += f'setenv GRID_NAME {self.grid_name}\n'
bcon_runtime += f'#> grid description file\n'
bcon_runtime += f'setenv GRIDDESC {self.GRIDDESC}\n'
bcon_runtime += f'#> GCTP spheroid, use 20 for WRF-based modeling\n'
bcon_runtime += f'setenv IOAPI_ISPH 20\n'
bcon_runtime += f'#> turn on excess WRITE3 logging [ options: T | F ]\n'
bcon_runtime += f'setenv IOAPI_LOG_WRITE F\n'
bcon_runtime += f'#> support large timestep records (>2GB/timestep record) [ options: YES | NO ]\n'
bcon_runtime += f'setenv IOAPI_OFFSET_64 YES\n'
bcon_runtime += f'#> output file directory\n'
bcon_runtime += f'set OUTDIR = {self.CMAQ_DATA}/{self.appl}/bcon\n'
bcon_runtime += f'#> Set the build directory:\n'
bcon_runtime += f'set BLD = {self.CMAQ_HOME}/PREP/bcon/scripts/BLD_BCON_{self.bcon_vrsn}_{self.compiler}{self.compiler_vrsn}\n'
bcon_runtime += f'set EXEC = BCON_{self.bcon_vrsn}.exe\n'
bcon_runtime += f'#> define the model execution id\n'
bcon_runtime += f'setenv EXECUTION_ID $EXEC\n'
utils.write_to_template(run_bcon_path, bcon_runtime, id='%RUNTIME%')
# Write input file info to the run script
# bcon_files = f' setenv SDATE {bcon_start_datetime.strftime("%Y%j")}\n'
# bcon_files += f' setenv STIME {bcon_start_datetime.strftime("%H%M%S")}\n'
# bcon_files += f' setenv RUNLEN {utils.strfdelta(bcon_delt, fmt="{H:02}{M:02}{S:02}")}\n'
bcon_files = f' if ( $BCON_TYPE == regrid ) then\n'
bcon_files += f' setenv CTM_CONC_1 {self.CMAQ_DATA}/{coarse_grid_appl}/output_CCTM_{coarse_runid}/CCTM_CONC_{coarse_runid}_{bcon_start_datetime.strftime("%Y%m%d")}.nc\n'
bcon_files += f' setenv MET_CRO_3D_CRS {self.CMAQ_DATA}/{coarse_grid_appl}/mcip/METCRO3D_{bcon_start_datetime.strftime("%y%m%d")}.nc\n'
bcon_files += f' setenv MET_BDY_3D_FIN {self.CMAQ_DATA}/{self.appl}/mcip/METBDY3D_{bcon_start_datetime.strftime("%y%m%d")}.nc\n'
bcon_files += f' setenv BNDY_CONC_1 "$OUTDIR/BCON_{self.bcon_vrsn}_{self.appl}_{self.bcon_type}_{bcon_start_datetime.strftime("%Y%m%d")} -v"\n'
bcon_files += f' endif\n'
bcon_files += f' if ( $BCON_TYPE == profile ) then\n'
bcon_files += f' setenv BC_PROFILE $BLD/avprofile_cb6r3m_ae7_kmtbr_hemi2016_v53beta2_m3dry_col051_row068.csv\n'
bcon_files += f' setenv MET_BDY_3D_FIN {self.CMAQ_DATA}/{self.appl}/mcip/METBDY3D_{bcon_start_datetime.strftime("%y%m%d")}.nc\n'
bcon_files += f' setenv BNDY_CONC_1 "$OUTDIR/BCON_{self.bcon_vrsn}_{self.appl}_{self.bcon_type}_{bcon_start_datetime.strftime("%Y%m%d")} -v"\n'
bcon_files += f' endif\n'
utils.write_to_template(run_bcon_path, bcon_files, id='%INFILES%')
## RUN BCON
if not setup_only:
# Remove log from previous identical run
os.system(self.CMD_RM % (bcon_log_file))
# Submit BCON to the scheduler
CMD_BCON = f'sbatch --requeue {run_bcon_path}'
os.system(CMD_BCON)
# Begin BCON simulation clock
simstart = datetime.datetime.now()
# Sleep until the run_bcon_{self.appl}.log file exists
while not os.path.exists(bcon_log_file):
time.sleep(1)
if self.verbose:
print('Starting BCON at: ' + str(simstart))
sys.stdout.flush()
bcon_sim = self.finish_check('bcon', custom_log=bcon_log_file)
while bcon_sim != 'complete':
if bcon_sim == 'failed':
return False
else:
time.sleep(2)
bcon_sim = self.finish_check('bcon', custom_log=bcon_log_file)
elapsed = datetime.datetime.now() - simstart
if self.verbose:
print(f'BCON ran in: {utils.strfdelta(elapsed)}')
return True
def run_bcon_multiday(self, coarse_grid_appl='coarse', run_hours=2, setup_only=False):
"""
Run BCON over multiple days. Per CMAQ convention, BCON will run for the same length
as CCTM -- i.e., a single day.
Parameters
----------
:param coarse_grid_appl: string
Application name for the coarse grid from which you are deriving boundary conditions.
:param run_hours: int
Number of hours to request from the scheduler.
        :param setup_only: bool
Option to setup the directories and write the scripts without running BCON.
"""
# Loop over each day
for day_no in range(self.delt.days):
# Set the start datetime and end datetime for the day
bcon_start_datetime = self.start_datetime + datetime.timedelta(day_no)
bcon_end_datetime = self.start_datetime + datetime.timedelta(day_no + 1)
if self.verbose:
print(f'--> Working on BCON for {bcon_start_datetime}')
# run bcon for that day
self.run_bcon(bcon_start_datetime=bcon_start_datetime, bcon_end_datetime=bcon_end_datetime,
coarse_grid_appl=coarse_grid_appl, run_hours=run_hours, setup_only=setup_only)
def setup_inpdir(self, n_emis_gr=2, gr_emis_labs=['all', 'rwc'], n_emis_pt=9,
pt_emis_labs=['ptnonertac', 'ptertac', 'othpt', 'ptagfire', 'ptfire', 'ptfire_othna', 'pt_oilgas', 'cmv_c3_12', 'cmv_c1c2_12'],
stkgrps_daily=[False, False, False, True, True, True, False, False, False]):
"""
Links all the necessary files to the locations in INPDIR where CCTM expects to find them.
Parameters
----------
:param n_emis_gr: int
Number of gridded emissions sectors.
:param gr_emis_labs: list of strings
Labels for each of the gridded emissions sectors.
:param n_emis_pt: int
Number of point emissions sectors.
:param pt_emis_labs: list of strings
Labels for each of the point emissions sectors.
:param stkgrps_daily: list of bools
Boolean indicating if each point sector uses daily stack groups files.
For example, fire sectors use daily stack groups files.
"""
# Remove the existing input directory if it already exists and remake it
utils.remove_dir(self.CCTM_INPDIR)
utils.make_dirs(self.CCTM_INPDIR)
# Make a list of the start dates for date-specific inputs
start_datetimes_lst = [single_date for single_date in (self.start_datetime + datetime.timedelta(n) for n in range(self.delt.days + 1))]
# Make lists of representative days
# These are necessary because some of the point sectors use representative days
# Right now, this simply links the smoke merge dates, but it could actually use the representative days at some point in the future
utils.make_dirs(f'{self.CCTM_INPDIR}/emis')
cmd = 'echo "Starting to link files..."'
for date in start_datetimes_lst:
cmd = cmd + '; ' + self.CMD_LN % (f'{self.LOC_SMK_MERGE_DATES}/smk_merge_dates_{date.strftime("%Y%m")}*', f'{self.CCTM_INPDIR}/emis')
os.system(cmd)
mwdss_N_lst = utils.get_rep_dates(f'{self.LOC_SMK_MERGE_DATES}', start_datetimes_lst, date_type=' mwdss_N')
mwdss_Y_lst = utils.get_rep_dates(f'{self.LOC_SMK_MERGE_DATES}', start_datetimes_lst, date_type=' mwdss_Y')
# Link the GRIDDESC to $INPDIR
cmd = self.CMD_LN % (self.GRIDDESC, f'{self.CCTM_INPDIR}/')
cmd_gunzip = self.CMD_GUNZIP % (self.GRIDDESC)
# Link Boundary Conditions to $INPDIR/icbc
utils.make_dirs(self.ICBC)
for date in start_datetimes_lst:
local_bc_file = f'{self.LOC_BC}/*{date.strftime("%y%m%d")}'
cmd = cmd + '; ' + self.CMD_LN % (local_bc_file, f'{self.ICBC}/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_bc_file)
# Link Initial Conditions to self.CCTM_OUTDIR
utils.make_dirs(self.CCTM_OUTDIR)
yesterday = start_datetimes_lst[0] - datetime.timedelta(days=1)
local_ic_file = f'{self.LOC_IC}/CCTM_CGRID_*{yesterday.strftime("%Y%m%d")}.nc'
cmd = cmd + '; ' + self.CMD_LN % (local_ic_file, f'{self.CCTM_OUTDIR}/CCTM_CGRID_{self.cctm_runid}_{yesterday.strftime("%Y%m%d")}.nc')
local_init_medc_1_file = f'{self.LOC_IC}/CCTM_MEDIA_CONC_*{yesterday.strftime("%y%m%d")}.nc'
cmd = cmd + '; ' + self.CMD_LN % (local_init_medc_1_file, f'{self.CCTM_OUTDIR}/CCTM_MEDIA_CONC_{self.cctm_runid}_{yesterday.strftime("%Y%m%d")}.nc')
# Link gridded emissions to $INPDIR/emis/gridded_area
utils.make_dirs(self.CCTM_GRIDDED)
        for ii in range(1, n_emis_gr + 1):
            # Get the name of the directory where this gridded sector is stored
gr_emis_dir = self.dirpaths.get(f'LOC_GR_EMIS_{str(ii).zfill(3)}')
if self.verbose:
print(f'Linking gridded emissions from:\n{gr_emis_dir}')
for date in start_datetimes_lst:
local_gridded_file = f'{gr_emis_dir}/emis_mole_{gr_emis_labs[ii-1]}_{date.strftime("%Y%m%d")}*'
if self.verbose:
print(f'... Linking: {local_gridded_file}')
cmd = cmd + '; ' + self.CMD_LN % (local_gridded_file, f'{self.CCTM_GRIDDED}/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_gridded_file)
# Link point source emissions to $INPDIR/emis/inln_point
# and the associated stack groups to $INPDIR/emis/inln_point/stack_groups
utils.make_dirs(f'{self.CCTM_PT}/stack_groups')
        for ii in range(1, n_emis_pt + 1):
if self.verbose:
print(f'Linking the {pt_emis_labs[ii-1]} sector emissions')
for date in start_datetimes_lst:
# Link the day-dependent point sector emissions file
if pt_emis_labs[ii-1] == 'ptertac':
local_point_file = f'{self.LOC_ERTAC}/inln_mole_ptertac_{date.strftime("%Y%m%d")}*'
else:
local_point_file = f'{self.LOC_IN_PT}/{pt_emis_labs[ii-1]}/inln_mole_{pt_emis_labs[ii-1]}_{date.strftime("%Y%m%d")}*'
if self.verbose:
print(f'... Linking: {local_point_file}')
cmd = cmd + '; ' + self.CMD_LN % (local_point_file, f'{self.CCTM_PT}/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_point_file)
# Link the day-dependent stack groups file (e.g., for fire sectors)
if stkgrps_daily[ii-1]:
local_stkgrps_file = f'{self.LOC_IN_PT}/{pt_emis_labs[ii-1]}/stack_groups_{pt_emis_labs[ii-1]}_{date.strftime("%Y%m%d")}*'
cmd = cmd + '; ' + self.CMD_LN % (local_stkgrps_file, f'{self.CCTM_PT}/stack_groups/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_stkgrps_file)
# Link the day-independent stack groups file
if not stkgrps_daily[ii-1]:
if pt_emis_labs[ii-1] == 'ptertac':
local_stkgrps_file = f'{self.LOC_ERTAC}/stack_groups_ptertac_*'
else:
local_stkgrps_file = f'{self.LOC_IN_PT}/{pt_emis_labs[ii-1]}/stack_groups_{pt_emis_labs[ii-1]}_*'
cmd = cmd + '; ' + self.CMD_LN % (local_stkgrps_file, f'{self.CCTM_PT}/stack_groups/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_stkgrps_file)
# Link sector list to $INPDIR/emis
cmd = cmd + '; ' + self.CMD_LN % (f'{self.SECTORLIST}', f'{self.CCTM_INPDIR}/emis')
# Link files for emissions scaling and sea spray to $INPDIR/land
# NOTE: these could be made more general...
utils.make_dirs(f'{self.CCTM_LAND}/toCMAQ_festc1.4_epic')
for date in start_datetimes_lst:
local_festc_file = f'{self.LOC_LAND}/toCMAQ_festc1.4_epic/us1_2016_cmaq12km_time20*{date.strftime("%y%m%d")}*'
cmd = cmd + '; ' + self.CMD_LN % (local_festc_file, f'{self.CCTM_LAND}/toCMAQ_festc1.4_epic/')
cmd_gunzip = cmd_gunzip + ' >/dev/null 2>&1; ' + self.CMD_GUNZIP % (local_festc_file)
cmd = cmd + '; ' + self.CMD_LN % (f'{self.LOC_LAND}/toCMAQ_festc1.4_epic/us1_2016_cmaq12km_soil.12otc2.ncf', f'{self.CCTM_LAND}/toCMAQ_festc1.4_epic/')
cmd = cmd + '; ' + self.CMD_LN % (f'{self.LOC_LAND}/{self.filenames.get("OCEAN_1")}', f'{self.CCTM_LAND}/')
cmd = cmd + '; ' + self.CMD_LN % (f'{self.LOC_LAND}/beld41_feb2017_waterfix_envcan_12US2.12OTC2.ncf', f'{self.CCTM_LAND}/')
# Run the gunzip commands
cmd_gunzip += ' >/dev/null 2>&1'
os.system(cmd_gunzip)
# Run the link commands
os.system(cmd)
# Remove broken links from the input dir
os.system(f'find {self.CCTM_INPDIR} -xtype l -delete')
def run_cctm(self, n_emis_gr=2, gr_emis_labs=['all', 'rwc'], n_emis_pt=9,
pt_emis_labs=['ptnonertac', 'ptertac', 'othpt', 'ptagfire', 'ptfire', 'ptfire_othna', 'pt_oilgas', 'cmv_c3_12', 'cmv_c1c2_12'],
stkgrps_daily=[False, False, False, True, True, True, False, False, False],
ctm_abflux='Y',
stkcaseg = '12US1_2016fh_16j', stkcasee = '12US1_cmaq_cb6_2016fh_16j',
delete_existing_output='TRUE', new_sim='FALSE', tstep='010000',
cctm_hours=24, n_procs=16, gb_mem=50, run_hours=24, setup_only=False):
"""
Setup and run CCTM, CMAQ's chemical transport model.
Parameters
----------
:param n_emis_gr: int
Number of gridded emissions sectors.
:param gr_emis_labs: list of strings
Labels for each of the gridded emissions sectors.
:param n_emis_pt: int
Number of point emissions sectors.
:param pt_emis_labs: list of strings
Labels for each of the point emissions sectors.
:param stkgrps_daily: list of bools
Boolean indicating if each point sector uses daily stack groups files.
For example, fire sectors use daily stack groups files.
:param ctm_abflux: string
Turns on/off ammonia bi-directional flux for in-line deposition.
Options are [Y, N].
:param stkcaseg: string
Stack group version label.
:param stkcasee: string
Stack emission version label
:param delete_existing_output: string
If TRUE, any files or logs previously created by CCTM will be deleted.
If FALSE, previous files or logs will cause CCTM to fail. Options are
[TRUE, FALSE].
:param new_sim: string
Set to FALSE for model restart. Options are [TRUE, FALSE].
:param tstep: string
Output time step interval (HHMMSS). Defaults to 010000.
:param cctm_hours: int
Time duration for this run of CCTM in hours.
:param n_procs: int
Number of processors to request from the scheduler.
:param gb_mem: int
Number of GB of memory per node to request from the scheduler.
:param run_hours: int
Run length, in hours, to request from the scheduler.
:param setup_only: bool
Option to setup the directories and write the scripts without running CCTM.
"""
# Check that a consistent number of labels were passed
if len(gr_emis_labs) != n_emis_gr:
raise ValueError(f'n_emis_gr ({n_emis_gr}) should match the length of gr_emis_labs (len={len(gr_emis_labs)})')
if len(pt_emis_labs) != n_emis_pt:
raise ValueError(f'n_emis_pt ({n_emis_pt}) should match the length of pt_emis_labs (len={len(pt_emis_labs)})')
if len(stkgrps_daily) != n_emis_pt:
raise ValueError(f'n_emis_pt ({n_emis_pt}) should match the length of stkgrps_daily (len={len(stkgrps_daily)})')
## SETUP CCTM
# Copy the template CCTM run script to the scripts directory
run_cctm_path = f'{self.CCTM_SCRIPTS}/run_cctm_{self.appl}.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_run_cctm.csh', run_cctm_path)
os.system(cmd)
# Copy the template CCTM submission script to the scripts directory
submit_cctm_path = f'{self.CCTM_SCRIPTS}/submit_cctm.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_submit_cctm.csh', submit_cctm_path)
os.system(cmd)
# Setup the input directory using the setup_inpdir method
self.setup_inpdir(n_emis_gr=n_emis_gr, gr_emis_labs=gr_emis_labs,
n_emis_pt=n_emis_pt, pt_emis_labs=pt_emis_labs,stkgrps_daily=stkgrps_daily)
# Write CCTM setup options to the run script
cctm_runtime = f'#> Toggle Diagnostic Mode which will print verbose information to standard output\n'
cctm_runtime += f'setenv CTM_DIAG_LVL 0\n'
cctm_runtime += f'#> Source the config_cmaq file to set the run environment\n'
cctm_runtime += f'source {self.CMAQ_HOME}/config_cmaq.csh {self.compiler} {self.compiler_vrsn}\n'
cctm_runtime += f'#> Change back to the CCTM scripts directory\n'
cctm_runtime += f'cd {self.CCTM_SCRIPTS}\n'
cctm_runtime += f'#> Set General Parameters for Configuring the Simulation\n'
cctm_runtime += f'set VRSN = {self.cctm_vrsn} #> Code Version - note this must be updated if using ISAM or DDM\n'
cctm_runtime += f'set PROC = mpi #> serial or mpi\n'
cctm_runtime += f'set MECH = {self.chem_mech} #> Mechanism ID\n'
cctm_runtime += f'set APPL = {self.appl} #> Application Name (e.g. Gridname)\n\n'
cctm_runtime += f'#> Define RUNID as any combination of parameters above or others. By default,\n'
cctm_runtime += f'#> this information will be collected into this one string, $RUNID, for easy\n'
cctm_runtime += f'#> referencing in output binaries and log files as well as in other scripts.\n'
cctm_runtime += f'setenv RUNID {self.cctm_runid}\n\n'
cctm_runtime += f'#> Set Working, Input, and Output Directories\n'
cctm_runtime += f'setenv WORKDIR {self.CCTM_SCRIPTS} #> Working Directory. Where the runscript is.\n'
cctm_runtime += f'setenv OUTDIR {self.CCTM_OUTDIR} #> Output Directory\n'
cctm_runtime += f'setenv INPDIR {self.CCTM_INPDIR} #> Input Directory\n'
cctm_runtime += f'setenv GRIDDESC {self.GRIDDESC} #> grid description file\n'
cctm_runtime += f'setenv GRID_NAME {self.grid_name} #> check GRIDDESC file for GRID_NAME options\n\n'
cctm_runtime += f'#> Keep or Delete Existing Output Files\n'
cctm_runtime += f'set CLOBBER_DATA = {delete_existing_output}\n'
utils.write_to_template(run_cctm_path, cctm_runtime, id='%SETUP%')
# Write CCTM start, end, and timestepping options to the run script
cctm_time = f'#> Set Start and End Days for looping\n'
cctm_time += f'setenv NEW_START {new_sim} #> Set to FALSE for model restart\n'
cctm_time += f'set START_DATE = "{self.start_datetime.strftime("%Y-%m-%d")}" #> beginning date\n'
cctm_time += f'set END_DATE = "{self.end_datetime.strftime("%Y-%m-%d")}" #> ending date\n\n'
cctm_time += f'#> Set Timestepping Parameters\n'
cctm_time += f'set STTIME = {self.start_datetime.strftime("%H%M%S")} #> beginning GMT time (HHMMSS)\n'
cctm_time += f'set NSTEPS = {cctm_hours}0000 #> time duration (HHMMSS) for this run\n'
cctm_time += f'set TSTEP = {tstep} #> output time step interval (HHMMSS)\n'
utils.write_to_template(run_cctm_path, cctm_time, id='%TIME%')
# Control domain subsetting among processors -- these will always be closest to a square
if n_procs == 8:
cctm_proc = '@ NPCOL = 2; @ NPROW = 4'
elif n_procs == 12:
cctm_proc = '@ NPCOL = 3; @ NPROW = 4'
elif n_procs == 16:
cctm_proc = '@ NPCOL = 4; @ NPROW = 4'
elif n_procs == 24:
cctm_proc = '@ NPCOL = 4; @ NPROW = 6'
elif n_procs == 32:
cctm_proc = '@ NPCOL = 4; @ NPROW = 8'
elif n_procs == 48:
cctm_proc = '@ NPCOL = 6; @ NPROW = 8'
else:
print(f'No {n_procs} processor setup has been specified. Use [8, 12, 16, 24, 32, or 48].')
raise ValueError
utils.write_to_template(run_cctm_path, cctm_proc, id='%PROC%')
# Write CCTM physics information
# NOTE: at some point the number of physics options should be expanded.
cctm_physics = f'setenv CTM_ABFLUX {ctm_abflux} #> ammonia bi-directional flux for in-line deposition\n'
cctm_physics += f' #> velocities [ default: N ]\n'
utils.write_to_template(run_cctm_path, cctm_physics, id='%PHYSICS%')
# Write CCTM input input directory information
cctm_files = f'set ICpath = {self.CCTM_OUTDIR} #> initial conditions input directory\n'
cctm_files += f'set BCpath = {self.ICBC} #> boundary conditions input directory\n'
cctm_files += f'set IN_PTpath = {self.CCTM_PT} #> point source emissions input directory\n'
cctm_files += f'set IN_LTpath = $INPDIR/lightning #> lightning NOx input directory\n'
cctm_files += f'set METpath = {self.MCIP_OUT} #> meteorology input directory\n'
cctm_files += f'#set JVALpath = $INPDIR/jproc #> offline photolysis rate table directory\n'
cctm_files += f'set OMIpath = $BLD #> ozone column data for the photolysis model\n'
cctm_files += f'set LUpath = {self.CCTM_LAND} #> BELD landuse data for windblown dust model\n'
cctm_files += f'set SZpath = {self.CCTM_LAND} #> surf zone file for in-line seaspray emissions\n'
utils.write_to_template(run_cctm_path, cctm_files, id='%FILES%')
# Write CCTM IC and BC information
# NOTE: the two spaces at the beginning of each of these lines are necessary
# because this is all happening inside a loop in the csh script.
cctm_icbc = f' #> Initial conditions\n'
cctm_icbc += f' if ($NEW_START == true || $NEW_START == TRUE ) then\n'
        cctm_icbc += f'    setenv ICFILE ICON_{self.icon_vrsn}_{self.appl}_{self.icon_type}_{self.start_datetime.strftime("%Y%m%d")}\n'
cctm_icbc += f' setenv INIT_MEDC_1 notused\n'
cctm_icbc += f' else\n'
cctm_icbc += f' set ICpath = $OUTDIR\n'
cctm_icbc += ' setenv ICFILE CCTM_CGRID_${RUNID}_${YESTERDAY}.nc\n'
cctm_icbc += ' # setenv INIT_MEDC_1 $ICpath/CCTM_MEDIA_CONC_${RUNID}_${YESTERDAY}.nc\n'
cctm_icbc += f' setenv INIT_MEDC_1 notused\n'
cctm_icbc += f' setenv INITIAL_RUN N\n'
cctm_icbc += f' endif\n'
cctm_icbc += f' \n'
cctm_icbc += f' #> Boundary conditions\n'
if self.new_bcon:
cctm_icbc += f' set BCFILE = BCON_{self.bcon_vrsn}_{self.appl}_{self.bcon_type}_$YYYYMMDD\n'
else:
cctm_icbc += f' set BCFILE = {self.filenames.get("BCFILE")}\n'
utils.write_to_template(run_cctm_path, cctm_icbc, id='%ICBC%')
# Write CCTM ocean file information.
# NOTE: the two spaces at the beginning of each of these lines are necessary
# because this is all happening inside a loop in the csh script.
cctm_ocean = f' #> In-line sea spray emissions configuration\n'
cctm_ocean += f' setenv OCEAN_1 $SZpath/{self.filenames.get("OCEAN_1")} #> horizontal grid-dependent surf zone file\n'
utils.write_to_template(run_cctm_path, cctm_ocean, id='%OCEAN%')
# Write CCTM gridded emissions information
# NOTE: the two spaces at the beginning of each of these lines are necessary
# because this is all happening inside a loop in the csh script.
cctm_gr = f' #> Gridded Emissions Files\n'
cctm_gr += f' setenv N_EMIS_GR {n_emis_gr} #> Number of gridded emissions groups\n'
for ii in range(1, n_emis_gr + 1):
gr_emis_file = self.filenames.get(f'GR_EMIS_{str(ii).zfill(3)}')
cctm_gr += f' setenv GR_EMIS_{str(ii).zfill(3)} {self.CCTM_GRIDDED}/{gr_emis_file}\n'
cctm_gr += f' # Label each gridded emissions stream\n'
cctm_gr += f' setenv GR_EMIS_LAB_{str(ii).zfill(3)} {gr_emis_labs[ii-1]}\n'
cctm_gr += f' # Do not allow CMAQ to use gridded source files with dates that do not match the model date\n'
cctm_gr += f' setenv GR_EM_SYM_DATE_{str(ii).zfill(3)} F\n'
utils.write_to_template(run_cctm_path, cctm_gr, id='%GRIDDED%')
# Write CCTM point source emissions information
# NOTE: the two spaces at the beginning of each of these lines are necessary
# because this is all happening inside a loop in the csh script.
cctm_pt = f' #> In-line point emissions configuration\n'
cctm_pt += f' setenv N_EMIS_PT {n_emis_pt} #> Number of elevated source groups\n'
cctm_pt += f' set STKCASEG = {stkcaseg} # Stack Group Version Label\n'
cctm_pt += f' set STKCASEE = {stkcasee} # Stack Emission Version Label\n'
for ii in range(1, n_emis_pt + 1):
stk_emis_file = self.filenames.get(f'STK_EMIS_{str(ii).zfill(3)}')
stk_grps_file = self.filenames.get(f'STK_GRPS_{str(ii).zfill(3)}')
cctm_pt += f' # Time-Independent Stack Parameters for Inline Point Sources\n'
cctm_pt += f' setenv STK_GRPS_{str(ii).zfill(3)} $IN_PTpath/stack_groups/{stk_grps_file}\n'
cctm_pt += f' # Time-Dependent Emissions file\n'
cctm_pt += f' setenv STK_EMIS_{str(ii).zfill(3)} $IN_PTpath/{stk_emis_file}\n'
cctm_pt += f' # Label Each Emissions Stream\n'
cctm_pt += f' setenv STK_EMIS_LAB_{str(ii).zfill(3)} {pt_emis_labs[ii-1]}\n'
cctm_pt += f' # Allow CMAQ to Use Point Source files with dates that do not match the internal model date\n'
cctm_pt += f' setenv STK_EM_SYM_DATE_{str(ii).zfill(3)} T\n'
utils.write_to_template(run_cctm_path, cctm_pt, id='%POINT%')
# Write CCTM submission script
cctm_sub = f'#!/bin/csh\n'
cctm_sub += f'\n'
cctm_sub += f'#SBATCH -J cctm_{self.appl} # Job name\n'
# cctm_sub += f'#SBATCH -o {self.CCTM_SCRIPTS}/out.cctm_{self.appl} # Name of stdout output file\n'
cctm_sub += f'#SBATCH -o /dev/null # Name of stdout output file\n'
# cctm_sub += f'#SBATCH -e {self.CCTM_SCRIPTS}/errors.cctm_{self.appl} # Name of stderr output file\n'
cctm_sub += f'#SBATCH -e /dev/null # Name of stderr output file\n'
cctm_sub += f'#SBATCH --ntasks={n_procs} # Total number of tasks to be configured for.\n'
cctm_sub += f'#SBATCH --tasks-per-node={n_procs} # sets number of tasks to run on each node.\n'
cctm_sub += f'#SBATCH --cpus-per-task=1 # sets number of cpus needed by each task (if task is "make -j3" number should be 3).\n'
cctm_sub += f'#SBATCH --get-user-env # tells sbatch to retrieve the users login environment. \n'
cctm_sub += f'#SBATCH -t {run_hours}:00:00 # Run time (hh:mm:ss)\n'
cctm_sub += f'#SBATCH --mem={gb_mem}000M # memory required per node\n'
cctm_sub += f'#SBATCH --partition=default_cpu # Which queue it should run on.\n'
cctm_sub += f'\n'
cctm_sub += f'{self.CCTM_SCRIPTS}/run_cctm_{self.appl}.csh >&! {self.CCTM_SCRIPTS}/cctm_{self.appl}.log\n'
utils.write_to_template(submit_cctm_path, cctm_sub, id='%ALL%')
if self.verbose:
print('Done writing CCTM scripts!\n')
## RUN CCTM
if not setup_only:
# Remove logs from previous runs
os.system(self.CMD_RM % (f'{self.CCTM_SCRIPTS}/CTM_LOG*{self.appl}*'))
# Submit CCTM to Slurm
CMD_CCTM = f'sbatch --requeue {submit_cctm_path}'
os.system(CMD_CCTM)
# Give the log a few seconds to reset itself.
time.sleep(10)
# Sleep until the run_cctm_{self.appl}.log file exists
while not os.path.exists(f'{self.CCTM_SCRIPTS}/cctm_{self.appl}.log'):
time.sleep(1)
# Begin CCTM simulation clock
simstart = datetime.datetime.now()
if self.verbose:
print('Starting CCTM at: ' + str(simstart))
sys.stdout.flush()
cctm_sim = self.finish_check('cctm')
while cctm_sim != 'complete':
if cctm_sim == 'failed':
return False
else:
time.sleep(2)
cctm_sim = self.finish_check('cctm')
elapsed = datetime.datetime.now() - simstart
if self.verbose:
print(f'CCTM ran in: {utils.strfdelta(elapsed)}')
sys.stdout.flush()
return True
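    # Hedged usage sketch (not part of the original source): assuming `sim` is an
    # instance of this CMAQ-driver class (whose definition sits above this excerpt),
    # a run might be launched and monitored like this:
    #
    #     ok = sim.run_cctm(n_emis_gr=2, gr_emis_labs=['all', 'rwc'],
    #                       n_procs=16, gb_mem=50, run_hours=24,
    #                       setup_only=False)
    #     if not ok:
    #         print('CCTM failed -- check the CTM_LOG files in CCTM_SCRIPTS')
    #
    # Setting setup_only=True writes run_cctm_{appl}.csh and submit_cctm.csh without
    # submitting anything to Slurm, which is useful for inspecting the generated
    # scripts first.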
def run_combine(self, run_hours=2, mem_per_node=20, combine_vrsn='v532'):
"""
Setup and run the combine program. Combine is a CMAQ post-processing program that formats
the CCTM output data in a more convenient way.
Parameters
----------
:param run_hours: int
Run length, in hours, to request from the scheduler.
:param mem_per_node: int
Number of GB of memory per node to request from the scheduler.
:param combine_vrsn: string
Version number of combine for identifying executables.
"""
## Setup Combine
# Copy the template combine run script to the scripts directory
run_combine_path = f'{self.COMBINE_SCRIPTS}/run_combine.csh'
cmd = self.CMD_CP % (f'{self.DIR_TEMPLATES}/template_run_combine.csh', run_combine_path)
os.system(cmd)
# Write slurm info
combine_slrum = f'#SBATCH -J combine_{self.appl} # Job name\n'
combine_slrum += f'#SBATCH -o {self.COMBINE_SCRIPTS}/out_combine_{self.appl}.log # Name of stdout output file\n'
combine_slrum += f'#SBATCH --ntasks=1 # Total number of tasks\n'
combine_slrum += f'#SBATCH --tasks-per-node=1 # sets number of tasks to run on each node\n'
combine_slrum += f'#SBATCH --cpus-per-task=1 # sets number of cpus needed by each task\n'
combine_slrum += f'#SBATCH --get-user-env # tells sbatch to retrieve the users login environment\n'
combine_slrum += f'#SBATCH -t {run_hours}:00:00 # Run time (hh:mm:ss)\n'
combine_slrum += f'#SBATCH --mem={mem_per_node}000M # memory required per node\n'
combine_slrum += f'#SBATCH --partition=default_cpu # Which queue it should run on\n'
utils.write_to_template(run_combine_path, combine_slrum, id='%SLURM%')
# Write runtime info
combine_runtime = f'#> Choose compiler and set up CMAQ environment with correct\n'
combine_runtime += f'#> libraries using config.cmaq. Options: intel | gcc | pgi\n'
combine_runtime += f'setenv compiler {self.compiler}\n'
combine_runtime += f'setenv compilerVrsn {self.compiler_vrsn}\n'
combine_runtime += f'\n'
combine_runtime += f'#> Source the config.cmaq file to set the build environment\n'
combine_runtime += f'source {self.CMAQ_HOME}/config_cmaq.csh {self.compiler} {self.compiler_vrsn}\n'
combine_runtime += f'\n'
combine_runtime += f'#> Set General Parameters for Configuring the Simulation\n'
combine_runtime += f'set VRSN = {self.cctm_vrsn} #> Code Version\n'
combine_runtime += f'set PROC = mpi #> serial or mpi\n'
combine_runtime += f'set MECH = {self.chem_mech} #> Mechanism ID\n'
combine_runtime += f'set APPL = {self.appl} #> Application Name (e.g. Gridname)\n'
combine_runtime += f'\n'
combine_runtime += f'#> Define RUNID as any combination of parameters above or others. By default,\n'
combine_runtime += f'#> this information will be collected into this one string, $RUNID, for easy\n'
combine_runtime += f'#> referencing in output binaries and log files as well as in other scripts.\n'
combine_runtime += f'set RUNID = {self.cctm_runid}\n'
combine_runtime += f'\n'
combine_runtime += f'#> Set the build directory if this was not set above\n'
combine_runtime += f'#> (this is where the CMAQ executable is located by default).\n'
combine_runtime += f'if ( ! $?BINDIR ) then\n'
combine_runtime += f'set BINDIR = {self.COMBINE_SCRIPTS}/BLD_combine_{combine_vrsn}_{self.compiler}{self.compiler_vrsn}\n'
combine_runtime += f'endif\n'
combine_runtime += f'\n'
combine_runtime += f'#> Set the name of the executable.\n'
combine_runtime += f'set EXEC = combine_{combine_vrsn}.exe\n'
combine_runtime += f'\n'
combine_runtime += f'#> Set location of CMAQ repo. This will be used to point to the correct species definition files.\n'
combine_runtime += f'set REPO_HOME = {self.CMAQ_HOME}\n'
combine_runtime += f'\n'
combine_runtime += f'#> Set working, input and output directories\n'
combine_runtime += f'set METDIR = {self.MCIP_OUT} #> Met Output Directory\n'
combine_runtime += f'set CCTMOUTDIR = {self.CCTM_OUTDIR} #> CCTM Output Directory\n'
combine_runtime += f'set POSTDIR = {self.POST} #> Location where combine file will be written\n'
utils.write_to_template(run_combine_path, combine_runtime, id='%RUNTIME%')
combine_setup = f'#> Set Start and End Days for looping\n'
combine_setup += f'set START_DATE = "{self.start_datetime.strftime("%Y-%m-%d")}" #> beginning date\n'
combine_setup += f'set END_DATE = "{self.end_datetime.strftime("%Y-%m-%d")}" #> ending date\n'
combine_setup += f'\n'
combine_setup += f'#> Set location of species definition files for concentration and deposition species.\n'
combine_setup += f'setenv SPEC_CONC {self.COMBINE_SCRIPTS}/spec_def_files/SpecDef_{self.chem_mech}.txt\n'
combine_setup += f'setenv SPEC_DEP {self.COMBINE_SCRIPTS}/spec_def_files/SpecDef_Dep_{self.chem_mech}.txt\n'
utils.write_to_template(run_combine_path, combine_setup, id='%SETUP%')
# Submit combine to slurm
CMD_COMBINE = f'sbatch --requeue {run_combine_path}'
os.system(CMD_COMBINE)
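    # Hedged usage sketch (not part of the original source): run_combine only needs
    # the scheduler sizing and the combine executable version, e.g.
    #
    #     sim.run_combine(run_hours=2, mem_per_node=20, combine_vrsn='v532')
    #
    # where `sim` is an instance of this class; the combined files are written under
    # self.POST by the generated run_combine.csh script.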
def finish_check(self, program, custom_log=None):
"""
Check if a specified CMAQ subprogram has finished running.
Parameters
----------
:param program: string
CMAQ subprogram name whose status is to be checked.
:param custom_log: string
Path to a log file with a name that's different than those assigned
in the class methods. Defaults to None.
:return: string 'running' or 'complete' or 'failed'
Run status of the program.
"""
if program == 'mcip':
if custom_log is not None:
msg = utils.read_last(custom_log, n_lines=1)
else:
msg = utils.read_last(f'{self.MCIP_SCRIPTS}/run_mcip_{self.mcip_appl}.log', n_lines=1)
complete = 'NORMAL TERMINATION' in msg
failed = 'Error running mcip' in msg
elif program == 'icon':
if custom_log is not None:
msg = utils.read_last(custom_log, n_lines=20)
else:
msg = utils.read_last(f'{self.ICON_SCRIPTS}/run_icon_{self.appl}.log', n_lines=10)
complete = '>>----> Program ICON completed successfully <----<<' in msg
failed = '*** ERROR ABORT' in msg
elif program == 'bcon':
if custom_log is not None:
msg = utils.read_last(custom_log, n_lines=10)
else:
msg = utils.read_last(f'{self.BCON_SCRIPTS}/run_bcon_{self.appl}.log', n_lines=10)
complete = '>>----> Program BCON completed successfully <----<<' in msg
failed = '*** ERROR ABORT' in msg
elif program == 'cctm':
if custom_log is not None:
msg = utils.read_last(custom_log, n_lines=40)
else:
msg = utils.read_last(f'{self.CCTM_SCRIPTS}/cctm_{self.appl}.log', n_lines=40)
complete = '|>--- PROGRAM COMPLETED SUCCESSFULLY ---<|' in msg
failed = 'Runscript Detected an Error' in msg
else:
complete = False
failed = False
if failed:
print(f'\nCMAQPyError: {program} has failed. Last message was:\n{msg}')
return 'failed'
elif complete:
return 'complete'
else:
return 'running'
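    # Hedged usage sketch (not part of the original source): finish_check is meant to
    # be polled, as run_cctm does above. A minimal wait loop for any subprogram could
    # look like:
    #
    #     status = sim.finish_check('bcon')
    #     while status == 'running':
    #         time.sleep(2)
    #         status = sim.finish_check('bcon')
    #     if status == 'failed':
    #         raise RuntimeError('BCON failed -- see the run_bcon log')
    #
    # The custom_log argument points the check at a log file whose name differs from
    # the defaults assumed in this method.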
```
|
{
"source": "jeffreysward/enspp",
"score": 3
}
|
#### File: enspp/enspp/util.py
```python
from rpy2.robjects.packages import STAP
import numpy as np
import pandas as pd
import xarray as xr
def _get_r_module(path, module_name):
# Read the file with the R code
with open(path, 'r') as f:
string = f.read()
# Parse the package using STAP
module = STAP(string, module_name)
return module
def _attach_obs(wrfda, obsda, location='north', height=100):
# Get data for only north buoy at 100m and format
if 'location' in wrfda.dims:
wrfda_loc = wrfda.sel(location=location, height=height)
else:
wrfda_loc = wrfda
obsda_loc = obsda.sel(Time=slice(wrfda.Time[0], wrfda.Time[-1]), location=location, height=height)
# Put the observations and the ensemble forecast into the same DataFrame (north buoy)
data_loc = xr.concat([obsda_loc, wrfda_loc], 'model')
return data_loc
def _xr2pd(da, drop_na=True):
if drop_na:
df = da.T.to_pandas().dropna().reset_index()
else:
df = da.T.to_pandas().reset_index()
return df
def _fxda(quantiles, wrfda_loc):
try:
lon_vals = wrfda_loc.XLONG.values
except:
lon_vals = 999
try:
lat_vals = wrfda_loc.XLAT.values
except:
lat_vals = 999
# Format the forecast and obs variables into xarray DataSets.
fx = xr.DataArray(
data=[quantiles],
dims=[
"Start_time",
"Step_index",
"Percentile"
],
coords=dict(
Start_time=wrfda_loc.Time[0:1].values,
Step_index=np.arange(0, len(wrfda_loc.Time), 1),
Percentile=np.arange(1, quantiles.shape[1] + 1, 1),
XLONG=lon_vals,
XLAT=lat_vals
),
attrs=dict(
description="Wind Speed 100m",
units="m s-1",
),
)
return fx
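# Hedged usage sketch (not part of the original source): `quantiles` is assumed to be
# a 2-D array of shape (n_steps, n_percentiles) produced by the post-processing step,
# and `wrfda_loc` a single-location DataArray whose Time coordinate supplies the
# forecast start time and step count, e.g.
#
#     quantiles = np.random.rand(len(wrfda_loc.Time), 99)  # placeholder values
#     fx = _fxda(quantiles, wrfda_loc)
#     fx.sel(Percentile=50)  # median wind speed forecast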
def _fxda_grid(quantiles, wspdgrid_unstacked, wrfda):
# Use the multi-index from the unstacked version of the gridded DataFrame to create the new DataFrame
fx = pd.DataFrame(quantiles, index=wspdgrid_unstacked.index, columns=np.array([10, 50, 90]))
# Stack the percentiles into the multi-index
fx = fx.stack(dropna=False)
# Convert the pd.DataFrame to a xr.DataArray
fx = fx.to_xarray()
# Rename the dimension that pandas created when stacking the percentiles
fx = fx.rename({'level_3':'Percentile'})
# Add the latitude and longitude information back to the DataArray
fx = fx.assign_coords(XLONG=(['south_north', 'west_east'], wrfda.XLONG.values))
fx = fx.assign_coords(XLAT=(['south_north', 'west_east'], wrfda.XLAT.values))
return fx
def _get_ensdirs(date_string):
"""
Get directory names for all ensemble members.
"""
ensdir1 = f'{date_string}_28mp4lw4sw2lsm5pbl1cu/' # Lee 2017
ensdir2 = f'{date_string}_8mp1lw1sw2lsm1pbl1cu/' # Draxl 2014a
ensdir3 = f'{date_string}_8mp1lw1sw2lsm2pbl1cu/' # Draxl 2014b
ensdir4 = f'{date_string}_8mp1lw1sw2lsm2pbl3cu/' # Vernon 2018
ensdir5 = f'{date_string}_8mp4lw2sw2lsm5pbl3cu/' # Optis 2021
return [ensdir1, ensdir2, ensdir3, ensdir4, ensdir5]
# return [ensdir2, ensdir4, ensdir5]
```
#### File: enspp/examples/ngr_code_jeff.py
```python
import math
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm
def post_process_2018(reforecast_data_all, obs_all, pp_days):
days = 25 # last 25 days for the given time (e.g., 12z last 25 days)
weights_all = np.full((11, pp_days), np.nan)
x0 = np.asarray([15.0, 0.10, 0.15, 0.23, 0.30, 0.10, 0.15, 0.23,
0.30, 0.10, 0.15, 0.23, 6.5, 0.3])
CRPS_pp_all = np.full((pp_days), np.nan)
CRPS_reg_all = np.full((pp_days), np.nan)
i_pp_all = np.full((len(obs_all)), np.nan)
i_reg_all = np.full((len(obs_all)), np.nan)
pred_mean_all = np.full((pp_days), np.nan)
me_all = np.full((pp_days), np.nan)
stan_dev_all = np.full((pp_days),np.nan)
obs_pp = np.full((pp_days),np.nan)
cdf_all_pp = np.full((pp_days), np.nan)
cdf_all_reg = np.full((pp_days), np.nan)
idx_nan = np.argwhere(np.isnan(obs_all))
obs_all = np.delete(obs_all, idx_nan)
reforecast_data_all = np.delete(reforecast_data_all, idx_nan, axis=0)
# Constraint
con = {'type': 'ineq',
'fun': lambda x: x[1:14]}
cons = [con]
if len(obs_all) < 54:
low_me_avg = np.nan
high_me_avg = np.nan
CRPS_pp_all = np.nan
CRPS_reg_all = np.nan
i_pp_all = np.nan
i_reg_all = np.nan
me_all = np.nan
else:
for w in range(0, pp_days): # this is the total number of days we are pping
#print w
start = w
end = w + days
reforecast_data = reforecast_data_all[start:end+1, :]
obs = obs_all[start:end+1] # this was +2 in the original (?)
S_squared = np.var(reforecast_data, axis=1) # variance is s^2
def crps(x):
''' Define the CRPS function '''
crps_all = 0
for j in range(days):
standard_dev = math.sqrt(math.sqrt(x[12]**2) + math.sqrt(x[13]**2) * S_squared[j])
Z = (obs[j] - (x[0] + x[1] * reforecast_data[j,0] +\
x[2] * reforecast_data[j,1] +\
x[3] * reforecast_data[j,2] +\
x[4] * reforecast_data[j,3] +\
x[5] * reforecast_data[j,4] +\
x[6] * reforecast_data[j,5] +\
x[7] * reforecast_data[j,6] +\
x[8] * reforecast_data[j,7] +\
x[9] * reforecast_data[j,8] +\
x[10] * reforecast_data[j,9] +\
x[11] * reforecast_data[j,10]))\
/ standard_dev
crps_one = standard_dev * (Z * (2 * norm.cdf(Z) - 1) + 2 * norm.pdf(Z) - \
(1 / math.sqrt(math.pi)))
crps_all = crps_all + crps_one
crps_mean = crps_all / float(days)
return crps_mean
res = minimize(crps, x0, method='SLSQP', constraints=cons)
new_x = np.around(np.asarray(res.x), decimals=3) # new coefficients
            print(' new_x: ', new_x)
weights_all[:,w] = new_x[1:12]
# Calculate variables for probabilistic forecasts
# post-processed (pp)
standard_dev_pp = math.sqrt(math.sqrt(new_x[12]**2) +\
math.sqrt(new_x[13]**2) * S_squared[-1]) # this was 7 (?)
#stan_dev_all[w] = standard_dev_pp
pred_mean_pp = new_x[0] + new_x[1] * reforecast_data[-1,0] +\
new_x[2] * reforecast_data[-1,1] +\
new_x[3] * reforecast_data[-1,2] +\
new_x[4] * reforecast_data[-1,3] +\
new_x[5] * reforecast_data[-1,4] +\
new_x[6] * reforecast_data[-1,5] +\
new_x[7] * reforecast_data[-1,6] +\
new_x[8] * reforecast_data[-1,7] +\
new_x[9] * reforecast_data[-1,8] +\
new_x[10] * reforecast_data[-1,9] +\
new_x[11] * reforecast_data[-1,10]
pred_mean_all[w] = pred_mean_pp
obs_pp[w] = obs[-1]
# regular (reg), i.e. no post-processing
standard_dev_reg = np.sqrt(np.var(reforecast_data[-1,:]))
mean_reg = np.mean(reforecast_data[-1,:])
            print(' mean regular: ', mean_reg)
            print(' mean pp: ', pred_mean_pp)
            print(' obs: ', obs[-1])
# Calculate ME
me = pred_mean_pp - obs[-1]
me_all[w] = me
## Calculate CRPS for both scenarios
Z_pp = (obs[-1] - pred_mean_pp) / standard_dev_pp
cdf_all_pp[w] = Z_pp
CRPS_pp = standard_dev_pp * (Z_pp * (2 * norm.cdf(Z_pp) - 1) + 2 * norm.pdf(Z_pp) -
(1 / math.sqrt(math.pi)))
CRPS_pp_all[w] = CRPS_pp
Z_reg = (obs[-1] - mean_reg) / standard_dev_reg
cdf_all_reg[w] = Z_reg
CRPS_reg = standard_dev_reg * (Z_reg * (2 * norm.cdf(Z_reg) - 1) + 2 * norm.pdf(Z_reg) -
(1 / math.sqrt(math.pi)))
CRPS_reg_all[w] = CRPS_reg
## Calculate ignorance score for both scenarios
i_pp = np.log(2 * math.pi * standard_dev_pp**2) / 2 + (obs[-1] - pred_mean_pp)**2 / \
(2 * standard_dev_pp**2)
i_pp_all[w] = i_pp
i_reg = np.log(2 * math.pi * standard_dev_reg**2) / 2 + (obs[-1] - mean_reg)**2 / \
(2 * standard_dev_reg**2)
i_reg_all[w] = i_reg
# Locate 5 warmest and coldest days
obs_new = obs_all[days:days+pp_days]
highest = np.argpartition(obs_new, -5)[-5:]
lowest = np.argpartition(obs_new, 5)[:5]
high_me = me_all[highest]
low_me = me_all[lowest]
high_me_avg = np.mean(high_me)
low_me_avg = np.mean(low_me)
i_pp_all = np.nanmedian(i_pp_all)
i_reg_all = np.nanmedian(i_reg_all)
me_all = np.nanmean(me_all)
return CRPS_pp_all, CRPS_reg_all, me_all, weights_all, high_me_avg, low_me_avg, pred_mean_all, obs_pp, cdf_all_pp, cdf_all_reg
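# Hedged sanity check (not part of the original source): the closed-form expression
# used inside crps() is the CRPS of a normal distribution,
# CRPS = sigma * (Z*(2*Phi(Z) - 1) + 2*phi(Z) - 1/sqrt(pi)) with Z = (y - mu)/sigma.
# When the observation equals the forecast mean (Z = 0) this reduces to the known
# value sigma*(sqrt(2) - 1)/sqrt(pi), which the snippet below verifies numerically.
if __name__ == '__main__':
    sigma = 2.0
    z = 0.0
    crps_formula = sigma * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / math.sqrt(math.pi))
    crps_reference = sigma * (math.sqrt(2) - 1) / math.sqrt(math.pi)
    print('closed-form CRPS at the mean:', crps_formula)
    print('reference value             :', crps_reference)
    assert abs(crps_formula - crps_reference) < 1e-12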
```
|
{
"source": "jeffreysward/pvlib-python",
"score": 2
}
|
#### File: pvlib-python/pvlib/wrfcast.py
```python
import datetime
from netCDF4 import num2date, Dataset
import numpy as np
import pandas as pd
import xarray as xr
from requests.exceptions import HTTPError
from xml.etree.ElementTree import ParseError
from pvlib.location import Location
from pvlib.irradiance import campbell_norman, get_extra_radiation, disc
from pvlib.irradiance import _liujordan
import wrf
from siphon.catalog import TDSCatalog
from siphon.ncss import NCSS
import warnings
# warnings.warn(
# 'The forecast module algorithms and features are highly experimental. '
# 'The API may change, the functionality may be consolidated into an io '
# 'module, or the module may be separated into its own package.')
class ForecastModel(object):
"""
An object for querying and holding forecast model information for
use within the pvlib library.
Simplifies use of siphon library on a THREDDS server.
Parameters
----------
model_type: string
UNIDATA category in which the model is located.
model_name: string
Name of the UNIDATA forecast model.
set_type: string
Model dataset type.
Attributes
----------
access_url: string
URL specifying the dataset from data will be retrieved.
base_tds_url : string
The top level server address
catalog_url : string
The url path of the catalog to parse.
data: pd.DataFrame
Data returned from the query.
data_format: string
Format of the forecast data being requested from UNIDATA.
dataset: Dataset
Object containing information used to access forecast data.
dataframe_variables: list
Model variables that are present in the data.
datasets_list: list
List of all available datasets.
fm_models: Dataset
TDSCatalog object containing all available
forecast models from UNIDATA.
fm_models_list: list
List of all available forecast models from UNIDATA.
latitude: list
A list of floats containing latitude values.
location: Location
A pvlib Location object containing geographic quantities.
longitude: list
A list of floats containing longitude values.
lbox: boolean
Indicates the use of a location bounding box.
ncss: NCSS object
        NCSS object used to query the THREDDS Data Server.
model_name: string
Name of the UNIDATA forecast model.
model: Dataset
A dictionary of Dataset object, whose keys are the name of the
dataset's name.
model_url: string
The url path of the dataset to parse.
modelvariables: list
Common variable names that correspond to queryvariables.
query: NCSS query object
NCSS object used to complete the forecast data retrival.
queryvariables: list
Variables that are used to query the THREDDS Data Server.
time: DatetimeIndex
Time range.
variables: dict
Defines the variables to obtain from the weather
model and how they should be renamed to common variable names.
units: dict
Dictionary containing the units of the standard variables
and the model specific variables.
vert_level: float or integer
Vertical altitude for query data.
"""
data_format = 'netcdf'
units = {
'temp_air': 'C',
'wind_speed': 'm/s',
'ghi': 'W/m^2',
'ghi_raw': 'W/m^2',
'dni': 'W/m^2',
'dhi': 'W/m^2',
'total_clouds': '%',
'low_clouds': '%',
'mid_clouds': '%',
'high_clouds': '%'}
def __init__(self, model_type, model_name, set_type, vert_level=None):
self.model_type = model_type
self.model_name = model_name
self.set_type = set_type
self.connected = False
self.vert_level = vert_level
def __repr__(self):
return '{}, {}'.format(self.model_name, self.set_type)
def get_data(self, latitude, longitude, start, end,
vert_level=None, query_variables=None,
close_netcdf_data=True, **kwargs):
"""
Submits a query to the UNIDATA servers using Siphon NCSS and
converts the netcdf data to a pandas DataFrame.
Parameters
----------
latitude: float
The latitude value.
longitude: float
The longitude value.
start: datetime or timestamp
The start time.
end: datetime or timestamp
The end time.
vert_level: None, float or integer, default None
Vertical altitude of interest.
query_variables: None or list, default None
If None, uses self.variables.
close_netcdf_data: bool, default True
Controls if the temporary netcdf data file should be closed.
Set to False to access the raw data.
**kwargs:
Additional keyword arguments are silently ignored.
Returns
-------
forecast_data : DataFrame
column names are the weather model's variable names.
"""
if not self.connected:
self.connect_to_catalog()
if vert_level is not None:
self.vert_level = vert_level
if query_variables is None:
self.query_variables = list(self.variables.values())
else:
self.query_variables = query_variables
self.latitude = latitude
self.longitude = longitude
self.set_query_latlon() # modifies self.query
self.set_location(start, latitude, longitude)
self.start = start
self.end = end
self.query.time_range(self.start, self.end)
if self.vert_level is not None:
self.query.vertical_level(self.vert_level)
self.query.variables(*self.query_variables)
self.query.accept(self.data_format)
self.netcdf_data = self.ncss.get_data(self.query)
# might be better to go to xarray here so that we can handle
# higher dimensional data for more advanced applications
self.data = self._netcdf2pandas(self.netcdf_data, self.query_variables,
self.start, self.end)
if close_netcdf_data:
self.netcdf_data.close()
return self.data
def get_wrf_data(self, wrfout_file, start, end,
vert_level=None, query_variables=None,
close_netcdf_data=True, **kwargs):
"""
Finds a local wrfout file and
converts the netcdf data to a xarray Dataset.
Parameters
----------
wrfout_file: str
Location of wrfout NetCDF file.
vert_level: None, float or integer, default None
Vertical altitude of interest.
query_variables: None or list, default None
If None, uses self.variables.
close_netcdf_data: bool, default True
Controls if the temporary netcdf data file should be closed.
Set to False to access the raw data.
**kwargs:
Additional keyword arguments are silently ignored.
Returns
-------
        forecast_data : xarray.Dataset
            Dataset whose variables are the requested wrfout variable names.
"""
if vert_level is not None:
self.vert_level = vert_level
if query_variables is None:
self.query_variables = list(self.variables.values())
else:
self.query_variables = query_variables
self.netcdf_data = Dataset(wrfout_file)
self.data = self._wrf2xarray(self.netcdf_data, self.query_variables,
start, end)
if close_netcdf_data:
self.netcdf_data.close()
return self.data
def process_data(self, data, **kwargs):
"""
Defines the steps needed to convert raw forecast data
into processed forecast data. Most forecast models implement
their own version of this method which also call this one.
Parameters
----------
data: DataFrame
Raw forecast data
Returns
-------
data: DataFrame
Processed forecast data.
"""
data = self.rename(data)
return data
def get_processed_data(self, *args, **kwargs):
"""
Get and process forecast data.
Parameters
----------
*args: positional arguments
Passed to get_data
**kwargs: keyword arguments
Passed to get_data and process_data
Returns
-------
data: DataFrame
Processed forecast data
"""
if self.model_name == 'WRF Forecast':
return self.process_data(self.get_wrf_data(*args, **kwargs), **kwargs)
else:
return self.process_data(self.get_data(*args, **kwargs), **kwargs)
def rename(self, data, variables=None):
"""
Renames the columns according the variable mapping.
Parameters
----------
data: DataFrame
variables: None or dict, default None
If None, uses self.variables
Returns
-------
data: DataFrame
Renamed data.
"""
if variables is None:
variables = self.variables
return data.rename(columns={y: x for x, y in variables.items()})
def _wrf2xarray(self, netcdf_data, query_variables, start, end):
"""
Transforms data from netcdf to xarray Dataset.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query, or from your local forecast.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
xarray.Dataset
"""
first = True
for key in query_variables:
var = wrf.getvar(netcdf_data, key, timeidx=wrf.ALL_TIMES)
if first:
data = var
first = False
else:
with xr.set_options(keep_attrs=True):
try:
data = xr.merge([data, var])
except ValueError:
data = data.drop_vars('Time')
data = xr.merge([data, var])
# Get global attributes from the NetCDF Dataset
wrfattrs_names = netcdf_data.ncattrs()
wrfattrs = wrf.extract_global_attrs(netcdf_data, wrfattrs_names)
data = data.assign_attrs(wrfattrs)
# Fix a bug in how wrfout data is read in -- attributes must be strings to be written to NetCDF
for var in data.data_vars:
try:
data[var].attrs['projection'] = str(data[var].attrs['projection'])
except KeyError:
pass
# Fix another bug that creates a conflict in the 'coordinates' attribute
for var in data.data_vars:
try:
del data[var].attrs['coordinates']
except KeyError:
pass
# Slice the dataset to only include specified time interval
data = data.sel(Time=slice(start, end))
return data
def _netcdf2pandas(self, netcdf_data, query_variables, start, end):
"""
Transforms data from netcdf to pandas DataFrame.
Parameters
----------
data: netcdf
Data returned from UNIDATA NCSS query, or from your local forecast.
query_variables: list
The variables requested.
start: Timestamp
The start time
end: Timestamp
The end time
Returns
-------
pd.DataFrame
"""
# set self.time
try:
time_var = 'time'
self.set_time(netcdf_data.variables[time_var])
except KeyError:
# which model does this dumb thing?
time_var = 'time1'
self.set_time(netcdf_data.variables[time_var])
data_dict = {}
for key, data in netcdf_data.variables.items():
# if accounts for possibility of extra variable returned
if key not in query_variables:
continue
squeezed = data[:].squeeze()
if squeezed.ndim == 1:
data_dict[key] = squeezed
elif squeezed.ndim == 2:
for num, data_level in enumerate(squeezed.T):
data_dict[key + '_' + str(num)] = data_level
else:
raise ValueError('cannot parse ndim > 2')
data = pd.DataFrame(data_dict, index=self.time)
# sometimes data is returned as hours since T0
# where T0 is before start. Then the hours between
# T0 and start are added *after* end. So sort and slice
# to remove the garbage
data = data.sort_index().loc[start:end]
return data
def set_time(self, time):
'''
Converts time data into a pandas date object.
Parameters
----------
time: netcdf
Contains time information.
Returns
-------
pandas.DatetimeIndex
'''
times = num2date(time[:].squeeze(), time.units)
self.time = pd.DatetimeIndex(pd.Series(times), tz=self.location.tz)
def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35,
**kwargs):
"""
Convert cloud cover to GHI using a linear relationship.
0% cloud cover returns ghi_clear.
100% cloud cover returns offset*ghi_clear.
Parameters
----------
cloud_cover: numeric
Cloud cover in %.
ghi_clear: numeric
GHI under clear sky conditions.
offset: numeric, default 35
Determines the minimum GHI.
kwargs
Not used.
Returns
-------
ghi: numeric
Estimated GHI.
References
----------
Larson et. al. "Day-ahead forecasting of solar power output from
photovoltaic plants in the American Southwest" Renewable Energy
91, 11-20 (2016).
"""
offset = offset / 100.
cloud_cover = cloud_cover / 100.
ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear
return ghi
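    # Hedged worked example (not part of the original source): with the default
    # offset of 35, a clear-sky GHI of 800 W/m^2 and 50% cloud cover give
    #     ghi = (0.35 + 0.65 * (1 - 0.5)) * 800 = 0.675 * 800 = 540 W/m^2,
    # so the estimated GHI never drops below 35% of the clear-sky value even at
    # 100% cloud cover.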
def cloud_cover_to_irradiance_clearsky_scaling(self, cloud_cover,
method='linear',
**kwargs):
"""
Estimates irradiance from cloud cover in the following steps:
1. Determine clear sky GHI using Ineichen model and
climatological turbidity.
2. Estimate cloudy sky GHI using a function of
cloud_cover e.g.
:py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
3. Estimate cloudy sky DNI using the DISC model.
4. Calculate DHI from DNI and GHI.
Parameters
----------
cloud_cover : Series
Cloud cover in %.
method : str, default 'linear'
Method for converting cloud cover to GHI.
'linear' is currently the only option.
**kwargs
Passed to the method that does the conversion
Returns
-------
irrads : DataFrame
Estimated GHI, DNI, and DHI.
"""
solpos = self.location.get_solarposition(cloud_cover.index)
cs = self.location.get_clearsky(cloud_cover.index, model='ineichen',
solar_position=solpos)
method = method.lower()
if method == 'linear':
ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
**kwargs)
else:
raise ValueError('invalid method argument')
dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
return irrads
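    # Hedged usage sketch (not part of the original source): this method needs
    # self.location to be populated (set_location is called in the THREDDS get_data
    # path) and a tz-aware cloud cover Series in percent, e.g.
    #
    #     times = pd.date_range('2020-08-01', periods=24, freq='H', tz='UTC')
    #     cloud_cover = pd.Series(50.0, index=times)
    #     irrads = fm.cloud_cover_to_irradiance_clearsky_scaling(cloud_cover)
    #     irrads[['ghi', 'dni', 'dhi']]
    #
    # where `fm` is a ForecastModel (or subclass) instance with a valid location.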
def cloud_cover_to_transmittance_linear(self, cloud_cover, offset=0.75,
**kwargs):
"""
Convert cloud cover to atmospheric transmittance using a linear
model.
0% cloud cover returns offset.
100% cloud cover returns 0.
Parameters
----------
cloud_cover : numeric
Cloud cover in %.
offset : numeric, default 0.75
Determines the maximum transmittance.
kwargs
Not used.
Returns
-------
        transmittance : numeric
            Estimated atmospheric transmittance, between 0 and offset.
"""
transmittance = ((100.0 - cloud_cover) / 100.0) * offset
return transmittance
def cloud_cover_to_irradiance_liujordan(self, cloud_cover, **kwargs):
"""
Estimates irradiance from cloud cover in the following steps:
1. Determine transmittance using a function of cloud cover e.g.
:py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear`
2. Calculate GHI, DNI, DHI using the
:py:func:`pvlib.irradiance.liujordan` model
Parameters
----------
cloud_cover : Series
Returns
-------
irradiance : DataFrame
Columns include ghi, dni, dhi
"""
# in principle, get_solarposition could use the forecast
# pressure, temp, etc., but the cloud cover forecast is not
# accurate enough to justify using these minor corrections
solar_position = self.location.get_solarposition(cloud_cover.index)
dni_extra = get_extra_radiation(cloud_cover.index)
airmass = self.location.get_airmass(cloud_cover.index)
transmittance = self.cloud_cover_to_transmittance_linear(cloud_cover,
**kwargs)
        irrads = _liujordan(solar_position['apparent_zenith'],
                            transmittance, airmass['airmass_absolute'],
                            dni_extra=dni_extra)
irrads = irrads.fillna(0)
return irrads
def cloud_cover_to_irradiance(self, cloud_cover, how='clearsky_scaling',
**kwargs):
"""
Convert cloud cover to irradiance. A wrapper method.
Parameters
----------
cloud_cover : Series
how : str, default 'clearsky_scaling'
Selects the method for conversion. Can be one of
clearsky_scaling or liujordan.
**kwargs
Passed to the selected method.
Returns
-------
irradiance : DataFrame
Columns include ghi, dni, dhi
"""
how = how.lower()
if how == 'clearsky_scaling':
irrads = self.cloud_cover_to_irradiance_clearsky_scaling(
cloud_cover, **kwargs)
elif how == 'liujordan':
irrads = self.cloud_cover_to_irradiance_liujordan(
cloud_cover, **kwargs)
else:
raise ValueError('invalid how argument')
return irrads
def dni_and_dhi_to_ghi(self, dni, dhi, cos_zenith, **kwargs):
"""
Calculates global horizontal irradiance.
Parameters
----------
dni : Series
Direct normal irradiance in W m-2.
dhi : Series
Diffuse normal irradiance in W m-2.
cos_zenith : Series
Cosine of the solar zenith angle (dimensionless).
**kwargs
Not used
Returns
-------
ghi : Series (but maybe should be DataFrame)
Global horizontal irradiance in W m-2.
"""
ghi = dhi + dni * cos_zenith
return ghi
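    # Hedged worked example (not part of the original source): for DNI = 600 W/m^2,
    # DHI = 100 W/m^2 and a solar zenith angle of 60 degrees (cos_zenith = 0.5),
    #     ghi = 100 + 600 * 0.5 = 400 W/m^2.
    # This is the standard closure relation GHI = DHI + DNI * cos(zenith).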
def kelvin_to_celsius(self, temperature):
"""
Converts Kelvin to celsius.
Parameters
----------
temperature: numeric
Returns
-------
temperature: numeric
"""
return temperature - 273.15
def uv_to_speed(self, data):
"""
Computes wind speed from wind components.
Parameters
----------
data : DataFrame
Must contain the columns 'wind_speed_u' and 'wind_speed_v'.
Returns
-------
wind_speed : Series
"""
wind_speed = np.sqrt(data['wind_speed_u']**2 + data['wind_speed_v']**2)
return wind_speed
def gust_to_speed(self, data, scaling=1/1.4):
"""
Computes standard wind speed from gust.
Very approximate and location dependent.
Parameters
----------
data : DataFrame
Must contain the column 'wind_speed_gust'.
Returns
-------
wind_speed : Series
"""
wind_speed = data['wind_speed_gust'] * scaling
return wind_speed
class WRF(ForecastModel):
"""
Subclass of the ForecastModel class representing your own
WRF forecast model.
Parameters
----------
set_type: string, default 'best'
Not used
Attributes
----------
dataframe_variables: list
Common variables present in the final set of data.
model: string
Name of the UNIDATA forecast model.
model_type: string
UNIDATA category in which the model is located.
variables: dict
Defines the variables to obtain from the weather
model and how they should be renamed to common variable names.
units: dict
Dictionary containing the units of the standard variables
and the model specific variables.
"""
def __init__(self, set_type='best'):
model_type = 'Forecast Model Data'
model_name = 'WRF Forecast'
self.variables = {
'temp_air': 'T2', # TEMP at 2 M
'wind_speed_u': 'U10', # U at 10 M
'wind_speed_v': 'V10', # V at 10 M
'total_clouds': 'CLDFRA', # CLOUD FRACTION
'cos_zenith': 'COSZEN', # Cos of solar zenith angle
'dni': 'SWDDNI', # Shortwave surface downward direct normal irradiance
'dhi': 'SWDDIF', # Shortwave surface downward diffuse irradiance
}
self.output_variables = [
'temp_air',
'wind_speed',
'ghi',
'dni',
'dhi'
]
super().__init__(model_type, model_name, set_type, vert_level=None)
def get_data(self, wrfout_dir, wrfout_file, start=None, end=None,
query_variables=None, **kwargs):
if query_variables is None:
self.query_variables = list(self.variables.values())
else:
self.query_variables = query_variables
# Absolute path to wrfout data file
datapath = wrfout_dir + wrfout_file
# Read in the wrfout file using the netCDF4.Dataset method
# (You could probably also do this with an xarray method)
netcdf_data = Dataset(datapath)
# Create an xarray.Dataset from the wrf qurery_variables.
data = self._wrf2xarray(netcdf_data, self.query_variables)
        # Slice the wrfout data if start and end times are specified
        if start is not None and end is not None:
self.start = pd.Timestamp(start)
self.end = pd.Timestamp(end)
data = data.sel(Time=slice(self.start, self.end))
return data
def process_data(self, data, **kwargs):
"""
Defines the steps needed to convert raw forecast data
into processed forecast data.
Parameters
----------
data: xarray.Dataset
Raw forecast data
Returns
-------
data: DataFrame
Processed forecast data.
"""
# Rename the variables
# (we have to invert them first to maintain the input format)
data = xr.Dataset.rename(data, {y: x for x, y in self.variables.items()})
# Calculate other quantities
data['temp_air'] = self.kelvin_to_celsius(data['temp_air'])
data['wind_speed'] = self.uv_to_speed(data)
data['ghi'] = self.dni_and_dhi_to_ghi(data['dni'], data['dhi'], data['cos_zenith'])
# Drop unnecessary coordinate
data = xr.Dataset.reset_coords(data, ['XTIME'], drop=True)
return data[self.output_variables]
def get_wspd_wdir(self, netcdf_data, key):
"""
        Formats the wind speed and wind direction so they can be merged into
        an xarray Dataset with all the other variables extracted using getvar.
        :param netcdf_data: netCDF4.Dataset returned from your WRF forecast.
        :param key: variable name to extract, either 'wspd' or 'wdir'.
        :return: xarray.DataArray with the conflicting 'wspd_wdir' coordinate dropped.
"""
var = wrf.getvar(netcdf_data, key, wrf.ALL_TIMES)
var = xr.DataArray.reset_coords(var, ['wspd_wdir'], drop=True)
var.name = key
return var
def _wrf2xarray(self, netcdf_data, query_variables):
"""
Gets data from the netcdf wrfout file and uses wrf-python
to create an xarray Dataset.
Parameters
----------
netcdf_data: netcdf
Data returned from your WRF forecast.
query_variables: list
The variables requested.
Returns
-------
xarray.Dataset
"""
first = True
for key in query_variables:
if key in ['wspd', 'wdir']:
var = self.get_wspd_wdir(netcdf_data, key)
else:
var = wrf.getvar(netcdf_data, key, timeidx=wrf.ALL_TIMES)
if first:
data = var
first = False
else:
with xr.set_options(keep_attrs=True):
try:
data = xr.merge([data, var])
except ValueError:
data = data.drop_vars('Time')
data = xr.merge([data, var])
# Get global attributes from the NetCDF Dataset
wrfattrs_names = netcdf_data.ncattrs()
wrfattrs = wrf.extract_global_attrs(netcdf_data, wrfattrs_names)
data = data.assign_attrs(wrfattrs)
# Fix a bug in how wrfout data is read in -- attributes must be strings to be written to NetCDF
for var in data.data_vars:
try:
data[var].attrs['projection'] = str(data[var].attrs['projection'])
except KeyError:
pass
# Fix another bug that creates a conflict in the 'coordinates' attribute
for var in data.data_vars:
try:
del data[var].attrs['coordinates']
except KeyError:
pass
return data
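# Hedged end-to-end usage sketch (not part of the original source): the WRF subclass
# overrides get_data to read a local wrfout file, so the most direct workflow in this
# excerpt is get_data followed by process_data. The paths below are hypothetical:
#
#     fm = WRF()
#     raw = fm.get_data('/path/to/wrfout/dir/',            # hypothetical wrfout_dir
#                       'wrfout_d01_2020-08-01_00:00:00',  # hypothetical wrfout_file
#                       start='2020-08-01', end='2020-08-02')
#     forecast = fm.process_data(raw)
#     forecast[['ghi', 'dni', 'dhi']]                      # processed irradiance
#
# Note that get_processed_data as written routes WRF through the base-class
# get_wrf_data signature, so calling get_data/process_data directly is the safer
# path here.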
```
|