metadata (dict) | text (string, lengths 60 – 3.49M)
---|---
{
"source": "jimkon/adaptive-discretization",
"score": 3
} |
#### File: adaptive-discretization/adiscr/ntree.py
```python
import numpy as np
import math
import types
from adiscr.node import *
from adiscr.tree_vis import *
def actions_per_level(level, dims):
return 2**(level*dims)
def compute_level(n, dims):
actions = [1]
level = 0
while actions[level] < n:
level += 1
actions.append(actions[level-1]+actions_per_level(level, dims))
if actions[level] == n:
return level
else:
return level-1
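# Worked example (added for clarity): for dims=2 the cumulative node counts per level
# are 1, 1+2**2=5, 5+2**4=21, 21+2**6=85, ...  so compute_level(21, 2) == 2, while
# compute_level(20, 2) == 1 because 20 falls strictly between two full-level sizes.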
class Tree:
def __init__(self, dims, size, error_function='direct'):
"""
dims: integer > 0
size: integer > 0
error_function: ['direct', 'sqrt', 'square', 'cubic'] or a lambda function
"""
assert dims > 0, "dims is < 1"
assert size > 0, "size is < 1"
assert (error_function in ['direct', 'sqrt', 'square', 'cubic']) or isinstance(
error_function, types.FunctionType), "error_function has to be one of ['direct', 'sqrt', 'square', 'cubic'] or a function"
self._dimensions = int(dims)
self._size = int(size)
Node._init_branch_matrix(self._dimensions)
if error_function == 'direct':
self._error_function = lambda d: d
elif error_function == 'sqrt':
self._error_function = lambda d: d**0.5
elif error_function == 'square':
self._error_function = lambda d: d**2
elif error_function == 'cubic':
self._error_function = lambda d: d**3
else:
self._error_function = error_function
self._branch_factor = self._dimensions * 2
root = Node(None, None, self._error_function, dims)
self._nodes = [root]
self._root = root
init_level = compute_level(size, self._dimensions)
for i in range(init_level):
self.add_layer()
self._total_distance = 0
self._total_distance_count = 0
def search_nearest_node(self, point, increase=True):
point = self.correct_point(point.flatten())
node, dist = self._root.search(point, increase)
self._total_distance += dist
self._total_distance_count += 1
return node
def update(self):
_points_before = np.array(self.get_points())
to_prune = self._prune_prospectives()
pruned = 0
for node in to_prune:
node.delete()
pruned += 1
excess = self.get_current_size() - self.get_size()
expanded = self._expand_usefull_nodes(pruned - excess)
self._refresh_nodes()
self._reset_values()
_points_after = np.array(self.get_points())
if pruned == expanded:
for i in range(len(_points_after)):
if np.linalg.norm(_points_before[i] - _points_after[i]) > 0:
return True
return False
else:
return True
def feed(self, samples):
for sample in samples:
self.search_nearest_node(sample)
def feed_and_update(self, samples):
self.feed(samples)
return self.update()
def adapt_to_samples(self, samples, max_iterations=10):
print("Adaption begun,", len(samples), "samples, max iterations", max_iterations)
count = 0
flag = True
while flag and count < max_iterations:
flag = self.feed_and_update(samples)
print("Iteration", count, ", adapted:", not flag)
count += 1
return not flag
def _prune_prospectives(self):
nodes = self.get_prunable_nodes()
mean_value = self.get_mean_value()
result = []
for node in nodes:
estimated_future_value = node.get_value() + node.get_value_increase_if_cut()
if(estimated_future_value < mean_value):
result.append(node)
return result
def _expand_usefull_nodes(self, n):
nodes = sorted(self.get_nodes(recalculate=True), key=lambda node: node.get_value())
suggestions = list(node.suggest_for_expand() for node in nodes)
count_expantions = 0
i = len(nodes)
while count_expantions < n and i > 0:
i -= 1
if(nodes[i].get_value() == 0):
continue
to_expand = suggestions[i]
new_nodes = []
for suggestion in to_expand:
max_new_nodes = n - count_expantions - len(new_nodes)
new_nodes.extend(suggestion.expand(
nodes[i].get_location(), new_nodes_limit=max_new_nodes))
self._nodes.extend(new_nodes)
count_expantions += len(new_nodes)
return count_expantions
def get_node(self, index):
node = self.get_nodes()[index]
return node
def recursive_traversal(self, func=(lambda node: node),
traverse_cond_func=(lambda node: True),
collect_cond_func=(lambda node: True)):
res = []
self._root.recursive_collection(res, func, traverse_cond_func, collect_cond_func)
return res
def get_values(self):
return self.recursive_traversal(func=(lambda node: node.get_value()))
def get_total_value(self):
return np.sum(self.get_values())
def get_prunable_nodes(self):
return self.recursive_traversal(collect_cond_func=(lambda node: node.is_leaf()))
def get_expendable_nodes(self):
return self.recursive_traversal(collect_cond_func=(lambda node: node.is_expandable()))
def get_mean_error(self):
if self._total_distance_count == 0:
return 0
return np.sum(self.get_values()) / self._total_distance_count
def get_mean_value(self):
return np.sum(self.get_values()) / self.get_current_size()
def _get_max_mean_distance(self):
return 1 / (4 * self.get_current_size())
def _reset_values(self):
self.recursive_traversal(func=lambda node: node.reset_value())
self._total_distance = 0
self._total_distance_count = 0
def add_layer(self):
to_expand = self.get_expendable_nodes()
for node in to_expand:
new_nodes = node.expand()
self._nodes.extend(new_nodes)
def get_nodes(self, recalculate=False):
if recalculate:
self._nodes = self.recursive_traversal()
return self._nodes
def _refresh_nodes(self):
self._nodes = self.recursive_traversal()
def get_points(self):
return np.array(list(node.get_location() for node in self.get_nodes()))
def get_current_size(self):
return len(self._nodes)
def get_size(self):
return self._size
def print_all_nodes(self):
nodes = self._nodes
print('tree contains', len(nodes), 'nodes')
for node in nodes:
print(node)
def plot(self, red_levels=False, save=False, filename='', plot_density_flag=False):
plot(self, save=save, red_levels=red_levels,
path=filename, plot_density_flag=plot_density_flag)
@staticmethod
def correct_point(point):
new_point = []
for c in point:
if c > 1:
new_point.append(1)
elif c < 0:
new_point.append(0)
else:
new_point.append(c)
return new_point
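# Illustrative usage sketch (not part of the original module; assumes the adiscr
# package and its Node/tree_vis helpers are importable):
#
#     tree = Tree(dims=2, size=64)            # adaptive discretization of [0, 1]^2
#     samples = np.random.rand(1000, 2)       # samples inside the unit square
#     tree.adapt_to_samples(samples)          # feed + update until stable or max_iterations
#     points = tree.get_points()              # current discretization points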
``` |
{
"source": "jimkon/Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces",
"score": 3
} |
#### File: Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces/src/action_space.py
```python
import numpy as np
import itertools
import pyflann
import matplotlib.pyplot as plt
from util.data_process import plot_3d_points
"""
This class represents a n-dimensional unit cube with a specific number of points embeded.
Points are distributed uniformly in the initialization. A search can be made using the
search_point function that returns the k (given) nearest neighbors of the input point.
"""
class Space:
def __init__(self, low, high, points):
self._low = np.array(low)
self._high = np.array(high)
self._range = self._high - self._low
self._dimensions = len(low)
self.__space = init_uniform_space([0] * self._dimensions,
[1] * self._dimensions,
points)
self._flann = pyflann.FLANN()
self.rebuild_flann()
def rebuild_flann(self):
self._index = self._flann.build_index(self.__space, algorithm='kdtree')
def search_point(self, point, k):
p_in = self.import_point(point)
search_res, _ = self._flann.nn_index(p_in, k)
knns = self.__space[search_res]
p_out = []
for p in knns:
p_out.append(self.export_point(p))
if k == 1:
p_out = [p_out]
return np.array(p_out)
def import_point(self, point):
return (point - self._low) / self._range
def export_point(self, point):
return self._low + point * self._range
def get_space(self):
return self.__space
def shape(self):
return self.__space.shape
def get_number_of_actions(self):
return self.shape()[0]
def plot_space(self, additional_points=None):
dims = self._dimensions
if dims > 3:
print(
'Cannot plot a {}-dimensional space. Max 3 dimensions'.format(dims))
return
space = self.get_space()
if additional_points is not None:
for i in additional_points:
space = np.append(space, additional_points, axis=0)
if dims == 1:
for x in space:
plt.plot([x], [0], 'o')
plt.show()
elif dims == 2:
for x, y in space:
plt.plot([x], [y], 'o')
plt.show()
else:
plot_3d_points(space)
class Discrete_space(Space):
"""
Discrete action space with n actions (the integers in the range [0, n))
0, 1, 2, ..., n-2, n-1
"""
def __init__(self, n): # n: the number of the discrete actions
super().__init__([0], [n - 1], n)
def export_point(self, point):
return super().export_point(point).astype(int)
def init_uniform_space(low, high, points):
dims = len(low)
points_in_each_axis = round(points**(1 / dims))
axis = []
for i in range(dims):
axis.append(list(np.linspace(low[i], high[i], points_in_each_axis)))
space = []
for _ in itertools.product(*axis):
space.append(list(_))
return np.array(space)
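# Worked example (added for clarity): init_uniform_space([0, 0], [1, 1], 9) uses
# round(9 ** (1 / 2)) = 3 points per axis, i.e. the 3x3 grid {0, 0.5, 1} x {0, 0.5, 1}.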
```
#### File: src/ddpg/actor_net_bn.py
```python
import tensorflow as tf
import math
from batch_norm import Batch_norm as batch_norm  # batch_norm.py defines the Batch_norm class instantiated below
import numpy as np
LEARNING_RATE = 0.0001
TAU = 0.001
BATCH_SIZE = 64
N_HIDDEN_1 = 400
N_HIDDEN_2 = 300
class ActorNet_bn:
""" Actor Network Model with Batch Normalization of DDPG Algorithm """
def __init__(self, num_states, num_actions):
tf.reset_default_graph()
self.g = tf.Graph()
with self.g.as_default():
self.sess = tf.InteractiveSession()
# actor network model parameters:
self.actor_state_in = tf.placeholder("float", [None, num_states])
self.W1_a = tf.Variable(tf.random_uniform(
[num_states, N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
self.B1_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
self.W2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1, N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
self.B2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
self.W3_a = tf.Variable(tf.random_uniform([N_HIDDEN_2, num_actions], -0.003, 0.003))
self.B3_a = tf.Variable(tf.random_uniform([num_actions], -0.003, 0.003))
self.is_training = tf.placeholder(tf.bool, [])
self.H1_t = tf.matmul(self.actor_state_in, self.W1_a)
self.H1_a_bn = batch_norm(self.H1_t, N_HIDDEN_1, self.is_training, self.sess)
self.H1_a = tf.nn.softplus(self.H1_a_bn.bnorm) + self.B1_a
self.H2_t = tf.matmul(self.H1_a, self.W2_a)
self.H2_a_bn = batch_norm(self.H2_t, N_HIDDEN_2, self.is_training, self.sess)
self.H2_a = tf.nn.tanh(self.H2_a_bn.bnorm) + self.B2_a
self.actor_model = tf.matmul(self.H2_a, self.W3_a) + self.B3_a
# target actor network model parameters:
self.t_actor_state_in = tf.placeholder("float", [None, num_states])
self.t_W1_a = tf.Variable(tf.random_uniform(
[num_states, N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
self.t_B1_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
self.t_W2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1, N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
self.t_B2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
self.t_W3_a = tf.Variable(tf.random_uniform([N_HIDDEN_2, num_actions], -0.003, 0.003))
self.t_B3_a = tf.Variable(tf.random_uniform([num_actions], -0.003, 0.003))
self.t_is_training = tf.placeholder(tf.bool, [])
self.t_H1_t = tf.matmul(self.t_actor_state_in, self.t_W1_a)
self.t_H1_a_bn = batch_norm(self.t_H1_t, N_HIDDEN_1,
self.t_is_training, self.sess, self.H1_a_bn)
self.t_H1_a = tf.nn.softplus(self.t_H1_a_bn.bnorm) + self.t_B1_a
self.t_H2_t = tf.matmul(self.t_H1_a, self.t_W2_a)
self.t_H2_a_bn = batch_norm(self.t_H2_t, N_HIDDEN_2,
self.t_is_training, self.sess, self.H2_a_bn)
self.t_H2_a = tf.nn.tanh(self.t_H2_a_bn.bnorm) + self.t_B2_a
self.t_actor_model = tf.matmul(self.t_H2_a, self.t_W3_a) + self.t_B3_a
# cost of actor network:
# gets input from action_gradient computed in critic network file
self.q_gradient_input = tf.placeholder("float", [None, num_actions])
self.actor_parameters = [self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a,
self.B3_a, self.H1_a_bn.scale, self.H1_a_bn.beta, self.H2_a_bn.scale, self.H2_a_bn.beta]
# /BATCH_SIZE) changed -self.q_gradient to -
self.parameters_gradients = tf.gradients(
self.actor_model, self.actor_parameters, -self.q_gradient_input)
self.optimizer = tf.train.AdamOptimizer(
learning_rate=LEARNING_RATE, epsilon=1e-08).apply_gradients(zip(self.parameters_gradients, self.actor_parameters))
# initialize all tensor variable parameters:
self.sess.run(tf.initialize_all_variables())
# To make sure actor and target have same intial parmameters copy the parameters:
# copy target parameters
self.sess.run([
self.t_W1_a.assign(self.W1_a),
self.t_B1_a.assign(self.B1_a),
self.t_W2_a.assign(self.W2_a),
self.t_B2_a.assign(self.B2_a),
self.t_W3_a.assign(self.W3_a),
self.t_B3_a.assign(self.B3_a)])
def evaluate_actor(self, state_t):
return self.sess.run(self.actor_model, feed_dict={self.actor_state_in: state_t, self.is_training: False})
def evaluate_target_actor(self, state_t_1):
return self.sess.run(self.t_actor_model, feed_dict={self.t_actor_state_in: state_t_1, self.t_is_training: False})
def train_actor(self, actor_state_in, q_gradient_input):
self.sess.run([self.optimizer, self.H1_a_bn.train_mean, self.H1_a_bn.train_var, self.H2_a_bn.train_mean, self.H2_a_bn.train_var, self.t_H1_a_bn.train_mean, self.t_H1_a_bn.train_var, self.t_H2_a_bn.train_mean,
self.t_H2_a_bn.train_var], feed_dict={self.actor_state_in: actor_state_in, self.t_actor_state_in: actor_state_in, self.q_gradient_input: q_gradient_input, self.is_training: True, self.t_is_training: True})
def update_target_actor(self):
self.sess.run([
self.t_W1_a.assign(TAU * self.W1_a + (1 - TAU) * self.t_W1_a),
self.t_B1_a.assign(TAU * self.B1_a + (1 - TAU) * self.t_B1_a),
self.t_W2_a.assign(TAU * self.W2_a + (1 - TAU) * self.t_W2_a),
self.t_B2_a.assign(TAU * self.B2_a + (1 - TAU) * self.t_B2_a),
self.t_W3_a.assign(TAU * self.W3_a + (1 - TAU) * self.t_W3_a),
self.t_B3_a.assign(TAU * self.B3_a + (1 - TAU) * self.t_B3_a),
self.t_H1_a_bn.updateTarget,
self.t_H2_a_bn.updateTarget,
])
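# Note (added for clarity): update_target_actor applies the standard DDPG soft update
# theta_target <- TAU * theta + (1 - TAU) * theta_target to every weight and bias, and
# the batch-norm scale/beta pairs follow through the updateTarget op built in batch_norm.py.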
```
#### File: src/ddpg/batch_norm.py
```python
import tensorflow as tf
decay = 0.95
TAU = 0.001
class Batch_norm:
def __init__(self, inputs, size, is_training, sess, parForTarget=None, bn_param=None):
self.sess = sess
self.scale = tf.Variable(tf.random_uniform([size], 0.9, 1.1))
self.beta = tf.Variable(tf.random_uniform([size], -0.03, 0.03))
self.pop_mean = tf.Variable(tf.random_uniform([size], -0.03, 0.03), trainable=False)
self.pop_var = tf.Variable(tf.random_uniform([size], 0.9, 1.1), trainable=False)
self.batch_mean, self.batch_var = tf.nn.moments(inputs, [0])
self.train_mean = tf.assign(self.pop_mean, self.pop_mean *
decay + self.batch_mean * (1 - decay))
self.train_var = tf.assign(self.pop_var, self.pop_var *
decay + self.batch_var * (1 - decay))
def training():
return tf.nn.batch_normalization(inputs,
self.batch_mean, self.batch_var, self.beta, self.scale, 0.0000001)
def testing():
return tf.nn.batch_normalization(inputs,
self.pop_mean, self.pop_var, self.beta, self.scale, 0.0000001)
if parForTarget != None:
self.parForTarget = parForTarget
self.updateScale = self.scale.assign(
self.scale * (1 - TAU) + self.parForTarget.scale * TAU)
self.updateBeta = self.beta.assign(self.beta * (1 - TAU) + self.parForTarget.beta * TAU)
self.updateTarget = tf.group(self.updateScale, self.updateBeta)
self.bnorm = tf.cond(is_training, training, testing)
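# Illustrative usage (added for clarity): a layer that owns a session might wrap a
# pre-activation like this:
#
#     bn = Batch_norm(pre_activation, layer_size, is_training_ph, sess)
#     h = tf.nn.relu(bn.bnorm)
#
# During training, bn.train_mean and bn.train_var should be run alongside the optimizer
# so the population statistics used at test time track the batch statistics.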
```
#### File: src/util/data_update.py
```python
import data
import numpy as np
import sys
sys.path.insert(
0, "/home/jim/Desktop/dip/Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces/src/")
import action_space
import data_old  # assumed to sit beside this script, like data; used by update_pickle_file below
def get_all_pkl_files(directory):
from os import listdir
from os.path import isfile, join, dirname, realpath, splitext
mypath = directory
# mypath = DIRECTORY
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
files = []
for f in onlyfiles:
if splitext(f)[1] == '.zip':
files.append(f)
return files
def update_pickle_file(file_name, eps=0, k=0, v=0):
d_old = data_old.Data(file_name)
d_old.load()
print(file_name, 'loaded')
# d_old.print_fields()
d_new = data.Data()
d_new.set_agent('Wolp',
int(d_old.get_data('max_actions')[0]),
k,
v)
d_new.set_experiment(d_old.get_data('experiment')[0],
[-3],
[3],
eps)
space = action_space.Space([-3], [3], int(d_old.get_data('max_actions')[0]))
# print(space.get_space())
# d_new.print_data()
done = d_old.get_data('done')
actors_result = d_old.get_data('actors_result')
actions = d_old.get_data('actions')
state_0 = d_old.get_data('state_0').tolist()
state_1 = d_old.get_data('state_1').tolist()
state_2 = d_old.get_data('state_2').tolist()
state_3 = d_old.get_data('state_3').tolist()
rewards = d_old.get_data('rewards').tolist()
ep = 0
temp = 0
l = len(done)
for i in range(l):
d_new.set_action(space.import_point(actions[i]).tolist())
d_new.set_actors_action(space.import_point(actors_result[i]).tolist())
d_new.set_ndn_action(space.import_point(
space.search_point(actors_result[i], 1)[0]).tolist())
state = [state_0[i], state_1[i], state_2[i], state_3[i]]
d_new.set_state(state)
d_new.set_reward(1)
if done[i] > 0:
# print(ep, i - temp, 'progress', i / l)
temp = i
ep += 1
# if ep % 200 == 199:
# d_new.finish_and_store_episode()
# else:
d_new.end_of_episode()
d_new.save()
if __name__ == "__main__":
folder = "results/obj/"
files = get_all_pkl_files(folder)
f = 'data_10000_Wolp3_Inv100k10#0.json.zip'
d = data.load(folder + f)
for episode in d.data['simulation']['episodes']:
episode['rewards'] = 0
episode['actions'] = 0
episode['ndn_actions'] = 0
episode['actors_actions'] = 0
# episode['states'] = 0
d.save('saved/')
``` |
{
"source": "jimkon/rl",
"score": 3
} |
#### File: rl/tests/test_utils.py
```python
import unittest
from rl_lib.utils.utils import *
class TestMapper(unittest.TestCase):
def setUp(self):
self.mapper = Mapper()
def test_map(self):
self.assertTrue((self.mapper.map([-2, -3, 5]) == np.array([-2, -3, 5])).all(), self.mapper.map([-2, -3, 5]))
self.assertTrue((self.mapper.map([6, 1, 11]) == np.array([6, 1, 11])).all(), self.mapper.map([6, 1, 11]))
self.assertTrue((self.mapper.map([2, -1, 8]) == np.array([2, -1, 8])).all(), self.mapper.map([2, -1, 8]))
self.assertTrue((self.mapper.map([-1, 0, 5.6]) == np.array([-1, 0, 5.6])).all(), self.mapper.map([-1, 0, 5.6]))
class TestStandardMapper(unittest.TestCase):
def setUp(self):
self.mapper = StandardMapper([-2, -3, 5], [6, 1, 11])
def test_map(self):
self.assertTrue((self.mapper.map([-2, -3, 5]) == np.zeros(3)).all(), self.mapper.map([-2, -3, 5]))
self.assertTrue((self.mapper.map([6, 1, 11]) == np.ones(3)).all(), self.mapper.map([6, 1, 11]))
self.assertTrue((self.mapper.map([2, -1, 8]) == np.ones(3)*.5).all(), self.mapper.map([2, -1, 8]))
self.assertTrue(((self.mapper.map([-1, 0, 5.6]) - [.125, .75, .1])<1e-10).all(), self.mapper.map([-1, 0, 5.6]))
class TestUnitMapper(unittest.TestCase):
def setUp(self):
self.mapper = UnitMapper([-2, -3, 5], [6, 1, 11])
def test_map(self):
self.assertTrue((self.mapper.map([-2, -3, 5]) == -np.ones(3)).all(), self.mapper.map([-2, -3, 5]))
self.assertTrue((self.mapper.map([6, 1, 11]) == np.ones(3)).all(), self.mapper.map([6, 1, 11]))
self.assertTrue((self.mapper.map([2, -1, 8]) == np.zeros(3)).all(), self.mapper.map([2, -1, 8]))
self.assertTrue(((self.mapper.map([-1, 0, 5.6]) - [-.75, .5, -.8])<1e-10).all(), self.mapper.map([-1, 0, 5.6]))
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "jimkutter/rpi_lcars",
"score": 2
} |
#### File: app/screens/authorize.py
```python
from datetime import datetime, timedelta
import pygame
from pygame.mixer import Sound
from screens.base_screen import BaseScreen
from ui import colours
from ui.widgets.background import LcarsBackgroundImage
from ui.widgets.gifimage import LcarsGifImage
from ui.widgets.lcars_widgets import LcarsButton
from ui.widgets.lcars_widgets import LcarsText
class CodeButton(LcarsButton):
def __init__(self, colour, pos, text, handler=None, rectSize=None):
super().__init__(colour, pos, text, handler, rectSize)
self.code = None
class ScreenAuthorize(BaseScreen):
def __init__(self, app):
super().__init__(app, None, None)
self.login_timeout = None
self.reset_timer()
def setup(self, all_sprites):
all_sprites.add(LcarsBackgroundImage("assets/lcars_screen_2.png"), layer=0)
all_sprites.add(LcarsGifImage("assets/gadgets/stlogorotating.gif", (103, 369), 50), layer=0)
all_sprites.add(LcarsText(colours.ORANGE, (270, -1), "AUTHORIZATION REQUIRED", 2), layer=0)
all_sprites.add(LcarsText(colours.BLUE, (330, -1), "ONLY AUTHORIZED PERSONNEL MAY ACCESS THIS TERMINAL", 1.5),
layer=1)
all_sprites.add(LcarsText(colours.BLUE, (360, -1), "TOUCH TERMINAL TO PROCEED", 1.5), layer=1)
greek_alphabet = [
"alpha",
"beta",
"gamma",
"delta",
"epsilon",
"zeta",
"eta",
"theta",
"iota",
"kappa",
"lambda",
"mu",
"nu",
"xi",
"omicron",
"pi",
"rho",
"sigma",
"tau",
"upsilon",
"phi",
"chi",
"psi",
"omega",
]
x_orig = 127
y_orig = 75
padding = 20
width = 122
height = 44
row = 0
col = 0
for letter in greek_alphabet:
x = x_orig + (col * (width + padding / 2))
y = y_orig + (row * (height + padding / 2))
button = CodeButton(colours.GREY_BLUE, (y, x), letter.upper(), self.button_handler)
button.code = letter
col = col + 1
if col > 3:
row = row + 1
col = 0
all_sprites.add(button, layer=2)
self.layer1 = all_sprites.get_sprites_from_layer(1)
self.layer2 = all_sprites.get_sprites_from_layer(2)
# sounds
if not self.app.is_screen_off:
Sound("assets/audio/panel/215.wav").play()
self.sound_granted = Sound("assets/audio/accessing.wav")
self.sound_beep1 = Sound("assets/audio/panel/201.wav")
self.sound_denied = Sound("assets/audio/access_denied.wav")
self.sound_deny1 = Sound("assets/audio/deny_1.wav")
self.sound_deny2 = Sound("assets/audio/deny_2.wav")
############
# SET PIN CODE WITH THIS VARIABLE
############
self.pin = self.app.config['pin']
############
self.reset()
def reset(self):
# Variables for PIN code verification
self.correct = 0
self.pin_i = 0
self.granted = False
for sprite in self.layer1: sprite.visible = True
for sprite in self.layer2: sprite.visible = False
def screen_update(self):
super().screen_update()
if self.login_timeout:
auth_delta = self.login_timeout - datetime.now()
if int(auth_delta.total_seconds()) == 0:
self.reset()
def handleEvents(self, event, fpsClock):
if event.type == pygame.MOUSEBUTTONDOWN:
# Play sound
self.sound_beep1.play()
self.app.screen_on()
if event.type == pygame.MOUSEBUTTONUP:
if not self.layer2[0].visible:
self.show_login_controls()
elif self.pin_i == len(self.pin):
# Ran out of button presses
if self.correct == len(self.pin):
self.sound_granted.play()
from screens.main import ScreenMain
self.loadScreen(ScreenMain(self.app))
else:
self.sound_deny2.play()
self.sound_denied.play()
self.reset()
return False
def show_login_controls(self):
for sprite in self.layer1: sprite.visible = False
for sprite in self.layer2: sprite.visible = True
Sound("assets/audio/enter_authorization_code.wav").play()
self.reset_timer()
def button_handler(self, item, event, clock):
self.reset_timer()
if self.pin[self.pin_i] == item.code:
self.correct += 1
print(self.correct)
self.pin_i += 1
def reset_timer(self):
self.login_timeout = datetime.now() + timedelta(seconds=self.app.config['login_timeout'])
```
#### File: app/screens/register_hue.py
```python
from screens.base_screen import BaseScreen
from ui.widgets.lcars_widgets import *
class ScreenRegister(BaseScreen):
def __init__(self, app):
super().__init__(app, "assets/lcars_screen_1.png", "REGISTER HUE")
def setup(self, all_sprites):
super().setup(all_sprites)
# info text
all_sprites.add(LcarsText(colours.WHITE, (192, 174), "Push the connect button on your hue bridge.", 1.5),
layer=3)
all_sprites.add(LcarsButton(colours.ORANGE, (240, 150), "ABORT", self.cancel_handler),
layer=4)
all_sprites.add(LcarsButton(colours.BLUE, (240, 450), "CONFIRM", self.ok_handler),
layer=4)
def cancel_handler(self, item, event, clock):
from screens.main import ScreenMain
self.loadScreen(ScreenMain(self.app))
def ok_handler(self, item, event, clock):
from screens.lighting import ScreenLightGroups
self.loadScreen(ScreenLightGroups(self.app))
``` |
{
"source": "jimlaloi/eComma",
"score": 3
} |
#### File: jimlaloi/eComma/ecomma_Prep.py
```python
from xml.dom import minidom, getDOMImplementation
from xml.dom.minidom import parse, parseString
from faker import Faker
from collections import defaultdict
import re
import json
# Put the course number here before saving and running the script
coursenumber = '36360'
# Open the files and parse them with minidom
ch2data = open('ecomma_comments_export_%s_Ch2.xml' % coursenumber, encoding="UTF-8")
ch2doc = parse(ch2data)
ch3data = open('ecomma_comments_export_%s_Ch3.xml' % coursenumber, encoding="UTF-8")
ch3doc = parse(ch3data)
ch4data = open('ecomma_comments_export_%s_Ch4.xml' % coursenumber, encoding="UTF-8")
ch4doc = parse(ch4data)
ch5data = open('ecomma_comments_export_%s_Ch5.xml' % coursenumber, encoding="UTF-8")
ch5doc = parse(ch5data)
#ch6data = open('ecomma_comments_export_%s_Ch6.xml' % coursenumber, encoding="UTF-8")
#ch6doc = parse(ch6data)
#ch7data = open('ecomma_comments_export_%s_Ch7.xml' % coursenumber, encoding="UTF-8")
#ch7doc = parse(ch7data)
# Add a Chapter node to each comment
for node in ch2doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Chapter')) == 0:
ChapterNode = ch2doc.createElement('Chapter')
chapter = ch2doc.createTextNode('2')
tab = ch2doc.createTextNode(' ')
node.appendChild(tab)
node.appendChild(ChapterNode)
node.lastChild.appendChild(chapter)
newline = ch2doc.createTextNode('\n ')
node.appendChild(newline)
for node in ch3doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Chapter')) == 0:
ChapterNode = ch3doc.createElement('Chapter')
chapter = ch3doc.createTextNode('3')
tab = ch3doc.createTextNode(' ')
node.appendChild(tab)
node.appendChild(ChapterNode)
node.lastChild.appendChild(chapter)
newline = ch3doc.createTextNode('\n ')
node.appendChild(newline)
for node in ch4doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Chapter')) == 0:
ChapterNode = ch4doc.createElement('Chapter')
chapter = ch4doc.createTextNode('4')
tab = ch4doc.createTextNode(' ')
node.appendChild(tab)
node.appendChild(ChapterNode)
node.lastChild.appendChild(chapter)
newline = ch4doc.createTextNode('\n ')
node.appendChild(newline)
for node in ch5doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Chapter')) == 0:
ChapterNode = ch5doc.createElement('Chapter')
chapter = ch5doc.createTextNode('5')
tab = ch5doc.createTextNode(' ')
node.appendChild(tab)
node.appendChild(ChapterNode)
node.lastChild.appendChild(chapter)
newline = ch5doc.createTextNode('\n ')
node.appendChild(newline)
# for node in ch6doc.getElementsByTagName('node'):
# if len(node.getElementsByTagName('Chapter')) == 0:
# ChapterNode = ch6doc.createElement('Chapter')
# chapter = ch6doc.createTextNode('6')
# tab = ch6doc.createTextNode(' ')
# node.appendChild(tab)
# node.appendChild(ChapterNode)
# node.lastChild.appendChild(chapter)
# newline = ch6doc.createTextNode('\n ')
# node.appendChild(newline)
# for node in ch7doc.getElementsByTagName('node'):
# if len(node.getElementsByTagName('Chapter')) == 0:
# ChapterNode = ch7doc.createElement('Chapter')
# chapter = ch7doc.createTextNode('7')
# tab = ch7doc.createTextNode(' ')
# node.appendChild(tab)
# node.appendChild(ChapterNode)
# node.lastChild.appendChild(chapter)
# newline = ch7doc.createTextNode('\n ')
# node.appendChild(newline)
# Create new xml which merges comments from all the chapters
impl = getDOMImplementation()
doc = impl.createDocument(None, "nodes", None)
for node in ch2doc.getElementsByTagName('node'):
doc.firstChild.appendChild(node)
for node in ch3doc.getElementsByTagName('node'):
doc.firstChild.appendChild(node)
for node in ch4doc.getElementsByTagName('node'):
doc.firstChild.appendChild(node)
for node in ch5doc.getElementsByTagName('node'):
doc.firstChild.appendChild(node)
# for node in ch6doc.getElementsByTagName('node'):
# doc.firstChild.appendChild(node)
# for node in ch7doc.getElementsByTagName('node'):
# doc.firstChild.appendChild(node)
# Get list of students to redact from the data
with open('redactlist_%s.txt' % coursenumber) as f:
redactlist = f.read().splitlines()
# Redact comments by students on the redactlist
NodesToRedact = [node for node in doc.getElementsByTagName("Author") if node.firstChild.nodeValue in redactlist]
for node in NodesToRedact:
doc.documentElement.removeChild(node.parentNode)
# Change student names to fake names
# First, get the list of student names
studentlist = []
for node in doc.getElementsByTagName('Author'):
studentlist.append(node.firstChild.nodeValue)
def unique(sequence):
seen = set()
return [x for x in sequence if not (x in seen or seen.add(x))]
studentlist = unique(studentlist)
# Next, create a dictionary where each name is assigned a fake value
fake = Faker()
fake.seed(coursenumber) #Use the same seed every time, but a different seed for each course
studentdict = {i:fake.name() for i in studentlist}
with open('fakenames_%s.txt' % coursenumber, 'w') as f:
f.write(json.dumps(studentdict))
# Add wordcount node to each comment
for node in doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Wordcount')) == 0:
WordcountNode = doc.createElement('Wordcount')
CommentNode = node.childNodes[9]
CommentText = CommentNode.firstChild.nodeValue
countthewords = str(len(CommentText.split(" ")))
wordcount = doc.createTextNode(countthewords)
node.appendChild(WordcountNode)
node.lastChild.appendChild(wordcount)
newline = doc.createTextNode('\n ')
node.appendChild(newline)
# Add Language node to each comment
for node in doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Language')) == 0:
LanguageNode = doc.createElement('Language')
defaultlanguage = doc.createTextNode('English')
node.appendChild(LanguageNode)
node.lastChild.appendChild(defaultlanguage)
newline = doc.createTextNode('\n ')
node.appendChild(newline)
# Add Affordance node to each comment
for node in doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Affordance')) == 0:
AffordanceNode = doc.createElement('Affordance')
node.appendChild(AffordanceNode)
newline = doc.createTextNode('\n ')
node.appendChild(newline)
# Add Speech Act node to each comment
for node in doc.getElementsByTagName('node'):
if len(node.getElementsByTagName('Speech-Act')) == 0:
SpeechActNode = doc.createElement('Speech-Act')
node.appendChild(SpeechActNode)
newline = doc.createTextNode('\n')
node.appendChild(newline)
# Write the data to a new file
with open('ecomma_prepped_%s.xml' % coursenumber,'w+', encoding="UTF-8") as newfile:
doc.writexml(newfile)
# Define function for replacing names with fake names
def find_replace_multi(string, dictionary):
for item in dictionary.keys():
# sub item for item's paired value in string
string = re.sub(item, dictionary[item], string)
return string
# Replace names with fake names, and fix encoding of some special characters
with open('ecomma_prepped_%s.xml' % coursenumber, "r+", encoding = "UTF-8") as f:
data = f.read()
data = data.replace('"','"')
data = data.replace("'","'")
data = find_replace_multi(data, studentdict)
f.seek(0)
f.write(data)
f.truncate()
``` |
{
"source": "jimlawless/AoC2020",
"score": 3
} |
#### File: AoC2020/Day_9/puzzle9.py
```python
def checkNums(start,length,val,partOne):
global nums
for k in range(start,length):
if(k==partOne):
continue
if ((nums[k]+nums[partOne])==val):
print("{} + {} = {}".format(nums[k],nums[partOne],val))
return True
return False
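# AoC 2020 day 9, part 1: after a 25-number preamble, report the first number that is
# not the sum of any two of the 25 numbers immediately before it.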
nums=[]
infile = open("input.txt","r")
for line in infile:
line=line.rstrip()
nums.append(int(line))
infile.close()
consider=25
for i in range(consider,len(nums)):
num=nums[i]
found=False
for j in range(i-consider,i):
res=checkNums(i-consider,i,num,j)
if res:
found=True
break
if not found:
print("Sum not found for {}".format(num))
exit(0)
``` |
{
"source": "jimlawton/codility",
"score": 3
} |
#### File: 02/odd_occurrences_in_array/test_challenge.py
```python
from challenge import solution
def test_challenge_all():
# Single entry slot
assert solution([99]) == 99
# Answer in last slot
assert solution([2, 2, 3, 3, 4]) == 4
# Answer in first slot
assert solution([2, 3, 3, 4, 4]) == 2
# Answer in middle slot
assert solution([2, 2, 3, 4, 4]) == 3
# Different positions
assert solution([9, 3, 9, 3, 9, 7, 9]) == 7
assert solution([9, 3, 9, 3, 9, 1, 9]) == 1
assert solution([9, 1, 9, 1, 9, 2, 9]) == 2
# Benchmark
def test_challenge_a(benchmark):
assert benchmark(solution, [99]) == 99
def test_challenge_b(benchmark):
assert benchmark(solution, [2, 2, 3, 3, 4]) == 4
def test_challenge_c(benchmark):
assert benchmark(solution, [9, 3, 9, 3, 9, 7, 9]) == 7
```
#### File: 03/frog_imp/test_challenge.py
```python
from challenge import solution
def test_challenge():
# Use 0 or 1
# Use upper and lower limits
assert solution(10, 85, 30) == 3
```
#### File: 03/perm_missing_elem/test_challenge.py
```python
from challenge import solution
def test_challenge():
# Create empty array (dependent on test)
# Create single entry array
# Create an array with the answer at the start
# Create an array with the answer at the end
# Create an array with the answer in the middle
# Single entry slot
assert(solution([2, 3, 1, 5]) == 4)
```
#### File: 04/perm_check/challenge.py
```python
def solution(A):
N = len(A)
if N == 1:
if A[0] == 1:
return 1
else:
return 0
count = {}
for i in range(N):
if A[i] not in count:
count[A[i]] = 0
count[A[i]] += 1
if count[A[i]] > 1:
return 0
# print(count)
values = count.keys()
# print(values)
if max(values) == N:
return 1
return 0
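# Illustrative examples (added for clarity):
#     solution([4, 1, 3, 2]) == 1   # a permutation of 1..4
#     solution([4, 1, 3]) == 0      # 2 is missing, so not a permutation of 1..3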
``` |
{
"source": "jimlawton/gdrive-linux",
"score": 2
} |
#### File: jimlawton/gdrive-linux/dirtree.py
```python
from UserDict import DictMixin
# Singleton sentinel.
class _Null(object):
pass
class _Node(dict):
"""A class representing a node in the directory tree.
>>> n = _Node()
>>> n[1] = 1
>>> n[1]
1
>>> n = { 1: 1, 2: 2, 3: 3 }
>>> n
{1: 1, 2: 2, 3: 3}
>>> 1 in n
True
>>> 4 in n
False
>>> n[5]
Traceback (most recent call last):
File "/usr/lib64/python2.7/doctest.py", line 1289, in __run
compileflags, 1) in test.globs
File "<doctest __main__._Node[7]>", line 1, in <module>
n[5]
KeyError: 5
"""
def __init__(self, value=_Null):
super(_Node, self).__init__() # The base dictionary object.
self.path = None # Stores the path to this node.
self.value = value
self.children = {}
def numkeys(self):
'''Return the number of keys in the subtree rooted at this node.'''
numk = 0
if self.value is not _Null:
numk += 1
numk += sum(child.numkeys() for child in self.children.itervalues())
return numk
def __repr__(self):
valstr = '_Null'
if self.value is not _Null:
valstr = repr(self.value)
return '(%s, {%s})' % (valstr, ', '.join('%r: %r' % cstr for cstr in self.children.iteritems()))
def __getstate__(self):
return (self.value, self.children)
def __setstate__(self, state):
self.value, self.children = state
class DirectoryTree(DictMixin, object):
"""A prefix tree (Trie) implementation to represent a directory tree.
>>> t = DirectoryTree()
>>> t.add("/a/b/c/d")
>>> t.add("/a/b/c/d/e")
>>> t.add("/foo/bar")
>>> print t
DirectoryTree({'': '/', '/a/b/c/d': '/a/b/c/d', '/a/b/c/d/e': '/a/b/c/d/e', '/foo/bar': '/foo/bar'})
>>> t.keys()
['', '/a/b/c/d', '/a/b/c/d/e', '/foo/bar']
>>> t.values()
['/', '/a/b/c/d', '/a/b/c/d/e', '/foo/bar']
>>> t.items()
[('', '/'), ('/a/b/c/d', '/a/b/c/d'), ('/a/b/c/d/e', '/a/b/c/d/e'), ('/foo/bar', '/foo/bar')]
>>> t.search("/a/b/c")
['/a/b/c/d', '/a/b/c/d/e']
"""
def __init__(self, seq=None, **kwargs):
self._root = _Node('/')
self.update(seq, **kwargs)
def __len__(self):
return self._root.numkeys()
def __iter__(self):
return self.iterkeys()
def __contains__(self, key):
node = self._find(key)
return node is not None and node.value is not _Null
def __getitem__(self, key):
node = self._find(key)
if node is None or node.value is _Null:
raise KeyError
return node.value
def __setitem__(self, key, value):
node = self._root
for part in key.split('/'):
next_node = node.children.get(part)
if next_node is None:
node = node.children.setdefault(part, _Node())
else:
node = next_node
node.value = value
def __delitem__(self, key):
parts = []
node = self._root
for part in key.split('/'):
parts.append((node, part))
node = node.children.get(part)
if node is None:
break
if node is None or node.value is _Null:
raise KeyError
node.value = _Null
while node.value is _Null and not node.children and parts:
node, part = parts.pop()
del node.children[part]
def __repr__(self):
return '%s({%s})' % (self.__class__.__name__, ', '.join('%r: %r' % t for t in self.iteritems()))
def __str__(self):
lines = ["{"]
for key, value in self.iteritems():
lines.append("%s: %s" % (key, value))
lines.append("}")
return '\n'.join(lines)
def _find(self, key):
node = self._root
for part in key.split('/'):
node = node.children.get(part)
if node is None:
break
return node
def keys(self, prefix=None):
"Return a list of the trie keys."
return list(self.iterkeys(prefix))
def values(self, prefix=None):
"Return a list of the trie values."
return list(self.itervalues(prefix))
def items(self, prefix=None):
"Return a list of the trie (key, value) tuples."
return list(self.iteritems(prefix))
def iteritems(self, prefix=None):
"Return an iterator over the trie (key, value) tuples."
parts = []
def generator(node, parts=parts):
if node.value is not _Null:
yield ('/'.join(parts), node.value)
for part, child in node.children.iteritems():
parts.append(part)
for subresult in generator(child):
yield subresult
del parts[-1]
node = self._root
if prefix is not None:
for part in prefix.split('/'):
parts.append(part)
node = node.children.get(part)
if node is None:
node = _Node()
break
return generator(node)
def iterkeys(self, prefix=None):
"Return an iterator over the trie keys."
return (key for key, value in self.iteritems(prefix))
def itervalues(self, prefix=None):
"Return an iterator over the trie values."
return (value for key, value in self.iteritems(prefix))
def add(self, path, value=None):
"Add a path to the trie."
if value is not None:
self[path] = value
else:
self[path] = path
def search(self, prefix=None):
"Return a list of keys in the trie matching the supplied prefix."
return list(self.iterkeys(prefix))
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: jimlawton/gdrive-linux/gdrive.py
```python
import sys
import optparse
import pprint
import gdocs
from drived import DriveDaemon
from drive_config import DriveConfig
session = None
commands = {}
aliases = {}
def command(func):
if not func.__doc__:
sys.exit("Error: commands must have formatted docstrings!")
commands[func.func_name] = func
func_aliases = [alias for alias in aliases.iterkeys() if aliases[alias].func_name == func.func_name]
if func_aliases:
func.__doc__ += "Aliases: %s" % ",".join(func_aliases)
return func
def alias(name):
def decorator(func):
if name in commands:
sys.exit("Error, this alias has the same name as a command!")
aliases[name] = func
return func
return decorator
@command
def help(argv):
"""Print help message.
gdrive help [COMMAND]
If no argument is specified, print a list of commands with a short description of each. If a command is specified, print help on that command.
"""
if not argv:
return usage()
for cmd in commands:
if cmd == argv[0]:
print commands[cmd].__doc__.split('\n', 1)[1]
return
print >>sys.stderr, "Unknown command '%s'" % argv[0]
def usage():
"Print usage."
print "Google Drive command-line interface"
print
print "Commands:"
print
print "Note: use gdrive help <command> to view usage for a specific command."
print
lines = []
cmdlist = commands.keys()
cmdlist.sort()
for cmd in cmdlist:
lines.append((cmd, commands[cmd].__doc__.splitlines()[0]))
space = max(len(line[0]) + 3 for line in lines)
for line in lines:
print " %-*s%s" % (space, line[0], line[1])
print
@command
def start(argv):
"""Start GDrive daemon.
gdrive start
Starts the GDrive daemon, gdrived, if it is not already running.
"""
daemon = DriveDaemon()
daemon.start()
@command
def stop(argv):
"""Stop GDrive daemon.
gdrive stop
Stops the GDrive daemon, gdrived, if it is running.
"""
daemon = DriveDaemon()
daemon.stop()
@command
def status(argv):
"""Show the status of the GDrive daemon.
gdrive status
Shows the status of the GDrive daemon.
"""
daemon = DriveDaemon()
print daemon.status()
@command
def restart(argv):
"""Restart GDrive daemon.
gdrive restart
Restarts the GDrive daemon, gdrived.
"""
daemon = DriveDaemon()
daemon.restart()
@command
@alias("ls")
def list(argv):
"""List folder contents.
gdrive list <path>
Lists the contents of the specified path. If no path is specified, then the root folder is assumed.
"""
if len(argv) == 0:
root = '/'
else:
root = argv[0]
folders, files = session.readFolder(root)
for path in folders:
print path
for path in files:
print path, session.getFileSize(path), session.getFileDate(path), session.getFileChecksum(path)
@command
def filestatus(argv):
"""Show the status of a file.
gdrive filestatus <path>
Shows the status of the specified path.
"""
path = None
if len(argv) == 0:
return usage()
else:
path = argv[0]
print session.filestatus(path, interactive=True)
@command
def md5(argv):
"""Print the MD5 checksums of the local and remote copies of the specified remote file path.
gdrive md5 <path>
prints the local and remote MD5 checksums of the specified path.
"""
path = None
if len(argv) == 0:
return usage()
else:
path = argv[0]
config = DriveConfig()
rhash = session.getRemoteFileChecksum(path)
lpath = config.getLocalPath(path)
lhash = session.getLocalFileChecksum(lpath)
print "Local: path=%s md5=%s Remote: path=%s md5=%s" % (lpath, lhash, path, rhash)
@command
@alias("up")
def update(argv):
"""Update a folder.
gdrive update <path>
Update the contents of the specified path, recursively. If no path is specified, then the entire GDrive will be updated.
"""
if len(argv) == 0:
path = '/'
else:
path = argv[0]
session.update(path, download=True, interactive=True)
@command
@alias("get")
def download(argv):
"""Download a file or folder.
gdrive download [<path> [<localpath>]]
Download the contents of the specified path, recursively. If no path is specified, then the entire GDrive will be downloaded.
This command will create a local copy of the specified file or folder tree, or if it exists already, will update it to match
the server contents. If a local path is specified, then the file or folder will be downloaded at that path. If no localpath
is specified, then the "localstore"/"path" configuration option will be used.
"""
localpath = None
if len(argv) == 0:
path = '/'
else:
path = argv[0]
if len(argv) > 1:
localpath = argv[1]
session.download(path, localpath, interactive=True)
@command
@alias("put")
def upload(argv):
"""Upload a file or folder.
gdrive upload <localpath> [<path>]
Upload the contents of the specified path, recursively. A local path must be specified.
This command will create a server copy of the specified file or folder tree, or if it exists already on the server, will update
it to match the local contents. If a remote path is specified, then the file or folder will be uploaded at that path relative to
the root of the server tree.
"""
path = None
if len(argv) == 0:
return usage()
else:
localpath = argv[0]
if len(argv) > 1:
path = argv[1]
session.upload(localpath, path, interactive=True)
@command
def reset(argv):
"""Reset GDrive cached metadata.
gdrive reset
This command clears cached GDrive metadata and recreates it by querying the server.
"""
session.reset()
@command
def dump(argv):
"""Dump GDrive cached metadata.
gdrive dump
This command dumps cached GDrive metadata.
"""
session.dump()
@command
def info(argv):
"""Print general information.
gdrive info
Prints out general information, e.g. total number of files/folders, quotas used, etc.
"""
pprint.pprint(session.getInfo())
def _parseArgs():
"Parse command-line arguments."
helpStr = """
%prog [options]
Utility to access Google Drive.
"""
parser = optparse.OptionParser(description=helpStr)
parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Turn on extra logging')
parser.add_option('-d', '--debug', dest='debug', action='store_true', default=False, help='Turn on debug logging')
(options, args) = parser.parse_args()
return (options, args)
def main(argv):
"Main function."
(opts, args) = _parseArgs()
cmdfound = False
i = 0
for i in range(len(args)):
if args[i] in commands or args[i] in aliases:
cmdfound = True
break
if not cmdfound:
usage()
return None
global session
session = gdocs.Session(verbose=opts.verbose, debug=opts.debug)
if session == None:
sys.exit("Error, could not create Google Docs session!")
res = None
if args[i] in commands:
res = commands[args[i]](args[i+1:])
elif args[i] in aliases:
res = aliases[args[i]](args[i+1:])
return res
if __name__ == "__main__":
result = main(sys.argv)
if result != None:
sys.exit(result)
``` |
{
"source": "jimlee0817/ML_INCOME_PREDICTION",
"score": 3
} |
#### File: ML_INCOME_PREDICTION/Logistic_Regression/Income_Prediction.py
```python
import sys
import numpy as np
X_TRAIN_PATH = sys.argv[1]
Y_TRAIN_PATH = sys.argv[2]
print("Running the File", sys.argv[0])
print("Directory 1: ", X_TRAIN_PATH)
print("Directory 2: ", Y_TRAIN_PATH)
'''
For Testing
'''
'''
X_TRAIN_PATH = 'X_train'
Y_TRAIN_PATH = 'Y_train'
'''
X_train = np.genfromtxt(X_TRAIN_PATH, delimiter=',', skip_header=1)
Y_train = np.genfromtxt(Y_TRAIN_PATH, delimiter=',', skip_header=1)
"""Do the normalization of the data"""
def normalizeColumn(X, specifiedColumns = None, X_mean = None, X_stdev = None):
if specifiedColumns == None:
specifiedColumns = np.arange(X.shape[1])
length = len(specifiedColumns)
X_mean = np.reshape(np.mean(X[:,specifiedColumns], 0), (1, length))
X_stdev = np.reshape(np.std(X[:,specifiedColumns], 0), (1, length))
X[:,specifiedColumns] = np.divide(np.subtract(X[:, specifiedColumns], X_mean), X_stdev)
return X, X_mean, X_stdev
'''Shuffle the data in a random order'''
def shuffle(X, Y):
randomIndex = np.arange(len(X))
np.random.shuffle(randomIndex)
return (X[randomIndex], Y[randomIndex])
'''Split the data into training data and validation data'''
def splitTrainAndValidationData(X, Y, validation_size = 0.1):
train_size = int(round(len(X) * (1 - validation_size)))
return X[0:train_size], Y[0:train_size], X[train_size:None], Y[train_size:None]
def sigmoid(Z):
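# Clip to [1e-6, 1 - 1e-6] so that log(y) and log(1 - y) in the cross-entropy stay finite.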
return np.clip(1 / (1 + np.exp(-Z)), 1e-6, 1-1e-6)
def getY(X,w,b):
return sigmoid(np.add(np.matmul(X, w),b))
def getRoundY(y):
for i in range(len(y)):
if y[i] < 0.5:
y[i] = 0
else:
y[i] = 1
return y
def computeCrossEntropy(y, y_label):
return -np.dot(y_label, np.log(y)) - np.dot(1 - y_label, np.log(1 - y))
def getLoss(y, y_label):
return computeCrossEntropy(y, y_label)
def getGradient(X, y_label, w, b):
y = getY(X, w, b)
loss = y_label - y
w_grad = -np.mean(np.multiply(loss.T, X.T), axis = 1)
b_grad = -np.mean(loss)
return w_grad, b_grad
def getAccuracy(y, y_label):
return np.sum(y == y_label) / len(y)
def train(X, Y, method = 'GRADIENT_ADAM'):
validation_size = 0.1
X_train, y_label, X_validation, y_validation = splitTrainAndValidationData(X, Y, validation_size)
print(X_train.shape)
print(y_label.shape)
print(X_validation.shape)
print(y_validation.shape)
'''Initialize the weight and bias'''
w = np.zeros(X_train.shape[1])
b = np.zeros([1])
eipsilon = 1e-8
if method == 'GRADIENT_ADAM':
beta1 = 0.9
beta2 = 0.999
v_w = np.zeros(w.shape)
s_w = np.zeros(w.shape)
v_b = np.zeros(b.shape)
s_b = np.zeros(b.shape)
max_interation = 41
batch_size = 25
learningRate = 0.0001
step = 1
trainAccuracy_list = []
trainLoss_list = []
validationAccuracy_list = []
validationLoss_list = []
for epoch in range(max_interation):
X_train_epoch, y_train_epoch = shuffle(X_train, y_label)
for i in range(int(np.floor(len(X_train)) / batch_size)):
X_train_batch = X_train_epoch[i * batch_size: (i + 1) * batch_size]
y_train_batch = y_train_epoch[i * batch_size: (i + 1) * batch_size]
if method == 'GRADIENT':
w_grad, b_grad = getGradient(X_train_batch, y_train_batch, w, b)
w = w - learningRate / np.sqrt(step) * w_grad
b = b - learningRate / np.sqrt(step) * b_grad
elif method == 'GRADIENT_ADAM':
w_grad, b_grad = getGradient(X_train_batch, y_train_batch, w, b)
v_w = beta1 * v_w + (1 - beta1) * w_grad
s_w = beta2 * s_w + (1 - beta2) * w_grad ** 2
v_b = beta1 * v_b + (1 - beta1) * b_grad
s_b = beta2 * s_b + (1 - beta2) * b_grad ** 2
v_w_correction = v_w / (1 - beta1 ** step)
s_w_correction = s_w / (1 - beta2 ** step)
v_b_correction = v_b / (1 - beta1 ** step)
s_b_correction = s_b / (1 - beta2 ** step)
w = w - learningRate * v_w_correction / (np.sqrt(s_w_correction) + eipsilon)
b = b - learningRate * v_b_correction / (np.sqrt(s_b_correction) + eipsilon)
step += 1
y_train_predicted = getY(X_train, w, b)
trainLoss_list.append(getLoss(y_train_predicted, y_label) / len(y_train_predicted))
y_train_predicted = getRoundY(y_train_predicted)
trainAccuracy_list.append(getAccuracy(y_train_predicted, y_label))
y_validation_predicted = getY(X_validation, w, b)
validationLoss_list.append(getLoss(y_validation_predicted, y_validation) / len(y_validation_predicted))
y_validation_predicted = getRoundY(y_validation_predicted)
validationAccuracy_list.append(getAccuracy(y_validation_predicted, y_validation))
print("Epoch", epoch, " Training Accuracy: ", (getAccuracy(y_train_predicted, y_label)), " Validation Accuracy: ", (getAccuracy(y_validation_predicted, y_validation)))
return w, b, trainAccuracy_list, validationAccuracy_list, trainLoss_list, validationLoss_list
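# Note (added for clarity): the 'GRADIENT_ADAM' branch above implements the usual Adam
# update: v <- b1*v + (1-b1)*g, s <- b2*s + (1-b2)*g^2, bias-corrected by (1 - b1**t)
# and (1 - b2**t), followed by w <- w - lr * v_hat / (sqrt(s_hat) + eps).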
X_train, X_mean, X_stdev = normalizeColumn(X_train)
weight, bias, trainAccList, validationAccList, trainLossList, validationLossList = train(X_train, Y_train, method = 'GRADIENT_ADAM')
'''
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(trainAccList)
plt.plot(validationAccList)
plt.figure(2)
plt.plot(trainLossList)
plt.plot(validationLossList)
plt.legend(['train', 'validation'])
plt.show()
'''
``` |
{
"source": "jimlee4262/webscrape",
"score": 3
} |
#### File: jimlee4262/webscrape/app.py
```python
from flask import Flask, url_for, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
#create flask setup
app = Flask(__name__)
#establish mongoDB connection w/ pymongo
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
#render index.html template using data from Mongo
@app.route("/")
def index():
#Find data with mongoDB
mars = mongo.db.mars.find_one()
print(mars)
#return template & data
return render_template("index.html", mars=mars)
#trigger scrape fxn
@app.route("/scrape")
def scraper():
mars_information=scrape_mars.scrape()
#update mongoDB update & upsert
mongo.db.mars.update({}, mars_information, upsert=True)
#redirect home
return redirect("/")
if __name__ == "__main__":
app.run(debug = True)
```
#### File: jimlee4262/webscrape/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import time
#initizalize splinter function
def initiate_browser():
executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = initiate_browser()
#NASA
nasaurl = 'https://mars.nasa.gov/news/'
browser.visit(nasaurl)
time.sleep(10)
#beautiful soup
nasahtml = browser.html
nasasoup = BeautifulSoup(nasahtml, 'html.parser')
#scrape the top headline and description
article = nasasoup.find('div', class_='list_text')
header = article.find('div', class_='content_title').text
paragraph = article.find('div', class_='article_teaser_body').text
#JPL Image
jplurl = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(jplurl)
#Beautiful Soup
jplhtml = browser.html
jplsoup = BeautifulSoup(jplhtml, 'html.parser')
#JPL Image
featured_image_url = "https://www.jpl.nasa.gov" + jplsoup.find('img', class_='thumb')['src']
#Twitter
twitterurl = 'https://twitter.com/marswxreport?lang=en'
browser.visit(twitterurl)
time.sleep(10)
#Beautiful Soup
twitterhtml = browser.html
time.sleep(10)
twittersoup = BeautifulSoup(twitterhtml, 'html.parser')
#Get recent tweet
mars_weather = twittersoup.find('div', class_='css-901oao r-hkyrab r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0').\
find('span', class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0").\
text
#Mars Facts
marsfacturl = 'https://space-facts.com/mars/'
#Gettting the Mars Table via Pandas
tables = pd.read_html(marsfacturl)
df = tables[0]
#setting up the columns
df.columns=['Mars Planet Profile', 'Numbers']
#use the Mars Planet profile column as the index
df = df.set_index(['Mars Planet Profile'])
#converting html table string w/ pandas
df_html = df.to_html()
df_html = df_html.replace('\n', '')
#Mars Hemispheres
marshem = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(marshem)
marshtml = browser.html
marssoup = BeautifulSoup(marshtml, 'html.parser')
#Get the link for each Hemisphere
hemisphere_link=[]
#Run a loop to get the list of links
hemisphere_information = marssoup.find_all('div', class_='description')
for links in hemisphere_information:
info = links.find('a', class_='itemLink product-item')
link = info['href']
complete_link = (f'https://astrogeology.usgs.gov{link}')
hemisphere_link.append(complete_link)
#loop through the links to get the title and image
hemisphere_dictionary = []
for i in range(len(hemisphere_link)):
browser.visit(hemisphere_link[i])
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
image_link = soup.find('img',class_= 'wide-image')['src']
complete_image_link = (f'https://astrogeology.usgs.gov{image_link}')
image_name = soup.find('h2',class_='title').text
image_dictionary = {'title':image_name, 'link':complete_image_link}
hemisphere_dictionary.append(image_dictionary)
#Close the browser
browser.quit()
mars_information = {"Mars_Headline": header,
"Mars_Description": paragraph,
"jpl_link": featured_image_url,
"table_html": df_html,
"twitter": mars_weather,
"hemisphere": hemisphere_dictionary}
return mars_information
``` |
{
"source": "jimleroux/interview_problems",
"score": 3
} |
#### File: enphaseAI/utils/matplotlib_plotter.py
```python
import os
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from enphaseAI.utils.constants import FIGURE_DIR
def basic_plot(
name: str,
data: np.ndarray,
labels: np.ndarray,
xlabel: str = "x",
ylabel: str = "y"
) -> None:
plt.style.use("ggplot")
fig, ax = plt.subplots()
clusters = set(labels)
num_ele = data.shape[0]
num_clusters = len(clusters)
ax.set_xlabel(xlabel, fontsize=15)
ax.set_ylabel(ylabel, fontsize=15)
ax.tick_params(labelsize=15)
colors = [cm.Set1(lab + 1) for lab in labels]
for i in range(num_clusters):
cluster = list(clusters)[i]
x = data[labels == cluster, 0]
y = data[labels == cluster, 1]
scatter = ax.scatter(x, y, color=np.array(colors)[labels == cluster], label=str(int(cluster)))
plt.tight_layout()
plt.legend()
plt.savefig(os.path.join(FIGURE_DIR, name))
plt.show()
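# Illustrative usage (added for clarity; assumes FIGURE_DIR exists and is writable):
#
#     data = np.random.randn(200, 2)
#     labels = np.random.randint(0, 3, size=200)
#     basic_plot("clusters.png", data, labels, xlabel="PC1", ylabel="PC2")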
``` |
{
"source": "jimle-uk/mygpxdata",
"score": 3
} |
#### File: mygpxdata/mygpxdata/parser.py
```python
from .xmlquery import XMLQuery
class Parser(object):
def __init__(self):
pass
def load(self, filename):
pass
def run(self):
pass
def parse(self, filename):
self.load(filename)
return self.run()
class XMLParser(Parser):
def __init__(self):
super(XMLParser, self).__init__()
self._xmlQuery = XMLQuery()
def load(self, filename):
self._xmlQuery.open(filename)
def run(self):
return self._xmlQuery.toDict()
```
#### File: mygpxdata/mygpxdata/utils.py
```python
import math
import datetime
# ==============================================================================
# dates
# ==============================================================================
def convertTimestamp(timestamp, date_format="%Y-%m-%dT%H:%M:%SZ"):
return datetime.datetime.strptime(timestamp, date_format)
# ==============================================================================
# math
# ==============================================================================
pi = math.pi
glOffset = math.pow(2, 28)
glRadius = glOffset / pi
def lonToX(lon):
return round(glOffset + glRadius * lon * pi)
def latToY(lat):
return round(glOffset - glRadius * math.log((1 + math.sin(lat * pi)) / (1 - math.sin(lat * pi))) / 2)
def calculateBounds(coordinates):
"""
caculates the upper and lower bounds of the coordinate set
"""
x_values = [coordinate[0] for coordinate in coordinates]
y_values = [coordinate[1] for coordinate in coordinates]
max_x, min_x = max(x_values), min(x_values)
max_y, min_y = max(y_values), min(y_values)
return min_x, max_x, min_y, max_y
def calculateScale(w, h, min_x, max_x, min_y, max_y):
"""
returns scale factor between route bounds (dx,dy) and the canvas
"""
dx = max_x - min_x
dy = max_y - min_y
scale = dy if dx < dy else dx
scale_x = float(w)/scale
scale_y = float(h)/scale
return scale_x, scale_y
def calculateOffset(w, h, min_x, max_x, min_y, max_y):
"""
calculates offset of the svg path to position it center within the regions
of canvas.
"""
offset_x = (w/2) - ((max_x - min_x)/2)
offset_y = 0 - (h/2) - ((max_y - min_y)/2)
return (offset_x, offset_y)
def calculateAngle(p1, p2):
"""
calculates angle between p1 and p2. Returns angle in degrees.
"""
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
rads = math.atan2(dy, dx)
degs = math.degrees(rads)
return degs
# ==============================================================================
# track
# ==============================================================================
def flatten(l):
return [i for n in l for i in n]
def trackSegmentsToCoordinates(segments):
coordinates = []
for seg in segments:
coordinates.append([(lonToX(float(trkpt.get('lon'))), latToY(float(trkpt.get('lat')))) for trkpt in seg])
return coordinates
def transformCoordinates(coordinates, min_x, min_y, scale_x, scale_y):
"""
Manipulates coordinates to ensure they display within
the bounds of canvas
"""
transformed_coordinates = []
for seg in coordinates:
transformed_coordinates.append([((trkpt[0]-min_x)*scale_x,(trkpt[1]-min_y)*scale_y) for trkpt in seg])
return transformed_coordinates
def filterCoordinatesByAngle(coordinates, degrees=0):
"""
    filters out coordinates based on the difference between the angle from the
    previous coordinate (B) to the current coordinate (C) and the angle from the
    previous-previous coordinate (A) to the previous coordinate (B).
eg:
[
(1,1), # coordinate (A)
(2,2), # coordinate (B)
(3,3), # coordinate (C)
]
    primary use case is to exclude coordinates which do not diverge much from the
    previous coordinate in terms of direction. We assume that if they are not
    diverging much, the direction is a relatively straight line. We can then omit
    the coordinate and reduce the number of coordinates to render, giving us a
    much smoother route.
    i.e. keep the coordinate only when abs(angle(BC) - angle(AB)) >= degrees, otherwise omit it.
"""
retVal = []
A = None
B = None
for coordinate in coordinates:
C = coordinate
angle_1 = calculateAngle(A, B) if A and B else 0
angle_2 = calculateAngle(B, C) if B else 0
A,B = (B,C) if B else (A,C)
if abs(angle_2 - angle_1) >= degrees:
retVal.append(coordinate)
return retVal
```
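The helpers above compose into a small projection pipeline: project lon/lat to x/y, compute bounds, derive a scale, then shift everything onto the canvas. A minimal end-to-end sketch, assuming the package is importable as `mygpxdata` and using two hand-written track points on an assumed 300x300 canvas:
```python
from mygpxdata import utils

segments = [[{"lon": "-0.137579000", "lat": "51.419945000"},
             {"lon": "-0.138751000", "lat": "51.420468000"}]]

coordinates = utils.trackSegmentsToCoordinates(segments)
min_x, max_x, min_y, max_y = utils.calculateBounds(utils.flatten(coordinates))
scale_x, scale_y = utils.calculateScale(300, 300, min_x, max_x, min_y, max_y)
canvas_coords = utils.transformCoordinates(coordinates, min_x, min_y, scale_x, scale_y)
print(canvas_coords)
```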
#### File: mygpxdata/tests/tests_utils.py
```python
import unittest
import math
class TestUtils(unittest.TestCase):
def setUp(self):
self.lonlat = [
( "-0.137579000", "51.419945000" ),
( "-0.137578000", "51.419945000" ),
( "-0.137979000", "51.420126000" ),
( "-0.138086000", "51.420218000" ),
( "-0.138302000", "51.420234000" ),
( "-0.138433000", "51.420290000" ),
( "-0.138589000", "51.420313000" ),
( "-0.138712000", "51.420373000" ),
( "-0.138751000", "51.420468000" )]
self.trkseg = [[{ "lon": "-0.137579000", "lat": "51.419945000" }]]
def test_pi(self):
self.assertEqual(utils.pi, math.pi)
def test_glOffset(self):
self.assertEqual(utils.glOffset, math.pow(2, 28))
def test_glRadius(self):
self.assertEqual(utils.glRadius, math.pow(2, 28) / math.pi)
def test_lonToX(self):
x = [utils.lonToX(float(x[0])) for x in self.lonlat]
expected_results = [231504374.0, 231504643.0, 231397000.0, 231368278.0, 231310296.0, 231275131.0, 231233255.0, 231200237.0, 231189768.0]
self.assertEqual(x, expected_results)
def test_latToY(self):
y = [utils.lonToX(float(x[1])) for x in self.lonlat]
expected_results = [14071371840.0, 14071371840.0, 14071420426.0, 14071445122.0, 14071449417.0, 14071464450.0, 14071470624.0, 14071486730.0, 14071512231.0]
self.assertEqual(y, expected_results)
def test_calculateBounds(self):
expected_results = ('-0.137578000', '-0.138751000', '51.419945000', '51.420468000')
self.assertEqual(utils.calculateBounds(self.lonlat), expected_results)
def test_calculateScale(self):
test_a = {"w":100, "h":100, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_a = (11.11111111111111, 11.11111111111111)
test_b = {"w":200, "h":100, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_b = (22.22222222222222, 11.11111111111111)
test_c = {"w":100, "h":200, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_c = (11.11111111111111, 22.22222222222222)
test_d = {"w":-100, "h":-100, "min_x":-1, "max_x":-10, "min_y":-1, "max_y":-10}
self.assertEqual(utils.calculateScale(**test_a), expected_a)
self.assertEqual(utils.calculateScale(**test_b), expected_b)
self.assertEqual(utils.calculateScale(**test_c), expected_c)
self.assertEqual(utils.calculateScale(**test_d), expected_a)
def test_calculateOffset(self):
test_a = {"w":100, "h":100, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_a = (46, -54)
test_b = {"w":200, "h":100, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_b = (96, -54)
test_c = {"w":100, "h":200, "min_x":1, "max_x":10, "min_y":1, "max_y":10}
expected_c = (46, -104)
test_d = {"w":-100, "h":-100, "min_x":-1, "max_x":-10, "min_y":-1, "max_y":-10}
expected_d = (-45, 55)
self.assertEqual(utils.calculateOffset(**test_a), expected_a)
self.assertEqual(utils.calculateOffset(**test_b), expected_b)
self.assertEqual(utils.calculateOffset(**test_c), expected_c)
self.assertEqual(utils.calculateOffset(**test_d), expected_d)
def test_calculateAngle(self):
self.assertEquals(utils.calculateAngle((0,0), (1,1)), 45.0) # north east
self.assertEquals(utils.calculateAngle((0,0), (0,1)), 90) # north
self.assertEquals(utils.calculateAngle((0,0), (-1,1)), 135.0) # north west
self.assertEquals(utils.calculateAngle((0,0), (-1,0)), 180.0) # west
self.assertEquals(utils.calculateAngle((0,0), (-1,-1)), -135) # south west
self.assertEquals(utils.calculateAngle((0,0), (0,-1)), -90.0) # south
self.assertEquals(utils.calculateAngle((0,0), (1,-1)), -45) # south east
self.assertEquals(utils.calculateAngle((0,0), (0,0)), 0) # east
def test_flatten(self):
test_a = [[1,2,3]]
expected_a = [1,2,3]
test_b = [[1,2,3], [4,5,6]]
expected_b = [1,2,3,4,5,6]
test_c = [[1,2,3],[4,5,6],[7],[8,9]]
expected_c = [1,2,3,4,5,6,7,8,9]
self.assertEquals(utils.flatten(test_a), expected_a)
self.assertEquals(utils.flatten(test_b), expected_b)
self.assertEquals(utils.flatten(test_c), expected_c)
def test_trackSegmentsToCoordinates(self):
self.assertEquals(utils.trackSegmentsToCoordinates(self.trkseg), [[(231504374.0, 445151444.0)]])
def test_transformCoordinates(self):
coordinates = utils.trackSegmentsToCoordinates(self.trkseg)
test_a = {"coordinates": coordinates, "min_x": coordinates[0][0][0], "min_y": coordinates[0][0][1], "scale_x": 1, "scale_y": 1}
expected_a = [[(0.0, 0.0)]]
test_b = {"coordinates": coordinates, "min_x": 0, "min_y": 0, "scale_x": 200, "scale_y": 200}
expected_b = [[(46300874800.0, 89030288800.0)]]
self.assertEquals(utils.transformCoordinates(**test_a), expected_a)
self.assertEquals(utils.transformCoordinates(**test_b), expected_b)
def test_filterCoordinatesByAngle(self):
coordinates = [
(0,0), # 0
(1,1), # 45.0
(1,1.4), # 45.0
(-1,-1), # -219.8
(-1.2,-1), # 309.8
(-1,-1) # -180
]
self.assertEquals(len(utils.filterCoordinatesByAngle(coordinates, degrees=45)), 5)
self.assertEquals(len(utils.filterCoordinatesByAngle(coordinates, degrees=180)), 3)
self.assertEquals(len(utils.filterCoordinatesByAngle(coordinates, degrees=200)), 2)
self.assertEquals(len(utils.filterCoordinatesByAngle(coordinates, degrees=300)), 1)
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from mygpxdata import utils
unittest.main()
``` |
{
"source": "Jim-Lin/dark-classifier",
"score": 3
} |
#### File: dark-classifier/tensorflow/label_image.py
```python
import numpy as np
import tensorflow as tf
import argparse
def create_graph(model):
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(model, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(args):
answer = None
if not tf.gfile.Exists(args.image):
tf.logging.fatal('File does not exist %s', args.image)
return answer
image_data = tf.gfile.FastGFile(args.image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph(args.model)
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-3:][::-1] # Getting top 3 predictions
        with open(args.labels, 'r') as f:
            lines = f.readlines()
        labels = [w.strip() for w in lines]
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
answer = labels[top_k[0]]
return answer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image',
type=str,
help='imagePath',
required=True
)
parser.add_argument(
'--model',
type=str,
help='modelFullPath',
required=True
)
parser.add_argument(
'--labels',
type=str,
help='labelsFullPath',
required=True
)
args = parser.parse_args()
run_inference_on_image(args)
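# Example invocation (file names here are placeholders, not taken from the repo):
#   python label_image.py --image sample.jpg --model output_graph.pb --labels output_labels.txt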
```
#### File: dark-classifier/util/move_image.py
```python
import datetime
import os
import shutil
root = "/home/shuai/face/"
today = datetime.date.today().strftime('%Y-%m-%d')
root_training_images_today = root + today + "-training-images/"
root_training_images = root + "training-images/"
root_aligned_images_today = root + today + "-aligned-images/"
root_aligned_images = root + "aligned-images/"
def main(args):
ids = [f for f in os.listdir(args[0]) if not f.startswith('.')]
for actress_id in ids:
dir_from = os.path.join(args[0], actress_id)
dir_to = os.path.join(args[1], actress_id)
if not os.path.exists(dir_to):
os.makedirs(dir_to)
files = [f for f in os.listdir(dir_from) if not f.startswith('.')]
for file in files:
file_from = os.path.join(dir_from, file)
file_to = os.path.join(dir_to, file)
if not os.path.isfile(file_to):
shutil.move(file_from, file_to)
print file_from
print file_to
if __name__ == '__main__':
if os.path.exists(root_training_images_today):
main([root_training_images_today, root_training_images])
shutil.rmtree(root_training_images_today)
if os.path.exists(root_aligned_images_today):
main([root_aligned_images_today, root_aligned_images])
shutil.rmtree(root_aligned_images_today)
``` |
{
"source": "jimlloyd/siteswap-states",
"score": 3
} |
#### File: jimlloyd/siteswap-states/states.py
```python
import sys
from optparse import OptionParser
empty = '-'
inhand = 'x'
def Quoted(s):
"""Return the string s within quotes."""
return '"' + str(s) + '"'
def PatternStrToList(pattern):
"""Return a list of integers for the given pattern string.
PatternStrToList('531') -> [5, 3, 1]
"""
return [ord(p)-ord('0') for p in pattern]
def PatternListToStr(pattern):
"""Return a pattern string for the given list of integers.
PatternListToStr([5,3,1]) -> '531'
"""
return ''.join([chr(p) for p in pattern])
class SiteSwapStates:
"""A class that contains a state space graph for siteswap juggling for a given number
of balls and a maximum throw height."""
def __init__(self, options):
"""The primary data structure is the states dictionary. The keys of the states dict are state strings,
e.g. 'xxx--'. The value for any such key is a dictionary that stores all valid transitions
from the key state. Using 3 balls with max throw of 5, the base state is 'xxx--'. self.states['xxx--']
is a dictionary containing the two valid transitions: {4: 'xx-x-', 5: 'xx--x'}."""
self.nBalls = options.nBalls
self.maxThrow = options.maxThrow
self.states = {}
self.ComputeStates()
def BaseState(self):
"""Return the base state for the given nBalls and maxThrow.
E.g. for 3 balls max throw 5 the base state is 'xxx--'."""
return inhand*self.nBalls + empty*(self.maxThrow-self.nBalls)
def ComputeStates(self):
"""Compute the entire state space graph"""
todo = set() # The set of states for which transitions have not yet been calculated.
done = set() # The set of states for which transitions have already been calculated.
# start with the base state, although we could start with any valid state.
todo.add(self.BaseState())
while todo:
state = todo.pop()
done.add(state)
assert state not in self.states
transitions = {}
self.states[state] = transitions
now = state[0]
assert now==inhand or now==empty
canthrow = now==inhand
newstate = state[1:] + empty
if not canthrow:
transitions[0] = newstate
else:
for i in range(self.maxThrow):
if newstate[i] == empty:
t = list(newstate)
t[i] = inhand
s = ''.join(t)
transitions[i+1] = s
if s not in done:
todo.add(s)
def PrintStates(self):
for state in reversed(sorted(self.states.keys())):
transitions = self.states[state]
print "Transitions for", state, "are:", transitions
def PrintDot(self):
print "Digraph states {"
print "rankdir=LR;"
for fromState in reversed(sorted(self.states.keys())):
transitions = self.states[fromState]
for throw in sorted(transitions.keys()):
fromLabel = Quoted(fromState)
toLabel = Quoted(transitions[throw])
edgeLabel = Quoted(throw)
print "%s -> %s [label=%s]" % (fromLabel, toLabel, edgeLabel)
print "}"
parser = OptionParser()
parser.add_option("-b", "--balls", dest="nBalls", type="int", default=3, help="Number of balls being juggled")
parser.add_option("-m", "--maxThrow", dest="maxThrow", type="int", default=5, help="Maximum throw height")
(options, args) = parser.parse_args()
siteSwapStates = SiteSwapStates(options)
siteSwapStates.PrintDot()
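# Example invocation (illustrative; rendering the graph assumes Graphviz `dot` is installed):
#   python states.py --balls 3 --maxThrow 5 > states.dot
#   dot -Tpng states.dot -o states.png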
``` |
{
"source": "jimlmq/DyeVim",
"score": 2
} |
#### File: python/dye/dyevim.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from .buffer import Buffer
from .window import Window
from .utils import log
from .utils.dict import Dict
from collections import defaultdict
import vim
DV_SUPPORTED_FILETYPES = set( [ 'c', 'cpp' ] )
DV_UNIQUE_WID_VAR = 'DyeVimUniqueWId'
class DyeVim( object ):
def __init__( self, ycm ):
#log.InitLogging('debug')
ycm.RegisterFileParseReadyCallback( self.OnSemanticTokensReady )
self._ycm = ycm
self._buffers = Dict( lambda bufnr: Buffer( bufnr, self._ycm ) )
self._windows = Dict( lambda wid: Window( wid,
self._GetWIdBuffer( wid ) ) )
self._windowBuffer = defaultdict( int )
self._initialized_filetypes = set()
self._nextUniqueWId = 1
def OnSemanticTokensReady( self, bufnr ):
if not self._IsFileTypeSupported():
return
log.info( 'OnSemanticTokensReady buffer: {0}'.format( bufnr ) )
if vim.current.buffer.number != bufnr:
return
self._windows[ self._GetCurrentWId() ].OnUpdateTokens()
def OnCursorMoved( self ):
if self._IsFileTypeSupported():
self._windows[ self._GetCurrentWId() ].OnCursorMoved()
def OnWindowEnter( self ):
self._SetCurrentWId()
def OnBufferEnter( self ):
wid = self._GetCurrentWId()
if not self._IsFileTypeSupported():
self._windows[ wid ].ClearWindow()
return
self._InitializeCurrentFiletypeIfNeeded()
bnr = vim.current.buffer.number
if self._windowBuffer[ wid ] != bnr:
self._windowBuffer[ wid ] = bnr
self._windows[ wid ].OnBufferChanged( self._buffers[ bnr ] )
def OnFileTypeChanged( self ):
wid = self._GetCurrentWId()
if not self._IsFileTypeSupported():
self._windows[ wid ].ClearWindow()
return
self._InitializeCurrentFiletypeIfNeeded()
wid = self._GetCurrentWId()
bnr = vim.current.buffer.number
self._windows[ wid ].OnBufferChanged( self._buffers[ bnr ] )
def _InitializeCurrentFiletypeIfNeeded( self ):
ft = vim.current.buffer.options[ 'filetype' ]
if not isinstance(ft, str):
ft = str(ft, encoding='utf-8')
if ft not in self._initialized_filetypes:
try:
vim.command('call dyevim#ft#' + ft + '#Setup()')
except:
pass
self._initialized_filetypes.add( ft )
def _IsFileTypeSupported( self ):
ft = vim.current.buffer.options[ 'filetype' ]
if not isinstance(ft, str):
ft = str(ft, encoding='utf-8')
return ft in DV_SUPPORTED_FILETYPES
def _GetCurrentWId( self ):
try:
return vim.current.window.vars[ DV_UNIQUE_WID_VAR ]
except:
return 0
def _SetCurrentWId( self ):
        if DV_UNIQUE_WID_VAR not in vim.current.window.vars:
wid = self._nextUniqueWId
vim.current.window.vars[ DV_UNIQUE_WID_VAR ] = self._nextUniqueWId
self._nextUniqueWId += 1
if not self._IsFileTypeSupported():
return
self._InitializeCurrentFiletypeIfNeeded()
bnr = vim.current.buffer.number
if self._windowBuffer[ wid ] != bnr:
self._windowBuffer[ wid ] = bnr
self._windows[ wid ].OnBufferChanged( self._buffers[ bnr ] )
def _GetWIdBuffer( self, wid ):
return self._buffers[ self._windowBuffer[ wid ] ]
```
#### File: dye/tests/interval_set_test.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from dye.interval.interval import Interval
from dye.interval.interval_set import IntervalSet
from nose.tools import eq_, ok_, raises
def String_test():
    # 100% coverage baby
eq_( "%s" % IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ) ),
"([1, 10], [20, 30])" )
def Construction_test():
eq_( IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ) ),
IntervalSet( Interval( 20, 30 ), Interval( 1, 10 ) ) )
def Contains_test():
s = IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ), Interval( 40, 50 ) )
ok_( Interval( 1, 10 ) in s )
ok_( Interval( 25, 27 ) in s )
ok_( Interval( 40, 48 ) in s )
ok_( Interval( 8, 11 ) not in s )
ok_( Interval( 11, 15 ) not in s )
ok_( Interval( 5, 25 ) not in s )
ok_( Interval( 25, 45 ) not in s )
ok_( Interval( 15, 35 ) not in s )
ok_( Interval( 0, 55 ) not in s )
ok_( 1 in s )
ok_( 30 in s )
ok_( 45 in s )
ok_( 0 not in s )
ok_( 11 not in s )
ok_( 35 not in s )
ok_( 39 not in s )
ok_( 55 not in s )
def Union_test():
s = IntervalSet()
s |= Interval( 1, 10 )
eq_( s, IntervalSet( Interval( 1, 10 ) ) )
s |= Interval( 20, 30 )
eq_( s, IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ) ) )
s |= Interval( 40, 50 )
eq_( s, IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ),
Interval( 40, 50 ) ) )
eq_( s | Interval( 11, 19 ), IntervalSet( Interval( 1, 30 ),
Interval( 40, 50 ) ) )
eq_( s | Interval( 11, 15 ), IntervalSet( Interval( 1, 15 ),
Interval( 20, 30 ),
Interval( 40, 50 ) ) )
eq_( s | Interval( 32, 39 ), IntervalSet( Interval( 1, 10 ),
Interval( 20, 30 ),
Interval( 32, 50 ) ) )
eq_( s | Interval( 32, 38 ), IntervalSet( Interval( 1, 10 ),
Interval( 20, 30 ),
Interval( 32, 38 ),
Interval( 40, 50 ) ) )
eq_( s | Interval(15, 35), IntervalSet( Interval( 1, 10 ),
Interval( 15, 35 ),
Interval( 40, 50 ) ) )
eq_( s | Interval(5, 45), IntervalSet( Interval( 1, 50 ) ) )
eq_( s | IntervalSet( Interval( 11, 19 ), Interval( 35, 39 ) ),
IntervalSet( Interval( 1, 30 ), Interval( 35, 50 ) ) )
def Intersect_test():
s = IntervalSet()
eq_( s & Interval( 1, 10 ), IntervalSet() )
s = IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ), Interval( 40, 50 ) )
eq_( s & Interval( 11, 19 ), IntervalSet() )
eq_( s & Interval( 5, 15 ), IntervalSet( Interval( 5, 10 ) ) )
eq_( s & Interval( 5, 25 ), IntervalSet( Interval( 5, 10 ),
Interval( 20, 25 ) ) )
eq_( s & Interval( 0, 51 ), s )
def Subtract_test():
s = IntervalSet( Interval( 1, 10 ), Interval( 20, 30 ), Interval( 40, 50 ) )
eq_( s - Interval( 1, 50 ), IntervalSet() )
eq_( s - Interval( 1, 9 ), IntervalSet( Interval( 10, 10 ),
Interval( 20, 30 ),
Interval( 40, 50 ) ) )
eq_( s - Interval( 25, 25 ), IntervalSet( Interval( 1, 10 ),
Interval( 20, 24 ),
Interval( 26, 30 ),
Interval( 40, 50 ) ) )
eq_( s - Interval( 12, 18 ), IntervalSet( Interval( 1, 10 ),
Interval( 20, 30 ),
Interval( 40, 50 ) ) )
def BestQueryRange_test():
s = IntervalSet()
r = s.GetIntervalForQuery( Interval( 30, 40 ), 10 )
eq_( r, Interval( 30, 40 ) )
s |= r # [ [30, 40] ]
r = s.GetIntervalForQuery( Interval( 29, 29 ), 10 )
eq_( r, Interval( 20, 29 ) )
s |= r # [ [20, 40] ]
r = s.GetIntervalForQuery( Interval( 31, 41 ), 10 )
eq_( r, Interval( 41, 50 ) )
s |= r # [ [20, 50] ]
r = s.GetIntervalForQuery( Interval( 60, 60 ), 10 )
eq_( r, Interval( 60, 69 ) )
s |= r # [ [20, 50], [60, 69] ]
r = s.GetIntervalForQuery( Interval( 59, 68 ), 10 )
eq_( r, Interval( 51, 59 ) )
s |= r # [ [20, 69] ]
r = s.GetIntervalForQuery( Interval( 85, 100 ), 10 )
eq_( r, Interval( 85, 100 ) )
s |= r # [ [20, 69], [85, 100] ]
r = s.GetIntervalForQuery( Interval( 61, 70 ), 10 )
eq_( r, Interval( 70, 79 ) )
# [ [20, 69], [85, 100] ]
r = s.GetIntervalForQuery( Interval( 83, 92 ), 10 )
eq_( r, Interval( 75, 84 ) )
# [ [20, 69], [85, 100] ]
r = s.GetIntervalForQuery( Interval( 72, 81 ), 10 )
eq_( r, Interval( 72, 81 ) )
s |= r # [ [20, 69], [72, 81], [85, 100] ]
r = s.GetIntervalForQuery( Interval( 105, 106 ), 10 )
eq_( r, Interval( 105, 114 ) )
s |= r # [ [20, 69], [72, 81], [85, 100], [105, 114] ]
r = s.GetIntervalForQuery( Interval( 70, 104 ), 10 )
eq_( r, IntervalSet( Interval( 70, 71 ), Interval( 82, 84 ),
Interval( 101, 104 ) ) )
s |= r # [20, 114]
eq_( s, IntervalSet( Interval( 20, 114 ) ) )
s = IntervalSet()
r = s.GetIntervalForQuery( Interval( 9, 9 ), 20 )
eq_( r, Interval( 9, 28 ) )
s |= r # [9, 28]
r = s.GetIntervalForQuery( Interval( 8, 8 ), 20 )
eq_( r, Interval( 1, 8 ) )
s |= r # [1, 28]
r = s.GetIntervalForQuery( Interval( 35, 40 ), 10 )
eq_( r, Interval( 35, 44 ) )
s |= r # [ [1, 28], [35, 44] ]
r = s.GetIntervalForQuery( Interval( 30, 31 ), 10 )
eq_( r, Interval( 29, 34 ) )
r = s.GetIntervalForQuery( Interval( 26, 31 ), 10 )
eq_( r, Interval( 29, 34 ) )
s |= r # [1, 44]
eq_( s, IntervalSet( Interval( 1, 44 ) ) )
@raises(TypeError)
def ContainsError_test():
IntervalSet( Interval( 2, 8 ) ) in IntervalSet( Interval( 1, 10 ) )
@raises(TypeError)
def IntersectError_test():
IntervalSet( Interval( 2, 8 ) ) & IntervalSet( Interval( 1, 10 ) )
@raises(TypeError)
def SubtractError_test():
IntervalSet( Interval( 1, 10 ) ) - IntervalSet( Interval( 2, 8 ) )
```
#### File: dye/utils/log.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import logging
def InitLogging( level = None ):
logging_level = None
if level == 'info':
logging_level = logging.INFO
elif level == 'debug':
logging_level = logging.DEBUG
if not logging_level:
return
global info, debug
info = _info
if logging_level == logging.DEBUG:
debug = _debug
logger = logging.getLogger( 'dyevim' )
logger.setLevel( logging_level )
fh = logging.FileHandler( 'dyevim.log' )
fh.setLevel( logging_level )
logger.addHandler( fh )
def debug( *args ):
pass
def info( *args ):
pass
def _get():
return logging.getLogger( 'dyevim' )
def _debug( *args ):
_get().debug( *args )
def _info( *args ):
_get().info( *args )
```
#### File: dye/utils/vimsupport.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import vim
def GetCurrentBufferNumber():
return vim.current.buffer.number
def BufNumberToName( bufnr ):
return vim.eval( 'bufname({0})'.format( bufnr ) )
def GetCurrentTopLine():
return int( vim.eval( 'line("w0")' ) )
def GetCurrentBottomLine():
return int( vim.eval( 'line("w$")' ) )
def GetCurrentWindowHeight():
return int( vim.current.window.height )
def GetFileType( bufnr ):
return vim.buffers[ bufnr ].options[ 'filetype' ]
def GetBufferLen( bufnr ):
return len( vim.buffers[ bufnr ] )
def GetLineLen( bufnr, line ):
# line index is 1 based, but vim python interface is 0 based
return len( vim.buffers[ bufnr ][ line - 1 ] )
def GetIntValue( name ):
return int( vim.eval( name ) )
def PostVimWarning( message ):
# Displaying a new message while previous ones are still on the status line
# might lead to a hit-enter prompt or the message appearing without a
# newline so we do a redraw first.
vim.command( 'redraw' )
vim.command( 'echohl WarningMsg' )
vim.command( "echom '{0}'".format( message ) )
vim.command( 'echohl None' )
``` |
{
"source": "Jim-Luo/MyQQRobot2",
"score": 3
} |
#### File: Jim-Luo/MyQQRobot2/learn.py
```python
import re
import time
learnList={
}
def learnProcess(bot, contact, content):
    for label, reply in learnList.items():
        if re.search(label, content):
            for term in reply:
bot.SendTo(contact,term)
time.sleep(0.5)
def learnListAppend(bot, contact, content):
try:
contentList=content.split(' ')
rawContentList = contentList[2:]
learnList[contentList[1]] = rawContentList
except:
bot.SendTo(contact,'额,你说啥?')
else:
bot.SendTo(contact,'学习了')
``` |
{
"source": "JimLusk/python-algorithms",
"score": 4
} |
#### File: JimLusk/python-algorithms/array-product-sans-current.py
```python
originalNumberList = [1, 2, 3, 4, 5]
resultNumberList = []
def construct_array_product_sans_current():
#calculate product of all numbers in given set
for i in range(len(originalNumberList)) :
if i==0:
result = originalNumberList[i]
else:
result=result * originalNumberList[i]
for j in range(len(originalNumberList)) :
resultNumberList.insert(j, result/originalNumberList[j])
return resultNumberList
print("Original Number List: " + str(originalNumberList))
print("Result Number List: " + str(construct_array_product_sans_current()))
``` |
{
"source": "JimMadge/Advent-of-Code-2020",
"score": 4
} |
#### File: Advent-of-Code-2020/adventofcode/day02.py
```python
import re
regex = re.compile(r"(\d+)-(\d+) ([a-z]): ([a-z]+)")
def parse_line(line):
match = re.match(regex, line)
if match:
return (int(match[1]), int(match[2]), match[3], match[4])
return None
def valid_password1(minimum, maximum, character, password):
return minimum <= password.count(character) <= maximum
def valid_password2(first_pos, second_pos, character, password):
a = password[first_pos-1] == character
b = password[second_pos-1] == character
return (a and not b) or (b and not a)
def count_valid_passwords(validator, lines):
return sum([validator(*parse_line(line)) for line in lines])
```
#### File: Advent-of-Code-2020/adventofcode/day04.py
```python
import re
required_fields = [
"byr",
"iyr",
"eyr",
"hgt",
"hcl",
"ecl",
"pid",
# "cid",
]
def process_passports(passports):
# Split each key:value pair by ":" and create a dictionary for each
# passport
return [
dict(item.split(":") for item in passport.split())
for passport in passports.split("\n\n")
]
def valid_passport(passport):
return all([key in passport.keys() for key in required_fields])
def count_valid_passports(passports):
return sum(
[valid_passport(passport) for passport in passports]
)
height_range = {
"cm": (150, 193),
"in": (59, 76)
}
hair_colour = re.compile(r"#[0-9A-Fa-f]{6}")
eye_colours = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
passport_number = re.compile(r"\d{9}")
def valid_passport2(passport):
if not all([key in passport.keys() for key in required_fields]):
return False
if not 1920 <= int(passport["byr"]) <= 2002:
return False
if not 2010 <= int(passport["iyr"]) <= 2020:
return False
if not 2020 <= int(passport["eyr"]) <= 2030:
return False
if (height_unit := passport["hgt"][-2:]) in ["cm", "in"]:
if not (height_range[height_unit][0] <= int(passport["hgt"][:-2])
<= height_range[height_unit][1]):
return False
else:
return False
if not hair_colour.fullmatch(passport["hcl"]):
return False
if not passport["ecl"] in eye_colours:
return False
if not passport_number.fullmatch(passport["pid"]):
return False
return True
def count_valid_passports2(passports):
return sum(
[valid_passport2(passport) for passport in passports]
)
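if __name__ == "__main__":
    # Hand-made illustrative record (not from the puzzle input); it satisfies
    # every rule checked by valid_passport2 above.
    sample = "byr:1980 iyr:2015 eyr:2025 hgt:180cm hcl:#123abc ecl:brn pid:000000001"
    print(count_valid_passports2(process_passports(sample)))  # expected: 1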
```
#### File: Advent-of-Code-2020/adventofcode/day10.py
```python
from collections import Counter
from functools import cache
def pick_order(adapters):
# Add outlet and device
return sorted(adapters[:] + [0] + [max(adapters) + 3])
def count_jumps(adapters):
jumps = Counter()
for i in range(len(adapters)-1):
difference = abs(adapters[i] - adapters[i+1])
jumps[difference] += 1
return jumps
def jump_product(adapters):
jumps = count_jumps(adapters)
return jumps[1] * jumps[3]
def count_routes(adapters):
adapters = [0] + sorted(adapters)
@cache
def routes(index):
# If we have reached the end this is the only route
if index == len(adapters) - 1:
return 1
return sum(
routes(i) for i in range(index+1, min(index+4, len(adapters)))
if adapters[i] - adapters[index] <= 3
)
return routes(0)
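if __name__ == "__main__":
    # Illustrative check, not part of the original module, using the small
    # sample adapter list from the puzzle description.
    sample = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
    print(jump_product(pick_order(sample)))  # expected: 35 (7 one-jolt * 5 three-jolt steps)
    print(count_routes(sample))              # expected: 8 distinct arrangements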
```
#### File: Advent-of-Code-2020/adventofcode/day11.py
```python
from itertools import chain
class Seat(object):
def __init__(self, occupied=False):
self.occupied = occupied
self.neighbours = []
self.pending_flip = False
def flip(self):
self.occupied = not self.occupied
self.pending_flip = False
def occupied_neighbours(self):
return sum(neighbour.occupied for neighbour in self.neighbours)
def read_plan(plan):
d = {
'L': Seat,
'.': lambda: None,
}
seats = []
for line in plan.splitlines():
seats.append(
[d[char]() for char in line]
)
return seats
displacements = [
(0, 1),
(1, 1),
(1, 0),
(1, -1),
(0, -1),
(-1, -1),
(-1, 0),
(-1, 1),
]
def create_neighbours(seats):
height = len(seats)
width = len(seats[0])
for i in range(height):
for j in range(width):
# Skip empty space
if seats[i][j] is None:
continue
# Try each neighbour
for displacement in displacements:
ni = i + displacement[0]
nj = j + displacement[1]
# Skip if neighbouring tile is out of bounds
if ni >= height:
continue
if ni < 0:
continue
if nj >= width:
continue
if nj < 0:
continue
# Only add if neighbouring tile is a seat (not None)
if (neighbour := seats[ni][nj]):
seats[i][j].neighbours.append(neighbour)
def create_neighbours2(seats):
height = len(seats)
width = len(seats[0])
for i in range(height):
for j in range(width):
# Skip empty space
if seats[i][j] is None:
continue
# Try each direction
for displacement in displacements:
# Look further until a neighbour is found
multiplier = 1
while True:
ni = i + displacement[0]*multiplier
nj = j + displacement[1]*multiplier
# End if neighbouring tile is out of bounds
if ni >= height:
break
if ni < 0:
break
if nj >= width:
break
if nj < 0:
break
# Only add if neighbouring tile is a seat (not None)
if (neighbour := seats[ni][nj]):
seats[i][j].neighbours.append(neighbour)
break
multiplier += 1
def step(seats, visible=4):
flipped = 0
for seat in chain.from_iterable(seats):
if seat is None:
continue
if seat.occupied:
if seat.occupied_neighbours() >= visible:
seat.pending_flip = True
else:
if seat.occupied_neighbours() == 0:
seat.pending_flip = True
for seat in chain.from_iterable(seats):
if seat is None:
continue
if seat.pending_flip:
seat.flip()
flipped += 1
return flipped
def optimise(seats, visible=4):
flipped = 1
while flipped != 0:
flipped = step(seats, visible)
def count_occupied(seats):
count = 0
for seat in chain.from_iterable(seats):
if seat is None:
continue
if seat.occupied:
count += 1
return count
def settle(plan):
seats = read_plan(plan)
create_neighbours(seats)
optimise(seats)
return count_occupied(seats)
def settle2(plan):
seats = read_plan(plan)
create_neighbours2(seats)
optimise(seats, 5)
return count_occupied(seats)
```
#### File: Advent-of-Code-2020/adventofcode/day12.py
```python
from enum import Enum, auto
class Direction(Enum):
EAST = auto()
NORTH = auto()
WEST = auto()
SOUTH = auto()
def translate(pos, direction, distance):
if direction == "N":
pos = (pos[0]+distance, pos[1])
elif direction == "S":
pos = (pos[0]-distance, pos[1])
elif direction == "E":
pos = (pos[0], pos[1]+distance)
elif direction == "W":
pos = (pos[0], pos[1]-distance)
return pos
def turn(heading, direction, degrees):
turns = {
"L": {
Direction.EAST: Direction.NORTH,
Direction.NORTH: Direction.WEST,
Direction.WEST: Direction.SOUTH,
Direction.SOUTH: Direction.EAST
},
"R": {
Direction.EAST: Direction.SOUTH,
Direction.SOUTH: Direction.WEST,
Direction.WEST: Direction.NORTH,
Direction.NORTH: Direction.EAST
}
}
n_turns = degrees // 90
for turn in range(n_turns):
heading = turns[direction][heading]
return heading
def forward(pos, heading, distance):
if heading == Direction.EAST:
pos = (pos[0], pos[1]+distance)
elif heading == Direction.NORTH:
pos = (pos[0]+distance, pos[1])
elif heading == Direction.WEST:
pos = (pos[0], pos[1]-distance)
elif heading == Direction.SOUTH:
pos = (pos[0]-distance, pos[1])
return pos
def update_position(instruction, pos, heading):
action, arg = instruction[0], int(instruction[1:])
if action in ["N", "S", "E", "W"]:
pos = translate(pos, action, arg)
elif action in ["L", "R"]:
heading = turn(heading, action, arg)
elif action == "F":
pos = forward(pos, heading, arg)
return pos, heading
def follow_route(route):
pos = (0, 0)
heading = Direction.EAST
for instruction in route.splitlines():
pos, heading = update_position(instruction, pos, heading)
return pos, heading
def manhattan_distance(pos):
return abs(pos[0]) + abs(pos[1])
def rotate(waypoint, direction, degrees):
n_turns = degrees // 90
for i in range(n_turns):
if direction == "L":
waypoint = (waypoint[1], -waypoint[0])
if direction == "R":
waypoint = (-waypoint[1], waypoint[0])
return waypoint
def forward2(pos, waypoint, distance):
return (pos[0]+waypoint[0]*distance, pos[1]+waypoint[1]*distance)
def update_position2(instruction, pos, waypoint):
action, arg = instruction[0], int(instruction[1:])
if action in ["N", "S", "E", "W"]:
waypoint = translate(waypoint, action, arg)
elif action in ["L", "R"]:
waypoint = rotate(waypoint, action, arg)
elif action == "F":
pos = forward2(pos, waypoint, arg)
return pos, waypoint
def follow_route2(route):
pos = (0, 0)
waypoint = (1, 10)
for instruction in route.splitlines():
pos, waypoint = update_position2(instruction, pos, waypoint)
return pos
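if __name__ == "__main__":
    # Illustrative check, not part of the original module, using the short
    # sample route from the puzzle description.
    sample_route = "F10\nN3\nF7\nR90\nF11"
    print(manhattan_distance(follow_route(sample_route)[0]))  # expected: 25
    print(manhattan_distance(follow_route2(sample_route)))    # expected: 286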
```
#### File: Advent-of-Code-2020/adventofcode/day15.py
```python
def memory_game(starting_numbers, end_turn=2020):
    # Keep a log of the turn on which each number was most recently said
numbers = dict()
# Initialise using starting numbers except for the last
numbers = {number: turn
for turn, number in enumerate(starting_numbers[:-1], start=1)}
# Play the game
last_number = starting_numbers[-1]
for turn in range(len(starting_numbers)+1, end_turn+1):
if last_number in numbers.keys():
# Current number is difference between last turn and previous time
# last number was said
current_number = (turn - 1) - numbers[last_number]
else:
# First time number was said, current number is 0
current_number = 0
numbers[last_number] = turn - 1
last_number = current_number
return current_number
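if __name__ == "__main__":
    # Illustrative check, not part of the original module: the puzzle's sample
    # starting numbers 0, 3, 6 are expected to produce 436 on turn 2020.
    print(memory_game([0, 3, 6]))  # expected: 436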
```
#### File: adventofcode/tests/test_day02.py
```python
from ..day02 import (parse_line, valid_password1, valid_password2,
count_valid_passwords)
import pytest
test_lines = [
"1-3 a: abcde",
"1-3 b: cdefg",
"2-9 c: ccccccccc"
]
def test_parse_line():
assert parse_line("1-3 a: abcde") == (1, 3, "a", "abcde")
test_data = zip(test_lines, [True, False, True])
@pytest.mark.parametrize("test_line,result", test_data)
def test_valid_password1(test_line, result):
assert valid_password1(*parse_line(test_line)) == result
test_data = zip(test_lines, [True, False, False])
@pytest.mark.parametrize("test_line,result", test_data)
def test_valid_password2(test_line, result):
assert valid_password2(*parse_line(test_line)) == result
def test_count_valid_passwords():
assert count_valid_passwords(valid_password1, test_lines) == 2
def test_count_valid_passwords2():
assert count_valid_passwords(valid_password2, test_lines) == 1
```
#### File: adventofcode/tests/test_day06.py
```python
from ..day06 import count_any_affirmative, count_all_affirmative
from textwrap import dedent
test_data = dedent("""\
abc

a
b
c

ab
ac

a
a
a
a

b""")
def test_count_any_affirmative():
assert count_any_affirmative(test_data.split("\n\n")) == 11
def test_count_all_affirmative():
assert count_all_affirmative(test_data.split("\n\n")) == 6
``` |
{
"source": "JimMadge/aws-fpga",
"score": 2
} |
#### File: tests/simulation_tests/test_sims.py
```python
from __future__ import print_function
import logging
import os
from os.path import dirname, realpath
import pytest
import subprocess
import sys
import traceback
import re
try:
import aws_fpga_utils
import aws_fpga_test_utils
from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase
except ImportError as e:
traceback.print_tb(sys.exc_info()[2])
print("error: {}\nMake sure to source hdk_setup.sh".format(sys.exc_info()[1]))
sys.exit(1)
logger = aws_fpga_utils.get_logger(__name__)
class TestSims(AwsFpgaTestBase):
"""
Pytest test class.
NOTE: Cannot have an __init__ method.
"""
ADD_SIMULATOR = True
ADD_BATCH = True
@classmethod
def setup_class(cls):
"""
Do any setup required for tests.
"""
AwsFpgaTestBase.setup_class(cls, __file__)
AwsFpgaTestBase.assert_hdk_setup()
cls.RUN_SIM_SCRIPT = dirname(realpath(__file__)) + "/run_sim.sh"
assert os.path.exists(cls.RUN_SIM_SCRIPT)
cls.set_simulation_error_signatures()
cls.set_simulation_pass_signatures()
return
@classmethod
def set_simulation_error_signatures(cls):
"""
Adding compiled errors
"""
cls.failure_messages = [
r'.*\*{0,3}\s*ERROR\s*\*{0,3}',
r'.*\*{0,3}\s*TEST[\s_-]{0,2}FAILED\s*\*{0,3}.*',
r'.*Detected\s*[1-9]\d*\s*error[s]?.*'
]
cls.compiled_failure_messages = []
for failure_message in cls.failure_messages:
cls.compiled_failure_messages.append(re.compile(failure_message))
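        # For illustration (lines invented here, not taken from real simulator logs):
        # output such as "*** ERROR *** data mismatch", "TEST FAILED" or "Detected 3 errors"
        # would match the failure signatures above, while "TEST PASSED" matches the pass
        # signatures defined below.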
@classmethod
def set_simulation_pass_signatures(cls):
"""
Adding compiled pass signatures
"""
cls.pass_messages = [
r'.*[\*\!]{0,3}\s*TEST[\s_-]{0,2}PASSED\s*[\*\!]{0,3}.*',
]
cls.compiled_pass_messages = []
for pass_message in cls.pass_messages:
cls.compiled_pass_messages.append(re.compile(pass_message))
@classmethod
def parse_simulation_output(cls, test_name, test_type, test_stdout, test_stderr):
"""
        Parse stdout and stderr and see if the test had any fail signatures.
        Also check for pass signatures; a run with no pass signature is treated as a failure.
"""
failure_messages = []
pass_messages = []
# Check failures
for stdout_line in test_stdout:
for fail_regex in cls.compiled_failure_messages:
if fail_regex.match(stdout_line):
failure_messages.append(stdout_line)
# Check passes
for stdout_line in test_stdout:
for pass_regex in cls.compiled_pass_messages:
if pass_regex.match(stdout_line):
pass_messages.append(stdout_line)
return_dict = {
"passes": pass_messages,
"fails": failure_messages
}
return return_dict
def run_sim(self, test_dir="", test_name="", test_type="", simulator="", batch=""):
vivado_version = os.environ.get('VIVADO_TOOL_VERSION', 'unknown')
# Error on defaults
if not(test_dir and test_name and test_type):
self.fail("Please enter non empty test_dir, test_name and test_type when calling run_sim")
command_line = [self.RUN_SIM_SCRIPT,
'--test-name', test_name,
'--test-dir', test_dir,
'--test-type', test_type,
'--simulator', simulator,
'--batch', batch,
'--vivado-version', vivado_version
]
(rc, stdout_lines, stderr_lines) = self.run_cmd(" ".join(command_line))
# write simulation output
if simulator == "vivado":
simulator_version = "{}_{}".format(simulator, vivado_version)
else:
simulator_version = simulator
stdout_file_name = "{}/{}_{}_{}.stdout.sim.log".format(test_dir, test_name, test_type, simulator_version)
with open(stdout_file_name, 'w') as f:
for item in stdout_lines:
f.write("%s\n" % item)
# Only write if there is something to write
if stderr_lines:
stderr_file_name = "{}/{}_{}_{}.stderr.sim.log".format(test_dir, test_name, test_type, simulator_version)
with open(stderr_file_name, 'w') as f:
for item in stderr_lines:
f.write("%s\n" % item)
# Check exit code
assert rc == 0, "Sim failed. Received Non-Zero return code"
return_dict = self.parse_simulation_output(test_name=test_name,
test_type=test_type,
test_stdout=stdout_lines,
test_stderr=stderr_lines)
# Check for fail signatures
assert [] == return_dict["fails"], "Found failures {}".format(return_dict["fails"])
# Check for pass signatures. We need at least one to make the test as a pass
assert [] != return_dict["passes"], "Found no matching pass statements"
# cl_dram_dma sv
def test_cl_dram_dma__dram_dma__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_axi_mstr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_axi_mstr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_4k_crossing__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_4k_crossing'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__host_pcim__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_host_pcim'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__clk_recipe__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_clk_recipe'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__int__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_int'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr_peek_poke__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_poke'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr_peek_bdr_walking_ones__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_bdr_walking_ones'
test_type = 'sv_ddr_bkdr'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_dram_bdr_row_col_combo__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_dram_bdr_row_col_combo'
test_type = 'sv_ddr_bkdr'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_mem_model_bdr_wr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_mem_model_bdr_wr'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_mem_model_bdr_rd__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_mem_model_bdr_rd'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__axi_mstr_multi_rw__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_axi_mstr_multi_rw'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__bar1__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_bar1'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__dram_dma_allgn_addr_4k__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_dram_dma_allgn_addr_4k'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_ddr_peek_bdr_walking_ones__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_bdr_walking_ones'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_uram_example c
def test_cl_uram_example__uram_example__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_uram_example/verif/scripts'
test_name = 'test_uram_example'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_dram_dma c
def test_cl_dram_dma__sda__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_sda'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_hwsw_cosim__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_hwsw_cosim'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world sv
def test_cl_hello_world__hello_world__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_hello_world'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_test_gl_cntr sv
def test_cl_hello_world__gl_cntr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_gl_cntr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world vhdl
def test_cl_vhdl_hello_world__hello_world__vhdl(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world_vhdl/verif/scripts'
test_name = 'test_hello_world'
test_type = 'vhdl'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world c
def test_cl_hello_world__hello_world__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_hello_world'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_sde_c2h sv
def test_cl_sde__test_simple_c2h__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_sde/verif/scripts'
test_name = 'test_simple_c2h'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_sde_h2c sv
def test_cl_sde__test_simple_h2c__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_sde/verif/scripts'
test_name = 'test_simple_h2c'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
```
#### File: SDAccel/tests/test_find_sdaccel_examples.py
```python
from __future__ import print_function
import os
from os.path import dirname, realpath
import json
import sys
import traceback
try:
import aws_fpga_utils
import aws_fpga_test_utils
from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase
except ImportError as e:
traceback.print_tb(sys.exc_info()[2])
print("error: {}\nMake sure to source shared/bin/setup_test_env.sh".format(sys.exc_info()[1]))
sys.exit(1)
logger = aws_fpga_utils.get_logger(__name__)
class TestFindSDAccelExamples(AwsFpgaTestBase):
'''
Pytest test class.
NOTE: Cannot have an __init__ method.
'''
ADD_XILINX_VERSION = True
@classmethod
def setup_class(cls):
'''
Do any setup required for tests.
'''
AwsFpgaTestBase.setup_class(cls, __file__)
return
def test_find_example_makefiles(self, xilinxVersion):
assert os.path.exists(self.xilinx_sdaccel_examples_dir), "The Xilinx SDAccel example dir does not exist: {}".format(self.xilinx_sdaccel_examples_dir)
assert os.listdir(self.xilinx_sdaccel_examples_dir) != [], "Xilinx SDAccel example submodule not cloned or does not exist"
xilinx_examples_makefiles = []
xilinx_sdaccel_example_map = {}
for root, dirs, files in os.walk(self.xilinx_sdaccel_examples_dir):
ignore = False
if os.path.exists(root + "/description.json") and os.path.exists(root + "/Makefile"):
with open(root + "/description.json", "r") as description_file:
description = json.load(description_file)
if "containers" in description:
if len(description["containers"]) > 1:
ignore = True
logger.info("Ignoring {} as >1 containers found in description.json.".format(root))
else:
ignore = True
logger.info("Ignoring {} as no containers found in description.json.".format(root))
continue
if "nboard" in description:
if "xilinx_aws-vu9p-f1-04261818" in description["nboard"]:
ignore = True
logger.info("Ignoring {} as F1 device found in nboard.".format(root))
continue
else:
ignore = True
logger.warn("Ignoring: {} as no Makefile/description.json exist".format(root))
if not ignore:
xilinx_examples_makefiles.append(root)
logger.info("Adding: " + root)
assert len(xilinx_examples_makefiles) != 0, "Could not find any Xilinx SDAccel example in %s" % self.xilinx_sdaccel_examples_dir
# Remove the workspace path so that the next node can reference this path directly
# So we don't face cases like /workspace@3 ..
xilinx_examples_makefiles = [os.path.relpath(full_path, self.WORKSPACE) for full_path in xilinx_examples_makefiles]
for example_path in xilinx_examples_makefiles:
example_test_class = example_path.replace('/', '__').capitalize()
xilinx_sdaccel_example_map[example_test_class] = example_path
with open(self.xilinx_sdaccel_examples_list_file, 'w') as outfile:
json.dump(xilinx_sdaccel_example_map, outfile)
# Also write the archive file
with open(self.xilinx_sdaccel_examples_list_file + "." + xilinxVersion, 'w') as archive_file:
json.dump(xilinx_sdaccel_example_map, archive_file)
        assert os.path.getsize(self.xilinx_sdaccel_examples_list_file) > 0, "%s is an empty file. We need to have some data in the file" % self.xilinx_sdaccel_examples_list_file
```
#### File: sdk/tests/test_py_bindings.py
```python
import os
from os.path import basename, dirname, realpath
import pytest
import sys
import traceback
import requests
import time
import signal
import subprocess
import re
try:
import aws_fpga_utils
import aws_fpga_test_utils
from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase
except ImportError as e:
traceback.print_tb(sys.exc_info()[2])
print "error: {}\nMake sure to source sdk_setup.sh".format(sys.exc_info()[1])
sys.exit(1)
logger = aws_fpga_utils.get_logger(__name__)
class TestPyBindings(AwsFpgaTestBase):
'''
Pytest test class.
NOTE: Cannot have an __init__ method.
Test all example CLs with different strategies and clock recipes.
'''
def setup_class(cls):
'''
Do any setup required for tests.
'''
AwsFpgaTestBase.setup_class(cls, __file__)
AwsFpgaTestBase.assert_sdk_setup()
assert AwsFpgaTestBase.running_on_f1_instance(), "This test must be run on an F1 instance. Running on {}".format(aws_fpga_test_utils.get_instance_type())
(cls.cl_hello_world_agfi, cl_hello_world_afi) = cls.get_agfi_from_readme('cl_hello_world')
def setup_method(self, test_method):
os.putenv('BSWAPPER_AFI', self.cl_hello_world_agfi)
os.putenv('BSWAPPER_SLOT', "0")
os.putenv('BSWAPPER_REG', "0x500")
def teardown_method(self, test_method):
pass
def stop_server(self):
if hasattr(self, 'pid'):
cmd = ['sudo', 'kill', '-2', str(self.pid)]
logger.info("Signalling {}".format(self.pid))
logger.info("using command {}".format(cmd))
subprocess.call(cmd)
def test_flask_app(self):
server_script = os.environ['SDK_DIR'] + "/tests/test_py.sh"
cmd = ['sudo', '-E', server_script]
try:
logger.info("Starting server using {}".format(cmd))
server = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if server.stdout is not None:
server.stdout.flush()
line = server.stdout.readline()
mo = re.match("CHILDPID (\d+)?", line)
if mo is not None:
self.pid = mo.group(1)
logger.info("Server PID: {}".format(self.pid))
except:
            logger.error(traceback.format_exc())
max_retries = 30
retry = 0
while True:
if retry >= max_retries:
logger.info("Exceeded max retries...")
self.stop_server()
sys.exit(1)
# Add sleep before first check as flask server may still be coming up.
time.sleep(1)
try:
r1 = requests.get('http://127.0.0.1:5000/status')
if r1.status_code != 200:
logger.info("Recived response {}".format(r1.status_code))
else:
if r1.text == "FPGA_STATUS_LOADED":
logger.info("Server ready")
break
else:
logger.info("FPGA not loaded with AFI yet.")
except:
logger.info("Exception caught during status check.")
                logger.info(traceback.format_exc())
logger.info("Retry status check...")
retry += 1
payload = { 'input_data' : '0x12345678'}
        r2 = requests.get('http://127.0.0.1:5000/', params=payload)
logger.info("Stopping server")
self.stop_server()
if r2.status_code == 200:
swapped_val = r2.text
logger.info("Swapped value: {}".format(swapped_val))
if swapped_val != "0x78563412":
logger.info("Swapped value not correct!")
sys.exit(2)
else:
logger.info("Status received {}".format(r.status_code))
sys.exit(3)
```
#### File: shared/tests/test_check_src_headers.py
```python
import boto3
import logging
import os
from os.path import basename, dirname, realpath
import pytest
import re
import sys
import traceback
import check_src_headers
import fileprovider
try:
import aws_fpga_test_utils
from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase
import aws_fpga_utils
except ImportError as e:
traceback.print_tb(sys.exc_info()[2])
print "error: {}\nMake sure to source shared/tests/bin/setup_test_env.sh".format(sys.exc_info()[1])
sys.exit(1)
logger = aws_fpga_utils.get_logger(__name__)
class TestCheckSrcHeaders(AwsFpgaTestBase):
'''
Pytest test class.
NOTE: Cannot have an __init__ method.
Create AFI from DCP.
'''
@classmethod
def setup_class(cls):
'''
Do any setup required for tests.
'''
AwsFpgaTestBase.setup_class(cls, __file__)
return
def test_check_src_headers(self):
# fileprovider.logger.setLevel(logging.DEBUG)
# check_project_headers.logger.setLevel(logging.DEBUG)
rc = check_src_headers.check_headers('.')
assert rc == 0
``` |
{
"source": "JimMadge/healthy",
"score": 2
} |
#### File: healthy/healthy/__main__.py
```python
import docker # type: ignore
from docker.models.containers import Container # type: ignore
from typing import Optional
def main() -> None:
client = docker.from_env()
for container in client.containers.list():
health_check(container)
def health_check(container: Container) -> None:
name = container.name
status = get_health_status(container)
if status is None:
status = "no health check"
skip_statuses = ["no health check", "healthy"]
restart_statuses = ["unhealthy"]
if status in skip_statuses:
output(name, status, "skipping")
elif status in restart_statuses:
output(name, status, "restarting")
container.restart()
else:
output(name, status, "skipping")
def get_health_status(container: Container) -> Optional[str]:
state = container.attrs["State"]
if "Health" in state.keys():
return str(state["Health"]["Status"])
else:
return None
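# Illustrative note (assumption based on the Docker Engine inspect output,
# not part of the original module): for a container that defines a
# HEALTHCHECK, container.attrs["State"] looks roughly like
#   {"Status": "running", ..., "Health": {"Status": "healthy", ...}}
# while containers without a HEALTHCHECK have no "Health" key at all,
# which is why get_health_status() returns None for them.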
def output(name: str, status: str, action: str) -> None:
print(f"{name} - {status} - {action}")
if __name__ == "__main__": # pragma no cover
main()
``` |
{
"source": "JimMadge/OpenCL-Reduction-Example",
"score": 2
} |
#### File: OpenCL-Reduction-Example/reduction/reduction.py
```python
import numpy as np
import pathlib
import pyopencl as cl
from pyopencl import mem_flags as mf
kernel_file = pathlib.Path(__file__).parent.absolute()/"reduction.cl"
kernel_source = open(kernel_file).read()
def redsum(array, context, program):
# Create queue
queue = cl.CommandQueue(context)
# Set the work-group size (number of work-items per work-group)
group_size = 256
# Pad an array
a_h = pad(array, group_size)
# Determine number of work-groups (work-items / work-group size)
work_groups = a_h.shape[0]//group_size
# Assign array of sum per work group
p_h = np.zeros(work_groups)
    # Determine memory per work group (total size of array in bytes / number of
    # work groups, or, size of an array element in bytes * work-group size)
local_memory_size = a_h.nbytes//work_groups
# Create buffers
a_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_h)
b_d = cl.LocalMemory(local_memory_size)
p_d = cl.Buffer(context, mf.WRITE_ONLY, p_h.nbytes)
# Call kernel
redsum = program.sum
redsum(queue, a_h.shape, (group_size,), a_d, b_d, p_d)
cl.enqueue_copy(queue, p_h, p_d)
# Sum of residuals
return np.sum(p_h)
def redsum_axis0(array, context, program, group_size=256):
# Create queue
queue = cl.CommandQueue(context)
# Pad array
a_h = pad(array, group_size)
print(a_h.shape)
n_cols = a_h.shape[1]
col_length = a_h.shape[0]
# Determine number of work-groups (work-items / work-group size)
work_groups_per_col = col_length//group_size
# Assign array of sum per work group
p_h = np.zeros((work_groups_per_col, n_cols))
print(p_h.shape)
    # Determine memory per work group (total size of array in bytes / number of
    # work groups, or, size of an array element in bytes * work-group size)
local_memory_size = a_h[:, 0].nbytes//work_groups_per_col
print(local_memory_size)
# Create buffers
a_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_h)
b_d = cl.LocalMemory(local_memory_size)
p_d = cl.Buffer(context, mf.WRITE_ONLY, p_h.nbytes)
# Call kernel
redsum = program.sum_axis0
redsum(queue, a_h.shape, (group_size, 1,), a_d, b_d, p_d)
cl.enqueue_copy(queue, p_h, p_d)
# Sum residuals
print(p_h)
return np.sum(p_h, axis=0)
def redsum_axis1(array, context, program, group_size=256):
# Create queue
queue = cl.CommandQueue(context)
# Pad array
a_h = pad(array, group_size, axis=1)
n_rows = a_h.shape[0]
row_length = a_h.shape[1]
# Determine number of work-groups (work-items / work-group size)
work_groups_per_row = row_length//group_size
# Assign array of sum per work group
p_h = np.zeros((n_rows, work_groups_per_row))
    # Determine memory per work group (total size of array in bytes / number of
    # work groups, or, size of an array element in bytes * work-group size)
local_memory_size = a_h[0].nbytes//work_groups_per_row
# Create buffers
a_d = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_h)
b_d = cl.LocalMemory(local_memory_size)
p_d = cl.Buffer(context, mf.WRITE_ONLY, p_h.nbytes)
# Call kernel
redsum = program.sum_axis1
redsum(queue, a_h.shape, (1, group_size,), a_d, b_d, p_d)
cl.enqueue_copy(queue, p_h, p_d)
# Sum residuals
return np.sum(p_h, axis=1)
def pad(array, group_size, axis=0):
"""
Pad an array with zeros so that it is a multiple of the group size.
:arg array: Array to pad.
:type array: :class:`numpy.ndarray`
:arg int group_size: OpenCL group size.
:arg int axis: The axis to pad with zeros. Default is 0.
:returns: `array` padded with an appropriate number of zeros.
:rtype: :class:`numpy.ndarray`
"""
array_size = array.shape[axis]
remainder = array_size % group_size
if remainder == 0:
return array
else:
padding = group_size - array_size % group_size
padding_shape = list(array.shape)
padding_shape[axis] = padding
return np.concatenate(
(array, np.zeros(padding_shape, dtype=array.dtype)), axis=axis
)
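# Illustrative usage sketch (not part of the original module): assuming an
# OpenCL platform with at least one device is available and that the kernels
# in reduction.cl operate on double-precision values (matching the float64
# partial-sum buffers above), redsum() can be driven the same way the test
# fixtures in reduction/test/conftest.py build their context and program.
if __name__ == "__main__":  # pragma: no cover
    devices = cl.get_platforms()[0].get_devices(cl.device_type.DEFAULT)
    context = cl.Context([devices[0]])
    program = cl.Program(context, kernel_source).build()
    data = np.arange(1000, dtype=np.float64)
    # pad() will extend the 1000 elements to 1024 (4 work-groups of 256)
    print(redsum(data, context, program))  # expected 499500.0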
```
#### File: reduction/test/conftest.py
```python
from .. import kernel_source
import pyopencl as cl
import pytest
@pytest.fixture(scope="session")
def context():
platform = cl.get_platforms()
devices = platform[0].get_devices(cl.device_type.GPU)
if devices == []:
devices = platform[0].get_devices(cl.device_type.DEFAULT)
context = cl.Context([devices[0]])
return context
@pytest.fixture(scope="session")
def program(context):
program = cl.Program(context, kernel_source).build()
return program
``` |
{
"source": "jimmahoney/tron-bot-jim",
"score": 3
} |
#### File: bots/python/MyTronBot.py
```python
import tron, random
def which_move(board):
return random.choice(board.moves())
for board in tron.Board.generate():
tron.move(which_move(board))
```
#### File: bots/python/right_hand.py
```python
import random, tron, utilities
history = utilities.BotHistory()
debug = utilities.LogFile("logs/right_hand", randomizeName=True)
def lookForWall(board):
""" Return a move along a right hand wall, or a random move. """
for direction in tron.DIRECTIONS:
directionString = utilities.direction2string[direction]
debug.log(" looking %s for open with wall right " % directionString)
right = utilities.rightwards[direction]
if board.passable(board.rel(direction)) \
and not board.passable(board.rel(right)):
debug.log(" yup, let's do that.")
return direction
debug.log(" no motion along a wall found; picking randomly.")
return random.choice(board.moves())
def which_move(board):
history.update(board)
debug.log("move %i" % history.length())
if history.length() == 1:
debug.log(" 1st move")
direction = lookForWall(board)
else:
oldDirection = history.lastDirection()
debug.log(" was moving %s" % utilities.direction2string[oldDirection])
direction = utilities.rightwards[oldDirection]
finalTry = utilities.rightwards[direction]
while True:
directionString = utilities.direction2string[direction]
debug.log(" looking at %s ..." % directionString)
if board.passable(board.rel(direction)):
debug.log(" yup, am moving %s" % directionString)
return direction
elif direction == finalTry:
debug.log(" no where to go - moving north to die")
return tron.NORTH
else:
debug.log(" turning left ...")
direction = utilities.leftwards[direction]
# --- main -------------------
for board in tron.Board.generate():
tron.move(which_move(board))
``` |
{
"source": "jimmahoney/tron-engine",
"score": 3
} |
#### File: tron-engine/bots/failbot.py
```python
import tron, random
def which_move(board):
return 'yup'
for board in tron.Board.generate():
tron.move(which_move(board))
```
#### File: tron-engine/bots/fill2.py
```python
import random, tron
########## Find free area around square ##############
def area_finder(board,square_coordinates):
if not board.passable(square_coordinates):
return 0
UNSEEN='u'
SEEN='s'
area=0
#Create map to keep track of which areas have been seen by algorithm
map=[]
for line in xrange(board.height):
map.append([])
for line in map:
for i in xrange(board.width):
line.append(UNSEEN)
#Expand out into adjacent unseen squares
fringe=[square_coordinates]
map[square_coordinates[0]][square_coordinates[1]]=SEEN
while len(fringe) > 0:
area+=1
for sq in board.adjacent(fringe[0]):
if board.passable(sq):
if map[sq[0]][sq[1]]==UNSEEN:
fringe.append(sq)
map[sq[0]][sq[1]]=SEEN
del fringe[0]
return area
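# Illustrative sketch (not part of the original bot): area_finder() is a
# breadth-first flood fill, so any object with .passable() and .adjacent()
# can be used to see it work.  For a hypothetical 3x3 board whose middle
# column is the only open space, counting from the centre gives 3:
#
#   class _TinyBoard(object):
#       _open = {(0, 1), (1, 1), (2, 1)}
#       height, width = 3, 3
#       def passable(self, sq): return sq in self._open
#       def adjacent(self, sq):
#           y, x = sq
#           return [(y - 1, x), (y, x + 1), (y + 1, x), (y, x - 1)]
#
#   area_finder(_TinyBoard(), (1, 1))   # -> 3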
def which_move(board):
#unused test info dump to file
'''
f=open("tronoutput.txt", 'w')
s=''
for line in board.board:
s=s+str(line)+'\n'
f.write(s)
f.close
'''
moves=(1,2,3,4)
# Make move into largest free area or default to north.
best_move=1
best_area = 0
for move in moves:
sq=board.rel(move)
area = area_finder(board,sq)
if area > best_area:
best_area = area
best_move = move
return best_move
# Get board and make move
for board in tron.Board.generate():
tron.move(which_move(board))
```
#### File: tron-engine/bots/kamikazebot.py
```python
import math, tron
def distance(one, two):
# Finds the distance between two points
y1, x1 = one
y2, x2 = two
distance = math.sqrt((y2 - y1)**2+(x2 - x1)**2)
return distance
def which_move(board):
# Chooses which move to make each turn
moves = list(board.moves())
decision = moves[0]
for dir in moves:
        if distance(board.rel(dir), board.them()) <= distance(board.rel(decision), board.them()):
decision = dir
return decision
# make a move each turn
for board in tron.Board.generate():
tron.move(which_move(board))
```
#### File: tron-engine/bots/logbot.py
```python
import random, tron, time
DEBUG = False
if DEBUG:
# On csmarlboro.org, change 'everyone' to your username.
# Keep the rest of the path - that folder is world writable.
# This opens that file for appending, and writes one line
# to it with the date and time.
logfile = '/var/www/csmarlboro/tron/logs/everyone.txt'
tron.init_error_log(logfile)
tron.warn("Starting logbot") # Change this to whatever you like.
def debug(message):
if DEBUG:
tron.warn(message)
def which_move(board):
""" Return the direction to move (1,2,3,4) given a Board object. """
choices = board.moves()
debug('')
debug(' legal moves are' + str(choices))
move = random.choice(choices)
debug(' chosen move is ' + str(move))
    ## test: take longer than the 1 sec allowed.
    # time.sleep(2)
    ## test: occasionally crash (divide by zero error)
# if random.randint(1,10) == 3: a = 1/0
return move
# You probably don't need to change this part.
for board in tron.Board.generate():
tron.move(which_move(board))
```
#### File: tron-engine/bots/tron.py
```python
import sys, os, datetime
# --- start debugging stuff ---------------------------------------------
## To set up your bot for debugging :
##
## 1. From the command line, create a logfile in a world-writable folder,
## and make that file writeable by anyone.
## On csmarlboro.org, I've set up /var/www/csmarlboro/tron/logs
## to be a good place; create a file with your username there.
## (Change 'you' to your username, so only your bot's messages go there.)
##
## $ touch /var/www/csmarlboro/tron/logs/you.txt # Create it.
## $ chmod o+w /var/www/csmarlboro/tron/logs/you.txt # Set permissions.
##
## 2. In your robot.py file, send all errors and warn() output
## to that file. (Note that this means you'll need to look there
## to see errors, even when running it yourself.)
##    Use an absolute path (starting with /) so that the file
## will have the same name no matter what folder the bot is run from.
## Use your username for "you" in what's below.
##
## import tron
##       tron.init_error_log('/var/www/csmarlboro/tron/logs/you.txt')
##
## 3. For debugging print statements, use the "warn" function "
## in your robot.py file. For example if you want to
## log the value of variable foo in some function bar :
##
## tron.warn(' in function bar, foo={}'.format(foo))
##
## 4. Run your bot, either manually from the command line,
## or with the terminal "run" script, or in a tournament.
## Then each game will append to the logfile, starting
## with a line like "=== starting bot at ... ==="
##
## 5. Look in that file to see what happened. From the command line
##
## $ tail /var/www/csmarlboro/tron/logs/you.txt
##
## or
##
## $ less /home/user/tron_logfile.txt
##
## See logbot.py for an example.
##
## Questions? Ask someone.
##
def warn(message):
""" Send a warning message to stderr.
This works well with the logfile set with init_logfile(path) """
# Note that message is a string, so use " ... {} ".format() or str()
# to convert variables to strings, i.e. warn("a={}".format(a))
sys.stderr.write(message + "\n")
def init_error_log(logfilename):
""" send stderr to a logfile, which will then have
errors and warn(messages) appended to it. """
# USE A FULL PATH, i.e. /var/www/csmarlboro/tron/logs/you.txt
# so that this will work no matter which folder the bot is run from, and
# MAKE SURE THAT FILE IS WRITABLE, i.e.
# $ export logfile=/var/www/csmarlboro/tron/logs/you.txt
# $ touch $logfile # Create it if need be,
# $ chmod o+w $logfile # and make it world writable.
#
# Adapted from pages given by googling "python stderr to file"
sys.stderr = open(logfilename, "a") # set stderr to file and append to it
warn("=== starting python bot at {} ===".format(
datetime.datetime.now().ctime()))
# --- end debugging stuff ------------------------------------------------
NORTH = 1
EAST = 2
SOUTH = 3
WEST = 4
FLOOR = ' '
WALL = '#'
ME = '1'
THEM = '2'
DIRECTIONS = (NORTH, EAST, SOUTH, WEST)
def direction(which):
""" Return a string (e.g. 'north') from a direction number (e.g. 1) """
return ['', 'north', 'east', 'south', 'west'][which]
def move(direction):
""" Send a move to the referee. """
print direction
sys.stdout.flush()
def _invalid_input(message):
print >>sys.stderr, "Invalid input: %s" % message
sys.exit(1)
def _readline(buf):
while not '\n' in buf:
tmp = os.read(0, 1024) # standard input, max 1kB
if not tmp:
break
buf += tmp
if not buf.strip():
return None, buf
if not '\n' in buf:
_invalid_input('unexpected EOF after "%s"' % buf)
index = buf.find('\n')
line = buf[0:index]
rest = buf[index + 1:]
return line, rest
class Board(object):
""" The Tron Board """
def __init__(self, width, height, board):
self.board = board
self.height = height
self.width = width
self._me = None
self._them = None
@staticmethod
def _read(buf):
meta, buf = _readline(buf)
if not meta:
return None, buf
dim = meta.split(' ')
if len(dim) != 2:
_invalid_input("expected dimensions on first line")
try:
width, height = int(dim[0]), int(dim[1])
except ValueError:
_invalid_input("malformed dimensions on first line")
lines = []
while len(lines) != height:
line, buf = _readline(buf)
if not line:
_invalid_input("unexpected EOF reading board")
lines.append(line)
board = [line[:width] for line in lines]
if len(board) != height or any(len(board[y]) != width for y in xrange(height)):
_invalid_input("malformed board")
return Board(width, height, board), buf
@staticmethod
def generate():
""" Generate board objects, once per turn. """
buf = ''
while True:
board, buf = Board._read(buf)
if not board:
break
yield board
if buf.strip():
_invalid_input("garbage after last board: %s" % buf)
def __getitem__(self, coords):
""" Retrieve the object at the specified coordinates.
Use it like this:
if board[3, 2] == tron.THEM:
# oh no, the other player is at (3,2)
run_away()
Coordinate System:
The coordinate (y, x) corresponds to row y, column x.
The top left is (0, 0) and the bottom right is
(board.height - 1, board.width - 1). Out-of-range
coordinates are always considered walls.
Items on the board:
tron.FLOOR - an empty square
tron.WALL - a wall or trail of a bot
tron.ME - your bot
tron.THEM - the enemy bot
"""
y, x = coords
if not 0 <= x < self.width or not 0 <= y < self.height:
return WALL
return self.board[y][x]
def me(self):
""" Returns your position on the board.
It is always true that board[board.me()] == tron.ME.
"""
if not self._me:
self._me = self._find(ME)
return self._me
def them(self):
""" Finds the other player's position on the board.
It is always true that board[board.them()] == tron.THEM.
"""
if not self._them:
self._them = self._find(THEM)
return self._them
def _find(self, obj):
for y in xrange(self.height):
for x in xrange(self.width):
if self[y, x] == obj:
return y, x
raise KeyError("object '%s' is not in the board" % obj)
def passable(self, coords):
""" Determine if a position in the board is passable.
You can only safely move onto passable tiles, and only
floor tiles are passable.
"""
return self[coords] == FLOOR
def rel(self, direction, origin=None):
""" Calculate which tile is in the given direction from origin.
        The default origin is you. Therefore, board.rel(tron.NORTH)
is the tile north of your current position. Similarly,
board.rel(tron.SOUTH, board.them()) is the tile south of
the other bot's position.
"""
if not origin:
origin = self.me()
y, x = origin
if direction == NORTH:
return y - 1, x
elif direction == SOUTH:
return y + 1, x
elif direction == EAST:
return y, x + 1
elif direction == WEST:
return y, x - 1
else:
raise KeyError("not a valid direction: %s" % direction)
def adjacent(self, origin):
""" Calculate the four locations that are adjacent to origin.
Particularly, board.adjacent(board.me()) returns the four
tiles to which you can move to this turn. This does not
return tiles diagonally adjacent to origin.
"""
return [self.rel(dir, origin) for dir in DIRECTIONS]
def moves(self):
""" Calculate which moves are safe to make this turn.
Any move in the returned list is a valid move.
There are two ways moving to one of these tiles could end the game:
1. At the beginning of the following turn, there are no valid moves.
2. The other player also moves onto this tile, and you collide.
"""
possible = dict((dir, self.rel(dir)) for dir in DIRECTIONS)
passable = [dir for dir in possible if self.passable(possible[dir])]
if not passable:
# it seems we have already lost
return [NORTH]
return passable
def as_string(self):
""" Return a printable string of the current board. """
result = ""
for line in self.board:
result += line + "\n"
return result
```
#### File: tron-engine/engines/player.py
```python
import signal, os, sys, string, tempfile
from time import clock
from subprocess import Popen, PIPE
P0BOARD = string.maketrans('1\x81\x82\x83\x842\xa1\xa2\xa3\xa4.*\x88\x89\x8a\xa8\xa9\xaa',' ########')
P1BOARD = string.maketrans('1\x81\x82\x83\x842\xa1\xa2\xa3\xa4.*\x88\x89\x8a\xa8\xa9\xaa','1111122222########')
P2BOARD = string.maketrans('1\x81\x82\x83\x842\xa1\xa2\xa3\xa4.*\x88\x89\x8a\xa8\xa9\xaa','2222211111########')
has_alarm = (hasattr(signal, 'SIGALRM') and hasattr(signal, 'alarm'))
if not has_alarm:
print "Warning: System does not support alarm(); timeouts will not be strictly enforced."
if not hasattr(os, 'kill'):
try:
import win32api
except ImportError:
print "Warning: System does not support kill(); process may end up spinning upon process exit."
def kill(pid, sig=signal.SIGTERM):
# http://www.python.org/doc/faq/windows/
import win32api
if sig != signal.SIGTERM:
raise OSError("Sending any signal except SIGTERM is not supported on Windows.")
handle = win32api.OpenProcess(1, 0, pid)
return (0 != win32api.TerminateProcess(handle, 0))
use_shell = True
else:
kill = os.kill
use_shell = True
class TimeoutException(Exception):
pass
def alarm(*args):
raise TimeoutException("Time limit exceeded.")
def set_alarm(timeout=1):
if has_alarm:
old_alarm=signal.signal(signal.SIGALRM, alarm)
signal.alarm(timeout)
return old_alarm
else:
return clock() + timeout
def reset_alarm(old_alarm):
if has_alarm:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
else:
if clock() > old_alarm:
raise TimeoutException("Time limit exceeded.")
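# Illustrative usage sketch (assumption, mirroring Player.getmove() below):
# the two helpers bracket a blocking call so that it is bounded to roughly
# `timeout` seconds, via SIGALRM where available and a clock() deadline check
# otherwise.  For some Player instance p (hypothetical name):
#
#   old_alarm = set_alarm(timeout=1)
#   try:
#       move = p.readchar()             # may raise TimeoutException via SIGALRM
#   finally:
#       reset_alarm(old_alarm)          # on the clock() fallback, raises here
#                                       # if the deadline has passed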
class PlayerFailedException(Exception):
def __init__(self, player_symbol, player_name, msg):
self.player = "bot {} {}".format(player_symbol, player_name)
Exception.__init__(self, "{} failed: {}".format(self.player, msg))
""" http://code.activestate.com/recipes/134892/ """
class _Getch:
"""Gets a single character from standard input. Does not echo to the screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class Player(object):
MOVES = {'N':'1', 'E':'2', 'S':'3', 'W':'4', 'I':'1', 'J':'4', 'K':'3', 'L':'2'}
def __init__(self, cmd, name):
if cmd == '-':
self.interactive = True
self.last_move = 'N'
try:
self.get_input = _Getch()
except:
self.get_input = raw_input
else:
self.interactive = False
self.stderr = tempfile.TemporaryFile()
self.process = Popen(cmd, shell=use_shell,
stdin=PIPE, stdout=PIPE, stderr=self.stderr)
self.name = name
def readchar(self):
while True:
c = self.process.stdout.read(1)
if c and c.isspace(): continue
return c
def readline(self):
return self.process.stdout.readline()
def writeline(self, s):
self.process.stdin.write(s+'\n')
self.process.stdin.flush()
def send_eof(self):
if not self.interactive:
self.process.stdin.close()
def getmove(self, board, player):
try:
if self.interactive:
sys.stdout.write("Move (i/j/k/l or n/e/s/w) [%s]? "%self.last_move)
ret = self.get_input().strip()
ret = ret[:1].upper()
if not ret:
ret = self.last_move
else:
self.last_move = ret
if ret in self.MOVES:
ret = self.MOVES[ret]
else:
self.writeline(board.getdims())
for l in board.getboard():
if player == '1':
self.writeline(l.translate(P1BOARD))
else:
self.writeline(l.translate(P2BOARD))
old_alarm = set_alarm(timeout=1)
ret = self.readchar()
reset_alarm(old_alarm)
assert ret in '1234', \
"Player made an invalid move '{}'".format(ret)
assert ret != '', "Player made blank move ''."
return int(ret)
except Exception, e:
self.stderr.seek(0)
stderr_text = "\n------ {} stderr ------\n".format(self.name) + \
self.stderr.read() + "\n"
raise PlayerFailedException(player, self.name, str(e) + stderr_text)
def send_signal(self, sig):
if self.process.poll() is None:
try:
kill(self.process.pid, sig)
except:
return False
return True
return False
def sigterm(self):
if self.interactive:
return True
return self.send_signal(signal.SIGTERM)
def sigkill(self):
if self.interactive:
return True
if hasattr(signal, 'SIGKILL'):
return self.send_signal(signal.SIGKILL)
else:
return self.send_signal(signal.SIGTERM)
``` |
{
"source": "jimmahoney/umber",
"score": 3
} |
#### File: src/misc/test_getattr.py
```python
class Foo:
def __getattr__(self, key):
if not 'name' in self.__dir__():
self.name = 'unknown'
return self.__getattribute__(key)
f = Foo()
name = f.name
print(f" name = {name} ")
age = f.age
print(f" age = {age} ")
```
#### File: misc/uwsgitest/utest.py
```python
from flask import Flask
app = Flask(__name__)
@app.route("/utest/")
def hello():
return "<h1 style='color:blue'>Hello from 'utest' !</h1>"
if __name__ == "__main__":
app.run(host='0.0.0.0')
```
#### File: umber/src/model.py
```python
import os, yaml, re, mimetypes, shutil, random
from functools import reduce
from flask import url_for
from werkzeug.security import generate_password_hash, check_password_hash
from peewee import ( SqliteDatabase, Model, TextField, IntegerField,
PrimaryKeyField, ForeignKeyField )
from bs4 import BeautifulSoup
from utilities import ( markdown2html, link_translate, static_url, md5, Time,
ext_to_filetype, filetype_to_icon, size_in_bytes,
stringify_access, print_debug, clean_access_dict )
from settings import ( os_db, umber_url, protocol, hostname, umber_mime_types,
os_root, os_courses, photos_url, url_base,
os_default_course, site_course_path, site_home,
due_grace_hours )
import gitlocal
db = SqliteDatabase(os_db)
class BaseModel(Model):
class Meta:
database = db # (peewee requires this 'database' name)
def __repr__(self):
# e.g.
fields = ', '.join(["{}={}".format(x[0],repr(x[1]))
for x in list(self.__dict__['_data'].items())])
return '<{}({}) at 0x{:X}>'.format(self.__class__.__name__,
fields, id(self))
@classmethod
def first(cls):
return cls.select().first()
@classmethod
def all(cls):
return list(cls.select().execute())
class Person(BaseModel):
class Meta:
db_table = 'Person'
person_id = PrimaryKeyField(db_column='person_id')
username = TextField(unique=True)
password = TextField()
name = TextField()
email = TextField()
notes = TextField()
_by_username = {} # cache
_admins = None
@staticmethod
def from_comma_string(comma_string):
""" Return people list from a string of usernames e.g. "john,mary" """
return list(map(Person.by_username, comma_string.split(',')))
@staticmethod
def searchname(partialname, maxresults=32):
""" search for a name or username - returning up to a given number """
people = Person.select().where( \
Person.name.contains(partialname) | \
Person.username.contains(partialname)) \
.order_by(Person.username).limit(maxresults)
return [p.username for p in people]
@staticmethod
def create_person(username, name, email, password='', is_admin=False):
# TODO : restrict legal usernames ...
# * prohibit leading '_' (reserved for system)
# * lowercase_only? underbar? numbers?
# * enforce uniqueness
with db.atomic():
(user, created) = Person.get_or_create(username=username)
if created:
user.name = name
user.email = email
if not password:
password = str(random.getrandbits(32))
user.set_password(password)
user.save()
# enroll_site has in it db.atomic() too ...
# the docs say its OK to nest them.
Course.enroll_site(user, is_admin=is_admin)
return user
@staticmethod
def edit_person(username, name, email, password):
try:
with db.atomic():
user = Person.by_username(username)
user.name = name
user.email = email
if password != '':
user.set_password(password)
user.save()
except:
            print_debug('OOPS : Person.edit_person(username="{}") failed' \
.format(username))
def course_data(self):
""" return courses that this person is registered in
as a dict with keys role,course,url,semester """
registrations = list(Registration.select()
.where(Registration.person == self))
registrations.sort(key=lambda r: r.course.name)
registrations.sort(key=lambda r: r.course.start_date, reverse=True)
return [{'role':r.role.name,
'course':r.course.name,
'url':r.course.url,
'semester':Time(r.course.start_date).semester()}
for r in registrations if not r.course.name == 'Umber']
def get_username(self, username):
return Person.by_username(username)
def make_admin(self):
""" Turn this person into a site admin """
with db.atomic():
umber = Course.get_site()
site_registration = Registration.get(course=umber, person=self)
site_registration.role = Role.by_name('admin')
site_registration.save()
def works(self, course):
query = (Work.select()
.where( Work.person == self,
Work.course == course ))
return list(query.execute())
def _save(self):
""" save to database and invalidate caches """
try:
del Person._by_username[self.username]
except KeyError:
pass
Person._admins = None
self.save()
def set_password(self, passwordtext):
with db.atomic():
self.password = generate_password_hash(passwordtext)
self._save()
def check_password(self, passwordtext):
return check_password_hash(self.password, passwordtext)
def get_role(self, course):
""" Return role of this person in that course """
if self.username in course.username_to_role:
return course.username_to_role[self]
else:
return Role.by_name('visitor')
def get_last_first(self):
names = self.name.split(' ')
return names[-1] + ', ' + names[0]
# -- Flask-Login methods & tools --
@staticmethod
def get_anonymous():
""" Create and return an anonymous Person """
# Not saved to database (i.e. save() not called).
# Not logged in.
anon = Person(name='anonymous', username='')
anon.anonymous = True
return anon
def is_authenticated(self):
return not self.is_anonymous()
def is_active(self):
return not self.is_anonymous()
def is_anonymous(self):
try:
return self.anonymous
except:
return False
def is_admin(self):
""" return True if this user is an admin, false otherwise """
return self.username in Person.admins()
def get_id(self):
if self.username == None:
return str('')
else:
return str(self.username)
def get_photo_url(self):
return photos_url + '/' + self.username + '.jpg'
@staticmethod
def generic_photo_url():
return photos_url + '/generic_student.png'
@staticmethod
def by_username(username):
""" Returns anonymous person if not found """
if username not in Person._by_username:
try:
person = Person.get(username = username)
#person = Person.select() \
# .where(Person.username==username).first()
except:
return Person.get_anonymous()
Person._by_username[username] = person
return Person._by_username[username]
@staticmethod
def by_rolename(rolename):
""" Return list of users who have a given type of registration """
# ... in any registration record i.e. any course
return list(Person.select()
.join(Registration)
.where(Registration.role == Role.by_name(rolename))
.execute())
@staticmethod
def admins():
""" Return list of administrators """
if not Person._admins:
Person._admins = {p.username : True
for p in Person.by_rolename('admin')}
return Person._admins
class Course(BaseModel):
class Meta:
db_table = 'Course'
course_id = PrimaryKeyField(db_column='course_id')
active = IntegerField()
assignments_md5 = TextField()
credits = IntegerField()
end_date = TextField()
name = TextField()
name_as_title = TextField()
notes = TextField()
path = TextField(unique=True)
start_date = TextField()
_site_course = None # course for site data
def prepared(self):
""" setup this instance after it's attributes are set """
# This method is essentially __init__ for these database objects.
self._set_users()
self.assignments = self._get_assignments()
if not self.start_date:
self.semester = ''
else:
self.semester = Time(self.start_date).semester()
# url without request though that info is also in request
self.url = umber_url + '/' + self.path
self.abspath = os.path.join(os_courses, self.path)
def __getattr__(self, key):
# Define some attributes (.url, .abspath, .students, ...)
# when they're needed.
#
# June 2019 : there is no "prepared" in python3's peewee;
# see https://github.com/coleifer/peewee/issues/1479
# So I need another way to call this after instantiating a Course.
# See docs.python.org/3/reference/expressions.html#attribute-references;
# ... I can override __getattr__ to fill 'em in when first accessed.
if not 'abspath' in self.__dir__():
self.prepared()
return self.__getattribute__(key)
def _set_users(self):
""" define self.students, .faculty, .guests, .username_to_role """
# .students includes tutors;
# username_to_rolename lists their role as 'tutor'
registrations = list(Registration.select()
.where((Registration.course == self)
& (Registration.status != 'drop')))
self.students = [reg.person for reg in registrations
if reg.role == Role.by_name('student')]
self.faculty = [reg.person for reg in registrations
if reg.role == Role.by_name('faculty')]
self.guests = [reg.person for reg in registrations
if reg.role == Role.by_name('guest')]
self.students.sort(key=lambda s: s.name)
self.faculty.sort(key=lambda s: s.name)
self.username_to_role = {reg.person.username : reg.role
for reg in registrations}
self.username_to_rolename = {reg.person.username : reg.rolename()
for reg in registrations}
@staticmethod
def get_all():
""" Return all but the 'Umber' course, sorted by semester & name """
result = [c for c in Course.all() if not c.name == 'Umber']
result.sort(key=lambda c: c.name)
result.sort(key=lambda c: c.start_date, reverse=True)
return result
@staticmethod
def create_course(name, path, start='', name_as_title='',
copyfrom=os_default_course, user=None):
if name_as_title == '':
name_as_title = name
if start == '':
now = str(Time())
now_year = now[:4]
now_month = now[5:7]
if now_month < '06':
start = now_year + '-' + '01-01' # spring
elif now_month < '09':
start = now_year + '-' + '06-01' # summer
else:
start = now_year + '-' + '09-01' # fall
with db.atomic():
(course, created) = Course.get_or_create(
name = name,
path = path,
start_date = start,
name_as_title = name_as_title
)
# Create a disk folder for a course by copying
# files from some other course.
# (If 'copyfrom' isn't defined, then those course files
# had better already exist ... which is the case
# for defaultcourse and democourse.)
if copyfrom:
abspath = os.path.join(os_courses, path)
abscopyfrom = os.path.join(os_courses, copyfrom)
shutil.copytree(abscopyfrom, abspath)
# remove the old copied .git folder
shutil.rmtree(os.path.join(abspath, '.git'), ignore_errors=True)
gitlocal.init_add_commit(course, user) # initalize its .git folder
return course
@staticmethod
def get_site():
""" return site admin course 'Umber' """
if not Course._site_course:
Course._site_course = Course.get(name='Umber')
return Course._site_course
@staticmethod
def create_site():
""" create site couse 'Umber' """
# for site resoruces i.e. help files, user id photos etc.
with db.atomic():
(sitecourse, created) = Course.get_or_create(
name = 'Umber',
                name_as_title = 'Umber<div>a course<br>management<br>system</div>',
path = site_course_path,
start_date = '2018-01-01')
return sitecourse
def get_shortname(self):
""" used in html title ; see Page.html_title """
# TODO : ... something shorter ?
return self.name
def is_site(self):
""" true if this is the site course """
return self.path == site_course_path
def person_to_role(self, person):
""" Return role of person in course, or visitor """
return self.username_to_role.get(person.username,
Role.by_name('visitor'))
def username_is_member(self, username):
return username in self.username_to_role
def _get_assignments(self):
return list(Assignment.select() \
.where(Assignment.course == self) \
.order_by(Assignment.nth))
def drop(self, user):
""" Drop user (Person or username) from this course """
# (Students who are registered may have submitted work.
# Rather than delete their files and database records,
# I'm just changing their status to 'drop', and ignoring
# those people in _set_users
try:
person = user
name = person.name # Is this a Person object?
except AttributeError:
person = Person.by_username(user) # No - treat it as a username.
name = person.name
if name == 'anonymous' or name == '':
return "error in drop with user '{}'".format(str(user))
with db.atomic():
registration = Registration.get(person=person, course=self)
registration.status = 'drop'
registration.date = str(Time())
registration.save()
# refresh course data
self._set_users()
return "OK, dropped {}.".format(name)
def get_profile_url(self):
# site course ends with / ; others don't ... slightly different behavior.
if self.url[-1] == '/':
return self.url + 'sys/user'
else:
return self.url + '/sys/user'
def get_home_url(self):
""" return url for course home page """
if self.is_site():
# special case : Umber course home is docs/home,
# so that all public stuff can be in docs/*
home_path = site_home
else:
home_path = 'home'
return os.path.join(self.url, home_path)
def get_registered(self, rolename=None):
registrations = list(Registration.select()
.where((Registration.course == self)
& (Registration.status != 'drop')))
if rolename == 'tutor':
people = [reg.person for reg in registrations if reg.grade == 'tutor']
elif not rolename:
people = [reg.person for reg in registrations]
elif rolename == 'student':
people = [reg.person for reg in registrations
if (reg.role.name == rolename and reg.grade != 'tutor')]
else:
people = [reg.person for reg in registrations
if reg.role.name==rolename]
people.sort(key=lambda p: p.get_last_first())
return people
def email_everyone_html(self):
return "mailto:" + ','.join([p.email for p in self.get_registered()])
def has_username(self, username):
return username in self.username_to_role
def get_faculty_data(self):
""" return {'email', 'name'} of faculty """
return [{'email':person.email, 'name':person.name}
for person in self.faculty]
def grade_data_list(self, student):
""" return student's view grade list for templates/grades.html """
# See the description below for the faculty grid.
result = list(self.get_assignments_with_extras())
for ass in result:
# Hmmm - not sure why this needs .person_id here, but errors without.
# Maybe something about how the jinja2 template treats variables?
# Or because the assignment has had its fields modified??
ass.work = ass.get_work(student.person_id)
(grade, css_grade) = ass.work.get_grade_css(faculty_view=True)
ass.work_grade = grade
ass.work_css_grade = "grade-{}".format(css_grade)
ass.duedate = Time(ass.due).assigndate()
return result
def grade_data_grid(self):
""" return faculty's grade grid for templates/grades.html """
# Returned data is list of dicts, one per student.
# Each student dict includes list of student works, one per assignment.
# The grade will be shown as
# '…' if not submitted and not yet due
# 'overdue' if not submitted and past due date
# 'ungraded' if submitted and not graded
# work.grade if submitted and graded
#
# The grade css class is "grade-*"
# where * is one of (green,darkgreen,darkred,red,black)
# for faculty viewing the color is :
# red overdue : due > today and not submitted
# brown faculty modified date > student seen date
# darkgreen student modified date > faculty seen date
# green ungraded : student has submitted; faculty hasn't graded
# black none of above
# for student viewing the color is :
# brown ungraded : student has submitted; faculty hasn't graded
# brown student modified; faculty hasn't seen
# dark green faculty modified; student hasn't seen
# green overdue : due > today and not submitted
# black none of above
#
# The basic idea of the colors is that
# green-ish means the viewer should respond (i.e. "go")
# red-ish means that the other person should do something
# (i.e. a problem)
#
result = []
for stud in self.students:
# skip grade line for student if they are a tutor
if self.username_to_rolename[stud.username] == 'tutor': continue
works = []
for ass in self.assignments:
work = ass.get_work(stud)
(grade, css_grade) = work.get_grade_css(faculty_view=True)
works.append({'url': work.get_url(),
'css_grade': 'grade-{}'.format(css_grade),
'grade': grade,
'id': work.work_id
})
result.append({'email': stud.email,
'name' : stud.name,
'works': works
})
return result
def get_assignment_by_nth(self, nth):
""" Return nth assignment in this course """
try:
return Assignment.select() \
.where(Assignment.course == self, \
Assignment.nth == nth) \
.first()
except:
return None
def update_assignments(self, assignments_data):
""" Update course assignments from
a dict of assignments_data[nth][name, due, blurb] """
# Note: passed argument is *not* made up of Assignment objects.
# Note: this is designed to update *all* assignments.
db_assignments = {a.nth : a for a in self._get_assignments()}
with db.atomic():
for nth in assignments_data:
if nth not in db_assignments:
(db_assignments[nth], status) = Assignment.get_or_create(
course=self, nth=nth)
db_assignments[nth].name = assignments_data[nth]['name']
duedate = assignments_data[nth]['due']
db_assignments[nth].due = str(Time.parse(duedate))
new_blurb = assignments_data[nth]['blurb']
old_blurb = db_assignments[nth].blurb
#print_debug(f" debug update_assignments : '{duedate}'")
#print_debug(f" md5(new_blurb) = '{md5(new_blurb)}'")
#print_debug(f" new_blurb = '{new_blurb}' ")
#print_debug(f" ass.blurb_hash = '{db_assignments[nth].blurb_hash}'")
#print_debug(f" ass.blurb = '{old_blurb}' ")
#print_debug(f" new == old ? {new_blurb == old_blurb}")
if md5(new_blurb) != db_assignments[nth].blurb_hash: # is this changed?
db_assignments[nth].blurb = new_blurb # yes: update it
db_assignments[nth].blurb_hash = md5(new_blurb)
db_assignments[nth].blurb_html = markdown2html(
link_translate(self, new_blurb))
#print_debug(" updating cache ")
#else:
#print_debug(" NOT updating cache ")
db_assignments[nth].save()
self.assignments = self._get_assignments()
def get_assignments_with_extras(self):
""" Return list of assignments in this course with extra info """
# ... i.e. prepare the data for html display
now = Time()
# print(" now = " + str(now))
if len(self.assignments) == 0:
self.assignment_nth_plus1 = 1
else:
self.assignment_nth_plus1 = self.assignments[-1].nth + 1
for ass in self.assignments:
duedate = Time(ass.due)
if duedate < now:
#print(" PAST : duedate = " + str(duedate))
ass.dateclass = 'assign-date-past'
else:
#print(" FUTURE : duedate = " + str(duedate))
ass.dateclass = 'assign-date'
ass.date = duedate.assigndate() # for assignment list display
ass.ISOdate = duedate.assignISOdate() # ditto
return self.assignments
def nav_page(self, user):
""" return course's navigation page """
# TODO: put this in a "try" and do something reasonable if it fails.
# (otherwise, pages in courses without sys/navigation.md will crash.)
# TODO: should this be cached to self._nav_page ?
# (Need it for both displaying and editing course's navigation page.)
return Page.get_from_path(os.path.join(self.path,
'sys', 'navigation.md'), user=user)
def nav_html(self, user, page):
""" Return html for course's navigation menu
for a given user & a given page """
return self.nav_page(user).nav_content_as_html(page)
@staticmethod
def enroll_site(person, datestring=None, is_admin=False):
""" enroll a person in the site course """
# All users should be in this course.
if not datestring:
datestring = str(Time())
site_course = Course.get_site()
if is_admin:
site_role = Role.by_name('admin')
else:
site_role = Role.by_name('member')
with db.atomic():
(reg, created) = Registration.get_or_create(
person = person,
course = site_course)
if created or is_admin: # update role & date
reg.role = site_role
reg.date = datestring
reg.status = ''
reg.save()
site_course._set_users()
def make_student_work_folders(self):
for person in self.students:
student_abspath = os.path.join(self.abspath,
'students', person.username)
if not os.path.exists(student_abspath):
Page.new_folder(student_abspath, user=person,
accessdict= {'read':person.username,
'write':person.username})
work_abspath = os.path.join(student_abspath, 'work')
if not os.path.exists(work_abspath):
Page.new_folder(work_abspath, user=person)
def enroll(self, person, rolename, datestring=None, create_work=False):
""" Enroll a person in this course with this role. """
# If there is an existing registration for the course&person, modify it.
# Also enroll this person in the site couse if they aren't already
# and if this isn't the site course itself.
# Optionally create their work folder (if it doesn't already exist)
#
# add tutors by using student role with registration.grade='tutor'
is_tutor = rolename=='tutor'
if rolename == 'tutor':
rolename = 'student'
if not datestring:
datestring = str(Time())
with db.atomic():
(reg, created) = Registration.get_or_create(
person = person,
course = self)
reg.role = Role.by_name(rolename)
reg.grade = 'tutor' if is_tutor else ''
reg.status = '' # if re-enrolling would have been 'drop'
reg.date = datestring
reg.save()
if not self.name == 'Umber':
Course.enroll_site(person, datestring=datestring)
if create_work:
# Create folder for student work within the course folder.
# The absolute path for their student work folder is
# e.g. course/students/johnsmith/ with its .access.yaml
# & course/students/johnsmith/work/
student_abspath = os.path.join(self.abspath,
'students', person.username)
Page.new_folder(student_abspath, user=person,
accessdict= {'read':person.username,
'write':person.username})
work_abspath = os.path.join(student_abspath, 'work')
Page.new_folder(work_abspath, user=person)
# refresh students
self._set_users()
class Page(BaseModel):
# --- path, filename, url definitions ---
# With settings on my laptop development machine as
# os_courses /Users/mahoney/academics/umber/courses
# then for the 'notes/week1' file within a course at 'fall/math' ,
# the parts are
# url: http://127.0.0.1:5000/ umber / fall/math / notes/week1
# protocol hostname url_base path...................
# file: /Users/mahoney/academics/umber/courses / fall/math / notes/week1
# os_courses path...................
# Following python's os.path phrasing, other terms used here are
# basename last word in address (same as os.path.basename)
# abspath e.g. /Users/mahoney/.../fall/math/notes/week1
# dirname e.g. /Users/mahoney/.../fall/math/notes
# This url would have in its flask request object the attributes
# request.url_root 'http://localhost:8090/'
# request.path '/umber/fall/math/notes/week1'
#
# Note that Page.path (e.g. fall/math/notes/week1)
# does not have a leading slash or contain the url_base,
# while request.path (e.g. /umber/fall/math/notes/week1) does.
#
# The Page object will also contain various extra data
# that isn't stored in the sql database but is instead
# pulled from the filesystem.
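    # Illustrative sketch (not original commentary): for the example above
    # the pieces are related through os.path roughly as
    #   path     = 'fall/math/notes/week1'
    #   abspath  = os.path.join(os_courses, path)
    #   relpath  = os.path.relpath(path, course.path)          # 'notes/week1'
    #   dirname  = os.path.dirname(abspath)
    #   basename = os.path.basename(abspath)                   # 'week1'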
class Meta:
db_table = 'Page'
# Each course has some sys/* pages which get special treatment.
# Also here are site/sys/* pages for editing users and courses,
# which are only accessible within the 'site' course.
system_pages = ('assignments', 'navigation', 'error', 'folder',
'grades', 'roster', 'user', 'users', 'course',
'courses', 'registration', 'newuser', 'newcourse')
editable_system_pages = ('assignments', 'navigation',
'grades', 'user', 'course')
page_id = PrimaryKeyField(db_column='page_id')
html = TextField()
html_lastmodified = TextField()
notes = TextField()
path = TextField(unique=True)
course = ForeignKeyField(model=Course,
db_column='course_id',
to_field='course_id')
_mime_types = None
@staticmethod
def new_folder(abspath, accessdict=None, user=None):
""" Create a new folder with the given abspath.
Add it into the github repo.
Optionally create its .access.yaml file. """
if os.path.exists(abspath):
            # bail without doing anything if this already exists
# print_debug(' new_folder {} already exists '.format(abspath))
return None
try:
os.makedirs(abspath) # makes intermediate folders if need be.
except:
# bail with error message if the OS system won't do it.
            print_debug(' os.makedirs("{}") failed '.format(abspath))
return None
# Add an empty .keep file in this new folder,
# as a workaround to force git to include this new folder.
# (Git pays attention to files, not folders.)
open(os.path.join(abspath, '.keep'), 'w').close() # unix 'touch'
# Create the new folder object.
path = os.path.relpath(abspath, os_courses)
folder = Page.get_from_path(path, user=user)
if accessdict:
# don't do a git commit here - wait to do whole folder
folder.write_access_file(accessdict, do_git=False)
gitlocal.add_commit(folder)
return folder
@classmethod
def get_from_path(cls, path, revision=None, action=None, user=None):
""" Get or create a Page and set up all its internal data
i.e. course, file info, user permissions, etc """
(page, iscreated) = Page.get_or_create(path=path)
if user == None:
user = Person.get_anonymous()
page.user = user
page.action = action
page.revision = revision
page._setup_file_properties() # sets page.is_file etc
page.gitpath = os.path.join(os_courses, page.path_with_ext)
page.login_google_url = url_for('login_google', pagepath=path)
page.course = page.get_course()
try:
if page.course.page_error:
### Unexpected (to me anyway) behavior here :
### page.course = None
### if page.course: # This throws an error!
### ...
### Apparently the peewee database code has put hooks into
### the Page object to do tricky stuff for "page.course",
### seems to drop into peewee and complain.
### I'm avoiding this by returning the Umber site course
### but with a .page_error attribute set.
### In umber.py this will turn the request into 404 not found.
return page
except AttributeError:
# .page_error field not set; keep going.
pass
page.relpath = page._get_relpath()
page._setup_sys() # do this before .get_access()
page.access = page.get_access() # gets .access.yaml property.
page._setup_user_permissions() # sets page.can['read'] etc
if revision or action=='history':
page._setup_revision_data() # sets page.history etc
page._setup_attachments() # sets .has_attachments
page._setup_work() #
page.html_title = page.get_html_title()
return page
def get_html_title(self):
""" Return string for the <title></title> html tag. """
try:
return self.course.get_shortname() + ' : ' + self.relpath
except:
return self.path
def get_gitpath(self, abspath=None):
""" Return file path of page (or abspath file) relative to course path,
including file extension if any """
# This abspath option is used in gitlocal.py and umber.py:ajax_upload ;
# for attachments the page is not the upload file.
_abspath = self.abspath if abspath==None else abspath
return os.path.relpath(_abspath, self.course.abspath)
def _get_relpath(self):
""" Return path of page relative to course path,
e.g. notes/home for path=demo/notes/home in course 'demo' """
# self.course must be already set.
return os.path.relpath(self.path, self.course.path)
def attachments_folder(self):
return self.abspath.replace(self.ext, '.attachments')
def _setup_attachments(self):
if self.is_file and self.ext == '.md':
attach_dir = self.attachments_folder()
if os.path.exists(attach_dir) and os.path.isdir(attach_dir):
self.attachments = self.children(abspath=attach_dir)
else:
self.attachments = []
self.has_attachments = len(self.attachments) > 0
else:
self.attachments = []
self.has_attachments = False
def _setup_work(self):
""" see if this is a students/<name>/work/<number> student work page;
define .is_work and .work, set up .work for html display,
            update the *_seen fields in the database.
"""
# print(' _setup_work : relpath = {}'.format(self.relpath))
m = re.match(r'students/(\w+)/work/(\d+)(\?.*)?', self.relpath)
if m:
now = Time()
self.is_work = True
(work_username, work_nth, ignore) = m.groups()
work_nth = int(work_nth)
self.work_person = Person.by_username(work_username)
self.work_assignment = self.course.get_assignment_by_nth(work_nth)
self.work = self.work_assignment.get_work(self.work_person)
duedate = Time(self.work_assignment.due)
self.work_due = duedate.assigndatedetail()
            # ... but give students an extra grace period of a few hours
            #   before marking things as "late";
            #   this lets me get "end of day" to something reasonable,
# without changing server timezone
duedate.arrow = duedate.arrow.shift(hours=due_grace_hours)
if self.work.submitted:
submitdate = Time(self.work.submitted)
self.work_submitted = submitdate.assigndate()
self.work_is_late = submitdate > duedate
else:
self.work_submitted = ''
self.work_is_late = now > duedate
self.work_grade = self.work.grade
# update *_seen fields in the database
# TODO : think about whether there's a better
# transactional way to update the database here.
if self.user_role.name == 'faculty':
self.work.faculty_seen = str(now)
self.work.save()
if self.user.username == work_username:
self.work.student_seen = str(now)
self.work.save()
else:
self.is_work = False
#self.work = None
#self.work_assignment = None
#self.work_person = None
#self.work_due = ''
#self.work_submitted = ''
#self.work_is_late = False
#self.work_grade = ''
def _setup_sys(self):
""" define .is_sys.
            if it is, also define .sys_template, .sys_edit_template """
# If relpath is 'sys/assignments', then is_sys will be true,
# the template will be 'umber/sys/assignments.html'
# and the edit template will be 'umber/sys/edit_assignments.html',
# (and the access permissions will be in the first line of the template.)
self.is_sys = self.relpath[:4] == 'sys/'
# -- default values for sys templates for all pages --
if self.is_sys:
which = self.relpath[4:]
if which == '':
which = 'folder'
if which not in Page.system_pages:
which = 'error'
self.sys_template = 'sys/' + which + '.html'
if which in Page.editable_system_pages:
self.sys_edit_template = 'sys/edit_' + which + '.html'
else:
self.sys_edit_template = 'sys/editerror.html'
def get_course(self):
""" return this page's course """
# And if there is no course for this page,
# return the site course but also set an error within it.
#
# extract path pieces e.g. ['demo', 'home']
path_parts = self.path.split('/')
# build partial paths e.g. ['demo', 'demo/home']
# (stackoverflow.com/questions/13221896/python-partial-sum-of-numbers)
paths = reduce(lambda x,y: x + [x[-1]+'/'+y],
path_parts[1:], path_parts[0:1])
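        # Illustrative example (not from the original source): a page at
        # 'demo/notes/home' yields ['demo', 'demo/notes', 'demo/notes/home'],
        # i.e. every folder prefix that could be a course path.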
# build peewee's "where condition" to find matching courses.
condition = Course.path
for c in paths:
condition = condition | Course.path % c
# Get list of matching courses from database.
# Choose the one with the longest path,
        # if more than one was found ...
        # which would only happen for course directories
# embedded within another course, which shouldn't happen.
# TODO: make sure to test for that issue during new course creation
query = Course.select().where(condition)
courses = list(query.execute())
#
if courses:
return max(courses, key=lambda c: len(c.path))
else:
# Couldn't find a course for that page, so return
# the default course with a flag indicating the error.
umber = Course.get_site()
umber.page_error = True
return umber
def write_access_file(self, accessdict, do_git=True):
""" Given an access dict from user input e.g.
{'read':'students', 'write':['faculty','bob']} ,
write it to a .access.yaml file, and return its abspath. """
assert self.is_dir # this page should be a folder
accesspath = os.path.join(self.abspath, '.access.yaml')
accessfile = open(accesspath, 'w') # open or create
# replace yaml permissions
        # (yaml.dump turns u'string' into ugly stuff, so I convert to str().)
accessfile.write(yaml.dump(clean_access_dict(accessdict)))
accessfile.close()
if do_git:
# I've left an option to avoid this to handle
# the case of a new folder efficiently, since
# we can in that case commit the whole folder in one go
# after this .access.yaml is created.
gitlocal.add_commit(self)
return accesspath
def get_access(self):
""" Return .access dict from .access.yaml in an enclosing folder
or from the first line of a sys_template
"""
# e.g. {'read': ['janedoe', 'johnsmith'], 'write': 'faculty'}
        # default if we don't find it.
        access_dict = {'read':'all', 'write':'faculty'}
if self.is_sys:
## navigation is a special case : since it's a faculty editable file,
            ## I'll fill it in manually and not require that it have
## the {# #} first line.
if self.relpath == 'sys/navigation' or \
self.relpath == 'sys/navigation.md':
access_dict = {'read':'member', 'write':'faculty'}
else:
## all other system files have an access spec as their first line
                ## e.g. {# {'read':'all', 'write':'faculty'} #}
template = os.path.join(os_root, 'templates', self.sys_template)
firstline = open(template).readline()
try:
access_dict = eval(firstline.replace('{#','').replace('#}',''))
except:
# something fairly safe as a fall-back
access_dict = {'read':'faculty', 'write':'faculty'}
else:
if self.is_dir:
abspath = self.abspath
else:
abspath = os.path.dirname(self.abspath)
while len(abspath) >= len(os_courses):
accesspath = os.path.join(abspath, '.access.yaml')
if os.path.exists(accesspath):
accessfile = open(accesspath)
# see https://msg.pyyaml.org/load
access_dict = yaml.full_load(accessfile)
accessfile.close()
if type(access_dict) == type({}):
# OK, we found an access dict, so stop here.
break
abspath = os.path.dirname(abspath) # i.e. "cd .."
if 'read' not in access_dict:
access_dict['read'] = ''
if 'write' not in access_dict:
access_dict['write'] = ''
# clean up for display :
self.read_access = stringify_access(access_dict['read'])
self.write_access = stringify_access(access_dict['write'])
return access_dict
def _setup_user_permissions(self):
""" Set page.can['read'], page.can['write'],
page.user_role, page.user_rank
from page.user, page.access, and page.course """
# Note that admins who are faculty in a given course
# will have a displayed role of 'faculty' in that course
# but will have admin access to nav menus etc.
assert self.course != None # call self.set_course() first.
assert self.access != None # call self.set_access() first.
assert self.user != None
self.user_role = self.course.person_to_role(self.user)
        # this includes 'tutor' even though there is no 'tutor' role,
        # so I'm using this one as the displayed login role
try:
self.user_rolename = self.course.username_to_rolename[
self.user.username]
except:
self.user_rolename = 'visitor'
self.user_rank = self.user_role.rank
if self.user_role.name in ('faculty', 'admin') and not self.is_sys:
# faculty & admin can read or write anything
# ... but not system pages - I don't want 'edit' tab on all pages.
self.can = {'read': True, 'write': True}
return
if self.user.is_admin():
# Let site admins do what they want in any course.
# Change their display name to 'admin' if it isn't 'faculty'.
# i.e. leave 'faculty' or 'student' display names as is.
self.user_rank = Role.by_name('admin').rank
if self.user_role.name != 'faculty':
self.user_role = Role.by_name('admin')
self.can = {'read':False, 'write':False} # default is deny access
for permission in ('read', 'write'):
yaml_rights = self.access[permission]
access_needed = 10 # i.e. more than anyone has by default
# can be list e.g. ['faculty', 'bob'] or string 'students'
if type(yaml_rights) == type(''):
yaml_rights = [ yaml_rights ]
for name_or_role in yaml_rights:
if name_or_role == self.user.username:
self.can[permission] = True
break
elif name_or_role in Role.name_alias:
access_needed = min(access_needed, \
Role.by_name(name_or_role).rank)
if self.user_rank >= access_needed:
self.can[permission] = True
def get_mimetype(self):
""" Return e.g. 'image/jpeg' for '.jpg' file """
if not Page._mime_types:
mimetypes.init()
Page._mime_types = mimetypes.types_map.copy()
for key in umber_mime_types:
Page._mime_types[key] = umber_mime_types[key]
if self.ext == '':
return 'text/plain'
return Page._mime_types.get(self.ext, 'application/octet-stream')
def children(self, abspath=''):
""" return page for each file or folder below this folder """
result = []
if abspath == '':
abspath = self.abspath
try:
path = os.path.relpath(abspath, os_courses)
for name in sorted(os.listdir(abspath)):
if name[0] == '.': # skip invisible files e.g. .access.yaml
continue
result.append(Page.get_from_path(os.path.join(path, name), user=self.user))
except OSError: # i.e. if abspath isn't a directory.
pass
return result
def icon_url(self):
""" return url for icon for this file type """
return static_url(filetype_to_icon[self.filetype])
def _setup_revision_data(self):
""" read and store within page the git file revision data """
# The log is a list of tuples [(revision, date, author), ...]
log = gitlocal.get_history(self)
if len(log) == 0:
link = self.url
date = self.lastmodified.daydatetimesec()
author = ''
self.githashes = tuple()
self.history = ([link, 'current', date, author], )
self.revision_date = date
self.revision_commit = ''
self.revision_prev_url = ''
self.revision_next_url = ''
self.revision_count = 1
self.revision = None # No git revision stored.
else:
self.githashes = tuple((githash for (githash, date, author) in log))
self.history = [None] * len(log)
for i in range(len(log)):
# say len(log) == 4
# nth => (new) current 3 2 1 (old)
# i => 0 1 2 3 (old)
if i == 0:
nth = 'current'
url = self.url
else:
nth = len(log) - i
url = self.url + '?revision={}'.format(nth)
# history => 0:url 1:nth 2:date 3:author
self.history[i] = tuple((url, nth, log[i][1], log[i][2]))
self.revision_count = len(log)
self.revision_date = self.history[0][2]
if self.revision:
self.revision = int(self.revision)
index = self.revision_count - self.revision
self.revision_date = self.history[index][2]
self.revision_commit = self.githashes[index]
self.revision_next_url = self.url + '?revision={}'.format(
min(self.revision + 1, len(log)))
self.revision_prev_url = self.url + '?revision={}'.format(
max(self.revision - 1, 1))
def _setup_file_properties(self):
""" given self.path, set a bunch of information about the file
including self.absfilename, self.exists, self.is_file, self.is_dir,
self.lastmodified, self.breadcrumbs
"""
self.abspath = os.path.join(os_courses, self.path)
self.path_with_ext = self.path # default, unless modified below
if not os.path.exists(self.abspath):
for ext in ['.md', '.html']:
if ext == '.md' and os.path.exists(self.abspath + ext):
self.abspath = self.abspath + ext
self.path_with_ext = self.path + ext
(ignore, self.ext) = os.path.splitext(self.abspath)
self.exists = os.path.exists(self.abspath)
#print_debug(f'debug _setup_file_properties : path={self.path} exists={self.exists} ')
if not self.exists and self.ext == '':
# creating a new file, so make it a .md markdown file
self.ext = '.md'
self.abspath += '.md'
self.name_with_ext = os.path.split(self.abspath)[-1]
if self.ext == '':
self.name = self.name_with_ext
else:
self.name = self.name_with_ext[: - len(self.ext) ]
# self.name_underlined = self.name + '\n' + '='*len(self.name)
self.path_no_name = self.path[: - len(self.name) ]
self.is_file = os.path.isfile(self.abspath)
self.is_dir = os.path.isdir(self.abspath)
if self.exists:
stat = os.stat(self.abspath)
#print_debug(f'debug _setup_file_properties : stat={str(stat)}')
self.lastmodified = Time(stat.st_mtime)
if self.is_dir:
self.size = None
self.filetype = 'directory'
self.name_with_ext += '/'
elif self.is_file:
self.size = stat.st_size
self.filetype = ext_to_filetype.get(self.ext, 'unknown')
else:
self.size = None
self.filetype = 'unknown'
else:
self.lastmodified = None
self.size = None
# -- build url links for page breadcrumbs --
url_list = [url_base] + self.path.split('/')
urlsofar = protocol + hostname
self.breadcrumbs = '<a href="{}">{}</a>'.format(urlsofar, urlsofar)
while url_list:
pathpart = '/' + url_list.pop(0)
urlsofar += pathpart
self.breadcrumbs += ' ' + '<a href="{}">{}</a>'.format(
urlsofar, pathpart)
self.url = umber_url + '/' + self.path
self.url_for_print_version = self.url + '?print=1'
self.bytesize = size_in_bytes(self.size)
def revision_content_as_html(self):
content = gitlocal.get_revision(self)
content_with_links = link_translate(self.course, content)
return markdown2html(content_with_links)
def content(self):
""" Return file or github (revision) data for a page """
# python3 gotchas:
# for text, I convert to a python3 string (utf8)
# but for other (i.e. binary) data, I leave as python3 bytes
if self.exists and self.is_file:
if self.revision:
text = gitlocal.get_revision(self)
else:
with open(self.abspath, 'rb') as _file:
text_bytes = _file.read()
try:
text = text_bytes.decode('utf8')
except:
text = text_bytes # e.g. *.png files
else:
text = ''
#print_debug(" page.content : page.action = '{}'".format(page.action))
return text
def write_content(self, new_content):
""" Write new data to page's file; return number of bytes written """
if self.can['write']: # shouldn't get here without this anyway
with open(self.abspath, 'wb') as _file:
# open as binary ... need to write bytes.
try:
new_bytes = new_content.encode('utf8')
except:
new_bytes = new_content
bytes_written = _file.write(new_bytes)
return bytes_written
def content_as_html(self):
""" Return file contents as html. """
# This also handles revisions since self.content() does.
if not self.exists:
return ''
elif self.ext == '.md':
            # I'm caching the html version of .md pages in the sql database
            # (for the current version),
            # checking to see if the cache is stale by comparing
            # the file's lastmodified and the sql db html_lastmodified fields.
#print_debug(f" debug content_as_html cache")
#print_debug(f" lastmodified='{self.lastmodified}' ; " + \
# f"html_lastmodified='{self.html_lastmodified}'")
if self.revision:
content = self.content() # pull from git repo
content_with_links = link_translate(self.course, content)
self.html = markdown2html(content_with_links)
self.html_lastmodified = str(self.lastmodified)
elif str(self.lastmodified) != self.html_lastmodified:
#print_debug(f" updating {self.path}")
with db.atomic():
content = self.content() # pull from file
content_with_links = link_translate(self.course, content)
self.html = markdown2html(content_with_links)
self.html_lastmodified = str(self.lastmodified)
self.save()
#else:
#print_debug(f" using cache {self.path}")
# cache : just use .html already read from sql
html = self.html
else:
# Not markdown, so send the file (txt, html, ...) as is.
html = self.content() # from file or git repo
return html
def action_query(self):
""" Return empty string or '&action=edit' if editing """
if self.action == 'edit':
return '&action=edit'
else:
return ''
def nav_content_as_html(self, page):
""" Return authorized parts of html & markdown at html . """
# Here self is the navigation.md page.
# TODO: unlinkify current page
# TODO: This implementation is pretty ugly.
# Perhaps just do this explicitly without BeautifulSoup?
# And make some tests ...
# Each course has a menu navigation page which is a mixture of html
# and markdown, including access tags that look like this :
# <div access='student'>
# ...
# </div>
# This method converts the content of that file to html,
# keeping only the parts that this user is allowed to see.
#
# And do the link_translate first, before any markdown stuff,
# so that it can see the context.
content = self.content()
content = link_translate(self.course, content)
#
parser = BeautifulSoup(content, 'html.parser')
for role in list(Role.name_rank.keys()):
divs = parser.find_all('div', access=role)
if self.user_rank < Role.by_name(role).rank:
for div in divs:
div.extract() # remove this div from its parent parser
insides = []
marker = '.~*#!#*~.' # something that won't be in the html.
for divm in parser.find_all('div', markdown=1):
contents = ''.join(divm.stripped_strings)
mstring = markdown2html(contents)
insides.append(mstring)
divm.string = marker
html = str(parser) # convert beautiful soup object to formatted unicode
while insides:
inside = insides.pop(0)
html = html.replace(marker, inside, 1)
# If the current page is one of the links in the nav menu,
# that link should be unlinkified ... which I'm doing
# with another (ugh) pass through BeautifulSoup,
# now that markdown has run.
# -------------
        # TODO do the right thing for file.md, file.html,
        # and folder ; currently only "file" and "folder/" will work
        # in the nav markdown; the other non-canonical forms with redirects won't.
# (So check other options in a loop, eh?)
parser = BeautifulSoup(html, 'html.parser')
anchor = parser.find('a', href=page.url)
if anchor:
span = parser.new_tag('span')
span['class'] = 'thispage'
span.string = anchor.string
parser.find('a', href=page.url).replace_with(span)
html = str(parser)
return html
class Assignment(BaseModel):
class Meta:
db_table = 'Assignment'
assignment_id = PrimaryKeyField(db_column='assignment_id')
nth = IntegerField(null=False, unique=True)
active = IntegerField()
blurb = TextField()
blurb_hash = TextField()
blurb_html = TextField()
due = TextField(null=True)
name = TextField()
notes = TextField()
course = ForeignKeyField(model=Course,
db_column='course_id',
to_field='course_id')
def get_url(self):
return '{}/sys/assignments#{}'.format(self.course.url, self.nth)
def name_smaller(self):
""" return html version of assignment name with <br> instead of spaces """
return self.name.replace(' ', '<br>')
def get_work(self, person):
""" Return Work for this assignment by given student """
# i.e. work = assignment.get_work(student)
with db.atomic():
(work, created) = Work.get_or_create(assignment = self,
person = person)
if created:
work.grade = '' # | I would have expected this to be
work.notes = '' # | created with the sql defaults ...
work.submitted = '' # | but apparently not.
work.student_modified = ''
work.faculty_modified = ''
work.student_seen = ''
work.faculty_seen = ''
work.page = 0
work.save()
return work
class Role(BaseModel):
class Meta:
db_table = 'Role'
role_id = PrimaryKeyField(db_column='role_id')
name = TextField()
rank = IntegerField()
name_rank = {'admin': 5,
'faculty': 4,
'student': 3,
'member': 2,
'visitor': 1
}
name_alias = {'admin': 'admin',
'administrator': 'admin',
'faculty': 'faculty',
'student': 'student',
'students': 'student',
'tutor': 'student',
'class': 'student',
'guests': 'member',
'guest': 'member',
'member': 'member',
'all': 'visitor',
'any': 'visitor',
'visitor': 'visitor'
}
_cache = {}
@staticmethod
def by_name(name):
if not name in Role.name_rank:
if name in Role.name_alias:
name = Role.name_alias[name]
else:
name = 'visitor'
if not name in Role._cache:
Role._cache[name] = Role.get(name=name)
return Role._cache[name]
@staticmethod
def unalias(alias):
""" Convert alias to its standard role name. """
return Role.name_alias[alias]
@staticmethod
def create_defaults():
with db.atomic():
for (name, rank) in list(Role.name_rank.items()):
Role.get_or_create(name=name, rank=rank)
class Registration(BaseModel):
class Meta:
db_table = 'Registration'
registration_id = PrimaryKeyField(db_column='registration_id')
credits = IntegerField()
date = TextField(null=True)
grade = TextField()
midterm = TextField()
status = TextField()
course = ForeignKeyField(model=Course,
db_column='course_id',
to_field='course_id')
person = ForeignKeyField(model=Person,
db_column='person_id',
to_field='person_id')
role = ForeignKeyField(model=Role,
db_column='role_id',
to_field='role_id')
def rolename(self):
""" return rolname for this registration, including 'tutor' """
return 'tutor' if self.grade=='tutor' else self.role.name
class Work(BaseModel):
class Meta:
db_table = 'Work'
work_id = PrimaryKeyField(db_column='work_id')
grade = TextField()
notes = TextField()
submitted = TextField()
student_modified = TextField(db_column='student_modified')
student_seen = TextField(db_column='student_seen')
faculty_modified = TextField(db_column='faculty_modified')
faculty_seen = TextField(db_column='faculty_seen')
assignment = ForeignKeyField(model=Assignment,
db_column='assignment_id',
to_field='assignment_id')
person = ForeignKeyField(model=Person,
db_column='person_id',
to_field='person_id')
page = ForeignKeyField(model=Page,
db_column='page_id',
to_field='page_id')
@staticmethod
def edit_grades(id_grade_dict):
""" id_grade_dict is web form with some {'work_<id>':new_grade}
extract id's & change grades """
# the dict also has other keys i.e. 'submit_work'; ignore them.
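        # Illustrative example (hypothetical form data, not from the original source):
        #   {'work_12': 'A-', 'work_13': '...', 'submit_work': 'submit'}
        # would set work 12's grade to 'A-' and clear work 13's grade.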
try:
with db.atomic():
for key in id_grade_dict:
if key[:5] == 'work_':
id = int(key[5:])
work = Work.get(work_id=id)
# See get_grade_css for special grades ...,
# The special grades "...", "overdue', 'ungraded'
# are created when the actual grade is not set yet.
grade = id_grade_dict[key]
if grade in ('…', '...', 'overdue', 'ungraded'):
grade = ''
work.grade = grade
work.save()
except:
print_debug('OOPS : Work.edit_grades(id_grade_dict="{}") failed' \
.format(id_grade_dict))
def get_url(self):
# Also see templates/assignments.html
return '{}/students/{}/work/{}.md'.format(self.assignment.course.url,
self.person.username,
self.assignment.nth)
def get_grade_css(self, faculty_view):
css_class = 'black' # the default
#
duedate = Time(self.assignment.due)
duedate.arrow = duedate.arrow.shift(hours=due_grace_hours)
now = Time()
before_due_date = now < duedate
#
# Set blank times to '1901' to avoid errors.
faculty_modified = self.faculty_modified or '1901'
faculty_seen = self.faculty_seen or '1901'
student_modified = self.student_modified or '1901'
student_seen = self.student_seen or '1901'
#print_debug(" faculty_modified = '{}'".format(faculty_modified))
#print_debug(" faculty_seen = '{}'".format(faculty_seen))
#print_debug(" student_modified = '{}'".format(student_modified))
#print_debug(" student_seen = '{}'".format(student_seen))
if faculty_view:
if Time(faculty_modified) > Time(student_seen):
css_class = 'brown'
if Time(student_modified) > Time(faculty_seen):
css_class = 'darkgreen'
if not self.submitted:
if before_due_date:
grade = '…'
else:
grade = 'overdue'
css_class = 'red'
else:
if not self.grade:
grade = 'ungraded'
css_class = 'green'
else:
grade = self.grade
else:
if Time(student_modified) > Time(faculty_seen):
css_class = 'brown'
if Time(faculty_modified) > Time(student_seen):
css_class = 'darkgreen'
if not self.submitted:
if before_due_date:
grade = '…'
else:
grade = 'l͟a͟t͟e͟' # l͟a͟t͟e͟
css_class = 'green'
else:
if not self.grade:
grade = 'ungraded'
css_class = 'brown'
else:
grade = self.grade
if self.grade: # If a grade has been assigned, show it. Period.
grade = self.grade
return (grade, css_class)
def init_db():
""" Create base database objects """
# i.e. roles & site course.
# The Roles data must be in place for the login system to work.
# And the Umber course must exist for user photos and site docs
# and admin user role.
# The sql database must already exist; see bin/init_db .
# All these are "get_or_create", so running 'em multiple times won't hurt.
Role.create_defaults()
Course.create_site()
def populate_production_db(interactive=False):
""" create initial objects for production database """
# see umber/bin/init_db
from utilities import toggle_debug
toggle_debug()
make_admin = False
if interactive:
make_admin = input(' Create admin? (y/n) ').lower()[0] == 'y'
        if make_admin:
admin_username = input(' Admin username? ')
admin_name = input(' Admin full name? ')
admin_passwd = input(' <PASSWORD>? ')
admin_email = input(' Admin email? ')
with db.atomic():
defaultcourse = Course.create_course(
name = 'Default Course',
name_as_title = 'Default<br>Course',
path = 'default_course',
start = '2018-01-01',
copyfrom = False
)
if make_admin:
(admin, created) = Person.get_or_create(username = admin_username)
if created:
admin.name = admin_name
admin.email = admin_email
password = <PASSWORD>
else:
if interactive:
print(f' username "{admin_username}" already exists')
print(' ... setting their is_admin=True')
print(' ... leaving their name & email unchanged.')
admin.is_admin = True
admin.save()
toggle_debug()
def populate_db():
""" Create test & example development objects """
# i.e. democourse, jane, ted, john, adam; examples and tests.
#print("Populating development database.")
from utilities import toggle_debug
toggle_debug()
with db.atomic():
student = Role.by_name('student')
faculty = Role.by_name('faculty')
democourse = Course.create_course(
name = 'Demo Course',
name_as_title = 'Demo<br>Course',
path = 'demo',
start = '2018-01-01',
copyfrom = False
)
defaultcourse = Course.create_course(
name = 'Default Course',
name_as_title = 'Default<br>Course',
path = 'default_course',
start = '2018-01-01',
copyfrom = False
)
jane = Person.create_person(
username = 'janedoe',
name = '<NAME>',
email = '<EMAIL>',
password = '<PASSWORD>' )
john = Person.create_person(
username = 'johnsmith',
name = '<NAME>',
email = '<EMAIL>',
password = '<PASSWORD>' )
ted = Person.create_person(
username = 'tedteacher',
name = '<NAME>',
email = '<EMAIL>',
password = '<PASSWORD>' )
tammy = Person.create_person(
username = 'tammytutor',
name = '<NAME>',
email = '<EMAIL>',
password = '<PASSWORD>' )
adam = Person.create_person(
username = 'adamadmin',
name = '<NAME>',
email = '<EMAIL>',
password = '<PASSWORD>',
is_admin = True )
default_date = '2018-01-02'
democourse.enroll(john, 'student', default_date, create_work=False)
democourse.enroll(jane, 'student', default_date, create_work=False)
democourse.enroll(tammy, 'tutor', default_date, create_work=False)
democourse.enroll(ted, 'faculty', default_date, create_work=False)
# Assignments are set with a dict {nth: {name, due, blurb}.
assignments_data = {
1: {'name': 'week 1',
'due': '2018-01-23',
'blurb': 'Do chap 1 exercises 1 to 10.'},
2: {'name': 'week 2',
'due': 'Jan 28 2018 5pm',
'blurb': 'Write a four part fugue.'}
}
democourse.update_assignments(assignments_data)
assign1 = democourse.get_assignment_by_nth(1)
johns_work = assign1.get_work(john)
johns_work.grade = 'B'
johns_work.submitted = '2018-01-22T18:20:23-05:00' # on time
johns_work.student_seen = johns_work.submitted
johns_work.student_modified = johns_work.submitted
johns_work.faculty_seen = '2018-01-28T16:00:00-05:00'
johns_work.faculty_modified = johns_work.faculty_seen
johns_work.save()
janes_work = assign1.get_work(jane)
        janes_work.submitted = '2018-02-04T22:23:24-05:00'  # past due
# janes_work.grade = '' # not graded yet
janes_work.student_seen = janes_work.submitted
janes_work.student_modified = janes_work.submitted
janes_work.save()
toggle_debug()
if __name__ == '__main__':
import doctest
doctest.testmod()
``` |
{
"source": "jimman2003/meson",
"score": 2
} |
#### File: mesonbuild/interpreterbase/decorators.py
```python
from .. import mesonlib, mlog
from .baseobjects import TV_func, TYPE_var
from .disabler import Disabler
from .exceptions import InterpreterException, InvalidArguments
from .helpers import check_stringlist, get_callee_args
from ._unholder import _unholder
from functools import wraps
import abc
import itertools
import typing as T
def noPosargs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[2]
if args:
raise InvalidArguments('Function does not take positional arguments.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
def builtinMethodNoKwargs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
node = wrapped_args[0].current_node
method_name = wrapped_args[2]
kwargs = wrapped_args[4]
if kwargs:
mlog.warning(f'Method {method_name!r} does not take keyword arguments.',
'This will become a hard error in the future',
location=node)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
def noKwargs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs = get_callee_args(wrapped_args)[3]
if kwargs:
raise InvalidArguments('Function does not take keyword arguments.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
def stringArgs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[2]
assert(isinstance(args, list))
check_stringlist(args)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
def noArgsFlattening(f: TV_func) -> TV_func:
setattr(f, 'no-args-flattening', True) # noqa: B010
return f
def noSecondLevelHolderResolving(f: TV_func) -> TV_func:
setattr(f, 'no-second-level-holder-flattening', True) # noqa: B010
return f
def permissive_unholder_return(f: TV_func) -> T.Callable[..., TYPE_var]:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
res = f(*wrapped_args, **wrapped_kwargs)
return _unholder(res, permissive=True)
return T.cast(T.Callable[..., TYPE_var], wrapped)
def disablerIfNotFound(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs = get_callee_args(wrapped_args)[3]
disabler = kwargs.pop('disabler', False)
ret = f(*wrapped_args, **wrapped_kwargs)
if disabler and not ret.found():
return Disabler()
return ret
return T.cast(TV_func, wrapped)
class permittedKwargs:
def __init__(self, permitted: T.Set[str]):
self.permitted = permitted # type: T.Set[str]
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
s, node, args, kwargs, _ = get_callee_args(wrapped_args)
for k in kwargs:
if k not in self.permitted:
mlog.warning(f'''Passed invalid keyword argument "{k}".''', location=node)
mlog.warning('This will become a hard error in the future.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
def typed_pos_args(name: str, *types: T.Union[T.Type, T.Tuple[T.Type, ...]],
varargs: T.Optional[T.Union[T.Type, T.Tuple[T.Type, ...]]] = None,
optargs: T.Optional[T.List[T.Union[T.Type, T.Tuple[T.Type, ...]]]] = None,
min_varargs: int = 0, max_varargs: int = 0) -> T.Callable[..., T.Any]:
"""Decorator that types type checking of positional arguments.
This supports two different models of optional aguments, the first is the
variadic argument model. Variadic arguments are a possibly bounded,
possibly unbounded number of arguments of the same type (unions are
supported). The second is the standard default value model, in this case
a number of optional arguments may be provided, but they are still
ordered, and they may have different types.
This function does not support mixing variadic and default arguments.
:name: The name of the decorated function (as displayed in error messages)
    :varargs: The type(s) of any variadic arguments the function takes. If
None the function takes no variadic args
:min_varargs: the minimum number of variadic arguments taken
:max_varargs: the maximum number of variadic arguments taken. 0 means unlimited
    :optargs: The types of any optional parameters taken. If None
        then no optional parameters are taken.
    Some examples of usage below:
>>> @typed_pos_args('mod.func', str, (str, int))
... def func(self, state: ModuleState, args: T.Tuple[str, T.Union[str, int]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', str, varargs=str)
... def method(self, node: BaseNode, args: T.Tuple[str, T.List[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', varargs=str, min_varargs=1)
... def method(self, node: BaseNode, args: T.Tuple[T.List[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', str, optargs=[(str, int), str])
... def method(self, node: BaseNode, args: T.Tuple[str, T.Optional[T.Union[str, int]], T.Optional[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
    When should you choose `typed_pos_args('name', varargs=str,
min_varargs=1)` vs `typed_pos_args('name', str, varargs=str)`?
The answer has to do with the semantics of the function, if all of the
inputs are the same type (such as with `files()`) then the former is
correct, all of the arguments are string names of files. If the first
    argument is something else then it should be separated.
"""
def inner(f: TV_func) -> TV_func:
@wraps(f)
def wrapper(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[2]
# These are implementation programming errors, end users should never see them.
assert isinstance(args, list), args
            assert max_varargs >= 0, 'max_varargs cannot be negative'
            assert min_varargs >= 0, 'min_varargs cannot be negative'
assert optargs is None or varargs is None, \
'varargs and optargs not supported together as this would be ambiguous'
num_args = len(args)
num_types = len(types)
a_types = types
if varargs:
min_args = num_types + min_varargs
max_args = num_types + max_varargs
if max_varargs == 0 and num_args < min_args:
raise InvalidArguments(f'{name} takes at least {min_args} arguments, but got {num_args}.')
elif max_varargs != 0 and (num_args < min_args or num_args > max_args):
raise InvalidArguments(f'{name} takes between {min_args} and {max_args} arguments, but got {num_args}.')
elif optargs:
if num_args < num_types:
raise InvalidArguments(f'{name} takes at least {num_types} arguments, but got {num_args}.')
elif num_args > num_types + len(optargs):
raise InvalidArguments(f'{name} takes at most {num_types + len(optargs)} arguments, but got {num_args}.')
# Add the number of positional arguments required
if num_args > num_types:
diff = num_args - num_types
a_types = tuple(list(types) + list(optargs[:diff]))
elif num_args != num_types:
raise InvalidArguments(f'{name} takes exactly {num_types} arguments, but got {num_args}.')
for i, (arg, type_) in enumerate(itertools.zip_longest(args, a_types, fillvalue=varargs), start=1):
if not isinstance(arg, type_):
if isinstance(type_, tuple):
shouldbe = 'one of: {}'.format(", ".join(f'"{t.__name__}"' for t in type_))
else:
shouldbe = f'"{type_.__name__}"'
raise InvalidArguments(f'{name} argument {i} was of type "{type(arg).__name__}" but should have been {shouldbe}')
# Ensure that we're actually passing a tuple.
# Depending on what kind of function we're calling the length of
# wrapped_args can vary.
nargs = list(wrapped_args)
i = nargs.index(args)
if varargs:
# if we have varargs we need to split them into a separate
# tuple, as python's typing doesn't understand tuples with
# fixed elements and variadic elements, only one or the other.
# so in that case we need T.Tuple[int, str, float, T.Tuple[str, ...]]
pos = args[:len(types)]
var = list(args[len(types):])
pos.append(var)
nargs[i] = tuple(pos)
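                # Illustrative example (not from the original source): with
                # types (int, str) and varargs=str, args [1, 'a', 'b', 'c']
                # becomes (1, 'a', ['b', 'c']).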
elif optargs:
if num_args < num_types + len(optargs):
diff = num_types + len(optargs) - num_args
nargs[i] = tuple(list(args) + [None] * diff)
else:
nargs[i] = args
else:
nargs[i] = tuple(args)
return f(*nargs, **wrapped_kwargs)
return T.cast(TV_func, wrapper)
return inner
class ContainerTypeInfo:
"""Container information for keyword arguments.
For keyword arguments that are containers (list or dict), this class encodes
that information.
:param container: the type of container
:param contains: the types the container holds
:param pairs: if the container is supposed to be of even length.
This is mainly used for interfaces that predate the addition of dictionaries, and use
`[key, value, key2, value2]` format.
:param allow_empty: Whether this container is allowed to be empty
There are some cases where containers not only must be passed, but must
not be empty, and other cases where an empty container is allowed.
"""
def __init__(self, container: T.Type, contains: T.Union[T.Type, T.Tuple[T.Type, ...]], *,
                 pairs: bool = False, allow_empty: bool = True):
self.container = container
self.contains = contains
self.pairs = pairs
self.allow_empty = allow_empty
def check(self, value: T.Any) -> T.Optional[str]:
"""Check that a value is valid.
:param value: A value to check
:return: If there is an error then a string message, otherwise None
"""
if not isinstance(value, self.container):
return f'container type was "{type(value).__name__}", but should have been "{self.container.__name__}"'
iter_ = iter(value.values()) if isinstance(value, dict) else iter(value)
for each in iter_:
if not isinstance(each, self.contains):
if isinstance(self.contains, tuple):
shouldbe = 'one of: {}'.format(", ".join(f'"{t.__name__}"' for t in self.contains))
else:
shouldbe = f'"{self.contains.__name__}"'
return f'contained a value of type "{type(each).__name__}" but should have been {shouldbe}'
if self.pairs and len(value) % 2 != 0:
return 'container should be of even length, but is not'
if not value and not self.allow_empty:
return 'container is empty, but not allowed to be'
return None
_T = T.TypeVar('_T')
class _NULL_T:
"""Special null type for evolution, this is an implementation detail."""
_NULL = _NULL_T()
class KwargInfo(T.Generic[_T]):
"""A description of a keyword argument to a meson function
This is used to describe a value to the :func:typed_kwargs function.
:param name: the name of the parameter
:param types: A type or tuple of types that are allowed, or a :class:ContainerType
:param required: Whether this is a required keyword argument. defaults to False
:param listify: If true, then the argument will be listified before being
checked. This is useful for cases where the Meson DSL allows a scalar or
a container, but internally we only want to work with containers
:param default: A default value to use if this isn't set. defaults to None,
this may be safely set to a mutable type, as long as that type does not
itself contain mutable types, typed_kwargs will copy the default
:param since: Meson version in which this argument has been added. defaults to None
:param deprecated: Meson version in which this argument has been deprecated. defaults to None
:param validator: A callable that does additional validation. This is mainly
intended for cases where a string is expected, but only a few specific
values are accepted. Must return None if the input is valid, or a
message if the input is invalid
:param convertor: A callable that converts the raw input value into a
different type. This is intended for cases such as the meson DSL using a
string, but the implementation using an Enum. This should not do
    validation, just conversion.
:param deprecated_values: a dictionary mapping a value to the version of
meson it was deprecated in.
:param since_values: a dictionary mapping a value to the version of meson it was
added in.
    :param not_set_warning: A warning message that is logged if the kwarg is not
set by the user.
"""
def __init__(self, name: str, types: T.Union[T.Type[_T], T.Tuple[T.Type[_T], ...], ContainerTypeInfo],
*, required: bool = False, listify: bool = False,
default: T.Optional[_T] = None,
since: T.Optional[str] = None,
since_values: T.Optional[T.Dict[str, str]] = None,
deprecated: T.Optional[str] = None,
deprecated_values: T.Optional[T.Dict[str, str]] = None,
validator: T.Optional[T.Callable[[_T], T.Optional[str]]] = None,
convertor: T.Optional[T.Callable[[_T], object]] = None,
not_set_warning: T.Optional[str] = None):
self.name = name
self.types = types
self.required = required
self.listify = listify
self.default = default
self.since_values = since_values
self.since = since
self.deprecated = deprecated
self.deprecated_values = deprecated_values
self.validator = validator
self.convertor = convertor
self.not_set_warning = not_set_warning
def evolve(self, *,
name: T.Union[str, _NULL_T] = _NULL,
required: T.Union[bool, _NULL_T] = _NULL,
listify: T.Union[bool, _NULL_T] = _NULL,
default: T.Union[_T, None, _NULL_T] = _NULL,
since: T.Union[str, None, _NULL_T] = _NULL,
since_values: T.Union[T.Dict[str, str], None, _NULL_T] = _NULL,
deprecated: T.Union[str, None, _NULL_T] = _NULL,
deprecated_values: T.Union[T.Dict[str, str], None, _NULL_T] = _NULL,
validator: T.Union[T.Callable[[_T], T.Optional[str]], None, _NULL_T] = _NULL,
convertor: T.Union[T.Callable[[_T], TYPE_var], None, _NULL_T] = _NULL) -> 'KwargInfo':
"""Create a shallow copy of this KwargInfo, with modifications.
This allows us to create a new copy of a KwargInfo with modifications.
This allows us to use a shared kwarg that implements complex logic, but
has slight differences in usage, such as being added to different
functions in different versions of Meson.
        The use of the _NULL special value here allows us to pass None, which has
meaning in many of these cases. _NULL itself is never stored, always
being replaced by either the copy in self, or the provided new version.
"""
return type(self)(
name if not isinstance(name, _NULL_T) else self.name,
self.types,
listify=listify if not isinstance(listify, _NULL_T) else self.listify,
required=required if not isinstance(required, _NULL_T) else self.required,
default=default if not isinstance(default, _NULL_T) else self.default,
since=since if not isinstance(since, _NULL_T) else self.since,
since_values=since_values if not isinstance(since_values, _NULL_T) else self.since_values,
deprecated=deprecated if not isinstance(deprecated, _NULL_T) else self.deprecated,
deprecated_values=deprecated_values if not isinstance(deprecated_values, _NULL_T) else self.deprecated_values,
validator=validator if not isinstance(validator, _NULL_T) else self.validator,
convertor=convertor if not isinstance(convertor, _NULL_T) else self.convertor,
)
def typed_kwargs(name: str, *types: KwargInfo) -> T.Callable[..., T.Any]:
"""Decorator for type checking keyword arguments.
Used to wrap a meson DSL implementation function, where it checks various
things about keyword arguments, including the type, and various other
information. For non-required values it sets the value to a default, which
means the value will always be provided.
    If the type is a :class:ContainerTypeInfo, then the default value will be
passed as an argument to the container initializer, making a shallow copy
    :param name: the name of the function, including the object it's attached to
(if applicable)
:param *types: KwargInfo entries for each keyword argument.
"""
def inner(f: TV_func) -> TV_func:
@wraps(f)
def wrapper(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
_kwargs, subproject = get_callee_args(wrapped_args, want_subproject=True)[3:5]
# Cast here, as the convertor function may place something other than a TYPE_var in the kwargs
kwargs = T.cast(T.Dict[str, object], _kwargs)
all_names = {t.name for t in types}
unknowns = set(kwargs).difference(all_names)
if unknowns:
                # Warn about unknown arguments, delete them and continue. This
# keeps current behavior
ustr = ', '.join([f'"{u}"' for u in sorted(unknowns)])
mlog.warning(f'{name} got unknown keyword arguments {ustr}')
for u in unknowns:
del kwargs[u]
for info in types:
value = kwargs.get(info.name)
if value is not None:
if info.since:
feature_name = info.name + ' arg in ' + name
FeatureNew.single_use(feature_name, info.since, subproject)
if info.deprecated:
feature_name = info.name + ' arg in ' + name
FeatureDeprecated.single_use(feature_name, info.deprecated, subproject)
if info.listify:
kwargs[info.name] = value = mesonlib.listify(value)
if isinstance(info.types, ContainerTypeInfo):
msg = info.types.check(value)
if msg is not None:
raise InvalidArguments(f'{name} keyword argument "{info.name}" {msg}')
else:
if not isinstance(value, info.types):
if isinstance(info.types, tuple):
shouldbe = 'one of: {}'.format(", ".join(f'"{t.__name__}"' for t in info.types))
else:
shouldbe = f'"{info.types.__name__}"'
                            raise InvalidArguments(f'{name} keyword argument "{info.name}" was of type "{type(value).__name__}" but should have been {shouldbe}')
if info.validator is not None:
msg = info.validator(value)
if msg is not None:
raise InvalidArguments(f'{name} keyword argument "{info.name}" {msg}')
warn: bool
if info.deprecated_values is not None:
for n, version in info.deprecated_values.items():
if isinstance(value, (dict, list)):
warn = n in value
else:
warn = n == value
if warn:
FeatureDeprecated.single_use(f'"{name}" keyword argument "{info.name}" value "{n}"', version, subproject)
if info.since_values is not None:
for n, version in info.since_values.items():
if isinstance(value, (dict, list)):
warn = n in value
else:
warn = n == value
if warn:
FeatureNew.single_use(f'"{name}" keyword argument "{info.name}" value "{n}"', version, subproject)
elif info.required:
raise InvalidArguments(f'{name} is missing required keyword argument "{info.name}"')
else:
# set the value to the default, this ensuring all kwargs are present
# This both simplifies the typing checking and the usage
# Create a shallow copy of the container (and do a type
# conversion if necessary). This allows mutable types to
# be used safely as default values
if isinstance(info.types, ContainerTypeInfo):
assert isinstance(info.default, info.types.container), f'In function {name} default value of {info.name} is not a valid type, got {type(info.default)}, expected {info.types.container}[{info.types.contains}]'
for item in info.default:
assert isinstance(item, info.types.contains), f'In function {name} default value of {info.name}, container has invalid value of {item}, which is of type {type(item)}, but should be {info.types.contains}'
kwargs[info.name] = info.types.container(info.default)
else:
                    assert isinstance(info.default, info.types), f'In function {name} default value of {info.name} is not a valid type, got {type(info.default)} expected {info.types}'
kwargs[info.name] = info.default
if info.not_set_warning:
mlog.warning(info.not_set_warning)
if info.convertor:
kwargs[info.name] = info.convertor(kwargs[info.name])
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapper)
return inner
class FeatureCheckBase(metaclass=abc.ABCMeta):
"Base class for feature version checks"
# In python 3.6 we can just forward declare this, but in 3.5 we can't
# This will be overwritten by the subclasses by necessity
feature_registry = {} # type: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[str]]]]
def __init__(self, feature_name: str, version: str, extra_message: T.Optional[str] = None):
self.feature_name = feature_name # type: str
self.feature_version = version # type: str
self.extra_message = extra_message or '' # type: str
@staticmethod
def get_target_version(subproject: str) -> str:
# Don't do any checks if project() has not been parsed yet
if subproject not in mesonlib.project_meson_versions:
return ''
return mesonlib.project_meson_versions[subproject]
@staticmethod
@abc.abstractmethod
    def check_version(target_version: str, feature_version: str) -> bool:
pass
def use(self, subproject: str) -> None:
tv = self.get_target_version(subproject)
# No target version
if tv == '':
return
# Target version is new enough
if self.check_version(tv, self.feature_version):
return
# Feature is too new for target version, register it
if subproject not in self.feature_registry:
self.feature_registry[subproject] = {self.feature_version: set()}
register = self.feature_registry[subproject]
if self.feature_version not in register:
register[self.feature_version] = set()
if self.feature_name in register[self.feature_version]:
# Don't warn about the same feature multiple times
# FIXME: This is needed to prevent duplicate warnings, but also
# means we won't warn about a feature used in multiple places.
return
register[self.feature_version].add(self.feature_name)
self.log_usage_warning(tv)
@classmethod
def report(cls, subproject: str) -> None:
if subproject not in cls.feature_registry:
return
warning_str = cls.get_warning_str_prefix(cls.get_target_version(subproject))
fv = cls.feature_registry[subproject]
for version in sorted(fv.keys()):
warning_str += '\n * {}: {}'.format(version, fv[version])
mlog.warning(warning_str)
def log_usage_warning(self, tv: str) -> None:
raise InterpreterException('log_usage_warning not implemented')
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
raise InterpreterException('get_warning_str_prefix not implemented')
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
subproject = get_callee_args(wrapped_args, want_subproject=True)[4]
if subproject is None:
raise AssertionError(f'{wrapped_args!r}')
self.use(subproject)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
@classmethod
def single_use(cls, feature_name: str, version: str, subproject: str,
extra_message: T.Optional[str] = None) -> None:
"""Oneline version that instantiates and calls use()."""
cls(feature_name, version, extra_message).use(subproject)
class FeatureNew(FeatureCheckBase):
"""Checks for new features"""
# Class variable, shared across all instances
#
# Format: {subproject: {feature_version: set(feature_names)}}
feature_registry = {} # type: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[str]]]]
@staticmethod
def check_version(target_version: str, feature_version: str) -> bool:
return mesonlib.version_compare_condition_with_min(target_version, feature_version)
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
return f'Project specifies a minimum meson_version \'{tv}\' but uses features which were added in newer versions:'
def log_usage_warning(self, tv: str) -> None:
args = [
'Project targeting', f"'{tv}'",
'but tried to use feature introduced in',
f"'{self.feature_version}':",
f'{self.feature_name}.',
]
if self.extra_message:
args.append(self.extra_message)
mlog.warning(*args)
class FeatureDeprecated(FeatureCheckBase):
"""Checks for deprecated features"""
# Class variable, shared across all instances
#
# Format: {subproject: {feature_version: set(feature_names)}}
feature_registry = {} # type: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[str]]]]
@staticmethod
def check_version(target_version: str, feature_version: str) -> bool:
# For deprecation checks we need to return the inverse of FeatureNew checks
return not mesonlib.version_compare_condition_with_min(target_version, feature_version)
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
return 'Deprecated features used:'
def log_usage_warning(self, tv: str) -> None:
args = [
'Project targeting', f"'{tv}'",
'but tried to use feature deprecated since',
f"'{self.feature_version}':",
f'{self.feature_name}.',
]
if self.extra_message:
args.append(self.extra_message)
mlog.warning(*args)
class FeatureCheckKwargsBase(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def feature_check_class(self) -> T.Type[FeatureCheckBase]:
pass
def __init__(self, feature_name: str, feature_version: str,
kwargs: T.List[str], extra_message: T.Optional[str] = None):
self.feature_name = feature_name
self.feature_version = feature_version
self.kwargs = kwargs
self.extra_message = extra_message
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs, subproject = get_callee_args(wrapped_args, want_subproject=True)[3:5]
if subproject is None:
raise AssertionError(f'{wrapped_args!r}')
for arg in self.kwargs:
if arg not in kwargs:
continue
name = arg + ' arg in ' + self.feature_name
self.feature_check_class.single_use(
name, self.feature_version, subproject, self.extra_message)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast(TV_func, wrapped)
class FeatureNewKwargs(FeatureCheckKwargsBase):
feature_check_class = FeatureNew
class FeatureDeprecatedKwargs(FeatureCheckKwargsBase):
feature_check_class = FeatureDeprecated
``` |
{
"source": "jimman2003/openlibrary-to-sqlite",
"score": 3
} |
#### File: jimman2003/openlibrary-to-sqlite/download.py
```python
import urllib.request
from tqdm import tqdm
## Taken right from the docs at https://github.com/tqdm/tqdm
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
return self.update(b * bsize - self.n) # also sets self.n = b * bsize
def download_file_with_progress_bar(url, destination):
with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
desc=url.split('/')[-1]) as t: # all optional kwargs
urllib.request.urlretrieve(url, filename=destination,
reporthook=t.update_to, data=None)
t.total = t.n
```
#### File: jimman2003/openlibrary-to-sqlite/main.py
```python
import os
from database import create_works_database, load_database
from download import download_file_with_progress_bar
import subprocess
import logging
DATABASE_LOCATION = "openlibrary_works.sqlite3"
def do_everything():
if not os.path.isfile(DATABASE_LOCATION):
dump_file_location = "ol_dump_works_latest.txt.gz"
if not os.path.isfile(dump_file_location):
logging.info("dump not detected, downloading")
url = "https://openlibrary.org/data/ol_dump_works_latest.txt.gz"
download_file_with_progress_bar(url, dump_file_location)
con = create_works_database(DATABASE_LOCATION)
load_database(con, dump_file_location)
subprocess.run(["datasette", DATABASE_LOCATION, "--setting", "sql_time_limit_ms", "35000"])
do_everything()
``` |
{
"source": "jimmayjr/swn-gen",
"score": 3
} |
#### File: swn-gen/swn/hexutils.py
```python
import exception
import math
# Notes ------------------------------------------------------------------------
# Flat topped hex vertices
# 2_______1
# / \
# / \
# 3/ \0
# \ /
# \ /
# 4\_______/5
# Axial coordinate conversions -------------------------------------------------
## Axial to cube.
def axial_to_cube(q, r):
x = q
z = r
y = -x-z
return(x,y,z)
## Axial to odd-q.
def axial_to_odd_q(q, r):
(x, y, z) = axial_to_cube(q, r)
(row, col) = cube_to_odd_q(x, y, z)
return(row, col)
# Cube coordinate conversions --------------------------------------------------
## Cube to axial.
def cube_to_axial(x, y, z):
q = x
r = z
return(q, r)
## Cube to odd-q.
def cube_to_odd_q(x, y, z):
col = x
row = z + (x - (x&1)) / 2
return(row, col)
# Offset coordinate conversions ------------------------------------------------
## Odd-q to axial.
def odd_q_to_axial(row, col):
    (x, y, z) = odd_q_to_cube(row, col)
(q, r) = cube_to_axial(x, y, z)
return(q, r)
## Odd-q to cube.
def odd_q_to_cube(row, col):
x = col
z = row - (col - (col&1)) / 2
y = -x-z
return(x, y, z)
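# Illustrative round trip (worked example, not from the original source):
# odd_q_to_cube(1, 2) -> (2, -2, 0) and cube_to_odd_q(2, -2, 0) -> (1, 2).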
# Rounding ---------------------------------------------------------------------
def axial_round(q, r):
(x, y, z) = axial_to_cube(q, r)
(rx, ry, rz) = cube_round(x, y, z)
(rq, rr) = cube_to_axial(rx, ry, rz)
    return(rq, rr)
def cube_round(x, y, z):
rX = round(x)
rY = round(y)
rZ = round(z)
xDiff = abs(rX - x)
yDiff = abs(rY - y)
zDiff = abs(rZ - z)
if ( (xDiff > yDiff) and (xDiff > zDiff) ):
rX = -rY-rZ
elif ( yDiff > zDiff ):
rY = -rX-rZ
else:
rZ = -rX-rY
return(int(rX), int(rY), int(rZ))
def odd_q_round(row, col):
(x, y, z) = odd_q_to_cube(row, col)
(rX, rY, rZ) = cube_round(x, y, z)
(rRow, rCol) = cube_to_odd_q(rX, rY, rZ)
return(rRow, rCol)
# Distances --------------------------------------------------------------------
## Axial coordinate distance.
def axial_distance(aQ, aR, bQ, bR):
(aX, aY, aZ) = axial_to_cube(aQ, aR)
(bX, bY, bZ) = axial_to_cube(bQ, bR)
return(cube_distance(aX, aY, aZ, bX, bY, bZ))
## Cube coordinate distance.
def cube_distance(aX, aY, aZ, bX, bY, bZ):
return((abs(aX - bX) + abs(aY - bY) + abs(aZ - bZ))/2)
## Odd-r coordinate distance.
def odd_q_distance(aRow, aCol, bRow, bCol):
(aX, aY, aZ) = odd_q_to_cube(aRow, aCol)
(bX, bY, bZ) = odd_q_to_cube(bRow, bCol)
return(cube_distance(aX, aY, aZ, bX, bY, bZ))
# Neighbors --------------------------------------------------------------------
def axial_neighbors(q, r):
    (x, y, z) = axial_to_cube(q, r)
cn = cube_neighbors(x, y, z)
neighbors = list()
for c in cn:
neighbors.append(cube_to_axial(c[0], c[1], c[2]))
return(neighbors)
def cube_neighbors(x, y, z):
relNeighbors = [[ 1,-1, 0],
[ 1, 0,-1],
[ 0, 1,-1],
[-1, 1, 0],
[-1, 0, 1],
[0, -1, 1]]
neighbors = list()
for rn in relNeighbors:
neighbors.append([x+rn[0], y+rn[1], z+rn[2]])
return(neighbors)
def odd_q_neighbors(row, col):
    (x, y, z) = odd_q_to_cube(row, col)
    cn = cube_neighbors(x, y, z)
    neighbors = list()
for c in cn:
neighbors.append(cube_to_odd_q(c[0], c[1], c[2]))
return(neighbors)
# Lines ------------------------------------------------------------------------
def axial_line(aQ, aR, bQ, bR):
(aX, aY, aZ) = axial_to_cube(aQ, aR)
(bX, bY, bZ) = axial_to_cube(bQ, bR)
cubeLineList = cube_line(aX, aY, aZ, bX, bY, bZ)
axialLineList = list()
for cl in cubeLineList:
axialLineList.append(cube_to_axial(cl[0], cl[1], cl[2]))
return(axialLineList)
def cube_line(aX, aY, aZ, bX, bY, bZ):
    d = int(cube_distance(aX, aY, aZ, bX, bY, bZ))
    line = list()
    if (d == 0):
        return([cube_round(aX, aY, aZ)])
    for step in range(d+1):
        (rX, rY, rZ) = cube_linterp(aX, aY, aZ, bX, bY, bZ, 1.0/d*step)
        line.append(cube_round(rX, rY, rZ))
    return(line)
def odd_q_line(aRow, aCol, bRow, bCol):
(aX, aY, aZ) = odd_q_to_cube(aRow, aCol)
(bX, bY, bZ) = odd_q_to_cube(bRow, bCol)
cubeLineList = cube_line(aX, aY, aZ, bX, bY, bZ)
oddQLineList = list()
for cl in cubeLineList:
oddQLineList.append(cube_to_odd_q(cl[0], cl[1], cl[2]))
return(oddQLineList)
# Linear interpolation ---------------------------------------------------------
def axial_linterp(aQ, aR, bQ, bR, t):
(aX, aY, aZ) = axial_to_cube(aQ,aR)
(bX, bY, bZ) = axial_to_cube(bQ,bR)
(x, y, z) = cube_linterp(aX, aY, aZ, bX, bY, bZ, t)
(q, r) = cube_to_axial(x, y, z)
return(q, r)
def cube_linterp(aX, aY, aZ, bX, bY, bZ, t):
x = aX + (bX - aX) * t
y = aY + (bY - aY) * t
z = aZ + (bZ - aZ) * t
return(x, y, z)
# Pixel conversions ------------------------------------------------------------
def flat_height(size):
# Check arguments
exception.arg_check(size, float)
# Get width
width = flat_width(size)
# Calculate height
return(math.sqrt(3.)/2.*width)
def flat_horizontal_spacing(size):
# Check arguments
exception.arg_check(size, float)
# Calculate horizontal spacing
return(flat_width(size)*3./4.)
def flat_vertical_spacing(size):
# Check arguments
exception.arg_check(size, float)
# Calculate vertical spacing
return(flat_height(size))
def flat_width(size):
# Check arguments
exception.arg_check(size, float)
# Calculate width
return(size*2.)
def flat_vertex(size, vertex):
# Check arguments
exception.arg_check(vertex,int)
# Vertex angle
deg = 60.*vertex
rad = (math.pi/180.)*deg
# Pixel position
return(size*math.cos(rad), size*math.sin(rad))
def odd_q_center(size, row, col):
# Check arguments
exception.arg_check(size, float)
exception.arg_check(row, int)
exception.arg_check(col, int)
# Hex size
width = flat_width(size)
height = flat_height(size)
# Center
cX = width*((1./2.) + (3./4.)*float(col))
# Center position Y
# Even columns
if (col % 2 == 0):
cY = height*((1./2.)+float(row))
# Odd columns
else:
cY = height*(1.+float(row))
# Center position
return(cX, cY)
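# Minimal usage sketch (assumes the sibling `exception` module is importable):
# round-trip an odd-q offset coordinate through cube space and compute a hex
# distance between two offset coordinates.
if __name__ == '__main__':
    (x, y, z) = odd_q_to_cube(3, 2)
    assert cube_to_odd_q(x, y, z) == (3, 2)
    print(odd_q_distance(0, 0, 3, 2))  # -> 4.0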
```
#### File: swn-gen/swn/random.py
```python
import numpy as np
SEED_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
SEED_ALPHABET_DICT = dict((c, i) for i, c in enumerate(SEED_ALPHABET))
SEED_MAX = 'ZZZZZ'
SEED_MAX_CHAR_LEN = 5 # ZZZZZ is under max uint32, ZZZZZZ is above max uint32
## Dice roll + modifier
#
# Rolls N number of D dice, adding M as a modifier to the result.
# @param num Number of dice to roll.
# @param die Which sided die to roll.
# @param mod Modifier to add to the roll sum result. Default is 0.
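# Example: dice_roll(3, 6, 2) rolls 3d6 and adds 2 to the sum.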
def dice_roll(num,die,mod=0):
return(sum(np.random.random_integers(1,die,num))+mod)
## Random Seed
#
# Randomly selects a seed string and then sets it as the seed.
def random_seed():
randomSeedUInt = np.random.random_integers(0,seed_alphabet_decode(SEED_MAX))
randomSeedString = seed_alphabet_encode(randomSeedUInt)
set_seed(randomSeedUInt)
return(randomSeedString)
## Random seed alphabet decode
#
# Decodes a seed into an unsigned integer.
# @param seedString String to be decoded.
def seed_alphabet_decode(seedString):
# Check length
if (len(seedString)>SEED_MAX_CHAR_LEN):
raise(InvalidSeedLengthError("Seed length exceeds max allowed: length %s and max %s" % (len(seedString),SEED_MAX_CHAR_LEN)))
# Check for invalid characters
for char in seedString:
if (char not in SEED_ALPHABET):
raise(InvalidSeedCharError("Invalid seed character: %s in %s" % (char,seedString)))
# Convert to uInt
reverse_base = SEED_ALPHABET_DICT
length = len(reverse_base)
ret = 0
for i, c in enumerate(seedString[::-1]):
ret += (length ** i) * reverse_base[c]
return(ret)
## Random seed alphabet encode
#
# Encodes an unsigned integer into the seed alphabet.
# @param seedUInt Integer to be encoded.
def seed_alphabet_encode(seedUInt):
if (seedUInt<0):
raise(InvalidSeedNumberError("Negative number: %i" % seedUInt))
if (seedUInt>seed_alphabet_decode(SEED_MAX)):
raise(InvalidSeedNumberError("Seed too large: %i" % seedUInt))
base=SEED_ALPHABET
length = len(base)
ret = ''
while seedUInt != 0:
ret = base[seedUInt % length] + ret
        seedUInt //= length
return(ret)
## Set random number generator seed
#
# Set the seed for the numpy random number generator.
# @param seedInt
def set_seed(seedInt):
np.random.seed(seedInt)
## Invalid seed character exception class.
class InvalidSeedCharError(Exception):
pass
## Invalid seed length exception class.
class InvalidSeedLengthError(Exception):
pass
## Invalid seed number exception class.
class InvalidSeedNumberError(Exception):
pass
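# Minimal round-trip sketch: decode a seed string to an unsigned integer,
# re-encode it, and use it to seed numpy's random number generator.
if __name__ == '__main__':
    seed_string = 'swn'
    seed_int = seed_alphabet_decode(seed_string)
    assert seed_alphabet_encode(seed_int) == seed_string
    set_seed(seed_int)
    print(seed_string, '->', seed_int)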
``` |
{
"source": "jimmayxu/scVI",
"score": 3
} |
#### File: scVI/de/de_compare.py
```python
import argparse
import os
import matplotlib.pyplot as plt
from utils import name_to_dataset
from de_models import ScVIClassic, Wilcoxon, EdgeR
def parse_args():
parser = argparse.ArgumentParser(description='Compare methods for a given dataset')
parser.add_argument('--dataset', type=str, help='Name of considered dataset')
parser.add_argument('--nb_genes', type=int, default=None)
parser.add_argument('--save_dir', type=str, default=None)
return parser.parse_args()
if __name__ == '__main__':
# args = parse_args()
# dataset_name = args.dataset
# nb_genes = args.nb_genes
dataset_name = "powsimr"
nb_genes = 1200
save_dir = '.'
dataset = name_to_dataset[dataset_name]()
if nb_genes is not None:
dataset.subsample_genes(new_n_genes=nb_genes)
models = [
ScVIClassic(dataset=dataset, reconstruction_loss='zinb', n_latent=5,
full_cov=False, do_mean_variance=False,
name='scVI_classic'),
# ScVIClassic(dataset=dataset, reconstruction_loss='zinb', n_latent=5,
# full_cov=False, do_mean_variance=True,
# name='scVI_mean_variance'),
# ScVIClassic(dataset=dataset, reconstruction_loss='zinb',
# n_latent=5, full_cov=True, do_mean_variance=False, name='scVI_full_covariance'),
# EdgeR(dataset=dataset, name='EdgeR'),
# Wilcoxon(dataset=dataset, name='Wilcoxon'),
]
results = {}
dataframes = {}
for model in models:
model_name = model.name
model.full_init()
model.train()
model_perfs = model.predict_de()
dataframes[model_name] = model_perfs
results[model_name] = model.precision_recall_curve()
###
import numpy as np
model_perfs.loc[:, 'scVI_classic_gamma_score'] = np.abs(model_perfs['scVI_classic_gamma_bayes1'])
results['scVI_classic_gamma'] = model.precision_recall_curve(model_perfs.scVI_classic_gamma_score)
assert dataset_name == 'powsimr'
res_df = dataset.gene_properties
# Precision Recall curve:
for key in results:
precision, recall, mAP = results[key]
plt.plot(recall, precision, alpha=1.0, label='{}@AP={:0.2f}'.format(key, mAP))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision Recall for all models')
plt.legend()
plt.savefig(os.path.join(save_dir, '{}_precision_recall.png'.format(dataset_name)))
plt.show()
# Saving dataframes if needed for later use
for key in dataframes:
res_df = res_df.join(dataframes[key])
res_df.to_csv(os.path.join(save_dir, '{}_de.tsv'.format(dataset_name)), sep='\t')
```
#### File: de/de_models/scvi_classic.py
```python
from scvi.models import VAE, MeanVarianceVAE
from scvi.inference import UnsupervisedTrainer
from .de_model import DEModel
import numpy as np
class ScVIClassic(DEModel):
def __init__(self, dataset, reconstruction_loss, n_latent, full_cov=False,
do_mean_variance=False, name=''):
super().__init__(dataset=dataset, name=name)
self.reconstruction_loss = reconstruction_loss
self.n_latent = n_latent
self.full_cov = full_cov
if do_mean_variance:
self.model_type = MeanVarianceVAE
else:
self.model_type = VAE
self.model = None
self.trainer = None
self.is_fully_init = False
# early_stopping_kwargs={'early_stopping_metric': 'll',
# 'save_best_state_metric': 'll',
# 'patience': 15, 'threshold': 3}
def full_init(self):
self.model = self.model_type(n_input=self.dataset.nb_genes, n_batch=self.dataset.n_batches,
reconstruction_loss=self.reconstruction_loss,
n_latent=self.n_latent,
full_cov=self.full_cov)
self.trainer = UnsupervisedTrainer(model=self.model, gene_dataset=self.dataset,
use_cuda=True,
train_size=0.7, kl=1, frequency=1)
self.is_fully_init = True
def train(self, **train_params):
assert self.is_fully_init
if len(train_params) == 0:
train_params = {'n_epochs': 150, 'lr': 1e-3}
self.trainer.train(**train_params)
def predict_de(self, n_samples=10000, M_permutation=100000, n_for_each=10,
idx1=None, idx2=None, mode='rho'):
assert mode in ['rho', 'gamma']
full = self.trainer.create_posterior(self.model, self.dataset,
indices=np.arange(len(self.dataset)))
if idx1 is None and idx2 is None:
cell_pos1 = np.where(self.dataset.labels.ravel() == 0)[0][:n_for_each]
cell_pos2 = np.where(self.dataset.labels.ravel() == 1)[0][:n_for_each]
cell_idx1 = np.isin(np.arange(len(self.dataset)), cell_pos1)
cell_idx2 = np.isin(np.arange(len(self.dataset)), cell_pos2)
else:
cell_idx1 = idx1
cell_idx2 = idx2
de_res = full.differential_expression_score(cell_idx1, cell_idx2, n_samples=n_samples,
M_permutation=M_permutation)
de_res_gamma = full.differential_expression_gamma(cell_idx1, cell_idx2, n_samples=n_samples,
M_permutation=M_permutation)
de_res.loc[:, 'gamma_bayes1'] = de_res_gamma
de_res = de_res.sort_index()
self.de_pred = de_res.bayes1.abs()
de_res.columns = [self.name+'_'+col for col in de_res.columns]
return de_res
```
#### File: scVI/main/new.py
```python
import torch
import numpy as np
torch.manual_seed(0)
np.random.seed(0)
import pandas as pd
import scanpy as sc
sc.settings.verbosity = 0
import os , sys
sys.path.append(os.path.abspath("../.."))
n_epochs_all = None
test_mode = False
def if_not_test_else(x, y):
if not test_mode:
return x
else:
return y
save_path = "data/"
filename = "../data/ica_bone_marrow_h5.h5"
adata = sc.read_10x_h5(filename)
adata.var_names_make_unique()
adata.obs_names_make_unique()
# adata.shape
# adata.obs_names
# adata.var_names
sc.pp.filter_cells(adata, min_genes= 200)
sc.pp.filter_genes(adata, min_cells=3)
sc.pp.filter_cells(adata, min_genes=1)
mito_genes = adata.var_names.str.startswith("MT-")
adata.obs["percent_mito"] = (
np.sum(adata[:, mito_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
)
adata.obs["n_counts"] = adata.X.sum(axis=1).A1
adata = adata[adata.obs["n_genes"] < 2500, :]
adata = adata[adata.obs["percent_mito"] < 0.05, :]
"""
Normalization and more filtering
We only keep highly variable genes
"""
adata_original = adata.copy()
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
sc.pp.log1p(adata)
min_mean = if_not_test_else(0.0125, -np.inf)
max_mean = if_not_test_else(3, np.inf)
min_disp = if_not_test_else(0.5, -np.inf)
max_disp = if_not_test_else(None, np.inf)
sc.pp.highly_variable_genes(
adata,
min_mean=min_mean,
max_mean=max_mean,
min_disp=min_disp,
max_disp=max_disp
# n_top_genes=500
)
adata.raw = adata
highly_variable_genes = adata.var["highly_variable"]
adata = adata[:, highly_variable_genes]
sc.pp.regress_out(adata, ["n_counts", "percent_mito"])
sc.pp.scale(adata, max_value=10)
# Also filter the original adata genes
adata_original = adata_original[:, highly_variable_genes]
print(highly_variable_genes.sum())
# We also store adata_original into adata.raw
# (which was designed for this purpose but actually has limited functionalities)
adata.raw = adata_original
"""
Compute the scVI latent space
"""
import scvi
from scvi.dataset.anndata import AnnDataset
from scvi.inference import UnsupervisedTrainer
from scvi.models.vae import VAE
from typing import Tuple
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 5,
n_epochs: int = 100,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = True,
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDataset(adata)
# Train a model
vae = VAE(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
####
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent
n_epochs = 10 if n_epochs_all is None else n_epochs_all
scvi_posterior, scvi_latent = compute_scvi_latent(
adata_original, n_epochs=n_epochs, n_latent=6
)
adata.obsm["X_scvi"] = scvi_latent
"""
Finding marker genes
"""
def rank_genes_groups_bayes(
adata: sc.AnnData,
scvi_posterior: scvi.inference.Posterior,
n_samples: int = None,
M_permutation: int = None,
n_genes: int = 25,
label_name: str = "louvain_scvi",
) -> pd.DataFrame:
"""
Rank genes for characterizing groups.
Computes Bayes factor for each cluster against the others to test for differential expression.
See Nature article (https://rdcu.be/bdHYQ)
:param adata: sc.AnnData object non-normalized
:param scvi_posterior:
:param n_samples:
:param M_permutation:
:param n_genes:
:param label_name: The groups tested are taken from adata.obs[label_name] which can be computed
using clustering like Louvain (Ex: sc.tl.louvain(adata, key_added=label_name) )
:return: Summary of Bayes factor per gene, per cluster
"""
# Call scvi function
per_cluster_de, cluster_id = scvi_posterior.one_vs_all_degenes(
cell_labels=np.asarray(adata.obs[label_name].values).astype(int).ravel(),
min_cells=1,
n_samples=n_samples,
M_permutation=M_permutation,
)
# convert to ScanPy format -- this is just about feeding scvi results into a format readable by ScanPy
markers = []
scores = []
names = []
for i, x in enumerate(per_cluster_de):
subset_de = x[:n_genes]
markers.append(subset_de)
scores.append(tuple(subset_de["bayes1"].values))
names.append(tuple(subset_de.index.values))
markers = pd.concat(markers)
dtypes_scores = [(str(i), "<f4") for i in range(len(scores))]
dtypes_names = [(str(i), "<U50") for i in range(len(names))]
scores = np.array([tuple(row) for row in np.array(scores).T], dtype=dtypes_scores)
scores = scores.view(np.recarray)
names = np.array([tuple(row) for row in np.array(names).T], dtype=dtypes_names)
names = names.view(np.recarray)
adata.uns["rank_genes_groups_scvi"] = {
"params": {
"groupby": "",
"reference": "rest",
"method": "",
"use_raw": True,
"corr_method": "",
},
"scores": scores,
"names": names,
}
return markers
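# Typical downstream usage (sketch, assuming a Louvain clustering of the scVI
# latent space has been computed first):
#   sc.pp.neighbors(adata, use_rep="X_scvi")
#   sc.tl.louvain(adata, key_added="louvain_scvi")
#   markers = rank_genes_groups_bayes(adata, scvi_posterior, label_name="louvain_scvi")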
```
#### File: scvi/dataset/powsimr.py
```python
from . import GeneExpressionDataset
import numpy as np
import os
from scipy.interpolate import interp1d
import pandas as pd
import torch.distributions as distributions
batch_lfc = distributions.Normal(loc=0.0, scale=0.25)
class SignedGamma:
def __init__(self, dim, proba_pos=0.75, shape=2, rate=4):
self.proba_pos = proba_pos
self.shape = shape
self.rate = rate
self.dim = dim
def sample(self, size):
if type(size) == int:
sample_size = (size, self.dim)
else:
sample_size = list(size) + [self.dim]
        signs = 2.0 * distributions.Bernoulli(probs=self.proba_pos).sample(sample_size) - 1.0
gammas = distributions.Gamma(concentration=self.shape, rate=self.rate).sample(
sample_size
)
return signs * gammas
class PowSimSynthetic(GeneExpressionDataset):
def __init__(
self,
cluster_to_samples=[20, 100, 30, 25, 500],
n_genes=10000,
n_genes_zi=100,
p_dropout=0.4,
real_data_path=None,
de_p=0.1,
de_lfc=None,
batch_p=0.0,
batch_lfc=None,
batch_pattern=None,
marker_p=0.0,
marker_lfc=0.0,
do_spike_in=False,
do_downsample=False,
geneset=False,
cst_mu=None,
mode="NB",
seed=42,
):
# TODO: replace current ZI policy by the one implemented by powsim
# In their case I guess it is similar to ZIFA
"""
:param cluster_to_samples:
:param n_genes:
:param de_p:
:param de_lfc:
:param batch_p:
:param batch_lfc:
:param marker_p:
:param marker_lfc:
:param do_spike_in:
:param do_downsample:
:param geneset:
:param mode:
:param seed:
"""
super().__init__()
np.random.seed(seed)
dir_path = os.path.dirname(os.path.realpath(__file__))
real_data_path = os.path.join(dir_path, "kolodziejczk_param.csv")
self.real_data_df = pd.read_csv(real_data_path)
if de_lfc is None:
de_lfc = SignedGamma(dim=len(cluster_to_samples))
n_cells_total = sum(cluster_to_samples)
self.n_clusters = len(cluster_to_samples)
self.cluster_to_samples = cluster_to_samples
self.phenotypes = np.concatenate(
[
self._get_one_hot(idx=idx, n_idx_total=self.n_clusters, size=val)
for (idx, val) in enumerate(self.cluster_to_samples)
]
)
labels = np.array([v.argmax() for v in self.phenotypes])
assert len(labels) == len(self.phenotypes)
self.mode = mode
assert self.mode in ["NB", "ZINB"]
self.geneset = geneset
assert not self.geneset
self.do_downsample = do_downsample
assert not do_downsample
assert not do_spike_in
# Gene expression parameters
n_genes_de = int(n_genes * de_p)
n_genes_batch = int(batch_p * n_genes)
n_genes_marker = int(marker_p * n_genes)
assert n_genes_marker == 0
assert n_genes_batch == 0
assert n_genes_de > 0, "No genes differentially expressed"
# Diff exp genes
self.de_lfc = np.zeros((n_genes, self.n_clusters))
self.de_genes_idx = np.random.choice(a=n_genes, size=n_genes_de, replace=False)
self.de_lfc[self.de_genes_idx] = self.unvectorize(
de_lfc.sample((len(self.de_genes_idx),))
)
# ZI (uniform for now)
self.zi_genes_idx = np.random.choice(a=n_genes, size=n_genes_zi, replace=False)
self.p_dropout = p_dropout
# Batch affected genes
if n_genes_batch != 0:
batch_genes_id = np.random.choice(
a=n_genes, size=n_genes_batch, replace=False
)
            self.batch_lfc = np.zeros((n_genes, self.n_clusters))
self.batch_lfc[batch_genes_id] = self.unvectorize(
batch_lfc.sample((len(batch_genes_id),))
)
assert batch_pattern in ["uncorrelated", "orthogonal", "correlated"]
self.batch_pattern = batch_pattern
else:
self.batch_lfc = None
self.batch_pattern = None
# Marker genes
if n_genes_marker != 0:
pass
else:
self.marker_lfc = None
self.ids = self.de_genes_idx
self.lfc = self.de_lfc
self._log_mu_mat = None
self._mu_mat = None
self._sizes = None
self.cst_mu = cst_mu
sim_data = self.generate_data(n_cells_total=n_cells_total, n_genes=n_genes)
# sim_data[sim_data >= 1000] = 1000
assert sim_data.shape == (n_cells_total, n_genes)
sim_data = np.expand_dims(sim_data, axis=0)
labels = np.expand_dims(labels, axis=0)
gene_names = np.arange(n_genes).astype(str)
self.populate_from_per_batch_list(
sim_data,
labels_per_batch=labels,
gene_names=gene_names,
)
gene_data = {
"lfc{}".format(idx): arr for (idx, arr) in enumerate(self.de_lfc.T)
}
self.gene_properties = pd.DataFrame(data=gene_data, index=gene_names)
def generate_data(self, n_cells_total, n_genes):
if self.batch_lfc is None:
model_matrix = self.phenotypes
coeffs = self.lfc
batch = None
else:
if self.batch_pattern == "uncorrelated":
raise NotImplementedError
elif self.batch_pattern == "orthogonal":
raise NotImplementedError
else:
raise NotImplementedError
# Generating data based on those parameters
if self.mode == "NB":
new_data = self.generate_nb(model_matrix, coeffs, n_cells_total, n_genes)
elif self.mode == "ZINB":
new_data = self.generate_zinb(model_matrix, coeffs, n_cells_total, n_genes)
return new_data
def generate_nb(self, model_matrix, coeffs, n_cells_total, nb_genes):
"""
DIFFERENCE WITH ORIGINAL IMPLEMENTATION
HERE WE WORK WITH N_CELLS, N_GENES REPRESENTATIONS
:param model_matrix: Mask Matrice (n_cells, n_clusters)
:param coeffs: LFC Coefficients (n_genes, n_clusters)
:return:
"""
if self.cst_mu is not None:
true_means = self.cst_mu * np.ones(nb_genes)
else:
mu = self.real_data_df["means"]
true_means = np.random.choice(a=mu, size=nb_genes, replace=True)
true_means[true_means >= 200] = 200
print(true_means.min(), true_means.mean(), true_means.max())
log_mu = np.log2(1.0 + true_means)
print(log_mu.min(), log_mu.mean(), log_mu.max())
# log_mu = np.minimum(log_mu, 5.0)
# NN interpolation
interpolator_mean = interp1d(
self.real_data_df.x,
self.real_data_df.y,
kind="nearest",
bounds_error=False,
fill_value="extrapolate",
)
size_mean = interpolator_mean(log_mu)
interpolator_std = interp1d(
self.real_data_df.x,
self.real_data_df.sd,
kind="nearest",
bounds_error=False,
fill_value="extrapolate",
)
size_std = interpolator_std(log_mu)
# TODO: Verify Size
log_sizes = np.random.normal(loc=size_mean, scale=size_std)
assert log_sizes.shape == size_std.shape
# all_facs = np.ones(n_samples)
# effective_means = np.repeat(true_means, repeats=n_samples, axis=0)
# assert effective_means.shape == (self.nb_genes, n_samples)
effective_means = true_means # when no size factor
log_effective = np.log2(effective_means + 1.0)
_perturbations = model_matrix.dot(coeffs.T)
log_mu_mat = np.log2(effective_means + 1.0) + model_matrix.dot(coeffs.T)
log_mu_mat[log_mu_mat < 0] = log_effective.min()
mu_mat = 2 ** log_mu_mat
sizes = 2 ** log_sizes
self._log_mu_mat = log_mu_mat
self._mu_mat = mu_mat
self._sizes = sizes
nb_proba = sizes / (sizes + mu_mat)
# TODO: Verify no mistakes here
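        # numpy's negative_binomial(n, p) has mean n * (1 - p) / p; with
        # n = sizes and p = sizes / (sizes + mu_mat) that mean reduces to
        # mu_mat, i.e. the usual mean/dispersion NB parameterisation.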
sim_data = np.random.negative_binomial(
n=sizes, p=nb_proba, size=(n_cells_total, nb_genes)
)
return sim_data
def generate_zinb(self, model_matrix, coeffs, n_cells_total, nb_genes):
sim_data = self.generate_nb(model_matrix, coeffs, n_cells_total, nb_genes)
# Zero inflation
zi_probas = np.ones_like(sim_data)
zi_probas[:, self.zi_genes_idx] = 1.0 - self.p_dropout
draws = np.random.rand(n_cells_total, nb_genes)
do_drop = draws >= zi_probas
sim_data = do_drop * sim_data
return sim_data
@staticmethod
def _get_one_hot(idx, n_idx_total, size):
res = np.zeros((size, n_idx_total))
res[:, idx] = 1
return res
@staticmethod
def unvectorize(vec):
if len(vec.shape) == 1:
return vec.reshape((-1, 1))
return vec
```
#### File: scvi/dataset/svensson.py
```python
from . import GeneExpressionDataset
from .anndataset import AnnDatasetFromAnnData, DownloadableAnnDataset
import torch
import pickle
import os
import numpy as np
import pandas as pd
import anndata
class AnnDatasetKeywords(GeneExpressionDataset):
def __init__(self, data, select_genes_keywords=[]):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
idx_and_gene_names = [
(idx, gene_name) for idx, gene_name in enumerate(list(anndataset.var.index))
]
for keyword in select_genes_keywords:
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in idx_and_gene_names
if keyword.lower() in gene_name.lower()
]
gene_indices = np.array([idx for idx, _ in idx_and_gene_names])
gene_names = np.array([gene_name for _, gene_name in idx_and_gene_names])
expression_mat = np.array(anndataset.X[:, gene_indices].todense())
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) > 0.21
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
class ZhengDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
zheng = anndata.read(os.path.join(current_dir, "zheng_gemcode_control.h5ad"))
super(ZhengDataset, self).__init__(zheng, select_genes_keywords=["ercc"])
class MacosDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
macos = anndata.read(os.path.join(current_dir, "macosko_dropseq_control.h5ad"))
super(MacosDataset, self).__init__(macos, select_genes_keywords=["ercc"])
class KleinDataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDataset, self).__init__(klein, select_genes_keywords=["ercc"])
class Sven1Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1Dataset, self).__init__(sven1, select_genes_keywords=["ercc"])
class Sven2Dataset(AnnDatasetKeywords):
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2Dataset, self).__init__(sven2, select_genes_keywords=["ercc"])
class AnnDatasetRNA(GeneExpressionDataset):
def __init__(self, data, n_genes=100):
super().__init__()
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
# Select RNA genes
idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in enumerate(list(anndataset.var.index))
if "ercc" not in gene_name.lower()
]
gene_indices = np.array([idx for idx, _ in idx_and_gene_names])
gene_names = np.array([gene_name for _, gene_name in idx_and_gene_names])
expression_mat = np.array(anndataset.X[:, gene_indices].todense())
# Find n_genes most expressed genes (wrt average gene expression)
argsort_genes_exp = np.argsort(np.mean(expression_mat, axis=0))
expression_mat = expression_mat[:, argsort_genes_exp[-n_genes:]]
gene_names = gene_names[argsort_genes_exp[-n_genes:]]
# Remove zero cells, then zero genes
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) >= 0.21
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
class KleinDatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDatasetRNA, self).__init__(klein, n_genes=n_genes)
class Sven1DatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1DatasetRNA, self).__init__(sven1, n_genes=n_genes)
class Sven2DatasetRNA(AnnDatasetRNA):
def __init__(self, n_genes=100):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2DatasetRNA, self).__init__(sven2, n_genes=n_genes)
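# Mixed ERCC/RNA dataset: all ERCC spike-in genes are kept, and each spike-in
# additionally pulls in its `n_matches` most similar not-yet-selected genes
# under the chosen matching function (L2, sorted L2, mean difference, cosine,
# sorted cosine, or random).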
class AnnDatasetMixed(GeneExpressionDataset):
def __init__(self, data, matching_func="l2", n_matches=3, threshold=0.01):
super().__init__()
assert matching_func in [
"l2",
"l2_sort",
"means",
"cosine",
"cosine_sort",
"random",
]
self.matching_func = matching_func
self.n_matches = n_matches
if isinstance(data, str):
anndataset = anndata.read(data)
else:
anndataset = data
expression_mat = np.array(anndataset.X.todense())
# Select ERCC genes
ercc_idx_and_gene_names = [
(idx, gene_name)
for idx, gene_name in enumerate(list(anndataset.var.index))
if "ercc" in gene_name.lower()
]
# Eliminate zero cells and zero genes
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = expression_mat.sum(axis=0) > 0
expression_mat = expression_mat[:, select_genes]
# Select ERCC genes
gene_names = np.array(
[
gene_name
for idx, gene_name in enumerate(list(anndataset.var.index))
if select_genes[idx]
]
)
ercc_gene_indices = np.array(
[
idx
for idx, gene_name in enumerate(gene_names)
if "ercc" in gene_name.lower()
]
)
# Match ERCC genes with RNA genes, select matched genes
selected_matched_genes = self._match_genes(expression_mat, ercc_gene_indices)
expression_mat = expression_mat[:, selected_matched_genes]
gene_names = gene_names[selected_matched_genes]
        # Remove induced zero cells, then keep only genes present in at least a `threshold` fraction of cells
select_cells = expression_mat.sum(axis=1) > 0
expression_mat = expression_mat[select_cells, :]
select_genes = (expression_mat > 0).mean(axis=0) >= threshold
gene_names = gene_names[select_genes]
expression_mat = expression_mat[:, select_genes]
print("Final dataset shape :", expression_mat.shape)
print(
"ERCC genes :",
len([gene_name for gene_name in gene_names if "ercc" in gene_name.lower()]),
)
self.is_ercc = np.array(
["ercc" in gene_name.lower() for gene_name in gene_names]
)
self.populate_from_data(X=expression_mat, gene_names=gene_names)
def _matching_func(self, ref_col, mat):
if self.matching_func == "l2":
return np.linalg.norm(mat - ref_col, axis=0)
elif self.matching_func == "l2_sort":
return np.linalg.norm(
np.sort(mat, axis=0) - np.sort(ref_col, axis=0), axis=0
)
elif self.matching_func == "means":
return np.abs(np.mean(mat, axis=0) - np.mean(ref_col))
elif self.matching_func == "cosine":
return 1.0 - np.sum(mat * ref_col, axis=0) / (
np.linalg.norm(mat, axis=0) * np.linalg.norm(ref_col)
)
elif self.matching_func == "cosine_sort":
return 1.0 - np.sum(
np.sort(mat, axis=0) * np.sort(ref_col, axis=0), axis=0
) / (np.linalg.norm(mat, axis=0) * np.linalg.norm(ref_col))
elif self.matching_func == "random":
np.random.seed(0)
return np.random.uniform(0.0, 100.0, size=(mat.shape[1],))
else:
raise Exception("Matching function not recognized")
def _match_given_gene(self, expression_mat, ref_gene_index, selected_genes):
scores = self._matching_func(
expression_mat[:, ref_gene_index][:, np.newaxis], expression_mat
)
scores[selected_genes] = np.inf
new_matches = np.arange(expression_mat.shape[1])[
np.argsort(scores)[: self.n_matches]
]
selected_genes[new_matches] = True
return selected_genes
def _match_genes(self, expression_mat, ref_gene_indices):
selected_genes = np.zeros(shape=(expression_mat.shape[1],), dtype=bool)
selected_genes[ref_gene_indices] = True
if self.n_matches > 0:
for ref_gene_index in ref_gene_indices:
selected_genes = self._match_given_gene(
expression_mat, ref_gene_index, selected_genes
)
return selected_genes
class KleinDatasetMixed(AnnDatasetMixed):
def __init__(self, matching_func="l2", n_matches=3, threshold=0.01):
current_dir = os.path.dirname(os.path.realpath(__file__))
klein = anndata.read(
os.path.join(current_dir, "klein_indrops_control_GSM1599501.h5ad")
)
super(KleinDatasetMixed, self).__init__(
klein, matching_func=matching_func, n_matches=n_matches, threshold=threshold
)
class Sven1DatasetMixed(AnnDatasetMixed):
def __init__(self, matching_func="l2", n_matches=3, threshold=0.01):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven1 = svens[svens.obs.query('sample == "20311"').index]
super(Sven1DatasetMixed, self).__init__(
sven1, matching_func=matching_func, n_matches=n_matches, threshold=threshold
)
class Sven2DatasetMixed(AnnDatasetMixed):
def __init__(self, matching_func="l2", n_matches=3, threshold=0.01):
current_dir = os.path.dirname(os.path.realpath(__file__))
svens = anndata.read(
os.path.join(current_dir, "svensson_chromium_control.h5ad")
)
sven2 = svens[svens.obs.query('sample == "20312"').index]
super(Sven2DatasetMixed, self).__init__(
sven2, matching_func=matching_func, n_matches=n_matches, threshold=threshold
)
```
#### File: scvi/inference/posterior.py
```python
import copy
import os
import logging
from typing import List, Optional, Union, Tuple
import numpy as np
import pandas as pd
import scipy
import torch
import torch.distributions as distributions
from matplotlib import pyplot as plt
from scipy.stats import kde, entropy
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.metrics import adjusted_rand_score as ARI
from sklearn.metrics import normalized_mutual_info_score as NMI
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture as GMM
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
from sklearn.utils.linear_assignment_ import linear_assignment
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler, SubsetRandomSampler, RandomSampler
from scipy.special import betainc
from torch.distributions import Normal, kl_divergence as kl
from scvi.dataset import GeneExpressionDataset
from scvi.models.log_likelihood import compute_elbo, compute_reconstruction_error, compute_marginal_log_likelihood
logger = logging.getLogger(__name__)
class SequentialSubsetSampler(SubsetRandomSampler):
def __iter__(self):
return iter(self.indices)
class Posterior:
r"""The functional data unit. A `Posterior` instance is instantiated with a model and a gene_dataset, and
as well as additional arguments that for Pytorch's `DataLoader`. A subset of indices can be specified, for
purposes such as splitting the data into train/test or labelled/unlabelled (for semi-supervised learning).
Each trainer instance of the `Trainer` class can therefore have multiple `Posterior` instances to train a model.
A `Posterior` instance also comes with many methods or utilities for its corresponding data.
:param model: A model instance from class ``VAE``, ``VAEC``, ``SCANVI``
:param gene_dataset: A gene_dataset instance like ``CortexDataset()``
:param shuffle: Specifies if a `RandomSampler` or a `SequentialSampler` should be used
:param indices: Specifies how the data should be split with regards to train/test or labelled/unlabelled
:param use_cuda: Default: ``True``
    :param data_loader_kwargs: Keyword arguments to be passed into the `DataLoader`
Examples:
Let us instantiate a `trainer`, with a gene_dataset and a model
>>> gene_dataset = CortexDataset()
>>> vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels, use_cuda=True)
>>> trainer = UnsupervisedTrainer(vae, gene_dataset)
>>> trainer.train(n_epochs=50)
A `UnsupervisedTrainer` instance has two `Posterior` attributes: `train_set` and `test_set`
For this subset of the original gene_dataset instance, we can examine the differential expression,
log_likelihood, entropy batch mixing, ... or display the TSNE of the data in the latent space through the
scVI model
>>> trainer.train_set.differential_expression_stats()
>>> trainer.train_set.reconstruction_error()
>>> trainer.train_set.entropy_batch_mixing()
>>> trainer.train_set.show_t_sne(n_samples=1000, color_by="labels")
"""
def __init__(
self,
model,
gene_dataset: GeneExpressionDataset,
shuffle=False,
indices=None,
use_cuda=True,
data_loader_kwargs=dict(),
):
"""
When added to annotation, has a private name attribute
"""
self.model = model
self.gene_dataset = gene_dataset
self.to_monitor = []
self.use_cuda = use_cuda
if indices is not None and shuffle:
raise ValueError("indices is mutually exclusive with shuffle")
if indices is None:
if shuffle:
sampler = RandomSampler(gene_dataset)
else:
sampler = SequentialSampler(gene_dataset)
else:
if hasattr(indices, "dtype") and indices.dtype is np.dtype("bool"):
indices = np.where(indices)[0].ravel()
sampler = SubsetRandomSampler(indices)
self.data_loader_kwargs = copy.copy(data_loader_kwargs)
self.data_loader_kwargs.update(
{"collate_fn": gene_dataset.collate_fn_builder(), "sampler": sampler}
)
self.data_loader = DataLoader(gene_dataset, **self.data_loader_kwargs)
def accuracy(self):
pass
accuracy.mode = "max"
@property
def indices(self):
if hasattr(self.data_loader.sampler, "indices"):
return self.data_loader.sampler.indices
else:
return np.arange(len(self.gene_dataset))
@property
def nb_cells(self):
if hasattr(self.data_loader.sampler, "indices"):
return len(self.data_loader.sampler.indices)
else:
return self.gene_dataset.nb_cells
def __iter__(self):
return map(self.to_cuda, iter(self.data_loader))
def to_cuda(self, tensors):
return [t.cuda() if self.use_cuda else t for t in tensors]
def update(self, data_loader_kwargs):
posterior = copy.copy(self)
posterior.data_loader_kwargs = copy.copy(self.data_loader_kwargs)
posterior.data_loader_kwargs.update(data_loader_kwargs)
posterior.data_loader = DataLoader(self.gene_dataset, **posterior.data_loader_kwargs)
return posterior
def sequential(self, batch_size=128):
return self.update({"batch_size": batch_size, "sampler": SequentialSubsetSampler(indices=self.indices)})
def corrupted(self):
return self.update(
{"collate_fn": self.gene_dataset.collate_fn_builder(corrupted=True)}
)
def uncorrupted(self):
return self.update({"collate_fn": self.gene_dataset.collate_fn_builder()})
@torch.no_grad()
def get_latents(self, n_samples=1, other=None, device='gpu'):
"""
Computes all quantities of interest for DE in a sequential order
        WARNING: BATCH EFFECTS NOT TAKEN INTO ACCOUNT FOR NOW
# TODO: TAKE THEM INTO ACCOUNT (NOT THAT HARD)
:param n_samples:
:param other:
:return:
"""
zs = []
labels = []
scales = []
n_bio_batches = self.gene_dataset.n_batches
with torch.no_grad():
for tensors in self.sequential():
sample_batch, _, _, batch_index, label = tensors
outputs = self.model.inference(sample_batch, batch_index, n_samples=n_samples)
z = outputs['z']
norm_library = 4. * torch.ones_like(sample_batch[:, [0]])
scale_batch = []
if other is not None:
for bio_batch in range(n_bio_batches):
batch_index = 1.0 * torch.ones_like(sample_batch[:, [0]])
scale_batch.append(
self.model.decoder.forward('gene', z, norm_library, batch_index)[0]
)
# each elem of scale_batch has shape (n_samples, n_batch, n_genes)
scale_batch = torch.cat(scale_batch, dim=0)
if device == 'cpu':
label = label.cpu()
z = z.cpu()
if other is not None:
scale_batch = scale_batch.cpu()
# print(label.device, z.device, scale_batch.device)
labels.append(label)
zs.append(z)
scales.append(scale_batch)
if n_samples > 1:
# Then each z element has shape (n_samples, n_batch, n_latent)
# Hence we concatenate on dimension 1
zs = torch.cat(zs, dim=1)
if other is not None:
scales = torch.cat(scales, dim=1)
# zs = zs.transpose(0, 1)
# zs = zs.transpose(1, 2)
# New shape (n_batch, b)
else:
zs = torch.cat(zs)
labels = torch.cat(labels)
if other is not None:
return zs, labels, scales
return zs, labels
@torch.no_grad()
def get_data(self):
"""
:return:
"""
xs, labels = [], []
for tensors in self.sequential():
sample_batch, _, _, batch_index, label = tensors
xs.append(sample_batch.cpu())
labels.append(label.cpu())
xs = torch.cat(xs)
labels = torch.cat(labels)
return xs, labels
@torch.no_grad()
def elbo(self):
elbo = compute_elbo(self.model, self)
logger.debug("ELBO : %.4f" % elbo)
return elbo
elbo.mode = "min"
@torch.no_grad()
def reconstruction_error(self):
reconstruction_error = compute_reconstruction_error(self.model, self)
logger.debug("Reconstruction Error : %.4f" % reconstruction_error)
return reconstruction_error
reconstruction_error.mode = "min"
@torch.no_grad()
def marginal_ll(self, n_mc_samples=1000, ratio_loss=False):
ll = compute_marginal_log_likelihood(self.model, self, n_mc_samples, ratio_loss=ratio_loss)
logger.debug("True LL : %.4f" % ll)
return ll
@torch.no_grad()
def get_latent(self, sample=False):
"""
Output posterior z mean or sample, batch index, and label
:param sample: z mean or z sample
:return: three np.ndarrays, latent, batch_indices, labels
"""
latent = []
batch_indices = []
labels = []
for tensors in self:
sample_batch, local_l_mean, local_l_var, batch_index, label = tensors
give_mean = not sample
latent += [self.model.sample_from_posterior_z(sample_batch, give_mean=give_mean).cpu()]
batch_indices += [batch_index.cpu()]
labels += [label.cpu()]
return np.array(torch.cat(latent)), np.array(torch.cat(batch_indices)), np.array(torch.cat(labels)).ravel()
@torch.no_grad()
def entropy_batch_mixing(self, **kwargs):
if self.gene_dataset.n_batches == 2:
latent, batch_indices, labels = self.get_latent()
be_score = entropy_batch_mixing(latent, batch_indices, **kwargs)
logger.debug("Entropy batch mixing :", be_score)
return be_score
entropy_batch_mixing.mode = "max"
@torch.no_grad()
def differential_expression_stats(self, M_sampling=100):
"""
Output average over statistics in a symmetric way (a against b)
forget the sets if permutation is True
:param M_sampling: number of samples
:return: Tuple px_scales, all_labels where:
- px_scales: scales of shape (M_sampling, n_genes)
- all_labels: labels of shape (M_sampling, )
"""
px_scales = []
all_labels = []
batch_size = max(self.data_loader_kwargs["batch_size"] // M_sampling, 2) # Reduce batch_size on GPU
if len(self.gene_dataset) % batch_size == 1:
batch_size += 1
for tensors in self.update({"batch_size": batch_size}):
sample_batch, _, _, batch_index, labels = tensors
px_scales += [
np.array(
(
self.model.get_sample_scale(
sample_batch, batch_index=batch_index, y=labels, n_samples=M_sampling
)
).cpu()
)
]
# Align the sampling
if M_sampling > 1:
px_scales[-1] = (px_scales[-1].transpose((1, 0, 2))).reshape(-1, px_scales[-1].shape[-1])
all_labels += [np.array((labels.repeat(1, M_sampling).view(-1, 1)).cpu())]
px_scales = np.concatenate(px_scales)
all_labels = np.concatenate(all_labels).ravel() # this will be used as boolean
return px_scales, all_labels
@torch.no_grad()
def sample_scale_from_batch(self, n_samples, batchid=None, selection=None):
#TODO: Implement log probas
px_scales = []
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
else:
if selection.dtype is np.dtype("bool"):
selection = np.asarray(np.where(selection)[0].ravel())
old_loader = self.data_loader
for i in batchid:
idx = np.random.choice(np.arange(len(self.gene_dataset))[selection], n_samples)
sampler = SubsetRandomSampler(idx)
self.data_loader_kwargs.update({"sampler": sampler})
self.data_loader = DataLoader(self.gene_dataset, **self.data_loader_kwargs)
px_scales.append(self.get_harmonized_scale(i))
self.data_loader = old_loader
px_scales = np.concatenate(px_scales)
return px_scales, None
@torch.no_grad()
def sample_poisson_from_batch(self, n_samples, batchid=None, selection=None):
# TODO: Refactor?
px_scales = []
log_probas = []
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
else:
if selection.dtype is np.dtype('bool'):
selection = np.asarray(np.where(selection)[0].ravel())
old_loader = self.data_loader
for i in batchid:
idx = np.random.choice(np.arange(len(self.gene_dataset))[selection], n_samples)
sampler = SubsetRandomSampler(idx)
self.data_loader_kwargs.update({'sampler': sampler})
self.data_loader = DataLoader(self.gene_dataset, **self.data_loader_kwargs)
for tensors in self:
sample_batch, local_l_mean, local_l_var, batch_index, label = tensors
px_scale, px_dispersion, px_rate, px_dropout, qz_m, qz_v, z, ql_m, ql_v, library = \
self.model.inference(sample_batch, batch_index, label)
# px_rate = self.get_harmonized_scale(i)
# p = px_rate / (px_rate + px_dispersion.cpu().numpy())
# r = px_dispersion.cpu().numpy()
# p = (px_scale / (px_scale + px_dispersion)).cpu().numpy()
p = (px_rate / (px_rate + px_dispersion)).cpu().numpy()
r = px_dispersion.cpu().numpy()
l_train = np.random.gamma(r, p / (1 - p))
px_scales.append(l_train)
log_px_z = self.model._reconstruction_loss(sample_batch, px_rate, px_dispersion,
px_dropout)
log_pz = Normal(torch.zeros_like(qz_m), torch.ones_like(qz_v)).log_prob(z).sum(
dim=-1)
log_qz_x = Normal(qz_m, qz_v.sqrt()).log_prob(z).sum(dim=-1)
log_p = log_pz + log_px_z - log_qz_x
log_probas.append(log_p.cpu().numpy())
self.data_loader = old_loader
px_scales = np.concatenate(px_scales)
log_probas = np.concatenate(log_probas)
return px_scales, log_probas
@torch.no_grad()
def sample_gamma_params_from_batch(self, n_samples, batchid=None, selection=None):
shapes_res, scales_res = [], []
dispersions = []
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
else:
if selection.dtype is np.dtype('bool'):
selection = np.asarray(np.where(selection)[0].ravel())
old_loader = self.data_loader
for i in batchid:
idx = np.random.choice(np.arange(len(self.gene_dataset))[selection], n_samples)
sampler = SubsetRandomSampler(idx)
self.data_loader_kwargs.update({'sampler': sampler})
self.data_loader = DataLoader(self.gene_dataset, **self.data_loader_kwargs)
#
# # fixed_batch = float(i)
for tensors in self:
sample_batch, local_l_mean, local_l_var, batch_index, label = tensors
px_scale, px_dispersion, px_rate = self.model.inference(sample_batch, batch_index, label)[0:3]
# px_rate = self.get_harmonized_scale(i)
# p = px_rate / (px_rate + px_dispersion.cpu().numpy())
# r = px_dispersion.cpu().numpy()
# p = (px_scale / (px_scale + px_dispersion)).cpu().numpy()
p = (px_rate / (px_rate + px_dispersion)).cpu().numpy()
r = px_dispersion.cpu().numpy()
shapes_batch = r
scales_batch = p / (1.0 - p)
if len(shapes_batch.shape) == 1:
shapes_batch = np.repeat(shapes_batch.reshape((1, -1)),
repeats=scales_batch.shape[0], axis=0)
shapes_res.append(shapes_batch)
scales_res.append(scales_batch)
self.data_loader = old_loader
shapes_res = np.concatenate(shapes_res)
scales_res = np.concatenate(scales_res)
assert shapes_res.shape == scales_res.shape, (shapes_res.shape, scales_res.shape)
return shapes_res, scales_res
@torch.no_grad()
def differential_expression_gamma(self, idx1, idx2, batchid1=None, batchid2=None,
genes=None, n_samples=None, M_permutation=None, all_stats=True,
sample_pairs=True):
n_samples = 5000 if n_samples is None else n_samples
M_permutation = 10000 if M_permutation is None else M_permutation
if batchid1 is None:
batchid1 = np.arange(self.gene_dataset.n_batches)
if batchid2 is None:
batchid2 = np.arange(self.gene_dataset.n_batches)
shapes1, scales1 = self.sample_gamma_params_from_batch(selection=idx1, batchid=batchid1,
n_samples=n_samples)
shapes2, scales2 = self.sample_gamma_params_from_batch(selection=idx2, batchid=batchid2,
n_samples=n_samples)
print(shapes1.shape, scales1.shape, shapes2.shape, scales2.shape)
all_labels = np.concatenate((np.repeat(0, len(shapes1)), np.repeat(1, len(shapes2))),
axis=0)
if genes is not None:
shapes1 = shapes1[:, self.gene_dataset._gene_idx(genes)]
scales1 = scales1[:, self.gene_dataset._gene_idx(genes)]
shapes2 = shapes2[:, self.gene_dataset._gene_idx(genes)]
scales2 = scales2[:, self.gene_dataset._gene_idx(genes)]
shapes = np.concatenate((shapes1, shapes2), axis=0)
scales = np.concatenate((scales1, scales2), axis=0)
assert shapes.shape == scales.shape, (shapes.shape, scales.shape)
data = (shapes, scales)
bayes1 = get_bayes_gamma(data, all_labels, cell_idx=0, M_permutation=M_permutation,
permutation=False, sample_pairs=sample_pairs)
bayes1 = pd.Series(data=bayes1, index=self.gene_dataset.gene_names)
return bayes1
@torch.no_grad()
def differential_expression_score(
self,
idx1: Union[List[bool], np.ndarray],
idx2: Union[List[bool], np.ndarray],
batchid1: Optional[Union[List[int], np.ndarray]] = None,
batchid2: Optional[Union[List[int], np.ndarray]] = None,
genes: Optional[Union[List[str], np.ndarray]] = None,
n_samples: int = None,
sample_pairs: bool = True,
M_permutation: int = None,
all_stats: bool = True,
sample_gamma: bool = False,
importance_sampling: bool = False
):
"""Computes gene specific Bayes factors using masks idx1 and idx2
To that purpose we sample the Posterior in the following way:
1. The posterior is sampled n_samples times for each subpopulation
2. For computation efficiency (posterior sampling is quite expensive), instead of
comparing element-wise the obtained samples, we can permute posterior samples.
Remember that computing the Bayes Factor requires sampling
q(z_A | x_A) and q(z_B | x_B)
:param idx1: bool array masking subpopulation cells 1. Should be True where cell is
from associated population
:param idx2: bool array masking subpopulation cells 2. Should be True where cell is
from associated population
:param batchid1: List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
:param batchid2: List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
:param genes: list Names of genes for which Bayes factors will be computed
:param n_samples: Number of times the posterior will be sampled for each pop
:param sample_pairs: Activates step 2 described above.
Simply formulated, pairs obtained from posterior sampling (when calling
`sample_scale_from_batch`) will be randomly permuted so that the number of
pairs used to compute Bayes Factors becomes M_permutation.
:param M_permutation: Number of times we will "mix" posterior samples in step 2.
Only makes sense when sample_pairs=True
:param all_stats: If False returns Bayes factors alone
else, returns not only Bayes Factor of population 1 vs population 2 but other metrics as
well, mostly used for sanity checks, such as
- Bayes Factors of 2 vs 1
- Bayes factors obtained when indices used to computed bayes are chosen randomly
(ie we compute Bayes factors of Completely Random vs Completely Random).
These can be seen as control tests.
- Gene expression statistics (mean, scale ...)
:return:
"""
n_samples = 5000 if n_samples is None else n_samples
M_permutation = 10000 if M_permutation is None else M_permutation
if batchid1 is None:
batchid1 = np.arange(self.gene_dataset.n_batches)
if batchid2 is None:
batchid2 = np.arange(self.gene_dataset.n_batches)
if sample_gamma:
px_scale1, log_probas1 = self.sample_poisson_from_batch(selection=idx1,
batchid=batchid1,
n_samples=n_samples)
px_scale2, log_probas2 = self.sample_poisson_from_batch(selection=idx2,
batchid=batchid2,
n_samples=n_samples)
else:
px_scale1, log_probas1 = self.sample_scale_from_batch(selection=idx1, batchid=batchid1,
n_samples=n_samples)
px_scale2, log_probas2 = self.sample_scale_from_batch(selection=idx2, batchid=batchid2,
n_samples=n_samples)
px_scale_mean1 = px_scale1.mean(axis=0)
px_scale_mean2 = px_scale2.mean(axis=0)
px_scale = np.concatenate((px_scale1, px_scale2), axis=0)
if log_probas1 is not None:
log_probas = np.concatenate((log_probas1, log_probas2), axis=0)
else:
log_probas = None
# print('px_scales1 shapes', px_scale1.shape)
# print('px_scales2 shapes', px_scale2.shape)
all_labels = np.concatenate((np.repeat(0, len(px_scale1)), np.repeat(1, len(px_scale2))),
axis=0)
if genes is not None:
px_scale = px_scale[:, self.gene_dataset.genes_to_index(genes)]
bayes1 = get_bayes_factors(px_scale, all_labels, cell_idx=0, M_permutation=M_permutation,
permutation=False, sample_pairs=sample_pairs,
importance_sampling=importance_sampling,
log_ratios=log_probas)
if all_stats is True:
bayes1_permuted = get_bayes_factors(px_scale, all_labels, cell_idx=0,
M_permutation=M_permutation,
permutation=True, sample_pairs=sample_pairs,
importance_sampling=importance_sampling,
log_ratios=log_probas)
bayes2 = get_bayes_factors(px_scale, all_labels, cell_idx=1,
M_permutation=M_permutation,
permutation=False, sample_pairs=sample_pairs,
importance_sampling=importance_sampling,
log_ratios=log_probas)
bayes2_permuted = get_bayes_factors(px_scale, all_labels, cell_idx=1,
M_permutation=M_permutation,
permutation=True, sample_pairs=sample_pairs,
importance_sampling=importance_sampling,
log_ratios=log_probas)
mean1, mean2, nonz1, nonz2, norm_mean1, norm_mean2 = \
self.gene_dataset.raw_counts_properties(idx1, idx2)
res = pd.DataFrame([bayes1, bayes1_permuted, bayes2, bayes2_permuted,
mean1, mean2, nonz1, nonz2, norm_mean1, norm_mean2,
px_scale_mean1, px_scale_mean2],
index=["bayes1", "bayes1_permuted", "bayes2", "bayes2_permuted",
"mean1", "mean2", "nonz1", "nonz2", "norm_mean1", "norm_mean2",
"scale1", "scale2"],
columns=self.gene_dataset.gene_names).T
res = res.sort_values(by=["bayes1"], ascending=False)
return res
else:
return pd.Series(data=bayes1, index=self.gene_dataset.gene_names)
@torch.no_grad()
def one_vs_all_degenes(
self,
subset: Optional[Union[List[bool], np.ndarray]] = None,
cell_labels: Optional[Union[List, np.ndarray]] = None,
min_cells: int = 10,
n_samples: int = None,
sample_pairs: bool = False,
M_permutation: int = None,
output_file: bool = False,
save_dir: str = "./",
filename="one2all",
):
"""
Performs one population vs all others Differential Expression Analysis
given labels or using cell types, for each type of population
:param subset: None Or
bool array masking subset of cells you are interested in (True when you want to select cell).
In that case, it should have same length than `gene_dataset`
:param cell_labels: optional: Labels of cells
:param min_cells: Ceil number of cells used to compute Bayes Factors
:param n_samples: Number of times the posterior will be sampled for each pop
:param sample_pairs: Activates pair random permutations.
Simply formulated, pairs obtained from posterior sampling (when calling
`sample_scale_from_batch`) will be randomly permuted so that the number of
pairs used to compute Bayes Factors becomes M_permutation.
:param M_permutation: Number of times we will "mix" posterior samples in step 2.
Only makes sense when sample_pairs=True
:param output_file: Bool: save file?
:param save_dir:
:param filename:
:return: Tuple (de_res, de_cluster)
- de_res is a list of length nb_clusters (based on provided labels or on hardcoded cell
types). de_res[i] contains Bayes Factors for population number i vs all the rest
- de_cluster returns the associated names of clusters
            Only clusters with at least `min_cells` cells available to compute the
            predicted Bayes Factors are included in these results
"""
if cell_labels is not None:
if len(cell_labels) != len(self.gene_dataset):
raise ValueError(" the length of cell_labels have to be "
"the same as the number of cells")
if (cell_labels is None) and not hasattr(self.gene_dataset, 'cell_types'):
raise ValueError("If gene_dataset is not annotated with labels and cell types,"
" then must provide cell_labels")
# Input cell_labels take precedence over cell type label annotation in dataset
elif cell_labels is not None:
cluster_id = np.unique(cell_labels[cell_labels >= 0])
# Can make cell_labels < 0 to filter out cells when computing DE
else:
cluster_id = self.gene_dataset.cell_types
cell_labels = self.gene_dataset.labels.ravel()
de_res = []
de_cluster = []
for i, x in enumerate(cluster_id):
if subset is None:
idx1 = (cell_labels == i)
idx2 = (cell_labels != i)
else:
idx1 = (cell_labels == i) * subset
idx2 = (cell_labels != i) * subset
if np.sum(idx1) > min_cells and np.sum(idx2) > min_cells:
de_cluster.append(x)
# TODO: Understand issue when Sample_pairs=True
res = self.differential_expression_score(idx1=idx1, idx2=idx2,
M_permutation=M_permutation,
n_samples=n_samples,
sample_pairs=sample_pairs)
res["clusters"] = np.repeat(x, len(res.index))
de_res.append(res)
if output_file: # store as an excel spreadsheet
writer = pd.ExcelWriter(save_dir + "differential_expression.%s.xlsx" % filename,
engine="xlsxwriter")
for i, x in enumerate(de_cluster):
de_res[i].to_excel(writer, sheet_name=str(x))
writer.close()
return de_res, de_cluster
def within_cluster_degenes(
self,
cell_labels: Optional[Union[List, np.ndarray]] = None,
min_cells: int = 10,
states: Union[List[bool], np.ndarray] = [],
batch1: Optional[Union[List[int], np.ndarray]] = None,
batch2: Optional[Union[List[int], np.ndarray]] = None,
subset: Optional[Union[List[bool], np.ndarray]] = None,
n_samples: int = None,
sample_pairs: bool = False,
M_permutation: int = None,
output_file: bool = False,
save_dir: str = "./",
filename: str = "within_cluster",
):
"""
Performs Differential Expression within clusters for different cell states
:param cell_labels: optional: Labels of cells
:param min_cells: Ceil number of cells used to compute Bayes Factors
:param states: States of the cells.
:param batch1: List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
:param batch2: List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
        :param subset: MASK: Subset of cells you are interested in.
:param n_samples: Number of times the posterior will be sampled for each pop
:param sample_pairs: Activates pair random permutations.
Simply formulated, pairs obtained from posterior sampling (when calling
`sample_scale_from_batch`) will be randomly permuted so that the number of
pairs used to compute Bayes Factors becomes M_permutation.
:param M_permutation: Number of times we will "mix" posterior samples in step 2.
Only makes sense when sample_pairs=True
:param output_file: Bool: save file?
:param save_dir:
:param filename:
:return: Tuple (de_res, de_cluster)
- de_res is a list of length nb_clusters (based on provided labels or on hardcoded cell
types). de_res[i] contains Bayes Factors for population number i vs all the rest
- de_cluster returns the associated names of clusters
            Only clusters for which at least `min_cells` cells were available to compute
            predicted Bayes Factors are included in these results
"""
if len(self.gene_dataset) != len(states):
raise ValueError(" the length of states have to be the same as the number of cells")
if (cell_labels is None) and not hasattr(self.gene_dataset, "cell_types"):
raise ValueError("If gene_dataset is not annotated with labels and cell types,"
" then must provide cell_labels")
# Input cell_labels take precedence over cell type label annotation in dataset
elif cell_labels is not None:
cluster_id = np.unique(cell_labels[cell_labels >= 0])
# Can make cell_labels < 0 to filter out cells when computing DE
else:
cluster_id = self.gene_dataset.cell_types
cell_labels = self.gene_dataset.labels.ravel()
de_res = []
de_cluster = []
states = np.asarray([1 if x else 0 for x in states])
nstates = np.asarray([0 if x else 1 for x in states])
for i, x in enumerate(cluster_id):
if subset is None:
idx1 = (cell_labels == i) * states
idx2 = (cell_labels == i) * nstates
else:
idx1 = (cell_labels == i) * subset * states
idx2 = (cell_labels == i) * subset * nstates
if np.sum(idx1) > min_cells and np.sum(idx2) > min_cells:
de_cluster.append(x)
res = self.differential_expression_score(idx1=idx1, idx2=idx2,
batchid1=batch1, batchid2=batch2,
M_permutation=M_permutation,
n_samples=n_samples,
sample_pairs=sample_pairs)
res["clusters"] = np.repeat(x, len(res.index))
de_res.append(res)
if output_file: # store as an excel spreadsheet
writer = pd.ExcelWriter(save_dir + "differential_expression.%s.xlsx" % filename,
engine="xlsxwriter")
for i, x in enumerate(de_cluster):
de_res[i].to_excel(writer, sheet_name=str(x))
writer.close()
return de_res, de_cluster
@torch.no_grad()
def imputation(self, n_samples=1):
imputed_list = []
for tensors in self:
sample_batch, _, _, batch_index, labels = tensors
px_rate = self.model.get_sample_rate(sample_batch, batch_index=batch_index, y=labels, n_samples=n_samples)
imputed_list += [np.array(px_rate.cpu())]
imputed_list = np.concatenate(imputed_list)
return imputed_list.squeeze()
@torch.no_grad()
def generate(
self,
n_samples: int = 100,
genes: Union[list, np.ndarray] = None,
batch_size: int = 128
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Create observation samples from the Posterior Predictive distribution
:param n_samples: Number of required samples for each cell
:param genes: Indices of genes of interest
:param batch_size: Desired Batch size to generate data
:return: Tuple (x_new, x_old)
Where x_old has shape (n_cells, n_genes)
Where x_new has shape (n_cells, n_genes, n_samples)
"""
assert self.model.reconstruction_loss in ['zinb', 'nb']
zero_inflated = self.model.reconstruction_loss == 'zinb'
x_old = []
x_new = []
for tensors in self.update({"batch_size": batch_size}):
sample_batch, _, _, batch_index, labels = tensors
outputs = self.model.inference(
sample_batch,
batch_index=batch_index,
y=labels,
n_samples=n_samples
)
px_dispersion = outputs['px_r']
px_rate = outputs['px_rate']
px_dropout = outputs['px_dropout']
p = px_rate / (px_rate + px_dispersion)
r = px_dispersion
# Important remark: Gamma is parametrized by the rate = 1/scale!
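            # With p = px_rate / (px_rate + px_dispersion), the Gamma-Poisson mixture below
            # has mean r * p / (1 - p) = px_rate, i.e. it reproduces the Negative Binomial
            # mean/dispersion parametrization used by the decoder.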
l_train = distributions.Gamma(concentration=r, rate=(1 - p) / p).sample()
# Clamping as distributions objects can have buggy behaviors when
# their parameters are too high
l_train = torch.clamp(l_train, max=1e8)
gene_expressions = distributions.Poisson(l_train).sample() # Shape : (n_samples, n_cells_batch, n_genes)
if zero_inflated:
p_zero = (1.0 + torch.exp(-px_dropout)).pow(-1)
random_prob = torch.rand_like(p_zero)
gene_expressions[random_prob <= p_zero] = 0
gene_expressions = gene_expressions.permute([1, 2, 0]) # Shape : (n_cells_batch, n_genes, n_samples)
x_old.append(sample_batch)
x_new.append(gene_expressions)
x_old = torch.cat(x_old) # Shape (n_cells, n_genes)
x_new = torch.cat(x_new) # Shape (n_cells, n_genes, n_samples)
if genes is not None:
gene_ids = self.gene_dataset.genes_to_index(genes)
x_new = x_new[:, gene_ids, :]
x_old = x_old[:, gene_ids]
return x_new.cpu().numpy(), x_old.cpu().numpy()
@torch.no_grad()
def generate_parameters(self):
dropout_list = []
mean_list = []
dispersion_list = []
for tensors in self.sequential(1000):
sample_batch, _, _, batch_index, labels = tensors
outputs = self.model.inference(
sample_batch,
batch_index=batch_index,
y=labels,
n_samples=1
)
px_dispersion = outputs['px_r']
px_rate = outputs['px_rate']
px_dropout = outputs['px_dropout']
dispersion_list += [np.repeat(np.array(px_dispersion.cpu())[np.newaxis, :], px_rate.size(0), axis=0)]
mean_list += [np.array(px_rate.cpu())]
dropout_list += [np.array(px_dropout.cpu())]
return np.concatenate(dropout_list), np.concatenate(mean_list), np.concatenate(dispersion_list)
@torch.no_grad()
def get_stats(self):
libraries = []
for tensors in self.sequential(batch_size=128):
x, local_l_mean, local_l_var, batch_index, y = tensors
library = self.model.inference(x, batch_index, y)['library']
libraries += [np.array(library.cpu())]
libraries = np.concatenate(libraries)
return libraries.ravel()
@torch.no_grad()
def get_harmonized_scale(self, fixed_batch):
px_scales = []
fixed_batch = float(fixed_batch)
for tensors in self:
sample_batch, local_l_mean, local_l_var, batch_index, label = tensors
px_scales += [self.model.scale_from_z(sample_batch, fixed_batch).cpu()]
return np.concatenate(px_scales)
@torch.no_grad()
def get_sample_scale(self):
px_scales = []
for tensors in self:
sample_batch, _, _, batch_index, labels = tensors
px_scales += [
np.array(
(
self.model.get_sample_scale(
sample_batch, batch_index=batch_index, y=labels, n_samples=1
)
).cpu()
)
]
return np.concatenate(px_scales)
@torch.no_grad()
def imputation_list(self, n_samples=1):
original_list = []
imputed_list = []
batch_size = 10000 # self.data_loader_kwargs["batch_size"] // n_samples
for tensors, corrupted_tensors in zip(self.uncorrupted().sequential(batch_size=batch_size),
self.corrupted().sequential(batch_size=batch_size)):
batch = tensors[0]
actual_batch_size = batch.size(0)
dropout_batch, _, _, batch_index, labels = corrupted_tensors
px_rate = self.model.get_sample_rate(dropout_batch, batch_index=batch_index, y=labels, n_samples=n_samples)
indices_dropout = torch.nonzero(batch - dropout_batch)
if indices_dropout.size() != torch.Size([0]):
i = indices_dropout[:, 0]
j = indices_dropout[:, 1]
batch = batch.unsqueeze(0).expand((n_samples, batch.size(0), batch.size(1)))
original = np.array(batch[:, i, j].view(-1).cpu())
imputed = np.array(px_rate[..., i, j].view(-1).cpu())
cells_index = np.tile(np.array(i.cpu()), n_samples)
original_list += [original[cells_index == i] for i in range(actual_batch_size)]
imputed_list += [imputed[cells_index == i] for i in range(actual_batch_size)]
else:
original_list = np.array([])
imputed_list = np.array([])
return original_list, imputed_list
@torch.no_grad()
def imputation_score(self, original_list=None, imputed_list=None, n_samples=1):
if original_list is None or imputed_list is None:
original_list, imputed_list = self.imputation_list(n_samples=n_samples)
original_list_concat = np.concatenate(original_list)
imputed_list_concat = np.concatenate(imputed_list)
are_lists_empty = (len(original_list_concat) == 0) and (len(imputed_list_concat) == 0)
if are_lists_empty:
logger.info("No difference between corrupted dataset and uncorrupted dataset")
return 0.0
else:
return np.median(np.abs(original_list_concat - imputed_list_concat))
@torch.no_grad()
def imputation_benchmark(self, n_samples=8, show_plot=True, title_plot="imputation", save_path=""):
original_list, imputed_list = self.imputation_list(n_samples=n_samples)
# Median of medians for all distances
median_score = self.imputation_score(original_list=original_list, imputed_list=imputed_list)
# Mean of medians for each cell
imputation_cells = []
for original, imputed in zip(original_list, imputed_list):
has_imputation = len(original) and len(imputed)
imputation_cells += [np.median(np.abs(original - imputed)) if has_imputation else 0]
mean_score = np.mean(imputation_cells)
logger.debug("\nMedian of Median: %.4f\nMean of Median for each cell: %.4f" % (median_score, mean_score))
plot_imputation(np.concatenate(original_list), np.concatenate(imputed_list), show_plot=show_plot,
title=os.path.join(save_path, title_plot))
return original_list, imputed_list
@torch.no_grad()
def knn_purity(self):
latent, _, labels = self.get_latent()
score = knn_purity(latent, labels)
logger.debug("KNN purity score :", score)
return score
knn_purity.mode = "max"
@torch.no_grad()
def clustering_scores(self, prediction_algorithm="knn"):
if self.gene_dataset.n_labels > 1:
latent, _, labels = self.get_latent()
if prediction_algorithm == "knn":
labels_pred = KMeans(self.gene_dataset.n_labels, n_init=200).fit_predict(latent) # n_jobs>1 ?
elif prediction_algorithm == "gmm":
gmm = GMM(self.gene_dataset.n_labels)
gmm.fit(latent)
labels_pred = gmm.predict(latent)
asw_score = silhouette_score(latent, labels)
nmi_score = NMI(labels, labels_pred)
ari_score = ARI(labels, labels_pred)
uca_score = unsupervised_clustering_accuracy(labels, labels_pred)[0]
logger.debug("Clustering Scores:\nSilhouette: %.4f\nNMI: %.4f\nARI: %.4f\nUCA: %.4f" %
(asw_score, nmi_score, ari_score, uca_score))
return asw_score, nmi_score, ari_score, uca_score
@torch.no_grad()
def nn_overlap_score(self, **kwargs):
"""
Quantify how much the similarity between cells in the mRNA latent space resembles their similarity at the
protein level. Compute the overlap fold enrichment between the protein and mRNA-based cell 100-nearest neighbor
graph and the Spearman correlation of the adjacency matrices.
"""
if hasattr(self.gene_dataset, "protein_expression_clr"):
latent, _, _ = self.sequential().get_latent()
protein_data = self.gene_dataset.protein_expression_clr[self.indices]
spearman_correlation, fold_enrichment = nn_overlap(latent, protein_data, **kwargs)
logger.debug("Overlap Scores:\nSpearman Correlation: %.4f\nFold Enrichment: %.4f" %
(spearman_correlation, fold_enrichment))
return spearman_correlation, fold_enrichment
@torch.no_grad()
def show_t_sne(self, n_samples=1000, color_by="", save_name="", latent=None, batch_indices=None,
labels=None, n_batch=None):
# If no latent representation is given
if latent is None:
latent, batch_indices, labels = self.get_latent(sample=True)
latent, idx_t_sne = self.apply_t_sne(latent, n_samples)
batch_indices = batch_indices[idx_t_sne].ravel()
labels = labels[idx_t_sne].ravel()
if not color_by:
plt.figure(figsize=(10, 10))
plt.scatter(latent[:, 0], latent[:, 1])
if color_by == "scalar":
plt.figure(figsize=(10, 10))
plt.scatter(latent[:, 0], latent[:, 1], c=labels.ravel())
else:
if n_batch is None:
n_batch = self.gene_dataset.n_batches
if color_by == "batches" or color_by == "labels":
indices = batch_indices.ravel() if color_by == "batches" else labels.ravel()
n = n_batch if color_by == "batches" else self.gene_dataset.n_labels
if self.gene_dataset.cell_types is not None and color_by == "labels":
plt_labels = self.gene_dataset.cell_types
else:
plt_labels = [str(i) for i in range(len(np.unique(indices)))]
plt.figure(figsize=(10, 10))
for i, label in zip(range(n), plt_labels):
plt.scatter(latent[indices == i, 0], latent[indices == i, 1], label=label)
plt.legend()
elif color_by == "batches and labels":
fig, axes = plt.subplots(1, 2, figsize=(14, 7))
batch_indices = batch_indices.ravel()
for i in range(n_batch):
axes[0].scatter(latent[batch_indices == i, 0], latent[batch_indices == i, 1], label=str(i))
axes[0].set_title("batch coloring")
axes[0].axis("off")
axes[0].legend()
indices = labels.ravel()
if hasattr(self.gene_dataset, "cell_types"):
plt_labels = self.gene_dataset.cell_types
else:
plt_labels = [str(i) for i in range(len(np.unique(indices)))]
for i, cell_type in zip(range(self.gene_dataset.n_labels), plt_labels):
axes[1].scatter(latent[indices == i, 0], latent[indices == i, 1], label=cell_type)
axes[1].set_title("label coloring")
axes[1].axis("off")
axes[1].legend()
plt.axis("off")
plt.tight_layout()
if save_name:
plt.savefig(save_name)
@staticmethod
def apply_t_sne(latent, n_samples=1000):
idx_t_sne = np.random.permutation(len(latent))[:n_samples] if n_samples else np.arange(len(latent))
if latent.shape[1] != 2:
latent = TSNE().fit_transform(latent[idx_t_sne])
return latent, idx_t_sne
def raw_data(self):
"""
Returns raw data for classification
"""
return self.gene_dataset.X[self.indices], self.gene_dataset.labels[self.indices].ravel()
def entropy_from_indices(indices):
return entropy(np.array(np.unique(indices, return_counts=True)[1].astype(np.int32)))
def entropy_batch_mixing(latent_space, batches, n_neighbors=50, n_pools=50, n_samples_per_pool=100):
def entropy(hist_data):
n_batches = len(np.unique(hist_data))
if n_batches > 2:
raise ValueError("Should be only two clusters for this metric")
frequency = np.mean(hist_data == 1)
if frequency == 0 or frequency == 1:
return 0
return -frequency * np.log(frequency) - (1 - frequency) * np.log(1 - frequency)
n_neighbors = min(n_neighbors, len(latent_space) - 1)
nne = NearestNeighbors(n_neighbors=1 + n_neighbors, n_jobs=8)
nne.fit(latent_space)
kmatrix = nne.kneighbors_graph(latent_space) - scipy.sparse.identity(latent_space.shape[0])
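    # kmatrix is the kNN adjacency matrix with self-edges removed (each point is its own
    # nearest neighbour in kneighbors_graph, hence the identity subtraction).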
score = 0
for t in range(n_pools):
indices = np.random.choice(np.arange(latent_space.shape[0]), size=n_samples_per_pool)
score += np.mean([entropy(batches[kmatrix[indices].nonzero()[1][kmatrix[indices].nonzero()[0] == i]])
for i in range(n_samples_per_pool)])
return score / float(n_pools)
def softmax(x, axis=None):
"""
Compute the softmax of each element along an axis of X.
Parameters
----------
    x: ND-Array. Probably should be floats.
    axis (optional): axis to compute values along. Default is the
        first non-singleton axis.
    Returns an array the same size as x. The result will sum to 1
    along the specified axis.
"""
y = np.atleast_2d(x)
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
y = y - np.expand_dims(np.max(y, axis=axis), axis)
y = np.exp(y)
ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
p = y / ax_sum
if len(x.shape) == 1:
p = p.flatten()
return p
def get_sampling_pair_idx(list_1, list_2, do_sample=True, permutation=False, M_permutation=10000,
probas_a=None, probas_b=None):
"""
Returns the indices of the sampled quantities of populations 1 and 2
that will be compared.
This function has several modes based on the values of do_sample and permutation
as described below
:param list_1: Indices corresponding to population 1
:param list_2: Indices corresponding to population 2
    :param do_sample: Are pairs sampled? If not, we compare the posterior quantities
        term-wise
    :param permutation: only has an effect when do_sample is True.
        - if permutation=False, NORMAL BEHAVIOR: elements used for pop 1 come from list_1
        and those for pop 2 from list_2
- if permutation=True, SPECIFIC BEHAVIOR: All elements are sampled.
Should only be used as a sanity check.
:param M_permutation:
:param probas_a: used for Importance Sampling, set to None by default
:param probas_b: used for Importance Sampling, set to None by default
:return:
"""
if do_sample:
if not permutation:
# case1: no permutation, sample from A and then from B
u, v = np.random.choice(list_1, size=M_permutation, p=probas_a), \
np.random.choice(list_2, size=M_permutation, p=probas_b)
else:
# case2: permutation, sample from A+B twice
u, v = (np.random.choice(list_1 + list_2, size=M_permutation),
np.random.choice(list_1 + list_2, size=M_permutation))
else:
# TODO: Assert good behavior
u, v = list_1, list_2
assert len(u) == len(v), 'Inconsistent number of indices used for pairs'
return u, v
def get_bayes_factors(
px_scale: Union[List[float], np.ndarray],
all_labels: Union[List, np.ndarray],
cell_idx: Union[int, str],
other_cell_idx: Optional[Union[int, str]] = None,
genes_idx: Union[List[int], np.ndarray] = None,
log_ratios: Union[List[int], np.ndarray] = None,
importance_sampling : bool = False,
M_permutation: int = 10000,
permutation: bool = False,
sample_pairs: bool = True,
):
"""
    Returns an array of Bayes factors for all genes
:param px_scale: The gene frequency array for all cells (might contain multiple samples per cells)
:param all_labels: The labels array for the corresponding cell types
:param cell_idx: The first cell type population to consider. Either a string or an idx
:param other_cell_idx: (optional) The second cell type population to consider. Either a string or an idx
:param genes_idx: Indices of genes for which DE Analysis applies
:param sample_pairs: Activates subsampling.
Simply formulated, pairs obtained from posterior sampling (when calling
`sample_scale_from_batch`) will be randomly permuted so that the number of
pairs used to compute Bayes Factors becomes M_permutation.
:param log_ratios: un-normalized weights for importance sampling
:param importance_sampling: whether to use IS
:param M_permutation: Number of times we will "mix" posterior samples in step 2.
Only makes sense when sample_pairs=True
:param permutation: Whether or not to permute. Normal behavior is False.
Setting permutation=True basically shuffles cell_idx and other_cell_idx so that we
estimate Bayes Factors of random populations of the union of cell_idx and other_cell_idx.
:return:
"""
idx = (all_labels == cell_idx)
idx_other = (all_labels == other_cell_idx) if other_cell_idx is not None else (
all_labels != cell_idx)
if genes_idx is not None:
px_scale = px_scale[:, genes_idx]
# first extract the data
# Assert that at this point we no longer have batch dimensions
assert len(px_scale.shape) == 2
sample_rate_a = px_scale[idx, :]
sample_rate_b = px_scale[idx_other, :]
# sample_rate_a = px_scale[:, idx, :]
# sample_rate_b = px_scale[:, idx_other, :]
sample_rate_a = sample_rate_a.reshape((-1, px_scale.shape[-1]))
sample_rate_b = sample_rate_b.reshape((-1, px_scale.shape[-1]))
samples = np.concatenate((sample_rate_a, sample_rate_b), axis=0)
# prepare the pairs for sampling
list_1 = list(np.arange(sample_rate_a.shape[0]))
list_2 = list(sample_rate_a.shape[0] + np.arange(sample_rate_b.shape[0]))
if importance_sampling:
print('Importance Sampling')
# weight_a = log_ratios[:, idx]
# weight_b = log_ratios[:, idx_other]
print(log_ratios.shape)
weight_a = log_ratios[idx]
weight_b = log_ratios[idx_other]
# second let's normalize the weights
weight_a = softmax(weight_a)
weight_b = softmax(weight_b)
# reshape and aggregate dataset
weight_a = weight_a.flatten()
weight_b = weight_b.flatten()
weights = np.concatenate((weight_a, weight_b))
# probas_a = weight_a / np.sum(idx)
# probas_b = weight_b / np.sum(idx_other)
probas_a = weight_a
probas_b = weight_b
print('IS A MAX', probas_a.max(), 'IS B MAX', probas_b.max())
u, v = get_sampling_pair_idx(list_1, list_2, do_sample=sample_pairs,
permutation=permutation, M_permutation=M_permutation,
probas_a=probas_a, probas_b=probas_b)
# then constitutes the pairs
first_samples = samples[u]
second_samples = samples[v]
first_weights = weights[u]
second_weights = weights[v]
# print('u v shapes', u.shape, v.shape)
to_sum = first_weights[:, np.newaxis] * second_weights[:, np.newaxis] * (
first_samples >= second_samples)
incomplete_weights = first_weights * second_weights
res = np.sum(to_sum, axis=0) / np.sum(incomplete_weights, axis=0)
else:
probas_a = None
probas_b = None
u, v = get_sampling_pair_idx(list_1, list_2, do_sample=sample_pairs,
permutation=permutation, M_permutation=M_permutation,
probas_a=probas_a, probas_b=probas_b)
# then constitutes the pairs
first_samples = samples[u]
second_samples = samples[v]
res = np.mean(first_samples >= second_samples, axis=0)
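    # res estimates P(scale in pop A >= scale in pop B) per gene; the log-odds transform
    # below converts it into a (natural-log) Bayes factor, with 1e-8 for numerical stability.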
res = np.log(res + 1e-8) - np.log(1 - res + 1e-8)
return res
def _p_wa_higher_wb(k1, k2, theta1, theta2):
"""
:param k1: Shape of wa
:param k2: Shape of wb
:param theta1: Scale of wa
:param theta2: Scale of wb
:return:
"""
a = k2
b = k1
x = theta1 / (theta1 + theta2)
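    # For independent wa ~ Gamma(k1, scale=theta1) and wb ~ Gamma(k2, scale=theta2),
    # P(wa > wb) = I_x(k2, k1), the regularized incomplete beta function evaluated at
    # x = theta1 / (theta1 + theta2), which is what betainc computes here.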
return betainc(a, b, x)
def get_bayes_gamma(data, all_labels, cell_idx, other_cell_idx=None, genes_idx=None,
M_permutation=10000, permutation=False, sample_pairs=True):
"""
    Returns a list of Bayes factors for all genes
    :param data: Tuple (shapes, scales) of Gamma posterior parameters for all cells (might contain multiple samples per cell)
:param all_labels: The labels array for the corresponding cell types
:param cell_idx: The first cell type population to consider. Either a string or an idx
:param other_cell_idx: (optional) The second cell type population to consider. Either a string or an idx
:param M_permutation: The number of permuted samples.
:param permutation: Whether or not to permute.
:return:
"""
res = []
idx = (all_labels == cell_idx)
idx_other = (all_labels == other_cell_idx) if other_cell_idx is not None else (all_labels != cell_idx)
shapes, scales = data
if genes_idx is not None:
shapes = shapes[:, genes_idx]
scales = scales[:, genes_idx]
    sample_shape_a = shapes[idx].squeeze()
    sample_scales_a = scales[idx].squeeze()
    sample_shape_b = shapes[idx_other].squeeze()
    sample_scales_b = scales[idx_other].squeeze()
assert sample_shape_a.shape == sample_scales_a.shape
assert sample_shape_b.shape == sample_scales_b.shape
# agregate dataset
samples_shape = np.vstack((sample_shape_a, sample_shape_b))
samples_scales = np.vstack((sample_scales_a, sample_scales_b))
# prepare the pairs for sampling
list_1 = list(np.arange(sample_shape_a.shape[0]))
list_2 = list(sample_shape_a.shape[0] + np.arange(sample_shape_b.shape[0]))
u, v = get_sampling_pair_idx(list_1, list_2, permutation=permutation,
M_permutation=M_permutation,
probas_a=None, probas_b=None, do_sample=sample_pairs)
# then constitutes the pairs
first_set = (samples_shape[u], samples_scales[u])
second_set = (samples_shape[v], samples_scales[v])
shapes_a, scales_a = first_set
shapes_b, scales_b = second_set
for shape_a, scale_a, shape_b, scale_b in zip(shapes_a, scales_a, shapes_b, scales_b):
res.append(_p_wa_higher_wb(shape_a, shape_b, scale_a, scale_b))
res = np.array(res)
res = np.mean(res, axis=0)
assert len(res) == shapes_a.shape[1]
res = np.log(res + 1e-8) - np.log(1 - res + 1e-8)
return res
def plot_imputation(original, imputed, show_plot=True, title="Imputation"):
y = imputed
x = original
ymax = 10
mask = x < ymax
x = x[mask]
y = y[mask]
mask = y < ymax
x = x[mask]
y = y[mask]
l_minimum = np.minimum(x.shape[0], y.shape[0])
x = x[:l_minimum]
y = y[:l_minimum]
data = np.vstack([x, y])
plt.figure(figsize=(5, 5))
axes = plt.gca()
axes.set_xlim([0, ymax])
axes.set_ylim([0, ymax])
nbins = 50
# Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
k = kde.gaussian_kde(data)
xi, yi = np.mgrid[0:ymax:nbins * 1j, 0:ymax:nbins * 1j]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
plt.title(title, fontsize=12)
plt.ylabel("Imputed counts")
plt.xlabel("Original counts")
plt.pcolormesh(yi, xi, zi.reshape(xi.shape), cmap="Reds")
a, _, _, _ = np.linalg.lstsq(y[:, np.newaxis], x, rcond=-1)
linspace = np.linspace(0, ymax)
plt.plot(linspace, a * linspace, color="black")
plt.plot(linspace, linspace, color="black", linestyle=":")
if show_plot:
plt.show()
plt.savefig(title + ".png")
def nn_overlap(X1, X2, k=100):
"""
Compute the overlap between the k-nearest neighbor graph of X1 and X2 using Spearman correlation of the
adjacency matrices.
"""
assert len(X1) == len(X2)
n_samples = len(X1)
k = min(k, n_samples - 1)
nne = NearestNeighbors(n_neighbors=k + 1) # "n_jobs=8
nne.fit(X1)
kmatrix_1 = nne.kneighbors_graph(X1) - scipy.sparse.identity(n_samples)
nne.fit(X2)
kmatrix_2 = nne.kneighbors_graph(X2) - scipy.sparse.identity(n_samples)
# 1 - spearman correlation from knn graphs
spearman_correlation = scipy.stats.spearmanr(kmatrix_1.A.flatten(), kmatrix_2.A.flatten())[0]
# 2 - fold enrichment
set_1 = set(np.where(kmatrix_1.A.flatten() == 1)[0])
set_2 = set(np.where(kmatrix_2.A.flatten() == 1)[0])
fold_enrichment = len(set_1.intersection(set_2)) * n_samples ** 2 / (float(len(set_1)) * len(set_2))
return spearman_correlation, fold_enrichment
def unsupervised_clustering_accuracy(y, y_pred):
"""
Unsupervised Clustering Accuracy
"""
assert len(y_pred) == len(y)
u = np.unique(np.concatenate((y, y_pred)))
n_clusters = len(u)
mapping = dict(zip(u, range(n_clusters)))
reward_matrix = np.zeros((n_clusters, n_clusters), dtype=np.int64)
for y_pred_, y_ in zip(y_pred, y):
if y_ in mapping:
reward_matrix[mapping[y_pred_], mapping[y_]] += 1
cost_matrix = reward_matrix.max() - reward_matrix
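    # linear_assignment solves the Hungarian matching that maximizes total reward,
    # i.e. the best one-to-one relabelling of predicted clusters onto true labels.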
ind = linear_assignment(cost_matrix)
return sum([reward_matrix[i, j] for i, j in ind]) * 1.0 / y_pred.size, ind
def knn_purity(latent, label, n_neighbors=30):
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(latent)
indices = nbrs.kneighbors(latent, return_distance=False)[:, 1:]
neighbors_labels = np.vectorize(lambda i: label[i])(indices)
# pre cell purity scores
scores = ((neighbors_labels - label.reshape(-1, 1)) == 0).mean(axis=1)
res = [np.mean(scores[label == i]) for i in np.unique(label)] # per cell-type purity
return np.mean(res)
def proximity_imputation(real_latent1, normed_gene_exp_1, real_latent2, k=4):
knn = KNeighborsRegressor(k, weights="distance")
y = knn.fit(real_latent1, normed_gene_exp_1).predict(real_latent2)
return y
```
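A minimal sketch of how the `get_bayes_factors` helper above can be exercised on synthetic data; the array shapes, label values and `M_permutation` setting are illustrative assumptions, not part of the original module.
```python
import numpy as np

rng = np.random.RandomState(0)
px_scale = rng.gamma(shape=2.0, scale=1.0, size=(200, 50))  # 200 cells, 50 genes
all_labels = np.repeat([0, 1], 100)                         # two toy populations

# Compare population 0 against population 1 without importance sampling.
bayes_factors = get_bayes_factors(
    px_scale,
    all_labels,
    cell_idx=0,
    other_cell_idx=1,
    M_permutation=1000,
    sample_pairs=True,
)
print(bayes_factors.shape)  # (50,); positive values favour higher expression in population 0
```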
#### File: scvi/utils/utils.py
```python
import os
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
import arviz as az
def make_dir_if_necessary(directory):
if not os.path.exists(directory):
os.makedirs(directory)
class IterativeDict:
"""
    Structure useful to save metrics for different models over different training runs
    Saved in a nested dictionary
Structure:
model_name ==> metric_name ==> table [n_trainings, ...]
"""
def __init__(self, model_names):
self.values = {key: {} for key in model_names}
def set_values(self, model_name, metric_name, values):
if metric_name not in self.values[model_name]:
self.values[model_name][metric_name] = [values]
else:
self.values[model_name][metric_name].append(values)
def to_df(self):
return pd.DataFrame(self.values)
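# Minimal usage sketch for IterativeDict (names are illustrative):
#   scores = IterativeDict(["model_a"])
#   scores.set_values("model_a", "accuracy", [0.81, 0.83])   # training run 1
#   scores.set_values("model_a", "accuracy", [0.84, 0.86])   # training run 2
#   scores.to_df()  # DataFrame with model names as columns, metric names as index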
def plot_traj(history, x=None, **plot_params):
"""
:param history: (n_sim, n_x_values) array
:param x: associated x values used for plotting
:param plot_params: Plot parameters fed to plt.plot
:return:
"""
plot_params = {} if plot_params is None else plot_params
history_np = np.array(history)
theta_mean = np.mean(history_np, axis=0)
theta_std = np.std(history_np, axis=0)
n_iter = len(theta_mean)
x = np.arange(n_iter) if x is None else x
plt.plot(x, theta_mean, **plot_params)
plt.fill_between(
x=x, y1=theta_mean - theta_std, y2=theta_mean + theta_std, alpha=0.25
)
def plot_identity():
xmin, xmax = plt.xlim()
vals = np.linspace(xmin, xmax, 50)
plt.plot(vals, vals, "--", label="identity")
def plot_precision_recall(y_test, y_score, label=""):
average_precision = average_precision_score(y_test, y_score)
precision, recall, _ = precision_recall_curve(y_test, y_score)
# In matplotlib < 1.5, plt.fill_between does not have a 'step' argument
step_kwargs = {"step": "post"}
legend = "{0} PR curve: AP={1:0.2f}".format(label, average_precision)
plt.step(recall, precision, color="b", alpha=0.2, where="post", label=legend)
plt.fill_between(recall, precision, alpha=0.2, **step_kwargs)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
def compute_hdi(arr, credible_interval=0.64):
"""
Given array of (simulations, dimensions) computes Highest Density Intervals
Sample dimension should be first dimension
:param arr: Array of shape (n_samples, n_genes)
:param credible_interval:
:return:
"""
return az.hpd(arr, credible_interval=credible_interval)
def demultiply(arr1, arr2, factor=2):
"""
Suppose you have at disposal
arr1 ~ p(h|x_a)
arr2 ~ p(h|x_b)
Then artificially increase the sizes on respective arrays
so that you can sample from
p(f(h1, h2) | x_a, x_b) under the right assumptions
:param arr1:
:param arr2:
:param factor:
:return:
"""
assert arr1.shape == arr2.shape
n_original = len(arr1)
idx_1 = np.random.choice(n_original, size=n_original * factor, replace=True)
idx_2 = np.random.choice(n_original, size=n_original * factor, replace=True)
return arr1[idx_1], arr2[idx_2]
def predict_de_genes(posterior_probas: np.ndarray, desired_fdr: float):
"""
    :param posterior_probas: 1-D array of shape (n_genes,) with posterior probabilities of being DE
    :param desired_fdr: target false discovery rate used to threshold the ranked genes
:return:
"""
assert posterior_probas.ndim == 1
sorted_genes = np.argsort(-posterior_probas)
sorted_pgs = posterior_probas[sorted_genes]
cumulative_fdr = (1.0 - sorted_pgs).cumsum() / (1.0 + np.arange(len(sorted_pgs)))
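    # Genes are ranked by decreasing posterior probability of being DE; cumulative_fdr[k]
    # is the expected false discovery proportion if the top k+1 genes are called DE.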
d = (cumulative_fdr <= desired_fdr).sum() - 1
pred_de_genes = sorted_genes[:d]
is_pred_de = np.zeros_like(cumulative_fdr).astype(bool)
is_pred_de[pred_de_genes] = True
return is_pred_de
``` |
{
"source": "jimmcgaw/gleaner",
"score": 2
} |
#### File: gleaner/blogit/models.py
```python
from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.db import models
from django.db.models import permalink
class PublishedPostManager(models.Manager):
def get_queryset(self):
return super(PublishedPostManager, self).get_queryset().filter(is_published=True)
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=255, unique=True)
slug = models.SlugField(max_length=255, unique=True)
author = models.ForeignKey('auth.User', null=True, blank=True)
content = models.TextField()
is_published = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = models.Manager()
published = PublishedPostManager()
def __unicode__(self):
return "%s" % self.title
@permalink
def get_absolute_url(self):
return ('view_blog_post', None, { 'slug': self.slug })
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so set slug
self.slug = slugify(self.title)
super(Post, self).save(*args, **kwargs)
```
#### File: gleaner/gleans/forms.py
```python
from django import forms
from .models import Crop
class CropForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
""" basically make all fieldnames bootstrappy """
super(CropForm, self).__init__(*args, **kwargs)
for field_name, value in self.fields.iteritems():
self.fields[field_name].widget.attrs['class'] = 'form-control'
class Meta:
model = Crop
fields = ['first_name', 'last_name', 'phone_number', 'contact_address1',
'contact_address2', 'contact_city', 'contact_state', 'contact_zip',
'crop_address1', 'crop_address2', 'crop_city', 'crop_state', 'crop_zip',
'crop_type', 'receipt_preference', 'crop_description']
```
#### File: gleaner/gleans/views.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import Crop
from .forms import CropForm
from blogit.models import Post
def login(request):
return render(request, 'login.html')
def index(request):
posts = Post.published.all().order_by('-created_at')[:5]
return render(request, 'index.html', locals())
@login_required
def user_home(request):
""" homepage for a user after they've logged in """
# crop = Crop.objects.get(user=request.user)
return render(request, 'user_home.html', locals())
@login_required
def crop_form(request):
crop, created = Crop.objects.get_or_create(user=request.user)
if request.method == 'POST':
crop_data = request.POST.copy()
crop_data['user'] = request.user
form = CropForm(crop_data)
else:
form = CropForm(instance=crop)
return render(request, 'crop_form.html', locals())
@login_required
def glean_list(request):
return render(request, 'glean_list.html')
``` |
{
"source": "jimmcslim/example-django",
"score": 2
} |
#### File: greet/migrations/0002_data.py
```python
from __future__ import annotations
from datetime import time
from django.db import migrations
def create_greetings(apps, schema_editor):
Greeting = apps.get_model("greet", "Greeting")
def create(
slug: str, salutation: str, start_time: str | None, end_time: str | None
) -> None:
Greeting(
slug=slug,
salutation=salutation,
start_time=time.fromisoformat(start_time) if start_time else None,
end_time=time.fromisoformat(end_time) if end_time else None,
).save()
create("hello", "Hello", None, None)
create("howareyou", "How are you", None, None)
create("goodmorning", "Good morning", "05:00:00", "11:59:59")
create("goodevening", "Good evening", "17:00:00", "20:59:59")
create("goodnight", "Good night", "21:00:00", "23:59:59")
class Migration(migrations.Migration):
dependencies = [
("greet", "0001_initial"),
]
operations = [
migrations.RunPython(create_greetings),
]
```
#### File: helloworld/person/models.py
```python
from django.db import models
# Note: This is distinct from the standard auth app's User model, for clarity.
class Person(models.Model):
slug = models.CharField(max_length=20, unique=True)
full_name = models.CharField(max_length=50)
def __str__(self):
return self.slug
```
#### File: example-django/helloworld/settings_base.py
```python
from __future__ import annotations
import os
from typing import Any
from helloworld.util.per_app_db_router import PerAppDBRouter
HELLOWORLD_MODE = os.environ.get("HELLOWORLD_MODE", "DEV")
SECRET_KEY = "DEV_SECURITY_KEY"
DEBUG = True
ALLOWED_HOSTS: list[str] = []
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Application definition
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.debug",
"django.template.context_processors.request",
],
},
},
]
DATABASES: dict[str, Any] = {
# Django chokes if 'default' isn't present at all. But it can be set to an empty dict, which
# will be treated as a dummy db.
"default": {}
}
def set_up_database(db_name: str):
"""Set a service up to connect to a named db."""
# TODO: Consult HELLOWORLD_MODE to distinguish dev/staging/prod dbs.
DATABASES[db_name] = {
"ENGINE": "django.db.backends.sqlite3",
"NAME": f"{db_name}.sqlite3",
}
DATABASE_ROUTERS = [
PerAppDBRouter(
{
"contenttypes": "users",
"sessions": "users",
"auth": "users",
"admin": "users",
"person": "users",
"greet": "greetings",
"translate": "greetings",
}
)
]
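# Django consults the router above for reads, writes and migrations: each listed app label
# is mapped onto the "users" or "greetings" database registered via set_up_database().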
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
WSGI_APPLICATION = "helloworld.wsgi.application"
```
#### File: helloworld/translate/models_test.py
```python
import pytest
from helloworld.greet.models import Greeting
from helloworld.translate.models import ( # noqa: F401 (so fk related name works).
Translation,
)
@pytest.mark.django_db
def test_database_is_seeded():
hello = Greeting.objects.get(slug="hello")
translations = {tr.lang: tr.translation for tr in hello.translation_set.all()}
assert {
"en": "Hello",
"es": "Hola",
"fr": "Allo",
"de": "Hallo",
} == translations
```
#### File: helloworld/translate/views.py
```python
from django.http import Http404, HttpResponse
from helloworld.translate.models import Translation
def index(request, slug: str, lang: str) -> HttpResponse:
try:
translation = Translation.objects.get(greeting__slug=slug, lang=lang)
return HttpResponse(translation.translation)
except Translation.DoesNotExist:
raise Http404(f"No translation in {lang} for {slug}")
``` |
{
"source": "jimme0421/NLPer-Arsenal",
"score": 3
} |
#### File: nlper/utils/datasets.py
```python
r"""
Wraps data handling for the various NLP tasks
"""
import torch
from torch.utils.data import Dataset
class DatasetCLF(Dataset):
def __init__(self,
data,
tokenizer,
max_len=512,
load_label=True,
**kwargs):
"""封装文本分类数据集,现在仅支持二分类和多分类,暂不支持多标签分类
:param data: 标准数据格式为List[List[str, int]],例如[[’今天天气很好‘, 1], ['我心情不好', 0]]
:param tokenizer: transformers.xxxTokenizer
:param max_len: 分词后的文本长度上限,default=512
:param load_label: 是否加载标签,default=True
:param model_type:
"""
self.tokenizer = tokenizer
self.max_len = max_len
self.data = data
self.load_label = load_label
def __len__(self):
return len(self.data)
def __getitem__(self, index):
text = self.data[index][0]
encode_inputs = self.tokenizer(text,
truncation=True,
max_length=self.max_len,
return_tensors='pt')
example = {}
for key, value in encode_inputs.items():
example[key] = value[0]
        if self.load_label:
            label = self.data[index][1]
            example['labels'] = torch.tensor(label, dtype=torch.long)
        return example
```
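A brief usage sketch for `DatasetCLF` above; the checkpoint name and example sentences are assumptions for illustration only, and the class is assumed to be importable from the module.
```python
from transformers import AutoTokenizer

# Hypothetical checkpoint; any transformers tokenizer works here.
tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
data = [["今天天气很好", 1], ["我心情不好", 0]]

dataset = DatasetCLF(data, tokenizer, max_len=32)
example = dataset[0]
print(example["input_ids"].shape, example["labels"])  # e.g. torch.Size([8]) tensor(1)
```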
#### File: codes/tricks/text_clf_handler.py
```python
r"""
Text classification task handler, quickly configured via text_clf_xxx.yaml; provides fit, test, eval_test, etc.
"""
import importlib
import sys
import os
sys.path.append('..')
from typing import Union
import torch
from transformers import AutoTokenizer
from codes.nlper.utils import Dict2Obj, format_convert, download_dataset
from codes.nlper.modules.metrics import Metrics
from codes.nlper import mini_pytorch_lightning as mpl
# Look up the data conversion function by dataset name and read the data in the standard format
convert_dataset = {
'text_clf/smp2020-ewect-usual': format_convert.smp2020_ewect_convert,
'text_clf/smp2020-ewect-virus': format_convert.smp2020_ewect_convert
}
class TextCLFHandler():
def __init__(self, configs: Union[dict, Dict2Obj], specialModels=None):
self.configs = configs if isinstance(configs, Dict2Obj) else Dict2Obj(configs)
self.specialModels = specialModels
self._build_data()
self._build_metrics()
self._build_model()
self._build_trainer()
def _build_data(self):
        # If none of the three data files exists
if not (
os.path.isfile(self.configs.train_file)
or os.path.isfile(self.configs.val_file)
or os.path.isfile(self.configs.test_file)
):
            # Automatically download the dataset
is_over = download_dataset(self.configs.dataset_name, self.configs.dataset_cache_dir)
if not is_over:
                print(f'please download dataset manually, and make sure data file path is correct')
exit()
def _build_optimizer(self):
pass
def _build_metrics(self):
metrics_dict = {}
for metric_name in self.configs.metrics:
if metric_name.lower() == 'p':
from codes.nlper.modules.metrics import PMetric
metrics_dict['P'] = PMetric(average='macro')
elif metric_name.lower() == 'r':
from codes.nlper.modules.metrics import RMetric
metrics_dict['R'] = RMetric(average='macro')
elif metric_name.lower() == 'f1':
from codes.nlper.modules.metrics import F1Metric
metrics_dict['F1'] = F1Metric(average='macro')
self.metrics = Metrics(metrics_dict,
target_metric=self.configs.target_metric)
def _build_model(self):
self.tokenizer = AutoTokenizer.from_pretrained(self.configs.pretrained_model)
        # Automatically load the model with the same name from nlper.models.text_clf
module = importlib.import_module('codes.nlper.models')
model_name = self.configs.whole_model
if hasattr(module, model_name):
self.model = getattr(module, model_name)(self.configs)
else:
raise ValueError(
f'{model_name} not found in codes.nlper.models.text_clf'
)
        # Non-standard datasets must be converted via convert_fn
if self.configs.use_convert:
if self.configs.dataset_name in convert_dataset:
convert_fn = convert_dataset[self.configs.dataset_name]
else:
raise RuntimeError(
'use_convert is True, but convert function has not been found'
)
else:
convert_fn = None
if self.specialModels:
self.lightningCLF = self.specialModels.CLFModel(
self.model,
self.tokenizer,
self.configs,
metrics=self.metrics,
convert_fn=convert_fn)
else:
from codes.nlper.models import LightningCLF
self.lightningCLF = LightningCLF(self.model,
self.tokenizer,
self.configs,
metrics=self.metrics,
convert_fn=convert_fn)
def _build_trainer(self):
self._trainer = mpl.StandardTrainer(self.lightningCLF,
self.configs.trainer_args.gpus)
def fit(self, train_loader=None, val_loader=None):
self._trainer.fit(train_loader, val_loader, **self.configs.trainer_args.toDict())
def test(self, test_loader=None, load_best=True):
self._trainer.test(test_loader, load_best=load_best)
def eval_test(self, test_loader=None, checkpoint_path=''):
torch.cuda.empty_cache()
if test_loader:
self._trainer.eval(test_loader, checkpoint_path)
else:
self._trainer.eval(self._trainer.test_loader, checkpoint_path)
``` |
{
"source": "jimmeak/graveyard",
"score": 2
} |
#### File: graveyard/ddcz/context_processors.py
```python
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from .forms.authentication import LoginForm
def common_variables(request):
skin = request.session.get("skin", "light")
skin_directory = skin if skin not in ["light", "dark"] else "light-dark"
deploy_info_html = settings.DEPLOY_VERSION
if settings.DEPLOY_HASH:
deploy_info_html = f'{deploy_info_html} (<a href="https://github.com/dracidoupe/graveyard/commit/{settings.DEPLOY_HASH}">{settings.DEPLOY_HASH}</a>)'
if settings.DEPLOY_DATE:
deploy_info_html = f"{deploy_info_html} ze dne {settings.DEPLOY_DATE}"
return {
"user": request.user,
"ddcz_profile": request.ddcz_profile,
"skin": skin,
"skin_for_include": skin_directory,
"current_page_url": request.get_full_path(),
"skin_css_url": staticfiles_storage.url(
"skins/%(skin)s/css/main.css" % {"skin": skin}
),
"skin_favico_url": staticfiles_storage.url(
"skins/%(skin)s/img/drak.ico" % {"skin": skin}
),
"skin_logo_url": staticfiles_storage.url("skins/%s/img/logo.svg" % skin),
"login_form": LoginForm(),
"discord_invite_link": settings.DISCORD_INVITE_LINK,
"bugfix_tavern_table_id": settings.BUGFIX_TAVERN_TABLE_ID,
"deploy_info_html": deploy_info_html,
}
```
#### File: management/commands/notify.py
```python
from django.core.management.base import BaseCommand
from ddcz.notifications import notify_scheduled, send_email_batch
class Command(BaseCommand):
help = "Send all scheduled notifications"
def handle(self, *args, **options):
notify_scheduled()
send_email_batch()
```
#### File: graveyard/ddcz/middleware.py
```python
def attach_profile(get_response):
def middleware(request):
if request.user.is_authenticated:
request.ddcz_profile = request.user.userprofile
else:
request.ddcz_profile = None
response = get_response(request)
return response
return middleware
```
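A hedged sketch of how the middleware factory above would typically be enabled; the dotted path is an assumption based on the file location, not taken from the project settings.
```python
# settings.py (sketch)
MIDDLEWARE = [
    # ... Django's default middleware ...
    "ddcz.middleware.attach_profile",  # attaches ddcz_profile to every request
]
```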
#### File: ddcz/migrations/0112_emaillist_autofill.py
```python
from django.db import migrations
def fill_ids(apps, schema_editor):
CreationEmailSubscription = apps.get_model("ddcz", "CreationEmailSubscription")
i = 1
for relation in CreationEmailSubscription.objects.all():
# can't do instance.save since that would check for integrity of PK
CreationEmailSubscription.objects.filter(
user_profile_id=relation.user_profile_id,
creative_page_slug=relation.creative_page_slug,
).update(django_id=i)
i += 1
class Migration(migrations.Migration):
dependencies = [
("ddcz", "0111_creationemailsubscription_django_id"),
]
operations = [
migrations.RunPython(fill_ids),
]
```
#### File: ddcz/templatetags/dating.py
```python
from django import template
register = template.Library()
DATING_GROUP_MAP = {"hledam_pj": "Hledám PJe", "hledam_hrace": "Hledám hráče"}
@register.filter
def dating_group_map(key):
try:
return DATING_GROUP_MAP[key]
except KeyError:
return "Hledám"
```
#### File: ddcz/templatetags/tavern.py
```python
import logging
from django import template
from django.urls import reverse_lazy
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from ..html import encode_valid_html
logger = logging.getLogger(__name__)
register = template.Library()
@register.simple_tag
def tavern_table_name(listing_table, user_profile):
show_link = listing_table.show_listing_link(user_profile)
return format_html(
'{}<span class="tavern-table-name{}" data-table-id="{}">{}</span>{} [{}{}]',
mark_safe(
f'<a href="{reverse_lazy("ddcz:tavern-posts", kwargs={"tavern_table_id": listing_table.pk})}">'
)
if show_link
else "",
" tavern_table_name__unread"
if listing_table.new_comments_no is not None
and listing_table.new_comments_no > 0
else "",
listing_table.pk,
mark_safe(encode_valid_html(listing_table.name)),
mark_safe("</a>") if show_link else "",
f"{listing_table.new_comments_no}/"
if listing_table.new_comments_no is not None
else "",
listing_table.posts_no,
)
```
#### File: tests/test_ui/cases.py
```python
from enum import Enum
import socket
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.decorators import classproperty
from selenium import webdriver
class MainPage(Enum):
BODY = "//body"
MAIN_TITLE = "//h1[contains(@class, 'page-heading')]"
LOGIN_USERNAME_INPUT = '//*[@id="id_nick"]'
LOGIN_PASSWORD_INPUT = '//*[@id="<PASSWORD>"]'
LOGIN_SUBMIT = '//*[@id="login_submit"]'
LOGOUT_SUBMIT = '//*[@id="logout_submit"]'
CONTROL_NICK = '//*[@id="ddcz_nick"]'
NAVIGATION_TAVERN = '//*[@id="ddcz_nav_tavern"]'
NAVIGATION_PHORUM = '//*[@id="ddcz_nav_phorum"]'
class SeleniumTestCase(StaticLiveServerTestCase):
# We are always connect to all interfaces
# to simplify the local and CI setup
host = "0.0.0.0"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main_page_nav = MainPage
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (
getattr(settings, "TEST_LIVE_SERVER_HOST", None) or socket.gethostname(),
cls.server_thread.port,
)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not settings.SELENIUM_HUB_HOST:
cls.selenium = webdriver.Chrome()
else:
cls.selenium = webdriver.Remote(
desired_capabilities=webdriver.DesiredCapabilities.CHROME,
command_executor="http://%s:4444/wd/hub" % settings.SELENIUM_HUB_HOST,
)
cls.selenium.implicitly_wait(settings.SELENIUM_IMPLICIT_WAIT)
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
###
# Helper methods for navigating the specific DDCZ webpage
###
def navigate_as_authenticated_user(
self, user_profile, *, navigation_element, expected_title
):
already_correct = False
if self.is_logged_in():
nick = self.el(MainPage.CONTROL_NICK).text
if nick == user_profile.nick:
already_correct = True
else:
self.el(MainPage.LOGOUT_SUBMIT).submit()
if not already_correct:
self.el(MainPage.LOGIN_USERNAME_INPUT).send_keys(user_profile.user.username)
self.el(MainPage.LOGIN_PASSWORD_INPUT).send_keys(user_profile.user.email)
self.el(MainPage.LOGIN_SUBMIT).submit()
self.assertEquals(
user_profile.nick,
self.el(MainPage.CONTROL_NICK).text,
)
self.el(navigation_element).click()
self.assertEquals(
expected_title,
self.el(MainPage.MAIN_TITLE).text,
)
###
# Helper methods to retrieve information from the current page
###
def el(self, enum):
return self.selenium.find_element_by_xpath(enum.value)
def els(self, enum):
return self.selenium.find_elements_by_xpath(enum.value)
def is_logged_in(self):
return self.el(MainPage.BODY).get_attribute("data-logged-in") == "1"
```
#### File: ddcz/views/news.py
```python
from datetime import timedelta
import logging
from django.core.cache import cache
from django.core.paginator import Paginator
from django.shortcuts import render
from django.utils import timezone
from django.views.decorators.http import require_http_methods
from django.views.decorators.vary import vary_on_cookie
from ..creations import ApprovalChoices
from ..models import News, CreativePage, CreationComment
logger = logging.getLogger(__name__)
DEFAULT_LIST_SIZE = 10
NEWSFEED_OLDEST_ARTICLE_INTERVAL = timedelta(weeks=26)
NEWSFEED_MAX_CREATIONS = 20
NEWSFEED_MAX_COMMENTS = 10
NEWSFEED_CACHE_INTERVAL = 10 * 60 # 10 minutes
NEWSFEED_CACHE_KEY = "newsfeed:list"
@require_http_methods(["HEAD", "GET"])
@vary_on_cookie
def list(request):
page = request.GET.get("z_s", 1)
cache_key = "info:news:list"
news = None
if page == 1:
news = cache.get(cache_key)
if not news:
news_list = News.objects.order_by("-date")
paginator = Paginator(news_list, DEFAULT_LIST_SIZE)
news = paginator.get_page(page)
if page == 1:
cache.set(cache_key, news)
return render(request, "news/list.html", {"news": news})
@require_http_methods(["HEAD", "GET"])
@vary_on_cookie
def newsfeed(request):
cached = cache.get(NEWSFEED_CACHE_KEY)
if cached:
articles = cached["articles"]
comments = cached["comments"]
else:
min_date = timezone.now() - NEWSFEED_OLDEST_ARTICLE_INTERVAL
pages = CreativePage.get_all_models()
# This could have been a simple list comprehension. But for a reason unknown to me,
# list(queryset) returns 'QuerySet' object has no attribute 'method', whereas iterating
# over it works fine, as well as [a for a in queryset] list comprehension
# Maybe try out again once we upgrade to newest Django
articles = []
for page in pages:
model = page["model"]
query = model.objects.filter(
is_published=ApprovalChoices.APPROVED.value, published__gte=min_date
).order_by("-published")
if model.__name__ == "CommonArticle":
query = query.filter(creative_page_slug=page["page"].slug)
query = query[0:NEWSFEED_MAX_CREATIONS]
for creation in query:
creation.creative_page = page["page"]
articles.append(creation)
articles.sort(key=lambda article: article.published, reverse=True)
comments = CreationComment.objects.all().order_by("-date")[
0:NEWSFEED_MAX_COMMENTS
]
# FIXME: This should be resolvable via GenericRelation once we migrate to it
page_slug_map = {page["page"].slug: page for page in pages}
for comment in comments:
comment_model = page_slug_map[comment.foreign_table]["model"]
try:
comment.creation = comment_model.objects.get(pk=comment.foreign_id)
comment.creation.creative_page = page_slug_map[comment.foreign_table][
"page"
]
except comment_model.DoesNotExist:
logger.exception(
f"Can't look up creation for comment {comment.pk} for model {comment_model}"
)
cache.set(
NEWSFEED_CACHE_KEY,
{"articles": articles, "comments": comments},
timeout=NEWSFEED_CACHE_INTERVAL,
)
return render(
request, "news/newsfeed.html", {"articles": articles, "comments": comments}
)
``` |
{
"source": "JimmeeX/ur5_t2_4230",
"score": 3
} |
#### File: ur5_t2_4230/src/vision.py
```python
from geometry_msgs.msg import Point
from sensor_msgs.msg import (
CompressedImage,
Image
)
from ur5_t2_4230.srv import (
ObjectDetect,
ObjectDetectRequest,
ObjectDetectResponse
)
import rospy
import cv2
import time
import os
from cv_bridge import CvBridge, CvBridgeError
from utils.detect_object import detectObject
SLEEP_RATE = 3 # Hz
DIR_NAME = "images"
class Vision():
def __init__(self, *args, **kwargs):
rospy.loginfo("[Vision] Initialising Node")
self._rate = rospy.Rate(SLEEP_RATE)
self._bridge = CvBridge()
self._im = None # Latest Image
# Initialise Subscribers
self._subscribers = {}
self._subscribers['camera_color_image_raw'] = rospy.Subscriber('/camera/color/image_raw', Image, self.handleImageCallback, queue_size=1)
# Initialise Publishers
self._publishers = {}
self._publishers['vision_sent_image'] = rospy.Publisher('/vision/sent_image', CompressedImage, queue_size=1)
# Initialise Servers
self._servers = {}
self._servers['vision_detect_object'] = rospy.Service("/vision/detect_object", ObjectDetect, self.handleDetectObject)
"""
####################
SERVER-SIDE HANDLERS
####################
"""
def handleDetectObject(self, request):
# Short Delay to ensure that image is "up to date" (without this the y-coordinate will not be correct)
time.sleep(0.1) #
result, im_debug = detectObject(self._im)
color, shape, x, y, z = result
self.pubSentImage(im_debug)
response = ObjectDetectResponse(
success=True,
message='{} {} detected at [x={}, y={} z={}]'.format(color, shape, x, y, z),
color=color,
shape=shape,
location=Point(
x=x,
y=y,
z=z
)
)
return response
"""
##########################
CLASS SUBSCRIBER CALLBACKS
##########################
"""
def handleImageCallback(self, msg):
try:
self._im = self._bridge.imgmsg_to_cv2(msg)
except CvBridgeError as e:
rospy.logerr(e)
"""
################
HELPER FUNCTIONS
################
"""
def pubSentImage(self, im):
msg = self._bridge.cv2_to_compressed_imgmsg(im)
publisher = self._publishers['vision_sent_image']
publisher.publish(msg)
def saveImage(self):
fileName = str(int(time.time() * 1000)) + '.jpg'
filePath = os.path.join(DIR_NAME, fileName)
rospy.loginfo("[Vision] Saving: " + filePath)
im_bgr = cv2.cvtColor(self._im, cv2.COLOR_RGB2BGR)
cv2.imwrite(filePath, im_bgr)
if __name__ == '__main__':
rospy.init_node('vision')
Vision()
rospy.spin()
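# Client-side sketch (hypothetical node, not part of this file): another node could call
# the /vision/detect_object service defined above via rospy.ServiceProxy:
#   rospy.wait_for_service('/vision/detect_object')
#   detect = rospy.ServiceProxy('/vision/detect_object', ObjectDetect)
#   response = detect()  # response.color, response.shape, response.location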
``` |
{
"source": "jimmellor/flask-volcasample-helper",
"score": 2
} |
#### File: flask-volcasample-helper/volcasamplehelper/app.py
```python
from flask import Flask, request, redirect, url_for, render_template, jsonify
import os
import shutil
import json
import glob
from flask_bootstrap import Bootstrap
from af import afplay, syroconvert, syroplay
app = Flask(__name__)
Bootstrap(app)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_ROOT = "static/uploads"
def get_file(root):
"""
Return the first file in root
"""
if not os.path.isdir(root):
raise Exception("%s directory not found" % root)
files = []
for file in glob.glob("%s/*.*" % root):
fname = file.split(os.sep)[-1]
files.append(fname)
return os.path.join(root,files[0])
@app.route("/")
def index():
return render_template("index.html")
@app.route('/upload/<slot_id>', methods=["POST"])
def upload(slot_id):
"""
Upload a file
"""
form = request.form
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
# Target folder for these uploads.
target = os.path.join(APP_ROOT, UPLOAD_ROOT, slot_id)
if os.path.isdir(target):
shutil.rmtree(target)
os.mkdir(target)
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit("/")[0]
destination = "/".join([target, filename])
upload.save(destination)
# convert the file to syro format
syroconvert(os.path.join(APP_ROOT, destination), slot_id)
return ajax_response(True, slot_id)
@app.route("/play/<slot_id>")
def play(slot_id):
"""
Returns the path of the sound file for the browser to play
"""
# Get the files.
web_root = os.path.join(UPLOAD_ROOT, slot_id)
root = os.path.join(APP_ROOT, web_root)
files = []
for file in glob.glob("%s/*.*" % root):
fname = file.split(os.sep)[-1]
files.append(fname)
filename = os.path.join(web_root,files[0])
return ajax_response(True, filename)
@app.route("/syroupload/<slot_id>")
def syroupload(slot_id):
"""
Play the syro-encoded file via the server audio jack
"""
# Get the files.
filename = get_file(os.path.join(APP_ROOT, UPLOAD_ROOT, slot_id))
syroplay(filename)
return ajax_response(True, filename)
@app.route("/status/<slot_id>")
def status(slot_id):
"""
Check if there's a file in the slot
"""
try:
filename = get_file(os.path.join(APP_ROOT, UPLOAD_ROOT, slot_id))
return ajax_response(True, filename)
except Exception, e:
return ajax_response(False, slot_id)
@app.context_processor
def utility_processor():
def get_status(slot_id):
try:
get_file(os.path.join(APP_ROOT, UPLOAD_ROOT, str(slot_id)))
return "ready"
except Exception, e:
return "empty"
return dict(get_status=get_status)
def ajax_response(status, msg):
status_code = 200 if status else 404
message = {
'status': status_code,
'msg': msg,
}
resp = jsonify(message)
resp.status_code = status_code
return resp
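# Hypothetical client sketch (file name, slot id and port are assumptions):
#   import requests
#   with open("sample.wav", "rb") as f:
#       requests.post("http://localhost:5000/upload/1",
#                     files={"file": f}, data={"__ajax": "true"})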
``` |
{
"source": "jimmelville/nornir",
"score": 2
} |
#### File: nornir/nornir/init_nornir.py
```python
from typing import Any, Callable, Dict
from nornir.core import Nornir
from nornir.core.connections import Connections
from nornir.core.deserializer.configuration import Config
from nornir.core.state import GlobalState
from nornir.plugins.connections.napalm import Napalm
from nornir.plugins.connections.netmiko import Netmiko
from nornir.plugins.connections.paramiko import Paramiko
def register_default_connection_plugins() -> None:
Connections.register("napalm", Napalm)
Connections.register("netmiko", Netmiko)
Connections.register("paramiko", Paramiko)
def cls_to_string(cls: Callable[..., Any]) -> str:
return f"{cls.__module__}.{cls.__name__}"
def InitNornir(
config_file: str = "",
dry_run: bool = False,
configure_logging: bool = True,
**kwargs: Dict[str, Any],
) -> Nornir:
"""
Arguments:
config_file(str): Path to the configuration file (optional)
dry_run(bool): Whether to simulate changes or not
**kwargs: Extra information to pass to the
:obj:`nornir.core.configuration.Config` object
Returns:
:obj:`nornir.core.Nornir`: fully instantiated and configured
"""
register_default_connection_plugins()
if callable(kwargs.get("inventory", {}).get("plugin", "")):
kwargs["inventory"]["plugin"] = cls_to_string(kwargs["inventory"]["plugin"])
if callable(kwargs.get("inventory", {}).get("transform_function", "")):
kwargs["inventory"]["transform_function"] = cls_to_string(
kwargs["inventory"]["transform_function"]
)
conf = Config.load_from_file(config_file, **kwargs)
data = GlobalState(dry_run=dry_run)
if configure_logging:
conf.logging.configure()
inv = conf.inventory.plugin.deserialize(
transform_function=conf.inventory.transform_function,
transform_function_options=conf.inventory.transform_function_options,
config=conf,
**conf.inventory.options,
)
return Nornir(inventory=inv, config=conf, data=data)
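# Usage sketch (assumes a config.yaml describing the inventory; the file name
# and task below are placeholders, not part of this module):
#     nr = InitNornir(config_file="config.yaml", dry_run=True)
#     result = nr.run(task=my_task)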
``` |
{
"source": "Jimmers2001/QR-Bar_Workshop",
"score": 4
} |
#### File: Jimmers2001/QR-Bar_Workshop/Barcode_creation.py
```python
from barcode import EAN13 # a type of format
from barcode.writer import ImageWriter #to save the barcode as a png file
def barcode_create(file_name, digits):
with open(file_name, 'wb') as f: #write in binary mode
#numbers written in barcode where the last one is automatically generated
EAN13(digits, writer=ImageWriter()).write(f)
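# Note: EAN-13 encodes 12 data digits plus an automatically computed check
# digit; e.g. the 12-digit input "590123412345" renders as 5901234123457.
# Depending on the python-barcode version, longer inputs may be truncated to
# the first 12 digits or rejected.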
if __name__ == "__main__":
#Command to run: python3 Barcode_creation.py
file_name = "workshop_barcode.png"
digits = "346297563925697"
barcode_create(file_name, digits)
``` |
{
"source": "jimmerson17/podcast-downloader",
"score": 3
} |
#### File: jimmerson17/podcast-downloader/multixmldownloader.py
```python
import sys, os
import requests
from multiprocessing import Pool, cpu_count
from functools import partial
import xml.etree.ElementTree as ET
class PodcastListing(object):
def __init__(self, name, url):
self._name = name
self._url = url
@property
def name(self):
return self._name
@name.setter
def name(self, n):
self._name = n
@property
def url(self):
return self._url
@url.setter
def url(self, u):
self._url = u
def download_zip(listing, filePath):
"""Fetch one podcast feed and save it as <name>.xml; filePath is unused here but kept for the partial() binding below."""
try:
print("starting to get {}".format(listing.name))
file_name = '{}.xml'.format(listing.name)
response = requests.get(listing.url)
with open(file_name, 'wb') as foutput:
foutput.write(response.content)
print("done getting {}".format(listing.name))
except Exception as e:
print(e)
if __name__ == "__main__":
filePath = os.path.dirname(os.path.abspath(__file__))
print(filePath)
if (len(sys.argv) > 1):
rootXml = ET.parse('{}/{}'.format(filePath, sys.argv[1]))
else:
rootXml = ET.parse('{}/input.xml'.format(filePath))
root = rootXml.getroot()
podcastList = []
for outline in root.iter('outline'):
podcastList.append(PodcastListing(outline.attrib['text'], outline.attrib['xmlUrl']))
print("filePath is %s " % filePath)
# sys.path.append(filePath) # why do you need this?
print("There are {} CPUs on this machine ".format(cpu_count()))
pool = Pool(cpu_count())
download_func = partial(download_zip, filePath = filePath)
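# `partial` pre-binds the filePath keyword, so pool.map only has to supply each
# PodcastListing; note that the workers write <name>.xml into the current
# working directory.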
results = pool.map(download_func, podcastList)
pool.close()
pool.join()
print("k done")
``` |
{
"source": "jimmeycool/ASCII_Webcam",
"score": 4
} |
#### File: jimmeycool/ASCII_Webcam/ascii_webcam.py
```python
import os
import argparse
from .webcam import Webcam, run
parser = argparse.ArgumentParser(description='''
ASCII Console Webcam
Converts the terminal into a webcam display using only
ASCII characters. You can specify the dimensions, which
camera to use, and how to clear the window.
''')
parser.add_argument('-w',
'--width',
type=int,
default=80,
help="The width in charaters of the screen")
parser.add_argument('-H',
'--height',
type=int,
default=30,
help="The height in lines of the screen")
parser.add_argument('-c',
'--clear',
type=str,
default=None,
help="Override the clear console command to specified")
parser.add_argument(
'-d',
'--deviceid',
type=int,
default=0,
help="Camera id to use, 0 is system default, rest are other cameras")
args = parser.parse_args()
def _get_clear() -> str:
"""
Gets the clear command for the printer
Returns:
str: clear command
"""
return args.clear or ('cls' if os.name == 'nt' else 'clear')
def main():
"""
Process the args and run the webcam
"""
with Webcam(args.deviceid).start() as cam:
run(cam, args.width, args.height, _get_clear())
if __name__ == '__main__':
main()
```
#### File: ASCII_Webcam/webcam/ascii_driver.py
```python
import os
import sys
from typing import Iterable
import cv2
import numpy as np
from webcam.webcam import Webcam
ASCII_MAP = " .'`^\",:;Il!i><~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
"""
A list of the range of ascii brightness values, starting from
0 brightness to full brightness
"""
def _create_ascii(numbers: Iterable[Iterable[int]]) -> str:
"""
Converts the list of numbers into a string of ascii
chars.
Args:
numbers (Iterable[Iterable[int]]): 2D array of numbers to convert
Returns:
str: String of ascii chars
"""
return os.linesep.join(
[''.join([ASCII_MAP[num] for num in row]) for row in numbers])
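# Each value indexes ASCII_MAP, so 0 renders as a space (darkest) and
# len(ASCII_MAP) - 1 renders as '$' (brightest); rows are joined with os.linesep.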
def print_ascii(numbers: Iterable[Iterable[int]], clear: str) -> None:
"""
Clears the screen and prints the 2D array of numbers as ASCII characters.
Args:
numbers (Iterable[Iterable[int]]): 2D array of numbers to convert
clear (str): The clear command for clearing the screen
"""
os.system(clear)
sys.stdout.flush()
sys.stdout.write(_create_ascii(numbers))
def run(cam: Webcam, width: int, height: int, clear: str):
"""
Runs the ASCII webcam
Args:
cam (Webcam): Connection to the webcam
width (int): Width of screen
height (int): Height of screen
clear (str): The clear command for clearing the screen
"""
while True:
grayscale = cam.grey_frame(width, height)
# init ASCII array with normalized grey img
normalized = cv2.normalize(grayscale, np.zeros((width, height)), 0,
len(ASCII_MAP) - 1, cv2.NORM_MINMAX)
print_ascii(normalized, clear)
sys.stdout.write(
f'{os.linesep}{os.linesep}Ctrl+C to stop...{os.linesep}')
if cv2.waitKey(1) & 0xFF == ord('q'):
break
``` |
{
"source": "jim-meyer/tensorflow-onnx",
"score": 3
} |
#### File: tensorflow-onnx/tf2onnx/convert.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import tensorflow as tf
from tf2onnx.graph import GraphUtil
from tf2onnx.tfonnx import process_tf_graph, tf_optimize
from . import constants, loader, logging, utils
# pylint: disable=unused-argument
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input from graphdef")
parser.add_argument("--graphdef", help="input from graphdef")
parser.add_argument("--saved-model", help="input from saved model")
parser.add_argument("--checkpoint", help="input from checkpoint")
parser.add_argument("--output", help="output model file")
parser.add_argument("--inputs", help="model input_names")
parser.add_argument("--outputs", help="model output_names")
parser.add_argument("--opset", type=int, default=None, help="opset version to use for onnx domain")
parser.add_argument("--custom-ops", help="list of custom ops")
parser.add_argument("--extra_opset", default=None,
help="extra opset with format like domain:version, e.g. com.microsoft:1")
parser.add_argument("--target", default=",".join(constants.DEFAULT_TARGET), choices=constants.POSSIBLE_TARGETS,
help="target platform")
parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
parser.add_argument("--debug", help="debug mode", action="store_true")
parser.add_argument("--fold_const", help="enable tf constant_folding transformation before conversion",
action="store_true")
# experimental
parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
# deprecated, going to be removed some time in the future
parser.add_argument("--unknown-dim", type=int, default=-1, help="default for unknown dimensions")
args = parser.parse_args()
args.shape_override = None
if args.input:
# for backward compatibility
args.graphdef = args.input
if args.graphdef or args.checkpoint:
if not args.input and not args.outputs:
raise ValueError("graphdef and checkpoint models need to provide inputs and outputs")
if not any([args.graphdef, args.checkpoint, args.saved_model]):
raise ValueError("need input as graphdef, checkpoint or saved_model")
if args.inputs:
args.inputs, args.shape_override = utils.split_nodename_and_shape(args.inputs)
if args.outputs:
args.outputs = args.outputs.split(",")
if args.inputs_as_nchw:
args.inputs_as_nchw = args.inputs_as_nchw.split(",")
if args.target:
args.target = args.target.split(",")
if args.extra_opset:
tokens = args.extra_opset.split(':')
if len(tokens) != 2:
raise ValueError("invalid extra_opset argument")
args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))]
return args
def default_custom_op_handler(ctx, node, name, args):
node.domain = constants.TENSORFLOW_OPSET.domain
return node
def main():
args = get_args()
logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
if args.debug:
utils.set_debug_mode(True)
# override unknown dimensions from -1 to 1 (aka batchsize 1) since not every runtime does
# support unknown dimensions.
utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim
extra_opset = args.extra_opset or []
custom_ops = {}
if args.custom_ops:
# default custom ops for tensorflow-onnx are in the "tf" namespace
custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
extra_opset.append(constants.TENSORFLOW_OPSET)
# get the frozen tensorflow model from graphdef, checkpoint or saved_model.
if args.graphdef:
graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
model_path = args.graphdef
if args.checkpoint:
graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
model_path = args.checkpoint
if args.saved_model:
graph_def, inputs, outputs = loader.from_saved_model(args.saved_model, args.inputs, args.outputs)
model_path = args.saved_model
# todo: consider to enable const folding by default?
graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(graph_def, name='')
with tf.Session(graph=tf_graph):
g = process_tf_graph(tf_graph,
continue_on_error=args.continue_on_error,
target=args.target,
opset=args.opset,
custom_op_handlers=custom_ops,
extra_opset=extra_opset,
shape_override=args.shape_override,
input_names=inputs,
output_names=outputs,
inputs_as_nchw=args.inputs_as_nchw)
model_proto = g.make_model("converted from {}".format(model_path))
new_model_proto = GraphUtil.optimize_model_proto(model_proto)
if new_model_proto:
model_proto = new_model_proto
else:
print("NON-CRITICAL, optimizers are not applied successfully")
# write onnx graph
if args.output:
utils.save_protobuf(args.output, model_proto)
print("\nComplete successfully, the onnx model is generated at " + args.output)
if __name__ == "__main__":
main()
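# Typical invocation (paths are placeholders):
#   python -m tf2onnx.convert --saved-model ./my_model --output model.onnx --opset 10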
```
#### File: tf2onnx/optimizer/__init__.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import traceback
from collections import OrderedDict
from tf2onnx.optimizer.const_fold_optimizer import ConstFoldOptimizer
from tf2onnx.optimizer.identity_optimizer import IdentityOptimizer
from tf2onnx.optimizer.merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer
from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
# pylint: disable=missing-docstring, broad-except
# optimizer sequence need to be considered carefully
_optimizers = OrderedDict([
("transpose_opt", TransposeOptimizer),
("fold_const", ConstFoldOptimizer),
# merge_duplicated_nodes should be used after transpose_opt
# because transpose_opt may leave transpose nodes that can be merged
("merge_duplicated_nodes", MergeDuplicatedNodesOptimizer),
("identity_opt", IdentityOptimizer),
])
def optimize_graph(graph):
try:
opts = _get_optimizers()
for opt in opts.values():
graph = opt().optimize(graph)
graph.update_proto()
return graph
except Exception:
# degradation to non-optimized model proto
type_, value_, traceback_ = sys.exc_info()
ex_ext = traceback.format_exception(type_, value_, traceback_)
print("NON-CRITICAL error in optimizer: ", ex_ext)
return None
def _get_optimizers():
return _optimizers
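# Note: optimize_graph returns None on any exception; callers such as
# tf2onnx.convert treat that as "keep the unoptimized model proto".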
```
#### File: tensorflow-onnx/tf2onnx/schemas.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from onnx import defs
from . import constants
class OnnxOpSchema(object):
"""Wrapper for Onnx schema."""
def __init__(self, name, domain, since_version, attributes):
"""Create a Onnx schema
Args:
name (str): op name
attributes (List[str]): valid attributes
domain (str): default value "" means it's Onnx domain
since_version (int): opset version, default is 1
"""
self._name = name
self._domain = domain
self._attributes = attributes
self._since_version = since_version
@property
def attributes(self):
return self._attributes
@property
def domain(self):
return self._domain
@property
def name(self):
return self._name
@property
def since_version(self):
return self._since_version
@staticmethod
def from_onnx_schema(onnx_schema):
name = onnx_schema.name
domain = onnx_schema.domain
since_version = int(onnx_schema.since_version)
attributes = onnx_schema.attributes
return OnnxOpSchema(name, domain, since_version, attributes)
def has_attribute(self, attr):
return attr in self.attributes
def _register_all_schemas_with_history():
"""Register all schemas with history"""
onnx_schemas = defs.get_all_schemas_with_history()
name_domain_version_schema_map = defaultdict(lambda: defaultdict(dict))
for s in onnx_schemas:
schema = OnnxOpSchema.from_onnx_schema(s)
name_domain_version_schema_map[schema.name][schema.domain][schema.since_version] = schema
ordered_map = defaultdict(lambda: defaultdict(OrderedDict))
for name, domain_version_schema_map in name_domain_version_schema_map.items():
for domain, version_schema_map in domain_version_schema_map.items():
ordered_map[name][domain] = OrderedDict(
sorted(version_schema_map.items(), key=lambda x: -x[0])
)
return ordered_map
def _parse_domain_opset_versions(schemas):
""" Get max opset version among all schemas within each domain. """
domain_opset_versions = dict()
for domain_version_schema_map in schemas.values():
for domain, version_schema_map in domain_version_schema_map.items():
# version_schema_map is sorted by since_version in descending order
max_version = next(iter(version_schema_map))
if domain not in domain_opset_versions:
domain_opset_versions[domain] = int(max_version)
else:
domain_opset_versions[domain] = max(domain_opset_versions[domain], int(max_version))
return domain_opset_versions
# format is <OpName, <Domain, <SinceVersion, OpSchema>>>
# SinceVersion is sorted from high to low
_schemas = _register_all_schemas_with_history()
_domain_opset_versions = _parse_domain_opset_versions(_schemas)
def get_schema(name, max_inclusive_opset_version, domain=None):
"""Get schema by name within specific version."""
domain = domain or constants.ONNX_DOMAIN
domain_version_schema_map = _schemas[name]
version_schema_map = domain_version_schema_map[domain]
for version, schema in version_schema_map.items():
if version <= max_inclusive_opset_version:
return schema
return None
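# Example: get_schema("Conv", 9) walks the per-domain map (sorted newest first)
# and returns the newest "Conv" schema whose since_version is <= 9.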
def get_max_supported_opset_version(domain=None):
"""Get max supported opset version by current onnx package given a domain."""
domain = domain or constants.ONNX_DOMAIN
return _domain_opset_versions.get(domain, None)
``` |
{
"source": "jim-meyer/tensorflow",
"score": 2
} |
#### File: python/framework/convert_to_constants_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import simple_save
from tensorflow.python.saved_model.load import load
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
class VariablesToConstantsTest(test.TestCase):
def _hasStatefulPartitionedCallOp(self, graph_def):
"""Determines if a StatefulPartitionedCall op exists in the graph."""
for node in graph_def.node:
if node.op == "StatefulPartitionedCall":
return True
return False
def _getNumVariables(self, graph_def):
"""Returns the number of ReadVariableOp in the graph."""
return sum(node.op == "ReadVariableOp" for node in graph_def.node)
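# ReadVariableOp nodes only exist while resource variables are still variables;
# after a successful conversion to constants the tests expect this count to be zero.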
def _testConvertedFunction(self, obj, func, converted_concrete_func,
input_data):
# Check that the converted ConcreteFunction produces the same result as the
# original Function.
expected_value = nest.flatten(func(**input_data))
actual_value = nest.flatten(converted_concrete_func(**input_data))
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(expected.numpy(), actual.numpy())
# Ensure the shape is retained.
for tensor in converted_concrete_func.inputs:
actual_shape = input_data[tensor.name.split(":")[0]].shape
self.assertEqual(tensor.shape, actual_shape)
# Save the converted ConcreteFunction as a signature.
save_dir = os.path.join(self.get_temp_dir(), "frozen_saved_model")
root = tracking.AutoTrackable()
root.f = converted_concrete_func
save(root, save_dir, {"mykey": converted_concrete_func})
# Load it back and make sure it works.
loaded_obj = load(save_dir)
actual_value = nest.flatten(loaded_obj.signatures["mykey"](**input_data))
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(expected.numpy(), actual.numpy())
@test_util.run_v2_only
def testConstSavedModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = {"x": constant_op.constant(1., shape=[1])}
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data["x"])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(variable_graph_def))
self.assertTrue(variable_graph_def.library.function)
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(constant_graph_def.library.function)
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables."""
input_data = {"x": constant_op.constant(1., shape=[1])}
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testScalarModel(self):
"""Test a basic model with Variables."""
input_data = {"x": constant_op.constant(1., shape=[])}
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testVariableSavedModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
input_data = {"x": constant_op.constant(1., shape=[1])}
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data["x"])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertTrue(self._hasStatefulPartitionedCallOp(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Test a basic model with Variables."""
class BasicModel(tracking.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
input_data = {"x": constant_op.constant(1., shape=[1])}
root = BasicModel()
input_func = root.add.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(1, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.add, output_func, input_data)
@test_util.run_v2_only
def testKerasModel(self):
input_data = constant_op.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = keras.models.Sequential(
[keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer="sgd", loss="mean_squared_error")
model.fit(x, y, epochs=1)
# Get the concrete function from the Keras model.
@def_function.function
def to_save(x):
return model(x)
input_func = to_save.get_concrete_function(input_data)
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, self._getNumVariables(variable_graph_def))
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
# Check value.
expected_value = to_save(input_data)
actual_value = nest.flatten(output_func(input_data))
self.assertEqual(expected_value.numpy(), actual_value)
def _singleMetaGraphSavedModel(self):
export_graph = ops.Graph()
with export_graph.as_default():
start = array_ops.placeholder(
shape=[1, 1], dtype=dtypes.float32, name="start")
distractor = variables.RefVariable(-1., name="distractor")
v = variables.RefVariable(3., name="v")
local_variable = variables.VariableV1(
1.,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
trainable=False,
use_resource=True)
output = array_ops.identity(start * v * local_variable, name="output")
with session_lib.Session() as session:
session.run([v.initializer, distractor.initializer,
local_variable.initializer])
path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
simple_save.simple_save(
session,
path,
inputs={"start": start},
outputs={"output": output},
legacy_init_op=local_variable.initializer)
return path
@test_util.run_v2_only
def testRefVariableImport(self):
saved = self._singleMetaGraphSavedModel()
imported = load(saved)
fn = imported.signatures["serving_default"]
output_func = convert_to_constants.convert_variables_to_constants_v2(fn)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
input_data = {"start": constant_op.constant(1., shape=[1, 1])}
root = tracking.AutoTrackable()
self._testConvertedFunction(root, fn, output_func, input_data)
@test_util.run_v2_only
def testControlFlow(self):
input_data = {
"x": constant_op.constant([1., 2.], shape=[1, 2]),
"b": constant_op.constant(True)
}
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
def true_fn(x):
return math_ops.matmul(x, weights)
def false_fn(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
])
def model(x, b):
return control_flow_ops.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
root = tracking.AutoTrackable()
root.f = model
input_func = root.f.get_concrete_function()
input_func(**input_data)
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func, lower_control_flow=False)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
@test_util.run_v2_only
def testStaticRnn(self):
input_data = {
"x":
constant_op.constant(
np.array(np.random.random_sample((3, 10)), dtype=np.float32))
}
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32)
])
def model(x):
seq = array_ops.split(x, 3, 0)
return rnn.static_rnn(
cell, seq, dtype=dtypes.float32, sequence_length=[1])
root = tracking.AutoTrackable()
root.f = model
input_func = root.f.get_concrete_function()
output_func = convert_to_constants.convert_variables_to_constants_v2(
input_func, lower_control_flow=False)
constant_graph_def = output_func.graph.as_graph_def()
self.assertEqual(0, self._getNumVariables(constant_graph_def))
self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))
self._testConvertedFunction(root, root.f, output_func, input_data)
if __name__ == "__main__":
test.main()
``` |
{
"source": "jimmg35/Sensor_Crawling",
"score": 3
} |
#### File: jimmg35/Sensor_Crawling/minuteData.py
```python
import requests
import sys
import json
from typing import List, Dict
import pandas as pd
# import main functionality
from src.dbcontext import Dbcontext, Storer
from src.utils import UrlBundler, Key
from src.requester import Requester
from src.parser import Parser
# projects = ['528','671','672','673','674','675',
# '677','678','680','709','756','1024','1025',
# '1027','1029','1032','1034','1035','1036',
# '1048','1058','1071','1072','1075','1079',
# '1084','1085','1102','1120','1145','1147',
# '1162','1167','1184','1189','1192','1207']
projects = ['1024']
item = ['voc', 'pm2_5', 'humidity', 'temperature']
class TimeStamper():
def build(self, chunk):
return chunk[0] + "-" + chunk[1] + "-" + chunk[2] + " " + chunk[3] + ":" + chunk[4] + ":" + chunk[5]
def build_minute(self, chunk):
return chunk[0] + "-" + chunk[1] + "-" + chunk[2] + " " + chunk[3] + ":" + chunk[4]
def parse(self, stamp):
date = stamp.split(' ')[0]
time = stamp.split(' ')[1]
year = date.split('-')[0]
month = date.split('-')[1]
day = date.split('-')[2]
hour = time.split(":")[0]
minute = time.split(":")[1]
second = time.split(":")[2]
return [year,month,day,hour,minute,second]
def parse_minute(self, stamp):
date = stamp.split(' ')[0]
time = stamp.split(' ')[1]
year = date.split('-')[0]
month = date.split('-')[1]
day = date.split('-')[2]
hour = time.split(":")[0]
minute = time.split(":")[1]
return [year,month,day,hour,minute]
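# build()/parse() round-trip "YYYY-MM-DD HH:MM:SS.mmm" timestamps; the *_minute
# variants use the same layout without the seconds field.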
start_month = 1
if __name__ == "__main__":
# initializer timer
ts = TimeStamper()
# initialize basic object.
myKey = Key()
myBundler = UrlBundler()
myReq = Requester(myBundler, myKey)
# initialize dbcontext
myDBcontext = Dbcontext({"user":str(sys.argv[1]),
"password":str(<PASSWORD>[2]),
"host":str(sys.argv[3]),
"port":str(sys.argv[4])}, "sensordata")
myStorage = Storer(myDBcontext)
for project in projects[0:1]:
data = myDBcontext.queryMinuteMetadata(project)
for device in data: #[0:1]
# build the time stamp
start = ts.build(["2021", "01", "01", "00", "00", "00.000"])
end = ts.build(["2021", "01", "31", "23", "59", "00.000"])
compare = ts.build_minute(["2021", "01", "31", "23", "59"])
# request for data of a device in a time interval
resp = myReq.getMinuteDataOfProject_interval_device(device[0], device[1], start, end, compare, ts)
if resp is None: # the request failed
continue
else: # the request succeeded
data_s, date, time, deviceid = Parser.parseMinuteData(resp, ts)
myDBcontext.ImportMinuteData(deviceid, data_s, date, time, project, start_month)
print("-- {} import complete".format(device))
print("--- {} project complete".format(project))
print("=======================================")
```
#### File: Sensor_Crawling/src/dbcontext.py
```python
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Storer():
"""
Storing processed data from Parser.
"""
data_list = []
storage = {}
def __init__(self, dbcontext):
self.dbcontext = dbcontext
def insert(self, data, name: str):
self.storage[name] = data
self.data_list.append(name)
def import2Database(self, item: str, y=None, sm=None, em=None):
if item == "ProjectData" and self.importGate(item):
self.dbcontext.ImportProjectMeta(self.storage[item])
if item == "DeviceMeta" and self.importGate(item):
self.dbcontext.ImportDeviceMeta(self.storage[item])
if item == "SensorMeta" and self.importGate(item):
self.dbcontext.ImportSensorMeta(self.storage[item])
if item == "FixedData" and self.importGate(item):
self.dbcontext.ImportFixedSensorData(self.storage[item], y, sm, em)
def importGate(self, item):
# list.index() raises ValueError for a missing item, so test membership instead
if item in self.data_list:
return True
else:
print("Data is not accessible!")
return False
class Dbcontext():
"""
Importing data into database.
"""
def __init__(self, PGSQL_user_data, database):
# PostgreSQL server variable.
self.PGSQL_user_data = PGSQL_user_data
# Connect to local Postgresql server.
self.cursor = self.ConnectToDatabase(database)
def ConnectToDatabase(self, database):
"""
Connect to PostgreSQL database.
"""
conn = psycopg2.connect(database=database,
user=self.PGSQL_user_data["user"],
password=self.PGSQL_user_data["password"],
host=self.PGSQL_user_data["host"],
port=self.PGSQL_user_data["port"])
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected to local PostgreSQL server| User: @{self.PGSQL_user_data["user"]}')
print(f' Currently connected to database: @{database}')
cursor = conn.cursor()
return cursor
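# autocommit + ISOLATION_LEVEL_AUTOCOMMIT means every execute() is committed
# immediately, so the import methods below never call conn.commit() themselves.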
def ImportProjectMeta(self, projectMeta):
"""
Import project metadata into the database.
"""
for projID in list(projectMeta.keys()):
keys_arr: str = "'{"
for index, i in enumerate(projectMeta[projID]["keys"]):
if index == (len(projectMeta[projID]["keys"])-1):
keys_arr += '"' + str(i) + '"' + "}'"
break
keys_arr += '"' + str(i) + '"' + ','
query = '''INSERT INTO projectmeta (projectid, projectname, projectkeys)
VALUES({}, \'{}\', {});'''.format(str(projID),
projectMeta[projID]["name"],
keys_arr)
self.cursor.execute(query)
print("Project Metadata has been stored into database!")
def ImportDeviceMeta(self, deviceMeta):
"""
Import device meta into database.
"""
column_str = "("
query = "select column_name from information_schema.columns where table_name = 'devicemeta';"
self.cursor.execute(query)
column = [i[0] for i in self.cursor.fetchall()]
for index, i in enumerate(column):
if index == (len(column)-1):
column_str += i + ")"
break
column_str += i + ","
for index, i in enumerate(deviceMeta):
values = self.bulidDeviceMetaQuery(i, index)
query = "INSERT INTO devicemeta " + column_str + values
self.cursor.execute(query)
print("Device Metadata has been stored into database!")
def ImportSensorMeta(self, SensorMeta):
"""
Import metadata of sensor of each device into database.
"""
ids = 1
for device in SensorMeta:
sensor_id = "'{"
for index, i in enumerate(device[2]):
if index == (len(device[2])-1):
sensor_id += '"' + str(i) + '"' + "}'"
break
sensor_id += '"' + str(i) + '"' + ','
query = '''INSERT INTO sensormeta (id, deviceid, projectkey, sensor_id)
VALUES({}, \'{}\', \'{}\', {});'''.format(ids, device[0], device[1], sensor_id)
self.cursor.execute(query)
ids += 1
print("Sensor Metadata has been stored into database!")
def bulidDeviceMetaQuery(self, device, count):
"""
Helper function of ImportDeviceMeta();
builds the VALUES(...) clause and handles values containing quote characters.
"""
output = " VALUES(" + str(count) + "," + device["id"] + ","
for index, i in enumerate(list(device.keys())):
if index == (len(list(device.keys())) - 1):
output += "'" + str(device[i]) + "')"
break
if i == "id":
continue
if str(device[i]) == "旗山區公所前'":
output += "'" + "旗山區公所前" + "',"
continue
output += "'" + str(device[i]) + "',"
return output
def queryDeviceSensorMeta_fixed(self):
"""
query specific metadata from database.
"""
query = '''SELECT projectid, projectkey, deviceid, sensor_id FROM sensormeta INNER JOIN projectmeta ON sensormeta.projectkey = ANY(projectmeta.projectkeys) WHERE projectid IN ('528','671','672','673','674',
'675','677','678','680','709','756','1024','1025','1027','1029','1032','1034','1035','1036','1048',
'1058','1071','1072','1075','1079','1084','1085','1102','1120','1145','1147','1162','1167','1184','1189','1192','1207');'''
self.cursor.execute(query)
return self.cursor.fetchall()
def ImportFixedSensorData(self, FixedSensorData, year, start_m, end_m):
print("=================== Import into database ===================")
table_dict = {"1": "minute", "60": "hour"}
for interval in list(FixedSensorData.keys()):
for projectid in list(FixedSensorData[interval].keys()):
# get biggest id in that table
table_name = table_dict[interval] + "_" + projectid + "_" + str(year) + "_" + str(int(start_m)) + "to" + str(int(end_m)+1)
print(table_name)
if self.getBiggestId(table_name) == None:
id_for_proj = 1
else:
id_for_proj = self.getBiggestId(table_name) + 1
# insert data into table
for a_row in FixedSensorData[interval][projectid]:
try:
query = '''INSERT INTO {} (id, deviceid,
voc_avg, voc_max, voc_min, voc_median,
pm2_5_avg, pm2_5_max, pm2_5_min, pm2_5_median,
humidity_avg, humidity_max, humidity_min, humidity_median,
temperature_avg, temperature_max, temperature_min, temperature_median,
year, month, day, hour, minute, second, time)
VALUES({},\'{}\',{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},\'{}\');
'''.format(table_name, id_for_proj, a_row[0], a_row[1], a_row[2], a_row[3], a_row[4],a_row[5], a_row[6], a_row[7], a_row[8],
a_row[9], a_row[10], a_row[11], a_row[12],a_row[13], a_row[14], a_row[15], a_row[16], a_row[17], a_row[18], a_row[19],
a_row[20],a_row[21], a_row[22], a_row[23])
self.cursor.execute(query)
id_for_proj += 1
except Exception:
print("insert exception at -> interval:{} projectid:{} ".format(interval, projectid))
print("insert complete -> {}".format(table_name))
def getBiggestId(self, table_name):
query = '''SELECT max(id) FROM {};'''.format(table_name)
self.cursor.execute(query)
return self.cursor.fetchall()[0][0]
def queryDeviceSensorMeta_spacial(self):
query = '''SELECT projectid, projectkey, deviceid FROM devicemeta WHERE projectid IN ('1156', '565', '624', '891');'''
self.cursor.execute(query)
return self.cursor.fetchall()
def queryMinuteMetadata(self, project):
query = '''SELECT deviceid, projectkey FROM sensormeta INNER JOIN
projectmeta ON sensormeta.projectkey =
ANY(projectmeta.projectkeys) WHERE projectid = '{}';'''.format(project)
self.cursor.execute(query)
data = self.cursor.fetchall()
return [[i[0], i[1]]for i in data]
def ImportMinuteData(self, deviceid, data, date, time, project, start_month):
""" 將時間區段內的一台感測器資料輸入至資料庫 """
table_name = "minute_{}_{}to{}".format(project, start_month, start_month+1)
if self.getBiggestId(table_name) == None:
ids = 1
else:
ids = self.getBiggestId(table_name) + 1
for i in range(0, len(deviceid)):
query = '''INSERT INTO {} (id, deviceid, voc, pm2_5, humidity, temperature, date, hour, minute, second)
VALUES({}, \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\');
'''.format(table_name, ids, deviceid[i], data[i]["voc"], data[i]["pm2_5"],
data[i]["humidity"], data[i]["temperature"],
date[i], time[i][0], time[i][1], time[i][2])
self.cursor.execute(query)
ids += 1
def launchPatch(self):
queries = ['''DELETE FROM devicemeta WHERE projectid
NOT IN ('528','671','672','673','674',
'675','677','678','680','709',
'756','1024','1025','1027','1029',
'1032','1034','1035','1036','1048',
'1058','1071','1072','1075','1079',
'1084','1085','1102','1120','1145',
'1147','1162','1167','1184','1189',
'1192','1207','1156','565','624','891');''']
for index, i in enumerate(queries):
print("Patch {} has been applied to database!".format(index))
self.cursor.execute(i)
``` |
{
"source": "jimmg35/Sensor_Crawling_v2",
"score": 3
} |
#### File: dbcontext/Module/construct.py
```python
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class Constructor():
def __init__(self, DB_list, DB_detail, PGSQLDetail):
# Databases need to be constructed
self.DB_list = DB_list
# Information of Databases and Tables
self.DB_detail = DB_detail
# PostgreSQL server variable
self.user = PGSQLDetail['user']
self.password = PGSQLDetail['password']
self.host = PGSQLDetail['host']
self.port = PGSQLDetail['port']
# Connect to PostgreSQL
self.cursor = self.ConnectToPGSQL(self.user, self.password, self.host, self.port)
# Kill query
self.Kill = '''SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND datname = 'template1';'''
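# CREATE DATABASE clones template1 and fails while other sessions are connected
# to it, which is why the terminate query above targets datname = 'template1'.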
def ConnectToPGSQL(self, user, password, host, port):
'''Connect to PostgreSQL'''
conn = psycopg2.connect(user=user,password=password,host=host,port=port)
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected | User:{user}')
cursor = conn.cursor()
return cursor
def ConnectToDatabase(self, database, user, password, host, port):
'''Connect to Database'''
conn = psycopg2.connect(database = database, user=user,password=password,host=host,port=port)
conn.autocommit = True
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
print(f'Successfully connected | User:{user}')
cursor = conn.cursor()
return cursor
def constructDatabases(self):
'''Create Databases'''
print("Initializing Databases...")
self.cursor.execute(self.Kill)
for DB in self.DB_list:
self.cursor.execute("SELECT 1 FROM pg_catalog.pg_database WHERE datname = \'{}\'".format(DB))
exists = self.cursor.fetchone()
if exists == None:
# Create Database
self.cursor.execute("CREATE DATABASE {}".format(DB))
print("Database {} has been created!".format(DB))
else:
print("Database {} is already existed!".format(DB))
def constructTables(self):
'''Iterate through each database and create tables'''
for DB in self.DB_detail.keys():
temp_cursor = self.ConnectToDatabase(DB,self.user,self.password,self.host,self.port)
for table in self.DB_detail[DB].keys():
query = self.TableBuilder(DB, table)
temp_cursor.execute(query)
print("Table {} has been created in {}".format(table, DB))
def TableBuilder(self, DB, table):
'''Helper function of constructTables; builds the CREATE TABLE statement'''
query_head = '''CREATE TABLE {} '''.format(table)
query_head += self.DB_detail[DB][table]
#print(query_head)
return query_head
``` |
{
"source": "jimmi2051/Lab_Erp",
"score": 2
} |
#### File: Lab_Erp/website/cart.py
```python
from django.http import HttpResponse
from cart.cart import Cart
from website.models import *
import json
def add(request, id_product):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
product = Product.objects.get(id=id_product)
if product.type_product == False:
id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
else:
return HttpResponse('Vui lòng lựa chọn phiên bản!')
post = Post_Product.objects.filter(product_id_id=id_origin_product, is_lock=False, is_activity=True)
if post.exists():
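# remaining stock = quantity offered in the active post minus units already bought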
availability = post[0].quantity - post[0].bought
price = (product.price * (100 - product.discount_percent))/100
if product not in cart:
if product.account_created_id == request.session.get('user')['id']:
return HttpResponse('Bạn không được phép mua sản phẩm này')
cart.add(product, price=int(round(price, 0)))
return HttpResponse(1)
for x in cart.items:
if x.product.id == product.id:
if x.quantity + 1 > availability:
return HttpResponse('Sản phẩm không đủ số lượng!')
else:
cart.add(product, price=int(round(price, 0)))
return HttpResponse(1)
else:
return HttpResponse('Sản phẩm không hợp lễ!')
def sub(request, id_product):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
product = Product.objects.get(id=id_product)
if product.type_product == False:
id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
else:
return HttpResponse('Vui lòng lựa chọn phiên bản!')
post = Post_Product.objects.filter(product_id_id=id_origin_product, is_lock=False, is_activity=True)
if post.exists():
if product not in cart:
return HttpResponse("Sản phẩm không tồn tại trong giỏ hàng!")
for x in cart.items:
if x.product.id == product.id:
qty = x.quantity - 1
if qty <= 0:
cart.remove(product)
return HttpResponse(1)
else:
cart.set_quantity(product, quantity=qty)
return HttpResponse(1)
else:
return HttpResponse('Sản phẩm không hợp lệ!')
def add_qty(request, id_product, qty):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse(-4)
cart = Cart(request.session)
product = Product.objects.get(id=id_product)
if product.type_product == False:
id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
else:
return HttpResponse(-2)
post = Post_Product.objects.filter(product_id_id=id_origin_product, is_lock=False, is_activity=True)
if post.exists():
availability = post[0].quantity - post[0].bought
price = (product.price * (100 - product.discount_percent))/100
if qty <= 0 or qty > availability:
return HttpResponse(-1)
if product not in cart:
if product.account_created_id == request.session.get('user')['id']:
return HttpResponse(-3)
cart.add(product, quantity=qty, price=int(round(price, 0)))
return HttpResponse("Thêm thành công")
for x in cart.items:
if x.product.id == product.id:
if x.quantity + qty > availability:
return HttpResponse(-1)
else:
cart.set_quantity(product, quantity=(x.quantity + qty))
return HttpResponse("Thêm thành công")
else:
return HttpResponse('Sản phẩm không hợp lễ!')
def set_qty(request, id_product, qty):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
product = Product.objects.get(id=id_product)
if product.type_product == False:
id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
else:
id_origin_product = id_product
post = Post_Product.objects.filter(product_id_id=id_origin_product, is_lock=False, is_activity=True)
if post.exists():
availability = post[0].quantity - post[0].bought
if product not in cart:
return HttpResponse("Sản phẩm không tồn tại trong giỏ hàng!")
if qty <= 0 or qty > availability:
return HttpResponse('Số lượng không hợp lệ!')
cart.set_quantity(product, quantity=qty)
return HttpResponse(1)
else:
return HttpResponse('Sản phẩm không hợp lệ')
def remove(request, id_product):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
product = Product.objects.get(pk=id_product)
cart.remove(product)
return HttpResponse("Đã xóa!")
def show(request):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
for item in cart.items:
print(item.quantity)
return HttpResponse(cart.items)
def count(request):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
return HttpResponse(cart.count)
def clear(request):
# check that the user is logged in
if 'user' not in request.session:
return HttpResponse('Vui lòng đăng nhập để thêm sản phẩm')
cart = Cart(request.session)
cart.clear()
#messages.success(request, message='Danh sách sản phẩm đã được xóa!', extra_tags='alert')
return HttpResponse("Da xoa")
```
#### File: Lab_Erp/website/models.py
```python
from django.db import models
# Create your models here.
class Account(models.Model):
username = models.CharField(max_length=200, unique=True)
email = models.EmailField(unique=True)
password = models.CharField(max_length=200)
name = models.CharField(max_length=100)
birthday = models.DateField(null=True)
sex = models.BooleanField(default=False)
phone = models.CharField(max_length=12, null=True)
id_card = models.CharField(max_length=15, null=True)
address = models.CharField(max_length=200, null=True)
name_shop = models.CharField(max_length=200, null=True)
activity_account = models.BooleanField(default=False)
activity_merchant = models.BooleanField(default=False)
activity_advertiser = models.BooleanField(default=False)
q_post = models.IntegerField(default=0)
q_vip = models.IntegerField(default=0)
code_act_account = models.CharField(max_length=60)
code_act_merchant = models.CharField(max_length=60)
code_act_ads = models.CharField(max_length=60)
token_ghtk = models.CharField(max_length=100, null=True)
is_admin = models.BooleanField(default=False)
created = models.DateTimeField(auto_now=True)
is_lock = models.BooleanField(default=False)
def __str__(self):
return self.email
class Account_Service(models.Model):
account = models.ForeignKey('Account', on_delete=models.CASCADE)
service = models.ForeignKey('Service', on_delete=models.CASCADE)
remain = models.IntegerField(default=0)
class Product (models.Model):
name = models.CharField(max_length=200)
detail = models.TextField(max_length=2000, null=True)
origin = models.CharField(max_length=200)
type_product = models.BooleanField()
price = models.IntegerField()
discount_percent = models.IntegerField(default=0)
code = models.CharField(max_length=200)
is_activity = models.BooleanField(default=True)
archive = models.BooleanField(default=False)
account_created = models.ForeignKey('Account', on_delete=models.CASCADE)
archive_at = models.DateTimeField(null=True)
def __str__(self):
return self.name
class Category (models.Model):
name_category = models.CharField(max_length=200)
quantity = models.IntegerField(default=0)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.name_category
class Product_Category(models.Model):
product_id = models.ForeignKey('Product', on_delete=models.CASCADE)
category_id = models.ForeignKey('Category', on_delete=models.CASCADE)
archive = models.BooleanField(default=False)
archive_at = models.DateTimeField(null=True)
lock = models.BooleanField(default=False)
class Attribute (models.Model):
label = models.CharField(max_length=200)
is_active = models.BooleanField(default=True)
def __str__(self):
return self.label
class Product_Attribute (models.Model):
product_id = models.ForeignKey('Product', on_delete=models.CASCADE)
attribute_id = models.ForeignKey('Attribute', on_delete=models.CASCADE)
value = models.CharField(max_length=200)
archive = models.BooleanField(default=False)
archive_at = models.DateTimeField(null=True)
lock = models.BooleanField(default=False)
class Product_Image(models.Model):
product_id = models.ForeignKey('Product', on_delete=models.CASCADE)
image_id = models.ForeignKey('Image', on_delete=models.CASCADE)
archive = models.BooleanField(default=False)
archive_at = models.DateTimeField(null=True)
class Image (models.Model):
image_link = models.ImageField(upload_to='merchant/product/')
is_default = models.BooleanField(default=False)
user_id = models.ForeignKey('Account', on_delete=models.CASCADE)
class Link_Type(models.Model):
product_id = models.ForeignKey('Product', on_delete=models.CASCADE)
parent_product = models.IntegerField()
class Tag(models.Model):
tag_key = models.CharField(max_length=200)
tag_value = models.CharField(max_length=200)
tag_type = models.IntegerField()
see = models.IntegerField(default=0)
archive = models.BooleanField(default=False)
archive_at = models.DateTimeField(null=True)
class Post_Product (models.Model):
product_id = models.ForeignKey('Product', on_delete=models.CASCADE)
post_type = models.ForeignKey('Service', on_delete=models.CASCADE)
creator_id = models.ForeignKey('Account', on_delete=models.CASCADE)
quantity = models.IntegerField()
expire = models.DateTimeField()
visable_vip = models.BooleanField()
created = models.DateTimeField(auto_now=True)
is_activity = models.BooleanField(default=True)
views = models.IntegerField(default=0)
is_lock = models.BooleanField(default=False)
bought = models.IntegerField(default=0)
# archive = models.BooleanField(default=False)
class Rating (models.Model):
customer = models.ForeignKey('Account', on_delete=models.CASCADE, related_name='Customer')
merchant = models.ForeignKey('Account', on_delete=models.CASCADE, related_name='Merchant')
num_of_star = models.IntegerField()
comment = models.CharField(max_length=2000, null=True)
confirm_bought = models.BooleanField()
is_activity = models.BooleanField(default=True)
def __str__(self):
return self.customer.name
class Rating_Customer (models.Model):
customer = models.ForeignKey('Account', on_delete=models.CASCADE, related_name='cus')
merchant = models.ForeignKey('Account', on_delete=models.CASCADE, related_name='mer')
num_of_star = models.IntegerField()
confirm_bought = models.BooleanField()
is_activity = models.BooleanField(default=True)
def __str__(self):
return self.customer.name
class Order (models.Model):
customer = models.ForeignKey('Account', on_delete=models.CASCADE)
amount = models.IntegerField()
name = models.CharField(max_length=200)
email = models.CharField(max_length=200)
address = models.CharField(max_length=200)
phone = models.CharField(max_length=12)
note = models.CharField(max_length=200, null=True)
CHOICES_STATE = (('1', 'Thành công'), ('0', 'Hủy bỏ'), ('2', 'Đặt hàng'))
state = models.CharField(max_length=1, choices=CHOICES_STATE)
manner = models.BooleanField(default=True) # payment by COD or paypal
is_paid = models.BooleanField()
is_activity = models.BooleanField()
created = models.DateTimeField(auto_now=True)
archive = models.BooleanField()
canceler_id = models.IntegerField(null=True)
class Order_Detail (models.Model):
order = models.ForeignKey('Order', on_delete=models.CASCADE)
product = models.ForeignKey('Product', on_delete=models.CASCADE)
post = models.ForeignKey('Post_Product', on_delete=models.CASCADE)
merchant = models.ForeignKey('Account', on_delete=models.CASCADE)
quantity = models.IntegerField()
price = models.IntegerField()
discount = models.IntegerField()
CHOICES_STATE = (('1', 'Thành công'), ('0', 'Hủy bỏ'), ('2', 'Đặt hàng'), ('3', 'Đang gói hàng'), ('4', 'Đang vận chuyển'))
state = models.CharField(max_length=1, choices=CHOICES_STATE)
confirm_of_merchant = models.BooleanField()
canceler_id = models.IntegerField(null=True)
is_seen =models.BooleanField(default=False)
# class Email_Template(models.Model):
# name_template = models.CharField(max_length=100)
# type_template = models.IntegerField()
# content = models.TextField()
# state = models.BooleanField(default=True)
class Service(models.Model):
service_name = models.CharField(max_length=200)
amount = models.IntegerField()
value = models.IntegerField()
quantity_product = models.IntegerField()
created = models.DateTimeField(auto_now=True)
day_limit = models.IntegerField()
visable_vip = models.BooleanField()
is_active = models.BooleanField(default=True)
archive = models.BooleanField(default=False)
creator_id = models.IntegerField()
canceler_id = models.IntegerField(null=True)
def __str__(self):
return self.service_name
class Purchase_Service(models.Model):
purchase_name = models.CharField(max_length=200)
merchant_id = models.ForeignKey('Account', on_delete=models.CASCADE)
service_id = models.ForeignKey('Service', on_delete=models.CASCADE)
amount = models.FloatField(null=True, blank=True, default=None)
state = models.IntegerField()
success_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
archive = models.BooleanField(default=False)
class Service_Ads(models.Model):
service_name = models.CharField(max_length=200)
position = models.CharField(max_length=200)
amount = models.IntegerField()
created = models.DateTimeField(auto_now=True)
day_limit = models.IntegerField()
is_active = models.BooleanField(default=True)
archive = models.BooleanField(default=False)
creator_id = models.IntegerField()
canceler_id = models.IntegerField(null=True)
class Service_Ads_Post(models.Model):
service_name = models.CharField(max_length=200)
purchase_service_id = models.ForeignKey('Purchase_Service_Ads', on_delete=models.CASCADE)
customer_id = models.ForeignKey('Account',on_delete=models.CASCADE)
image_1 = models.CharField(max_length=200)
image_1_url = models.CharField(max_length=200)
image_1_content = models.CharField(max_length=200)
image_2 = models.CharField(max_length=200,blank=True,null=True)
image_2_url = models.CharField(max_length=200,blank=True,null=True)
image_2_content = models.CharField(max_length=200,blank=True,null=True)
image_3 = models.CharField(max_length=200,blank=True,null=True)
image_3_url = models.CharField(max_length=200,blank=True,null=True)
image_3_content = models.CharField(max_length=200,blank=True,null=True)
CHOICES_STATE = (('1','IsPosting'),('2','IsConfirm'),('0','Cancel'))
state = models.CharField(max_length=1, choices=CHOICES_STATE)
class Purchase_Service_Ads(models.Model):
purchase_name = models.CharField(max_length=200)
merchant_id = models.ForeignKey('Account', on_delete=models.CASCADE)
service_ads_id = models.ForeignKey('Service_Ads',on_delete=models.CASCADE)
amount = models.IntegerField()
CHOICES_STATE = (('1', 'Success'), ('0', 'Cancel'), ('2','IsPosted'), ('3','IsConfirmed'),('4', 'IsActiving'),('5','Expired'))
state = models.CharField(max_length=1, choices=CHOICES_STATE)
date_start = models.DateTimeField(blank=True,null=True)
success_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
archive = models.BooleanField(default=False)
# After-sales / customer-loyalty gifts
class Account_Gift(models.Model):
account = models.ForeignKey('Account', on_delete=models.CASCADE)
is_10 = models.BooleanField(default=False)
is_50 = models.BooleanField(default=False)
is_100 = models.BooleanField(default=False)
is_daily_7 = models.BooleanField(default=False)
is_daily_15 = models.BooleanField(default=False)
```
#### File: Lab_Erp/website/views.py
```python
import random
from django.shortcuts import render, redirect
from .models import *
from random import randint
from django.db.models import Sum
# Create your views here.
from django.http import HttpResponse
from passlib.hash import pbkdf2_sha256
from sender import Mail, Message
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages
from cart.cart import Cart
def check_session(request):
if 'user' in request.session:
return 1
return 0
def role_user_session(account):
# account = Account.objects.get(email=email)
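    # Role codes collected below (based on the flags checked in this function):
    # 0 = admin, 1 = activated customer account, 2 = merchant, 3 = advertiser.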
role = []
if account.is_admin:
role.append(0)
if account.activity_account:
role.append(1)
if account.activity_merchant:
role.append(2)
if account.activity_advertiser:
role.append(3)
return role
def index (request):
return render(request, 'website/index.html')
def login (request):
if check_session(request):
messages.warning(request, message='Lỗi! Bạn đã đăng nhập hệ thống!', extra_tags='alert')
return redirect('/')
if request.method == 'POST':
email = request.POST.get('inputEmail')
password = request.POST.get('inputPassword')
try:
account = Account.objects.get(email=email)
if pbkdf2_sha256.verify(password, account.password):
if account.activity_account == True:
if account.activity_merchant == True or account.activity_advertiser == True or account.is_admin == True:
messages.warning(request, message='Tài khoản không tồn tại!', extra_tags='alert')
return redirect('/login')
request.session['user'] = {
'id': account.id,
'email': account.email,
'role': role_user_session(account),
}
messages.success(request, message='Đăng nhập thành công!', extra_tags='alert')
return redirect('/customer/profile')
else:
messages.warning(request, message='Vui lòng xác nhận email!', extra_tags='alert')
return redirect('/login')
messages.warning(request, message='Mật khẩu không đúng!', extra_tags='alert')
return redirect('/login')
except ObjectDoesNotExist:
messages.warning(request, message='Email không tồn tại!', extra_tags='alert')
return redirect('/login')
return
return render(request, 'website/login.html')
def logout (request):
if check_session(request):
del request.session['user']
messages.success(request, message='Đăng xuất thành công!', extra_tags='alert')
return redirect('/')
else:
messages.warning(request, message='Lỗi! Bạn chưa đăng nhập', extra_tags='alert')
return redirect('/')
def register (request):
if check_session(request):
messages.warning(request, message='Lỗi! Bạn đã đăng nhập vào hệ thống', extra_tags='alert')
return redirect('/')
return render(request, 'website/register.html')
def activity_account(request, email, code):
try:
account = Account.objects.get(email=email)
if account.activity_account == 1:
messages.warning(request, message='Lỗi! Tài khoản của bạn đã kích hoạt', extra_tags='alert')
return redirect('/')
else:
if account.code_act_account == code:
account.activity_account = True
account.save()
request.session['user'] = {
'id': account.id,
'email': account.email,
'role': role_user_session(account),
}
messages.success(request, message='Tài khoản của bạn đã kích hoạt!', extra_tags='alert')
return redirect('/')
else:
messages.warning(request, message='Lỗi! Mã code không hợp lệ!!', extra_tags='alert')
return redirect('/')
except ObjectDoesNotExist:
messages.warning(request, message='Lỗi! Tài khoản kích hoạt không tồn tại!', extra_tags='alert')
return redirect('/')
return
def activity_merchant(request, email, code):
try:
account = Account.objects.get(email=email)
if account.activity_merchant == 1:
messages.warning(request, message='Lỗi! Tài khoản của bạn đã kích hoạt bán hàng!', extra_tags='alert')
return redirect('/')
else:
if account.code_act_merchant == code:
account.activity_merchant = True
account.activity_account = True
account.save()
print(role_user_session(account))
request.session['user'] = {
'id': account.id,
'email': account.email,
'role': role_user_session(account),
}
service_all = Service.objects.all()
for item in service_all:
Account_Service.objects.create(
account=account,
service=item,
remain=0,
)
admin = Account.objects.filter(is_admin=True).first()
Rating.objects.create(customer=admin, merchant=account, num_of_star=3, confirm_bought=False, is_activity=True)
messages.success(request, message='Tài khoản của bạn đã kích hoạt!', extra_tags='alert')
return redirect('/')
else:
messages.warning(request, message='Lỗi! Mã code không hợp lệ!!', extra_tags='alert')
return redirect('/')
except ObjectDoesNotExist:
messages.warning(request, message='Lỗi! Tài khoản kích hoạt không tồn tại!', extra_tags='alert')
return redirect('/')
return
def activity_ad(request, email, code):
try:
account = Account.objects.get(email=email)
if account.activity_advertiser == 1:
messages.warning(request, message='Lỗi! Tài khoản của bạn đã kích hoạt quảng cáo!', extra_tags='alert')
return redirect('/')
else:
if account.code_act_ads == code:
account.activity_advertiser = True
account.activity_account = True
account.save()
print(role_user_session(account))
request.session['user'] = {
'id': account.id,
'email': account.email,
'role': role_user_session(account),
}
messages.success(request, message='Tài khoản của bạn đã kích hoạt!', extra_tags='alert')
return redirect('/')
else:
messages.warning(request, message='Lỗi! Mã code không hợp lệ!!', extra_tags='alert')
return redirect('/')
except ObjectDoesNotExist:
messages.warning(request, message='Lỗi! Tài khoản kích hoạt không tồn tại!', extra_tags='alert')
return redirect('/')
return
def request_new_password(request, email, code):
if Account.objects.filter(email=email).exists() == False:
messages.warning(request, message='Lỗi! Tài khoản không hợp lệ!', extra_tags='alert')
return redirect('/')
account = Account.objects.get(email=email)
if account.code_act_account == code:
account.activity_account = True
account.save()
request.session['user'] = {
'id': account.id,
'email': account.email,
'role': role_user_session(account),
}
return redirect('/customer/profile')
else:
messages.warning(request, message='Lỗi! Mã code không hợp lệ!!', extra_tags='alert')
return redirect('/')
# def request_merchant(request):
# if check_session(request) == 0:
# return redirect('/login')
# print(request.session.get('user'))
# if 2 in request.session.get('user')['role']:
# messages.warning(request, message='You were a merchant!', extra_tags='alert')
# return redirect('/')
# if request.method == 'POST':
# account = Account.objects.get(email=request.session.get('user')['email'])
# code = random_code_activity(40)
# email = account.email
# account.code_act_merchant = code
# account.save()
# send_mail_register('activity_merchant', email, code)
# messages.success(request, message='Request Success! Please check email!', extra_tags='alert')
# return redirect('/')
# return render(request, 'website/request_merchant.html')
#Product - collection - detail
def detail_post(request, id_post):
if Post_Product.objects.filter(pk=id_post, is_lock=False, is_activity=True).exists() == False:
messages.warning(request, message='Tin dang khong ton tai', extra_tags='alert')
return redirect('/')
post = Post_Product.objects.filter(pk=id_post, is_lock=False, is_activity=True).first()
view_old = post.views
post.views = view_old + 1
post.save()
rating = dict()
count_star = Rating.objects.filter(merchant_id=post.creator_id.id).aggregate(Sum('num_of_star'))['num_of_star__sum']
star_5 = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True, num_of_star=5).count()
star_4 = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True, num_of_star=4).count()
star_3 = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True, num_of_star=3).count()
star_2 = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True, num_of_star=2).count()
star_1 = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True, num_of_star=1).count()
if count_star == None:
count_star = 0
count_person = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True).count()
if count_person == 0:
rating['agv_rating'] = 0
else:
rating['agv_rating'] = float(count_star/count_person)
if count_person != 0:
rating['star_5'] = [star_5, (star_5/count_person) *100]
rating['star_4'] = [star_4, (star_4/count_person) *100]
rating['star_3'] = [star_3, (star_3/count_person) *100]
rating['star_2'] = [star_2, (star_2/count_person) *100]
rating['star_1'] = [star_1, (star_1/count_person) *100]
list_color = []
for i in range(0, int(count_star/count_person)):
list_color.append(1)
for j in range(0, 5 - int(count_star/count_person)):
list_color.append(0)
rating['list_color'] = list_color
# rating['star_g'] = 5 - int(count_star/count_person)
list_rating = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True).order_by('-pk')
for item in list_rating:
if item.num_of_star == 1:
item.list_color = [1,0,0,0,0]
if item.num_of_star == 2:
item.list_color = [1,1,0,0,0]
if item.num_of_star == 3:
item.list_color = [1,1,1,0,0]
if item.num_of_star == 4:
item.list_color = [1,1,1,1,0]
if item.num_of_star == 5:
item.list_color = [1,1,1,1,1]
rating['list_rating'] = list_rating
product_category = Product_Category.objects.filter(product_id=post.product_id.id, archive=False).first()
category = product_category.category_id
    list_product_available = Product_Category.objects.filter(archive=False, category_id_id=category.id).values_list('product_id_id')
    list_post = Post_Product.objects.filter(is_lock=False, is_activity=True, product_id_id__in=list_product_available).order_by('-bought')[0:40]
list_random = []
count = list_post.count()
while len(list_random) < 4:
x = randint(0, count - 1)
if x not in list_random:
list_random.append(x)
if len(list_random) == count:
break
if list_post.count() < 5:
posts = list_post
else:
posts = [list_post[list_random[0]], list_post[list_random[1]], list_post[list_random[2]], list_post[list_random[3]]]
# print(posts)
array_post = []
for post in posts:
dict_post = post.__dict__
count_star = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True).aggregate(Sum('num_of_star'))['num_of_star__sum']
if count_star == None:
count_star = 0
count_person = Rating.objects.filter(merchant_id=post.creator_id.id, is_activity=True).count()
if count_person == 0:
dict_post['rating'] = float(0)
else:
dict_post['rating'] = float(round(count_star/count_person, 1))
del dict_post['_state']
# if Product.objects.filter(pk=post.id).exists() == True:
dict_product = Product.objects.get(pk=post.product_id_id).__dict__
list_price = Link_Type.objects.filter(parent_product=dict_product['id'], product_id__archive=False).values_list('product_id__price')
dict_product['range_price'] = [max(list_price)[0], min(list_price)[0]]
del dict_product['_state']
image = Product_Image.objects.filter(product_id_id=dict_product['id']).order_by('image_id_id').first()
dict_product['image'] = 'http://localhost:8000/product' + image.image_id.image_link.url
dict_post['product'] = dict_product
array_post.append(dict_post)
return render(request,'website/product.html', {'rating': rating, 'array_post': array_post})
def collections(request, id_category):
if Category.objects.filter(pk=id_category).exists() == False:
messages.warning(request, message='Category khong ton tai', extra_tags='alert')
return redirect('/')
return render(request,'website/collection.html')
def search(request):
if 'r' not in request.GET:
messages.warning(request, message='Vui lòng nhập từ khóa!', extra_tags='alert')
return redirect('/')
return render(request, 'website/search.html')
def shop(request, id_shop):
if Account.objects.filter(pk=id_shop, activity_merchant=True, is_lock=False).exists() == False:
messages.warning(request, message='Cửa hàng không tồn tại!', extra_tags='alert')
return redirect('/')
account = Account.objects.filter(pk=id_shop, activity_merchant=True, is_lock=False).first()
rating = dict()
count_star = Rating.objects.filter(merchant_id=account.id).aggregate(Sum('num_of_star'))['num_of_star__sum']
star_5 = Rating.objects.filter(merchant_id=account.id, is_activity=True, num_of_star=5).count()
star_4 = Rating.objects.filter(merchant_id=account.id, is_activity=True, num_of_star=4).count()
star_3 = Rating.objects.filter(merchant_id=account.id, is_activity=True, num_of_star=3).count()
star_2 = Rating.objects.filter(merchant_id=account.id, is_activity=True, num_of_star=2).count()
star_1 = Rating.objects.filter(merchant_id=account.id, is_activity=True, num_of_star=1).count()
if count_star == None:
count_star = 0
count_person = Rating.objects.filter(merchant_id=account.id, is_activity=True).count()
if count_person == 0:
rating['agv_rating'] = 0
else:
rating['agv_rating'] = float(count_star/count_person)
if count_person != 0:
rating['star_5'] = [star_5, (star_5/count_person) *100]
rating['star_4'] = [star_4, (star_4/count_person) *100]
rating['star_3'] = [star_3, (star_3/count_person) *100]
rating['star_2'] = [star_2, (star_2/count_person) *100]
rating['star_1'] = [star_1, (star_1/count_person) *100]
list_color = []
for i in range(0, int(count_star/count_person)):
list_color.append(1)
for j in range(0, 5 - int(count_star/count_person)):
list_color.append(0)
rating['list_color'] = list_color
# rating['star_g'] = 5 - int(count_star/count_person)
list_rating = Rating.objects.filter(merchant_id=account.id, is_activity=True).order_by('-pk')
for item in list_rating:
if item.num_of_star == 1:
item.list_color = [1,0,0,0,0]
if item.num_of_star == 2:
item.list_color = [1,1,0,0,0]
if item.num_of_star == 3:
item.list_color = [1,1,1,0,0]
if item.num_of_star == 4:
item.list_color = [1,1,1,1,0]
if item.num_of_star == 5:
item.list_color = [1,1,1,1,1]
rating['list_rating'] = list_rating
return render(request, 'website/shop.html', {'rating': rating})
def checkout(request):
if 'user' in request.session:
cart = Cart(request.session)
if cart.count == 0:
messages.warning(request, message='Bạn chưa có sản phẩm nào trong giỏ hàng', extra_tags='alert')
return redirect('/')
account = Account.objects.get(pk=request.session.get('user')['id'])
return render(request,'website/checkout.html', {
'account': account,
})
else:
messages.warning(request, message='Vui lòng đăng nhập để thanh toán', extra_tags='alert')
return redirect('/')
# def profile(request):
# return render(request,'website/checkout')
def cart(request):
if 'user' in request.session:
return render(request,'website/cart.html')
else:
messages.warning(request, message='Vui lòng đăng nhập để mua hàng', extra_tags='alert')
return redirect('/')
def product(request):
return render(request,'website/product.html')
``` |
{
"source": "JimmiBram/DevitoSync",
"score": 3
} |
#### File: JimmiBram/DevitoSync/dsFile.py
```python
from __future__ import annotations  # <-- lets the from_list method annotate dsFile as its return type; redundant once postponed evaluation of annotations becomes the default
import os, hashlib
class dsFile:
def __init__(self, path, new_base_path = ""):
self.path = path
self.md5 = ""
self.basepath = new_base_path
def __hash__(self) -> str:
return self.md5
def get_relative_path(self) -> str:
return self.path.replace(self.basepath, '')
def to_list(self) -> list:
return [self.path, self.basepath, self.md5]
@staticmethod
def from_list(data: list) -> dsFile:
return_object = dsFile(data[0], data[1])
        return_object.md5 = data[2]
return return_object
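# Usage sketch (illustrative paths and hash, not part of the original module):
# to_list()/from_list() are meant to round-trip a dsFile for serialisation.
#
#   f = dsFile('/data/base/sub/file.txt', new_base_path='/data/base')
#   f.md5 = 'd41d8cd98f00b204e9800998ecf8427e'
#   clone = dsFile.from_list(f.to_list())
#   assert clone.get_relative_path() == '/sub/file.txt'
#   assert clone.md5 == f.md5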
```
#### File: DevitoSync/utils/io.py
```python
import hashlib
import json
import os
import platform
from typing import Any, List, Optional
from datetime import datetime
def read_json(filepath: str) -> List:
with open(filepath) as data_file:
data = json.load(data_file)
return data
def write_json(filepath: str, data: Any) -> Any:
kwargs = {'indent':4, 'sort_keys':True, 'separators': (",",": "), 'ensure_ascii': False}
with open(filepath, "w", encoding="utf8") as outfile:
str_ = json.dumps(data, **kwargs)
outfile.write(str_)
return data
def get_creation_datetime(filepath: str) -> Optional[datetime]:
"""
Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : Optional[datetime]
"""
if platform.system() == "Windows":
return datetime.fromtimestamp(os.path.getctime(filepath))
else:
stat = os.stat(filepath)
try:
return datetime.fromtimestamp(stat.st_birthtime)
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return None
def get_modification_datetime(filepath: str) -> datetime:
"""
Get the datetime that a file was last modified.
Parameters
----------
filepath : str
Returns
-------
modification_datetime : datetime
"""
import tzlocal
timezone = tzlocal.get_localzone()
mtime = datetime.fromtimestamp(os.path.getmtime(filepath))
return mtime.replace(tzinfo=timezone)
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
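# Usage sketch (the file name below is illustrative): md5() streams the file in
# 4 KiB chunks, so it stays memory-friendly even for large files.
#
#   digest = md5('backup.tar.gz')
#   print(digest)  # 32-character hex string, e.g. for change detection in a sync run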
``` |
{
"source": "Jimmie00x0000/DailyTools",
"score": 3
} |
#### File: DailyTools/python/ply_normalization.py
```python
from plyfile import PlyData, PlyElement
import math
import numpy as np
def _bounding_box(data):
x_min, y_min, z_min = math.inf, math.inf, math.inf
x_max, y_max, z_max = - math.inf, - math.inf, - math.inf
xs = data['x']
ys = data['y']
zs = data['z']
for x in xs:
x_min = min((x_min, x))
x_max = max((x_max, x))
for y in ys:
y_min = min((y_min, y))
y_max = max((y_max, y))
for z in zs:
z_min = min((z_min, z))
z_max = max((z_max, z))
return x_min, x_max, y_min, y_max, z_min, z_max
def _normalize_impl(bbox, vertex_data):
largest_length = max((bbox[1] - bbox[0], bbox[3] - bbox[2], bbox[5] - bbox[4]))
ratio = largest_length / 1.0
vertex_data = vertex_data.tolist()
for i in range(len(vertex_data)):
xyz = vertex_data[i]
new_xyz = (xyz[0] / ratio, xyz[1] / ratio, xyz[2] / ratio)
vertex_data[i] = new_xyz
pass
return np.array(vertex_data, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
def normalize(file_name):
ply_data = PlyData.read(file_name)
for element in ply_data.elements:
if element.name == 'vertex' or element.name == 'vertices':
vertex_data = element.data
bbox = _bounding_box(vertex_data)
new_vertex_data = _normalize_impl(bbox, vertex_data)
_write_ply(file_name[:-4] + '.n.ply', new_vertex_data)
pass
pass
pass
def _write_ply(file_name, vertex_data):
vertex_ele = PlyElement.describe(vertex_data, 'vertex')
PlyData([vertex_ele], text=True).write(file_name)
pass
if __name__ == '__main__':
# normalize('simple.ply')
pass
``` |
{
"source": "jimmiebtlr/stable-baselines",
"score": 3
} |
#### File: stable_baselines/acktr/utils.py
```python
import tensorflow as tf
def dense(input_tensor, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
"""
A dense Layer
:param input_tensor: ([TensorFlow Tensor]) input
:param size: (int) number of hidden neurons
:param name: (str) layer name
:param weight_init: (function or int or float) initialize the weight
:param bias_init: (function or int or float) initialize the weight
:param weight_loss_dict: (dict) store the weight loss if not None
:param reuse: (bool) if can be reused
:return: ([TensorFlow Tensor]) the output of the dense Layer
"""
with tf.variable_scope(name, reuse=reuse):
assert len(tf.get_variable_scope().name.split('/')) == 2
weight = tf.get_variable("w", [input_tensor.get_shape()[1], size], initializer=weight_init)
bias = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
weight_decay_fc = 3e-4
if weight_loss_dict is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(weight), weight_decay_fc, name='weight_decay_loss')
weight_loss_dict[weight] = weight_decay_fc
weight_loss_dict[bias] = 0.0
tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)
return tf.nn.bias_add(tf.matmul(input_tensor, weight), bias)
def kl_div(action_dist1, action_dist2, action_size):
"""
Kullback-Leibler divergence
:param action_dist1: ([TensorFlow Tensor]) action distribution 1
:param action_dist2: ([TensorFlow Tensor]) action distribution 2
:param action_size: (int) the shape of an action
:return: (float) Kullback-Leibler divergence
"""
mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]
mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]
numerator = tf.square(mean1 - mean2) + tf.square(std1) - tf.square(std2)
denominator = 2 * tf.square(std2) + 1e-8
return tf.reduce_sum(
numerator / denominator + tf.log(std2) - tf.log(std1), reduction_indices=-1)
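# Usage sketch (assumption: TF1-style graph mode; shapes are illustrative).
# dense() builds one fully connected layer inside an enclosing variable scope;
# kl_div() expects each distribution tensor to pack means then stds, i.e. a
# [batch, 2 * action_size] tensor.
#
#   obs_ph = tf.placeholder(tf.float32, [None, 4])
#   with tf.variable_scope("pi"):
#       hidden = dense(obs_ph, 64, "fc1", weight_init=tf.glorot_uniform_initializer())
#   dist_a = tf.placeholder(tf.float32, [None, 4])  # 2 means + 2 stds
#   dist_b = tf.placeholder(tf.float32, [None, 4])
#   kl = kl_div(dist_a, dist_b, action_size=2)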
```
#### File: stable-baselines/tests/test_identity.py
```python
import pytest
import numpy as np
from stable_baselines import A2C, ACER, ACKTR, DQN, DDPG, SAC, PPO1, PPO2, TD3, TRPO
from stable_baselines.ddpg import NormalActionNoise
from stable_baselines.common.identity_env import IdentityEnv, IdentityEnvBox
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common import set_global_seeds
# Hyperparameters for learning identity for each RL model
LEARN_FUNC_DICT = {
'a2c': lambda e: A2C(policy="MlpPolicy", learning_rate=1e-3, n_steps=1,
gamma=0.7, env=e).learn(total_timesteps=10000, seed=0),
'acer': lambda e: ACER(policy="MlpPolicy", env=e,
n_steps=1, replay_ratio=1).learn(total_timesteps=15000, seed=0),
'acktr': lambda e: ACKTR(policy="MlpPolicy", env=e,
learning_rate=5e-4, n_steps=1).learn(total_timesteps=20000, seed=0),
'dqn': lambda e: DQN(policy="MlpPolicy", batch_size=16, gamma=0.1,
exploration_fraction=0.001, env=e).learn(total_timesteps=40000, seed=0),
'ppo1': lambda e: PPO1(policy="MlpPolicy", env=e, lam=0.5,
optim_batchsize=16, optim_stepsize=1e-3).learn(total_timesteps=15000, seed=0),
'ppo2': lambda e: PPO2(policy="MlpPolicy", env=e,
learning_rate=1.5e-3, lam=0.8).learn(total_timesteps=20000, seed=0),
'trpo': lambda e: TRPO(policy="MlpPolicy", env=e,
max_kl=0.05, lam=0.7).learn(total_timesteps=10000, seed=0),
}
@pytest.mark.slow
@pytest.mark.parametrize("model_name", ['a2c', 'acer', 'acktr', 'dqn', 'ppo1', 'ppo2', 'trpo'])
def test_identity(model_name):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
:param model_name: (str) Name of the RL model
"""
env = DummyVecEnv([lambda: IdentityEnv(10)])
model = LEARN_FUNC_DICT[model_name](env)
n_trials = 1000
reward_sum = 0
set_global_seeds(0)
obs = env.reset()
for _ in range(n_trials):
action, _ = model.predict(obs)
obs, reward, _, _ = env.step(action)
reward_sum += reward
assert model.action_probability(obs).shape == (1, 10), "Error: action_probability not returning correct shape"
action = env.action_space.sample()
action_prob = model.action_probability(obs, actions=action)
assert np.prod(action_prob.shape) == 1, "Error: not scalar probability"
action_logprob = model.action_probability(obs, actions=action, logp=True)
assert np.allclose(action_prob, np.exp(action_logprob)), (action_prob, action_logprob)
assert reward_sum > 0.9 * n_trials
# Free memory
del model, env
@pytest.mark.slow
@pytest.mark.parametrize("model_class", [DDPG, TD3, SAC])
def test_identity_continuous(model_class):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
"""
env = DummyVecEnv([lambda: IdentityEnvBox(eps=0.5)])
if model_class in [DDPG, TD3]:
n_actions = 1
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
else:
action_noise = None
model = model_class("MlpPolicy", env, gamma=0.1, action_noise=action_noise, buffer_size=int(1e6))
model.learn(total_timesteps=20000, seed=0)
n_trials = 1000
reward_sum = 0
set_global_seeds(0)
obs = env.reset()
for _ in range(n_trials):
action, _ = model.predict(obs)
obs, reward, _, _ = env.step(action)
reward_sum += reward
assert reward_sum > 0.9 * n_trials
# Free memory
del model, env
``` |
{
"source": "jimmiemunyi/Sign-Language-App",
"score": 3
} |
#### File: jimmiemunyi/Sign-Language-App/webcam_inference.py
```python
from collections import deque, Counter
import cv2
from fastai.vision.all import *
print('Loading our Inference model...')
# load our inference model
inf_model = load_learner('model/sign_language.pkl')
print('Model Loaded')
# define a deque to get rolling average of predictions
# I go with the last 10 predictions
rolling_predictions = deque([], maxlen=10)
# get the most common item in the deque
def most_common(D):
data = Counter(D)
return data.most_common(1)[0][0]
def hand_area(img):
# specify where hand should go
hand = img[50:324, 50:324]
# the images in the model were trainind on 200x200 pixels
hand = cv2.resize(hand, (200,200))
return hand
# capture video on the webcam
cap = cv2.VideoCapture(0)
# get the dimensions on the frame
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# define codec and create our VideoWriter to save the video
# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# out = cv2.VideoWriter('output/sign-language.mp4', fourcc, 12, (frame_width, frame_height))
# read video
while True:
# capture each frame of the video
_, frame = cap.read()
# flip frame to feel more 'natural' to webcam
frame = cv2.flip(frame, flipCode = 1)
# draw a blue rectangle where to place hand
cv2.rectangle(frame, (50, 50), (324, 324), (255, 0, 0), 2)
# get the image
inference_image = hand_area(frame)
# get the current prediction on the hand
pred = inf_model.predict(inference_image)
# append the current prediction to our rolling predictions
rolling_predictions.append(pred[0])
# our prediction is going to be the most common letter
# in our rolling predictions
prediction_output = f'The predicted letter is {most_common(rolling_predictions)}'
# show predicted text
cv2.putText(frame, prediction_output, (10, 350), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
# show the frame
cv2.imshow('frame', frame)
# save the frames to out file
# out.write(frame)
# press `q` to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release VideoCapture()
cap.release()
# release out file
# out.release()
# close all frames and video windows
cv2.destroyAllWindows()
``` |
{
"source": "jimmingcheng/google_nest_client",
"score": 3
} |
#### File: google_nest_client/google_nest_client/client.py
```python
from google_nest_client.api_client import GoogleNestAPIClient
from google_nest_client.camera import Camera
from google_nest_client.thermostat import Thermostat
class GoogleNestClient(GoogleNestAPIClient):
def get_cameras(self):
return [
Camera(self, device)
for device in self.get_devices_by_type('sdm.devices.types.CAMERA')
]
def get_thermostats(self):
return [
Thermostat(self, device)
for device in self.get_devices_by_type('sdm.devices.types.THERMOSTAT')
]
def get_camera(self, device_id):
return Camera(self, self.get_device(device_id))
def get_thermostat(self, device_id=None):
if device_id:
return Thermostat(self, self.get_device(device_id))
else:
device_dicts = self.get_devices_by_type('sdm.devices.types.THERMOSTAT')
return Thermostat(self, device_dicts[0])
def get_camera_by_label(self, label):
matched_devices = self.get_devices_by_type_and_label(
'sdm.devices.types.CAMERA',
label,
)
if matched_devices:
return Camera(self, matched_devices[0])
else:
return None
def get_thermostat_by_label(self, label='Thermostat'):
matched_devices = self.get_devices_by_type_and_label(
'sdm.devices.types.THERMOSTAT',
label,
)
if matched_devices:
return Thermostat(self, matched_devices[0])
else:
return None
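# Usage sketch (assumption: construction arguments are handled by the
# GoogleNestAPIClient base class, which is not shown here; the camera label is
# illustrative):
#
#   client = GoogleNestClient(...)  # credentials/project wiring per GoogleNestAPIClient
#   thermostat = client.get_thermostat()            # first thermostat if no id given
#   camera = client.get_camera_by_label("Backyard")
#   if camera is None:
#       print("no camera with that label")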
```
#### File: google_nest_client/google_nest_client/thermostat.py
```python
from google_nest_client.device import Device
class Thermostat(Device):
def get_hvac_status(self):
return self.get_trait('ThermostatHvac')['status']
def get_mode(self):
return self.get_trait('ThermostatMode')['mode']
def get_ambient_temperature(self):
deg_c = self.get_trait('Temperature')['ambientTemperatureCelsius']
return celsius_to_farenheit(deg_c)
def get_heating_temperature(self):
deg_c = self.get_trait('ThermostatTemperatureSetpoint')['heatCelsius']
return celsius_to_farenheit(deg_c)
def get_ambient_humidity(self):
return self.get_trait('Humidity')['ambientHumidityPercent']
def set_heat(self, deg_f):
self.api_client.execute_command(
self.device_id,
'sdm.devices.commands.ThermostatTemperatureSetpoint.SetHeat',
{'heatCelsius': farenheit_to_celsius(deg_f)},
)
def farenheit_to_celsius(f):
return (f - 32) * 5 / 9
def celsius_to_farenheit(c):
return c * 9 / 5 + 32
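# Quick sanity check for the conversions above (plain arithmetic):
# farenheit_to_celsius(32) == 0.0 and celsius_to_farenheit(100) == 212.0,
# so set_heat(68) sends 20.0 degrees Celsius to the API.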
``` |
{
"source": "jim-minter/rp",
"score": 2
} |
#### File: aro/azext_aro/_aad.py
```python
import datetime
import uuid
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import configure_common_settings
from azure.graphrbac import GraphRbacManagementClient
from azure.graphrbac.models import ApplicationCreateParameters
from azure.graphrbac.models import PasswordCredential
from azure.graphrbac.models import ServicePrincipalCreateParameters
class AADManager(object):
MANAGED_APP_PREFIX = "https://az.aro.azure.com/"
def __init__(self, cli_ctx):
profile = Profile(cli_ctx=cli_ctx)
credentials, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
self.client = GraphRbacManagementClient(
credentials, tenant_id, base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
configure_common_settings(cli_ctx, self.client)
def createManagedApplication(self, display_name):
        password = str(uuid.uuid4())  # NOTE: the literal here was redacted in the source; a random UUID string is assumed as the generated client secret
try:
end_date = datetime.datetime(2299, 12, 31, tzinfo=datetime.timezone.utc)
except AttributeError:
end_date = datetime.datetime(2299, 12, 31)
app = self.client.applications.create(ApplicationCreateParameters(
display_name=display_name,
identifier_uris=[
self.MANAGED_APP_PREFIX + str(uuid.uuid4()),
],
password_credentials=[
PasswordCredential(
end_date=end_date,
value=password,
),
],
))
return app, password
def getApplication(self, app_id):
apps = list(self.client.applications.list(
filter="appId eq '%s'" % app_id))
if apps:
return apps[0]
return None
def deleteManagedApplication(self, app_id):
app = self.getApplication(app_id)
if app and app.identifier_uris and app.identifier_uris[0].startswith(self.MANAGED_APP_PREFIX):
self.client.applications.delete(app.object_id)
def getServicePrincipal(self, app_id):
sps = list(self.client.service_principals.list(
filter="appId eq '%s'" % app_id))
if sps:
return sps[0]
return None
def createServicePrincipal(self, app_id):
return self.client.service_principals.create(ServicePrincipalCreateParameters(
app_id=app_id,
))
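# Usage sketch (assumption: called from an Azure CLI command handler where a
# cli_ctx is available; the display name is illustrative):
#
#   aad = AADManager(cmd.cli_ctx)
#   app, secret = aad.createManagedApplication("aro-cluster-sp")
#   sp = aad.getServicePrincipal(app.app_id) or aad.createServicePrincipal(app.app_id)
#   # ...later, clean up by client id:
#   aad.deleteManagedApplication(app.app_id)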
``` |
{
"source": "jimmo/cpld-cpu",
"score": 2
} |
#### File: pysim/cpu_a_14/asm.py
```python
import collections
from lark import Lark, UnexpectedInput
l = Lark(open("cpu_a_14/asm.g").read(), parser="earley", lexer="auto")
class AssemblerTransformer:
def __init__(self, assembler):
self.assembler = assembler
def transform(self, ast):
for statement in ast.children:
if statement.data == "statement":
statement = statement.children[0]
if statement.data == "cmd":
self.cmd(statement.children)
elif statement.data == "op":
self.op(statement.children)
else:
raise ValueError("Unknown statment", statement)
def parse_number(self, token):
if token.type != "NUMBER":
raise ValueError(f'Invalid number token type {token.type} "{token}"')
if token.startswith("0x"):
return int(token, 16)
if token.startswith("0o"):
return int(token, 8)
return int(token, 10)
def cmd(self, m):
print(" ".join(x.value for x in m))
if m[0].type == "CMD_ORG":
self.assembler.org(self.parse_number(m[1]))
else:
raise ValueError(f"Unknown command: {m}")
def op(self, m):
if m[0].type == "LABEL":
print(m[0] + ":")
self.assembler.label(self.assembler.create_label(m[0]))
m = m[1:]
print(" ".join(x.value for x in m))
if m[0].type == "OP_DCB":
self.assembler.dcb(self.parse_number(m[1]))
return
label = None
if len(m) == 2:
if m[1].type == "NUMBER":
label = self.assembler.const(self.parse_number(m[1]))
else:
label = self.assembler.create_label(m[1])
if m[0].type == "OP_NOR":
self.assembler.nor(label)
elif m[0].type == "OP_ADD":
self.assembler.add(label)
elif m[0].type == "OP_STA":
self.assembler.sta(label)
elif m[0].type == "OP_CLR":
self.assembler.nor(self.assembler.create_label("allone"))
elif m[0].type == "OP_LDA":
self.assembler.lda(label)
elif m[0].type == "OP_NOT":
self.assembler.nor(self.assembler.create_label("zero"))
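        # SUB below is synthesized from the NOR/ADD primitives: complement the
        # accumulator (nor with zero), add the operand, then complement again,
        # using a - b == ~(~a + b) modulo 256.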
elif m[0].type == "OP_SUB":
self.assembler.nor(self.assembler.create_label("zero"))
self.assembler.add(label)
self.assembler.nor(self.assembler.create_label("zero"))
elif m[0].type == "OP_JCC":
self.assembler.jcc(label)
elif m[0].type == "OP_JMP":
self.assembler.jcc(label)
self.assembler.jcc(label)
elif m[0].type == "OP_JCS":
self.assembler.jcs(label)
elif m[0].type == "OP_HLT":
self.assembler.hlt()
elif m[0].type == "OP_OUT":
self.assembler.sta(self.assembler.create_label("display"))
self.assembler.lda(self.assembler.create_label("trigger"))
self.assembler.nor(self.assembler.create_label("one"))
self.assembler.sta(self.assembler.create_label("trigger"))
self.assembler.lda(self.assembler.create_label("display"))
else:
raise ValueError(f"Unknown op: {m}")
class Assembler:
PREFIX_NOR = 0b00000000_00000000
PREFIX_ADD = 0b01000000_00000000
PREFIX_STA = 0b10000000_00000000
PREFIX_JCC = 0b11000000_00000000
def __init__(self, data, addr):
print(len(data))
self._data = data
self._offset = 0
self._labels = collections.defaultdict(Assembler.Label)
self._indent = 0
self._consts = {}
self._nreserved = 0
def log(self, s):
print(" 0x{:04x}: {}{}".format(self._offset, " " * self._indent, s))
def create_label(self, name):
l = self._labels[name]
l._name = name
return l
class Label:
def __init__(self):
self._offset = None
self._name = None
self._fixups = []
self._register = False
def addr(self):
return self._offset
def write_instr(self, instr):
self._data[self._offset] = instr >> 8
self._data[self._offset + 1] = instr & 0xFF
self._offset += 2
def write_byte(self, b):
self._data[self._offset] = b & 0xFF
self._offset += 1
def __enter__(self):
self.reserve("one", 1, register=True)
self.reserve("allone", 0xFF, register=True)
self.reserve("zero", 0, register=True)
self.reserve("trigger", 0, register=True)
self.reserve("display", 0, register=True)
self.reserve("page1", 0, register=True)
self.reserve("page0", 0, register=True)
self.reserve("_tmp1", 0, register=True)
self.reserve("_tmp2", 0, register=True)
self.reserve("_sp", 0, register=True)
self.reserve("_stack", [0] * 32, register=True)
return self
def __exit__(self, a, b, c):
for l in self._labels.values():
if l._offset is None:
raise ValueError(f'Undefined label "{l._name}"')
for offset in l._fixups:
addr = l.addr()
self._data[offset] |= (addr >> 8) & 0x3F
self._data[offset + 1] |= addr & 0xFF
def org(self, addr):
self._offset = addr
def const(self, value):
if value in self._consts:
return self._consts[value]
name = "_const_{}".format(value)
l = self.create_label(name)
self.reserve(name, value)
self._consts[value] = l
return l
def reserve(self, name, value, register=False):
if isinstance(value, int):
value = [value]
prev_offset = self._offset
self._offset = 2 ** 14 - len(value) - self._nreserved
self._nreserved += len(value)
self.label(self.create_label(name), register)
offset = self._offset
for x in value:
self.dcb(x)
self._offset = prev_offset
return offset
def label(self, l, register=False):
self.log(' label "{}" at 0x{:04x}'.format(l._name, self._offset))
if l._offset is not None:
raise ValueError(f"Label redefinition: {l._name}")
l._offset = self._offset
l._register = register
l1 = self.create_label(l._name + "_")
l1._offset = self._offset + 1
def placeholder(self, label):
label._fixups.append(self._offset)
def nor(self, label):
self.log(" nor {}".format(label._name))
self.placeholder(label)
self.write_instr(Assembler.PREFIX_NOR)
def add(self, label):
self.log(" add {}".format(label._name))
self.placeholder(label)
self.write_instr(Assembler.PREFIX_ADD)
def sta(self, label):
self.log(" sta {}".format(label._name))
self.placeholder(label)
self.write_instr(Assembler.PREFIX_STA)
def lda(self, label):
self.log(" lda {}".format(label._name))
self._indent += 1
self.nor(self.create_label("allone"))
self.add(label)
self._indent -= 1
def jcc(self, label):
self.log(" jcc {}".format(label._name))
self.placeholder(label)
self.write_instr(Assembler.PREFIX_JCC)
def jcs(self, label):
self.log(" jcs {}".format(label._name))
self.write_instr(Assembler.PREFIX_JCC | (self._offset + 4))
self._indent += 1
self.jcc(label)
self._indent -= 1
def hlt(self):
self.log(" hlt")
self.write_instr(Assembler.PREFIX_JCC | self._offset)
self.write_instr(Assembler.PREFIX_JCC | self._offset)
def dcb(self, v):
self.log(" dcb 0x{:02x}".format(v))
self.write_byte(v)
def parse(self, path):
with open(path) as f:
contents = f.read()
try:
ast = l.parse(contents)
except UnexpectedInput as e:
self.log(f"{path}:{e.line}:{e.column}: unexpected input.")
self.log(" " + contents.split("\n")[e.line - 1])
self.log(" " + " " * e.column + "^")
return False
AssemblerTransformer(self).transform(ast)
return True
```
#### File: pysim/cpu_ah_16/zz.py
```python
import collections
import sys
from lark import Lark, Transformer, Tree, UnexpectedInput
from asm import Assembler
l = Lark(open("cpu_ah_16/zz.g").read())
class Variable:
def __init__(self, name, addr, typename):
self.name = name
self.addr = addr
self.signed = typename[0] == "i"
self.size = 1 if typename[1:] == "8" else 2
def desc(self):
return (
("" if self.signed else "u") + "int" + str(self.size * 8) + " " + self.name
)
class CompilerTransformer(Transformer):
def __init__(self):
self.vars = {}
self.next_var = 0
self.call_n = 0
# Init call stack pointer.
print(f" load c:d, 0xffef")
print(f" load a, 0")
print(f" wmem c:d, a")
def var(self, n):
if n not in self.vars:
raise ValueError(f'Unknown variable "{n}".')
return self.vars[n]
def format_expr(self, expr):
if isinstance(expr, Tree):
if expr.data == "bin_op":
return (
"("
+ self.format_expr(expr.children[0])
+ expr.children[1]
+ self.format_expr(expr.children[2])
+ ")"
)
raise ValueError(f"unknown expr type {expr.data}")
else:
if expr.type == "NUMBER":
return expr
elif expr.type == "ID":
v = self.var(expr)
return v.name
else:
raise ValueError(f"unknown token type {expr.type}")
ALU_OPS = {
"+": "add",
"-": "sub",
"&": "and",
"|": "or",
"^": "xor",
}
def number(self, token):
if token.type != "NUMBER":
raise ValueError(f'Invalid number token type {token.type} "{token}"')
if token.startswith("0x"):
return int(token, 16)
if token.startswith("0o"):
return int(token, 8)
return int(token, 10)
def eval(self, expr, n=0):
if isinstance(expr, Tree):
if expr.data == "bin_op":
self.eval(expr.children[2])
print(f" # save {self.format_expr(expr.children[2])}")
print(f" load c:d, 0x{0xffff-n:04x}")
print(f" wmem c:d, a")
self.eval(expr.children[0], n + 1)
print(f" # restore {self.format_expr(expr.children[2])}")
print(f" load c:d, 0x{0xffff-n:04x}")
print(f" rmem b, c:d")
op = CompilerTransformer.ALU_OPS[expr.children[1]]
print(f" # {expr.children[1]}")
print(f" {op} a")
else:
raise ValueError(f"unknown expr type {expr.data}")
else:
if expr.type == "NUMBER":
print(f" # {expr}")
v = self.number(expr)
print(f" load a, 0x{v:x}")
elif expr.type == "ID":
v = self.var(expr)
if v.size != 1:
raise ValueError("Must be 8-bit variable")
print(f" # {v.name}")
print(f" load c:d, 0x{v.addr:04x}")
print(f" rmem a, c:d")
else:
raise ValueError(f"unknown token type {expr.type}")
def eval16(self, expr, n=0):
if isinstance(expr, Tree):
if expr.data == "bin_op":
self.eval16(expr.children[2])
# TODO
print(f" mov a, b")
print(f" # save {self.format_expr(expr.children[2])}")
print(f" load c:d, 0x{0xffff-n:04x}")
print(f" wmem c:d, a")
self.eval16(expr.children[0], n + 1)
# TODO
print(f" mov a, b")
print(f" # restore {self.format_expr(expr.children[2])}")
print(f" load c:d, 0x{0xffff-n:04x}")
print(f" rmem b, c:d")
op = CompilerTransformer.ALU_OPS[expr.children[1]]
print(f" # {expr.children[1]}")
print(f" {op} a")
# TODO
print(f" mov b, a")
print(f" load a, 0")
else:
raise ValueError(f"unknown expr type {expr.data}")
else:
if expr.type == "NUMBER":
print(f" # {expr} (16-bit)")
v = self.number(expr)
print(f" load a:b, 0x{v:x}")
elif expr.type == "ID":
v = self.var(expr)
if v.size != 2:
raise ValueError("Must be 16-bit variable")
print(f" # {v.name} (16-bit)")
print(f" load c:d, 0x{v.addr:04x}")
print(f" rmem a, c:d")
print(f" mov b, a")
print(f" mov a, d")
print(f" inc a")
print(f" mov d, a")
print(f" rmem b, c:d")
else:
raise ValueError(f"unknown token type {expr.type}")
def new_assign(self, m):
if m[1] in self.vars:
raise ValueError(f"Redefinition of {m[1]}")
v = Variable(m[1], self.next_var, m[0])
self.vars[m[1]] = v
self.next_var += v.size
self.assign(m[1:])
def assign(self, m):
v = self.var(m[0])
print(f" # {v.name} = {self.format_expr(m[1])}")
if v.size == 1:
self.eval(m[1])
print(f" load c:d, 0x{v.addr:04x}")
print(f" wmem c:d, a")
elif v.size == 2:
self.eval16(m[1])
print(f" load c:d, 0x{v.addr:04x}")
print(f" wmem c:d, a")
print(f" mov a, d")
print(f" inc a")
print(f" mov d, a")
print(f" wmem c:d, b")
else:
raise ValueError("Only 8- and 16- bit values supported.")
print()
def deref_assign(self, m):
v = self.var(m[0][1:-1])
if v.size != 2 or v.signed:
raise ValueError(f"Can only dereference u16 type variables ({v.desc()}).")
print(f" # [{v.name}] = {self.format_expr(m[1])}")
print(f" load c:d, 0x{v.addr:04x}")
print(" rmem e, c:d")
print(" mov a, d")
print(" inc a")
print(" mov d, a")
print(" rmem f, c:d")
print(" mov g:h, e:f")
self.eval(m[1])
print(" wmem g:h, a")
print()
def hlt(self, m):
print(" hlt")
print()
JUMP_TYPES = {
"==": "je",
"!=": "jne",
">": "jxx",
"<": "jn",
">=": "jp",
"<=": "jxx",
}
def goto(self, m):
if len(m) == 1:
print(f" # goto {m[0]}")
print(f" load c:d, {m[0]}")
print(f" jmp c:d")
else:
print(f" # if {m[0]} {m[1]} {m[2]} goto {m[3]}")
self.eval(m[2])
print(" mov g, a")
self.eval(m[0])
print(" mov b, g")
print(" cmp")
print(f" load c:d, {m[3]}")
jmp = CompilerTransformer.JUMP_TYPES[m[1]]
print(f" {jmp} c:d")
print()
def label(self, m):
print(m[0] + ":")
def call(self, m):
n = f"_call_{self.call_n}"
self.call_n += 1
print(f" # call {m[0]}")
# Point c:d at the next location
print(f" load c:d, 0xffef")
print(f" rmem b, c:d")
print(f" load a, 0xed")
print(f" sub a")
print(f" mov d, a")
# Stash this label in c:d
print(f" load a:b, {n}")
print(f" wmem c:d, a")
print(f" mov a, d")
print(f" inc a")
print(f" mov d, a")
print(f" wmem c:d, b")
# Update stack pointer
print(f" load c:d, 0xffef")
print(f" rmem a, c:d")
print(f" load b, 2")
print(f" add a")
print(f" wmem c:d, a")
# Jump to call target
print(f" load c:d, {m[0]}")
print(f" jmp c:d")
print(f"{n}:")
print()
def ret(self, m):
print(f" # ret")
# Point c:d at the current location
print(f" load c:d, 0xffef")
print(f" rmem b, c:d")
print(f" load a, 0xef")
print(f" sub a")
print(f" mov d, a")
# Get return target
print(f" rmem b, c:d")
print(f" mov a, d")
print(f" inc a")
print(f" mov d, a")
print(f" mov a, b")
print(f" rmem b, c:d")
print(f" mov g:h, a:b")
# Update stack pointer
print(f" load c:d, 0xffef")
print(f" rmem a, c:d")
print(f" load b, 2")
print(f" sub a")
print(f" wmem c:d, a")
# Jump to target
print(f" jmp g:h")
print()
class Compiler:
def parse(self, path):
with open(path) as f:
contents = f.read()
try:
ast = l.parse(contents)
except UnexpectedInput as e:
print(f"{path}:{e.line}:{e.column}: unexpected input.")
print(" " + contents.split("\n")[e.line - 1])
print(" " + " " * e.column + "^")
return False
CompilerTransformer().transform(ast)
return True
Compiler().parse(sys.argv[1])
```
#### File: pysim/cpu_ax_13/cpu-combined.py
```python
import sys
from sim import (
Component,
Signal,
NotifySignal,
Net,
Register,
SplitRegister,
BusConnect,
Clock,
Ram,
Rom,
Power,
MemDisplay,
PagedRamController,
)
from .asm import Assembler
class Decoder(Component):
MASK_OP = 0b011
MASK_REG = 0b100
OP_NOR = 0b000
OP_ADD = 0b001
OP_ST = 0b010
OP_J = 0b011
REG_A = 0b000
REG_X = 0b100
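    # Instruction encoding, as decoded in update() below: the top three bits of
    # the first byte form the opcode (bit 2 selects register A or X, bits 0-1 the
    # operation), and the remaining 5 + 8 bits of the two instruction bytes form
    # a 13-bit operand address.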
def __init__(self):
super().__init__("decoder")
self.clk = NotifySignal(self, "clk", 1)
self.addr = Signal(self, "addr", 13)
self.data = Signal(self, "data", 8)
self.we = Signal(self, "we", 1)
self.oe = Signal(self, "oe", 1)
self.acc = 0
self.x = 0
self.adreg = 0
self.hi5 = 0
self.pc = 0
self.state = 0
self.op = 0
def reset(self):
self.addr <<= 0
self.data <<= None
self.oe <<= 1
self.we <<= 0
def update(self, signal):
if self.clk.had_edge(0, 1):
# print('clock {}'.format(self.state))
if self.state == 0:
self.pc = self.adreg + 2
self.adreg = self.adreg + 1
self.op = (self.data.value() >> 5) & 0b111
self.hi5 = self.data.value() & 0x1F
elif self.state == 1:
self.adreg = (self.hi5 << 8) | self.data.value()
elif self.state == 2:
self.adreg = self.pc
# ALU / Data Path
if self.op == Decoder.REG_A | Decoder.OP_ADD:
# print(' add a {} + {}'.format(self.acc, self.data.value()))
self.acc = ((self.acc & 0xFF) + self.data.value()) & 0x1FF
# print(' = {}'.format(self.acc))
elif self.op == Decoder.REG_A | Decoder.OP_NOR:
# print(' nor a {} + {}'.format(self.acc, self.data.value()))
carry = self.acc & 0b100000000
value = self.acc & 0xFF
nor = (~(value | self.data.value())) & 0xFF
self.acc = carry | nor
# print(' = {}'.format(self.acc))
elif self.op == Decoder.REG_X | Decoder.OP_ADD:
# print(' add x {} + {}'.format(self.x, self.data.value()))
self.x = ((self.x & 0xFF) + self.data.value()) & 0x1FF
elif self.op == Decoder.REG_X | Decoder.OP_NOR:
# print(' nor x {} + {}'.format(self.x, self.data.value()))
carry = self.x & 0b100000000
value = self.x & 0xFF
nor = (~(value | self.data.value())) & 0xFF
self.x = carry | nor
elif (self.op & Decoder.MASK_OP) == Decoder.OP_J:
# Clear carry on all non-taken jumps.
# print(' j not taken')
self.acc = self.acc & 0xFF
elif (self.op & Decoder.MASK_OP) == Decoder.OP_ST:
# print(' sta / stx')
pass
else:
print(" unknown op")
else:
print("unknown state")
# State machine
if self.state == 0:
# print('get next byte')
self.state = 1
elif self.state == 2:
self.state = 0
elif self.state == 1:
if (self.op & Decoder.MASK_OP) == Decoder.OP_J:
# print(' maybe jump {} {}'.format(self.acc >> 8, self.acc))
if self.op & Decoder.MASK_REG == Decoder.REG_A and (
self.acc & 0b100000000
):
# print(' jcc not taken')
self.state = 2
elif (
self.op & Decoder.MASK_REG == Decoder.REG_X
and (self.acc & 0xFF) == 0
):
# print(' jnz not taken')
self.state = 2
else:
# print(' branch taken')
self.state = 0
else:
self.state = 2
# print(' going to state={} op={:03b}'.format(self.state, self.op))
if self.op & Decoder.MASK_REG == 0:
# print('offset by x', self.x)
self.adreg += self.x
else:
print("unknown state")
clk = self.clk.value()
# print('addr: {:04x}'.format(self.adreg & 0x1fff))
self.addr <<= self.adreg & 0x1FFF
if self.state == 2 and self.op == Decoder.REG_A | Decoder.OP_ST:
self.data <<= self.acc & 0xFF
elif self.state == 2 and self.op == Decoder.REG_X | Decoder.OP_ST:
self.data <<= self.x & 0xFF
else:
self.data <<= None
if clk == 1:
self.oe <<= 0
self.we <<= 0
else:
if self.state == 2 and (self.op & Decoder.MASK_OP) == Decoder.OP_ST:
self.oe <<= 0
else:
self.oe <<= 1
if self.state == 2 and (self.op & Decoder.MASK_OP) == Decoder.OP_ST:
self.we <<= 1
else:
self.we <<= 0
def main():
dec = Decoder()
ram = Ram(addr_width=20)
paged_ram = PagedRamController(
addr_width=13, num_pages=2, reg_base_addr=2 ** 12 - 7
)
out = MemDisplay(addr_width=12, data_addr=2 ** 12 - 5, trigger_addr=2 ** 12 - 4)
clk = Clock(1)
dec.clk += clk.clk
paged_ram.in_addr[0:12] += ram.addr[0:12] + dec.addr[0:12] + out.addr
paged_ram.in_addr[12] += dec.addr[12]
ram.addr[12:20] += paged_ram.out_addr
ram.data += dec.data + out.data + paged_ram.data
ram.oe += dec.oe
ram.we += dec.we + out.we + paged_ram.we
print("Loading RAM...")
n = 0
with Assembler(ram.ram, 0) as asm:
if not asm.parse(sys.argv[1]):
return
ram.stdout()
for c in (dec, ram, paged_ram, clk):
c.info()
c.reset()
last_pc = None
cycles = 0
hlt = 0
try:
while True:
clk.tick()
cycles += 1
if dec.pc == last_pc:
hlt += 1
else:
hlt = 0
last_pc = dec.pc
if hlt > 10: # or cycles > 60:
break
except KeyboardInterrupt:
pass
print(f"Ran for {cycles} cycles and {Net.net_updates} net updates.")
ram.stdout()
if __name__ == "__main__":
main()
```
#### File: pysim/cpu_ax_5/asm.py
```python
import collections
from lark import Lark, UnexpectedInput
l = Lark(open("cpu_ax_5/asm.g").read(), parser="earley", lexer="auto")
class AssemblerTransformer:
def __init__(self, assembler):
self.assembler = assembler
def transform(self, ast):
for op in ast.children:
self.op(op.children)
def parse_number(self, token):
if token.type != "NUMBER":
raise ValueError(f'Invalid number token type {token.type} "{token}"')
if token.startswith("0x"):
return int(token, 16)
if token.startswith("0o"):
return int(token, 8)
return int(token, 10)
def op(self, m):
if m[0].type == "LABEL":
self.assembler.label(self.assembler.create_label(m[0]))
m = m[1:]
if m[0].type == "OP_DCB":
self.assembler.dcb(self.parse_number(m[1]))
elif m[0].type == "OP_NOR":
self.assembler.nor(self.assembler.create_label(m[1]))
elif m[0].type == "OP_ADD":
self.assembler.add(self.assembler.create_label(m[1]))
elif m[0].type == "OP_STA":
self.assembler.sta(self.assembler.create_label(m[1]))
elif m[0].type == "OP_CLR":
self.assembler.nor(self.assembler.create_label("allone"))
elif m[0].type == "OP_LDA":
self.assembler.lda(self.assembler.create_label(m[1]))
elif m[0].type == "OP_NOT":
self.assembler.nor(self.assembler.create_label("zero"))
elif m[0].type == "OP_SUB":
self.assembler.nor(self.assembler.create_label("zero"))
self.assembler.add(self.assembler.create_label(m[1]))
self.assembler.nor(self.assembler.create_label("zero"))
elif m[0].type == "OP_NORX":
self.assembler.norx(self.assembler.create_label(m[1]))
elif m[0].type == "OP_ADDX":
self.assembler.addx(self.assembler.create_label(m[1]))
elif m[0].type == "OP_STX":
self.assembler.stx(self.assembler.create_label(m[1]))
elif m[0].type == "OP_CLRX":
self.assembler.norx(self.assembler.create_label("allone"))
elif m[0].type == "OP_LDX":
self.assembler.ldx(self.assembler.create_label(m[1]))
elif m[0].type == "OP_NOTX":
self.assembler.norx(self.assembler.create_label("zero"))
elif m[0].type == "OP_SUBX":
self.assembler.norx(self.assembler.create_label("zero"))
self.assembler.addx(self.assembler.create_label(m[1]))
self.assembler.norx(self.assembler.create_label("zero"))
elif m[0].type == "OP_JCC":
self.assembler.jcc(self.assembler.create_label(m[1]))
elif m[0].type == "OP_JNZ":
self.assembler.jnz(self.assembler.create_label(m[1]))
elif m[0].type == "OP_JMP":
self.assembler.jcc(self.assembler.create_label(m[1]))
self.assembler.jcc(self.assembler.create_label(m[1]))
elif m[0].type == "OP_JCS":
self.assembler.jcs(self.assembler.create_label(m[1]))
elif m[0].type == "OP_JZ":
self.assembler.jz(self.assembler.create_label(m[1]))
elif m[0].type == "OP_HLT":
self.assembler.hlt()
elif m[0].type == "OP_OUT":
self.assembler.sta(self.assembler.create_label("display"))
self.assembler.lda(self.assembler.create_label("trigger"))
self.assembler.nor(self.assembler.create_label("one"))
self.assembler.sta(self.assembler.create_label("trigger"))
self.assembler.lda(self.assembler.create_label("display"))
else:
raise ValueError(f"Unknown op: {m}")
class Assembler:
PREFIX_NOR = 0b00000000
PREFIX_ADD = 0b00100000
PREFIX_STA = 0b01000000
PREFIX_JCC = 0b01100000
PREFIX_NORX = 0b10000000
PREFIX_ADDX = 0b10100000
PREFIX_STX = 0b11000000
PREFIX_JNZ = 0b11100000
def __init__(self, data, addr):
print(len(data))
self.data = data
self.addr = addr
self.labels = set()
self.labels_by_name = collections.defaultdict(Assembler.Label)
def create_label(self, name):
l = self.labels_by_name[name]
l.name = name
self.labels.add(l)
return l
class Label:
def __init__(self):
self.addr = None
self.name = None
self.fixups = []
def write(self, instr):
self.data[self.addr] = instr
self.addr += 1
def __enter__(self):
return self
def __exit__(self, a, b, c):
self.addr = 2 ** 5 - 5
self.label(self.create_label("display"))
self.dcb(0)
self.label(self.create_label("trigger"))
self.dcb(0)
self.label(self.create_label("zero"))
self.dcb(0)
self.label(self.create_label("allone"))
self.dcb(0xFF)
self.label(self.create_label("one"))
self.dcb(1)
for l in self.labels:
if l.addr is None:
raise ValueError(f'Undefined label "{l.name}"')
for f in l.fixups:
self.data[f] |= l.addr
def label(self, l):
if l.addr is not None:
raise ValueError("Label redefinition")
l.addr = self.addr
def placeholder(self, label):
self.labels.add(label)
label.fixups.append(self.addr)
def nor(self, label):
print("nor {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_NOR)
def add(self, label):
print("add {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_ADD)
def sta(self, label):
print("sta {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_STA)
def lda(self, label):
print(" lda {}".format(label.name))
self.nor(self.create_label("allone"))
self.add(label)
def norx(self, label):
print("norx {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_NORX)
def addx(self, label):
print("addx {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_ADDX)
def stx(self, label):
print("stx {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_STX)
def ldx(self, label):
print(" ldx {}".format(label.name))
self.norx(self.create_label("allone"))
self.addx(label)
def jcc(self, label):
print("jcc {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_JCC)
def jcs(self, label):
print(" jcs {}".format(label.name))
self.write(Assembler.PREFIX_JCC | (self.addr + 2))
self.jcc(label)
def jnz(self, label):
print("jnz {}".format(label.name))
self.placeholder(label)
self.write(Assembler.PREFIX_JNZ)
def jz(self, label):
print(" jz {}".format(label.name))
self.write(Assembler.PREFIX_JNZ | (self.addr + 2))
self.jnz(label)
def hlt(self):
print(" hlt")
self.write(Assembler.PREFIX_JCC | self.addr)
self.write(Assembler.PREFIX_JCC | self.addr)
def dcb(self, v):
print("dcb 0x{:02x} (at 0x{:02x})".format(v, self.addr))
self.write(v)
def parse(self, path):
with open(path) as f:
contents = f.read()
try:
ast = l.parse(contents)
except UnexpectedInput as e:
print(f"{path}:{e.line}:{e.column}: unexpected input.")
print(" " + contents.split("\n")[e.line - 1])
print(" " + " " * e.column + "^")
return False
AssemblerTransformer(self).transform(ast)
return True
``` |
{
"source": "jimmo/stair-lights",
"score": 2
} |
#### File: stair-lights/esp8266/boot.py
```python
import gc
def connect_sta():
import network
import config
ap_if = network.WLAN(network.AP_IF)
ap_if.active(False)
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
sta_if.config(dhcp_hostname='stairs')
print('Connecting to network...')
sta_if.connect(config.WIFI_SSID, config.WIFI_PASSWORD)
while not sta_if.isconnected():
pass
print('Network config:', sta_if.ifconfig())
connect_sta()
import webrepl
webrepl.start()
gc.collect()
``` |
{
"source": "jimms/anime-downloader",
"score": 3
} |
#### File: anime_downloader/sites/erairaws.py
```python
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
from difflib import get_close_matches
import re
class EraiRaws(Anime, sitename='erai-raws'):
sitename = 'erai-raws'
QUALITIES = ['720p', '1080p']
# Bypass DDosGuard
def bypass(self):
host = "https://erai-raws.info"
resp = helpers.get("https://check.ddos-guard.net/check.js").text
ddosBypassPath = re.search("'(.*?)'", resp).groups()[0]
helpers.get(host + ddosBypassPath)
def parse(self, rows, url):
episodes = []
if self.quality == self.QUALITIES[0] and len(rows) > 1:
rows = rows[::2]
elif len(rows) > 1:
rows = rows[1::2]
for row in rows:
if row.parent.get("href")[-3:] != "mkv":
if url[-1] != '/':
url = url + '/'
folder = helpers.get(url + "index.php" + row.parent.get("href"))
folder = helpers.soupify(folder)
# Append all episodes in folder - folders are also seperated by quality
# So everything in a folder can be taken in one go
[episodes.append(url + x.parent.get("href")) for x in folder.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})]
else:
episodes.append(url + row.parent.get("href"))
episodes = episodes[1:]
if len(rows) == 1:
if rows[0].parent.get("href")[-3:] != "mkv":
url = f"{url}index.php" if url[:-1] == "/" else f"{url}/index.php"
folder = helpers.soupify(helpers.get(url + rows[0].parent.get("href")))
episodes = [url + x.parent.get("href") for x in folder.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})]
else:
episodes = [url + rows[0].parent["href"]]
return episodes
@classmethod
def search(cls, query):
cls.bypass(cls)
soup = helpers.soupify(helpers.get("https://erai-raws.info/anime-list/"))
result_data = soup.find("div", {"class": "shows-wrapper"}).find_all("a")
titles = [x.text.strip() for x in result_data]
# Erai-raws doesnt have a search that I could find - so I've opted to implement it myself
titles = get_close_matches(query, titles, cutoff=0.2)
result_data = [x for x in result_data if x.text.strip() in titles]
search_results = [
SearchResult(
title=result.text.strip(),
url="https://erai-raws.info/anime-list/" + result.get("href")
)
for result in result_data
]
return search_results
def _scrape_episodes(self):
self.bypass()
soup = helpers.soupify(helpers.get(self.url))
files = soup.find("div", {"class": "ddmega"}).find("a").get("href")
if files[-1] != '/':
files = files + '/'
index = files + "index.php"
html = helpers.get(index, headers={"Referer": files})
soup = helpers.soupify(html)
rows = soup.find("ul", {"id": "directory-listing"}).find_all("div", {"class": "row"})
episodes = self.parse(rows, files)
return episodes
def _scrape_metadata(self):
soup = helpers.soupify(helpers.get(self.url))
self.title = soup.find("h1").find("span").text
class EraiRawsEpisode(AnimeEpisode, sitename='erai-raws'):
def _get_sources(self):
return [("no_extractor", self.url)]
``` |
{
"source": "jimm-with-a-j/dt-api",
"score": 2
} |
#### File: dynatrace/environment_v1/event.py
```python
from datetime import datetime
from typing import Optional, List
from requests import Response
from dynatrace.dynatrace_object import DynatraceObject
from dynatrace.http_client import HttpClient
EVENT_TYPE_AVAILABILITY_EVENT = "AVAILABILITY_EVENT"
EVENT_TYPE_CUSTOM_ALERT = "CUSTOM_ALERT"
EVENT_TYPE_CUSTOM_ANNOTATION = "CUSTOM_ANNOTATION"
EVENT_TYPE_CUSTOM_CONFIGURATION = "CUSTOM_CONFIGURATION"
EVENT_TYPE_CUSTOM_DEPLOYMENT = "CUSTOM_DEPLOYMENT"
EVENT_TYPE_CUSTOM_INFO = "CUSTOM_INFO"
EVENT_TYPE_ERROR_EVENT = "ERROR_EVENT"
EVENT_TYPE_MARKED_FOR_TERMINATION = "MARKED_FOR_TERMINATION"
EVENT_TYPE_PERFORMANCE_EVENT = "PERFORMANCE_EVENT"
EVENT_TYPE_RESOURCE_CONTENTION = "RESOURCE_CONTENTION"
class EventService:
def __init__(self, http_client: HttpClient):
self.__http_client = http_client
def create_event(
self,
event_type: str,
entity_id: str,
source: str,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
timeout_minutes: Optional[int] = None,
annotation_type: Optional[str] = None,
annotation_description: Optional[str] = None,
description: Optional[str] = None,
title: Optional[str] = None,
custom_properties: Optional[str] = None,
allow_davis_merge: Optional[bool] = None,
) -> Response:
attach_rules = PushEventAttachRules(entity_ids=[entity_id], tag_rule=None)
return EventCreation(
self.__http_client,
event_type=event_type,
attach_rules=attach_rules,
source=source,
start=start,
end=end,
timeout_minutes=timeout_minutes,
annotation_type=annotation_type,
annotation_description=annotation_description,
description=description,
title=title,
custom_properties=custom_properties,
allow_davis_merge=allow_davis_merge,
).post()
class EventCreation(DynatraceObject):
def __init__(
self,
http_client,
event_type: str,
attach_rules: "PushEventAttachRules",
source: str,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
timeout_minutes: Optional[int] = None,
annotation_type: Optional[str] = None,
annotation_description: Optional[str] = None,
description: Optional[str] = None,
title: Optional[str] = None,
custom_properties: Optional[str] = None,
allow_davis_merge: Optional[bool] = None,
):
raw_element = {
"eventType": event_type,
"start": int(start.timestamp()) * 1000 if start else None,
"end": int(end.timestamp()) * 1000 if start else None,
"timeoutMinutes": timeout_minutes,
"source": source,
"annotationType": annotation_type,
"annotationDescription": annotation_description,
"attachRules": attach_rules._raw_element,
"description": description,
"title": title,
"customProperties": custom_properties,
"allowDavisMerge": allow_davis_merge,
}
super().__init__(http_client, None, raw_element)
def post(self):
return self._http_client.make_request(f"/api/v1/events", params=self._raw_element, method="POST")
class PushEventAttachRules:
def __init__(self, entity_ids: Optional[List[str]], tag_rule: Optional[List["TagMatchRule"]]):
self._raw_element = {
"entityIds": entity_ids,
"tagRule": [t._raw_element for t in tag_rule] if tag_rule else None,
}
class TagMatchRule:
def __init__(self, me_types: List[str], tags: List[str]):
self._raw_element = {
"meTypes": me_types,
"tags": tags,
}
"""
type EventStoreResult struct {
StoredEventIds []int `json:"storedEventIds,omitempty"`
StoredIds []string `json:"storedIds,omitempty"`
StoredCorrelationIds []string `json:"storedCorrelationIds,omitempty"`
}
"""
```
#### File: dynatrace/environment_v1/synthetic_third_party.py
```python
from collections import defaultdict
from datetime import datetime
from typing import List, Optional, Dict
from dynatrace.dynatrace_object import DynatraceObject
from dynatrace.http_client import HttpClient
SYNTHETIC_EVENT_TYPE_OUTAGE = "testOutage"
SYNTHETIC_EVENT_TYPE_SLOWDOWN = "testSlowdown"
class ThirdPartySyntheticTestsService:
def __init__(self, http_client: HttpClient):
self.__http_client = http_client
def report_simple_thirdparty_synthetic_test(
self,
engine_name: str,
timestamp: datetime,
location_id: str,
location_name: str,
test_id: str,
test_title: str,
schedule_interval: int,
success: bool,
response_time: int,
icon_url: str = None,
edit_link: str = None,
step_title: Optional[str] = None,
detailed_steps: Optional[List["SyntheticTestStep"]] = None,
detailed_step_results: Optional[List["SyntheticMonitorStepResult"]] = None,
):
location = ThirdPartySyntheticLocation(self.__http_client, location_id, location_name)
synthetic_location = SyntheticTestLocation(self.__http_client, location_id)
if detailed_steps is None:
detailed_steps = [SyntheticTestStep(self.__http_client, 1, step_title)]
monitor = ThirdPartySyntheticMonitor(
self.__http_client, test_id, test_title, [synthetic_location], schedule_interval, steps=detailed_steps, edit_link=edit_link,
)
if detailed_step_results is None:
detailed_step_results = [SyntheticMonitorStepResult(self.__http_client, 1, timestamp, response_time_millis=response_time)]
location_result = ThirdPartySyntheticLocationTestResult(self.__http_client, location_id, timestamp, success, step_results=detailed_step_results)
test_result = ThirdPartySyntheticResult(self.__http_client, test_id, len(detailed_steps), [location_result])
tests = ThirdPartySyntheticTests(self.__http_client, engine_name, timestamp, [location], [monitor], [test_result], synthetic_engine_icon_url=icon_url)
return tests.post()
def create_synthetic_test_step_result(self, step_id: int, timestamp: datetime, response_time: int) -> "SyntheticMonitorStepResult":
return SyntheticMonitorStepResult(self.__http_client, step_id, timestamp, response_time_millis=response_time)
def create_synthetic_test_step(self, step_id: int, step_title: str) -> "SyntheticTestStep":
return SyntheticTestStep(self.__http_client, step_id, step_title)
def report_simple_thirdparty_synthetic_test_event(
self, test_id: str, name: str, location_id: str, timestamp: datetime, state: str, event_type: str, reason: str, engine_name: str,
):
opened_events: List[ThirdPartyEventOpenNotification] = []
resolved_events = []
event_id = f"{test_id}_event"
if state == "open":
opened_events.append(ThirdPartyEventOpenNotification(self.__http_client, test_id, event_id, name, event_type, reason, timestamp, [location_id]))
else:
resolved_events.append(ThirdPartyEventResolvedNotification(self.__http_client, test_id, event_id, timestamp))
if opened_events or resolved_events:
events = ThirdPartySyntheticEvents(self.__http_client, engine_name, opened_events, resolved_events)
return events.post()
class ThirdPartySyntheticTests(DynatraceObject):
def __init__(
self,
http_client,
synthetic_engine_name: str,
message_timestamp: datetime,
locations: List["ThirdPartySyntheticLocation"],
tests: List["ThirdPartySyntheticMonitor"],
test_results: Optional[List["ThirdPartySyntheticResult"]] = None,
synthetic_engine_icon_url: Optional[str] = None,
):
raw_element = {
"syntheticEngineName": synthetic_engine_name,
"syntheticEngineIconUrl": synthetic_engine_icon_url,
"messageTimestamp": int(message_timestamp.timestamp() * 1000),
"locations": [location._raw_element for location in locations],
"tests": [test._raw_element for test in tests],
"testResults": [test_result._raw_element for test_result in test_results] if test_results else None,
}
super().__init__(http_client, None, raw_element)
def post(self):
return self._http_client.make_request(f"/api/v1/synthetic/ext/tests", params=self._raw_element, method="POST")
class ThirdPartySyntheticLocation(DynatraceObject):
def __init__(self, http_client, location_id: str, name: str, ip: Optional[str] = None):
raw_element = {"id": location_id, "name": name, "ip": ip}
super().__init__(http_client, None, raw_element)
class ThirdPartySyntheticMonitor(DynatraceObject):
def __init__(
self,
http_client,
test_id: str,
title: str,
locations: List["SyntheticTestLocation"],
schedule_interval_in_seconds: int,
description: Optional[str] = None,
test_setup: Optional[str] = None,
expiration_timestamp: Optional[int] = None,
drilldown_link: Optional[str] = None,
edit_link: Optional[str] = None,
deleted: Optional[bool] = None,
steps: Optional[List["SyntheticTestStep"]] = None,
no_data_timeout: Optional[int] = None,
):
raw_element = {
"id": test_id,
"title": title,
"description": description,
"testSetup": test_setup,
"expirationTimestamp": expiration_timestamp,
"drilldownLink": drilldown_link,
"editLink": edit_link,
"deleted": deleted,
"locations": [location._raw_element for location in locations],
"steps": [step._raw_element for step in steps] if steps else None,
"scheduleIntervalInSeconds": schedule_interval_in_seconds,
"noDataTimeout": no_data_timeout,
}
super().__init__(http_client, None, raw_element)
class SyntheticTestLocation(DynatraceObject):
def __init__(self, http_client, location_id: str, enabled: Optional[bool] = None):
raw_element = {"id": location_id, "enabled": enabled}
super().__init__(http_client, None, raw_element)
class SyntheticTestStep(DynatraceObject):
def __init__(self, http_client, step_id: int, title: str):
self.step_id = step_id
self.title = title
raw_element = {"id": step_id, "title": title}
super().__init__(http_client, None, raw_element)
class ThirdPartySyntheticResult(DynatraceObject):
def __init__(
self,
http_client,
test_id: str,
total_step_count: int,
location_results: List["ThirdPartySyntheticLocationTestResult"],
schedule_interval_in_seconds: Optional[int] = None,
):
raw_element = {
"id": test_id,
"scheduleIntervalInSeconds": schedule_interval_in_seconds,
"totalStepCount": total_step_count,
"locationResults": [location_result._raw_element for location_result in location_results],
}
super().__init__(http_client, None, raw_element)
class ThirdPartySyntheticLocationTestResult(DynatraceObject):
def __init__(
self,
http_client,
location_id: str,
start_timestamp: datetime,
success: bool,
success_rate: Optional[float] = None,
response_time_millis: Optional[int] = None,
step_results: Optional[List["SyntheticMonitorStepResult"]] = None,
):
raw_element = {
"id": location_id,
"startTimestamp": int(start_timestamp.timestamp() * 1000),
"successRate": success_rate,
"success": success,
"responseTimeMillis": response_time_millis,
"stepResults": [step_result._raw_element for step_result in step_results] if step_results else None,
}
super().__init__(http_client, None, raw_element)
class SyntheticMonitorStepResult(DynatraceObject):
def __init__(
self, http_client, step_id: int, start_timestamp: datetime, response_time_millis: Optional[int] = None, error: Optional["SyntheticMonitorError"] = None,
):
raw_element = {
"id": step_id,
"startTimestamp": int(start_timestamp.timestamp() * 1000),
"responseTimeMillis": response_time_millis,
"error": error._raw_element if error else None,
}
super().__init__(http_client, None, raw_element)
class SyntheticMonitorError(DynatraceObject):
def __init__(self, http_client, code: int, message: str):
raw_element = {"code": code, "message": message}
super().__init__(http_client, None, raw_element)
class ThirdPartySyntheticEvents(DynatraceObject):
def __init__(
self,
http_client,
synthetic_engine_name: str,
open_events: Optional[List["ThirdPartyEventOpenNotification"]],
resolved_events: Optional[List["ThirdPartyEventResolvedNotification"]],
):
raw_element = {
"syntheticEngineName": synthetic_engine_name,
"open": [open_event._raw_element for open_event in open_events] if open_events else None,
"resolved": [resolved_event._raw_element for resolved_event in resolved_events] if resolved_events else None,
}
super().__init__(http_client, None, raw_element)
def post(self):
return self._http_client.make_request(f"/api/v1/synthetic/ext/events", params=self._raw_element, method="POST")
class ThirdPartyEventOpenNotification(DynatraceObject):
def __init__(
self, http_client, test_id: str, event_id: str, name: str, event_type: str, reason: str, start_timestamp: datetime, location_ids: List[str],
):
raw_element = {
"testId": test_id,
"eventId": event_id,
"name": name,
"eventType": event_type,
"reason": reason,
"startTimestamp": int(start_timestamp.timestamp() * 1000),
"locationIds": location_ids,
}
super().__init__(http_client, None, raw_element)
class ThirdPartyEventResolvedNotification(DynatraceObject):
def __init__(self, http_client, test_id: str, event_id: str, end_timestamp: datetime):
raw_element = {"testId": test_id, "eventId": event_id, "endTimestamp": int(end_timestamp.timestamp() * 1000)}
super().__init__(http_client, None, raw_element)
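# Usage sketch (hedged): the http_client wiring and all identifiers below are
# hypothetical placeholders; only the keyword names come from the signature of
# report_simple_thirdparty_synthetic_test above.
#
#   from datetime import datetime
#
#   service = ThirdPartySyntheticTestsService(http_client)
#   service.report_simple_thirdparty_synthetic_test(
#       engine_name="my-engine",
#       timestamp=datetime.utcnow(),
#       location_id="loc-1",
#       location_name="My Location",
#       test_id="test-1",
#       test_title="Illustrative test",
#       schedule_interval=60,
#       success=True,
#       response_time=123,
#   )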
```
#### File: dynatrace/environment_v2/metrics.py
```python
from datetime import datetime
from enum import Enum
from typing import List, Optional, Union, Dict, Any
from requests import Response
from dynatrace.dynatrace_object import DynatraceObject
from dynatrace.http_client import HttpClient
from dynatrace.pagination import PaginatedList
from dynatrace.utils import timestamp_to_string, int64_to_datetime
class MetricService:
def __init__(self, http_client: HttpClient):
self.__http_client = http_client
def query(
self,
metric_selector: str,
resolution: str = None,
time_from: Optional[Union[datetime, str]] = None,
time_to: Optional[Union[datetime, str]] = None,
entity_selector: Optional[str] = None,
) -> PaginatedList["MetricSeriesCollection"]:
params = {
"metricSelector": metric_selector,
"resolution": resolution,
"from": timestamp_to_string(time_from),
"to": timestamp_to_string(time_to),
"entitySelector": entity_selector,
}
return PaginatedList(MetricSeriesCollection, self.__http_client, "/api/v2/metrics/query", params, list_item="result")
def list(
self,
metric_selector: Optional[str] = None,
text: Optional[str] = None,
fields: Optional[str] = None,
written_since: Optional[Union[str, datetime]] = None,
metadata_selector: Optional[str] = None,
page_size=100,
) -> PaginatedList["MetricDescriptor"]:
params = {
"pageSize": page_size,
"metricSelector": metric_selector,
"text": text,
"fields": fields,
"writtenSince": timestamp_to_string(written_since),
"metadataSelector": metadata_selector,
}
return PaginatedList(MetricDescriptor, self.__http_client, "/api/v2/metrics", params, list_item="metrics")
def get(self, metric_id: str) -> "MetricDescriptor":
response = self.__http_client.make_request(f"/api/v2/metrics/{metric_id}").json()
return MetricDescriptor(http_client=self.__http_client, raw_element=response)
def delete(self, metric_id) -> Response:
return self.__http_client.make_request(f"/api/v2/metrics/{metric_id}", method="DELETE")
def ingest(self, lines: List[str]):
lines = "\n".join(lines)
return self.__http_client.make_request(
f"/api/v2/metrics/ingest", method="POST", data=lines, headers={"Content-Type": "text/plain; charset=utf-8"}
).json()
class MetricSeries(DynatraceObject):
def _create_from_raw_data(self, raw_element):
self.timestamps: List[datetime] = [int64_to_datetime(timestamp) for timestamp in raw_element.get("timestamps", [])]
self.dimensions: List[str] = raw_element.get("dimensions", [])
self.values: List[float] = raw_element.get("values", [])
self.dimension_map: Optional[Dict[str, Any]] = raw_element.get("dimensionMap", [])
class MetricSeriesCollection(DynatraceObject):
def _create_from_raw_data(self, raw_element: dict):
self.metric_id: str = raw_element.get("metricId")
self.data: List[MetricSeries] = [MetricSeries(self._http_client, self._headers, metric_serie) for metric_serie in raw_element.get("data", [])]
self.warnings: Optional[List[str]] = raw_element.get("warnings")
class MetricDefaultAggregation(DynatraceObject):
def _create_from_raw_data(self, raw_element):
self.parameter: float = raw_element.get("parameter")
self.type: str = raw_element.get("type")
class MetricDimensionDefinition(DynatraceObject):
def _create_from_raw_data(self, raw_element):
self.index: int = raw_element.get("index")
self.name: str = raw_element.get("name")
self.key: str = raw_element.get("key")
self.type: str = raw_element.get("type")
class AggregationType(Enum):
AUTO = "auto"
AVG = "avg"
COUNT = "count"
MAX = "max"
MEDIAN = "median"
MIN = "min"
PERCENTILE = "percentile"
SUM = "sum"
VALUE = "value"
class Transformation(Enum):
DEFAULT = "default"
FILTER = "filter"
FOLD = "fold"
LAST = "last"
LIMIT = "limit"
MERGE = "merge"
NAMES = "names"
PARENTS = "parents"
RATE = "rate"
SORT = "sort"
SPLITBY = "splitBy"
TIMESHIFT = "timeshift"
class Unit(Enum):
BIT = "Bit"
BITPERHOUR = "BitPerHour"
BITPERMINUTE = "BitPerMinute"
BITPERSECOND = "BitPerSecond"
BYTE = "Byte"
BYTEPERHOUR = "BytePerHour"
BYTEPERMINUTE = "BytePerMinute"
BYTEPERSECOND = "BytePerSecond"
CORES = "Cores"
COUNT = "Count"
DAY = "Day"
DECIBELMILLIWATT = "DecibelMilliWatt"
GIBIBYTE = "GibiByte"
GIGA = "Giga"
GIGABYTE = "GigaByte"
HOUR = "Hour"
KIBIBYTE = "KibiByte"
KIBIBYTEPERHOUR = "KibiBytePerHour"
KIBIBYTEPERMINUTE = "KibiBytePerMinute"
KIBIBYTEPERSECOND = "KibiBytePerSecond"
KILO = "Kilo"
KILOBYTE = "KiloByte"
KILOBYTEPERHOUR = "KiloBytePerHour"
KILOBYTEPERMINUTE = "KiloBytePerMinute"
KILOBYTEPERSECOND = "KiloBytePerSecond"
MSU = "MSU"
MEBIBYTE = "MebiByte"
MEBIBYTEPERHOUR = "MebiBytePerHour"
MEBIBYTEPERMINUTE = "MebiBytePerMinute"
MEBIBYTEPERSECOND = "MebiBytePerSecond"
MEGA = "Mega"
MEGABYTE = "MegaByte"
MEGABYTEPERHOUR = "MegaBytePerHour"
MEGABYTEPERMINUTE = "MegaBytePerMinute"
MEGABYTEPERSECOND = "MegaBytePerSecond"
MICROSECOND = "MicroSecond"
MILLICORES = "MilliCores"
MILLISECOND = "MilliSecond"
MILLISECONDPERMINUTE = "MilliSecondPerMinute"
MINUTE = "Minute"
MONTH = "Month"
NANOSECOND = "NanoSecond"
NANOSECONDPERMINUTE = "NanoSecondPerMinute"
NOTAPPLICABLE = "NotApplicable"
PERHOUR = "PerHour"
PERMINUTE = "PerMinute"
PERSECOND = "PerSecond"
PERCENT = "Percent"
PIXEL = "Pixel"
PROMILLE = "Promille"
RATIO = "Ratio"
SECOND = "Second"
STATE = "State"
UNSPECIFIED = "Unspecified"
WEEK = "Week"
YEAR = "Year"
class MetricDescriptor(DynatraceObject):
def _create_from_raw_data(self, raw_element):
# required
self.metric_id: str = raw_element.get("metricId")
# optional
self.aggregation_types: Optional[List[AggregationType]] = [AggregationType(element) for element in raw_element.get("aggregationTypes", [])]
self.created: Optional[datetime] = int64_to_datetime(raw_element.get("created"))
self.ddu_billable: Optional[bool] = raw_element.get("dduBillable")
self.default_aggregation: Optional[MetricDefaultAggregation] = MetricDefaultAggregation(raw_element=raw_element.get("defaultAggregation"))
self.description: Optional[str] = raw_element.get("description")
self.dimension_definitions: Optional[List[MetricDimensionDefinition]] = [
MetricDimensionDefinition(raw_element=element) for element in raw_element.get("dimensionDefinitions", [])
]
self.display_name: Optional[str] = raw_element.get("displayName")
self.entity_type: Optional[List[str]] = raw_element.get("entityType", [])
self.impact_relevant: Optional[bool] = raw_element.get("impactRelevant")
self.last_written: Optional[datetime] = int64_to_datetime(raw_element.get("lastWritten"))
self.maximum_value: Optional[float] = raw_element.get("maximumValue")
self.metric_value_type: Optional["MetricValueType"] = (
MetricValueType(raw_element=raw_element.get("metricValueType")) if raw_element.get("metricValueType") else None
)
self.minimum_value: Optional[float] = raw_element.get("minimumValue")
self.root_cause_relevant: Optional[bool] = raw_element.get("rootCauseRelevant")
self.tags: Optional[List[str]] = raw_element.get("tags")
self.transformations: Optional[List[Transformation]] = [Transformation(element) for element in raw_element.get("transformations", [])]
self.unit: Optional[Unit] = Unit(raw_element.get("unit"))
self.warnings: Optional[List[str]] = raw_element.get("warnings")
class ValueType(Enum):
ERROR = "error"
SCORE = "score"
UNKNOWN = "unknown"
class MetricValueType(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.type = ValueType(raw_element.get("type"))
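# Usage sketch (hedged): the metric selector and http_client below are
# placeholders; query() returns a PaginatedList of MetricSeriesCollection.
#
#   service = MetricService(http_client)
#   for collection in service.query("some.metric.key", resolution="1h"):
#       for series in collection.data:
#           print(collection.metric_id, series.dimensions, series.values)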
```
#### File: test/environment_v1/test_smartscape_hosts.py
```python
from dynatrace import Dynatrace
from dynatrace.environment_v1.smartscape_hosts import Host, MonitoringMode, OSArchitecture
from dynatrace.pagination import HeaderPaginatedList
def test_list(dt: Dynatrace):
hosts = dt.smartscape_hosts.list(page_size=20)
assert isinstance(hosts, HeaderPaginatedList)
for host in hosts:
assert isinstance(host, Host)
assert host.entity_id == "HOST-7EC661999923A6B9"
assert host.discovered_name == "TAG009444368559.clients.example.com"
assert host.last_seen_timestamp == 1621519976487
for tag in host.tags:
assert tag.context == "CONTEXTLESS"
assert tag.key == "APP1234567"
break
assert host.os_version == "Windows 10 Enterprise 20H2 2009, ver. 10.0.19042"
assert host.monitoring_mode == MonitoringMode.FULL_STACK
assert host.consumed_host_units == 2.0
assert host.os_architecture == OSArchitecture.X_EIGHTY_SIX
assert host.cpu_cores == 8
break
``` |
{
"source": "JimmXinu/brotlidecpy",
"score": 2
} |
#### File: brotlidecpy/test/test_decompress.py
```python
import unittest
import os
import timeit
import _test_utils
from brotlidecpy import decompress
from brotli import decompress as brotlidecompress
class TestDecompress(_test_utils.TestCase):
def _test_decompress(self, test_data):
"""This performs the same decompression tests as in the Python bindings of brotli reference implementation"""
temp_uncompressed = _test_utils.get_temp_uncompressed_name(test_data)
original = _test_utils.get_uncompressed_name(test_data)
with open(temp_uncompressed, 'wb') as out_file:
with open(test_data, 'rb') as in_file:
out_file.write(decompress(in_file.read()))
self.assertFilesMatch(temp_uncompressed, original)
def _test_brotli_decompress_buffer(self, test_data):
"""This tests that in memory buffer to buffer decompression of test data gets expected results"""
with open(test_data, 'rb') as f:
compressed_buffer = f.read()
with open(_test_utils.get_uncompressed_name(test_data), 'rb') as f:
uncompressed_buffer = f.read()
result_buffer = decompress(compressed_buffer)
self.assertSequenceEqual(uncompressed_buffer, result_buffer, "Failed decompress of %s" %
os.path.basename(test_data))
def _test_against_pylib_brotli(self, test_data):
"""This confirms that this package decompresses same as the C reference implementation that is in PyPI.
It also prints execution times to serve as a performance test, though unit tests are not usually for that"""
with open(_test_utils.get_uncompressed_name(test_data), 'rb') as f:
original_uncompressed_buffer = f.read()
with open(test_data, 'rb') as f:
compressed_buffer = f.read()
ref_time = timeit.default_timer()
ref_uncompressed_buffer = brotlidecompress(compressed_buffer) # using fast brotli library
ref_time = timeit.default_timer() - ref_time
test_time = timeit.default_timer()
test_uncompressed_buffer = decompress(compressed_buffer) # testing this package, should be intermediate time
test_time = timeit.default_timer() - test_time
self.assertSequenceEqual(ref_uncompressed_buffer, original_uncompressed_buffer,
msg="Something wrong with test:"
" Reference decompress does not match uncompressed test data file")
self.assertSequenceEqual(original_uncompressed_buffer, test_uncompressed_buffer,
msg="Test failure in decompress of %s" % os.path.basename(test_data))
print("File '%s' Times msec C ref: %.3f, this test: %.3f" %
(os.path.basename(test_data),
ref_time * 1000,
test_time * 1000))
_test_utils.generate_test_methods(TestDecompress)
# when running this from cli set PYTHONPATH to parent directory of brotlidecpy so import will work
# e.g., PYTHONPATH=. python test/decompress_test.py
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jimmy0087/faceai-master",
"score": 2
} |
#### File: DAN/models/dan_models.py
```python
import numpy as np
import tensorflow as tf
from ..utils.layers import AffineTransformLayer, TransformParamsLayer, LandmarkImageLayer, LandmarkTransformLayer
from ..utils.utils import bestFit,bestFitRect
from scipy import ndimage
IMGSIZE = 112
N_LANDMARK = 68
def NormRmse(GroundTruth, Prediction):
    Gt = tf.reshape(GroundTruth, [-1, N_LANDMARK, 2])
Pt = tf.reshape(Prediction, [-1, N_LANDMARK, 2])
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.squared_difference(Gt, Pt), 2)), 1)
# norm = tf.sqrt(tf.reduce_sum(((tf.reduce_mean(Gt[:, 36:42, :],1) - \
# tf.reduce_mean(Gt[:, 42:48, :],1))**2), 1))
norm = tf.norm(tf.reduce_mean(Gt[:, 36:42, :],1) - tf.reduce_mean(Gt[:, 42:48, :],1), axis=1)
# cost = tf.reduce_mean(loss / norm)
return loss/norm
def DAN(MeanShapeNumpy):
MeanShape = tf.constant(MeanShapeNumpy, dtype=tf.float32)
InputImage = tf.placeholder(tf.float32,[None, IMGSIZE,IMGSIZE,1])
GroundTruth = tf.placeholder(tf.float32,[None, N_LANDMARK * 2])
S1_isTrain = tf.placeholder(tf.bool)
S2_isTrain = tf.placeholder(tf.bool)
Ret_dict = {}
Ret_dict['InputImage'] = InputImage
Ret_dict['GroundTruth'] = GroundTruth
Ret_dict['S1_isTrain'] = S1_isTrain
Ret_dict['S2_isTrain'] = S2_isTrain
with tf.variable_scope('Stage1'):
S1_Conv1a = tf.layers.batch_normalization(tf.layers.conv2d(InputImage,64,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Conv1b = tf.layers.batch_normalization(tf.layers.conv2d(S1_Conv1a,64,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Pool1 = tf.layers.max_pooling2d(S1_Conv1b,2,2,padding='same')
S1_Conv2a = tf.layers.batch_normalization(tf.layers.conv2d(S1_Pool1,128,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Conv2b = tf.layers.batch_normalization(tf.layers.conv2d(S1_Conv2a,128,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Pool2 = tf.layers.max_pooling2d(S1_Conv2b,2,2,padding='same')
S1_Conv3a = tf.layers.batch_normalization(tf.layers.conv2d(S1_Pool2,256,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Conv3b = tf.layers.batch_normalization(tf.layers.conv2d(S1_Conv3a,256,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Pool3 = tf.layers.max_pooling2d(S1_Conv3b,2,2,padding='same')
S1_Conv4a = tf.layers.batch_normalization(tf.layers.conv2d(S1_Pool3,512,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Conv4b = tf.layers.batch_normalization(tf.layers.conv2d(S1_Conv4a,512,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain)
S1_Pool4 = tf.layers.max_pooling2d(S1_Conv4b,2,2,padding='same')
S1_Pool4_Flat = tf.contrib.layers.flatten(S1_Pool4)
S1_DropOut = tf.layers.dropout(S1_Pool4_Flat,0.5,training=S1_isTrain)
S1_Fc1 = tf.layers.batch_normalization(tf.layers.dense(S1_DropOut,256,activation=tf.nn.relu,\
kernel_initializer=tf.glorot_uniform_initializer()),training=S1_isTrain,name = 'S1_Fc1')
S1_Fc2 = tf.layers.dense(S1_Fc1,N_LANDMARK * 2)
S1_Ret = S1_Fc2 + MeanShape
S1_Cost = tf.reduce_mean(NormRmse(GroundTruth, S1_Ret))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS,'Stage1')):
S1_Optimizer = tf.train.AdamOptimizer(0.001).minimize(S1_Cost,\
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,"Stage1"))
Ret_dict['S1_Ret'] = S1_Ret
Ret_dict['S1_Cost'] = S1_Cost
Ret_dict['S1_Optimizer'] = S1_Optimizer
with tf.variable_scope('Stage2'):
S2_AffineParam = TransformParamsLayer(S1_Ret, MeanShape)
S2_InputImage = AffineTransformLayer(InputImage, S2_AffineParam)
S2_InputLandmark = LandmarkTransformLayer(S1_Ret, S2_AffineParam)
S2_InputHeatmap = LandmarkImageLayer(S2_InputLandmark)
S2_Feature = tf.reshape(tf.layers.dense(S1_Fc1,int((IMGSIZE / 2) * (IMGSIZE / 2)),\
activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),(-1,int(IMGSIZE / 2),int(IMGSIZE / 2),1))
S2_FeatureUpScale = tf.image.resize_images(S2_Feature,(IMGSIZE,IMGSIZE),1)
S2_ConcatInput = tf.layers.batch_normalization(tf.concat([S2_InputImage,S2_InputHeatmap,S2_FeatureUpScale],3),\
training=S2_isTrain)
S2_Conv1a = tf.layers.batch_normalization(tf.layers.conv2d(S2_ConcatInput,64,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Conv1b = tf.layers.batch_normalization(tf.layers.conv2d(S2_Conv1a,64,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Pool1 = tf.layers.max_pooling2d(S2_Conv1b,2,2,padding='same')
S2_Conv2a = tf.layers.batch_normalization(tf.layers.conv2d(S2_Pool1,128,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Conv2b = tf.layers.batch_normalization(tf.layers.conv2d(S2_Conv2a,128,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Pool2 = tf.layers.max_pooling2d(S2_Conv2b,2,2,padding='same')
S2_Conv3a = tf.layers.batch_normalization(tf.layers.conv2d(S2_Pool2,256,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Conv3b = tf.layers.batch_normalization(tf.layers.conv2d(S2_Conv3a,256,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Pool3 = tf.layers.max_pooling2d(S2_Conv3b,2,2,padding='same')
S2_Conv4a = tf.layers.batch_normalization(tf.layers.conv2d(S2_Pool3,512,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Conv4b = tf.layers.batch_normalization(tf.layers.conv2d(S2_Conv4a,512,3,1,\
padding='same',activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Pool4 = tf.layers.max_pooling2d(S2_Conv4b,2,2,padding='same')
S2_Pool4_Flat = tf.contrib.layers.flatten(S2_Pool4)
S2_DropOut = tf.layers.dropout(S2_Pool4_Flat,0.5,training=S2_isTrain)
S2_Fc1 = tf.layers.batch_normalization(tf.layers.dense(S2_DropOut,256,\
activation=tf.nn.relu,kernel_initializer=tf.glorot_uniform_initializer()),training=S2_isTrain)
S2_Fc2 = tf.layers.dense(S2_Fc1,N_LANDMARK * 2)
S2_Ret = LandmarkTransformLayer(S2_Fc2 + S2_InputLandmark,S2_AffineParam, Inverse=True)
S2_Cost = tf.reduce_mean(NormRmse(GroundTruth,S2_Ret))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS,'Stage2')):
S2_Optimizer = tf.train.AdamOptimizer(0.0001).minimize(S2_Cost,\
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,"Stage2"))
Ret_dict['S2_Ret'] = S2_Ret
Ret_dict['S2_Cost'] = S2_Cost
Ret_dict['S2_Optimizer'] = S2_Optimizer
Ret_dict['S2_InputImage'] = S2_InputImage
Ret_dict['S2_InputLandmark'] = S2_InputLandmark
Ret_dict['S2_InputHeatmap'] = S2_InputHeatmap
Ret_dict['S2_FeatureUpScale'] = S2_FeatureUpScale
return Ret_dict
class DANDetector(object):
def __init__(self,init_inf,model_path):
self.initLandmarks = init_inf["initLandmarks"].reshape((-1,2))
self.meanImg = init_inf["meanImg"]
self.stdDevImg = init_inf["stdDevImg"]
self.nChannels = 1
self.imageHeight = IMGSIZE
self.imageWidth = IMGSIZE
self.dan = DAN(init_inf["initLandmarks"])
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
saver = tf.train.Saver()
#model_dict = '/'.join(model_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(model_path)
print(model_path)
readstate = ckpt and ckpt.model_checkpoint_path
assert readstate, "the params dictionary is not valid"
print("restore models' param")
saver.restore(self.sess, model_path+'/model')
def predict(self, databatch):
s2_landmarks = self.sess.run(self.dan['S2_Ret'],
feed_dict={self.dan['InputImage']:databatch,
self.dan['S1_isTrain']:False,self.dan['S2_isTrain']:False})
return s2_landmarks
def processImg(self,input,recs):
gray_img = np.mean(input, axis=2).astype(np.uint8)
initLandmarks_fitrec = bestFitRect(None, self.initLandmarks, recs)
inputImg, transform = self.CropResizeRotate(gray_img[np.newaxis], initLandmarks_fitrec)
inputImg = inputImg[:,:,:,np.newaxis]
inputImg = inputImg - self.meanImg[np.newaxis]
inputImg = inputImg / self.stdDevImg[np.newaxis]
output = self.predict(inputImg)
landmarks = output.reshape((-1, 2))
return np.dot(landmarks - transform[1], np.linalg.inv(transform[0]))
def CropResizeRotate(self, img, inputShape):
A, t = bestFit(self.initLandmarks, inputShape, True)
A2 = np.linalg.inv(A)
t2 = np.dot(-t, A2)
outImg = np.zeros((self.nChannels, self.imageHeight, self.imageWidth), dtype=np.float32)
for i in range(img.shape[0]):
outImg[i] = ndimage.interpolation.affine_transform(img[i], A2, t2[[1, 0]],
output_shape=(self.imageHeight, self.imageWidth))
return outImg, [A, t]
```
#### File: DAN/models/DAN.py
```python
from faceai.Detection import *
from .dan_models import *
def dan(modelpath =None,
**kwargs):
"""
    The 'dan()' function attempts to build a DAN model with some initialization parameters.
:param modelpath: the path of the model's weights
:return: DAN model
"""
execution_path = os.path.dirname(__file__)
initInf = np.load(os.path.join(modelpath,'initInf.npz'))
    danDetector = DANDetector(initInf, modelpath)
    return danDetector
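# Usage sketch (hedged): the path is a placeholder; `modelpath` must contain the
# initInf.npz file and the TensorFlow checkpoint loaded by DANDetector above, and
# processImg expects a 3-channel image plus a face bounding box.
#
#   detector = dan(modelpath="/path/to/dan/model")
#   landmarks = detector.processImg(image, face_rect)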
``` |
{
"source": "Jimmy01240397/balsn-2021-writeup",
"score": 3
} |
#### File: Crypto/Fast Cipher/cipher.py
```python
from secrets import randbelow
M = 2**1024
def f(x):
# this is a *fast* function
return (
4 * x**4 + 8 * x**8 + 7 * x**7 + 6 * x**6 + 3 * x**3 + 0x48763
) % M
def encrypt(pt, key):
ct = []
for c in pt:
ct.append(c ^ (key & 0xFF))
key = f(key)
return bytes(ct)
if __name__ == "__main__":
key = randbelow(M)
ct = encrypt(open("flag.txt", "rb").read().strip(), key)
print(ct.hex())
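# Illustrative note: encrypt() XORs each plaintext byte with the low byte of the
# evolving key, e.g. encrypt(b"AB", key) == bytes([ord("A") ^ (key & 0xFF),
# ord("B") ^ (f(key) & 0xFF)]) for any key accepted above.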
```
#### File: pekobot/chall/server.py
```python
from elliptic_curve import Curve, Point
from Crypto.Util.number import bytes_to_long
import os
from random import choice
from secrets import randbelow
flag = os.environb[b"FLAG"]
assert flag.startswith(b"AIS3{")
assert flag.endswith(b"}")
flag += os.urandom(64 - len(flag))
# NIST P-256
a = -3
b = 41058363725152142129326129780047268409114441015993725554835256314039467401291
p = 2**256 - 2**224 + 2**192 + 2**96 - 1
E = Curve(p, a, b)
n = 115792089210356248762697446949407573529996955224135760342422259061068512044369
Gx = 48439561293906451759052585252797914202762949526041747995844080717082404635286
Gy = 36134250956749795798585127919587881956611106672985015071877198253568414405109
G = Point(E, Gx, Gy)
d = randbelow(n)
P = G * d
def point_to_bytes(P):
return P.x.to_bytes(32, "big") + P.y.to_bytes(32, "big")
def encrypt(P, m):
key = point_to_bytes(P)
return bytes([x ^ y for x, y in zip(m.ljust(64, b"\0"), key)])
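# Illustrative note: encrypt() is its own inverse for a fixed point, e.g.
# encrypt(P, encrypt(P, b"hello")).rstrip(b"\0") == b"hello", since the same
# 64-byte key stream derived from point_to_bytes(P) is XORed twice.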
quotes = [
"Konpeko, konpeko, konpeko! Hololive san-kisei no Usada Pekora-peko! domo, domo!",
"Bun bun cha! Bun bun cha!",
"kitira!",
"usopeko deshou",
"HA↑HA↑HA↓HA↓HA↓",
"HA↑HA↑HA↑HA↑",
"it's me pekora!",
"ok peko",
]
print("Konpeko!")
print("watashi no public key: %s" % P)
while True:
try:
print("nani wo shitai desuka?")
print("1. Start a Diffie-Hellman key exchange")
print("2. Get an encrypted flag")
print("3. Exit")
option = int(input("> "))
if option == 1:
print("Public key wo kudasai!")
x = int(input("x: "))
y = int(input("y: "))
S = Point(E, x, y) * d
print(encrypt(S, choice(quotes).encode()).hex())
elif option == 2:
r = randbelow(n)
C1 = r * G
C2 = encrypt(r * P, flag)
print(point_to_bytes(C1).hex())
print(C2.hex())
elif option == 3:
print("otsupeko!")
break
print()
except Exception as ex:
print("kusa peko")
print(ex)
break
```
#### File: B64DLE/chall/b64dle.py
```python
from abc import ABC, abstractmethod
from enum import Enum
from base64 import b64encode, b64decode
from typing import List
class RoundResult(Enum):
error = 0
fail = 1
win = 2
class Wordle(ABC):
def __init__(self, target: str, rounds: int):
self.target = target
self.rounds = rounds
self.cur_round = 1
def play(self):
while self.cur_round <= self.rounds:
result = self.play_round(self.cur_round)
if result == RoundResult.error:
print("Invalid input")
continue
elif result == RoundResult.win:
return True
self.cur_round += 1
return False
def play_round(self, round) -> RoundResult:
inp = input(f"Round {round} > ")
if not self.check_input(inp):
return RoundResult.error
diff = self.compare(inp, self.target)
if self.is_win(diff):
return RoundResult.win
else:
print(diff)
return RoundResult.fail
@abstractmethod
def check_input(self, s: str) -> bool:
pass
@abstractmethod
def compare(self, s: str, target: str) -> str:
pass
@abstractmethod
def is_win(self, s: str) -> bool:
pass
class B64dle(Wordle):
def __init__(self, words: List[str], target: str, rounds: int):
super().__init__(b64encode(target.encode()).decode(), rounds)
self.words = words
self.exact = "O"
self.contains = "-"
self.wrong = "X"
def check_input(self, s: str) -> bool:
try:
word = b64decode(s.encode()).decode()
if word not in self.words:
return False
return len(s) == len(self.target)
except:
return False
def compare(self, s: str, target: str) -> str:
assert len(s) == len(target)
ret = ""
for i, c in enumerate(s):
if c == target[i]:
ret += self.exact
elif c in target:
ret += self.contains
else:
ret += self.wrong
return ret
def is_win(self, s: str) -> bool:
return all([x == self.exact for x in s])
with open("five_letter_words.txt") as f:
# https://raw.githubusercontent.com/charlesreid1/five-letter-words/b45fda30524a981c73ec709618271cecfb51c361/sgb-words.txt
words = list(map(str.strip, f))
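# Usage sketch (hedged): the target word below is a placeholder; guesses are read
# from stdin and must be base64 encodings of words from `words`.
#
#   game = B64dle(words, "peach", rounds=6)
#   game.play()  # prints O / - / X feedback per base64 character each round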
```
#### File: chall/problems/astmath.py
```python
from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
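# Usage sketch (hedged; assumes the Problem base class permits direct instantiation):
#
#   problem = ASTMath()
#   tree, answer = problem.generate_testcase()
#   print(problem.dumps(tree))  # JSON-serialised AST handed to the player
#   print(answer)               # value of the same expression, via eval()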
```
#### File: chall/problems/invert_binary_tree.py
```python
from __future__ import annotations
from problem import Problem
from typing import Any, Tuple, Union
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from random import randint
@dataclass_json
@dataclass
class Node:
left: Union[Node, int]
right: Union[Node, int]
Tree = Union[Node, int]
def gen_tree(depth) -> Tree:
if randint(0, depth) == 0:
return Node(gen_tree(depth + 1), gen_tree(depth + 1))
return randint(1, 9)
def invert(t: Tree) -> Tree:
if isinstance(t, int):
return t
else:
return Node(invert(t.right), invert(t.left))
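# Illustrative example of invert():
#   invert(Node(left=1, right=Node(left=2, right=3)))
#   == Node(left=Node(left=3, right=2), right=1)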
class InvertBinaryTree(Problem):
@property
def name(self) -> str:
return "Invert Binary Tree"
@property
def desciption(self) -> str:
return """
Input: A binary tree, where each leaf is an interger and node are objects with `left` and `right` property
Output: The inverted tree with `left` and `right` swapped, recursively
Examples:
Input: {"left": 1, "right": 3}
Output: {"left": 3, "right": 1}
Input: {"left": 1, "right": {"left": 1, "right": 3}}
Output: {"left": {"left": 3, "right": 1}, "right": 1}
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return x.to_json()
def generate_testcase(self) -> Tuple[bool, Any]:
t = Node(gen_tree(1), gen_tree(1))
return t, invert(t)
```
#### File: release/app/main.py
```python
from flask import Flask, render_template, request, redirect, url_for, g, session, send_file
import sqlite3
import secrets
import os
import uuid
import mimetypes
import pathlib
from rq import Queue
from redis import Redis
app = Flask(__name__)
app.queue = Queue(connection=Redis('xss-bot'))
app.config.update({
'SECRET_KEY': secrets.token_bytes(16),
'UPLOAD_FOLDER': '/data/uploads',
'MAX_CONTENT_LENGTH': 32 * 1024 * 1024, # 32MB
})
IMAGE_EXTENSIONS = [ext for ext, type in mimetypes.types_map.items()
if type.startswith('image/')]
ADMIN_PASSWORD = os.getenv('ADMIN_PASSWORD', '<PASSWORD>')
FLAG_UUID = os.getenv('FLAG_UUID', str(uuid.uuid4()))
def db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect('/tmp/db.sqlite3')
db.row_factory = sqlite3.Row
return db
@app.before_first_request
def create_tables():
cursor = db().cursor()
cursor.executescript("""
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT,
password TEXT
);
CREATE TABLE IF NOT EXISTS images (
id INTEGER PRIMARY KEY AUTOINCREMENT,
uuid TEXT,
title TEXT,
filename TEXT,
user_id INTEGER,
FOREIGN KEY(user_id) REFERENCES users(id)
);
""")
cursor.execute("SELECT * FROM users WHERE username='admin'")
if cursor.fetchone() == None:
cursor.execute("INSERT INTO users (username, password) VALUES (?, ?)",
('admin', ADMIN_PASSWORD))
admin_id = cursor.lastrowid
cursor.execute("INSERT INTO images (user_id, uuid, filename, title) VALUES (?, ?, ?, ?)",
(admin_id, FLAG_UUID, FLAG_UUID+".png", "FLAG"))
db().commit()
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
@app.after_request
def add_csp(response):
response.headers['Content-Security-Policy'] = ';'.join([
"default-src 'self'",
"font-src 'self' https://fonts.googleapis.com https://fonts.gstatic.com"
])
return response
@app.route('/')
def index():
if 'user_id' not in session:
return redirect(url_for('login'))
cursor = db().cursor()
cursor.execute("SELECT * FROM images WHERE user_id=?",
(session['user_id'],))
images = cursor.fetchall()
return render_template('index.html', images=images)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form['username']
password = request.form['password']
if len(username) < 5 or len(password) < 5:
return render_template('login.html', error="Username and password must be at least 5 characters long.")
cursor = db().cursor()
cursor.execute("SELECT * FROM users WHERE username=?", (username,))
user = cursor.fetchone()
if user is None:
user_id = cursor.execute("INSERT INTO users (username, password) VALUES (?, ?)",
(username, password)).lastrowid
session['user_id'] = user_id
db().commit()
return redirect(url_for('index'))
elif user['password'] == password:
session['user_id'] = user['id']
return redirect(url_for('index'))
else:
return render_template('login.html', error="Invalid username or password")
@app.route('/image/<uuid>')
def view(uuid):
cursor = db().cursor()
cursor.execute("SELECT * FROM images WHERE uuid=?", (uuid,))
image = cursor.fetchone()
if image:
if image['user_id'] != session['user_id'] and session['user_id'] != 1:
return "You don't have permission to view this image.", 403
return send_file(os.path.join(app.config['UPLOAD_FOLDER'], image['filename']))
else:
return "Image not found.", 404
@app.route('/image/<uuid>/download')
def download(uuid):
cursor = db().cursor()
cursor.execute("SELECT * FROM images WHERE uuid=?", (uuid,))
image = cursor.fetchone()
if image:
if image['user_id'] != session['user_id'] and session['user_id'] != 1:
return "You don't have permission to download this image.", 403
return send_file(os.path.join(app.config['UPLOAD_FOLDER'], image['filename']), as_attachment=True, mimetype='application/octet-stream')
else:
return "Image not found.", 404
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if 'user_id' not in session:
return redirect(url_for('login'))
if request.method == 'GET':
return render_template('upload.html')
else:
title = request.form['title'] or '(No title)'
file = request.files['file']
if file.filename == '':
return render_template('upload.html', error="No file selected")
extension = pathlib.Path(file.filename).suffix
if extension not in IMAGE_EXTENSIONS:
return render_template('upload.html', error="File must be an image")
image_uuid = str(uuid.uuid4())
filename = image_uuid + extension
cursor = db().cursor()
cursor.execute("INSERT INTO images (user_id, uuid, title, filename) VALUES (?, ?, ?, ?)",
(session['user_id'], image_uuid, title, filename))
db().commit()
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('index'))
@app.route('/report', methods=['GET', 'POST'])
def report():
if 'user_id' not in session:
return redirect(url_for('login'))
if request.method == 'GET':
return f'''
<h1>Report to admin</h1>
        <p>Note: the admin will use <code>http://web/</code> (not {request.url_root}) as the base URL when visiting the site you submit.</p>
<form action="/report" method="POST">
<input type="text" name="url" placeholder="URL ({request.url_root}...)">
<input type="submit" value="Submit">
</form>
'''
else:
url = request.form['url']
if url.startswith(request.url_root):
url_path = url[len(request.url_root):]
app.queue.enqueue('xssbot.browse', url_path)
return 'Reported.'
else:
return f"[ERROR] Admin 只看 {request.url_root} 網址"
```
#### File: misc/darkknight/challenge.py
```python
import os
import shutil
base_dir = f"C:\\Users\\balsnctf\\Documents\\Dark Knight\\tmp-{os.urandom(16).hex()}"
def init():
os.mkdir(base_dir)
os.chdir(base_dir)
with open("39671", "w") as f:
f.write("alice\nalice1025")
with open("683077", "w") as f:
f.write("bob\nbob0105a")
def password_manager():
print("use a short pin code to achieve fast login!!")
while True:
pin = input("enter a pin code > ")
if len(pin) > 100:
print("too long...")
continue
if "\\" in pin or "/" in pin or ".." in pin or "*" in pin:
print("what do you want to do?(¬_¬)")
continue
flag = True
for c in pin.encode("utf8"):
if c > 0x7e or c < 0x20:
print("printable chars only!!")
flag = False
break
if flag:
break
while True:
username = input("enter username > ")
if len(username) > 100:
print("too long...")
continue
for c in username.encode("utf8"):
if c > 0x7e or c < 0x20:
print("printable chars only!!")
flag = False
break
if flag:
break
while True:
password = input("enter password > ")
if len(password) > 100:
print("too long...")
continue
for c in password.encode("utf8"):
if c > 0x7e or c < 0x20:
print("printable chars only!!")
flag = False
break
if flag:
break
try:
with open(pin, "w") as f:
f.write(username + "\n" + password)
print("saved!!")
except OSError:
print("pin is invalid!!")
def safety_guard():
print("safety guard activated. will delete all unsafe credentials hahaha...")
delete_file = []
for pin in os.listdir("."):
safe = True
with open(pin, "r") as f:
data = f.read().split("\n")
if len(data) != 2:
safe = False
elif len(data[0]) == 0 or len(data[1]) == 0:
safe = False
elif data[0].isalnum() == False or data[1].isalnum() == False:
safe = False
elif data[0] == "admin":
safe = False
if safe == False:
os.remove(pin)
delete_file.append(pin)
print(f"finished. delete {len(delete_file)} unsafe credentials: {delete_file}")
def fast_login():
while True:
pin = input("enter a pin code > ")
if len(pin) > 100:
print("too long...")
continue
if "\\" in pin or "/" in pin or ".." in pin:
print("what do you want to do?(¬_¬)")
continue
flag = True
for c in pin.encode("utf8"):
if c > 0x7e or c < 0x20:
print("printable chars only!!")
flag = False
break
if flag:
break
try:
with open(pin, "r") as f:
data = f.read().split("\n")
if len(data) != 2:
print("unknown error happened??")
return None, None
return data[0], data[1]
except FileNotFoundError:
print("this pin code is not registered.")
return None, None
def normal_login():
while True:
username = input("enter username > ")
if len(username) > 100:
print("too long...")
elif username.isalnum() == False:
print("strange username, huh?")
elif username == "admin":
print("no you are definitely not (╬ Ò ‸ Ó)")
else:
break
while True:
password = input("enter password > ")
if len(password) > 100:
print("too long...")
continue
elif password.isalnum() == False:
print("strange password, huh?")
else:
break
return username, password
def login():
safety_guard()
while True:
print("1. fast login")
print("2. normal login")
print("3. exit")
x = input("enter login type > ")
if x == "1":
username, password = fast_login()
elif x == "2":
username, password = normal_login()
elif x == "3":
print("bye-bye~")
return
else:
print("invalid input.")
continue
if username != None and password != None:
print(f"hello, {username}.")
if username == "admin":
while True:
x = input("do you want the flag? (y/n): ")
if x == "n":
print("OK, bye~")
return
elif x == "y":
break
else:
print("invalid input.")
while True:
x = input("beg me: ")
if x == "plz":
print("ok, here is your flag: BALSN{flag is here ...}")
break
return
def main():
init()
try:
while True:
print("1. passord manager")
print("2. login")
print("3. exit")
x = input("what do you want to do? > ")
if x == "1":
password_manager()
elif x == "2":
login()
elif x == "3":
print("bye-bye~")
break
else:
print(f"invalid input: {x}")
except KeyboardInterrupt:
print("bye-bye~")
except:
print("unexpected error occured.")
os.chdir("../")
shutil.rmtree(base_dir)
if __name__ == "__main__":
main()
``` |
{
"source": "Jimmy-0/BTC-Clients",
"score": 3
} |
#### File: Jimmy-0/BTC-Clients/check_midterm5.py
```python
import sys
import json
def read_json(filename):
f = open(filename)
chain = json.load(f)
return chain
if len(sys.argv)<2:
print("Running Instructions:\npython check.py <num_chains>")
sys.exit(0)
else:
numChains = int(sys.argv[1])
print("number of chains = {}".format(str(numChains)))
chains = []
min_length = -1
min_trx_throughput = -1
min_trx_per_block = -1
min_frac_unique_trx = 2
for i in range(numChains):
chain = read_json("expt/"+str(i)+".trx")
trx_count = read_json("expt/"+str(i)+".trx_count")
trx_count = int(trx_count)
chains.append(chain)
min_length = len(chain) if min_length==-1 else min(min_length, len(chain))
min_trx_throughput = trx_count if min_trx_throughput==-1 else min(min_trx_throughput, trx_count)
trx_per_block = 0 if len(chain)==1 else trx_count / (len(chain) - 1)
min_trx_per_block = trx_per_block if min_trx_per_block==-1 else min(min_trx_per_block, trx_per_block)
# count trx and unique trx
count = 0
s = set()
for c in chain:
count += len(c)
for t in c:
s.add(t)
assert trx_count == count, "trx_count not in accordance with chain length"
frac_unique_trx = len(s) / trx_count
assert frac_unique_trx >= 0.9, "frac_unique_trx < 0.9"
min_frac_unique_trx = min(min_frac_unique_trx, frac_unique_trx)
common_prefix = True
for chain_id in range(1, len(chains)):
common_prefix = common_prefix and chains[0][1][0]==chains[chain_id][1][0]
if not common_prefix:
break
time_in_min=5
print("min trx throughput = {} \n\
min trx per block = {} \n\
fraction of unique trx = {} \n\
common prefix = {} \n\
test = {}".format(min_trx_throughput, min_trx_per_block, min_frac_unique_trx, common_prefix, "PASS" if min_trx_throughput>=100*time_in_min and min_trx_per_block>=10 and min_trx_per_block<=100*time_in_min and min_frac_unique_trx>=0.9 and common_prefix else "FAIL"
))
``` |
{
"source": "jimmy15923/Common_tools",
"score": 3
} |
#### File: jimmy15923/Common_tools/keras_callbacks.py
```python
from keras.callbacks import Callback
from sklearn.metrics import roc_auc_score, f1_score, confusion_matrix
class logAUC(Callback):
"""
Use this function only in binary classification
"""
def __init__(self):
return
    def on_train_begin(self, logs=None):
logs = logs or {}
record_items = ["val_auc", "val_f1sc", "val_fp", "val_fn", "val_tp", "val_tn"]
for i in record_items:
if i not in self.params['metrics']:
self.params['metrics'].append(i)
def on_epoch_end(self, epoch, logs = {}):
logs = logs or {}
y_true = self.validation_data[1].argmax(axis = 1)
y_pred = self.model.predict(self.validation_data[0])
logs["val_auc"] = roc_auc_score(y_true, y_pred[:,1])
thres = 0.5 # prepare it, for further we can change our judgement threshold
y_pred = (y_pred[:, 1] >= thres) * 1
con_martrix = confusion_matrix(y_true= y_true, y_pred= y_pred)
tp = con_martrix[1][1]
tn = con_martrix[0][0]
fp = con_martrix[0][1]
fn = con_martrix[1][0]
logs["val_f1sc"] = f1_score(y_true = y_true, y_pred = y_pred)
logs["val_tp"],logs["val_tn"],logs["val_fp"],logs["val_fn"] = tp, tn, fp, fn
```
#### File: jimmy15923/Common_tools/result_summary.py
```python
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_auc_score, accuracy_score
import pandas_ml as pdml
import pandas as pd
import numpy as np
import csv
def create_report_binary(y_true, y_pred):
"""
Create quick summary,
Args
- Input
y_true: ground-truth (n_batch x n_classes), one-hot format
y_pred: prediction (n_batch x n_classes), one-hot format
- Return
res: pandas table of prediction/ground-truth/status (fp/fn)
conf: pdml confusion matrix table
auc: auc score
EXAMPLE of Usage
final_result, eval_metric_table, auc = create_report(y_val, val_pred)
eval_metric_table.stats()
"""
res = pd.DataFrame({'y_true':y_true.argmax(axis = 1),
'y_pred':y_pred.argmax(axis = 1)})
res['status'] = res['y_pred'] - res['y_true'] # 1: fp, -1: fn
auc = roc_auc_score(y_true=res.y_true,
y_score=res.y_pred)
conf = pdml.ConfusionMatrix(y_true=res.y_true,
y_pred=res.y_pred)
return res, conf, auc
def write_result_to_csv(pdml_table, file_name):
with open(file_name, 'w') as f:
for i in pdml_table.stats().keys():
this_line = i + "," + str(pdml_table.stats()[i]) + '\n'
f.writelines(this_line)
print("Successfully write result to %s" % file_name)
return True
``` |
{
"source": "jimmy201602/Wooey",
"score": 2
} |
#### File: wooey/tests/test_models.py
```python
import os
from django.test import Client, TestCase, TransactionTestCase
from six.moves.urllib_parse import quote
from wooey import models, version
from . import factories, config, mixins, utils as test_utils
class ScriptTestCase(mixins.ScriptFactoryMixin, TestCase):
def test_multiple_choices(self):
multiple_choice_param = 'two_choices'
single_choice_param = 'one_choice'
optional_choice_param = 'all_choices'
# test that we are a multiple choice entry
param = models.ScriptParameter.objects.get(slug=multiple_choice_param)
self.assertTrue(param.multiple_choice)
# test our limit
self.assertEqual(param.max_choices, 2)
# test with a singular case
param = models.ScriptParameter.objects.get(slug=single_choice_param)
self.assertFalse(param.multiple_choice)
self.assertEqual(param.max_choices, 1)
# test cases that have variable requirements
param = models.ScriptParameter.objects.get(slug=optional_choice_param)
self.assertTrue(param.multiple_choice)
self.assertEqual(param.max_choices, -1)
def test_deletes_related_objects(self):
self.assertTrue(models.ScriptVersion.objects.filter(pk=self.choice_script.pk).exists())
script = models.Script.objects.get(pk=self.choice_script.script.pk)
script.delete()
self.assertFalse(models.ScriptVersion.objects.filter(pk=self.choice_script.pk).exists())
class ScriptGroupTestCase(TestCase):
def test_script_group_creation(self):
group = factories.ScriptGroupFactory()
class TestScriptParsers(mixins.ScriptFactoryMixin, TestCase):
def test_renders_if_script_version_deleted(self):
parser = self.choice_script.scriptparser_set.first()
self.choice_script.delete()
self.assertIn(parser.name, str(parser))
class ScriptParameterTestCase(TestCase):
def test_script_parameter_default(self):
script_parameter = factories.ScriptParameterFactory()
pk = script_parameter.pk
for test_value in [123, 'abc', {'abc': 5}]:
script_parameter.default = test_value
script_parameter.save()
self.assertEqual(models.ScriptParameter.objects.get(pk=pk).default, test_value)
class TestScriptVersion(mixins.ScriptFactoryMixin, TestCase):
def test_script_version_url_with_spaces(self):
# Handles https://github.com/wooey/Wooey/issues/290
script_version = self.choice_script
spaced_version = 'v 1 0 0'
script_version.script_version = spaced_version
script_version.save()
url = script_version.get_version_url()
self.assertIn(quote(spaced_version), url)
class TestJob(mixins.ScriptFactoryMixin, mixins.FileCleanupMixin, mixins.FileMixin, TransactionTestCase):
def get_local_url(self, fileinfo):
from ..backend import utils
local_storage = utils.get_storage(local=True)
return local_storage.url(fileinfo['object'].filepath.name)
def test_jobs(self):
script = self.translate_script
from ..backend import utils
sequence_slug = test_utils.get_subparser_form_slug(script, 'sequence')
out_slug = test_utils.get_subparser_form_slug(script, 'out')
fasta_slug = test_utils.get_subparser_form_slug(script, 'fasta')
job = utils.create_wooey_job(
script_version_pk=script.pk,
data={
'job_name': 'abc',
sequence_slug: 'aaa',
out_slug: 'abc'
}
)
job = job.submit_to_celery()
old_pk = job.pk
new_job = job.submit_to_celery(resubmit=True)
self.assertNotEqual(old_pk, new_job.pk)
# test rerunning, our output should be removed
from ..models import UserFile
old_output = sorted([i.pk for i in UserFile.objects.filter(job=new_job)])
# the pk will not change here since we are using rerun=True
new_job.submit_to_celery(rerun=True)
# check that we overwrite our output
new_output = sorted([i.pk for i in UserFile.objects.filter(job=new_job)])
self.assertNotEqual(old_output, new_output)
self.assertEqual(len(old_output), len(new_output))
# check the old entries are gone
self.assertEqual([], list(UserFile.objects.filter(pk__in=old_output)))
file_previews = utils.get_file_previews(job)
for group, files in file_previews.items():
for fileinfo in files:
# for testing, we use the local url
response = Client().get(self.get_local_url(fileinfo))
self.assertEqual(response.status_code, 200)
# check our download links are ok
# upload the file first to our storage engine so this works in tests
local_storage = utils.get_storage(local=True)
fasta_path = local_storage.save('fasta.fasta', open(os.path.join(config.WOOEY_TEST_DATA, 'fasta.fasta')))
fasta_file = local_storage.open(fasta_path)
job = utils.create_wooey_job(
script_version_pk=script.pk,
data={
fasta_slug: fasta_file,
out_slug: 'abc',
'job_name': 'abc'
}
)
# check our upload link is ok
file_previews = utils.get_file_previews(job)
for group, files in file_previews.items():
for fileinfo in files:
response = Client().get(self.get_local_url(fileinfo))
self.assertEqual(response.status_code, 200)
def test_file_sharing(self):
# this tests whether a file uploaded by one job will be referenced by a second job instead of being duplicated
# on the file system
new_file = self.storage.open(self.get_any_file())
script = self.choice_script
script_slug = test_utils.get_subparser_form_slug(script, 'multiple_file_choices')
from ..backend import utils
job = utils.create_wooey_job(script_version_pk=script.pk, data={'job_name': 'job1', script_slug: new_file})
job = job.submit_to_celery()
job2 = utils.create_wooey_job(script_version_pk=script.pk, data={'job_name': 'job2', script_slug: new_file})
job2 = job2.submit_to_celery()
job1_files = [i for i in models.UserFile.objects.filter(job=job, parameter__isnull=False) if i.parameter.parameter.form_slug == script_slug]
job1_file = job1_files[0]
job2_files = [i for i in models.UserFile.objects.filter(job=job2, parameter__isnull=False) if i.parameter.parameter.form_slug == script_slug]
job2_file = job2_files[0]
self.assertNotEqual(job1_file.pk, job2_file.pk)
self.assertEqual(job1_file.system_file, job2_file.system_file)
def test_multiplechoices(self):
script = self.choice_script
choices = [2, 1, 3]
choice_slug = test_utils.get_subparser_form_slug(script, 'two_choices')
from ..backend import utils
job = utils.create_wooey_job(
script_version_pk=script.pk,
data={
'job_name': 'abc',
choice_slug: choices
}
)
# make sure we have our choices in the parameters
choice_params = [i.value for i in job.get_parameters() if i.parameter.form_slug == choice_slug]
self.assertEqual(choices, choice_params)
job = job.submit_to_celery()
class TestCustomWidgets(TestCase):
def test_widget_attributes(self):
widget = factories.WooeyWidgetFactory(
input_properties='custom-property',
input_attributes='attr1="custom1" attr2="custom2"',
input_class='custom-class',
)
self.assertEquals(
widget.widget_attributes,
{
'custom-property': True,
'attr1': 'custom1',
'attr2': 'custom2',
'class': 'custom-class',
}
)
``` |
{
"source": "Jimmy2027/MMVAE_mnist_svhn_text",
"score": 2
} |
#### File: celeba/networks/FeatureCompressor.py
```python
import numpy as np
import torch.nn as nn
from mmvae_hub.celeba.networks.ResidualBlocks import ResidualBlock1dConv
def make_res_block_encoder_feature_compressor(channels_in, channels_out, a_val=2, b_val=0.3):
downsample = None
if channels_in != channels_out:
downsample = nn.Sequential(nn.Conv1d(channels_in,
channels_out,
kernel_size=1,
stride=1,
padding=0,
dilation=1),
nn.BatchNorm1d(channels_out))
layers = []
layers.append(ResidualBlock1dConv(channels_in, channels_out, kernelsize=1, stride=1, padding=0, dilation=1,
downsample=downsample, a=a_val, b=b_val))
return nn.Sequential(*layers)
def make_layers_resnet_encoder_feature_compressor(start_channels, end_channels, a=2, b=0.3, l=1):
layers = []
num_compr_layers = int((1 / float(l)) * np.floor(np.log(start_channels / float(end_channels))))
for k in range(0, num_compr_layers):
in_channels = np.round(start_channels / float(2 ** (l * k))).astype(int)
out_channels = np.round(start_channels / float(2 ** (l * (k + 1)))).astype(int)
resblock = make_res_block_encoder_feature_compressor(in_channels, out_channels, a_val=a, b_val=b)
layers.append(resblock)
out_channels = np.round(start_channels / float(2 ** (l * num_compr_layers))).astype(int)
if out_channels > end_channels:
resblock = make_res_block_encoder_feature_compressor(out_channels, end_channels, a_val=a, b_val=b)
layers.append(resblock)
return nn.Sequential(*layers)
class ResidualFeatureCompressor(nn.Module):
def __init__(self, in_channels, out_channels_style, out_channels_content, a, b, compression_power):
super(ResidualFeatureCompressor, self).__init__()
self.a = a
self.b = b
self.compression_power = compression_power
self.style_mu = make_res_block_encoder_feature_compressor(in_channels, out_channels_style, a_val=self.a,
b_val=self.b)
self.style_logvar = make_res_block_encoder_feature_compressor(in_channels, out_channels_style, a_val=self.a,
b_val=self.b)
self.content_mu = make_res_block_encoder_feature_compressor(in_channels, out_channels_content, a_val=self.a,
b_val=self.b)
self.content_logvar = make_res_block_encoder_feature_compressor(in_channels, out_channels_content, a_val=self.a,
b_val=self.b)
def forward(self, feats):
mu_style, logvar_style = self.style_mu(feats), self.style_logvar(feats)
mu_content, logvar_content = self.content_mu(feats), self.content_logvar(feats)
return mu_style, logvar_style, mu_content, logvar_content
```
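A minimal sketch of the channel schedule that `make_layers_resnet_encoder_feature_compressor` above computes; the start/end channel counts and `l` are made-up values chosen only to illustrate the arithmetic:
```python
import numpy as np
start_channels, end_channels, l = 256, 32, 1  # hypothetical values
num_compr_layers = int((1 / float(l)) * np.floor(np.log(start_channels / float(end_channels))))
schedule = [(np.round(start_channels / float(2 ** (l * k))).astype(int),
             np.round(start_channels / float(2 ** (l * (k + 1)))).astype(int))
            for k in range(num_compr_layers)]
print(schedule)  # [(256, 128), (128, 64)]; a final res-block then maps 64 -> end_channels
```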
#### File: MMVAE_mnist_svhn_text/mmvae_hub/clean_experiment_checkpoints.py
```python
import glob
import json
import os
import shutil
from pathlib import Path
from mmvae_hub.utils.MongoDB import MongoDatabase
from mmvae_hub.utils.setup.flags_utils import get_config_path
MIN_EPOCH = 50
def clean_database():
"""Delete all experiment logs in database from experiments with less than MIN_EPOCH epochs."""
db = MongoDatabase(training=False)
experiments = db.connect()
for experiment in experiments.find({}):
if experiment['flags']['end_epoch'] < MIN_EPOCH or len(experiment['epoch_results']) < MIN_EPOCH:
print(f'Deleting experiment {experiment["_id"]} from database.')
experiments.delete_one({'_id': experiment['_id']})
def clean_database_model_checkpoints():
db = MongoDatabase(training=False)
experiments = db.connect()
experiment_ids = [exp['_id'] for exp in experiments.find({})]
fs = db.connect_with_gridfs()
for checkpoint in fs.find({}):
checkpoint_exp_id = checkpoint._id.__str__().split('__')[0]
if checkpoint_exp_id not in experiment_ids:
print(f'Removing checkpoint {checkpoint._id}.')
fs.delete(checkpoint._id)
def clean_exp_dirs(config: dict):
"""
    Removes all experiment dirs that don't have a log dir or where the log dir is empty.
    These experiment dirs are leftovers from an unsuccessful "rm -r" command.
"""
db = MongoDatabase(training=False)
experiments = db.connect()
experiment_ids = [exp['_id'] for exp in experiments.find({})]
checkpoint_path = Path(config['dir_experiment']).expanduser()
for experiment_dir in checkpoint_path.iterdir():
if experiment_dir.name != 'fid' and experiment_dir.is_dir():
remove = False
d = experiments.find_one({'_id': experiment_dir.name})
if (
not os.path.exists(os.path.join(experiment_dir, 'logs'))
or len(os.listdir(os.path.join(experiment_dir, 'logs'))) == 0
):
remove = True
elif experiment_dir.name not in experiment_ids:
remove = True
elif d['flags']['end_epoch'] < MIN_EPOCH:
remove = True
if remove:
print(f'removing dir {experiment_dir}')
shutil.rmtree(experiment_dir)
# elif not (experiment_dir / 'checkpoints').exists() or len(
# (experiment_dir / 'checkpoints').iterdir()) == 0:
# print(f'removing dir {experiment_dir}')
# shutil.rmtree(experiment_dir)
#
# elif (max(int(d.name) for d in (experiment_dir / 'checkpoints').iterdir() if d.name.startswith('0')) < 10):
# print(f'removing dir {experiment_dir}')
# shutil.rmtree(experiment_dir)
def clean_early_checkpoints(parent_folder: Path):
for experiment_dir in parent_folder.iterdir():
checkpoints_dir = parent_folder / experiment_dir / 'checkpoints/0*'
checkpoints = glob.glob(checkpoints_dir.__str__())
checkpoint_epochs = sorted([Path(checkpoint).stem for checkpoint in checkpoints])
for checkpoint in checkpoints:
if Path(checkpoint).stem != checkpoint_epochs[-1]:
shutil.rmtree(checkpoint)
if __name__ == '__main__':
clean_database()
clean_database_model_checkpoints()
config_path_polymnist = get_config_path(dataset='polymnist')
config_path_mimic = get_config_path(dataset='mimic')
    for config_path in [config_path_polymnist, config_path_mimic]:
with open(config_path, 'rt') as json_file:
config = json.load(json_file)
clean_exp_dirs(config)
```
#### File: evaluation/eval_metrics/sample_quality.py
```python
import glob
import os
import numpy as np
from mmvae_hub.evaluation.fid.fid_score import calculate_frechet_distance
from mmvae_hub.evaluation.fid.fid_score import get_activations
from mmvae_hub.evaluation.fid.inception import InceptionV3
from mmvae_hub.evaluation.prd_score import prd_score as prd
def calc_inception_features(exp, dims=2048, batch_size=128):
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx],
path_state_dict=exp.flags.inception_state_dict)
model = model.to(exp.flags.device)
paths = exp.paths_fid
for m, m_key in enumerate(exp.modalities.keys()):
mod = exp.modalities[m_key]
if mod.gen_quality_eval:
for k, key in enumerate(paths.keys()):
if key != '':
dir_gen = paths[key]
if not os.path.exists(dir_gen):
raise RuntimeError('Invalid path: %s' % dir_gen)
files_gen = glob.glob(os.path.join(dir_gen, mod.name, '*' +
mod.file_suffix))
fn = os.path.join(exp.flags.dir_gen_eval_fid,
key + '_' + mod.name + '_activations.npy')
act_gen = get_activations(files_gen, exp.flags, model, batch_size, dims,
True, verbose=False)
np.save(fn, act_gen)
def load_inception_activations(exp):
paths = exp.paths_fid
acts = dict()
for m, m_key in enumerate(exp.modalities.keys()):
mod = exp.modalities[m_key]
if mod.gen_quality_eval:
acts[mod.name] = {}
for k, key in enumerate(paths.keys()):
if key != '':
fn = os.path.join(exp.flags.dir_gen_eval_fid,
key + '_' + mod.name + '_activations.npy')
feats = np.load(fn)
acts[mod.name][key] = feats
return acts
def calculate_inception_features_for_gen_evaluation(flags, paths, modality=None, dims=2048, batch_size=128):
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx], path_state_dict=flags.inception_state_dict)
model = model.to(flags.device)
if 'random' in list(paths.keys()):
dir_rand_gen = paths['random']
if not os.path.exists(dir_rand_gen):
raise RuntimeError('Invalid path: %s' % dir_rand_gen)
if modality is not None:
files_rand_gen = glob.glob(os.path.join(dir_rand_gen, modality, '*' + '.png'))
filename_random = os.path.join(flags.dir_gen_eval_fid_random,
'random_sampling_' + modality + '_activations.npy')
else:
files_rand_gen = glob.glob(os.path.join(dir_rand_gen, '*.png'))
filename_random = os.path.join(flags.dir_gen_eval_fid_random, 'random_img_activations.npy')
act_rand_gen = get_activations(files_rand_gen, model, batch_size, dims,
True, verbose=False)
np.save(filename_random, act_rand_gen)
if 'dynamic_prior' in list(paths.keys()):
dirs_dyn_prior = paths['dynamic_prior']
for k, key in enumerate(dirs_dyn_prior.keys()):
if not os.path.exists(dirs_dyn_prior[key]):
raise RuntimeError('Invalid path: %s' % dirs_dyn_prior[key])
files_dyn_gen = glob.glob(os.path.join(dirs_dyn_prior[key], modality, '*' + '.png'))
filename_dyn = os.path.join(dirs_dyn_prior[key], key + '_' + modality + '_activations.npy')
act_cond_gen = get_activations(files_dyn_gen, model, batch_size,
dims, True, verbose=False)
np.save(filename_dyn, act_cond_gen)
if 'conditional' in list(paths.keys()):
dir_cond_gen = paths['conditional']
if not os.path.exists(dir_cond_gen):
raise RuntimeError('Invalid path: %s' % dir_cond_gen)
if modality is not None:
files_cond_gen = glob.glob(os.path.join(dir_cond_gen, modality, '*' + '.png'))
filename_conditional = os.path.join(dir_cond_gen, 'cond_gen_' + modality + '_activations.npy')
else:
files_cond_gen = glob.glob(os.path.join(dir_cond_gen, '*.png'))
filename_conditional = os.path.join(flags.dir_gen_eval_fid_cond_gen, 'conditional_img_activations.npy')
act_cond_gen = get_activations(files_cond_gen, model, batch_size, dims,
True, verbose=False)
np.save(filename_conditional, act_cond_gen)
if 'conditional_2a1m' in list(paths.keys()):
dirs_cond_gen = paths['conditional_2a1m']
for k, key in enumerate(dirs_cond_gen.keys()):
if not os.path.exists(dirs_cond_gen[key]):
raise RuntimeError('Invalid path: %s' % dirs_cond_gen[key])
files_cond_gen = glob.glob(os.path.join(dirs_cond_gen[key], modality, '*' + '.png'))
filename_conditional = os.path.join(dirs_cond_gen[key], key + '_' + modality + '_activations.npy')
act_cond_gen = get_activations(files_cond_gen, model, batch_size,
dims, True, verbose=False)
np.save(filename_conditional, act_cond_gen)
if 'conditional_1a2m' in list(paths.keys()):
dirs_cond_gen = paths['conditional_1a2m']
for k, key in enumerate(dirs_cond_gen.keys()):
if not os.path.exists(dirs_cond_gen[key]):
raise RuntimeError('Invalid path: %s' % dirs_cond_gen[key])
files_cond_gen = glob.glob(os.path.join(dirs_cond_gen[key], modality, '*' + '.png'))
filename_conditional = os.path.join(dirs_cond_gen[key], key + '_' + modality + '_activations.npy')
act_cond_gen = get_activations(files_cond_gen, model, batch_size,
dims, True, verbose=False)
np.save(filename_conditional, act_cond_gen)
if 'real' in list(paths.keys()):
dir_real = paths['real']
if not os.path.exists(dir_real):
raise RuntimeError('Invalid path: %s' % dir_real)
if modality is not None:
files_real = glob.glob(os.path.join(dir_real, modality, '*' + '.png'))
filename_real = os.path.join(flags.dir_gen_eval_fid_real, 'real_' + modality + '_activations.npy')
else:
files_real = glob.glob(os.path.join(dir_real, '*.png'))
filename_real = os.path.join(flags.dir_gen_eval_fid_real, 'real_img_activations.npy')
act_real = get_activations(files_real, model, batch_size, dims, True, verbose=False)
np.save(filename_real, act_real)
def calculate_fid(feats_real, feats_gen):
mu_real = np.mean(feats_real, axis=0)
sigma_real = np.cov(feats_real, rowvar=False)
mu_gen = np.mean(feats_gen, axis=0)
sigma_gen = np.cov(feats_gen, rowvar=False)
return calculate_frechet_distance(mu_real, sigma_real, mu_gen, sigma_gen)
def calculate_fid_dict(feats_real, dict_feats_gen):
dict_fid = {}
for k, key in enumerate(dict_feats_gen.keys()):
feats_gen = dict_feats_gen[key]
dict_fid[key] = calculate_fid(feats_real, feats_gen)
return dict_fid
def calculate_prd(feats_real, feats_gen):
prd_val = prd.compute_prd_from_embedding(feats_real, feats_gen)
return np.mean(prd_val)
def calculate_prd_dict(feats_real, dict_feats_gen):
dict_fid = {}
for k, key in enumerate(dict_feats_gen.keys()):
feats_gen = dict_feats_gen[key]
dict_fid[key] = calculate_prd(feats_real, feats_gen)
return dict_fid
def get_clf_activations(flags, data, model):
model.eval()
act = model.get_activations(data)
act = act.cpu().data.numpy().reshape(flags.batch_size, -1)
return act
def calc_prd_score(exp):
calc_inception_features(exp)
acts = load_inception_activations(exp)
ap_prds = {}
for m, m_key in enumerate(exp.modalities.keys()):
mod = exp.modalities[m_key]
if mod.gen_quality_eval:
for k, key in enumerate(exp.subsets):
if key == '':
continue
ap_prd = calculate_prd(acts[mod.name]['real'],
acts[mod.name][key])
ap_prds[key + '_' + mod.name] = ap_prd
for m, m_key in enumerate(exp.modalities.keys()):
mod = exp.modalities[m_key]
if mod.gen_quality_eval:
ap_prd = calculate_prd(acts[mod.name]['real'],
acts[mod.name]['random'])
ap_prds['random_' + mod.name] = ap_prd
return ap_prds
```
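A quick, self-contained way to exercise `calculate_fid` above; the feature matrices here are random placeholders standing in for the Inception activations produced by `get_activations`:
```python
import numpy as np
feats_real = np.random.randn(500, 2048)  # stand-in for real-image activations
feats_gen = np.random.randn(500, 2048)   # stand-in for generated-image activations
fid = calculate_fid(feats_real, feats_gen)  # Gaussian fit per set, then Frechet distance
print(f"FID: {fid:.2f}")
```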
#### File: mmvae_hub/leomed_utils/launch_jobs.py
```python
import os
import time
from pathlib import Path
import numpy as np
import mmvae_hub
def launch_leomed_jobs(which_dataset: str, params: dict) -> None:
mmvae_hub_dir = Path(mmvae_hub.__file__).parent
n_cores = 8
flags = ''.join(
f'--{k} {v} ' for k, v in params.items() if
k not in ['n_gpus', 'gpu_mem', 'factorized_representation', 'use_clf']
)
if 'gpu_mem' not in params:
params['gpu_mem'] = 5000
if 'n_gpus' not in params:
params['n_gpus'] = 1
# 100 epochs take about 5G of space
scratch_space = int((params['end_epoch'] // 100) * 5) // n_cores or 1
if which_dataset == 'polymnist':
python_file = mmvae_hub_dir / 'polymnist/main_polymnist.py'
mem = 700 * params['num_mods']
if params['method'] == 'mogfm' or params['method'].startswith('iw'):
num_hours = int(np.round((params['end_epoch'] * 10) / 60 * 0.5 * params['num_mods'])) or 1
else:
# 1 epochs needs approx. 2 minutes
num_hours = int(np.round((params['end_epoch'] * 2) / 60 * params['num_mods'])) or 1
# 100 epochs take about 5G of space
scratch_space = int(np.ceil(((params['end_epoch'] / 100) * 5) / n_cores))
elif which_dataset == 'mimic':
python_file = mmvae_hub_dir / 'mimic/main_mimic.py'
# 1 epochs needs approx. 20 minutes
num_hours = int(np.round((params['end_epoch'] * 20) / 60)) or 1
mem = 2500
# 100 epochs take about 10G of space
scratch_space = int(np.ceil(((params['end_epoch'] / 100) * 10) / n_cores))
elif which_dataset == 'celeba':
python_file = mmvae_hub_dir / 'celeba/main_celeba.py'
# 1 epochs needs approx. 15 minutes
num_hours = int(np.round((params['end_epoch'] * 15) / 60)) or 1
mem = 2500
# 100 epochs take about 10G of space
scratch_space = int(np.ceil(((params['end_epoch'] / 100) * 10) / n_cores))
elif which_dataset == 'mnistsvhntext':
python_file = mmvae_hub_dir / 'mnistsvhntext/main_svhnmnist.py'
mem = 2500
if params['method'] == 'mogfm' or params['method'].startswith('iw'):
num_hours = int(np.round((params['end_epoch'] * 10) / 60)) or 1
else:
# 1 epochs needs approx. 50 minutes
num_hours = int(np.round((params['end_epoch'] * 50) / 60)) or 1
# 100 epochs take about 5G of space
scratch_space = int(np.ceil(((params['end_epoch'] / 100) * 5) / n_cores))
command = f'bsub -n {n_cores} -W {num_hours}:00 -R "rusage[mem={mem},ngpus_excl_p={params["n_gpus"]},scratch={scratch_space}]" ' \
f'-R "select[gpu_mtotal0>={params["gpu_mem"] * params["n_gpus"]}]" ' \
f'python {python_file} {flags}'
# add boolean flags
if 'factorized_representation' in params and params['factorized_representation']:
command += ' --factorized_representation'
if 'use_clf' in params and params['use_clf']:
command += ' --use_clf'
print(command)
os.system(command)
time.sleep(1)
```
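A hypothetical call to `launch_leomed_jobs`; the parameter values below are illustrative only, and the call prints the assembled `bsub` command before submitting it via `os.system`:
```python
params = {
    'method': 'mopoe',   # any method other than 'mogfm'/'iw*' uses the ~2 min/epoch estimate
    'end_epoch': 100,
    'num_mods': 3,
    'class_dim': 512,
}
launch_leomed_jobs(which_dataset='polymnist', params=params)
```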
#### File: mmvae_hub/leomed_utils/upload_experimentzip.py
```python
import glob
import shutil
import tempfile
import zipfile
from pathlib import Path
import ppb
import torch
import typer
from norby import send_msg
from mmvae_hub import log
from mmvae_hub.experiment_vis.utils import run_notebook_convert
from mmvae_hub.utils.MongoDB import MongoDatabase
from mmvae_hub.utils.utils import json2dict
app = typer.Typer()
def upload_one(exp_path: Path):
"""
Upload one experiment result to database together with the model checkpoints,
the logfile and tensorboardlogs, then delete zipped experiment dir.
"""
is_zip = exp_path.suffix == '.zip'
with tempfile.TemporaryDirectory() as tmpdirname:
tmpdir = Path(tmpdirname) / exp_path.stem
tmpdir.mkdir()
if is_zip:
# unpack zip into tmpdir
log.info(f'Unpacking {exp_path} to {tmpdir}.')
with zipfile.ZipFile(exp_path) as z:
z.extractall(tmpdir)
exp_dir = Path(tmpdir)
else:
exp_dir = exp_path
flags = torch.load(exp_dir / 'flags.rar')
db = MongoDatabase(training=True, flags=flags)
results = {'epoch_results': {}}
epochs = sorted(int(str(epoch.stem)) for epoch in (exp_dir / 'epoch_results').iterdir())
for epoch in epochs:
epoch_str = str(epoch)
epoch_results = (exp_dir / 'epoch_results' / epoch_str).with_suffix('.json')
results['epoch_results'][epoch_str] = json2dict(epoch_results)
db.insert_dict(results)
modalities = [mod_str for mod_str in results['epoch_results'][str(epoch)]['train_results']['log_probs'] if
len(mod_str.split('_')) == 1]
dir_checkpoints = exp_dir / 'checkpoints'
db.save_networks_to_db(
dir_checkpoints=dir_checkpoints,
epoch=max(int(str(d.name)) for d in dir_checkpoints.iterdir()),
modalities=modalities,
)
db.upload_tensorbardlogs(exp_dir / 'logs')
pdf_path = run_notebook_convert(exp_dir)
expvis_url = ppb.upload(pdf_path, plain=True)
db.insert_dict({'expvis_url': expvis_url})
log_file = glob.glob(str(exp_dir) + '/*.log')
if len(log_file):
db.upload_logfile(Path(log_file[0]))
send_msg(f'Uploading of experiment {flags.experiment_uid} has finished. The experiment visualisation can be '
f'found here: {expvis_url}'
)
# delete exp_path
if is_zip:
exp_path.unlink()
else:
shutil.rmtree(exp_path)
@app.command()
def upload_all(src_dir: str):
"""
Upload all experiment results to database together with the model checkpoints,
the logfile and tensorboardlogs, then delete zipped experiment dir.
"""
src_dir = Path(src_dir).expanduser()
for experiment_zip in src_dir.iterdir():
try:
upload_one(experiment_zip)
except Exception as e:
print(e)
if __name__ == '__main__':
# app()
from norby.utils import norby
    with norby('beginning upload experimentzip', 'finished upload experimentzip'):
# upload_all('/mnt/data/hendrik/mmvae_hub/experiments')
# upload_all('/mnt/data/hendrik/leomed_results')
upload_all('/Users/Hendrik/Documents/master_4/leomed_experiments')
# upload_one(Path('/Users/Hendrik/Documents/master_4/leomed_experiments/polymnist_iwmogfm2__2021_10_14_23_37_27_515040.zip'))
```
#### File: mmvae_hub/mimic/find_dataset_stats.py
```python
import json
import os
import pandas as pd
from torch.utils.data import DataLoader
from mimic import log
from mimic.dataio.utils import get_str_labels
from mimic.utils.filehandling import get_config_path
from mimic.utils.flags import parser
from mimic.utils.flags import update_flags_with_config
def write_results_to_json(results: dict, path: str = 'dataset_stats.json'):
    log.info(f'Writing dict {results} to {path}')
if os.path.exists(path):
with open(path, 'r') as jsonfile:
data = json.load(jsonfile)
results = {**results, **data}
with open(path, 'w') as outfile:
json.dump(results, outfile)
def get_mean_std(out_path, args):
# taken from https://discuss.pytorch.org/t/computing-the-mean-and-std-of-dataset/34949/2
# set normalization to False to initialise the dataset
args.normalization = False
from mimic.dataio.MimicDataset import Mimic
    trainset = Mimic(args, get_str_labels(args.binary_labels), split='train', clf_training=False)
d_loader = DataLoader(trainset, batch_size=50, shuffle=False, num_workers=0)
args.normalization = True
for mod in ['PA', 'Lateral']:
mean = 0.
std = 0.
for images, _ in d_loader:
images = images[mod]
batch_samples = images.size(0) # batch size (the last batch can have smaller size!)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
mean /= len(d_loader.dataset)
std /= len(d_loader.dataset)
stats = {f'{mod}_mean': mean.item(), f'{mod}_std': std.item()}
log.info(stats)
write_results_to_json(results=stats, path=out_path)
def get_label_counts(args):
dir_dataset = os.path.join(args.dir_data, 'files_small_128')
train_labels_path = os.path.join(dir_dataset, 'train_labels.csv')
train_labels_df = pd.read_csv(train_labels_path)[get_str_labels(args.binary_labels)].fillna(0)
indices = []
indices += train_labels_df.index[(train_labels_df['Lung Opacity'] == -1)].tolist()
indices += train_labels_df.index[(train_labels_df['Pleural Effusion'] == -1)].tolist()
indices += train_labels_df.index[(train_labels_df['Support Devices'] == -1)].tolist()
indices = list(set(indices))
train_labels_df = train_labels_df.drop(indices)
counts = train_labels_df[train_labels_df == 1].count()
write_results_to_json(results={'counts': counts.to_dict()})
if __name__ == '__main__':
FLAGS = parser.parse_args([])
config_path = get_config_path(FLAGS)
FLAGS = update_flags_with_config(config_path)
FLAGS.modality = 'PA'
get_label_counts(FLAGS)
    # get_mean_std needs an output path for write_results_to_json; reuse its default stats file
    get_mean_std(out_path='dataset_stats.json', args=FLAGS)
```
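The per-channel statistics in `get_mean_std` follow the usual "average of per-image means and stds" recipe; a generic sketch on a random tensor (no MIMIC data required) looks like this:
```python
import torch
images = torch.rand(50, 1, 128, 128)          # pretend batch of 50 grayscale images
flat = images.view(images.size(0), images.size(1), -1)
mean = flat.mean(2).sum(0) / images.size(0)   # average of the per-image channel means
std = flat.std(2).sum(0) / images.size(0)     # average of the per-image channel stds
print(mean.item(), std.item())
```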
#### File: mimic/modalities/MimicIMG.py
```python
import typing
from pathlib import Path
import torch
import torchvision.transforms as transforms
from matplotlib import pyplot as plt
from torch import Tensor
from mmvae_hub.mimic.classifiers.train_img_clfs import LM
from mmvae_hub.modalities.ModalityIMG import ModalityIMG
from mmvae_hub.networks.images.ConvNetworksImgMimic import EncoderImg, DecoderImg
from modun.download_utils import download_zip_from_url
class LM_(LM):
def __init__(self, str_labels: list, transforms):
super().__init__(str_labels)
self.transforms = transforms
def forward(self, x):
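        # replicate the single-channel image to 3 channels, run the classifier
        # transforms on each sample, then re-stack the batch on the input device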
x_ = torch.cat([self.transforms(torch.cat([s for _ in range(3)])).unsqueeze(0) for s in x], dim=0).to(x.device)
return self.model(x_)
class MimicImg(ModalityIMG):
def __init__(self, data_size, flags, name, labels, rec_weight, plot_img_size):
super().__init__(data_size, flags, name)
        self.labels = labels
self.gen_quality_eval = True
self.file_suffix = '.png'
self.encoder = EncoderImg(self.flags).to(flags.device)
self.decoder = DecoderImg(self.flags).to(flags.device)
self.rec_weight = rec_weight
self.plot_img_size = plot_img_size
self.clf_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.clf = self.get_clf()
def get_clf(self):
if self.flags.use_clf:
clf_name_mapping = {'PA': 'pa', 'Lateral': 'lat'}
# finding the directory of the classifier
img_clf_path = Path(
__file__).parent.parent / f'classifiers/state_dicts/{clf_name_mapping[self.name]}_clf_{self.flags.img_size}.pth'
if not img_clf_path.exists():
download_zip_from_url(
url='http://jimmy123.hopto.org:2095/nextcloud/index.php/s/GTc8pYiDKrq35ky/download',
dest_folder=img_clf_path.parent.parent, verbose=True)
lightning_module = LM_(str_labels=self.labels, transforms=self.clf_transforms)
lightning_module.model.load_state_dict(
torch.load(img_clf_path, map_location=self.flags.device))
return lightning_module.to(self.flags.device)
def plot_data_single_img(self, d: Tensor):
return plt.imshow(self.plot_data(d.squeeze(dim=0)).cpu().detach().squeeze(), cmap='gray')
class MimicPA(MimicImg):
def __init__(self, flags, labels: typing.Iterable[str], rec_weight, plot_img_size):
data_size = torch.Size((1, flags.img_size, flags.img_size))
super().__init__(data_size=data_size, flags=flags, name='PA', labels=labels, rec_weight=rec_weight,
plot_img_size=plot_img_size)
class MimicLateral(MimicImg):
def __init__(self, flags, labels: typing.Iterable[str], rec_weight, plot_img_size):
data_size = torch.Size((1, flags.img_size, flags.img_size))
super().__init__(data_size=data_size, flags=flags, name='Lateral', labels=labels, rec_weight=rec_weight,
plot_img_size=plot_img_size)
if __name__ == '__main__':
img_clf_path = Path(
__file__).parent.parent / f'classifiers/state_dicts/pa_clf_128.pth'
if not img_clf_path.exists():
download_zip_from_url(
url='http://jimmy123.hopto.org:2095/nextcloud/index.php/s/GTc8pYiDKrq35ky/download',
dest_folder=img_clf_path.parent.parent, verbose=True)
```
#### File: mmvae_hub/mnistsvhntext/experiment.py
```python
import json
import random
from pathlib import Path
import PIL.Image as Image
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from torchvision import transforms
from mmvae_hub.base.BaseExperiment import BaseExperiment
from mmvae_hub.mnistsvhntext.MNISTmod import MNIST
from mmvae_hub.mnistsvhntext.SVHNMNISTDataset import SVHNMNIST
from mmvae_hub.mnistsvhntext.SVHNmod import SVHN
# from utils.BaseExperiment import BaseExperiment
from mmvae_hub.mnistsvhntext.metrics import mnistsvhntextMetrics
from mmvae_hub.mnistsvhntext.textmod import Text
class MNISTSVHNText(BaseExperiment):
def __init__(self, flags):
super().__init__(flags)
self.flags = flags
self.labels = ['digit']
alphabet_path = Path(__file__).parent.parent / ('modalities/text/alphabet.json')
with open(alphabet_path) as alphabet_file:
self.alphabet = str(''.join(json.load(alphabet_file)))
# self.flags.vocab_size = len(self.alphabet)
self.dataset_train, self.dataset_test = self.set_dataset()
self.plot_img_size = torch.Size((3, 28, 28))
if not hasattr(flags, 'num_features'):
self.flags.num_features = len(self.alphabet)
self.modalities = self.set_modalities()
self.num_modalities = len(self.modalities.keys())
self.subsets = self.set_subsets()
self.mm_vae = self.set_model()
self.optimizer = None
self.style_weights = self.set_style_weights()
self.test_samples = self.get_test_samples()
self.eval_metric = accuracy_score
self.metrics = mnistsvhntextMetrics
self.paths_fid = self.set_paths_fid()
def set_modalities(self):
mod1 = MNIST(self.flags, 'mnist')
mod2 = SVHN(self.flags, 'svhn')
mod3 = Text(self.flags, self.alphabet)
return {mod1.name: mod1, mod2.name: mod2, mod3.name: mod3}
def get_transform_mnist(self):
transform_mnist = transforms.Compose([transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(size=(28, 28), interpolation=Image.BICUBIC),
transforms.ToTensor()])
return transform_mnist
def get_transform_svhn(self):
transform_svhn = transforms.Compose([transforms.ToTensor()])
return transform_svhn
def set_dataset(self):
transform_mnist = self.get_transform_mnist()
transform_svhn = self.get_transform_svhn()
transforms = [transform_mnist, transform_svhn]
train = SVHNMNIST(self.flags,
self.alphabet,
train=True,
transform=transforms)
test = SVHNMNIST(self.flags,
self.alphabet,
train=False,
transform=transforms)
return train, test
def set_rec_weights(self):
rec_weights = dict()
ref_mod_d_size = self.modalities['svhn'].data_size.numel()
for k, m_key in enumerate(self.modalities.keys()):
mod = self.modalities[m_key]
numel_mod = mod.data_size.numel()
rec_weights[mod.name] = float(ref_mod_d_size / numel_mod)
return rec_weights
def set_style_weights(self):
weights = dict()
weights['mnist'] = self.flags.beta_m1_style
weights['svhn'] = self.flags.beta_m2_style
weights['text'] = self.flags.beta_m3_style
return weights
def get_test_samples(self, num_images=10):
n_test = self.dataset_test.__len__()
samples = []
for i in range(num_images):
while True:
                sample, target = self.dataset_test.__getitem__(random.randint(0, n_test - 1))
if target == i:
for k, key in enumerate(sample):
sample[key] = sample[key].to(self.flags.device)
samples.append(sample)
break
return samples
def mean_eval_metric(self, values):
return np.mean(np.array(values))
def get_prediction_from_attr(self, attr, index=None):
pred = np.argmax(attr, axis=1).astype(int)
return pred
def eval_label(self, values, labels, index):
pred = self.get_prediction_from_attr(values)
return self.eval_metric(labels, pred)
```
#### File: mmvae_hub/mnistsvhntext/MNISTmod.py
```python
import os
from pathlib import Path
import torch
from modun.download_utils import download_zip_from_url
from modun.file_io import json2dict
from mmvae_hub.mnistsvhntext.networks.ConvNetworkImgClfMNIST import ClfImg
from mmvae_hub.mnistsvhntext.networks.ConvNetworksImgMNIST import DecoderImg, EncoderImg
from mmvae_hub.polymnist.PolymnistMod import PolymnistMod
from mmvae_hub.utils.setup.flags_utils import get_config_path
class MNIST(PolymnistMod):
def __init__(self, flags, name):
super().__init__(flags=flags, name=name)
self.rec_weight = 1.
self.encoder = EncoderImg(flags).to(flags.device)
self.decoder = DecoderImg(flags).to(flags.device)
self.data_size = torch.Size((1, 28, 28))
self.gen_quality_eval = True
self.file_suffix = '.png'
def plot_data(self, d):
return d.repeat(1, 3, 1, 1)
def get_clf(self):
if self.flags.use_clf:
dir_clf = self.flags.dir_clf
if not dir_clf.exists():
download_zip_from_url(
url='https://www.dropbox.com/sh/lx8669lyok9ois6/AADM7Cs_QReijyo2kF8xzWqua/trained_classifiers/trained_clfs_mst?dl=1',
dest_folder=dir_clf)
model_clf = ClfImg()
model_clf.load_state_dict(
torch.load(os.path.join(self.flags.dir_clf, f"clf_m1"),
map_location=self.flags.device))
return model_clf.to(self.flags.device)
if __name__ == '__main__':
config = json2dict(Path(get_config_path(dataset='mnistsvhntext')))
download_zip_from_url(
url='https://www.dropbox.com/sh/lx8669lyok9ois6/AADM7Cs_QReijyo2kF8xzWqua/trained_classifiers/trained_clfs_mst?dl=1',
dest_folder=Path(config['dir_clf']).expanduser())
```
#### File: mnistsvhntext/networks/ConvNetworksImgSVHN.py
```python
import torch
import torch.nn as nn
class EncoderSVHN(nn.Module):
def __init__(self, flags):
super(EncoderSVHN, self).__init__()
        self.flags = flags
        self.conv1 = nn.Conv2d(3, 32, kernel_size=4, stride=2, padding=1, dilation=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1, dilation=1)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1, dilation=1)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=0, dilation=1)
        self.relu = nn.ReLU()
        self.hidden_mu = nn.Linear(in_features=128, out_features=flags.class_dim, bias=True)
        self.hidden_logvar = nn.Linear(in_features=128, out_features=flags.class_dim, bias=True)
    def forward(self, x):
        h = self.conv1(x)
        h = self.relu(h)
        h = self.conv2(h)
        h = self.relu(h)
        h = self.conv3(h)
        h = self.relu(h)
        h = self.conv4(h)
        h = self.relu(h)
        h = h.view(h.size(0), -1)
        latent_space_mu = self.hidden_mu(h)
        latent_space_logvar = self.hidden_logvar(h)
        latent_space_mu = latent_space_mu.view(latent_space_mu.size(0), -1)
        latent_space_logvar = latent_space_logvar.view(latent_space_logvar.size(0), -1)
        return None, None, latent_space_mu, latent_space_logvar
class DecoderSVHN(nn.Module):
    def __init__(self, flags):
        super(DecoderSVHN, self).__init__()
        self.flags = flags
        self.linear = nn.Linear(flags.class_dim, 128)
        self.conv1 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=1, padding=0, dilation=1)
        self.conv2 = nn.ConvTranspose2d(64, 64, kernel_size=4, stride=2, padding=1, dilation=1)
        self.conv3 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1, dilation=1)
        self.conv4 = nn.ConvTranspose2d(32, 3, kernel_size=4, stride=2, padding=1, dilation=1)
        self.relu = nn.ReLU()
    def forward(self, class_latent_space):
        z = self.linear(class_latent_space)
        z = z.view(z.size(0), z.size(1), 1, 1)
        x_hat = self.relu(z)
        x_hat = self.conv1(x_hat)
        x_hat = self.relu(x_hat)
        x_hat = self.conv2(x_hat)
        x_hat = self.relu(x_hat)
        x_hat = self.conv3(x_hat)
        x_hat = self.relu(x_hat)
        x_hat = self.conv4(x_hat)
        return x_hat, torch.tensor(0.75).to(z.device)
```
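A small shape-check sketch for the two networks above, assuming 32x32 SVHN inputs and a minimal stand-in for `flags`:
```python
import torch
from types import SimpleNamespace
flags = SimpleNamespace(class_dim=20)  # hypothetical latent size
enc, dec = EncoderSVHN(flags), DecoderSVHN(flags)
x = torch.randn(4, 3, 32, 32)
_, _, mu, logvar = enc(x)    # mu, logvar: (4, 20)
x_hat, scale = dec(mu)       # x_hat: (4, 3, 32, 32), scale is the fixed 0.75 tensor
print(mu.shape, x_hat.shape)
```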
#### File: networks/flows/AffineFlows.py
```python
import FrEIA.framework as Ff
import FrEIA.modules as Fm
import torch
from torch import nn
from mmvae_hub.utils.Dataclasses.Dataclasses import PlanarFlowParams
class AffineFlow(nn.Module):
"""Affine coupling Flow"""
def __init__(self, class_dim, num_flows, coupling_dim, nbr_coupling_block_layers:int):
super().__init__()
self.nbr_coupling_block_layers = nbr_coupling_block_layers
self.num_flows = num_flows
self.coupling_dim = coupling_dim
if num_flows > 0:
# a simple chain of operations is collected by ReversibleSequential
# see here for more details: https://vll-hd.github.io/FrEIA/_build/html/FrEIA.modules.html#coupling-blocks
self.flow = Ff.SequenceINN(class_dim)
for _ in range(num_flows):
self.flow.append(Fm.AllInOneBlock, subnet_constructor=self.subnet_fc, permute_soft=True)
def forward(self, z0, flow_params=None):
if self.num_flows == 0:
return z0, torch.zeros_like(z0)
zk, log_det_jacobian = self.flow(z0)
return zk, log_det_jacobian
def rev(self, zk):
return self.flow(zk, rev=True)
def get_flow_params(self, h=None):
# for compat with amortized flows
return PlanarFlowParams(**{k: None for k in ['u', 'w', 'b']})
def subnet_fc(self, dims_in, dims_out):
block = [nn.Linear(dims_in, self.coupling_dim), nn.ReLU()]
for _ in range(self.nbr_coupling_block_layers):
block.extend([nn.Linear(self.coupling_dim, self.coupling_dim), nn.ReLU()])
block.append(nn.Linear(self.coupling_dim, dims_out))
return nn.Sequential(*block)
```
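A minimal round-trip sketch of `AffineFlow` (the hyper-parameters below are arbitrary); the inverse pass should recover the forward input up to numerical error:
```python
import torch
flow = AffineFlow(class_dim=64, num_flows=3, coupling_dim=128, nbr_coupling_block_layers=1)
z0 = torch.randn(8, 64)
zk, log_det = flow(z0)    # forward pass through the FrEIA coupling blocks
z0_rec, _ = flow.rev(zk)  # inverse pass
print(torch.allclose(z0, z0_rec, atol=1e-4))  # expected: True
```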
#### File: mmvae_hub/networks/GfMVaes.py
```python
import typing
import torch
import torch.distributions as distr
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
from mmvae_hub.evaluation.divergence_measures.kl_div import log_normal_diag, log_normal_standard
from mmvae_hub.evaluation.divergence_measures.mm_div import GfMMMDiv, GfMoPDiv, PGfMMMDiv, BaseMMDiv, MoFoGfMMMDiv, \
BMoGfMMMDiv
from mmvae_hub.networks.BaseMMVae import BaseMMVAE
from mmvae_hub.networks.MixtureVaes import MoPoEMMVae
from mmvae_hub.networks.flows.AffineFlows import AffineFlow
from mmvae_hub.networks.iwVaes import log_mean_exp, iwMMVAE
from mmvae_hub.networks.utils.utils import get_distr
from mmvae_hub.utils.Dataclasses.gfmDataclasses import SubsetMoFoGfM, JointLatentsMoGfM, \
JointLatentsGfMoP, JointLatentsMoFoGfM
from mmvae_hub.utils.Dataclasses.iwdataclasses import *
from mmvae_hub.utils.fusion_functions import subsets_from_batchmods, mixture_component_selection_embedding
from torch.distributions.normal import Normal
class GfMVAE(BaseMMVAE):
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
self.mm_div = GfMMMDiv()
self.flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsGfM:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
subset_embeddings = {}
# pass all experts through the flow
enc_mod_zks = {mod_key: self.flow(distr.latents_class.reparameterize())[0] for mod_key, distr in
enc_mods.items()}
# concatenate mus and logvars for every modality in each subset
for s_key in batch_subsets:
subset_zks = torch.Tensor().to(self.flags.device)
for mod in self.subsets[s_key]:
subset_zks = torch.cat((subset_zks, enc_mod_zks[mod.name].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
z_subset, _ = self.flow.rev(z_mean)
subset_embeddings[s_key] = z_subset
if len(self.subsets[s_key]) == len(batch_mods):
joint_embedding = JointEmbeddingFoEM(embedding=z_subset, mod_strs=s_key.split('_'))
return JointLatentsGfM(joint_embedding=joint_embedding, subsets=subset_embeddings)
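# A toy sketch (not part of the original file) of the generalized f-mean computed above:
# with an invertible map f (here `self.flow`), a subset embedding is
#     z_subset = f^{-1}( mean_m f(z_m) ),
# i.e. push each unimodal sample through the flow, average in the transformed space,
# and map the average back through the inverse flow (`self.flow.rev`).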
class iwMoGfMVAE_old(iwMMVAE, BaseMMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.mm_div = GfMMMDiv(flags=flags, K=self.K)
self.flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
def reparam_with_eps(self, distr: Distr, eps: Tensor):
"""Apply the reparameterization trick on distr with given epsilon"""
std = distr.logvar.mul(0.5).exp_()
# print(std.max())
return eps.mul(std).add_(distr.mu)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsGfM:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
subset_samples = {}
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.mu.shape[0]
# sample K*bs samples from prior
epss = MultivariateNormal(torch.zeros(self.flags.class_dim, device=self.flags.device),
torch.eye(self.flags.class_dim, device=self.flags.device)). \
sample((self.K * batch_size,)).reshape((self.K, batch_size, self.flags.class_dim))
transformed_enc_mods = {
mod_key: self.flow(
torch.cat(tuple(self.reparam_with_eps(distr.latents_class, epss[k_idx]).unsqueeze(dim=0) for k_idx in
range(self.K)),
dim=0).reshape((self.K * batch_size, self.flags.class_dim)))[
0] for mod_key, distr in
enc_mods.items()}
for s_key in batch_subsets:
subset_zks = torch.Tensor().to(self.flags.device)
for mod in self.subsets[s_key]:
subset_zks = torch.cat((subset_zks, transformed_enc_mods[mod.name].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
            samples = self.flow.rev(z_mean)[0]
subset_samples[s_key] = samples
subset_samples = {k: samples.reshape((self.K, batch_size, self.flags.class_dim)) for k, samples in
subset_samples.items()}
z_joint = torch.cat([v.squeeze() for _, v in subset_samples.items()], dim=0)
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfM(joint_embedding=joint_embedding,
subsets=subset_samples,
subset_samples=subset_samples, enc_mods=enc_mods, epss=epss)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[
float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
epss = forward_results.joint_latents.epss
losses = []
klds = {}
log_probs = {}
for mod_str, subset_samples in subsets.items():
epss = torch.where(epss.abs() <= 0.001, torch.tensor(0.01, device=self.flags.device), epss)
kl_div = (subset_samples * torch.log((subset_samples / epss).abs() + 1e-4)).mean(-1)
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
lpx_z = torch.stack(lpx_z).sum(0)
loss = lpx_z + self.flags.beta * kl_div
losses.append(loss)
log_probs[mod_str] = lpx_z.mean()
klds[mod_str] = self.flags.beta * log_mean_exp(kl_div).sum()
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
def conditioned_generation(self, input_samples: dict, subset_key: str, style=None):
"""
Generate samples conditioned with input samples for a given subset.
subset_key str: The key indicating which subset is used for the generation.
"""
# infer latents from batch
enc_mods, joint_latents = self.inference(input_samples)
subset_embedding = joint_latents.subsets[subset_key].mean(dim=0)
cond_mod_in = ReparamLatent(content=subset_embedding, style=style)
return self.generate_from_latents(cond_mod_in)
class BaseiwMoGfMVAE(iwMMVAE, BaseMMVAE):
"""Base class for the Importance Weighted Mixture of Generalized f-Means VAE methods"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.mm_div = GfMMMDiv(flags=flags, K=self.K)
self.flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
self.prior = Normal(torch.zeros((self.flags.batch_size, self.flags.class_dim), device=self.flags.device),
torch.ones((self.flags.batch_size, self.flags.class_dim), device=self.flags.device))
self.qz_x = get_distr(flags.qz_x) # posterior
def decode(self, enc_mods: Mapping[str, BaseEncMod], joint_latents: iwJointLatents) -> dict:
"""Decoder outputs each reconstructed modality as a dict."""
rec_mods = {}
for subset_str, subset in joint_latents.subsets.items():
subset_samples = subset[0].reshape((self.K * self.flags.batch_size, self.flags.class_dim))
rec_mods[subset_str] = {
out_mod_str: dec_mod.calc_likelihood(class_embeddings=subset_samples,
unflatten=(self.K, self.flags.batch_size)
)
for out_mod_str, dec_mod in self.modalities.items()
}
return rec_mods
def encode(self, input_batch: Mapping[str, Tensor]) -> Mapping[str, BaseEncMod]:
enc_mods = {}
for mod_str, mod in self.modalities.items():
if mod_str in input_batch:
enc_mods[mod_str] = {}
_, _, class_mu, class_logvar = mod.encoder(input_batch[mod_str])
latents_class = Normal(class_mu, F.softmax(class_logvar, dim=-1) * class_logvar.size(-1) + 1e-6)
enc_mods[mod_str] = BaseEncMod(latents_class=latents_class)
return enc_mods
def conditioned_generation(self, input_samples: dict, subset_key: str, style=None):
"""
Generate samples conditioned with input samples for a given subset.
subset_key str: The key indicating which subset is used for the generation.
"""
# infer latents from batch
enc_mods, joint_latents = self.inference(input_samples)
subset_embedding = joint_latents.subsets[subset_key][0].mean(dim=0)
cond_mod_in = ReparamLatent(content=subset_embedding, style=style)
return self.generate_from_latents(cond_mod_in)
def generate_sufficient_statistics_from_latents(self, latents: ReparamLatent) -> Mapping[str, Distribution]:
cond_gen = {}
for mod_str, mod in self.modalities.items():
content = latents.content
cond_gen_m = mod.px_z(*mod.decoder(content))
cond_gen[mod_str] = cond_gen_m
return cond_gen
class iwMoGfMVAE(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfM2:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim)) for
mod_key, zms in zmss.items()}
subset_samples = {}
# approximations of the distributions of the sum of random variables (srv)
srv_proxies = {}
z_Gfs = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
srv_proxies[s_key] = enc_mods[s_key].latents_class
z_Gf = zmss[s_key]
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
subset_tf_enc_mods = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]])
z_Gf = subset_tf_enc_mods.mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
# approximate the sum of random variables with a gaussian
z_Gf = z_Gf.reshape(self.K, -1, self.flags.class_dim)
subset_tf_enc_mods = subset_tf_enc_mods.reshape(subset_tf_enc_mods.shape[0], self.K, -1,
self.flags.class_dim)
# take the average mean over the K samples and average std
srv_proxies[s_key] = Normal(z_Gf.mean(0), subset_tf_enc_mods.std(0).mean(0))
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
z_joint = torch.cat([z_joint, subset_samples[s_key][0]])
z_Gfs[s_key] = z_Gf
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfM2(joint_embedding=joint_embedding,
z_Gfs=z_Gfs,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, srv_proxies=srv_proxies)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
losses = []
klds = {}
log_probs = {}
for sub_str, subset_samples in subsets.items():
# distribution of sum of random variables inside f-mean
Gf = forward_results.joint_latents.srv_proxies[sub_str]
z_Gf = forward_results.joint_latents.z_Gfs[sub_str]
# print('z_Gf: ', z_Gf.mean())
lqz_x = Gf.log_prob(z_Gf).sum(-1) + subset_samples[1]
# print('lqz_x: ', lqz_x.mean())
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lqz_x - lpz
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
# loss = -(lpx_z - (lqz_x - lpz))
if self.flags.beta == 0:
loss = lpx_z
else:
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concat over k samples (get k*number of subsets) as last dim
# take log_mean_exp over batch size
# log_mean_exp over k, then sum over number of subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoGfMVAE2_(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfM2:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim)) for
mod_key, zms in zmss.items()}
subset_samples = {}
# approximations of the distributions of the sum of random variables (srv)
srv_proxies = {}
z_Gfs = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
srv_proxies[s_key] = enc_mods[s_key].latents_class
z_Gf = zmss[s_key]
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]]).mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
z_Gf = z_Gf.reshape(self.K, -1, self.flags.class_dim)
# approximate the subset posterior with a gaussian
srv_proxies[s_key] = Normal(z_Gf.mean(0), z_Gf.std(0) + 1e-6)
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
z_joint = torch.cat([z_joint, subset_samples[s_key][0]])
z_Gfs[s_key] = z_Gf
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfM2(joint_embedding=joint_embedding,
z_Gfs=z_Gfs,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, srv_proxies=srv_proxies)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
losses = []
klds = {}
log_probs = {}
for sub_str, subset_samples in subsets.items():
# distribution of sum of random variables inside f-mean
Gf = forward_results.joint_latents.srv_proxies[sub_str]
z_Gf = forward_results.joint_latents.z_Gfs[sub_str]
# print('z_Gf: ', z_Gf.mean())
lqz_x = Gf.log_prob(z_Gf).sum(-1) + subset_samples[1]
# print('lqz_x: ', lqz_x.mean())
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lqz_x - lpz
# loss = -(lpx_z - (lqz_x - lpz))
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concat over k samples (get k*number of subsets) as last dim
# take log_mean_exp over batch size
# log_mean_exp over k, then sum over number of subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoGfMVAE4(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE where the joint distribution is assumed to be gaussian with inferred mean ind std."""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfM2:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim)) for
mod_key, zms in zmss.items()}
subset_samples = {}
# approximations of the distributions of the sum of random variables (srv)
srv_proxies = {}
z_Gfs = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
srv_proxies[s_key] = enc_mods[s_key].latents_class
z_Gf = zmss[s_key]
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]]).mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
# approximate the sum of random variables with a gaussian
z_Gf = z_Gf.reshape(self.K, -1, self.flags.class_dim)
srv_proxies[s_key] = None
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
z_joint = torch.cat([z_joint, subset_samples[s_key][0]])
z_Gfs[s_key] = z_Gf
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfM2(joint_embedding=joint_embedding,
z_Gfs=z_Gfs,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, srv_proxies=srv_proxies)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
losses = []
klds = {}
log_probs = {}
for sub_str, subset_samples in subsets.items():
subset_posterior = Normal(loc=subset_samples[0].mean(0), scale=subset_samples[0].std(0) + 1e-6)
lqz_x = subset_posterior.log_prob(subset_samples[0]).sum(-1) + subset_samples[1]
# print('lqz_x: ', lqz_x.mean())
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lqz_x - lpz
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
# loss = -(lpx_z - (lqz_x - lpz))
if self.flags.beta == 0:
loss = lpx_z
else:
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concatenate the per-subset losses along the batch dimension -> shape (K, batch_size * n_subsets)
# log_mean_exp over the K importance samples, then sum over batch and subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoGfMVAE_amortized(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfMVAE_amortized:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim))
for mod_key, zms in zmss.items()}
priors_tf_enc_mods = {
mod_key: Normal(self.flow(enc_mod.latents_class.loc)[0],
torch.ones(self.flags.class_dim, device=self.flags.device))
for mod_key, enc_mod in enc_mods.items()
}
subset_samples = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]]).mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
# reshape the transformed_enc_mods
transformed_enc_mods = {
k: (
samples[0].reshape((self.K, batch_size, self.flags.class_dim)),
samples[1].reshape((self.K, batch_size)))
for
k, samples in
transformed_enc_mods.items()}
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfMVAE_amortized(joint_embedding=joint_embedding,
transformed_enc_mods=transformed_enc_mods,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, zmss=zmss, priors_tf_enc_mods=priors_tf_enc_mods)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
transformed_enc_mods = forward_results.joint_latents.transformed_enc_mods
zmss = forward_results.joint_latents.zmss
losses = []
klds = {}
log_probs = {}
# amortized_priors = torch.stack([Normal(transformed_enc_mod,
# torch.ones(self.flags.batch_size, self.flags.class_dim, device=self.flags.device)) for transformed_enc_mod in ])
# minimize divergence between f(zm) and amortized prior
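# per-modality term: log q_m(z_m|x) - log N(f(z_m); f(mu_m), I) - log|det J_f|;
# roughly a single-sample estimate of the divergence between the transformed posterior and its amortized prior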
interm_loss = torch.stack([(
enc_mods[key].latents_class.log_prob(zmss[key]).sum(-1)
- forward_results.joint_latents.priors_tf_enc_mods[key].log_prob(transformed_enc_mods[key][0]).sum(-1)
- transformed_enc_mods[key][1]
) for key in enc_mods]).sum(0)
for sub_str, subset_samples in subsets.items():
# subset size is the number of modalities included in the subset
subset_size = len(self.subsets[sub_str])
if subset_size == 1:
Gf = enc_mods[sub_str].latents_class
z_Gf = zmss[sub_str]
else:
# distribution of sum of random variables inside f-mean
Gf = Normal(
torch.stack(
[transformed_enc_mod[0].mean(0) for _, transformed_enc_mod in transformed_enc_mods.items()]
).mean(0),
torch.tensor(1 / subset_size).sqrt() * torch.ones(self.flags.batch_size, self.flags.class_dim,
device=self.flags.device)
)
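# std = sqrt(1/M): assuming the M transformed encodings are roughly unit-variance and independent,
# their mean has variance 1/M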
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[sub_str]]).mean(dim=0)
# print('z_Gf: ', z_Gf.mean())
lqz_x = Gf.log_prob(z_Gf).sum(-1) + subset_samples[1]
# print('lqz_x: ', lqz_x.mean())
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lqz_x - lpz
# loss = -(lpx_z - (lqz_x - lpz))
if self.flags.beta == 0:
loss = lpx_z
else:
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concatenate the per-subset losses along the batch dimension -> shape (K, batch_size * n_subsets)
# log_mean_exp over the K importance samples, then sum over batch and subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum() + 2.0 * interm_loss.sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class MoGfMVAE_amortized(iwMoGfMVAE_amortized):
"""Amortized Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
iwMoGfMVAE_amortized.__init__(self, exp, flags, modalities, subsets)
self.K = 1
class iwMoGfMVAE_multiloss(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE with an additional loss function that pushes the transformed enc mods to be a std normal distr"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfMVAE_amortized:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim))
for mod_key, zms in zmss.items()}
subset_samples = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]]).mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
# reshape the transformed_enc_mods
transformed_enc_mods = {
k: (
samples[0].reshape((self.K, batch_size, self.flags.class_dim)),
samples[1].reshape((self.K, batch_size)))
for
k, samples in
transformed_enc_mods.items()}
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfMVAE_amortized(joint_embedding=joint_embedding,
transformed_enc_mods=transformed_enc_mods,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, zmss=zmss, priors_tf_enc_mods=None)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
transformed_enc_mods = forward_results.joint_latents.transformed_enc_mods
zmss = forward_results.joint_latents.zmss
losses = []
klds = {}
log_probs = {}
# minimize divergence between f(zm) and amortized prior
interm_loss = torch.stack([(
enc_mods[key].latents_class.log_prob(zmss[key]).sum(-1)
- self.prior.log_prob(transformed_enc_mods[key][0]).sum(-1)
- transformed_enc_mods[key][1]
) for key in enc_mods]).sum(0)
for sub_str, subset_samples in subsets.items():
# subset size is the number of modalities included in the subset
subset_size = len(self.subsets[sub_str])
if subset_size == 1:
Gf = enc_mods[sub_str].latents_class
z_Gf = zmss[sub_str]
else:
# distribution of sum of random variables inside f-mean
Gf = Normal(torch.zeros(self.flags.batch_size, self.flags.class_dim, device=self.flags.device),
torch.tensor(1 / subset_size).sqrt() * torch.ones(self.flags.batch_size,
self.flags.class_dim,
device=self.flags.device)
)
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[sub_str]]).mean(dim=0)
# print('z_Gf: ', z_Gf.mean())
lqz_x = Gf.log_prob(z_Gf).sum(-1) + subset_samples[1]
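# density of the subset sample via change of variables: log prob of z_Gf under the proxy Gf
# plus the flow's log-det-Jacobian term (subset_samples[1])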
# print('lqz_x: ', lqz_x.mean())
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lqz_x - lpz
# loss = -(lpx_z - (lqz_x - lpz))
if self.flags.beta == 0:
loss = lpx_z
else:
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concatenate the per-subset losses along the batch dimension -> shape (K, batch_size * n_subsets)
# log_mean_exp over the K importance samples, then sum over batch and subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum() + 2.0 * interm_loss.sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoGfMVAE_multiloss_(BaseiwMoGfMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE with an additional loss function that pushes the transformed enc mods to be a std normal distr"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfMVAE_amortized:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.loc.shape[0]
# M*k samples from the unimodal posteriors
zmss = {mod_key: enc_mod.latents_class.rsample((self.K,)) for mod_key, enc_mod in enc_mods.items()}
# transformed enc mods
transformed_enc_mods = {mod_key: self.flow(zms.reshape(self.K * batch_size, self.flags.class_dim))
for mod_key, zms in zmss.items()}
subset_samples = {}
z_joint = torch.Tensor().to(self.flags.device)
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
# subset samples are tuples: (samples, log_det_J)
subset_samples[s_key] = (zmss[s_key], 0)
z_joint = torch.cat([z_joint, zmss[s_key]])
else:
# sum of random variables
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[s_key]]).mean(dim=0)
# calculate inverse flow
zss, log_det_J = self.flow.rev(z_Gf)
subset_samples[s_key] = (zss.reshape((self.K, batch_size, self.flags.class_dim)),
log_det_J.reshape((self.K, batch_size)))
# reshape the transformed_enc_mods
transformed_enc_mods = {
k: (
samples[0].reshape((self.K, batch_size, self.flags.class_dim)),
samples[1].reshape((self.K, batch_size)))
for
k, samples in
transformed_enc_mods.items()}
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfMVAE_amortized(joint_embedding=joint_embedding,
transformed_enc_mods=transformed_enc_mods,
subsets=subset_samples, subset_samples=subset_samples,
enc_mods=enc_mods, zmss=zmss, priors_tf_enc_mods=None)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
enc_mods = forward_results.enc_mods
transformed_enc_mods = forward_results.joint_latents.transformed_enc_mods
zmss = forward_results.joint_latents.zmss
losses = []
klds = {}
log_probs = {}
# minimize divergence between f(zm) and amortized prior
interm_loss = torch.stack([(
enc_mods[key].latents_class.log_prob(zmss[key]).sum(-1)
- self.prior.log_prob(transformed_enc_mods[key][0]).sum(-1)
- transformed_enc_mods[key][1]
) for key in enc_mods]).sum(0)
for sub_str, subset_samples in subsets.items():
# subset size is the number of modalities included in the subset
subset_size = len(self.subsets[sub_str])
if subset_size == 1:
Gf = enc_mods[sub_str].latents_class
z_Gf = zmss[sub_str]
else:
# distribution of sum of random variables inside f-mean
Gf = Normal(torch.zeros(self.flags.batch_size, self.flags.class_dim, device=self.flags.device),
torch.tensor(1 / subset_size).sqrt() * torch.ones(self.flags.batch_size,
self.flags.class_dim,
device=self.flags.device)
)
z_Gf = torch.stack([transformed_enc_mods[mod.name][0] for mod in self.subsets[sub_str]]).mean(dim=0)
# print('z_Gf: ', z_Gf.mean())
lqz_x = Gf.log_prob(z_Gf).sum(-1) + subset_samples[1]
# print('lqz_x: ', lqz_x.mean())
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
d_kl = lpz - lqz_x
# loss = -(lpx_z - (lqz_x - lpz))
if self.flags.beta == 0:
loss = lpx_z
else:
loss = lpx_z - self.flags.beta * d_kl
# print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concatenate the per-subset losses along the batch dimension -> shape (K, batch_size * n_subsets)
# log_mean_exp over the K importance samples, then sum over batch and subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum() + 2.0 * interm_loss.sum()
# print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoGfMVAE3(iwMMVAE, BaseMMVAE):
"""Importance Weighted Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.mm_div = GfMMMDiv(flags=flags, K=self.K)
self.flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
self.prior = Normal(torch.zeros(self.flags.class_dim, device=self.flags.device),
torch.ones(self.flags.class_dim, device=self.flags.device))
def reparam_with_eps(self, distr: Distr, eps: Tensor):
"""Apply the reparameterization trick on distr with given epsilon"""
std = distr.logvar.mul(0.5).exp_()
# print(std.max())
return eps.mul(std).add_(distr.mu)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsiwMoGfM2:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
# batch_size is not always equal to flags.batch_size
batch_size = enc_mods[[mod_str for mod_str in enc_mods][0]].latents_class.mu.shape[0]
transformed_enc_mods = {
mod_key: self.flow(
torch.cat(tuple(distr.latents_class.reparameterize().unsqueeze(dim=0) for _ in
range(self.K)),
dim=0).reshape((self.K * batch_size, self.flags.class_dim))) for mod_key, distr in
enc_mods.items()}
subset_samples = {}
for s_key in batch_subsets:
subset_zks = torch.Tensor().to(self.flags.device)
for mod in self.subsets[s_key]:
subset_zks = torch.cat((subset_zks, transformed_enc_mods[mod.name][0].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
samples = self.flow.rev(z_mean)
subset_samples[s_key] = samples
subset_samples = {k: (
samples[0].reshape((self.K, batch_size, self.flags.class_dim)), samples[1].reshape((self.K, batch_size)))
for
k, samples in
subset_samples.items()}
# z_joint has nbr_subsets*K samples
z_joint = torch.cat([v[0] for _, v in subset_samples.items()], dim=0)
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsiwMoGfM2(joint_embedding=joint_embedding, transformed_enc_mods=transformed_enc_mods,
subsets=subset_samples,
subset_samples=subset_samples, enc_mods=enc_mods)
def calculate_loss(self, forward_results, batch_d: dict) -> tuple[float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
transformed_enc_mods = forward_results.joint_latents.transformed_enc_mods
enc_mods = forward_results.enc_mods
losses = []
klds = {}
log_probs = {}
logprobs_tf_encmods = {
mod_k: Normal(enc_mods[mod_k].latents_class.mu, enc_mods[mod_k].latents_class.logvar.exp()).log_prob(
transformed_enc_mod[0].reshape(self.K, self.flags.batch_size, self.flags.class_dim)).sum(-1) -
transformed_enc_mod[1].reshape(self.K, self.flags.batch_size) for mod_k, transformed_enc_mod in
transformed_enc_mods.items()}
for sub_str, subset_samples in subsets.items():
lMz_x = torch.stack(
[logprobs_tf_encmods[mod_k] for mod_k in sub_str.split('_')]).mean(0)
lqz_x = lMz_x + subset_samples[1]
lpz = self.prior.log_prob(subset_samples[0]).sum(-1)
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[sub_str].items()]
# sum over #mods in subset
lpx_z = torch.stack(lpx_z).sum(0)
d_kl = lqz_x - lpz
# loss = -(lpx_z - (lqz_x - lpz))
loss = lpx_z - self.flags.beta * d_kl
print('dkl: ', d_kl.mean())
print('lMz_x: ', lMz_x)
print('lpx_z: ', lpx_z)
print('lqz_x: ', lqz_x)
print('lpz: ', lpz)
print('loss: ', loss.mean())
losses.append(loss)
log_probs[sub_str] = lpx_z.mean()
klds[sub_str] = d_kl.mean()
# concatenate the per-subset losses along the batch dimension -> shape (K, batch_size * n_subsets)
# log_mean_exp over the K importance samples, then mean over batch and subsets
total_loss = -log_mean_exp(torch.cat(losses, 1)).mean()
print('total loss: ', total_loss)
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
def conditioned_generation(self, input_samples: dict, subset_key: str, style=None):
"""
Generate samples conditioned with input samples for a given subset.
subset_key str: The key indicating which subset is used for the generation.
"""
# infer latents from batch
enc_mods, joint_latents = self.inference(input_samples)
subset_embedding = joint_latents.subsets[subset_key][0].mean(dim=0)
cond_mod_in = ReparamLatent(content=subset_embedding, style=style)
return self.generate_from_latents(cond_mod_in)
def generate_sufficient_statistics_from_latents(self, latents: ReparamLatent) -> Mapping[str, Distribution]:
cond_gen = {}
for mod_str, mod in self.modalities.items():
style_m = latents.style[mod_str]
content = latents.content
cond_gen_m = mod.likelihood(*mod.decoder(style_m, content))
cond_gen[mod_str] = cond_gen_m
return cond_gen
class MoGfMVAE(iwMoGfMVAE):
"""Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
iwMoGfMVAE.__init__(self, exp, flags, modalities, subsets)
self.K = 1
class MoFoGfMVAE(BaseMMVAE):
"""Mixture of Flow of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
self.mm_div = MoFoGfMMMDiv()
self.gfm_flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim)
self.flow = AffineFlow(flags.class_dim, flags.num_flows, coupling_dim=flags.coupling_dim)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsMoFoGfM:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
subset_embeddings = {}
# pass all experts through the flow
enc_mod_zks = {mod_key: self.gfm_flow(distr.latents_class.reparameterize())[0] for mod_key, distr in
enc_mods.items()}
for s_key in batch_subsets:
subset_zks = torch.Tensor().to(self.flags.device)
for mod in self.subsets[s_key]:
subset_zks = torch.cat((subset_zks, enc_mod_zks[mod.name].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
z0_subset, _ = self.gfm_flow.rev(z_mean)
# pass subset through flow
zk_subset, log_det_j = self.flow.forward(z0_subset)
subset_embeddings[s_key] = SubsetMoFoGfM(z0=z0_subset, zk=zk_subset, log_det_j=log_det_j)
# select expert for z_joint
subsets = {k: v.zk for k, v in subset_embeddings.items()}
z_joint = mixture_component_selection_embedding(subset_embeds=subsets, s_key='all', flags=self.flags)
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs=[k for k in batch_subsets])
return JointLatentsMoFoGfM(joint_embedding=joint_embedding, subsets=subset_embeddings)
class BMoGfMVAE(MoGfMVAE):
""" Bounded Mixture of Generalized f-Means VAE"""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
self.mm_div = BMoGfMMMDiv()
class GfMoPVAE(MoPoEMMVae):
"""GfM of Product of experts VAE"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
self.mm_div = GfMoPDiv()
self.flow = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatentsGfM:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
distr_subsets = {}
for s_key in batch_subsets:
distr_subset = self.fuse_subset(enc_mods, s_key)
distr_subsets[s_key] = distr_subset
z_joint = self.gfm(distr_subsets)
joint_embedding = JointEmbeddingFoEM(embedding=z_joint, mod_strs='joint')
return JointLatentsGfMoP(joint_embedding=joint_embedding, subsets=distr_subsets)
def gfm(self, distr_subsets: Mapping[str, Distr]) -> Tensor:
"""Merge subsets with a generalized f mean."""
# pass all subset experts through the flow
enc_subset_zks = {s_key: self.encode_expert(distr_subset) for s_key, distr_subset in distr_subsets.items()}
# get the mixture of each encoded subset
z_mixture = mixture_component_selection_embedding(enc_mods=enc_subset_zks, s_key='all', flags=self.flags)
# question: should I normalize by the number of modalities here?
# pass the mixture backwards through the flow.
z_joint, _ = self.flow.rev(z_mixture)
return z_joint
def encode_expert(self, expert_distr: Distr) -> EncModGfM:
zk, _ = self.flow(expert_distr.reparameterize())
return EncModGfM(zk=zk)
class PGfMVAE(BaseMMVAE):
"""
Params Generalized f-Means VAE: class of methods where the means and logvars of all experts are fused with a
generalized f-mean.
"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
self.mm_div = PGfMMMDiv()
self.flow_mus = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
self.flow_logvars = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim,
nbr_coupling_block_layers=flags.nbr_coupling_block_layers)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatents:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
distr_subsets = {}
# pass all experts through the flow
enc_mod_transformed = {mod_key: self.encode_expert(enc_mod.latents_class) for mod_key, enc_mod in
enc_mods.items()}
# concatenate mus and logvars for every modality in each subset
for s_key in batch_subsets:
if len(self.subsets[s_key]) == 1:
q_subset = enc_mods[s_key].latents_class
else:
q_subset = self.gfm(distrs=[enc_mod_transformed[mod.name] for mod in self.subsets[s_key]])
distr_subsets[s_key] = q_subset
if len(self.subsets[s_key]) == len(batch_mods):
joint_distr = q_subset
fusion_subsets_keys = s_key.split('_')
joint_distr.mod_strs = fusion_subsets_keys
return JointLatents(fusion_subsets_keys, joint_distr=joint_distr, subsets=distr_subsets)
def gfm(self, distrs: typing.List[Distr]) -> Distr:
mus = torch.Tensor().to(self.flags.device)
logvars = torch.Tensor().to(self.flags.device)
for distr in distrs:
mus = torch.cat((mus, distr.mu.unsqueeze(dim=0)), dim=0)
logvars = torch.cat((logvars, distr.logvar.unsqueeze(dim=0)), dim=0)
mu_average = torch.mean(mus, dim=0)
logvar_average = torch.mean(logvars, dim=0)
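# generalized f-mean on the parameters: the flow-transformed mus/logvars are averaged
# and the averages are mapped back through the inverse flows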
mu_gfm, _ = self.flow_mus.rev(mu_average)
logvar_gfm, _ = self.flow_logvars.rev(logvar_average)
return Distr(mu=mu_gfm, logvar=logvar_gfm)
def encode_expert(self, expert_distr: Distr) -> Distr:
mu_k, _ = self.flow_mus(expert_distr.mu)
logvar_k, _ = self.flow_logvars(expert_distr.logvar)
return Distr(mu=mu_k, logvar=logvar_k)
class PGfMoPVAE(BaseMMVAE):
"""
Params Generalized f-Means of Product of Experts VAE: class of methods where the means and logvars of all experts
are fused with a generalized f-mean.
"""
def __init__(self, exp, flags, modalities, subsets):
BaseMMVAE.__init__(self, exp, flags, modalities, subsets)
self.mm_div = PGfMMMDiv()
self.flow_mus = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim)
self.flow_logvars = AffineFlow(flags.class_dim, flags.num_gfm_flows, coupling_dim=flags.coupling_dim)
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatents:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
distr_subsets = {}
fusion_subsets_keys = []
for s_key in batch_subsets:
distr_subset = self.fuse_subset(enc_mods, s_key)
distr_subsets[s_key] = distr_subset
fusion_subsets_keys.append(s_key)
joint_distr = self.gfm(distr_subsets)
return JointLatents(fusion_subsets_keys, joint_distr=joint_distr, subsets=distr_subsets)
def gfm(self, distrs: Mapping[str, Distr]) -> Distr:
mus = torch.Tensor().to(self.flags.device)
logvars = torch.Tensor().to(self.flags.device)
# pass all mus and sigmas of subsets through flows and concatenate them
for _, distr in distrs.items():
mu_k, _ = self.flow_mus(distr.mu)
logvar_k, _ = self.flow_logvars(distr.logvar)
mus = torch.cat((mus, mu_k.unsqueeze(dim=0)), dim=0)
logvars = torch.cat((logvars, logvar_k.unsqueeze(dim=0)), dim=0)
mu_average = torch.mean(mus, dim=0)
logvar_average = torch.mean(logvars, dim=0)
mu_gfm, _ = self.flow_mus.rev(mu_average)
logvar_gfm, _ = self.flow_logvars.rev(logvar_average)
return Distr(mu=mu_gfm, logvar=logvar_gfm)
class iwmopgfm(iwMMVAE, PGfMVAE):
def __init__(self, exp, flags, modalities, subsets):
PGfMVAE.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.mm_div = BaseMMDiv()
self.prior = get_distr(flags.prior)(loc=torch.zeros(1, self.flags.class_dim, device=self.flags.device),
scale=torch.ones(1, self.flags.class_dim, device=self.flags.device))
self.qz_x = get_distr(flags.qz_x) # posterior
self.K = flags.K
def encode(self, input_batch: Mapping[str, Tensor]) -> Mapping[str, BaseEncMod]:
enc_mods = {}
for mod_str, mod in self.modalities.items():
if mod_str in input_batch:
enc_mods[mod_str] = {}
_, _, class_mu, class_logvar = mod.encoder(input_batch[mod_str])
latents_class = self.qz_x(class_mu, F.softmax(class_logvar, dim=-1) * class_logvar.size(-1) + 1e-6)
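# the softmax rescaling maps the raw encoder output to a strictly positive value (mean ~1 per dimension),
# so it can be passed directly as the distribution's scale parameter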
enc_mods[mod_str] = BaseEncMod(latents_class=latents_class)
return enc_mods
def forward(self, input_batch: dict) -> iwForwardResults:
enc_mods, joint_latents = self.inference(input_batch)
# reconstruct modalities
rec_mods = self.decode(enc_mods, joint_latents)
return iwForwardResults(enc_mods=enc_mods, joint_latents=joint_latents, rec_mods=rec_mods)
def inference(self, input_batch) -> tuple[Mapping[str, BaseEncMod], iwJointLatents]:
enc_mods, joint_latents = super().inference(input_batch)
zss = {}
subsets = {
subset_str: iwSubset(
qz_x_tilde=subset, zs=subset.rsample(torch.Size([self.K]))
)
for subset_str, subset in joint_latents.subsets.items()
}
# find the subset with all modalities to get the joint distr
max_subset_size = max(len(subset_str.split('_')) for subset_str in joint_latents.fusion_subsets_keys)
joint_distr = subsets[[subset_str for subset_str in joint_latents.fusion_subsets_keys if
len(subset_str.split('_')) == max_subset_size][0]]
joint_latents = iwJointLatents(fusion_subsets_keys=joint_latents.fusion_subsets_keys, subsets=subsets, zss=zss,
joint_distr=joint_distr)
return enc_mods, joint_latents
def fuse_modalities(self, enc_mods: Mapping[str, BaseEncMod],
batch_mods: typing.Iterable[str]) -> JointLatents:
"""
Create a subspace for all the combinations of the encoded modalities by combining them.
"""
batch_subsets = subsets_from_batchmods(batch_mods)
distr_subsets = {}
# pass all experts through the flow
enc_mod_transformed = {mod_key: self.encode_expert(enc_mod.latents_class) for mod_key, enc_mod in
enc_mods.items()}
# concatenate mus and logvars for every modality in each subset
for s_key in batch_subsets:
# for a unimodal subset, the subset distr is equal to the unimodal distr
if len(self.subsets[s_key]) == 1:
q_subset = enc_mods[s_key].latents_class
else:
q_subset = self.gfm(distrs=[enc_mod_transformed[mod.name] for mod in self.subsets[s_key]])
distr_subsets[s_key] = q_subset
if len(self.subsets[s_key]) == len(batch_mods):
joint_distr = q_subset
fusion_subsets_keys = s_key.split('_')
joint_distr.mod_strs = fusion_subsets_keys
return JointLatents(fusion_subsets_keys, joint_distr=joint_distr, subsets=distr_subsets)
def calculate_loss(self, forward_results: iwForwardResults, batch_d: dict) -> tuple[
float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
losses = []
klds = {}
log_probs = {}
for mod_str, subset in subsets.items():
lpz = self.prior.log_prob(subset.zs).sum(-1)
lqz_x = log_mean_exp(
torch.stack(
[subset_.qz_x_tilde.log_prob(subset_.zs).sum(-1) for _, subset_ in subsets.items()]
)
)
#
# lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
# for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
lpx_z = [self.modalities[out_mod_str].log_likelihood(px_z, batch_d[out_mod_str]).view(*px_z.batch_shape[:2],
-1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
lpx_z = torch.stack(lpx_z).sum(0)
kl_div = lpz - lqz_x
loss = lpx_z + kl_div
losses.append(loss)
log_probs[mod_str] = lpx_z.mean()
klds[mod_str] = log_mean_exp(kl_div).sum()
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# joint_div average of all subset divs
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
def gfm(self, distrs: typing.List[Distr]) -> Distr:
mus = torch.Tensor().to(self.flags.device)
logvars = torch.Tensor().to(self.flags.device)
for distr in distrs:
mus = torch.cat((mus, distr.mu.unsqueeze(dim=0)), dim=0)
logvars = torch.cat((logvars, distr.logvar.unsqueeze(dim=0)), dim=0)
mu_average = torch.mean(mus, dim=0)
logvar_average = torch.mean(logvars, dim=0)
scale = self.flow_logvars.rev(logvar_average)[0]
ok = Normal.arg_constraints["scale"].check(scale)
bad_elements = scale[~ok]
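# debug helpers: ok/bad_elements flag entries of the reversed-flow scale that violate the Normal
# scale constraint (non-positive values); they are not used in the returned distribution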
return self.qz_x(loc=self.flow_mus.rev(mu_average)[0], scale=F.softmax(scale, dim=-1) * scale.size(-1) + 1e-6)
def encode_expert(self, expert_distr: Distr) -> Distr:
mu_k, _ = self.flow_mus(expert_distr.loc)
logvar_k, _ = self.flow_logvars(expert_distr.scale)
return Distr(mu=mu_k, logvar=logvar_k)
class MopGfM(iwmopgfm):
"""Mixture of parameter GfM method."""
def __init__(self, exp, flags, modalities, subsets):
super().__init__(exp, flags, modalities, subsets)
self.K = 1 # mopfgm = iwmopgfm with K = 1
```
#### File: mmvae_hub/networks/iwVaes.py
```python
import math
import torch.distributions as distr
import torch.nn.functional as F
from mmvae_hub.networks.FlowVaes import MoFoPoE
# from mmvae_hub.networks.GfMVaes import MopGfM
#
from mmvae_hub.networks.MixtureVaes import MOEMMVae, MoPoEMMVae
from mmvae_hub.networks.PoEMMVAE import POEMMVae
from mmvae_hub.networks.utils.utils import get_distr
from mmvae_hub.utils.Dataclasses.iwdataclasses import *
from mmvae_hub.utils.metrics.likelihood import log_mean_exp
def log_mean_exp(value, dim=0, keepdim=False):
return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(value.size(dim))
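# log_mean_exp(x, dim) = log(mean_k exp(x_k)) = logsumexp(x, dim) - log(K),
# computed with torch.logsumexp for numerical stability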
class iwMMVAE():
def __init__(self, flags):
self.K = flags.K
def conditioned_generation(self, input_samples: dict, subset_key: str, style=None):
"""
Generate samples conditioned with input samples for a given subset.
subset_key str: The key indicating which subset is used for the generation.
"""
# infer latents from batch
enc_mods, joint_latents = self.inference(input_samples)
subset_embedding = joint_latents.subsets[subset_key].qz_x_tilde.mean
cond_mod_in = ReparamLatent(content=subset_embedding, style=style)
return self.generate_from_latents(cond_mod_in)
def decode(self, enc_mods: Mapping[str, BaseEncMod], joint_latents: iwJointLatents) -> dict:
"""Decoder outputs each reconstructed modality as a dict."""
rec_mods = {}
for subset_str, subset in joint_latents.subsets.items():
subset_samples = subset.zs.reshape((self.K * self.flags.batch_size, self.flags.class_dim))
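# fold the K importance samples into the batch dimension for decoding;
# the likelihood unflattens them back to (K, batch_size)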
rec_mods[subset_str] = {
out_mod_str: dec_mod.calc_likelihood(class_embeddings=subset_samples,
unflatten=(self.K, self.flags.batch_size)
)
for out_mod_str, dec_mod in self.modalities.items()
}
return rec_mods
class iwPoE(iwMMVAE, POEMMVae):
def __init__(self, exp, flags, modalities, subsets):
POEMMVae.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.prior = get_distr(flags.prior)(loc=torch.zeros(1, self.flags.class_dim, device=self.flags.device),
scale=torch.ones(1, self.flags.class_dim, device=self.flags.device))
def forward(self, input_batch: dict) -> iwForwardResults:
enc_mods, joint_latents = self.inference(input_batch)
# reconstruct modalities
rec_mods = self.decode(enc_mods, joint_latents)
return iwForwardResults(enc_mods=enc_mods, joint_latents=joint_latents, rec_mods=rec_mods)
def inference(self, input_batch) -> tuple[Mapping[str, BaseEncMod], iwJointLatents]:
enc_mods, joint_latents = super().inference(input_batch)
subsets = {}
zss = {}
for subset_str, subset in joint_latents.subsets.items():
qz_x_tilde = distr.Normal(loc=subset.mu, scale=subset.logvar)
subsets[subset_str] = iwSubset(qz_x_tilde=qz_x_tilde, zs=qz_x_tilde.rsample(torch.Size([self.K])))
# find the subset with all modalities to get the joint distr
max_subset_size = max(len(subset_str.split('_')) for subset_str in joint_latents.fusion_subsets_keys)
joint_distr = subsets[[subset_str for subset_str in joint_latents.fusion_subsets_keys if
len(subset_str.split('_')) == max_subset_size][0]]
joint_latents = iwJointLatents(fusion_subsets_keys=joint_latents.fusion_subsets_keys, subsets=subsets, zss=zss,
joint_distr=joint_distr)
return enc_mods, joint_latents
def encode(self, input_batch: Mapping[str, Tensor]) -> Mapping[str, BaseEncMod]:
enc_mods = {}
for mod_str, mod in self.modalities.items():
if mod_str in input_batch:
enc_mods[mod_str] = {}
_, _, class_mu, class_logvar = mod.encoder(input_batch[mod_str])
latents_class = Distr(mu=class_mu,
logvar=F.softmax(class_logvar, dim=-1) * class_logvar.size(-1) + 1e-6)
enc_mods[mod_str] = BaseEncMod(latents_class=latents_class)
return enc_mods
def calculate_loss(self, forward_results: iwForwardResults, batch_d: dict) -> tuple[
float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
losses = []
klds = {}
log_probs = {}
for mod_str, enc_mod in forward_results.enc_mods.items():
subset = subsets[mod_str]
# sum(-1) is the sum over the class dim
lpz = self.prior.log_prob(
subset.zs).sum(-1)
# take the log mean exp over the modalities
lqz_x = log_mean_exp(
torch.stack(
[subsets[mod].qz_x_tilde.log_prob(subset.zs).sum(-1) for mod in forward_results.enc_mods]))
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
# sum over modalities
lpx_z = torch.stack(lpx_z).sum(0)
kl_div = lpz - lqz_x
loss = lpx_z + kl_div
losses.append(loss)
log_probs[mod_str] = lpx_z.mean()
klds[mod_str] = log_mean_exp(kl_div).sum()
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# joint_div average of all subset divs
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoE(iwMMVAE, MOEMMVae):
def __init__(self, exp, flags, modalities, subsets):
MOEMMVae.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.prior = get_distr(flags.prior)(loc=torch.zeros(1, self.flags.class_dim, device=self.flags.device),
scale=torch.ones(1, self.flags.class_dim, device=self.flags.device))
def forward(self, input_batch: dict) -> iwForwardResults:
enc_mods, joint_latents = self.inference(input_batch)
# reconstruct modalities
rec_mods = self.decode(enc_mods, joint_latents)
return iwForwardResults(enc_mods=enc_mods, joint_latents=joint_latents, rec_mods=rec_mods)
def inference(self, input_batch) -> tuple[Mapping[str, BaseEncMod], iwJointLatents]:
enc_mods, joint_latents = super().inference(input_batch)
subsets = {}
zss = {}
for subset_str, subset in joint_latents.subsets.items():
qz_x_tilde = distr.Normal(loc=subset.mu, scale=subset.logvar)
subsets[subset_str] = iwSubset(qz_x_tilde=qz_x_tilde, zs=qz_x_tilde.rsample(torch.Size([self.K])))
# find the subset with all modalities to get the joint distr
max_subset_size = max(len(subset_str.split('_')) for subset_str in joint_latents.fusion_subsets_keys)
joint_distr = subsets[[subset_str for subset_str in joint_latents.fusion_subsets_keys if
len(subset_str.split('_')) == max_subset_size][0]]
joint_latents = iwJointLatents(fusion_subsets_keys=joint_latents.fusion_subsets_keys, subsets=subsets, zss=zss,
joint_distr=joint_distr)
return enc_mods, joint_latents
def encode(self, input_batch: Mapping[str, Tensor]) -> Mapping[str, BaseEncMod]:
enc_mods = {}
for mod_str, mod in self.modalities.items():
if mod_str in input_batch:
enc_mods[mod_str] = {}
_, _, class_mu, class_logvar = mod.encoder(input_batch[mod_str])
latents_class = Distr(mu=class_mu,
logvar=F.softmax(class_logvar, dim=-1) * class_logvar.size(-1) + 1e-6)
enc_mods[mod_str] = BaseEncMod(latents_class=latents_class)
return enc_mods
def calculate_loss(self, forward_results: iwForwardResults, batch_d: dict) -> tuple[
float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
losses = []
klds = {}
log_probs = {}
for mod_str, enc_mod in forward_results.enc_mods.items():
subset = subsets[mod_str]
# sum(-1) is the sum over the class dim
lpz = self.prior.log_prob(
subset.zs).sum(-1)
# take the log mean exp over the modalities
lqz_x = log_mean_exp(
torch.stack(
[subsets[mod].qz_x_tilde.log_prob(subset.zs).sum(-1) for mod in forward_results.enc_mods]))
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
# sum over modalities
lpx_z = torch.stack(lpx_z).sum(0)
kl_div = lpz - lqz_x
loss = lpx_z + kl_div
losses.append(loss)
log_probs[mod_str] = lpx_z.mean()
klds[mod_str] = log_mean_exp(kl_div).sum()
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# joint_div average of all subset divs
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
class iwMoPoE(iwMMVAE, MoPoEMMVae):
def __init__(self, exp, flags, modalities, subsets):
MoPoEMMVae.__init__(self, exp, flags, modalities, subsets)
iwMMVAE.__init__(self, flags)
self.prior = get_distr(flags.prior)(loc=torch.zeros(1, self.flags.class_dim, device=self.flags.device),
scale=torch.ones(1, self.flags.class_dim, device=self.flags.device))
self.K = flags.K
def forward(self, input_batch: dict) -> iwForwardResults:
enc_mods, joint_latents = self.inference(input_batch)
# reconstruct modalities
rec_mods = self.decode(enc_mods, joint_latents)
return iwForwardResults(enc_mods=enc_mods, joint_latents=joint_latents, rec_mods=rec_mods)
def inference(self, input_batch) -> tuple[Mapping[str, BaseEncMod], iwJointLatents]:
enc_mods, joint_latents = super().inference(input_batch)
subsets = {}
zss = {}
for subset_str, subset in joint_latents.subsets.items():
qz_x_tilde = distr.Normal(loc=subset.mu, scale=subset.logvar.exp())
subsets[subset_str] = iwSubset(qz_x_tilde=qz_x_tilde, zs=qz_x_tilde.rsample(torch.Size([self.K])))
# find the subset with all modalities to get the joint distr
max_subset_size = max(len(subset_str.split('_')) for subset_str in joint_latents.fusion_subsets_keys)
joint_distr = subsets[[subset_str for subset_str in joint_latents.fusion_subsets_keys if
len(subset_str.split('_')) == max_subset_size][0]]
joint_latents = iwJointLatents(fusion_subsets_keys=joint_latents.fusion_subsets_keys, subsets=subsets, zss=zss,
joint_distr=joint_distr)
return enc_mods, joint_latents
def encode(self, input_batch: Mapping[str, Tensor]) -> Mapping[str, BaseEncMod]:
enc_mods = {}
for mod_str, mod in self.modalities.items():
if mod_str in input_batch:
enc_mods[mod_str] = {}
_, _, class_mu, class_logvar = mod.encoder(input_batch[mod_str])
latents_class = Distr(mu=class_mu,
logvar=F.softmax(class_logvar, dim=-1) * class_logvar.size(-1) + 1e-6)
enc_mods[mod_str] = BaseEncMod(latents_class=latents_class)
return enc_mods
def calculate_loss(self, forward_results: iwForwardResults, batch_d: dict) -> tuple[
float, float, dict, Mapping[str, float]]:
subsets = forward_results.joint_latents.subsets
losses = []
klds = {}
log_probs = {}
for mod_str, subset in subsets.items():
# sum over last dim
lpz = self.prior.log_prob(subset.zs).sum(-1)
# lqz_x = log_mean_exp(
# torch.stack(
# [subset_.qz_x_tilde.log_prob(subset_.zs).sum(-1) for _, subset_ in subsets.items()]))
lqz_x = subset.qz_x_tilde.log_prob(subset.zs).sum(-1)
lpx_z = [px_z.log_prob(batch_d[out_mod_str]).view(*px_z.batch_shape[:2], -1).sum(-1)
for out_mod_str, px_z in forward_results.rec_mods[mod_str].items()]
lpx_z = torch.stack(lpx_z).sum(0)
kl_div = lpz - lqz_x
loss = lpx_z + kl_div
losses.append(loss)
log_probs[mod_str] = lpx_z.mean()
klds[mod_str] = log_mean_exp(kl_div).sum()
total_loss = -log_mean_exp(torch.cat(losses, 1)).sum()
# joint_div average of all subset divs
joint_div = torch.cat(tuple(div.unsqueeze(dim=0) for _, div in klds.items()))
# normalize with the number of samples
joint_div = joint_div.mean()
return total_loss, joint_div, log_probs, klds
```
#### File: networks/text/ConvNetworkTextClf.py
```python
import torch.nn as nn
from mmvae_hub.networks.text.mmvae_text_enc import make_res_block_enc_feat_ext
class ClfText(nn.Module):
def __init__(self, flags, labels):
super(ClfText, self).__init__()
self.args = flags
self.labels = labels
if flags.text_encoding == 'char':
self.conv1 = nn.Conv1d(self.args.num_features, self.args.DIM_text,
kernel_size=4, stride=2, padding=1, dilation=1)
elif flags.text_encoding == 'word':
self.embedding = nn.Embedding(num_embeddings=self.args.vocab_size, embedding_dim=self.args.DIM_text,
padding_idx=0)
self.conv1 = nn.Conv1d(self.args.DIM_text, self.args.DIM_text,
kernel_size=4, stride=2, padding=1, dilation=1)
self.resblock_1 = make_res_block_enc_feat_ext(self.args.DIM_text,
2 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_2 = make_res_block_enc_feat_ext(2 * self.args.DIM_text,
3 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_3 = make_res_block_enc_feat_ext(3 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_4 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_5 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_6 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_7 = make_res_block_enc_feat_ext(5 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_8 = make_res_block_enc_feat_ext(5 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=0, dilation=1)
self.dropout = nn.Dropout(p=0.5, inplace=False)
self.linear = nn.Linear(in_features=5 * flags.DIM_text, out_features=len(self.labels), bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x_text):
"""
input_shape: [batch_size, len_sentence, vocab_size]
Example:
torch.Size([10, 1024, 71])
torch.Size([10, 71, 1024])
torch.Size([10, 128, 512])
torch.Size([10, 256, 256])
torch.Size([10, 384, 128])
torch.Size([10, 512, 64])
torch.Size([10, 512, 32])
torch.Size([10, 512, 16])
torch.Size([10, 640, 8])
torch.Size([10, 640, 4])
torch.Size([10, 640, 1])
torch.Size([10, 640, 1])
torch.Size([10, 640, 1])
torch.Size([10, 3])
torch.Size([10, 3])
"""
if self.args.text_encoding == 'word':
out = self.embedding(x_text.long())
out = out.transpose(-2, -1)
out = self.conv1(out)
elif self.args.text_encoding == 'char':
x_text = x_text.transpose(-2, -1)
out = self.conv1(x_text)
out = self.resblock_1(out)
out = self.resblock_2(out)
out = self.resblock_3(out)
out = self.resblock_4(out)
out = self.resblock_5(out)
out = self.resblock_6(out)
if self.args.len_sequence > 500:
out = self.resblock_7(out)
out = self.resblock_8(out)
h = self.dropout(out)
h = h.view(h.size(0), -1)
h = self.linear(h)
out = self.sigmoid(h)
return out
```
#### File: networks/text/mmvae_text_enc.py
```python
import torch.nn as nn
from mmvae_hub.networks.utils.ResidualBlocks import ResidualBlock1dConv
def make_res_block_enc_feat_ext(in_channels, out_channels, kernelsize, stride, padding, dilation, a_val=2.0, b_val=0.3):
downsample = None
if (stride != 1) or (in_channels != out_channels) or dilation != 1:
downsample = nn.Sequential(nn.Conv1d(in_channels, out_channels,
kernel_size=kernelsize,
stride=stride,
padding=padding,
dilation=dilation),
nn.BatchNorm1d(out_channels))
layers = []
layers.append(
ResidualBlock1dConv(in_channels, out_channels, kernelsize, stride, padding, dilation, downsample, a=a_val,
b=b_val))
return nn.Sequential(*layers)
class FeatureExtractorText(nn.Module):
def __init__(self, args, a=2.0, b=0.3):
super(FeatureExtractorText, self).__init__()
self.args = args
# self.embedding = nn.Embedding(self.args.DIM_text, self.args.DIM_text, padding_idx=0)
self.embedding = nn.Embedding(num_embeddings=self.args.vocab_size, embedding_dim=self.args.DIM_text,
padding_idx=0)
self.conv1 = nn.Conv1d(self.args.DIM_text, self.args.DIM_text,
kernel_size=4, stride=2, padding=1, dilation=1)
self.resblock_1 = make_res_block_enc_feat_ext(self.args.DIM_text,
2 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_2 = make_res_block_enc_feat_ext(2 * self.args.DIM_text,
3 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_3 = make_res_block_enc_feat_ext(3 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_4 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_5 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
4 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_6 = make_res_block_enc_feat_ext(4 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
if self.args.len_sequence > 500:
self.resblock_7 = make_res_block_enc_feat_ext(5 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=1, dilation=1)
self.resblock_8 = make_res_block_enc_feat_ext(5 * self.args.DIM_text,
5 * self.args.DIM_text,
kernelsize=4, stride=2, padding=0, dilation=1)
def forward(self, x):
"""
Example for x.shape=torch.Size([200, 1024]):
torch.Size([200, 1024, 128])
transpose: torch.Size([200, 128, 1024])
torch.Size([200, 128, 512])
torch.Size([200, 256, 256])
torch.Size([200, 384, 128])
torch.Size([200, 512, 64])
torch.Size([200, 512, 32])
torch.Size([200, 512, 16])
torch.Size([200, 640, 8])
torch.Size([200, 640, 4])
torch.Size([200, 640, 1])
"""
x = self.embedding(x.long())
x = x.transpose(-2, -1)
out = self.conv1(x)
out = self.resblock_1(out)
out = self.resblock_2(out)
out = self.resblock_3(out)
out = self.resblock_4(out)
out = self.resblock_5(out)
out = self.resblock_6(out)
if self.args.len_sequence > 500:
out = self.resblock_7(out)
out = self.resblock_8(out)
return out
```
#### File: mmvae_hub/polymnist/experiment.py
```python
import random
from pathlib import Path
from typing import Mapping, Iterable
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from torch import Tensor
from torchvision import transforms
from mmvae_hub.base.BaseExperiment import BaseExperiment
from mmvae_hub.modalities import BaseModality
from mmvae_hub.polymnist.PolymnistDataset import PolymnistDataset, ToyPolymnistDataset
from mmvae_hub.polymnist.PolymnistMod import PolymnistMod
from mmvae_hub.polymnist.metrics import PolymnistMetrics
from mmvae_hub.utils.utils import dict_to_device
class PolymnistExperiment(BaseExperiment):
def __init__(self, flags):
super(PolymnistExperiment, self).__init__(flags)
self.labels = ['digit']
self.dataset_name = 'polymnist'
self.num_modalities = flags.num_mods
self.modalities = self.set_modalities()
self.subsets = self.set_subsets()
self.dataset_train, self.dataset_test = self.set_dataset()
self.mm_vae = self.set_model()
print(self.mm_vae)
self.optimizer = None
self.rec_weights = self.set_rec_weights()
self.style_weights = self.set_style_weights()
self.test_samples = self.get_test_samples()
self.eval_metric = accuracy_score
self.metrics = PolymnistMetrics
self.paths_fid = self.set_paths_fid()
def set_modalities(self) -> Mapping[str, BaseModality]:
mods = [PolymnistMod(self.flags, name="m%d" % m) for m in range(self.num_modalities)]
mods = {m.name: m for m in mods}
return mods
def set_dataset(self):
transform = transforms.Compose([transforms.ToTensor()])
if self.flags.dataset == 'toy':
train = ToyPolymnistDataset(num_modalities=self.num_modalities, seed=self.flags.seed)
test = ToyPolymnistDataset(num_modalities=self.num_modalities, seed=self.flags.seed)
else:
train = PolymnistDataset(Path(self.flags.dir_data) / 'train', transform=transform,
num_modalities=self.num_modalities)
test = PolymnistDataset(Path(self.flags.dir_data) / 'test', transform=transform,
num_modalities=self.num_modalities)
return train, test
def set_rec_weights(self):
rec_weights = {}
for k, m_key in enumerate(self.modalities.keys()):
mod = self.modalities[m_key]
numel_mod = mod.data_size.numel()
rec_weights[mod.name] = 1.0
return rec_weights
def set_style_weights(self):
return {"m%d" % m: self.flags.beta_style for m in range(self.num_modalities)}
def get_transform_polymnist(self):
return transforms.Compose([transforms.ToTensor()])
def get_test_samples(self, num_images=10) -> Iterable[Mapping[str, Tensor]]:
"""
Gets random samples for the cond. generation.
"""
random.seed(42)
n_test = len(self.dataset_test)
samples = []
for i in range(num_images):
while True:
# loop until sample with label i is found
ix = random.randint(0, n_test - 1)
sample, target = self.dataset_test[ix]
if target == i:
samples.append(dict_to_device(sample, self.flags.device))
break
return samples
def mean_eval_metric(self, values):
return np.mean(np.array(values))
def get_prediction_from_attr(self, attr, index=None):
return np.argmax(attr, axis=1).astype(int)
def eval_label(self, values, labels, index):
pred = self.get_prediction_from_attr(values)
return self.eval_metric(labels, pred)
```
#### File: mmvae_hub/polymnist/flags.py
```python
from pathlib import Path
from mmvae_hub.base.BaseFlags import parser as parser
from mmvae_hub.utils.setup.flags_utils import BaseFlagsSetup
parser.add_argument('--name', type=str, default='polymnist', help="name of the dataset")
parser.add_argument('--exp_str_prefix', type=str, default='polymnist', help="prefix of the experiment directory.")
# training
parser.add_argument('--num_mods', type=int, default=3, help="number of modalities")
parser.add_argument('--style_dim', type=int, default=0,
help="style dimensionality") # TODO: use modality-specific style dimensions?
# parser.add_argument('--style_m1_dim', type=int, default=0, help="dimension of varying factor latent space")
# parser.add_argument('--style_m2_dim', type=int, default=0, help="dimension of varying factor latent space")
# parser.add_argument('--style_m3_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--num_classes', type=int, default=10, help="number of classes on which the data set trained")
parser.add_argument('--img_size_m1', type=int, default=28, help="img dimension (width/height)")
parser.add_argument('--num_channels_m1', type=int, default=1, help="number of channels in images")
parser.add_argument('--img_size_m2', type=int, default=32, help="img dimension (width/height)")
parser.add_argument('--num_channels_m2', type=int, default=3, help="number of channels in images")
parser.add_argument('--dim', type=int, default=64, help="layer dimensionality of the networks")
parser.add_argument('--num_hidden_layers', type=int, default=1, help="number of hidden layers")
# multimodal
parser.add_argument('--subsampled_reconstruction', default=True, help="subsample reconstruction path")
parser.add_argument('--include_prior_expert', action='store_true', default=False, help="factorized_representation")
# weighting of loss terms
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--beta_m3_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--div_weight', type=float, default=None,
help="default weight divergence per modality, if None use 1/(num_mods+1).")
parser.add_argument('--div_weight_uniform_content', type=float, default=None,
help="default weight divergence term prior, if None use (1/num_mods+1)")
class FlagsSetup(BaseFlagsSetup):
def __init__(self, config_path: Path):
super().__init__(config_path)
self.parser = parser
def flags_set_alpha_modalities(self, flags):
flags.alpha_modalities = [flags.div_weight_uniform_content]
if flags.div_weight is None:
flags.div_weight = 1 / (flags.num_mods + 1)
flags.alpha_modalities.extend([flags.div_weight for _ in range(flags.num_mods)])
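# e.g. with num_mods=3 and div_weight=None this yields
# alpha_modalities = [div_weight_uniform_content, 0.25, 0.25, 0.25]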
return flags
```
#### File: mmvae_hub/polymnist/PolymnistMod.py
```python
import os
import torch
from matplotlib import pyplot as plt
from torch import Tensor
from mmvae_hub.modalities.ModalityIMG import ModalityIMG
from mmvae_hub.polymnist.networks.ConvNetworkImgClfPolymnist import ClfImg
from mmvae_hub.polymnist.networks.ConvNetworksImgPolymnist import EncoderImg, DecoderImg
from mmvae_hub.polymnist.utils import download_polymnist_clfs
from mmvae_hub.utils.plotting.save_samples import write_samples_img_to_file
class PolymnistMod(ModalityIMG):
def __init__(self, flags, name: str):
super(PolymnistMod, self).__init__(data_size=torch.Size((3, 28, 28)), flags=flags, name=name)
self.plot_img_size = torch.Size((3, 28, 28))
self.gen_quality_eval = True
self.file_suffix = '.png'
self.encoder = EncoderImg(flags).to(flags.device)
self.decoder = DecoderImg(flags).to(flags.device)
self.rec_weight = 1.0
# self.transform = transforms.Compose([transforms.ToTensor()])
self.clf = self.get_clf()
def save_data(self, d, fn, args):
img_per_row = args['img_per_row']
write_samples_img_to_file(d, fn, img_per_row)
def plot_data(self, d):
# out = self.transform(d.squeeze(0).cpu()).cuda().unsqueeze(0)
# return out
return d
def get_clf(self):
if self.flags.use_clf:
dir_clf = self.flags.dir_clf
if not dir_clf.exists():
download_polymnist_clfs(dir_clf)
model_clf = ClfImg()
model_clf.load_state_dict(
torch.load(os.path.join(self.flags.dir_clf, f"pretrained_img_to_digit_clf_{self.name}"),
map_location=self.flags.device))
return model_clf.to(self.flags.device)
def plot_data_single_img(self, d: Tensor):
return plt.imshow(d.detach().cpu().squeeze().moveaxis(0, -1))
```
#### File: sylvester_flows/utils/distributions.py
```python
from __future__ import print_function
import math
import torch
import torch.utils.data
from torch.autograd import Variable
MIN_EPSILON = 1e-5
MAX_EPSILON = 1. - 1e-5
# module-level constant needed by log_normal_normalized below
PI = torch.tensor(math.pi)
# N(x | mu, var) = 1/sqrt{2pi var} exp[-1/(2 var) (x-mean)(x-mean)]
# log N(x| mu, var) = -log sqrt(2pi) -0.5 log var - 0.5 (x-mean)(x-mean)/var
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -(x - mean) * (x - mean)
log_norm *= torch.reciprocal(2. * log_var.exp())
log_norm += -0.5 * log_var
log_norm += -0.5 * torch.log(2. * PI)
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_standard(x, average=False, reduce=True, dim=None):
log_norm = -0.5 * x * x
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = x * torch.log(probs) + (1. - x) * torch.log(1. - probs)
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
return torch.sum(log_bern, dim)
else:
return log_bern
```
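As a quick numerical sanity check of the densities above (assuming the functions are in scope, e.g. pasted into a session together with the `PI` constant), `log_normal_normalized` matches `torch.distributions.Normal.log_prob`, while `log_normal_diag` differs from it only by the `0.5*log(2*pi)` constant per dimension:
```python
import math
import torch
from torch.distributions import Normal

x = torch.randn(4, 3)
mean = torch.zeros(4, 3)
log_var = torch.zeros(4, 3)  # unit variance

ref = Normal(mean, (0.5 * log_var).exp()).log_prob(x).sum(dim=1)
full = log_normal_normalized(x, mean, log_var, dim=1)
unnorm = log_normal_diag(x, mean, log_var, dim=1)

print(torch.allclose(full, ref, atol=1e-5))  # True
print(torch.allclose(unnorm - 0.5 * math.log(2 * math.pi) * x.shape[1], ref, atol=1e-5))  # True
```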
#### File: utils/Dataclasses/iwdataclasses.py
```python
from typing import Tuple
from torch.distributions import Distribution
from mmvae_hub.utils.Dataclasses.Dataclasses import *
from dataclasses import dataclass
from torch import Tensor
from mmvae_hub.utils.Dataclasses.gfmDataclasses import JointLatentsGfM
@dataclass
class JointLatentsiwMoGfM2:
"""Joint Latens for generalized f-means methods."""
joint_embedding: JointEmbeddingFoEM
z_Gfs: Mapping[str, Tensor]
subset_samples: Mapping[str, Tuple]
subsets: Mapping[str, Tensor]
enc_mods: Mapping[str, BaseEncMod]
srv_proxies: Mapping[str, Distribution]
def get_joint_embeddings(self):
return self.joint_embedding.embedding.mean(dim=0)
def get_subset_embedding(self, s_key: str):
return self.subsets[s_key][0].mean(dim=0)
def get_q0(self, subset_key: str):
"""Get the mean of the unimodal latents and the embeddings of the multimodal latents."""
if subset_key == 'joint':
return self.joint_embedding.embedding.mean(dim=0)
return self.subsets[subset_key][0].mean(dim=0)
def get_lreval_data(self):
lr_data = {'q0': {}}
for key in self.subsets:
lr_data['q0'][key] = self.subsets[key][0].mean(dim=0).cpu()
lr_data['q0']['joint'] = self.joint_embedding.embedding.mean(dim=0).cpu()
return lr_data
def get_latent_samples(self, subset_key: str, n_imp_samples, model, mod_names=None, style=None):
"""Sample n_imp_samples from the latents."""
# modalities that span the subset
enc_mod_selection = subset_key.split('_')
batch_size, class_dim = self.enc_mods[[mod_str for mod_str in self.enc_mods][0]].latents_class.mu.shape
transformed_enc_mods = {
mod_key: model.flow(
torch.cat([distr.latents_class.reparameterize().unsqueeze(dim=0) for _ in range(n_imp_samples)],
dim=0).reshape((n_imp_samples * batch_size, class_dim)))[
0] for mod_key, distr in
self.enc_mods.items() if mod_key in enc_mod_selection}
subset_zks = torch.Tensor().to(self.enc_mods[[mod_str for mod_str in self.enc_mods][0]].latents_class.mu.device)
for mod_k in enc_mod_selection:
subset_zks = torch.cat((subset_zks, transformed_enc_mods[mod_k].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
samples = model.flow.rev(z_mean)[0].reshape((n_imp_samples, batch_size, class_dim))
c = {'mu': None, 'logvar': None, 'z': samples}
return {'content': c, 'style': None}
@dataclass
class JointLatentsiwMoGfMVAE_amortized:
"""Joint Latens for generalized f-means methods."""
joint_embedding: JointEmbeddingFoEM
transformed_enc_mods: Mapping[str, Tuple]
subset_samples: Mapping[str, Tuple]
subsets: Mapping[str, Tensor]
enc_mods: Mapping[str, BaseEncMod]
zmss: Mapping[str, Tensor]
priors_tf_enc_mods: Mapping[str, Distribution]
def get_joint_embeddings(self):
return self.joint_embedding.embedding.mean(dim=0)
def get_subset_embedding(self, s_key: str):
return self.subsets[s_key][0].mean(dim=0)
def get_q0(self, subset_key: str):
"""Get the mean of the unimodal latents and the embeddings of the multimodal latents."""
if subset_key == 'joint':
return self.joint_embedding.embedding.mean(dim=0)
return self.subsets[subset_key][0].mean(dim=0)
def get_lreval_data(self):
lr_data = {'q0': {}}
for key in self.subsets:
lr_data['q0'][key] = self.subsets[key][0].mean(dim=0).cpu()
lr_data['q0']['joint'] = self.joint_embedding.embedding.mean(dim=0).cpu()
return lr_data
def get_latent_samples(self, subset_key: str, n_imp_samples, model, mod_names=None, style=None):
"""Sample n_imp_samples from the latents."""
# modalities that span the subset
enc_mod_selection = subset_key.split('_')
batch_size, class_dim = self.enc_mods[[mod_str for mod_str in self.enc_mods][0]].latents_class.mu.shape
transformed_enc_mods = {
mod_key: model.flow(
torch.cat([distr.latents_class.reparameterize().unsqueeze(dim=0) for _ in range(n_imp_samples)],
dim=0).reshape((n_imp_samples * batch_size, class_dim)))[
0] for mod_key, distr in
self.enc_mods.items() if mod_key in enc_mod_selection}
subset_zks = torch.Tensor().to(self.enc_mods[[mod_str for mod_str in self.enc_mods][0]].latents_class.mu.device)
for mod_k in enc_mod_selection:
subset_zks = torch.cat((subset_zks, transformed_enc_mods[mod_k].unsqueeze(dim=0)), dim=0)
# mean of zks
z_mean = torch.mean(subset_zks, dim=0)
# calculate inverse flow
samples = model.flow.rev(z_mean)[0].reshape((n_imp_samples, batch_size, class_dim))
c = {'mu': None, 'logvar': None, 'z': samples}
return {'content': c, 'style': None}
@dataclass
class JointLatentsiwMoGfM(JointLatentsGfM):
"""Joint Latents for mixture of generalized f-means methods."""
epss: Tensor
def get_lreval_data(self):
lr_data = {'q0': {}}
for key in self.subsets:
lr_data['q0'][key] = self.subsets[key].mean(dim=0).cpu()
lr_data['q0']['joint'] = self.joint_embedding.embedding.mean(dim=0).cpu()
return lr_data
def get_subset_embedding(self, s_key: str):
return self.subsets[s_key].mean(dim=0)
def get_joint_embeddings(self):
return self.joint_embedding.embedding.mean(dim=0)
@dataclass
class iwSubset:
zs: Tensor
qz_x_tilde: Distribution
@dataclass
class iwJointLatents:
fusion_subsets_keys: Iterable[str]
joint_distr: iwSubset
subsets: Mapping[str, iwSubset]
zss: Mapping[str, Distribution]
def get_joint_embeddings(self):
return self.joint_distr.qz_x_tilde.rsample()
def get_subset_embedding(self, s_key: str):
return self.subsets[s_key].qz_x_tilde.rsample()
def get_q0(self, subset_key: str):
"""Return the mean of the subset."""
if subset_key == 'joint':
return self.get_joint_q0()
return self.subsets[subset_key].qz_x_tilde.mean
def get_joint_q0(self):
return self.joint_distr.qz_x_tilde.mean
def get_lreval_data(self) -> dict:
"""Get lr_data for the lr evaluation."""
lr_data = {'q0': {}}
for key in self.subsets:
lr_data['q0'][key] = self.get_q0(key).cpu()
lr_data['q0']['joint'] = self.get_joint_q0().cpu()
return lr_data
def get_lreval_data_(self, data_train: dict):
"""Add lr values to data_train."""
for key in self.subsets:
data_train['q0'][key] = torch.cat((data_train['q0'][key], self.get_q0(key).cpu()), 0)
joint_q0 = self.get_joint_q0().cpu()
data_train['q0']['joint'] = torch.cat((data_train['q0']['joint'], joint_q0), 0)
return data_train
def get_latent_samples(self, subset_key: str, n_imp_samples, mod_names=None, style=None, model=None):
"""Sample n_imp_samples from the latents."""
c_embed = self.subsets[subset_key].qz_x_tilde.rsample((n_imp_samples,))
c = {'mu': self.subsets[subset_key].qz_x_tilde.loc.unsqueeze(0).repeat(n_imp_samples, 1, 1),
'logvar': self.subsets[subset_key].qz_x_tilde.scale.unsqueeze(0).repeat(n_imp_samples, 1, 1),
'z': c_embed}
styles = {key: None for k, key in enumerate(mod_names)}
return {'content': c, 'style': styles}
# def get_latent_samples(self, subset_key: str, n_imp_samples, mod_names=None, style=None, model=None):
# """Sample n_imp_samples from the latents."""
# c_embed = self.subsets[subset_key].qz_x_tilde.rsample(n_imp_samples)
# l_s = style
# l_c_m_rep = l_c.mu.unsqueeze(0).repeat(n_imp_samples, 1, 1)
# l_c_lv_rep = l_c.logvar.unsqueeze(0).repeat(n_imp_samples, 1, 1)
# c_emb = Distr(l_c_m_rep, l_c_lv_rep).reparameterize()
#
# styles = {}
# c = {'mu': l_c_m_rep, 'logvar': l_c_lv_rep, 'z': c_emb}
#
# if style is not None:
# for k, key in enumerate(l_s.keys()):
# l_s_mod = l_s[key]
# l_s_m_rep = l_s_mod[0].unsqueeze(0).repeat(n_imp_samples, 1, 1)
# l_s_lv_rep = l_s_mod[1].unsqueeze(0).repeat(n_imp_samples, 1, 1)
# s_emb = Distr(l_s_m_rep, l_s_lv_rep).reparameterize()
# s = {'mu': l_s_m_rep, 'logvar': l_s_lv_rep, 'z': s_emb}
# styles[key] = s
# else:
# for k, key in enumerate(mod_names):
# styles[key] = None
#
# return {'content': c, 'style': styles}
@dataclass
class iwForwardResults:
enc_mods: Mapping[str, BaseEncMod]
joint_latents: iwJointLatents
rec_mods: dict
```
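The subset posteriors handled by `iwJointLatents` are ordinary `torch.distributions` objects, so the repeat-and-sample pattern used in `get_latent_samples` can be shown in isolation. The `Normal` below is only a stand-in for whatever `qz_x_tilde` the model actually produces (note that the class stores the distribution's `scale` under the `'logvar'` key):
```python
import torch
from torch.distributions import Normal

n_imp_samples, batch_size, class_dim = 4, 8, 16
qz_x_tilde = Normal(torch.zeros(batch_size, class_dim), torch.ones(batch_size, class_dim))

z = qz_x_tilde.rsample((n_imp_samples,))                      # [K, bs, class_dim]
mu = qz_x_tilde.loc.unsqueeze(0).repeat(n_imp_samples, 1, 1)  # [K, bs, class_dim]
scale = qz_x_tilde.scale.unsqueeze(0).repeat(n_imp_samples, 1, 1)
content = {'mu': mu, 'logvar': scale, 'z': z}                 # same layout as get_latent_samples
print(z.shape)                                                # torch.Size([4, 8, 16])
```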
#### File: utils/metrics/BaseMetrics.py
```python
import typing
from abc import abstractmethod
from typing import List
import torch
class BaseMetrics(object):
"""
Defines a set of metrics that are used to evaluate the performance of a model
"""
def __init__(self, prediction: torch.Tensor, groundtruth: torch.Tensor, str_labels: List[str]):
"""
params:
prediction: Tensor which is given as output of the network
groundtruth: Tensor which represents the ground truth
>>> import torch
>>> metrics = BaseMetrics(torch.ones((10,1)), torch.ones((10,1)), str_labels=['my_labels'])
"""
self.str_labels = str_labels
self.prediction = prediction
self.groundtruth = groundtruth
self.prediction_bin: torch.Tensor = (prediction > 0.5) * 1
self.groundtruth_bin: torch.Tensor = (groundtruth > 0.5) * 1
# classwise binarized predictions
self.class_pred_bin: dict = {str_labels[i]: self.prediction_bin[:, i] for i in range(len(str_labels))}
self.class_gt_bin: dict = {str_labels[i]: self.groundtruth_bin[:, i] for i in range(len(str_labels))}
@abstractmethod
def evaluate(self) -> typing.Dict[str, list]:
"""
Computes the different metrics.
"""
pass
def get_counts(self) -> dict:
predicted_counts = {f'pred_count_{label}': [self.class_pred_bin[label].sum().item()] for label in
self.str_labels}
gt_counts = {f'gt_count_{label}': [self.class_gt_bin[label].sum().item()] for label in self.str_labels}
return {**predicted_counts, **gt_counts}
def extract_values(self, results: dict):
"""
Extract first values from list for each metric result.
>>> import torch
>>> metrics = BaseMetrics(torch.ones((10,1)), torch.ones((10,1)), str_labels=['my_labels'])
>>> metrics.extract_values(results={'accuracy':[0.9], 'f1': [0.8], 'recall':[0.6]})
{'accuracy': 0.9, 'f1': 0.8, 'recall': 0.6}
"""
return {k: v[0] for k, v in results.items()}
```
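A minimal concrete subclass makes the intended usage clearer. This is only a sketch that assumes `BaseMetrics` is importable from the module above; the subclass name and the metric it computes are illustrative:
```python
import torch
# from mmvae_hub.utils.metrics.BaseMetrics import BaseMetrics  # assuming the package is installed

class AccuracyMetrics(BaseMetrics):
    """Toy subclass: element-wise accuracy plus the label counts."""

    def evaluate(self) -> dict:
        correct = (self.prediction_bin == self.groundtruth_bin).float().mean().item()
        return {'accuracy': [correct], **self.get_counts()}

preds = torch.tensor([[0.9], [0.2], [0.7]])
gts = torch.tensor([[1.0], [0.0], [0.0]])
metrics = AccuracyMetrics(preds, gts, str_labels=['Finding'])
print(metrics.extract_values(metrics.evaluate()))
# -> accuracy ~ 0.67, pred_count_Finding = 2, gt_count_Finding = 1
```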
#### File: mmvae_hub/utils/MongoDB.py
```python
import glob
import io
import pathlib
import shutil
import tempfile
from pathlib import Path
import gridfs
import torch
from pymongo import MongoClient
from mmvae_hub import log
from mmvae_hub.networks import BaseMMVae
from mmvae_hub.utils.utils import json2dict, unpack_zipfile
class MongoDatabase:
def __init__(self, flags=None, training: bool = True, _id: str = None):
"""
training: if true, experiment_uid and flags will be sent to db.
"""
self.mongodb_URI = self.get_mongodb_uri()
if flags is not None:
self.experiment_uid = flags.experiment_uid
elif _id is not None:
self.experiment_uid = _id
experiments = self.connect()
# create document in db for current experiment
if training and self.experiment_uid not in [str(id) for id in experiments.find().distinct('_id')]:
experiments.insert_one({'_id': self.experiment_uid, 'flags': self.encode_flags(flags), 'epoch_results': {},
'version': flags.version})
def connect(self):
client = MongoClient(self.mongodb_URI)
db = client.mmvae
return db.experiments
@staticmethod
def get_mongodb_uri():
dbconfig = json2dict(Path('~/.config/mmvaedb.json').expanduser())
return dbconfig['mongodb_URI']
def insert_dict(self, d: dict):
log.info('Inserting dict to database.')
experiments = self.connect()
experiments.find_one_and_update({'_id': self.experiment_uid}, {"$set": d})
@staticmethod
def encode_flags(flags):
flags_dict = vars(flags).copy()
for k, elem in flags_dict.items():
if type(elem) in [pathlib.PosixPath, torch.device]:
flags_dict[k] = str(elem)
return flags_dict
def get_experiment_dict(self):
experiments = self.connect()
return experiments.find_one({'_id': self.experiment_uid})
def delete_many(self, selection: dict, delete_all: bool = False):
"""
Delete all elements from database that correspond to selection.
delete_all bool: If True, remove all documents in database.
"""
experiment = self.connect()
if delete_all:
experiment.delete_many({})
else:
experiment.delete_many(selection)
def delete_one(self, _id: str):
"""
Removes one document from db
"""
log.info(f'Deleting document with _id: {_id}.')
experiment = self.connect()
experiment.delete_one({'_id': _id})
def save_networks_to_db(self, dir_checkpoints: Path, epoch: int, modalities):
"""
Inspired from https://medium.com/naukri-engineering/way-to-store-large-deep-learning-models-in-production-ready-environments-d8a4c66cc04c
There is probably a better way to store Tensors in MongoDB.
"""
fs = self.connect_with_gridfs()
checkpoint_dir = dir_checkpoints / str(epoch).zfill(4)
fs_ids = [elem._id for elem in fs.find({})]
for mod_str in modalities:
for prefix in ['en', 'de']:
filename = checkpoint_dir / f"{prefix}coderM{mod_str}"
_id = self.experiment_uid + f"__{prefix}coderM{mod_str}"
if _id not in fs_ids:
with io.FileIO(str(filename), 'r') as fileObject:
log.info(f'Saving checkpoint to db: {filename}')
fs.put(fileObject, filename=str(filename), _id=_id)
def upload_logfile(self, logfile_path: Path) -> None:
fs = self.connect_with_gridfs()
fs_ids = [elem._id for elem in fs.find({})]
logfile_id = self.experiment_uid + f"__logfile"
if logfile_id not in fs_ids:
with io.FileIO(str(logfile_path), 'r') as fileObject:
fs.put(fileObject, filename=str(logfile_path.name), _id=logfile_id)
def upload_tensorbardlogs(self, tensorboard_logdir: Path) -> None:
"""zip tensorboard logs and save them to db."""
fs = self.connect_with_gridfs()
fs_ids = [elem._id for elem in fs.find({})]
file_id = self.experiment_uid + f"__tensorboard_logs"
if file_id not in fs_ids:
with tempfile.TemporaryDirectory() as tmpdirname:
log.info(f'Zipping {file_id} to {tmpdirname}.')
zipfile = Path(tmpdirname) / tensorboard_logdir.name
shutil.make_archive(zipfile, 'zip', tensorboard_logdir, verbose=True)
log.info(f'Uploading tensorboard logs to db.')
with io.FileIO(str(zipfile.with_suffix('.zip')), 'r') as fileObject:
fs.put(fileObject, filename=str(tensorboard_logdir.name),
_id=file_id)
def connect_with_gridfs(self):
client = MongoClient(self.mongodb_URI)
db = client.mmvae
return gridfs.GridFS(db)
def load_networks_from_db(self, mmvae: BaseMMVae):
log.info(f'Loading networks from database for model {mmvae}.')
fs = self.connect_with_gridfs()
fs_ids = [elem._id for elem in fs.find({})]
with tempfile.TemporaryDirectory() as tmpdirname:
tmpdirname = Path(tmpdirname)
for mod_str in mmvae.modalities:
for prefix in ['en', 'de']:
filename = tmpdirname / f"{prefix}coderM{mod_str}"
model_id = self.experiment_uid + f"__{prefix}coderM{mod_str}"
with open(filename, 'wb') as fileobject:
fileobject.write(fs.get(model_id).read())
mmvae.load_networks(tmpdirname)
return mmvae
def load_experiment_results_to_db(self, experiments_dir: Path):
"""Iterate through the experiment_dir and load results to the db if they are not already there."""
experiments = self.connect()
exp_ids_db = [elem['_id'] for elem in experiments.find({})]
fs = self.connect_with_gridfs()
fs_ids = {elem._id.split('__')[0] for elem in fs.find({})}
for exp_dir in experiments_dir.iterdir():
print(exp_dir)
# get epoch results
if exp_dir.name not in exp_ids_db:
if (exp_dir / 'epoch_results').exists():
for epoch_result_dir in (exp_dir / 'epoch_results').iterdir():
# todo load epoch results to db
pass
# get models
if exp_dir.name not in fs_ids:
if (exp_dir / 'checkpoints').exists():
self.experiment_uid = exp_dir.name
latest_checkpoint = max(
int(d.name) for d in (exp_dir / 'checkpoints').iterdir() if d.name.isdigit())
dir_checkpoints = (exp_dir / 'checkpoints' / str(latest_checkpoint).zfill(4))
modalities = {Path(e).name.replace('decoderM', '') for e in
glob.glob(str(dir_checkpoints / 'decoderM*'))}
self.save_networks_to_db(
dir_checkpoints=(exp_dir / 'checkpoints'),
epoch=latest_checkpoint, modalities=modalities)
else:
print('checkpoint dir does not exist')
def get_tensorboardlogs(self, dest_dir: Path) -> None:
"""Get the tensorboard logs from the db as a zipfolder, unzip them and save them to dest_dir."""
fs = self.connect_with_gridfs()
log_id = self.experiment_uid + f"__tensorboard_logs"
with tempfile.TemporaryDirectory() as tmpdirname:
zip_file = Path(tmpdirname) / 'my_zip.zip'
with open(zip_file, 'wb') as fileobject:
fileobject.write(fs.get(log_id).read())
unpack_zipfile(zip_file, dest_dir)
if __name__ == '__main__':
id = 'polymnist_iwmogfm2__2021_09_28_11_26_27_056960'
# db = MongoDatabase(_id=id)
# db.get_tensorboardlogs(Path('/Users/Hendrik/Desktop/temp'))
out_dir = Path('/Users/Hendrik/Desktop/tensorboard_logs') / id
out_dir.mkdir(parents=True, exist_ok=True)
db = MongoDatabase(_id=id)
db.get_tensorboardlogs(dest_dir=out_dir)
```
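The checkpoint and logfile uploads above all follow the same GridFS put/get pattern. A minimal round trip looks like this; the URI is an assumption for illustration (the class reads the real URI from `~/.config/mmvaedb.json` and uses the `mmvae` database):
```python
import io
import gridfs
from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')  # assumed local instance
fs = gridfs.GridFS(client.mmvae)

file_id = 'my_experiment__logfile'
if file_id not in [elem._id for elem in fs.find({})]:
    fs.put(io.BytesIO(b'hello world'), filename='logfile.log', _id=file_id)

print(fs.get(file_id).read())  # b'hello world'
```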
#### File: utils/plotting/plot.py
```python
from pathlib import Path
import torch
from torchvision.utils import make_grid
from torchvision.utils import save_image
def create_fig(fn, img_data, num_img_row, save_figure=False):
if save_figure:
save_image(img_data.data.cpu(), fn, nrow=num_img_row)
grid = make_grid(img_data, nrow=num_img_row)
return (
grid.mul(255)
.add_(0.5)
.clamp_(0, 255)
.permute(1, 2, 0)
.to('cpu', torch.uint8)
.numpy()
)
def text_sample_to_file(log_tag: str, text_sample: str, epoch: int, exp_path: Path):
"""Write the generated text sample to a file with name log_tag."""
base_path = exp_path / 'text_gen'
if not base_path.exists():
base_path.mkdir()
file_path = base_path / log_tag
with open(file_path, 'a') as textfile:
textfile.write(f'\n{"*" * 20}\n Epoch: {epoch}\n{text_sample}\n{"*" * 20}')
```
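Typical usage of `create_fig` on a batch of generated samples; the import path is assumed from the file header above and the random tensor stands in for real model output:
```python
import torch
from matplotlib import pyplot as plt
# from mmvae_hub.utils.plotting.plot import create_fig  # assumed module path

imgs = torch.rand(16, 3, 28, 28)  # fake RGB samples in [0, 1]
grid = create_fig('samples.png', imgs, num_img_row=4, save_figure=False)
print(grid.shape)                 # (H, W, 3) uint8 array
plt.imshow(grid)
```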
#### File: MMVAE_mnist_svhn_text/tests/test_static.py
```python
import tempfile
from pathlib import Path
import numpy as np
import pytest
from mmvae_hub.polymnist.PolymnistTrainer import PolymnistTrainer
from mmvae_hub.utils.utils import json2dict, write_to_jsonfile
from tests.utils import set_me_up
# @pytest.mark.tox
@pytest.mark.parametrize("method", ['moe', 'joint_elbo', 'poe', 'planar_mixture', 'pfom'])
def test_static_results_1mod(method: str, update_static_results=False):
"""
Test if the results are constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
jsonfile = Path(__file__).parent / 'static_results.json'
static_results = json2dict(jsonfile)['static_results_1mod']
if method not in static_results:
write_to_jsonfile(jsonfile, [(f'static_results_1mod.{method}', {})])
static_results[method] = {}
static_results = static_results[method]
with tempfile.TemporaryDirectory() as tmpdirname:
mst = set_me_up(tmpdirname,dataset='polymnist',
method=method,
attributes={'num_flows': 0, 'num_mods': 1, 'deterministic': True, 'device': 'cpu',
'steps_per_training_epoch': 1, 'factorized_representation': False, 'calc_nll':False})
trainer = PolymnistTrainer(mst)
test_results = trainer.run_epochs()
if update_static_results:
static_results['joint_div'] = test_results.joint_div
static_results['klds'] = test_results.klds['m0']
# static_results['lhoods'] = test_results.lhoods['m0']['m0']
static_results['log_probs'] = test_results.log_probs['m0']
static_results['total_loss'] = test_results.total_loss
# static_results['lr_eval'] = test_results.lr_eval['m0']['accuracy']
static_results['latents_class'] = {
'mu': test_results.latents['m0']['latents_class']['mu']
}
write_to_jsonfile(jsonfile, [(f'static_results_1mod.{method}', static_results)])
are_they_equal = {
'joint_div': np.round(test_results.joint_div, 5) == np.round(static_results['joint_div'], 5),
'klds': np.round(test_results.klds['m0'], 5) == np.round(static_results['klds'], 5),
# 'lhoods': np.round(test_results.lhoods['m0']['m0'], 3) == np.round(static_results['lhoods'], 3),
'log_probs': test_results.log_probs['m0'] == static_results['log_probs'],
'total_loss': test_results.total_loss == static_results['total_loss'],
# 'lr_eval': test_results.lr_eval['m0']['accuracy'] == static_results['lr_eval'],
'latents_class_mu': np.round(test_results.latents['m0']['latents_class']['mu'], 8) == np.round(
static_results['latents_class']['mu'], 8)
}
assert all(v for _, v in are_they_equal.items()), f'Some results changed: {are_they_equal}'
# @pytest.mark.tox
@pytest.mark.parametrize("method", ['moe', 'joint_elbo'])
def test_static_results_2mods(method: str):
"""
Test if the results are constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
static_results = json2dict(Path(__file__).parent / 'static_results.json')['static_results_2mod']
with tempfile.TemporaryDirectory() as tmpdirname:
mst = set_me_up(tmpdirname,
method=method,
attributes={'num_flows': 0, 'num_mods': 2, 'deterministic': True, 'device': 'cpu',
'steps_per_training_epoch': 1, 'factorized_representation': False})
trainer = PolymnistTrainer(mst)
test_results = trainer.run_epochs()
assert np.round(test_results.joint_div, 1) == np.round(static_results[method]['joint_div'], 1)
assert np.round(test_results.klds['m0'], 1) == np.round(static_results[method]['klds'], 1)
assert np.round(test_results.lhoods['m0']['m0'], 1) == np.round(static_results[method]['lhoods'], 1)
assert np.round(test_results.log_probs['m0'], 0) == np.round(static_results[method]['log_probs'], 0)
assert np.round(test_results.total_loss, 0) == np.round(static_results[method]['total_loss'], 0)
assert np.round(test_results.lr_eval['m0']['accuracy'], 2) == np.round(static_results[method]['lr_eval'], 2)
assert np.round(test_results.latents['m0']['latents_class']['mu'], 2) == np.round(
static_results[method]['latents_class']['mu'], 2)
if __name__ == '__main__':
test_static_results_1mod('moe', update_static_results=False)
test_static_results_1mod('joint_elbo', update_static_results=False)
test_static_results_1mod('poe', update_static_results=False)
test_static_results_1mod('pfom', update_static_results=False)
test_static_results_1mod('planar_mixture', update_static_results=False)
```
#### File: MMVAE_mnist_svhn_text/tests/test_training.py
```python
import tempfile
from pathlib import Path
import pytest
from norby.utils import get_readable_elapsed_time
from mmvae_hub.celeba.CelebaTrainer import CelebaTrainer
from mmvae_hub.evaluation.eval_metrics.coherence import test_generation
from mmvae_hub.mimic.MimicTrainer import MimicTrainer
from mmvae_hub.mnistsvhntext.mnistsvhntextTrainer import mnistsvhnTrainer
from mmvae_hub.polymnist.PolymnistTrainer import PolymnistTrainer
from mmvae_hub.utils.plotting.plotting import generate_plots
from tests.utils import set_me_up
@pytest.mark.tox
@pytest.mark.parametrize("method", ['mopoe', 'moe', 'poe', 'mopgfm', 'iwmogfm', 'iwmogfm_amortized'])
# @pytest.mark.parametrize("method", ['joint_elbo'])
def test_run_epochs_polymnist(method: str):
"""
Test if the main training loop runs.
Assert if the total_test loss is constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
# todo implement calc likelihood for flow based methods
calc_nll = False if method in ['mofop', 'planar_mixture', 'pfom', 'pope', 'fomfop', 'fomop', 'poe', 'gfm',
'planar_vae',
'sylvester_vae_noflow', 'iwmogfm', 'iwmogfm2', 'iwmogfm3', 'iwmogfm_amortized',
'iwmogfm_old'] else True
# calc_nll = False
mst = set_me_up(tmpdirname, dataset='polymnist', method=method, attributes={'calc_nll': calc_nll,
"K": 5,
"dir_clf": Path(
"/tmp/trained_clfs_polyMNIST")
# 'num_mods': 1
# 'num_flows': 1
})
trainer = PolymnistTrainer(mst)
test_results = trainer.run_epochs()
# @pytest.mark.tox
# @pytest.mark.parametrize("method", ['mopoe'])
def test_run_epochs_mimic(method: str):
"""
Test if the main training loop runs.
Assert if the total_test loss is constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
# todo implement calc likelihood for flow based methods
calc_nll = method not in ['planar_mixture', 'pfom', 'pope']
mst = set_me_up(tmpdirname, dataset='mimic', method=method, attributes={'calc_nll': True,
'use_clf': True,
'batch_size': 2,
# 'calc_prd': True
})
trainer = MimicTrainer(mst)
test_results = trainer.run_epochs()
def test_run_epochs_celeba(method: str):
"""
Test if the main training loop runs.
Assert if the total_test loss is constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
# todo implement calc likelihood for flow based methods
calc_nll = method not in ['planar_mixture', 'pfom', 'pope']
mst = set_me_up(tmpdirname, dataset='celeba', method=method, attributes={'calc_nll': True,
'use_clf': True,
'batch_size': 2,
# 'calc_prd': True
})
trainer = CelebaTrainer(mst)
test_results = trainer.run_epochs()
def test_run_epochs_mnistsvhntext(method: str):
"""
Test if the main training loop runs.
Assert if the total_test loss is constant. If the assertion fails, it means that the model or the evaluation has
changed, perhaps involuntarily.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
# todo implement calc likelihood for flow based methods
calc_nll = method not in ['planar_mixture', 'pfom', 'pope']
mst = set_me_up(tmpdirname, dataset='mnistsvhntext', method=method, attributes={'calc_nll': True,
"dir_clf": Path(
"/tmp/trained_clfs_mst")
})
trainer = mnistsvhnTrainer(mst)
test_results = trainer.run_epochs()
# @pytest.mark.tox
def test_run_planar_mixture_no_flow():
"""
Test if the main training loop runs.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
method = 'planar_mixture'
additional_attrs = {'num_flows': 0, 'num_mods': 1}
mst = set_me_up(tmpdirname, dataset='polymnist', method=method, attributes=additional_attrs)
trainer = PolymnistTrainer(mst)
test_results = trainer.run_epochs()
def test_generate_plots():
with tempfile.TemporaryDirectory() as tmpdirname:
mst = set_me_up(tmpdirname)
generate_plots(mst, epoch=1)
def test_test_generation():
with tempfile.TemporaryDirectory() as tmpdirname:
mst = set_me_up(tmpdirname)
test_generation(mst)
if __name__ == '__main__':
# pass
from time import time
start_time = time()
# test_run_epochs_celeba(method='mopgfm')
test_run_epochs_polymnist(method='iwmogfm_amortized')
# test_run_epochs_polymnist(method='mopoe')
# test_run_epochs_polymnist(method='iwmogfm2')
# test_run_epochs_polymnist(method='iwmogfm')
# test_run_epochs_mnistsvhntext(method='mopoe')
# test_run_epochs_polymnist(method='iwmogfm_amortized')
# test_run_epochs_polymnist(method='iwmogfm2')
# test_run_epochs_mimic(method='iwmogfm')
# test_run_epochs_mnistsvhntext(method='mopoe')
# test_run_epochs_polymnist(method='iwmopgfm')
# test_run_epochs_polymnist(method='mopgfm')
# test_run_epochs_polymnist(method='mofop')
# test_run_epochs_polymnist(method='iwmopoe')
# test_run_epochs_polymnist(method='iwmoe')
# test_run_epochs_polymnist(method='mogfm')
elapsed_time = time() - start_time
print(get_readable_elapsed_time(elapsed_time))
# test_run_epochs_polymnist(method='mofogfm')
# test_run_epochs_polymnist(method='pope')
# test_run_planar_mixture_no_flow()
# test_generate_plots()
# test_test_generation()
```
#### File: MMVAE_mnist_svhn_text/tests/utils.py
```python
from pathlib import Path
from typing import Optional
from mmvae_hub.mnistsvhntext.experiment import MNISTSVHNText
import mmvae_hub
from mmvae_hub.mimic.experiment import MimicExperiment
from mmvae_hub.polymnist.experiment import PolymnistExperiment
def set_me_up(tmpdirname, dataset: str, method: str, attributes: Optional = None):
config_path = Path(mmvae_hub.__file__).parent.parent / f'configs/toy_config.json'
if dataset == 'polymnist':
from mmvae_hub.polymnist.flags import FlagsSetup, parser as polymnist_parser
flags = polymnist_parser.parse_args([])
flags_setup = FlagsSetup(config_path)
exp = PolymnistExperiment
elif dataset == 'mimic':
from mmvae_hub.mimic.flags import parser as mimic_parser, MimicFlagsSetup
flags = mimic_parser.parse_args([])
flags_setup = MimicFlagsSetup(config_path)
exp = MimicExperiment
elif dataset == 'mnistsvhntext':
from mmvae_hub.mnistsvhntext.flags import parser as mnistshvntext_parser, mnistsvhntextFlagsSetup
flags = mnistshvntext_parser.parse_args([])
flags_setup = mnistsvhntextFlagsSetup(config_path)
exp = MNISTSVHNText
elif dataset == 'celeba':
from mmvae_hub.celeba.flags import parser as mnistshvntext_parser, CelebaFlagsSetup
from mmvae_hub.celeba.experiment import CelebaExperiment
flags = mnistshvntext_parser.parse_args([])
flags_setup = CelebaFlagsSetup(config_path)
exp = CelebaExperiment
else:
raise NotImplementedError(f'not implemented for dataset {dataset}.')
flags = flags_setup.setup_test(flags, tmpdirname)
flags.method = method
if attributes:
for k, v in attributes.items():
setattr(flags, k, v)
mst = exp(flags)
mst.set_optimizer()
return mst
``` |
{
"source": "Jimmy2027/MoPoE-MIMIC",
"score": 2
} |
#### File: MoPoE-MIMIC/mimic/main_mimic.py
```python
import gc
import os
from argparse import Namespace
from timeit import default_timer as timer
from typing import Union
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from termcolor import colored
from mimic import log
from mimic.run_epochs import run_epochs
from mimic.utils.exceptions import NaNInLatent, CudaOutOfMemory
from mimic.utils.experiment import MimicExperiment
from mimic.utils.filehandling import create_dir_structure, create_dir_structure_testing, get_config_path, \
get_method
from mimic.utils.flags import parser
from mimic.utils.flags import setup_flags
from mimic.utils.utils import get_gpu_memory
class Main:
def __init__(self, flags: Namespace, testing=False):
"""
config_path: (optional) path to the json config file
"""
flags = setup_flags(flags, testing)
flags = get_method(flags)
print(colored(f"running on {flags.device} with text {flags.text_encoding} encoding "
f'with method {flags.method}, batch size: {flags.batch_size} and img size {flags.img_size}, '
f'fixed_image_extractor: {flags.fixed_image_extractor}', 'blue'))
self.flags = create_dir_structure(flags)
# because of bad initialisation, the vae might return nan values. If this is the case it is best to restart the
# experiment.
self.max_tries = 10 # maximum restarts of the experiment due to nan values
self.current_tries = 0
self.start_time = 0
self.exp = None
def setup_distributed(self):
self.flags.world_size = torch.cuda.device_count()
log.info(f'setting up distributed computing with world size {self.flags.world_size}')
self.flags.distributed = self.flags.world_size > 1
self.flags.batch_size = int(self.flags.batch_size / self.flags.world_size)
def run_epochs(self) -> Union[bool, str]:
"""
Wrapper of mimic.run_epochs.run_epochs that checks if the workflow was completed and starts it over otherwise.
returns
bool: true if run_epochs finishes, False if an error occurs
string: "cuda_out_of_memory" if GPU runs out of memory
"""
print(colored(f'current free GPU memory: {get_gpu_memory()}', 'red'))
self.start_time = timer()
# need to reinitialize MimicExperiment after each retry
self.exp = MimicExperiment(self.flags)
create_dir_structure_testing(self.exp)
self.exp.number_restarts = self.current_tries
try:
if self.flags.distributed:
self.setup_distributed()
mp.spawn(run_epochs, nprocs=self.flags.world_size, args=(self.exp,), join=True)
else:
run_epochs(self.flags.device, self.exp)
except NaNInLatent as e:
print(e)
return False
except CudaOutOfMemory as e:
print(e)
return 'cuda_out_of_memory'
self.exp.update_experiments_dataframe({'experiment_duration': (timer() - self.start_time) // 60})
return True
def restart(self) -> None:
"""
Clears old dir_structure and creates new one, deletes corresponding row in the experiment dataframe.
"""
exp_df = pd.read_csv('experiments_dataframe.csv')
exp_df = exp_df.drop(exp_df.index[exp_df['str_experiment'] == self.flags.str_experiment])
exp_df.to_csv('experiments_dataframe.csv', index=False)
if self.exp.tb_logger:
self.exp.tb_logger.writer.close()
if self.flags.distributed:
dist.destroy_process_group()
torch.cuda.empty_cache()
gc.collect()
command = f'rm -r {self.flags.dir_experiment_run}'
print(command)
os.system(command)
self.flags = create_dir_structure(self.flags)
def main(self):
"""
Runs "run_epochs" until it returns True. If "run_epochs" fails because of full GPU memory,
the batch size is reduced and the workflow is started again.
If during the training, the model returns NaNs, bad initialization is
assumed and the workflow is started again.
"""
success = False
while not success and self.current_tries < self.max_tries:
success = self.run_epochs()
if not success:
self.current_tries += 1
log.info(f'******** RESTARTING EXPERIMENT FOR THE {self.current_tries} TIME ********')
if success == 'cuda_out_of_memory':
old_bs = self.flags.batch_size
self.flags.batch_size = int(np.floor(self.flags.batch_size * 0.8))
log.info(f'******** GPU ran out of memory with batch size {old_bs}, '
f'trying again with batch size: {self.flags.batch_size} ********')
success = False
if not success:
self.restart()
if __name__ == '__main__':
FLAGS: Namespace = parser.parse_args()
FLAGS.config_path = get_config_path(FLAGS)
main = Main(FLAGS)
try:
main.main()
except KeyboardInterrupt:
import logging
log.info("Aborted. Bye-bye.")
logging.shutdown()
```
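The retry behaviour of `Main.main` can be summarised in a few lines. The sketch below strips out the experiment bookkeeping and uses a hypothetical `run_once` stand-in for `run_epochs`: a NaN failure (returning `False`) consumes one of the allowed restarts, while an out-of-memory failure only shrinks the batch size by 20% and tries again:
```python
import numpy as np

def run_once(batch_size):
    """Hypothetical stand-in for Main.run_epochs: True, False or 'cuda_out_of_memory'."""
    return 'cuda_out_of_memory' if batch_size > 64 else True

batch_size, tries, max_tries = 256, 0, 10
success = False
while not success and tries < max_tries:
    success = run_once(batch_size)
    if not success:           # only a hard failure (False) counts as a restart
        tries += 1
    if success == 'cuda_out_of_memory':
        batch_size = int(np.floor(batch_size * 0.8))
        success = False

print(batch_size, tries)      # 52 0
```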
#### File: mimic/modalities/MimicLateral.py
```python
import torch
import mimic.modalities.utils
from mimic.modalities.Modality import ModalityIMG
class MimicLateral(ModalityIMG):
def __init__(self, enc, dec, args):
self.name = 'Lateral'
self.likelihood_name = 'laplace'
self.data_size = torch.Size((1, args.img_size, args.img_size))
super().__init__(data_size=self.data_size)
self.gen_quality_eval = True
self.file_suffix = '.png'
self.encoder = enc
self.decoder = dec
self.likelihood = mimic.modalities.utils.get_likelihood(self.likelihood_name)
```
#### File: mimic/networks/CheXNet.py
```python
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import torch
from mimic.dataio.utils import get_densenet_transforms
class CheXNet(nn.Module):
"""Taken from https://github.com/arnoweng/CheXNet/blob/master/model.py
The architecture of this model is the same as standard DenseNet121
except the classifier layer which has an additional sigmoid function.
"""
def __init__(self, out_size, fixed_extractor=True):
super(CheXNet, self).__init__()
self.densenet121 = torchvision.models.densenet121(pretrained=True)
if fixed_extractor:
for param in self.densenet121.parameters():
param.requires_grad = False
num_ftrs = self.densenet121.classifier.in_features
self.densenet121.classifier = nn.Sequential(
nn.Linear(num_ftrs, out_size),
nn.Sigmoid()
)
def forward(self, x):
return self.densenet121(x)
class PretrainedDenseNet(nn.Module):
def __init__(self, args):
super(PretrainedDenseNet, self).__init__()
original_model = torchvision.models.densenet121(pretrained=True)
self.features = nn.Sequential(*list(original_model.children())[:-1])
self.n_crops = args.n_crops
if args.fixed_image_extractor:
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
if self.n_crops in [5, 10]:
bs, n_crops, c, h, w = x.size()
else:
bs, c, h, w = x.size()
imgs = torch.autograd.Variable(x.view(-1, c, h, w).cuda())
x = self.features(imgs)
# x.shape = [bs*n_crop, 1024, 8, 8]
x = F.relu(x, inplace=True)
x = F.avg_pool2d(x, kernel_size=7).view(x.size(0), -1)
return x
class DenseLayers(nn.Module):
def __init__(self, args, nb_out=320):
self.n_crops = args.n_crops
self.batch_size = args.batch_size
super().__init__()
if args.n_crops:
self.dens1 = nn.Linear(in_features=1024 * args.n_crops, out_features=1024)
self.dens2 = nn.Linear(in_features=1024, out_features=512)
else:
self.dens1 = nn.Linear(in_features=1024, out_features=768)
self.dens2 = nn.Linear(in_features=768, out_features=512)
self.dens3 = nn.Linear(in_features=512, out_features=nb_out)
def forward(self, x):
if self.n_crops in [5, 10]:
x = x.view(self.batch_size, 1024 * self.n_crops)
x = self.dens1(x)
x = nn.functional.selu(x)
x = F.dropout(x, p=0.25, training=self.training)
x = self.dens2(x)
x = nn.functional.selu(x)
x = F.dropout(x, p=0.25, training=self.training)
x = self.dens3(x)
return x
class DenseNetFeatureExtractor(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.pretrained_dense = PretrainedDenseNet(args)
self.dense_layers = DenseLayers(args)
self.transforms = get_densenet_transforms(args)
def forward(self, x):
x_tf = self.transform_batch(x)
out = self.pretrained_dense(x_tf)
out = self.dense_layers(out)
out = out.unsqueeze(-1)
return out
def transform_batch(self, x):
x_tf = torch.Tensor(x.shape[0], 3, *x.shape[2:])
for idx, elem in enumerate(x):
new = self.transforms(elem.cpu())
x_tf[idx] = new
x_tf = x_tf.to(self.args.device)
return x_tf
if __name__ == '__main__':
model = CheXNet(3)
for param in model.parameters():
print(param)
# param.requires_grad = False
```
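The core of `CheXNet` is the standard freeze-the-backbone / replace-the-classifier pattern. The sketch below shows it with `pretrained=False` so it runs offline (CheXNet itself loads the ImageNet weights; newer torchvision versions use the `weights=` argument instead):
```python
import torch
import torch.nn as nn
import torchvision

backbone = torchvision.models.densenet121(pretrained=False)
for param in backbone.parameters():          # freeze the feature extractor
    param.requires_grad = False
num_ftrs = backbone.classifier.in_features   # 1024 for DenseNet-121
backbone.classifier = nn.Sequential(nn.Linear(num_ftrs, 3), nn.Sigmoid())

x = torch.randn(2, 3, 224, 224)
print(backbone(x).shape)                     # torch.Size([2, 3])
```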
#### File: networks/classifiers/utils.py
```python
import os
import typing
from pathlib import Path
from timeit import default_timer as timer
from typing import Optional
from typing import Protocol
import numpy as np
import pandas as pd
import torch
from matplotlib import pyplot as plt
from sklearn.metrics import average_precision_score
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from mimic import log
from mimic.networks.CheXNet import CheXNet
from mimic.networks.ConvNetworkImgClf import ClfImg
from mimic.networks.ConvNetworkTextClf import ClfText
from mimic.utils.filehandling import create_dir
from mimic.utils.filehandling import expand_paths
from mimic.utils.filehandling import get_str_experiments
def save_flags(flags, modality, epoch):
if modality == 'text':
save_name = f'flags_clf_{modality}_vocabsize_{flags.vocab_size}' \
f'{"_bin_label" if flags.binary_labels else ""}_{epoch}.rar'
else:
save_name = f'flags_clf_{modality}_{epoch}.rar'
torch.save(flags, Path(flags.dir_clf) / save_name)
def get_labels(binary_labels: bool = False):
if binary_labels:
return ['Finding']
else:
return ['Lung Opacity', 'Pleural Effusion', 'Support Devices']
class ExperimentDfProto(Protocol):
dataset: str
experiment_uid: str
class ExperimentDf:
"""
clf experiment results dataframe which contains test results of previous experiments together with their
parameters
"""
def __init__(self, flags: ExperimentDfProto):
self.testing = flags.dataset == 'testing'
experiment_uid = flags.experiment_uid
if os.path.exists('clf_experiments_dataframe.csv'):
experiments_dataframe = pd.read_csv('clf_experiments_dataframe.csv')
flags_dict = vars(flags)
flags_dict['experiment_uid'] = experiment_uid
flags_dict['total_epochs'] = 0
flags_dict['experiment_duration'] = -1
else:
experiments_dataframe = pd.DataFrame()
flags_dict = vars(flags)
flags_dict['clf_experiment_uid'] = experiment_uid
self.experiments_dataframe = experiments_dataframe.append(flags_dict, ignore_index=True)
self.experiments_dataframe.to_csv('clf_experiments_dataframe.csv', index=False)
self.experiment_uid = experiment_uid
self.start_time = timer()
def update_experiments_dataframe(self, values_dict: dict):
"""
Updates the values in experiments dataframe with the new values from the values_dict and saves it if the
experiment is not a test run
"""
log.info(f"writing to experiment df: {values_dict}")
experiments_dataframe = pd.read_csv('clf_experiments_dataframe.csv')
for key, value_ in values_dict.items():
if isinstance(values_dict[key], list) and len(value_) == 1:
value = values_dict[key][0]
else:
value = values_dict[key]
experiments_dataframe.loc[
experiments_dataframe['experiment_uid'] == self.experiment_uid, key] = value
if not self.testing:
experiments_dataframe.to_csv('clf_experiments_dataframe.csv', index=False)
def write_experiment_time(self):
self.update_experiments_dataframe({'experiment_duration': (timer() - self.start_time) // 60})
class CallbacksProto(Protocol):
clf_save_m1: Optional[str]
clf_save_m2: Optional[str]
clf_save_m3: Optional[str]
dir_clf: str
dir_logs_clf: str
binary_labels: bool
vocab_size: int
class Callbacks:
def __init__(self, flags: CallbacksProto,
start_early_stopping_epoch: int, max_early_stopping_index,
modality: str,
experiment_df: ExperimentDf, logger: any, optimizer: torch.optim):
self.modality = modality
self.logger = logger
self.flags = flags
self.experiment_df = experiment_df
self.start_early_stopping_epoch = start_early_stopping_epoch
self.max_early_stopping_index = max_early_stopping_index
self.patience_idx = 1
self.scheduler = ReduceLROnPlateau(optimizer, 'min', patience=5, verbose=True)
self.elapsed_times = []
# metrics is a dict that will contain a list for each metric, containing a value for each epoch
self.metrics: typing.Mapping[str, list] = {}
# self.early_stopping_crit = 'mean_AP_total'
self.early_stopping_crit = 'dice'
# maximize metric or minimize
self.early_stopping_mode = 'maximize'
def plot_metrics(self):
for k, v in self.metrics.items():
plt.plot(v)
plt.title(k)
plt.savefig(os.path.join(self.flags.dir_logs_clf, f'{k}.png'))
plt.close()
def update_epoch(self, epoch: int, loss, val_results: typing.Dict[str, torch.Tensor], model, elapsed_time):
# calculate metrics
metrics = Metrics(val_results['predictions'], val_results['ground_truths'],
str_labels=get_labels(self.flags.binary_labels))
metrics_dict = metrics.evaluate()
metrics_dict['eval_loss'] = [loss]
early_stop_crit_val = metrics_dict[self.early_stopping_crit][0]
self._update_metrics(metrics_dict)
stop_early = False
self.elapsed_times.append(elapsed_time)
self.scheduler.step(loss)
# update logger
for k, v in metrics_dict.items():
k = k.replace(' ', '_')
self.logger.add_scalars(f'eval_clf_{self.modality}/{k}', {self.modality: v}, epoch)
self.logger.add_scalars(f'eval_clf_{self.modality}/mean_loss', {self.modality: loss}, epoch)
# evaluate progress
max_eval_metric = max(self.metrics[self.early_stopping_crit])
epoch_max_eval_metric = np.argmax(self.metrics[self.early_stopping_crit])
print(f'current eval loss: {loss}, metrics: {metrics_dict}')
if epoch >= self.start_early_stopping_epoch and early_stop_crit_val >= max_eval_metric:
print(
f'current {self.early_stopping_crit} {early_stop_crit_val} improved from {max_eval_metric}'
f' at epoch {epoch_max_eval_metric}')
self.experiment_df.update_experiments_dataframe(
{'mean_eval_loss': loss, 'total_epochs': epoch, **metrics_dict})
self._save_and_overwrite_model(model.state_dict(), epoch)
self.patience_idx = 1
elif self.patience_idx > self.max_early_stopping_index:
print(
f'stopping early at epoch {epoch} because current {self.early_stopping_crit} {early_stop_crit_val} '
f'did not improve from {max_eval_metric} at epoch {epoch_max_eval_metric}')
stop_early = True
else:
if epoch > self.start_early_stopping_epoch:
print(
f'current {self.early_stopping_crit} {early_stop_crit_val} did not improve from {max_eval_metric} '
f'at epoch {epoch_max_eval_metric}')
print(f'-- idx_early_stopping = {self.patience_idx} / {self.max_early_stopping_index}')
self.patience_idx += 1
return stop_early
def _update_metrics(self, metrics_dict: typing.Dict[str, list]):
if not self.metrics:
self.metrics = metrics_dict
else:
for k, v in metrics_dict.items():
self.metrics[k].extend(v)
def _save_and_overwrite_model(self, state_dict, epoch: int):
"""
saves the model to flags.dir_clf/flags.clf_save_m[1,2,3] and deletes old one
"""
if self.modality == 'PA':
filename = self.flags.clf_save_m1
elif self.modality == 'Lateral':
filename = self.flags.clf_save_m2
else:
filename = self.flags.clf_save_m3 + \
f'vocabsize_{self.flags.vocab_size}{"_bin_label" if self.flags.binary_labels else ""}'
for file in os.listdir(self.flags.dir_clf):
if file.startswith(filename):
print(f'deleting old checkpoint: {os.path.join(self.flags.dir_clf, file)}')
os.remove(os.path.join(self.flags.dir_clf, file))
log.info('saving model to {}'.format(os.path.join(self.flags.dir_clf, filename + f'_{epoch}')))
torch.save(state_dict, os.path.join(self.flags.dir_clf, filename + f'_{epoch}'))
class GetModelsProto(Protocol):
device: any
img_clf_type: str
distributed: bool
fixed_extractor: bool
binary_labels: bool
def get_models(flags: GetModelsProto, modality: str):
"""
Get the wanted classifier for specific modality
"""
# argument feature_extractor_img is only used for mimic_main.
# Need to make sure it is unset when training classifiers
flags.feature_extractor_img = ''
assert modality in ['PA', 'Lateral', 'text']
assert flags.img_clf_type in ['densenet', 'resnet', '']
if modality in ['PA', 'Lateral']:
if flags.img_clf_type == 'densenet':
model = CheXNet(len(get_labels(flags.binary_labels)), flags.fixed_extractor).cuda()
elif flags.img_clf_type == 'resnet':
model = ClfImg(flags, get_labels(flags.binary_labels)).to(flags.device)
else:
raise NotImplementedError(f'{flags.img_clf_type} is not implemented, chose between "densenet" and "resnet"')
elif modality == 'text':
model = ClfText(flags, get_labels(flags.binary_labels)).to(flags.device)
if flags.distributed and torch.cuda.device_count() > 1:
print(f'Training with {torch.cuda.device_count()} GPUs')
model = torch.nn.DataParallel(model)
return model
def set_clf_paths(flags):
"""
Used for the training of the classifiers.
dir_clf: path to the directory where the classifier checkpoints will be saved
clf_save_m{1,2,3}: filename of the classifier checkpoint
dir_logs_clf: path to the directory where the training logs will be saved
"""
flags.exp_str_prefix = f'clf_{flags.modality}' + f'{flags.exp_str_prefix}' * bool(flags.exp_str_prefix)
flags.experiment_uid = get_str_experiments(flags)
# flags.dir_logs_clf = os.path.join(os.path.expanduser(flags.dir_clf), 'logs', flags.experiment_uid)
flags.dir_logs_clf = Path(flags.dir_clf).expanduser() / f'logs/{flags.experiment_uid}'
create_dir(flags.dir_logs_clf)
# change dir_clf
if flags.modality in ['PA', 'Lateral']:
flags.dir_clf = os.path.expanduser(
os.path.join(flags.dir_clf,
f'Mimic{flags.img_size}_{flags.img_clf_type}{"_bin_label" if flags.binary_labels else ""}'))
else:
flags.dir_clf = Path(flags.dir_clf).expanduser()
if not os.path.exists(flags.dir_clf):
os.makedirs(flags.dir_clf)
flags = expand_paths(flags)
return flags
def get_imgs_from_crops(input: torch.Tensor, device):
"""
Reshapes the input such that the number of crops and the batch size are multiplied in the first dimension.
"""
bs, n_crops, c, h, w = input.size()
imgs = Variable(input.view(-1, c, h, w)).to(device)
return imgs, bs, n_crops
def get_input(args: any, input: torch.Tensor, modality):
if args.img_clf_type == 'densenet' and modality != 'text' and args.n_crops in [5, 10]:
imgs, bs, n_crops = get_imgs_from_crops(input, args.device)
else:
imgs = input.to(args.device)
bs = None
n_crops = 1
return imgs, bs, n_crops
class Metrics(object):
"""
Defines a set of metrics that are used to evaluate the performance of a model
Modified version of https://github.com/ParGG/MasterThesisOld/blob/44f7b93214fa16494ebaeef7763ff81943b5ffc3/losses.py#L142
"""
def __init__(self, prediction: torch.Tensor, groundtruth: torch.Tensor, str_labels):
"""
params:
prediction: Tensor which is given as output of the network
groundtruth: Tensor which represents the ground truth
>>> import torch
>>> metrics = Metrics(torch.ones((10,1)), torch.ones((10,1)), str_labels=['my_labels'])
"""
self.str_labels = str_labels
self.prediction = prediction
self.groundtruth = groundtruth
self.prediction_bin: torch.Tensor = (prediction > 0.5) * 1
self.groundtruth_bin: torch.Tensor = (groundtruth > 0.5) * 1
# classwise binarized predictions
self.class_pred_bin: dict = {str_labels[i]: self.prediction_bin[:, i] for i in range(len(str_labels))}
self.class_gt_bin: dict = {str_labels[i]: self.groundtruth_bin[:, i] for i in range(len(str_labels))}
def evaluate(self) -> typing.Dict[str, list]:
"""
Computes the different metrics (accuracy, recall, specificity, precision, f1 score, jaccard score, dice score).
NOTE: f1 and dice are the same
"""
return {**{
'accuracy': [Metrics.accuracy(self)],
'recall': [Metrics.recall(self)],
'specificity': [Metrics.specificity(self)],
'precision': [Metrics.precision(self)],
'f1': [Metrics.f1(self)],
'jaccard': [Metrics.jaccard(self)],
'dice': [Metrics.dice(self)],
},
**self.mean_AP(), **self.counts()
}
def extract_values(self, results: dict):
"""
Extract first values from list for each metric result.
>>> import torch
>>> metrics = Metrics(torch.ones((10,1)), torch.ones((10,1)), str_labels=['my_labels'])
>>> metrics.extract_values(results={'accuracy':[0.9], 'f1': [0.8], 'recall':[0.6]})
{'accuracy': 0.9, 'f1': 0.8, 'recall': 0.6}
"""
return {k: v[0] for k, v in results.items()}
def accuracy(self) -> float:
"""
Computes the accuracy
"""
self.INTER = torch.mul(self.prediction_bin, self.groundtruth_bin).sum()
self.INTER_NEG = torch.mul(1 - self.prediction_bin, 1 - self.groundtruth_bin).sum()
self.TOTAL = self.prediction_bin.nelement()
return float(self.INTER + self.INTER_NEG) / float(self.TOTAL)
def recall(self) -> float:
"""
Computes the recall
"""
self.TP = torch.mul(self.prediction_bin, self.groundtruth_bin).sum()
self.FN = torch.mul(1 - self.prediction_bin, self.groundtruth_bin).sum()
self.RC = float(self.TP) / (float(self.TP + self.FN) + 1e-6)
return self.RC
def specificity(self):
self.TN = torch.mul(1 - self.prediction_bin, 1 - self.groundtruth_bin).sum()
self.FP = torch.mul(self.prediction_bin, 1 - self.groundtruth_bin).sum()
self.SP = float(self.TN) / (float(self.TN + self.FP) + 1e-6)
return self.SP
def precision(self) -> float:
"""
Computes the precision
"""
self.PC = float(self.TP) / (float(self.TP + self.FP) + 1e-6)
return self.PC
def f1(self) -> float:
"""
Computes the f1 score (same as dice)
"""
return 2 * (self.RC * self.PC) / (self.RC + self.PC + 1e-6)
def jaccard(self) -> float:
"""
Computes the jaccard score
"""
return float(self.INTER) / (float(self.INTER + self.FP + self.FN) + 1e-6)
def dice(self):
"""
Computes the dice score (same as f1)
"""
return 2 * float(self.INTER) / (float(2 * self.INTER + self.FP + self.FN) + 1e-6)
def mean_AP(self) -> dict:
"""
Compute the mean average precision.
>>> import torch
>>> metrics = Metrics(torch.tensor([0, 0, 1, 1]).unsqueeze(-1), torch.tensor([0.1, 0.4, 0.35, 0.8]).unsqueeze(-1), str_labels=['my_labels'])
>>> metrics.mean_AP()
{'mean_AP_my_labels': [0.8333333333333333], 'mean_AP_total': [0.8333333333333333]}
"""
ap_values = {
f'mean_AP_{self.str_labels[i]}': [
average_precision_score(self.prediction[:, i].numpy().ravel(), self.groundtruth[:, i].numpy().ravel())]
for i in range(len(self.str_labels))}
ap_values['mean_AP_total'] = [average_precision_score(self.prediction.cpu().data.numpy().ravel(),
self.groundtruth.cpu().data.numpy().ravel())]
return ap_values
def counts(self) -> dict:
predicted_counts = {f'pred_count_{label}': [self.class_pred_bin[label].sum().item()] for label in
self.str_labels}
gt_counts = {f'gt_count_{label}': [self.class_gt_bin[label].sum().item()] for label in self.str_labels}
return {**predicted_counts, **gt_counts}
```
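The early-stopping rule buried in `Callbacks.update_epoch` reduces to a patience counter over the monitored metric (`dice` by default). Here is a condensed sketch with checkpointing, the LR scheduler and logging omitted; the metric values are made up:
```python
history = []                      # one value per epoch, like self.metrics['dice']
patience_idx, max_patience = 1, 3

for epoch, dice in enumerate([0.40, 0.55, 0.54, 0.56, 0.55, 0.53, 0.52, 0.51]):
    history.append(dice)
    if dice >= max(history):      # new best: save checkpoint, reset patience
        patience_idx = 1
    elif patience_idx > max_patience:
        print(f'stopping early at epoch {epoch}')
        break
    else:
        patience_idx += 1
```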
#### File: mimic/networks/ConvNetworksTextMimic.py
```python
import torch
import torch.nn as nn
from mimic.networks.FeatureCompressor import LinearFeatureCompressor
from mimic.networks.char_encoding import DataGeneratorText as DataGeneratorText_CharEnc
from mimic.networks.char_encoding import FeatureExtractorText as FeatureExtractorText_CharEnc
from mimic.networks.word_encoding import DataGeneratorText as DataGeneratorText_WordEnc
from mimic.networks.word_encoding.mmvae_text_enc import FeatureExtractorText as FeatureExtractorText_WordEnc
class EncoderText(nn.Module):
def __init__(self, flags, style_dim):
super(EncoderText, self).__init__()
self.args = flags
if flags.text_encoding == 'char':
self.feature_extractor = FeatureExtractorText_CharEnc(flags)
elif flags.text_encoding == 'word':
self.feature_extractor = FeatureExtractorText_WordEnc(flags)
self.feature_compressor = LinearFeatureCompressor(5 * flags.DIM_text,
style_dim,
flags.class_dim)
def forward(self, x_text):
# d_model must be divisible by nhead
# text_in = nn.functional.one_hot(x_text.to(torch.int64), num_classes=self.args.vocab_size)
# encoder_layer = nn.TransformerEncoderLayer(d_model=x_text.shape[-1], nhead=8)
# transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=8)
# h_text = transformer_encoder(text_in)
# todo is this better?
h_text = self.feature_extractor(x_text)
if self.feature_compressor.style_mu and self.feature_compressor.style_logvar:
mu_style, logvar_style, mu_content, logvar_content = self.feature_compressor(h_text)
return mu_content, logvar_content, mu_style, logvar_style
else:
mu_content, logvar_content = self.feature_compressor(h_text)
return mu_content, logvar_content
class DecoderText(nn.Module):
def __init__(self, flags, style_dim):
super(DecoderText, self).__init__()
self.flags = flags
self.feature_generator = nn.Linear(style_dim + flags.class_dim,
5 * flags.DIM_text, bias=True)
if flags.text_encoding == 'char':
self.text_generator = DataGeneratorText_CharEnc(flags)
elif flags.text_encoding == 'word':
self.text_generator = DataGeneratorText_WordEnc(flags)
# self.text_generator = Dec(flags)
def forward(self, z_style, z_content):
if self.flags.factorized_representation:
z = torch.cat((z_style, z_content), dim=1).squeeze(-1)
# z.shape = [100, 64]
else:
z = z_content
text_feat_hat = self.feature_generator(z)
text_feat_hat = text_feat_hat.unsqueeze(-1)
# predict in batches to spare GPU memory
if text_feat_hat.shape[0] > self.flags.batch_size:
dl = torch.utils.data.DataLoader(text_feat_hat, batch_size=self.flags.batch_size)
text_hat = torch.Tensor().to(self.flags.device)
for batch in dl:
text_hat = torch.cat(tensors=(text_hat, self.text_generator(batch)))
else:
text_hat = self.text_generator(text_feat_hat)
text_hat = text_hat.transpose(-2, -1)
return [text_hat]
```
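`DecoderText.forward` decodes in chunks when more latent codes arrive than fit in one training batch, which keeps peak GPU memory bounded. The same idea in isolation, with a toy linear layer standing in for the text generator:
```python
import torch
import torch.nn as nn

generator = nn.Linear(64, 128)        # stand-in for self.text_generator
features = torch.randn(1000, 64)      # more samples than one batch
dl = torch.utils.data.DataLoader(features, batch_size=256)

out = torch.cat([generator(batch) for batch in dl])
print(out.shape)                      # torch.Size([1000, 128])
```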
#### File: mimic/networks/VAEtrimodalMimic.py
```python
import os
import torch
import torch.nn as nn
from mimic.utils import utils
from mimic.utils.BaseMMVae import BaseMMVae
from torch.distributions.distribution import Distribution
import typing
class VAEtrimodalMimic(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super(VAEtrimodalMimic, self).__init__(flags, modalities, subsets)
self.encoder_pa = modalities['PA'].encoder
self.encoder_lat = modalities['Lateral'].encoder
self.encoder_text = modalities['text'].encoder
self.decoder_pa = modalities['PA'].decoder
self.decoder_lat = modalities['Lateral'].decoder
self.decoder_text = modalities['text'].decoder
self.encoder_pa = self.encoder_pa.to(flags.device)
self.encoder_lat = self.encoder_lat.to(flags.device)
self.encoder_text = self.encoder_text.to(flags.device)
self.decoder_pa = self.decoder_pa.to(flags.device)
self.decoder_lat = self.decoder_lat.to(flags.device)
self.decoder_text = self.decoder_text.to(flags.device)
self.lhood_pa = modalities['PA'].likelihood
self.lhood_lat = modalities['Lateral'].likelihood
self.lhood_text = modalities['text'].likelihood
def forward(self, input_batch) -> typing.Mapping[str, any]:
latents = self.inference(input_batch)
results = {'latents': latents}
div = self.calc_joint_divergence(latents['mus'],
latents['logvars'],
latents['weights'])
results['group_distr'] = latents['joint']
class_embeddings = utils.reparameterize(latents['joint'][0],
latents['joint'][1])
for key in div:
results[key] = div[key]
results_rec: typing.Mapping[str, Distribution] = {}
for m_key in self.modalities:
input_mod = input_batch[m_key]
if input_mod is not None:
mod = self.modalities[m_key]
if self.flags.factorized_representation:
s_mu, s_logvar = latents['modalities'][m_key + '_style']
s_emb = utils.reparameterize(mu=s_mu, logvar=s_logvar)
else:
s_emb = None
if m_key == 'Lateral':
rec = self.lhood_lat(*self.decoder_lat(s_emb, class_embeddings))
elif m_key == 'PA':
rec = self.lhood_pa(*self.decoder_pa(s_emb, class_embeddings))
elif m_key == 'text':
rec = self.lhood_text(logits=self.decoder_text(s_emb, class_embeddings)[0])
results_rec[m_key] = rec
results['rec'] = results_rec
return results
def encode(self, input_batch):
latents = {}
if 'PA' in input_batch.keys():
i_m1 = input_batch['PA']
latents['PA'] = self.encoder_pa(i_m1)
if self.encoder_pa.feature_compressor.style_mu and self.encoder_pa.feature_compressor.style_logvar:
latents['PA_style'] = latents['PA'][2:]
latents['PA'] = latents['PA'][:2]
else:
latents['PA_style'] = [None, None]
latents['PA'] = [None, None]
if 'Lateral' in input_batch.keys():
i_m2 = input_batch['Lateral']
latents['Lateral'] = self.encoder_lat(i_m2)
if self.encoder_lat.feature_compressor.style_mu and self.encoder_lat.feature_compressor.style_logvar:
latents['Lateral_style'] = latents['Lateral'][2:]
latents['Lateral'] = latents['Lateral'][:2]
else:
latents['Lateral_style'] = [None, None]
latents['Lateral'] = [None, None]
if 'text' in input_batch.keys():
i_m3 = input_batch['text']
latents['text'] = self.encoder_text(i_m3)
if self.encoder_text.feature_compressor.style_mu and self.encoder_text.feature_compressor.style_logvar:
latents['text_style'] = latents['text'][2:]
latents['text'] = latents['text'][:2]
else:
latents['text_style'] = [None, None]
latents['text'] = [None, None]
return latents
def get_random_styles(self, num_samples):
if self.flags.factorized_representation:
z_style_1 = torch.randn(num_samples, self.flags.style_pa_dim)
z_style_2 = torch.randn(num_samples, self.flags.style_lat_dim)
z_style_3 = torch.randn(num_samples, self.flags.style_text_dim)
z_style_1 = z_style_1.to(self.flags.device)
z_style_2 = z_style_2.to(self.flags.device)
z_style_3 = z_style_3.to(self.flags.device)
else:
z_style_1 = None
z_style_2 = None
z_style_3 = None
return {'PA': z_style_1, 'Lateral': z_style_2, 'text': z_style_3}
def get_random_style_dists(self, num_samples):
s1_mu = torch.zeros(num_samples,
self.flags.style_pa_dim).to(self.flags.device)
s1_logvar = torch.zeros(num_samples,
self.flags.style_pa_dim).to(self.flags.device)
s2_mu = torch.zeros(num_samples,
self.flags.style_lat_dim).to(self.flags.device)
s2_logvar = torch.zeros(num_samples,
self.flags.style_lat_dim).to(self.flags.device)
s3_mu = torch.zeros(num_samples,
self.flags.style_text_dim).to(self.flags.device)
s3_logvar = torch.zeros(num_samples,
self.flags.style_text_dim).to(self.flags.device)
m1_dist = [s1_mu, s1_logvar]
m2_dist = [s2_mu, s2_logvar]
m3_dist = [s3_mu, s3_logvar]
return {'PA': m1_dist, 'Lateral': m2_dist, 'text': m3_dist}
def generate(self, num_samples: int = None) -> dict:
if num_samples is None:
num_samples = self.flags.batch_size
z_class = torch.randn(num_samples, self.flags.class_dim)
z_class = z_class.to(self.flags.device)
style_latents = self.get_random_styles(num_samples)
random_latents = {'content': z_class, 'style': style_latents}
return self.generate_from_latents(random_latents)
def generate_from_latents(self, latents: dict) -> dict:
suff_stats = self.generate_sufficient_statistics_from_latents(latents)
cond_gen_pa = suff_stats['PA'].mean
cond_gen_lat = suff_stats['Lateral'].mean
cond_gen_text = suff_stats['text'].mean
return {'PA': cond_gen_pa,
'Lateral': cond_gen_lat,
'text': cond_gen_text}
def generate_sufficient_statistics_from_latents(self, latents: dict) -> dict:
style_pa = latents['style']['PA']
style_lat = latents['style']['Lateral']
style_text = latents['style']['text']
content: dict = latents['content']
cond_gen_m1 = self.lhood_pa(*self.decoder_pa(style_pa, content))
cond_gen_m2 = self.lhood_lat(*self.decoder_lat(style_lat, content))
cond_gen_m3 = self.lhood_text(logits=self.decoder_text(style_text, content)[0])
return {'PA': cond_gen_m1, 'Lateral': cond_gen_m2, 'text': cond_gen_m3}
def save_networks(self):
torch.save(self.encoder_pa.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.encoder_save_m1))
torch.save(self.decoder_pa.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.decoder_save_m1))
torch.save(self.encoder_lat.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.encoder_save_m2))
torch.save(self.decoder_lat.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.decoder_save_m2))
torch.save(self.encoder_text.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.encoder_save_m3))
torch.save(self.decoder_text.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.decoder_save_m3))
class VAETextMimic(BaseMMVae, nn.Module):
def __init__(self, flags, modalities, subsets):
super(VAETextMimic, self).__init__(flags, modalities, subsets)
self.encoder_text = modalities['text'].encoder
self.decoder_text = modalities['text'].decoder
self.encoder_text = self.encoder_text.to(flags.device)
self.decoder_text = self.decoder_text.to(flags.device)
self.lhood_text = modalities['text'].likelihood
def forward(self, input_batch):
latents = self.inference(input_batch)
results = {'latents': latents}
div = self.calc_joint_divergence(latents['mus'],
latents['logvars'],
latents['weights'])
results['group_distr'] = latents['joint']
class_embeddings = utils.reparameterize(latents['joint'][0],
latents['joint'][1])
        for key in div:
            results[key] = div[key]
results_rec = {}
        for m_key in self.modalities:
input_mod = input_batch[m_key]
if input_mod is not None:
mod = self.modalities[m_key]
if self.flags.factorized_representation:
s_mu, s_logvar = latents[m_key + '_style']
s_emb = utils.reparameterize(mu=s_mu, logvar=s_logvar)
else:
s_emb = None
if m_key == 'text':
rec = self.lhood_text(*self.decoder_text(s_emb, class_embeddings))
results_rec[m_key] = rec
results['rec'] = results_rec
return results
def encode(self, input_batch):
latents = {}
if 'text' in input_batch.keys():
i_m3 = input_batch['text']
latents['text'] = self.encoder_text(i_m3)
if self.encoder_text.feature_compressor.style_mu and self.encoder_text.feature_compressor.style_logvar:
latents['text_style'] = latents['text'][2:]
latents['text'] = latents['text'][:2]
else:
latents['text_style'] = [None, None]
latents['text'] = [None, None]
return latents
def get_random_styles(self, num_samples):
if self.flags.factorized_representation:
z_style_3 = torch.randn(num_samples, self.flags.style_text_dim)
z_style_3 = z_style_3.to(self.flags.device)
else:
z_style_3 = None
return {'text': z_style_3}
def get_random_style_dists(self, num_samples):
s3_mu = torch.zeros(num_samples,
self.flags.style_text_dim).to(self.flags.device)
s3_logvar = torch.zeros(num_samples,
self.flags.style_text_dim).to(self.flags.device)
m3_dist = [s3_mu, s3_logvar]
return {'text': m3_dist}
def generate(self, num_samples=None) -> dict:
if num_samples is None:
num_samples = self.flags.batch_size
z_class = torch.randn(num_samples, self.flags.class_dim)
z_class = z_class.to(self.flags.device)
style_latents = self.get_random_styles(num_samples)
random_latents = {'content': z_class, 'style': style_latents}
return self.generate_from_latents(random_latents)
def generate_from_latents(self, latents: dict) -> dict:
suff_stats = self.generate_sufficient_statistics_from_latents(latents)
cond_gen_text = suff_stats['text'].mean
return {'text': cond_gen_text}
def generate_sufficient_statistics_from_latents(self, latents: dict) -> dict:
style_text = latents['style']['text']
content = latents['content']
cond_gen_m3 = self.lhood_text(*self.decoder_text(style_text, content))
return {'text': cond_gen_m3}
def save_networks(self):
torch.save(self.encoder_text.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.encoder_save_m3))
torch.save(self.decoder_text.state_dict(), os.path.join(self.flags.dir_checkpoints, self.flags.decoder_save_m3))
```
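Both VAE classes above call `utils.reparameterize(mu, logvar)` to draw the class and style embeddings. That helper is not reproduced in this dump; the snippet below is the standard VAE reparameterization trick such a function typically implements (a sketch, not necessarily the exact code in `mimic.utils.utils`):
```python
import torch

def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """z = mu + sigma * eps with eps ~ N(0, I), keeping the sample differentiable w.r.t. mu and logvar."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std
```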
#### File: mimic/notebooks/temp.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
for i in range(len(test_sentence) - 2)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModeler, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(context_size * embedding_dim, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs).view((1, -1))
out = F.relu(self.linear1(embeds))
out = self.linear2(out)
log_probs = F.log_softmax(out, dim=1)
return log_probs
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
total_loss = 0
for context, target in trigrams:
# Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
# into integer indices and wrap them in tensors)
context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs)
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a tensor)
loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
loss.backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
total_loss += loss.item()
losses.append(total_loss)
print(losses) # The loss decreased every iteration over the training data!
```
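Once the loop above has run, the model can be queried for the most likely next word of a two-word context, and the learned embeddings can be inspected. A short usage sketch that reuses `model` and `word_to_ix` from the script above:
```python
with torch.no_grad():
    context = ['When', 'forty']  # both words occur in the sonnet, so they are in the vocabulary
    context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
    log_probs = model(context_idxs)
    ix_to_word = {ix: word for word, ix in word_to_ix.items()}
    print(context, '->', ix_to_word[int(log_probs.argmax(dim=1))])
# the embedding vector of a particular word:
print(model.embeddings.weight[word_to_ix['beauty']])
```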
#### File: MoPoE-MIMIC/mimic/run_epochs.py
```python
import random
import time
import typing
from contextlib import contextmanager
import numpy as np
import torch
import torch.distributed as dist
from termcolor import colored
from torch.autograd import Variable
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from tqdm import tqdm
from mimic import log
from mimic.dataio.utils import get_data_loaders, samplers_set_epoch
from mimic.evaluation.eval_metrics.coherence import test_generation, flatten_cond_gen_values
from mimic.evaluation.eval_metrics.likelihood import estimate_likelihoods
from mimic.evaluation.eval_metrics.representation import test_clf_lr_all_subsets, train_clf_lr_all_subsets
from mimic.evaluation.eval_metrics.sample_quality import calc_prd_score
from mimic.evaluation.losses import calc_log_probs, calc_klds, calc_klds_style, calc_poe_loss, calc_joint_elbo_loss
from mimic.utils import utils
from mimic.utils.average_meters import AverageMeter, AverageMeterDict, AverageMeterLatents
from mimic.utils.exceptions import CudaOutOfMemory
from mimic.utils.experiment import Callbacks, MimicExperiment
from mimic.utils.plotting import generate_plots
from mimic.utils.utils import check_latents, at_most_n, get_items_from_dict
# set the seed for reproducibility
def set_random_seed(seed: int):
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
@contextmanager
def catching_cuda_out_of_memory(batch_size):
"""
Context that throws CudaOutOfMemory error if GPU is out of memory.
"""
try:
yield
# if the GPU runs out of memory, start the experiment again with a smaller batch size
except RuntimeError as e:
if str(e).startswith('CUDA out of memory.') and batch_size > 10:
raise CudaOutOfMemory(e)
else:
raise e
def basic_routine_epoch(exp, batch) -> typing.Mapping[str, typing.Any]:
# set up weights
beta_style = exp.flags.beta_style
beta_content = exp.flags.beta_content
beta = exp.flags.beta
mm_vae = exp.mm_vae
batch_d = batch[0]
mods = exp.modalities
for k, m_key in enumerate(batch_d.keys()):
batch_d[m_key] = Variable(batch_d[m_key]).to(exp.flags.device)
with catching_cuda_out_of_memory(batch_size=exp.flags.batch_size):
results = mm_vae(batch_d)
for key in results['latents']['modalities']:
results['latents']['modalities'][key][1].mean().item()
# checking if the latents contain NaNs. If they do raise NaNInLatent error and the experiment is started again
check_latents(exp.flags, results['latents']['modalities'][key])
# getting the log probabilities
with catching_cuda_out_of_memory(batch_size=exp.flags.batch_size):
log_probs, weighted_log_prob = calc_log_probs(exp, results, batch)
group_divergence = results['joint_divergence']
klds = calc_klds(exp, results)
if exp.flags.factorized_representation:
klds_style = calc_klds_style(exp, results)
else:
klds_style = None
# Calculation of the loss
if (exp.flags.modality_jsd or exp.flags.modality_moe
or exp.flags.joint_elbo):
total_loss = calc_joint_elbo_loss(exp, klds_style, group_divergence, beta_style, beta_content,
weighted_log_prob, beta)
elif exp.flags.modality_poe:
total_loss = calc_poe_loss(exp, mods, group_divergence, klds, klds_style, batch_d, mm_vae, log_probs)
return {
'results': results,
'log_probs': log_probs,
'total_loss': total_loss,
'klds': klds,
}
def train(exp: MimicExperiment, train_loader: DataLoader) -> None:
tb_logger = exp.tb_logger
mm_vae = exp.mm_vae
mm_vae.train()
exp.mm_vae = mm_vae
average_meters = {
'total_loss': AverageMeter('total_test_loss'),
'klds': AverageMeterDict('klds'),
'log_probs': AverageMeterDict('log_probs'),
'joint_divergence': AverageMeter('joint_divergence'),
'latents': AverageMeterLatents('latents'),
}
if 0 < exp.flags.steps_per_training_epoch < len(train_loader):
training_steps = exp.flags.steps_per_training_epoch
else:
training_steps = None
for iteration, batch in tqdm(enumerate(at_most_n(train_loader, training_steps or None)),
total=training_steps or len(train_loader), postfix='train'):
        # log text input once every epoch:
if iteration == 0:
tb_logger.write_tensor_to_text(batch[0]['text'][0], exp, log_tag='train_input')
basic_routine = basic_routine_epoch(exp, batch)
results = basic_routine['results']
total_loss = basic_routine['total_loss']
# backprop
exp.optimizer.zero_grad()
with catching_cuda_out_of_memory(exp.flags.batch_size):
total_loss.backward()
exp.optimizer.step()
batch_results = {
'total_loss': total_loss.item(),
'klds': get_items_from_dict(basic_routine['klds']),
'log_probs': get_items_from_dict(basic_routine['log_probs']),
'joint_divergence': results['joint_divergence'].item(),
'latents': results['latents']['modalities'],
}
for key, value in batch_results.items():
average_meters[key].update(value)
epoch_averages = {k: v.get_average() for k, v in average_meters.items()}
tb_logger.write_training_logs(**epoch_averages)
def test(epoch, exp, test_loader: DataLoader):
with torch.no_grad():
mm_vae = exp.mm_vae
mm_vae.eval()
exp.mm_vae = mm_vae
average_meters = {
'total_loss': AverageMeter('total_test_loss'),
'klds': AverageMeterDict('klds'),
'log_probs': AverageMeterDict('log_probs'),
'joint_divergence': AverageMeter('joint_divergence'),
'latents': AverageMeterLatents('latents'),
}
tb_logger = exp.tb_logger
for iteration, batch in tqdm(enumerate(test_loader), total=len(test_loader), postfix='test'):
basic_routine = basic_routine_epoch(exp, batch)
results = basic_routine['results']
batch_results = {
'total_loss': basic_routine['total_loss'].item(),
'klds': get_items_from_dict(basic_routine['klds']),
'log_probs': get_items_from_dict(basic_routine['log_probs']),
'joint_divergence': results['joint_divergence'].item(),
'latents': results['latents']['modalities'],
}
for key in batch_results:
average_meters[key].update(batch_results[key])
klds: typing.Mapping[str, float]
log_probs: typing.Mapping[str, float]
joint_divergence: float
latents: typing.Mapping[str, tuple]
test_results = {k: v.get_average() for k, v in average_meters.items()}
tb_logger.write_testing_logs(**test_results)
# set a lower batch_size for testing to spare GPU memory
log.info(f'setting batch size to {exp.flags.batch_size}')
training_batch_size = exp.flags.batch_size
exp.flags.batch_size = 30
if (epoch + 1) % exp.flags.eval_freq == 0 or (epoch + 1) == exp.flags.end_epoch:
log.info('generating plots')
plots = generate_plots(exp, epoch)
tb_logger.write_plots(plots, epoch)
if exp.flags.eval_lr:
log.info('evaluation of latent representation')
clf_lr = train_clf_lr_all_subsets(exp)
lr_eval = test_clf_lr_all_subsets(clf_lr, exp)
tb_logger.write_lr_eval(lr_eval)
test_results['lr_eval'] = lr_eval
if exp.flags.use_clf:
log.info('test generation')
gen_eval, text_gen_eval_results = test_generation(exp)
tb_logger.write_coherence_logs(gen_eval)
test_results['gen_eval'] = flatten_cond_gen_values(gen_eval)
test_results['text_gen_eval'] = text_gen_eval_results
if exp.flags.calc_nll:
log.info('estimating likelihoods')
lhoods = estimate_likelihoods(exp)
tb_logger.write_lhood_logs(lhoods)
test_results['lhoods'] = lhoods
if exp.flags.calc_prd and ((epoch + 1) % exp.flags.eval_freq_fid == 0):
log.info('calculating prediction score')
prd_scores = calc_prd_score(exp)
tb_logger.write_prd_scores(prd_scores)
test_results['prd_scores'] = prd_scores
test_results['latents'] = {mod: {'mu': test_results['latents'][mod][0],
'logvar': test_results['latents'][mod][1]} for mod in test_results['latents']}
exp.update_experiments_dataframe({'total_epochs': epoch, **utils.flatten(test_results)})
# setting batch size back to training batch size
exp.flags.batch_size = training_batch_size
return {k: v for k, v in test_results.items() if k in ['total_loss', 'lr_eval', 'text_gen_eval']}
def run_epochs(rank: typing.Any, exp: MimicExperiment) -> None:
"""
rank: is int if multiprocessing and torch.device otherwise
"""
log.info('running epochs')
set_random_seed(exp.flags.seed)
exp.set_optimizer()
exp.mm_vae = exp.mm_vae.to(rank)
args = exp.flags
args.device = rank
exp.tb_logger = exp.init_summary_writer()
if args.distributed:
utils.set_up_process_group(args.world_size, rank)
exp.mm_vae = DDP(exp.mm_vae, device_ids=[exp.flags.device])
train_sampler, train_loader = get_data_loaders(args, exp.dataset_train, which_set='train',
weighted_sampler=args.weighted_sampler)
test_sampler, test_loader = get_data_loaders(args, exp.dataset_test, which_set='eval')
callbacks = Callbacks(exp)
end = time.time()
for epoch in tqdm(range(exp.flags.start_epoch, exp.flags.end_epoch), postfix='epochs'):
print(colored(f'\nEpoch {epoch} {"-" * 140}\n', 'green'))
end = time.time()
samplers_set_epoch(args, train_sampler, test_sampler, epoch)
exp.tb_logger.set_epoch(epoch)
# one epoch of training and testing
train(exp, train_loader)
# mean_eval_loss, results_lr = test(epoch, exp, test_loader)
test_results = test(epoch, exp, test_loader)
if callbacks.update_epoch(epoch, test_results, time.time() - end):
break
if exp.tb_logger:
exp.tb_logger.writer.close()
if args.distributed:
dist.destroy_process_group()
```
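`catching_cuda_out_of_memory` turns a CUDA out-of-memory `RuntimeError` into the project's `CudaOutOfMemory` exception so that an outer driver can restart training with a smaller batch size (the context manager re-raises the original error once the batch size is already at or below 10). That driver is not part of this file; the loop below is a hypothetical sketch of the retry pattern, with import paths inferred from the file headers in this dump:
```python
from mimic import log
from mimic.run_epochs import run_epochs
from mimic.utils.exceptions import CudaOutOfMemory

def run_with_oom_retries(exp, min_batch_size: int = 10):
    """Keep restarting `run_epochs`, halving the batch size after every CUDA OOM."""
    while True:
        try:
            run_epochs(exp.flags.device, exp)
            return
        except CudaOutOfMemory:
            new_size = max(min_batch_size, exp.flags.batch_size // 2)
            log.info(f'CUDA OOM with batch_size={exp.flags.batch_size}, retrying with {new_size}')
            exp.flags.batch_size = new_size
```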
#### File: mimic/tests/test_callbacks.py
```python
import os
import tempfile
from dataclasses import dataclass
from unittest import TestCase
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from mimic.networks.classifiers.utils import ExperimentDf, get_models, Callbacks, CallbacksProto
@dataclass
class Args(CallbacksProto):
img_clf_type = 'resnet'
img_size = 128
image_channels = 10
clf_save_m1: str = 'temp'
dataset: str = 'testing'
experiment_uid: str = 'temp'
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
distributed: bool = False
def __init__(self, tmpdirname: str):
self.tempdirname = tmpdirname
self.dir_clf = tmpdirname
self.dir_logs_clf = tmpdirname
class TestCallbacks(TestCase):
def test_callbacks_increasing_loss_decreasingAP(self):
with tempfile.TemporaryDirectory() as tmpdirname:
args = Args(tmpdirname)
modality = 'PA'
experiment_df = ExperimentDf(args)
model = get_models(args, modality)
optimizer = optim.Adam(list(model.parameters()))
logger = SummaryWriter(args.dir_logs_clf)
callbacks = Callbacks(args, 0, 5, modality,
experiment_df, logger, optimizer)
loss = 10
for epoch in range(100):
if epoch == 0:
val_results = {'predictions': torch.ones((10, 3)), 'ground_truths': torch.ones((10, 3))}
else:
val_results = {'predictions': torch.zeros((10, 3)), 'ground_truths': torch.ones((10, 3))}
loss += 1
if callbacks.update_epoch(epoch, loss, val_results, model, elapsed_time=1):
break
self.assertEqual(epoch, 6)
def test_callbacks_decreasing_loss_increasingAP(self):
with tempfile.TemporaryDirectory() as tmpdirname:
args = Args(tmpdirname)
modality = 'PA'
experiment_df = ExperimentDf(args)
model = get_models(args, modality)
optimizer = optim.Adam(list(model.parameters()))
logger = SummaryWriter(args.dir_logs_clf)
callbacks = Callbacks(args, 0, 5, modality,
experiment_df, logger, optimizer)
loss = 1000
for epoch in range(10):
loss -= 1
if epoch == 0:
val_results = {'predictions': torch.cat((torch.ones((1, 3)), torch.zeros((9, 3)))),
'ground_truths': torch.ones((10, 3))}
else:
val_results = {'predictions': torch.cat((torch.ones((epoch, 3)), torch.zeros((10 - epoch, 3)))),
'ground_truths': torch.ones((10, 3))}
if callbacks.update_epoch(epoch, loss, val_results, model, elapsed_time=1):
break
self.assertEqual(epoch, 9)
self.assertTrue(os.path.exists(f'{tmpdirname}/temp_9'))
```
#### File: mimic/utils/average_meters.py
```python
import typing
import numpy as np
class AverageMeter(object):
"""
Computes and stores the average and current value
Taken from https://github.com/pytorch/examples/blob/a3f28a26851867b314f4471ec6ca1c2c048217f1/imagenet/main.py#L363
"""
def __init__(self, name: str, fmt: str = ':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self) -> str:
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
    def get_average(self):
        # return the running average accumulated over all updates, not the last value
        return self.avg
class AverageMeterNestedDict:
"""
Computes and stores the average and current value
Inspired by https://github.com/pytorch/examples/blob/a3f28a26851867b314f4471ec6ca1c2c048217f1/imagenet/main.py#L363
"""
def __init__(self, name: str, structure: typing.Mapping[str, typing.Mapping[str, typing.Iterable[None]]]):
self.name = name
self.structure = structure
self.vals: typing.Mapping[str, typing.Mapping[str, typing.Iterable[typing.Optional[float]]]] = structure
    def update(self, val: typing.Mapping[str, typing.Mapping[str, typing.Iterable[float]]]) -> None:
        for k1 in self.structure:
            # iterate over the keys of the nested dict, not the top-level keys again
            for k2 in self.structure[k1]:
                self.vals[k1][k2].append(val[k1][k2])
    def get_average(self) -> typing.Mapping[str, typing.Mapping[str, float]]:
        d = {}
        for k1 in self.structure:
            d[k1] = {}
            for k2 in self.structure[k1]:
                d[k1][k2] = np.mean(self.vals[k1][k2])
        return d
class AverageMeterDict:
"""
Computes and stores the average and current value
Inspired by https://github.com/pytorch/examples/blob/a3f28a26851867b314f4471ec6ca1c2c048217f1/imagenet/main.py#L363
"""
def __init__(self, name: str):
self.name = name
self.vals: typing.Optional[typing.Mapping[str, typing.Iterable[float]]] = None
def update(self, val: typing.Mapping[str, typing.Iterable[float]]) -> None:
if not self.vals:
self.vals = {k: [] for k in val}
for key in val:
self.vals[key].append(val[key])
    def get_average(self) -> typing.Mapping[str, float]:
return {key: np.mean(self.vals[key]) for key in self.vals}
class AverageMeterLatents(AverageMeterDict):
def __init__(self, name: str):
super().__init__(name=name)
def update(self, val: typing.Mapping[str, typing.Tuple[typing.Iterable[float], typing.Iterable[float]]]):
if not self.vals:
self.vals = {k: ([], []) for k in val}
for key in val:
self.vals[key][0].append(val[key][0].mean().item())
self.vals[key][1].append(val[key][1].mean().item())
    def get_average(self) -> typing.Mapping[str, typing.Tuple[float, float]]:
return {key: (np.mean(self.vals[key][0]), np.mean(self.vals[key][1])) for key in self.vals}
```
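A minimal usage sketch for the meters above (the values are made up and the import path follows the file header):
```python
from mimic.utils.average_meters import AverageMeter, AverageMeterDict

meter = AverageMeter('loss')
for batch_loss in [2.0, 1.0, 0.0]:
    meter.update(batch_loss)
print(meter)      # loss 0.000000 (1.000000): current value and running average
print(meter.avg)  # 1.0

kld_meter = AverageMeterDict('klds')
kld_meter.update({'PA': 0.4, 'text': 0.2})
kld_meter.update({'PA': 0.6, 'text': 0.4})
print(kld_meter.get_average())  # {'PA': 0.5, 'text': ~0.3}
```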
#### File: mimic/utils/BaseMMVae.py
```python
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from mimic.evaluation.divergence_measures.mm_div import calc_alphaJSD_modalities
from mimic.evaluation.divergence_measures.mm_div import calc_group_divergence_moe
from mimic.evaluation.divergence_measures.mm_div import poe
from mimic.utils import utils
import typing
from torch import Tensor
class BaseMMVae(ABC, nn.Module):
def __init__(self, flags, modalities, subsets):
super(BaseMMVae, self).__init__()
self.num_modalities = len(modalities.keys())
self.flags = flags
self.modalities = modalities
self.subsets = subsets
self.set_fusion_functions()
# assign encoders, decoders and likelihoods here # #
#
# ###############################################
@abstractmethod
def forward(self, input_batch):
pass
@abstractmethod
def encode(self, input_batch):
pass
@abstractmethod
def get_random_styles(self, num_samples):
pass
@abstractmethod
def get_random_style_dists(self, num_samples):
pass
@abstractmethod
def generate_sufficient_statistics_from_latents(self, latents):
pass
@abstractmethod
def save_networks(self):
pass
def set_fusion_functions(self):
weights = utils.reweight_weights(torch.Tensor(self.flags.alpha_modalities))
self.weights = weights.to(self.flags.device)
if self.flags.modality_moe:
self.modality_fusion = self.moe_fusion
self.fusion_condition = self.fusion_condition_moe
self.calc_joint_divergence = self.divergence_static_prior
elif self.flags.modality_jsd:
self.modality_fusion = self.moe_fusion
self.fusion_condition = self.fusion_condition_moe
self.calc_joint_divergence = self.divergence_dynamic_prior
elif self.flags.modality_poe:
self.modality_fusion = self.poe_fusion
self.fusion_condition = self.fusion_condition_poe
self.calc_joint_divergence = self.divergence_static_prior
elif self.flags.joint_elbo:
self.modality_fusion = self.poe_fusion
self.fusion_condition = self.fusion_condition_joint
self.calc_joint_divergence = self.divergence_static_prior
def divergence_static_prior(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights
weights = weights.clone()
weights = utils.reweight_weights(weights)
div_measures = calc_group_divergence_moe(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size)
return {
'joint_divergence': div_measures[0],
'individual_divs': div_measures[1],
'dyn_prior': None,
}
def divergence_dynamic_prior(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights
div_measures = calc_alphaJSD_modalities(self.flags,
mus,
logvars,
weights,
normalization=self.flags.batch_size)
return {
'joint_divergence': div_measures[0],
'individual_divs': div_measures[1],
'dyn_prior': div_measures[2],
}
def moe_fusion(self, mus, logvars, weights=None):
if weights is None:
weights = self.weights
weights = utils.reweight_weights(weights)
# mus = torch.cat(mus, dim=0)
# logvars = torch.cat(logvars, dim=0)
mu_moe, logvar_moe = utils.mixture_component_selection(self.flags,
mus,
logvars,
weights)
return [mu_moe, logvar_moe]
def poe_fusion(self, mus, logvars, weights=None):
"""
Fuses all modalities in subset with product of experts method.
"""
if self.flags.modality_poe:
num_samples = mus[0].shape[0]
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0)
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0)
# mus = torch.cat(mus, dim=0)
# logvars = torch.cat(logvars, dim=0)
mu_poe, logvar_poe = poe(mus, logvars)
return [mu_poe, logvar_poe]
def fusion_condition_moe(self, subset, input_batch=None):
return len(subset) == 1
def fusion_condition_poe(self, subset, input_batch=None):
return len(subset) == len(input_batch.keys())
def fusion_condition_joint(self, subset, input_batch=None):
return True
def inference(self, input_batch, num_samples=None):
num_samples = num_samples or self.flags.batch_size
latents = {}
enc_mods = self.encode(input_batch)
latents['modalities'] = enc_mods
mus = torch.Tensor().to(self.flags.device)
logvars = torch.Tensor().to(self.flags.device)
distr_subsets = {}
# concatenate mus and logvars for every modality in each subset
for s_key in self.subsets:
if s_key != '':
mods = self.subsets[s_key]
mus_subset = torch.Tensor().to(self.flags.device)
logvars_subset = torch.Tensor().to(self.flags.device)
mods_avail = True
for mod in mods:
if mod.name in input_batch:
mus_subset = torch.cat((mus_subset,
enc_mods[mod.name][0].unsqueeze(0)),
dim=0)
logvars_subset = torch.cat((logvars_subset,
enc_mods[mod.name][1].unsqueeze(0)),
dim=0)
else:
mods_avail = False
if mods_avail:
# normalize latents by number of modalities in subset
weights_subset = ((1 / float(len(mus_subset))) *
torch.ones(len(mus_subset)).to(self.flags.device))
s_mu, s_logvar = self.modality_fusion(mus_subset,
logvars_subset,
weights_subset)
distr_subsets[s_key] = [s_mu, s_logvar]
# fusion_condition always true
# store all s_mus and s_logvars in variables mus and logvars
if self.fusion_condition(mods, input_batch):
mus = torch.cat((mus, s_mu.unsqueeze(0)), dim=0)
logvars = torch.cat((logvars, s_logvar.unsqueeze(0)),
dim=0)
if self.flags.modality_jsd:
mus = torch.cat((mus, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0)
logvars = torch.cat((logvars, torch.zeros(1, num_samples,
self.flags.class_dim).to(self.flags.device)),
dim=0)
# weights = (1/float(len(mus)))*torch.ones(len(mus)).to(self.flags.device)
# normalize with number of subsets
weights = (1 / float(mus.shape[0])) * torch.ones(mus.shape[0]).to(self.flags.device)
joint_mu, joint_logvar = self.moe_fusion(mus, logvars, weights)
# mus = torch.cat(mus, dim=0)
# logvars = torch.cat(logvars, dim=0)
latents['mus'] = mus
latents['logvars'] = logvars
latents['weights'] = weights
latents['joint'] = [joint_mu, joint_logvar]
latents['subsets'] = distr_subsets
return latents
def generate(self, num_samples=None):
if num_samples is None:
num_samples = self.flags.batch_size
mu = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device)
logvar = torch.zeros(num_samples,
self.flags.class_dim).to(self.flags.device)
z_class = utils.reparameterize(mu, logvar)
z_styles = self.get_random_styles(num_samples)
random_latents = {'content': z_class, 'style': z_styles}
return self.generate_from_latents(random_latents)
def generate_from_latents(self, latents) -> typing.Mapping[str, Tensor]:
suff_stats = self.generate_sufficient_statistics_from_latents(latents)
cond_gen = {}
for m, m_key in enumerate(latents['style'].keys()):
cond_gen_m = suff_stats[m_key].mean
cond_gen[m_key] = cond_gen_m
return cond_gen
def cond_generation(self, latent_distributions, num_samples=None) \
-> typing.Mapping[str, typing.Mapping[str, Tensor]]:
if num_samples is None:
num_samples = self.flags.batch_size
style_latents = self.get_random_styles(num_samples)
cond_gen_samples = {}
for key in latent_distributions.keys():
[mu, logvar] = latent_distributions[key]
content_rep = utils.reparameterize(mu=mu, logvar=logvar)
latents = {'content': content_rep, 'style': style_latents}
cond_gen_samples[key] = self.generate_from_latents(latents)
return cond_gen_samples
```
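`poe_fusion` delegates to `poe(mus, logvars)` from `mimic.evaluation.divergence_measures.mm_div`, which is not reproduced in this dump. For Gaussian experts the product has a precision-weighted closed form, and the zero-mean, zero-logvar rows that `poe_fusion` appends act as a standard-normal prior expert. A sketch of that closed form (the repository's `poe` may differ in details such as numerical epsilons):
```python
import torch

def poe_gaussian(mus: torch.Tensor, logvars: torch.Tensor, eps: float = 1e-8):
    """Product of Gaussian experts stacked along dim 0.

    precision T_i = exp(-logvar_i); joint variance = 1 / sum_i T_i;
    joint mean = (sum_i mu_i * T_i) / sum_i T_i.
    """
    precisions = torch.exp(-logvars)
    joint_var = 1.0 / (precisions.sum(dim=0) + eps)
    joint_mu = joint_var * (mus * precisions).sum(dim=0)
    return joint_mu, torch.log(joint_var)
```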
#### File: mimic/utils/experiment.py
```python
import os
import random
import typing
from argparse import Namespace
from pathlib import Path
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from PIL import ImageFont
from matplotlib import pyplot as plt
from sklearn.metrics import average_precision_score
from torch import Tensor
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.tensorboard import SummaryWriter
from mimic import log
from mimic.dataio.MimicDataset import Mimic, Mimic_testing
from mimic.dataio.MimicDataset import MimicText as MimicTextDataset
from mimic.dataio.utils import get_transform_img, get_str_labels
from mimic.modalities.MimicLateral import MimicLateral
from mimic.modalities.MimicPA import MimicPA
from mimic.modalities.MimicText import MimicText
from mimic.modalities.Modality import Modality
from mimic.networks.CheXNet import CheXNet
from mimic.networks.ConvNetworkImgClf import ClfImg as ClfImg
from mimic.networks.ConvNetworkTextClf import ClfText as ClfText
from mimic.networks.ConvNetworksImgMimic import EncoderImg, DecoderImg
from mimic.networks.ConvNetworksTextMimic import EncoderText, DecoderText
from mimic.networks.VAEtrimodalMimic import VAEtrimodalMimic, VAETextMimic
from mimic.utils import utils
from mimic.utils.BaseExperiment import BaseExperiment
from mimic.utils.TBLogger import TBLogger
from mimic.utils.text import tensor_to_text
from mimic.utils.utils import get_clf_path, get_alphabet
from mimic.utils.utils import init_twolevel_nested_dict
class MimicExperiment(BaseExperiment):
def __init__(self, flags):
super().__init__(flags)
self.labels = get_str_labels(flags.binary_labels)
self.flags = flags
self.experiment_uid = flags.str_experiment
self.dataset = flags.dataset
self.plot_img_size = torch.Size((1, 128, 128))
if self.flags.text_encoding == 'char':
self.alphabet = get_alphabet()
self.flags.num_features = len(self.alphabet)
self.dataset_train, self.dataset_test, self.font = self.set_dataset()
self.modalities: typing.Mapping[str, Modality] = self.set_modalities()
self.num_modalities = len(self.modalities.keys())
self.subsets = self.set_subsets()
self.mm_vae = self.set_model()
self.clfs = self.set_clfs()
self.clf_transforms: dict = self.set_clf_transforms()
self.optimizer = None
self.rec_weights = self.set_rec_weights()
self.style_weights = self.set_style_weights()
self.test_samples = self.get_test_samples()
self.eval_metric = average_precision_score
self.paths_fid = self.set_paths_fid()
self.experiments_dataframe = self.get_experiments_dataframe()
self.restart_experiment = False # if true and the model returns nans, the workflow gets started again
self.number_restarts = 0
self.tb_logger = None
def set_model(self):
if self.flags.only_text_modality:
return VAETextMimic(self.flags, self.modalities, self.subsets)
else:
return VAEtrimodalMimic(self.flags, self.modalities, self.subsets)
def set_modalities(self) -> typing.Mapping[str, Modality]:
log.info('setting modalities')
mod1 = MimicPA(EncoderImg(self.flags, self.flags.style_pa_dim),
DecoderImg(self.flags, self.flags.style_pa_dim), self.flags)
mod2 = MimicLateral(EncoderImg(self.flags, self.flags.style_lat_dim),
DecoderImg(self.flags, self.flags.style_lat_dim), self.flags)
mod3 = MimicText(EncoderText(self.flags, self.flags.style_text_dim),
DecoderText(self.flags, self.flags.style_text_dim), self.flags.len_sequence,
self.plot_img_size, self.font, self.flags)
if self.flags.only_text_modality:
return {mod3.name: mod3}
else:
return {mod1.name: mod1, mod2.name: mod2, mod3.name: mod3}
def set_dataset(self):
font = ImageFont.truetype(str(Path(__file__).parent.parent / 'data/FreeSerif.ttf'),
20) if not self.flags.distributed else None
log.info('setting dataset')
# used for faster unittests i.e. a dummy dataset
if self.dataset == 'testing':
log.info('using testing dataset')
self.flags.vocab_size = 3517
d_train = Mimic_testing(self.flags)
d_eval = Mimic_testing(self.flags)
else:
if self.flags.only_text_modality:
d_train = MimicTextDataset(args=self.flags, str_labels=self.labels, split='train')
d_eval = MimicTextDataset(self.flags, self.labels, split='eval')
else:
d_train = Mimic(self.flags, self.labels, split='train')
d_eval = Mimic(self.flags, self.labels, split='eval')
return d_train, d_eval, font
def set_clf_transforms(self) -> dict:
if self.flags.text_clf_type == 'word':
def text_transform(x):
# converts one hot encoding to indices vector
return torch.argmax(x, dim=-1)
else:
def text_transform(x):
return x
# create temporary args to set the number of crops to 1
temp_args = Namespace(**vars(self.flags))
temp_args.n_crops = 1
return {
'PA': get_transform_img(temp_args, self.flags.img_clf_type),
'Lateral': get_transform_img(temp_args, self.flags.img_clf_type),
'text': text_transform
}
def set_clfs(self) -> typing.Mapping[str, torch.nn.Module]:
log.info('setting clfs')
# mapping clf type to clf_save_m*
clf_save_names: typing.Mapping[str, str] = {
'PA': self.flags.clf_save_m1,
'Lateral': self.flags.clf_save_m2,
'text': self.flags.clf_save_m3
}
clfs = {f'{mod}': None for mod in self.modalities}
if self.flags.use_clf:
for mod in self.modalities:
if mod in ['PA', 'Lateral']:
# finding the directory of the classifier
dir_img_clf = os.path.join(self.flags.dir_clf,
f'Mimic{self.flags.img_size}_{self.flags.img_clf_type}'
f'{"_bin_label" if self.flags.binary_labels else ""}')
dir_img_clf = os.path.expanduser(dir_img_clf)
# finding and loading state dict
clf = ClfImg(self.flags, self.labels) if self.flags.img_clf_type == 'resnet' else CheXNet(
len(self.labels))
clf_path = get_clf_path(dir_img_clf, clf_save_names[mod])
clf.load_state_dict(torch.load(clf_path, map_location=self.flags.device))
clfs[mod] = clf.to(self.flags.device)
elif mod == 'text':
# create temporary args to set the word encoding of the classifier to text_clf_type.
# This allows to have a different text encoding setting for the VAE than for the classifier.
temp_args = Namespace(**vars(self.flags))
temp_args.text_encoding = self.flags.text_clf_type
clf = ClfText(temp_args, self.labels)
clf_path = get_clf_path(self.flags.dir_clf, clf_save_names[
mod] + f'vocabsize_{self.flags.vocab_size}{"_bin_label" if self.flags.binary_labels else ""}')
clf.load_state_dict(torch.load(clf_path, map_location=self.flags.device))
clfs[mod] = clf.to(self.flags.device)
else:
raise NotImplementedError
return clfs
def set_optimizer(self):
log.info('setting optimizer')
# optimizer definition
optimizer = optim.Adam(
list(self.mm_vae.parameters()),
lr=self.flags.initial_learning_rate,
betas=(self.flags.beta_1, self.flags.beta_2))
self.optimizer = optimizer
def set_rec_weights(self):
"""
Sets the weights of the log probs for each modality.
"""
log.info('setting rec_weights')
return {
'PA': self.flags.rec_weight_m1,
'Lateral': self.flags.rec_weight_m2,
'text': self.flags.rec_weight_m3
}
def set_style_weights(self):
return {
'PA': self.flags.beta_m1_style,
'Lateral': self.flags.beta_m2_style,
'text': self.flags.beta_m3_style,
}
def get_prediction_from_attr(self, values):
return values.ravel()
def get_test_samples(self, num_images=10) -> typing.Iterable[typing.Mapping[str, Tensor]]:
"""
Gets random samples from the test dataset
"""
n_test = self.dataset_test.__len__()
samples = []
for _ in range(num_images):
sample, _ = self.dataset_test.__getitem__(random.randint(0, n_test - 1))
sample = utils.dict_to_device(sample, self.flags.device)
samples.append(sample)
return samples
def mean_eval_metric(self, values):
return np.mean(np.array(values))
def eval_label(self, values: Tensor, labels: Tensor, index: int = None):
"""
index: index of the labels
"""
pred = values[:, index]
gt = labels[:, index]
return self.eval_metric(gt, pred)
def get_experiments_dataframe(self) -> pd.DataFrame:
"""
Gets the experiment results dataframe which contains test results of previous experiments together with their
parameters
"""
if os.path.exists('experiments_dataframe.csv'):
experiments_dataframe = pd.read_csv('experiments_dataframe.csv')
flags_dict = vars(self.flags)
flags_dict['experiment_uid'] = self.experiment_uid
flags_dict['total_epochs'] = 0
flags_dict['experiment_duration'] = -1
else:
experiments_dataframe = pd.DataFrame()
flags_dict = vars(self.flags)
flags_dict['experiment_uid'] = self.experiment_uid
experiments_dataframe = experiments_dataframe.append(flags_dict, ignore_index=True)
experiments_dataframe.to_csv('experiments_dataframe.csv', index=False)
return experiments_dataframe
def update_experiments_dataframe(self, values_dict: dict):
"""
Updates the values in experiments dataframe with the new values from the values_dict and saves it if the
experiment is not a test run
"""
log.info(f"writing to experiment df with uid {self.experiment_uid}: {values_dict}")
# load dataframe every time in order not to overwrite other writers
if os.path.exists('experiments_dataframe.csv'):
self.experiments_dataframe = pd.read_csv('experiments_dataframe.csv')
for key, value in values_dict.items():
self.experiments_dataframe.loc[
self.experiments_dataframe['experiment_uid'] == self.experiment_uid, key] = value
if self.flags.dataset != 'testing':
self.experiments_dataframe.to_csv('experiments_dataframe.csv', index=False)
def init_summary_writer(self):
log.info(f'setting up summary writer for device {self.flags.device}')
# initialize summary writer
writer = SummaryWriter(self.flags.dir_logs)
tb_logger = TBLogger(self.flags.str_experiment, writer)
str_flags = utils.save_and_log_flags(self.flags)
tb_logger.writer.add_text('FLAGS', str_flags, 0)
# todo find a way to store model graph
# tb_logger.write_model_graph(exp.mm_vae)
self.log_text_test_samples(tb_logger)
return tb_logger
def log_text_test_samples(self, tb_logger):
"""
Logs the text test samples to the tb_logger to verify if the text encoding does what it is supposed to do.
"""
samples = self.test_samples
one_hot = self.flags.text_encoding != 'word'
text_test_samples = tensor_to_text(self,
torch.cat(([samples[i]['text'].unsqueeze(0) for i in range(5)]), 0),
one_hot=one_hot)
tb_logger.write_texts_from_list('test_samples', text_test_samples, text_encoding=self.flags.text_encoding)
class Callbacks:
def __init__(self, exp: MimicExperiment):
self.args = exp.flags
self.exp = exp
self.logger: TBLogger = exp.tb_logger
optimizer = exp.optimizer
self.experiment_df = exp.experiments_dataframe
self.start_early_stopping_epoch = self.args.start_early_stopping_epoch
self.max_early_stopping_index = self.args.max_early_stopping_index
# initialize with infinite loss
self.losses = [float('inf')]
self.patience_idx = 1
self.scheduler = ReduceLROnPlateau(optimizer, 'min', patience=5, verbose=True)
self.elapsed_times = []
self.results_lr = None
self.results = {}
def update_epoch(self, epoch, test_results, elapsed_time):
"""Evaluate the progress (i.e. if the performance is increasing) using the test results and save them to the
experiment dataframe. Return True if the metrics did not improve for more epochs than the patience_idx."""
loss = test_results['total_loss']
self._update_results_lr(test_results)
stop_early = False
self.elapsed_times.append(elapsed_time)
self.scheduler.step(loss)
self.logger.writer.add_scalars(f'test/mean_loss', {'mean_loss': loss}, epoch)
log.info(f'current test loss: {loss}')
self.save_checkpoint(epoch)
# evaluate progress
if epoch > self.start_early_stopping_epoch and loss < min(self.losses):
log.info(f'current test loss {loss} improved from {min(self.losses)}'
f' at epoch {np.argmin(self.losses)}')
self.exp.update_experiments_dataframe(
{'total_test_loss': loss, 'total_epochs': epoch, 'mean_epoch_time': np.mean(self.elapsed_times)})
self.patience_idx = 1
elif self.patience_idx > self.max_early_stopping_index:
log.info(
f'stopping early at epoch {epoch} because current test loss {loss} '
f'did not improve from {min(self.losses)} '
f'at epoch {np.argmin(self.losses)}')
stop_early = True
else:
if epoch > self.start_early_stopping_epoch:
log.info(f'current test loss {loss} did not improve from {min(self.losses)} '
f'at epoch {np.argmin(self.losses)}')
log.info(f'-- idx_early_stopping = {self.patience_idx} / {self.max_early_stopping_index}')
self.patience_idx += 1
self.losses.append(loss)
if (epoch + 1) % self.args.eval_freq == 0 or (epoch + 1) == self.args.end_epoch:
# plot evolution of metrics every Nth epochs
self.plot_results()
return stop_early
def plot_results(self):
"""Plot the results. The self.results dict needs to be in the form of {eval_method:{subset:{metrics:value}}}"""
if not self.exp.flags.dir_experiment_run.is_dir():
os.mkdir(self.exp.flags.dir_experiment_run)
for _, results in self.results.items():
for subset, sub_results in results.items():
for metric, value in sub_results.items():
plt.plot(value, label=subset)
plt.title(f'{metric}, eval freq: {self.args.eval_freq} epochs')
plt.legend()
out_path = self.exp.flags.dir_experiment_run / f"{metric.replace(' ', '_')}.png"
if out_path.is_file():
out_path.unlink()
plt.savefig(out_path)
log.info(f"Saving plot to {out_path}")
plt.close()
def _update_results_lr_(self, results_lr):
"""Save the lr eval results such that they can be plotted."""
# update values only if results_lr is not None, (the eval metrics are only evaluated every Nth epoch)
if results_lr:
if not self.results_lr:
self.results_lr = init_twolevel_nested_dict(results_lr.keys(),
results_lr[list(results_lr.keys())[0]].keys(),
init_val=[], copy_init_val=True)
for subset, results_sub in results_lr.items():
for metric in results_sub:
self.results_lr[subset][metric].append(results_sub[metric])
def _update_results_lr(self, test_results):
"""Save the lr eval results such that they can be plotted."""
# update values only if results_lr is not None, (the eval metrics are only evaluated every Nth epoch)
for eval_method, results in test_results.items():
if eval_method != 'total_loss':
if eval_method not in self.results:
self.results[eval_method] = init_twolevel_nested_dict(results.keys(),
results[list(results.keys())[0]].keys(),
init_val=[], copy_init_val=True)
for subset, results_sub in results.items():
for metric in results_sub:
self.results[eval_method][subset][metric].append(results_sub[metric])
def save_checkpoint(self, epoch):
        # save checkpoints every 50 epochs
# when using DDP, the model is the same over all devices, only need to save it for one process
if ((epoch + 1) % 50 == 0 or (
epoch + 1) == self.exp.flags.end_epoch) and (
not self.args.distributed or self.exp.flags.device % self.exp.flags.world_size == 0):
dir_network_epoch = os.path.join(self.exp.flags.dir_checkpoints, str(epoch).zfill(4))
if not os.path.exists(dir_network_epoch):
os.makedirs(dir_network_epoch)
if self.args.distributed:
self.exp.mm_vae.module.save_networks()
else:
self.exp.mm_vae.save_networks()
torch.save(self.exp.mm_vae.state_dict(),
os.path.join(dir_network_epoch, self.exp.flags.mm_vae_save))
```
#### File: mimic/utils/flags.py
```python
import argparse
import json
from mimic import log
from mimic.utils.BaseFlags import parser as parser
from mimic.utils.filehandling import expand_paths
import os
import numpy as np
from typing import Union
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser.add_argument('--exp_str_prefix', type=str, default='Mimic', help="prefix of the experiment directory.")
parser.add_argument('--dataset', type=str, default='Mimic', help="name of the dataset")
parser.add_argument('--config_path', type=str, default=None, help="path to the json config")
parser.add_argument('--verbose', type=int, default=0, help="global verbosity level")
parser.add_argument('--load_flags', type=str, default=None, help="overwrite all values with parameters from an old "
"experiment. Give the path to the flags.rar "
"file as input.")
# Image dependent
parser.add_argument('--fixed_image_extractor', type=str2bool, default=True,
help="If the feature extraction layers of the "
"pretrained densenet are frozen. "
"Only works when img_clf_type classifier "
"is densenet.")
# DATA DEPENDENT
parser.add_argument('--only_text_modality', type=str2bool, default=False,
help="flag to indicate if only the text modality is to be used")
parser.add_argument('--undersample_dataset', type=str2bool, default=False,
help="flag to indicate if the dataset should be undersampled such that there are "
"the same number of datapoints that have no label than datapoints that have a label")
parser.add_argument('--weighted_sampler', type=str2bool, default=False,
help="If a weighted sampler should be used for the dataloader.")
parser.add_argument('--binary_labels', type=str2bool, default=False,
help="If True, label 'Finding' with classes 0 and 1 will be used for the classification evaluation.")
# Text Dependent
parser.add_argument('--text_encoding', type=str, default='char',
help="encoding of the text, either character or wordwise")
parser.add_argument('--len_sequence', type=int, default=1024, help="length of sequence")
parser.add_argument('--word_min_occ', type=int, default=3,
help="min occurence of a word in the dataset such that it is added to the vocabulary.")
parser.add_argument('--text_gen_lastlayer', type=str, default='softmax',
help="Last layer of the text generator. Chose between none, softmax and sigmoid.")
parser.add_argument('--style_pa_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_lat_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_text_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--image_channels', type=int, default=1, help="number of channels of the input images")
parser.add_argument('--img_size', type=int, default=128, help="size of the images on which the model is trained")
parser.add_argument('--DIM_img', type=int, default=128, help="feature dimension of the image networks")
parser.add_argument('--DIM_text', type=int, default=128, help="feature dimension of the text networks")
parser.add_argument('--likelihood_m1', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m2', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m3', type=str, default='categorical', help="output distribution")
parser.add_argument('--dataloader_workers', type=int, default=8, help="number of workers used for the Dataloader")
parser.add_argument('--use_toy_dataset', type=str2bool, default=False, help="if true uses small toy dataset")
# paths to save models
parser.add_argument('--encoder_save_m1', type=str, default='encoderM1', help="model save for encoder")
parser.add_argument('--encoder_save_m2', type=str, default='encoderM2', help="model save for encoder")
parser.add_argument('--encoder_save_m3', type=str, default='encoderM3', help="model save for encoder")
parser.add_argument('--decoder_save_m1', type=str, default='decoderM1', help="model save for decoder")
parser.add_argument('--decoder_save_m2', type=str, default='decoderM2', help="model save for decoder")
parser.add_argument('--decoder_save_m3', type=str, default='decoderM3', help="model save for decoder")
# classifiers
parser.add_argument('--text_clf_type', type=str, default='word',
help="text classifier type, implemented are 'word' and 'char'")
parser.add_argument('--img_clf_type', type=str, default='resnet',
help="image classifier type, implemented are 'resnet' and 'densenet'")
parser.add_argument('--clf_save_m1', type=str, default='clf_m1', help="model save for clf")
parser.add_argument('--clf_save_m2', type=str, default='clf_m2', help="model save for clf")
parser.add_argument('--clf_save_m3', type=str, default='clf_m3', help="model save for clf")
parser.add_argument('--clf_loss', type=str, default='binary_crossentropy',
                    choices=['binary_crossentropy', 'crossentropy', 'bce_with_logits'],
                    help="loss function used to train the classifiers")
# Callbacks
parser.add_argument('--reduce_lr_on_plateau', type=str2bool, default=False,
                    help="boolean indicating if callback 'reduce lr on plateau' is used")
parser.add_argument('--max_early_stopping_index', type=int, default=5,
help="patience of the early stopper. If the target metric did not improve "
"for that amount of epochs, training is stopepd")
parser.add_argument('--start_early_stopping_epoch', type=int, default=0,
help="epoch on which to start the early stopping callback")
# LOSS TERM WEIGHTS
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--beta_m3_style', type=float, default=1.0, help="default weight divergence term style modality 3")
parser.add_argument('--div_weight_m1_content', type=float, default=0.25,
help="default weight divergence term content modality 1")
parser.add_argument('--div_weight_m2_content', type=float, default=0.25,
help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_m3_content', type=float, default=0.25,
help="default weight divergence term content modality 3")
parser.add_argument('--div_weight_uniform_content', type=float, default=0.25,
help="default weight divergence term prior")
parser.add_argument('--rec_weight_m1', default=0.33, type=float,
help="weight of the m1 modality for the log probs. Type should be either float or string.")
parser.add_argument('--rec_weight_m2', default=0.33, type=float,
help="weight of the m2 modality for the log probs. Type should be either float or string.")
parser.add_argument('--rec_weight_m3', default=0.33, type=float,
help="weight of the m3 modality for the log probs. Type should be either float or string.")
def update_flags_with_config(config_path: str, additional_args={}, testing=False):
"""
If testing is true, no cli arguments will be read.
"""
with open(config_path, 'rt') as json_file:
t_args = argparse.Namespace()
json_config = json.load(json_file)
t_args.__dict__.update({**json_config, **additional_args})
if testing:
return parser.parse_args([], namespace=t_args)
else:
return parser.parse_args(namespace=t_args)
def get_freer_gpu():
"""
Returns the index of the gpu with the most free memory.
Taken from https://discuss.pytorch.org/t/it-there-anyway-to-let-program-select-free-gpu-automatically/17560/6
"""
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return np.argmax(memory_available)
def setup_flags(flags, testing=False):
"""
If testing is true, no cli arguments will be read.
"""
import torch
from pathlib import Path
import numpy as np
if flags.config_path:
flags = update_flags_with_config(config_path=flags.config_path, testing=testing)
flags = expand_paths(flags)
use_cuda = torch.cuda.is_available()
flags.device = torch.device('cuda' if use_cuda else 'cpu')
if str(flags.device) == 'cuda':
torch.cuda.set_device(get_freer_gpu())
flags = flags_set_alpha_modalities(flags)
flags.log_file = log.manager.root.handlers[1].baseFilename
flags.len_sequence = 128 if flags.text_encoding == 'word' else 1024
if flags.load_flags:
old_flags = torch.load(Path(flags.load_flags).expanduser())
# create param dict from all the params of old_flags that are not paths
        params = {k: v for k, v in vars(old_flags).items() if ('dir' not in k) and ('path' not in k)}
flags.__dict__.update(params)
if not flags.seed:
# set a random seed
flags.seed = np.random.randint(0, 10000)
return flags
def flags_set_alpha_modalities(flags):
flags.alpha_modalities = [flags.div_weight_uniform_content, flags.div_weight_m1_content,
flags.div_weight_m2_content, flags.div_weight_m3_content]
return flags
```
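A short sketch of how the config helper above is typically used; the JSON keys are hypothetical overrides of the defaults defined in this file, and `testing=True` keeps the parser from reading `sys.argv`:
```python
import json
import tempfile
from mimic.utils.flags import update_flags_with_config

config = {'text_encoding': 'word', 'img_clf_type': 'densenet'}  # hypothetical overrides
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump(config, tmp)
    config_path = tmp.name
flags = update_flags_with_config(config_path, additional_args={'verbose': 1}, testing=True)
print(flags.text_encoding, flags.img_clf_type, flags.verbose)  # word densenet 1
```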
#### File: mimic/utils/TBLogger.py
```python
from typing import Iterable
import torch
from mimic.utils.text import tensor_to_text
class TBLogger():
def __init__(self, name, writer):
self.name = name
self.writer = writer
self.training_prefix = 'train'
self.testing_prefix = 'test'
self.step = 0
def write_log_probs(self, name, log_probs):
self.writer.add_scalars('%s/LogProb' % name,
log_probs,
self.step)
def write_klds(self, name, klds):
self.writer.add_scalars('%s/KLD' % name,
klds,
self.step)
def write_group_div(self, name, group_div):
self.writer.add_scalars('%s/group_divergence' % name,
{'group_div': group_div},
self.step)
def write_latent_distr(self, name, latents):
for k, key in enumerate(latents.keys()):
self.writer.add_scalars('%s/mu' % name,
{key: latents[key][0]},
self.step)
self.writer.add_scalars('%s/logvar' % name,
{key: latents[key][1]},
self.step)
def write_lr_eval(self, lr_eval):
for l_key in sorted(lr_eval.keys()):
mean_AP_keys = [k for k in lr_eval[l_key] if k.startswith('mean_AP')]
results = {k: v for k, v in lr_eval[l_key].items() if k in ['dice', 'accuracy', *mean_AP_keys]}
self.writer.add_scalars(f'Latent Representation/{l_key}', results, self.step)
def write_coherence_logs(self, gen_eval):
for j, l_key in enumerate(sorted(gen_eval['cond'].keys())):
for k, s_key in enumerate(gen_eval['cond'][l_key].keys()):
self.writer.add_scalars('Generation/%s/%s' %
(l_key, s_key),
gen_eval['cond'][l_key][s_key],
self.step)
self.writer.add_scalars('Generation/Random',
gen_eval['random'],
self.step)
def write_lhood_logs(self, lhoods):
for k, key in enumerate(sorted(lhoods.keys())):
self.writer.add_scalars('Likelihoods/%s' %
(key),
lhoods[key],
self.step)
def write_prd_scores(self, prd_scores):
self.writer.add_scalars('PRD',
prd_scores,
self.step)
def write_plots(self, plots, epoch):
for p_key in plots:
ps = plots[p_key]
for name in ps:
fig = ps[name]
self.writer.add_image(p_key + '_' + name,
fig,
epoch,
dataformats="HWC")
def add_basic_logs(self, name, joint_divergence, latents, loss, log_probs, klds):
self.writer.add_scalars('%s/Loss' % name,
{'loss': loss},
self.step)
self.write_log_probs(name, log_probs)
self.write_klds(name, klds)
self.write_group_div(name, joint_divergence)
self.write_latent_distr(name, latents=latents)
def write_training_logs(self, joint_divergence, latents, total_loss, log_probs, klds):
self.add_basic_logs(self.training_prefix, joint_divergence, latents, total_loss, log_probs,
klds)
def write_testing_logs(self, joint_divergence, latents, total_loss, log_probs, klds):
self.add_basic_logs(self.testing_prefix, joint_divergence, latents, total_loss, log_probs, klds)
def write_model_graph(self, model):
"""
writes the model graph to tensorboard
"""
self.writer.add_graph(model)
def write_text(self, log_tag: str, text: str):
self.writer.add_text(log_tag, text, global_step=self.step)
def write_texts_from_list(self, log_tag: str, texts: Iterable[str], text_encoding: str):
for i, text in enumerate(texts):
sep = ' ' if text_encoding == 'word' else ''
self.writer.add_text(log_tag, sep.join(text), global_step=i)
def set_epoch(self, epoch: int):
"""
Sets the epoch for all values that will be logged during that epoch.
"""
self.step = epoch
def write_tensor_to_text(self, text_tensor, exp, log_tag: str):
sep = ' ' if exp.flags.text_encoding == 'word' else ''
one_hot = exp.flags.text_encoding == 'char'
self.writer.add_text(log_tag, sep.join(tensor_to_text(exp, text_tensor, one_hot=one_hot)),
global_step=self.step)
``` |
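A minimal usage sketch for `TBLogger` (assumptions: `torch.utils.tensorboard` is installed, the `mimic` package is importable, and every value below is a dummy scalar):
```python
from torch.utils.tensorboard import SummaryWriter
from mimic.utils.TBLogger import TBLogger

writer = SummaryWriter(log_dir='runs/example')  # hypothetical log directory
logger = TBLogger('example_run', writer)        # hypothetical run name

logger.set_epoch(0)
logger.add_basic_logs(
    name=logger.training_prefix,
    joint_divergence=0.5,
    latents={'m1': (0.0, 1.0)},  # (mu, logvar) per modality key
    loss=1.23,
    log_probs={'m1': -3.4},
    klds={'m1': 0.1},
)
writer.close()
```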
{
"source": "Jimmy2027/torchio",
"score": 2
} |
#### File: tests/transforms/test_transforms.py
```python
import numpy as np
import torchio
from ..utils import TorchioTestCase
class TestTransforms(TorchioTestCase):
"""Tests for all transforms."""
def test_transforms(self):
landmarks_dict = dict(
t1=np.linspace(0, 100, 13),
t2=np.linspace(0, 100, 13),
)
elastic = torchio.RandomElasticDeformation(max_displacement=1)
transforms = (
torchio.CropOrPad((9, 21, 30)),
torchio.ToCanonical(),
torchio.Resample((1, 1.1, 1.25)),
torchio.RandomFlip(axes=(0, 1, 2), flip_probability=1),
torchio.RandomMotion(),
torchio.RandomGhosting(axes=(0, 1, 2)),
torchio.RandomSpike(),
torchio.RandomNoise(),
torchio.RandomBlur(),
torchio.RandomSwap(patch_size=2, num_iterations=5),
torchio.Lambda(lambda x: 2 * x, types_to_apply=torchio.INTENSITY),
torchio.RandomBiasField(),
torchio.RescaleIntensity((0, 1)),
torchio.ZNormalization(masking_method='label'),
torchio.HistogramStandardization(landmarks_dict=landmarks_dict),
elastic,
torchio.RandomAffine(),
torchio.OneOf({torchio.RandomAffine(): 3, elastic: 1}),
torchio.Pad((1, 2, 3, 0, 5, 6), padding_mode='constant', fill=3),
torchio.Crop((3, 2, 8, 0, 1, 4)),
)
transform = torchio.Compose(transforms)
transform(self.sample)
``` |
{
"source": "jimmy29304825/mydcard-spider",
"score": 2
} |
#### File: mydcard-spider/dcard/utils.py
```python
import logging
import itertools
from multiprocessing.dummy import Pool
from six.moves import http_client as httplib
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests.exceptions import RetryError
from . import prequests
logger = logging.getLogger(__name__)
class Client:
max_retries = 5
def __init__(self, workers=8):
retries = Retry(
total=self.max_retries,
backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
session = requests.Session()
session.mount('https://', HTTPAdapter(max_retries=retries))
session.headers['User-Agent'] = prequests.ua.random
self.session = session
self.pool = Pool(workers)
    def get_json(self, url, **kwargs):
        retries = kwargs.pop('retries', 0)  # internal retry counter; must not be forwarded to requests
        response = None
try:
response = self.session.get(url, **kwargs)
data = response.json()
if type(data) is dict and data.get('error'):
raise ServerResponsedError
return data
        except ValueError as e:
            logger.error('when get <%d> %s, error %s (retry#%d)',
                         response.status_code, url, e, retries)
            return self.get_json(url, retries=retries + 1) \
                if retries < self.max_retries else {}
except ServerResponsedError:
logger.error('when get <%d> %s, response: %s',
response.status_code, url, data)
return {}
except httplib.IncompleteRead as e:
logger.error('when get %s, error %s; partial: %s',
url, e, e.partial)
return {} # or shall we return `e.partial` ?
except RetryError as e:
logger.error('when get %s, retry error occurs. %s', url, e)
return {}
except Exception as e:
logger.error('error %s', e)
return {}
def get_stream(self, url, **kwargs):
request = self.session.get(url, stream=True, **kwargs)
return request
def get(self, url, **kwargs):
return prequests.get(url, session=self.session, **kwargs)
def imap(self, reqs):
return prequests.imap(reqs, stream=False, pool=self.pool)
def flatten_lists(meta_lists):
return list(itertools.chain.from_iterable(meta_lists))
class ServerResponsedError(Exception):
pass
``` |
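A minimal usage sketch for the `Client` above (assumptions: the `dcard` package and its bundled `prequests` module are importable; the URL is a placeholder, not an endpoint taken from the repo):
```python
from dcard.utils import Client, flatten_lists

client = Client(workers=4)
data = client.get_json('https://example.com/api/posts')  # parsed JSON, or {} on any error
print(flatten_lists([[1, 2], [3]]))                       # [1, 2, 3]
```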
{
"source": "Jimmy-312/TiMe",
"score": 3
} |
#### File: Jimmy-312/TiMe/main.py
```python
from task import *
import wx
import wx.adv
import wx.lib.buttons as buttons
class MyFrame(wx.Frame):
def __init__(self, parent, id):
print('Hello,TiMe!')
self.con=[]
self.producecon()
wx.Frame.__init__(self,parent,id,title = "time312",pos=(300,300),size = (306,306),style=0)
self.head= wx.Panel(self,size=(306,33))
self.main=wx.Panel(self,size=(306,150),pos=(0,33))
self.det=wx.Panel(self,size=(306,123),pos=(0,183))
iadd = wx.Image('mess/1.png').ConvertToBitmap()
label = wx.StaticText(self.head, label = "TiMe", pos = (3,1.5))
font=wx.Font(18,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
self.head.SetBackgroundColour("white")
self.main.SetBackgroundColour((65,105,225))
self.det.SetBackgroundColour((30,144,255))
label.SetFont(font)
self.show()
self.editf=False
badd = buttons.GenBitmapButton(self.head, -1,iadd, pos=(273, 1.5),size=(30,30),style=0)
badd.SetBackgroundColour("white")
self.Bind(wx.EVT_BUTTON, lambda evt,f=1:self.edit(evt,f),badd)
self.onPlay(self)
def show(self):
self.producecon()
m1=self.info[0]
mtitle=m1['title']
tmtime=transtime(m1['time'])
tminfo=m1['info']
dtitle=transdet(self.info)
mtext=wx.StaticText(self.main,label='' , pos=(3,0),style=wx.ST_NO_AUTORESIZE,size=(300,80))
font=wx.Font(39,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
mtext.SetLabel(mtitle)
mtext.SetBackgroundColour((65,105,225))
mtext.SetForegroundColour("White")
mtext.SetFont(font)
mtime=wx.StaticText(self.main,label='' , pos=(3,63))
font=wx.Font(13,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
mtime.SetLabel(tmtime)
mtime.SetForegroundColour("White")
mtime.SetFont(font)
minfo=wx.StaticText(self.main,label='' , pos=(3,85),size=(297,60),style=wx.ST_NO_AUTORESIZE)
font=wx.Font(8,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
minfo.SetLabel(tminfo)
minfo.SetForegroundColour("White")
minfo.SetFont(font)
dtext=wx.StaticText(self.det,label=dtitle , pos=(1.5,3),size=(260,110),style=wx.ST_NO_AUTORESIZE)
font=wx.Font(10,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
dtext.SetForegroundColour((240,255,255))
dtext.SetFont(font)
def edit(self,event,f=0):
if self.editf:
self.dedit.Close()
self.editf=False
else:
try:
self.dadd.Close()
self.addf=False
except:
pass
self.dedit = wx.Frame(self,-1,title = "edit",pos=(610,300),size = (306,306),style=0)
head= wx.Panel(self.dedit,size=(306,33))
main= wx.Panel(self.dedit,size=(306,273),pos=(0,33))
label = wx.StaticText(head, label = "Edit", pos = (3,1.5))
font=wx.Font(18,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
main.SetBackgroundColour((0,191,255))
label.SetFont(font)
bfinish = wx.Button(main, -1, "Okay", pos=(218, 240),style=0,size=(80,27))
bfinish.SetBackgroundColour("white")
self.Bind(wx.EVT_BUTTON,self.efinish,bfinish)
self.bdelete = wx.Button(main, -1, "Delete", pos=(128, 240),style=0,size=(80,27))
self.bdelete.SetBackgroundColour("white")
self.Bind(wx.EVT_BUTTON,lambda evt,op=1:self.efinish(evt,op),self.bdelete)
self.bdelete.Disable()
font=wx.Font(10,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
tname=wx.StaticText(main,label='Title:' , pos=(5,8))
tname.SetForegroundColour("White")
tname.SetFont(font)
tdate=wx.StaticText(main,label='Date:' , pos=(5,35))
tdate.SetForegroundColour("White")
tdate.SetFont(font)
tmark=wx.StaticText(main,label='Mark:' , pos=(5,62))
tmark.SetForegroundColour("White")
tmark.SetFont(font)
tring=wx.StaticText(main,label='Sound:' , pos=(5,86))
tring.SetForegroundColour("White")
tring.SetFont(font)
tdet=wx.StaticText(main,label='Detail:' , pos=(5,110))
tdet.SetForegroundColour("White")
tdet.SetFont(font)
font=wx.Font(8,wx.DECORATIVE,wx.NORMAL,wx.BOLD,faceName='Microsoft YaHei UI')
imark = wx.RadioButton(main, -1, 'Lv.S', pos=(50, 64))
#imark.SetForegroundColour((100,100,100))
            imark.SetFont(font)
imark1 = wx.RadioButton(main, -1, 'Lv.A', pos=(100, 64))
imark1.SetFont(font)
imark2 = wx.RadioButton(main, -1, 'Lv.B', pos=(150, 64))
imark2.SetFont(font)
imark3 = wx.RadioButton(main, -1, 'Lv.C', pos=(200, 64))
imark3.SetFont(font)
self.ename1 = wx.Choice(main,-1,choices=self.infoname,pos=(45,6),size=(80, 20))
self.ename = wx.TextCtrl(main,-1,'', size=(80, 24),pos=(130,6))
self.idatey = wx.Choice(main, -1, choices=self.con[0], pos=(45, 35),size=(55,20))
self.idatem = wx.Choice(main, -1, choices=self.con[1], pos=(105, 35),size=(40,20))
self.idated = wx.Choice(main, -1, choices=[''], pos=(150, 35),size=(40,20))
self.ename1.SetFont(font)
self.ename.SetFont(font)
self.ename1.SetSelection(0)
self.Bind(wx.EVT_CHOICE, lambda evt,inn=self.infoname:self.getinfo(inn), self.ename1)
self.Bind(wx.EVT_CHOICE, self.choosem, self.idatey)
self.Bind(wx.EVT_CHOICE, self.choosem, self.idatem)
self.idet = wx.TextCtrl(main,-1,"", size=(280, 100),pos=(10,135),style=wx.TE_MULTILINE|wx.TE_RICH2)
font=wx.Font(8,wx.DECORATIVE,wx.NORMAL,wx.NORMAL,faceName='Microsoft YaHei UI')
self.idet.SetFont(font)
self.dedit.Show()
self.editf=True
def onPlay(self, evt):
self.sound = wx.adv.Sound('mess/robot.wav')
if self.sound.IsOk():
self.sound.Play()
def getinfo(self,inn):
name=inn[self.ename1.GetSelection()]
if name=='New':
self.createt(self)
return
self.ename.SetValue(name)
self.bdelete.Enable()
for i in self.info:
if i['title']==name:
info=i
break
y=info['time'][2]
m=info['time'][0]
d=info['time'][1]
self.idatey.SetSelection(y-2000)
self.idatem.SetSelection(m-1)
self.choosem(self)
self.idated.SetSelection(d-1)
self.idet.SetValue(info['info'])
def choosem(self, event):
t =int(self.con[0][self.idatey.GetSelection()])
a=int(self.con[1][self.idatem.GetSelection()])
if a==2:
if (t==2000 or (t%4==0 and t%100!=0)):
self.idated.SetItems(self.con[5])
else:
self.idated.SetItems(self.con[4])
elif a in [1,3,5,7,8,10,12]:
self.idated.SetItems(self.con[2])
elif a in [4,6,9,11]:
self.idated.SetItems(self.con[3])
def createt(self,event):
if self.ename.GetValue()=='':
return 0
try:
data=[self.ename.GetValue(),[self.con[1][self.idatem.GetSelection()],self.idated.Items[self.idated.GetSelection()],\
self.con[0][self.idatey.GetSelection()]],self.idet.GetValue()]
handledata(data)
self.show()
self.show()#strange problem
self.add(self)
except:
return 0
def efinish(self,event,op=0):
try:
name=self.infoname[self.ename1.GetSelection()]
dname=self.ename.GetValue()
if name=='New':
self.createt(self)
if op==0:
if self.ename.GetValue()=='':
return 0
a={'title':dname,'time':[int(self.con[1][self.idatem.GetSelection()]),int(self.idated.Items[self.idated.GetSelection()]),\
int(self.con[0][self.idatey.GetSelection()])],'info':self.idet.GetValue()}
for i in range(len(self.info)):
if self.info[i]['title']==name:
self.info.pop(i)
self.info.append(a)
break
else:
for i in range(len(self.info)):
if self.info[i]['title']==name:
self.info.pop(i)
break
save(self.info)
self.show()
self.show()#strange problem
self.edit(self)
except:
return 0
def producecon(self):
self.info=orderinfo()
self.infoname=['New']
for i in self.info:
self.infoname.append(i['title'])
t=[]
for i in range(2000,2101):
t.append(str(i))
self.con.append(t)
t=[]
for i in range(1,13):
t.append(str(i))
self.con.append(t)
t=[]
for i in range(1,32):
t.append(str(i))
self.con.append(t)
t=[]
for i in range(1,31):
t.append(str(i))
self.con.append(t)
t=[]
for i in range(1,29):
t.append(str(i))
self.con.append(t)
t=[]
for i in range(1,30):
t.append(str(i))
self.con.append(t)
def main():
app = wx.App()
app.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED)
win = MyFrame(None,-1)
win.Show()
app.MainLoop()
if __name__=='__main__':
main()
```
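The date dropdowns above are driven by the pre-built day lists from `producecon()` and the selection logic in `choosem()`. A GUI-free sketch of that same month-length rule, restated here for illustration (not code from the repo):
```python
def days_in_month(year, month):
    """Mirror of choosem(): month lengths for the 2000-2100 range used by the GUI."""
    if month == 2:
        # same leap-year test as choosem(): 2000 is special-cased, otherwise
        # divisible by 4 but not by 100 (sufficient for 2000-2100)
        return 29 if year == 2000 or (year % 4 == 0 and year % 100 != 0) else 28
    if month in (1, 3, 5, 7, 8, 10, 12):
        return 31
    return 30

print(days_in_month(2024, 2), days_in_month(2100, 2))  # 29 28
```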
#### File: Jimmy-312/TiMe/task.py
```python
import datetime
cal={1:'Jan.',2:'Feb.',3:'Mar.',4:'Apr.',5:'May.',6:'June.',7:'July.',8:'Aug.',9:'Sept.',10:'Oct.',11:'Nov.',12:'Dec.'}
week_dict = {
0 : 'Mon.',
1 : 'Tue.',
2 : 'Wed.',
3 : 'Thu.',
4 : 'Fri.',
5 : 'Sat.',
6 : 'Sun.',
}
def getinfo():
infolist=[]
with open('info.tm','rb') as f:
a=f.readlines()
for i in a:
infolist.append(eval(i))
return infolist
def orderinfo():
info=getinfo()
while True:
count=0
for i in range(len(info)-1):
#print(info)
t=info[i]['time']
a=info[i+1]['time']
if t[2]<a[2]:
c=info[i]
info[i]=info[i+1]
info[i+1]=c
count+=1
if t[2]==a[2]:
if t[0]<a[0]:
c=info[i]
info[i]=info[i+1]
info[i+1]=c
count+=1
if t[0]==a[0]:
if t[1]<a[1]:
c=info[i]
info[i]=info[i+1]
info[i+1]=c
count+=1
if count==0:
break
return info
def transtime(time):
a=datetime.datetime(time[2],time[0],time[1]).strftime("%w");
t=cal[time[0]]+f'{time[1]},{time[2]}'+' '+week_dict[int(a)]
return t
def transdet(info):
dt=''
for i in info[1:]:
a=i['title']+'-'+f"{i['time'][2]}.{i['time'][0]}.{i['time'][1]}"
dt=dt+a+'\n'
return dt
def handledata(data):
t={}
for i in range(len(data[1])):
data[1][i]=int(data[1][i])
t['title']=data[0]
t['info']=data[2]
t['time']=data[1]
with open('info.tm','a',encoding="utf-8") as f:
f.write(str(t)+'\n')
return t
def save(info):
with open('info.tm','w',encoding="utf-8") as f:
for i in info:
f.write(str(i)+'\n')
#ordermain(getinfo())
``` |
{
"source": "jimmy516/openptv-python",
"score": 3
} |
#### File: openptv-python/pyptv_gui/ext_sequence_denis.py
```python
import numpy as np
from scipy.misc import imread
import random
class Sequence():
""" Sequence class defines external tracking addon for pyptv
User needs to implement the following functions:
do_sequence(self)
Connection to C ptv module is given via self.ptv and provided by pyptv software
Connection to active parameters is given via self.exp1 and provided by pyptv software.
User responsibility is to read necessary files, make the calculations and write the files back.
"""
def __init__(self,ptv=None,exp1=None, camera_list=None):
self.ptv=ptv
self.exp1=exp1
self.camera_list=camera_list
# Do your initialization here
def do_sequence(self):
""" this function is callback for "tracking without display"
"""
print "inside denis_ext_sequence"
n_camera=self.exp1.active_params.m_params.Num_Cam
print ("Starting sequence action")
seq_first=self.exp1.active_params.m_params.Seq_First
seq_last=self.exp1.active_params.m_params.Seq_Last
print seq_first,seq_last
base_name=[]
for i in range (n_camera):
exec("base_name.append(self.exp1.active_params.m_params.Basename_%d_Seq)" %(i+1))
print base_name[i]
self.ptv.py_sequence_init(0) #init C sequence function
stepshake=self.ptv.py_get_from_sequence_init() #get parameters and pass to main loop
if not stepshake:
stepshake=1
print stepshake
temp_img=np.array([],dtype=np.ubyte)
# main loop - format image name, read it and call v.py_sequence_loop(..) for current step
for i in range(seq_first,seq_last+1,stepshake):
if i<10:
seq_ch="%01d" % i
elif i<100:
seq_ch="%02d" % i
else:
seq_ch="%03d" % i
for j in range (n_camera):
img_name=base_name[j]+seq_ch
print ("Setting image: ",img_name)
try:
temp_img=imread(img_name).astype(np.ubyte)
except:
print "Error reading file"
self.ptv.py_set_img(temp_img,j)
self.ptv.py_sequence_loop(0,i)
self.camera_list[0].drawquiver([int(300*random.random())],[int(300*random.random())],[int(300*random.random())],[int(300*random.random())],"green",linewidth=3.0)
self.camera_list[0]._plot.request_redraw()
```
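A minimal sketch of how pyptv picks this add-on up (assumption: the module name is listed, one per line, in `external_sequence_list.txt` next to the GUI; `ptv`, `exp1` and `camera_list` are the live objects the GUI passes in):
```python
def run_external_sequence(ptv, exp1, camera_list, module_name='ext_sequence_denis'):
    # Mirrors the non-default branch of sequence_action() in pyptv_gui.py.
    seq_module = __import__(module_name)  # module name taken from external_sequence_list.txt
    seq = seq_module.Sequence(ptv=ptv, exp1=exp1, camera_list=camera_list)
    seq.do_sequence()
```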
#### File: openptv-python/pyptv_gui/pyptv_batch.py
```python
from scipy.misc import imread
import os
import sys
import numpy as np
# project specific inputs
import parameters as par
import general
# directory from which we run the software
cwd = os.getcwd()
# import pdb; pdb.set_trace()
if len(sys.argv) < 4:
print("Wrong number of inputs, usage: python pyptv_batch.py experiments/exp1 seq_first seq_last")
software_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
print 'software_path=', software_path
try:
os.chdir(software_path)
except:
print("Error in instalation or software path")
src_path = os.path.join(os.path.split(os.path.abspath(os.getcwd()))[0],'src_c')
sys.path.append(src_path)
import ptv1 as ptv
exp_path = os.path.abspath(sys.argv[1])
print 'exp_path=', exp_path
try:
os.chdir(exp_path)
print(os.getcwd())
except:
print('Wrong experimental directory %s' % exp_path)
def sequence_tracking(n_img):
# get following variables from the parameters:
# n_camera, seq_first, seq_last, base_name
sequenceParams = par.SequenceParams(n_img, path = par.temp_path)
sequenceParams.read()
(base_name, seq_first, seq_last) = (sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
print ("Starting sequence action")
ptv.py_sequence_init(0)
stepshake=ptv.py_get_from_sequence_init()
if not stepshake:
stepshake=1
print stepshake
temp_img=np.array([],dtype=np.ubyte)
for i in range(seq_first,seq_last+1,stepshake):
if i<10:
seq_ch="%01d" % i
elif i<100:
seq_ch="%02d" % i
else:
seq_ch="%03d" % i
for j in range (n_img):
img_name=base_name[j]+seq_ch
print ("Setting image: ",img_name)
try:
temp_img=imread(img_name).astype(np.ubyte)
except:
print "Error reading file"
ptv.py_set_img(temp_img,j)
ptv.py_sequence_loop(0,i)
# forward tracking
run_info = ptv.py_trackcorr_init()
print run_info.get_sequence_range()
for step in range(*run_info.get_sequence_range()):
print step
ptv.py_trackcorr_loop(run_info, step, display=0)
ptv.py_trackcorr_finish(run_info, step + 1)
print "tracking without display finished"
ptv.py_trackback_c()
print "tracking backwards is finished"
def sequence(n_img):
# get following variables from the parameters:
# n_camera, seq_first, seq_last, base_name
sequenceParams = par.SequenceParams(n_img, path = par.temp_path)
sequenceParams.read()
(base_name, seq_first, seq_last) = (sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
print ("Starting sequence action")
ptv.py_sequence_init(0)
stepshake=ptv.py_get_from_sequence_init()
if not stepshake:
stepshake=1
print stepshake
temp_img=np.array([],dtype=np.ubyte)
for i in range(seq_first,seq_last+1,stepshake):
if i<10:
seq_ch="%01d" % i
elif i<100:
seq_ch="%02d" % i
else:
seq_ch="%03d" % i
for j in range (n_img):
img_name=base_name[j]+seq_ch
print ("Setting image: ",img_name)
try:
temp_img=imread(img_name).astype(np.ubyte)
except:
print "Error reading file"
ptv.py_set_img(temp_img,j)
ptv.py_sequence_loop(0,i)
def run_batch(new_seq_first,new_seq_last):
# import pdb; pdb.set_trace()
ptv.py_init_proc_c()
ptv.py_start_proc_c() # or ptv.py_init_proc_c()?
ptvParams = par.PtvParams(path = par.temp_path)
ptvParams.read()
(n_img, img_name, img_cal, hp_flag, allCam_flag, tiff_flag, imx, imy, pix_x, pix_y, chfield, mmp_n1, mmp_n2, mmp_n3, mmp_d) = \
(ptvParams.n_img, ptvParams.img_name, ptvParams.img_cal, ptvParams.hp_flag, ptvParams.allCam_flag, ptvParams.tiff_flag, \
ptvParams.imx, ptvParams.imy, ptvParams.pix_x, ptvParams.pix_y, ptvParams.chfield, ptvParams.mmp_n1, ptvParams.mmp_n2, ptvParams.mmp_n3, ptvParams.mmp_d)
# read the sequence parameters
sequenceParams = par.SequenceParams(n_img, path = par.temp_path)
sequenceParams.read()
(base_name, seq_first, seq_last) = (sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
# write the new sequence parameters
par.SequenceParams(n_img, base_name,\
new_seq_first, new_seq_last, path = par.temp_path).write()
# if you need sequence and tracking:
sequence_tracking(n_img)
# if you need sequence only:
# sequence(n_img)
if __name__ == '__main__':
import time
start = time.time()
repetitions = 1 # 10 or 100 for heavy load
for i in range(repetitions):
try:
seq_first = sys.argv[2]
seq_last = sys.argv[3]
run_batch(eval(seq_first),eval(seq_last))
except:
print("something wrong with the software or folder")
general.printException()
end = time.time()
print 'time lapsed %f sec' % (end - start)
```
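For reference, the intended invocation of the batch script above; the frame range is illustrative:
```python
# Command line (run from the pyptv_gui directory):
#   python pyptv_batch.py experiments/exp1 497 597
# Once the module has chdir'ed into the experiment folder, this boils down to:
run_batch(497, 597)  # sequencing plus forward and backward tracking over frames 497-597
```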
#### File: openptv-python/pyptv_gui/pyptv_gui.py
```python
from traits.api \
import HasTraits, Str, Int, List, Bool, Enum, Instance, Any
from traitsui.api \
import TreeEditor, TreeNode, View, Item, \
Handler, Group
from enable.component_editor import ComponentEditor
from chaco.api import Plot, ArrayPlotData, gray
from traitsui.menu import MenuBar, Menu, Action
from chaco.tools.api import ZoomTool,PanTool
from scipy.misc import imread
from threading import Thread
from pyface.api import GUI
import os
import sys
import numpy as np
# Parse inputs:
# Get the path to the software
if len(sys.argv) > 0:
software_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
else:
software_path = os.path.abspath(os.getcwd())
if not os.path.isdir(software_path):
print ("Wrong experimental directory %s " % software_path)
# Path to the experiment
if len(sys.argv) > 1:
exp_path = os.path.abspath(sys.argv[1])
if not os.path.isdir(exp_path):
print ("Wrong experimental directory %s " % exp_path)
# change directory to the software path
try:
os.chdir(software_path)
except:
print("Wrong software path %s " % software_path)
src_path = os.path.join(os.path.split(software_path)[0],'src_c')
print 'src_path=', src_path
if not os.path.isdir(src_path):
print("Wrong src_c path %s" % src_path)
sys.path.append(src_path)
import ptv1 as ptv
from tracking_framebuf import read_targets
# pyPTV specific imports
import general
import parameters as par
from parameter_gui import *
from calibration_gui import *
from directory_editor import DirectoryEditorDialog
from quiverplot import QuiverPlot
from demo import *
if len(sys.argv) < 2:
directory_dialog = DirectoryEditorDialog()
directory_dialog.configure_traits()
exp_path = directory_dialog.dir_name # default_path+os.sep+'exp1'
cwd = os.getcwd()
try:
os.chdir(exp_path)
except:
print('Wrong experimental directory %s' % exp_path)
#
class Clicker(ImageInspectorTool):
""" Clicker class handles right mouse click actions from the tree and menubar actions
"""
left_changed = Int(1)
right_changed = Int(1)
x = 0
y = 0
def normal_left_down(self, event):
""" Handles the left mouse button being clicked.
Fires the **new_value** event with the data (if any) from the event's
position.
"""
plot = self.component
if plot is not None:
ndx = plot.map_index((event.x, event.y))
x_index, y_index = ndx
image_data=plot.value
self.x=(x_index)
self.y=(y_index)
self.data_value=image_data.data[y_index, x_index]
self.left_changed = 1 - self.left_changed
self.last_mouse_position = (event.x, event.y)
return
def normal_right_down(self, event):
""" Handles the right mouse button being clicked. Fires the **new_value** event
with the data (if any) from the event position.
"""
plot = self.component
if plot is not None:
ndx = plot.map_index((event.x, event.y))
x_index, y_index = ndx
# image_data=plot.value
self.x=(x_index)
self.y=(y_index)
self.right_changed = 1-self.right_changed
print "self.right_changed: ", self.right_changed
print self.x
print self.y
self.last_mouse_position = (event.x, event.y)
return
def normal_mouse_move(self, event):
pass
def __init__(self, *args, **kwargs):
super(Clicker, self).__init__(*args, **kwargs)
# --------------------------------------------------------------
class CameraWindow (HasTraits):
""" CameraWindow class contains the relevant information and functions for a single camera window: image, zoom, pan
important members:
_plot_data - contains image data to display (used by update_image)
_plot - instance of Plot class to use with _plot_data
_click_tool - instance of Clicker tool for the single camera window, to handle mouse processing
"""
_plot_data=Instance(ArrayPlotData)
_plot=Instance(Plot)
_click_tool=Instance(Clicker)
rclicked=Int(0)
cam_color = ''
name=Str
view = View( Item(name='_plot',editor=ComponentEditor(), show_label=False) )
# view = View( Item(name='_plot',show_label=False) )
def __init__(self, color):
""" Initialization of plot system
"""
padd=25
self._plot_data=ArrayPlotData()
self._plot=Plot(self._plot_data, default_origin="top left")
self._plot.padding_left=padd
self._plot.padding_right=padd
self._plot.padding_top=padd
self._plot.padding_bottom=padd
self.right_p_x0,self.right_p_y0,self.right_p_x1,self.right_p_y1,self._quiverplots=[],[],[],[],[]
self.cam_color = color
def attach_tools(self):
""" attach_tools(self) contains the relevant tools: clicker, pan, zoom
"""
self._click_tool=Clicker(self._img_plot)
self._click_tool.on_trait_change(self.left_clicked_event, 'left_changed') #set processing events for Clicker
self._click_tool.on_trait_change(self.right_clicked_event, 'right_changed')
self._img_plot.tools.append(self._click_tool)
pan = PanTool(self._plot, drag_button = 'middle')
zoom_tool= ZoomTool(self._plot, tool_mode="box", always_on=False)
# zoom_tool = BetterZoom(component=self._plot, tool_mode="box", always_on=False)
zoom_tool.max_zoom_out_factor=1.0 # Disable "bird view" zoom out
self._img_plot.overlays.append(zoom_tool)
self._img_plot.tools.append(pan)
def left_clicked_event(self): #TODO: why do we need the clicker_tool if we can handle mouse clicks here?
""" left_clicked_event - processes left click mouse avents and displays coordinate and grey value information
on the screen
"""
print "x = %d, y= %d, grey= %d " % (self._click_tool.x,self._click_tool.y,self._click_tool.data_value)
        # need to print gray value
def right_clicked_event(self):
self.rclicked=1 #flag that is tracked by main_gui, for right_click_process function of main_gui
#self._click_tool.y,self.name])
#self.drawcross("coord_x","coord_y",self._click_tool.x,self._click_tool.y,"red",5)
#print ("right clicked, "+self.name)
#need to print cross and manage other camera's crosses
def update_image(self,image,is_float):
""" update_image - displays/updates image in the curren camera window
parameters:
image - image data
is_float - if true, displays an image as float array,
else displays as byte array (B&W or gray)
example usage:
update_image(image,is_float=False)
"""
if is_float:
self._plot_data.set_data('imagedata',image.astype(np.float))
else:
self._plot_data.set_data('imagedata',image.astype(np.byte))
if not hasattr(self,'_img_plot'): #make a new plot if there is nothing to update
self._img_plot=Instance(ImagePlot)
self._img_plot=self._plot.img_plot('imagedata',colormap=gray)[0]
self.attach_tools()
# self._plot.request_redraw()
def drawcross(self, str_x,str_y,x,y,color1,mrk_size,marker1="plus"):
""" drawcross draws crosses at a given location (x,y) using color and marker in the current camera window
parameters:
str_x - label for x coordinates
str_y - label for y coordinates
x - array of x coordinates
y - array of y coordinates
mrk_size - marker size
            marker1 - type of marker, e.g "plus","circle"
example usage:
drawcross("coord_x","coord_y",[100,200,300],[100,200,300],2)
draws plus markers of size 2 at points (100,100),(200,200),(200,300)
"""
self._plot_data.set_data(str_x,x)
self._plot_data.set_data(str_y,y)
self._plot.plot((str_x,str_y),type="scatter",color=color1,marker=marker1,marker_size=mrk_size)
#self._plot.request_redraw()
def drawquiver(self,x1c,y1c,x2c,y2c,color,linewidth=1.0):
""" drawquiver draws multiple lines at once on the screen x1,y1->x2,y2 in the current camera window
parameters:
x1c - array of x1 coordinates
y1c - array of y1 coordinates
x2c - array of x2 coordinates
y2c - array of y2 coordinates
color - color of the line
linewidth - linewidth of the line
example usage:
drawquiver ([100,200],[100,100],[400,400],[300,200],'red',linewidth=2.0)
draws 2 red lines with thickness = 2 : 100,100->400,300 and 200,100->400,200
"""
x1,y1,x2,y2=self.remove_short_lines(x1c,y1c,x2c,y2c)
if len(x1)>0:
xs=ArrayDataSource(x1)
ys=ArrayDataSource(y1)
quiverplot=QuiverPlot(index=xs,value=ys,\
index_mapper=LinearMapper(range=self._plot.index_mapper.range),\
value_mapper=LinearMapper(range=self._plot.value_mapper.range),\
origin = self._plot.origin,arrow_size=0,\
line_color=color,line_width=linewidth,ep_index=np.array(x2),ep_value=np.array(y2)
)
self._plot.add(quiverplot)
self._quiverplots.append(quiverplot) #we need this to track how many quiverplots are in the current plot
# import pdb; pdb.set_trace()
def remove_short_lines(self,x1,y1,x2,y2):
""" removes short lines from the array of lines
parameters:
x1,y1,x2,y2 - start and end coordinates of the lines
returns:
x1f,y1f,x2f,y2f - start and end coordinates of the lines, with short lines removed
example usage:
x1,y1,x2,y2=remove_short_lines([100,200,300],[100,200,300],[100,200,300],[102,210,320])
3 input lines, 1 short line will be removed (100,100->100,102)
returned coordinates:
x1=[200,300]; y1=[200,300]; x2=[200,300]; y2=[210,320]
"""
dx,dy=2,2 #minimum allowable dx,dy
x1f,y1f,x2f,y2f=[],[],[],[]
for i in range(len(x1)):
if abs(x1[i]-x2[i])>dx or abs(y1[i]-y2[i])>dy:
x1f.append(x1[i])
y1f.append(y1[i])
x2f.append(x2[i])
y2f.append(y2[i])
return x1f,y1f,x2f,y2f
def drawline(self,str_x,str_y,x1,y1,x2,y2,color1):
""" drawline draws 1 line on the screen by using lineplot x1,y1->x2,y2
parameters:
str_x - label of x coordinate
str_y - label of y coordinate
x1,y1,x2,y2 - start and end coordinates of the line
color1 - color of the line
example usage:
drawline("x_coord","y_coord",100,100,200,200,red)
draws a red line 100,100->200,200
"""
self._plot_data.set_data(str_x,[x1,x2])
self._plot_data.set_data(str_y,[y1,y2])
self._plot.plot((str_x,str_y),type="line",color=color1)
#self._plot.request_redraw()
class TrackThread(Thread):
""" TrackThread is used by tracking with display function - runs separate thread that updates the gui
"""
def run(self):
print "tracking with display thread started"
run_info = ptv.py_trackcorr_init() #init the relevant C function
for step in range(*run_info.get_sequence_range()): #Loop over each step in sequence
self.track_step=step
#Call C function to process current step and store results for plotting
self.intx0, self.intx1, self.intx2, self.inty0, self.inty1, \
self.inty2, self.pnr1, self.pnr2, self.pnr3, self.m_tr = \
ptv.py_trackcorr_loop(run_info, step, display=1)
self.can_continue=False
GUI.invoke_later(setattr, main_gui, 'update_thread_plot', True) #invoke plotting when system is ready to process it
while not self.can_continue: # wait while plotting of the current step is finished, then continue for the next step
pass
del self.intx0,self.intx1
ptv.py_trackcorr_finish(run_info, step + 1) # call finishing C function (after all the steps in the loop are processed)
for i in range(len(main_gui.camera_list)): # refresh cameras
main_gui.camera_list[i]._plot.request_redraw()
print "tracking with display thread finished"
class TreeMenuHandler (Handler):
""" TreeMenuHanlder contains all the callback actions of menu bar, processing of tree editor, and reactions of the GUI to the user clicks
possible function declarations:
1) to process menubar actions:
def function(self, info):
parameters: self - needed for member function declaration,
info - contains pointer to calling parent class (e.g main_gui)
To access parent class objects use info.object, for example info.object.exp1 gives access to exp1 member of main_gui class
2) to process tree editor actions:
def function(self,editor,object) - see examples below
"""
def configure_main_par(self, editor, object):
experiment = editor.get_parent(object)
paramset = object
print 'Total paramsets:', len(experiment.paramsets)
if paramset.m_params==None:
#TODO: is it possible that control reaches here? If not, probably the if should be removed.
paramset.m_params=Main_Params()
else:
paramset.m_params._reload()
paramset.m_params.edit_traits(kind='modal')
def configure_cal_par(self, editor, object):
experiment = editor.get_parent(object)
paramset = object
print len(experiment.paramsets)
if paramset.c_params==None:
#TODO: is it possible that control reaches here? If not, probably the if should be removed.
paramset.c_params=Calib_Params()
else:
paramset.c_params._reload()
paramset.c_params.edit_traits(kind='modal')
def configure_track_par(self, editor, object):
experiment = editor.get_parent(object)
paramset = object
print len(experiment.paramsets)
if paramset.t_params==None:
#TODO: is it possible that control reaches here? If not, probably the if should be removed.
paramset.t_params=Tracking_Params()
paramset.t_params.edit_traits(kind='modal')
def set_active(self, editor, object):
experiment = editor.get_parent(object)
paramset = object
# experiment.active_params = paramset
experiment.setActive(paramset)
experiment.changed_active_params = True
# editor.object.__init__()
def copy_set_params(self, editor, object):
experiment = editor.get_parent(object)
paramset = object
i = 1
new_name = None
new_dir_path = None
flag = False
while not flag:
new_name = "%s (%d)" % (paramset.name, i)
new_dir_path = "%s%s" % (general.par_dir_prefix, new_name)
if not os.path.isdir(new_dir_path):
flag = True
else:
i=i+1
os.mkdir(new_dir_path)
par.copy_params_dir(paramset.par_path, new_dir_path)
experiment.addParamset(new_name, new_dir_path)
def rename_set_params(self,editor,object):
""" rename_set_params renames the node name on the tree and also the folder of parameters
"""
experiment = editor.get_parent(object)
paramset = object
# rename
editor._menu_rename_node()
new_name = object.name
new_dir_path = general.par_dir_prefix + new_name
os.mkdir(new_dir_path)
par.copy_params_dir(paramset.par_path, new_dir_path)
[os.remove(os.path.join(paramset.par_path,f)) for f in os.listdir(paramset.par_path)]
os.rmdir(paramset.par_path)
experiment.removeParamset(paramset)
experiment.addParamset(new_name, new_dir_path)
def delete_set_params(self, editor, object):
""" delete_set_params deletes the node and the folder of parameters
"""
# experiment = editor.get_parent(object)
paramset = object
# delete node
editor._menu_delete_node()
# delete all the parameter files
[os.remove(os.path.join(paramset.par_path,f)) for f in os.listdir(paramset.par_path)]
# remove folder
os.rmdir(paramset.par_path)
#------------------------------------------
# Menubar actions
#------------------------------------------
def new_action(self,info):
print("Not implemented")
def open_action(self,info):
directory_dialog = DirectoryEditorDialog()
directory_dialog.edit_traits()
exp_path = directory_dialog.dir_name
print "Changing experimental path to %s" % exp_path
os.chdir(exp_path)
info.object.exp1.populate_runs(exp_path)
def exit_action(self,info):
print ("not implemented yet")
def saveas_action(self,info):
print ("not implemented yet")
def showimg_action(self,info):
print "not implemented"
info.object.update_plots(info.object.orig_image)
def highpass_action(self,info):
""" highpass_action - calls ptv.py_pre_processing_c() binding which does highpass on working images (object.orig_image)
that were set with init action
"""
print ("highpass started")
ptv.py_pre_processing_c() # call the binding
info.object.update_plots(info.object.orig_image)
print ("highpass finished")
def img_coord_action(self,info):
""" img_coord_action - runs detection function by using ptv.py_detection_proc_c() binding. results are extracted
with help of ptv.py_get_pix(x,y) binding and plotted on the screen
"""
print ("detection proc started")
ptv.py_detection_proc_c()
print ("detection proc finished")
x=[]
y=[]
ptv.py_get_pix(x,y) # extract detected points from C with help of py_get_pix binding
info.object.drawcross("x","y",x,y,"blue",3)
def corresp_action(self,info):
""" corresp_action - calls ptv.py_correspondences_proc_c(quadriplets,triplets,pairs, unused) binding.
        Results of the correspondence action are filled into the quadriplets, triplets, pairs, unused arrays
"""
print ("correspondence proc started")
quadriplets=[]
triplets=[]
pairs=[]
unused=[]
ptv.py_correspondences_proc_c(quadriplets,triplets,pairs, unused)
info.object.clear_plots(remove_background=False)
#info.object.update_plots(info.object.orig_image)
info.object.drawcross("quad_x","quad_y",quadriplets[0],quadriplets[1],"red",3) #draw quadriplets, triplets, etc...
info.object.drawcross("tripl_x","tripl_y",triplets[0],triplets[1],"green",3)
info.object.drawcross("pair_x","pair_y",pairs[0],pairs[1],"yellow",3)
info.object.drawcross("unused_x","unused_y",unused[0],unused[1],"blue",3)
def init_action(self,info):
""" init_action - clears existing plots from the camera windows,
initializes C image arrays with mainGui.orig_image and
calls appropriate start_proc_c by using ptv.py_start_proc_c()
"""
mainGui = info.object
mainGui.exp1.syncActiveDir() #synchronize the active run params dir with the temp params dir
for i in range (0,len(mainGui.camera_list)):
exec("mainGui.orig_image[%d]=imread(mainGui.exp1.active_params.m_params.Name_%d_Image).astype(np.ubyte)" %(i,i+1))
if hasattr(mainGui.camera_list[i],'_img_plot'):
del mainGui.camera_list[i]._img_plot
mainGui.clear_plots()
print("\nInit action\n")
mainGui.update_plots(mainGui.orig_image,is_float=1)
mainGui.set_images(mainGui.orig_image)
ptv.py_start_proc_c()
mainGui.pass_init=True
print ("done")
def calib_action(self,info):
""" calib_action - initializes calib class with appropriate number of plot windows,
passes to calib class pointer to ptv module and to exp1 class,
invokes the calibration GUI
"""
print ("Starting calibration dialog")
mainGui = info.object
mainGui.pass_init=False
num_cams=len(mainGui.camera_list)
#TODO: calibration_params should work with the actual parameters of the current run (not par.temp_path)
# Right. I replace par_path with info.object.exp1.active_params.par_path, Alex, 20.01. 10:43
mainGui.calib = calibration_gui(info.object.exp1.active_params.par_path)
for i in range (0,num_cams):
mainGui.calib.camera.append(plot_window())
mainGui.calib.camera[i].name="Camera"+str(i+1)
mainGui.calib.camera[i].cameraN=i
mainGui.calib.camera[i].py_rclick_delete=ptv.py_rclick_delete
mainGui.calib.camera[i].py_get_pix_N=ptv.py_get_pix_N
mainGui.calib.ptv=ptv
mainGui.calib.exp1=mainGui.exp1 #pass all the parameters to calib class
mainGui.calib.configure_traits()
def sequence_action(self,info):
"""sequence action - implements binding to C sequence function. Original function was split into 2 parts:
        1) initialization - bound by the ptv.py_sequence_init(..) function
        2) main loop processing - bound by the ptv.py_sequence_loop(..) function
"""
extern_sequence=info.object.plugins.sequence_alg
if extern_sequence!='default':
try:
current_path=os.path.abspath(os.getcwd()) # save working path
os.chdir(software_path) #change to software path, to load tracking module
                seq=__import__(extern_sequence) #import the chosen sequence algorithm from the software dir
except:
print "Error loading "+extern_sequence+". Falling back to default sequence algorithm"
extern_sequence='default'
os.chdir(current_path) # change back to working path
if extern_sequence=='default':
n_camera=len(info.object.camera_list)
print ("Starting sequence action (default algorithm)")
seq_first=info.object.exp1.active_params.m_params.Seq_First
seq_last=info.object.exp1.active_params.m_params.Seq_Last
print seq_first,seq_last
base_name=[]
for i in range (n_camera):
exec("base_name.append(info.object.exp1.active_params.m_params.Basename_%d_Seq)" %(i+1))
print base_name[i]
ptv.py_sequence_init(0) #init C sequence function
stepshake=ptv.py_get_from_sequence_init() #get parameters and pass to main loop
if not stepshake:
stepshake=1
print stepshake
temp_img=np.array([],dtype=np.ubyte)
# main loop - format image name, read it and call v.py_sequence_loop(..) for current step
for i in range(seq_first,seq_last+1,stepshake):
seq_ch="%04d" % i
for j in range (n_camera):
img_name = base_name[j] + seq_ch
print ("Setting image: ",img_name)
try:
temp_img=imread(img_name).astype(np.ubyte)
except:
print "Error reading file"
ptv.py_set_img(temp_img,j)
ptv.py_sequence_loop(0,i)
else:
print "Sequence by using "+ extern_sequence
sequence=seq.Sequence(ptv=ptv, exp1=info.object.exp1,camera_list=info.object.camera_list)
sequence.do_sequence()
#print "Sequence by using "+extern_sequence+" has failed."
def track_no_disp_action(self, info):
""" track_no_disp_action uses ptv.py_trackcorr_loop(..) binding to call tracking without display
"""
extern_tracker=info.object.plugins.track_alg
if extern_tracker!='default':
try:
current_path=os.path.abspath(os.getcwd()) # save working path
os.chdir(software_path) #change to software path, to load tracking module
                track=__import__(extern_tracker) #import the chosen tracker from the software dir
except:
print "Error loading "+extern_tracker+". Falling back to default tracker"
extern_tracker='default'
os.chdir(current_path) # change back to working path
if extern_tracker=='default':
print "Using default tracker"
run_info = ptv.py_trackcorr_init()
print run_info.get_sequence_range()
for step in range(*run_info.get_sequence_range()):
print step
ptv.py_trackcorr_loop(run_info, step, display=0)
#finalize tracking
ptv.py_trackcorr_finish(run_info, step + 1)
else:
print "Tracking by using "+extern_tracker
tracker=track.Tracking(ptv=ptv, exp1=info.object.exp1)
tracker.do_tracking()
print "tracking without display finished"
def track_disp_action(self,info):
""" tracking with display is handled by TrackThread which does processing step by step and
        waits for the GUI to update before proceeding to the next step
"""
info.object.clear_plots(remove_background=False)
info.object.tr_thread=TrackThread()
info.object.tr_thread.start()
def track_back_action(self,info):
""" tracking back action is handled by ptv.py_trackback_c() binding
"""
print ("Starting back tracking")
ptv.py_trackback_c()
def threed_positions(self,info):
ptv.py_determination_proc_c(0)
def multigrid_demo(self,info):
demo_window=DemoGUI(ptv=ptv, exp1=info.object.exp1)
demo_window.configure_traits()
def detect_part_track(self, info):
""" track detected particles is handled by 2 bindings:
1) tracking_framebuf.read_targets(..)
2) ptv.py_get_mark_track_c(..)
"""
info.object.clear_plots(remove_background=False) #clear everything
info.object.update_plots(info.object.orig_image,is_float=1)
prm = info.object.exp1.active_params.m_params
seq_first = prm.Seq_First #get sequence parameters
seq_last = prm.Seq_Last
base_names = [prm.Basename_1_Seq, prm.Basename_2_Seq,
prm.Basename_3_Seq, prm.Basename_4_Seq]
info.object.load_set_seq_image(seq_first) #load first seq image and set appropriate C array
n_images=len(info.object.camera_list)
print "Starting detect_part_track"
x1_a,x2_a,y1_a,y2_a=[],[],[],[]
for i in range (n_images): #initialize result arrays
x1_a.append([])
x2_a.append([])
y1_a.append([])
y2_a.append([])
for i_seq in range(seq_first, seq_last+1): #loop over sequences
for i_img in range(n_images):
intx_green,inty_green,intx_blue,inty_blue=[],[],[],[]
imx, imy, zoomx, zoomy, zoomf = ptv.py_get_mark_track_c(i_img)
targets = read_targets(base_names[i_img], i_seq)
for h in range(len(targets)):
#get data from C
tx, ty = targets[h].pos()
if (targets[h].tnr() > -1):
intx_green.append(int(imx/2 + zoomf*(tx - zoomx)))
inty_green.append(int(imy/2 + zoomf*(ty - zoomy)))
else:
intx_blue.append(int(imx/2 + zoomf*(tx - zoomx)))
inty_blue.append(int(imy/2 + zoomf*(ty - zoomy)))
x1_a[i_img]=x1_a[i_img]+intx_green # add current step to result array
x2_a[i_img]=x2_a[i_img]+intx_blue
y1_a[i_img]=y1_a[i_img]+inty_green
y2_a[i_img]=y2_a[i_img]+inty_blue
# info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_gr",str(i_seq)+"y_tr_gr",intx_green,inty_green,"green",3)
# info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_bl",str(i_seq)+"y_tr_bl",intx_blue,inty_blue,"blue",2)
#plot result arrays
for i_img in range(n_images):
info.object.camera_list[i_img].drawcross("x_tr_gr","y_tr_gr",x1_a[i_img],y1_a[i_img],"green",3)
info.object.camera_list[i_img].drawcross("x_tr_bl","y_tr_bl",x2_a[i_img],y2_a[i_img],"blue",2)
info.object.camera_list[i_img]._plot.request_redraw()
print "Finished detect_part_track"
def traject_action(self,info):
""" show trajectories is handled by ptv.py_traject_loop(..) which returns data to be plotted.
traject_action collects data to be plotted from all the steps and plots it at once.
"""
print "Starting show trajectories"
info.object.clear_plots(remove_background=False)
seq_first=info.object.exp1.active_params.m_params.Seq_First
seq_last=info.object.exp1.active_params.m_params.Seq_Last
info.object.load_set_seq_image(seq_first,display_only=True)
n_camera=len(info.object.camera_list)
x1_a,x2_a,y1_a,y2_a=[],[],[],[]
for i in range (n_camera): #initialize result arrays
x1_a.append([])
x2_a.append([])
y1_a.append([])
y2_a.append([])
for i_seq in range(seq_first, seq_last):
x1,y1,x2,y2,m1_tr=ptv.py_traject_loop(i_seq)
for i in range(n_camera):
x1_a[i]=x1_a[i]+x1[i]
x2_a[i]=x2_a[i]+x2[i]
y1_a[i]=y1_a[i]+y1[i]
y2_a[i]=y2_a[i]+y2[i]
print "Show trajectories finished"
for i in range(n_camera):
info.object.camera_list[i].drawcross("trajx1","trajy1",x1_a[i],y1_a[i],"blue",2)
info.object.camera_list[i].drawcross("trajx2","trajy2",x2_a[i],y2_a[i],"red",2)
info.object.camera_list[i].drawquiver(x1_a[i],y1_a[i],x2_a[i],y2_a[i],"green",linewidth=3.0)
info.object.camera_list[i]._plot.request_redraw()
def plugin_action(self,info):
""" Configure plugins by using GUI
"""
info.object.plugins.read()
info.object.plugins.configure_traits()
#----------------------------------------------------------------
# Actions associated with right mouse button clicks (treeeditor)
# ---------------------------------------------------------------
ConfigMainParams = Action(name="Main parameters",action='handler.configure_main_par(editor,object)')
ConfigCalibParams = Action(name="Calibration parameters",action='handler.configure_cal_par(editor,object)')
ConfigTrackParams = Action(name="Tracking parameters",action='handler.configure_track_par(editor,object)')
SetAsDefault = Action(name="Set as active",action='handler.set_active(editor,object)')
CopySetParams = Action(name="Copy set of parameters",action='handler.copy_set_params(editor,object)')
RenameSetParams = Action(name="Rename run",action='handler.rename_set_params(editor,object)')
DeleteSetParams = Action(name="Delete run",action='handler.delete_set_params(editor,object)')
# -----------------------------------------
# Defines the menubar
#------------------------------------------
menu_bar = MenuBar(
Menu(
Action(name='New',action='new_action'),
Action(name='Open',action='open_action'),
Action(name='Save As',action='saveas_action'),
Action(name='Exit',action='exit_action'),
name='File'
),
Menu(
Action(name='Init / Restart',action='init_action'),
name='Start'
),
Menu(
#Action(name='Show original image',action='showimg_action',enabled_when='pass_init'),
Action(name='High pass filter',action='highpass_action',enabled_when='pass_init'),
Action(name='Image coord',action='img_coord_action',enabled_when='pass_init'),
Action(name='Correspondences',action='corresp_action',enabled_when='pass_init'),
name='Preprocess'
),
Menu(
Action(name='3D positions',action='threed_positions'),
name='3D Positions'
),
Menu(
Action(name='Create calibration',action='calib_action'), #,enabled_when='pass_init'),
name='Calibration'
),
Menu(
Action(name='Sequence without display',action='sequence_action',enabled_when='pass_init'),
name='Sequence'
),
Menu(
Action(name='Detected Particles',action='detect_part_track',enabled_when='pass_init'),
Action(name='Tracking without display',action='track_no_disp_action',enabled_when='pass_init'),
Action(name='Tracking with display',action='track_disp_action',enabled_when='pass_init'),
Action(name='Tracking backwards',action='track_back_action',enabled_when='pass_init'),
Action(name='Show trajectories',action='traject_action',enabled_when='pass_init'),
name='Tracking'
),
Menu(
Action(name='Configure tracking/sequence',action='plugin_action'),
name='Plugins'
),
Menu(
Action(name='Run multigrid demo',action='multigrid_demo'),
name='Demo'
),
)
#----------------------------------------
# tree editor for the Experiment() class
#
tree_editor_exp=TreeEditor(
nodes=[
TreeNode(
node_for=[Experiment],
auto_open=True,
children = '',
label = '=Experiment',
),
TreeNode(
node_for=[Experiment],
auto_open=True,
children='paramsets',
label= '=Parameters',
add=[Paramset],
menu = Menu(
CopySetParams
)
),
TreeNode(
node_for=[Paramset],
auto_open=True,
children='',
label= 'name',
menu=Menu(
NewAction,
CopySetParams,
RenameSetParams,
DeleteSetParams,
Separator(),
ConfigMainParams,
ConfigCalibParams,
ConfigTrackParams,
Separator(),
SetAsDefault
)
)
],
editable = False,
)
# -------------------------------------------------------------------------
class Plugins (HasTraits):
track_list=List
seq_list=List
track_alg=Enum(values='track_list')
sequence_alg=Enum(values='seq_list')
view = View(
Group(
Item(name='track_alg', label="Choose tracking algorithm:"),
Item(name='sequence_alg', label="Choose sequence algorithm:")
),
buttons = [ 'OK'],
title = 'External plugins configuration'
)
def __init__(self):
self.read()
def read(self):
# reading external tracking
try:
f=open(os.path.join(software_path, "external_tracker_list.txt"), 'r')
trackers=f.read().split('\n')
trackers.insert(0,'default')
self.track_list=trackers
f.close()
except:
self.track_list=['default']
# reading external sequence
try:
f=open(os.path.join(software_path, "external_sequence_list.txt"), 'r')
seq=f.read().split('\n')
seq.insert(0,'default')
self.seq_list=seq
f.close()
except:
self.seq_list=['default']
# ----------------------------------------------
class MainGUI (HasTraits):
""" MainGUI is the main class under which the Model-View-Control (MVC) model is defined
"""
camera_list=List
imgplt_flag=0
pass_init=Bool(False)
update_thread_plot=Bool(False)
tr_thread=Instance(TrackThread)
selected = Any
# Defines GUI view --------------------------
view = View(
Group(
Group(Item(name = 'exp1', editor = tree_editor_exp, show_label=False, width=-400, resizable=False),
Item('camera_list',style = 'custom',editor =
ListEditor( use_notebook = True,deletable = False,
dock_style = 'tab',
page_name = '.name',
selected='selected'),
show_label = False),
orientation='horizontal',
show_left=False),
orientation='vertical'),
title = 'pyPTV',
id='main_view',
width = 1.,
height = 1.,
resizable = True,
handler=TreeMenuHandler(), # <== Handler class is attached
menubar=menu_bar)
def _selected_changed(self):
self.current_camera=int(self.selected.name.split(' ')[1])-1
#---------------------------------------------------
# Constructor and Chaco windows initialization
#---------------------------------------------------
def __init__(self):
super(MainGUI, self).__init__()
colors = ['yellow','green','red','blue']
self.exp1=Experiment()
self.exp1.populate_runs(exp_path)
self.plugins=Plugins()
self.n_camera=self.exp1.active_params.m_params.Num_Cam
print self.n_camera
self.orig_image=[]
self.hp_image=[]
self.current_camera=0
self.camera_list = []
for i in range(self.n_camera):
self.camera_list.append(CameraWindow(colors[i]))
self.camera_list[i].name="Camera "+str(i+1)
self.camera_list[i].on_trait_change(self.right_click_process, 'rclicked')
self.orig_image.append(np.array([],dtype=np.ubyte))
self.hp_image.append(np.array([]))
        ptv.py_init_proc_c() #initialization of globals in ptv C module
#------------------------------------------------------
def right_click_process(self):
x_clicked, y_clicked, n_camera = 0,0,0
h_img = self.exp1.active_params.m_params.imx
v_img = self.exp1.active_params.m_params.imy
for i in range(len(self.camera_list)):
n_camera = i
x_clicked,y_clicked=self.camera_list[i]._click_tool.x,self.camera_list[i]._click_tool.y
x1,y1,x2,y2,x1_points,y1_points,intx1,inty1=ptv.py_right_click(\
x_clicked,y_clicked,n_camera)
if (x1!=-1 and y1!=-1):
self.camera_list[n_camera].right_p_x0.append(intx1)
self.camera_list[n_camera].right_p_y0.append(inty1)
self.camera_list[n_camera].drawcross("right_p_x0","right_p_y0",\
self.camera_list[n_camera].right_p_x0,\
self.camera_list[n_camera].right_p_y0, "cyan", 3, marker1 = "circle")
self.camera_list[n_camera]._plot.request_redraw()
print "right click process"
print "(x1,y1),(x2,y2),(x1_points,y1_points)"
print zip(x1,y1),zip(x2,y2),zip(x1_points,y1_points)
color_camera=['yellow','red','blue','green']
#print [x1[i]],[y1[i]],[x2[i]],[y2[i]]
for j in range(len(self.camera_list)):
if j is not n_camera:
count=self.camera_list[i]._plot.plots.keys()
self.camera_list[j].drawline("right_cl_x"+str(len(count)),"right_cl_y"+str(len(count)),x1[j],y1[j],x2[j],y2[j],color_camera[n_camera])
self.camera_list[j]._plot.index_mapper.range.set_bounds(0,h_img)
self.camera_list[j]._plot.value_mapper.range.set_bounds(0,v_img)
self.camera_list[j].drawcross("right_p_x1","right_p_y1",\
x1_points[j],y1_points[j],color_camera[n_camera],2)
self.camera_list[j]._plot.request_redraw()
else:
print ("No nearby points for epipolar lines")
self.camera_list[n_camera].rclicked=0
def update_plots(self,images,is_float=0):
for i in range(len(images)):
self.camera_list[i].update_image(images[i],is_float)
self.camera_list[i]._plot.request_redraw()
# set_images sets ptv's C module img[] array
def set_images(self,images):
for i in range(len(images)):
ptv.py_set_img(images[i],i)
def get_images(self,plot_index,images):
for i in plot_index:
ptv.py_get_img(images[i],i)
def drawcross(self,str_x,str_y,x,y,color1,size1):
for i in range(len(self.camera_list)):
self.camera_list[i].drawcross(str_x,str_y,x[i],y[i],color1,size1)
self.camera_list[i]._plot.request_redraw()
def clear_plots(self,remove_background=True):
# this function deletes all plotes except basic image plot
if not remove_background:
index='plot0'
else:
index=None
for i in range(len(self.camera_list)):
plot_list=self.camera_list[i]._plot.plots.keys()
#if not remove_background:
# index=None
try:
plot_list.remove(index)
except:
pass
self.camera_list[i]._plot.delplot(*plot_list[0:])
self.camera_list[i]._plot.tools=[]
self.camera_list[i]._plot.request_redraw()
for j in range(len(self.camera_list[i]._quiverplots)):
self.camera_list[i]._plot.remove(self.camera_list[i]._quiverplots[j])
self.camera_list[i]._quiverplots=[]
self.camera_list[i].right_p_x0=[]
self.camera_list[i].right_p_y0=[]
self.camera_list[i].right_p_x1=[]
self.camera_list[i].right_p_y1=[]
def _update_thread_plot_changed(self):
n_camera=len(self.camera_list)
if self.update_thread_plot and self.tr_thread:
print "updating plots..\n"
step=self.tr_thread.track_step
x0,x1,x2,y0,y1,y2,pnr1,pnr2,pnr3,m_tr=\
self.tr_thread.intx0,self.tr_thread.intx1,self.tr_thread.intx2,\
self.tr_thread.inty0,self.tr_thread.inty1,self.tr_thread.inty2,self.tr_thread.pnr1,\
self.tr_thread.pnr2,self.tr_thread.pnr3,self.tr_thread.m_tr
for i in range (n_camera):
self.camera_list[i].drawcross(str(step)+"x0",str(step)+"y0",x0[i],y0[i],"green",2)
self.camera_list[i].drawcross(str(step)+"x1",str(step)+"y1",x1[i],y1[i],"yellow",2)
self.camera_list[i].drawcross(str(step)+"x2",str(step)+"y2",x2[i],y2[i],"white",2)
self.camera_list[i].drawquiver(x0[i],y0[i],x1[i],y1[i],"orange")
self.camera_list[i].drawquiver(x1[i],y1[i],x2[i],y2[i],"white")
## for j in range (m_tr):
## str_plt=str(step)+"_"+str(j)
##
## self.camera_list[i].drawline\
## (str_plt+"vec_x0",str_plt+"vec_y0",x0[i][j],y0[i][j],x1[i][j],y1[i][j],"orange")
## self.camera_list[i].drawline\
## (str_plt+"vec_x1",str_plt+"vec_y1",x1[i][j],y1[i][j],x2[i][j],y2[i][j],"white")
self.load_set_seq_image(step,update_all=False,display_only=True)
self.camera_list[self.current_camera]._plot.request_redraw()
self.tr_thread.can_continue=True
self.update_thread_plot=False
def load_set_seq_image(self,seq, update_all=True,display_only=False):
n_camera=len(self.camera_list)
if not hasattr(self,'base_name'):
self.base_name=[]
for i in range (n_camera):
exec("self.base_name.append(self.exp1.active_params.m_params.Basename_%d_Seq)" %(i+1))
print self.base_name[i]
i=seq
seq_ch = "%04d" % i
if not update_all:
j=self.current_camera
img_name=self.base_name[j]+seq_ch
self.load_disp_image(img_name,j,display_only)
else:
for j in range (n_camera):
img_name=self.base_name[j]+seq_ch
self.load_disp_image(img_name,j,display_only)
def load_disp_image(self, img_name,j,display_only=False):
print ("Setting image: "+str(img_name))
temp_img=np.array([],dtype=np.ubyte)
try:
temp_img=imread(img_name).astype(np.ubyte)
if not display_only:
ptv.py_set_img(temp_img,j)
if len(temp_img)>0:
self.camera_list[j].update_image(temp_img,is_float=1)
except:
print "Error reading file"
# -------------------------------------------------------------
if __name__ == '__main__':
try:
main_gui = MainGUI()
#gui1.exp1.populate_runs(exp_path)
main_gui.configure_traits()
except:
print("something wrong with the software or folder")
general.printException()
os.chdir(cwd) #get back to the original workdir
```
#### File: pyptv_gui/test/test_processing.py
```python
import unittest
import os, shutil, glob, re
from scipy.misc import imread
from ptv1 import py_start_proc_c, py_init_proc_c
from ptv1 import py_sequence_init, py_sequence_loop, py_set_img
from ptv1 import py_trackcorr_init, py_trackcorr_loop, py_trackcorr_finish
from ptv1 import py_trackback_c
def compare_directories(dir1, dir2, mask=None):
"""
For all files in dir1, check that the file exists in dir2 and that the
files are equal line by line.
Arguments:
dir1, dir2 - path to the directories for comparison.
    mask - a compiled regular expression. Only file names matching it will be
compared, others will be skipped. If None (default), all files are
compared.
Returns:
True if equal, False otherwise.
"""
for fname in os.listdir(dir1):
if (mask is not None) and (mask.search(fname) is None):
continue
fout = open(dir1 + fname, "r")
fcmp = open(dir2 + fname, "r")
while True:
lout = fout.readline()
lcmp = fcmp.readline()
if lout != lcmp:
return False
if len(lout) == 0: break
fout.close()
fcmp.close()
return True
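# Hedged usage sketch (not part of the test suite; the paths and the mask
# below are assumptions for illustration only):
#
#   import re
#   same = compare_directories("res/", "after_sequencing/",
#                              mask=re.compile("_targets$"))
#   print("directories match" if same else "directories differ")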
class TestProcessing(unittest.TestCase):
def setUp(self):
os.chdir("testing_fodder/")
if os.path.exists("res/"):
shutil.rmtree("res/")
if os.path.exists("scene83_event1/"):
shutil.rmtree("scene83_event1/")
shutil.copytree("clean_scene/", "scene83_event1/")
def tearDown(self):
shutil.rmtree("res/")
shutil.rmtree("scene83_event1/")
os.chdir("../")
def test_sequencing(self):
"""Sequencing reproduces sample results"""
os.mkdir("res/")
py_init_proc_c()
py_start_proc_c()
py_sequence_init(0)
for frame in xrange(497, 598):
for cam in xrange(4):
img = imread(
"scene83_event1/cam%d_Scene83_%04d" % (cam + 1, frame))
py_set_img(img, cam)
py_sequence_loop(0, frame)
self.failUnless(compare_directories("res/", "after_sequencing/"))
self.failUnless(compare_directories(
"scene83_event1/", "after_sequencing_targets/",
mask=re.compile("_targets$")))
def test_tracking_run_object(self):
"""The TrackingRun object has access to needed attributes"""
shutil.copytree("after_sequencing/", "res/")
for fname in glob.iglob("after_sequencing_targets/*"):
shutil.copy(fname, "scene83_event1/")
run_info = py_trackcorr_init()
first, last = run_info.get_sequence_range()
self.failUnlessEqual(first, 497)
self.failUnlessEqual(last, 597)
def test_tracking(self):
"""Tracking reproduces sample results"""
shutil.copytree("after_sequencing/", "res/")
for fname in glob.iglob("after_sequencing_targets/*"):
shutil.copy(fname, "scene83_event1/")
py_init_proc_c()
py_start_proc_c()
run_info = py_trackcorr_init()
for frame in range(497, 597):
py_trackcorr_loop(run_info, frame, display=0)
py_trackcorr_finish(run_info, 597)
self.failUnless(compare_directories("res/", "after_tracking/"))
self.failUnless(compare_directories(
"scene83_event1/", "after_tracking_targets/",
mask=re.compile("_targets$")))
def test_backtracking(self):
"""Tracking back reproduces sample results"""
shutil.copytree("after_tracking/", "res/")
for fname in glob.iglob("after_tracking_targets/*"):
shutil.copy(fname, "scene83_event1/")
py_init_proc_c()
py_start_proc_c()
py_trackback_c()
self.failUnless(compare_directories("res/", "after_backtracking/"))
self.failUnless(compare_directories(
"scene83_event1/", "after_backtracking_targets/",
mask=re.compile("_targets$")))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jimmy5467/track-down",
"score": 3
} |
#### File: track-down/ML/compare_faces.py
```python
import subprocess
import sys
import face_recognition
import cv2
import os
import numpy as np
def read_img(path):
img = cv2.imread(path)
(h, w) = img.shape[:2]
width = 500
ratio = width / float(w)
height = int(h * ratio)
return cv2.resize(img, (width, height))
Known_encodings = []
known_names = []
known_dir = '..//images//db//Known'
for file in os.listdir(known_dir):
    if file.endswith(('.png', '.jpg', '.jpeg', '.tiff')):
img = read_img(known_dir + '/' + file)
img_enc = face_recognition.face_encodings(img)[0]
Known_encodings.append(img_enc)
known_names.append(file.split('.')[0])
flag = False
Unknown_dir = '..//images//db//Known//Unknown'
for file in os.listdir(Unknown_dir):
img = read_img(Unknown_dir + '/' + file)
img_enc = face_recognition.face_encodings(img)[0]
results = face_recognition.compare_faces(Known_encodings, img_enc, tolerance=0.47)
for i in range(len(results)):
if results[i]:
print(known_names[i])
flag = True
if not flag:
    print('No match found in data.')
```
#### File: pf_django/website/models.py
```python
from django.db import models
from django.utils import timezone
import os, glob
from uuid import uuid4
count = 0
def path_and_rename(instance, filename):
name1 = str(len(glob.glob('../images/db/Known/*')))
print(name1)
return os.path.join(name1 + ".jpg")
def rename(instance, filename):
os.remove('../images/db/Known/Unknown/0.jpg')
name1 = str(0)
return os.path.join('./Unknown/' + name1 + ".jpg")
class Uploader(models.Model):
fname_u = models.CharField(max_length=200)
lname_u = models.CharField(max_length=200)
email_u = models.EmailField()
mobile_u = models.IntegerField()
image_l = models.ImageField(upload_to=path_and_rename, null=True, blank=True)
# image of uploader
def __str__(self):
return self.fname_u + ' ' + self.lname_u
class Find(models.Model):
image = models.ImageField(upload_to=rename, null=True, blank=True)
# class Finder(models.Model):
# input image for finder
``` |
{
"source": "jimmy60504/seisnn",
"score": 2
} |
#### File: seisnn/scripts/compare_dataset.py
```python
import seisnn
from datetime import datetime
from obspy import UTCDateTime
import numpy as np
def get_from_time_to_time(pick, delta=0.1):
from_time = UTCDateTime(pick.time) - delta
from_time = datetime.strptime(str(from_time), '%Y-%m-%dT%H:%M:%S.%fZ')
to_time = UTCDateTime(pick.time) + delta
to_time = datetime.strptime(str(to_time), '%Y-%m-%dT%H:%M:%S.%fZ')
return from_time, to_time
def compare(station, database, delta=0.2):
tp = [0, 0]
db = seisnn.sql.Client(database=database)
P_picks = db.get_picks(tag='manual', phase='P', station=station.station)
S_picks = db.get_picks(tag='manual', phase='S', station=station.station)
for i, picks in enumerate([P_picks, S_picks]):
for pick in picks:
from_time, to_time = get_from_time_to_time(pick, delta)
correspond_pick = db.get_picks(from_time=from_time, to_time=to_time, tag='predict', phase=pick.phase,
station=pick.station)
if correspond_pick:
tp[i] = tp[i] + 1
print(station)
print(f'total in station: {len(picks)} true positive {tp[i]}')
return tp
def main():
stations = seisnn.sql.Client(database="Hualien.db").get_inventories()
tp = seisnn.utils.parallel(stations,
func=compare,
database="Hualien.db",
delta=0.1,
batch_size=1)
tp = np.array(tp).reshape(-1,2)
tp = tp.sum(axis = 0)
print(tp)
if __name__ == '__main__':
main()
```
#### File: seisnn/seisnn/core.py
```python
import os
import numpy as np
import scipy.signal
import obspy
import seisnn.example_proto
import seisnn.io
import seisnn.plot
import seisnn.sql
class Metadata:
"""
Main class for metadata.
"""
id = None
station = None
starttime = None
endtime = None
npts = None
delta = None
def __init__(self, input_data=None):
if isinstance(input_data, obspy.Trace):
self.from_trace(input_data)
elif isinstance(input_data, seisnn.example_proto.Feature):
self.from_feature(input_data)
def from_trace(self, trace):
self.id = trace.id
self.station = trace.stats.station
self.starttime = trace.stats.starttime
self.endtime = trace.stats.endtime
self.npts = trace.stats.npts
self.delta = trace.stats.delta
return self
def from_feature(self, feature):
self.id = feature.id
self.station = feature.station
self.starttime = obspy.UTCDateTime(feature.starttime)
self.endtime = obspy.UTCDateTime(feature.endtime)
self.npts = feature.npts
self.delta = feature.delta
return self
class Trace:
"""
Main class for trace data.
"""
metadata = None
channel = None
data = None
def __init__(self, input_data):
if isinstance(input_data, obspy.Stream):
self.from_stream(input_data)
elif isinstance(input_data, seisnn.example_proto.Feature):
self.from_feature(input_data)
def from_stream(self, stream):
"""
Gets waveform from Obspy stream.
:param stream: Obspy stream object.
:return: Waveform object.
"""
channel = []
data = np.zeros([3008, 3])
for i, comp in enumerate(['Z', 'N', 'E']):
try:
st = stream.select(component=comp)
data[:, i] = st.traces[0].data
channel.append(st.traces[0].stats.channel)
except IndexError:
pass
except Exception as error:
print(f'{type(error).__name__}: {error}')
self.data = data
self.channel = channel
self.metadata = Metadata(stream.traces[0])
return self
def from_feature(self, feature):
self.metadata = Metadata(feature)
self.data = feature.trace
self.channel = feature.channel
return self
def get_snr(self, pick, second=1):
vector = np.linalg.norm(self.data, axis=2)[0]
point = int((pick.time - self.metadata.starttime) * 100)
if point >= second * 100:
signal = vector[point:point + second * 100]
noise = vector[point - len(signal):point]
else:
noise = vector[0:point]
signal = vector[point:point + len(noise)]
snr = seisnn.qc.signal_to_noise_ratio(signal=signal, noise=noise)
pick.snr = np.around(snr, 4)
class Label:
"""
Main class for label data.
"""
picks = None
def __init__(self, metadata, phase, tag=None):
self.metadata = metadata
self.phase = phase
self.tag = tag
self.data = np.zeros([metadata.npts, len(phase)])
def generate_label(self, database, tag, shape, half_width=20):
"""
Add generated label to stream.
:param str database: SQL database.
:param str tag: Pick tag in SQL database.
:param str shape: Label shape, see scipy.signal.windows.get_window().
:param int half_width: Label half width in data point.
:rtype: np.array
:return: Label.
"""
db = seisnn.sql.Client(database)
ph_index = {}
for i, phase in enumerate(self.phase):
ph_index[phase] = i
picks = db.get_picks(from_time=self.metadata.starttime.datetime,
to_time=self.metadata.endtime.datetime,
station=self.metadata.station,
phase=phase, tag=tag)
for pick in picks:
pick_time = obspy.UTCDateTime(
pick.time) - self.metadata.starttime
pick_time_index = int(pick_time / self.metadata.delta)
self.data[pick_time_index, i] = 1
if 'EQ' in self.phase:
# Make EQ window start by P and end by S.
self.data[:, ph_index['EQ']] = \
self.data[:, ph_index['P']] - self.data[:, ph_index['S']]
self.data[:, ph_index['EQ']] = \
np.cumsum(self.data[:, ph_index['EQ']])
if np.any(self.data[:, ph_index['EQ']] < 0):
self.data[:, ph_index['EQ']] += 1
for i, phase in enumerate(self.phase):
if not phase == 'EQ':
wavelet = scipy.signal.windows.get_window(
shape, 2 * half_width)
self.data[:, i] = scipy.signal.convolve(
self.data[:, i], wavelet[1:], mode='same')
if 'N' in self.phase:
# Make Noise window by 1 - P - S
self.data[:, ph_index['N']] = 1
self.data[:, ph_index['N']] -= self.data[:, ph_index['P']]
self.data[:, ph_index['N']] -= self.data[:, ph_index['S']]
return self
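    # Illustrative sketch of the label shaping used above (toy values; the
    # 'triang' window shape is an assumption, not a project default):
    #
    #   import numpy as np, scipy.signal
    #   spike = np.zeros(100); spike[50] = 1
    #   wavelet = scipy.signal.windows.get_window('triang', 40)
    #   label = scipy.signal.convolve(spike, wavelet[1:], mode='same')
    #   # 'label' now ramps up to 1 at index 50, mirroring generate_label()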
def get_picks(self, height=0.5, distance=100):
"""
Extract pick from label and write into the database.
:param float height: Height threshold, from 0 to 1, default is 0.5.
:param int distance: Distance threshold in data point.
"""
picks = []
for i, phase in enumerate(self.phase[0:2]):
peaks, _ = scipy.signal.find_peaks(
self.data[-1, :, i],
height=height,
distance=distance)
for peak in peaks:
if peak:
pick_time = obspy.UTCDateTime(self.metadata.starttime) \
+ peak * self.metadata.delta
picks.append(Pick(time=pick_time,
station=self.metadata.station,
phase=self.phase[i])
)
self.picks = picks
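    # Sketch of the peak extraction above (toy curve, assumed values):
    #
    #   import numpy as np, scipy.signal
    #   curve = np.array([0., 0.2, 0.9, 0.2, 0.])
    #   peaks, _ = scipy.signal.find_peaks(curve, height=0.5, distance=2)
    #   # peaks -> array([2]): one pick at sample index 2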
def write_picks_to_database(self, tag, database):
"""
Write picks into the database.
:param str tag: Output pick tag name.
:param database: SQL database name.
"""
db = seisnn.sql.Client(database)
for pick in self.picks:
db.add_pick(time=pick.time.datetime,
station=pick.station,
phase=pick.phase,
tag=tag,
snr=pick.snr)
class Pick:
"""
Main class for phase pick.
"""
def __init__(self,
time=None,
station=None,
phase=None,
tag=None,
snr=None):
self.time = time
self.station = station
self.phase = phase
self.tag = tag
self.snr = snr
class Instance:
"""
Main class for data transfer.
"""
metadata = None
trace = None
label = None
predict = None
def __init__(self, input_data=None):
if input_data is None:
pass
try:
if isinstance(input_data, obspy.Stream):
self.from_stream(input_data)
elif isinstance(input_data, seisnn.sql.Waveform):
dataset = seisnn.io.read_dataset(input_data.tfrecord)
for item in dataset.skip(input_data.data_index).take(1):
input_data = item
self.from_example(input_data)
else:
self.from_example(input_data)
except TypeError:
pass
except Exception as error:
print(f'{type(error).__name__}: {error}')
def __repr__(self):
return f"Instance(" \
f"ID={self.metadata.id}, " \
f"Start Time={self.metadata.starttime}, " \
f"Phase={self.label.phase})"
def from_stream(self, stream):
"""
Initialized from stream.
:param stream:
:return:
"""
self.trace = Trace(stream)
self.metadata = self.trace.metadata
return self
def from_feature(self, feature):
"""
Initialized from feature dict.
:param Feature feature: Feature dict.
"""
self.trace = Trace(feature)
self.metadata = self.trace.metadata
self.label = Label(self.metadata, feature.phase, tag='label')
self.label.data = feature.label
self.predict = Label(self.metadata, feature.phase, tag='predict')
self.predict.data = feature.predict
return self
def to_feature(self):
"""
Returns Feature object.
:rtype: Feature
:return: Feature object.
"""
feature = seisnn.example_proto.Feature()
feature.id = self.metadata.id
feature.station = self.metadata.station
feature.starttime = self.metadata.starttime.isoformat()
feature.endtime = self.metadata.endtime.isoformat()
feature.npts = self.metadata.npts
feature.delta = self.metadata.delta
feature.trace = self.trace.data
feature.channel = self.trace.channel
feature.phase = self.label.phase
feature.label = self.label.data
feature.predict = self.predict.data
return feature
def from_example(self, example):
"""
Initialized from example protocol.
:param example: Example protocol.
"""
feature = seisnn.example_proto.eval_eager_tensor(example)
self.from_feature(feature)
return self
def to_example(self):
"""
Returns example protocol.
:return: Example protocol.
"""
feature = self.to_feature()
example = seisnn.example_proto.feature_to_example(feature)
return example
def to_tfrecord(self, file_path):
"""
Write TFRecord to file path.
:param str file_path: Output path.
"""
feature = self.to_feature()
example = seisnn.example_proto.feature_to_example(feature)
seisnn.io.write_tfrecord([example], file_path)
def plot(self, **kwargs):
"""
Plot dataset.
:param kwargs: Keywords pass into plot.
"""
seisnn.plot.plot_dataset(self, **kwargs)
def get_tfrecord_name(self):
year = str(self.metadata.starttime.year)
julday = str(self.metadata.starttime.julday)
return f'{self.metadata.id[:-1]}.{year}.{julday}.tfrecord'
def get_tfrecord_dir(self, sub_dir):
"""
:param sub_dir: Sub TFRecord directory: 'train', 'test', 'eval'
:return: TFRecord directory
"""
config = seisnn.utils.Config()
name = self.get_tfrecord_name()
net, sta, loc, chan, year, julday, suffix = name.split('.')
sub_dir = getattr(config, sub_dir)
tfr_dir = os.path.join(sub_dir, year, net, sta)
return tfr_dir
if __name__ == "__main__":
pass
``` |
{
"source": "jimmy605/practices-of-the-python-pro",
"score": 4
} |
#### File: My_Code/ch02/playground.py
```python
import time
import datetime
now = time.time()
midnight = datetime.time()
def join_names(names):
name_string = ''
for i, name in enumerate(names):
if i > 0:
name_string += ', '
if i == len(names) - 1:
name_string += 'and '
name_string += name
return name_string
def introduce(title, names):
print(f'{title}: {join_names(names)}')
introduce('The Three Stooges', ['Moe', 'Larry', 'Shemp'])
introduce('The Three Stooges', ['Larry', 'Curly', 'Moe'])
introduce('Teenage Mutant Ninja Turtles',['Donatello', 'Raphael','Michelangelo', 'Leonardo'])
```
#### File: My_Code/ch02/sales_tax.py
```python
def add_sales_tax(total, tax_rate):
return total * tax_rate
```
#### File: My_Code/ch04/playground.py
```python
def has_duplicates(seq):
for index1, item1 in enumerate(seq):
        for index2, item2 in enumerate(seq):
if item1 == item2 and index1 != index2:
return True
return False
color_counts = {}
with open('all-favourite-colors.txt') as favourite_colors_file:
for color in favourite_colors_file:
color = color.strip()
if color in color_counts:
color_counts[color] += 1
else:
color_counts[color] = 1
all_colors = set()
with open('all-favourite-colors.txt') as favourite_colors_file:
for line in favourite_colors_file:
all_colors.add(line.strip())
def range(*args):
if len(args) == 1:
start = 0
stop = args[0]
else:
start = args[0]
stop = args[1]
current = start
while current < stop:
yield current
current += 1
def squares(lyst):
for num in lyst:
yield num ** 2
new_list = [1,2,3,4]
check = squares(new_list)
for num in check:
print(num)
``` |
{
"source": "jimmy646/violin",
"score": 2
} |
#### File: violin/model/ViolinBase.py
```python
import torch
from torch import nn
from .rnn import RNNEncoder
from .bidaf import BidafAttn
import pickle
class ViolinBase(nn.Module):
def __init__(self, opt):
super(ViolinBase, self).__init__()
hsize1 = opt.hsize1
hsize2 = opt.hsize2
embed_size = opt.embed_size
self.input_streams = opt.input_streams
self.lstm_raw = RNNEncoder(opt.embed_size, hsize1, bidirectional=True, dropout_p=0, n_layers=1, rnn_type='lstm')
self.lstm_mature_vid = RNNEncoder(hsize1 * 2 * 5, hsize2, bidirectional=True,
dropout_p=0, n_layers=1, rnn_type="lstm")
self.bert_fc = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(opt.embed_size, hsize1*2),
nn.Tanh()
)
if 'vid' in self.input_streams:
self.video_fc = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(opt.vid_feat_size, embed_size),
nn.Tanh()
)
self.vid_ctx_rnn = RNNEncoder(hsize1 * 2 * 3, hsize2, bidirectional=True, dropout_p=0, n_layers=1, rnn_type="lstm")
if 'sub' in self.input_streams:
self.sub_ctx_rnn = RNNEncoder(hsize1 * 2 * 3, hsize2, bidirectional=True, dropout_p=0, n_layers=1, rnn_type="lstm")
if len(self.input_streams) > 0:
self.bidaf = BidafAttn(hsize1 * 2, method="dot")
self.final_fc = nn.Sequential(
nn.Linear(hsize2*2, hsize2),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(hsize2, 1),
nn.Sigmoid()
)
else:
self.final_fc = nn.Sequential(
nn.Linear(hsize1*2, hsize2),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(hsize2, 1),
nn.Sigmoid()
)
def max_along_time(self, outputs, lengths):
max_outputs = [outputs[i, :int(lengths[i]), :].max(dim=0)[0] for i in range(len(lengths))]
ret = torch.stack(max_outputs, dim=0)
assert ret.size() == torch.Size([outputs.size()[0], outputs.size()[2]])
return ret
def forward(self, vid_input, sub_input, state_input):
final_vectors = []
state_hidden, state_lens = state_input
state_encoded = self.bert_fc(state_hidden)
#print(type(state_lens))
if 'vid' in self.input_streams:
vid_feat, vid_lens = vid_input
vid_projected = self.video_fc(vid_feat)
vid_encoded, _ = self.lstm_raw(vid_projected, vid_lens)
u_va, _ = self.bidaf(state_encoded, state_lens, vid_encoded, vid_lens)
concat_vid = torch.cat([state_encoded, u_va, state_encoded*u_va], dim=-1)
vec_vid = self.vid_ctx_rnn(concat_vid, state_lens)[1]
final_vectors.append(vec_vid)
if 'sub' in self.input_streams:
sub_hidden, sub_lens = sub_input
sub_encoded = self.bert_fc(sub_hidden)
u_sa, _ = self.bidaf(state_encoded, state_lens, sub_encoded, sub_lens)
concat_sub = torch.cat([state_encoded, u_sa, state_encoded*u_sa], dim=-1)
vec_sub = self.sub_ctx_rnn(concat_sub, state_lens)[1]
final_vectors.append(vec_sub)
if len(self.input_streams) == 0:
maxout_state = self.max_along_time(state_encoded, state_lens)
final_vectors.append(maxout_state)
if len(self.input_streams) < 2:
return self.final_fc(torch.cat(final_vectors, dim=1))
else:
concat_all = torch.cat([state_encoded, u_va, u_sa, state_encoded*u_va, state_encoded*u_sa], dim=-1)
vec_all = self.lstm_mature_vid(concat_all, state_lens)[1]
return self.final_fc(vec_all)
``` |
{
"source": "jimmy646/XML-CNN",
"score": 3
} |
#### File: XML-CNN/code/train.py
```python
import os
import argparse
import numpy as np
import timeit
import data_helpers
from cnn_model import CNN_model
def load_data(args):
X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv = data_helpers.load_data(args.data_path, max_length=args.sequence_length, vocab_size=args.vocab_size)
X_trn = X_trn.astype(np.int32)
X_tst = X_tst.astype(np.int32)
Y_trn = Y_trn.astype(np.int32)
Y_tst = Y_tst.astype(np.int32)
return X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv
def gen_model_file(args):
data_name = args.data_path.split('/')[-2]
fs_string = '-'.join([str(fs) for fs in args.filter_sizes])
file_name = 'data-%s_sl-%d_ed-%d_fs-%s_nf-%d_pu-%d_pt-%s_hd-%d_bs-%d_model-%s_pretrain-%s' % \
(data_name, args.sequence_length, args.embedding_dim,
fs_string, args.num_filters, args.pooling_units,
args.pooling_type, args.hidden_dims, args.batch_size,
args.model_variation, args.pretrain_type)
return file_name
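# For illustration only: with the default command-line arguments defined below,
# gen_model_file would produce a model directory name roughly like
#   data-sample_data_sl-500_ed-300_fs-2-4-8_nf-32_pu-32_pt-max_hd-512_bs-256_model-pretrain_pretrain-glove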
def main(args):
print('-'*50)
print('Loading data...'); start_time = timeit.default_timer();
X_trn, Y_trn, X_tst, Y_tst, vocabulary, vocabulary_inv = load_data(args)
print('Process time %.3f (secs)\n' % (timeit.default_timer() - start_time))
# Building model
# ==================================================
print('-'*50)
print("Building model..."); start_time = timeit.default_timer();
model = CNN_model(args)
model.model_file = os.path.join('./CNN_runtime_models', gen_model_file(args))
if not os.path.isdir(model.model_file):
os.makedirs(model.model_file)
else:
print('Warning: model file already exist!\n %s' % (model.model_file))
model.add_data(X_trn, Y_trn)
model.add_pretrain(vocabulary, vocabulary_inv)
model.build_train()
print('Process time %.3f (secs)\n' % (timeit.default_timer() - start_time))
# Training model
# ==================================================
print('-'*50)
print("Training model..."); start_time = timeit.default_timer();
store_params_time = 0.0;
for epoch_idx in xrange(args.num_epochs + 1):
loss = model.train()
print 'Iter:', epoch_idx, 'Trn loss ', loss
if epoch_idx % 5 == 0:
print 'saving model...'; tmp_time = timeit.default_timer();
model.store_params(epoch_idx)
store_params_time += timeit.default_timer() - tmp_time
total_time = timeit.default_timer() - start_time
print('Total time %.4f (secs), training time %.4f (secs), IO time %.4f (secs)' \
% (total_time, total_time - store_params_time, store_params_time))
if __name__ == '__main__':
# Parameters
# ==================================================
# Model Variations. See Kim Yoon's Convolutional Neural Networks for
# Sentence Classification, Section 3 for detail.
parser = argparse.ArgumentParser()
parser.add_argument('--data_path',
help='raw data path in CPickle format', type=str,
default='../sample_data/rcv1_raw_small.p')
parser.add_argument('--sequence_length',
help='max sequence length of a document', type=int,
default=500)
parser.add_argument('--embedding_dim',
help='dimension of word embedding representation', type=int,
default=300)
parser.add_argument('--filter_sizes',
help='number of filter sizes (could be a list of integer)', type=int,
default=[2, 4, 8], nargs='+')
parser.add_argument('--num_filters',
help='number of filters (i.e. kernels) in CNN model', type=int,
default=32)
parser.add_argument('--pooling_units',
help='number of pooling units in 1D pooling layer', type=int,
default=32)
parser.add_argument('--pooling_type',
help='max or average', type=str,
default='max')
parser.add_argument('--hidden_dims',
help='number of hidden units', type=int,
default=512)
parser.add_argument('--model_variation',
help='model variation: CNN-rand or CNN-pretrain', type=str,
default='pretrain')
parser.add_argument('--pretrain_type',
help='pretrain model: GoogleNews or glove', type=str,
default='glove')
parser.add_argument('--batch_size',
help='number of batch size', type=int,
default=256)
parser.add_argument('--num_epochs',
                        help='number of epochs for training', type=int,
default=50)
parser.add_argument('--vocab_size',
help='size of vocabulary keeping the most frequent words', type=int,
default=30000)
args = parser.parse_args()
main(args)
``` |
{
"source": "Jimmy880/Sound-of-Pixels-dev",
"score": 2
} |
#### File: Sound-of-Pixels-dev/dataset/__init__.py
```python
from .music import *
from .music import RAWDataset, STFTDataset
def warpgrid(bs, HO, WO, warp=True):
import numpy as np
# meshgrid
x = np.linspace(-1, 1, WO)
y = np.linspace(-1, 1, HO)
xv, yv = np.meshgrid(x, y)
grid = np.zeros((bs, HO, WO, 2))
grid_x = xv
if warp:
grid_y = (np.power(21, (yv+1)/2) - 11) / 10
else:
grid_y = np.log(yv * 10 + 11) / np.log(21) * 2 - 1
grid[:, :, :, 0] = grid_x
grid[:, :, :, 1] = grid_y
grid = grid.astype(np.float32)
return grid
def compute_mask(mags, mag_mix, is_binary_mask=False):
gt_masks = []
for n in range(len(mags)):
if is_binary_mask:
# for simplicity, mag_N > 0.5 * mag_mix
gt_masks.append((mags[n] > 0.5 * mag_mix).float())
else:
gt_mask = mags[n] / mag_mix
# clamp to avoid large numbers in ratio masks
gt_masks.append(gt_mask.clamp(0., 5.))
return gt_masks
def process_mag(mags=None, mag_mix=None, device='cpu'):
import torch
import torch.nn.functional as F
if mag_mix is not None:
B = mag_mix.size(0)
T = mag_mix.size(3)
elif mags is not None:
B = mags[0].size(0)
T = mags[0].size(3)
else: return
grid_warp = torch.from_numpy(
warpgrid(B, 256, T, warp=True)).to(device)
if mags is not None:
for n in range(len(mags)):
mags[n] = F.grid_sample(mags[n], grid_warp)
if mag_mix is not None:
mag_mix = F.grid_sample(mag_mix, grid_warp)
return mags, mag_mix
def compute_weight(mag_mix, weighted_loss=False):
import torch
# 0.1 calculate loss weighting coefficient: magnitude of input mixture
if weighted_loss:
weight = torch.log1p(mag_mix)
weight = torch.clamp(weight, 1e-3, 10)
else:
weight = torch.ones_like(mag_mix)
return weight
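# Hedged usage sketch combining the helpers above (variable names and the
# final loss line are assumptions, not the project's fixed API):
#
#   mags, mag_mix = process_mag(mags, mag_mix, device)
#   gt_masks = compute_mask(mags, mag_mix, is_binary_mask=True)
#   weight = compute_weight(mag_mix, weighted_loss=True)
#   # e.g. loss = (weight * per_pixel_criterion(pred_masks, gt_masks)).mean()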
```
#### File: Jimmy880/Sound-of-Pixels-dev/main.py
```python
import os
# Numerical libs
import torch
# Our libs
from arguments import ArgParser
from dataset import STFTDataset, RAWDataset
from models import ModelBuilder, activate
from utils import makedirs
# Network wrapper, defines forward pass
class NetWrapper(torch.nn.Module):
def __init__(self, nets, crit, ckpt=None):
super(NetWrapper, self).__init__()
self.net_sound, self.net_frame = nets
self.crit = crit
if ckpt is not None:
self.net_sound.load_state_dict(ckpt['sound'])
self.net_frame.load_state_dict(ckpt['frame'])
def forward(self, batch_data, args):
audio_mix = batch_data['audio_mix'] # B, audio_len
audios = batch_data['audios'] # num_mix, B, audio_len
frames = batch_data['frames'] # num_mix, B, xxx
N = args.num_mix
B = audio_mix.size(0)
# 2. forward net_frame -> Bx1xC
feat_frames = [None for n in range(N)]
for n in range(N):
feat_frames[n] = self.net_frame.forward_multiframe(frames[n])
feat_frames[n] = activate(feat_frames[n], args.img_activation)
# 3. sound synthesizer
pred_audios = [None for n in range(N)]
for n in range(N):
# pred_masks[n] = self.net_synthesizer(feat_frames[n], feat_sound)
# pred_masks[n] = activate(pred_masks[n], args.output_activation)
pred_audios[n] = self.net_sound(audio_mix, feat_frames[n])
activate(pred_audios[n], args.sound_activation)
# 4. loss
err = self.crit(pred_audios, audios).reshape(1)
# print("\"", self.crit([audio_mix, audio_mix], audios).item(), self.crit(audios, audios).item(), err.item(),"\"")
return err, pred_audios # or masks
def create_optimizer(nets, args, checkpoint):
(net_sound, net_frame) = nets
param_groups = [{'params': net_sound.parameters(), 'lr': args.lr_sound},
{'params': net_frame.features.parameters(), 'lr': args.lr_frame},
{'params': net_frame.fc.parameters(), 'lr': args.lr_sound}]
# optimizer = torch.optim.SGD(param_groups, momentum=args.beta1, weight_decay=args.weight_decay)
optimizer = torch.optim.Adam(param_groups, weight_decay=args.weight_decay)
if checkpoint is not None and args.resume_optim:
optimizer.load_state_dict(checkpoint['optimizer'])
return optimizer
def adjust_learning_rate(optimizer, args):
args.lr_sound *= 0.1
args.lr_frame *= 0.1
args.lr_synthesizer *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
def main(args):
# Network Builders
builder = ModelBuilder()
net_sound = builder.build_sound(
arch=args.arch_sound,
fc_dim=args.num_channels,)
net_frame = builder.build_frame(
arch=args.arch_frame,
fc_dim=args.num_channels,
pool_type=args.img_pool,)
nets = (net_sound, net_frame)
crit = builder.build_criterion(arch=args.loss)
# Dataset and Loader
dataset_train = RAWDataset(
args.list_train, args, split='train')
dataset_val = STFTDataset(
args.list_val, args, max_sample=args.num_val, split='val')
loader_train = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=int(args.workers),
drop_last=True)
loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size,
shuffle=False,
num_workers=2,
drop_last=False)
args.epoch_iters = len(loader_train)
args.disp_iter = len(loader_train) // args.disp_iter
print('1 Epoch = {} iters'.format(args.epoch_iters))
# Wrap networks
netWrapper = NetWrapper(nets, crit, checkpoint)
netWrapper = torch.nn.DataParallel(netWrapper, device_ids=range(args.num_gpus))
netWrapper.to(args.device)
# Set up optimizer
optimizer = create_optimizer(nets, args, checkpoint)
    # History of performance
history = {
'train': {'epoch': [], 'err': []},
'val': {'epoch': [], 'err': [], 'sdr': [], 'sir': [], 'sar': []}
} if checkpoint is None else checkpoint['history']
from epoch import train, evaluate
# Eval mode
# evaluate(netWrapper, loader_val, history, 0, args)
# if args.mode == 'eval':
# print('Evaluation Done!')
# return
# Training loop
init_epoch = 1 if checkpoint is None else checkpoint['epoch']
print('Training start at ', init_epoch)
for epoch in range(1, args.num_epoch + 1):
train(netWrapper, loader_train, optimizer, history, epoch, args)
# Evaluation and visualization
if epoch % args.eval_epoch == 0:
evaluate(netWrapper, loader_val, history, epoch, args)
# checkpointing
from utils import save_checkpoint
save_checkpoint(nets, history, optimizer, epoch, args)
# drop learning rate
if epoch in args.lr_steps:
adjust_learning_rate(optimizer, args)
print('Training Done!')
if __name__ == '__main__':
# arguments
parser = ArgParser()
args = parser.parse_train_arguments()
args.batch_size = args.num_gpus * args.batch_size_per_gpu
args.device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
# experiment name
if args.mode == 'train':
args.id += '-{}mix'.format(args.num_mix)
if args.log_freq:
args.id += '-LogFreq'
args.id += '-{}-{}-{}'.format(
args.arch_frame, args.arch_sound, args.arch_synthesizer)
args.id += '-frames{}stride{}'.format(args.num_frames, args.stride_frames)
args.id += '-{}'.format(args.img_pool)
args.id += '-' + args.loss
if args.weighted_loss:
args.id += '-weightedLoss'
args.id += '-channels{}'.format(args.num_channels)
args.id += '-epoch{}'.format(args.num_epoch)
# args.id += '-step' + '_'.join([str(x) for x in args.lr_steps])
print('Model ID: {}'.format(args.id))
# paths to save/load output
args.ckpt = os.path.join(args.ckpt, args.id)
args.vis = os.path.join(args.ckpt, 'visualization/')
if args.mode == 'eval' or args.resume:
try:
checkpoint = torch.load(os.path.join(args.ckpt, 'best.pth'), map_location='cpu')
# checkpoint = os.path.join(args.ckpt, 'lastest.pth')
print('Loaded', args.ckpt)
except:
print('Load model failed')
checkpoint = None
elif args.mode == 'train':
makedirs(args.ckpt, remove=True)
checkpoint = None
else: raise ValueError
# initialize best error with a big number
args.best_err = float("inf")
from utils import set_seed
set_seed(args.seed)
    main(args)
``` |
{
"source": "jimmy-academia/Deeper-Learnings",
"score": 2
} |
#### File: hw3/src/run.py
```python
from thrift.transport import TSocket,TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor
from hbase.ttypes import Mutation
import csv
import os
import time
import logging
from tqdm import tqdm
# table: station, column: attr, row: date
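# For a CSV row like the sample shown further below,
#   ['2018/01/02', 'iilan', 'NOx', '5.1', '4.4', ...]
# the mutateRow loop in main() stores cells such as
#   row '2018/01/02', column 'NOx:1' -> '5.1', column 'NOx:2' -> '4.4', ...
# in the table named after the station (illustration only).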
def main():
socket = TSocket.TSocket('1192.168.3.11',9090)
socket.setTimeout(5000)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Hbase.Client(protocol)
socket.open()
table_list = client.getTableNames()
start = time.time()
logging.basicConfig(format='%(asctime)s | %(levelname)s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Initiating task: Taiwan Air Quality!')
Attributes = ['AMB_TEMP','CO','NO','NO2','NOx','O3','PM10','PM2.5','RAINFALL','RAIN_COND','UVB',
'RH','SO2','WD_HR','WIND_DIREC','WIND_SPEED','WS_HR','CH4','NMHC','THC','PH_RAIN']
csvfiles = [filename for filename in os.listdir(os.getcwd()) if filename.endswith('.csv')]
logging.info(str(csvfiles))
InsertCounts = 0
for file in csvfiles:
with open(file, newline='') as f:
frames = csv.reader(f)
table_Name = ''
logging.info("Start reading {0}".format(file))
Column_Descriptors = []
ctr = 0
# length = sum(1 for row in frames)
#
# for frame in tqdm(frames, total=length):
for frame in tqdm(frames):
if ctr == 0:
ctr += 1
continue
elif ctr == 1:
ctr += 1
table_Name = str(str.encode(frame[1],'utf-8')).replace('\\',"")
table_Name = table_Name.replace("b","")
table_Name = table_Name.replace("'","")
if table_Name not in table_list:
for type in Attributes:
Column_Descriptors.append(ColumnDescriptor(name=type))
client.createTable(table_Name,Column_Descriptors)
logging.info('Build Table : {0}'.format(table_Name))
else:
logging.info('Table {0} already exist, no need to create'.format(table_Name))
# ['2018/01/02', 'iilan', 'NOx', '5.1', '4.4', '3.5', '2.1', '2.5', '3.2', '4.6', '15',
# '13', '11', '7', '6.8', '7.1', '13', '13', '12', '13', '16', '24', '23', '20', '24', '18', '13']
for i in range(3,26):
qualifier = i-2
value = frame[i]
row = frame[0] # date
column = frame[2] # attr
mutate = Mutation(column=column+':'+str(qualifier),value=value)
client.mutateRow(table_Name, frame[0], [mutate])
InsertCounts += 1
end = time.time()
logging.info("================Insert Done================\n")
logging.info("totalInsertCount: {0}, totalTimeSpend: {1}\n".format(InsertCounts,end-start))
logging.info(client.getTableNames())
if __name__ == '__main__':
main()
```
#### File: NaturalLanguage/datapreprocess/embedding.py
```python
import re
import torch
class Embedding:
"""
Args:
embedding_path (str): Path where embedding are loaded from (text file).
words (None or list): If not None, only load embedding of the words in
the list.
        oov_as_unk (bool): If the argument `words` is provided, whether or not
            to treat words in `words` but not in the embedding file as `<unk>`.
            If true, OOV words will be mapped to the index of `<unk>`.
            Otherwise, embeddings of those OOV words will be randomly
            initialized and their indices will come after the non-OOV words.
lower (bool): Whether or not lower the words.
rand_seed (int): Random seed for embedding initialization.
"""
def __init__(self, embedding_path,
words=None, oov_as_unk=True, lower=True, rand_seed=524):
self.word_dict = {}
self.vectors = None
self.lower = lower
self.extend(embedding_path, words, oov_as_unk)
torch.manual_seed(rand_seed)
if '</s>' not in self.word_dict:
self.add(
'</s>', torch.zeros(self.get_dim())
)
if '<unk>' not in self.word_dict:
self.add('<unk>')
def to_index(self, word):
"""
word (str)
Return:
index of the word. If the word is not in `words` and not in the
embedding file, then index of `<unk>` will be returned.
"""
if self.lower:
word = word.lower()
if word not in self.word_dict:
return self.word_dict['<unk>']
else:
return self.word_dict[word]
def get_dim(self):
return self.vectors.shape[1]
def get_vocabulary_size(self):
return self.vectors.shape[0]
def add(self, word, vector=None):
if self.lower:
word = word.lower()
if vector is not None:
vector = vector.view(1, -1)
else:
vector = torch.empty(1, self.get_dim())
torch.nn.init.uniform_(vector)
self.vectors = torch.cat([self.vectors, vector], 0)
self.word_dict[word] = len(self.word_dict)
def extend(self, embedding_path, words, oov_as_unk=True):
self._load_embedding(embedding_path, words)
if words is not None and not oov_as_unk:
# initialize word vector for OOV
for word in words:
if self.lower:
word = word.lower()
if word not in self.word_dict:
self.word_dict[word] = len(self.word_dict)
oov_vectors = torch.nn.init.uniform_(
torch.empty(len(self.word_dict) - self.vectors.shape[0],
self.vectors.shape[1]))
self.vectors = torch.cat([self.vectors, oov_vectors], 0)
def _load_embedding(self, embedding_path, words):
if words is not None:
words = set(words)
vectors = []
with open(embedding_path) as fp:
row1 = fp.readline()
# if the first row is not header
if not re.match('^[0-9]+ [0-9]+$', row1):
# seek to 0
fp.seek(0)
# otherwise ignore the header
for i, line in enumerate(fp):
cols = line.rstrip().split(' ')
word = cols[0]
# skip word not in words if words are provided
if words is not None and word not in words:
continue
elif word not in self.word_dict:
self.word_dict[word] = len(self.word_dict)
vectors.append([float(v) for v in cols[1:]])
vectors = torch.tensor(vectors)
if self.vectors is not None:
self.vectors = torch.cat([self.vectors, vectors], dim=0)
else:
self.vectors = vectors
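# Minimal usage sketch (the embedding path and word list are assumptions):
#
#   emb = Embedding('glove.840B.300d.txt', words=['hello', 'world'])
#   idx = emb.to_index('hello')      # row index into emb.vectors
#   unk = emb.to_index('zzz_oov')    # unknown words map to the '<unk>' index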
```
#### File: NaturalLanguage/datapreprocess/preprocessor.py
```python
import nltk
import json
import logging
from multiprocessing import Pool
from dataset import DialogDataset
from tqdm import tqdm
class Preprocessor:
"""
Args:
embedding_path (str): Path to the embedding to use.
"""
def __init__(self, embedding):
self.embedding = embedding
self.logging = logging.getLogger(name=__name__)
def tokenize(self, sentence):
""" Tokenize a sentence.
Args:
sentence (str): One string.
Return:
            tokens (list of str): List of tokens in a sentence.
"""
# TODO \(\) or other tokenize consideration
return nltk.word_tokenize(sentence)
# return sentence.split()
def sentence_to_indices(self, sentence):
""" Convert sentence to its word indices.
Args:
sentence (str): One string.
Return:
indices (list of int): List of word indices.
"""
# TODO
# Hint: You can use `self.embedding`
# sentence = sentence.split()
sentence = nltk.word_tokenize(sentence)
return [self.embedding.to_index(i) for i in sentence]
def collect_words(self, data_path, n_workers=1):
with open(data_path) as f:
data = json.load(f)
utterances = []
for sample in data:
utterances += (
[message['utterance']
for message in sample['messages-so-far']]
+ [option['utterance']
for option in sample['options-for-next']]
)
utterances = list(set(utterances))
chunks = [
' '.join(utterances[i:i + len(utterances) // n_workers])
for i in range(0, len(utterances), len(utterances) // n_workers)
]
with Pool(n_workers) as pool:
chunks = pool.map_async(self.tokenize, chunks)
words = set(sum(chunks.get(), []))
return words
def get_dataset(self, data_path, n_workers=4, dataset_args={}):
""" Load data and return Dataset objects for training and validating.
Args:
data_path (str): Path to the data.
valid_ratio (float): Ratio of the data to used as valid data.
"""
self.logging.info('loading dataset...')
with open(data_path) as f:
dataset = json.load(f)
self.logging.info('preprocessing data...')
results = [None] * n_workers
with Pool(processes=n_workers) as pool:
for i in range(n_workers):
batch_start = (len(dataset) // n_workers) * i
if i == n_workers - 1:
batch_end = len(dataset)
else:
batch_end = (len(dataset) // n_workers) * (i + 1)
batch = dataset[batch_start: batch_end]
results[i] = pool.apply_async(self.preprocess_samples, [batch])
# When debugging, you'd better not use multi-thread.
# results[i] = self.preprocess_dataset(batch, preprocess_args)
pool.close()
pool.join()
processed = []
for result in results:
processed += result.get()
padding = self.embedding.to_index('</s>')
return DialogDataset(processed, padding=padding, **dataset_args)
def preprocess_samples(self, dataset):
""" Worker function.
Args:
dataset (list of dict)
Returns:
list of processed dict.
"""
processed = []
for sample in tqdm(dataset):
processed.append(self.preprocess_sample(sample))
return processed
def preprocess_sample(self, data):
"""
Args:
data (dict)
Returns:
dict
"""
processed = {}
processed['id'] = data['example-id']
# process messages-so-far
processed['context'] = []
processed['speaker'] = []
for message in data['messages-so-far']:
sentence = self.sentence_to_indices(message['utterance'].lower())
processed['context'].append(sentence)
processed['speaker'].append(int(message['speaker'][-1]))
# process options
processed['options'] = []
processed['option_ids'] = []
# process correct options
if 'options-for-correct-answers' in data:
processed['n_corrects'] = len(data['options-for-correct-answers'])
for option in data['options-for-correct-answers']:
processed['options'].append(
self.sentence_to_indices(option['utterance'].lower())
)
processed['option_ids'].append(option['candidate-id'])
else:
processed['n_corrects'] = 0
# process the other options
for option in data['options-for-next']:
if option['candidate-id'] in processed['option_ids']:
continue
processed['options'].append(
self.sentence_to_indices(option['utterance'].lower())
)
processed['option_ids'].append(option['candidate-id'])
return processed
```
#### File: NaturalLanguage/module/bestmodel.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BestNet(torch.nn.Module):
def __init__(self, embedding_dim):
super(BestNet, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = 256
self.embedding_dropout=0.6
self.desc_rnn_size = 100
self.rnn = nn.GRU(
input_size=self.embedding_dim, hidden_size=self.hidden_dim,
num_layers=1, batch_first=True, bidirectional=True
)
self.rnn_desc = nn.GRU(
input_size=self.embedding_dim, hidden_size=self.desc_rnn_size,
num_layers=1, batch_first=True, bidirectional=True
)
self.emb_drop = nn.Dropout(self.embedding_dropout)
self.M = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, 2*self.hidden_dim))
self.b = nn.Parameter(torch.FloatTensor([0]))
self.Wc = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, self.embedding_dim))
self.We = nn.Parameter(torch.FloatTensor(self.embedding_dim, self.embedding_dim))
self.attn = nn.Linear(2*self.hidden_dim, 2*self.hidden_dim)
self.init_params_()
self.tech_w = 0.0
def init_params_(self):
#Initializing parameters
nn.init.xavier_normal_(self.M)
# Set forget gate bias to 2
size = self.rnn.bias_hh_l0.size(0)
self.rnn.bias_hh_l0.data[size//4:size//2] = 2
size = self.rnn.bias_ih_l0.size(0)
self.rnn.bias_ih_l0.data[size//4:size//2] = 2
size = self.rnn_desc.bias_hh_l0.size(0)
self.rnn_desc.bias_hh_l0.data[size//4:size//2] = 2
size = self.rnn_desc.bias_ih_l0.size(0)
self.rnn_desc.bias_ih_l0.data[size//4:size//2] = 2
# def forward(self, context, options):
# logits = []
# for i, option in enumerate(options.transpose(1, 0)):
# gits = []
# for context in context.transpose(1,0):
# git = self.forward_one_option(context, option)
# gits.append(logit)
# logit = torch.stack(gits).mean(0)
# logits = torch.stack(logits, 1)
# return logits.squeeze()
# def forward(self, context, options):
# logits = []
# for i, option in enumerate(options.transpose(1, 0)):
# logit = self.forward_one_option(context, option)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# return logits.squeeze()
def forward(self, context, options):
logits = []
for i, option in enumerate(options.transpose(1, 0)):
logit_ = []
for utter in context.transpose(1,0):
logit = self.forward_one_option(utter, option) # 10,1,1
logit_.append(logit)
logits.append(torch.stack(logit_,1).mean(1))
logits = torch.stack(logits, 1)
return logits.squeeze()
def forward_one_option(self, context, option):
context, c_h, option, o_h = self.forward_crosspath(context, option)
context_attn = self.forward_attn(context, o_h)
option_attn = self.forward_attn(option, c_h)
final = self.forward_fc(context_attn, option_attn)
return final
def forward_crosspath(self, context, option):
context, c_h = self.rnn(self.emb_drop(context))
c_h = torch.cat([i for i in c_h], dim=-1)
option, o_h = self.rnn(self.emb_drop(option))
o_h = torch.cat([i for i in o_h], dim=-1)
return context, c_h.squeeze(), option, o_h.squeeze()
def forward_attn(self, output, hidden):
max_len = output.size(1)
b_size = output.size(0)
hidden = hidden.squeeze(0).unsqueeze(2)
attn = self.attn(output.contiguous().view(b_size*max_len, -1))
attn = attn.view(b_size, max_len, -1)
attn_energies = (attn.bmm(hidden).transpose(1,2))
alpha = F.softmax(attn_energies.squeeze(1), dim=-1)
alpha = alpha.unsqueeze(1)
weighted_attn = alpha.bmm(output)
return weighted_attn.squeeze()
def forward_fc(self, context, option):
out = torch.mm(context, self.M).unsqueeze(1)
out = torch.bmm(out, option.unsqueeze(2))
out = out + self.b
return out
def save(self, filepath):
torch.save(self.state_dict(), filepath)
```
#### File: NaturalLanguage/module/metrics.py
```python
import torch
class Metrics:
def __init__(self):
self.name = 'Metric Name'
def reset(self):
pass
def update(self, predicts, batch):
pass
def get_score(self):
pass
class Recall(Metrics):
"""
Args:
ats (int): @ to eval.
rank_na (bool): whether to consider no answer.
"""
def __init__(self, at=10):
self.at = at
self.n = 0
        self.n_corrects = 0  # keep the attribute name used by update() and get_score()
self.name = 'Recall@{}'.format(at)
def reset(self):
self.n = 0
self.n_corrects = 0
def update(self, predicts, batch):
"""
Args:
predicts (FloatTensor): with size (batch, n_samples).
batch (dict): batch.
"""
predicts = predicts.cpu()
self.n += len(predicts)
predicts = torch.argmax(predicts, dim=1)
targets = torch.argmax(batch['labels'], dim=1)
self.n_corrects += (predicts==targets).sum().item()
# print(predicts)
# print(targets)
# print(self.n_corrects, self.n)
# TODO
# This method will be called for each batch.
# You need to
# - increase self.n, which implies the total number of samples.
# - increase self.n_corrects based on the prediction and labels
# of the batch.
def get_score(self):
return self.n_corrects / self.n
def print_score(self):
score = self.get_score()
return '{:.2f}'.format(score)
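# Hedged usage sketch (toy tensors, for illustration only): predictions and
# labels are (batch, n_options) matrices; the metric counts argmax agreement.
#
#   metric = Recall(at=10)
#   metric.reset()
#   predicts = torch.rand(4, 100)
#   batch = {'labels': torch.eye(100)[:4]}
#   metric.update(predicts, batch)
#   print(metric.name, metric.print_score())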
```
#### File: NaturalLanguage/module/model.py
```python
import torch
from torch.autograd import Variable
class LinearNet(torch.nn.Module):
def __init__(self, dim_embeddings):
super(LinearNet, self).__init__()
self.mlp = torch.nn.Sequential(
torch.nn.Linear(dim_embeddings, 256),
torch.nn.ReLU(),
torch.nn.Linear(256, 256)
)
def forward(self, context, options):
context = self.mlp(context).max(1)[0]
logits = []
for i, option in enumerate(options.transpose(1, 0)):
option = self.mlp(option).max(1)[0]
logit = ((context - option) ** 2).sum(-1)
logits.append(logit)
logits = torch.stack(logits, 1)
return logits
def save(self, filepath):
torch.save(self.state_dict(), filepath)
class GruNet(torch.nn.Module):
def __init__(self, dim_embeddings):
super(GruNet, self).__init__()
self.C_process = torch.nn.GRU(256, 256, 1, batch_first=True)
self.context_word = torch.nn.GRU(300, 256, 1, batch_first=True)
self.P_process = torch.nn.GRU(300, 256, 1, batch_first=True)
self.W = torch.nn.Parameter(data=torch.rand(256, 256), requires_grad=True)
def forward(self, context, options):
## context 10, 14, 30, 300
context_vec = []
for utterance in context.transpose(1,0): # 10, 30, 300
utter_vec, __ = self.context_word(utterance) # 10, 30, 256
utter_vec = utter_vec.max(1)[0] # 10, 256
context_vec.append(utter_vec)
context_vec = torch.stack(context_vec, 1) # 10, 14, 256
context, __ = self.C_process(context_vec) # 10, 14, 256
## context 10, 30, 300; options 10, 100, 50, 300
# context, __ = self.C_process(context) ## 10,30,256
context = context.max(1)[0] # 10,256
logits = []
for i, option in enumerate(options.transpose(1, 0)): # 100, 10, 50, 300
option, __ = self.P_process(option) #10,50,300 -> 10,50, 256
option = option.max(1)[0] #10, 256
logit = context.matmul(self.W).matmul(option.transpose(1,0)).sum(-1)
logits.append(logit)
logits = torch.stack(logits, 1)
return logits
def save(self, filepath):
torch.save(self.state_dict(), filepath)
class LastNet(torch.nn.Module):
def __init__(self, dim_embeddings):
super(LastNet, self).__init__()
self.C_process = torch.nn.LSTM(300, 256, 1, batch_first=True)
# self.context_word = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.P_process = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.W = torch.nn.Parameter(data=torch.rand(256, 256), requires_grad=True)
def forward(self, context, options):
## context 10, 30, 300; options 10, 100, 50, 300
context, __ = self.C_process(context) ## 10,30,256
context = context.max(1)[0] # 10,256
logits = []
for i, option in enumerate(options.transpose(1, 0)): # 100, 10, 50, 300
option, __ = self.P_process(option) #10,50,300 -> 10,50, 256
option = option.max(1)[0] #10, 256
logit = context.matmul(self.W).matmul(option.transpose(1,0)).sum(-1)
logits.append(logit)
logits = torch.stack(logits, 1)
return logits
def save(self, filepath):
torch.save(self.state_dict(), filepath)
class RnnNet(torch.nn.Module):
def __init__(self, dim_embeddings):
super(RnnNet, self).__init__()
self.C_process = torch.nn.LSTM(256, 256, 1, batch_first=True)
self.context_word = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.P_process = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.W = torch.nn.Parameter(data=torch.rand(256, 256), requires_grad=True)
def forward(self, context, options):
## context 10, 14, 30, 300
context_vec = []
for utterance in context.transpose(1,0): # 10, 30, 300
utter_vec, __ = self.context_word(utterance) # 10, 30, 256
utter_vec = utter_vec.max(1)[0] # 10, 256
context_vec.append(utter_vec)
context_vec = torch.stack(context_vec, 1) # 10, 14, 256
context, __ = self.C_process(context_vec) # 10, 14, 256
## context 10, 30, 300; options 10, 100, 50, 300
# context, __ = self.C_process(context) ## 10,30,256
context = context.max(1)[0] # 10,256
logits = []
for i, option in enumerate(options.transpose(1, 0)): # 100, 10, 50, 300
option, __ = self.P_process(option) #10,50,300 -> 10,50, 256
option = option.max(1)[0] #10, 256
logit = context.matmul(self.W).matmul(option.transpose(1,0)).sum(-1)
logits.append(logit)
logits = torch.stack(logits, 1)
return logits
def save(self, filepath):
torch.save(self.state_dict(), filepath)
class DecAttn(torch.nn.Module):
"""docstring for DecAttn"""
def __init__(self):
super(DecAttn, self).__init__()
self.lstm = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.softmax = torch.nn.Softmax(dim=1)
self.attn = torch.nn.Linear(256, 256)
self.final = torch.nn.LSTM(256+256, 256, 1, batch_first=True)
def forward(self, context, option):
"""
context: 10,30, 256
option: 10, 50, 256
attn_weight: 10, 50, 30
attn_context_sum: 10, 50, 256
"""
option, __ = self.lstm(option)
attn_context = self.attn(context)
attn_weight = self.softmax(option.bmm(attn_context.transpose(1,2)))
attn_context_sum = attn_weight.bmm(context)
complete = torch.cat([option, attn_context_sum], 2)
final, __ = self.final(complete)
return final
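# Shape sanity check for DecAttn (toy tensors, assumed sizes):
#
#   dec = DecAttn()
#   ctx = torch.rand(10, 30, 256)   # encoded context
#   opt = torch.rand(10, 50, 300)   # raw option embeddings
#   out = dec(ctx, opt)             # -> torch.Size([10, 50, 256])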
# class RnnAttentionNet(torch.nn.Module):
# def __init__(self, dim_embeddings):
# super(RnnAttentionNet, self).__init__()
# self.C_process = torch.nn.LSTM(300, 256, 2, batch_first=True)
# self.P_process = DecAttn()
# self.W = torch.nn.Parameter(torch.rand(256, 256), requires_grad=True)
# def forward(self, context, options):
# ## context 10, 30, 300; options 10, 100, 50, 300
# context_raw, hidden = self.C_process(context) ## 10,30,256
# context = context_raw.max(1)[0] # 10,256
# logits = []
# for i, option in enumerate(options.transpose(1, 0)): # 100, 10, 50, 300
# option = self.P_process(context_raw, option) #10,50,300 -> 10,50, 256
# option = option.max(1)[0] #10, 256
# logit = context.matmul(self.W).matmul(option.transpose(1,0)).sum(-1)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# return logits
# def save(self, filepath):
# torch.save(self.state_dict(), filepath)
class RnnAttentionNet(torch.nn.Module):
def __init__(self, dim_embeddings):
super(RnnAttentionNet, self).__init__()
self.C_process = torch.nn.LSTM(256, 256, 1, batch_first=True)
self.context_word = torch.nn.LSTM(300, 256, 1, batch_first=True)
self.P_process = DecAttn()
self.W = torch.nn.Parameter(data=torch.rand(256, 256), requires_grad=True)
def forward(self, context, options):
## context 10, 14, 30, 300
context_vec = []
for utterance in context.transpose(1,0): # 10, 30, 300
utter_vec, __ = self.context_word(utterance) # 10, 30, 256
utter_vec = utter_vec.max(1)[0] # 10, 256
context_vec.append(utter_vec)
context_vec = torch.stack(context_vec, 1) # 10, 14, 256
context_raw, __ = self.C_process(context_vec) # 10, 14, 256
## context 10, 30, 300; options 10, 100, 50, 300
# context, __ = self.C_process(context) ## 10,30,256
context = context_raw.max(1)[0] # 10,256
logits = []
for i, option in enumerate(options.transpose(1, 0)): # 100, 10, 50, 300
option = self.P_process(context_raw, option) #10,50,300 -> 10,50, 256
option = option.max(1)[0] #10, 256
logit = context.matmul(self.W).matmul(option.transpose(1,0)).sum(-1)
logits.append(logit)
logits = torch.stack(logits, 1)
return logits
def save(self, filepath):
torch.save(self.state_dict(), filepath)
```
#### File: topics/custom_dataset/datafunc.py
```python
import os
import torch
import torchvision
from PIL import Image
from tqdm import tqdm
# example
# 378.0 318.0 401.0 318.0 401.0 386.0 378.0 386.0 large-vehicle 0
# 401.0 289.0 429.0 289.0 429.0 386.0 401.0 386.0 large-vehicle 0
# 435.0 336.0 458.0 336.0 458.0 393.0 435.0 393.0 large-vehicle 0
# 509.0 363.0 512.0 363.0 512.0 401.0 509.0 401.0 small-vehicle 2
# data_dir_root should be hw2_train_val
# train directory: hw2_train_val/train15000 # 00000 to 14999
# val directory: hw2_train_val/val1500 # 0000 to 1499
# sub dir: images labelTxt_hbb
classnames = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', 'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout', 'harbor', 'swimming-pool', 'helicopter', 'container-crane']
class YoloDataset(torch.utils.data.Dataset):
def __init__(self, data_dir_root, train=True, transform=None, target_transform=None, create_file=False):
self.data_dir_root = data_dir_root
self.transform = transform
self.train = train
self.train_file = os.path.join(data_dir_root, 'train.pt')
self.valid_file = os.path.join(data_dir_root, 'valid.pt')
if create_file:
self.create_data_file()
if not self._check_exist():
raise RuntimeError('Dataset not created.' +
' You can use create_file=True to create it')
if self.train:
self.train_data, self.train_labels = torch.load(self.train_file)
else:
self.valid_data, self.valid_labels = torch.load(self.valid_file)
def __len__(self):
if self.train:
return 15000
else:
return 1500
def __getitem__(self, index):
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.valid_data[index], self.valid_labels[index]
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def _check_exist(self):
return os.path.exists(self.train_file) and os.path.exists(self.valid_file)
def create_data_file(self): ## load data, save as torch tensor
train_dir = os.path.join(self.data_dir_root, 'train15000')
        valid_dir = os.path.join(self.data_dir_root, 'val1500')
for root_dir, save_path in zip([train_dir, valid_dir],[self.train_file, self.valid_file]):
print('Loading data from', root_dir)
image_dir = os.path.join(root_dir, 'images')
label_dir = os.path.join(root_dir, 'labelTxt_hbb')
total = len(os.listdir(image_dir))
            trans = torchvision.transforms.Compose([
torchvision.transforms.Resize(512),
torchvision.transforms.ToTensor(),
])
img_set = []
target_set = []
for i in tqdm(range(total)):
k = len(str(total))
def str_(i):
return '0'*(k-len(str(i)))+str(i)
img_path = os.path.join(image_dir, str_(i)+'.jpg')
img = Image.open(img_path)
img = trans(img)
img_set.append(img)
                __, h, w = img.shape  # ToTensor gives a (C, H, W) tensor
label_path = os.path.join(label_dir, str_(i)+'.txt')
target = reader(label_path, h, w)
target_set.append(target)
img_set = torch.stack(img_set) # stack per-image (C, H, W) tensors into (N, C, H, W)
target_set = torch.stack(target_set) # (N, 7, 7, 26)
torch.save((img_set, target_set), save_path)
print('saved to', save_path)
@staticmethod
def reader(label_path, h, w): # read file and output target
boxes = []
labels = []
with open(label_path, 'r') as f:
lines = f.readlines()
for line in lines:
obj_ = line.strip().split()
xmin = float(obj_[0])
ymin = float(obj_[1])
xmax = float(obj_[4])
ymax = float(obj_[5])
obj_class = classnames.index(obj_[8]) + 1
boxes.append([xmin, ymin, xmax, ymax])
labels.append(obj_class) # +1 offset so class channels land at target[..., 10:26] in encoder
boxes = torch.Tensor(boxes)
boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)
labels = torch.LongTensor(labels)
return YoloDataset.encoder(boxes, labels)
@staticmethod
def encoder(boxes, labels):
target = torch.zeros((7,7,26))
cell_size = 1./7
width_height = boxes[:,2:] - boxes[:,:2]
center_x_ys = (boxes[:,2:]+ boxes[:,:2])/2
for it, center_x_y in enumerate(center_x_ys):
cell_i_j = (center_x_y/cell_size).ceil() - 1
i = int(cell_i_j[1])
j = int(cell_i_j[0])
target[i, j, 4] = 1
target[i, j, 9] = 1
target[i, j, int(labels[it]+9)] = 1
tl_x_y = cell_i_j * cell_size
delta_x_y = (center_x_y - tl_x_y) / cell_size
target[i, j, 2:4] = width_height[it]
target[i, j, :2] = delta_x_y
target[i, j, 7:9] = width_height[it]
target[i, j, 5:7] = delta_x_y
## layout per cell: (center delta x, y; width, height, confidence) x 2 boxes + 16 class channels; assumes at most one object per cell
return target
## can implement random crop etc later
## (x y x y x y x y class level) -> (7x7x26) written above
## (x y x y x y x y class level) <- (7x7x26) to be implemented in (predict)
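# Hedged sketch (not part of the original file): a minimal inverse of `encoder`, i.e. the
# (7x7x26) -> boxes direction flagged above as "to be implemented in (predict)". It assumes
# the layout written by `encoder`: per cell, channels 0-4 and 5-9 hold two identical
# (dx, dy, w, h, confidence) boxes and channels 10-25 hold the one-hot class.
def decode_target(target, conf_thresh=0.5):
    cell_size = 1. / 7
    boxes, labels = [], []
    for i in range(7):          # row index (y direction)
        for j in range(7):      # column index (x direction)
            if target[i, j, 4] < conf_thresh:
                continue
            delta_x_y = target[i, j, :2]
            width_height = target[i, j, 2:4]
            center = (torch.Tensor([j, i]) + delta_x_y) * cell_size
            boxes.append(torch.cat([center - width_height / 2, center + width_height / 2]))
            labels.append(int(target[i, j, 10:].argmax()))  # index into classnames (0-based)
    if boxes:
        return torch.stack(boxes), torch.LongTensor(labels)
    return torch.zeros((0, 4)), torch.LongTensor([])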
def make_dataloader(data_dir_root, img_size=448, batch_size=128):
trans = torchvision.transforms.Compose([
torchvision.transforms.Resize(img_size),
torchvision.transforms.ToTensor(),
])
trainset = YoloDataset(data_dir_root, train=True, transform=trans, create_file=True)
validset = YoloDataset(data_dir_root, train=False, transform=trans, create_file=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
validloader = torch.utils.data.DataLoader(validset, batch_size=batch_size, shuffle=False)
return trainloader, validloader
```
#### File: simple-pytorch/goodwork/models.py
```python
import torch
class GoodModel(torch.nn.Module):
"""docstring for GoodModel"""
def __init__(self, arg):
super(GoodModel, self).__init__()
self.arg = arg
self.linear = torch.nn.Sequential(
torch.nn.Conv2d(3, 64, kernel_size=3, padding=1), # placeholder layer sizes; the original left Conv2d() unconfigured
torch.nn.ReLU()
)
def forward(self, x):
return self.linear(x)
``` |
{
"source": "jimmy-academia/GAN_studies",
"score": 3
} |
#### File: DCGAN/experiments/02basic_lsun.py
```python
import sys
sys.path.append('..')
sys.path.append('.')
from module.trainer import Trainer
from module.config import configurations
from module.utils import check_directories
def main():
config, args, opt = configurations('BASIC_LSUN', 'lsun')
check_directories(opt.dir_list)
trainer = Trainer(config, args, opt)
trainer.train()
if __name__ == '__main__':
main()
```
#### File: InfoGAN/module/trainer.py
```python
from module.model import GAN
from module.datafunc import make_dataset, make_dataloader
import torch
from torch import optim
import torch.nn as nn
from torch.autograd import Variable
from tqdm import tqdm
from random import randint
from torchvision.utils import save_image
from matplotlib import pyplot as plt
plt.switch_backend('agg')
import numpy as np
import os
def to_variable(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
##### Helper Function for Math
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
# InfoGAN Function (Gaussian)
def gen_cc(n_size, dim):
return torch.Tensor(np.random.randn(n_size, dim) * 0.5 + 0.0)
# InfoGAN Function (Multi-Nomial)
def gen_dc(n_size, dim):
codes=[]
code = np.zeros((n_size, dim))
random_cate = np.random.randint(0, dim, n_size)
code[range(n_size), random_cate] = 1
codes.append(code)
codes = np.concatenate(codes,1)
return torch.Tensor(codes)
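# Hedged sketch (not in the original module): illustrates how the generator input is assembled
# from the random z vector plus the continuous (cc) and categorical (dc) latent codes, mirroring
# what Trainer.train_one_epoch does below; z_dim / cc_dim / dc_dim follow the Trainer args.
def build_generator_input(batch_size, z_dim, cc_dim, dc_dim):
    z = torch.randn(batch_size, z_dim)
    cc = gen_cc(batch_size, cc_dim)    # Gaussian continuous code
    dc = gen_dc(batch_size, dc_dim)    # one-hot categorical code
    noise = torch.cat((z, cc, dc), 1)  # (batch, z_dim + cc_dim + dc_dim)
    return noise.view(noise.size(0), noise.size(1), 1, 1)  # reshape for the convolutional generator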
class Trainer():
def __init__(self, config, args, opt):
self.model = GAN(args)
self.G_optimizer = optim.Adam(self.model.G.parameters(), lr=args.lrG, betas=args.betas)
self.D_optimizer = optim.Adam(self.model.D.parameters(), lr=args.lrD, betas=args.betas)
self.criterion = nn.BCELoss()
self.config = config
self.args = args
self.opt = opt
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.records = []
def train(self):
print('training for task:', self.config.taskname)
dataset = make_dataset(self.config.data_dir_root, self.config.datatype, self.args.img_size)
dataloader = make_dataloader(dataset, batch_size=self.args.batch_size)
if self.device =='cuda':
self.model.cuda()
print('train #', self.opt.epochs, 'D: %d, G:%d, save at %s'%
(self.opt.k, self.opt.g, self.opt.task_dir))
# fixed_z = torch.randn(25, self.args.z_dim, 1, 1, device=self.device)
for i in range(self.opt.epochs):
epoch_records = self.train_one_epoch(dataloader)
self.records.append(epoch_records)
# self.save_img_sample(str(i))
# self.save_fixed_grid_sample(fixed_z,'Epoch_'+str(i))
self.save_loss_plot('Epoch_'+str(i))
if self.opt.save_model:
self.model.save(self.opt.model_filepath)
def train_one_epoch(self, dataloader):
pbar = tqdm(dataloader)
epoch_records = []
## different for CelebA (no label)
for images, __ in pbar:
batch_size = images.shape[0]
label_real = Variable(torch.ones(batch_size).cuda())
label_fake = Variable(torch.zeros(batch_size).cuda())
images = to_variable(images)
# Discriminator
for __ in range(1):
z = to_variable(torch.randn(batch_size, self.args.z_dim))
cc = to_variable(gen_cc(batch_size, self.args.cc_dim))
dc = to_variable(gen_dc(batch_size, self.args.dc_dim)) # discrete (categorical) code, not gen_cc
noise = torch.cat((z, cc, dc), 1)
noise = noise.view(noise.size(0), noise.size(1), 1, 1)
fake_images = self.model.G(noise)
d_out_real = self.model.D(images)#.view(-1)
d_out_fake = self.model.D(fake_images)#.view(-1)
# err_Dis_real = self.criterion(d_out_real, label_real)
# err_Dis_fake = self.criterion(d_out_fake, label_fake)
# d_loss_a = err_Dis_real + err_Dis_fake
d_loss_a = -torch.mean(torch.log(d_out_real[:,0]) + torch.log(1 - d_out_fake[:,0]))
output_cc = d_out_fake[:, 1:1+self.args.cc_dim]
output_dc = d_out_fake[:, 1+self.args.cc_dim:]
d_loss_cc = torch.mean((((output_cc - 0.0) / 0.5) ** 2))
d_loss_dc = -(torch.mean(torch.sum(dc * output_dc, 1)) + torch.mean(torch.sum(dc * dc, 1)))
d_loss = d_loss_a + self.args.continuous_weight * d_loss_cc + 1.0 * d_loss_dc
self.model.D.zero_grad()
d_loss.backward(retain_graph=True)
self.D_optimizer.step()
Dis_out = d_out_real.view(-1).mean().item()
Dis_gen_out = d_out_fake.view(-1).mean().item()
# Generator maximize log(D(G(z)))
for __ in range(1):
g_loss_a = -torch.mean(torch.log(d_out_fake[:,0]))
g_loss = g_loss_a + self.args.continuous_weight * d_loss_cc + 1.0 * d_loss_dc
self.model.D.zero_grad() # clear grads pushed into D by the shared cc/dc loss terms; D is updated in its own step
self.model.G.zero_grad()
g_loss.backward()
self.G_optimizer.step()
batch_record = (d_loss.item(), g_loss.item(), Dis_out, Dis_gen_out)
epoch_records.append(list(batch_record))
message = 'errD:%.4f,errG:%.4f,D(x):%.4f,D(G(z)):%.4f'%batch_record
pbar.set_description(message)
epoch_records = np.mean(np.array(epoch_records),0)
return epoch_records.tolist()
# def save_img_sample(self, img_name='generated'):
# with torch.no_grad():
# z = torch.randn(1, 100, 1, 1, device=self.device)
# generated_img = self.model.G(z)
# save_image(generated_img.cpu(), self.opt.img_dir+'/'+img_name+'.png')
# def save_fixed_grid_sample(self, fixed_z, img_name='generated'):
# with torch.no_grad():
# fixed_z = Variable(fixed_z)
# generated_ = self.model.G(fixed_z)
# def denorm(x):
# # is this needed???
# out = (x + 1) / 2
# return out.clamp(0, 1)
# genrated_ = denorm(generated_)
# n_rows = np.sqrt(fixed_z.size()[0]).astype(np.int32)
# n_cols = np.sqrt(fixed_z.size()[0]).astype(np.int32)
# fig, axes = plt.subplots(n_rows, n_cols, figsize=(5,5))
# for ax, img in zip(axes.flatten(), generated_):
# ax.axis('off')
# ax.imshow(img.cpu().data.view(self.args.img_size, self.args.img_size).numpy(), cmap='gray', aspect='equal')
# plt.subplots_adjust(wspace=0, hspace=0)
# # title = 'Epoch {0}'.format(num_epoch+1)
# fig.text(0.5, 0.04, img_name, ha='center')
# filepath = self.opt.task_dir+'/generated_imgs'
# if not os.path.exists(filepath):
# os.mkdir(filepath)
# plt.savefig(filepath+'/'+img_name+'.png')
# plt.close()
def save_loss_plot(self, img_name='loss'):
four_records = np.array(self.records).T
fig, ax = plt.subplots()
ax.set_xlim(0, self.opt.epochs)
ax.set_xlabel(img_name)
ax.set_ylim(0, np.max(four_records[:2])*1.1)
ax.set_ylabel('Loss values')
ax.plot(four_records[0], label='D loss', color='#CC0000')
ax.plot(four_records[1], label='G loss', color='#FF8000')
ax.tick_params('y', colors = '#CC0000')
ax.legend(loc='upper left')
ax2 = ax.twinx()
ax2.set_ylim(0, np.max(four_records[2:])*1.1)
ax2.set_ylabel('D() values')
ax2.plot(four_records[2], label='D(x)', color='#0080FF', linestyle=':')
ax2.plot(four_records[3], label='D(G(z))', color='#808080', linestyle=':')
ax2.tick_params('y', colors='k')
ax2.legend(loc='upper right')
filepath = self.opt.task_dir+'/loss_plots'
if not os.path.exists(filepath):
os.mkdir(filepath)
plt.savefig(filepath+'/'+img_name+'.png')
plt.close()
``` |
{
"source": "Jimmy-A-Caroli/protwis",
"score": 2
} |
#### File: protwis/signprot/views.py
```python
from django.contrib.postgres.aggregates import ArrayAgg
from django.core.cache import cache
from django.db.models import F, Q, Count
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from common import definitions
from common.diagrams_gpcr import DrawSnakePlot
from common.diagrams_gprotein import DrawGproteinPlot
from common.phylogenetic_tree import PhylogeneticTreeGenerator
from common.tools import fetch_from_web_api
from common.views import AbsTargetSelection
from contactnetwork.models import InteractingResiduePair
from mutation.models import MutationExperiment
from protein.models import (Gene, Protein, ProteinAlias, ProteinConformation, ProteinFamily, ProteinGProtein,
ProteinGProteinPair, ProteinArrestinPair, ProteinSegment)
from residue.models import (Residue, ResidueGenericNumberEquivalent, ResiduePositionSet)
from seqsign.sequence_signature import (SequenceSignature, SignatureMatch)
from signprot.interactions import (get_entry_names, get_generic_numbers, get_ignore_info, get_protein_segments,
get_signature_features, group_signature_features, prepare_signature_match)
from signprot.models import (SignprotBarcode, SignprotComplex, SignprotStructure)
from structure.models import Structure
import json
import re
import time
from collections import Counter, OrderedDict
from decimal import Decimal
from pprint import pprint
from copy import deepcopy
from statistics import mean
class BrowseSelection(AbsTargetSelection):
step = 1
number_of_steps = 1
psets = False
filters = True
filter_gprotein = True
type_of_selection = 'browse_gprot'
description = 'Select a G protein or family by searching or browsing in the right column.'
description = 'Select a G protein (family) by searching or browsing in the middle. The selection is viewed to' \
+ ' the right.'
docs = 'receptors.html'
target_input = False
selection_boxes = OrderedDict([
('reference', False), ('targets', True),
('segments', False),
])
try:
ppf_g = ProteinFamily.objects.get(slug="100_001")
# ppf_a = ProteinFamily.objects.get(slug="200_000")
# pfs = ProteinFamily.objects.filter(parent__in=[ppf_g.id,ppf_a.id])
pfs = ProteinFamily.objects.filter(parent__in=[ppf_g.id])
ps = Protein.objects.filter(family__in=[ppf_g]) # ,ppf_a
tree_indent_level = []
# action = 'expand'
# remove the parent family (for all other families than the root of the tree, the parent should be shown)
# del ppf_g
# del ppf_a
except Exception as e:
pass
class ArrestinSelection(AbsTargetSelection):
step = 1
number_of_steps = 1
psets = False
filters = True
filter_gprotein = True
type_of_selection = 'browse_gprot'
description = 'Select an Arrestin (family) by searching or browsing in the middle. The selection is viewed to' \
+ ' the right.'
docs = 'signalproteins.html'
target_input = False
selection_boxes = OrderedDict([
('reference', False), ('targets', True),
('segments', False),
])
try:
if ProteinFamily.objects.filter(slug="200_000").exists():
ppf = ProteinFamily.objects.get(slug="200_000")
pfs = ProteinFamily.objects.filter(parent=ppf.id)
ps = Protein.objects.filter(family=ppf)
tree_indent_level = []
action = 'expand'
# remove the parent family (for all other families than the root of the tree, the parent should be shown)
del ppf
except Exception as e:
pass
class ArrestinCoupling(TemplateView):
"""
Class based generic view which serves coupling data between Receptors and Arrestins.
Data coming from <NAME> only at the moment.
:param dataset: ProteinArrestinPair (see build/management/commands/build_arrestins.py)
:return: context
"""
template_name = "signprot/arrestin_coupling.html"
@method_decorator(csrf_exempt)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# fields, header = self.fields()
protvals, couplvals = self.fields()
context['fields'] = protvals
context['signal'] = couplvals
return context
@staticmethod
def fields():
"""
This function returns the required fields for the Arrestin subtypes table.
:return: a (protein_data, signaling_data) tuple.
protein_data keys are ProteinArrestinPair ids; values hold source, class, family, uniprot and iuphar names.
signaling_data maps receptor entry names to per-arrestin emax_deg, pec50_deg and logmaxec50_deg values.
"""
arrestins = ProteinArrestinPair.objects.filter(protein__species__common_name='Human',
protein__sequence_type__slug='wt',
protein__family__slug__startswith='00').prefetch_related(
"protein__family", # REMEMBER. Whatever you call in template prefetch to reduce SQL queries.
"protein__family__parent__parent__parent",
"arrestin_subtype",
"arrestin_subtype__source"
)
signaling_data = {}
for pairing in arrestins.values_list(
"protein__entry_name",
"arrestin_subtype__entry_name",
"emax_deg",
"pec50_deg",
"logmaxec50_deg"
):
if pairing[0] not in signaling_data:
signaling_data[pairing[0]] = {}
signaling_data[pairing[0]][pairing[1]] = {}
if 'emax' not in signaling_data[pairing[0]][pairing[1]]:
signaling_data[pairing[0]][pairing[1]]['emax'] = {}
signaling_data[pairing[0]][pairing[1]]['emax'] = pairing[2]
if 'pec50' not in signaling_data[pairing[0]][pairing[1]]:
signaling_data[pairing[0]][pairing[1]]['pec50'] = {}
signaling_data[pairing[0]][pairing[1]]['pec50'] = pairing[3]
if 'logmaxec50' not in signaling_data[pairing[0]][pairing[1]]:
signaling_data[pairing[0]][pairing[1]]['logmaxec50'] = {}
signaling_data[pairing[0]][pairing[1]]['logmaxec50'] = pairing[4]
protein_data = {}
for prot in arrestins.distinct("protein_id"):
protein_data[prot.id] = {}
protein_data[prot.id]['class'] = prot.protein.family.parent.parent.parent.shorter()
protein_data[prot.id]['family'] = prot.protein.family.parent.short()
protein_data[prot.id]['uniprot'] = prot.protein.entry_short()
protein_data[prot.id]['iuphar'] = prot.protein.family.name.replace('receptor', '').strip()
protein_data[prot.id]['accession'] = prot.protein.accession
protein_data[prot.id]['entryname'] = prot.protein.entry_name
protein_data[prot.id]['source'] = prot.source
protein_data[prot.id]['subtype'] = prot.arrestin_subtype
# MAKES 2396 SQL QUERIES, have to find out how to make it faster.
# uniprot_links = prot.web_links.filter(web_resource__slug='uniprot')
# if uniprot_links.count() > 0:
# protein_data[prot.id]['uniprot_link'] = uniprot_links[0]
# MAKES 970 SQL QUERIES. Even with prefetch_related of web_links__web_resource
gtop_links = prot.protein.web_links.filter(web_resource__slug='gtop')
if len(gtop_links) > 0:
protein_data[prot.id]['gtp_link'] = gtop_links[0]
arrestin_subtypes = ["arrb1_human", "arrb2_human"]
for arrestin in arrestin_subtypes:
if prot.protein.entry_name in signaling_data and arrestin in signaling_data[prot.protein.entry_name]:
protein_data[prot.id][arrestin] = signaling_data[prot.protein.entry_name][arrestin]
else:
protein_data[prot.id][arrestin] = "-"
return protein_data, signaling_data
class TargetSelection(AbsTargetSelection):
step = 1
number_of_steps = 1
filters = False
psets = False
target_input = False
redirect_on_select = True
type_of_selection = 'ginterface'
title = 'SELECT TARGET for Gs INTERFACE'
description = 'Select a reference target by searching or browsing.' \
+ '\n\nThe Gs interface from adrb2 (PDB: 3SN6) will be superposed onto the selected target.' \
+ '\n\nAn interaction browser for the adrb2 Gs interface will be given for comparison.'
# template_name = 'common/targetselection.html'
selection_boxes = OrderedDict([
('reference', False),
('targets', True),
('segments', False),
])
buttons = {
'continue': {
'label': 'Continue to next step',
'url': '#',
'color': 'success',
},
}
class CouplingBrowser(TemplateView):
"""
Class based generic view which serves coupling data between Receptors and G-proteins.
Data coming from Guide to Pharmacology, Asuka Inoue and Michel Bouvier at the moment.
More data might come later from Roth and Strachan TRUPATH biosensor and Neville Lambert.
:param dataset: ProteinGProteinPair (see build/management/commands/build_g_proteins.py)
:return: context
"""
template_name = "signprot/coupling_browser.html"
@method_decorator(csrf_exempt)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tab_fields, header = self.tab_fields()
context['tabfields'] = tab_fields
context['header'] = header
flat_list = [item for sublist in header.values() for item in sublist]
context['subunitheader'] = flat_list
return context
@staticmethod
def tab_fields():
"""
This function returns the required fields for the G-protein families table and the G-protein subtypes table
which are rendered in separate tabs on the same page.
:return: (dictotemplate, coupling_header_names)
dictotemplate keys = protein ids (as used in the ProteinGProteinPair table).
values = source, class, family, uniprotid, iupharid, logmaxec50_deg, pec50_deg, emax_deg
"""
proteins = Protein.objects.filter(sequence_type__slug='wt',
family__slug__startswith='00',
species__common_name='Human').prefetch_related(
'family',
'family__parent__parent__parent',
'web_links'
)
couplings = ProteinGProteinPair.objects.filter(source="GuideToPharma").values_list('protein__entry_name',
'g_protein__name',
'transduction')
signaling_data = {}
for pairing in couplings:
if pairing[0] not in signaling_data:
signaling_data[pairing[0]] = {}
signaling_data[pairing[0]][pairing[1]] = pairing[2]
protein_data = {}
for prot in proteins:
protein_data[prot.id] = {}
protein_data[prot.id]['class'] = prot.family.parent.parent.parent.shorter()
protein_data[prot.id]['family'] = prot.family.parent.short()
protein_data[prot.id]['uniprot'] = prot.entry_short()
protein_data[prot.id]['iuphar'] = prot.family.name.replace('receptor', '').strip()
protein_data[prot.id]['accession'] = prot.accession
protein_data[prot.id]['entryname'] = prot.entry_name
# MAKES 2396 SQL QUERIES, have to find out how to make it faster.
# uniprot_links = prot.web_links.filter(web_resource__slug='uniprot')
# if uniprot_links.count() > 0:
# protein_data[prot.id]['uniprot_link'] = uniprot_links[0]
# MAKES 970 SQL QUERIES. Even with prefetch_related of web_links__web_resource
gtop_links = prot.web_links.filter(web_resource__slug='gtop')
if len(gtop_links) > 0:
protein_data[prot.id]['gtp_link'] = gtop_links[0]
gprotein_families = ["Gs family", "Gi/Go family", "Gq/G11 family", "G12/G13 family"]
for gprotein in gprotein_families:
if prot.entry_name in signaling_data and gprotein in signaling_data[prot.entry_name]:
if signaling_data[prot.entry_name][gprotein] == "primary":
protein_data[prot.id][gprotein] = "1'"
elif signaling_data[prot.entry_name][gprotein] == "secondary":
protein_data[prot.id][gprotein] = "2'"
else:
protein_data[prot.id][gprotein] = "-"
else:
protein_data[prot.id][gprotein] = "-"
protein_data[prot.id]['gs'] = protein_data[prot.id][gprotein_families[0]]
protein_data[prot.id]['gio'] = protein_data[prot.id][gprotein_families[1]]
protein_data[prot.id]['gq11'] = protein_data[prot.id][gprotein_families[2]]
protein_data[prot.id]['g1213'] = protein_data[prot.id][gprotein_families[3]]
couplings2 = ProteinGProteinPair.objects.filter(source__in=["Inoue", "Bouvier"]) \
.filter(g_protein_subunit__family__slug__startswith="100_001").order_by("g_protein_subunit__family__slug", "source") \
.prefetch_related('g_protein_subunit__family', 'g_protein')
coupling_headers = ProteinGProteinPair.objects.filter(source__in=["Inoue", "Bouvier"]) \
.filter(g_protein_subunit__family__slug__startswith="100_001") \
.order_by("g_protein_subunit__family__slug", "source").distinct("g_protein_subunit__family__slug") \
.values_list("g_protein_subunit__family__name", "g_protein_subunit__family__parent__name")
coupling_header_names = {}
coupling_reverse_header_names = {}
coupling_placeholder = {}
coupling_placeholder2 = {}
coupling_placeholder3 = {}
for name in coupling_headers:
if name[1] not in coupling_header_names:
coupling_header_names[name[1]] = []
coupling_placeholder3[name[1]] = []
coupling_reverse_header_names[name[0]] = name[1]
coupling_header_names[name[1]].append(name[0])
coupling_placeholder[name[0]] = "--"
coupling_placeholder2[name[0]] = []
dictotemplate = {}
sourcenames = set()
for pair in couplings2:
if pair.protein_id not in dictotemplate:
dictotemplate[pair.protein_id] = {}
dictotemplate[pair.protein_id]['protein'] = protein_data[pair.protein_id]
dictotemplate[pair.protein_id]['coupling'] = {}
dictotemplate[pair.protein_id]['couplingmax'] = {}
dictotemplate[pair.protein_id]['coupling']['1'] = {}
dictotemplate[pair.protein_id]['coupling']['1']['logemaxec50'] = deepcopy(coupling_placeholder2)
dictotemplate[pair.protein_id]['coupling']['1']['pec50'] = deepcopy(coupling_placeholder2)
dictotemplate[pair.protein_id]['coupling']['1']['emax'] = deepcopy(coupling_placeholder2)
dictotemplate[pair.protein_id]['couplingmax']['1'] = {}
dictotemplate[pair.protein_id]['couplingmax']['1']['logemaxec50'] = deepcopy(coupling_placeholder3)
dictotemplate[pair.protein_id]['couplingmax']['1']['pec50'] = deepcopy(coupling_placeholder3)
dictotemplate[pair.protein_id]['couplingmax']['1']['emax'] = deepcopy(coupling_placeholder3)
if pair.source not in dictotemplate[pair.protein_id]['coupling']:
sourcenames.add(pair.source)
dictotemplate[pair.protein_id]['coupling'][pair.source] = {}
dictotemplate[pair.protein_id]['couplingmax'][pair.source] = {}
dictotemplate[pair.protein_id]['coupling'][pair.source]['logemaxec50'] = coupling_placeholder.copy()
dictotemplate[pair.protein_id]['coupling'][pair.source]['pec50'] = coupling_placeholder.copy()
dictotemplate[pair.protein_id]['coupling'][pair.source]['emax'] = coupling_placeholder.copy()
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['logemaxec50'] = deepcopy(coupling_placeholder3)
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['pec50'] = deepcopy(coupling_placeholder3)
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['emax'] = deepcopy(coupling_placeholder3)
subunit = pair.g_protein_subunit.family.name
dictotemplate[pair.protein_id]['coupling'][pair.source]['logemaxec50'][subunit] = round(pair.logmaxec50_deg, 1)
dictotemplate[pair.protein_id]['coupling'][pair.source]['pec50'][subunit] = round(pair.pec50_deg, 1)
dictotemplate[pair.protein_id]['coupling'][pair.source]['emax'][subunit] = round(pair.emax_deg)
dictotemplate[pair.protein_id]['coupling']['1']['logemaxec50'][subunit].append(round(pair.logmaxec50_deg, 1))
dictotemplate[pair.protein_id]['coupling']['1']['pec50'][subunit].append(round(pair.pec50_deg, 1))
dictotemplate[pair.protein_id]['coupling']['1']['emax'][subunit].append(round(pair.emax_deg))
family = coupling_reverse_header_names[subunit]
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['logemaxec50'][family].append(round(pair.logmaxec50_deg, 1))
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['pec50'][family].append(round(pair.pec50_deg, 1))
dictotemplate[pair.protein_id]['couplingmax'][pair.source]['emax'][family].append(round(pair.emax_deg))
dictotemplate[pair.protein_id]['couplingmax']['1']['logemaxec50'][family].append(round(pair.logmaxec50_deg, 1))
dictotemplate[pair.protein_id]['couplingmax']['1']['pec50'][family].append(round(pair.pec50_deg, 1))
dictotemplate[pair.protein_id]['couplingmax']['1']['emax'][family].append(round(pair.emax_deg))
for prot in dictotemplate:
for propval in dictotemplate[prot]['coupling']['1']:
for sub in dictotemplate[prot]['coupling']['1'][propval]:
valuelist = dictotemplate[prot]['coupling']['1'][propval][sub]
if len(valuelist) == 0:
dictotemplate[prot]['coupling']['1'][propval][sub] = "--"
elif len(valuelist) > 0 and propval == "logemaxec50":
if all(i > 0 for i in valuelist):
dictotemplate[prot]['coupling']['1'][propval][sub] = round(mean(valuelist), 1)
else:
dictotemplate[prot]['coupling']['1'][propval][sub] = round(max(valuelist), 1)
elif len(valuelist) > 0 and propval == "pec50":
if all(i > 0 for i in valuelist):
dictotemplate[prot]['coupling']['1'][propval][sub] = round(mean(valuelist), 1)
else:
dictotemplate[prot]['coupling']['1'][propval][sub] = round(max(valuelist), 1)
elif len(valuelist) > 0 and propval == "emax":
if all(i > 0 for i in valuelist):
dictotemplate[prot]['coupling']['1'][propval][sub] = round(mean(valuelist))
else:
dictotemplate[prot]['coupling']['1'][propval][sub] = round(max(valuelist))
else:
dictotemplate[prot]['coupling']['1'][propval][sub] = round(mean(valuelist))
#dict_name = 'confidence'
dict_name = 'coupling'
for prot in dictotemplate:
if dict_name not in dictotemplate[prot]:
dictotemplate[prot][dict_name] = {}
for i in range(2, len(sourcenames)+2):
dictotemplate[prot][dict_name][i] = {}
for propval in dictotemplate[prot]['coupling']['1']:
for i in range(2, len(sourcenames)+2):
dictotemplate[prot][dict_name][i][propval] = {}
for sub in dictotemplate[prot]['coupling']['1'][propval]: # use family here instead of sub for families "loop"
family = coupling_reverse_header_names[sub].replace("/", "/G")
gtp = protein_data[prot][family+" family"]
baseconfidence = dictotemplate[prot]['coupling']['1'][propval][sub]
confidence = 0
if gtp != "-":
confidence += 1
if baseconfidence == "-":
baseconfidence = gtp
for source in sourcenames:
if source in dictotemplate[prot]['coupling'] and dictotemplate[prot]['coupling'][source][propval][sub] != "--":
if dictotemplate[prot]['coupling'][source][propval][sub] > 0:
confidence += 1
for i in range(2, len(sourcenames)+2):
if confidence >= i:
dictotemplate[prot][dict_name][i][propval][sub] = baseconfidence
else:
dictotemplate[prot][dict_name][i][propval][sub] = gtp
for prot in dictotemplate:
for source in dictotemplate[prot]['couplingmax']:
for propval in dictotemplate[prot]['couplingmax'][source]:
for fam in dictotemplate[prot]['couplingmax'][source][propval]:
valuelist = dictotemplate[prot]['couplingmax'][source][propval][fam]
if len(valuelist) == 0:
dictotemplate[prot]['couplingmax'][source][propval][fam] = "--"
# elif len(valuelist) == 1:
# dictotemplate[prot]['coupling'][source][propval][fam] = valuelist[0]
elif propval == "logemaxec50":
dictotemplate[prot]['couplingmax'][source][propval][fam] = round(max(valuelist), 1)
elif propval == "pec50":
dictotemplate[prot]['couplingmax'][source][propval][fam] = round(max(valuelist), 1)
elif propval == "emax":
dictotemplate[prot]['couplingmax'][source][propval][fam] = round(max(valuelist))
else:
dictotemplate[prot]['couplingmax'][source][propval][fam] = max(valuelist)
dict_name = 'couplingmax'
for prot in dictotemplate:
if dict_name not in dictotemplate[prot]:
dictotemplate[prot][dict_name] = {}
for i in range(2, len(sourcenames)+2):
dictotemplate[prot][dict_name][i] = {}
for propval in dictotemplate[prot]['couplingmax']['1']:
for i in range(2, len(sourcenames)+2):
dictotemplate[prot][dict_name][i][propval] = {}
for family in dictotemplate[prot]['couplingmax']['1'][propval]:
gtp = protein_data[prot][family.replace("/", "/G") + " family"]
baseconfidence = dictotemplate[prot]['couplingmax']['1'][propval][family]
confidence = 0
if gtp != "-":
confidence += 1
if baseconfidence == "-":
baseconfidence = gtp
for source in sourcenames:
if source in dictotemplate[prot]['couplingmax'] and dictotemplate[prot]['couplingmax'][source][propval][family] != "--":
if dictotemplate[prot]['couplingmax'][source][propval][family] > 0:
confidence += 1
for i in range(2, len(sourcenames)+2):
if confidence >= i:
dictotemplate[prot][dict_name][i][propval][family] = baseconfidence
else:
dictotemplate[prot][dict_name][i][propval][family] = gtp
# pprint(dictotemplate[348]) # only Bouvier
# pprint(dictotemplate[1]) # Inoue and Bouvier
return dictotemplate, coupling_header_names
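# Hedged sketch (not part of the original views): the support count behind the "confidence"
# levels computed in tab_fields above. GuideToPharma contributes one point when its call is
# not "-", and each quantitative source (e.g. Inoue, Bouvier) contributes one point when its
# value is present and > 0; a coupling value is then shown at confidence level i only when at
# least i sources support it.
def support_count(gtp_call, source_values):
    count = 1 if gtp_call != "-" else 0
    count += sum(1 for v in source_values if v != "--" and v > 0)
    return count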
def GProtein(request, dataset="GuideToPharma", render_part="both"):
name_of_cache = 'gprotein_statistics_{}'.format(dataset)
context = cache.get(name_of_cache)
if context == None:
context = OrderedDict()
i = 0
gproteins = ProteinGProtein.objects.all().prefetch_related('proteingproteinpair_set')
slug_translate = {'001': "ClassA", '002': "ClassB1", '004': "ClassC", '006': "ClassF"}
selectivitydata = {}
for slug in slug_translate.keys():
jsondata = {}
for gp in gproteins:
# ps = gp.proteingproteinpair_set.all()
ps = gp.proteingproteinpair_set.filter(protein__family__slug__startswith=slug,
source=dataset).prefetch_related('protein')
# print(ps,len(ps))
if ps:
jsondata[str(gp)] = []
for p in ps:
if dataset == "Aska" and p.log_rai_mean < -1:
continue
if str(p.protein.entry_name).split('_')[0].upper() not in selectivitydata:
selectivitydata[str(p.protein.entry_name).split('_')[0].upper()] = []
selectivitydata[str(p.protein.entry_name).split('_')[0].upper()].append(str(gp))
# print(p.protein.family.parent.parent.parent)
jsondata[str(gp)].append(str(p.protein.entry_name) + '\n')
jsondata[str(gp)] = ''.join(jsondata[str(gp)])
context[slug_translate[slug]] = jsondata
context["selectivitydata"] = selectivitydata
cache.set(name_of_cache, context, 60 * 60 * 24 * 7) # seven days timeout on cache
context["render_part"] = render_part
return render(request,
'signprot/gprotein.html',
context
)
def CouplingProfiles(request, render_part="both", signalling_data="empty"):
name_of_cache = 'coupling_profiles_' + signalling_data
context = cache.get(name_of_cache)
# NOTE cache disabled for development only!
# context = None
if context == None:
context = OrderedDict()
i = 0
# adding info for tree from StructureStatistics View
tree = PhylogeneticTreeGenerator()
class_a_data = tree.get_tree_data(ProteinFamily.objects.get(name='Class A (Rhodopsin)'))
context['tree_class_a_options'] = deepcopy(tree.d3_options)
context['tree_class_a_options']['anchor'] = 'tree_class_a'
context['tree_class_a_options']['leaf_offset'] = 50
context['tree_class_a_options']['label_free'] = []
whole_class_a = class_a_data.get_nodes_dict(None)
# section to remove Orphan from Class A tree and apply to a different tree
for item in whole_class_a['children']:
if item['name'] == 'Orphan':
orphan_data = OrderedDict([('name', ''), ('value', 3000), ('color', ''), ('children',[item])])
whole_class_a['children'].remove(item)
break
context['tree_class_a'] = json.dumps(whole_class_a)
class_b1_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B1 (Secretin)'))
context['tree_class_b1_options'] = deepcopy(tree.d3_options)
context['tree_class_b1_options']['anchor'] = 'tree_class_b1'
context['tree_class_b1_options']['branch_trunc'] = 60
context['tree_class_b1_options']['label_free'] = [1,]
context['tree_class_b1'] = json.dumps(class_b1_data.get_nodes_dict(None))
class_b2_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class B2 (Adhesion)'))
context['tree_class_b2_options'] = deepcopy(tree.d3_options)
context['tree_class_b2_options']['anchor'] = 'tree_class_b2'
context['tree_class_b2_options']['label_free'] = [1,]
context['tree_class_b2'] = json.dumps(class_b2_data.get_nodes_dict(None))
class_c_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class C (Glutamate)'))
context['tree_class_c_options'] = deepcopy(tree.d3_options)
context['tree_class_c_options']['anchor'] = 'tree_class_c'
context['tree_class_c_options']['branch_trunc'] = 50
context['tree_class_c_options']['label_free'] = [1,]
context['tree_class_c'] = json.dumps(class_c_data.get_nodes_dict(None))
class_f_data = tree.get_tree_data(ProteinFamily.objects.get(name__startswith='Class F (Frizzled)'))
context['tree_class_f_options'] = deepcopy(tree.d3_options)
context['tree_class_f_options']['anchor'] = 'tree_class_f'
context['tree_class_f_options']['label_free'] = [1,]
context['tree_class_f'] = json.dumps(class_f_data.get_nodes_dict(None))
class_t2_data = tree.get_tree_data(ProteinFamily.objects.get(name='Class T (Taste 2)'))
context['tree_class_t2_options'] = deepcopy(tree.d3_options)
context['tree_class_t2_options']['anchor'] = 'tree_class_t2'
context['tree_class_t2_options']['label_free'] = [1,]
context['tree_class_t2'] = json.dumps(class_t2_data.get_nodes_dict(None))
# definition of the class a orphan tree
context['tree_orphan_options'] = deepcopy(tree.d3_options)
context['tree_orphan_options']['anchor'] = 'tree_orphan'
context['tree_orphan_options']['label_free'] = [1,]
context['tree_orphan_a'] = json.dumps(orphan_data)
# end copied section from StructureStatistics View
# gprot_id = ProteinGProteinPair.objects.all().values_list('g_protein_id', flat=True).order_by('g_protein_id').distinct()
gproteins = ProteinGProtein.objects.filter(pk__lte = 4) #here GPa1 is fetched
arrestins = ProteinArrestinPair.objects.all().values_list('arrestin_subtype_id', flat=True).order_by('arrestin_subtype_id').distinct()
arrestin_prots = list(Protein.objects.filter(family__slug__startswith="200", species__id=1, sequence_type__slug='wt').values_list("pk","name"))
arrestin_translate = {}
for arr in arrestin_prots:
arrestin_translate[arr[0]] = arr[1]
slug_translate = {'001': "ClassA", '002': "ClassB1", '003': "ClassB2", '004': "ClassC", '006': "ClassF", '007': "ClassT"}
key_translate ={'Gs':"G<sub>s</sub>", 'Gi/Go':"G<sub>i/o</sub>",
'Gq/G11':"G<sub>q/11</sub>", 'G12/G13':"G<sub>12/13</sub>",
'Beta-arrestin-1':"β-Arrestin<sub>1</sub>", 'Beta-arrestin-2':"β-Arrestin<sub>2</sub>"}
selectivitydata_gtp_plus = {}
receptor_dictionary = []
if signalling_data == "gprot":
table = {'Class':[], 'Gs': [], 'GiGo': [], 'GqG11': [], 'G12G13': [], 'Total': []}
else: # an elif may be needed here if more signalling proteins are added
table = {'Class':[], 'Betaarrestin1': [], 'Betaarrestin2': [], 'Total': []}
for slug in slug_translate.keys():
tot = 0
txttot = ''
fam = str(ProteinFamily.objects.get(slug=(slug)))
table['Class'].append(fam.replace('Class',''))
jsondata_gtp_plus = {}
if signalling_data == "gprot":
for gp in gproteins:
# Collect GTP
gtp_couplings = list(ProteinGProteinPair.objects.filter(protein__family__slug__startswith=slug, source="GuideToPharma", g_protein=gp)\
.order_by("protein__entry_name")\
.values_list("protein__entry_name", flat=True)\
.distinct())
# Other coupling data with logmaxec50 greater than 0
other_couplings = list(ProteinGProteinPair.objects.filter(protein__family__slug__startswith=slug)\
.exclude(source="GuideToPharma")
.filter(g_protein=gp, logmaxec50_deg__gt=0)\
.order_by("protein__entry_name")\
.values_list("protein__entry_name").distinct()\
.annotate(num_sources=Count("source", distinct=True)))
# Initialize selectivity array
processed_receptors = []
key = str(gp).split(' ')[0]
jsondata_gtp_plus[key] = []
for coupling in other_couplings:
receptor_name = coupling[0]
receptor_dictionary.append(receptor_name)
receptor_only = receptor_name.split('_')[0].upper()
count = coupling[1] + (1 if receptor_name in gtp_couplings else 0)
# Data from at least two sources:
if count >= 2:
# Add to selectivity data (for tree)
if receptor_only not in selectivitydata_gtp_plus:
selectivitydata_gtp_plus[receptor_only] = []
if key not in selectivitydata_gtp_plus[receptor_only]:
selectivitydata_gtp_plus[receptor_only].append(key)
# Add to json data for Venn diagram
jsondata_gtp_plus[key].append(str(receptor_name) + '\n')
processed_receptors.append(receptor_name)
unique_gtp_plus = set(gtp_couplings) - set(processed_receptors)
for receptor_name in unique_gtp_plus:
receptor_dictionary.append(receptor_name)
receptor_only = receptor_name.split('_')[0].upper()
if receptor_only not in selectivitydata_gtp_plus:
selectivitydata_gtp_plus[receptor_only] = []
if key not in selectivitydata_gtp_plus[receptor_only]:
selectivitydata_gtp_plus[receptor_only].append(key)
jsondata_gtp_plus[key].append(str(receptor_name) + '\n')
tot += len(jsondata_gtp_plus[key])
txttot = ' '.join([txttot,' '.join(jsondata_gtp_plus[key]).replace('\n','')])
if len(jsondata_gtp_plus[key]) == 0:
jsondata_gtp_plus.pop(key, None)
table[key.replace('/','')].append((0,''))
else:
table[key.replace('/','')].append((len(jsondata_gtp_plus[key]), ' '.join(jsondata_gtp_plus[key]).replace('\n','')))
jsondata_gtp_plus[key] = ''.join(jsondata_gtp_plus[key])
tot = len(list(set(txttot.split(' ')))) -1
table['Total'].append((tot,txttot))
else: # an elif may be needed here if other signalling proteins are added
for arr in arrestins:
# arrestins?
arrestin_couplings = list(ProteinArrestinPair.objects.filter(protein__family__slug__startswith=slug, arrestin_subtype=arr)\
.filter(logmaxec50_deg__gt=0)\
.order_by("protein__entry_name")\
.values_list("protein__entry_name", flat=True)\
.distinct())
key = arrestin_translate[arr]
jsondata_gtp_plus[key] = []
for coupling in arrestin_couplings:
receptor_name = coupling
receptor_dictionary.append(receptor_name)
receptor_only = receptor_name.split('_')[0].upper()
if receptor_only not in selectivitydata_gtp_plus:
selectivitydata_gtp_plus[receptor_only] = []
if key not in selectivitydata_gtp_plus[receptor_only]:
selectivitydata_gtp_plus[receptor_only].append(key)
# Add to json data for Venn diagram
jsondata_gtp_plus[key].append(str(receptor_name) + '\n')
tot += len(jsondata_gtp_plus[key])
txttot = ' '.join([txttot,' '.join(jsondata_gtp_plus[key]).replace('\n','')])
if len(jsondata_gtp_plus[key]) == 0:
jsondata_gtp_plus.pop(key, None)
table[key.replace('-','')].append((0,''))
else:
table[key.replace('-','')].append((len(jsondata_gtp_plus[key]), ' '.join(jsondata_gtp_plus[key]).replace('\n','')))
jsondata_gtp_plus[key] = ''.join(jsondata_gtp_plus[key])
tot = len(list(set(txttot.split(' ')))) -1
table['Total'].append((tot,txttot))
for item in key_translate:
try:
jsondata_gtp_plus[key_translate[item]] = jsondata_gtp_plus.pop(item)
except KeyError:
continue
context[slug_translate[slug]+"_gtp_plus"] = jsondata_gtp_plus
context[slug_translate[slug]+"_gtp_plus_keys"] = list(jsondata_gtp_plus.keys())
for key in list(table.keys())[1:]:
table[key].append((sum([pair[0] for pair in table[key]]),' '.join([pair[1] for pair in table[key]])+' '))
# context["selectivitydata"] = selectivitydata
context["selectivitydata_gtp_plus"] = selectivitydata_gtp_plus
context["table"] = table
# Collect receptor information
receptor_panel = Protein.objects.filter(entry_name__in=receptor_dictionary)\
.prefetch_related("family", "family__parent__parent__parent")
receptor_dictionary = {}
for p in receptor_panel:
# Collect receptor data
rec_class = p.family.parent.parent.parent.short().split(' ')[0]
rec_ligandtype = p.family.parent.parent.short()
rec_family = p.family.parent.short()
rec_uniprot = p.entry_short()
rec_iuphar = p.family.name.replace("receptor", '').replace("<i>","").replace("</i>","").strip()
receptor_dictionary[rec_uniprot] = [rec_class, rec_ligandtype, rec_family, rec_uniprot, rec_iuphar]
whole_receptors = Protein.objects.prefetch_related("family", "family__parent__parent__parent").filter(sequence_type__slug="wt", family__slug__startswith="00")
whole_rec_dict = {}
for rec in whole_receptors:
rec_uniprot = rec.entry_short()
rec_iuphar = rec.family.name.replace("receptor", '').replace("<i>","").replace("</i>","").strip()
whole_rec_dict[rec_uniprot] = [rec_iuphar]
context["whole_receptors"] = json.dumps(whole_rec_dict)
context["receptor_dictionary"] = json.dumps(receptor_dictionary)
cache.set(name_of_cache, context, 60 * 60 * 24 * 7) # seven days timeout on cache
context["render_part"] = render_part
context["signalling_data"] = signalling_data
return render(request,
'signprot/coupling_profiles.html',
context
)
def GProteinTree(request):
return CouplingProfiles(request, "tree", "gprot")
def GProteinVenn(request):
return CouplingProfiles(request, "venn", "gprot")
def ArrestinTree(request):
return CouplingProfiles(request, "tree", "arrestin")
def ArrestinVenn(request):
return CouplingProfiles(request, "venn", "arrestin")
#@cache_page(60*60*24*7)
def familyDetail(request, slug):
# get family
pf = ProteinFamily.objects.get(slug=slug)
# get family list
ppf = pf
families = [ppf.name]
while ppf.parent.parent:
families.append(ppf.parent.name)
ppf = ppf.parent
families.reverse()
# number of proteins
proteins = Protein.objects.filter(family__slug__startswith=pf.slug, sequence_type__slug='wt')
no_of_proteins = proteins.count()
no_of_human_proteins = Protein.objects.filter(family__slug__startswith=pf.slug, species__id=1,
sequence_type__slug='wt').count()
list_proteins = list(proteins.values_list('pk', flat=True))
# get structures of this family
structures = SignprotStructure.objects.filter(protein__family__slug__startswith=slug)
complex_structures = SignprotComplex.objects.filter(protein__family__slug__startswith=slug)
mutations = MutationExperiment.objects.filter(protein__in=proteins).prefetch_related('residue__generic_number',
'exp_qual', 'ligand')
mutations_list = {}
for mutation in mutations:
if not mutation.residue.generic_number: continue # cant map those without display numbers
if mutation.residue.generic_number.label not in mutations_list: mutations_list[
mutation.residue.generic_number.label] = []
if mutation.ligand:
ligand = mutation.ligand.name
else:
ligand = ''
if mutation.exp_qual:
qual = mutation.exp_qual.qual
else:
qual = ''
mutations_list[mutation.residue.generic_number.label].append(
[mutation.foldchange, ligand.replace("'", "\\'"), qual])
interaction_list = {} ###FIXME - always empty
try:
pc = ProteinConformation.objects.get(protein__family__slug=slug, protein__sequence_type__slug='consensus')
except ProteinConformation.DoesNotExist:
try:
pc = ProteinConformation.objects.get(protein__family__slug=slug, protein__species_id=1,
protein__sequence_type__slug='wt')
except:
pc = None
p = None
if pc:
p = pc.protein
residues = Residue.objects.filter(protein_conformation=pc).order_by('sequence_number').prefetch_related(
'protein_segment', 'generic_number', 'display_generic_number')
jsondata = {}
jsondata_interaction = {}
for r in residues:
if r.generic_number:
if r.generic_number.label in mutations_list:
jsondata[r.sequence_number] = [mutations_list[r.generic_number.label]]
if r.generic_number.label in interaction_list:
jsondata_interaction[r.sequence_number] = interaction_list[r.generic_number.label]
# process residues and return them in chunks of 10
# this is done for easier scaling on smaller screens
chunk_size = 10
r_chunks = []
r_buffer = []
last_segment = False
border = False
title_cell_skip = 0
for i, r in enumerate(residues):
# title of segment to be written out for the first residue in each segment
segment_title = False
# keep track of last residues segment (for marking borders)
if r.protein_segment.slug != last_segment:
last_segment = r.protein_segment.slug
border = True
# if on a border, is there room to write out the title? If not, write title in next chunk
if i == 0 or (border and len(last_segment) <= (chunk_size - i % chunk_size)):
segment_title = True
border = False
title_cell_skip += len(last_segment) # skip cells following title (which has colspan > 1)
if i and i % chunk_size == 0:
r_chunks.append(r_buffer)
r_buffer = []
r_buffer.append((r, segment_title, title_cell_skip))
# update cell skip counter
if title_cell_skip > 0:
title_cell_skip -= 1
if r_buffer:
r_chunks.append(r_buffer)
context = {'pf': pf, 'families': families, 'structures': structures, 'no_of_proteins': no_of_proteins,
'no_of_human_proteins': no_of_human_proteins, 'mutations': mutations, 'r_chunks': r_chunks,
'chunk_size': chunk_size, 'p': p, 'complex_structures': complex_structures}
return render(request,
'signprot/family_details.html',
context
)
@cache_page(60 * 60 * 24 * 7)
def Ginterface(request, protein=None):
residuelist = Residue.objects.filter(protein_conformation__protein__entry_name=protein).prefetch_related(
'protein_segment', 'display_generic_number', 'generic_number')
SnakePlot = DrawSnakePlot(
residuelist, "Class A (Rhodopsin)", protein, nobuttons=1)
# TEST
gprotein_residues = Residue.objects.filter(protein_conformation__protein__entry_name='gnaz_human').prefetch_related(
'protein_segment', 'display_generic_number', 'generic_number')
gproteinplot = DrawGproteinPlot(
gprotein_residues, "Gprotein", protein)
crystal = Structure.objects.get(pdb_code__index="3SN6")
aa_names = definitions.AMINO_ACID_GROUP_NAMES_OLD
names_aa = dict(zip(aa_names.values(), aa_names.keys()))
names_aa['Polar (S/T)'] = 'pol_short'
names_aa['Polar (N/Q/H)'] = 'pol_long'
residues_browser = [
{'pos': 135, 'aa': 'I', 'gprotseg': "H5", 'segment': 'TM3', 'ligand': 'Gs', 'type': aa_names['hp'],
'gpcrdb': '3.54x54', 'gpnum': 'G.H5.16', 'gpaa': 'Q384', 'availability': 'interacting'},
{'pos': 136, 'aa': 'T', 'gprotseg': "H5", 'segment': 'TM3', 'ligand': 'Gs', 'type': 'Polar (S/T)',
'gpcrdb': '3.55x55', 'gpnum': 'G.H5.12', 'gpaa': 'R380', 'availability': 'interacting'},
{'pos': 139, 'aa': 'F', 'gprotseg': "H5", 'segment': 'ICL2', 'ligand': 'Gs', 'type': 'Aromatic',
'gpcrdb': '34.51x51', 'gpnum': 'G.H5.8', 'gpaa': 'F376', 'availability': 'interacting'},
{'pos': 139, 'aa': 'F', 'gprotseg': "S1", 'segment': 'ICL2', 'ligand': 'Gs', 'type': 'Aromatic',
'gpcrdb': '34.51x51', 'gpnum': 'G.S1.2', 'gpaa': 'H41', 'availability': 'interacting'},
{'pos': 141, 'aa': 'Y', 'gprotseg': "H5", 'segment': 'ICL2', 'ligand': 'Gs', 'type': 'Aromatic',
'gpcrdb': '34.53x53', 'gpnum': 'G.H5.19', 'gpaa': 'H387', 'availability': 'interacting'},
{'pos': 225, 'aa': 'E', 'gprotseg': "H5", 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Negative charge',
'gpcrdb': '5.64x64', 'gpnum': 'G.H5.12', 'gpaa': 'R380', 'availability': 'interacting'},
{'pos': 225, 'aa': 'E', 'gprotseg': "H5", 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Negative charge',
'gpcrdb': '5.64x64', 'gpnum': 'G.H5.16', 'gpaa': 'Q384', 'availability': 'interacting'},
{'pos': 229, 'aa': 'Q', 'gprotseg': "H5", 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Polar (N/Q/H)',
'gpcrdb': '5.68x68', 'gpnum': 'G.H5.13', 'gpaa': 'D381', 'availability': 'interacting'},
{'pos': 229, 'aa': 'Q', 'gprotseg': "H5", 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Polar (N/Q/H)',
'gpcrdb': '5.68x68', 'gpnum': 'G.H5.16', 'gpaa': 'Q384', 'availability': 'interacting'},
{'pos': 229, 'aa': 'Q', 'gprotseg': "H5", 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Polar (N/Q/H)',
'gpcrdb': '5.68x68', 'gpnum': 'G.H5.17', 'gpaa': 'R385', 'availability': 'interacting'},
{'pos': 274, 'aa': 'T', 'gprotseg': "H5", 'segment': 'TM6', 'ligand': 'Gs', 'type': 'Polar (S/T)',
'gpcrdb': '6.36x36', 'gpnum': 'G.H5.24', 'gpaa': 'E392', 'availability': 'interacting'},
{'pos': 328, 'aa': 'R', 'gprotseg': "H5", 'segment': 'TM7', 'ligand': 'Gs', 'type': 'Positive charge',
'gpcrdb': '7.55x55', 'gpnum': 'G.H5.24', 'gpaa': 'E392', 'availability': 'interacting'},
{'pos': 232, 'aa': 'K', 'segment': 'TM5', 'ligand': 'Gs', 'type': 'Positive charge', 'gpcrdb': '5.71x71',
'gprotseg': "H5", 'gpnum': 'G.H5.13', 'gpaa': 'D381', 'availability': 'interacting'}]
# accessible_gn = ['3.50x50', '3.53x53', '3.54x54', '3.55x55', '34.50x50', '34.51x51', '34.53x53', '34.54x54', '5.61x61', '5.64x64', '5.65x65', '5.67x67', '5.68x68', '5.71x71', '5.72x72', '5.74x74', '5.75x75', '6.29x29', '6.32x32', '6.33x33', '6.36x36', '6.37x37', '7.55x55', '8.48x48', '8.49x49']
accessible_gn = ['3.50x50', '3.53x53', '3.54x54', '3.55x55', '3.56x56', '34.50x50', '34.51x51', '34.52x52',
'34.53x53', '34.54x54', '34.55x55', '34.56x56', '34.57x57', '5.61x61', '5.64x64', '5.65x65',
'5.66x66', '5.67x67', '5.68x68', '5.69x69', '5.71x71', '5.72x72', '5.74x74', '5.75x75', '6.25x25',
'6.26x26', '6.28x28', '6.29x29', '6.32x32', '6.33x33', '6.36x36', '6.37x37', '6.40x40', '7.55x55',
'7.56x56', '8.47x47', '8.48x48', '8.49x49', '8.51x51']
exchange_table = OrderedDict([('hp', ('V', 'I', 'L', 'M')),
('ar', ('F', 'H', 'W', 'Y')),
('pol_short', ('S', 'T')), # Short/hydroxy
('pol_long', ('N', 'Q', 'H')), # Amino-like (both donor and acceptor
('neg', ('D', 'E')),
('pos', ('K', 'R'))])
interacting_gn = []
accessible_pos = list(
residuelist.filter(display_generic_number__label__in=accessible_gn).values_list('sequence_number', flat=True))
# Which of the Gs interacting_pos are conserved?
GS_none_equivalent_interacting_pos = []
GS_none_equivalent_interacting_gn = []
for interaction in residues_browser:
interacting_gn.append(interaction['gpcrdb'])
gs_b2_interaction_type_long = (
next((item['type'] for item in residues_browser if item['gpcrdb'] == interaction['gpcrdb']), None))
interacting_aa = residuelist.filter(display_generic_number__label__in=[interaction['gpcrdb']]).values_list(
'amino_acid', flat=True)
if interacting_aa:
interaction['aa'] = interacting_aa[0]
pos = \
residuelist.filter(display_generic_number__label__in=[interaction['gpcrdb']]).values_list(
'sequence_number',
flat=True)[0]
interaction['pos'] = pos
feature = names_aa[gs_b2_interaction_type_long]
if interacting_aa[0] not in exchange_table[feature]:
GS_none_equivalent_interacting_pos.append(pos)
GS_none_equivalent_interacting_gn.append(interaction['gpcrdb'])
GS_equivalent_interacting_pos = list(
residuelist.filter(display_generic_number__label__in=interacting_gn).values_list('sequence_number', flat=True))
gProteinData = ProteinGProteinPair.objects.filter(protein__entry_name=protein)
primary = []
secondary = []
for entry in gProteinData:
if entry.transduction == 'primary':
primary.append((entry.g_protein.name.replace("Gs", "G<sub>s</sub>").replace("Gi", "G<sub>i</sub>").replace(
"Go", "G<sub>o</sub>").replace("G11", "G<sub>11</sub>").replace("G12", "G<sub>12</sub>").replace("G13",
"G<sub>13</sub>").replace(
"Gq", "G<sub>q</sub>").replace("G", "Gα"), entry.g_protein.slug))
elif entry.transduction == 'secondary':
secondary.append((
entry.g_protein.name.replace("Gs", "G<sub>s</sub>").replace("Gi", "G<sub>i</sub>").replace(
"Go", "G<sub>o</sub>").replace("G11", "G<sub>11</sub>").replace("G12",
"G<sub>12</sub>").replace(
"G13", "G<sub>13</sub>").replace("Gq", "G<sub>q</sub>").replace("G", "Gα"),
entry.g_protein.slug))
return render(request,
'signprot/ginterface.html',
{'pdbname': '3SN6',
'snakeplot': SnakePlot,
'gproteinplot': gproteinplot,
'crystal': crystal,
'interacting_equivalent': GS_equivalent_interacting_pos,
'interacting_none_equivalent': GS_none_equivalent_interacting_pos,
'accessible': accessible_pos,
'residues': residues_browser,
'mapped_protein': protein,
'interacting_gn': GS_none_equivalent_interacting_gn,
'primary_Gprotein': set(primary),
'secondary_Gprotein': set(secondary)}
)
def ajaxInterface(request, slug, **response_kwargs):
name_of_cache = 'ajaxInterface_' + slug
jsondata = cache.get(name_of_cache)
if jsondata == None:
p = Protein.objects.filter(entry_name=slug).get()
if p.family.slug.startswith('200'):
rsets = ResiduePositionSet.objects.get(name="Arrestin interface")
else:
rsets = ResiduePositionSet.objects.get(name="Gprotein Barcode")
jsondata = {}
for x, residue in enumerate(rsets.residue_position.all()):
try:
pos = str(list(Residue.objects.filter(protein_conformation__protein__entry_name=slug,
display_generic_number__label=residue.label))[0])
except:
print("Protein has no residue position at", residue.label)
a = pos[1:]
jsondata[a] = [5, 'Receptor interface position', residue.label]
jsondata = json.dumps(jsondata)
cache.set(name_of_cache, jsondata, 60 * 60 * 24 * 2) # two days timeout on cache
response_kwargs['content_type'] = 'application/json'
return HttpResponse(jsondata, **response_kwargs)
def ajaxBarcode(request, slug, cutoff, **response_kwargs):
name_of_cache = 'ajaxBarcode_' + slug + cutoff
jsondata = cache.get(name_of_cache)
if jsondata == None:
jsondata = {}
selectivity_pos = list(
SignprotBarcode.objects.filter(protein__entry_name=slug, seq_identity__gte=cutoff).values_list(
'residue__display_generic_number__label', flat=True))
conserved = list(SignprotBarcode.objects.filter(protein__entry_name=slug, paralog_score__gte=cutoff,
seq_identity__gte=cutoff).prefetch_related(
'residue__display_generic_number').values_list('residue__display_generic_number__label', flat=True))
na_data = list(
SignprotBarcode.objects.filter(protein__entry_name=slug, seq_identity=0, paralog_score=0).values_list(
'residue__display_generic_number__label', flat=True))
all_positions = Residue.objects.filter(protein_conformation__protein__entry_name=slug).prefetch_related(
'display_generic_number')
for res in all_positions:
cgn = str(res.generic_number)
res = str(res.sequence_number)
if cgn in conserved:
jsondata[res] = [0, 'Conserved', cgn]
elif cgn in selectivity_pos and cgn not in conserved:
jsondata[res] = [1, 'Selectivity determining', cgn]
elif cgn in na_data:
jsondata[res] = [3, 'NA', cgn]
else:
jsondata[res] = [2, 'Evolutionary neutral', cgn]
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
cache.set(name_of_cache, jsondata, 60 * 60 * 24 * 2) # two days timeout on cache
return HttpResponse(jsondata, **response_kwargs)
@cache_page(60 * 60 * 24 * 7)
def StructureInfo(request, pdbname):
"""
Show structure details
"""
#protein = Protein.objects.get(signprotstructure__pdb_code__index=pdbname)
protein = Protein.objects.filter(signprotstructure__pdb_code__index=pdbname).first()
#crystal = SignprotStructure.objects.get(pdb_code__index=pdbname)
crystal = SignprotStructure.objects.filter(pdb_code__index=pdbname).first()
return render(request,
'signprot/structure_info.html',
{'pdbname': pdbname,
'protein': protein,
'crystal': crystal}
)
# @cache_page(60*60*24*2)
def signprotdetail(request, slug):
# get protein
slug = slug.lower()
p = Protein.objects.prefetch_related('web_links__web_resource').get(entry_name=slug, sequence_type__slug='wt')
# Redirect to protein page
if p.family.slug.startswith("00"):
return redirect("/protein/"+slug)
# get family list
pf = p.family
families = [pf.name]
while pf.parent.parent:
families.append(pf.parent.name)
pf = pf.parent
families.reverse()
# get protein aliases
aliases = ProteinAlias.objects.filter(protein=p).values_list('name', flat=True)
# get genes
genes = Gene.objects.filter(proteins=p).values_list('name', flat=True)
gene = ""
alt_genes = ""
if len(genes) > 0:
gene = genes[0]
alt_genes = genes[1:]
# get structures of this signal protein
structures = SignprotStructure.objects.filter(protein=p)
complex_structures = SignprotComplex.objects.filter(protein=p)
# mutations
mutations = MutationExperiment.objects.filter(protein=p)
# get residues
pc = ProteinConformation.objects.get(protein=p)
residues = Residue.objects.filter(protein_conformation=pc).order_by('sequence_number').prefetch_related(
'protein_segment', 'generic_number', 'display_generic_number')
# process residues and return them in chunks of 10
# this is done for easier scaling on smaller screens
chunk_size = 10
r_chunks = []
r_buffer = []
last_segment = False
border = False
title_cell_skip = 0
for i, r in enumerate(residues):
# title of segment to be written out for the first residue in each segment
segment_title = False
# keep track of last residues segment (for marking borders)
if r.protein_segment.slug != last_segment:
last_segment = r.protein_segment.slug
border = True
# if on a border, is there room to write out the title? If not, write title in next chunk
if i == 0 or (border and len(last_segment) <= (chunk_size - i % chunk_size)):
segment_title = True
border = False
title_cell_skip += len(last_segment) # skip cells following title (which has colspan > 1)
if i and i % chunk_size == 0:
r_chunks.append(r_buffer)
r_buffer = []
r_buffer.append((r, segment_title, title_cell_skip))
# update cell skip counter
if title_cell_skip > 0:
title_cell_skip -= 1
if r_buffer:
r_chunks.append(r_buffer)
context = {'p': p, 'families': families, 'r_chunks': r_chunks, 'chunk_size': chunk_size, 'aliases': aliases,
'gene': gene, 'alt_genes': alt_genes, 'structures': structures, 'complex_structures': complex_structures,
'mutations': mutations}
return render(request,
'signprot/signprot_details.html',
context
)
def sort_a_by_b(a, b, remove_invalid=False):
'''Sort one list based on the order of elements from another list'''
# https://stackoverflow.com/q/12814667
# a = ['alpha_mock', 'van-der-waals', 'ionic']
# b = ['ionic', 'aromatic', 'hydrophobic', 'polar', 'van-der-waals', 'alpha_mock']
# sort_a_by_b(a,b) -> ['ionic', 'van-der-waals', 'alpha_mock']
if remove_invalid:
a = [a_elem for a_elem in a if a_elem in b]
return sorted(a, key=lambda x: b.index(x))
def interface_dataset():
# correct receptor entry names - the ones with '_a' appended
complex_objs = SignprotComplex.objects.prefetch_related('structure__protein_conformation__protein')
# TOFIX: Current workaround is forcing _a to pdb for indicating alpha-subunit
# complex_names = [complex_obj.structure.protein_conformation.protein.entry_name + '_' + complex_obj.alpha.lower() for
# complex_obj in complex_objs]
complex_names = [complex_obj.structure.protein_conformation.protein.entry_name + '_a' for
complex_obj in complex_objs]
complex_struc_ids = [co.structure_id for co in complex_objs]
# protein conformations for those
prot_conf = ProteinConformation.objects.filter(protein__entry_name__in=complex_names).values_list('id', flat=True)
interaction_sort_order = [
"ionic",
"aromatic",
"polar",
"hydrophobic",
"van-der-waals",
]
# getting all the signal protein residues for those protein conformations
prot_residues = Residue.objects.filter(
protein_conformation__in=prot_conf
).values_list('id', flat=True)
interactions = InteractingResiduePair.objects.filter(
Q(res1__in=prot_residues) | Q(res2__in=prot_residues),
referenced_structure__in=complex_struc_ids
).exclude(
Q(res1__in=prot_residues) & Q(res2__in=prot_residues)
).prefetch_related(
'interaction__interaction_type',
'referenced_structure__pdb_code__index',
'referenced_structure__signprot_complex__protein__entry_name',
'referenced_structure__protein_conformation__protein__parent__entry_name',
'res1__amino_acid',
'res1__sequence_number',
'res1__generic_number__label',
'res2__amino_acid',
'res2__sequence_number',
'res2__generic_number__label',
).order_by(
'res1__generic_number__label',
'res2__generic_number__label'
).values(
int_id=F('id'),
int_ty=ArrayAgg(
'interaction__interaction_type',
distinct=True,
# ordering=interaction_sort_order
),
pdb_id=F('referenced_structure__pdb_code__index'),
conf_id=F('referenced_structure__protein_conformation_id'),
gprot=F('referenced_structure__signprot_complex__protein__entry_name'),
entry_name=F('referenced_structure__protein_conformation__protein__parent__entry_name'),
rec_aa=F('res1__amino_acid'),
rec_pos=F('res1__sequence_number'),
rec_gn=F('res1__generic_number__label'),
sig_aa=F('res2__amino_acid'),
sig_pos=F('res2__sequence_number'),
sig_gn=F('res2__generic_number__label')
)
conf_ids = set()
for i in interactions:
i['int_ty'] = sort_a_by_b(i['int_ty'], interaction_sort_order)
conf_ids.update([i['conf_id']])
return list(conf_ids), list(interactions)
@cache_page(60 * 60 * 24 * 7)
def InteractionMatrix(request):
prot_conf_ids, dataset = interface_dataset()
gprotein_order = ProteinSegment.objects.filter(proteinfamily='Alpha').values('id', 'slug')
receptor_order = ['N', '1', '12', '2', '23', '3', '34', '4', '45', '5', '56', '6', '67', '7', '78', '8', 'C']
struc = SignprotComplex.objects.prefetch_related(
'structure__pdb_code',
'structure__stabilizing_agents',
'structure__protein_conformation__protein__species',
'structure__protein_conformation__protein__parent__parent__parent',
'structure__protein_conformation__protein__family__parent__parent__parent__parent',
'structure__stabilizing_agents',
'structure__signprot_complex__protein__family__parent__parent__parent__parent',
)
complex_info = []
for s in struc:
r = {}
s = s.structure
r['pdb_id'] = s.pdb_code.index
r['name'] = s.protein_conformation.protein.parent.short()
r['entry_name'] = s.protein_conformation.protein.parent.entry_name
r['class'] = s.protein_conformation.protein.get_protein_class()
r['family'] = s.protein_conformation.protein.get_protein_family()
r['conf_id'] = s.protein_conformation.id
r['organism'] = s.protein_conformation.protein.species.common_name
try:
r['gprot'] = s.get_stab_agents_gproteins()
except Exception:
r['gprot'] = ''
try:
r['gprot_class'] = s.get_signprot_gprot_family()
except Exception:
r['gprot_class'] = ''
complex_info.append(r)
remaining_residues = Residue.objects.filter(
protein_conformation_id__in=prot_conf_ids,
).prefetch_related(
"protein_conformation",
"protein_conformation__protein",
"protein_conformation__structure"
).values(
rec_id=F('protein_conformation__protein__id'),
name=F('protein_conformation__protein__parent__name'),
entry_name=F('protein_conformation__protein__parent__entry_name'),
pdb_id=F('protein_conformation__structure__pdb_code__index'),
rec_aa=F('amino_acid'),
rec_gn=F('generic_number__label'),
).exclude(
Q(rec_gn=None)
)
context = {
'interactions': json.dumps(dataset),
'interactions_metadata': json.dumps(complex_info),
'non_interactions': json.dumps(list(remaining_residues)),
'gprot': json.dumps(list(gprotein_order)),
'receptor': json.dumps(receptor_order),
}
request.session['signature'] = None
request.session.modified = True
return render(request,
'signprot/matrix.html',
context
)
@method_decorator(csrf_exempt)
def IMSequenceSignature(request):
"""Accept set of proteins + generic numbers and calculate the signature for those"""
t1 = time.time()
pos_set_in = get_entry_names(request)
ignore_in_alignment = get_ignore_info(request)
segments = get_protein_segments(request)
if len(segments) == 0:
segments = list(ResidueGenericNumberEquivalent.objects.filter(scheme__slug__in=['gpcrdba']))
# get pos objects
pos_set = Protein.objects.filter(entry_name__in=pos_set_in).select_related('residue_numbering_scheme', 'species')
# Calculate Sequence Signature
signature = SequenceSignature()
# WHY IS THIS IGNORE USED -> it ignores counting of proteins for residue positions instead of ignoring residue positions
ignore_in_alignment = {}
signature.setup_alignments_signprot(segments, pos_set, ignore_in_alignment=ignore_in_alignment)
signature.calculate_signature_onesided()
# preprocess data for return
signature_data = signature.prepare_display_data_onesided()
# FEATURES AND REGIONS
feats = [feature for feature in signature_data['a_pos'].features_combo]
# GET GENERIC NUMBERS
generic_numbers = get_generic_numbers(signature_data)
# FEATURE FREQUENCIES
signature_features = get_signature_features(signature_data, generic_numbers, feats)
grouped_features = group_signature_features(signature_features)
# # FEATURE CONSENSUS
# generic_numbers_flat = list(chain.from_iterable(generic_numbers))
# sigcons = get_signature_consensus(signature_data, generic_numbers_flat)
# rec_class = pos_set[0].get_protein_class()
# dump = {
# 'rec_class': rec_class,
# 'signature': signature,
# 'consensus': signature_data,
# }
# with open('signprot/notebooks/interface_pickles/{}.p'.format(rec_class), 'wb+') as out_file:
# pickle.dump(dump, out_file)
# pass back to front
res = {
# 'cons': sigcons,
'feat_ungrouped': signature_features,
'feat': grouped_features,
}
request.session['signature'] = signature.prepare_session_data()
request.session.modified = True
t2 = time.time()
print('Runtime: {}'.format((t2 - t1) * 1000.0))
return JsonResponse(res, safe=False)
@method_decorator(csrf_exempt)
def IMSignatureMatch(request):
'''Take the signature stored in the session and query the db'''
signature_data = request.session.get('signature')
ss_pos = get_entry_names(request)
cutoff = request.POST.get('cutoff')
request.session['ss_pos'] = ss_pos
request.session['cutoff'] = cutoff
pos_set = Protein.objects.filter(entry_name__in=ss_pos).select_related('residue_numbering_scheme', 'species')
pos_set = [protein for protein in pos_set]
pfam = [protein.family.slug[:3] for protein in pos_set]
signature_match = SignatureMatch(
signature_data['common_positions'],
signature_data['numbering_schemes'],
signature_data['common_segments'],
signature_data['diff_matrix'],
pos_set,
# pos_set,
cutoff=0,
signprot=True
)
maj_pfam = Counter(pfam).most_common()[0][0]
signature_match.score_protein_class(maj_pfam, signprot=True)
# request.session['signature_match'] = signature_match
signature_match = {
'scores': signature_match.protein_report,
'scores_pos': signature_match.scores_pos,
# 'scores_neg': signature_match.scores_neg,
'protein_signatures': signature_match.protein_signatures,
'signatures_pos': signature_match.signatures_pos,
# 'signatures_neg': signature_match.signatures_neg,
'signature_filtered': signature_match.signature_consensus,
'relevant_gn': signature_match.relevant_gn,
'relevant_segments': signature_match.relevant_segments,
'numbering_schemes': signature_match.schemes,
}
signature_match = prepare_signature_match(signature_match)
return JsonResponse(signature_match, safe=False)
@method_decorator(csrf_exempt)
def render_IMSigMat(request):
# signature_match = request.session.get('signature_match')
signature_data = request.session.get('signature')
ss_pos = request.session.get('ss_pos')
cutoff = request.session.get('cutoff')
pos_set = Protein.objects.filter(entry_name__in=ss_pos).select_related('residue_numbering_scheme', 'species')
pos_set = [protein for protein in pos_set]
pfam = [protein.family.slug[:3] for protein in pos_set]
signature_match = SignatureMatch(
signature_data['common_positions'],
signature_data['numbering_schemes'],
signature_data['common_segments'],
signature_data['diff_matrix'],
pos_set,
# pos_set,
cutoff=0,
signprot=True
)
maj_pfam = Counter(pfam).most_common()[0][0]
signature_match.score_protein_class(maj_pfam, signprot=True)
response = render(
request,
'signprot/signature_match.html',
{'scores': signature_match}
)
return response
```
#### File: management/commands/build_bias_data.py
```python
from decimal import Decimal
import logging
import math
from build.management.commands.base_build import Command as BaseBuild
from protein.models import ProteinGProteinPair
from ligand.models import Ligand, BiasedExperiment, AnalyzedExperiment,AnalyzedAssay
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Reads bias data and imports it'
publication_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
# delete any existing structure data
if options['purge']:
try:
print('Started purging bias data')
self.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
# import the structure data
print('CREATING BIAS DATA')
print(options['filename'])
self.build_bias_data()
self.build_bias_data_subtypes()
self.logger.info('COMPLETED CREATING BIAS DATA')
# pylint: disable=R0201
def purge_bias_data(self):
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
self.logger.info('Data is purged')
def get_from_model(self):
try:
content = BiasedExperiment.objects.all().prefetch_related(
'experiment_data', 'ligand', 'receptor', 'publication'
, 'publication__web_link'
, 'experiment_data__emax_ligand_reference',
).order_by('publication', 'receptor', 'ligand')
except BiasedExperiment.DoesNotExist:
self.logger.info('Data is not returned')
content = None
return content
def process_data(self, content):
rd = []
counter = 0
for instance in enumerate(content):
temp_obj = []
fin_obj = {}
fin_obj['main'] = (instance[1])
vendor_counter = 0
vendors_quantity = None
for i in instance[1].experiment_data_vendors.all():
vendor_counter = vendor_counter + 1
if not vendor_counter:
vendors_quantity = i
self.logger.info(vendors_quantity)
for entry in instance[1].experiment_data.all():
author_list = list()
for author in entry.experiment_data_authors.all():
author_list.append(author.author)
temp_obj.append(entry)
counter += 1
fin_obj['authors'] = author_list
fin_obj['children'] = temp_obj
fin_obj['vendor_counter'] = vendor_counter
rd.append(fin_obj)
self.logger.info('Return dict is returned')
return rd
def queryset_to_dict(self, results):
'''
        Merge bias experiment data with assay data
'''
send = list()
for j in results:
temp_dict = dict()
temp = dict()
doubles = []
temp['ligand_source_id'] = None
temp['publication'] = j['main'].publication
temp['species'] = j['main'].receptor.species.common_name
# temp['ligand'] = j['main'].ligand
temp['endogenous_ligand'] = j['main'].endogenous_ligand
temp['receptor'] = j['main'].receptor
temp['assay'] = dict()
temp['vendor_counter'] = j['vendor_counter']
temp['reference'] = list()
temp['authors'] = j['authors']
temp['ref_ligand_experiment'] = dict()
temp['article_quantity'] = 0
temp['labs_quantity'] = 0
temp['ligand_source_id'] = j['main'].ligand_source_id
temp['ligand_source_type'] = j['main'].ligand_source_type
temp['reference_ligand'] = None
if not j['children']:
continue
temp_dict['potency'] = ''
temp_dict['t_factor'] = ''
temp_dict['log_bias_factor'] = ''
temp_dict['order_no'] = 0
temp_dict['reference_ligand'] = None
temp_dict['signalling_protein'] = j['children'][0].signalling_protein.lower()
temp_dict['cell_line'] = j['children'][0].cell_line
temp_dict['family'] = j['children'][0].family
temp_dict['assay_type'] = j['children'][0].assay_type
temp_dict['assay_measure_method'] = j['children'][0].assay_measure
temp_dict['assay_time_resolved'] = j['children'][0].assay_time_resolved
if j['children'][0].quantitive_activity:
temp_dict['quantitive_activity'] = j['children'][0].quantitive_activity
temp_dict['quantitive_activity_initial'] = j['children'][0].quantitive_activity
else:
temp_dict['quantitive_activity'] = None
temp_dict['quantitive_activity_initial'] = None
temp_dict['qualitative_activity'] = j['children'][0].qualitative_activity
temp_dict['quantitive_unit'] = j['children'][0].quantitive_unit
temp_dict['quantitive_efficacy'] = j['children'][0].quantitive_efficacy
temp_dict['efficacy_unit'] = j['children'][0].efficacy_unit
temp_dict['quantitive_measure_type'] = j['children'][0].quantitive_measure_type
temp_dict['efficacy_measure_type'] = j['children'][0].efficacy_measure_type
temp_dict['t_coefficient'] = j['children'][0].bias_value
temp_dict['t_coefficient_initial'] = j['children'][0].bias_value_initial
temp_dict['bias_reference'] = j['children'][0].bias_reference
temp_dict['emax_reference_ligand'] = j['children'][0].emax_ligand_reference
temp_dict['ligand_function'] = j['children'][0].ligand_function
temp_dict['ligand'] = j['main'].ligand
if (temp_dict['quantitive_activity_initial'] and
temp_dict['quantitive_measure_type'] != "Effect at single point measurement"):
temp_dict['quantitive_activity_initial'] = (-1) * math.log10(
temp_dict['quantitive_activity_initial'])
temp_dict['quantitive_activity_initial'] = "{:.2F}".format(
Decimal(temp_dict['quantitive_activity_initial']))
temp['ref_ligand_experiment'] = j['children'][0].emax_ligand_reference
doubles.append(temp_dict)
temp['assay'] = doubles
send.append(temp)
self.logger.info('Queryset processed')
return send
def combine_unique(self, data):
'''
combining tested assays and reference assays
'''
context = dict()
for j in data:
name = str(j['publication'].id) + \
'/' + '/' + str(j['receptor'].id)
temp_obj = list()
if name in context:
temp_obj = context[name]['assay']
for i in j['assay']:
temp_obj.append(i)
context[name] = j
context[name]['assay'] = temp_obj
self.logger.info('Combined experiments by publication and receptor')
return context
def process_referenced_assays(self, data):
'''
separate tested assays and reference assays
'''
for j in data.items():
assays, reference = self.return_refenced_assays(j[1]['assay'])
j[1]['assay_list'] = assays
j[1]['reference_assays_list'] = reference
return data
def return_refenced_assays(self, assays):
# pylint: disable=no-member
main, reference = list(), list()
for assay in assays:
if assay['bias_reference'] != '':
reference.append(assay)
else:
main.append(assay)
sorted_main = sorted(main, key=lambda k: k['quantitive_activity']
if k['quantitive_activity'] else 999999, reverse=True)
sorted_reference = reference
if len(sorted_reference) == 0:
self.get_reference_from_emax(assays)
# if len(sorted_reference) == 0:
# print('implementation required')
return sorted_main, sorted_reference
def filter_reference_assay(self, reference_return, reference_ligand):
reference_return[:] = [d for d in reference_return if d.get('ligand') == reference_ligand]
self.logger.info('Trying to get reference from assays')
return reference_return
def filter_assay_reference(self, assay_return, reference_ligand):
assay_return[:] = [d for d in assay_return if d.get('ligand') != reference_ligand]
self.logger.info('Trying to get filtering references from assays')
return assay_return
def chose_reference_from_assays(self, assays):
references = list()
final_assay = list()
reference_ligand = Ligand()
for i in reversed(assays):
if (i['quantitive_activity'] and i['quantitive_activity'] is not None and
i['quantitive_efficacy'] and i['quantitive_efficacy'] is not None and
i['ligand'] is not None):
reference_ligand=i['ligand']
reference_return = assays.copy()
assay_return = assays.copy()
references=self.filter_reference_assay(reference_return,reference_ligand)
final_assay=self.filter_assay_reference(assay_return,reference_ligand)
self.logger.info('return reference assay')
return references, final_assay
def get_reference_from_emax(self, assays):
reference_ligand = list()
for i in assays:
if i['emax_reference_ligand'] == i['ligand']:
reference_ligand.append(i)
self.logger.info('return reference emax')
return reference_ligand
def separate_ligands(self, context):
content = dict()
for i in context.items():
for assay in i[1]['assay_list']:
name = str(i[1]['publication'].id) + \
'/'+ str(assay['ligand'].id) + '/' + str(i[1]['receptor'].id)
if name in content:
content[name]['assay_list'].append(assay)
else:
content[name] = dict()
content[name]['publication'] = i[1]['publication']
content[name]['ligand'] = assay['ligand']
content[name]['endogenous_ligand'] = i[1]['endogenous_ligand']
content[name]['receptor'] = i[1]['receptor']
content[name]['vendor_counter'] = i[1]['vendor_counter']
content[name]['authors'] = i[1]['authors']
content[name]['ref_ligand_experiment'] = i[1]['ref_ligand_experiment']
content[name]['article_quantity'] = i[1]['article_quantity']
content[name]['labs_quantity'] = i[1]['labs_quantity']
content[name]['assay_list'] = list()
content[name]['ligand_source_type'] = i[1]['ligand_source_type']
content[name]['ligand_source_id'] = i[1]['ligand_source_id']
content[name]['assay_list'].append(assay)
content[name]['reference_assays_list'] = i[1]['reference_assays_list']
content[name]['assay'] = i[1]['assay']
return content
def limit_family_set(self, assay_list):
# pylint: disable=no-member
families = list()
proteins = set()
for assay in assay_list:
if assay['family'] not in proteins:
proteins.add(assay['family'])
families.append(assay)
else:
compare_val = next(item for item in families if item["family"] == assay['family'])
try:
if assay['quantitive_activity'] < compare_val['quantitive_activity']:
families[:] = [d for d in families if d.get('family') != compare_val['family']]
families.append(assay)
except TypeError:
self.logger.info('skipping families if existing copy')
continue
return families
def limit_family_set_subs(self, assay_list):
families = list()
proteins = set()
for assay in assay_list:
if assay['signalling_protein'] not in proteins:
proteins.add(assay['signalling_protein'])
families.append(assay)
else:
compare_val = next(item for item in families if item["signalling_protein"] == assay['signalling_protein'])
try:
if assay['quantitive_activity'] < compare_val['quantitive_activity']:
families[:] = [d for d in families if d.get('signalling_protein') != compare_val['signalling_protein']]
families.append(assay)
except:
families.append(assay)
self.logger.info('limit_family_set_subs error')
return families
def process_calculation(self, context):
for i in context.items():
test = dict()
temp_obj = list()
for j in i[1]['assay_list']:
if j not in temp_obj:
temp_obj.append(j)
else:
pass
i[1]['assay_list'] = temp_obj
test = sorted(i[1]['assay_list'], key=lambda k: k['quantitive_activity']
if k['quantitive_activity'] else 999999, reverse=False)
for item in enumerate(test):
item[1]['order_no'] = item[0]
i[1]['biasdata'] = test
i[1]['reference_lists'] = list()
i[1].pop('assay_list')
# calculate log bias
i[1]['reference_lists'] = self.calc_bias_factor(i[1]['biasdata'], i[1]['reference_assays_list'], i[1]['assay'])
# recalculates lbf if it is negative
i[1]['biasdata'] = self.validate_lbf(i)
self.calc_potency_and_transduction(i[1]['biasdata'])
return context
# pylint: disable=C0301
def calc_bias_factor(self, biasdata, reference, assay):
reference_lists = list()
most_reference = dict()
most_potent = dict()
for i in biasdata:
if i['order_no'] == 0:
most_potent = i
most_reference = self.get_reference_assay(reference, most_potent)
reference_lists.append(most_reference)
i['log_bias_factor'] = None
for i in biasdata:
if i['order_no'] != 0:
temp_reference = self.get_reference_assay(reference, i)
reference_lists.append(temp_reference)
try:
if (i['quantitive_measure_type'].lower() == 'ec50' and temp_reference['quantitive_measure_type'].lower() == 'ec50' and
most_potent['quantitive_measure_type'].lower() == 'ec50' and most_reference['quantitive_measure_type'].lower() == 'ec50'):
a = 0
b = 0
c = 0
d = 0
a = math.log10(
most_potent['quantitive_efficacy'] / most_potent['quantitive_activity'])
b = math.log10(
most_reference['quantitive_efficacy'] / most_reference['quantitive_activity'])
c = math.log10(
i['quantitive_efficacy'] / i['quantitive_activity'])
d = math.log10(
temp_reference['quantitive_efficacy'] / temp_reference['quantitive_activity'])
temp_calculation = self.caclulate_bias_factor_variables(
a, b, c, d)
i['log_bias_factor'] = round(temp_calculation, 1)
elif (i['quantitive_measure_type'].lower() == 'ic50' and temp_reference['quantitive_measure_type'].lower() == 'ic50'):
i['log_bias_factor'] = 'Only agonist in main pathway'
except:
try:
if i['qualitative_activity'] == 'No activity':
i['log_bias_factor'] = "Full Bias"
elif i['qualitative_activity'] == 'Low activity':
i['log_bias_factor'] = "High Bias"
elif i['qualitative_activity'] == 'High activity':
i['log_bias_factor'] = "Low Bias"
except:
i['log_bias_factor'] = None
return reference_lists
def get_reference_assay(self, reference, assay):
return_assay = dict()
try:
for i in reference:
if i['signalling_protein'] == assay['signalling_protein']:
if i['assay_type'] == assay['assay_type']:
return_assay = i
except:
self.logger.info('get_reference_assay error')
return return_assay
return return_assay
def caclulate_bias_factor_variables(self, a, b, c, d):
'''
calculations for log bias factor inputs
'''
lgb = 0
try:
lgb = (a - b) - (c - d)
except:
lgb = 0
self.logger.info('caclulate_bias_factor_variables error')
return lgb
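    # Illustrative example (added; not part of the original source): a, b, c, d are the
    # log10(Emax / EC50) terms computed in calc_bias_factor, so with a=2.0, b=1.5 for the
    # most potent pathway (ligand vs. reference) and c=1.2, d=1.0 for the compared pathway,
    # lgb = (2.0 - 1.5) - (1.2 - 1.0) = 0.3.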
def calc_potency_and_transduction(self, biasdata):
count = 0
most_potent = dict()
for i in biasdata:
count += 1
if i['order_no'] == 0:
most_potent = i
# T_factor -- bias factor
for i in biasdata:
if i['order_no'] > 0:
try:
if i['quantitive_measure_type'].lower() == 'ec50' or i['quantitive_measure_type'].lower() == 'ic50':
if i['quantitive_activity'] is not None and i['quantitive_activity'] != 0 and most_potent['quantitive_activity'] is not None:
i['potency'] = round(
i['quantitive_activity'] / most_potent['quantitive_activity'], 1)
elif i['quantitive_measure_type'].lower() == 'pec50' or i['quantitive_measure_type'].lower() == 'pic50':
i['potency'] = round(
most_potent['quantitive_activity'] - i['quantitive_activity'], 1)
except:
i['potency'] = None
if i['t_coefficient'] is not None and most_potent['t_coefficient'] is not None:
i['t_factor'] = round(
most_potent['t_coefficient'] - i['t_coefficient'], 1)
else:
i['t_factor'] = None
self.logger.info('t_factor error')
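    # Illustrative example (added; not part of the original source): for p-scale values the
    # relative potency above is a difference in log units, e.g. a most-potent-pathway pEC50
    # of 8.2 and a compared-pathway pEC50 of 7.0 give potency = round(8.2 - 7.0, 1) = 1.2.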
def validate_lbf(self, i):
for x in i[1]['biasdata']:
if isinstance(x['log_bias_factor'], float):
if x['log_bias_factor'] < 0.0:
j = next((item for item in i[1]['biasdata'] if item["order_no"] == 0), None)
x['order_no'], j['order_no'] = j['order_no'], x['order_no']
self.calc_bias_factor(i[1]['biasdata'], i[1]['reference_assays_list'], i[1]['assay'])
self.validate_lbf(i)
else:
self.logger.info('validate_lbf error')
return i[1]['biasdata']
return i[1]['biasdata']
def save_data_to_model(self, context, source):
for i in context['data'].items():
if self.fetch_experiment(i[1]['publication'], i[1]['ligand'], i[1]['receptor'], source) == False:
primary, secondary = self.fetch_receptor_trunsducers(
i[1]['receptor'])
if len(i[1]['biasdata']) > 1:
experiment_entry = AnalyzedExperiment(publication=i[1]['publication'],
ligand=i[1]['ligand'],
receptor=i[1]['receptor'],
source=source,
endogenous_ligand=i[1]['endogenous_ligand'],
vendor_quantity=i[1]['vendor_counter'],
reference_ligand=i[1]['ref_ligand_experiment'],
primary=primary,
secondary=secondary,
article_quantity=i[1]['article_quantity'],
labs_quantity=i[1]['labs'],
ligand_source_id = i[1]['ligand_source_id'],
ligand_source_type = i[1]['ligand_source_type']
)
experiment_entry.save()
for ex in i[1]['biasdata']:
emax_ligand = ex['emax_reference_ligand']
experiment_assay = AnalyzedAssay(experiment=experiment_entry,
family=ex['family'],
order_no=ex['order_no'],
signalling_protein=ex['signalling_protein'],
cell_line=ex['cell_line'],
assay_type=ex['assay_type'],
assay_measure=ex['assay_measure_method'],
assay_time_resolved=ex['assay_time_resolved'],
ligand_function=ex['ligand_function'],
quantitive_measure_type=ex['quantitive_measure_type'],
quantitive_activity=ex['quantitive_activity'],
quantitive_activity_initial=ex['quantitive_activity_initial'],
quantitive_unit=ex['quantitive_unit'],
qualitative_activity=ex['qualitative_activity'],
quantitive_efficacy=ex['quantitive_efficacy'],
efficacy_measure_type=ex['efficacy_measure_type'],
efficacy_unit=ex['efficacy_unit'],
potency=ex['potency'],
t_coefficient=ex['t_coefficient'],
t_value=ex['t_coefficient_initial'],
t_factor=ex['t_factor'],
log_bias_factor=ex['log_bias_factor'],
emax_ligand_reference=emax_ligand
)
experiment_assay.save()
for ex in i[1]['reference_lists']:
try:
emax_ligand = ex['emax_reference_ligand']
experiment_assay = AnalyzedAssay(experiment=experiment_entry,
assay_description='reference_assay',
family=ex['family'],
order_no=ex['order_no'],
signalling_protein=ex['signalling_protein'],
cell_line=ex['cell_line'],
assay_type=ex['assay_type'],
assay_measure=ex['assay_measure_method'],
assay_time_resolved=ex['assay_time_resolved'],
ligand_function=ex['ligand_function'],
quantitive_measure_type=ex['quantitive_measure_type'],
quantitive_activity=ex['quantitive_activity'],
quantitive_activity_initial=ex['quantitive_activity_initial'],
quantitive_unit=ex['quantitive_unit'],
qualitative_activity=ex['qualitative_activity'],
quantitive_efficacy=ex['quantitive_efficacy'],
efficacy_measure_type=ex['efficacy_measure_type'],
efficacy_unit=ex['efficacy_unit'],
potency=ex['potency'],
t_coefficient=ex['t_coefficient'],
t_value=ex['t_coefficient_initial'],
t_factor=ex['t_factor'],
log_bias_factor=ex['log_bias_factor'],
emax_ligand_reference=emax_ligand
)
experiment_assay.save()
except:
pass
else:
pass
else:
self.logger.info('saving error')
def fetch_experiment(self, publication, ligand, receptor, source):
'''
        check whether an AnalyzedExperiment already exists for this
        publication / ligand / receptor / source combination (returns True if it does)
'''
try:
experiment = AnalyzedExperiment.objects.filter(
publication=publication, ligand=ligand, receptor=receptor, source=source)
experiment = experiment.get()
return True
except Exception:
self.logger.info('fetch_experiment error')
experiment = None
return False
def fetch_receptor_trunsducers(self, receptor):
primary = set()
temp = str()
temp1 = str()
secondary = set()
try:
gprotein = ProteinGProteinPair.objects.filter(protein=receptor)
for x in gprotein:
if x.transduction and x.transduction == 'primary':
primary.add(x.g_protein.name)
elif x.transduction and x.transduction == 'secondary':
secondary.add(x.g_protein.name)
for i in primary:
temp += str(i) + str(', ')
for i in secondary:
temp1 += str(i) + str(', ')
return temp, temp1
except:
self.logger.info('receptor not found error')
return None, None
def process_signalling_proteins(self, context):
for i in context.items():
i[1]['assay_list'] = self.limit_family_set(i[1]['assay_list'])
self.logger.info('process_signalling_proteins')
return context
def process_signalling_proteins_subs(self, context):
for i in context.items():
i[1]['assay_list'] = self.limit_family_set_subs(i[1]['assay_list'])
self.logger.info('process_signalling_proteins_subs')
return context
def build_bias_data(self):
print('Build bias data gproteins')
context = dict()
content = self.get_from_model()
print('stage # 1: Getting data finished, data points: ', len(content))
content_with_children = self.process_data(content)
print('stage # 2: Processing children in queryset finished', len(content_with_children))
changed_data = self.queryset_to_dict(content_with_children)
print('stage # 3: Converting queryset into dict finished', len(changed_data))
send = self.combine_unique(changed_data)
referenced_assay = self.process_referenced_assays(send)
print('stage # 4: Separating reference assays is finished', len(referenced_assay))
ligand_data = self.separate_ligands(referenced_assay)
limit_family = self.process_signalling_proteins(ligand_data)
print('stage # 5: Separate ligands', len(limit_family))
calculated_assay = self.process_calculation(limit_family)
print('stage # 6: Calucating finished')
self.count_publications(calculated_assay)
print('stage # 7: labs and publications counted')
context.update({'data': calculated_assay})
print('stage # 8: combining data into common dict is finished')
# save dataset to model
self.save_data_to_model(context, 'different_family')
print('stage # 9: saving data to model is finished')
def build_bias_data_subtypes(self):
print('Build bias data gproteins')
context = dict()
content = self.get_from_model()
print('stage # 1 : Getting data finished, data points: ', len(content))
content_with_children = self.process_data(content)
print('stage # 2: Processing children in queryset finished', len(content_with_children))
changed_data = self.queryset_to_dict(content_with_children)
print('stage # 3: Converting queryset into dict finished', len(changed_data))
send = self.combine_unique(changed_data)
referenced_assay = self.process_referenced_assays(send)
print('stage # 4: Separating reference assays is finished', len(referenced_assay))
ligand_data = self.separate_ligands(referenced_assay)
limit_family = self.process_signalling_proteins_subs(ligand_data)
print('stage # 5: Separate ligands')
calculated_assay = self.process_calculation(limit_family)
print('stage # 6: Merging assays with same ligand/receptor/publication is finished')
self.count_publications(calculated_assay)
print('stage # 7: labs and publications counted')
context.update({'data': calculated_assay})
print('stage # 8: combining data into common dict is finished')
# save dataset to model
self.save_data_to_model(context, 'same_family')
print('stage # 9: saving data to model is finished')
def count_publications(self, context):
temp = dict()
for i in context.items():
labs = list()
i[1]['labs'] = 0
labs.append(i[1]['publication'])
lab_counter = 1
for j in context.items():
if j[1]['publication'] not in labs:
if set(i[1]['authors']) & set(j[1]['authors']):
lab_counter += 1
labs.append(j[1]['publication'])
i[1]['labs'] = lab_counter
temp_obj = 1
name = str(i[1]['ref_ligand_experiment']) + \
'/' + str(i[1]['ligand']) + '/' + str(i[1]['receptor'])
if name in temp:
for assays in i[1]['biasdata']:
if assays['order_no'] > 0:
if assays['log_bias_factor'] != None and assays['log_bias_factor'] != '' or assays['t_factor'] != None and assays['t_factor'] != '':
temp_obj = temp[name] + 1
temp[name] = temp_obj
for i in context.items():
temp_obj = 0
name = str(i[1]['ref_ligand_experiment']) + \
'/' + str(i[1]['ligand']) + '/' + str(i[1]['receptor'])
if name in temp:
i[1]['article_quantity'] = temp[name]
self.logger.info('count_publications')
``` |
{
"source": "jimmyai/chess-alpha-zero",
"score": 3
} |
#### File: src/chess_zero/config.py
```python
import os
import numpy as np
class PlayWithHumanConfig:
"""
Config for allowing human to play against an agent using uci
"""
def __init__(self):
self.simulation_num_per_move = 1200
self.threads_multiplier = 2
self.c_puct = 1 # lower = prefer mean action value
self.noise_eps = 0
self.tau_decay_rate = 0 # start deterministic mode
self.resign_threshold = None
def update_play_config(self, pc):
"""
:param PlayConfig pc:
:return:
"""
pc.simulation_num_per_move = self.simulation_num_per_move
pc.search_threads *= self.threads_multiplier
pc.c_puct = self.c_puct
pc.noise_eps = self.noise_eps
pc.tau_decay_rate = self.tau_decay_rate
pc.resign_threshold = self.resign_threshold
pc.max_game_length = 999999
class Options:
new = False
class ResourceConfig:
"""
Config describing all of the directories and resources needed during running this project
"""
def __init__(self):
self.project_dir = os.environ.get("PROJECT_DIR", _project_dir())
self.data_dir = os.environ.get("DATA_DIR", _data_dir())
self.model_dir = os.environ.get("MODEL_DIR", os.path.join(self.data_dir, "model"))
self.model_best_config_path = os.path.join(self.model_dir, "model_best_config.json")
self.model_best_weight_path = os.path.join(self.model_dir, "model_best_weight.h5")
self.model_best_distributed_ftp_server = "alpha-chess-zero.mygamesonline.org"
self.model_best_distributed_ftp_user = "2537576_chess"
self.model_best_distributed_ftp_password = "<PASSWORD>-2"
self.model_best_distributed_ftp_remote_path = "/alpha-chess-zero.mygamesonline.org/"
self.next_generation_model_dir = os.path.join(self.model_dir, "next_generation")
self.next_generation_model_dirname_tmpl = "model_%s"
self.next_generation_model_config_filename = "model_config.json"
self.next_generation_model_weight_filename = "model_weight.h5"
self.play_data_dir = os.path.join(self.data_dir, "play_data")
self.play_data_filename_tmpl = "play_%s.json"
self.log_dir = os.path.join(self.project_dir, "logs")
self.main_log_path = os.path.join(self.log_dir, "main.log")
def create_directories(self):
dirs = [self.project_dir, self.data_dir, self.model_dir, self.play_data_dir, self.log_dir,
self.next_generation_model_dir]
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
def flipped_uci_labels():
"""
    Returns the UCI move labels for the vertically flipped board: every rank digit d in a
    label is replaced by 9 - d, so each move is re-expressed from the other side of the board.
    :return: list(str) of flipped labels
"""
def repl(x):
return "".join([(str(9 - int(a)) if a.isdigit() else a) for a in x])
return [repl(x) for x in create_uci_labels()]
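# Illustrative example (added; not part of the original source): repl() mirrors the rank
# digits, so the label 'a2a4' is flipped to 'a7a5' and 'e7e8q' to 'e2e1q'.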
def create_uci_labels():
"""
    Creates the full list of UCI move labels (queen-style and knight moves from every square, plus pawn promotion labels) and returns it
:return:
"""
labels_array = []
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
numbers = ['1', '2', '3', '4', '5', '6', '7', '8']
promoted_to = ['q', 'r', 'b', 'n']
for l1 in range(8):
for n1 in range(8):
destinations = [(t, n1) for t in range(8)] + \
[(l1, t) for t in range(8)] + \
[(l1 + t, n1 + t) for t in range(-7, 8)] + \
[(l1 + t, n1 - t) for t in range(-7, 8)] + \
[(l1 + a, n1 + b) for (a, b) in
[(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]
for (l2, n2) in destinations:
if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):
move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]
labels_array.append(move)
for l1 in range(8):
l = letters[l1]
for p in promoted_to:
labels_array.append(l + '2' + l + '1' + p)
labels_array.append(l + '7' + l + '8' + p)
if l1 > 0:
l_l = letters[l1 - 1]
labels_array.append(l + '2' + l_l + '1' + p)
labels_array.append(l + '7' + l_l + '8' + p)
if l1 < 7:
l_r = letters[l1 + 1]
labels_array.append(l + '2' + l_r + '1' + p)
labels_array.append(l + '7' + l_r + '8' + p)
return labels_array
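# Illustrative example (added; not part of the original source): the returned list holds
# plain UCI moves such as 'a1a2', 'e2e4' and 'g1f3' together with promotion labels such as
# 'a7a8q' and 'a7b8n' generated by the loop above.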
class Config:
"""
Config describing how to run the application
Attributes (best guess so far):
:ivar list(str) labels: labels to use for representing the game using UCI
    :ivar int n_labels: number of labels
    :ivar list(str) flipped_labels: the same labels expressed for the vertically flipped board
    :ivar list(int) unflipped_index: for each flipped label, its index in labels (used by flip_policy)
    :ivar Options opts: options to use to configure this config
    :ivar ResourceConfig resource: resources used by this config.
    :ivar ModelConfig model: config for the model to use
    :ivar PlayConfig play: configuration for the playing of the game
    :ivar PlayDataConfig play_data: configuration for the saved data from playing
:ivar TrainerConfig trainer: config for how training should go
:ivar EvaluateConfig eval: config for how evaluation should be done
"""
labels = create_uci_labels()
n_labels = int(len(labels))
flipped_labels = flipped_uci_labels()
unflipped_index = None
def __init__(self, config_type="mini"):
"""
:param str config_type: one of "mini", "normal", or "distributed", representing the set of
configs to use for all of the config attributes. Mini is a small version, normal is the
larger version, and distributed is a version which runs across multiple GPUs it seems
"""
self.opts = Options()
self.resource = ResourceConfig()
if config_type == "mini":
import chess_zero.configs.mini as c
elif config_type == "normal":
import chess_zero.configs.normal as c
elif config_type == "distributed":
import chess_zero.configs.distributed as c
else:
raise RuntimeError(f"unknown config_type: {config_type}")
self.model = c.ModelConfig()
self.play = c.PlayConfig()
self.play_data = c.PlayDataConfig()
self.trainer = c.TrainerConfig()
self.eval = c.EvaluateConfig()
self.labels = Config.labels
self.n_labels = Config.n_labels
self.flipped_labels = Config.flipped_labels
@staticmethod
def flip_policy(pol):
"""
        :param pol: policy to flip
:return: the policy, flipped (for switching between black and white it seems)
"""
return np.asarray([pol[ind] for ind in Config.unflipped_index])
Config.unflipped_index = [Config.labels.index(x) for x in Config.flipped_labels]
# print(Config.labels)
# print(Config.flipped_labels)
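# Illustrative usage sketch (added; not part of the original source): flip_policy reorders a
# policy vector of length n_labels so the probability stored for a move ends up at the index
# of its mirrored move, e.g.
#   cfg = Config(config_type="mini")
#   flipped = Config.flip_policy(np.zeros(cfg.n_labels))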
def _project_dir():
d = os.path.dirname
return d(d(d(os.path.abspath(__file__))))
def _data_dir():
return os.path.join(_project_dir(), "data")
```
#### File: chess_zero/lib/data_helper.py
```python
import os
import json
from datetime import datetime
from glob import glob
from logging import getLogger
import chess
import chess.pgn  # chess.pgn is not pulled in by 'import chess', but Game.from_board below needs it
import pyperclip
from chess_zero.config import ResourceConfig
logger = getLogger(__name__)
def pretty_print(env, colors):
new_pgn = open("test3.pgn", "at")
game = chess.pgn.Game.from_board(env.board)
game.headers["Result"] = env.result
game.headers["White"], game.headers["Black"] = colors
game.headers["Date"] = datetime.now().strftime("%Y.%m.%d")
new_pgn.write(str(game) + "\n\n")
new_pgn.close()
pyperclip.copy(env.board.fen())
def find_pgn_files(directory, pattern='*.pgn'):
dir_pattern = os.path.join(directory, pattern)
files = list(sorted(glob(dir_pattern)))
return files
def get_game_data_filenames(rc: ResourceConfig):
pattern = os.path.join(rc.play_data_dir, rc.play_data_filename_tmpl % "*")
files = list(sorted(glob(pattern)))
return files
def get_next_generation_model_dirs(rc: ResourceConfig):
dir_pattern = os.path.join(rc.next_generation_model_dir, rc.next_generation_model_dirname_tmpl % "*")
dirs = list(sorted(glob(dir_pattern)))
return dirs
def write_game_data_to_file(path, data):
try:
with open(path, "wt") as f:
json.dump(data, f)
except Exception as e:
print(e)
def read_game_data_from_file(path):
try:
with open(path, "rt") as f:
return json.load(f)
except Exception as e:
print(e)
``` |
{
"source": "JimmyAstle/CbD-LR-Scripts",
"score": 2
} |
#### File: CbD-LR-Scripts/psrecon/run-recon.py
```python
from cbapi.defense import *
import os
import time
import argparse
import sys
cb_url = ""
cb_token = ""
cb_ssl = "True"
recon_path = "C:\\recon"
def execute_recon(cb, host=None):
#Select the device you want to gather recon data from
query_hostname = "hostNameExact:%s" % host
print ("[DEBUG] Executing remote forensics on Hostname: " + host)
#Create a new device object to launch LR on
device = cb.select(Device).where(query_hostname).first()
#Execute our LR session
with device.lr_session() as lr_session:
print ("[DEBUG] Create remote recon directory on: " + host)
lr_session.create_directory(recon_path)
print ("[DEBUG] Putting PsRecon on the remote host")
lr_session.put_file(open("psrecon.ps1", "rb"), recon_path + "\\psrecon.ps1")
print ("[DEBUG] Setting PowerShell execution policy to unrestricted")
lr_session.create_process("powershell.exe SET-EXECUTIONPOLICY UNRESTRICTED")
print ("[DEBUG] Executing PsRecon on host: " + host)
lr_session.create_process("powershell.exe -nologo -file %s\\psrecon.ps1" % recon_path)
p = recon_path
p = os.path.normpath(p)
try:
path = lr_session.walk(p, False) # False because bottom->up walk, not top->down
for items in path: # For each subdirectory in the path
directory = os.path.normpath((str(items[0]))) # The subdirectory in OS path syntax
subpathslist = items[1] # List of all subpaths in the subdirectory
fileslist = items[2] # List of files in the subdirectory
if str(fileslist) != "[]": # If the subdirectory is not empty
for afile in fileslist: # For each file in the subdirectory
if not(afile.endswith(".ps1")):
fpath = os.path.normpath(directory + "\\" + afile) # The path + filename in OS path syntax
print ("[DEBUG] Reading File: " + fpath)
dmp = lr_session.get_file(fpath)
time.sleep(2.5) # Ensures script and server are synced
save_path1 = "{0}".format(directory)
save_path1 = (save_path1.replace(p, ""))
if save_path1.startswith('\\'):
save_path1 = save_path1[1:]
save_path1 = save_path1.replace("\\", "/")
save_path1 = os.path.normpath(save_path1)
if not os.path.exists(save_path1):
os.makedirs(save_path1)
os.chmod(save_path1, 0o777) # read and write by everyone
save_path1 = save_path1 + "/" + afile
print ("[DEBUG] Writing file to path " + save_path1)
open(save_path1, "wb").write(dmp)
print ("[DEBUG] Reading Path: " + directory)
except Exception as err: # Could occur if main path did not exist, session issue, or unusual permission issue
print("[ERROR] Encountered: " + str(err) + "\n[FAILURE] Fatal error caused abort!") # Report error, and continue
print ("[DEBUG] Setting PowerShell execution policy back to restricted")
lr_session.create_process("powershell.exe SET-EXECUTIONPOLICY RESTRICTED")
#Lets clean up the recon scripts on the endpoint now that we collected all the data
path = lr_session.walk(p,False)
for items in path: # For each subdirectory in the path
directory = os.path.normpath((str(items[0]))) # The subdirectory in OS path syntax
fileslist = items[2] # List of files in the subdirectory
if str(fileslist) != "[]": # If the subdirectory is not empty
for afile in fileslist: # For each file in the subdirectory
fpath = os.path.normpath(directory + "\\" + afile) # The path + filename in OS path syntax
print ("[DEBUG] Deleting File: " + fpath)
lr_session.delete_file(fpath) # Delete the file
print ("[DEBUG] Deleting Path: " + directory)
lr_session.delete_file(directory) # Delete the empty directory
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--machinename", help="machinename to run host forensics recon on")
args = parser.parse_args()
#Create the CbD LR API object
cb = CbDefenseAPI(url=cb_url, token=cb_token, ssl_verify=cb_ssl)
if args.machinename:
execute_recon(cb, host=args.machinename)
else:
print ("[ERROR] You must specify a machinename with a --machinename parameter. IE ./run_recon.py --machinename cheese")
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "JimmyAustin/Dota2TeamBuilder",
"score": 2
} |
#### File: Dota2TeamBuilder/modelling/ready_data.py
```python
import json
from os.path import join
from toolbox.notebook.fs import *
from utils import *
file_count = 100
input_directory = './scraper/scrape_results'
output_directory = './scraper/cleaned_data/'
def file_ref(i):
return open(join(output_directory, str(i).zfill(5) + ".json"), 'wb')
files = [file_ref(i) for i in range(0, file_count)]
for line in get_json_lines_in_folder(input_directory):
if proper_match(line) is True:
index = bucket_string(json.dumps(line), bucket_count=file_count)
files[index].write(json.dumps(line).encode('utf-8'))
files[index].write('\n'.encode('utf-8'))
```
#### File: Dota2TeamBuilder/scraper/scraper.py
```python
import dota2api
from ratelimit import limits, sleep_and_retry
import os
import json
from time import sleep
import random
directory = 'scrape_results'
all_files = [os.path.join(directory, x) for x in os.listdir(directory)]
seen_players = set()
unseen_player_list = []
seen_match_ids = set()
duplicate_matches_count = 0
for filepath in all_files:
print(filepath)
with open(filepath, 'rb') as file_ref:
for line in file_ref:
try:
result = json.loads(line.decode('utf8'))
if 'type' in result and result['type'] == 'STARTED_ON_NEW_PLAYER':
seen_players.add(result['player_id'])
else:
if result['match_id'] in seen_match_ids:
duplicate_matches_count = duplicate_matches_count + 1
seen_match_ids.add(result['match_id'])
for player in result['players']:
if 'account_id' in player:
unseen_player_list.append(player['account_id'])
except Exception:
pass
unseen_player_list = [x for x in unseen_player_list if x not in seen_players]
if len(unseen_player_list) == 0:
unseen_player_list = [31632658] # That's Zin
print('Inited, {0} duplicate matches'.format(duplicate_matches_count))
api = dota2api.Initialise()
match_count = len(seen_match_ids)
def get_next_filename():
count = 1
while True:
path = './scrape_results/all_matches_{0}.json'.format(count)
if os.path.exists(path) is False:
return path
count = count + 1
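# Note (added; not part of the original source): on a fresh run this returns
# './scrape_results/all_matches_1.json'; if that file exists the counter keeps increasing
# until an unused name is found, so earlier scrape files are never overwritten.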
matches_file = open(get_next_filename(), 'wb')
def print_status_update():
players_seen = len(seen_players) - len(unseen_player_list)
print("Matches saved: {0}, Players Seen: {1}, Players To Go: {2}".format(match_count, players_seen, len(unseen_player_list)))
@sleep_and_retry
@limits(calls=1, period=1.10)
def api_call(endpoint, *args, **kwargs):
try:
return getattr(api, endpoint)(*args, **kwargs)
except dota2api.src.exceptions.APITimeoutError:
sleep(10)
except Exception as e:
print(e)
print("Sleeping it off.")
sleep(10)
def get_player(player_id):
print('Getting player: {0}'.format(player_id))
    try:
        history = api_call('get_match_history', account_id=player_id)
    except Exception as e:
        print(e)
        print("Sleeping it off.")
        sleep(10)
        return
    if not history:
        # api_call can swallow errors and return None; skip this player in that case
        return
matches_file.write(json.dumps({'type': 'STARTED_ON_NEW_PLAYER', 'player_id': player_id}).encode('utf8'))
matches_file.write('\n'.encode('utf8'))
for match in random.sample(history['matches'], 5):
get_match(match['match_id'])
def get_match(match_id):
global match_count
if match_id in seen_match_ids:
return
print('get_match_details: {0}'.format(match_id))
print_status_update()
details = api_call('get_match_details', match_id)
matches_file.write(json.dumps(details).encode('utf8'))
matches_file.write('\n'.encode('utf8'))
match_count = match_count + 1
for player in details['players']:
if player.get('account_id') and player['account_id'] not in seen_players:
unseen_player_list.append(player['account_id'])
seen_players.add(player['account_id'])
while len(unseen_player_list) > 0:
try:
get_player(unseen_player_list.pop())
except Exception as e:
pass
``` |
{
"source": "jimmybergman/openstack-dashboard",
"score": 2
} |
#### File: src/django_openstack/api.py
```python
from django.conf import settings
import logging
import glance.client
import httplib
import json
import openstack.compute
import openstackx.admin
import openstackx.extras
import openstackx.auth
from urlparse import urlparse
def url_for(request, service_name, admin=False):
catalog = request.session['serviceCatalog']
if admin:
rv = catalog[service_name][0]['adminURL']
else:
rv = catalog[service_name][0]['internalURL']
return rv
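# Note (added; not part of the original file): url_for() picks the first endpoint registered
# for service_name in the session's serviceCatalog and returns its internalURL, or the
# adminURL when admin=True.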
def compute_api(request):
compute = openstack.compute.Compute(auth_token=request.session['token'],
management_url=url_for(request, 'nova'))
# this below hack is necessary to make the jacobian compute client work
compute.client.auth_token = request.session['token']
compute.client.management_url = url_for(request, 'nova')
return compute
def account_api(request):
return openstackx.extras.Account(auth_token=request.session['token'],
management_url=url_for(request, 'keystone', True))
def glance_api(request):
o = urlparse(url_for(request, 'glance'))
return glance.client.Client(o.hostname, o.port)
def admin_api(request):
return openstackx.admin.Admin(auth_token=request.session['token'],
management_url=url_for(request, 'nova', True))
def extras_api(request):
return openstackx.extras.Extras(auth_token=request.session['token'],
management_url=url_for(request, 'nova'))
def auth_api():
return openstackx.auth.Auth(management_url=\
settings.OPENSTACK_KEYSTONE_URL)
def get_tenant(request, tenant_id):
tenants = auth_api().tenants.for_token(request.session['token'])
for t in tenants:
if str(t.id) == str(tenant_id):
return t
def token_info(token):
hdrs = {"Content-type": "application/json",
"X_AUTH_TOKEN": settings.OPENSTACK_ADMIN_TOKEN,
"Accept": "text/json"}
o = urlparse(token.serviceCatalog['keystone'][0]['adminURL'])
conn = httplib.HTTPConnection(o.hostname, o.port)
conn.request("GET", "/v2.0/tokens/%s" % token.id, headers=hdrs)
response = conn.getresponse()
data = json.loads(response.read())
admin = False
for role in data['auth']['user']['roleRefs']:
if role['roleId'] == 'Admin':
admin = True
return {'tenant': data['auth']['user']['tenantId'],
'user': data['auth']['user']['username'],
'admin': admin}
def get_image_cache(request):
images = glance_api(request).get_images_detailed()
image_dict = {}
for image in images:
image_dict[image['id']] = image
return image_dict
```
#### File: syspanel/views/flavors.py
```python
from operator import itemgetter
from django import template
from django import http
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from openstackx.api import exceptions as api_exceptions
from django_openstack import api
from django_openstack import forms
class CreateFlavor(forms.SelfHandlingForm):
flavorid = forms.CharField(max_length="10", label="Flavor ID")
name = forms.CharField(max_length="5", label="Name")
vcpus = forms.CharField(max_length="5", label="VCPUs")
memory_mb = forms.CharField(max_length="5", label="Memory MB")
disk_gb = forms.CharField(max_length="5", label="Disk GB")
def handle(self, request, data):
api.admin_api(request).flavors.create(data['name'],
int(data['memory_mb']),
int(data['vcpus']),
int(data['disk_gb']),
int(data['flavorid']))
messages.success(request,
'%s was successfully added to flavors.' % data['name'])
return redirect('syspanel_flavors')
class DeleteFlavor(forms.SelfHandlingForm):
flavorid = forms.CharField(required=True)
def handle(self, request, data):
flavor_id = data['flavorid']
api.admin_api(request).flavors.delete(flavor_id, True)
return redirect(request.build_absolute_uri())
@login_required
def index(request):
for f in (DeleteFlavor,):
_, handled = f.maybe_handle(request)
if handled:
return handled
# We don't have any way of showing errors for these, so don't bother
# trying to reuse the forms from above
delete_form = DeleteFlavor()
flavors = []
try:
flavors = api.admin_api(request).flavors.list()
except api_exceptions.ApiException, e:
messages.error(request, 'Unable to get usage info: %s' % e.message)
flavors.sort(key=lambda x: x.id, reverse=True)
return render_to_response('syspanel_flavors.html',{
'delete_form': delete_form,
'flavors': flavors,
}, context_instance = template.RequestContext(request))
@login_required
def create(request):
form, handled = CreateFlavor.maybe_handle(request)
if handled:
return handled
return render_to_response('syspanel_create_flavor.html',{
'form': form,
}, context_instance = template.RequestContext(request))
``` |
{
"source": "JimmyBlunt/RPi-Jukebox-RFID",
"score": 3
} |
#### File: components/twist_Volume/twist_volume.py
```python
from __future__ import print_function
import qwiic_twist
import time
import sys
import mpd
import serial
import subprocess
from math import floor
from subprocess import call
global muted
global volume
global orig_volume
def runExample():
def toggle_mute():
global muted
global orig_volume
global volume
if muted == False:
orig_volume=volume
client.volume(-volume)
muted=True
elif muted:
client.volume(orig_volume)
muted = False
mute=0
global muted
global orig_volume
global volume
muted = False
reboot=False
shutdown=False
client = mpd.MPDClient()
client.connect("localhost", 6600)
myTwist = qwiic_twist.QwiicTwist()
#print ( dir(myTwist) )
#print ( dir(client))
if myTwist.connected == False:
print("The Qwiic twist device isn't connected to the system. Please check your connection", \
file=sys.stderr)
return
myTwist.begin()
    myTwist.set_color(112, 70, 0) # set the knob LED colour (red=112, green=70, blue=0)
    myTwist.connect_red = 5 # red channel is adjusted by +5 with each encoder tick
    myTwist.connect_green = -5 # green channel is adjusted by -5 with each encoder tick
    myTwist.connect_blue = 2 # blue channel is adjusted by +2 with each encoder tick
while True:
mpdstatus = client.status()
# Fetch volume
volume = int(mpdstatus["volume"])
if myTwist.moved:
diff = myTwist.get_diff()
if diff > 1000 :
diff = (-65536 + diff)
#print("Diff :" + str(diff))
client.volume(2*diff)
myTwist.clear_interrupts()
myTwist.get_diff(clear_value=True)
if myTwist.clicked:
toggle_mute()
myTwist.clear_interrupts()
if myTwist.pressed:
start_time = time.time()
while myTwist.pressed:
buttonTime = time.time() - start_time
#print("Button pressed : " + str(buttonTime) + " sec" )
if .01 <= buttonTime < 2: # short push mute
mute=1
elif 3 <= buttonTime < 5:
print('long Press! REBOOT!') #longer push reboot
reboot=True
elif buttonTime >= 5:
print('Looong Press! Shutdown') # really long push shutdwon
reboot=False
#shutdown=True
myTwist.clear_interrupts()
if mute == 1:
toggle_mute()
mute = 0
if shutdown:
call(["sudo", "shutdown","-h", "now"])
if reboot:
call(["sudo", "reboot"])
#time.sleep(0.5)
if __name__ == '__main__':
try:
runExample()
except (KeyboardInterrupt, SystemExit) as exErr:
print("\nEnding Example 3")
sys.exit(0)
``` |
{
"source": "jimmybot/cedict2db",
"score": 3
} |
#### File: jimmybot/cedict2db/pinyin.py
```python
import re
PINYIN_TONE_MARK = {
0: 'aoeiuvü',
1: 'āōēīūǖǖ',
2: 'áóéíúǘǘ',
3: 'ǎǒěǐǔǚǚ',
4: 'àòèìùǜǜ'
}
PUNCTUATION = {
' ',
'-', # hyphen
'–', # endash
'—', # emdash
'\t',
'\n',
'.',
',',
'?',
'!',
':',
';',
'。',
',',
'?',
'!',
':',
';',
}
def decode_pinyin(pinyin_numerical):
r = ""
t = ""
for c in pinyin_numerical:
# convenience for ü
if c == 'v':
t += 'ü'
# another convenience for ü
elif c == ':' and len(t) >= 1 and t[-1] == 'u':
t = t[:-1] + "\u00fc"
elif c >= '0' and c <= '5' or c in PUNCTUATION:
if c >= '0' and c <= '5':
tone = int(c) % 5
if tone != 0:
m = re.search("[aoeiuv\u00fc]+", t)
if m is None:
t += c
elif len(m.group(0)) == 1:
t = t[:m.start(0)] + PINYIN_TONE_MARK[tone][PINYIN_TONE_MARK[0].index(m.group(0))] + t[m.end(0):]
# more than one match so we need to figure out which vowel to mark
else:
if 'a' in t:
t = t.replace("a", PINYIN_TONE_MARK[tone][0])
elif 'o' in t:
t = t.replace("o", PINYIN_TONE_MARK[tone][1])
elif 'e' in t:
t = t.replace("e", PINYIN_TONE_MARK[tone][2])
elif t.endswith("ui"):
t = t.replace("i", PINYIN_TONE_MARK[tone][3])
elif t.endswith("iu"):
t = t.replace("u", PINYIN_TONE_MARK[tone][4])
else:
t += c
r += t
t = ""
else:
t += c
r += t
#print("%s | %s" % (pinyin_numerical, r))
return r
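# Illustrative examples (added; not part of the original source):
#   decode_pinyin("ni3 hao3")  ->  "nǐ hǎo"
#   decode_pinyin("lv4")       ->  "lǜ"   ('v' is the convenience spelling of 'ü')
#   decode_pinyin("ma5")       ->  "ma"   (tone 5/0 leaves the syllable unmarked)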
``` |
{
"source": "jimmybot/slowspacer",
"score": 3
} |
#### File: jimmybot/slowspacer/slowspacer.py
```python
__author__ = "<NAME>"
__license__ = "CC0 Public Domain"
__email__ = "<EMAIL>"
import fcntl
import os
import select
import time
def _no_block_read(infile):
try:
line = infile.readline().rstrip()
# Reading will not block with the nonblocking attribute set
# If there is nothing to read, instead of blocking, IOError is raised
except IOError:
line = None
return line
def watch(logfile, timeout=3, spacer='='):
is_spaced = False
spacer_line = spacer * 80
# default is that reading stdin will block if no data is ready to be read
# we use fcntl to set reading to non-blocking
fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
while True:
# either data is available to be read or we may need to add a spacer
rfiles, wfiles, xfiles = select.select([logfile], [], [], timeout)
# select says we're ready to read
if rfiles:
# normally we should not block here but
# it can happen if a partial line was written
# in this case, the partial line will be output
# when a full line is ready
line = _no_block_read(rfiles[0])
if line and len(line):
# we have real content again, reset is_spaced to False
is_spaced = False
while line and len(line):
print(line)
line = _no_block_read(rfiles[0])
# select timed-out with nothing available to read, let's print a spacer
else:
# check if a spacer was already printed before printing
# (no need to print consecutive spacers)
if not is_spaced:
is_spaced = True
print("\n" + spacer_line + "\n")
if __name__ == '__main__':
import argparse
import sys
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-s', '--seconds', metavar='S', default='3', type=int, help='number of seconds to wait before adding a spacer (default is 3)')
parser.add_argument('-c', '--character', metavar='C', default='=', type=str, help='character used as a spacer (default is \'=\')')
args = parser.parse_args()
watch(sys.stdin, args.seconds, args.character)
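# Illustrative invocation (assumed script name):
#   tail -f /var/log/syslog | python slowspacer.py --seconds 5 --character '-'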
``` |
{
"source": "jimmybot/typedtsv",
"score": 2
} |
#### File: typedtsv/tests/test_typedtsv.py
```python
from collections import OrderedDict
from datetime import datetime, timezone, timedelta
import io
import toml
from typedtsv import __version__
from typedtsv.typedtsv import *
def test_version_in_sync():
assert toml.load('pyproject.toml')['tool']['poetry']['version'] == __version__
def test_load_header():
assert OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
)) == load_header('title:str\turl:str\tn_loads:int')
def test_load_line():
header_info = load_header('one:float\ttwo:int\tthree:json\tfour:str\tfive:str')
line = '1\t1\t{"dragonfruit": "huolongguo"}\t大驚小怪\t5'
assert [
1.0,
1,
{"dragonfruit": "huolongguo"},
"大驚小怪",
"5",
] == load_line(header_info, line)
def test_parse_datetime():
datetimes_from_strs = (
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=0))), '2000-01-01 00:00:00Z'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=0))), '2000-01-01T00:00:00Z'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=1))), '2000-01-01 00:00:00+01'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=-3, minutes=-30))), '2000-01-01 00:00:00-0330'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=-3, minutes=-30))), '2000-01-01 00:00:00-03:30'),
(datetime(2000, 1, 1, 0, 0, 0, 123000, timezone(timedelta(hours=-3, minutes=-30))), '2000-01-01 00:00:00.123-03:30'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=-3, minutes=-30))), '2000-01-01 00:00:00 -03:30'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=-3, minutes=-30))), '2000-01-01 -03:30'),
(datetime(2000, 1, 1, 0, 0, 0, 0, timezone(timedelta(hours=0))), '2000-01-01Z'),
)
for expected, raw_string in datetimes_from_strs:
assert expected == parse_datetime(raw_string)
def test_dump_datetime():
strs_from_datetime = (
('2000-01-01 00:00', datetime(2000, 1, 1)),
('2000-01-01 00:00', datetime(2000, 1, 1, 0, 0, 0)),
('2000-01-01 00:00', datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
('2000-01-01 00:00:59', datetime(2000, 1, 1, 0, 0, 59, tzinfo=timezone.utc)),
('2000-01-01 00:00:35.123', datetime(2000, 1, 1, 0, 0, 35, 123000, tzinfo=timezone.utc)),
('2000-01-01 08:00:09-0600', datetime(2000, 1, 1, 8, 0, 9, tzinfo=timezone(timedelta(hours=-6)))),
('2000-01-01 08:00-0600', datetime(2000, 1, 1, 8, 0, 0, tzinfo=timezone(timedelta(hours=-6)))),
)
for expected, dt in strs_from_datetime:
assert expected == dump_datetime(dt)
def test_dump_header():
header_info = OrderedDict((
('one', 'int'),
('two', 'str')
))
assert 'one:int\ttwo:str' == dump_header(header_info)
def test_dump_line():
header_info = OrderedDict((
('one', 'int'),
('two', 'str')
))
assert '99\tgreen\\t+\\tblue\\n=grue' == dump_line(header_info, (
99,
'green\t+\tblue\n=grue'
))
def test_header_roundtrip():
raw_header = 'title:str\turl:str\tn_loads:int'
header_info = load_header(raw_header)
raw_header_roundtrip = dump_header(header_info)
assert raw_header == raw_header_roundtrip
def test_line_roundtrip():
header_info = OrderedDict((
('one', 'int'),
('two', 'str')
))
parsed_data = [
99,
'green\t+\tblue\n=grue\\t\\n\\ \\\\'
]
assert parsed_data == load_line(header_info, dump_line(header_info, parsed_data))
def test_loads():
raw_data = io.StringIO()
raw_data.write('# 1 comment line should be ignored\n')
raw_data.write('title:str\turl:str\tn_loads:int\n')
raw_data.write('# 2 comment line should be ignored\n')
raw_data.write('0\thttps://biglittlebear.cc\t55\n')
raw_data.write('# 3 comment line should be ignored\n')
raw_data.write('\\#\t\\\\#\t99\n')
raw_data.seek(0)
header_info, rows = loads(raw_data)
assert OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
)) == header_info
assert [
['0', 'https://biglittlebear.cc', 55],
['#', '\\#', 99],
] == rows
def test_load_list():
raw_data = io.StringIO()
raw_data.write('# 1 comment line should be ignored\n')
raw_data.write('title:str\n')
raw_data.write('# 2 comment line should be ignored\n')
raw_data.write('https://biglittlebear.cc\n')
raw_data.write('https://jimmybot.com\n')
raw_data.write('# 3 comment line should be ignored\n')
raw_data.write('\\#\n')
raw_data.seek(0)
header_info, rows = load_list(raw_data)
assert OrderedDict((
('title', 'str'),
)) == header_info
assert [
'https://biglittlebear.cc',
'https://jimmybot.com',
'#',
] == rows
def test_load_bool_true():
raw_data = io.StringIO()
raw_data.write('title:str\tfetched:bool\n')
raw_data.write('0\ttrue\n')
raw_data.write('1\tt\n')
raw_data.write('2\tyes\n')
raw_data.write('3\ty\n')
raw_data.write('4\ton\n')
raw_data.write('5\t1\n')
raw_data.seek(0)
header_info, rows = loads(raw_data)
for row in rows:
assert True == row[1]
def test_load_bool_false():
raw_data = io.StringIO()
raw_data.write('title:str\tfetched:bool\n')
raw_data.write('0\tfalse\n')
raw_data.write('1\tf\n')
raw_data.write('2\tno\n')
raw_data.write('3\tn\n')
raw_data.write('4\toff\n')
raw_data.write('5\t0\n')
raw_data.seek(0)
header_info, rows = loads(raw_data)
for row in rows:
assert False == row[1]
def test_dumps():
header_info = OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
))
data = [
['0', 'https://biglittlebear.cc', 55],
]
outfile = io.StringIO()
dumps(header_info, data, outfile)
outfile.seek(0)
raw_data_dumped = outfile.read()
raw_data_expected = (
'title:str\turl:str\tn_loads:int\n'
'0\thttps://biglittlebear.cc\t55\n'
)
assert raw_data_expected == raw_data_dumped
def test_dump_list():
data = [
'https://biglittlebear.cc',
'https://jimmybot.com',
]
outfile = io.StringIO()
dump_list(('title', 'str'), data, outfile)
outfile.seek(0)
raw_data_dumped = outfile.read()
raw_data_expected = (
'title:str\n'
'https://biglittlebear.cc\n'
'https://jimmybot.com\n'
)
assert raw_data_expected == raw_data_dumped
def test_dump_roundtrip():
header_info = OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
('in_cache', 'bool'),
))
data = [
['0', 'https://biglittlebear.cc', 55, True],
['1', 'https://archive.org', 99, False],
]
outfile = io.StringIO()
dumps(header_info, data, outfile)
outfile.seek(0)
parsed_header_info, parsed_data = loads(outfile)
assert header_info == parsed_header_info
assert data == parsed_data
def test_dump_roundtrip_slashn():
header_info = OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
))
data = [
['chit \n neng \t sah \\n', 'https://biglittlebear.cc', 55],
]
outfile = io.StringIO()
dumps(header_info, data, outfile)
outfile.seek(0)
parsed_header_info, parsed_data = loads(outfile)
assert header_info == parsed_header_info
assert data == parsed_data
def test_dump_roundtrip_windowsnewline():
header_info = OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
))
data = [
['chit \r\n \n neng \t sah \\n', 'https://biglittlebear.cc', 55],
]
# regular files need to be opened with newline='\n'
# io.StringIO has a good default to only recognize '\n'
outfile = io.StringIO(newline='\n')
dumps(header_info, data, outfile)
outfile.seek(0)
parsed_header_info, parsed_data = loads(outfile)
assert header_info == parsed_header_info
assert data == parsed_data
def test_dump_roundtrip_null():
header_info = OrderedDict((
('title', 'str'),
('url', 'str'),
('n_loads', 'int'),
))
data = [
['chit \r\n \n neng \t sah \\n', 'null', 55],
['chit \r\n \n neng \t sah \\n', '', 55],
['chit \r\n \n neng \t sah \\n', None, 55],
['chit \r\n \n neng \t sah \\n', 'https://archive.org', None],
]
# regular files need to be opened with newline='\n'
# io.StringIO has a good default to only recognize '\n'
outfile = io.StringIO(newline='\n')
dumps(header_info, data, outfile)
outfile.seek(0)
parsed_header_info, parsed_data = loads(outfile)
assert header_info == parsed_header_info
assert data == parsed_data
def test_dump_roundtrip_datetime():
col_names = ['updated']
data = [
[datetime(1999, 9, 9, tzinfo=timezone.utc)],
[datetime(1999, 9, 9, 0, 0, 0, tzinfo=timezone.utc)],
[datetime(1999, 9, 9, 8, 3, 7, tzinfo=timezone.utc)],
[datetime(1999, 9, 9, 8, 3, 7, tzinfo=timezone(timedelta(hours=-6)))],
[datetime(1999, 9, 9, 8, 3, 7, 323000, tzinfo=timezone(timedelta(hours=-6)))],
]
# regular files need to be opened with newline='\n'
# io.StringIO has a good default to only recognize '\n'
outfile = io.StringIO(newline='\n')
dumps(col_names, data, outfile)
outfile.seek(0)
parsed_header_info, parsed_data = loads(outfile)
assert OrderedDict((
('updated', 'datetime'),
)) == parsed_header_info
assert data == parsed_data
``` |
{
"source": "jimmybow/myDL",
"score": 2
} |
#### File: myDL/ts/GRU_attention_cv.py
```python
import torch
import torch.utils.data as Data
from torch import nn
import torch.nn.functional as F
import random
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, TimeSeriesSplit
from mydf import *
from datetime import datetime, timedelta
import math
import copy
import pkg_resources
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from pandas.plotting import register_matplotlib_converters
import io
import imageio
###############################################################################################################
### define class
###############################################################################################################
class torch_Dataset(Data.Dataset): # must subclass torch.utils.data.Dataset
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, index):
data = (self.x[index], self.y[index])
return data
def __len__(self):
return len(self.y)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, max_length):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.max_length = max_length
self.attn = nn.Linear(2*self.hidden_size, self.max_length) # 512 => 10
self.attn_combine = nn.Linear(2*self.hidden_size, self.hidden_size) # 512 => 256
self.gru = nn.GRU(self.hidden_size, self.hidden_size, batch_first=True)
def forward(self, input, hidden, encoder_last_layer_outputs):
attn = self.attn(torch.cat((input[0], hidden[0]), 1)) # (1, 2*hidden_size) => (1, max_length)
attn_weights = F.softmax(attn, dim=1) # (1, max_length)
# (1, 1, max_length) X (1, max_length, hidden_size) = (1, 1, hidden_size): a linear combination of encoder_last_layer_outputs
attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_last_layer_outputs.unsqueeze(0)) # (1, 1, hidden_size)
output = torch.cat((input[0], attn_applied[0]), 1) # (1, 2*hidden_size)
output = self.attn_combine(output).unsqueeze(0) # (1, 1, hidden_size)
output = F.relu(output) # (1, 1, hidden_size)
output, hidden = self.gru(output, hidden)
return output, hidden # (1, 1, hidden_size), (1, 1, hidden_size)
class GRU_seq2seq(nn.Module):
def __init__(self, input_size=5, hidden_size=10, output_size=3, prediction_length=12, time_step=24):
super(GRU_seq2seq, self).__init__()
self.prediction_length = prediction_length
self.GRU_encoder = nn.GRU(input_size, hidden_size, batch_first=True)
self.GRU_decoder = AttnDecoderRNN(hidden_size, max_length=time_step)
self.reg = nn.Linear(hidden_size, output_size)
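# The encoder reads the full time_step window; the attention decoder is then unrolled for
# prediction_length steps, and self.reg maps each decoder state to one value per quantile.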
def forward(self, x):
x, encoder_hidden = self.GRU_encoder(x)
encoder_last_layer_outputs = x[0]
decoder_hidden = encoder_hidden[[-1]]
decoder_input = decoder_hidden # (1, 1, hidden_size)
decoder_output_list = []
for di in range(self.prediction_length):
decoder_output, decoder_hidden = self.GRU_decoder(decoder_input, decoder_hidden, encoder_last_layer_outputs)
decoder_output_list.append(decoder_output)
decoder_input = decoder_output
output = torch.cat(decoder_output_list, 1) # (1, prediction_length, hidden_size)
output = self.reg(output) # (1, prediction_length, 3)
return output
class QuantileLoss(nn.Module):
def __init__(self, quantiles):
super().__init__()
self.quantiles = quantiles
def forward(self, preds, target):
assert not target.requires_grad
assert preds.size(0) == target.size(0)
losses = []
for i, q in enumerate(self.quantiles):
errors = target - preds[:, :, i]
losses.append(
torch.max( (q-1) * errors, q * errors ).unsqueeze(2)
)
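# Sum the pinball (quantile) losses over all quantiles, then average over the horizon
# with linearly decaying weights so earlier forecast steps contribute more to the loss.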
result = torch.sum(torch.cat(losses, dim=2), dim=2)
w = torch.unsqueeze(torch.arange(result.shape[1],0,-1), 1).float()/np.arange(result.shape[1],0,-1).sum()
loss = torch.mean(torch.mm(result, w))
return loss
###############################################################################################################
### define function
###############################################################################################################
def RMSE(x, y, model, std_target, scaler):
predict_value = [model(x[[i]])[0, :, 0].data.numpy() for i in range(len(x))]
actual_value = y.data.numpy()
return np.sqrt(mean_squared_error(predict_value, actual_value))*std_target*scaler
def fit(data_source, target_column, output_filename = None, model_source = None,
freq = '1M',
data_use_length = None,
prediction_length = 12,
time_step = 24,
prediction_quantile = [0.05, 0.95],
test_size = 0,
cv_mode = 'kfold',
n_splits = 5,
hidden_size = 50,
learning_rate = 1e-2,
weight_decay = 0,
early_stopping_patience = 500,
epochs = 5000):
###############################################################################################################
### preprocessing data
###############################################################################################################
Quantile = [0.5] + prediction_quantile
mini_batch_size = 1
if isinstance(data_source, str):
df = pd.read_csv(open(data_source))
else:
df = data_source
if 'time' in df.columns.tolist():
df.index = df.time.astype('datetime64[ns]')
df >>= drop('time')
df = df.astype(float).resample(freq).mean()
if data_use_length is not None: df = df.iloc[-data_use_length:]
features = df.columns.tolist()
target_column_index = features.index(target_column)
time_index = df.index
if df.isnull().values.sum() > 0:
print("The data source has missing values after being aggregated by freq = '{}'".format(freq))
print("Filling missing values using method = 'pad'")
df = df.fillna(method = 'pad').dropna()
if len(df) <= time_step + prediction_length - 1:
raise Exception("The sample size is too low after aggregation; it's not enough for training")
scaler = 10
scaler_std = preprocessing.StandardScaler()
data_norm = scaler_std.fit_transform(df)/scaler
# there are only len(data_norm) - time_step + 1 samples = len(sample_ranges_x)
sample_ranges_x = [range(i, i + time_step) for i in range(len(data_norm)) if i + time_step <= len(data_norm)]
data_x = torch.tensor([data_norm[sample_range] for sample_range in sample_ranges_x]).float()
# there are only len(data_norm) - prediction_length + 1 samples = len(sample_ranges_y)
sample_ranges_y = [range(i, i + prediction_length) for i in range(len(data_norm)) if i + prediction_length <= len(data_norm)]
data_y = torch.tensor([data_norm[sample_range, target_column_index] for sample_range in sample_ranges_y]).float()
# in the end there are only len(sample_ranges_x) - prediction_length = len(sample_ranges_y) - time_step training samples
data_x = data_x[:-prediction_length]
data_y = data_y[time_step:]
final_ranges_x = sample_ranges_x[:-prediction_length]
final_ranges_y = sample_ranges_y[time_step:]
###############################################################################################################
### apply window-move size = prediction_length/3
###############################################################################################################
#window_move = math.ceil(prediction_length/5)
#data_x = data_x[::window_move]
#data_y = data_y[::window_move]
###############################################################################################################
### k-fold split
###############################################################################################################
if test_size > 0:
test_x = data_x[-test_size:]
test_y = data_y[-test_size:]
data_x = data_x[:-test_size]
data_y = data_y[:-test_size]
if cv_mode == 'kfold':
splits = list(KFold(n_splits=n_splits, shuffle=True, random_state=2019).split(data_x, data_y))
else:
splits = list(TimeSeriesSplit(n_splits=n_splits).split(data_x, data_y))
train_size = len(splits[0][0])
validate_size = len(splits[0][1])
###############################################################################################################
### model train
###############################################################################################################
loss_func = QuantileLoss(Quantile)
if model_source is not None:
if isinstance(model_source, str):
model_list = torch.load(model_source)['model_list']
else:
model_list = model_source['model_list']
else:
model_list = []
for model_index, (train_idx, validate_idx) in enumerate(splits):
print("Beginning fold {}".format(model_index))
train_dataset = torch_Dataset(data_x[train_idx], data_y[train_idx])
train_loader = Data.DataLoader(dataset = train_dataset, batch_size = mini_batch_size, shuffle = True)
validate_x = data_x[validate_idx]
validate_y = data_y[validate_idx]
net = GRU_seq2seq(input_size = len(features),
output_size = len(Quantile),
hidden_size = hidden_size,
time_step = time_step,
prediction_length = prediction_length)
if model_source is not None:
net.load_state_dict(model_list[model_index]['state_dict'])
optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)
std_target = np.sqrt(scaler_std.var_)[target_column_index]
if std_target == 0: std_target = 1
train_loss_list = []
validate_loss_list = []
for epoch in range(epochs):
# training mode
net.train()
for step, (x, y) in enumerate(train_loader, 1):
# forward pass
out = net(x) # (mini_batch, 12, 3)
loss = loss_func(out, y)
# backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
# evaluation mode
net.eval()
with torch.no_grad():
#validate_loss = loss_func(net(validate_x), validate_y).item()
validate_loss = RMSE(validate_x, validate_y, net, std_target, scaler)
validate_loss_list.append(validate_loss)
train_loss_list.append(loss.item())
if epoch == 0 or validate_loss < best_validate_loss:
best_validate_loss = validate_loss
best_state_dict = copy.deepcopy(net.state_dict())
best_epoch = epoch
print('Best Epoch:', epoch, 'Train_loss:', '%.10f' % loss.item(), 'Validate_loss:', '%.10f' % validate_loss)
elif epoch - best_epoch > early_stopping_patience:
print("Validate_RMSE don't imporved for {} epoch, training stop !".format(early_stopping_patience))
break
else:
print('---- Epoch:', epoch, 'Train_loss:', '%.10f' % loss.item(), 'Validate_loss:', '%.10f' % validate_loss)
if model_source is None:
model_list.append({
'state_dict': best_state_dict,
'best_epoch': best_epoch,
'best_validate_loss': best_validate_loss,
'train_loss_list': train_loss_list,
'validate_loss_list': validate_loss_list
})
######################################################################################################
### final model evaluation
######################################################################################################
RMSE_list = []
for model_index in range(len(model_list)):
best_validate_loss = model_list[model_index]['best_validate_loss']
print('model {}:'.format(model_index))
print('-- Best validate loss = {}'.format(best_validate_loss))
RMSE_list.append(best_validate_loss)
mean_CV_RMSE = np.mean(RMSE_list)
print('mean CV RMSE =', mean_CV_RMSE)
if test_size > 0:
pred_result_total = []
for i in range(test_size):
pred_result_list = []
for inner_model in model_list:
net.load_state_dict(inner_model['state_dict'])
net.eval()
with torch.no_grad():
pred_result_list.append( net(test_x[[i]])[0, :, 0].data.numpy() )
pred_result = np.mean(pred_result_list, axis=0)
pred_result_total.append(pred_result)
test_RMSE = np.sqrt(mean_squared_error(pred_result_total, test_y.data.numpy()))*std_target*scaler
else:
test_RMSE = -1
###############################################################################################################
### Output
###############################################################################################################
output_model = {
'model_list': model_list,
'mean_CV_RMSE' : mean_CV_RMSE,
'scaler_std':scaler_std,
'features': features,
'target_column':target_column,
'freq': freq,
'hidden_size': hidden_size,
'Quantile': Quantile,
'mini_batch_size': mini_batch_size,
'learning_rate': learning_rate,
'early_stopping_patience': early_stopping_patience,
'epochs': epochs,
'time_step': time_step,
'prediction_length': prediction_length,
'train_size':train_size,
'validate_size':validate_size,
'test_size':test_size,
'n_splits':n_splits,
'cv_mode':cv_mode,
'weight_decay':weight_decay,
'test_RMSE': test_RMSE
}
if output_filename is not None: torch.save(output_model, output_filename)
return output_model
###############################################################################################################
### End
###############################################################################################################
def predict(data_source, model_source, predict_start_time):
if isinstance(model_source, str):
model = torch.load(model_source)
else:
model = model_source
if isinstance(data_source, str):
df = pd.read_csv(open(data_source))
else:
df = data_source
model_list = model['model_list']
features = model['features']
target_column = model['target_column']
freq = model['freq']
time_step = model['time_step']
prediction_length = model['prediction_length']
hidden_size = model['hidden_size']
Quantile = model['Quantile']
scaler_std = model['scaler_std']
scaler = 10
if 'time' in df.columns.tolist():
df.index = df.time.astype('datetime64[ns]')
df >>= drop('time')
df = df[features].astype(float).resample(freq).mean()
time_index = pd.date_range(df.index[0], periods= len(df) + prediction_length , freq = freq)
target_column_index = features.index(target_column)
if df.isnull().values.sum() > 0:
print("The data source has missing value after aggregated by freq = '{}'".format(freq))
print("Filling missing value use method = 'pad'")
df = df.fillna(method = 'pad').dropna()
predict_range = where(time_index >= predict_start_time)[:prediction_length]
if len(predict_range) < prediction_length: predict_range = np.arange(len(time_index))[-prediction_length:]
input_data_range = np.arange(predict_range[0] - time_step, predict_range[0])
if sum(input_data_range < 0) > 0 :
input_data_range = np.arange(time_step)
predict_range = np.arange(time_step, time_step + prediction_length)
net = GRU_seq2seq(input_size = len(features),
output_size = len(Quantile),
hidden_size = hidden_size,
time_step = time_step,
prediction_length = prediction_length)
std_target = np.sqrt(scaler_std.var_)[target_column_index]
if std_target == 0: std_target = 1
mean_target = scaler_std.mean_[target_column_index]
pred_result_list = []
input_data = torch.tensor(scaler_std.transform(df.iloc[input_data_range])).float().unsqueeze(0)/scaler
for inner_model in model_list:
net.load_state_dict(inner_model['state_dict'])
net.eval()
with torch.no_grad():
pred_result_list.append( net(input_data).data.numpy()[0]*std_target*scaler + mean_target )
pred_result = np.mean(pred_result_list, axis=0)
pred_result_dict = {str(q):pred_result[:, i].tolist() for i, q in enumerate(Quantile)}
return {'predict_target': target_column,
'predict_result': pred_result_dict,
'time': time_index[predict_range].strftime('%Y-%m-%d %H:%M:%S').tolist()}
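# Illustrative usage (file name, column, and date are hypothetical, not part of this module):
#   model = fit('data.csv', target_column='sales', freq='1M', prediction_length=12, time_step=24)
#   result = predict('data.csv', model, predict_start_time='2021-01-01')
#   result['predict_result']['0.5']  # median forecast for the next 12 periods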
def predict_to_gif(data_source, model_source, predict_start_time, filename,
ticks_step = 15, size = [15, 7]):
if isinstance(model_source, str):
model = torch.load(model_source)
else:
model = model_source
if isinstance(data_source, str):
df = pd.read_csv(open(data_source))
else:
df = data_source
features = model['features']
target_column = model['target_column']
freq = model['freq']
time_step = model['time_step']
prediction_length = model['prediction_length']
Quantile = model['Quantile']
n_splits = model['n_splits']
mean_CV_RMSE = model['mean_CV_RMSE']
test_RMSE = model['test_RMSE']
cv_mode = model['cv_mode']
if 'time' in df.columns.tolist():
df.index = df.time.astype('datetime64[ns]')
df >>= drop('time')
df = df[features].astype(float).resample(freq).mean()
time_index = pd.date_range(df.index[0], periods= len(df) + prediction_length , freq = freq)
target_column_index = features.index(target_column)
if df.isnull().values.sum() > 0:
print("The data source has missing value after aggregated by freq = '{}'".format(freq))
print("Filling missing value use method = 'pad'")
df = df.fillna(method = 'pad').dropna()
fname = pkg_resources.resource_filename(__name__, '../Fonts/kaiu.ttf')
image_list = []
register_matplotlib_converters()
fig, ax = plt.subplots()
for i in predict_start_time:
pred_result = predict(data_source, model_source, i)
time_index = pd.date_range(df.index[0], periods = len(df) + len(pred_result['time']), freq = freq)
time_index_pred = pd.date_range(pred_result['time'][0], periods = len(pred_result['time']), freq = freq)
diff = (time_index[1] - time_index[0]).total_seconds()
if diff >= 360*86400:
time_index_label = time_index.astype(str).str.slice(0, 4)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0][:4])
elif diff >= 20*86400:
time_index_label = time_index.astype(str).str.slice(0, 7)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0][:7])
elif diff >= 86400:
time_index_label = time_index.astype(str).str.slice(0, 10)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0][:10])
elif diff >= 3600:
time_index_label = time_index.astype(str).str.slice(0, 13)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0][:13])
elif diff >= 60:
time_index_label = time_index.astype(str).str.slice(0, 16)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0][:16])
else:
time_index_label = time_index.astype(str)
title = '{} 未來走勢預測 (預測開始於 {})'.format(target_column, pred_result['time'][0])
font = FontProperties(fname = fname, size = 15)
fig.set_size_inches(*size)
p1 = ax.plot(df[target_column], 'b')[0]
p2 = ax.plot(time_index_pred, pred_result['predict_result'][str(Quantile[0])], 'r')[0]
p3 = ax.fill_between(time_index_pred, pred_result['predict_result'][str(Quantile[1])], pred_result['predict_result'][str(Quantile[2])], color = 'c')
p4 = ax.plot([], [], ' ')[0]
if cv_mode == 'kfold':
label_cv_mode = 'fold'
else:
label_cv_mode = 'ts'
ax.legend([p1, p2, p3, p4, p4],
('實際值', '預測值', '{:.0%} 預測區間'.format(Quantile[2]-Quantile[1]),
'{}-{} CV mean RMSE = {:.4f}'.format(n_splits, label_cv_mode, mean_CV_RMSE),
'test RMSE = {:.4f}'.format(test_RMSE) )
, loc='best', prop=font)
ax.set_xticks(time_index[::ticks_step])
ax.set_xticklabels(time_index_label[::ticks_step])
ax.set_ylabel(target_column, fontproperties=font, fontsize = 20)
ax.set_xlabel('時間', fontproperties=font, fontsize = 20)
ax.set_title(title, fontproperties=font, fontsize = 30, y = 1.03)
buf = io.BytesIO()
fig.savefig(buf)
plt.cla()
buf.seek(0)
image_list.append(imageio.imread(buf))
imageio.mimsave(filename, image_list, duration=1.5)
``` |
{
"source": "JimmyBoyle/FeatureToggles",
"score": 2
} |
#### File: FeatureToggles/src/core.py
```python
import json
import logging
import ast
import boto3
import botocore
from jsonschema import ValidationError
import config
LOGGER = logging.getLogger(__name__)
client = boto3.client('ssm')
paginator = client.get_paginator('get_parameters_by_path')
PREFIX = '/' + config.PREFIX + '/'
def load():
"""Load feature toggles.
Load all feature toggle values from SSM
"""
response_iterator = paginator.paginate(
Path=PREFIX,
Recursive=True
)
response = {}
for iters in response_iterator:
for param in iters['Parameters']:
toggle, dimension = param['Name'].split('/')[-2:]
if toggle not in response:
response[toggle] = {}
response[toggle][dimension] = ast.literal_eval(param['Value'])
return response
def update(updates):
"""Update feature toggles.
Apply the given updates to the stored feature toggles.
"""
cur_params = load()
params_to_clear = []
params_to_update = []
for update in updates:
action = update['action']
if action != 'CLEAR_ALL':
if 'dimension' not in update:
raise ValidationError('update must specify a "dimension" key')
if action == 'SET':
if 'value' not in update:
raise ValidationError('SET update must specify a "value" key')
params_to_update.append(update)
elif action == 'CLEAR':
if update['toggle_name'] in cur_params:
if update['dimension'] in cur_params[update['toggle_name']]:
params_to_clear.append(
PREFIX + update['toggle_name'] + '/' + update['dimension'])
else:
raise ValidationError('{} toggle does not exist'.format(update['toggle_name']))
elif action == 'CLEAR_ALL':
if update['toggle_name'] in cur_params:
for dimension in cur_params[update['toggle_name']]:
param_name = PREFIX + \
update['toggle_name'] + '/' + dimension
params_to_clear.append(param_name)
else:
raise ValidationError('{} toggle does not exist'.format(update['toggle_name']))
else:
raise Exception('Unsupported action: {}'.format(action))
_update_params(params_to_update)
_clear_params(params_to_clear)
def _update_params(params):
for param in params:
client.put_parameter(
Name=PREFIX + param['toggle_name'] + '/' + param['dimension'],
Value=str(param['value']),
Type='String',
Overwrite=True
)
def _clear_params(param_names):
# max size of items to delete for ssm api call is 10 so we split into size 10 chunks
n = 10
for param_chunk in [param_names[i:i + n] for i in range(0, len(param_names), n)]:
response = client.delete_parameters(
Names=param_chunk
)
``` |
{
"source": "JimmyButler25/receipe-api",
"score": 3
} |
#### File: app/core/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=<PASSWORD>, **extra_fields):
"""create and save new user"""
# checking email is placed
if not email:
raise ValueError('Please enter your email address')
# normalize the email below
user = self.model(email = self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using = self._db)
return user
def create_superuser(self, email, password):
"""Creating the Superuser and saving"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
# run the test
# creating model class
class User(AbstractBaseUser, PermissionsMixin):
"""custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique = True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
"""assign UserManager to User"""
objects = UserManager()
USERNAME_FIELD = 'email'
"""USERNAME_FIELD
A string describing the name of the field on the user model that is used as the unique identifier. This will
usually be a username of some kind, but it can also be an email address, or any other unique identifier. The
field must be unique (i.e., have unique=True set in its definition), unless you use a custom authentication
backend that can support non-unique usernames."""
""" is_active
A boolean attribute that indicates whether the user is considered “active”. This attribute is provided as an
attribute on AbstractBaseUser defaulting to True. """
```
#### File: core/tests/test_admin.py
```python
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
# helper function called reverse to generate url for our page
from django.urls import reverse
class AdminSiteTests(TestCase):
"""Create a setup function"""
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email = '<EMAIL>',
password = '<PASSWORD>'
)
# this uses the client helper function to log a user into django with django auth which makes our tests easier to write
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email = '<EMAIL>',
password = '<PASSWORD>',
name = 'Test user full name'
)
def test_users_listed(self):
"""Test for users listed on User page"""
url = reverse('admin:core_user_changelist')
response = self.client.get(url)
self.assertContains(response, self.user.name)
self.assertContains(response, self.user.email)
# run the test
def test_user_change_page(self):
"""Check that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# similar to /admin/core/user/1
response = self.client.get(url)
# test that the page renders okay
self.assertEqual(response.status_code, 200)
# test fails when we run it
def test_create_user_page(self):
"""Test to check that the user page works"""
url = reverse('admin:core_user_add')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# run the test because we have not yet specified user name
``` |
{
"source": "jimmybutton/django-vaccine-dashboard",
"score": 4
} |
#### File: management/commands/_utils.py
```python
def convert(arg_in):
"""convert to float or int if possible, return string otherwise"""
if not arg_in:
return None
try:
arg_float = float(arg_in)
arg_int = int(arg_float)
if arg_int == arg_float:
return arg_int
else:
return arg_float
except (TypeError, ValueError):
return arg_in
if __name__ == "__main__":
assert convert('') == None
assert convert('1') == 1
assert convert('1.0') == 1
assert convert('1.2') == 1.2
assert convert('asdf') == 'asdf'
```
#### File: django-vaccine-dashboard/dashboard/views.py
```python
import datetime
from django.shortcuts import render
from django.http import JsonResponse
from .models import Entry
def home(request):
return render(request, template_name='home.html')
def linechart(request):
countries=["United Kingdom", "Germany", "France", "Israel", "United States", "India", "Brazil", "Turkey", "Chile", "Russia"]
cutoff_date = datetime.datetime.today() - datetime.timedelta(days=60)
datasets = []
for country in countries:
qs = Entry.objects.filter(country=country, date__gte=cutoff_date)
data = [{'x': entry.date, 'y': entry.people_vaccinated_per_hundred} for entry in qs]
datasets.append({'label': country, 'data': data})
return JsonResponse(data={'datasets': datasets})
``` |
{
"source": "jimmybutton/matml-viewer",
"score": 3
} |
#### File: matml-viewer/tests/test_utils.py
```python
import pytest
from matml import utils
import xml.etree.ElementTree as et
import pprint
pp = pprint.PrettyPrinter(indent=4, compact=True)
def test_parse_text():
assert utils.parse_text('123') == 123
assert utils.parse_text('1.234') == 1.234
assert utils.parse_text(' \n\r My text \n\r ') == "My text"
def test_node_with_text():
source = "<textnode>Some text...</textnode>"
target = {'textnode': "Some text..."}
node = et.fromstring(source)
temp = utils.matml_to_dict(node)
assert temp == target
def test_node_with_attrib():
source = """<?xml version="1.0"?>
<country name="Liechtenstein">
Some text...
</country>
"""
target = {
'country': {
'@name': "Liechtenstein",
'#text': "Some text..."
}
}
node = et.fromstring(source)
temp = utils.matml_to_dict(node)
assert temp == target
def test_node_with_children():
source = """<?xml version="1.0"?>
<country name="Liechtenstein">
<rank>1</rank>
<year>2008</year>
<gdppc>141100</gdppc>
</country>
"""
target = {
'country': {
'@name': "Liechtenstein",
'rank': 1,
'year': 2008,
'gdppc': 141100,
}
}
node = et.fromstring(source)
temp = utils.matml_to_dict(node)
assert temp == target
def test_node_with_multiple_children_of_same_name():
source = """<?xml version="1.0"?>
<country name="Liechtenstein">
<rank>1</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
"""
target = {
'country': {
'@name': "Liechtenstein",
'rank': 1,
'year': 2008,
'gdppc': 141100,
'neighbor': [
{
'@name': 'Austria',
'@direction': 'E'
}, {
'@name': 'Switzerland',
'@direction': 'W'
}
]
}
}
node = et.fromstring(source)
temp = utils.matml_to_dict(node)
assert temp == target
def test_countries_complete():
source = """<?xml version="1.0"?>
<data>
<country name="Liechtenstein">
<rank>1</rank>
<year>2008</year>
<gdppc>141100</gdppc>
<neighbor name="Austria" direction="E"/>
<neighbor name="Switzerland" direction="W"/>
</country>
<country name="Singapore">
<rank>4</rank>
<year>2011</year>
<gdppc>59900</gdppc>
<neighbor name="Malaysia" direction="N"/>
</country>
<country name="Panama">
<rank>68</rank>
<year>2011</year>
<gdppc>13600</gdppc>
<neighbor name="Costa Rica" direction="W"/>
<neighbor name="Colombia" direction="E"/>
</country>
</data>
"""
target = {
'data': {
'country': [
{
'@name': "Liechtenstein",
'rank': 1,
'year': 2008,
'gdppc': 141100,
'neighbor': [{
'@name': 'Austria',
'@direction': 'E'
}, {
'@name': 'Switzerland',
'@direction': 'W'
}
]
}, {
'@name': "Singapore",
'rank': 4,
'year': 2011,
'gdppc': 59900,
'neighbor': {
'@name': 'Malaysia',
'@direction': 'N'
}
}, {
'@name': "Panama",
'rank': 68,
'year': 2011,
'gdppc': 13600,
'neighbor': [{
'@name': 'Costa Rica',
'@direction': 'W'
}, {
'@name': 'Colombia',
'@direction': 'E'
}
]
}
]
}
}
node = et.fromstring(source)
temp = utils.matml_to_dict(node)
pp.pprint(temp)
assert temp == target
def test_parse_data():
assert utils.parse_data(None) is None
assert utils.parse_data(1) == 1
assert utils.parse_data(1.23) == 1.23
assert utils.parse_data('') is None
assert utils.parse_data('1.23, 2.34') == [1.23, 2.34]
assert utils.parse_data('972,561') == [972, 561]
assert utils.parse_data('.0011,.0018,.0023,.0027,.0029') == [.0011,.0018,.0023,.0027,.0029]
assert utils.parse_data('4,-,-') == [4, '-', '-']
assert utils.parse_data('1109,,') == [1109, None, None]
assert utils.parse_data("1.0E5,1.0E6,1.0E7,1.0E8,5.0E8") == [1.0E5,1.0E6,1.0E7,1.0E8,5.0E8]
def test_data_to_list():
source = """<Data format="integer">972,561</Data>"""
node = et.fromstring(source)
assert utils.matml_to_dict(node) == {'Data': [972,561]}
source = "<Data>23,1370</Data>"
node = et.fromstring(source)
assert utils.matml_to_dict(node) == {'Data': [23,1370]}
source = """<Data format="float">+11.5,+8.5,+7,+6.5,+6.5</Data>"""
node = et.fromstring(source)
assert utils.matml_to_dict(node) == {'Data': [11.5,8.5,7,6.5,6.5]}
``` |
{
"source": "jimmybutton/microblog",
"score": 3
} |
#### File: microblog/app/translate.py
```python
import json
import requests
import uuid
from flask import current_app
def translate(text, source_language, dest_language):
if 'MS_TRANSLATOR_KEY' not in current_app.config or not current_app.config['MS_TRANSLATOR_KEY']:
return 'Error: the translation service is not configured.'
base_url = 'https://api.cognitive.microsofttranslator.com'
path = '/translate?api-version=3.0'
params = '&from={}&to={}'.format(source_language, dest_language)
constructed_url = base_url + path + params
headers = {
'Ocp-Apim-Subscription-Key': current_app.config['MS_TRANSLATOR_KEY'],
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
body = [
{"Text": text}
]
response = requests.post(constructed_url, headers=headers, json=body)
if response.status_code != 200:
return 'Error {}: the translation service failed. URL {} Body {}'.format(response.status_code, constructed_url, body)
content = response.json()
return content[0].get('translations')[0].get('text')
``` |
{
"source": "jimmybutton/moviedb",
"score": 2
} |
#### File: moviedb/app/__init__.py
```python
from flask import Flask
from config import Config
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_moment import Moment
from flask_misaka import Misaka
from flask_bootstrap import Bootstrap
import os
import logging
from logging.handlers import RotatingFileHandler
from elasticsearch import Elasticsearch
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
db = SQLAlchemy(metadata=metadata)
migrate = Migrate()
login = LoginManager()
login.login_view = "auth.login"
moment = Moment()
md = Misaka()
bootstrap = Bootstrap()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
with app.app_context():
if db.engine.url.drivername == 'sqlite':
migrate.init_app(app, db, render_as_batch=True)
else:
migrate.init_app(app, db)
# migrate.init_app(app, db)
login.init_app(app)
moment.init_app(app)
md.init_app(app)
bootstrap.init_app(app)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.cli import bp as cli_bp
app.register_blueprint(cli_bp)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
from app import models
if not app.debug and not app.testing:
if not os.path.exists("logs"):
os.mkdir("logs")
file_handler = RotatingFileHandler(
"logs/moviedb.log", maxBytes=10240, backupCount=10
)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
)
)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info("Moviedb startup")
return app
```
#### File: app/main/people.py
```python
from flask import render_template, flash, url_for, redirect, request, current_app, Markup
from app import db
from app.main import bp
from app.main.forms import SearchForm, EditPersonForm
from flask_login import current_user, login_required
from app.models import User, People, Character, Movie
from math import floor
from app.main.core import get_list
@bp.route("/people", methods=['GET'])
@login_required
def people():
search_form = SearchForm()
if not search_form.validate():
return redirect("main.people")
people = get_list(cls=People, request_args=request.args, search_form=search_form, search_fields=["name"], order=People.score.desc())
return render_template("people.html", title="People", people=people, search_form=search_form,)
@bp.route("/person/<id>")
@login_required
def person(id):
person = People.query.get(id)
if not person:
flash("Person with id={} not found.".format(id))
return redirect(url_for("main.people"))
return render_template("person.html", person=person, title=person.name)
@bp.route("/person/<id>/roles")
@login_required
def person_roles(id):
person = People.query.get(id)
if not person:
flash("Person with id={} not found.".format(id))
return redirect(url_for("main.people"))
sort = request.args.get("sort", "order", type=str) # field to sort by
order = request.args.get("order", "asc", type=str) # desc or asc
offset = request.args.get("offset", 0, type=int) # start item
per_page = request.args.get("limit", 10, type=int) # per page
page = floor(offset / per_page) + 1 # estimate page from offset
if sort in Character._get_keys():
sort_field = getattr(Character, sort)
order_method = sort_field.desc() if order == "desc" else sort_field.asc()
roles = person.roles.order_by(order_method)
elif sort == "movie_title":
sort_field = Movie.title
order_method = sort_field.desc() if order == "desc" else sort_field.asc()
roles = Character.query.filter(Character.actor_id == id).join(Movie, Character.movie).order_by(order_method)
else:
roles = person.roles.order_by(Character.order.asc())
roles = roles.paginate(page, per_page, False)
return {"total": roles.total, "rows": [c.to_dict() for c in roles.items]}
@bp.route("/create_person", methods=["GET", "POST"])
def create_person():
form = EditPersonForm()
if form.validate_on_submit():
person = People()
form_keys = [k for k in form.data.keys() if k not in ["submit", "csrf_token", "submit_and_new"]]
for key in form_keys:
setattr(person, key, getattr(form, key).data)
person.created_by = current_user
db.session.add(person)
db.session.commit()
flash(Markup(f"""person <a href="{url_for('main.person', id=id)}">{person.name}</a> created.""".format()))
if form.submit._value() == "Save and New":
return redirect(url_for("main.create_person"))
else:
return redirect(url_for("main.person", id=person.id))
return render_template("person_create.html", title="Create Person", form=form)
@bp.route("/person/<id>/edit", methods=["GET", "POST"])
@login_required
def edit_person(id):
form = EditPersonForm()
person = People.query.get(id)
if not person:
flash("Person with id={} not found.".format(id))
return redirect(url_for("main.people"))
form_keys = [k for k in form.data.keys() if k not in ["submit", "csrf_token", "submit_and_new"]]
if form.validate_on_submit():
for key in form_keys:
setattr(person, key, getattr(form, key).data)
person.modified_by = current_user
db.session.add(person)
db.session.commit()
flash(Markup(f"""Person <a href="{url_for("main.person", id=id)}">{person.name}</a> updated."""))
return redirect(url_for("main.person", id=person.id))
elif request.method == "GET":
for key in form_keys:
getattr(form, key).data = getattr(person, key)
return render_template("person_create.html", title="Edit person", form=form, person=person)
```
#### File: moviedb/app/models.py
```python
from app import db, login
import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
import sqlalchemy as sa
from hashlib import md5
from sqlalchemy.ext.hybrid import hybrid_property
import progressbar
class BaseModel(object):
@classmethod
def _get_keys(cls):
return sa.orm.class_mapper(cls).c.keys()
def to_dict(self):
d = {}
for k in self._get_keys():
value = getattr(self, k)
if isinstance(value, datetime.datetime):
d[k] = value.isoformat()
else:
d[k] = value
return d
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def __repr__(self):
return "<User {}>".format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return f'https://www.gravatar.com/avatar/{digest}?d=identicon&s={size}'
class Movie(db.Model, BaseModel):
id = db.Column(db.Integer, primary_key=True)
created_timestamp = db.Column(
db.DateTime, index=True, default=datetime.datetime.utcnow
)
created_id = db.Column(db.Integer, db.ForeignKey("user.id"))
created_by = db.relationship("User", foreign_keys=[created_id])
modified_timestamp = db.Column(
db.DateTime,
index=True,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
)
modified_id = db.Column(db.Integer, db.ForeignKey("user.id"))
modified_by = db.relationship("User", foreign_keys=[modified_id])
title = db.Column(db.String(128), index=True)
year = db.Column(db.Integer)
director_name = db.Column(db.String(64))
director_id = db.Column(db.Integer, db.ForeignKey("people.id"))
category = db.Column(db.String(64))
certificate = db.Column(db.String(16))
release_date = db.Column(db.Date)
release_country = db.Column(db.String(5))
plot_summary = db.Column(db.Text())
rating_value = db.Column(db.Float)
rating_count = db.Column(db.Integer)
poster_url = db.Column(db.String(256))
runtime = db.Column(db.Integer)
url = db.Column(db.String(64))
cast = db.relationship(
"Character", foreign_keys="Character.movie_id", backref="movie", lazy="dynamic"
)
def __repr__(self):
return "<Movie {}>".format(self.title)
@property
def displayname(self):
return f"{self.title} ({self.year})"
@property
def default_image_url(self):
if self.poster_url:
return self.poster_url
else:
return "https://m.media-amazon.com/images/G/01/imdb/images/nopicture/small/film-293970583._CB469775754_.png"
class People(db.Model, BaseModel):
id = db.Column(db.Integer, primary_key=True)
created_timestamp = db.Column(
db.DateTime, index=True, default=datetime.datetime.utcnow
)
created_id = db.Column(db.Integer, db.ForeignKey("user.id"))
created_by = db.relationship("User", foreign_keys=[created_id])
modified_timestamp = db.Column(
db.DateTime,
index=True,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
)
modified_id = db.Column(db.Integer, db.ForeignKey("user.id"))
modified_by = db.relationship("User", foreign_keys=[modified_id])
name = db.Column(db.String(64), index=True)
url = db.Column(db.String(64))
image_url = db.Column(db.String(256))
dob = db.Column(db.Date())
birthname = db.Column(db.String(128))
height = db.Column(db.String(64))
bio = db.Column(db.Text())
score = db.Column(db.Float)
roles = db.relationship(
"Character", foreign_keys="Character.actor_id", backref="actor", lazy="dynamic"
)
directed_movies = db.relationship("Movie", foreign_keys="Movie.director_id", backref='director', lazy='dynamic')
def __repr__(self):
return "<Person {}>".format(self.name)
@property
def default_image_url(self):
if self.image_url:
return self.image_url
else:
return "https://m.media-amazon.com/images/G/01/imdb/images/nopicture/32x44/name-2138558783._CB468460248_.png"
def _get_score(self):
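# Weight each movie's rating by the actor's billing order: the weight decays linearly
# from 1.0 at the top of the cast list down to 0.1 at order 50, and stays at 0.1 beyond that.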
score = 0
for role in self.roles:
try:
score += role.movie.rating_value * (-0.9/50 * role.order + 1 if role.order < 50 else 0.1)
except Exception as e:
print(self, e)
return score
def _update_score(self):
self.score = self._get_score()
db.session.add(self)
db.session.commit()
@classmethod
def update_score(cls):
for obj in progressbar.progressbar(cls.query, max_value=cls.query.count()):
obj.score = obj._get_score()
db.session.add(obj)
db.session.commit()
class Character(db.Model, BaseModel):
id = db.Column(db.Integer, primary_key=True)
created_timestamp = db.Column(
db.DateTime, index=True, default=datetime.datetime.utcnow
)
created_id = db.Column(db.Integer, db.ForeignKey("user.id"))
created_by = db.relationship("User", foreign_keys=[created_id])
modified_timestamp = db.Column(
db.DateTime,
index=True,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
)
modified_id = db.Column(db.Integer, db.ForeignKey("user.id"))
modified_by = db.relationship("User", foreign_keys=[modified_id])
movie_id = db.Column(db.Integer, db.ForeignKey("movie.id"))
actor_id = db.Column(db.Integer, db.ForeignKey("people.id"))
character_name = db.Column(db.String(128))
character_url = db.Column(db.String(128))
order = db.Column(db.Integer) # order in which to show character per movie
@property
def actor_name(self):
return self.actor.name
def __repr__(self):
return "<Character {}, Actor {}, Movie {}, Order {}>".format(
self.character_name, self.actor.name, self.movie.title, self.order
)
def to_dict(self):
d = super().to_dict()
d.update({
"movie_title": self.movie.title,
"movie_image": self.movie.default_image_url,
"movie_year": self.movie.year,
"actor_name": self.actor.name,
"actor_image": self.actor.default_image_url,
})
return d
@login.user_loader
def load_user(id):
return User.query.get(int(id))
```
#### File: moviedb/app/search.py
```python
from flask import current_app
from elasticsearch import NotFoundError
from datetime import datetime
def add_to_index(index, model):
if not current_app.elasticsearch:
return
payload = {}
for field in model.__searchable__.keys():
value = getattr(model, field)
if isinstance(value, datetime):
payload[field] = value.isoformat()
else:
payload[field] = value
current_app.elasticsearch.index(index=index, id=model.id, body=payload)
def remove_from_index(index, model):
if not current_app.elasticsearch:
return
try:
current_app.elasticsearch.delete(index=index, id=model.id)
except NotFoundError as e:
print(e)
def query_index_full_text(index, query, page, per_page):
if not current_app.elasticsearch:
return [], 0
search = current_app.elasticsearch.search(
index=index,
body={
"query": {"multi_match": {"query": query, "fields": ["*"]}},
"from": (page - 1) * per_page,
"size": per_page,
},
)
ids = [int(hit["_id"]) for hit in search["hits"]["hits"]]
return ids, search["hits"]["total"]["value"]
def query_index(index, query, page, per_page, sort_by=None, order="asc"):
if not current_app.elasticsearch:
return [], 0
search = current_app.elasticsearch.search(
index=index,
body={
"query": query,
"from": (page - 1) * per_page,
"size": per_page,
"sort": [{sort_by: order}] if sort_by else ["_score"],
},
)
ids = [int(hit["_id"]) for hit in search["hits"]["hits"]]
return ids, search["hits"]["total"]["value"]
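# Illustrative call (index and field names are assumptions, not defined in this module):
#   ids, total = query_index("movie", {"match": {"title": "alien"}},
#                            page=1, per_page=10, sort_by="year", order="desc")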
def clear_index(index, mappings):
current_app.elasticsearch.indices.delete(index=index, ignore=[400, 404])
current_app.elasticsearch.indices.create(
index=index,
body={
"settings": {
"analysis": {
"normalizer": {
"my_normalizer": {
"type": "custom",
"char_filter": [],
"filter": ["lowercase", "asciifolding"],
}
}
}
},
"mappings": {"properties": mappings},
},
)
```
#### File: migrations/versions/2f2f0229ae34_update_character_model.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2f2f0229ae34'
down_revision = '29905aa2e122'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# op.drop_column('character', 'movie_year')
# op.drop_column('character', 'movie_title')
# op.drop_column('character', 'actor_name')
# ### end Alembic commands ###
with op.batch_alter_table('character') as batch_op:
batch_op.drop_column('movie_year')
batch_op.drop_column('movie_title')
batch_op.drop_column('actor_name')
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('character', sa.Column('actor_name', sa.VARCHAR(length=128), nullable=True))
op.add_column('character', sa.Column('movie_title', sa.VARCHAR(length=128), nullable=True))
op.add_column('character', sa.Column('movie_year', sa.INTEGER(), nullable=True))
# ### end Alembic commands ###
```
#### File: migrations/versions/3b1663812883_director_relationship.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3b1663812883'
down_revision = '52521046886e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# op.add_column('movie', sa.Column('director_id', sa.Integer(), nullable=True))
# op.create_foreign_key(None, 'movie', 'people', ['director_id'], ['id'])
# ### end Alembic commands ###
# with op.batch_alter_table('post') as batch_op:
# batch_op.create_foreign_key(None, 'post', 'event', ['event_id'], ['id'])
# batch_op.drop_column('post', 'event')
with op.batch_alter_table('movie') as batch_op:
batch_op.add_column(sa.Column('director_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key('fk_movie_director_id_people', 'people', ['director_id'], ['id'])
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'movie', type_='foreignkey')
op.drop_column('movie', 'director_id')
# ### end Alembic commands ###
``` |
{
"source": "jimmybutton/newsblog",
"score": 3
} |
#### File: newsblog/app/models.py
```python
from app import db
from datetime import datetime
class Article(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(128))
content = db.Column(db.String(2048))
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
image = db.Column(db.String(128)) # filename of feature image
created = db.Column(db.DateTime, index=True, default=datetime.utcnow)
updated = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Article {}>'.format(self.title)
class Category(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), index=True, unique=True)
articles = db.relationship('Article', backref='category', lazy='dynamic')
def __repr__(self):
return '<Category {}>'.format(self.name)
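

# --- Usage sketch (illustrative, not part of the original module) -------------
# Assumes the usual Flask-SQLAlchemy setup: an application context is active and
# the tables exist (e.g. created via db.create_all() or a migration).
#
#     politics = Category(name="Politics")
#     article = Article(title="Budget passes", content="...", category=politics)
#     db.session.add_all([politics, article])
#     db.session.commit()
#     article.category.name        # "Politics" via the backref
#     politics.articles.count()    # lazy='dynamic' relationships return a query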
``` |
{
"source": "JimmyCai91/tensorboardX",
"score": 2
} |
#### File: tensorboardX/beholder/beholder.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..src.summary_pb2 import Summary
from ..src.summary_pb2 import SummaryMetadata
from ..src.tensor_pb2 import TensorProto
from ..src.tensor_shape_pb2 import TensorShapeProto
import os
import time
import numpy as np
# import tensorflow as tf
# from tensorboard.plugins.beholder import im_util
# from . import im_util
from .file_system_tools import read_pickle,\
write_pickle, write_file
from .shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME, SUMMARY_COLLECTION_KEY_NAME, SECTION_INFO_FILENAME
from . import video_writing
# from .visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[video_writing.FFmpegVideoOutput, video_writing.PNGVideoOutput])
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not os.path.exists(self.PLUGIN_LOGDIR + '/config.pkl'):
os.makedirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG, '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME))
# self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, frame):
'''Writes the frame to disk as a tensor summary.'''
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
smd = SummaryMetadata()
tensor = TensorProto(
dtype='DT_FLOAT',
float_val=frame.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[TensorShapeProto.Dim(size=frame.shape[0]),
TensorShapeProto.Dim(size=frame.shape[1]),
TensorShapeProto.Dim(size=frame.shape[2])]
)
)
summary = Summary(value=[Summary.Value(tag=TAG_NAME, metadata=smd, tensor=tensor)]).SerializeToString()
write_file(summary, path)
@staticmethod
def stats(tensor_and_name):
imgstats = []
for (img, name) in tensor_and_name:
immax = img.max()
immin = img.min()
imgstats.append(
{
'height': img.shape[0],
'max': str(immax),
'mean': str(img.mean()),
'min': str(immin),
'name': name,
'range': str(immax - immin),
'shape': str((img.shape[1], img.shape[2]))
})
return imgstats
def _get_final_image(self, config, trainable=None, arrays=None, frame=None):
if config['values'] == 'frames':
# print('===frames===')
final_image = frame
elif config['values'] == 'arrays':
# print('===arrays===')
final_image = np.concatenate([arr for arr, _ in arrays])
stat = self.stats(arrays)
write_pickle(stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
elif config['values'] == 'trainable_variables':
# print('===trainable===')
final_image = np.concatenate([arr for arr, _ in trainable])
stat = self.stats(trainable)
write_pickle(stat, '{}/{}'.format(self.PLUGIN_LOGDIR, SECTION_INFO_FILENAME))
if len(final_image.shape) == 2: # Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, trainable, arrays, frame, config):
final_image = self._get_final_image(config, trainable, arrays, frame)
self._write_summary(final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
                print('Starting recording using %s' %
                      self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
print('Finished recording')
    # TODO: blanket try/except for production? I don't want someone's script to die
# after weeks of running because of a visualization.
def update(self, trainable=None, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
          trainable: a list of (tensor, name) pairs for trainable variables.
          arrays: a list of (tensor, name) pairs for arbitrary arrays.
          frame: a 2D (grayscale) or HxWxC numpy array used directly as the
            visualization when the config selects 'frames'.
'''
new_config = self._get_config()
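        # Note: the 'True or' below bypasses the FPS throttle, so a frame is
        # written on every call to update().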
if True or self._enough_time_has_passed(self.previous_config['FPS']):
# self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(trainable, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
# @staticmethod
# def gradient_helper(optimizer, loss, var_list=None):
# '''A helper to get the gradients out at each step.
# Args:
# optimizer: the optimizer op.
# loss: the op that computes your loss value.
# Returns: the gradient tensors and the train_step op.
# '''
# if var_list is None:
# var_list = tf.trainable_variables()
# grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
# grads = [pair[0] for pair in grads_and_vars]
# return grads, optimizer.apply_gradients(grads_and_vars)
# implements pytorch backward later
class BeholderHook():
pass
# """SessionRunHook implementation that runs Beholder every step.
# Convenient when using tf.train.MonitoredSession:
# ```python
# beholder_hook = BeholderHook(LOG_DIRECTORY)
# with MonitoredSession(..., hooks=[beholder_hook]) as sess:
# sess.run(train_op)
# ```
# """
# def __init__(self, logdir):
# """Creates new Hook instance
# Args:
# logdir: Directory where Beholder should write data.
# """
# self._logdir = logdir
# self.beholder = None
# def begin(self):
# self.beholder = Beholder(self._logdir)
# def after_run(self, run_context, unused_run_values):
# self.beholder.update(run_context.session)
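

# --- Usage sketch (illustrative, not part of the original module) -------------
# Assumes this file is importable as tensorboardX.beholder.beholder (per the
# path above) and that logdir is a fresh, writable directory.
#
#     import numpy as np
#     from tensorboardX.beholder.beholder import Beholder
#
#     beholder = Beholder(logdir="./runs/exp1")
#     for step in range(100):
#         weights = np.random.rand(32, 64)   # stand-in for real parameters
#         grads = np.random.rand(32, 64)     # stand-in for real gradients
#         beholder.update(
#             trainable=[(weights, "weights")],
#             arrays=[(grads, "grads")],
#             frame=np.random.rand(64, 64),
#         )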
``` |
{
"source": "jimmycallin/master-thesis",
"score": 3
} |
#### File: architectures/conll16st-hd-sdp/DiscourseSenseClassification_FeatureExtraction_v1.py
```python
import codecs
import json
import random
import sys
from datetime import datetime
import logging #word2vec logging
from sklearn import preprocessing
import validator
from Common_Utilities import CommonUtilities
import gensim
from gensim import corpora, models, similarities # used for word2vec
from gensim.models.word2vec import Word2Vec # used for word2vec
from gensim.models.doc2vec import Doc2Vec#used for doc2vec
import time # used for performance measuring
import math
from scipy import spatial # used for similarity calculation
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Phrases
from gensim import corpora # for dictionary
from gensim.models import LdaModel
# from sklearn.svm import libsvm
from sklearn.svm import SVC
import os
sys.path.append(os.path.expanduser('~/semanticz'))
from Word2Vec_AverageVectorsUtilities import AverageVectorsUtilities
import pickle
import const
# Constants
const.FIELD_ARG1 = 'Arg1'
const.FIELD_ARG2 = 'Arg2'
const.FIELD_CONNECTIVE = 'Connective'
const.FIELD_LABEL_LEVEL1 = 'Lbl_Lvl1'
const.FIELD_LABEL_LEVEL2 = 'Lbl_Lvl2'
const.FIELD_REL_TYPE = 'Type'
class DiscourseSenseClassification_FeatureExtraction(object):
"""Discourse relation sense classifier feature extration
"""
    CONNECTIVES_FREELY_OMISSIBLE = [
'accordingly',
'as a result',
'because',
'by comparison',
'by contrast',
'consequently',
'for example',
'for instance',
'furthermore',
'in fact',
'in other words',
'in particular',
'in short',
'indeed',
'previously',
'rather',
'so',
'specifically',
'therefore'
]
CONNECTIVES = ['if',
'rather',
'in contrast',
'conversely',
'neither nor',
'meantime',
'therefore',
'while',
'in other words',
'so',
'as well',
'after',
'afterward',
'once',
'upon',
'so that',
'nonetheless',
'by contrast',
'particularly',
'likewise',
'now that',
'indeed',
'further',
'before',
'previously',
'in addition',
'also',
'separately',
'besides',
'until',
'thereafter',
'thereby',
'hence',
'in fact',
'much as',
'when',
'for',
'since',
'or',
'consequently',
'similarly',
'in the end',
'thus',
'in particular',
'simultaneously',
'nevertheless',
'on the other hand',
'whereas',
'lest',
'furthermore',
'if and when',
'in sum',
'although',
'regardless',
'moreover',
'on the contrary',
'overall',
'alternatively',
'as long as',
'then',
'plus',
'before and after',
'meanwhile',
'by comparison',
'when and if',
'yet',
'in the mean time',
'as soon as',
'accordingly',
'on the one hand on the other hand',
'by then',
'earlier',
'however',
'as if',
'except',
'though',
'later',
'next',
'in turn',
'still',
'either or',
'unless',
'else',
'as',
'as a result',
'insofar as',
'otherwise',
'instead',
'because',
'for instance',
'finally',
'till',
'in short',
'but',
'if then',
'nor',
'ultimately',
'specifically',
'as though',
'as an alternative',
'and',
'for example',
'additionally']
@staticmethod
def get_connectives_emeddings(connectives, model, vocab_set, num_feats, split_words=True):
if split_words:
connectives_split = [c.split(' ') for c in connectives]
else:
connectives_split = [c for c in connectives]
connectives_embedd = []
for conn_words in connectives_split:
conn_embedding = AverageVectorsUtilities.makeFeatureVec(conn_words, model, num_feats,
vocab_set)
connectives_embedd.append(conn_embedding)
return connectives_embedd
@staticmethod
def calc_sim_singleembedd_to_embeddlist(single_embedding, embedding_list):
sim_list = []
for i in range(0, len(embedding_list)):
sim = spatial.distance.cosine(single_embedding, embedding_list[i])
sim_list.append(sim)
return sim_list
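    # NOTE: the definition below redefines get_connectives_emeddings and shadows
    # the variant above, so the split_words parameter is not available via the
    # class attribute.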
@staticmethod
def get_connectives_emeddings(connectives, model, vocab_set, num_feats):
connectives_split = [c.split(' ') for c in connectives]
connectives_embedd = []
for conn_words in connectives_split:
conn_embedding = AverageVectorsUtilities.makeFeatureVec(conn_words, model, num_feats,
vocab_set)
connectives_embedd.append(conn_embedding)
return connectives_embedd
@staticmethod
def get_word_token(parse_obj, doc_id, sent_id, word_id):
return parse_obj[doc_id]['sentences'][sent_id]['words'][word_id]
@staticmethod
def calculate_postagged_similarity_from_taggeddata_and_tokens(text1_tokens_in_vocab,
text2_tokens_in_vocab,
model,
tag_type_start_1,
tag_type_start_2):
res_sim = 0.00
text1_words_in_model = [x[0] for x in text1_tokens_in_vocab if x[1]['PartOfSpeech'].startswith(tag_type_start_1)]
text2_words_in_model = [x[0] for x in text2_tokens_in_vocab if x[1]['PartOfSpeech'].startswith(tag_type_start_2)]
if len(text1_words_in_model) > 0 and len(text2_words_in_model) > 0:
res_sim = model.n_similarity(text1_words_in_model, text2_words_in_model)
return res_sim
@staticmethod
def get_postagged_sim_fetures(tokens_data_text1, tokens_data_text2, postagged_data_dict,
model,
word2vec_num_features,
word2vec_index2word_set
):
input_data_wordvectors = []
input_data_sparse_features = {}
tokens_in_vocab_1 = [x for x in tokens_data_text1 if x[0] in word2vec_index2word_set]
tokens_in_vocab_2 = [x for x in tokens_data_text2 if x[0] in word2vec_index2word_set]
        # Arg1-to-Arg2 similarities between words whose POS tags start with the
        # given prefixes ('' matches all tags).
        tag_pairs = [
            ('NN', 'NN'), ('J', 'J'), ('VB', 'VB'), ('RB', 'RB'), ('DT', 'DT'), ('PR', 'PR'),
            ('NN', 'J'), ('J', 'NN'), ('RB', 'VB'), ('VB', 'RB'), ('PR', 'NN'), ('NN', 'PR'),
        ]
        # Additional modal-related features
        include_modal = True
        if include_modal:
            tag_pairs.extend([('MD', 'VB'), ('VB', 'MD'), ('', 'MD'), ('MD', '')])
        for tag_type_start_1, tag_type_start_2 in tag_pairs:
            postagged_sim = DiscourseSenseClassification_FeatureExtraction.calculate_postagged_similarity_from_taggeddata_and_tokens(
                text1_tokens_in_vocab=tokens_in_vocab_1,
                text2_tokens_in_vocab=tokens_in_vocab_2,
                model=model,
                tag_type_start_1=tag_type_start_1,
                tag_type_start_2=tag_type_start_2)
            input_data_wordvectors.append(postagged_sim)
            input_data_sparse_features[
                'sim_pos_arg1_%s_arg2_%s' % (tag_type_start_1, 'ALL' if tag_type_start_2 == '' else tag_type_start_2)] = \
                postagged_sim
        return input_data_wordvectors, input_data_sparse_features
@staticmethod
def extract_features_as_vector_from_single_record_v1(relation_dict, parse, word2vec_model, word2vec_index2word_set,
deps_model, deps_vocabulary):
features = []
sparse_feats_dict = {}
deps_num_feats = deps_model.shape[1]
w2v_num_feats = len(word2vec_model.syn0[0])
# FEATURE EXTRACTION HERE
doc_id = relation_dict['DocID']
# print doc_id
connective_tokenlist = [x[2] for x in relation_dict['Connective']['TokenList']]
has_connective = 1 if len(connective_tokenlist) > 0 else 0
features.append(has_connective)
feat_key = "has_connective"
if has_connective == 1:
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, has_connective)
# print 'relation_dict:'
# print relation_dict['Arg1']['TokenList']
# ARG 1
arg1_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg1']['TokenList']]
arg1_words = [x[0] for x in arg1_tokens]
# print 'arg1: %s' % arg1_words
arg1_embedding = AverageVectorsUtilities.makeFeatureVec(arg1_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg1_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg1_embedding, 'W2V_A1_')
# arg1 deps embeddings
arg1_embedding_deps = AverageVectorsUtilities.makeFeatureVec(arg1_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(arg1_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg1_embedding_deps, 'DEPS_A1_')
# connective embedding
connective_words = [parse[doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in
relation_dict['Connective']['TokenList']]
connective_embedding = AverageVectorsUtilities.makeFeatureVec(connective_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(connective_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_embedding, 'W2V_CON_')
        # Connective DEPS embeddings
connective_embedding_deps = AverageVectorsUtilities.makeFeatureVec(connective_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(connective_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_embedding_deps, 'DEPS_CON_')
# ARG 2
arg2_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg2']['TokenList']]
arg2_words = [x[0] for x in arg2_tokens]
# print 'arg2: %s' % arg2_words
arg2_embedding = AverageVectorsUtilities.makeFeatureVec(arg2_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg2_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg2_embedding, 'W2V_A2_')
# arg2 deps embeddings
arg2_embedding_deps = AverageVectorsUtilities.makeFeatureVec(arg2_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(arg2_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg2_embedding_deps, 'DEPS_A2_')
# Arg1 to Arg 2 cosine similarity
arg1arg2_similarity = 0.00
if len(arg1_words) > 0 and len(arg2_words) > 0:
arg1arg2_similarity = spatial.distance.cosine(arg1_embedding, arg2_embedding)
features.append(arg1arg2_similarity)
# Calculate maximized similarities
words1 = [x for x in arg1_words if x in word2vec_index2word_set]
        words2 = [x for x in arg2_words if x in word2vec_index2word_set]
sim_avg_max = AverageVectorsUtilities.get_feature_vec_avg_aligned_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set)
features.append(sim_avg_max)
feat_key = "max_sim_aligned"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_max)
sim_avg_top1 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 1)
features.append(sim_avg_top1)
feat_key = "max_sim_avg_top1"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top1)
sim_avg_top2 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 2)
features.append(sim_avg_top2)
feat_key = "max_sim_avg_top2"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top2)
sim_avg_top3 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 3)
features.append(sim_avg_top3)
feat_key = "max_sim_avg_top3"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top3)
sim_avg_top5 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 5)
features.append(sim_avg_top5)
feat_key = "max_sim_avg_top5"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top5)
# POS tags similarities
postag_feats_vec, postag_feats_sparse = DiscourseSenseClassification_FeatureExtraction.get_postagged_sim_fetures(
tokens_data_text1=arg1_tokens, tokens_data_text2=arg2_tokens, postagged_data_dict=parse,
model=word2vec_model, word2vec_num_features=w2v_num_feats,
word2vec_index2word_set=word2vec_index2word_set)
# print postag_feats_vec
features.extend(postag_feats_vec)
sparse_feats_dict.update(postag_feats_sparse)
for i in range(0, len(features)):
if math.isnan(features[i]):
features[i] = 0.00
return features # , sparse_feats_dict
@staticmethod
def extract_features_as_vector_from_single_record_v2_optimized(relation_dict, parse, word2vec_model, word2vec_index2word_set,
deps_model, deps_vocabulary, use_connective_sim=True, return_sparse_feats = False):
features = []
sparse_feats_dict = {}
deps_num_feats = deps_model.shape[1]
w2v_num_feats = len(word2vec_model.syn0[0])
# FEATURE EXTRACTION HERE
doc_id = relation_dict['DocID']
# print doc_id
connective_tokenlist = [x[2] for x in relation_dict['Connective']['TokenList']]
has_connective = 1 if len(connective_tokenlist) > 0 else 0
features.append(has_connective)
feat_key = "has_connective"
if has_connective == 1:
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, has_connective)
# print 'relation_dict:'
# print relation_dict['Arg1']['TokenList']
# ARG 1
arg1_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg1']['TokenList']]
arg1_words = [x[0] for x in arg1_tokens]
# print 'arg1: %s' % arg1_words
arg1_embedding = AverageVectorsUtilities.makeFeatureVec(arg1_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg1_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg1_embedding, 'W2V_A1_')
# arg1 deps embeddings
arg1_embedding_deps = AverageVectorsUtilities.makeFeatureVec(arg1_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(arg1_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg1_embedding_deps, 'DEPS_A1_')
# connective embedding
connective_words = [parse[doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in
relation_dict['Connective']['TokenList']]
connective_embedding = AverageVectorsUtilities.makeFeatureVec(connective_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(connective_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_embedding, 'W2V_CON_')
        # Connective DEPS embeddings
connective_embedding_deps = AverageVectorsUtilities.makeFeatureVec(connective_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(connective_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_embedding_deps, 'DEPS_CON_')
# ARG 2
arg2_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg2']['TokenList']]
arg2_words = [x[0] for x in arg2_tokens]
# print 'arg2: %s' % arg2_words
arg2_embedding = AverageVectorsUtilities.makeFeatureVec(arg2_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg2_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg2_embedding, 'W2V_A2_')
# arg2 deps embeddings
arg2_embedding_deps = AverageVectorsUtilities.makeFeatureVec(arg2_words, deps_model, deps_num_feats,
deps_vocabulary)
features.extend(arg2_embedding_deps)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg2_embedding_deps, 'DEPS_A2_')
# Arg1 to Arg 2 cosine similarity
arg1arg2_similarity = 0.00
if len(arg1_words) > 0 and len(arg2_words) > 0:
arg1arg2_similarity = spatial.distance.cosine(arg1_embedding, arg2_embedding)
features.append(arg1arg2_similarity)
# Calculate maximized similarities
words1 = [x for x in arg1_words if x in word2vec_index2word_set]
        words2 = [x for x in arg2_words if x in word2vec_index2word_set]
sim_avg_max = AverageVectorsUtilities.get_feature_vec_avg_aligned_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set)
features.append(sim_avg_max)
feat_key = "max_sim_aligned"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_max)
sim_avg_top1 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 1)
features.append(sim_avg_top1)
feat_key = "max_sim_avg_top1"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top1)
sim_avg_top2 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 2)
features.append(sim_avg_top2)
feat_key = "max_sim_avg_top2"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top2)
sim_avg_top3 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 3)
features.append(sim_avg_top3)
feat_key = "max_sim_avg_top3"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top3)
sim_avg_top5 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 5)
features.append(sim_avg_top5)
feat_key = "max_sim_avg_top5"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top5)
# POS tags similarities
postag_feats_vec, postag_feats_sparse = DiscourseSenseClassification_FeatureExtraction.get_postagged_sim_fetures(
tokens_data_text1=arg1_tokens, tokens_data_text2=arg2_tokens, postagged_data_dict=parse,
model=word2vec_model, word2vec_num_features=w2v_num_feats,
word2vec_index2word_set=word2vec_index2word_set)
features.extend(postag_feats_vec)
sparse_feats_dict.update(postag_feats_sparse)
for i in range(0, len(features)):
if math.isnan(features[i]):
features[i] = 0.00
if return_sparse_feats:
return features, sparse_feats_dict
else:
return features
@staticmethod
def extract_features_as_vector_from_single_record(relation_dict, parse, word2vec_model, word2vec_index2word_set,
connective_embedd_list=None,
include_connective_features=True,
return_sparse_feats=False):
features = []
sparse_feats_dict = {}
w2v_num_feats = len(word2vec_model.syn0[0])
# FEATURE EXTRACTION HERE
doc_id = relation_dict['DocID']
# print doc_id
connective_tokenlist = [x[2] for x in relation_dict['Connective']['TokenList']]
has_connective = 1 if len(connective_tokenlist) > 0 else 0
features.append(has_connective)
feat_key = "has_connective"
#if has_connective == 1:
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, has_connective)
# print 'relation_dict:'
# print relation_dict['Arg1']['TokenList']
# ARG 1
arg1_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg1']['TokenList']]
arg1_words = [x[0] for x in arg1_tokens]
# print 'arg1: %s' % arg1_words
arg1_embedding = AverageVectorsUtilities.makeFeatureVec(arg1_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg1_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg1_embedding, 'W2V_A1_')
sparse_feats_dict.update(vec_feats)
# Connective embedding
if include_connective_features:
connective_words = [parse[doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in
relation_dict['Connective']['TokenList']]
connective_embedding = AverageVectorsUtilities.makeFeatureVec(connective_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(connective_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_embedding, 'W2V_CON_')
sparse_feats_dict.update(vec_feats)
# ARG 2
arg2_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg2']['TokenList']]
arg2_words = [x[0] for x in arg2_tokens]
# print 'arg2: %s' % arg2_words
arg2_embedding = AverageVectorsUtilities.makeFeatureVec(arg2_words, word2vec_model, w2v_num_feats,
word2vec_index2word_set)
features.extend(arg2_embedding)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, arg2_embedding, 'W2V_A2_')
sparse_feats_dict.update(vec_feats)
# Arg1 to Arg 2 cosine similarity
arg1arg2_similarity = 0.00
if len(arg1_words) > 0 and len(arg2_words) > 0:
arg1arg2_similarity = spatial.distance.cosine(arg1_embedding, arg2_embedding)
features.append(arg1arg2_similarity)
feat_key = "sim_arg1arg2"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, arg1arg2_similarity)
# Calculate maximized similarities
words1 = [x for x in arg1_words if x in word2vec_index2word_set]
        words2 = [x for x in arg2_words if x in word2vec_index2word_set]
sim_avg_max = AverageVectorsUtilities.get_feature_vec_avg_aligned_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set)
features.append(sim_avg_max)
feat_key = "max_sim_aligned"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_max)
sim_avg_top1 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 1)
features.append(sim_avg_top1)
feat_key = "max_sim_avg_top1"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top1)
sim_avg_top2 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 2)
features.append(sim_avg_top2)
feat_key = "max_sim_avg_top2"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top2)
sim_avg_top3 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 3)
features.append(sim_avg_top3)
feat_key = "max_sim_avg_top3"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top3)
sim_avg_top5 = AverageVectorsUtilities.get_question_vec_to_top_words_avg_sim(words1, words2, word2vec_model,
w2v_num_feats,
word2vec_index2word_set, 5)
features.append(sim_avg_top5)
feat_key = "max_sim_avg_top5"
CommonUtilities.increment_feat_val(sparse_feats_dict, feat_key, sim_avg_top5)
# POS tags similarities
postag_feats_vec, postag_feats_sparse = DiscourseSenseClassification_FeatureExtraction.get_postagged_sim_fetures(
tokens_data_text1=arg1_tokens, tokens_data_text2=arg2_tokens, postagged_data_dict=parse,
model=word2vec_model, word2vec_num_features=w2v_num_feats,
word2vec_index2word_set=word2vec_index2word_set)
# print postag_feats_sparse
features.extend(postag_feats_vec)
sparse_feats_dict.update(postag_feats_sparse)
# calculate connectives similarity
if connective_embedd_list is not None:
arg1arg2_avg = (arg1_embedding+arg2_embedding)/2
connective_sims = DiscourseSenseClassification_FeatureExtraction.\
calc_sim_singleembedd_to_embeddlist(arg1arg2_avg, connective_embedd_list)
# print connective_sims
features.extend(connective_sims)
vec_feats = {}
CommonUtilities.append_features_with_vectors(vec_feats, connective_sims, 'A1A2_CONNSIMS_')
sparse_feats_dict.update(vec_feats)
#else:
# # Extend with zeros for explicit
# features.extend([0 for x in DiscourseSenseClassification_FeatureExtraction.CONNECTIVES])
# Set None to zero
for i in range(0, len(features)):
if math.isnan(features[i]):
features[i] = 0.00
# Set None to zero
for k in sparse_feats_dict.iterkeys():
if math.isnan(sparse_feats_dict[k]):
sparse_feats_dict[k] = 0.00
if return_sparse_feats:
return features, sparse_feats_dict
else:
return features
@staticmethod
def extract_features_as_rawtokens_from_single_record(relation_dict, parse):
features = {}
# FEATURE EXTRACTION HERE
doc_id = relation_dict['DocID']
# print doc_id
connective_tokenlist = [x[2] for x in relation_dict['Connective']['TokenList']]
has_connective = 1 if len(connective_tokenlist) > 0 else 0
# features.append(has_connective)
feat_key = "has_connective"
features['HasConnective'] = has_connective
# print 'relation_dict:'
# print relation_dict['Arg1']['TokenList']
# ARG 1
arg1_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg1']['TokenList']]
arg1_words = [x[0] for x in arg1_tokens]
features[const.FIELD_ARG1] = arg1_words
# Connective embedding
connective_words = [parse[doc_id]['sentences'][x[3]]['words'][x[4]][0] for x in
relation_dict['Connective']['TokenList']]
features[const.FIELD_CONNECTIVE] = connective_words
# ARG 2
arg2_tokens = [parse[doc_id]['sentences'][x[3]]['words'][x[4]] for x in relation_dict['Arg2']['TokenList']]
arg2_words = [x[0] for x in arg2_tokens]
# print 'arg2: %s' % arg2_words
features[const.FIELD_ARG2] = arg2_words
return features
```
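The extractors above index into the shared-task JSON structures: each `TokenList` entry is a five-element list whose third, fourth, and fifth items are used as document token offset (`x[2]`), sentence index (`x[3]`), and token-in-sentence index (`x[4]`). A small sketch of the expected shapes (values are illustrative, not real corpus data):
```python
# Illustrative input for extract_features_as_rawtokens_from_single_record.
# Assumes the module above (which sets const.FIELD_* at import time) is importable.
relation_dict = {
    'DocID': 'wsj_1000',
    'Arg1': {'TokenList': [[0, 3, 0, 0, 0], [4, 8, 1, 0, 1]]},
    'Arg2': {'TokenList': [[17, 23, 3, 0, 3]]},
    'Connective': {'TokenList': [[9, 16, 2, 0, 2]]},
}
parse = {
    'wsj_1000': {
        'sentences': [
            {'words': [
                ['The', {'PartOfSpeech': 'DT'}],
                ['plan', {'PartOfSpeech': 'NN'}],
                ['because', {'PartOfSpeech': 'IN'}],
                ['prices', {'PartOfSpeech': 'NNS'}],
            ]},
        ],
    },
}
feats = DiscourseSenseClassification_FeatureExtraction.extract_features_as_rawtokens_from_single_record(
    relation_dict, parse)
# feats['Arg1'] == ['The', 'plan'], feats['Connective'] == ['because'],
# feats['Arg2'] == ['prices'], feats['HasConnective'] == 1
```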
#### File: architectures/conll16st-hd-sdp/experiment_cross_conv_v1_np.py
```python
import numpy as np
import tensorflow as tf
from scipy.signal import convolve2d
import multiprocessing
import ctypes
def convolve_cross(s1, s2, filter_size):
batch_size = s1.shape[0]
sent_len = s1.shape[1]
embedding_size = s1.shape[2]
conv_iter = sent_len - filter_size + 1
batches_conv_res = np.zeros((batch_size, conv_iter, conv_iter))
for bi in range(0, batch_size):
batch_s1 = s1[bi] # tf.gather(s1, bi)
batch_s2 = s2[bi] # tf.gather(s1, bi)
for i in range(0, conv_iter):
filter_s1 = batch_s1[i:i+filter_size]
for j in range(0, conv_iter):
filter_s2 = batch_s2[j:j+filter_size]
curr_val = 0
for k in range(0, filter_size):
for l in range(0, embedding_size):
curr_val += filter_s1[k][l] * filter_s2[k][l]
                batches_conv_res[bi][i][j] = curr_val
print "batch %s of %s"%(bi, batch_size)
return np.array(batches_conv_res)
def convolve_cross_filter(batch_s1, batch_s2, filter_size):
sent_len, embedding_size = batch_s1.shape
conv_iter = sent_len - filter_size + 1
batch_res = np.zeros((conv_iter, conv_iter))
for i in range(0, conv_iter):
filter_s1 = batch_s1[i:i + filter_size]
for j in range(0, conv_iter):
filter_s2 = batch_s2[j:j + filter_size]
curr_val = 0
for k in range(0, filter_size):
for l in range(0, embedding_size):
curr_val += filter_s1[k][l] * filter_s2[k][l]
batch_res[i][j] = curr_val
return batch_res
def convolve_cross_filter_batch(s1, s2, filter_size):
batch_size, sent_len, embedding_size = s1.shape
conv_iter = sent_len - filter_size + 1
batches_conv_res = np.zeros((batch_size, conv_iter, conv_iter))
for bi in range(0, batch_size):
batches_conv_res[bi] = convolve_cross_filter(s1[bi], s2[bi], filter_size)
print "batch %s of %s"%(bi, batch_size)
return batches_conv_res
from multiprocessing import Pool
def single_func(s1s2):
print 'Zipped item :'
#print s1s2
s1, s2 = zip(*s1s2)
print s1
print s2
return convolve_cross_filter(s1, s2, filter_size)
# print "batch %s of %s"%(i, batch_size)
def single_func1(s1s2):
#print 'Zipped item :'
#print s1s2
s1u = s1s2['s1']
s2u = s1s2['s2']
fs = s1s2['fs']
return convolve_cross_filter(s1u, s2u, fs)
def convolve_cross_filter_batch_multicore(s1, s2, filter_size, processes_cnt):
batch_size, sent_len, embedding_size = s1.shape
conv_iter = sent_len - filter_size + 1
# batches_conv_res = np.zeros((batch_size, conv_iter, conv_iter))
# shared_array = shared_array.reshape(batch_size, conv_iter, conv_iter)
# shared_array = shared_array.reshape(10, 10)
# s1s2_zipped = list(zip(s1, s2))
s1s2_zipped = []
for i in range(0, batch_size):
s1s2_zipped.append({'s1': s1[i], 's2': s2[i], 'fs':filter_size})
print 'Zipped:'
#print s1s2_zipped
pool = Pool(processes=processes_cnt)
shared_array = pool.map(single_func1, s1s2_zipped)
#pool.join()
return np.array(shared_array)
import sys
if __name__ == '__main__':
import logging # word2vec logging
# Set logging info
logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s]: %(levelname)s : %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Enable file logging
# logFileName = '%s/%s-%s.log' % ('logs', 'sup_parser_v1', '{:%Y-%m-%d-%H-%M-%S}'.format(datetime.now()))
# fileHandler = logging.FileHandler(logFileName, 'wb')
# fileHandler.setFormatter(logFormatter)
# logger.addHandler(fileHandler)
# Enable console logging
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
import time as ti
batch_size = 20
sent_len = 100
embedding_size = 300
threads = 20
s1 = np.random.rand(batch_size, sent_len, embedding_size)
s2 = np.random.rand(batch_size, sent_len, embedding_size)
filter_size = 3
# multiprocessing
pool_size = 20
    logging.info('Parallel calculation with %s pools...' % pool_size)
start = ti.time()
batches_conv_res = convolve_cross_filter_batch_multicore(s1, s2, 3, pool_size)
end = ti.time()
logging.info('calculated in %s '%(end-start))
print(batches_conv_res.shape)
print(batches_conv_res)
```
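The inner loops of `convolve_cross_filter` compute, for every pair of window positions, the sum of element-wise products of two `filter_size x embedding_size` windows. A small vectorized sketch (an assumption on my part, not in the original file) that should give the same result:
```python
import numpy as np

def convolve_cross_filter_fast(batch_s1, batch_s2, filter_size):
    """Vectorized sketch equivalent to convolve_cross_filter above."""
    sent_len, _ = batch_s1.shape
    conv_iter = sent_len - filter_size + 1
    # Flatten every sliding window of filter_size rows into one vector.
    win1 = np.stack([batch_s1[i:i + filter_size].ravel() for i in range(conv_iter)])
    win2 = np.stack([batch_s2[j:j + filter_size].ravel() for j in range(conv_iter)])
    # (conv_iter, conv_iter) matrix of window dot products.
    return win1.dot(win2.T)

# Quick check against the loop version on a tiny example
# (assumes convolve_cross_filter from the file above is defined/importable).
s1 = np.random.rand(7, 5)
s2 = np.random.rand(7, 5)
assert np.allclose(convolve_cross_filter_fast(s1, s2, 3), convolve_cross_filter(s1, s2, 3))
```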
#### File: architectures/conll16st-hd-sdp/text_cnn_eval.py
```python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNNModel
def text_cnn_load_model_and_eval(x_test,
checkpoint_file,
allow_soft_placement,
log_device_placement,
embeddings):
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=allow_soft_placement,
log_device_placement=log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batch_size = 50
batches = data_helpers.batch_iter(x_test, batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
# Load embeddings placeholder
embedding_size = embeddings.shape[1]
embeddings_number = embeddings.shape[0]
print 'embedding_size:%s, embeddings_number:%s' % (embedding_size, embeddings_number)
# with tf.name_scope("embedding"):
# embeddings_placeholder = tf.placeholder(tf.float32, shape=[embeddings_number, embedding_size])
embeddings_placeholder = graph.get_operation_by_name("embedding/Placeholder").outputs[0]
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0,
embeddings_placeholder: embeddings})
all_predictions = np.concatenate([all_predictions, batch_predictions])
return all_predictions
def text_cnn_load_model_and_eval_v2(x_test_s1,
x_test_s2,
checkpoint_file,
allow_soft_placement,
log_device_placement,
embeddings):
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=allow_soft_placement,
log_device_placement=log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_s1 = graph.get_operation_by_name("input_x_s1").outputs[0]
input_x_s2 = graph.get_operation_by_name("input_x_s2").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batch_size = 50
batches = data_helpers.batch_iter(list(zip(x_test_s1, x_test_s2)), batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
# Load embeddings placeholder
embedding_size = embeddings.shape[1]
embeddings_number = embeddings.shape[0]
print 'embedding_size:%s, embeddings_number:%s' % (embedding_size, embeddings_number)
# with tf.name_scope("embedding"):
# embeddings_placeholder = tf.placeholder(tf.float32, shape=[embeddings_number, embedding_size])
embeddings_placeholder = graph.get_operation_by_name("embedding/Placeholder").outputs[0]
for batch in batches:
x_test_batch_s1, x_test_batch_s2 = zip(*batch)
batch_predictions = sess.run(predictions, {input_x_s1: x_test_batch_s1,
input_x_s2: x_test_batch_s2,
dropout_keep_prob: 1.0,
embeddings_placeholder: embeddings})
all_predictions = np.concatenate([all_predictions, batch_predictions])
return all_predictions
def text_cnn_load_model_and_eval_v4(#x_test_s1,
# x_test_s2,
loaded_cross_batch_iter,
checkpoint_file,
allow_soft_placement,
log_device_placement,
embeddings):
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=allow_soft_placement,
log_device_placement=log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
#input_x_s1 = graph.get_operation_by_name("input_x_s1").outputs[0]
#input_x_s2 = graph.get_operation_by_name("input_x_s2").outputs[0]
input_x_s1s2_cross = graph.get_operation_by_name("input_x_s1s2_cross").outputs[0]
#cnn.input_y: y_batch,
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
all_predictions = []
for loaded_cross_batch, batch_i, items_per_batch in loaded_cross_batch_iter:
# Generate batches for one epoch
batch_size = 50
#batches = data_helpers.batch_iter(list(zip(x_test_s1, x_test_s2)), batch_size, 1, shuffle=False)
batches = data_helpers.batch_iter(loaded_cross_batch, batch_size, 1, shuffle=False)
# Collect the predictions here
# Load embeddings placeholder
embedding_size = embeddings.shape[1]
embeddings_number = embeddings.shape[0]
print 'embedding_size:%s, embeddings_number:%s' % (embedding_size, embeddings_number)
# with tf.name_scope("embedding"):
# embeddings_placeholder = tf.placeholder(tf.float32, shape=[embeddings_number, embedding_size])
embeddings_placeholder = graph.get_operation_by_name("embedding/Placeholder").outputs[0]
for batch in batches:
#x_test_batch_s1, x_test_batch_s2 = zip(*batch)
x_s1s2_cross = batch
batch_predictions = sess.run(predictions, {#input_x_s1: x_test_batch_s1,
#input_x_s2: x_test_batch_s2,
input_x_s1s2_cross: x_s1s2_cross,
dropout_keep_prob: 1.0,
embeddings_placeholder: embeddings})
all_predictions = np.concatenate([all_predictions, batch_predictions])
return all_predictions
if __name__ == '__main__':
# Parameters
# ==================================================
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Load data. Load your own data here
print("Loading data...")
x_test, y_test, vocabulary, vocabulary_inv = data_helpers.load_data()
y_test = np.argmax(y_test, axis=1)
print("Vocabulary size: {:d}".format(len(vocabulary)))
print("Test set size {:d}".format(len(y_test)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
predictions_y = text_cnn_load_model_and_eval(x_test=x_test,
checkpoint_file=checkpoint_file,
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
# Print accuracy
correct_predictions = float(sum(predictions_y == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions / float(len(y_test))))
```
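Note that the `__main__` block above calls `text_cnn_load_model_and_eval` without the required `embeddings` argument. A call that supplies everything the function expects might look like this (paths and array shapes are illustrative assumptions):
```python
import numpy as np
import tensorflow as tf

# Hypothetical values -- the checkpoint directory and embedding matrix come from training.
checkpoint_file = tf.train.latest_checkpoint("./runs/1466000000/checkpoints")
embeddings = np.load("embeddings.npy")   # shape: (vocab_size, embedding_dim)
x_test = np.load("x_test_ids.npy")       # padded matrix of token ids

predictions = text_cnn_load_model_and_eval(
    x_test=x_test,
    checkpoint_file=checkpoint_file,
    allow_soft_placement=True,
    log_device_placement=False,
    embeddings=embeddings,
)
```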
#### File: conll16st-v34-focused-rnns/conll16st_data/relations.py
```python
__author__ = "GW [http://gw.tnode.com/] <<EMAIL>>"
__license__ = "GPLv3+"
from files import load_parses, load_raws, load_relations_gold
from words import get_word_metas
def rtsip_to_tag(rel_type, rel_sense, rel_id, rel_part):
"""Convert relation type, sense, id, and part to tag."""
rel_tag = ":".join([rel_type, rel_sense, str(rel_id), rel_part])
return rel_tag
def tag_to_rtsip(tag):
"""Convert tag to relation type, sense, id, and part."""
rel_type, rel_sense, rel_id, rel_part = tag.split(":")
return rel_type, rel_sense, int(rel_id), rel_part
def filter_tags(tags, prefixes=None):
"""Filter list of relation tags matching specified prefixes."""
if prefixes is not None:
# filter by specified relation tag prefixes
tags = tuple( t for t in tags if any(( t.startswith(p) for p in prefixes )) )
return tags
def strip_sense_level(rel_sense, level=None):
"""Strip relation sense to top level."""
if level is not None:
rel_sense = ".".join(rel_sense.split(".")[:level])
return rel_sense
def get_rel_parts(relations_gold):
"""Extract only discourse relation parts/spans of token ids by relation id from CoNLL16st corpus.
rel_parts[14905] = {
'Arg1': (879, 880, 881, 882, 883, 884, 885, 886),
'Arg1Len': 46,
'Arg2': (877, 889, 890, 891, 892, 893, 894),
'Arg2Len': 36,
'Connective': (878, 888),
'ConnectiveLen': 6,
'Punctuation': (),
'PunctuationLen': 0,
'PunctuationType': '',
'DocID': 'wsj_1000',
'ID': 14905,
'TokenMin': 877,
'TokenMax': 894,
'TokenCount': 17,
}
"""
rel_parts = {}
for rel_id, gold in relations_gold.iteritems():
doc_id = gold['DocID']
punct_type = gold['Punctuation']['PunctuationType']
# short token lists from detailed/gold format to only token id
arg1_list = tuple( t[2] for t in gold['Arg1']['TokenList'] )
arg2_list = tuple( t[2] for t in gold['Arg2']['TokenList'] )
conn_list = tuple( t[2] for t in gold['Connective']['TokenList'] )
punct_list = tuple( t[2] for t in gold['Punctuation']['TokenList'] )
all_list = sum([list(arg1_list), list(arg2_list), list(conn_list), list(punct_list)], [])
# character lengths of parts
arg1_len = sum(( (e - b) for b, e in gold['Arg1']['CharacterSpanList'] ))
arg2_len = sum(( (e - b) for b, e in gold['Arg2']['CharacterSpanList'] ))
conn_len = sum(( (e - b) for b, e in gold['Connective']['CharacterSpanList'] ))
punct_len = sum(( (e - b) for b, e in gold['Punctuation']['CharacterSpanList'] ))
# save relation parts
rel = {
'Arg1': arg1_list,
'Arg1Len': arg1_len,
'Arg2': arg2_list,
'Arg2Len': arg2_len,
'Connective': conn_list,
'ConnectiveLen': conn_len,
'Punctuation': punct_list,
'PunctuationLen': punct_len,
'PunctuationType': punct_type,
'DocID': doc_id,
'ID': rel_id,
'TokenMin': min(all_list),
'TokenMax': max(all_list),
'TokenCount': len(all_list),
}
rel_parts[rel_id] = rel
return rel_parts
def get_rel_types(relations_gold, filter_types=None):
"""Extract discourse relation types by relation id from CoNLL16st corpus.
rel_types[14905] = "Explicit"
"""
rel_types = {}
for rel_id, gold in relations_gold.iteritems():
rel_type = gold['Type']
if filter_types and rel_type not in filter_types:
continue
rel_types[rel_id] = rel_type
return rel_types
def get_rel_senses(relations_gold, level=None, filter_senses=None):
"""Extract first discourse relation senses by relation id from CoNLL16st corpus.
rel_senses[14905] = "Contingency.Condition"
"""
rel_senses = {}
for rel_id, gold in relations_gold.iteritems():
sfirst = gold['Sense'][0] # only first sense
if filter_senses and sfirst not in filter_senses:
continue
sfirst = strip_sense_level(sfirst, level) # strip to top level senses
rel_senses[rel_id] = sfirst
return rel_senses
def get_rel_senses_all(relations_gold, level=None, filter_senses=None):
"""Extract all discourse relation senses by relation id from CoNLL16st corpus.
rel_senses_all[14905] = ("Contingency.Condition")
"""
if filter_senses is None:
filter_senses = ()
rel_senses_all = {}
for rel_id, gold in relations_gold.iteritems():
slist = gold['Sense']
slist = [ s for s in slist if s not in filter_senses ]
slist = [ strip_sense_level(s, level) for s in slist ] # strip to top level senses
rel_senses_all[rel_id] = tuple(slist)
return rel_senses_all
def add_relation_tags(word_metas, rel_types, rel_senses):
"""Add discourse relation tags to metadata of words/tokens.
word_metas['wsj_1000'][0] = {
...
'RelationTags': ("Explicit:Expansion.Conjunction:14890:Arg1",),
}
"""
for doc_id in word_metas:
for meta in word_metas[doc_id]:
tags = []
for rel_id, rel_part in zip(meta['RelationIDs'], meta['RelationParts']):
if rel_id not in rel_types or rel_id not in rel_senses:
continue # skip missing relations
rel_type = rel_types[rel_id]
rel_sense_all = rel_senses[rel_id]
if isinstance(rel_sense_all, str): # only first sense
rel_sense_all = (rel_sense_all,)
                for rel_sense in rel_sense_all:
tags.append(rtsip_to_tag(rel_type, rel_sense, rel_id, rel_part))
# save to metadata
meta['RelationTags'] = tuple(tags)
### Tests
def test_rel_parts():
dataset_dir = "./conll16st-en-trial"
t_rel0 = {
'Arg1': (879, 880, 881, 882, 883, 884, 885, 886),
'Arg1Len': 46,
'Arg2': (877, 889, 890, 891, 892, 893, 894),
'Arg2Len': 36,
'Connective': (878, 888),
'ConnectiveLen': 6,
'Punctuation': (),
'PunctuationLen': 0,
'PunctuationType': '',
'DocID': 'wsj_1000',
'ID': 14905,
'TokenMin': 877,
'TokenMax': 894,
'TokenCount': 17,
}
relations_gold = load_relations_gold(dataset_dir)
rel_parts = get_rel_parts(relations_gold)
rel0 = rel_parts[t_rel0['ID']]
assert rel0 == t_rel0
def test_rel_types():
dataset_dir = "./conll16st-en-trial"
t_rel0_id = 14905
t_rel0 = 'Explicit'
relations_gold = load_relations_gold(dataset_dir)
rel_types = get_rel_types(relations_gold)
rel0 = rel_types[t_rel0_id]
assert rel0 == t_rel0
def test_rel_senses():
dataset_dir = "./conll16st-en-trial"
t_rel0_id = 14905
t_rel0 = 'Contingency.Condition'
t_rel1_id = 14905
t_rel1 = 'Contingency'
relations_gold = load_relations_gold(dataset_dir)
rel_senses = get_rel_senses(relations_gold)
rel0 = rel_senses[t_rel0_id]
assert rel0 == t_rel0
relations_gold = load_relations_gold(dataset_dir)
rel_senses = get_rel_senses(relations_gold, level=1)
rel1 = rel_senses[t_rel1_id]
assert rel1 == t_rel1
def test_rel_senses_all():
dataset_dir = "./conll16st-en-trial"
t_rel0_id = 14905
t_rel0 = ('Contingency.Condition',)
t_rel1_id = 14905
t_rel1 = ('Contingency',)
relations_gold = load_relations_gold(dataset_dir)
rel_senses = get_rel_senses_all(relations_gold)
rel0 = rel_senses[t_rel0_id]
assert rel0 == t_rel0
relations_gold = load_relations_gold(dataset_dir)
rel_senses = get_rel_senses_all(relations_gold, level=1)
rel1 = rel_senses[t_rel1_id]
assert rel1 == t_rel1
def test_relation_tags():
dataset_dir = "./conll16st-en-trial"
doc_id = "wsj_1000"
t_meta0_id = 0
t_meta0_tags = ('Explicit:Expansion.Conjunction:14890:Arg1',)
t_meta1_id = 894
t_meta1_tags = ('Explicit:Comparison.Concession:14904:Arg2', 'Explicit:Contingency.Condition:14905:Arg2')
t_meta2_id = 895
t_meta2_tags = ()
parses = load_parses(dataset_dir)
raws = load_raws(dataset_dir, [doc_id])
word_metas = get_word_metas(parses, raws)
relations_gold = load_relations_gold(dataset_dir)
rel_types = get_rel_types(relations_gold)
rel_senses = get_rel_senses(relations_gold)
add_relation_tags(word_metas, rel_types, rel_senses)
assert word_metas[doc_id][t_meta0_id]['RelationTags'] == t_meta0_tags
assert word_metas[doc_id][t_meta1_id]['RelationTags'] == t_meta1_tags
assert word_metas[doc_id][t_meta2_id]['RelationTags'] == t_meta2_tags
if __name__ == '__main__':
import pytest
pytest.main(['-s', __file__])
```
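A quick round-trip sketch for the tag helpers at the top of this module (the tag string matches the example in the `add_relation_tags` docstring; assumes the functions above are in scope):
```python
tag = rtsip_to_tag("Explicit", "Expansion.Conjunction", 14890, "Arg1")
assert tag == "Explicit:Expansion.Conjunction:14890:Arg1"
assert tag_to_rtsip(tag) == ("Explicit", "Expansion.Conjunction", 14890, "Arg1")

# filter_tags keeps only tags that start with one of the given prefixes
assert filter_tags((tag,), prefixes=["Explicit:"]) == (tag,)
assert filter_tags((tag,), prefixes=["Implicit:"]) == ()

# strip_sense_level truncates a sense label to its first `level` components
assert strip_sense_level("Contingency.Condition", level=1) == "Contingency"
```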
#### File: conll16st-v34-focused-rnns/v34/optimize.py
```python
__author__ = "GW [http://gw.tnode.com/] <<EMAIL>>"
__license__ = "GPLv3+"
import argparse
import logging
import os
from hyperopt import hp, fmin, tpe, space_eval
from hyperopt.mongoexp import MongoTrials, main_worker_helper, spec_from_misc
import optimize_exec
def list_best(mongo, exp_key=None, space=None):
mongo_trials = MongoTrials(mongo, exp_key=exp_key)
jobs = mongo_trials.trials
jobs_ok = [ (d['result']['loss'], d) for d in jobs if d['state'] == 2 and d['result']['status'] == 'ok']
jobs_ok.sort()
for loss, job in reversed(jobs_ok):
print(loss, job['owner'], job['result'])
spec = spec_from_misc(job['misc'])
print("spec: {}".format(spec))
if space is not None:
print("space: {}".format(space_eval(space, spec)))
print("total: {}/{}".format(len(jobs_ok), len(jobs)))
return mongo_trials.argmin
# logging
log = logging.getLogger("hyperopt")
if log:
import sys
log.addHandler(logging.StreamHandler(stream=sys.stdout))
log.setLevel(logging.DEBUG)
# parse arguments
argp = argparse.ArgumentParser(description=__doc__.strip().split("\n", 1)[0])
argp.add_argument('action',
choices=['worker', 'optimizer', 'list_best'],
help="action to perform")
argp.add_argument('--mongo',
default="mongo://conll16st-mongo:27017/conll16st/jobs",
help="mongo connection string")
argp.add_argument('--exp-key',
default=None,
help="identifier for optimization experiments")
argp.add_argument('--evals', type=int,
default=10,
help="maximal number of evaluations (for optimizer)")
argp.add_argument('--cmd',
default="/srv/v34/train.py /srv/ex/{exp_key}-{exp_hash} /srv/data/conll16st-zh-train /srv/data/conll16st-zh-dev --clean --config='{config_str}'",
help="command for each experiment (for optimizer)")
argp.add_argument('--worker-helper',
default="/usr/local/bin/hyperopt-mongo-worker",
help="command for worker helper (for worker)")
args = argp.parse_args()
# define configuration search space
space = {
'_cmd': args.cmd,
'_exp_key': args.exp_key,
'filter_fn_name': "conn_gt_0", #!!!: "conn_eq_0", "conn_gt_0"
'epochs': 200,
'epochs_len': -1,
'epochs_patience': 10,
#'batch_size': 64,
#'snapshot_size': 2048,
'random_per_sample': hp.quniform('random_per_sample', 8, 64, 8.),
'words_dim': hp.quniform('words_dim', 10, 100, 10.),
'focus_dim': hp.quniform('focus_dim', 2, 8, 1.),
'rnn_dim': hp.quniform('rnn_dim', 10, 100, 10.),
'final_dim': hp.quniform('final_dim', 10, 100, 10.),
'arg1_len': 500, #= 100 (en), 500 (zh)
'arg2_len': 500, #= 100 (en), 500 (zh)
#'conn_len': 10,
#'punc_len': 2,
'words_dropout': hp.quniform('words_dropout', 0.0, 1.0, 0.25),
'focus_dropout_W': hp.quniform('focus_dropout_W', 0.0, 1.0, 0.25),
'focus_dropout_U': hp.quniform('focus_dropout_U', 0.0, 1.0, 0.25),
'rnn_dropout_W': hp.quniform('rnn_dropout_W', 0.0, 1.0, 0.25),
'rnn_dropout_U': hp.quniform('rnn_dropout_U', 0.0, 1.0, 0.25),
'final_dropout': hp.quniform('final_dropout', 0.0, 1.0, 0.25),
'words2vec_bin': None,
'words2vec_txt': None,
#'rsenses_imp_loss': "categorical_crossentropy",
}
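# Hedged illustration (hypothetical values, not from the original file): the
# tunable part of a configuration sampled from the space above is a plain dict,
# e.g. {'words_dim': 50.0, 'focus_dim': 4.0, 'rnn_dim': 80.0,
#       'words_dropout': 0.25, 'rnn_dropout_W': 0.5, ...}, since
# hp.quniform(label, low, high, q) draws uniformly and rounds to multiples of q.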
if args.action == 'worker':
# run distributed worker
class Options(object):
mongo = args.mongo
exp_key = args.exp_key
last_job_timeout = None
max_consecutive_failures = 2
max_jobs = args.evals #= inf
poll_interval = 300 #= 5 min
reserve_timeout = 3600 #= 1 hour
workdir = None
sys.argv[0] = args.worker_helper
sys.exit(main_worker_helper(Options(), None))
elif args.action == 'optimizer':
# run distributed optimizer
trials = MongoTrials(args.mongo, exp_key=args.exp_key)
best = fmin(optimize_exec.objective, space, trials=trials, algo=tpe.suggest, max_evals=args.evals)
# summary
    print("")
    print("evals: {}".format(args.evals))
    print("best: {}".format(best))
    print("space: {}".format(space_eval(space, best)))
elif args.action == 'list_best':
# list distributed evaluation results
best = list_best(args.mongo, exp_key=args.exp_key, space=space)
# summary
    print("")
    print("evals: {}".format(args.evals))
    print("best: {}".format(best))
    print("space: {}".format(space_eval(space, best)))
else:
raise Exception("Invalid action '{}'".format(args.action))
```
#### File: architectures/nb_baseline/misc_utils.py
```python
import logging
import logging.config
import yaml
from time import time
import spacy
def get_config(path):
"""
Returns YAML configuration from path.
"""
with open(path) as file_:
        return yaml.safe_load(file_)
def get_logger(module_name, config=None):
"""
Returns global logger.
Params:
- module_name: The name of the logger, preferably module name.
- config: A dict of logger configuration.
"""
if config is not None:
logging.config.dictConfig(config)
return logging.getLogger(module_name)
EN_MODEL = None
def get_en_model():
"""
This takes a while to load, so we make it a singleton.
"""
global EN_MODEL # pylint: disable=W0603
logger = get_logger(__name__)
if EN_MODEL is None:
logger.debug("Loading spacy English model...")
EN_MODEL = spacy.load('en',
tagger=False,
parser=False,
entity=False,
matcher=False,
add_vectors=False)
return EN_MODEL
def tokenize(sentence):
"""
Returns tokenized string.
"""
en_model = get_en_model()
return [w.lower_ for w in en_model(sentence)]
class timer():
def __init__(self):
self.elapsed_time = 0
self.start_time = None
def __enter__(self):
self.start_time = time()
return self
def __exit__(self, type_, value, traceback):
self.elapsed_time = time() - self.start_time
return
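# Hedged usage sketch (do_something_expensive is a hypothetical placeholder):
# the timer class above is a context manager that records wall-clock seconds
# spent inside the `with` block.
#   with timer() as t:
#       do_something_expensive()
#   print(t.elapsed_time)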
```
#### File: architectures/nb_baseline/resources.py
```python
from misc_utils import get_config, get_logger, tokenize
from discourse_relation import DiscourseRelation
from collections import Counter, defaultdict
import json
import abc
import numpy as np
from os.path import join
import os
logger = get_logger(__name__)
class Resource(metaclass=abc.ABCMeta):
def __init__(self, path, classes):
self.path = path
self.classes = sorted(classes)
self.y_indices = {x: y for y, x in enumerate(self.classes)}
self.instances = list(self._load_instances(path))
@abc.abstractmethod
def _load_instances(self, path):
raise NotImplementedError("This class must be subclassed.")
class PDTBRelations(Resource):
def __init__(self, path, classes, separate_dual_classes, filter_type=None, skip_missing_classes=True):
self.skip_missing_classes = skip_missing_classes
self.separate_dual_classes = separate_dual_classes
self.filter_type = [] if filter_type is None else filter_type
super().__init__(path, classes)
def _load_instances(self, path):
with open(join(path, 'relations.json')) as file_:
for line in file_:
rel = DiscourseRelation(json.loads(line.strip()))
                if self.filter_type and rel.relation_type() not in self.filter_type:
continue
if self.separate_dual_classes:
for splitted in rel.split_up_senses():
if len(splitted.senses()) > 1:
raise ValueError("n_senses > 1")
if len(splitted.senses()) == 1 and splitted.senses()[0] not in self.y_indices:
if self.skip_missing_classes:
logger.debug("Sense class {} not in class list, skipping {}".format(splitted.senses()[0], splitted.relation_id()))
continue
yield splitted
else:
a_class_exist = any(r in self.y_indices for r in rel.senses())
if not a_class_exist:
if self.skip_missing_classes:
logger.debug("Sense {} classes not in class list, skipping {}".format(rel.senses(), rel.relation_id()))
continue
yield rel
def get_feature_tensor(self, extractors):
rels_feats = []
n_instances = 0
last_features_for_instance = None
for rel in self.instances:
n_instances += 1
feats = []
total_features_per_instance = 0
for extractor in extractors:
# These return matrices of shape (1, n_features)
# We concatenate them on axis 1
arg_rawtext = getattr(rel, extractor.argument)()
arg_tokenized = tokenize(arg_rawtext)
arg_feats = extractor.extract_features(arg_tokenized)
feats.append(arg_feats)
total_features_per_instance += extractor.n_features
            if last_features_for_instance is not None:
                # Making sure we have equal number of features per instance
                assert total_features_per_instance == last_features_for_instance
            last_features_for_instance = total_features_per_instance
rels_feats.append(np.concatenate(feats, axis=1))
feature_tensor = np.array(rels_feats)
assert_shape = (n_instances, 1, total_features_per_instance)
assert feature_tensor.shape == assert_shape, \
"Tensor shape mismatch. Is {}, should be {}".format(feature_tensor.shape, assert_shape)
return feature_tensor
def get_correct(self, indices=True):
"""
Returns answer indices.
"""
for rel in self.instances:
senses = rel.senses()
if self.separate_dual_classes:
if indices:
yield self.y_indices[senses[0]]
else:
yield senses[0]
else:
ys = [self.y_indices[sense] for sense in senses]
if indices:
yield ys
else:
yield senses
def store_results(self, results, store_path):
"""
Don't forget to use the official scoring script here.
"""
text_results = [self.classes[res] for res in results]
# Load test file
# Output json object with results
# Deal with multiple instances somehow
predicted_rels = []
for text_result, rel in zip(text_results, self.instances):
if rel.is_explicit():
rel_type = 'Explicit'
else:
rel_type = 'Implicit'
predicted_rels.append(rel.to_output_format(text_result, rel_type)) # turn string representation into list instance first
# Store test file
if not os.path.exists(store_path):
os.makedirs(store_path)
with open(join(store_path, 'output.json'), 'w') as w:
for rel in predicted_rels:
w.write(json.dumps(rel) + '\n')
logger.info("Stored predicted output at {}".format(store_path))
```
#### File: nn_discourse_parser/nets/lstm.py
```python
import numpy as np
import theano
from theano import config
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import tree_util
class LSTM(object):
def square_weight(self, ndim):
return self.rng.uniform(
low=-np.sqrt(6. / (4 * ndim)),
high=np.sqrt(6. / (4 * ndim)),
size=(ndim, ndim)
).astype(config.floatX)
def _init_params(self, rng, dim_proj,
W=None, U=None, b=None, num_slices=4):
        self.params = []#a list of parameter variables
self.input = [] #a list of input variables
self.output = []#a list of output variables
self.predict = [] # a list of prediction functions
self.hinge_loss = None # a function
self.crossentropy = None # a function
self.dim_proj = dim_proj
self.rng = rng
#LSTM parameters
if W is None:
W_values = np.concatenate(
[self.square_weight(self.dim_proj)
for x in range(num_slices)], axis=1)
self.W = theano.shared(W_values, borrow=True)
else:
self.W = W
if U is None:
U_values = np.concatenate(
[self.square_weight(self.dim_proj)
for x in range(num_slices)], axis=1)
self.U = theano.shared(U_values, borrow=True)
else:
self.U = U
if b is None:
b_values = np.zeros((num_slices * self.dim_proj,)).\
astype(config.floatX)
self.b = theano.shared(b_values, borrow=True)
else:
self.b = b
self.params = [self.W, self.U, self.b]
def _reset(self, rng, num_slices):
W_values = np.concatenate(
[self.square_weight(self.dim_proj)
for x in range(num_slices)], axis=1)
U_values = np.concatenate(
[self.square_weight(self.dim_proj)
for x in range(num_slices)], axis=1)
num_elems = num_slices * self.dim_proj
b_values = np.zeros((num_elems,)).astype(config.floatX)
self.W.set_value(W_values)
self.U.set_value(U_values)
self.b.set_value(b_values)
class BinaryTreeLSTM(LSTM):
"""Tree LSTM
This version is slow and a bit expensive on memory as the theano.scan loop
passes on all of the time slices with on one time slice changed.
I am not sure if theano is doing something smart with it or not.
tlstm.py is unuseable because it takes too long to compose a computation
graph in theano. It takes around 12s per discourse relation, which is
far too slow.
"""
def __init__(self, rng, dim_proj, W=None, U=None, b=None):
self._init_params(rng, dim_proj, W, U, b, 5)
word_matrix = T.tensor3('Word matrix', dtype=config.floatX)
c_mask = T.matrix('Child mask', dtype=config.floatX)
node_mask = T.matrix('Node mask', dtype=config.floatX)
children = T.tensor3('Children', dtype='int64')
self.X = word_matrix
self.mask = node_mask
self.c_mask = c_mask
self.input = [word_matrix, children, c_mask, node_mask]
n_samples = word_matrix.shape[1]
self.h, self.c_memory = self.project(word_matrix, children, c_mask)
all_samples = T.arange(n_samples)
self.max_pooled_h = (self.h * node_mask[:, :, None]).max(axis=0)
self.sum_pooled_h = (self.h * node_mask[:, :, None]).sum(axis=0)
self.mean_pooled_h = self.sum_pooled_h /\
T.maximum(c_mask.sum(axis=0)[:, None], 1)
num_inner_nodes = c_mask.sum(axis=0).astype('int64')
num_nodes = num_inner_nodes * 2 + 1
self.top_h = self.h[num_nodes - 1, all_samples, :]
def reset(self, rng):
self._reset(rng, 5)
def project(self, word_embedding, children, c_mask):
"""
word_embedding - TxNxd serrated tensor prefilled with word embeddings
children - TxNx3 serrated matrix for children list
c_mask - TxN mask for varying children list
"""
nsteps = children.shape[0]
if word_embedding.ndim == 3:
n_samples = word_embedding.shape[1]
else:
n_samples = 1
def _step(c, c_m, hidden, c_matrix):
node_idx = c[:, 0]
l_child_idx = c[:, 1]
r_child_idx = c[:, 2]
all_samples = T.arange(n_samples)
recursive = \
T.dot(hidden[l_child_idx, all_samples, :], self.W) +\
T.dot(hidden[r_child_idx, all_samples, :], self.U) +\
self.b
i = T.nnet.sigmoid(_slice(recursive, 0, self.dim_proj))
f1 = T.nnet.sigmoid(_slice(recursive, 1, self.dim_proj))
f2 = T.nnet.sigmoid(_slice(recursive, 2, self.dim_proj))
o = T.nnet.sigmoid(_slice(recursive, 3, self.dim_proj))
c_prime = T.tanh(_slice(recursive, 4, self.dim_proj))
new_c = i * c_prime + \
f1 * c_matrix[l_child_idx, all_samples,: ] +\
f2 * c_matrix[r_child_idx, all_samples,: ]
new_c_masked = c_m[:,None] * new_c + \
(1. - c_m[:, None]) * c_matrix[node_idx, all_samples, :]
new_h = o * T.tanh(new_c_masked)
new_h_masked = c_m[:, None] * new_h + \
(1. - c_m[:, None]) * hidden[node_idx, all_samples, :]
return T.set_subtensor(hidden[node_idx, all_samples],
new_h_masked), \
T.set_subtensor(c_matrix[node_idx, all_samples],
new_c_masked)
rval, updates = theano.scan(_step, sequences=[children, c_mask],
outputs_info=[word_embedding,
T.zeros(word_embedding.shape),],
n_steps=nsteps)
return rval[0][-1], rval[1][-1]
@staticmethod
def make_givens(givens, input_vec, T_training_data,
output_vec, T_training_data_label, start_idx, end_idx):
"""
embedding_series: 2T x N x d serrated matrix word embedding for the leaves
children : T x N x 3 children serrated matrix
c_mask : T x N masking matrix for children matrix
node_mask : 2T x N masking matrix for the internal nodes
(for embedding_series) nice for computing mean h or sum h
"""
givens[input_vec[0]] = T_training_data[0][:,start_idx:end_idx, :]
givens[input_vec[1]] = T_training_data[1][:,start_idx:end_idx, :]
givens[input_vec[2]] = T_training_data[2][:,start_idx:end_idx]
givens[input_vec[3]] = T_training_data[3][:,start_idx:end_idx]
givens[input_vec[0+4]] = \
T_training_data[0+4][:,start_idx:end_idx, :]
givens[input_vec[1+4]] = \
T_training_data[1+4][:,start_idx:end_idx, :]
givens[input_vec[2+4]] = T_training_data[2+4][:,start_idx:end_idx]
givens[input_vec[3+4]] = T_training_data[3+4][:,start_idx:end_idx]
# Sense label only
if len(output_vec) == 1:
givens[output_vec[0]] = \
T_training_data_label[0][start_idx:end_idx]
else:
givens[output_vec[0]] = \
T_training_data_label[0][:, start_idx:end_idx, :]
givens[output_vec[1]] = \
T_training_data_label[1][:, start_idx:end_idx, :]
givens[output_vec[2]] = \
T_training_data_label[2][start_idx:end_idx]
def build_stacked_lstm(num_layers, num_units, pooling, dropout_p):
lstm_layers = []
top_layer = None
rng = np.random.RandomState(100)
for i in range(num_layers):
last_layer = i == (num_layers - 1)
lstm_layer = SerialLSTM(rng, num_units,
pooling=pooling if last_layer else None,
parent_layer=top_layer,
dropout_p=dropout_p)
top_layer = lstm_layer
lstm_layers.append(lstm_layer)
return lstm_layers
class SerialLSTM(LSTM):
def __init__(self, rng, dim_proj, pooling, parent_layer=None,
W=None, U=None, b=None, dropout_p=1.0):
self._init_params(rng, dim_proj, W, U, b, 4)
self.dropout_p = dropout_p
self.n_out = dim_proj
self.srng = RandomStreams()
if parent_layer is None:
self.X = T.tensor3('x', dtype=config.floatX)
self.mask = T.matrix('mask', dtype=config.floatX)
self.c_mask = None
self.h_train = self.project(
self.X, self.mask, self.dropout_p, True)
self.h_test = self.project(
self.X, self.mask, self.dropout_p, False)
else:
self.X = parent_layer.X
self.mask = parent_layer.mask
self.c_mask = None
self.h_train = self.project(
parent_layer.h_train, self.mask, self.dropout_p, True)
self.h_test = self.project(
parent_layer.h_test, self.mask, self.dropout_p, False)
self.input = [self.X, self.mask]
n_samples = self.X.shape[1]
if pooling == 'max_pool':
self.activation_train = \
(self.h_train * self.mask[:, :, None]).max(axis=0)
self.activation_test = \
(self.h_test * self.mask[:, :, None]).max(axis=0)
elif pooling == 'sum_pool':
self.activation_train = \
(self.h_train * self.mask[:, :, None]).sum(axis=0)
self.activation_test = \
(self.h_test * self.mask[:, :, None]).sum(axis=0)
elif pooling == 'mean_pool':
sum_pooled = \
(self.h_train * self.mask[:, :, None]).sum(axis=0)
self.activation_train = \
sum_pooled / self.mask.sum(axis=0)[:, None]
sum_pooled = (self.h_test * self.mask[:, :, None]).sum(axis=0)
self.activation_test = \
sum_pooled / self.mask.sum(axis=0)[:, None]
elif pooling == 'top':
last_indices = self.mask.sum(axis=0).astype('int64') - 1
self.activation_train = \
self.h_train[last_indices, T.arange(n_samples), :]
self.activation_test = \
self.h_test[last_indices, T.arange(n_samples), :]
else:
self.activation_train = self.h_train
self.activation_test = self.h_test
def reset(self, rng):
self._reset(rng, 4)
def project(self, embedding_series, mask, dropout_p, training):
nsteps = embedding_series.shape[0]
if embedding_series.ndim == 3:
n_samples = embedding_series.shape[1]
else:
n_samples = 1
dropout_mask = self.srng.binomial(
size=[embedding_series.shape[-1]],
p=dropout_p, dtype=config.floatX)
def _step(m_, x_, h_, c_):
# x_ is actually x * W so we don't multiply again
preact = T.dot(h_, self.U) + x_
i = T.nnet.sigmoid(_slice(preact, 0, self.dim_proj))
f = T.nnet.sigmoid(_slice(preact, 1, self.dim_proj))
o = T.nnet.sigmoid(_slice(preact, 2, self.dim_proj))
c = T.tanh(_slice(preact, 3, self.dim_proj))
c = f * c_ + i * c
# if the sequence is shorter than the max length, then pad
# it with the old stuff for c and h
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * T.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
if training:
w_dot_x = T.dot(embedding_series * dropout_mask, self.W) + self.b
else:
w_dot_x = T.dot(embedding_series, self.W * dropout_p) + self.b
rval, updates = theano.scan(_step,
sequences=[mask, w_dot_x],
outputs_info=[
T.alloc(np_floatX(0.), n_samples, self.dim_proj),
T.alloc(np_floatX(0.), n_samples, self.dim_proj)],
n_steps=nsteps)
return rval[0]
@staticmethod
def make_givens(givens, input_vec, T_training_data,
output_vec, T_training_data_label, start_idx, end_idx):
# first arg embedding and mask
givens[input_vec[0]] = T_training_data[0][:,start_idx:end_idx, :]
givens[input_vec[1]] = T_training_data[1][:,start_idx:end_idx]
# second arg embedding and mask
givens[input_vec[2]] = T_training_data[2][:,start_idx:end_idx, :]
givens[input_vec[3]] = T_training_data[3][:,start_idx:end_idx]
# the rest if there is more e.g. input for MOE
for i, input_var in enumerate(input_vec[4:]):
givens[input_var] = T_training_data[i][start_idx:end_idx]
for i, output_var in enumerate(output_vec):
givens[output_var] = T_training_data_label[i][start_idx:end_idx]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
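# Hedged illustration (not from the original file): W, U and b above are built
# from 4 (SerialLSTM) or 5 (BinaryTreeLSTM) concatenated dim_proj-wide slices,
# so for a pre-activation `preact` of shape (n_samples, 4 * dim_proj),
# _slice(preact, 1, dim_proj) picks out columns dim_proj..2*dim_proj,
# i.e. the forget-gate block used in SerialLSTM._step.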
def prep_srm_arg(relation_list, arg_pos, wbm, max_length, ignore_OOV=True):
assert arg_pos == 1 or arg_pos == 2
n_samples = len(relation_list)
x = np.zeros((max_length, n_samples)).astype('int64')
x_mask = np.zeros((max_length, n_samples)).astype(config.floatX)
for i, relation in enumerate(relation_list):
indices = wbm.index_tokens(relation.arg_tokens(arg_pos), ignore_OOV)
sequence_length = min(max_length, len(indices))
x[:sequence_length, i] = indices[:sequence_length]
x_mask[:sequence_length, i] = 1.
embedding_series = \
wbm.wm[x.flatten()].\
reshape([max_length, n_samples, wbm.num_units]).\
astype(config.floatX)
return embedding_series, x_mask
def prep_tree_srm_arg(relation_list, arg_pos, wbm, max_length,
all_left_branching=False, node_label_alphabet={}):
"""Make the matrices from the data required for the tree model
T = number of time steps
N = number of samples
d = dimensionality of the embedding
    k = size of node_label_alphabet (one-hot node label dimension)
embedding_series: 2T x N x d serrated matrix word embedding for the leaves
children : T x N x 3 children serrated matrix
c_mask : T x N masking matrix for children matrix
node_mask : 2T x N masking matrix for the internal nodes
(for embedding_series) nice for computing mean h or sum h
node_label_tensor : 2T x N x k. This masks embedding_series matrix
"""
assert arg_pos == 1 or arg_pos == 2
n_samples = len(relation_list)
w_indices = np.zeros((2 * max_length, n_samples)).astype('int64')
c_mask = np.zeros((max_length, n_samples), dtype=config.floatX)
node_mask = np.zeros((2 * max_length, n_samples), dtype=config.floatX)
children = np.zeros((n_samples, max_length, 3), dtype='int64')
node_label_tensor = np.zeros((2 * max_length, n_samples, len(node_label_alphabet)), dtype=config.floatX)
for i, relation in enumerate(relation_list):
if all_left_branching:
parse_tree = tree_util.left_branching_tree(relation, arg_pos)
else:
parse_tree = tree_util.find_parse_tree(relation, arg_pos)
if len(parse_tree.leaves()) == 0:
parse_tree = tree_util.left_branching_tree(relation, arg_pos)
indices = wbm.index_tokens(parse_tree.leaves(), ignore_OOV=False)
sequence_length = min(max_length, len(indices))
w_indices[:sequence_length, i] = indices[:sequence_length]
ordering_matrix, node_label_list, num_leaves = \
tree_util.reverse_toposort(parse_tree)
num_nodes = min(2 * max_length, ordering_matrix.shape[0])
if num_nodes > num_leaves:
num_inner_nodes = num_nodes - num_leaves
children[i, :num_inner_nodes, :] = ordering_matrix[num_leaves:num_nodes, :]
c_mask[:num_inner_nodes, i] = 1.
node_mask[num_leaves:num_nodes, i] = 1.
if len(node_label_alphabet) > 0:
for t, node_label in enumerate(node_label_list):
if node_label is not None and t < (2 * max_length):
if node_label in node_label_alphabet:
label_index = node_label_alphabet[node_label]
else:
label_index = node_label_alphabet['OTHERS']
node_label_tensor[t, i, label_index] = 1.
children = np.swapaxes(children, 0, 1)
embedding_series = \
wbm.wm[w_indices.flatten()].\
reshape([max_length * 2, n_samples, wbm.num_units]).\
astype(config.floatX)
return embedding_series, children, c_mask, node_mask, node_label_tensor
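# Hedged illustration of the shapes documented above (the example numbers are
# assumptions, not from the original file): with max_length=50, 32 relations
# and 300-dimensional embeddings, prep_tree_srm_arg returns
#   embedding_series   (100, 32, 300)   # 2T x N x d
#   children           (50, 32, 3)      # T x N x 3, after the swapaxes call
#   c_mask             (50, 32)         # T x N
#   node_mask          (100, 32)        # 2T x N
#   node_label_tensor  (100, 32, len(node_label_alphabet))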
def prep_serrated_matrix_relations(relation_list, wbm, max_length):
arg1_srm, arg1_mask = prep_srm_arg(relation_list, 1, wbm, max_length)
arg2_srm, arg2_mask = prep_srm_arg(relation_list, 2, wbm, max_length)
return (arg1_srm, arg1_mask, arg2_srm, arg2_mask)
def _check_masks(word_mask, c_mask):
"""Make sure that the two mask matrices are well-formed
word_mask and c_mask are T x N
We have to check that 0<= w_x + c_m <= 1. They can't both be 1.
"""
check_sum = word_mask + c_mask
assert(np.all(0 <= check_sum) and np.all(check_sum <= 1))
def prep_tree_lstm_serrated_matrix_relations(relation_list, wbm, max_length):
arg1_srm, arg1_children, arg1_c_mask, arg1_node_mask, _ = \
prep_tree_srm_arg(relation_list, 1, wbm, max_length)
arg2_srm, arg2_children, arg2_c_mask, arg2_node_mask, _ = \
prep_tree_srm_arg(relation_list, 2, wbm, max_length)
return (arg1_srm, arg1_children, arg1_c_mask, arg1_node_mask, \
arg2_srm, arg2_children, arg2_c_mask, arg2_node_mask)
def prep_stlstm_serrated_matrix_relations(relation_list, wbm, max_length, label_alphabet):
arg1_srm, arg1_children, arg1_c_mask, arg1_node_mask, arg1_node_label = \
prep_tree_srm_arg(relation_list, 1, wbm, max_length, False, label_alphabet)
arg2_srm, arg2_children, arg2_c_mask, arg2_node_mask, arg2_node_label = \
prep_tree_srm_arg(relation_list, 2, wbm, max_length, False, label_alphabet)
return (
[arg1_srm, arg1_children, arg1_c_mask, arg1_node_mask,
arg2_srm, arg2_children, arg2_c_mask, arg2_node_mask],
[arg1_node_label, arg2_node_label]
)
def np_floatX(data):
return np.asarray(data, dtype=config.floatX)
```
#### File: architectures/nn_discourse_parser/neural_discourse_parser.py
```python
import codecs
import sys
import json
from nets.bilinear_layer import NeuralNet
from nets.data_reader import DRelation
import nets.dense_feature_functions as df
from os import path
import theano
class DiscourseParser(object):
def __init__(self, model_file, dict_file):
self.net = NeuralNet.load(model_file)
self.word2vec_ff = get_word2vec_ff(dict_file, 'sum_pool')
self.index_to_label = {}
for label, label_index in self.net.label_alphabet.items():
self.index_to_label[label_index] = label
def classify_sense(self, data_dir, output_dir):
relation_file = '%s/relations-no-senses.json' % data_dir
parse_file = '%s/parses.json' % data_dir
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_dicts = [json.loads(x) for x in open(relation_file)]
relation_list = [DRelation(x, parse) for x in relation_dicts ]
data_matrices = self.word2vec_ff(relation_list)
classify = theano.function(self.net.input, self.net.predict[0])
predicted_labels = classify(*data_matrices)
output = codecs.open('%s/output.json' % output_dir, 'wb', encoding ='utf8')
for i, relation_dict in enumerate(relation_dicts):
relation_dict['Sense'] = [self.index_to_label[predicted_labels[i]]]
relation_dict['Arg1']['TokenList'] = [x[2] for x in relation_dict['Arg1']['TokenList']]
relation_dict['Arg2']['TokenList'] = [x[2] for x in relation_dict['Arg2']['TokenList']]
relation_dict['Connective']['TokenList'] = \
[x[2] for x in relation_dict['Connective']['TokenList']]
if len(relation_dict['Connective']['TokenList']) > 0:
relation_dict['Type'] = 'Explicit'
else:
relation_dict['Type'] = 'Implicit'
output.write(json.dumps(relation_dict) + '\n')
def get_word2vec_ff(dict_file, projection):
word2vec = df.EmbeddingFeaturizer(dict_file)
if projection == 'mean_pool':
return word2vec.mean_args
elif projection == 'sum_pool':
return word2vec.additive_args
elif projection == 'max_pool':
return word2vec.max_args
elif projection == 'top':
return word2vec.top_args
else:
raise ValueError('projection must be one of {mean_pool, sum_pool, max_pool, top}. Got %s '\
% projection)
if __name__ == '__main__':
name = sys.argv[1]
model_file = path.join(sys.argv[2], "implicit_conll_ff_train_test.pkl")
input_dataset = sys.argv[3]
output_dir = sys.argv[4]
dict_file = sys.argv[5]
parser = DiscourseParser(model_file, dict_file)
parser.classify_sense(input_dataset, output_dir)
```
#### File: architectures/nn_discourse_parser/train_models.py
```python
import codecs
import json
import sys
import timeit
import random
import numpy as np
import theano
import nets.base_label_functions as l
import nets.util as util
from nets.data_reader import DRelation
from nets.learning import DataTriplet
from nets.templates import build_ff_network
def extract_non_explicit_relations(data_folder, label_function=None):
parse_file = '%s/parses.json' % data_folder
parse = json.load(codecs.open(parse_file, encoding='utf8'))
relation_file = '%s/relations.json' % data_folder
relation_dicts = [json.loads(x) for x in open(relation_file)]
relations = [DRelation(x, parse) for x in relation_dicts if x['Type'] != 'Explicit']
if label_function is not None:
relations = [x for x in relations if label_function.label(x) is not None]
return relations
def load_data(dir_list, relation_to_matrices_fn, sense_lf=None):
if sense_lf is None:
sense_lf = l.OriginalLabel()
relation_list_list = [extract_non_explicit_relations(dir, sense_lf) for dir in dir_list]
data_list = [relation_to_matrices_fn(relation_list) for relation_list in relation_list_list]
label_vectors, label_alphabet = util.label_vectorize(relation_list_list, sense_lf)
data_triplet = DataTriplet(data_list, [[x] for x in label_vectors], [label_alphabet])
return data_triplet
def implicit_second_level_ff_train(args):
dir_list = ['/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-en-03-29-16-train',
'/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-en-03-29-16-dev',
'/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-en-03-29-16-test']
num_units = 300
projection = 'sum_pool'
word2vec_ff = util._get_word2vec_ff(num_units, projection)
data_triplet = load_data(dir_list, word2vec_ff, l.SecondLevelLabel())
train(num_hidden_layers=1, num_hidden_units=400, model_name='second_level_ff',
data_triplet=data_triplet, minibatch_size=53)
def implicit_conll_zh_ff_train(args):
dir_list = ['/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-zh-03-29-16-train',
'/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-zh-03-29-16-dev',
'/data/conll16st-en-zh-dev-train-test_LDC2016E50/conll16st-zh-03-29-16-test']
num_units = 250
projection = 'sum_pool'
vec_type = 'skipgram'
num_hidden_layers = 2
num_hidden_units = 250
word2vec_ff = util._get_zh_word2vec_ff(num_units, vec_type, projection, cdtb=True)
data_triplet = load_data(dir_list, word2vec_ff)
model_name ='conll_zh_ff'
net, best_dev_model, best_test_model = \
train(num_hidden_layers=num_hidden_layers, num_hidden_units=num_hidden_units,
model_name=model_name, data_triplet=data_triplet,
minibatch_size=None)
net.label_alphabet = data_triplet.label_alphabet_list[0]
eval_model(net, data_triplet.dev_data, data_triplet.dev_data_label[0], best_dev_model)
net.save('/model/%s_dev.pkl' % model_name, best_dev_model)
eval_model(net, data_triplet.test_data, data_triplet.test_data_label[0], best_test_model)
net.save('/model/%s_test.pkl' % model_name, best_test_model)
def implicit_conll_ff_train(experiment_name, data_base_path, embedding_path, model_store_path, train_input_layer=True):
dir_list = [data_base_path + '-train',
data_base_path + '-dev',
data_base_path + '-test']
num_units = 300
projection = 'sum_pool'
num_hidden_layers = 2
num_hidden_units = 300
word2vec_ff = util._get_word2vec_ff(embedding_path, projection)
data_triplet = load_data(dir_list, word2vec_ff)
model_name ='conll_ff'
net, best_dev_model, best_test_model = \
train(num_hidden_layers=num_hidden_layers, num_hidden_units=num_hidden_units,
model_name=model_name, data_triplet=data_triplet,
minibatch_size=None,
train_input_layer=train_input_layer)
net.label_alphabet = data_triplet.label_alphabet_list[0]
eval_model(net, data_triplet.dev_data, data_triplet.dev_data_label[0], best_dev_model)
net.save('{}/{}_dev.pkl'.format(model_store_path, experiment_name), best_dev_model)
eval_model(net, data_triplet.test_data, data_triplet.test_data_label[0], best_test_model)
net.save('{}/{}_test.pkl'.format(model_store_path, experiment_name), best_test_model)
def eval_model(net, data, label, params=None):
if params is not None:
for param, best_param in zip(net.params, params):
param.set_value(best_param)
classify = theano.function(net.input, net.predict[0])
predicted_labels = classify(*data)
accuracy = float(np.sum(predicted_labels == label)) / len(label)
print accuracy
def train(num_hidden_layers, num_hidden_units, model_name, data_triplet,
minibatch_size=None, dry=False, train_input_layer=True):
if dry:
num_reps = 2
n_epochs = 2
model_name = '_model_trainer_dry_run'
else:
num_reps = 50
n_epochs = 5
json_file = util.set_logger(model_name, dry)
baseline_dev_acc = util.compute_baseline_acc(data_triplet.dev_data_label[0])
baseline_test_acc = util.compute_baseline_acc(data_triplet.test_data_label[0])
best_dev_so_far = 0.0
best_test_so_far = 0.0
best_dev_model = None
best_test_model = None
    random_batch_size = minibatch_size is None
net, trainer = build_ff_network(data_triplet, num_hidden_layers, num_hidden_units,
train_input_layer=train_input_layer)
for rep in xrange(num_reps):
random_seed = rep + 10
rng = np.random.RandomState(random_seed)
net.reset(rng)
trainer.reset()
if random_batch_size:
minibatch_size = np.random.randint(20, 60)
start_time = timeit.default_timer()
best_iter, best_dev_acc, best_test_acc, best_parameters = \
trainer.train_minibatch_triplet(minibatch_size, n_epochs)
best_dev_acc = round(best_dev_acc, 4)
best_test_acc = round(best_test_acc, 4)
end_time = timeit.default_timer()
rep_elapsed = end_time - start_time
print '== Rep %s : Training process takes %s seconds' % (rep, rep_elapsed)
print '== The training process will require %s hour %s minutes %s seconds more.' % \
util.convert_seconds_to_hours(rep_elapsed * (num_reps - rep - 1))
print '== Best iteration is %s; ' % best_iter + \
'Test accuracy = %s; ' % best_test_acc + \
'Baseline test accuracy = %s; ' % baseline_test_acc + \
'Best dev accuracy = %s; ' % best_dev_acc + \
'Baseline dev accuracy = %s' % baseline_dev_acc
if best_test_acc > best_test_so_far:
best_test_model = best_parameters
best_test_so_far = best_test_acc
if best_dev_acc > best_dev_so_far:
best_dev_model = best_parameters
best_dev_so_far = best_dev_acc
result_dict = {
'test accuracy': best_test_acc,
'baseline test accuracy': baseline_test_acc,
'best dev accuracy': best_dev_acc,
'baseline dev accuracy': baseline_dev_acc,
'best iter': best_iter,
'random seed': random_seed,
'num rep': rep,
'minibatch size': minibatch_size,
'learning rate': trainer.learning_rate,
'lr smoother': trainer.lr_smoother,
'experiment name': model_name,
'num hidden units': num_hidden_units,
'num hidden layers': num_hidden_layers,
'cost function': 'crossentropy',
'dropout' : False
}
json_file.write('%s\n' % json.dumps(result_dict, sort_keys=True))
return net, best_dev_model, best_test_model
if __name__ == '__main__':
experiment_name = sys.argv[1]
embedding_path = sys.argv[2]
data_base_path = sys.argv[3]
model_store_path = sys.argv[4]
if sys.argv[5] == "train_input_layer":
train_input_layer = True
elif sys.argv[5] == 'static_input_layer':
train_input_layer = False
else:
raise ValueError("Must set input layer either to train or static.")
globals()[experiment_name](experiment_name, data_base_path, embedding_path, model_store_path, train_input_layer)
```
#### File: architectures/svm_baseline/main.py
```python
from misc_utils import get_config, get_logger, timer
from resources import PDTBRelations
from extractors import CBOW
from model import SVM
import argparse
import numpy as np
np.random.seed(0) # pylint: disable=E1101
EXTRACTOR_HANDLERS = {
'cbow': CBOW,
}
MODEL_HANDLERS = {
'svm': SVM
}
def load_resource(resource_config):
logger.debug("Loading data from %s", resource_config['path'])
resource_config = {x:y for x,y in resource_config.items() if x != 'name'}
resource = PDTBRelations(**resource_config)
return resource
def get_answers(instances):
return list(instances.get_correct())
def extract_features(extract_config, instances):
"""
Data should be of type PDTBRelations for now. I should generalize this.
Returns with dimensionality:
sentences x words x n_features
"""
extractors = []
# Sorting just makes sure they always end up in the same order,
# Python's random hashing could mess this up
for params in sorted(extract_config, key=lambda v: v['name']):
extractor_params = {x: y for x,y in params.items() if x != 'name'}
extractor = EXTRACTOR_HANDLERS[params['name']](**extractor_params)
extractors.append(extractor)
return instances.get_feature_tensor(extractors)
def get_model(model_name):
model = MODEL_HANDLERS[model_name]
return model
def run_experiment(config):
logger.info("Setting up...")
# Load resources
train_time = timer()
if config['train']:
training_data = load_resource(config['resources']['training_data'])
logger.debug("Training data classes: {}".format(training_data.y_indices))
correct = get_answers(training_data)
extracted_features = extract_features(config['extractors'], training_data)
model_class = get_model(config['model']['name'])
with train_time:
model_config = {x:y for x,y in config['model'].items() if x != 'name'}
model = model_class(n_features=extracted_features.shape[2],
n_classes=len(training_data.y_indices),
**model_config)
model.train(extracted_features, correct)
logger.info("Finished training!")
test_time = timer()
if config['test']:
test_data = load_resource(config['resources']['test_data'])
extracted_features = extract_features(config['extractors'], test_data)
model_class = get_model(config['model']['name'])
with test_time:
model_config = {x:y for x,y in config['model'].items() if x != 'name'}
model = model_class(n_features=extracted_features.shape[2],
n_classes=len(test_data.y_indices),
**model_config)
predicted = model.test(extracted_features)
test_data.store_results(predicted, config['test_output_path'])
logger.info("Finished testing!")
if __name__ == '__main__':
config = get_config('config.yaml')
parser = argparse.ArgumentParser(description='Run experiment.')
parser.add_argument('--train', action='store_true', default=False)
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--train-path', type=str, required=False)
parser.add_argument('--dev-path', type=str, required=False)
parser.add_argument('--test-path', type=str, required=False)
parser.add_argument('--embedding-path', type=str, required=False)
parser.add_argument('--model-store-path', type=str, required=False)
parser.add_argument('--test-output-path', type=str, required=False)
parser.add_argument('--svm-kernel', type=str, required=False)
parser.add_argument('-c', type=float, required=False)
args = parser.parse_args()
config['train'] = args.train
config['test'] = args.test
if args.train_path:
config['resources']['training_data']['path'] = args.train_path
if args.dev_path:
config['resources']['dev_data']['path'] = args.dev_path
if args.test_path:
config['resources']['test_data']['path'] = args.test_path
if args.model_store_path:
config['model']['store_path'] = args.model_store_path
if args.test_output_path:
config['test_output_path'] = args.test_output_path
if args.svm_kernel:
config['model']['kernel'] = args.svm_kernel
if args.c:
config['model']['c'] = args.c
if args.embedding_path:
for extractor in config['extractors']:
extractor['path'] = args.embedding_path
logger = get_logger(__name__, config=config['logging'])
logger.info("Config: {}".format(config))
run_experiment(config)
``` |
{
"source": "jimmycallin/pydsm",
"score": 3
} |
#### File: pydsm/pydsm/composition.py
```python
from pydsm.indexmatrix import IndexMatrix
def linear_additive(v1, v2, alpha=0.5, beta=0.5):
"""
Weighted elementwise addition.
"""
compword = str(v1.row2word[0]) + " " + str(v2.row2word[0])
comp = (alpha * v1) + (beta * v2)
comp.row2word = [compword]
return comp
def multiplicative(v1, v2, alpha=1, beta=1):
"""
Weighted elementwise multiplication.
"""
compword = str(v1.row2word[0]) + " " + str(v2.row2word[0])
comp = (alpha * v1) * (beta * v2)
comp.row2word = [compword]
return comp
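# Hedged usage sketch (assumed vectors, following the IndexMatrix construction
# used in the pydsm tests): composing two single-row word vectors.
#   import scipy.sparse as sp
#   cols = ['furiously', 'makes', 'sense']
#   v1 = IndexMatrix(sp.coo_matrix([[2, 5, 3]]), ['colorless'], cols)
#   v2 = IndexMatrix(sp.coo_matrix([[0, 1, 9]]), ['ideas'], cols)
#   linear_additive(v1, v2).row2word  # -> ['colorless ideas']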
__dsm__ = ['compose']
```
#### File: pydsm/pydsm/evaluation.py
```python
import pydsm
import pydsm.similarity
from scipy.stats import spearmanr
from pkg_resources import resource_stream
import pickle
import os
def synonym_test(matrix, synonym_test, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using a synonym test.
:param matrix: A DSM matrix.
:param synonym_test: A dictionary where the key is the word in focus,
and the value is a list of possible word choices.
The first word in the dict is the correct choice.
:param sim_func: The similarity function to use for evaluation.
:return: Accuracy of synonym test.
"""
correct = []
incorrect = []
unknown_focus_words = []
unknown_synonyms = []
for focus_word, candidates in synonym_test.items():
if focus_word not in matrix.word2row:
unknown_focus_words.append(focus_word)
continue
known_words = [w for w in candidates if w in matrix.word2row]
unknown_words = [w for w in candidates if w not in matrix.word2row]
if candidates[0] in unknown_words:
unknown_synonyms.append(focus_word)
continue
word_sims = sim_func(matrix[focus_word], matrix[known_words], assure_consistency=False).transpose().sort(ascending=False)
if word_sims.row2word[0] == candidates[0]:
correct.append(focus_word)
else:
incorrect.append(focus_word)
accuracy = len(correct) / len(synonym_test)
print("Evaluation report")
print("Accuracy: {}".format(accuracy))
print("Number of words: {}".format(len(synonym_test)))
print("Correct words: {}".format(correct))
print("Incorrect words: {}".format(incorrect))
print("Unknown words: {}".format(unknown_focus_words))
print("Unknown correct synonym: {}".format(unknown_synonyms))
return accuracy
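# Hedged usage sketch (hypothetical words, not from the bundled test data):
# per the docstring, the first candidate in each list is the correct synonym.
#   accuracy = synonym_test(matrix, {
#       'enormously': ['tremendously', 'appropriately', 'uniquely', 'decidedly'],
#   })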
def simlex(matrix, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using simlex-999 evaluation test [1].
:param matrix: A DSM matrix.
:param sim_func: The similarity function to use for evaluation.
:return: Spearman correlation coefficient.
[1] SimLex-999: Evaluating Semantic Models with (Genuine) Similarity Estimation. 2014.
    <NAME>, <NAME> and <NAME>. Preprint published on arXiv. arXiv:1408.3456
"""
wordpair_sims = pickle.load(resource_stream(__name__, os.path.join('resources', 'simlex.pickle')))
simlex_vals = []
sim_vals = []
skipped = []
for (w1, w2), value in wordpair_sims.items():
if w1 not in matrix.word2row or w2 not in matrix.word2row:
skipped.append((w1, w2))
continue
sim_vals.append(sim_func(matrix[w1], matrix[w2])[0,0])
simlex_vals.append(value)
spearman = spearmanr(simlex_vals, sim_vals)
print("Evaluation report")
print("Spearman correlation: {}".format(spearman[0]))
print("P-value: {}".format(spearman[1]))
print("Skipped the following word pairs: {}".format(skipped ))
return spearman[0]
def toefl(matrix, sim_func=pydsm.similarity.cos):
"""
Evaluate DSM using TOEFL synonym test [1].
:param matrix: A DSM matrix.
:param sim_func: The similarity function to use for evaluation.
:return: Accuracy of TOEFL test.
[1] http://aclweb.org/aclwiki/index.php?title=TOEFL_Synonym_Questions_%28State_of_the_art%29
"""
synonym_dict = pickle.load(resource_stream(__name__, os.path.join('resources', 'toefl.pickle')))
return synonym_test(matrix, synonym_dict, sim_func=sim_func)
```
#### File: pydsm/tests/test_weighting.py
```python
from unittest import TestCase
import scipy.sparse as sp
from pydsm import IndexMatrix
import pydsm.weighting as weighting
__author__ = 'jimmy'
class TestWeighting(TestCase):
def create_mat(self, list_, row2word=None, col2word=None):
if row2word is None:
row2word = self.row2word
if col2word is None:
col2word = self.col2word
return IndexMatrix(sp.coo_matrix(list_), row2word, col2word)
def setUp(self):
self.spmat = sp.coo_matrix([[2, 5, 3], [0, 1, 9]])
self.row2word = ['a', 'b']
self.col2word = ['furiously', 'makes', 'sense']
self.mat = IndexMatrix(self.spmat, self.row2word, self.col2word)
def test_epmi(self):
res = self.create_mat([[2.0, 1.6666666666666667, 0.5], [0.0, 0.33333333333333337, 1.5]])
self.assertAlmostEqual(weighting.epmi(self.mat), res)
def test_pmi(self):
res = self.create_mat([[0.6931471805599453, 0.5108256237659907, -0.6931471805599453],
[0.0, -1.0986122886681096, 0.4054651081081644]])
self.assertAlmostEqual(res, weighting.pmi(self.mat))
def test_ppmi(self):
res = self.create_mat([[0.6931471805599453, 0.5108256237659907, 0.0], [0.0, 0.0, 0.4054651081081644]])
self.assertAlmostEqual(weighting.ppmi(self.mat), res)
def test_npmi(self):
res = self.create_mat([[0.3010299956639812, 0.3684827970831031, -0.3653681296292078],
[0.0, -0.3667257913420846, 0.507778585013894]])
self.assertAlmostEqual(weighting.npmi(self.mat), res)
def test_pnpmi(self):
res = self.create_mat([[0.3010299956639812, 0.3684827970831031, 0.0], [0.0, 0.0, 0.507778585013894]])
self.assertAlmostEqual(weighting.pnpmi(self.mat), res)
```
#### File: pydsm/pydsm/weighting.py
```python
from math import log
def epmi(matrix):
"""
Exponential pointwise mutual information
"""
row_sum = matrix.sum(axis=1)
col_sum = matrix.sum(axis=0)
total = row_sum.sum(axis=0)[0, 0]
inv_col_sum = 1 / col_sum # shape (1,n)
inv_row_sum = 1 / row_sum # shape (n,1)
inv_col_sum = inv_col_sum * total
mat = matrix * inv_row_sum
mat = mat * inv_col_sum
return mat
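# Hedged worked example (numbers taken from tests/test_weighting.py): for the
# co-occurrence matrix [[2, 5, 3], [0, 1, 9]] the total is 20, row sums are
# [10, 10] and column sums are [2, 6, 12], so
#   epmi[0, 0] = (2/20) / ((10/20) * (2/20))  = 2.0
#   epmi[0, 1] = (5/20) / ((10/20) * (6/20))  ~ 1.667
#   epmi[1, 2] = (9/20) / ((10/20) * (12/20)) = 1.5
# which matches the expected values in test_epmi.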
def smoothed_epmi(matrix, alpha=0.75):
"""
Performs smoothed epmi.
See smoothed_ppmi for more info.
Derived from this:
#(w,c) / #(TOT)
--------------
(#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)
==>
#(w,c) / #(TOT)
--------------
(#(w) * #(c)^a) / #(TOT)^(a+1))
==>
#(w,c)
----------
(#(w) * #(c)^a) / #(TOT)^a
==>
#(w,c) * #(TOT)^a
----------
#(w) * #(c)^a
"""
row_sum = matrix.sum(axis=1)
col_sum = matrix.sum(axis=0).power(alpha)
total = row_sum.sum(axis=0).power(alpha)[0, 0]
inv_col_sum = 1 / col_sum # shape (1,n)
inv_row_sum = 1 / row_sum # shape (n,1)
inv_col_sum = inv_col_sum * total
mat = matrix * inv_row_sum
mat = mat * inv_col_sum
return mat
def pmi(matrix):
"""
Pointwise mutual information
"""
mat = epmi(matrix).log()
return mat
def shifted_pmi(matrix, k):
"""
Shifted pointwise mutual information
"""
mat = pmi(matrix) - log(k)
return mat
def smoothed_pmi(matrix, alpha=0.75):
"""
Smoothed pointwise mutual information
See smoothed_ppmi for more information.
"""
mat = smoothed_epmi(matrix, alpha).log()
return mat
def ppmi(matrix):
"""
Positive pointwise mutual information
"""
mat = pmi(matrix)
return mat[mat > 0]
def shifted_ppmi(matrix, k):
"""
Shifted positive pointwise mutual information
"""
mat = shifted_pmi(matrix, k)
return mat[mat > 0]
def smoothed_ppmi(matrix, alpha=0.75):
"""
Smoothed positive pointwise mutual information
Performs PPMI with context distribution smoothing,
as described by [1].
[1] <NAME> (2015). Improving Distributional
Similarity with Lessons Learned from Word Embeddings
"""
mat = smoothed_pmi(matrix, alpha)
return mat[mat > 0]
def npmi(matrix):
"""
Normalized pointwise mutual information
"""
total = matrix.sum(axis=0).sum(axis=1)[0, 0]
log_probs = -matrix.divide(total).log()
return pmi(matrix).divide(log_probs)
def pnpmi(matrix):
"""
Positive normalized pointwise mutual information
"""
mat = npmi(matrix)
return mat[mat > 0]
def lmi(matrix):
"""
Local mutual information (not tested).
"""
ppmi_mat = ppmi(matrix)
return ppmi_mat.multiply(matrix.log())
__dsm__ = ['apply_weighting']
``` |
{
"source": "jimmycao/libga_mpi",
"score": 2
} |
#### File: py/libga_gat/optimizationprocess.py
```python
import zmq
import subprocess
import multiprocessing
import imp
import os
import numpy as np
import time
import logging
import pickle
import threading
import sys
URI_process_to_host = "tcp://127.0.0.1:5533"
URI_host_to_process = "tcp://127.0.0.1:5534"
# each message is sent via send_multipart( type, content ). if
# there is no content, we send an empty string.
class MessageType:
# logging record from process to UI
logging_record = "0"
# data update (genome, ospace, fitnesses) from process to UI
data_update = "1"
# signals to UI that the optimization process is done
exit_notification = "2"
# send a quit request from UI to optimization process
abort_process = "3"
# load a module. this is used for modules in a parent path
# (e.g. sga.py) to avoid any __init__.py and relative import
# trickery.
def _load_module(path):
mod_name = os.path.split(path)[1]
base = os.path.splitext(mod_name)[0]
return imp.load_source(base, path)
# the UI owns a single optimization server once an optimization
# is started. it will listen in a separate thread.
class OptimizationServer(threading.Thread):
def __init__(self, on_logging_record, on_data_update):
threading.Thread.__init__(self)
# save parameters
self.on_logging_record = on_logging_record
self.on_data_update = on_data_update
self.process_is_running = True
def run(self):
# init context
self.ctx = zmq.Context()
# init in and out sockets
self.out_socket = self.ctx.socket(zmq.PUB)
self.out_socket.bind(URI_host_to_process)
self.in_socket = self.ctx.socket(zmq.SUB)
self.in_socket.setsockopt(zmq.SUBSCRIBE, "")
self.in_socket.bind(URI_process_to_host)
while True:
[t, contents] = self.in_socket.recv_multipart()
if t == MessageType.exit_notification:
# stop receiving messages, end thread. no more
                # abort requests can be sent.
self.process_is_running = False
# clean up zmq
self.out_socket.close()
self.in_socket.close()
self.ctx.term()
# exit thread
return
if t == MessageType.logging_record:
# forward logging record to UI
self.on_logging_record(contents)
continue
if t == MessageType.data_update:
# forward genome data, ospace and fitnesses to UI
self.on_data_update(contents)
def abort_process(self):
if self.process_is_running:
# this will "non-violently" end the optimization process
# in its next generation
self.out_socket.send_multipart([MessageType.abort_process, ""])
# send log entries via URI_logger_to_host
class ZMQLogger(logging.Handler):
def __init__(self, socket):
self.out_socket = socket
logging.Handler.__init__(self)
def emit(self, record):
self.out_socket.send_multipart([MessageType.logging_record
, pickle.dumps(record)])
def _crossover_func(gacommon, name):
if "SBX" in name:
return gacommon.sbx_crossover
if "BLX" in name:
return gacommon.blxa_crossover
def _selection_func(sga, name):
if "Binary" in name:
return sga.BinaryTournamentSelector
if "Rank" in name:
return sga.RankSelector
if "Roulette" in name:
return sga.RouletteWheelSelector
if "SUS" in name:
return sga.SusSelector
# running the optimization algorithm in its own process for safety
class OptimizationClient(multiprocessing.Process):
def __init__(self, mod_name, mod_path, func_name
, objectives, variables, constraints, population
, generations, elite, archive, mutation
, crossover, selection):
multiprocessing.Process.__init__(self)
# save parameters
self.mod_name = mod_name
self.mod_path = mod_path
self.func_name = func_name
self.objectives = objectives
self.variables = variables
self.constraints = constraints
self.population = population
self.generations = generations
self.elite = elite
self.archive = archive
self.mutation = mutation
self.crossover = crossover
self.selection = selection
# no error handling here, since the module has been "test loaded"
# in the UI thread before spawning this process.
def load_module(self):
self.logger.info("Loading custom module with objective function(s)")
base = os.path.splitext(self.mod_name)[0]
self.module = imp.load_source(base, self.mod_path)
self.function = getattr(self.module, self.func_name)
def run(self):
# initialize outbound zmq socket
self.ctx = zmq.Context()
self.out_socket = self.ctx.socket(zmq.PUB)
self.out_socket.connect(URI_process_to_host)
np.random.seed(int(time.time()))
# generate inbound zmq socket
self.in_socket = self.ctx.socket(zmq.SUB)
self.in_socket.setsockopt(zmq.SUBSCRIBE, "")
self.in_socket.connect(URI_host_to_process)
# set up a file logger
self.logger = logging.getLogger("optimizationclient")
self.logger.addHandler(ZMQLogger(self.out_socket))
self.logger.setLevel(logging.INFO)
# load the user-supplied objective functions from a python module
self.load_module()
# create initial genome
genome = [self.constraints[:,0] \
+ (self.constraints[:,1] - self.constraints[:,0]) \
* np.random.random(self.variables) for i in range(self.population)]
# bind to common module
self.logger.info("Binding to gacommon.py module")
gacommon = _load_module("../gacommon.py")
# create the right algorithm instance. by design all parameters that
# differ between SGA and SPEA2 are passed to the constructor.
# the alg.start() methods of SGA and SPEA2 have identical signatures.
if self.objectives > 1:
self.logger.info("Binding to spea2.py module")
spea2 = _load_module("../spea2.py")
self.alg = spea2.Spea2(genome \
, archive_percentage = self.archive / float(self.population))
else:
self.logger.info("Binding to sga.py module")
sga = _load_module("../sga.py")
self.alg = sga.SGA(genome \
, elite_percentage = self.elite / float(self.population) \
, selector_type = _selection_func(sga, self.selection))
# finally run the thing
self.alg.start(
lambda g : self.function(np.asarray(g))
, self.on_progress
, _crossover_func(gacommon, self.crossover)
, lambda s: gacommon.is_within_constraints(s, self.constraints)
, lambda s: gacommon.mutate_uniform(s \
, self.constraints, prob = self.mutation / 100.0)
)
# notify host that we are done
self.logger.info("Algorithm done.<hr />")
self.out_socket.send_multipart([MessageType.exit_notification, ""])
# clean up zmq
self.out_socket.close()
self.in_socket.close()
self.ctx.term()
def on_progress(self):
# communicate genome to the host
# send genome information (dependent on whether we are working
# with spea2 or with sga).
genome_info = {"genome":self.alg.genome, "ospace":self.alg.ospace \
, "fitnesses":self.alg.fitnesses} \
if self.objectives > 1 else \
{"genome":self.alg.genome, "ospace":self.alg.ospace}
# send info "over the wire"
self.out_socket.send_multipart([MessageType.data_update \
, pickle.dumps(genome_info)])
# see if there is a request to abort from the UI
poller = zmq.Poller()
poller.register(self.in_socket, zmq.POLLIN)
if len(poller.poll(0)) > 0:
# yes, there is a message coming in
[t, content] = self.in_socket.recv_multipart()
if t == MessageType.abort_process:
self.logger.info("Received request to abort")
return False
# run until the maximum number of generations have been reached
return self.alg.generation < self.generations
# start up the server/host, the optimization process and return the
# server to the UI thread.
def start_subprocess(on_logging_record, on_data_update
, mod_name, mod_path, func_name, objectives
, variables, constraints, population, generations
, elite, archive, mutation, crossover, selection):
server = OptimizationServer(on_logging_record, on_data_update)
client = OptimizationClient(mod_name, mod_path, func_name
, objectives, variables, constraints, population, generations
, elite, archive, mutation, crossover, selection)
server.start()
client.start()
return server
```
#### File: libga_mpi/py/paretoplot.py
```python
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
class Paretoplot2D:
def __init__(self, genome_info):
self._genome = genome_info['genome']
self._ospace = genome_info['ospace']
self._fitnesses = genome_info['fitnesses']
if self._ospace.shape[1] < 2:
raise Exception('This plot is not suitable for 1D problems')
if self._ospace.shape[1] > 2:
print("Warning: This optimization problem has more than two "
+ "objectives. Only the first two are displayed for now. "
+ "Change the paretoplo.py show() function.")
self._fig = plt.figure(1)
rect = self._fig.patch
rect.set_facecolor('white')
self._ax = self._fig.add_subplot(1, 1, 1)
self._ax.set_title('Pareto Optimal Front')
self._ax.set_xlabel('Objective #0')
self._ax.set_ylabel('Objective #1')
self._fig.canvas.mpl_connect('pick_event', self.on_pick)
def on_pick(self, event):
        if event.ind is not None and len(event.ind) >= 1:
print '''\r
Variables:\r
{0}\r
Objectives:\r
{1}\r
Fitness:\r
{2}'''.format(self._genome[event.ind[0]],
self._ospace[event.ind[0]],
self._fitnesses[event.ind[0]])
def show(self):
sc = plt.scatter(
self._ospace[:, 0],
self._ospace[:, 1],
c=self._fitnesses,
cmap=matplotlib.cm.get_cmap('gray'),
s=40,
picker=5,
)
plt.colorbar(sc)
plt.grid(True)
plt.show()
if __name__ == '__main__':
if not len(sys.argv) == 2:
print 'usage: paretoplot.py <npz file>'
else:
plot = Paretoplot2D(np.load(sys.argv[1]))
plot.show()
``` |
{
"source": "jimmycdunn/transition-risk-tool",
"score": 3
} |
#### File: ffequity/processors/analyst.py
```python
import numpy as np
import pandas as pd
from fuzzywuzzy import fuzz
class AnalystException(Exception):
pass
class Analyst:
"""Analyst performs risk computations on the data within data structures"""
def __init__(self, dfs):
self.dfs = dfs
def analyze_equity(self, dataframefile):
# analyze will match the available data and then compute summary statistics
# first, get the years that the user has requested
years = sorted(set([key[:4] for key in self.dfs]))
matchedData = self.match_data(years)
# write the matched files to /assessment/
for year in matchedData:
dataframefile.data = matchedData[year]
dataframefile.write(year+'assessment', path="./data/assessment/")
companyNames = pd.DataFrame(dataframefile.data.loc[:, "Company(Company)"])
companyNames = companyNames[companyNames.loc[:, "Company(Company)"].notnull()]
companyNames["MarketCap(B)"] = None
dataframefile.data = companyNames
dataframefile.write(year+'MarketCaps', path="./data/financial_data/")
def analyze_carbon(self, dataframefile):
# get the years
years = sorted(set([key[:4] for key in self.dfs]))
# check if the user has financial data for this year
completeData = {}
for year in years:
completeData[year] = self.dfs[year + "assessment"]
try:
financial = self.dfs[year+"financial_data"]
except KeyError:
print(f"No financial data for {year}, will not compute fair-share allocation")
continue
# if the user has financial data, update the matched data to include it
completeData[year] = self.match_finance(year, completeData[year], financial)
# compute carbon held if there was financial data provided
analyzedData = self.analyze_data(completeData)
# write fossil fuel assessment to CSV files in /benchmarks
for year in analyzedData:
dataframefile.data = analyzedData[year]
dataframefile.write(year+'benchmarks', path="./data/benchmarks/")
def match_data(self, years):
matchedData = {}
for year in years:
# check if the user has equity data for this year
try:
equity = self.dfs[year+"equity_data"]
except KeyError:
print(f"No equity data for {year}, will not match")
continue
# check if the user has carbon data for this year
try:
carbon = self.dfs[year+"carbon_data"]
except KeyError:
print(f"No carbon data for {year}, will not match")
continue
# if the user has both equity and carbon data, match them
matchedData[year] = self.match_equity(year, equity, carbon)
return matchedData
def match_equity(self, year, equity, carbon):
# will return a dataframe with the matched data
# first, instantiate the matched dataframe from the equity index
# and the columns from both data files
allColumns = [x for x in equity.columns] + [y for y in carbon.columns]
# remove duplicate values from allColumns, order doesn't matter
allColumns = set(allColumns)
        # create matchedDf with the columns of both data sets
matchedDf = pd.DataFrame(index=range(len(equity.index)), columns=allColumns)
# populate Stocks column for pd.merge(left, right, on='Stocks')
matchedDf.Stocks = equity.Stocks
        # get a list of carbon companies and equity stock names for matching
carbonCompanies = [x for x in carbon.loc[:, 'Company(Company)']]
equityCompanies = [x for x in equity.loc[:, 'Stocks']]
# iterate through all of the carbon companies first, because the user
# is trying to see if a stock is on the carbon list
for carbonCompany in carbonCompanies:
bestStocks = [] # create a place to store best matches
            # for now, using edit distance w/ a 90% match threshold
# in the future, would recommend cosine similarity to catch abbreviations
for equityCompany in equityCompanies:
matchRatio = fuzz.partial_token_set_ratio(equityCompany, carbonCompany)
thresh = 90
if matchRatio < thresh:
continue
else:
bestStocks.append(equityCompany)
# if there are no matches to the carbon company, then move on
if len(bestStocks) == 0:
continue
else:
# grab the data row from carbonCompanies for current carbonCompany
carbonRow = carbon['Company(Company)'] == carbonCompany
carbonValues = carbon[carbonRow]
# iterate if there are multiple stock options in one company
for equityCompany in bestStocks:
# pull the index matching the stock in matchedDf for updating and align indices
                    carbonValues.index = matchedDf[matchedDf['Stocks'] == equityCompany].index
                    # note: a ValueError is raised above when a company has a duplicate row in
                    # both the Coal list and the Oil & Gas list
                    # update matchedDf with the carbon data
matchedDf.update(carbonValues)
matchedDf.update(equity) # index is already aligned to equity
print(f"{year} complete...")
return matchedDf
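    # Illustrative behaviour of the fuzzy matcher used above (a sketch assuming fuzzywuzzy's
    # default preprocessing; the scores are indicative, not taken from the source):
    #   fuzz.partial_token_set_ratio("EXXON MOBIL CORP", "Exxon Mobil")  # ~100 -> counted as a match
    #   fuzz.partial_token_set_ratio("Chevron", "Cheniere Energy")       # well below the 90 threshold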
def match_finance(self, year, matchedDf, financial):
# create a new column in matchedDf to include MktCap
matchedDf["MarketCap(B)"] = None
# iterate through the carbon company names in matchedDf
carbonCompanies = matchedDf.loc[matchedDf.loc[:, "Company(Company)"].notnull()].loc[:, "Company(Company)"]
for carbonCompany in carbonCompanies:
# check if carbon company is in financial list
if carbonCompany in financial.loc[:, "Company(Company)"].values:
# get financial data linked to the company
financialRow = financial.loc[:, 'Company(Company)'] == carbonCompany
financialData = financial[financialRow].loc[:, "MarketCap(B)"].values[0]
matchedRow = matchedDf.loc[:, "Company(Company)"] == carbonCompany
matchedDf.loc[matchedRow, "MarketCap(B)"] = financialData
return matchedDf
def analyze_data(self, completeData):
analyzedData = {}
for year in completeData:
df = completeData[year]
fuels = self.get_fuels(df) # get fuels by year
            # rebuild the full reserve column names, e.g. 'Coal' + '(GtCO2)' -> 'Coal(GtCO2)'
            reserves = {k: k + v for k, v in fuels.items()}
for key in reserves:
# populate dataframe with intensities
# fuels[key] is the units of the name of the fuel, market cap is in B
try:
df[key + 'Intensity' + fuels[key] + '/$B'] = df[reserves[key]] / df['MarketCap(B)']
df[key + '(tCO2)'] = df[key + 'Intensity' + fuels[key] + '/$B'] * df['EndingMarketValue']
except KeyError:
continue
# remove infinities created by EMV = 0
df = df.replace(np.inf, np.nan)
# save rows that have a carbon company affiliated
df = df[df.loc[:, "Company(Company)"].notnull()]
# address companies with multiple stock options
df = self.combine_multiple_stocks(df)
for key in reserves:
df[key + 'Pctile'] = df[reserves[key]].rank(pct=True)
df[key + '(tCO2)Pctile'] = df[key + '(tCO2)'].rank(pct=True)
analyzedData[year] = df
return analyzedData
def get_fuels(self, df):
# returns a dictionary with keys as names of fuels and values of units of fuels
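        # e.g. columns ['Company(Company)', 'Coal(GtCO2)', 'MarketCap(B)'] yield {'Coal': '(GtCO2)'}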
fuels = {}
for col in df.columns:
if '(' in col and 'Company' not in col and 'MarketCap' not in col:
# split the column into name and (unit) at the (
splitCol = col.split('(')
name = splitCol[0]
unit = '(' + splitCol[1] # add leading parenthesis back in
fuels[name] = unit
return fuels
def combine_multiple_stocks(self, df):
# returns an analysis dataframe with multiple stock rows combined
# into one company row to aggregate holdings across multiple
# options in a company
multipleStockCompanies = set(df[df.duplicated(subset="Company(Company)")].loc[:, "Company(Company)"])
for duplicate in multipleStockCompanies:
combinedStocksCompany = pd.DataFrame()
allCompanyStocks = df[df.loc[:, "Company(Company)"] == duplicate]
keepSame = ["Company(Company)", "MarketCap(B)", "Stocks", "CoalIntensity(GtCO2)/$B",
"OilIntensity(GtCO2)/$B", "GasIntensity(GtCO2)/$B"]
sumColumns = ["EndingMarketValue", "Coal(tCO2)", "Oil(tCO2)", "Gas(tCO2)"]
for col in keepSame:
combinedStocksCompany.loc[0,col] = allCompanyStocks.loc[:, col].values[0]
for col in sumColumns:
combinedStocksCompany.loc[0,col] = allCompanyStocks.loc[:, col].sum()
df.drop(allCompanyStocks.index, inplace=True)
            df = pd.concat([df, combinedStocksCompany], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
return df
```
#### File: ffequity/processors/validator.py
```python
import os
import re
class ValidatorException(Exception):
pass
class Validator:
"""This guys job is to find the data, validate it, and put it into data structures"""
def __init__(self, folderNames):
self.folderNames = folderNames
def validate(self, dataframefile):
self.validate_folders()
self.validate_files()
dfs = self.validate_data(dataframefile)
return dfs # to an Analyst instance
def validate_folders(self):
with os.scandir(path='./data') as it: # try ./data
# store name attribute of each os.DirEntry in iterator provided by scandir()
currentFolders = [x.name for x in it]
for folder in self.folderNames:
if folder not in currentFolders:
raise ValidatorException(f"Required folder not present: {folder}")
it.close() # Explicitly close the iterator to free memory
print("Folders Validated")
def validate_files(self):
for folder in self.folderNames:
with os.scandir(path='./data/'+folder) as it:
currentFiles = [x.name for x in it if x.name != ".gitignore"] # store name attributes of all files in a folder
for fileName in currentFiles:
if not fileName.endswith(".csv"): # validate filetype is a csv
raise ValidatorException(f"File Type is not csv: {fileName}")
if not re.match(r"\d{4}", fileName[:4]): # validate that first four digits of file name is a year
raise ValidatorException(f"File name must start with YYYY: {fileName}")
                it.close()  # Explicitly close the iterator to free memory
print(f"All files validated within {folder}")
print("Files validated")
def validate_data(self, dataframefile):
# dfs is a dictionary of dataframes
dfs = {}
for folder in self.folderNames:
with os.scandir(path='./data/'+folder) as it:
                currentFiles = [x.name for x in it if x.name != ".gitignore"]  # store name attributes of all files in a folder
for fileName in currentFiles:
df = dataframefile.read(os.path.join('./data/', folder, fileName))
# check the column titles
for col in df.columns:
if type(col) is not str: # ensure column names are string types
raise ValidatorException(f"File {fileName} needs to be formatted correctly: {col}")
# if column names are valid, then we can safely store the dataframe to our master dictionary
dfs[fileName[:4] + folder] = df
# i.e. dfs['2016carbon_data']
it.close()
print(f"All data validated within {folder}")
print("Data validated")
return dfs
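# Hedged usage sketch (the folder names are assumptions based on the keys the Analyst class
# expects, e.g. '2016carbon_data'; dataframefile is any helper exposing .read()/.data/.write()):
#   validator = Validator(["equity_data", "carbon_data", "financial_data"])
#   dfs = validator.validate(dataframefile)
#   dfs["2016carbon_data"]  # DataFrames are keyed as YYYY + folder name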
``` |
{
"source": "jimmycheng603/katrain",
"score": 2
} |
#### File: gui/widgets/graph.py
```python
import math
import threading
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.clock import Clock
from kivy.properties import BooleanProperty, ListProperty, NumericProperty, StringProperty
from kivy.uix.widget import Widget
from kivymd.app import MDApp
from katrain.gui.theme import Theme
class Graph(Widget):
marker_font_size = NumericProperty(0)
background_image = StringProperty(Theme.GRAPH_TEXTURE)
background_color = ListProperty([1, 1, 1, 1])
highlighted_index = NumericProperty(0)
nodes = ListProperty([])
hidden = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._lock = threading.Lock()
self.bind(pos=self.update_graph, size=self.update_graph)
self.redraw_trigger = Clock.create_trigger(self.update_graph, 0.1)
def initialize_from_game(self, root):
self.nodes = [root]
node = root
while node.children:
node = node.ordered_children[0]
self.nodes.append(node)
self.highlighted_index = 0
self.redraw_trigger()
def update_graph(self, *args):
pass
def update_value(self, node):
with self._lock:
self.highlighted_index = index = node.depth
self.nodes.extend([None] * max(0, index - (len(self.nodes) - 1)))
self.nodes[index] = node
if index > 1 and node.parent: # sometimes there are gaps
backfill, bfnode = index - 1, node.parent
while bfnode is not None and self.nodes[backfill] != bfnode:
self.nodes[backfill] = bfnode
backfill -= 1
bfnode = bfnode.parent
if index + 1 < len(self.nodes) and (
node is None or not node.children or self.nodes[index + 1] != node.ordered_children[0]
):
self.nodes = self.nodes[: index + 1] # on branch switching, don't show history from other branch
if index == len(self.nodes) - 1: # possibly just switched branch or the line above triggered
while node.children: # add children back
node = node.ordered_children[0]
self.nodes.append(node)
self.redraw_trigger()
class ScoreGraph(Graph):
show_score = BooleanProperty(True)
show_winrate = BooleanProperty(True)
score_points = ListProperty([])
winrate_points = ListProperty([])
score_dot_pos = ListProperty([0, 0])
winrate_dot_pos = ListProperty([0, 0])
highlight_size = NumericProperty(dp(6))
score_scale = NumericProperty(5)
winrate_scale = NumericProperty(5)
navigate_move = ListProperty([None, 0, 0, 0])
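    # navigate_move holds [node, x position, score dot y, winrate dot y] for the point being pressed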
def on_touch_down(self, touch):
if self.collide_point(*touch.pos) and "scroll" not in getattr(touch, "button", ""):
ix, _ = min(enumerate(self.score_points[::2]), key=lambda ix_v: abs(ix_v[1] - touch.x))
self.navigate_move = [
self.nodes[ix],
self.score_points[2 * ix],
self.score_points[2 * ix + 1],
self.winrate_points[2 * ix + 1],
]
else:
self.navigate_move = [None, 0, 0, 0]
def on_touch_move(self, touch):
return self.on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(*touch.pos) and self.navigate_move[0] and "scroll" not in getattr(touch, "button", ""):
katrain = MDApp.get_running_app().gui
if katrain and katrain.game:
katrain.game.set_current_node(self.navigate_move[0])
katrain.update_state()
self.navigate_move = [None, 0, 0, 0]
def show_graphs(self, keys):
self.show_score = keys["score"]
self.show_winrate = keys["winrate"]
def update_graph(self, *args):
nodes = self.nodes
if nodes:
score_values = [n.score if n and n.score else math.nan for n in nodes]
# score_values=[]
# for n in nodes:
# if n and n.score:
# score_values.append(n.score)
# else:
# score_values.append(math.nan)
score_nn_values = [n.score for n in nodes if n and n.score]
score_values_range = min(score_nn_values or [0]), max(score_nn_values or [0])
winrate_values = [(n.winrate - 0.5) * 100 if n and n.winrate else math.nan for n in nodes]
winrate_nn_values = [(n.winrate - 0.5) * 100 for n in nodes if n and n.winrate]
winrate_values_range = min(winrate_nn_values or [0]), max(winrate_nn_values or [0])
score_granularity = 5
winrate_granularity = 10
self.score_scale = (
max(math.ceil(max(-score_values_range[0], score_values_range[1]) / score_granularity), 1)
* score_granularity
)
self.winrate_scale = (
max(math.ceil(max(-winrate_values_range[0], winrate_values_range[1]) / winrate_granularity), 1)
* winrate_granularity
)
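            # e.g. scores spanning [-12.3, +7.9] give max(ceil(12.3 / 5), 1) * 5 = 15, so the
            # y-axis tick labels read B+15 at the top and W+15 at the bottom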
xscale = self.width / max(len(score_values) - 1, 15)
available_height = self.height
score_line_points = [
[self.x + i * xscale, self.y + self.height / 2 + available_height / 2 * (val / self.score_scale)]
for i, val in enumerate(score_values)
]
winrate_line_points = [
[self.x + i * xscale, self.y + self.height / 2 + available_height / 2 * (val / self.winrate_scale)]
for i, val in enumerate(winrate_values)
]
self.score_points = sum(score_line_points, [])
self.winrate_points = sum(winrate_line_points, [])
if self.highlighted_index is not None:
self.highlighted_index = min(self.highlighted_index, len(score_values) - 1)
score_dot_point = score_line_points[self.highlighted_index]
winrate_dot_point = winrate_line_points[self.highlighted_index]
if math.isnan(score_dot_point[1]):
score_dot_point[1] = (
self.y
+ self.height / 2
+ available_height / 2 * ((score_nn_values or [0])[-1] / self.score_scale)
)
self.score_dot_pos = score_dot_point
if math.isnan(winrate_dot_point[1]):
winrate_dot_point[1] = (
self.y
+ self.height / 2
+ available_height / 2 * ((winrate_nn_values or [0])[-1] / self.winrate_scale)
)
self.winrate_dot_pos = winrate_dot_point
Builder.load_string(
"""
#:import Theme katrain.gui.theme.Theme
<Graph>:
background_color: Theme.BOX_BACKGROUND_COLOR
marker_font_size: 0.1 * self.height
canvas.before:
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
Color:
rgba: [1,1,1,1]
Rectangle:
pos: self.pos
size: self.size
source: root.background_image
<ScoreGraph>:
canvas:
Color:
rgba: Theme.SCORE_COLOR
Line:
points: root.score_points if root.show_score else []
width: dp(1.1)
Color:
rgba: Theme.WINRATE_COLOR
Line:
points: root.winrate_points if root.show_winrate else []
width: dp(1.1)
Color:
rgba: [0.5,0.5,0.5,1] if root.navigate_move[0] else [0,0,0,0]
Line:
points: root.navigate_move[1], root.y, root.navigate_move[1], root.y+root.height
width: 1
Color:
rgba: Theme.GRAPH_DOT_COLOR
Ellipse:
id: score_dot
pos: [c - self.highlight_size / 2 for c in (self.score_dot_pos if not self.navigate_move[0] else [self.navigate_move[1],self.navigate_move[2]] ) ]
size: (self.highlight_size,self.highlight_size) if root.show_score else (0.0001,0.0001)
Color:
rgba: Theme.GRAPH_DOT_COLOR
Ellipse:
id: winrate_dot
pos: [c - self.highlight_size / 2 for c in (self.winrate_dot_pos if not self.navigate_move[0] else [self.navigate_move[1],self.navigate_move[3]] ) ]
size: (self.highlight_size,self.highlight_size) if root.show_winrate else (0.0001,0.0001)
# score ticks
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.pos[1]+root.height - self.font_size - 1
text: 'B+{}'.format(root.score_scale)
opacity: int(root.show_score)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.y + root.height*0.5 - self.height/2 + 2
text: i18n._('Jigo')
opacity: int(root.show_score)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.SCORE_MARKER_COLOR
pos: root.x + root.width - self.width-1, root.pos[1]
text: 'W+' + str(int(root.score_scale))
opacity: int(root.show_score)
# wr ticks
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.WINRATE_MARKER_COLOR
pos: root.pos[0]+1, root.pos[1] + root.height - self.font_size - 1
text: "{}%".format(50 + root.winrate_scale)
opacity: int(root.show_winrate)
GraphMarkerLabel:
font_size: root.marker_font_size
color: Theme.WINRATE_MARKER_COLOR
pos:root.pos[0]+1, root.pos[1]
text: "{}%".format(50 - root.winrate_scale)
opacity: int(root.show_winrate)
"""
)
``` |
{
"source": "Jimmycheong/flask-celery-demo",
"score": 2
} |
#### File: Jimmycheong/flask-celery-demo/main.py
```python
import time
from flask import (
Flask,
request,
url_for,
redirect,
make_response,
render_template,
jsonify
)
from tasks import my_background_task
from celery import Celery
app = Flask(__name__)
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@app.route("/", methods=['GET'])
def index():
resp = make_response(render_template('index.html'), 200)
return resp
@app.errorhandler(404)
def not_found(error):
return render_template('error.html'), 404
@app.route("/task", methods=['POST'])
def add_task():
task = my_background_task.delay("Puppies")
print("Added: %s to the queue" % task.id)
return task.id
@app.route("/status/<task_id>", methods=['GET'])
def task_status(task_id):
task = my_background_task.AsyncResult(task_id)
if task.state == "PENDING":
response = "PENDING"
elif task.state == "FAILURE":
response = "FAILURE"
elif task.state == "SUCCESS":
response = "SUCCESS"
else:
response = "Something went wrong..."
return jsonify(response)
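# Hedged sketch of the companion tasks.py this app imports from (not included in this dump;
# the decorator and body are assumptions -- only the name my_background_task is given above):
#   from celery import Celery
#   celery = Celery("tasks", broker="redis://localhost:6379/0", backend="redis://localhost:6379/0")
#   @celery.task
#   def my_background_task(name):
#       ...
# A worker would typically be started with: celery -A tasks worker --loglevel=info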
``` |
{
"source": "Jimmycheong/React-Router-boilerplate",
"score": 3
} |
#### File: Jimmycheong/React-Router-boilerplate/main.py
```python
import time
from flask import (
    Flask,
    request,
    url_for,
    redirect,
    make_response,
    render_template,
    jsonify
)
app = Flask(__name__)
@app.route("/", methods=['GET'])
def index():
resp = make_response(render_template('index.html'), 200)
return resp
@app.errorhandler(404)
def not_found(error):
return redirect('/')
# return render_template('error.html'), 404
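    # note: redirecting unknown paths back to '/' lets the client-side router (React Router)
    # resolve them once index.html has been served again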
``` |
{
"source": "JimmyCHEVALLIER/socialgraphs2018",
"score": 3
} |
#### File: socialgraphs2018/ParseData/SocialbladeData.py
```python
import requests
from bs4 import BeautifulSoup
# Get and parse specific list in socialblade
# return an iterator (zip)
def getSocialBladeList(link):
res = {}
f = requests.get(link)
    soup = BeautifulSoup(f.text, 'html.parser')  # requests' .text is already a str
parsedTable = list(filter(lambda x: x != '\n',[ line for line in soup.find("div", {"class": "content-module-wide"})]))
parsedTable = [line.text for line in parsedTable]
for celebrity in zip(*[iter(parsedTable)]*6):
res[celebrity[2]] = {"rank" : celebrity[0], "tweetsCount": celebrity[3], "followersCount": celebrity[4], "friendCount": celebrity[5]}
return res
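# Illustrative shape of the returned mapping (values are placeholders, not real Socialblade data;
# the keys are whatever account names the parsed table puts in the third column):
#   {'SomeAccount': {'rank': '1', 'tweetsCount': '15,000',
#                    'followersCount': '100,000,000', 'friendCount': '600,000'}}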
#link = "https://socialblade.com/twitter/top/500/followers"
#res = getSocialBladeList(link)
#print(res)
``` |
{
"source": "jimmycode/colabcode",
"score": 2
} |
#### File: colabcode/colabcode/code.py
```python
import os
import subprocess
from pyngrok import ngrok
try:
from google.colab import drive
colab_env = True
except ImportError:
colab_env = False
EXTENSIONS = ["ms-python.python", "ms-toolsai.jupyter"]
class ColabCode:
def __init__(self, workspace, port=10000, password=<PASSWORD>, authtoken=None, mount_drive=False, user_data_dir=None, extensions_dir=None):
self.workspace = workspace
self.port = port
self.password = password
self.authtoken = authtoken
self.user_data_dir = user_data_dir
self.extensions_dir = extensions_dir
self._mount = mount_drive
self._install_code()
self._install_extensions()
self._start_server()
self._run_code()
def _install_code(self):
subprocess.run(
["wget", "https://code-server.dev/install.sh"], stdout=subprocess.PIPE
)
subprocess.run(["sh", "install.sh"], stdout=subprocess.PIPE)
def _install_extensions(self):
for ext in EXTENSIONS:
subprocess.run(["code-server", "--install-extension", f"{ext}"])
def _start_server(self):
if self.authtoken:
ngrok.set_auth_token(self.authtoken)
active_tunnels = ngrok.get_tunnels()
for tunnel in active_tunnels:
public_url = tunnel.public_url
ngrok.disconnect(public_url)
url = ngrok.connect(addr=self.port, options={"bind_tls": True})
print(f"Code Server can be accessed on: {url}")
def _run_code(self):
os.system(f"fuser -n tcp -k {self.port}")
if self._mount and colab_env:
drive.mount("/content/drive")
prefix, options = [], [f"--port {self.port}", "--disable-telemetry"]
if self.password:
prefix.append(f"PASSWORD={self.password}")
else:
options.append("--auth none")
if self.user_data_dir:
options.append(f"--user-data-dir {self.user_data_dir}")
if self.extensions_dir:
options.append(f"--extensions-dir {self.extensions_dir}")
prefix_str = " ".join(prefix)
options_str = " ".join(options)
code_cmd = f"{prefix_str} code-server {options_str} {self.workspace}"
print(code_cmd)
with subprocess.Popen(
[code_cmd],
shell=True,
stdout=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc:
for line in proc.stdout:
print(line, end="")
``` |