| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# Copyright (c) 2015, <NAME> and <NAME>.
# License: GNU-GPL Style.
# How to cite GBpy:
# Banadaki, <NAME>. & <NAME>. "An efficient algorithm for computing the primitive
# bases of a general lattice plane",
# Journal of Applied Crystallography 48, 585-588 (2015). doi:10.1107/S1600576715004446
import numpy as np
from . import integer_manipulations as int_man
from . import misorient_fz as mis_fz
from . import tools as trans
import numpy.linalg as nla
def proper_ptgrp(cryst_ptgrp):
"""
Returns the proper point group corresponding to a crystallographic point
group
Parameters
----------------
cryst_ptgrp: str
Crystallographic point group in Schoenflies notation
Returns
----------
proper_ptgrp: str
Proper point group in Schoenflies notation
"""
if cryst_ptgrp in ['D3', 'D3d']:
proper_ptgrp = 'D3'
if cryst_ptgrp in ['D4', 'D4h']:
proper_ptgrp = 'D4'
if cryst_ptgrp in ['D6', 'D6h']:
proper_ptgrp = 'D6'
if cryst_ptgrp in ['O', 'Oh']:
proper_ptgrp = 'O'
# prop_grps = ['C1', 'C2', 'C3', 'C4', 'C6', 'D2', 'D3', 'D4', 'D6',
# 'T', 'O']
# laue_grps = ['Ci', 'C2h', 'C3i', 'C4h', 'C6h', 'D2h', 'D3d', 'D4h', 'D6h',
# 'Th', 'Oh']
# if cryst_ptgrp in laue_grps:
# proper_ptgrp =
# elif cryst_ptgrp in prop_grps:
# proper_ptgrp = cryst_ptgrp
return proper_ptgrp
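# Illustrative usage sketch (the _example_* helper is an addition, not part of
# the original module): Laue groups from the commented list above map onto
# their proper point groups.
def _example_proper_ptgrp():
    assert proper_ptgrp('D3d') == 'D3'
    assert proper_ptgrp('D6h') == 'D6'
    assert proper_ptgrp('Oh') == 'O'
    return proper_ptgrp('D4h')  # -> 'D4'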
def largest_odd_factor(var_arr):
"""
Function that computes the largest odd factors of an array of integers
Parameters
-----------------
var_arr: numpy.array
Array of integers whose largest odd factors need to be computed
Returns
------------
odd_d: numpy.array
Array of largest odd factors of each integer in var_arr
"""
if var_arr.ndim == 1:
odd_d = np.empty(np.shape(var_arr))
odd_d[:] = np.NaN
ind1 = np.where((np.remainder(var_arr, 2) != 0) | (var_arr == 0))[0]
if np.size(ind1) != 0:
odd_d[ind1] = var_arr[ind1]
ind2 = np.where((np.remainder(var_arr, 2) == 0) & (var_arr != 0))[0]
if np.size(ind2) != 0:
odd_d[ind2] = largest_odd_factor(var_arr[ind2] / 2.0)
return odd_d
else:
raise Exception('Wrong Input Type')
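# Illustrative check (the _example_* helper is an addition, not part of the
# original module): the largest odd factor of 12 is 3, of 8 is 1, of 7 is 7,
# and 0 is passed through unchanged.
def _example_largest_odd_factor():
    result = largest_odd_factor(np.array([12, 8, 7, 0]))
    assert np.array_equal(result, np.array([3.0, 1.0, 7.0, 0.0]))
    return result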
def compute_inp_params(lattice, sig_type):
# Leila: for the tolerance value for D6 I chose 1e-2
# to get the values of mu and nu in Table 2 of Grimmer's paper.
"""
Computes tau and kmax, the parameters needed to enumerate the possible
integer quadruple combinations
Parameters
----------------
lattice: class
Attributes of the underlying lattice class
sig_type: {'common', 'specific'}
Returns
-----------
tau: float
tau is a rational number :math:`= \\frac{\\nu}{\\mu}`
tau is equal to (a/c)^2
kmax: float
kmax is an integer that depends on :math:`\\mu \\ , \\nu`
for hcp: kmax equals :math:`F/\\Sigma`. kmax is always a divisor of :math:`12\\mu\\nu`.
:math:`F/\\Sigma` is a divisor of :math:`6\\mu\\nu` if :math:`\\nu` is even and a divisor of
:math:`3\\mu\\nu` if :math:`\\nu` is a multiple of 4.
"""
lat_params = lattice.lat_params
cryst_ptgrp = proper_ptgrp(lattice.cryst_ptgrp)
if cryst_ptgrp == 'D3':
c_alpha = np.cos(lat_params['alpha'])
tau = c_alpha / (1 + 2 * c_alpha)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
rho = mu - 3 * nu
kmax = 4 * mu * rho
elif sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D4':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
kmax = 4 * mu * nu
if sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D6':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-2)
if np.remainder(nu, 2) == 0:
if np.remainder(nu, 4) == 0:
kmax = 3 * mu * nu
else:
kmax = 6 * mu * nu
else:
kmax = 12 * mu * nu
if sig_type == 'common':
kmax = []
if cryst_ptgrp == 'O':
tau = 1
kmax = []
return tau, kmax
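# Minimal sketch (an addition, not part of the original module) restating the
# D6 kmax rule documented above: kmax = 12*mu*nu for odd nu, 6*mu*nu when nu
# is even but not divisible by 4, and 3*mu*nu when nu is divisible by 4.
def _example_d6_kmax(mu, nu):
    if nu % 2 != 0:
        return 12 * mu * nu
    return 3 * mu * nu if nu % 4 == 0 else 6 * mu * nu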
def mesh_muvw(cryst_ptgrp, sigma, sig_type, *args):
# Leila note, deleted the star and lines 208-210
# mu = args[0]['mu']
# nu = args[0]['nu']
# kmax = args[0]['kmax']
#delete lines 228-235
# uncomment lines 236-245
"""
Computes the maximum allowed values of [m, U, V, W] and generates an array
of integer quadruples
Parameters
----------------
cryst_ptgrp: str
Proper point group in Schoenflies notation
sigma: int
Sigma number
sig_type: {'common', 'specific'}
args[0]: dict
keys: 'nu', 'mu', 'kmax'
Returns
-----------
Integer quadruple numpy array
"""
if sig_type == 'common':
if cryst_ptgrp == 'D3':
tu1 = np.ceil(2 * np.sqrt(sigma))
m_max = tu1
u_max = tu1
v_max = tu1
w_max = tu1
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [-v_max, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D6':
tu1 = np.ceil(np.sqrt(sigma / 3.0))
tu2 = np.ceil(np.sqrt(sigma))
m_max = tu1
u_max = tu2
v_max = tu2
w_max = tu2
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D4' or cryst_ptgrp == 'O':
t1 = np.ceil(np.sqrt(sigma))
m_max = t1
u_max = t1
v_max = t1
w_max = t1
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
elif sig_type == 'specific':
mu = args[0]['mu']
nu = args[0]['nu']
kmax = args[0]['kmax']
if cryst_ptgrp == 'D3':
t1 = np.ceil(np.sqrt(sigma * kmax / (mu)))
t2 = np.ceil(np.sqrt(sigma * kmax / (mu - 2 * nu)))
m_max = t1
u_max = t2
v_max = t2
w_max = t2
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [-v_max, v_max]
wlims = [-w_max, w_max]
if cryst_ptgrp == 'D6':
m_max = np.ceil(np.sqrt(sigma * kmax / (3.0 * mu)))
u_max = np.ceil(np.sqrt(sigma * kmax / (nu)))
v_max = np.ceil(np.sqrt(sigma * kmax / (nu)))
w_max = np.ceil(np.sqrt(sigma * kmax / (mu)))
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D4':
t1 = np.sqrt(sigma * kmax)
m_max = np.ceil(t1 / np.sqrt(mu))
u_max = np.ceil(t1 / np.sqrt(nu))
v_max = np.ceil(t1 / np.sqrt(nu))
w_max = np.ceil(t1 / np.sqrt(mu))
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
else:
raise Exception('sig_type: wrong input type')
m_var = np.arange(mlims[0], mlims[1] + 1, 1)
u_var = np.arange(ulims[0], ulims[1] + 1, 1)
v_var = np.arange(vlims[0], vlims[1] + 1, 1)
w_var = np.arange(wlims[0], wlims[1] + 1, 1)
[x1, x2, x3, x4] = np.meshgrid(m_var, u_var, v_var, w_var)
x1 = x1.ravel()
x2 = x2.ravel()
x3 = x3.ravel()
x4 = x4.ravel()
return np.vstack((x1, x2, x3, x4))
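# Illustrative usage sketch (the _example_* helper is an addition, not part of
# the original module): for 'common' sigma values only the point group and
# sigma are needed; 'specific' calls additionally pass a dict with 'mu', 'nu'
# and 'kmax' as returned by compute_inp_params.
def _example_mesh_muvw():
    quads = mesh_muvw('O', 5, 'common')
    return quads.shape  # (4, n_candidate_quadruples)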
import numpy as np
import os
import pickle
import logging
from flarestack.shared import acceptance_path, get_base_sob_plot_dir
from flarestack.core.energy_pdf import PowerLaw
from flarestack.utils.make_SoB_splines import make_plot
logger = logging.getLogger(__name__)
sin_dec_range = np.linspace(-1, 1, 101)
## worker.py -- evaluation code
##
## Copyright (C) 2017, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import matplotlib
from scipy.stats import entropy
from numpy.linalg import norm
from matplotlib.ticker import FuncFormatter
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.activations import softmax
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.layers import Lambda
from RsNet.tf_config import CHANNELS_LAST
from utils import load_obj, load_model_idx, load_cache, save_cache
matplotlib.use('Agg')
class AEDetector:
def __init__(self, path, p=1, verbose=1):
"""
Error based detector.
Marks examples for filtering decisions.
path: Path to the autoencoder used.
p: Distance measure to use.
"""
self.model = load_model(path)
if verbose:
self.model.summary()
self.path = path
self.p = p
def mark(self, X, data_format=CHANNELS_LAST):
if self.model.inputs[0].shape[1:] != np.shape(X)[1:]:
if data_format == CHANNELS_LAST:
X = np.transpose(X, [0, 3, 1, 2])
"""
This file contains class definition and necessary tools for constructing
and evaluating all symmetry groups.
"""
import numpy as np
from itertools import product as itertools_product
from enum import Enum
from hikari.symmetry import SymmOp
from hikari.utility.list_tools import find_best
def _unpack_group_dictionary_from_json(json_dict):
"""Development function used to get PG and SG from csv to pickle it later"""
group_dict = {}
for json_key, json_group in json_dict.items():
g_name = json_group["H-M_short"]
g_number = json_group["number"]
g_gens = [SymmOp.from_code(g) for g in json_group["generators"]]
g_ops = [SymmOp.from_code(o) for o in json_group["operations"]]
g = Group.create_manually(generators=g_gens, operations=g_ops)
g.name = json_group["H-M_short"]
g.number = abs(g_number)
group_dict[json_key] = g
if g_number > 0:
group_dict[g_name] = g
group_dict[g_number] = g
return group_dict
class Group:
"""
Base immutable class containing information about symmetry groups.
It stores information for point and space groups and, among others,
allows for iteration over its elements from `hikari.symmetry.SymmOp`.
"""
class System(Enum):
"""Enumerator class with information about associated crystal system"""
triclinic = 0
monoclinic = 1
orthorhombic = 2
trigonal = 3
tetragonal = 4
cubic = 5
hexagonal = 6
@property
def directions(self):
_a = np.array((1, 0, 0))
_b = np.array((0, 1, 0))
_c = np.array((0, 0, 1))
_ab = np.array((1 / np.sqrt(2), 1 / np.sqrt(2), 0))
_abc = np.array((1 / np.sqrt(3), 1 / np.sqrt(3), 1 / np.sqrt(3)))
return [(), (_b, ), (_a, _b, _c), (_c, _a, _ab),
(_c, _a, _ab), (_c, _abc, _ab), (_c, _a, _ab)][self.value]
BRAVAIS_PRIORITY_RULES = 'A+B+C=F>R>I>C>B>A>H>P'
AXIS_PRIORITY_RULES = '6>61>62>63>64>65>-6>4>41>42>43>-4>-3>3>31>32>2>21'
PLANE_PRIORITY_RULES = 'm>a+b=e>a+c=e>b+c=e>a>b>c>n>d'
def __init__(self, *generators):
"""
:param generators: List of operations necessary to construct whole group
:type generators: List[SymmOp]
"""
generator_list = []
for gen in generators:
if gen % 1 not in generator_list:
generator_list.append(gen % 1)
def _find_new_product(ops):
if len(ops) > 200:
raise ValueError('Generated group order exceeds size of 200')
new = list({o1 * o2 % 1 for o1, o2 in itertools_product(ops, ops)})
new = set(ops).union(new)
return _find_new_product(new) if len(new) > len(ops) else ops
self.__generators = tuple(generator_list)
self.__operations = tuple(_find_new_product(generator_list))
self.name = self.auto_generated_name
self.number = 0
@classmethod
def create_manually(cls, generators, operations):
"""
Generate group using already complete list of generators and operators.
:param generators: A complete list of group generators
:type generators: List[SymmOp]
:param operations: A complete list of group operations
:type operations: List[SymmOp]
:return:
:rtype:
"""
new_group = cls()
new_group.__generators = generators
new_group.__operations = operations
return new_group
def __eq__(self, other):
return all([o in self.operations for o in other.operations])\
and all([o in other.operations for o in self.operations])
def __lt__(self, other):
return len(self.operations) < len(other.operations) and \
all([o in other.operations for o in self.operations])
def __gt__(self, other):
return other.__lt__(self)
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or other.__lt__(self)
def __repr__(self):
return 'Group('+',\n '.join([repr(g) for g in self.generators])+')'
def __str__(self):
return f'{self.name} (#{abs(self.number)}{"*" if self.number<0 else""})'
def __hash__(self):
return sum(hash(o) for o in self.operations)
@property
def auto_generated_name(self):
"""Name of the group generated automatically. Use only as approx."""
# TODO: enantiomorphs like P41 / P43 not recognised
# TODO: 'e' found always whenever 'a' and 'b' present
# TODO: some mistakes occur in trigonal crystal system (see SG149+)
name = self.centering_symbol
for d in self.system.directions:
ops = [o.name.partition(':')[0] for o in self.operations
if o.orientation is not None and
np.isclose(abs(np.dot(np.abs(o.orientation), np.abs(d))), 1)]
best_axis = find_best(ops, self.AXIS_PRIORITY_RULES)
best_plane = find_best(ops, self.PLANE_PRIORITY_RULES)
sep = '/' if len(best_axis) > 0 and len(best_plane) > 0 else ''
name += ' ' + best_axis + sep + best_plane
return name.strip()
@property
def centering_symbol(self):
tl = ([o.name for o in self.operations if o.typ is o.Type.translation])
tl.append('H' if self.system is self.System.trigonal else 'P')
return find_best(tl, self.BRAVAIS_PRIORITY_RULES)
@property
def generators(self):
return self.__generators
@property
def operations(self):
return self.__operations
@property
def order(self):
return len(self.__operations)
@property
def is_centrosymmetric(self):
"""
:return: True if group has centre of symmetry; False otherwise.
:rtype: bool
"""
return any(np.isclose(op.trace, -3) for op in self.operations)
@property
def is_enantiogenic(self):
"""
:return: True if any operation in the group has a negative determinant.
:rtype: bool
"""
return any(op.det < 0 for op in self.operations)
@property
def is_sohncke(self):
"""
:return: True if the determinants of all operations in the group are positive.
:rtype: bool
"""
return all(op.det > 0 for op in self.operations)
@property
def is_achiral(self): # TODO See dictionary.iucr.org/Chiral_space_group
return NotImplemented # TODO and dx.doi.org/10.1524/zkri.2006.221.1.1
@property
def is_chiral(self): # TODO See dictionary.iucr.org/Chiral_space_group
return NotImplemented # TODO and dx.doi.org/10.1524/zkri.2006.221.1.1
@property
def is_symmorphic(self):
zero_vector = np.array([0, 0, 0])
trans = [o for o in self.operations if o.typ is o.Type.translation]
zero_tl = [o for o in self.operations if np.allclose(o.tl, zero_vector)]
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.toolkits import met_data_processing as mt
class SimpleMetProcessing(unittest.TestCase):
def setUp(self):
pass
def test_compute_wind_direction(self):
u = [0, -1, -1, -1, 0, 1, 1, 1] # u-vector of wind
v = [-1, -1, 0, 1, 1, 1, 0, -1] # v-vector of wind
wd_ans = [0, 45, 90, 135, 180, 225, 270, 315] # Expected result
y = mt.compute_wind_direction(u, v) # Test result
nptest.assert_array_equal(y, wd_ans)
def test_compute_u_v_components(self):
wind_speed = np.array([1, 1, 1, 1, 1, 1, 1, 1]) # Wind speed
wind_direction = np.array([0, 45, 90, 135, 180, 225, 270, 315]) # Wind direction
u, v = mt.compute_u_v_components(wind_speed, wind_direction) # Test result
sqrt_2 = 1 / np.sqrt(2) # Handy constant
u_ans = np.array([0, -sqrt_2, -1, -sqrt_2, 0, sqrt_2, 1, sqrt_2]) # Expected result for 'u'
v_ans = np.array([-1, -sqrt_2, 0, sqrt_2, 1, sqrt_2, 0, -sqrt_2]) # Expected result for 'v'
nptest.assert_array_almost_equal(u, u_ans, decimal=5)
nptest.assert_array_almost_equal(v, v_ans, decimal=5)
def test_compute_air_density(self):
# Test data frame with pressure and temperature data
temp = np.arange(280, 300, 5)
pres = np.arange(90000, 110000, 5000)
rho = mt.compute_air_density(temp, pres) # Test result
rho_ans = np.array([1.11744, 1.1581, 1.19706, 1.23427]) # Expected result
nptest.assert_array_almost_equal(rho, rho_ans, decimal=5)
def test_pressure_vertical_extrapolation(self):
# Define test data
p_samp = np.array([1e6, 9.5e5]) # pressure at lower level
z0_samp = np.array([0, 30]) # lower level height
z1_samp = np.array([100, 100]) # extrapolation level height
t_samp = np.array([290, 300]) # average temperature in layer between z0 and z1
p1 = mt.pressure_vertical_extrapolation(p_samp, t_samp, z0_samp, z1_samp) # Test result
p1_ans = np.array([988288.905, 942457.391])
# Copyright2019 <NAME> <EMAIL>
import random
import numpy as np
import kmeans
import csv
from copy import deepcopy
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def read_file(path):
"""
:param path: the location of movies.csv
:return: [0] is features, [1] is set X
"""
f = open(path, 'r')
cf = csv.reader(f)
X = []
for row in cf:
X.append(row)
features = deepcopy(X[0])
X = X[1:]
return features, X
def the_kind_of_feature(feature):
"""
:param feature: a feature name taken from features[i]
:return: one of three kinds: 'number', 'vector' or 'string'.
"""
number_kind = ['budget', 'id', 'popularity',
'revenue', 'release_date', 'runtime',
'vote_average', 'vote_count']
vector_kind = ['genres', 'keywords', 'original_language',
'production_companies', 'production_countries',
'spoken_languages', 'status']
if feature in number_kind:
return 'number'
elif feature in vector_kind:
return 'vector'
else:
return 'string'
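# Illustrative usage sketch (the _example_* helper is an addition, not part of
# the original script): feature names are routed to one of the three handling
# branches used by fix() below.
def _example_feature_kinds():
    assert the_kind_of_feature('budget') == 'number'
    assert the_kind_of_feature('genres') == 'vector'
    assert the_kind_of_feature('title') == 'string'
    return True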
def fix(features, X):
"""
:param features: list of feature names returned by read_file
:param X: list of raw data rows returned by read_file
:return: fixed X, with number, vector and string features normalized
"""
fixed_X = deepcopy(X)
for i in range(len(fixed_X)):
if not X[i]:
continue
for j in range(len(fixed_X[i])):
if the_kind_of_feature(features[j]) == 'string':
fixed_X[i][j] = [0]
# fixed_X[i][j] = np.array(fixed_X[i][j])
# fix json vector features
json_vec = []
jsons = []
json_vec.append(features.index('genres'))
json_vec.append(features.index('keywords'))
json_vec.append(features.index('production_companies'))
json_vec.append(features.index('production_countries'))
json_vec.append(features.index('spoken_languages'))
for i in range(len(json_vec)):
jsons.append(features[json_vec[i]])
for json in json_vec:
all_genres = []
for i in range(len(X)):
genre = eval(X[i][json])
if not genre:
continue
for item in genre:
# if not item['name'] in all_genres:
all_genres.append(item['name'])
List_set = set(all_genres)
c = []
for item in List_set:
if all_genres.count(item) > 100:
c.append(item)
if not c:
c = [0]
all_genres = c
Xs_genre = []
for i in range(len(X)):
x_genre = [0] * len(all_genres)
genre = eval(X[i][json])
if not genre:
Xs_genre.append(x_genre)
continue
for item in genre:
if item['name'] in all_genres:
index = all_genres.index(item['name'])
x_genre[index] = 1
Xs_genre.append(x_genre)
for i in range(len(fixed_X)):
fixed_X[i][json] = Xs_genre[i]
# fixed_X[i][json] = np.array(fixed_X[i][json])
# fix other vector features
other_vec = []
for feature in features:
if the_kind_of_feature(feature) == 'vector':
if feature not in jsons:
other_vec.append(features.index(feature))
for vec in other_vec:
all_vec = []
for i in range(len(X)):
if not X[i][vec]:
continue
if X[i][vec] not in all_vec:
all_vec.append(X[i][vec])
Xs_vec = []
for i in range(len(X)):
x_vec = [0] * len(all_vec)
if not X[i][vec]:
Xs_vec.append(x_vec)
continue
if X[i][vec] in all_vec:
index = all_vec.index(X[i][vec])
x_vec[index] = 1
Xs_vec.append(x_vec)
for i in range(len(fixed_X)):
fixed_X[i][vec] = Xs_vec[i]
# fixed_X[i][vec] = np.array(fixed_X[i][vec])
# fix number vector features
num_vecs = [feature for feature in features if (the_kind_of_feature(feature) == 'number')]
num_vecs_index = []
for i in range(len(num_vecs)):
num_vecs_index.append(features.index(num_vecs[i]))
# fix release date
index_date = features.index('release_date')
for x in fixed_X:
if x[index_date]:
date = x[index_date].split('-')
year, month, day = int(date[0]), int(date[1]), int(date[2])
absdate = 365 * year + 30 * month + day
x[index_date] = absdate
for i in num_vecs_index:
for x in fixed_X:
if x[i]:
x[i] = float(x[i])
else:
x[i] = 0
for i in num_vecs_index:
numbers = []
for x in fixed_X:
f = deepcopy(x[i])
numbers.append(float(f))
left, right = min(numbers), max(numbers)
for x in fixed_X:
f = deepcopy(x[i])
x[i] = [(float(f) - left) / (right - left)]
# x[i] = np.array(x[i])
return fixed_X
def count_len(feature):
"""
:param feature: list of values for a single feature
:return: number of distinct values in the feature
"""
count = 0
already = []
for i in range(len(feature)):
if feature[i] not in already:
count += 1
vector_len = count
return vector_len
def get_top_250():
path = './movies.csv'
movies = pd.read_csv(path)
movies['total_votes'] = movies['vote_average'] * movies['vote_count']
movies.sort_values('total_votes', ascending=False, inplace=True)
Top250 = movies.head(250)
Top250.to_csv('./movies_250.csv')
def pca_2d(K, dim):
newpath = './movies_250.csv'
# global features
features, X = read_file(newpath)
fixed_X = fix(features,X)
nokeywords = deepcopy(fixed_X)
for x in nokeywords:
for i in range(21):
if i in [0,2,4,5,6,10,11,15,16]:
x[i] = [0]
# print(nokeywords[0],'\n', nokeywords[1])
k = K
x_pca = deepcopy(nokeywords)
for n in range(len(x_pca)):
for f in range(len(x_pca[n])):
# if f in []
x_pca[n][f] = sum([nokeywords[n][f][_] for _ in range(len(nokeywords[n][f]))])
pca = PCA(n_components=dim)
pca_fixed_x = pca.fit_transform(x_pca)
for x in pca_fixed_x:
for f in x:
f = [f]
assignn = assign_250(K)
diction = {}
for i in range(K):
diction[i] = []
for i in range(len(pca_fixed_x)):
a = pca_fixed_x[i]
diction[assignn[i]].append(a)
for i in range(K):
plt.scatter(np.array(diction[i])[:, 0], np.array(diction[i])[:, 1])
#!/usr/bin/env python
# Code to run observation simulation for binary:
# 1. run ETC to get desired electron count
# 2. simulate AMI data of a binary with desired separation and flux ratio
#
# 2016-02-15 <NAME>
# based on ETC and binary simulation codes from <NAME> and <NAME>
#
# 2016-09-27 <EMAIL>
# Create calibrator observation with same readout as target, same total flux
# Places calibrator at location of first maximum of target (in np.where(tgt==tgt.max()) list)
# rework driver_scene to create cal and tgt data cubes
# rework make_scene to match reworked make_binary
# (utils create_ramp & create_integration changed,
# new up-the-ramp handling to match binary simulation)
# name cal and tgt data cubes c_* and t_*
import sys, os, argparse
import numpy as np
from astropy.io import fits
import pyami.simcode.make_scene as scenesim
import importlib
importlib.reload(scenesim)
import pyami.simcode.utils as U
importlib.reload(U)
def main(argv):
parser = argparse.ArgumentParser(description='''
This script simulates the observation of a sky scene with a (Webb)PSF supplied by the user.
WARNING - WE ASSUME A SINGLE DOMINANT POINT SOURCE IN THE SKY DATA:
We match the countrate to that of the dominant point source in skydata.
Thus if the skydata point source accounts for 0.99 of of target flux,
then we place a unit delta function into caldata, and request a count
rate of 0.99 * the user-requested count rate for creating calibrator
data.
To generate a calibration star observation, use a 'delta function' single positive
pixel in an otherwise zero-filled array as your sky fits file.
''', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-t','--target_dir', type=str, default='niriss-ami_out/', help='output directory path (relative to home directory)')
parser.add_argument('--output_absolute_path', type=str, default=None, help='absolute output directory path, if specified it overrides --target_dir')
parser.add_argument('-o','--overwrite', type=int, default=0, help='overwrite yes/no, default 0 (no)', choices=[0,1])
parser.add_argument('-utr','--uptheramp', type=int, default=0, help='generate up-the-ramp fits file? yes/no, default 0 (no)', choices=[0,1])
parser.add_argument('-f', '--filter', type=str, help='filter name (upper/lower case)', choices=["F277W", "F380M", "F430M", "F480M"])
parser.add_argument('-p','--psf', type=str, help='absolute path to oversampled PSF fits file. Spectral type set in this')
parser.add_argument('-s','--sky', type=str, help='absolute path to oversampled sky scene fits file, normalized to sum to unity')
parser.add_argument('-os','--oversample', type=int, help='sky scene oversampling (must be odd integer number)', choices=[1,3,5,7,9,11,21])
parser.add_argument('-I','--nint', type=int, default=1, help='number of integrations (IR community calls these exposures sometimes)')
parser.add_argument('-G','--ngroups', type=int, default=1, help='number of up-the-ramp readouts')
parser.add_argument('-c','--create_calibrator', type=int, default=1, help='create calibrator observation yes/no default 1 (yes)', choices=[0,1])
parser.add_argument('-cr','--countrate', type=float, help='Photon count rate on 25m^2 per sec in the bandpass (CRclearp in ami_etc output)',)
parser.add_argument('-tag','--tag', type=str, default='', help='Tag to include in the names of the produced files')
parser.add_argument('--uniform_flatfield', type=int, default='0',help='Generate random-noise flatfield (default) or uniform noiseless flatfield (if set to 1) ', choices=[0,1])
parser.add_argument('--random_seed' ,type=int, default=None, help='Random seed for all noise generations (seed is altered for every integration), allows for well-controlled simulations')
parser.add_argument('--flatfield_dir' ,type=str, default=None, help='Directory for simulated flatfield. Defaults to targetDir.')
parser.add_argument('--overwrite_flatfield', type=int, default=0, help='Overwrite simulated flatfield. Defaults to No.', choices=[0,1])
parser.add_argument('-v','--verbose', type=int, default=0, help='Verbose output to screen. Default is off', choices=[0,1])
parser.add_argument('--apply_dither', type=int, default=1, help='Dither the observations. Default is on', choices=[0,1])
parser.add_argument('--apply_jitter', type=int, default=1, help='Include pointing errors in the observations. Default is on', choices=[0,1])
parser.add_argument('--include_detection_noise', type=int, default=1, help='Include photon noise, read noise, background noise, and dark current. Default is on', choices=[-1,0,1])
args = parser.parse_args(argv)
print('*** JWST NIRISS scene simulation of NRM observation ***')
target_dir = args.target_dir
out_dir_0 = os.path.join(os.getenv('HOME') , target_dir);
output_absolute_path = args.output_absolute_path
if output_absolute_path is not None:
# override target_dir
out_dir_0 = output_absolute_path
overwrite = args.overwrite
uptheramp = args.uptheramp
calibrator = args.create_calibrator
file_tag = args.tag
uniform_flatfield = args.uniform_flatfield
random_seed = args.random_seed
flatfield_dir = args.flatfield_dir
overwrite_flatfield = args.overwrite_flatfield
countrate = args.countrate
filt = args.filter
psffile = args.psf
skyfile = args.sky
osample = args.oversample
verbose = args.verbose
apply_dither = args.apply_dither
apply_jitter = args.apply_jitter
include_detection_noise = args.include_detection_noise
nint = args.nint # TBD: calculate internally to save the user prep time doing ETC work
ngroups = args.ngroups # TBD: calculate internally to save the user prep time doing ETC work
if flatfield_dir is None:
flatfield_dir = out_dir_0
if verbose:
print(argv)
print("countrate input as %.2e photons/sec on 25m^2 primary in filter bandpass" % args.countrate)
# rebin sky_conv_psf image to detector scale, use max of detector array to calculate nint, ngroups, data-collect-time
# generate images
if verbose:
print("oversampling set in top level driver to %d" % osample)
trials = 1
out_dir = os.path.join(out_dir_0 , '%s/' % (filt));
# tmpDir = os.path.join(outDir0 , 'tmp/')
if verbose:
print(('Output directory set to %s' % out_dir))
# NB outDir must exist to contain input files - clean up organization later?
for dd in [out_dir]:#,tmpDir]:
if not os.path.exists(dd):
os.makedirs(dd)
# FEEDER FOR SIMULATION - read in pre-made psf made by WebbPSF (or any other way)
# File sizes:
# psfdata, psfhdr = fits.getdata(os.path.join(out_dir_0,psffile), header=True)
# skydata, skyhdr = fits.getdata(os.path.join(out_dir_0,skyfile), header=True)
psfdata, psfhdr = fits.getdata(psffile, header=True)
skydata, skyhdr = fits.getdata(skyfile, header=True)
skydata = skydata / skydata.sum() # normalize sky data total to unity!
skydata = skydata * countrate
if verbose:
print("psfdata", psfdata.shape, "totals %.2e (NRM throughput / full aperture throughput)"%psfdata.sum())
print("skydata", skydata.shape, "totals %.2e (photons / s on 25^m in band)"%skydata.sum())
caldata = np.zeros(skydata.shape, np.float64)
maxloc = np.where(skydata==skydata.max())
ptsrcfraction = skydata[maxloc]/skydata.sum()
caldata[maxloc[0][0], maxloc[1][0]] = countrate#ptsrcfraction * countrate
# DEFINE DITHER POINTING in det pixels
ipsoffset = U.ips_size//2 - (skydata.shape[0]//osample)//2
x_dith, y_dith = [(skydata.shape[0]//2)/osample + ipsoffset,], \
[(skydata.shape[0]//2)/osample + ipsoffset,]
dithers = len(x_dith)
if verbose:
print("x_dith, y_dith", x_dith, y_dith)
# now convert to oversampled pixels for the calculation:
x_dith[:] = [(x*osample - osample//2+1) for x in x_dith]
y_dith[:] = [(y*osample - osample//2+1) for y in y_dith]
""" python 2to3 conversion"""
print(type(x_dith))
x_dith = np.array(x_dith).astype(np.int)
y_dith = np.array(y_dith).astype(np.int)
import sys
import time
import numpy as np
import tensorflow as tf
from collections import defaultdict
from random import shuffle
FLAGS = tf.app.flags.FLAGS
class Batcher(object):
def __init__(self, in_file, num_epochs, max_seq, batch_size):
self._num_epochs = num_epochs
self._batch_size = batch_size
self._max_seq = max_seq
self.step = 1.
self.in_file = in_file
self.next_batch_op = self.input_pipeline(in_file, self._batch_size, num_epochs=num_epochs)
def next_batch(self, sess):
return sess.run(self.next_batch_op)
def input_pipeline(self, file_pattern, batch_size, num_epochs=None, num_threads=10):
filenames = tf.matching_files(file_pattern)
filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
parsed_batch = self.example_parser(filename_queue)
min_after_dequeue = 10000
capacity = min_after_dequeue + 12 * batch_size
next_batch = tf.train.batch(
parsed_batch, batch_size=batch_size, capacity=capacity,
num_threads=num_threads, dynamic_pad=True, allow_smaller_final_batch=True)
return next_batch
def example_parser(self, filename_queue):
reader = tf.TFRecordReader()
key, record_string = reader.read(filename_queue)
# Define how to parse the example
context_features = {
'doc_id': tf.FixedLenFeature([], tf.string),
'e1': tf.FixedLenFeature([], tf.int64),
'e2': tf.FixedLenFeature([], tf.int64),
'ep': tf.FixedLenFeature([], tf.int64),
'rel': tf.FixedLenFeature([], tf.int64),
'seq_len': tf.FixedLenFeature([], tf.int64),
}
sequence_features = {
"tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"e1_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"e2_dist": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(serialized=record_string,
context_features=context_features,
sequence_features=sequence_features)
doc_id = context_parsed['doc_id']
e1 = context_parsed['e1']
e2 = context_parsed['e2']
ep = context_parsed['ep']
rel = context_parsed['rel']
tokens = sequence_parsed['tokens']
e1_dist = sequence_parsed['e1_dist']
e2_dist = sequence_parsed['e2_dist']
seq_len = context_parsed['seq_len']
return [e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id]
class InMemoryBatcher(Batcher):
def __init__(self, in_file, num_epochs, max_seq, batch_size):
super(InMemoryBatcher, self).__init__(in_file, num_epochs, max_seq, batch_size)
self.epoch = 0.
loading_batch_size = self._batch_size
self.next_batch_op = self.input_pipeline(in_file, loading_batch_size, num_epochs=1, num_threads=1)
self.data = defaultdict(list)
self._starts = {}
self._ends = {}
self._bucket_probs = {}
def load_all_data(self, sess, max_batches=-1, pad=0, bucket_space=0, doc_filter=None):
'''
load batches to memory for shuffling and dynamic padding
'''
batch_num = 0
samples = 0
start_time = time.time()
print ('Loading data from %s with batch size: %d' % (self.in_file, self._batch_size))
try:
while max_batches <= 0 or batch_num < max_batches:
batch = sess.run(self.next_batch_op)
e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id = batch
batch = [(_e1, _e2, _ep, _rel, _tokens, _e1_dist, _e2_dist, _seq_len, _doc_id)
for (_e1, _e2, _ep, _rel, _tokens, _e1_dist, _e2_dist, _seq_len, _doc_id)
in zip(e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id)
if not doc_filter or _doc_id not in doc_filter]
if batch:
e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id = zip(*batch)
e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id = \
np.array(e1), np.array(e2), np.array(ep), np.array(rel), np.array(tokens), \
np.array(e1_dist), np.array(e2_dist), np.array(seq_len), np.array(doc_id)
# pad sequences a little bit so buckets aren't so sparse
if bucket_space > 0:
add_pad = (bucket_space - tokens.shape[1] % bucket_space)
zero_col = np.ones((seq_len.shape[0], add_pad)) * pad
tokens = np.hstack((tokens, zero_col))
e1_dist = np.hstack((e1_dist, zero_col))
e2_dist = np.hstack((e2_dist, zero_col))
samples += e1.shape[0]
updated_batch = (e1, e2, ep, rel, tokens, e1_dist, e2_dist, seq_len, doc_id)
self.data[tokens.shape[1]].append(updated_batch)
batch_num += 1
sys.stdout.write('\rLoading batch: %d' % batch_num)
sys.stdout.flush()
except Exception as e:
print('')
for seq_len, batches in self.data.iteritems():
self.data[seq_len] = [tuple((e1[i], e2[i], ep[i], rel[i], tokens[i], e1d[i], e2d[i], sl[i], did[i]))
for (e1, e2, ep, rel, tokens, e1d, e2d, sl, did) in batches
for i in range(e1.shape[0])]
self.reset_batch_pointer()
end_time = time.time()
print('Done, loaded %d samples in %5.2f seconds' % (samples, (end_time-start_time)))
return batch_num
def next_batch(self, sess):
# select bucket to create batch from
self.step += 1
bucket = self.select_bucket()
batch = self.data[bucket][self._starts[bucket]:self._ends[bucket]]
# update pointers
self._starts[bucket] = self._ends[bucket]
self._ends[bucket] = min(self._ends[bucket] + self._batch_size, len(self.data[bucket]))
self._bucket_probs[bucket] = max(0, len(self.data[bucket]) - self._starts[bucket])
#TODO this is dumb
_e1 = np.array([e1 for e1, e2, ep, rel, t, e1d, e2d, s, did in batch])
_e2 = np.array([e2 for e1, e2, ep, rel, t, e1d, e2d, s, did in batch])
import numpy as np
# Auxiliary function for stochastic gradient descent
def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
data_size = len(y)
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
import numpy as np
from datetime import datetime
import System
from System import Array
from DHI.Generic.MikeZero import eumUnit, eumQuantity
from DHI.Generic.MikeZero.DFS import (
DfsFileFactory,
DfsFactory,
DfsSimpleType,
DataValueType,
)
from DHI.Generic.MikeZero.DFS.dfs123 import Dfs1Builder
from .dutil import to_numpy, Dataset, find_item, get_item_info
from .eum import TimeStep, ItemInfo
from .helpers import safe_length
class Dfs1:
def read(self, filename, item_numbers=None, item_names=None):
"""Read data from the dfs1 file
Usage:
read(filename, item_numbers=None, item_names=None)
filename
full path to the dfs1 file.
item_numbers
read only the item_numbers in the array specified (0 base)
item_names
read only the items in the array specified, (takes precedence over item_numbers)
Return:
Dataset(data, time, items)
where data[nt,x]
"""
# NOTE. Item numbers are base 0 (everything else in the dfs is base 0)
# Open the dfs file for reading
dfs = DfsFileFactory.DfsGenericOpen(filename)
if item_names is not None:
item_numbers = find_item(dfs, item_names)
if item_numbers is None:
n_items = safe_length(dfs.ItemInfo)
item_numbers = list(range(n_items))
# Determine the size of the grid
axis = dfs.ItemInfo[0].SpatialAxis
xNum = axis.XCount
nt = dfs.FileInfo.TimeAxis.NumberOfTimeSteps
if nt == 0:
raise Warning("Static dfs1 files (with no time steps) are not supported.")
nt = 1
deleteValue = dfs.FileInfo.DeleteValueFloat
n_items = len(item_numbers)
data_list = []
for item in range(n_items):
# Initialize an empty data block
data = np.ndarray(shape=(nt, xNum), dtype=float)
data_list.append(data)
t = []
startTime = dfs.FileInfo.TimeAxis.StartDateTime
for it in range(dfs.FileInfo.TimeAxis.NumberOfTimeSteps):
for item in range(n_items):
itemdata = dfs.ReadItemTimeStep(item_numbers[item] + 1, it)
src = itemdata.Data
d = to_numpy(src)
d[d == deleteValue] = np.nan
data_list[item][it, :] = d
t.append(
startTime.AddSeconds(itemdata.Time).ToString("yyyy-MM-dd HH:mm:ss")
)
time = [datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in t]
items = get_item_info(dfs, item_numbers)
dfs.Close()
return Dataset(data_list, time, items)
def write(self, filename, data):
"""
Function: write to a pre-created dfs1 file.
filename:
full path and filename to existing dfs1 file
data:
list of matrices. len(data) must equal the number of items in the dfs2.
Each matrix must be of dimension time, x
usage:
write(filename, data) where data(nt, x)
Returns:
Nothing
"""
# Open the dfs file for writing
dfs = DfsFileFactory.Dfs1FileOpenEdit(filename)
# Determine the size of the grid
number_x = dfs.SpatialAxis.XCount
n_time_steps = dfs.FileInfo.TimeAxis.NumberOfTimeSteps
n_items = safe_length(dfs.ItemInfo)
deletevalue = -1e-035
if not all(np.shape(d)[0] == n_time_steps for d in data):
raise Warning(
"ERROR data matrices in the time dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not all(np.shape(d)[1] == number_x for d in data):
raise Warning(
"ERROR data matrices in the X dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not len(data) == n_items:
raise Warning(
"The number of matrices in data do not match the number of items in the dfs1 file."
)
for i in range(n_time_steps):
for item in range(n_items):
d = data[item][i, :]
d[np.isnan(d)] = deletevalue
darray = Array[System.Single](np.array(d.reshape(d.size, 1)[:, 0]))
dfs.WriteItemTimeStepNext(0, darray)
dfs.Close()
def create(
self,
filename,
data,
start_time=None,
dt=1,
items=None,
length_x=1,
x0=0,
coordinate=None,
timeseries_unit=TimeStep.SECOND,
title=None,
):
"""
Create a dfs1 file
Parameters
----------
filename: str
Location to write the dfs1 file
data: list[np.array]
list of matrices, one for each item. Matrix dimension: x, time
start_time: datetime, optional
start datetime
timeseries_unit: Timestep, optional
TimeStep unit default TimeStep.SECOND
dt: float
The time step (double based on the timeseries_unit). Therefore dt of 5.5 with timeseries_unit of minutes
means 5 mins and 30 seconds.
items: list[ItemInfo], optional
List of ItemInfo corresponding to a variable types (ie. Water Level).
coordinate:
['UTM-33', 12.4387, 55.2257, 327] for UTM, Long, Lat, North to Y orientation. Note: long, lat in decimal degrees
OR
[TODO: Support not Local Coordinates ...]
x0:
Lower right position
length_x:
length of each grid in the x direction (meters)
title:
title of the dfs2 file (can be blank)
"""
if title is None:
title = ""
n_time_steps = np.shape(data[0])[0]
number_x = np.shape(data[0])[1]
n_items = len(data)
if start_time is None:
start_time = datetime.now()
if coordinate is None:
coordinate = ["LONG/LAT", 0, 0, 0]
if items is None:
items = [ItemInfo(f"temItem {i+1}") for i in range(n_items)]
if not all(np.shape(d)[0] == n_time_steps for d in data):
raise Warning(
"ERROR data matrices in the time dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if not all(np.shape(d)[1] == number_x for d in data):
raise Warning(
"ERROR data matrices in the X dimension do not all match in the data list. "
"Data is list of matices [t, x]"
)
if len(items) != n_items:
raise Warning(
"names must be an array of strings with the same number as matrices in data list"
)
if not type(start_time) is datetime:
raise Warning("start_time must be of type datetime ")
system_start_time = System.DateTime(
start_time.year,
start_time.month,
start_time.day,
start_time.hour,
start_time.minute,
start_time.second,
)
# Create an empty dfs1 file object
factory = DfsFactory()
builder = Dfs1Builder.Create(title, "mikeio", 0)
# Set up the header
builder.SetDataType(0)
builder.SetGeographicalProjection(
factory.CreateProjectionGeoOrigin(
coordinate[0], coordinate[1], coordinate[2], coordinate[3]
)
)
builder.SetTemporalAxis(
factory.CreateTemporalEqCalendarAxis(
timeseries_unit, system_start_time, 0, dt
)
)
builder.SetSpatialAxis(
factory.CreateAxisEqD1(eumUnit.eumUmeter, number_x, x0, length_x)
)
for i in range(n_items):
builder.AddDynamicItem(
items[i].name,
eumQuantity.Create(items[i].type, items[i].unit),
DfsSimpleType.Float,
DataValueType.Instantaneous,
)
try:
builder.CreateFile(filename)
except IOError:
print("cannot create dfs2 file: ", filename)
dfs = builder.GetFile()
deletevalue = dfs.FileInfo.DeleteValueFloat # -1.0000000031710769e-30
for i in range(n_time_steps):
for item in range(n_items):
d = data[item][i, :]
d[np.isnan(d)] = deletevalue
import streamlit as st
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
from utils1 import load_model, get_prediction, get_heatmaps
import numpy as np
from keras import backend as K
plt.rcParams['figure.figsize'] = [14, 10]
def main():
st.title("Histopathologic Cancer Detection")
st.header("Task: Binary classification")
st.write("Set up...")
model, funcs, session = load_model()
st.write("Done!!")
option = st.selectbox(
'',
['Choose demo data or your data','Use Demo data','Use your data'])
if option == 'Choose demo data or your data':
st.write('')
else:
if option == 'Use Demo data':
# some pics
image = Image.open('lymph_cancer.png')
st.image(image, caption='Original histopathological scan (label = Cancer)', use_column_width=True)
if model is not None:
st.write("")
st.write("Classifying ...")
K.set_session(session)
probas = get_prediction(image, model)
probas = list(probas)
st.write("Done!!")
if probas is not None:
st.write("")
st.write("Making heatmap...")
heatmap = get_heatmaps(image, model, funcs)
st.write("Done!!")
st.write("")
plt.title("Cancer: " + f" {probas[0]:.3f}",fontsize=20)
plt.axis('off')
plt.imshow(ImageOps.fit(image, (96,96), Image.ANTIALIAS), cmap='gray')
plt.imshow(heatmap, cmap='magma', alpha=min(0.5, probas[0]))
st.pyplot()
else:
uploaded_file = st.file_uploader("Choose data to input (only JPG, JPEG or PNG)")
if uploaded_file is not None:
# Upload image and confirm
image = Image.open(uploaded_file)
shape = np.asarray(image)
import numpy as np
from ..datasets import NumpyDatasetAdapter
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class OneToNDatasetAdapter(NumpyDatasetAdapter):
r"""1-to-N Dataset Adapter.
Given a triples dataset X comprised of n triples in the form (s, p, o), this dataset adapter will
generate one-hot outputs for each (s, p) tuple to all entities o that are found in X.
E.g: X = [[a, p, b],
[a, p, d],
[c, p, d],
[c, p, e],
[c, p, f]]
Gives a one-hot vector mapping of entities to indices:
Entities: [a, b, c, d, e, f]
Indices: [0, 1, 2, 3, 4, 5]
One-hot outputs are produced for each (s, p) tuple to all valid object indices in the dataset:
# [a, b, c, d, e, f]
(a, p) : [0, 1, 0, 1, 0, 0]
The ```get_next_batch``` function yields the (s, p, o) triple and one-hot vector corresponding to the (s, p)
tuple.
If batches are generated with ```unique_pairs=True``` then only one instance of each unique (s, p) tuple
is returned:
(a, p) : [0, 1, 0, 1, 0, 0]
(c, p) : [0, 0, 0, 1, 1, 1]
Otherwise batch outputs are generated in dataset order (required for evaluating test set, but gives a higher
weight to more frequent (s, p) pairs if used during model training):
(a, p) : [0, 1, 0, 1, 0, 0]
(a, p) : [0, 1, 0, 1, 0, 0]
(c, p) : [0, 0, 0, 1, 1, 1]
(c, p) : [0, 0, 0, 1, 1, 1]
(c, p) : [0, 0, 0, 1, 1, 1]
"""
def __init__(self, low_memory=False):
"""Initialize the class variables
Parameters
----------
low_memory : bool
If low_memory flag set to True the output vectors indices are generated on-the-fly in the batch yield
function, which lowers memory usage but increases training time.
"""
super(OneToNDatasetAdapter, self).__init__()
self.filter_mapping = None
self.filtered_status = {}
self.paired_status = {}
self.output_mapping = None
self.output_onehot = {}
self.low_memory = low_memory
def set_filter(self, filter_triples, mapped_status=False):
""" Set the filter to be used while generating batch outputs.
Parameters
----------
filter_triples : nd-array
Triples to be used as a filter.
mapped_status : bool
Bool indicating if filter has already been mapped to internal indices.
"""
self.set_data(filter_triples, 'filter', mapped_status)
self.filter_mapping = self.generate_output_mapping('filter')
def generate_outputs(self, dataset_type='train', use_filter=False, unique_pairs=True):
"""Generate one-hot outputs for a dataset.
Parameters
----------
dataset_type : string
Indicates which dataset to generate outputs for.
use_filter : bool
Bool indicating whether to generate outputs using the filter set by `set_filter()`. Default: False
unique_pairs : bool
Bool indicating whether to generate outputs according to unique pairs of (subject, predicate), otherwise
will generate outputs in same row-order as the triples in the specified dataset. Default: True.
"""
if dataset_type not in self.dataset.keys():
msg = 'Unable to generate outputs: dataset `{}` not found. ' \
'Use `set_data` to set dataset in adapter first.'.format(dataset_type)
raise KeyError(msg)
if dataset_type in ['valid', 'test']:
if unique_pairs:
# This is just a friendly warning - in most cases the test and valid sets should NOT be unique_pairs.
msg = 'Generating outputs for dataset `{}` with unique_pairs=True. ' \
'Are you sure this is desired behaviour?'.format(dataset_type)
logger.warning(msg)
if use_filter:
if self.filter_mapping is None:
msg = 'Filter not found: cannot generate one-hot outputs with `use_filter=True` ' \
'if a filter has not been set.'
raise ValueError(msg)
else:
output_dict = self.filter_mapping
else:
if self.output_mapping is None:
msg = 'Output mapping was not created before generating one-hot vectors. '
raise ValueError(msg)
else:
output_dict = self.output_mapping
if self.low_memory:
# With low_memory=True the output indices are generated on the fly in the batch yield function
pass
else:
if unique_pairs:
X = np.unique(self.dataset[dataset_type][:, [0, 1]], axis=0).astype(np.int32)
else:
X = self.dataset[dataset_type]
# Initialize np.array of shape [len(X), num_entities]
self.output_onehot[dataset_type] = np.zeros((len(X), len(self.ent_to_idx)), dtype=np.int8)
# Set one-hot indices using output_dict
for i, x in enumerate(X):
indices = output_dict.get((x[0], x[1]), [])
self.output_onehot[dataset_type][i, indices] = 1
# Set flags indicating filter and unique pair status of outputs for given dataset.
self.filtered_status[dataset_type] = use_filter
self.paired_status[dataset_type] = unique_pairs
def generate_output_mapping(self, dataset_type='train'):
""" Creates dictionary keyed on (subject, predicate) to list of objects
Parameters
----------
dataset_type : string
Indicates which dataset to generate output mapping from.
Returns
-------
dict
"""
# if data is not already mapped, then map before creating output map
if not self.mapped_status[dataset_type]:
self.map_data()
output_mapping = dict()
for s, p, o in self.dataset[dataset_type]:
output_mapping.setdefault((s, p), []).append(o)
return output_mapping
def set_output_mapping(self, output_dict, clear_outputs=True):
""" Set the mapping used to generate one-hot outputs vectors.
Setting a new output mapping will clear_outputs any previously generated outputs, as otherwise
can lead to a situation where old outputs are returned from batch function.
Parameters
----------
output_dict : dict
(subject, predicate) to object indices
clear_outputs: bool
Clears any one hot outputs held by the adapter, as otherwise can lead to a situation where onehot
outputs generated by a different mapping are returned from the batch function. Default: True.
"""
self.output_mapping = output_dict
# Clear any onehot outputs previously generated
if clear_outputs:
self.clear_outputs()
def clear_outputs(self, dataset_type=None):
""" Clears generated one-hot outputs currently held by the adapter.
Parameters
----------
dataset_type: string
indicates which dataset to clear_outputs. Default: None (clears all).
"""
if dataset_type is None:
self.output_onehot = {}
self.filtered_status = {}
self.paired_status = {}
else:
del self.output_onehot[dataset_type]
del self.filtered_status[dataset_type]
del self.paired_status[dataset_type]
def verify_outputs(self, dataset_type, use_filter, unique_pairs):
"""Verifies if one-hot outputs currently held in adapter correspond to the use_filter and unique_pairs
options.
Parameters
----------
dataset_type: string
indicates which dataset to use
use_filter : bool
Flag to indicate whether the one-hot outputs are generated from filtered or unfiltered datasets
unique_pairs : bool
Flag to indicate whether the one-hot outputs are generated by unique (s, p) pairs or in dataset order.
Returns
-------
bool
If False then outputs must be re-generated for the specified dataset and parameters.
"""
if dataset_type not in self.output_onehot.keys():
# One-hot outputs have not been generated for this dataset_type
return False
if dataset_type not in self.filtered_status.keys():
# This shouldn't happen.
logger.debug('Dataset {} is in adapter, but filtered_status is not set.'.format(dataset_type))
return False
if dataset_type not in self.paired_status.keys():
logger.debug('Dataset {} is in adapter, but paired_status is not set.'.format(dataset_type))
return False
if use_filter != self.filtered_status[dataset_type]:
return False
if unique_pairs != self.paired_status[dataset_type]:
return False
return True
def get_next_batch(self, batches_count=-1, dataset_type='train', use_filter=False, unique_pairs=True):
"""Generator that returns the next batch of data.
Parameters
----------
batches_count: int
number of batches per epoch (default: -1, i.e. uses batch_size of 1)
dataset_type: string
indicates which dataset to use
use_filter : bool
Flag to indicate whether the one-hot outputs are generated from filtered or unfiltered datasets
unique_pairs : bool
Flag to indicate whether the one-hot outputs are generated by unique (s, p) pairs or in dataset order.
Returns
-------
batch_output : nd-array, shape=[batch_size, 3]
A batch of triples from the dataset type specified. If unique_pairs=True, then the object column
will be set to zeros.
batch_onehot : nd-array
A batch of onehot arrays corresponding to `batch_output` triples
"""
# if data is not already mapped, then map before returning the batch
if not self.mapped_status[dataset_type]:
self.map_data()
if unique_pairs:
X = np.unique(self.dataset[dataset_type][:, [0, 1]], axis=0).astype(np.int32)
X = np.c_[X, np.zeros(len(X))] # Append dummy object columns
else:
X = self.dataset[dataset_type]
dataset_size = len(X)
if batches_count == -1:
batch_size = 1
batches_count = dataset_size
else:
batch_size = int(np.ceil(dataset_size / batches_count))
## @package optimizer_test_util
# Module caffe2.python.optimizer_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.python import brew, core, workspace, cnn, optimizer
from caffe2.proto import caffe2_pb2
from caffe2.python.modeling.initializers import (
Initializer, PseudoFP16Initializer)
from caffe2.python.model_helper import ModelHelper
class OptimizerTestBase(object):
"""
This is an abstract base class.
Don't inherit from unittest.TestCase, and don't name it 'Test*'.
Do, however, do these things in classes which inherit from this.
"""
def _createDense(self, dtype=core.DataType.FLOAT):
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
numpy_dtype = np.float32 if dtype == core.DataType.FLOAT else np.float16
initializer = Initializer if dtype == core.DataType.FLOAT else \
PseudoFP16Initializer
data = np.random.randint(
2,
size=(20, perfect_model.size)).astype(numpy_dtype)
label = np.dot(data, perfect_model)[:, np.newaxis]
model = ModelHelper(name="test", arg_scope={'order': 'NCHW'})
out = brew.fc(
model,
'data', 'fc', perfect_model.size, 1, ('ConstantFill', {}),
('ConstantFill', {}), axis=0,
WeightInitializer=initializer, BiasInitializer=initializer
)
if dtype == core.DataType.FLOAT16:
out = model.HalfToFloat(out, out + "_fp32")
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
return (model, perfect_model, data, label)
def testDense(self):
model, perfect_model, data, label = self._createDense()
optimizer = self.build_optimizer(model)
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
workspace.FeedBlob('data', data[idx])
workspace.FeedBlob('label', label[idx])
workspace.RunNet(model.net.Proto().name)
np.testing.assert_allclose(
perfect_model[np.newaxis, :],
workspace.FetchBlob('fc_w'),
atol=1e-2
)
self.check_optimizer(optimizer)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
def testGPUDense(self, dtype=core.DataType.FLOAT):
device_opt = core.DeviceOption(caffe2_pb2.CUDA, 0)
with core.DeviceScope(device_opt):
model, _perfect_model, data, label = self._createDense(dtype)
if dtype == core.DataType.FLOAT16:
fc_fp32_for_host = model.HalfToFloat('fc', 'fc_fp32_for_host')
model.CopyGPUToCPU(fc_fp32_for_host, 'fc_cpu')
else:
model.CopyGPUToCPU('fc', 'fc_cpu')
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
# Add some CPU ops
brew.fc(model, 'fc_cpu', 'fc2', dim_in=1, dim_out=10, axis=0)
# Create optimizer in default device scope
self.build_optimizer(model)
if self._skip_gpu:
return
# Run net to see it does not crash
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
workspace.RunNet(model.net.Proto().name)
def testSparse(self):
# to test duplicated indices we assign two indices to each weight and
# thus each weight might count once or twice
DUPLICATION = 2
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
data = np.random.randint(
2,
size=(20, perfect_model.size * DUPLICATION)).astype(np.float32)
label = np.dot(data, np.repeat(perfect_model, DUPLICATION))
model = cnn.CNNModelHelper("NCHW", name="test")
# imitate what model wrapper does
w = model.param_init_net.ConstantFill(
[], 'w', shape=[perfect_model.size], value=0.0)
model.params.append(w)
picked = model.net.Gather([w, 'indices'], 'gather')
out = model.ReduceFrontSum(picked, 'sum')
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
for indices_type in [np.int32, np.int64]:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
# transform into indices of binary features
indices = np.repeat(np.arange(perfect_model.size),
DUPLICATION)[data[idx] == 1]
if indices.size == 0:
continue
workspace.FeedBlob(
'indices',
indices.reshape((indices.size,)).astype(indices_type)
)
workspace.FeedBlob('label', np.array(label[idx]))
from __future__ import annotations
from collections import deque
from itertools import count
from typing import get_type_hints, Any, Callable, Iterable, Iterator, Optional, Sequence, Type, TypeVar, Union
from numpy.typing import ArrayLike
import numpy as np
class Momentum:
"""
Provides an unbiased momentum i.e. an exponential moving average.
Since it provides averages of the input values provided, no learning rates are incorporated.
This keeps the momentum and learning rate decoupled and let's momentum be applied to other things.
The weights are updated every iteration as well, meaning that the weight
may be modified between updates without affecting previous estimates.
"""
rate: float
rate_minus_1: float
momentum: ArrayLike
weights: float
value: ArrayLike
last: ArrayLike
def __init__(self: Momentum, *args: float, **kwargs: float) -> None:
"""
Usage
-----
Momentum(0.9)
Momentum(rate=0.9)
Momentum(rate_minus_1=-0.1)
"""
self.rate_minus_1 = args[0]-1 if args else kwargs.get("rate_minus_1", kwargs.get("rate", 0.999) - 1)
self.momentum = 0.0
self.weights = 0.0
def __call__(self: Momentum, value: ArrayLike) -> ArrayLike:
"""
Usage
-----
avg_value = momentum(value) # update and return
avg_value == momentum.value # retrieve the value later
"""
self.last = value
self.momentum += self.rate_minus_1 * (self.momentum - value)
self.weights += self.rate_minus_1 * (self.weights - 1)
return self.value
def clear(self: Momentum) -> None:
self.momentum = 0.0
self.weights = 0.0
@property
def rate(self: Momentum) -> float:
return self.rate_minus_1 + 1
@rate.setter
def rate(self: Momentum, rate: float) -> None:
self.rate_minus_1 = rate - 1
@property
def value(self: Momentum) -> ArrayLike:
return self.momentum / self.weights
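# Illustrative usage sketch (the _example_momentum helper and its values are
# assumptions, not part of the original module): the returned estimate is a
# bias-corrected exponential moving average, so the first update reproduces
# the input exactly.
def _example_momentum():
    m = Momentum(rate=0.9)
    first = m(4.0)   # == 4.0, because the weight normalization removes the bias
    second = m(2.0)  # ~2.95, a weighted average leaning slightly toward the newer 2.0
    return first, second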
class GeometricMomentum:
"""
Similar to normal momentum but uses a logarithmic rescaling.
Requires inputs to be non-negative.
"""
rate: float
rate_minus_1: float
eps: float
momentum: ArrayLike
weights: float
value: ArrayLike
last: ArrayLike
def __init__(self: Momentum, *args: float, eps: float = 1e-7, **kwargs: float) -> None:
"""
Usage
-----
GeometricMomentum(0.9)
GeometricMomentum(rate=0.9)
GeometricMomentum(rate_minus_1=-0.1)
"""
self.rate_minus_1 = args[0]-1 if args else kwargs.get("rate_minus_1", kwargs.get("rate", 0.999) - 1)
self.eps = eps
self.momentum = 0.0
self.weights = 0.0
def __call__(self: GeometricMomentum, value: ArrayLike) -> np.ndarray:
"""
Usage
-----
avg_value = momentum(value) # update and return
avg_value == momentum.value # retrieve the value later
"""
self.last = value
self.momentum += self.rate_minus_1 * (self.momentum - np.log(value + self.eps))
self.weights += self.rate_minus_1 * (self.weights - 1)
return self.value
def clear(self: GeometricMomentum) -> None:
self.momentum = 0.0
self.weights = 0.0
@property
def rate(self: GeometricMomentum) -> float:
return self.rate_minus_1 + 1
@rate.setter
def rate(self: GeometricMomentum, rate: float) -> None:
self.rate_minus_1 = rate - 1
@property
def value(self: GeometricMomentum) -> ArrayLike:
return np.exp(self.momentum / self.weights) - self.eps
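# Illustrative check (added, not part of the original API): averaging happens in
# log-space, so a constant positive input is reproduced up to `eps`, and mixed
# inputs combine as a weighted geometric mean rather than an arithmetic one.
_g = GeometricMomentum(0.9)
assert abs(_g(3.0) - 3.0) < 1e-6
del _g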
def random_array(
low: ArrayLike = 0,
high: ArrayLike = 1,
size: Union[int, Sequence[int]] = (),
dtype: Union[Type[float], Type[int]] = float,
) -> np.ndarray:
"""
Produces a random array of the specified shape within the provided bounds of the specific dtype.
Parameters
----------
low : ArrayLike = 0
The lowest possible value, inclusive.
high : ArrayLike = 1
The highest possible value, exclusive.
size
: Sequence[int] = ()
Creates an ndarray of the given shape.
: int
Creates a 1-D array of the given length.
dtype : type
= float, default
Create a random float value between the low/high values.
low, high : ArrayLike[float]
= int
Create a random integer value between the low/high values.
low, high : int
Raises ValueError if np.any(low > high).
Returns
-------
x : np.ndarray[dtype]
A random numpy array.
"""
if dtype is float:
return np.random.uniform(low, high, size)
elif dtype is int:
return | np.random.random_integers(low, high, size) | numpy.random.random_integers |
import numpy as np
from ._nlmeans import _pixelwise_nlmeans_3d
from .filter_ import Filter
class NLMeansFilter(Filter):
"""
Non-Local Means (Buades2011).
<NAME>., <NAME>., & <NAME>. (2011). Non-Local Means Denoising.
Image Processing On Line, 1, 208–212.
https://doi.org/10.5201/ipol.2011.bcm_nlm
Parameters
----------
dims : tuple of str
The dataset dimensions along which to filter.
r : {int, sequence}
The radius of the search window along each dimension.
sigma : float
The standard deviation of the noise present in the data.
h : float
The filtering parameter; larger values smooth more aggressively.
f : int
The patch radius (patches of size 2f+1 are compared).
"""
per_variable = False
def __init__(self, dims, r=1, sigma=1, h=1, f=1):
if isinstance(r, (int, float)):
r = [r] * len(dims)
self.dims = tuple(dims)
self.r = np.array(r, dtype=np.uint32)
self.f = np.array([f if _ > 0 else 0 for _ in self.r], dtype=np.uint32)
self.sigma = sigma
self.h = h
def _filter(self, arr, axes, output):
#
# Pad r and f to three dimensions.
#
pad_before = np.zeros(4 - arr.ndim, dtype=self.r.dtype)
pad_after = np.zeros(arr.ndim - len(self.r) - 1, dtype=self.r.dtype)
r = | np.concatenate([pad_before, self.r, pad_after]) | numpy.concatenate |
import numpy as np
import itertools
import sys
import argparse
import os
import random
import tqdm
import time
###############################################################################
def get_distance_matrix(dist_matrix_file):
tstart = time.time()
if not os.path.exists(dist_matrix_file):
sys.stderr.write("File '%s' do not exist\n"%(dist_matrix_file))
#end if
dist_matrix = np.loadtxt(fname=dist_matrix_file, delimiter=",", dtype=float)
#sys.stdout.write("get-distance-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return dist_matrix
#end get_distance_matrix()
def get_color_matrix(color_matrix_file):
tstart= time.time()
if not os.path.exists(color_matrix_file):
sys.stderr.write("File '%s' do not exist\n"%(dist_matrix_file))
#end if
color_matrix = np.loadtxt(fname=color_matrix_file, delimiter=",", dtype=int)
#sys.stdout.write("get-color-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return color_matrix
#end get_distance_matrix()
################################################################################
def local_search_v3_iter(A, C, R, F, S_in, cost_in):
cost = cost_in
S = np.sort(S_in)
iters = 0
for i in range(len(S)):
u = S[i]
S_d = S.copy()
for j in range(len(F)):
v = F[j]
if v in S_d:
continue;
iters += 1
S_d[i] = v
R_d = C[:, S_d].sum(axis=1)
temp_R = np.subtract(R, R_d)
if np.any(temp_R < 0):
continue;
temp_cost = np.sum(A[:, S_d].min(axis=1))
if temp_cost < cost:
cost = temp_cost
S[i] = v
#end if
#end for
#end for
return cost, S, iters
#end local_search_v3_iter()
def local_search_v3(A, C, R, seed):
np.random.seed(seed)
r0 = R[0]
r1 = R[1]
F0 = np.array(np.nonzero(C[0])[0])
F1 = np.array(np.nonzero(C[1])[0])
F = np.sort(np.concatenate([F0, F1]))
if (len(F0) < r0) or (len(F1) < r1):
cost = np.inf
solution = []
return cost, solution, 0
#end if
# initialise a random assignment
S0 = np.random.choice(F0, r0)
S1 = np.random.choice(F1, r1)
S = np.sort(np.concatenate([S0, S1]))
cost = np.sum(A[:, S].min(axis=1))
iters = 0
while(1):
cur_cost, cur_S, cur_iters = local_search_v3_iter(A, C, R, F, S, cost)
cur_R = C[:, cur_S].sum(axis=1)
iters += cur_iters
if cur_cost >= cost:
break;
else:
cost = cur_cost
S = cur_S
#end if
#end while
return cost, S, iters
#end local_search_v3()
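# Illustrative call (added; shapes are assumptions): A is a point-to-facility
# distance matrix, C a 0/1 color-membership matrix over the facilities, and R
# gives the number of facilities required per color.
#   R = np.array([6, 4])
#   cost, S, iters = local_search_v3(A, C, R, seed=123456789)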
###############################################################################
def run_exp1(output = sys.stdout):
dataset_list = ["heart-switzerland",\
"heart-va",\
"heart-hungarian",\
"heart-cleveland",\
"student-mat",\
"house-votes-84",\
"student-por",\
"student-per2",\
"autism",\
"hcv-egy-data",\
#"cmc"
]
k = 10
min_frac_list = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
#min_frac_list = np.array([0.1, 0.2])
#seed_list = [ 94236883, 2611535, 34985942, 6378810, 15208894, 25557092,\
# 43871896, 15786068, 86513484, 118111772]
seed_list = [123456789]
for dataset in dataset_list:
#dataset = "heart-switzerland"
dist_file = "../dataset_c2/%s-distances-l1.csv"%(dataset)
color_file = "../dataset_c2/%s-colors.csv"%(dataset)
A = get_distance_matrix(dist_file)
C = get_color_matrix(color_file)
for seed in seed_list:
for min_frac in min_frac_list:
total_time = 0.0
cost = np.inf
r0_min = int(k*min_frac)
for r0 in range(r0_min, k+1):
r1 = int(k - r0)
R = np.array([r0, r1])
tstart = time.time()
cur_cost, cur_S, cur_iters = local_search_v3(A, C, R, seed)
total_time += time.time() - tstart
if cur_cost < cost:
cost = cur_cost
S = cur_S
#end if
#end for
S = np.sort(S)
R_d = C[:, S].sum(axis=1)
output.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
output.flush()
sys.stdout.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
sys.stdout.flush()
#print(cost, S, total_time)
#end for
#end for
#end run_exp1()
def run_exp2(output = sys.stdout):
dataset_list = ["cmc",\
"abalone",\
"mushroom",\
"nursery",\
"census-income"\
]
k = 10
min_frac_list = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
#seed_list = [ 94236883, 2611535, 34985942, 6378810, 15208894, 25557092,\
# 43871896, 15786068, 86513484, 118111772]
seed_list = [123456789]
for dataset in dataset_list:
#dataset = "heart-switzerland"
dist_file = "../dataset_c2/%s-distances-l1.csv"%(dataset)
color_file = "../dataset_c2/%s-colors.csv"%(dataset)
A = get_distance_matrix(dist_file)
C = get_color_matrix(color_file)
for seed in seed_list:
for min_frac in min_frac_list:
total_time = 0.0
cost = np.inf
r0_min = int(k*min_frac)
for r0 in range(r0_min, k+1):
r1 = int(k - r0)
R = np.array([r0, r1])
tstart = time.time()
cur_cost, cur_S, cur_iters = local_search_v3(A, C, R, seed)
total_time += time.time() - tstart
if cur_cost < cost:
cost = cur_cost
S = cur_S
#end if
#end for
S = np.sort(S)
R_d = C[:, S].sum(axis=1)
output.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
output.flush()
sys.stdout.write("%s, %d, %.2f, %d, %d, %.2f, %.2f, %d\n"%\
(dataset, k, min_frac, R_d[0], R_d[1], cost,\
total_time, seed))
sys.stdout.flush()
#print(cost, S, total_time)
#end for
#end for
#end run_exp2()
################################################################################
def local_search_v3_gen(A, C, R, seed):
np.random.seed(seed)
F = np.array([], dtype=int)
S = np.array([], dtype=int)
for i in range(len(R)):
F_i = np.array(np.nonzero(C[i])[0])
if len(F_i) < R[i]:
return np.inf, [], 0
S_i = np.random.choice(F_i, R[i])
F = np.concatenate([F, F_i])
S = | np.concatenate([S, S_i]) | numpy.concatenate |
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models import Circle, HoverTool, TapTool, LabelSet
from tqdm import tqdm
from bokeh.plotting import output_notebook, show
from .BaseCrossVal import BaseCrossVal
from ..utils import binary_metrics
class kfold(BaseCrossVal):
""" Exhaustitive search over param_dict calculating binary metrics.
Parameters
----------
model : object
This object is assumed to store bootlist attributes in .model (e.g. modelPLS.model.x_scores_).
X : array-like, shape = [n_samples, n_features]
Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.
Y : array-like, shape = [n_samples, 1]
Response variables, where n_samples is the number of samples.
param_dict : dict
List of attributes to calculate and return bootstrap confidence intervals.
folds : a positive integer, (default 10)
The number of folds used in the computation.
bootnum : a positive integer, (default 100)
The number of bootstrap samples used in the computation for the plot.
Methods
-------
Run: Runs all necessary methods prior to plot.
Plot: Creates a R2/Q2 plot.
"""
def __init__(self, model, X, Y, param_dict, folds=10, bootnum=100):
super().__init__(model=model, X=X, Y=Y, param_dict=param_dict, folds=folds, bootnum=bootnum)
self.crossval_idx = StratifiedKFold(n_splits=folds)
def calc_ypred(self):
"""Calculates ypred full and ypred cv."""
self.ypred_full = []
self.ypred_cv = []
for params in self.param_list:
# Set hyper-parameters
params_i = params
model_i = self.model(**params_i)
# Full
model_i.train(self.X, self.Y)
ypred_full_i = model_i.test(self.X)
self.ypred_full.append(ypred_full_i)
# CV (for each fold)
ypred_cv_i = self._calc_cv_ypred(model_i, self.X, self.Y)
self.ypred_cv.append(ypred_cv_i)
def calc_stats(self):
"""Calculates binary statistics from ypred full and ypred cv."""
stats_list = []
for i in range(len(self.param_list)):
# Create dictionaries with binary_metrics
stats_full_i = binary_metrics(self.Y, self.ypred_full[i])
stats_cv_i = binary_metrics(self.Y, self.ypred_cv[i])
# Rename columns
stats_full_i = {k + "full": v for k, v in stats_full_i.items()}
stats_cv_i = {k + "cv": v for k, v in stats_cv_i.items()}
stats_cv_i["R²"] = stats_full_i.pop("R²full")
stats_cv_i["Q²"] = stats_cv_i.pop("R²cv")
# Combine and append
stats_combined = {**stats_full_i, **stats_cv_i}
stats_list.append(stats_combined)
self.table = self._format_table(stats_list) # Transpose, Add headers
return self.table
def run(self):
"""Runs all functions prior to plot."""
self.calc_ypred()
self.calc_stats()
if self.bootnum > 1:
self.calc_ypred_boot()
self.calc_stats_boot()
def calc_ypred_boot(self):
"""Calculates ypred full and ypred cv for each bootstrap resample."""
self.ytrue_boot = []
self.ypred_full_boot = []
self.ypred_cv_boot = []
for i in tqdm(range(self.bootnum), desc="Kfold"):
bootidx_i = np.random.choice(len(self.Y), len(self.Y))
newX = self.X[bootidx_i, :]
newY = self.Y[bootidx_i]
ypred_full_nboot_i = []
ypred_cv_nboot_i = []
for params in self.param_list:
# Set hyper-parameters
model_i = self.model(**params)
# Full
model_i.train(newX, newY)
ypred_full_i = model_i.test(newX)
ypred_full_nboot_i.append(ypred_full_i)
# cv
ypred_cv_i = self._calc_cv_ypred(model_i, newX, newY)
ypred_cv_nboot_i.append(ypred_cv_i)
self.ytrue_boot.append(newY)
self.ypred_full_boot.append(ypred_full_nboot_i)
self.ypred_cv_boot.append(ypred_cv_nboot_i)
def calc_stats_boot(self):
"""Calculates binary statistics from ypred full and ypred cv for each bootstrap resample."""
self.full_boot_metrics = []
self.cv_boot_metrics = []
for i in range(len(self.param_list)):
stats_full_i = []
stats_cv_i = []
for j in range(self.bootnum):
stats_full = binary_metrics(self.ytrue_boot[j], self.ypred_full_boot[j][i])
stats_full_i.append(stats_full)
stats_cv = binary_metrics(self.ytrue_boot[j], self.ypred_cv_boot[j][i])
stats_cv_i.append(stats_cv)
self.full_boot_metrics.append(stats_full_i)
self.cv_boot_metrics.append(stats_cv_i)
def _calc_cv_ypred(self, model_i, X, Y):
"""Method used to calculate ypred cv."""
ypred_cv_i = [None] * len(Y)
for train, test in self.crossval_idx.split(self.X, self.Y):
X_train = X[train, :]
Y_train = Y[train]
X_test = X[test, :]
model_i.train(X_train, Y_train)
ypred_cv_i_j = model_i.test(X_test)
# Place each prediction back into ypred_cv_i at its original sample index (TODO: find a cleaner way)
for (idx, val) in zip(test, ypred_cv_i_j):
ypred_cv_i[idx] = val.tolist()
return ypred_cv_i
def _format_table(self, stats_list):
"""Make stats pretty (pandas table -> proper names in columns)."""
table = pd.DataFrame(stats_list).T
param_list_string = []
for i in range(len(self.param_list)):
param_list_string.append(str(self.param_list[i]))
table.columns = param_list_string
return table
def plot(self, metric="r2q2"):
"""Create a full/cv plot using based on metric selected.
Parameters
----------
metric : string, (default "r2q2")
metric has to be either "r2q2", "auc", "acc", "f1score", "prec", "sens", or "spec".
"""
# Choose metric to plot
metric_title = np.array(["ACCURACY", "AUC", "F1-SCORE", "PRECISION", "R²", "SENSITIVITY", "SPECIFICITY"])
metric_list = np.array(["acc", "auc", "f1score", "prec", "r2q2", "sens", "spec"])
metric_idx = np.where(metric_list == metric)[0][0]
# get full, cv, and diff
full = self.table.iloc[2 * metric_idx + 1]
cv = self.table.iloc[2 * metric_idx]
diff = abs(full - cv)
full_text = self.table.iloc[2 * metric_idx + 1].name
cv_text = self.table.iloc[2 * metric_idx].name
diff_text = "DIFFERENCE " + "(" + full_text + " - " + cv_text + ")"
# round full, cv, and diff for hovertool
full_hover = []
cv_hover = []
diff_hover = []
for j in range(len(full)):
full_hover.append("%.2f" % round(full[j], 2))
cv_hover.append("%.2f" % round(cv[j], 2))
diff_hover.append("%.2f" % round(diff[j], 2))
# get key, values (as string) from param_dict (key -> title, values -> x axis values)
for k, v in self.param_dict.items():
key = k
values = v
values_string = [str(i) for i in values]
# store data in ColumnDataSource for Bokeh
data = dict(full=full, cv=cv, diff=diff, full_hover=full_hover, cv_hover=cv_hover, diff_hover=diff_hover, values_string=values_string)
source = ColumnDataSource(data=data)
fig1_yrange = (min(diff) - max(0.1 * (min(diff)), 0.1), max(diff) + max(0.1 * (max(diff)), 0.1))
fig1_xrange = (min(cv) - max(0.1 * (min(cv)), 0.1), max(cv) + max(0.1 * (max(cv)), 0.1))
fig1_title = diff_text + " vs " + cv_text
# Figure 1 (DIFFERENCE (R2 - Q2) vs. Q2)
fig1 = figure(x_axis_label=cv_text, y_axis_label=diff_text, title=fig1_title, tools="tap,pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select", y_range=fig1_yrange, x_range=fig1_xrange, plot_width=485, plot_height=405)
# Figure 1: Add a line
fig1_line = fig1.line(cv, diff, line_width=2, line_color="black", line_alpha=0.25)
# Figure 1: Add circles (interactive click)
fig1_circ = fig1.circle("cv", "diff", size=17, alpha=0.7, color="green", source=source)
fig1_circ.selection_glyph = Circle(fill_color="green", line_width=2, line_color="black", fill_alpha=0.6)
fig1_circ.nonselection_glyph.fill_color = "green"
fig1_circ.nonselection_glyph.fill_alpha = 0.4
fig1_circ.nonselection_glyph.line_color = "white"
fig1_text = fig1.text(x="cv", y="diff", text="values_string", source=source, text_font_size="10pt", text_color="white", x_offset=-3.5, y_offset=7)
fig1_text.nonselection_glyph.text_color = "white"
fig1_text.nonselection_glyph.text_alpha = 0.6
# Figure 1: Add hovertool
fig1.add_tools(HoverTool(renderers=[fig1_circ], tooltips=[(full_text, "@full_hover"), (cv_text, "@cv_hover"), ("Diff", "@diff_hover")]))
# Figure 1: Extra formating
fig1.axis.major_label_text_font_size = "8pt"
if metric is "r2q2" or metric is "auc":
fig1.title.text_font_size = "12pt"
fig1.xaxis.axis_label_text_font_size = "10pt"
fig1.yaxis.axis_label_text_font_size = "10pt"
else:
fig1.title.text_font_size = "10pt"
fig1.xaxis.axis_label_text_font_size = "9pt"
fig1.yaxis.axis_label_text_font_size = "9pt"
# Figure 2: full/cv
fig2_title = full_text + " & " + cv_text + " vs no. of components"
fig2 = figure(x_axis_label="components", y_axis_label="Value", title=fig2_title, plot_width=485, plot_height=405, x_range=pd.unique(values_string), y_range=(0, 1.1), tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
# Figure 2: add confidence intervals if bootnum > 1
if self.bootnum > 1:
lower_ci_full = []
upper_ci_full = []
lower_ci_cv = []
upper_ci_cv = []
# Get all upper, lower 95% CI (full/cv) for each specific n_component and append
for m in range(len(self.full_boot_metrics)):
full_boot = []
cv_boot = []
for k in range(len(self.full_boot_metrics[0])):
full_boot.append(self.full_boot_metrics[m][k][metric_title[metric_idx]])
cv_boot.append(self.cv_boot_metrics[m][k][metric_title[metric_idx]])
# Calculated percentile 95% CI and append
full_bias = np.mean(full_boot) - full[m]
cv_bias = np.mean(cv_boot) - cv[m]
lower_ci_full.append(np.percentile(full_boot, 2.5) - full_bias)
upper_ci_full.append( | np.percentile(full_boot, 97.5) | numpy.percentile |
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal( | np.var(A, 1) | numpy.var |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 15:21:35 2020
Main script used to run the machine learning analysis.
First coastline properties have to be calculated (see 'calculate_....py')
This script then:
1) crunches the data from the BBCT cleanup tour (see folder datafiles)
2) Calculates a big regression table with all features: for every measurement,
hydrodynamic conditions in the neighborhood are calculated
3) The machine learning model is trained and tested using this regression table
4) Model properties are calculated, and the variogram is plotted
@author: kaandorp
"""
import numpy as np
import pandas as pd
import xarray as xr
import os
from datetime import datetime,timedelta
import matplotlib.pyplot as plt
# import cmocean.cm as cmo
import pickle
import math
import shapely.geometry
import cartopy.io.shapereader as shpreader
import cartopy.crs as ccrs
from shapely.ops import split
from shapely.geometry import (box, LineString, MultiLineString, MultiPoint,
Point, Polygon, MultiPolygon, shape)
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
colors_tableau = ['#006BA4', '#FF800E', '#ABABAB', '#595959',
'#5F9ED1', '#C85200', '#898989', '#A2C8EC', '#FFBC79', '#CFCFCF']
def find_closest_date(arr,date):
diff = np.array([arr[i]-date for i in range(len(arr))])
return np.argmin(np.abs(diff))
def calculate_variables(lon,lat,date,data,dist_select,time_lag,variable_name,tmp_X,tmp_Y,quantity='all',use_land_mask=False,land_mask=None):
i_date_closest = None
if 'time' in data.keys():
tmp_time = data['time']
i_date_closest = find_closest_date(tmp_time.data,date)
i_date_start = find_closest_date(tmp_time.data,date-timedelta(days=time_lag))
i_date_start = max(0,i_date_start)
i_date_closest = min(len(tmp_time.data),i_date_closest)
if i_date_start == 0:
print('warning: starting index 0')
if i_date_closest == len(tmp_time.data):
print('warning: date at the end of the data array')
dist_mat = np.sqrt(((tmp_X - lon)*1.11e2*np.cos(lat*(np.pi/180)))**2 + ((tmp_Y - lat)*1.11e2)**2)
def closest_point(dist_mat,use_land_mask,land_mask):
dist_mat_min = None
if use_land_mask==True:
dist_mat_min = np.min(dist_mat[~land_mask])
i_select = (dist_mat == dist_mat_min)
elif type(use_land_mask) == float:
dist_mat_min = np.min(dist_mat[land_mask < use_land_mask])
i_select = (dist_mat == dist_mat_min)
else:
i_select = (dist_mat == dist_mat.min())
return i_select,dist_mat_min
if dist_select == 0: #closest point
i_select,_ = closest_point(dist_mat,use_land_mask,land_mask)
assert(i_select.sum() == 1)
else: # look in a radius
if use_land_mask==True:
i_select = (dist_mat < dist_select) & ~land_mask
elif type(use_land_mask) == float:
i_select = (dist_mat < dist_select) & (land_mask < use_land_mask)
else:
i_select = (dist_mat < dist_select)
# fall back to closest distance if necessary
if i_select.sum() == 0:
i_select,dist_mat_min = closest_point(dist_mat,use_land_mask,land_mask)
print('no cells within %2.2f km, falling back on closest distance (%2.2f)' % (dist_select,dist_mat_min))
if quantity == 'mean':
fn_quantity = lambda x: np.nanmean(x)
elif quantity == 'max':
fn_quantity = lambda x: np.nanmax(x)
elif quantity == 'all':
fn_quantity = lambda x: (np.nanmean(x),np.nanmax(x),np.nanmin(x),np.nanstd(x))
elif quantity == 'sum':
fn_quantity = lambda x: np.nansum(x)
else:
raise RuntimeError('not implemented')
try:
if 'time' in data.keys():
if i_date_start != i_date_closest: #calculate quantity over a range of times (i.e. lag time)
if isinstance(variable_name,str): #scalar
result = fn_quantity(data[variable_name][i_date_start:i_date_closest+1].data[:,i_select])
elif isinstance(variable_name,list): #vector -> convert to magnitude (scalar)
magnitude = np.sqrt(data[variable_name[0]]**2 + data[variable_name[1]]**2)
result = fn_quantity(magnitude[i_date_start:i_date_closest+1].data[:,i_select])
else:
raise RuntimeError('not implemented')
else: #calculate quantity for a single time
if isinstance(variable_name,str):
result = fn_quantity(data[variable_name][i_date_closest].data[i_select])
elif isinstance(variable_name,list):
magnitude = np.sqrt(data[variable_name[0]]**2 + data[variable_name[1]]**2)
result = fn_quantity(magnitude[i_date_closest].data[i_select])
else:
raise RuntimeError('not implemented')
else: #the netcdf does not contain time, space only
if isinstance(variable_name,str):
result = fn_quantity(data[variable_name].data[i_select])
elif isinstance(variable_name,list):
magnitude = np.sqrt(data[variable_name[0]]**2 + data[variable_name[1]]**2)
result = fn_quantity(magnitude.data[i_select])
else:
raise RuntimeError('not implemented')
except:
result = np.nan
print('returning nan')
return result
def calculate_inproduct(lon,lat,date,data,dist_select,time_lag,variable_name,tmp_X,tmp_Y,quantity='all',PLOT=False,use_land_mask=False,land_mask=None):
tmp_time = data['time']
i_date_closest = find_closest_date(tmp_time.data,date)
i_date_start = find_closest_date(tmp_time.data,date-timedelta(days=time_lag))
i_date_start = max(0,i_date_start)
i_date_closest = min(len(tmp_time.data),i_date_closest)
dist_mat = np.sqrt(((tmp_X - lon)*1.11e2*np.cos(lat*(np.pi/180)))**2 + ((tmp_Y - lat)*1.11e2)**2)
def closest_point(dist_mat,use_land_mask,land_mask):
dist_mat_min = None
if use_land_mask==True:
dist_mat_min = np.min(dist_mat[~land_mask])
i_select = (dist_mat == dist_mat_min)
elif type(use_land_mask) == float:
dist_mat_min = np.min(dist_mat[land_mask < use_land_mask])
i_select = (dist_mat == dist_mat_min)
else:
i_select = (dist_mat == dist_mat.min())
return i_select,dist_mat_min
if dist_select == 0: #closest point
i_select,_ = closest_point(dist_mat,use_land_mask,land_mask)
assert(i_select.sum() == 1)
else: # look in a radius
if use_land_mask==True:
i_select = (dist_mat < dist_select) & ~land_mask
elif type(use_land_mask) == float:
i_select = (dist_mat < dist_select) & (land_mask < use_land_mask)
else:
i_select = (dist_mat < dist_select)
# fall back to closest distance if necessary
if i_select.sum() == 0:
i_select,dist_mat_min = closest_point(dist_mat,use_land_mask,land_mask)
print('no cells within %2.2f km, falling back on closest distance (%2.2f)' % (dist_select,dist_mat_min))
i_which_coast = np.where( np.isclose(lon,data_coastal_orientations['lon']) )[0]
# i_which_coast = np.where(lon_ == beach_orientations['lon'])[0]
if i_which_coast.size != 0:
i_coastal_segment = i_which_coast[0]
else:
raise RuntimeError('coastal segment not found')
if quantity == 'mean':
fn_quantity = lambda x: np.nanmean(x)
elif quantity == 'max':
fn_quantity = lambda x: np.nanmax(x)
elif quantity == 'all':
fn_quantity = lambda x: (np.nanmean(x),np.nanmax(x),np.nanmin(x))
else:
raise RuntimeError('not implemented')
if i_date_start != i_date_closest: #calculate quantity over a range of times (i.e. lag time)
vec_u = data[variable_name[0]][i_date_start:i_date_closest+1].data[:,i_select]
vec_v = data[variable_name[1]][i_date_start:i_date_closest+1].data[:,i_select]
vec_ = np.array([vec_u[~np.isnan(vec_u)],vec_v[~np.isnan(vec_v)]]).T
normal_vec = data_coastal_orientations['normal_vec_cartopy'][:,i_coastal_segment]
dot_prod = np.array([np.dot(vec_[i,:], normal_vec) for i in range(len(vec_))])
else:
print('calculating in product for single time')
vec_u = data[variable_name[0]][i_date_closest].data[i_select]
vec_v = data[variable_name[1]][i_date_closest].data[i_select]
vec_ = np.array([vec_u[~np.isnan(vec_u)],vec_v[~np.isnan(vec_v)]]).T
normal_vec = data_coastal_orientations['normal_vec_cartopy'][:,i_coastal_segment]
dot_prod = np.array([np.dot(vec_[i,:], normal_vec) for i in range(len(vec_))])
if PLOT: #validation plots
lons_plot = tmp_X[0,:]
lats_plot = tmp_Y[:,0]
lons_spacing = lons_plot[1] - lons_plot[0]
lats_spacing = lats_plot[1] - lats_plot[0]
lons_mesh = np.append(lons_plot -.5*lons_spacing, lons_plot[-1]+.5*lons_spacing)
lats_mesh = np.append(lats_plot -.5*lats_spacing, lats_plot[-1]+.5*lats_spacing)
X_plot,Y_plot = np.meshgrid(lons_mesh,lats_mesh)
scale_ = 0.001
normal_vec_lon = np.array([lon,lon+scale_*(normal_vec[0]*(1.11e2 * np.cos(lat*(np.pi/180))))])
normal_vec_lat = np.array([lat,lat+scale_*(normal_vec[1]*1.11e2)])
#set 1
vec_u = data[variable_name[0]][i_date_start].data[i_select]
vec_v = data[variable_name[1]][i_date_start].data[i_select]
vec_ = np.array([vec_u,vec_v]).T
dot_prod_1 = np.array([np.dot(vec_[i,:], normal_vec) for i in range(len(vec_))])
dot_field = np.zeros(tmp_X.shape)
dot_field[i_select] = dot_prod_1
dot_field[dot_field==0] = np.nan
level_max = np.nanmax(np.abs(dot_field))
# levels = np.linpspace(-levels_max,levels_max,50)
fig = plt.figure(figsize=(7,5),dpi=120)
ax = plt.axes(projection=ccrs.PlateCarree())
cmesh = plt.pcolormesh(X_plot,Y_plot,dot_field,cmap=plt.cm.coolwarm,vmin=-level_max,vmax=level_max)
plt.quiver(tmp_X,tmp_Y,data[variable_name[0]][i_date_start],data[variable_name[1]][i_date_start],scale=300)
ax.plot(normal_vec_lon,normal_vec_lat,'g-')
ax.set_extent((3.2,6.8,51,54))
ax.coastlines(resolution='10m')
plt.colorbar(cmesh)
plt.title('In-product of wind normal to coastline\nRadius = %ikm' % dist_select)
#set 2
vec_u = data[variable_name[0]][i_date_closest].data[i_select]
vec_v = data[variable_name[1]][i_date_closest].data[i_select]
vec_ = np.array([vec_u,vec_v]).T
dot_prod_1 = np.array([np.dot(vec_[i,:], normal_vec) for i in range(len(vec_))])
dot_field = np.zeros(tmp_X.shape)
dot_field[i_select] = dot_prod_1
dot_field[dot_field==0] = np.nan
level_max = np.nanmax(np.abs(dot_field))
fig = plt.figure(figsize=(7,5),dpi=120)
ax = plt.axes(projection=ccrs.PlateCarree())
cmesh = plt.pcolormesh(X_plot,Y_plot,dot_field,cmap=plt.cm.coolwarm,vmin=-level_max,vmax=level_max)
plt.quiver(tmp_X,tmp_Y,data[variable_name[0]][i_date_closest],data[variable_name[1]][i_date_closest],scale=100)
ax.plot(normal_vec_lon,normal_vec_lat,'g-')
ax.set_extent((3.2,6.8,51,54))
ax.coastlines(resolution='10m')
plt.colorbar(cmesh)
if type(use_land_mask) == float:
fig = plt.figure(figsize=(7,5),dpi=120)
ax = plt.axes(projection=ccrs.PlateCarree())
plt.pcolormesh(X_plot,Y_plot,(land_mask < use_land_mask))
ax.plot(normal_vec_lon,normal_vec_lat,'g-')
ax.set_extent((3.2,6.8,51,54))
ax.coastlines(resolution='10m')
if dot_prod.size == 0:
result = np.nan
print('returning nan')
else:
result = fn_quantity(dot_prod)
return result
def calculate_tide_derivative(lon,lat,date,data,dist_select,time_lag,variable_name,tmp_X,tmp_Y,use_land_mask=False,land_mask=None):
"""
Calculate variable indicating in which part of the spring-neap cycle we are
"""
tmp_time = data['time']
i_date_closest = find_closest_date(tmp_time.data,date)
i_date_start = find_closest_date(tmp_time.data,date-timedelta(days=time_lag))
i_date_start = max(0,i_date_start)
i_date_closest = min(len(tmp_time.data),i_date_closest)
dist_mat = np.sqrt(((tmp_X - lon)*1.11e2*np.cos(lat*(np.pi/180)))**2 + ((tmp_Y - lat)*1.11e2)**2)
def closest_point(dist_mat,use_land_mask,land_mask):
dist_mat_min = None
if use_land_mask==True:
dist_mat_min = np.min(dist_mat[~land_mask])
i_select = (dist_mat == dist_mat_min)
elif type(use_land_mask) == float:
dist_mat_min = np.min(dist_mat[land_mask < use_land_mask])
i_select = (dist_mat == dist_mat_min)
else:
i_select = (dist_mat == dist_mat.min())
return i_select,dist_mat_min
if dist_select == 0: #closest point
i_select,_ = closest_point(dist_mat,use_land_mask,land_mask)
else:
raise RuntimeError('tide derivatives only defined for closest point')
tide_max_start = data_tides[variable_name][i_date_start:i_date_start+24].values[:,i_select].max()
tide_max_end = data_tides[variable_name][i_date_closest:i_date_closest+24].values[:,i_select].max()
dtide_dt = (tide_max_end - tide_max_start) / time_lag
return dtide_dt
def load_data_currents(year):
file_july = '/Users/kaandorp/Data/CMEMS/NWSHELF_MULTIYEAR_PHY_004_009/%4.4i07.nc'%year
file_aug = '/Users/kaandorp/Data/CMEMS/NWSHELF_MULTIYEAR_PHY_004_009/%4.4i08.nc'%year
data_currents = xr.load_dataset(file_july)
data_currents = xr.concat((data_currents,xr.load_dataset(file_aug)), 'time')
return data_currents.squeeze()
def give_element_within_bounds(geom_split, box_analyze, tol=1.01):
tol_lower = 1/tol
tol_upper = tol
c = 0
for geom_ in geom_split:
x_ = np.array(geom_.xy[0])
y_ = np.array(geom_.xy[1])
if (x_.min()>= tol_lower*np.array(box_analyze)[:,0].min()) and (y_.min()>= tol_lower*np.array(box_analyze)[:,1].min()) and (
x_.max()<= tol_upper*np.array(box_analyze)[:,0].max()) and (y_.max()<= tol_upper*np.array(box_analyze)[:,1].max()):
break
c+=1
return c
def find_normal_vector(lon_center,lat_center,ax,shpfilename,radius=5):
# dkm = 5
reader = shpreader.Reader(shpfilename)
coastlines = reader.records()
dlon = radius / (1.11e2 * np.cos(lat_center*(np.pi/180)))
dlat = radius / 1.11e2
box_analyze = [[lon_center-dlon, lat_center-dlat], [lon_center-dlon, lat_center+dlat],
[lon_center+dlon, lat_center+dlat], [lon_center+dlon, lat_center-dlat], [lon_center-dlon, lat_center-dlat]]
box_poly = shapely.geometry.Polygon(box_analyze)
coastline_lons = np.array([])
coastline_lats = np.array([])
normal_vec = np.array([])
any_intersect = False
c = 0
for coast_ in coastlines:
check_ = False
if radius > 10:
check_ = True
else:
if coast_.bounds[0] > -10 and coast_.bounds[1] > 30 and coast_.bounds[2] < 30 and coast_.bounds[3] < 60: #only use coastlines in the neighborhood
check_ = True
if check_:
if coast_.geometry.intersects(box_poly):
any_intersect = True
print('intersect at %i' %c)
box_linestring = LineString(box_analyze)
coastline_split = split(coast_.geometry,box_linestring)
index_in_box = give_element_within_bounds(coastline_split,box_analyze,tol=1.00)
coastline_in_box = coastline_split[index_in_box]
coastline_lons = np.array(coastline_in_box.xy[0])
coastline_lats = np.array(coastline_in_box.xy[1])
x_lons = coastline_lons * np.cos(lat_center*(np.pi/180)) #convert to meters, i.e. compress longitude
y_lats = coastline_lats
x_ = x_lons - x_lons.mean()
y_ = y_lats - y_lats.mean()
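# (added) The SVD of the centred coastline points gives their principal
# directions: the first left-singular vector runs along the coast, the second
# (least variance) is perpendicular to it and is used as the normal estimate.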
svd_ = np.linalg.svd(np.array([x_,y_]))
normal_vec = svd_[0][:,1]
break
c += 1
if not any_intersect:
print('no intersections found')
normal_vec = np.array([0,0])
if normal_vec[0] < 0: #all vectors point to the right, don't copy this for other domains than the Netherlands..
normal_vec = -normal_vec
ax.plot(*box_poly.exterior.xy,'k',transform=ccrs.PlateCarree())
ax.plot(coastline_lons,coastline_lats,'ro-')
scale_ = 0.001
normal_vec_lon = np.array([lon_center,lon_center+scale_*(normal_vec[0]*(1.11e2 * np.cos(lat_center*(np.pi/180))))])
normal_vec_lat = np.array([lat_center,lat_center+scale_*(normal_vec[1]*1.11e2)])
ax.plot(normal_vec_lon,normal_vec_lat,'g-')
return normal_vec
def normal_vector_2_points(lons,lats,ax):
dx = (lons[1]-lons[0])*1.11e2 * np.cos(lats.mean()*(np.pi/180))
dy = (lats[1]-lats[0])*1.11e2
n1 = np.array([-dy, dx]) / np.sqrt(dx**2+dy**2)
n2 = np.array([dy, -dx]) / np.sqrt(dx**2+dy**2)
if n1[0] < 0: #all vectors point to the right (onto the land), don't copy this for other domains than the Netherlands..
normal_vec = n2
else:
normal_vec = n1
lon_center = lons.mean()
lat_center = lats.mean()
scale_ = 0.001
normal_vec_lon = np.array([lon_center,lon_center+scale_*(normal_vec[0]*(1.11e2 * np.cos(lat_center*(np.pi/180))))])
normal_vec_lat = np.array([lat_center,lat_center+scale_*(normal_vec[1]*1.11e2)])
ax.plot(normal_vec_lon,normal_vec_lat,'c-')
return normal_vec
#%% get the locations, and corresponding beach orientations
data_BBCT = {}
startcols = [1, 7, 13, 19, 26, 38, 51]
for s in startcols[1:]:
if s <= 13:
usecols = [s+i for i in range(5)]
colnames = ["Datum", "Etappes", "kms", "Gewicht", "Opmerkingen"]
elif s == 19:
usecols = [s + i for i in [0, 1, 2, 3, 5]]
["Datum", "Etappes", "kms", "Gewicht", "Opmerkingen"]
elif s == 26: #2017
usecols = [s+i for i in range(5)]
colnames = ["Datum", "Etappes", "kms", "Gewicht", "Deelnemers"]
elif s == 38: #2018
usecols = [s + i for i in [0,1,3,4,5,7]]
colnames = ["Datum", "Etappes", "kms", "Gewicht", "Deelnemers", "Opmerkingen"]
elif s == 51:
usecols = [s + i for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
colnames = ["Datum", "Etappes", "Startlocatie", "CoordinatenStart", "Eindlocatie", "CoordinatenEind", "Opmerkingen", "kms", "Gewicht", "Deelnemers"]
else:
raise RuntimeError('not implemented')
df = pd.read_excel('datafiles/BBCT_data_10062021.xlsx', header=1, skiprows=[0, 1, 2, 3, 4], usecols=usecols)
df.columns = colnames
df.Datum = pd.to_datetime(df.Datum, errors='coerce')
df = df.drop(df[np.isnat(df.Datum)].index)
yr = df.iloc[0,0].year
print(yr)
data_BBCT[yr] = df
GPSlocs = {}
for yr in data_BBCT:
df = data_BBCT[yr]
if 'CoordinatenStart' in df.columns:
print('Found coordinates data for %s' % yr)
for i, e in enumerate(df.Etappes):
if '.' in e:
e = e.split('. ')[1]
e = e.replace('–', '-')
CoordStart = df.CoordinatenStart[i]
lat1, lon1 = df.CoordinatenStart[i].split(',')
if ',' in df.CoordinatenEind[i]:
lat2, lon2 = df.CoordinatenEind[i].split(',')
else:
lat2, lon2 = lat1, lon1
lat = (float(lat1) + float(lat2)) /2
lon = (float(lon1) + float(lon2)) /2
GPSlocs[e] = (lat, lon)
# adding additional coordinates not in xls file
GPSlocs['Vrouwenpolder'] = (51.59, 3.57)
GPSlocs['Camperduin'] = (52.73, 4.64)
GPSlocs['Callantsoog'] = (52.84, 4.69)
GPSlocs['Julianadorp'] = (52.90, 4.71)
GPSlocs['Katwijk'] = (52.20, 4.39)
lon_unique = np.array([])
lat_unique = np.array([])
for i1,yr in enumerate(data_BBCT):
df = data_BBCT[yr]
lat = np.nan*np.zeros(len(df.Etappes))
lon = np.nan*np.zeros(len(df.Etappes))
exactlocmatch = np.zeros(len(df.Etappes), dtype=bool)
for i, e in enumerate(df.Etappes):
# split off stage number
e = e.split('. ')[-1]
# some replacing
e = e.replace('–', '-')
e = e.replace(' naar ', '-')
if e in GPSlocs:
lat[i] = GPSlocs[e][0]
lon[i] = GPSlocs[e][1]
exactlocmatch[i] = True
else:
# separate start and end
e = e.split('-')
# remove province
for j in range(len(e)):
e[j] = e[j].split('(')[0]
e[j] = e[j].replace(')', '')
# remove colons:
e[j] = e[j].split(':')[0]
# remove numbers:
for n in ['1', '2']:
e[j] = e[j].replace(n, '')
for v in GPSlocs.keys():
if e[0].strip() in v or (len(e) > 1 and e[1].strip() in v):
lat[i] = GPSlocs[v][0]
lon[i] = GPSlocs[v][1]
exactlocmatch[i] = False
if np.isnan(lon[i]):
print('Not found:', yr, e)
df['lat'] = lat
df['lon'] = lon
df['exactlocmatch'] = exactlocmatch
for lon_,lat_ in zip(df['lon'],df['lat']):
if ~np.isin(lon_,lon_unique) and ~np.isin(lat_,lat_unique):
lon_unique=np.append(lon_unique,lon_)
lat_unique=np.append(lat_unique,lat_)
i_sort = np.argsort(lon_unique)
lon_unique = lon_unique[i_sort]
lat_unique = lat_unique[i_sort]
#%% import the calculated coastline properties (see calculate_coast_mesh_properties.py and calculate_coastal_orientations.py)
data_coastal_length = xr.open_dataset('datafiles/netcdf_coastal_lengths.nc')
X_coast, Y_coast = np.meshgrid(data_coastal_length['lon'],data_coastal_length['lat'])
currents_landmask = np.loadtxt('datafiles/datafile_trueLandMask_297x_375y').astype(bool)
def plot_normal_vec(normal_vec,lon_,lat_,ax,scale=0.005,style='k-'):
normal_vec_lon = np.array([lon_,lon_+scale*(normal_vec[0]*(1.11e2 * np.cos(lat_*(np.pi/180))))])
normal_vec_lat = np.array([lat_,lat_+scale*(normal_vec[1]*1.11e2)])
ax.plot(normal_vec_lon,normal_vec_lat,style,zorder=1000)
fig = plt.figure(figsize=(10,5),dpi=120)
ax = plt.axes(projection=ccrs.PlateCarree())
ax.pcolormesh(X_coast,Y_coast,currents_landmask,alpha=.6)
ax.plot(lon_unique,lat_unique,'o',transform=ccrs.PlateCarree())
ax.set_extent((3.2,6.8,51,53.7))
ax.coastlines(resolution='10m')
data_coastal_orientations = xr.open_dataset('datafiles/netcdf_coastal_orientations.nc')
array_dot_mesh_coast = []
for i1,yr in enumerate(data_BBCT):
df = data_BBCT[yr]
for lon_,lat_ in zip(df['lon'],df['lat']):
i_which_coast = np.where( np.isclose(lon_,data_coastal_orientations['lon']) )[0]
if i_which_coast.size == 1:
i_coastal_segment = i_which_coast[0]
else:
raise RuntimeError('coastal segment not found, or multiple segments correspond to location')
n_mesh = data_coastal_orientations['normal_vec_mesh'][:,i_coastal_segment]
n_cartopy = data_coastal_orientations['normal_vec_cartopy'][:,i_coastal_segment]
plot_normal_vec(n_mesh,lon_,lat_,ax,style='k-')
plot_normal_vec(n_cartopy,lon_,lat_,ax,style='r-')
array_dot_mesh_coast.append(np.dot(n_mesh,n_cartopy))
array_dot_mesh_coast = np.array(array_dot_mesh_coast)
#%% Calculate the regression table which is used to select the machine learning features from
data_land_sea = xr.open_dataset('/Users/kaandorp/Data/ERA5/Wind_NorthSea_old/land_sea_mask.nc')
mask_land_sea = data_land_sea['lsm'].data[0,:,:]
data_beaching_f = xr.open_dataset('histograms/beaching_hist_11_f.nc')
data_beaching_p = xr.open_dataset('histograms/beaching_hist_11_p.nc')
data_beaching_r = xr.open_dataset('histograms/beaching_hist_11_r.nc')
X_beaching,Y_beaching = np.meshgrid(data_beaching_f['lon'].data,data_beaching_f['lat'].data)
data_popden = xr.open_dataset('datafiles/netcdf_popdensity.nc')
X_pop, Y_pop = np.meshgrid(data_popden['lon'],data_popden['lat'])
land_mask_pop = ~np.isnan(data_popden['popdensity'][0,:,:].data)
#create dummy xr dataset for the fishing density
data_fish_ = np.loadtxt('datafiles/datafile_fishingInputMatrices_297x_375y')
data_fish = xr.Dataset(
{"fishing_density": (("lat", "lon"), data_fish_ ),
"explanation": 'fishing density'},
coords={
"lat": np.arange(data_fish_.shape[0]),
"lon": np.arange(data_fish_.shape[1]),
},
)
distances_select = [0,20,50,100]
times_lag = [1, 3, 9, 30]
#variables with radii and lead times
vars_names = ['VHM0_mean','VHM0_max','mag_Stokes_mean','mag_Stokes_max','mag_wind_mean',
'mag_wind_max','in_Stokes_mean','in_Stokes_max','in_Stokes_min','in_wind_mean','in_wind_max','in_wind_min',
'in_currents_mean','in_currents_max','in_currents_min',
'beaching_f_tau25','beaching_p_tau25','beaching_r_tau25','beaching_f_tau75','beaching_p_tau75','beaching_r_tau75',
'beaching_f_tau150','beaching_p_tau150','beaching_r_tau150','sal_mean','sal_min']
#variables with lead times only
vars_names2 = ['tide_max','tide_std','tide_derivative','mag_tide_max','mag_tide_std','in_tide_mean','in_tide_max','in_tide_min']
#variables with radii only
distances_select2 = [0,20,50,100]
vars_names3 = ['pop_density','coastal_length','fish_density']
#'instantaneous' variables (conditions during the tour of 6 hours)
vars_names4 = ['tide_tour_max','tide_tour_min']
vars_calculate = []
for dist_ in distances_select:
for time_ in times_lag:
for var_ in vars_names:
vars_calculate.append('%s_%3.3i_%3.3i' % (var_,dist_,time_))
for time_ in times_lag:
for var_ in vars_names2:
vars_calculate.append('%s_%3.3i' % (var_,time_))
for dist_ in distances_select2:
for var_ in vars_names3:
vars_calculate.append('%s_%3.3i' % (var_,dist_))
vars_calculate.extend(vars_names4)
print('Calculating regression parameters...')
regression_table = pd.DataFrame(columns=['lon','lat','time','kg/m']+vars_calculate+['participants'])
c_table = 0
for i1,yr in enumerate(data_BBCT):
df = data_BBCT[yr]
data_waves = xr.load_dataset('/Users/kaandorp/Data/CMEMS/Waves_NorthSea/GLOBAL_REANALYSIS_WAV_001_032_%4i0101.nc' % yr)
data_wind = xr.load_dataset('/Users/kaandorp/Data/ERA5/Wind_NorthSea_old/wind_%4i_065--020-040-013.nc' % yr)
# data_temp = xr.load_dataset('/Users/kaandorp/Data/ERA5/Temp_NorthSea/temp_%4i_054-003-051-006.nc' % yr)
data_currents = load_data_currents(yr)
data_tides = xr.load_dataset('datafiles/tides_%4i.nc' % yr)
data_sal = xr.load_dataset('/Users/kaandorp/Data/CMEMS/NWSHELF_MULTIYEAR_PHY_004_009_salinity/%4i0701.nc' % yr)
data_sal = data_sal.squeeze('depth')
X_waves,Y_waves = np.meshgrid(data_waves['longitude'].data,data_waves['latitude'].data)
waves_land_mask = np.all( np.isnan( data_waves['VHM0'][:,:,:]), axis=0).data
X_wind,Y_wind = np.meshgrid(data_wind['longitude'].data,data_wind['latitude'].data)
# X_temp,Y_temp = np.meshgrid(data_temp['longitude'].data,data_temp['latitude'].data)
X_curr,Y_curr = np.meshgrid(data_currents['longitude'].data,data_currents['latitude'].data)
X_tides,Y_tides = np.meshgrid(data_tides['lon'].data,data_tides['lat'].data)
tides_land_mask = data_tides['mask_land'].values
X_sal, Y_sal = np.meshgrid(data_sal['longitude'].data,data_sal['latitude'].data)
sal_land_mask = np.all(np.isnan(data_sal['so'][:,:,:]),axis=0).values
# data_tides,X_tides,Y_tides,tides_land_mask = initialize_tides(yr)
# dist_select = 50 #km
# time_lag = 1 #days
for i2, (date_,lon_,lat_,kg_,km_) in enumerate(zip(df['Datum'],df['lon'],df['lat'],df['Gewicht'],df['kms'])):
if 'Deelnemers' in df.keys():
participants_ = df['Deelnemers'].iloc[i2]
else:
participants_ = np.nan
if date_.hour == 0:
date_ = date_ + timedelta(hours=10) #start at 10:00
calculated_variables = [lon_,lat_,date_,kg_/km_]
# variables with radii and lead times
for dist_select in distances_select:
for time_lag in times_lag:
# calculate variables: (np.nanmean(x),np.nanmax(x),np.nanmin(x),np.nanstd(x))
VHM0_mean, VHM0_max, _, _ = calculate_variables(lon_,lat_,date_,data_waves,
dist_select,time_lag,'VHM0',X_waves,Y_waves,quantity='all',
use_land_mask=True,land_mask=waves_land_mask)
mag_Stokes_mean, mag_Stokes_max, _, _ = calculate_variables(lon_,lat_,date_,data_waves,
dist_select,time_lag,['VSDX','VSDY'],X_waves,Y_waves,quantity='all',
use_land_mask=True,land_mask=waves_land_mask)
mag_wind_mean, mag_wind_max, _, _ = calculate_variables(lon_,lat_,date_,data_wind,
dist_select,time_lag,['u10','v10'],X_wind,Y_wind,quantity='all',use_land_mask=.5,land_mask=mask_land_sea)
mag_currents_mean, mag_currents_max, _, _ = calculate_variables(lon_,lat_,date_,data_currents,
dist_select,time_lag,['uo','vo'],X_curr,Y_curr,quantity='all',
use_land_mask=True,land_mask=currents_landmask)
in_Stokes_mean, in_Stokes_max, in_Stokes_min = calculate_inproduct(lon_,lat_,date_,data_waves,
dist_select,time_lag,['VSDX','VSDY'],X_waves,Y_waves,quantity='all',
use_land_mask=True,land_mask=waves_land_mask)
in_wind_mean, in_wind_max, in_wind_min = calculate_inproduct(lon_,lat_,date_,data_wind,
dist_select,time_lag,['u10','v10'],X_wind,Y_wind,quantity='all',use_land_mask=.5,land_mask=mask_land_sea)
in_currents_mean, in_currents_max, in_currents_min = calculate_inproduct(lon_,lat_,date_,data_currents,
dist_select,time_lag,['uo','vo'],X_curr,Y_curr,quantity='all',use_land_mask=True,land_mask=currents_landmask)
beaching_f_tau25 = calculate_variables(lon_,lat_,date_,data_beaching_f,
dist_select,time_lag,'beaching_tau25',X_beaching,Y_beaching,quantity='sum')
beaching_p_tau25 = calculate_variables(lon_,lat_,date_,data_beaching_p,
dist_select,time_lag,'beaching_tau25',X_beaching,Y_beaching,quantity='sum')
beaching_r_tau25 = calculate_variables(lon_,lat_,date_,data_beaching_r,
dist_select,time_lag,'beaching_tau25',X_beaching,Y_beaching,quantity='sum')
beaching_f_tau75 = calculate_variables(lon_,lat_,date_,data_beaching_f,
dist_select,time_lag,'beaching_tau75',X_beaching,Y_beaching,quantity='sum')
beaching_p_tau75 = calculate_variables(lon_,lat_,date_,data_beaching_p,
dist_select,time_lag,'beaching_tau75',X_beaching,Y_beaching,quantity='sum')
beaching_r_tau75 = calculate_variables(lon_,lat_,date_,data_beaching_r,
dist_select,time_lag,'beaching_tau75',X_beaching,Y_beaching,quantity='sum')
beaching_f_tau150 = calculate_variables(lon_,lat_,date_,data_beaching_f,
dist_select,time_lag,'beaching_tau150',X_beaching,Y_beaching,quantity='sum')
beaching_p_tau150 = calculate_variables(lon_,lat_,date_,data_beaching_p,
dist_select,time_lag,'beaching_tau150',X_beaching,Y_beaching,quantity='sum')
beaching_r_tau150 = calculate_variables(lon_,lat_,date_,data_beaching_r,
dist_select,time_lag,'beaching_tau150',X_beaching,Y_beaching,quantity='sum')
sal_mean, _, sal_min, _ = calculate_variables(lon_,lat_,date_,data_sal,
dist_select,time_lag,'so',X_sal,Y_sal,quantity='all',
use_land_mask=True,land_mask=sal_land_mask)
print(date_,in_wind_mean,in_wind_min,in_Stokes_mean,in_Stokes_min,dist_select,time_lag)
calculated_variables.extend([VHM0_mean,VHM0_max,mag_Stokes_mean,mag_Stokes_max,mag_wind_mean,
mag_wind_max,in_Stokes_mean,in_Stokes_max,in_Stokes_min,in_wind_mean,in_wind_max,in_wind_min,
in_currents_mean,in_currents_max,in_currents_min,
beaching_f_tau25,beaching_p_tau25,beaching_r_tau25,beaching_f_tau75,beaching_p_tau75,beaching_r_tau75,
beaching_f_tau150,beaching_p_tau150,beaching_r_tau150,sal_mean,sal_min])
# variables with lead times only (temp and tides)
print('Calculating tides')
for time_lag in times_lag:
# temp_mean, temp_max, _, _ = calculate_variables(lon_, lat_, date_, data_temp, 0, time_lag, 't2m', X_temp, Y_temp, quantity='all')
_, tide_max, _, tide_std = calculate_variables(lon_, lat_, date_, data_tides, 0, time_lag, 'tide', X_tides, Y_tides, quantity='all',use_land_mask=True,land_mask=tides_land_mask)
tide_derivative = calculate_tide_derivative(lon_, lat_, date_, data_tides, 0, time_lag, 'tide', X_tides, Y_tides, use_land_mask=True, land_mask=tides_land_mask)
_, mag_tide_max, _, mag_tide_std = calculate_variables(lon_,lat_,date_,data_tides,
0,time_lag,['tide_U','tide_V'],X_tides,Y_tides,quantity='all',
use_land_mask=True,land_mask=tides_land_mask)
in_tide_mean, in_tide_max, in_tide_min = calculate_inproduct(lon_,lat_,date_,data_tides,
0,time_lag,['tide_U','tide_V'],X_tides,Y_tides,quantity='all',
use_land_mask=True,land_mask=tides_land_mask)
calculated_variables.extend([tide_max,tide_std,tide_derivative,mag_tide_max,mag_tide_std,in_tide_mean,in_tide_max,in_tide_min])
# variables with radii only (MPW density, coastal lengths, fishing density)
print('Calculating population density, coastal lengths, fishing density')
for dist_select in distances_select2:
pop_density = calculate_variables(lon_, lat_, date_, data_popden, dist_select, 0, 'popdensity', X_pop, Y_pop, quantity='mean',use_land_mask=True,land_mask=~land_mask_pop)
coastal_length = calculate_variables(lon_, lat_, date_, data_coastal_length, dist_select, 0, 'coastline_length', X_coast, Y_coast, quantity='sum',use_land_mask=False)
fishing_density = calculate_variables(lon_, lat_, date_, data_fish, dist_select, 0, 'fishing_density', X_curr, Y_curr, quantity='mean',use_land_mask=True,land_mask=currents_landmask)
calculated_variables.extend([pop_density,coastal_length,fishing_density])
print('Calculating tides along tour')
# 'instantaneous' variables (tide during tour): nearest location, lead time of 0.25 day (6 hours)
_, tide_tour_max, tide_tour_min, _ = calculate_variables(lon_, lat_, date_+timedelta(hours=6), data_tides, 0, 0.25, 'tide', X_tides, Y_tides, quantity='all',use_land_mask=True,land_mask=tides_land_mask)
calculated_variables.extend([tide_tour_max,tide_tour_min])
calculated_variables.extend([participants_])
regression_table.loc[c_table] = calculated_variables
c_table += 1
print('Year %i done' % yr)
regression_table['dot_mesh_coast'] = array_dot_mesh_coast
filename_rt = 'regression_table_%3.3i_%3.3i_%4.4i%2.2i%2.2i.pickle' % (regression_table.shape[0],regression_table.shape[1],datetime.today().year,datetime.today().month,datetime.today().day)
outfile = open(os.path.join('./pickle_files/',filename_rt),'wb')
pickle.dump(regression_table,outfile)
outfile.close()
#%% Part 2: the machine learning analysis, where random forests are trained on the regression table.
# features are picked from the feature clusters using k-fold analysis
def find_features_containing(list_features,string_):
return np.array([True if string_ in feature_ else False for feature_ in list_features])
def find_features_not_containing(list_features,string_):
return np.array([False if string_ in feature_ else True for feature_ in list_features])
from sklearn.model_selection import KFold
from sklearn import linear_model
from scipy.stats import pearsonr
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from sklearn.inspection import permutation_importance
from collections import defaultdict
def cluster_select_features_2(x,threshold=2.89,include_model_feat=True,separate_model_weather=False,exclude=np.array([2,3])):
if separate_model_weather:
i_nonmodel_features_ = np.where(find_features_not_containing(x.keys(),'beaching'))[0]
else:
i_nonmodel_features_ = np.where(find_features_not_containing(x.keys(),'xxxxxx'))[0]
i_nonmodel_features_ = np.setdiff1d(i_nonmodel_features_,exclude)
fig1,ax1 = plt.subplots(1,figsize=(20,15))
fig2,ax2 = plt.subplots(1,figsize=(20,15))
corr = spearmanr(x.iloc[:,i_nonmodel_features_]).correlation
corr_linkage = hierarchy.ward(corr)
dendro = hierarchy.dendrogram(
corr_linkage, labels=list(x.iloc[:,i_nonmodel_features_].keys()), ax=ax1, leaf_rotation=90, leaf_font_size=8,
)
dendro_idx = np.arange(0, len(dendro['ivl']))
ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])
ax2.set_xticks(dendro_idx)
ax2.set_yticks(dendro_idx)
ax2.set_xticklabels(dendro['ivl'], rotation='vertical',fontsize=8)
ax2.set_yticklabels(dendro['ivl'],fontsize=6)
fig1.tight_layout()
fig2.tight_layout()
cluster_ids = hierarchy.fcluster(corr_linkage, threshold, criterion='distance')
cluster_id_to_feature_ids = defaultdict(list)
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_ids[cluster_id].append(idx)
selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
selected_features_names = x.iloc[:,i_nonmodel_features_].keys()[selected_features]
clusters_out = []
for cluster_ in cluster_id_to_feature_ids.values():
clusters_out.append(list(i_nonmodel_features_[cluster_]))
if separate_model_weather:
if include_model_feat:
clusters_out.append(list(np.where(find_features_containing(x.keys(),'beaching_f'))[0]))
clusters_out.append(list(np.where(find_features_containing(x.keys(),'beaching_p'))[0]))
clusters_out.append(list(np.where(find_features_containing(x.keys(),'beaching_r'))[0]))
return np.array(selected_features),selected_features_names,clusters_out #cluster_id_to_feature_ids.values()
def normalize_beaching_variables(regression_table_):
'''
Normalize the beaching fluxes to the length of the coastline (-> concentrations)
'''
i_100 = np.where(find_features_containing(regression_table_.keys(),'beaching') & find_features_containing(regression_table_.keys(),'_100_'))[0]
i_50 = np.where(find_features_containing(regression_table_.keys(),'beaching') & find_features_containing(regression_table_.keys(),'_050_'))[0]
i_20 = np.where(find_features_containing(regression_table_.keys(),'beaching') & find_features_containing(regression_table_.keys(),'_020_'))[0]
for i_100_ in i_100:
regression_table_.iloc[:,i_100_] /= (regression_table_.loc[:,'coastal_length_100'].values/1000)
for i_50_ in i_50:
regression_table_.iloc[:,i_50_] /= (regression_table_.loc[:,'coastal_length_050'].values/1000)
for i_20_ in i_20:
regression_table_.iloc[:,i_20_] /= (regression_table_.loc[:,'coastal_length_020'].values/1000)
return regression_table_
def calculate_most_common_feature(best_feature_in_cluster):
"""
    Look in each cluster which feature was picked most often
"""
best_feature = []
for i1 in range(best_feature_in_cluster.shape[1]):
a,b, = np.unique(best_feature_in_cluster[:,i1],return_counts=True)
index_feat = a[np.argmax(b)]
best_feature.append(index_feat)
return np.array(best_feature,dtype=int)
def datetime64_to_datetime(datetime64):
if type(datetime64) == np.ndarray:
array_datetime = np.array([])
for dt64 in datetime64:
ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
array_datetime = np.append(array_datetime, datetime.utcfromtimestamp(ts))
return array_datetime
else:
ts = (datetime64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
return datetime.utcfromtimestamp(ts)
def impute_participant_numbers(regression_table_):
"""
    For some years the participant numbers are missing; these are imputed here
"""
array_years = np.array([2014,2015,2016,2017,2018,2019])
    array_participants = np.array([1479,2015,2320,2748,2764,2568])
# -*- coding: utf-8 -*-
import numpy as np
import pymc3 as pm
from exoplanet.sampling import PyMC3Sampler
def test_tuning():
ndim = 5
np.random.seed(42)
sampler = PyMC3Sampler(start=10, window=50, finish=10)
with pm.Model() as model:
pm.Normal("x", sd=np.exp(np.random.uniform(-10, 0, ndim)), shape=ndim)
trace = sampler.tune(tune=200, progressbar=False)
start, step = sampler._get_start_and_step(start=None, step=None)
# Expected step size
expected = []
for chain in trace._straces.values():
expected.append(chain.get_sampler_stats("step_size")[-1])
expected = np.mean(expected)
        assert np.allclose(step.step_size, expected)
from scipy.io.wavfile import read
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
import pysptk
try:
from .peakdetect import peakdetect
from .GCI import SE_VQ_varF0, IAIF, get_vq_params
except:
from peakdetect import peakdetect
from GCI import SE_VQ_varF0, IAIF, get_vq_params
PATH=os.path.dirname(os.path.abspath(__file__))
sys.path.append('../')
from utils import dynamic2static, save_dict_kaldimat, get_dict
from scipy.integrate import cumtrapz
from tqdm import tqdm
import pandas as pd
import torch
from script_mananger import script_manager
class Glottal:
"""
Compute features based on the glottal source reconstruction from sustained vowels and continuous speech.
For continuous speech, the features are computed over voiced segments
Nine descriptors are computed:
1. Variability of time between consecutive glottal closure instants (GCI)
2. Average opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration / duration of glottal cycle
3. Variability of opening quotient (OQ) for consecutive glottal cycles-> rate of opening phase duration /duration of glottal cycle
4. Average normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
5. Variability of normalized amplitude quotient (NAQ) for consecutive glottal cycles-> ratio of the amplitude quotient and the duration of the glottal cycle
6. Average H1H2: Difference between the first two harmonics of the glottal flow signal
7. Variability H1H2: Difference between the first two harmonics of the glottal flow signal
8. Average of Harmonic richness factor (HRF): ratio of the sum of the harmonics amplitude and the amplitude of the fundamental frequency
9. Variability of HRF
Static or dynamic matrices can be computed:
Static matrix is formed with 36 features formed with (9 descriptors) x (4 functionals: mean, std, skewness, kurtosis)
Dynamic matrix is formed with the 9 descriptors computed for frames of 200 ms length with a time-shift of 50 ms.
Notes:
1. The fundamental frequency is computed using the RAPT algorithm.
>>> python glottal.py <file_or_folder_audio> <file_features> <dynamic_or_static> <plots (true, false)> <format (csv, txt, npy, kaldi, torch)>
Examples command line:
>>> python glottal.py "../audios/001_a1_PCGITA.wav" "glottalfeaturesAst.txt" "static" "true" "txt"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.csv" "static" "true" "csv"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.ark" "dynamic" "true" "kaldi"
>>> python glottal.py "../audios/098_u1_PCGITA.wav" "glottalfeaturesUst.pt" "dynamic" "true" "torch"
Examples directly in Python
>>> from disvoice.glottal import Glottal
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
>>> features=glottal.extract_features_file(file_audio, static, plots=True, fmt="numpy")
>>> features2=glottal.extract_features_file(file_audio, static, plots=True, fmt="dataframe")
>>> features3=glottal.extract_features_file(file_audio, dynamic, plots=True, fmt="torch")
>>> path_audios="../audios/"
>>> features1=glottal.extract_features_path(path_audios, static, plots=False, fmt="numpy")
>>> features2=glottal.extract_features_path(path_audios, static, plots=False, fmt="torch")
>>> features3=glottal.extract_features_path(path_audios, static, plots=False, fmt="dataframe")
"""
def __init__(self):
self.size_frame=0.2
self.size_step=0.05
self.head=["var GCI", "avg NAQ", "std NAQ", "avg QOQ", "std QOQ", "avg H1H2", "std H1H2", "avg HRF", "std HRF"]
def plot_glottal(self, data_audio,fs,GCI, glottal_flow, glottal_sig):
"""Plots of the glottal features
:param data_audio: speech signal.
:param fs: sampling frequency
:param GCI: glottal closure instants
:param glottal_flow: glottal flow
:param glottal_sig: reconstructed glottal signal
:returns: plots of the glottal features.
"""
fig, ax=plt.subplots(3, sharex=True)
t=np.arange(0, float(len(data_audio))/fs, 1.0/fs)
if len(t)>len(data_audio):
t=t[:len(data_audio)]
elif len(t)<len(data_audio):
data_audio=data_audio[:len(t)]
ax[0].plot(t, data_audio, 'k')
ax[0].set_ylabel('Amplitude', fontsize=12)
ax[0].set_xlim([0, t[-1]])
ax[0].grid(True)
ax[1].plot(t, glottal_sig, color='k', linewidth=2.0, label="Glottal flow signal")
amGCI=[glottal_sig[int(k-2)] for k in GCI]
GCI=GCI/fs
ax[1].plot(GCI, amGCI, 'bo', alpha=0.5, markersize=8, label="GCI")
GCId=np.diff(GCI)
ax[1].set_ylabel("Glottal flow", fontsize=12)
ax[1].text(t[2],-0.8, "Avg. time consecutive GCI:"+str(np.round(np.mean(GCId)*1000,2))+" ms")
ax[1].text(t[2],-1.05, "Std. time consecutive GCI:"+str(np.round(np.std(GCId)*1000,2))+" ms")
ax[1].set_xlabel('Time (s)', fontsize=12)
ax[1].set_xlim([0, t[-1]])
ax[1].set_ylim([-1.1, 1.1])
ax[1].grid(True)
ax[1].legend(ncol=2, loc=2)
ax[2].plot(t, glottal_flow, color='k', linewidth=2.0)
        ax[2].set_ylabel("Glottal flow derivative", fontsize=12)
ax[2].set_xlabel('Time (s)', fontsize=12)
ax[2].set_xlim([0, t[-1]])
ax[2].grid(True)
plt.show()
def extract_glottal_signal(self, x, fs):
"""Extract the glottal flow and the glottal flow derivative signals
:param x: data from the speech signal.
:param fs: sampling frequency
:returns: glottal signal
:returns: derivative of the glottal signal
:returns: glottal closure instants
>>> from scipy.io.wavfile import read
>>> glottal=Glottal()
>>> file_audio="../audios/001_a1_PCGITA.wav"
        >>> fs, data_audio=read(file_audio)
>>> glottal, g_iaif, GCIs=glottal.extract_glottal_signal(data_audio, fs)
"""
winlen=int(0.025*fs)
winshift=int(0.005*fs)
x=x-np.mean(x)
x=x/float(np.max(np.abs(x)))
GCIs=SE_VQ_varF0(x,fs)
g_iaif=np.zeros(len(x))
glottal=np.zeros(len(x))
if GCIs is None:
print("------------- warning -------------------, not enought voiced segments were found to compute GCI")
return glottal, g_iaif, GCIs
start=0
stop=int(start+winlen)
win = np.hanning(winlen)
while stop <= len(x):
x_frame=x[start:stop]
pGCIt=np.where((GCIs>start) & (GCIs<stop))[0]
GCIt=GCIs[pGCIt]-start
g_iaif_f=IAIF(x_frame,fs,GCIt)
glottal_f=cumtrapz(g_iaif_f, dx=1/fs)
glottal_f=np.hstack((glottal[start], glottal_f))
g_iaif[start:stop]=g_iaif[start:stop]+g_iaif_f*win
glottal[start:stop]=glottal[start:stop]+glottal_f*win
start=start+winshift
stop=start+winlen
g_iaif=g_iaif-np.mean(g_iaif)
g_iaif=g_iaif/max(abs(g_iaif))
glottal=glottal-np.mean(glottal)
glottal=glottal/max(abs(glottal))
        glottal=glottal-np.mean(glottal)
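        # Reconstructed tail (assumption): mirror the normalisation step above and return the
        # quantities promised by the docstring and the early-exit branch earlier in this method.
        glottal=glottal/max(abs(glottal))
        return glottal, g_iaif, GCIs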
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation for Single Image Haze Removal Using Dark Channel Prior.
Reference:
http://research.microsoft.com/en-us/um/people/kahe/cvpr09/
http://research.microsoft.com/en-us/um/people/kahe/eccv10/
"""
import numpy as np
from PIL import Image
from guidedfilter import guided_filter
R, G, B = 0, 1, 2 # index for convenience
L = 256 # color depth
def get_dark_channel(I, w):
"""Get the dark channel prior in the (RGB) image data.
Parameters
-----------
I: an M * N * 3 numpy array containing data ([0, L-1]) in the image where
M is the height, N is the width, 3 represents R/G/B channels.
w: window size
Return
-----------
An M * N array for the dark channel prior ([0, L-1]).
"""
M, N, _ = I.shape
    padded = np.pad(I, ((w // 2, w // 2), (w // 2, w // 2), (0, 0)), 'edge')  # integer pad widths
    darkch = np.zeros((M, N))
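    # Reconstructed tail (sketch following the docstring): the dark channel at each pixel is
    # the minimum over a w-by-w window across all three colour channels.
    for i, j in np.ndindex(M, N):
        darkch[i, j] = np.min(padded[i:i + w, j:j + w, :])
    return darkch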
"""
File name: renderer
Author: <NAME>
Date created: 03.03.2019
Date last modified: 18:25 03.03.2019
Python Version: "3.6"
Copyright = "Copyright (C) 2018-2019 of Packt"
Credits = ["<NAME>, <NAME>"] # people who reported bug fixes, made suggestions, etc. but did not actually write the code
License = "MIT"
Version = "1.0.0"
Maintainer = "non"
Status = "Prototype" # "Prototype", "Development", or "Production"
"""
# ==============================================================================
# Imported Modules
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from random import random
import numpy as np
import scipy.linalg
from vispy import app, gloo
import OpenGL.GL as gl
from plyfile import PlyData  # needed by Mesh.from_ply() below
app.use_app('PyGlet') # Set backend (try e.g. "PyQt5" otherwise)
# ==============================================================================
# Constant Definitions
# ==============================================================================
VERTEX_SHADER_BASIC = """
uniform mat4 u_mv;
uniform mat4 u_mvp;
uniform vec3 u_light_eye_position;
attribute vec3 a_position;
attribute vec3 a_color;
varying vec4 v_color;
varying vec3 v_eye_position;
varying vec3 v_light;
void main() {
// Projected position:
gl_Position = u_mvp * vec4(a_position, 1.0);
// Vertex color (varying):
v_color = vec4(a_color, 1.0);
// Vertex position in eye/camera coordinates:
v_eye_position = (u_mv * vec4(a_position, 1.0)).xyz;
// Vector to the light:
v_light = normalize(u_light_eye_position - v_eye_position);
}
"""
FRAGMENT_SHADER_COLOR = """
uniform float u_light_ambient;
varying vec4 v_color;
varying vec3 v_eye_position;
varying vec3 v_light;
void main() {
// Face normal in eye coordinates:
vec3 face_normal = normalize(cross(dFdx(v_eye_position), dFdy(v_eye_position)));
// Light received by the surface (ambient + diffuse):
float light_diffuse_w = max(dot(normalize(v_light), normalize(face_normal)), 0.0);
float light_w = u_light_ambient + light_diffuse_w;
light_w = clamp(light_w, 0.0, 1.0); // Clamp/clip brightness factor
gl_FragColor = light_w * v_color;
}
"""
FRAGMENT_SHADER_DEPTH = """
uniform float u_light_ambient;
varying vec4 v_color;
varying vec3 v_eye_position;
varying vec3 v_light;
void main() {
// Depth/distance between surface and eye:
float v_eye_depth = -v_eye_position.z;
gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0);
}
"""
FRAGMENT_SHADER_NORMALS = """
uniform float u_light_ambient;
varying vec4 v_color;
varying vec3 v_eye_position;
varying vec3 v_light;
void main() {
// Face normal in eye coordinates:
vec3 face_normal = normalize(cross(dFdx(v_eye_position), dFdy(v_eye_position)));
gl_FragColor = vec4((face_normal + 1.) / 2., 1.0);
}
"""
# ==============================================================================
# Function Definitions
# ==============================================================================
def convert_hz_intrinsic_to_opengl_projection(K, x0, y0, width, height, znear, zfar, flipy=False):
"""
Convert camera parameter (Hartley-Zisserman intrinsic matrix) into a projection matrix for OpenGL.
Snippet by <NAME>
(https://gist.github.com/astraw/1341472/c5f8aba7f81431967d1fc9d954ae20822c616c17#file-calib_test_utils-py-L67)
:param K: Camera matrix
:param x0: Camera horizontal image origin (typically 0)
:param y0: Camera vertical image origin (typically 0)
:param width: Canvas width
:param height: Canvas height
:param znear: Clip-near value
:param zfar: Clip-far value
:param flipy: Flag to True if images should be rendered upside-down (to match other pixel coordinate systems)
:return: Camera projection matrix
"""
znear = float(znear)
zfar = float(zfar)
depth = zfar - znear
q = -(zfar + znear) / depth
qn = -2 * (zfar * znear) / depth
if not flipy:
proj = np.array([[2 * K[0, 0] / width, -2 * K[0, 1] / width, (-2 * K[0, 2] + width + 2 * x0) / width, 0],
[0, -2 * K[1, 1] / height, (-2 * K[1, 2] + height + 2 * y0) / height, 0],
[0, 0, q, qn], # This row is standard glPerspective and sets near and far planes.
[0, 0, -1, 0]]) # This row is also standard glPerspective.
else:
proj = np.array([[2 * K[0, 0] / width, -2 * K[0, 1] / width, (-2 * K[0, 2] + width + 2 * x0) / width, 0],
[0, 2 * K[1, 1] / height, (2 * K[1, 2] - height + 2 * y0) / height, 0],
[0, 0, q, qn], # This row is standard glPerspective and sets near and far planes.
[0, 0, -1, 0]]) # This row is also standard glPerspective.
return proj.T
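# Example (illustrative, hypothetical intrinsics): a 640x480 pinhole camera with
# fx = fy = 572.4 px and principal point (325.3, 242.0), clipping between 0.1 and 100 units:
#   K = np.array([[572.4, 0.0, 325.3], [0.0, 572.4, 242.0], [0.0, 0.0, 1.0]])
#   proj = convert_hz_intrinsic_to_opengl_projection(K, 0, 0, 640, 480, 0.1, 100.0)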
def fibonacci_sphere(radius=20., samples=1, randomize=True):
""" Yields 3D cartesian coordinates of pseudo-equidistributed points on the surface of a sphere of given radius,
aligned on the origin, using Fibonacci Sphere algorithm.
Gist from Snord (http://stackoverflow.com/a/26127012/624547)
@yield 3D point
"""
rnd = 1.
if randomize:
rnd = random() * samples
offset = 2./samples
increment = np.pi * (3. - np.sqrt(5.))
for i in range(samples):
y = ((i * offset) - 1) + (offset / 2)
r = np.sqrt(1 - pow(y, 2))
phi = ((i + rnd) % samples) * increment
x = np.cos(phi) * r
z = np.sin(phi) * r
yield [radius * x, radius * y, radius * z]
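# Example (illustrative): 100 pseudo-equidistributed viewpoints on a sphere of radius 600,
# e.g. to render an object from many camera poses:
#   viewpoints = list(fibonacci_sphere(radius=600., samples=100, randomize=False))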
# ==============================================================================
# Class Definitions
# ==============================================================================
class Mesh():
"""
Simplified object defining a 3D mesh.
Snippet by:
- <NAME> (https://github.com/wadimkehl/ssd-6d/blob/master/rendering)
"""
def __init__(self, vertices, faces, vertex_colors=None,
attribute_position_name='a_position', attribute_color_name='a_color'):
self.vertices = vertices
self.faces = faces
self.vertex_colors = vertex_colors
# Collate vertex data (postion and opt. color):
vertices_type = [(attribute_position_name, np.float32, 3)]
vertex_data = np.asarray(vertices)
if vertex_colors is not None:
vertices_type += [(attribute_color_name, np.float32, 3)]
vertex_data = np.asarray(list(zip(vertices, vertex_colors)), vertices_type)
# Buffers
self.vertex_buffer = gloo.VertexBuffer(vertex_data)
self.index_buffer = gloo.IndexBuffer(faces.flatten().astype(np.uint32))
@staticmethod
def from_ply(filename, default_color=None):
# Read .ply file:
ply_data = PlyData.read(filename)
faces = np.vstack(ply_data['face'].data['vertex_indices'])
vertices = np.stack(
[ply_data['vertex'].data['x'], ply_data['vertex'].data['y'], ply_data['vertex'].data['z']],
axis=-1).astype(np.float32)
if 'blue' in ply_data['vertex']._property_lookup: # has colors
vertex_colors = np.stack(
[ply_data['vertex'].data['blue'], ply_data['vertex'].data['green'], ply_data['vertex'].data['red']],
axis=-1).astype(np.float32) / 255.
elif default_color is not None:
vertex_colors = np.tile(default_color, [vertices.shape[0], 1])
else:
vertex_colors = None
mesh = Mesh(vertices, faces, vertex_colors)
return mesh
class Camera():
"""
Object containing the intrinsic parameters of a camera
"""
def __init__(self, fx, fy, cx, cy, image_size, skew=0):
"""
Initialize the camera.
:param fx: Horizontal focal length (in px)
:param fy: Vertical focal length (in px)
:param cx: Horizontal principal point offset (in px)
:param cy: Vertical principal point offset (in px)
:param image_size: Canvas/image size (in px)
:param skew: (opt.) Axis skew factor
"""
# Camera matrix:
self.K = np.identity(3)
self.K[0, 0] = fx
self.K[1, 1] = fy
self.K[0, 2] = cx
self.K[1, 2] = cy
self.K[0, 1] = skew
self.image_size = image_size
@staticmethod
def look_at(camera_position, target_position, roll_angle=0):
"""
Return the rotation matrix so that the camera faces the target.
Snippet by <NAME> (https://github.com/wadimkehl/ssd-6d/blob/master/rendering)
:param camera_position: Camera position/translation
:param target_position: Target position
:param roll_angle: Roll angle (in degrees)
:return: 4x4 transformation matrix
"""
eye_direction = target_position - camera_position
# Compute what is the "up" vector of the camera:
if eye_direction[0] == 0 and eye_direction[1] == 0 and eye_direction[2] != 0:
up = [-1, 0, 0]
else:
up = [0, 0, 1]
# Compute rotation matrix:
rotation_matrix = np.zeros((3, 3))
rotation_matrix[:, 2] = -eye_direction / np.linalg.norm(eye_direction) # View direction towards origin
rotation_matrix[:, 0] = np.cross(rotation_matrix[:, 2], up) # Camera-Right
rotation_matrix[:, 0] /= np.linalg.norm(rotation_matrix[:, 0])
rotation_matrix[:, 1] = np.cross(rotation_matrix[:, 2], rotation_matrix[:, 0]) # Camera-Down
rotation_matrix = rotation_matrix.T
# Apply roll rotation using Rodrigues' formula + set position accordingly:
rodriguez = np.asarray([0, 0, 1]) * (roll_angle * np.pi / 180.0)
angle_axis = scipy.linalg.expm(np.cross(np.eye(3), rodriguez))
        rotation_matrix = np.dot(angle_axis, rotation_matrix)
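        # Reconstructed tail (assumption): pack the world-to-camera rotation into the 4x4
        # transformation matrix announced in the docstring, with translation -R.C so that the
        # camera sits at `camera_position` in world coordinates.
        transform = np.identity(4)
        transform[:3, :3] = rotation_matrix
        transform[:3, 3] = -np.dot(rotation_matrix, camera_position)
        return transform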
from equadratures.parameter import Parameter
from equadratures.poly import Poly
from equadratures.basis import Basis
from equadratures.scalers import scaler_minmax, scaler_meanvar, scaler_custom
import equadratures.plot as plot
import numpy as np
import scipy
import scipy.io
from scipy.linalg import orth, sqrtm
from scipy.spatial import ConvexHull
from scipy.special import comb
from scipy.optimize import linprog
import warnings
class Subspaces(object):
""" This class defines a subspaces object. It can be used for polynomial-based subspace dimension reduction.
Parameters
----------
method : str
The method to be used for subspace-based dimension reduction. Two options:
- ``active-subspace``, which uses ideas in [1] and [2] to compute a dimension-reducing subspace with a global polynomial approximant. Gradients evaluations of the polynomial approximation are used to compute the averaged outer product of the gradient covariance matrix. The polynomial approximation in the original full-space can be provided via ``full_space_poly``. Otherwise, it is fit internally to the data provided via ``sample_points`` and ``sample_outputs``.
- ``variable-projection`` [3], where a Gauss-Newton optimisation problem is solved to compute both the polynomial coefficients and its subspace, with the data provided via ``sample_points`` and ``sample_outputs``.
full_space_poly : Poly, optional
An instance of Poly fitted to the full-space data, to use for the AS computation.
sample_points : numpy.ndarray, optional
Array with shape (number_of_observations, dimensions) that corresponds to a set of sample points over the parameter space.
sample_outputs : numpy.ndarray, optional
Array with shape (number_of_observations, 1) that corresponds to model evaluations at the sample points.
subspace_dimension : int, optional
The dimension of the *active* subspace.
param_args : dict, optional
Arguments passed to parameters of the AS polynomial. (see :class:`~equadratures.parameter.Parameter`)
poly_args : dict , optional
Arguments passed to constructing polynomial used for AS computation. (see :class:`~equadratures.poly.Poly`)
dr_args : dict, optional
Arguments passed to customise the VP optimiser. See documentation for :meth:`~equadratures.subspaces.Subspaces._get_variable_projection` in source.
Examples
--------
Obtaining a 2D subspace via active subspaces on user data
>>> mysubspace = Subspaces(method='active-subspace', sample_points=X, sample_outputs=Y)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via active subspaces with a Poly object (remember to call set_model() on Poly first)
>>> mysubspace = Subspaces(method='active-subspace', full_space_poly=my_poly)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via variable projection on user data
>>> mysubspace = Subspaces(method='variable-projection', sample_points=X, sample_outputs=Y)
>>> W = mysubspace.get_subspace()[:, :2]
References
----------
1. <NAME>., (2015) Active Subspaces: Emerging Ideas for Dimension Reduction in Parameter Studies. SIAM Spotlights.
2. <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2018) Turbomachinery Active Subspace Performance Maps. Journal of Turbomachinery, 140(4), 041003. `Paper <http://turbomachinery.asmedigitalcollection.asme.org/article.aspx?articleid=2668256>`__.
3. <NAME>., <NAME>., (2018) Data-driven Polynomial Ridge Approximation Using Variable Projection. SIAM Journal of Scientific Computing, 40(3), A1566-A1589. `Paper <https://epubs.siam.org/doi/abs/10.1137/17M1117690>`__.
"""
def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None,
subspace_dimension=2, polynomial_degree=2, param_args=None, poly_args=None, dr_args=None):
self.full_space_poly = full_space_poly
self.sample_points = sample_points
self.Y = None # for the zonotope vertices
self.sample_outputs = sample_outputs
self.method = method
self.subspace_dimension = subspace_dimension
self.polynomial_degree = polynomial_degree
my_poly_args = {'method': 'least-squares', 'solver_args': {}}
if poly_args is not None:
my_poly_args.update(poly_args)
self.poly_args = my_poly_args
my_param_args = {'distribution': 'uniform', 'order': self.polynomial_degree, 'lower': -1, 'upper': 1}
if param_args is not None:
my_param_args.update(param_args)
        # I suppose we can detect if lower and upper are present to decide between these categories?
bounded_distrs = ['analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian', 'uniform']
unbounded_distrs = ['gaussian', 'normal', 'gumbel', 'logistic', 'students-t', 'studentst']
semi_bounded_distrs = ['chi', 'chi-squared', 'exponential', 'gamma', 'lognormal', 'log-normal', 'pareto', 'rayleigh', 'weibull']
if dr_args is not None:
if 'standardize' in dr_args:
dr_args['standardise'] = dr_args['standardize']
if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
self.method = 'active-subspace'
if dr_args is not None:
self.standardise = getattr(dr_args, 'standardise', True)
else:
self.standardise = True
if self.full_space_poly is None:
# user provided input/output data
N, d = self.sample_points.shape
if self.standardise:
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
param = Parameter(**my_param_args)
if param_args is not None:
if (hasattr(dr_args, 'lower') or hasattr(dr_args, 'upper')) and self.standardise:
warnings.warn('Points standardised but parameter range provided. Overriding default ([-1,1])...',
UserWarning)
myparameters = [param for _ in range(d)]
mybasis = Basis("total-order")
mypoly = Poly(myparameters, mybasis, sampling_args={'sample-points': self.std_sample_points,
'sample-outputs': self.sample_outputs},
**my_poly_args)
mypoly.set_model()
self.full_space_poly = mypoly
else:
# User provided polynomial
# Standardise according to distribution specified. Only care about the scaling (not shift)
# TODO: user provided callable with parameters?
user_params = self.full_space_poly.parameters
d = len(user_params)
self.sample_points = self.full_space_poly.get_points()
if self.standardise:
scale_factors = np.zeros(d)
centers = np.zeros(d)
for dd, p in enumerate(user_params):
if p.name.lower() in bounded_distrs:
scale_factors[dd] = (p.upper - p.lower) / 2.0
centers[dd] = (p.upper + p.lower) / 2.0
elif p.name.lower() in unbounded_distrs:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = p.mean
else:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = 0.0
self.param_scaler = scaler_custom(centers, scale_factors)
self.std_sample_points = self.param_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
if not hasattr(self.full_space_poly, 'coefficients'):
raise ValueError('Please call set_model() first on poly.')
self.sample_outputs = self.full_space_poly.get_model_evaluations()
# TODO: use dr_args for resampling of gradient points
as_args = {'grad_points': None}
if dr_args is not None:
as_args.update(dr_args)
self._get_active_subspace(**as_args)
elif self.method == 'variable-projection':
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
if dr_args is not None:
vp_args = {'gamma':0.1, 'beta':1e-4, 'tol':1e-7, 'maxiter':1000, 'U0':None, 'verbose':False}
vp_args.update(dr_args)
self._get_variable_projection(**vp_args)
else:
self._get_variable_projection()
def get_subspace_polynomial(self):
""" Returns a polynomial defined over the dimension reducing subspace.
Returns
-------
Poly
A Poly object that defines a polynomial over the subspace. The distribution of parameters
is assumed to be uniform and the maximum and minimum bounds for each parameter are defined by the maximum
and minimum values of the project samples.
"""
# TODO: Try correlated poly here
active_subspace = self._subspace[:, 0:self.subspace_dimension]
projected_points = np.dot(self.std_sample_points, active_subspace)
myparameters = []
for i in range(0, self.subspace_dimension):
param = Parameter(distribution='uniform', lower=np.min(projected_points[:, i]),
upper=np.max(projected_points[:, i]), order=self.polynomial_degree)
myparameters.append(param)
mybasis = Basis("total-order")
subspacepoly = Poly(myparameters, mybasis, method='least-squares',
sampling_args={'sample-points': projected_points,
'sample-outputs': self.sample_outputs})
subspacepoly.set_model()
return subspacepoly
def get_eigenvalues(self):
""" Returns the eigenvalues of the dimension reducing subspace. Note: this option is
currently only valid for method ``active-subspace``.
Returns
-------
numpy.ndarray
Array of shape (dimensions,) corresponding to the eigenvalues of the above mentioned covariance matrix.
"""
if self.method == 'active-subspace':
return self._eigenvalues
else:
print('Only the active-subspace method yields eigenvalues.')
def get_subspace(self):
""" Returns the dimension reducing subspace.
Returns
-------
numpy.ndarray
Array of shape (dimensions, dimensions) where the first ``subspace_dimension`` columns
contain the dimension reducing subspace, while the remaining columns contain its orthogonal complement.
"""
return self._subspace
def _get_active_subspace(self, grad_points=None, **kwargs):
""" Private method to compute active subspaces. """
if grad_points is None:
X = self.full_space_poly.get_points()
else:
if hasattr(self, 'data_scaler'):
X = self.data_scaler.transform(grad_points)
else:
# Either no standardisation, or user provided poly + param scaling
X = grad_points.copy()
M, d = X.shape
if d != self.sample_points.shape[1]:
raise ValueError('In _get_active_subspace: dimensions of gradient evaluation points mismatched with input dimension!')
alpha = 2.0
num_grad_lb = alpha * self.subspace_dimension * np.log(d)
if M < num_grad_lb:
warnings.warn('Number of gradient evaluation points is likely to be insufficient. Consider resampling!', UserWarning)
polygrad = self.full_space_poly.get_polyfit_grad(X)
if hasattr(self, 'param_scaler'):
# Evaluate gradient in transformed coordinate space
polygrad = self.param_scaler.div[:, np.newaxis] * polygrad
weights = np.ones((M, 1)) / M
R = polygrad.transpose() * weights
C = np.dot(polygrad, R )
# Compute eigendecomposition!
e, W = np.linalg.eigh(C)
idx = e.argsort()[::-1]
eigs = e[idx]
eigVecs = W[:, idx]
if hasattr(self, 'data_scaler'):
scale_factors = 2.0 / (self.data_scaler.Xmax - self.data_scaler.Xmin)
eigVecs = scale_factors[:, np.newaxis] * eigVecs
eigVecs = np.linalg.qr(eigVecs)[0]
self._subspace = eigVecs
self._eigenvalues = eigs
def _get_variable_projection(self, gamma=0.1, beta=1e-4, tol=1e-7, maxiter=1000, U0=None, verbose=False):
""" Private method to obtain an active subspace in inputs design space via variable projection.
Note: It may help to standardize outputs to zero mean and unit variance
Parameters
----------
gamma : float, optional
Step length reduction factor (0,1).
beta : float, optional
Armijo tolerance for backtracking line search (0,1).
tol : float, optional
Tolerance for convergence, measured in the norm of residual over norm of f.
maxiter : int, optional
Maximum number of optimisation iterations.
U0 : numpy.ndarray, optional
Initial guess for active subspace.
verbose : bool, optional
Set to ``True`` for debug messages.
"""
# NOTE: How do we know these are the best values of gamma and beta?
M, m = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
U, _ = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points,U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
# Construct the affine transformation
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
# Construct the Vandermonde matrix step 6
V, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V,coeff)
# R = np.linalg.norm(res)
# TODO: convergence criterion??
for iteration in range(0,maxiter):
# Construct the Jacobian step 9
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
# Calculate the gradient of Jacobian (step 10)
G = np.zeros((m, self.subspace_dimension))
# NOTE: Can be vectorised
for i in range(0, M):
G += res[i]*J[i, :, :]
# conduct the SVD for J_vec
vec_J = np.reshape(J, (M, m*self.subspace_dimension))
Y, S, Z = np.linalg.svd(vec_J,full_matrices=False) # step 11
# obtain delta
delta = np.dot(Y[:,:-self.subspace_dimension**2].T, res)
delta = np.dot(np.diag(1/S[:-self.subspace_dimension**2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension**2,:].T, delta).reshape(U.shape)
# carry out Gauss-Newton step
vec_delta=delta.flatten() # step 12
# vectorize G step 13
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
# check alpha step 14
if alpha >= 0:
delta = -G
alpha = -norm_G
# SVD on delta step 17
Y, S, Z = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U,Z.T)
t = 1
for iter2 in range(0,20):
U_new = np.dot(UZ, np.diag(np.cos(S*t))) + np.dot(Y, np.diag(np.sin(S*t)))#step 19
U_new = orth(U_new)
# Update the values with the new U matrix
y = np.dot(self.std_sample_points, U_new)
minmax[0,:] = np.amin(y, axis=0)
minmax[1,:] = np.amax(y, axis=0)
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
V_new, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new,coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res)+alpha*beta*t or t < 1e-10: # step 21
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
# coeff = coeff_new
V_plus = V_plus_new
res = res_new
# R = R_new
if dist_change < tol:
if verbose:
print("VP finished with %d iterations" % iteration)
break
if iteration == maxiter - 1 and verbose:
print("VP finished with %d iterations" % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
def get_zonotope_vertices(self, num_samples=10000, max_count=100000):
""" Returns the vertices of the zonotope -- the projection of the high-dimensional space over the computed
subspace.
Parameters
----------
num_samples : int, optional
Number of samples per iteration to check.
max_count : int, optional
Maximum number of iteration.
Returns
-------
numpy.ndarray
Array of shape (number of vertices, ``subspace_dimension``).
Note
----
This routine has been adapted from <NAME>'s zonotope_vertices() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
m = self._subspace.shape[0]
n = self.subspace_dimension
W = self._subspace[:, :n]
if n == 1:
y0 = np.dot(W.T, np.sign(W))[0]
if y0 < -y0:
yl, yu = y0, -y0
xl, xu = np.sign(W), -np.sign(W)
else:
yl, yu = -y0, y0
xl, xu = -np.sign(W), np.sign(W)
Y = np.array([yl, yu]).reshape((2,1))
X = np.vstack((xl.reshape((1,m)), xu.reshape((1,m))))
self.Y = Y
return Y
else:
total_vertices = 0
for i in range(n):
total_vertices += comb(m-1,i)
total_vertices = int(2*total_vertices)
Z = np.random.normal(size=(num_samples, n))
X = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X = get_unique_rows(np.vstack((X, -X)))
N = X.shape[0]
count = 0
while N < total_vertices:
Z = np.random.normal(size=(num_samples, n))
X0 = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X0 = get_unique_rows(np.vstack((X0, -X0)))
X = get_unique_rows(np.vstack((X, X0)))
N = X.shape[0]
count += 1
if count > max_count:
break
num_vertices = X.shape[0]
if total_vertices > num_vertices:
print('Warning: {} of {} vertices found.'.format(num_vertices, total_vertices))
Y = np.dot(X, W)
self.Y = Y.reshape((num_vertices, n))
return self.Y
def get_linear_inequalities(self):
""" Returns the linear inequalities defining the zonotope vertices, i.e., Ax<=b.
Returns
-------
tuple
Tuple (A,b), containing the numpy.ndarray's A and b; where A is the matrix for setting the linear inequalities,
and b is the right-hand-side vector for setting the linear inequalities.
"""
if self.Y is None:
self.Y = self.get_zonotope_vertices()
n = self.Y.shape[1]
if n == 1:
A = np.array([[1],[-1]])
b = np.array([[max(self.Y)],[min(self.Y)]])
return A, b
else:
convexHull = ConvexHull(self.Y)
A = convexHull.equations[:,:n]
b = -convexHull.equations[:,n]
return A, b
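    # Example (illustrative): with A, b = mysubspace.get_linear_inequalities(), a reduced-space
    # point y lies inside the zonotope when np.all(np.dot(A, y) <= b).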
def get_samples_constraining_active_coordinates(self, inactive_samples, active_coordinates):
""" A hit and run type sampling strategy for generating samples at a given coordinate in the active subspace
by varying its coordinates along the inactive subspace.
Parameters
----------
inactive_samples : int
The number of inactive samples required.
        active_coordinates : numpy.ndarray
The active subspace coordinates.
Returns
-------
numpy.ndarray
Array containing the full-space coordinates.
Note
----
This routine has been adapted from <NAME>'s hit_and_run() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
y = active_coordinates
N = inactive_samples
W1 = self._subspace[:, :self.subspace_dimension]
W2 = self._subspace[:, self.subspace_dimension:]
m, n = W1.shape
s = np.dot(W1, y).reshape((m, 1))
normW2 = np.sqrt(np.sum(np.power(W2, 2), axis=1)).reshape((m, 1))
A = np.hstack((np.vstack((W2, -W2.copy())), np.vstack((normW2, normW2.copy()))))
b = np.vstack((1 - s, 1 + s)).reshape((2 * m, 1))
c = np.zeros((m - n + 1, 1))
c[-1] = -1.0
# print()
zc = linear_program_ineq(c, -A, -b)
z0 = zc[:-1].reshape((m - n, 1))
# define the polytope A >= b
s = np.dot(W1, y).reshape((m, 1))
A = np.vstack((W2, -W2))
b = np.vstack((-1 - s, -1 + s)).reshape((2 * m, 1))
# tolerance
ztol = 1e-6
eps0 = ztol / 4.0
Z = np.zeros((N, m - n))
for i in range(N):
# random direction
bad_dir = True
count, maxcount = 0, 50
while bad_dir:
d = np.random.normal(size=(m - n, 1))
bad_dir = np.any(np.dot(A, z0 + eps0 * d) <= b)
count += 1
if count >= maxcount:
Z[i:, :] = np.tile(z0, (1, N - i)).transpose()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
# find constraints that impose lower and upper bounds on eps
f, g = b - np.dot(A, z0), np.dot(A, d)
# find an upper bound on the step
            min_ind = np.logical_and(g <= 0, f < -np.sqrt(np.finfo(float).eps))
eps_max = np.amin(f[min_ind] / g[min_ind])
# find a lower bound on the step
            max_ind = np.logical_and(g > 0, f < -np.sqrt(np.finfo(float).eps))
eps_min = np.amax(f[max_ind] / g[max_ind])
# randomly sample eps
eps1 = np.random.uniform(eps_min, eps_max)
# take a step along d
z1 = z0 + eps1 * d
Z[i, :] = z1.reshape((m - n,))
# update temp var
z0 = z1.copy()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
def plot_sufficient_summary(self, ax=None, X_test=None, y_test=None, show=True, poly=True, uncertainty=False, legend=False, scatter_kwargs={}, plot_kwargs={}):
""" Generates a sufficient summary plot for 1D or 2D polynomial ridge approximations.
See :meth:`~equadratures.plot.plot_sufficient_summary` for full description. """
return plot.plot_sufficient_summary(self, ax, X_test, y_test, show, poly, uncertainty, legend, scatter_kwargs, plot_kwargs)
def plot_2D_contour_zonotope(self, mysubspace, minmax=[- 3.5, 3.5], grid_pts=180, show=True, ax=None):
""" Generates a 2D contour plot of the polynomial ridge approximation.
See :meth:`~equadratures.plot.plot_2D_contour_zonotope` for full description. """
return plot.plot_2D_contour_zonotope(self,minmax,grid_pts,show,ax)
def plot_samples_from_second_subspace_over_first(self, mysubspace_2, axs=None, no_of_samples=500, minmax=[- 3.5, 3.5], grid_pts=180, show=True):
"""
Generates a zonotope plot where samples from the second subspace are projected over the first.
See :meth:`~equadratures.plot.plot_samples_from_second_subspace_over_first` for full description.
"""
return plot.plot_samples_from_second_subspace_over_first(self,mysubspace_2, axs, no_of_samples, minmax, grid_pts, show)
def vandermonde(eta, p):
# TODO: Try using a "correlated" basis here?
_, n = eta.shape
listing = []
for i in range(0, n):
listing.append(p)
Object=Basis('total-order',listing)
# Establish n Parameter objects
params = []
P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
for i in range(0, n):
params.append(P)
# Use the params list to establish the Poly object
poly_obj = Poly(params, Object, method='least-squares')
V = poly_obj.get_poly(eta)
V = V.T
return V, poly_obj
def vector_AS(list_of_polys, R = None, alpha=None, k=None, samples=None, bootstrap=False, bs_trials = 50
, J = None, save_path = None):
# Find AS directions to vector val func
# analogous to computeActiveSubspace
# Since we are dealing with *one* vector val func we should have just one input space
# Take the first of the polys.
poly = list_of_polys[0]
if samples is None:
d = poly.dimensions
if alpha is None:
alpha = 4
if k is None or k > d:
k = d
M = int(alpha * k * np.log(d))
X = np.zeros((M, d))
for j in range(0, d):
X[:, j] = np.reshape(poly.parameters[j].getSamples(M), M)
else:
X = samples
M, d = X.shape
n = len(list_of_polys) # number of outputs
if R is None:
R = np.eye(n)
elif len(R.shape) == 1:
R = np.diag(R)
if J is None:
J = jacobian_vec(list_of_polys,X)
if not(save_path is None):
np.save(save_path,J)
J_new = np.matmul(sqrtm(R), np.transpose(J,[2,0,1]))
JtJ = np.matmul(np.transpose(J_new,[0,2,1]), J_new)
H = np.mean(JtJ,axis=0)
# Compute P_r by solving generalized eigenvalue problem...
# Assume sigma = identity for now
e, W = np.linalg.eigh(H)
eigs = np.flipud(e)
eigVecs = np.fliplr(W)
if bootstrap:
all_bs_eigs = np.zeros((bs_trials, d))
all_bs_W = []
for t in range(bs_trials):
print("Starting bootstrap trial %d"%t)
bs_samples = X[np.random.randint(0,M,size=M), :]
J_bs = jacobian_vec(list_of_polys, bs_samples)
J_new_bs = np.matmul(sqrtm(R), np.transpose(J_bs,[2,0,1]))
JtJ_bs = np.matmul(np.transpose(J_new_bs, [0, 2, 1]), J_new_bs)
H_bs = np.mean(JtJ_bs, axis=0)
# Compute P_r by solving generalized eigenvalue problem...
# Assume sigma = identity for now
e_bs, W_bs = np.linalg.eigh(H_bs)
all_bs_eigs[t,:] = np.flipud(e_bs)
eigVecs_bs = np.fliplr(W_bs)
all_bs_W.append(eigVecs_bs)
eigs_bs_lower = np.min(all_bs_eigs, axis = 0)
eigs_bs_upper = np.max(all_bs_eigs, axis = 0)
return eigs,eigVecs,eigs_bs_lower,eigs_bs_upper, all_bs_W
else:
return eigs,eigVecs
def jacobian_vp(V, V_plus, U, f, Polybasis, eta, minmax, X):
M, N = V.shape
m, n = U.shape
Gradient = Polybasis.get_poly_grad(eta)
sub = (minmax[1,:]-minmax[0,:]).T
vectord = np.reshape(2.0/sub,(n,1))
# Initialize the tensor
J = np.zeros((M, m, n))
# Obtain the derivative of this tensor
dV = np.zeros((m, n, M, N))
for l in range(0, n):
for j in range(0, N):
current = Gradient[l].T
if n == 1:
current = Gradient.T
            dV[:,l,:,j] = float(vectord[l])*(X.T*current[:,j])  # float() replaces the removed np.asscalar
# Get the P matrix
P = np.identity(M)-np.matmul(V,V_plus)
V_minus = scipy.linalg.pinv(V)
# Calculate entries for the tensor
for j in range(0,m):
for k in range(0,n):
temp1 = np.linalg.multi_dot([P,dV[j,k,:,:],V_minus])
J[:, j, k]=(-np.matmul((temp1+temp1.T),f)).reshape((M,)) # Eqn 15
return J
def jacobian_vec(list_of_poly, X):
m = len(list_of_poly)
[N, d] = X.shape
J = np.zeros((m, d, N))
for p in range(len(list_of_poly)):
J[p, :, :] = list_of_poly[p].get_polyfit_grad(X)
return J
def subspace_dist(U, V):
if len(U.shape) == 1:
return np.linalg.norm(np.outer(U, U) - np.outer(V, V), ord=2)
else:
        return np.linalg.norm(np.dot(U, U.T) - np.dot(V, V.T), ord=2)
#! /usr/bin/python3.7
# -- coding: utf-8 -- **
### Here are a set of functions used in elec_pipe
### and a set of qthread class for elec_main_gui
import sys
import os
import re
import math
import numpy as np
from numpy import ndarray
import nibabel as nib
from scipy import ndimage
from sklearn.mixture import GaussianMixture as GMM
from sklearn.linear_model import LinearRegression, Lasso
from PyQt5.QtCore import QThread, pyqtSignal
# import matplotlib
# matplotlib.use("Qt5Agg")
# from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.figure import Figure
# from matplotlib import pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D, art3d
# import electrode
CMD_Hough3D = './hough-3d-lines/hough3dlines'
def run(cmd):
"""
Print the command.
Execute a command string on the shell (on bash).
Parameters
----------
cmd : str
Command to be sent to the shell.
"""
print(f"Running shell command: {cmd}")
os.system(cmd)
print(f"Done!\n")
def align(inp, ref, xfm=None, out=None, dof=12, searchrad=True, bins=256, interp=None, cost="mutualinfo", sch=None, wmseg=None, init=None, finesearch=None,):
"""Aligns two images using FSLs flirt function and stores the transform between them
Parameters
----------
inp : str
path to input image being altered to align with the reference image as a nifti image file
ref : str
path to reference image being aligned to as a nifti image file
xfm : str, optional
where to save the 4x4 affine matrix containing the transform between two images, by default None
out : str, optional
determines whether the image will be automatically aligned and where the resulting image will be saved, by default None
dof : int, optional
        the number of degrees of freedom of the alignment, by default 12
searchrad : bool, optional
whether to use the predefined searchradius parameter (180 degree sweep in x, y, and z), by default True
bins : int, optional
number of histogram bins, by default 256
interp : str, optional
interpolation method to be used (trilinear,nearestneighbour,sinc,spline), by default None
cost : str, optional
cost function to be used in alignment (mutualinfo, corratio, normcorr, normmi, leastsq, labeldiff, or bbr), by default "mutualinfo"
sch : str, optional
the optional FLIRT schedule, by default None
wmseg : str, optional
an optional white-matter segmentation for bbr, by default None
init : str, optional
an initial guess of an alignment in the form of the path to a matrix file, by default None
finesearch : int, optional
angle in degrees, by default None
"""
cmd = f"flirt -in {inp} -ref {ref}"
if xfm is not None:
cmd += f" -omat {xfm}"
if out is not None:
cmd += f" -out {out}"
if dof is not None:
cmd += f" -dof {dof}"
if bins is not None:
cmd += f" -bins {bins}"
if interp is not None:
cmd += f" -interp {interp}"
if cost is not None:
cmd += f" -cost {cost}"
if searchrad is not None:
cmd += " -searchrx -180 180 -searchry -180 180 " + "-searchrz -180 180"
if sch is not None:
cmd += f" -schedule {sch}"
if wmseg is not None:
cmd += f" -wmseg {wmseg}"
if init is not None:
cmd += f" -init {init}"
run(cmd)
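# Example (illustrative, hypothetical file names): affine registration of a post-implant CT
# to the patient's T1 image, saving both the transform matrix and the resampled volume:
#   align("patientCT.nii.gz", "patientT1.nii.gz", xfm="ct2t1.mat", out="patientCT_Reg.nii.gz", dof=12)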
def align_nonlinear(inp, ref, xfm, out, warp, ref_mask=None, in_mask=None, config=None):
"""Aligns two images using nonlinear methods and stores the transform between them using fnirt
Parameters
----------
inp : str
path to the input image
ref : str
path to the reference image that the input will be aligned to
xfm : str
path to the file containing the affine transform matrix created by align()
out : str
path for the desired output image
warp : str
the path to store the output file containing the nonlinear warp coefficients/fields
ref_mask : str, optional
path to the reference image brain_mask, by default None
in_mask : str, optional
path for the file with mask in input image space, by default None
config : str, optional
path to the config file specifying command line arguments, by default None
"""
cmd = f"fnirt --in={inp} --ref={ref} --aff={xfm} --iout={out} --cout={warp} --warpres=8,8,8"
if ref_mask is not None:
cmd += f" --refmask={ref_mask} --applyrefmask=1"
if in_mask is not None:
cmd += f" --inmask={in_mask} --applyinmask=1"
if config is not None:
cmd += f" --config={config}"
run(cmd)
def dataExtraction(intraFile, thre=0.2):
rawData = nib.load(intraFile).get_fdata()
maxVal = np.amax(rawData)
# print(f"maxVal={maxVal}")
thre = maxVal * thre
threData = np.copy(rawData)
threData[threData < thre] = 0
xs, ys, zs = np.where(threData != 0)
return xs, ys, zs
def trackRecognition(patient, cmd_hough3d, CTresult_dir, intraFile, thre=0.2):
xs, ys, zs = dataExtraction(intraFile, thre)
X = np.transpose(np.array((xs, ys, zs)))
# print(X.shape)
fname = f"{CTresult_dir}/{patient}_3dPointClouds.dat"
np.savetxt(fname, X, fmt='%.4f', delimiter=',', newline='\n', header='point clouds', footer='', comments='# ', encoding=None)
cmd_hough = f"{cmd_hough3d} -o {CTresult_dir}/{patient}.txt -minvotes 5 {fname}"
run(cmd=cmd_hough)
return xs, ys, zs
def locateLine(row, info):
ax = info[row][1]
ay = info[row][2]
az = info[row][3]
bx = info[row][4]
by = info[row][5]
bz = info[row][6]
axx = np.linspace(ax, ax+bx*50, 50)
ayy = np.linspace(ay, ay+by*50, 50)
azz = np.linspace(az, az+bz*50, 50)
return axx, ayy, azz
class Preprocess_thread(QThread):
finished = pyqtSignal()
def __init__(self):
super(Preprocess_thread, self).__init__()
def run(self): # erode, skull, intra_save
mask_file = os.path.join(f"{self.directory_surf}/mri", f"mask.mgz")
img_mask = nib.load(mask_file)
data_mask = img_mask.get_fdata()
data_mask_ero = ndimage.morphology.binary_erosion(data_mask, iterations=self.ero_itr)
CTreg_file = os.path.join(self.directory_ct, f"{self.patient}CT_Reg.nii.gz")
img_ct = nib.load(CTreg_file)
data_ct = img_ct.get_fdata()
maxVal = np.amax(data_ct)
self.thre = self.thre / 100
thre = maxVal * self.thre
data_ct[data_mask_ero == 0] = 0
img1 = nib.Nifti1Image(data_ct, img_ct.affine)
intra_file1 = os.path.join(self.directory_ct, f"{self.patient}CT_intra.nii.gz")
nib.save(img1, intra_file1)
data_ct[data_ct < thre] = 0
img0 = nib.Nifti1Image(data_ct, img_ct.affine)
intra_file = os.path.join(self.directory_ct, f"{self.patient}CT_intracranial_{self.thre}_{self.K}_{self.ero_itr}.nii.gz")
nib.save(img0, intra_file)
self.finished.emit()
class PreprocessResult_thread(QThread):
send_axes = pyqtSignal(ndarray)
def __init__(self):
super(PreprocessResult_thread, self).__init__()
def run(self):
intra_file = self.CTintra_file
xs, ys, zs = dataExtraction(intraFile=intra_file, thre=self.thre)
pointsArray = np.transpose(np.vstack((xs, ys, zs)))
self.send_axes.emit(pointsArray)
class GenerateLabel_thread(QThread):
finished = pyqtSignal(int)
def __init__(self):
super(GenerateLabel_thread, self).__init__()
def run(self):
# process 3d line hough transform
hough_file = f"{self.directory_ct}/{self.patient}.txt"
if not os.path.exists(hough_file):
xs, ys, zs = trackRecognition(patient=self.patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=self.directory_ct, intraFile=self.intra_file, thre=0)
else: # temporarily
# xs, ys, zs = utils.trackRecognition(patient=patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=CTresult_dir, intraFile=intra_file, thre=Thre)
xs, ys, zs = dataExtraction(intraFile=self.intra_file, thre=0)
pass
# read detected lines' info
elec_track = []
with open(hough_file, 'r') as f:
for line in f.readlines():
a = re.findall(r"\d+\.?\d*", line)
for i in range(len(a)):
a[i] = float(a[i])
elec_track.append(a)
# print(f"{len(elec_track)} tracks has been detected!\n")
# print(elec_track)
elec_track = np.array(elec_track)
K_check = elec_track.shape[0]
if K_check < self.K:
self.finished.emit(1)
else: # if K_check != K:
print(f"Warning: {self.K} electrodes implanted, but {K_check} has been clustered by Hough!")
# sys.exit()
# process a gaussian mixture model for bug fixing
centroids = np.array(elec_track[0:self.K, 1:4])
# print(centroids)
X = np.transpose(np.vstack((xs, ys, zs)))
gmm = GMM(n_components=self.K, covariance_type='full',means_init=centroids, random_state=None).fit(X)
labels = gmm.predict(X)
# print(labels)
Labels = np.zeros((256, 256, 256)) # labeled space
for i in range(self.K):
ind = np.where(labels == i)
Labels[xs[ind], ys[ind], zs[ind]] = i + 1
np.save(os.path.join(self.directory_ct, f"{self.patient}_labels.npy"), Labels, allow_pickle=True, fix_imports=True)
self.finished.emit(0)
# class LabelResult_thread(QThread):
# def __init__(self):
# super(LabelResult_thread, self).__init__()
# def run(self):
# print('Yaah!')
class ContactSegment_thread(QThread):
finished = pyqtSignal()
def __init__(self):
super(ContactSegment_thread, self).__init__()
def run(self):
print('Yaah!')
for i in range(self.K):
iLabel = i + 1
# xxx = electrode.ElectrodeSeg(filePath=self.directory_labels, patName=self.patName, iLabel=iLabel, numMax=self.numMax, diameterSize=self.diameterSize, spacing=self.spacing, gap=self.gap)
xxx = ElectrodeSeg(filePath=self.directory_labels, patName=self.patName, iLabel=iLabel, numMax=self.numMax, diameterSize=self.diameterSize, spacing=self.spacing, gap=self.gap)
xxx.pipeline()
print(xxx.elecPos)
self.finished.emit()
def savenpy(filePath, patientName):
dir = f"{filePath}/{patientName}_result"
# dir1 = f"{filePath}/{patientName}_data"
elec_dict = {}
for root, dirs, files in os.walk(dir, topdown=True):
# print('files:', files)
if '.DS_Store' in files:
files.remove('.DS_Store')
if 'chnXyzDict.npy' in files:
files.remove('chnXyzDict.npy')
for file in files:
elec_name = file.split('.')[0]
elec_info = np.loadtxt(os.path.join(root, file))
elec_info = elec_info # [1:, :] # [:,np.array([2,1,0])]
elec_dict[elec_name] = elec_info
np.save(f"{filePath}/chnXyzDict.npy", elec_dict)
def lookupTable(subdir, patient, ctdir, elec_label):
annot_dir = f"{subdir}/subjects/{patient}/mri/aparc.a2009s+aseg.mgz"
lookup_table = f"{subdir}/FreeSurferColorLUT.txt"
annot_img = nib.load(annot_dir).get_fdata()
elecs_file = f"{ctdir}/{patient}_result/{elec_label}.txt"
elecs_xyz = np.loadtxt(elecs_file, dtype='float', comments='#')
elecs_xyz = elecs_xyz[:, [0, 2, 1]]
elecs_xyz[:, 0] = 128 - elecs_xyz[:, 0]
elecs_xyz[:, 1] = 128 - elecs_xyz[:, 1]
elecs_xyz[:, 2] = 128 + elecs_xyz[:, 2]
labels = []
for row in range(elecs_xyz.shape[0]):
x = elecs_xyz[row, 0]
y = elecs_xyz[row, 1]
z = elecs_xyz[row, 2]
x1 = int(x)
x2 = math.ceil(x)
y1 = int(y)
y2 = math.ceil(y)
z1 = int(z)
z2 = math.ceil(z)
val = [0]
val.append(annot_img[x1, y1, z1])
val.append(annot_img[x1, y1, z2])
val.append(annot_img[x1, y2, z1])
val.append(annot_img[x1, y2, z2])
val.append(annot_img[x2, y1, z1])
val.append(annot_img[x2, y1, z2])
val.append(annot_img[x2, y2, z1])
val.append(annot_img[x2, y2, z2])
val = val[1:]
labels.append(max(set(val), key = val.count))
# print(labels)
labels_name = []
for label in labels:
with open(lookup_table, 'r') as f:
lines = f.readlines()
rows = len(lines)
for row in range(rows):
line = lines[row][0: 8]
b = str(int(label))
if re.match(b, line):
# print(lines[row])
a = lines[row][len(b): -16].strip()
labels_name.append(a)
break
return labels_name
class ElectrodeSeg:
def __init__(self, filePath, patName, iLabel, numMax, diameterSize, spacing, gap):
super(ElectrodeSeg, self).__init__()
# set up input initials
self.filePath = filePath
self.patientName = patName
raw_flag = 0 # check for the filepath existance
for root, dirs, files in os.walk(self.filePath):
for filename in files:
if re.search(r'CT_intra.nii.gz', filename):
raw_flag = 1
self.rawDataPath = f"{self.filePath}/{filename}"
break
if not raw_flag:
sys.exit()
label_flag = 0
for root, dirs, files in os.walk(self.filePath):
for filename in files:
if re.search(r'_labels.npy', filename):
label_flag = 1
self.labelsPath = f"{self.filePath}/{filename}"
break
if not label_flag:
sys.exit()
self.rawData = nib.load(self.rawDataPath).get_fdata()
self.labels = np.load(self.labelsPath)
self.iLabel = iLabel
self.numMax = numMax
self.diameterSize = diameterSize
self.spacing = spacing
self.gap = gap
# some calculations to get the rest initials
self.labelValues = np.unique(self.labels)
self.numElecs = len(self.labelValues) - 1
if self.numElecs > 8: # remove 'I' from the alphabet list, a trivial custom not to name the electrode 'I'
self.alphaList = [chr(i) for i in range(65, 66+self.numElecs)]
self.alphaList.pop(8)
else:
self.alphaList = [chr(i) for i in range(65, 65+self.numElecs)]
self.iValue = self.labelValues[self.iLabel]
self.nameLabel = self.alphaList[self.iLabel-1]
data_elec = np.copy(self.labels)
data_elec[np.where(self.labels != self.iValue)] = 0 ## isolate a single cluster of voxels belonging to the ith electrode
self.xs, self.ys, self.zs = np.where(data_elec != 0)
self.pos_elec = np.transpose(np.vstack((self.xs, self.ys, self.zs))) ## positions of these voxels
### test!
data_elec1 = np.copy(self.labels)
data_elec1[np.where(self.labels == self.iValue)] = 0
self.xrest, self.yrest, self.zrest = np.where(data_elec1 != 0)
self.rawData[self.xrest, self.yrest, self.zrest] = 0
### test!
self.rawData_single = self.rawData
xmin = np.amin(self.xs)
xmax = np.amax(self.xs)
ymin = np.amin(self.ys)
ymax = np.amax(self.ys)
zmin = np.amin(self.zs)
zmax = np.amax(self.zs)
# self.rawData_single[self.xs, self.ys, self.zs] = self.rawData_single[self.xs, self.ys, self.zs] * 3
self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] = self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] * 3
self.resultPath = f"{self.filePath}/{self.patientName}_result"
if not os.path.exists(self.resultPath):
os.mkdir(self.resultPath)
self.resultFile = f"{self.resultPath}/{self.nameLabel}.txt"
self.elecPos = [0, 0, 0]
self.headStart = [0, 0, 0]
self.targetPoint = [0, 0, 0]
self.regressInfo = [0, 0, 0, 0]
def pipeline(self):
self.startPoint()
self.contactPoint(1)
self.regression()
for j in np.arange(self.numMax - 1):
# if self.rawData[int(round(self.elecPos[-1,0])), int(round(self.elecPos[-1,1])), int(round(self.elecPos[-1,2]))] == 0:
# self.elecPos = self.elecPos[0:-1, :]
# break
if int(self.elecPos[-1,0])==int(self.elecPos[-2,0]) and int(self.elecPos[-1,1])==int(self.elecPos[-2,1]) and int(self.elecPos[-1,2])==int(self.elecPos[-2,2]):
self.elecPos = self.elecPos[0:-1, :]
break
self.step()
if self.flag_step_stop:
break
self.elecPos = self.elecPos[1:, :]
# print(self.elecPos)
self.resulting()
# return self.elecPos
def resulting(self):
self.elecPos_true = np.copy(self.elecPos)
self.elecPos_true[:, 0] = 128 - self.elecPos[:, 0]
self.elecPos_true[:, 1] = 128 - self.elecPos[:, 1]
self.elecPos_true[:, 2] = self.elecPos[:, 2] - 128
self.elecPos_true = self.elecPos_true[:, [0, 2, 1]]
self.elecFilepath = os.path.join(self.filePath, f"{self.patientName}_result")
if not os.path.exists(self.elecFilepath):
os.mkdir(self.elecFilepath)
else:
self.elecFile = os.path.join(self.elecFilepath, f"{self.nameLabel}.txt")
with open(self.elecFile, "ab") as f:
f.seek(0)
f.truncate()
# f.write(b"\n")
np.savetxt(f, self.elecPos_true, fmt='%10.8f', delimiter=' ', newline='\n', header=f"{self.elecPos_true.shape[0]}")
## target point functions
def startPoint(self):
## firstly find a voxel near the target
x = [np.max(self.xs), np.min(self.xs)]
y = [np.max(self.ys), np.min(self.ys)]
z = [np.max(self.zs), np.min(self.zs)]
self.reg1 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.ys) # x-y
self.reg2 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.zs) # x-z
self.reg3 = LinearRegression().fit(X=self.ys.reshape(-1,1), y=self.zs) # y-z
coefs = [abs(self.reg1.coef_), abs(self.reg2.coef_), abs(self.reg3.coef_)]
coef_min = coefs.index(min(coefs))
if coef_min == 0:
index = [0 if self.reg2.coef_>0 else 1, 0 if self.reg3.coef_>0 else 1, 0]
elif coef_min == 1:
index = [0 if self.reg1.coef_>0 else 1, 0, 0 if self.reg3.coef_>0 else 1]
else:
index = [0, 0 if self.reg1.coef_>0 else 1, 0 if self.reg2.coef_>0 else 1]
        indexreverse = [~index[0], ~index[1], ~index[2]]  # ~0 -> -1 and ~1 -> -2, i.e. the opposite end of each 2-element list
point1 = np.array([x[index[0]], y[index[1]], z[index[2]]])
point2 = np.array([x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]])
center = 127.5 * np.ones(3)
diff1 = point1 - center
diff2 = point2 - center
headStart = point2 if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else point1
self.direction = indexreverse if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else index
## secondly specify a target voxel in label voxels
diffs = self.pos_elec - headStart
diffs2 = np.power(diffs[:,0], 2) + np.power(diffs[:,1], 2) + np.power(diffs[:,2], 2)
headPointPos = np.argmin(diffs2)
self.headStart = self.pos_elec[headPointPos, :]
def converge(self, x, y, z):
## converge to the mass center of a cluster of voxels
n = self.diameterSize
        delta = math.ceil(round((n - 1) / 2, 1))  # represents the radius of the electrode contact
## extract a cubic ROI of the raw CT data
seq_s = np.arange(x - delta, x + delta + 1)
seq_r = np.arange(y - delta, y + delta + 1)
seq_c = np.arange(z - delta, z + delta + 1)
if not ((np.array(seq_s) > 0).all() and (np.array(seq_r) > 0).all() and (np.array(seq_c) > 0).all()):
            print('Error: ROI index too small (<= 0)!')
return 0, 0, 0
elif not ((np.array(seq_s) < 256).all() and (np.array(seq_r) < 256).all() and (np.array(seq_c) < 256).all()):
            print('Error: ROI index too large (>= 256)!')
return 0, 0, 0
else:
## extract the ROI cubic
# test!!!
matrixVoxels = self.rawData_local[seq_s[0]:seq_s[-1]+1, seq_r[0]:seq_r[-1]+1, seq_c[0]:seq_c[-1]+1]
            sumVoxels = np.sum(matrixVoxels)
            if sumVoxels == 0:
print('Error: Converge to non-elec region!')
return 0, 0, 0
else:
f = np.zeros((1, 4))
for index, element in np.ndenumerate(matrixVoxels):
x, y, z = index
tmp = np.array([x+seq_s[0], y+seq_r[0], z+seq_c[0], element])
f = np.vstack((f, tmp))
f = f[1:]
CM = np.average(f[:,:3], axis=0, weights=f[:,3])
C100 = CM[0]
C010 = CM[1]
C001 = CM[2]
x1 = C100
y1 = C010
z1 = C001
return x1, y1, z1
def contactPoint(self, target):
## converge to an electrode contact position
x0 = self.headStart[0] if target == 1 else self.x0
y0 = self.headStart[1] if target == 1 else self.y0
z0 = self.headStart[2] if target == 1 else self.z0
x = int(round(x0))
y = int(round(y0))
z = int(round(z0))
print(f"initial start voxel:({x0}, {y0}, {z0})")
# test!!!
self.rawData_local = self.rawData_single
diff_array = self.pos_elec - np.array([x0, y0, z0])
elec_diffs = np.sqrt(np.dot(diff_array, np.transpose(diff_array)).diagonal())
ind_diffs = np.where(elec_diffs <= 2)
self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] = self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] * 2
(x1, y1, z1) = self.converge(x, y, z)
itr = 1
flag_convergence = 0
while not ((x==int(round(x1))) and (y==int(round(y1))) and (z==int(round(z1)))):
x = int(round(x1))
y = int(round(y1))
z = int(round(z1))
(x1, y1, z1) = self.converge(x, y, z)
itr = itr + 1
if itr > 5:
flag_convergence = 1
break
print(f"Convergent center voxel coordinates:({x1},{y1},{z1})")
print(f"Convergent center voxel value:{self.rawData[int(round(x1)), int(round(y1)), int(round(z1))]}")
self.flag_step_stop = 0
if (x1, y1, z1) == (0, 0, 0):
self.flag_step_stop = 1
            print('Case 1: converged to (0, 0, 0); stopping.')
# self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])
else:
if not flag_convergence:
                print('Case 2: converged normally.')
self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint
self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])
else:
                print('Case 3: iteration limit reached, convergence not guaranteed.')
self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint
                self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])
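# A minimal usage sketch for ElectrodeSeg (the path and parameter values are
# illustrative assumptions, and the `regression`/`step` methods called by
# `pipeline` are not shown in this excerpt):
#
#     seg = ElectrodeSeg(
#         filePath="/data/patient01", patName="patient01",
#         iLabel=1, numMax=16, diameterSize=2, spacing=3.5, gap=0,
#     )
#     seg.pipeline()  # writes <patName>_result/<electrode letter>.txt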
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
import os
import colorsys
NUM_BATCHES = 60000
BATCH_SIZE = 512
PLOT_EVERY = 100
GRID_RESOLUTION = 400
FILE = ".".join(os.path.basename(__file__).split(".")[:-1])
THETA_MAPPING = np.arange(8).reshape((-1, 1)) / 8 # rotations
def colourize(Z):
theta = (Z[:, 2:] @ THETA_MAPPING).flatten()
return [colorsys.hsv_to_rgb(x, 1, 1) for x in theta]
def target_function(Z):
"""
    Map Z = (N(0,1), N(0,1), onehot(8)) to points clustered around 8 Gaussians
"""
theta = (Z[:, 2:] @ THETA_MAPPING).flatten()
r = 2
results = 0.2 * Z[:, :2]
results[:, 0] += r * np.cos(theta * 2 * np.pi)
results[:, 1] += r * np.sin(theta * 2 * np.pi)
return results
def generate_noise(samples):
"""
    Generate `samples` samples of 2-D standard normal noise,
    each concatenated with a random one-hot(8) class vector
"""
noise = np.zeros((samples, 2 + 8))
noise[:, :2] = np.random.normal(0, 1, (samples, 2))
noise[np.arange(samples), 2 + np.random.randint(0, 8, samples)] = 1
return noise
def sample_from_target_function(samples):
"""
sample from the target function
"""
Z = generate_noise(samples)
return target_function(Z)
def build_generator():
"""
    Build a generator mapping (N(0,1), N(0,1), onehot(8)) to a 2-D point
"""
input_layer = layers.Input((2 + 8,))
X = input_layer
for i in range(3):
X = layers.Dense(512)(X)
X = layers.LeakyReLU(0.1)(X)
output_layer = layers.Dense(2)(X)
G = Model(input_layer, output_layer)
return G
def build_discriminator():
"""
Build a discriminator mapping (R, R) to [0, 1]
"""
input_layer = layers.Input((2,))
X = input_layer
for i in range(3):
X = layers.Dense(512)(X)
X = layers.LeakyReLU(0.1)(X)
output_layer = layers.Dense(1, activation="sigmoid")(X)
D = Model(input_layer, output_layer)
D.compile(
Adam(learning_rate=0.001, beta_1=0.5),
loss="binary_crossentropy",
metrics=["accuracy"],
)
return D
def build_GAN(G, D):
"""
Given a generator and a discriminator, build a GAN
"""
D.trainable = False
input_layer = layers.Input((2 + 8,))
X = G(input_layer)
output_layer = D(X)
GAN = Model(input_layer, output_layer)
GAN.compile(
Adam(learning_rate=0.0002, beta_1=0.5),
loss="binary_crossentropy",
metrics=["accuracy"],
)
return GAN
grid = np.zeros((GRID_RESOLUTION, GRID_RESOLUTION, 2))
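# The training loop itself is not part of this excerpt. A minimal sketch of how
# the builders above are usually wired together (standard alternating GAN
# updates; the use of `grid`/`PLOT_EVERY` for plotting is an assumption):
#
#     G = build_generator()
#     D = build_discriminator()
#     GAN = build_GAN(G, D)
#     for batch in range(NUM_BATCHES):
#         # discriminator step on a half-real, half-fake batch
#         real = sample_from_target_function(BATCH_SIZE // 2)
#         fake = G.predict(generate_noise(BATCH_SIZE // 2), verbose=0)
#         X = np.concatenate([real, fake])
#         y = np.concatenate([np.ones(BATCH_SIZE // 2), np.zeros(BATCH_SIZE // 2)])
#         D.train_on_batch(X, y)
#         # generator step through the frozen discriminator
#         GAN.train_on_batch(generate_noise(BATCH_SIZE), np.ones(BATCH_SIZE))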
import glob
import os, utils, torch
from torch.utils.data import DataLoader
from data import datasets, trans
import numpy as np
from torchvision import transforms
import nibabel as nib
def nib_load(file_name):
if not os.path.exists(file_name):
        return np.array([1])
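# The remainder of nib_load is missing from this excerpt. A typical
# continuation (an assumption, not taken from the original source) loads the
# volume with nibabel and returns the voxel array:
#
#     proxy = nib.load(file_name)
#     data = proxy.get_fdata()
#     proxy.uncache()
#     return data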
import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
        dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
#!/usr/bin/env python
# imports
import numpy as np
import scipy.linalg as spla
# load data
convergence_DM = np.loadtxt("../../data/convergence_DM.txt")
convergence_E = np.loadtxt("../../data/convergence_E.txt")
S = np.loadtxt("../../data/S.txt")
T = np.loadtxt("../../data/T.txt")
V = np.loadtxt("../../data/V.txt")
eri = np.loadtxt("../../data/eri.txt")
E_nuc = np.loadtxt("../../data/E_nuc.txt")
iteration_max = (int)(np.loadtxt("../../data/iteration_max.txt"))
num_ao = (int)(np.loadtxt("../../data/num_ao.txt"))
num_elec_alpha = (int)(np.loadtxt("../../data/num_elec_alpha.txt"))
num_elec_beta = (int)(np.loadtxt("../../data/num_elec_beta.txt"))
iteration_max = (int)(np.loadtxt("../../data/iteration_max.txt"))
# Code
def idx2(i, j):
if i >= j:
return int(i*(i+1)/2+j)
else:
return int(j*(j+1)/2+i)
def idx4(i, j, k, l):
return idx2(idx2(i, j), idx2(k, l))
D = np.zeros((num_ao, num_ao))
# loop variables
iteration_num = 0
E_total = 0
E_elec = 0.0
iteration_E_diff = 0.0
iteration_rmsc_dm = 0.0
converged = False
exceeded_iterations = False
s, L = spla.eigh(S)
X = np.zeros_like(L)
for i in range(len(s)):
X[i,i] = 1.0/np.sqrt(s[i])
X = np.dot(L,np.dot(X,L.T))
H = T + V
while (not converged and not exceeded_iterations):
# store last iteration and increment counters
iteration_num += 1
E_elec_last = E_elec
D_last = np.copy(D)
# form G matrix
G = np.zeros((num_ao, num_ao))
for i in range(num_ao):
for j in range(num_ao):
for k in range(num_ao):
for l in range(num_ao):
G[i, j] += D[k, l] * ((2.0*(eri[idx4(i, j, k, l)])) -
(eri[idx4(i, k, j, l)]))
# build fock matrix
F = H + G
F_prime = X @ F @ X
# solve the eigenvalue problem
E_orbitals, C_prime = spla.eigh(F_prime)
C = X @ C_prime
# compute new density matrix
D = np.zeros((num_ao, num_ao))
for i in range(num_ao):
for j in range(num_ao):
for k in range(num_elec_alpha):
D[i, j] += C[i, k] * C[j, k]
# calculate electronic energy
E_elec = np.sum(np.multiply(D, (H + F)))
# calculate energy change of iteration
iteration_E_diff = np.abs(E_elec - E_elec_last)
# rms change of density matrix
iteration_rmsc_dm = np.sqrt(np.sum((D - D_last)**2))
    if np.abs(iteration_E_diff) < convergence_E and iteration_rmsc_dm < convergence_DM:
        converged = True
    if iteration_num == iteration_max:
        exceeded_iterations = True
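# After the SCF loop the total energy is normally reported as the electronic
# energy plus the nuclear repulsion loaded above. A minimal, assumed sketch
# (the original script's output format is not shown here):
#
#     E_total = E_elec + E_nuc
#     print("SCF iterations:", iteration_num)
#     print("Total energy (Hartree):", E_total)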
'''
MODULE: main_ANN.py
@Author:
<NAME> [1,2]
[1]: Université Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Bruxelles, Belgium
[2]: CRECK Modeling Lab, Department of Chemistry, Materials and Chemical Engineering, Politecnico di Milano
@Contacts:
<EMAIL>
@Additional notes:
This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
Please report any bug to: <EMAIL>
'''
import ANN as neural
from utilities import *
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import time
import os
def Learning():
start_time = time.time()
file_options = {
"path_to_file" : "./model_zc/",
"input_file_name" : "X_scaled_zc.npy",
"output_file_name" : "target_scaled_zc.npy",
# Optional additional input matrix to pass through the trained net for a second test:
"test_file_name" : "X_scaled_zc.npy",
}
training_options = {
"center" : True,
"centering_method" : "mean",
"scale" : True,
"scaling_method" : "auto",
"neurons_per_layer" : [256, 512],
"batch_size" : 256,
"number_of_epochs" : 1000,
"activation_function" : "leaky_relu",
"alpha_LR" : 0.01,
"activation_output" : "softmax",
"batchNormalization" : True,
"dropout" : 0,
"patience" : 10,
"loss_function" : "binary_crossentropy",
"monitor" : "val_loss",
"learning_rate" : 0.0001,
}
X = np.load(file_options["path_to_file"] + file_options["input_file_name"])
Y = np.load(file_options["path_to_file"] + file_options["output_file_name"])
Z = np.load(file_options["path_to_file"] + file_options["test_file_name"])
# outlier removal section
input_index = np.arange(X.shape[0])
outlier_index = np.zeros(X.shape[0], dtype=bool)
print("Original training dimensions: {}".format(X.shape))
print("Original test dimensions: {}".format(Y.shape))
X_noOUT, ___, mask = outlier_removal_leverage(X, 2, training_options["centering_method"], training_options["scaling_method"])
Y_noOUT = np.delete(Y, mask, axis=0)
input_index_noOUT = np.delete(input_index, mask)
outlier_index[mask] = True
print("Training dimensions after first outlier removal: {}".format(X_noOUT.shape))
print("Test dimensions after first outlier removal: {}".format(Y_noOUT.shape))
X_noOUT2, ___, mask2 = outlier_removal_orthogonal(X_noOUT, 2, training_options["centering_method"], training_options["scaling_method"])
Y_noOUT2 = np.delete(Y_noOUT, mask2, axis=0)
outlier_index[input_index_noOUT[mask2]] = True
print("Training dimensions after second outlier removal: {}".format(X_noOUT2.shape))
print("Test dimensions after second outlier removal: {}".format(Y_noOUT2.shape))
model = neural.regressor(X_noOUT2, Y_noOUT2, training_options, Z)
predicted_Y_noOUT2 = model.fit_network()
predictedTest, trueTest = model.predict()
# Test the net for an additional input matrix (Z):
predicted_Z = model.predict_new_matrix()
print("---Completed in %s seconds ---" % (time.time() - start_time))
np.save('predictions_zc_noOUT', predicted_Y_noOUT2)
np.save('predictions_zc_newInput', predicted_Z)
# write a txt file with final results from the training
f = open('history_final.txt', 'w+')
f.write('completed in {:.1f} seconds \n'.format(time.time() - start_time))
f.write('loss = {} \n'.format(model.model.history.history['loss'][-1]))
f.write('val_loss = {} \n'.format(model.model.history.history['val_loss'][-1]))
f.write('mae = {} \n'.format(model.model.history.history['mae'][-1]))
f.write('mse = {} \n'.format(model.model.history.history['mse'][-1]))
f.write('val_mse = {}'.format(model.model.history.history['val_mse'][-1]))
f.close()
# save the outlier indices
np.save('outlier_index', outlier_index)
# plotting functions
# scatter plot
fig = plt.figure()
plt.axes(aspect='equal')
plt.scatter(Y_noOUT2.flatten(), predicted_Y_noOUT2.flatten()
, s=2, edgecolors='black', linewidths=0.1)
plt.xlabel('Y_zc')
plt.ylabel('Y_pred')
lims = [np.min(predicted_Y_noOUT2), np.max(predicted_Y_noOUT2)]
lims2 = [np.min(Y), np.max(Y)]
_ = plt.plot(lims2, lims2, 'r')
plt.savefig('parity_plot.png')
plt.show()
fig.tight_layout()
    # Change the directory from Results-ANN back to the working directory
os.chdir("../../.")
def preprocessing_zc(X_zc,target_zc,loc):
[n_elements, n_variables] = X_zc.shape
scal_fact = np.zeros((n_variables,1))
    x_bar = np.zeros((n_variables, 1))
import numpy as np
from goal_prox.method.goal_traj_dataset import GoalTrajDataset
import torch
from goal_prox.envs.gw_helper import *
def exp_discounted(T, t, delta):
    return np.power(delta, T - t)
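# Example: exponential proximity weights for a trajectory of length T=5 with
# delta=0.9; the weight grows toward the end (goal) of the trajectory.
#
#     >>> [exp_discounted(5, t, 0.9) for t in range(1, 6)]
#     [0.6561, 0.729, 0.81, 0.9, 1.0]  # approximately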
#!/usr/bin/env python3
import numpy as np
import os
import sys
from torch.utils import data
def metrics(threshold, predictions, labels, label_unmon):
''' Computes a range of metrics.
For details on the metrics, see, e.g., https://www.cs.kau.se/pulls/hot/baserate/
'''
tp, fpp, fnp, tn, fn, accuracy, recall, precision, f1 = 0, 0, 0, 0, 0, 0.0, 0.0, 0.0, 0.0
for i in range(len(predictions)):
        label_pred = np.argmax(predictions[i])
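        # The counting logic that follows is not included in this excerpt. The
        # usual open-world recipe (a sketch; the exact counter semantics are an
        # assumption): only accept a monitored prediction when its probability
        # clears `threshold`, otherwise treat the sample as predicted
        # unmonitored before updating the tp/fpp/fnp/tn/fn counters.
        #
        #     prob = predictions[i][label_pred]
        #     predicted_unmon = (label_pred == label_unmon) or (prob < threshold)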
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 14:04:57 2020
@author: theomacmillan
"""
#%%
import torch
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
from models import VAE
from utils import target_loss, kl_loss
#%%
latent = 12
net = VAE(100, 100, latent, 50)
inputs = np.transpose(np.load("training_data/jet_inputs.npy"))/20E6
outputs = np.transpose(np.load("training_data/jet_inputs.npy"))/20E6
#%%
beta = 0.01
inputs = torch.Tensor(inputs)
outputs = torch.Tensor(outputs)
traindata = TensorDataset(inputs, outputs)
dataloader = DataLoader(traindata, batch_size=1000, shuffle=True, num_workers=0)
SAVE_PATH = "trained_models/VAE_jet_L12_BE2.dat"
N_EPOCHS = 10000
optimizer = optim.Adam(net.parameters())
rms_loss = []
kldiv_loss = []
for epoch in range(N_EPOCHS):
epoch_rms_loss = []
epoch_kldiv_loss = []
for minibatch in dataloader:
inputs, outputs = minibatch
optimizer.zero_grad()
pred = net.forward(inputs)
kl = beta*kl_loss(net.mu, net.log_sigma)
rms = target_loss(pred, outputs)
loss = rms+kl
loss.backward()
optimizer.step()
epoch_rms_loss.append(np.mean(rms.data.detach().numpy()))
epoch_kldiv_loss.append(np.mean(kl.data.detach().numpy()))
kldiv_loss.append(np.mean(epoch_kldiv_loss))
    rms_loss.append(np.mean(epoch_rms_loss))
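# Checkpointing is not part of this excerpt. A minimal, assumed continuation
# logs the two loss curves periodically (inside the epoch loop) and saves the
# trained weights to SAVE_PATH once training is done:
#
#     if (epoch + 1) % 100 == 0:
#         print(f"epoch {epoch + 1}: rms={rms_loss[-1]:.4e} kl={kldiv_loss[-1]:.4e}")
#
#     torch.save(net.state_dict(), SAVE_PATH)  # after the epoch loop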
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
********
BST file
********
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021, nenupy'
__credits__ = ['<NAME>']
__maintainer__ = 'Alan'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
"XST"
]
from abc import ABC
import os
from itertools import islice
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, AltAz, Angle
import astropy.units as u
from astropy.io import fits
from healpy.fitsfunc import write_map, read_map
from healpy.pixelfunc import mask_bad, nside2resol
import numpy as np
import json
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colorbar import ColorbarBase
from matplotlib.ticker import LinearLocator
from matplotlib.colors import Normalize
from matplotlib.cm import get_cmap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import dask.array as da
from dask.diagnostics import ProgressBar
import nenupy
from os.path import join, dirname
from nenupy.astro.target import FixedTarget, SolarSystemTarget
from nenupy.io.io_tools import StatisticsData
from nenupy.io.bst import BST_Slice
from nenupy.astro import wavelength, altaz_to_radec, l93_to_etrs, etrs_to_enu
from nenupy.astro.uvw import compute_uvw
from nenupy.astro.sky import HpxSky
from nenupy.astro.pointing import Pointing
from nenupy.instru import NenuFAR, MiniArray, read_cal_table, freq2sb, nenufar_miniarrays
from nenupy import nenufar_position, DummyCtMgr
import logging
log = logging.getLogger(__name__)
# ============================================================= #
# ------------------------- XST_Slice ------------------------- #
# ============================================================= #
class XST_Slice:
""" """
def __init__(self, mini_arrays, time, frequency, value):
self.mini_arrays = mini_arrays
self.time = time
self.frequency = frequency
self.value = value
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
def plot_correlaton_matrix(self, mask_autocorrelations: bool = False, **kwargs):
"""
"""
max_ma_index = self.mini_arrays.max() + 1
all_mas = np.arange(max_ma_index)
matrix = np.full([max_ma_index, max_ma_index], np.nan, "complex")
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
for ma in all_mas:
if ma not in self.mini_arrays:
ma1[ma1 >= ma] += 1
ma2[ma2 >= ma] += 1
mask = None
if mask_autocorrelations:
mask = ma1 != ma2 # cross_correlation mask
matrix[ma2[mask], ma1[mask]] = np.mean(self.value, axis=(0, 1))[mask]
fig = plt.figure(figsize=kwargs.get("figsize", (10, 10)))
ax = fig.add_subplot(111)
ax.set_aspect("equal")
data = np.absolute(matrix)
if kwargs.get("decibel", True):
data = 10*np.log10(data)
im = ax.pcolormesh(
all_mas,
all_mas,
data,
shading="nearest",
cmap=kwargs.get("cmap", "YlGnBu"),
vmin=kwargs.get("vmin", np.nanmin(data)),
vmax=kwargs.get("vmax", np.nanmax(data))
)
ax.set_xticks(all_mas[::2])
ax.set_yticks(all_mas[::2])
ax.grid(alpha=0.5)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.3)
cbar = fig.colorbar(im, cax=cax)
cbar.set_label(kwargs.get("colorbar_label", "dB" if kwargs.get("decibel", True) else "Amp"))
        # Axis labels
ax.set_xlabel(f"Mini-Array index")
ax.set_ylabel(f"Mini-Array index")
# Title
ax.set_title(kwargs.get("title", ""))
# Save or show the figure
figname = kwargs.get("figname", "")
if figname != "":
plt.savefig(
figname,
dpi=300,
bbox_inches="tight",
transparent=True
)
log.info(f"Figure '{figname}' saved.")
else:
plt.show()
plt.close("all")
def rephase_visibilities(self, phase_center, uvw):
""" """
# Compute the zenith original phase center
zenith = SkyCoord(
np.zeros(self.time.size),
np.ones(self.time.size)*90,
unit="deg",
frame=AltAz(
obstime=self.time,
location=nenufar_position
)
)
zenith_phase_center = altaz_to_radec(zenith)
# Define the rotation matrix
def rotation_matrix(skycoord):
"""
"""
ra_rad = skycoord.ra.rad
dec_rad = skycoord.dec.rad
if np.isscalar(ra_rad):
ra_rad = np.array([ra_rad])
dec_rad = np.array([dec_rad])
cos_ra = np.cos(ra_rad)
sin_ra = np.sin(ra_rad)
cos_dec = np.cos(dec_rad)
sin_dec = np.sin(dec_rad)
return np.array([
[cos_ra, -sin_ra, np.zeros(ra_rad.size)],
[-sin_ra*sin_dec, -cos_ra*sin_dec, cos_dec],
[sin_ra*cos_dec, cos_ra*cos_dec, sin_dec],
])
# Transformation matrices
to_origin = rotation_matrix(zenith_phase_center) # (3, 3, ntimes)
to_new_center = rotation_matrix(phase_center) # (3, 3, 1)
total_transformation = np.matmul(
np.transpose(
to_new_center,
(2, 0, 1)
),
to_origin
) # (3, 3, ntimes)
rotUVW = np.matmul(
np.expand_dims(
(to_origin[2, :] - to_new_center[2, :]).T,
axis=1
),
np.transpose(
to_origin,
(2, 1, 0)
)
) # (ntimes, 1, 3)
phase = np.matmul(
rotUVW,
np.transpose(uvw, (0, 2, 1))
) # (ntimes, 1, nvis)
rotate_visibilities = np.exp(
2.j*np.pi*phase/wavelength(self.frequency).to(u.m).value[None, :, None]
) # (ntimes, nfreqs, nvis)
new_uvw = np.matmul(
uvw, # (ntimes, nvis, 3)
np.transpose(total_transformation, (2, 0, 1))
)
return rotate_visibilities, new_uvw
def make_image(self,
resolution: u.Quantity = 1*u.deg,
fov_radius: u.Quantity = 25*u.deg,
phase_center: SkyCoord = None,
stokes: str = "I"
):
"""
:Example:
xst = XST("XST.fits")
data = xst.get_stokes("I")
sky = data.make_image(
resolution=0.5*u.deg,
fov_radius=27*u.deg,
phase_center=SkyCoord(277.382, 48.746, unit="deg")
)
sky[0, 0, 0].plot(
center=SkyCoord(277.382, 48.746, unit="deg"),
radius=24.5*u.deg
)
"""
exposure = self.time[-1] - self.time[0]
# Compute XST UVW coordinates (zenith phased)
uvw = compute_uvw(
interferometer=NenuFAR()[self.mini_arrays],
phase_center=None, # will be zenith
time=self.time,
)
# Prepare visibilities rephasing
rephase_matrix, uvw = self.rephase_visibilities(
phase_center=phase_center,
uvw=uvw
)
# Mask auto-correlations
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
cross_mask = ma1 != ma2
uvw = uvw[:, cross_mask, :]
# Transform to lambda units
wvl = wavelength(self.frequency).to(u.m).value
uvw = uvw[:, None, :, :]/wvl[None, :, None, None] # (t, f, bsl, 3)
# Mean in time
uvw = np.mean(uvw, axis=0)
# Prepare the sky
sky = HpxSky(
resolution=resolution,
time=self.time[0] + exposure/2,
frequency=np.mean(self.frequency),
polarization=np.array([stokes]),
value=np.nan
)
# Compute LMN coordinates
image_mask = sky.visible_mask[0, 0, 0]
image_mask *= sky.coordinates.separation(phase_center) <= fov_radius
l, m, n = sky.compute_lmn(
phase_center=phase_center,
coordinate_mask=image_mask
)
lmn = np.array([l, m, (n - 1)], dtype=np.float32).T
n_pix = l.size
lmn = da.from_array(
lmn,
chunks=(np.floor(n_pix/os.cpu_count()), 3)
)
# Transform to Dask array
n_bsl = uvw.shape[1]
n_freq = self.frequency.size
n_pix = l.size
uvw = da.from_array(
uvw.astype(np.float32),
chunks=(n_freq, np.floor(n_bsl/os.cpu_count()), 3)
)
# Compute the phase
uvwlmn = np.sum(uvw[:, :, None, :] * lmn[None, None, :, :], axis=-1)
phase = np.exp( -2j * np.pi * uvwlmn ) # (f, bsl, npix)
        # Rephase and average visibilities
vis = np.mean( # Mean in time
self.value * rephase_matrix,
axis=0
)[..., cross_mask] # (nfreqs, nvis)
# Make dirty image
dirty = np.nanmean( # mean in baselines
np.real(
np.mean( # mean in freq
vis[:, :, None] * phase,
axis=0
)
),
axis=0
)
# Insert dirty image in Sky object
log.info(
f"Computing image (time: {self.time.size}, frequency: {self.frequency.size}, baselines: {vis.shape[1]}, pixels: {phase.shape[-1]})... "
)
with ProgressBar() if log.getEffectiveLevel() <= logging.INFO else DummyCtMgr():
sky.value[0, 0, 0, image_mask] = dirty.compute()
return sky
def make_nearfield(self,
radius: u.Quantity = 400*u.m,
npix: int = 64,
sources: list = []
):
r""" Computes the Near-field image from the cross-correlation
statistics data :math:`\mathcal{V}`.
        The distance between each Mini-Array :math:`{\rm MA}_i`
        and the ground positions :math:`\Delta` is:
.. math::
d_{\rm{MA}_i} (x, y) = \sqrt{
({\rm MA}_{i, x} - \Delta_x)^2 + ({\rm MA}_{i, y} - \Delta_y)^2 + \left( {\rm MA}_{i, z} - \sum_j \frac{{\rm MA}_{j, z}}{n_{\rm MA}} - 1 \right)^2
}
Then, the near-field image :math:`n_f` can be retrieved
as follows (:math:`k` and :math:`l` being two distinct
Mini-Arrays):
.. math::
n_f (x, y) = \sum_{k, l} \left| \sum_{\nu} \langle \mathcal{V}_{\nu, k, l}(t) \rangle_t e^{2 \pi i \left( d_{{\rm MA}_k} - d_{{\rm MA}_l} \right) (x, y) \frac{\nu}{c}} \right|
.. note::
To simulate astrophysical source of brightness :math:`\mathcal{B}`
footprint on the near-field, its visibility per baseline
of Mini-Arrays :math:`k` and :math:`l` are computed as:
.. math::
\mathcal{V}_{{\rm simu}, k, l} = \mathcal{B} e^{2 \pi i \left( \mathbf{r}_k - \mathbf{r}_l \right) \cdot \mathbf{u} \frac{\nu}{c}}
with :math:`\mathbf{r}` the ENU position of the Mini-Arrays,
            :math:`\mathbf{u} = \left( \cos(\theta) \sin(\phi), \cos(\theta) \cos(\phi), \sin(\theta) \right)`
the ground projection vector (in East-North-Up coordinates),
(:math:`\phi` and :math:`\theta` are the source horizontal
coordinates azimuth and elevation respectively).
:param radius:
Radius of the ground image. Default is ``400m``.
:type radius:
:class:`~astropy.units.Quantity`
:param npix:
Number of pixels of the image size. Default is ``64``.
:type npix:
`int`
:param sources:
List of source names for which their near-field footprint
may be computed. Only sources above 10 deg elevation
will be considered.
:type sources:
`list`
:returns:
            Tuple of the near-field image and a dictionary
containing all source footprints.
:rtype:
`tuple`(:class:`~numpy.ndarray`, `dict`)
:Example:
from nenupy.io.xst import XST
xst = XST("xst_file.fits")
nearfield, src_dict = xst.make_nearfield(sources=["Cas A", "Sun"])
.. versionadded:: 1.1.0
"""
def compute_nearfield_imprint(visibilities, phase):
# Phase and average in frequency
nearfield = np.mean(
visibilities[..., None, None] * phase,
axis=0
)
# Average in baselines
nearfield = np.nanmean(np.abs(nearfield), axis=0)
with ProgressBar() if log.getEffectiveLevel() <= logging.INFO else DummyCtMgr():
return nearfield.compute()
# Mini-Array positions in ENU coordinates
nenufar = NenuFAR()[self.mini_arrays]
ma_etrs = l93_to_etrs(nenufar.antenna_positions)
ma_enu = etrs_to_enu(ma_etrs)
# Treat baselines
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
cross_mask = ma1 != ma2
# Mean time of observation
obs_time = self.time[0] + (self.time[-1] - self.time[0])/2.
# Delays at the ground
radius_m = radius.to(u.m).value
ground_granularity = np.linspace(-radius_m, radius_m, npix)
posx, posy = np.meshgrid(ground_granularity, ground_granularity)
posz = np.ones_like(posx) * (np.average(ma_enu[:, 2]) + 1)
ground_grid = np.stack((posx, posy, posz), axis=2)
ground_distances = np.sqrt(
np.sum(
(ma_enu[:, None, None, :] - ground_grid[None])**2,
axis=-1
)
)
grid_delays = ground_distances[ma1] - ground_distances[ma2] # (nvis, npix, npix)
n_bsl = ma1[cross_mask].size
grid_delays = da.from_array(
grid_delays[cross_mask],
chunks=(np.floor(n_bsl/os.cpu_count()), npix, npix)
)
# Mean in time the visibilities
vis = np.mean(
self.value,
axis=0
)[..., cross_mask] # (nfreqs, nvis)
vis = da.from_array(
vis,
chunks=(1, np.floor(n_bsl/os.cpu_count()))#(self.frequency.size, np.floor(n_bsl/os.cpu_count()))
)
# Make the nearfield image
log.info(
f"Computing nearfield (time: {self.time.size}, frequency: {self.frequency.size}, baselines: {vis.shape[1]}, pixels: {posx.size})... "
)
wvl = wavelength(self.frequency).to(u.m).value
phase = np.exp(2.j * np.pi * (grid_delays[None, ...]/wvl[:, None, None, None]))
log.debug("Computing the phase term...")
with ProgressBar() if log.getEffectiveLevel() <= logging.INFO else DummyCtMgr():
phase = phase.compute()
log.debug("Computing the nearf-field...")
nearfield = compute_nearfield_imprint(vis, phase)
# Compute nearfield imprints for other sources
simu_sources = {}
for src_name in sources:
# Check that the source is visible
if src_name.lower() in ["sun", "moon", "venus", "mars", "jupiter", "saturn", "uranus", "neptune"]:
src = SolarSystemTarget.from_name(name=src_name, time=obs_time)
else:
src = FixedTarget.from_name(name=src_name, time=obs_time)
altaz = src.horizontal_coordinates#[0]
if altaz.alt.deg <= 10:
log.debug(f"{src_name}'s elevation {altaz[0].alt.deg}<=10deg, not considered for nearfield imprint.")
continue
# Projection from AltAz to ENU vector
az_rad = altaz.az.rad
el_rad = altaz.alt.rad
cos_az = np.cos(az_rad)
sin_az = np.sin(az_rad)
cos_el = np.cos(el_rad)
sin_el = np.sin(el_rad)
to_enu = np.array(
[cos_el*sin_az, cos_el*cos_az, sin_el]
)
# src_delays = np.matmul(
# ma_enu[ma1] - ma_enu[ma2],
# to_enu
# )
# src_delays = da.from_array(
# src_delays[cross_mask, :],
# chunks=((np.floor(n_bsl/os.cpu_count()), npix, npix), 1)
# )
ma1_enu = da.from_array(
ma_enu[ma1[cross_mask]],
chunks=np.floor(n_bsl/os.cpu_count())
)
ma2_enu = da.from_array(
ma_enu[ma2[cross_mask]],
chunks=np.floor(n_bsl/os.cpu_count())
)
src_delays = np.matmul(
ma1_enu - ma2_enu,
to_enu
)
# Simulate visibilities
src_vis = np.exp(2.j * np.pi * (src_delays/wvl))
src_vis = np.swapaxes(src_vis, 1, 0)
log.debug(f"Computing the nearf-field imprint of {src_name}...")
simu_sources[src_name] = compute_nearfield_imprint(src_vis, phase)
return nearfield, simu_sources
# ============================================================= #
# ============================================================= #
# ============================================================= #
# ------------------------- Crosslet -------------------------- #
# ============================================================= #
class Crosslet(ABC):
""" """
# def __init__(self,
# mini_arrays: np.ndarray,
# frequency: u.Quantity,
# time: Time,
# visibilities: np.ndarray
# ):
# self.mini_arrays = mini_arrays
# self.frequency = frequency
# self.time = time
# self.visibilities = visibilities
# --------------------------------------------------------- #
# --------------------- Getter/Setter --------------------- #
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
def get(self,
frequency_selection: str = None,
time_selection: str = None,
polarization: str = "XX",
):
""" """
# Polarization selection
allowed_polarizations = ["XX", "XY", "YX", "YY"]
if polarization not in allowed_polarizations:
raise ValueError(
f"'polarization' argument must be equal to one of the following: {allowed_polarizations}."
)
# Frequency selection
frequency_mask = self._get_freq_mask(frequency_selection)
# Time selection
time_mask = self._get_time_mask(time_selection)
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
auto_mask = ma1 == ma2
cross_mask = ~auto_mask
if polarization == "XY":
# Deal with lack of auto XY cross in XST-like data
yx = self.data[
np.ix_(
time_mask,
frequency_mask,
self._get_cross_idx("Y", "X")
)
]
_xy = np.zeros(
(list(yx.shape[:-1]) + [ma1.size]),
dtype=np.complex
)
_xy[:, :, auto_mask] = yx[:, :, auto_mask].conj()
# Get XY correlations
_xy[:, :, cross_mask] = self.data[
np.ix_(
time_mask,
frequency_mask,
self._get_cross_idx("X", "Y")
)
]
return _xy
else:
return self.data[
np.ix_(
time_mask,
frequency_mask,
self._get_cross_idx(*list(polarization))
)
]
def get_stokes(self,
stokes: str = "I",
frequency_selection: str = None,
time_selection: str = None
):
""" """
frequency_mask = self._get_freq_mask(frequency_selection)
time_mask = self._get_time_mask(time_selection)
stokes_parameters = {
"I": {
"cross": ["XX", "YY"],
"compute": lambda xx, yy: 0.5*(xx + yy)
},
"Q": {
"cross": ["XX", "YY"],
"compute": lambda xx, yy: 0.5*(xx - yy)
},
"U": {
"cross": ["XY", "YX"],
"compute": lambda xy, yx: 0.5*(xy + yx)
},
"V": {
"cross": ["XY", "YX"],
"compute": lambda xy, yx: -0.5j*(xy - yx)
},
"FL": {
"cross": ["XX", "YY", "XY", "YX"],
"compute": lambda xx, yy, xy, yx: np.sqrt((0.5*(xx - yy))**2 + (0.5*(xy + yx))**2) / (0.5*(xx + yy))
},
"FV": {
"cross": ["XX", "YY", "XY", "YX"],
"compute": lambda xx, yy, xy, yx: np.abs(-0.5j*(xy - yx))/(0.5*(xx + yy))
}
}
try:
selected_stokes = stokes_parameters[stokes]
except KeyError:
log.warning(f"Available polarizations are: {stokes_parameters.keys()}.")
return XST_Slice(
mini_arrays=self.mini_arrays,
time=self.time[time_mask],
frequency=self.frequencies[frequency_mask],
value=selected_stokes["compute"](
*map(
lambda pol: self.get(
frequency_selection=frequency_selection,
time_selection=time_selection,
polarization=pol
),
selected_stokes["cross"]
)
)
)
def get_beamform(self,
pointing: Pointing,
frequency_selection: str = None,
time_selection: str = None,
mini_arrays: np.ndarray = np.array([0, 1]),
polarization: str = "NW",
calibration: str = "default"
):
"""
:Example:
from nenupy.io.bst import BST, XST
bst = BST("20191129_141900_BST.fits")
xst = XST("20191129_141900_XST.fits")
bf_cal = xst.get_beamform(
pointing = Pointing.from_bst(bst, beam=0, analog=False),
mini_arrays=bst.mini_arrays,
calibration="default"
)
"""
frequency_mask = self._get_freq_mask(frequency_selection)
time_mask = self._get_time_mask(time_selection)
# Select the mini-arrays cross correlations
nenufar = NenuFAR()#[self.mini_arrays]
bf_nenufar = NenuFAR()[mini_arrays]
ma_real_indices = np.array([nenufar_miniarrays[name]["id"] for name in bf_nenufar.antenna_names])
if np.any( ~np.isin(ma_real_indices, self.mini_arrays) ):
raise IndexError(
f"Selected Mini-Arrays {mini_arrays} are outside possible values: {self.mini_arrays}."
)
ma_indices = np.arange(self.mini_arrays.size, dtype="int")[np.isin(self.mini_arrays, ma_real_indices)]
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
mask = np.isin(ma1, ma_indices) & np.isin(ma2, ma_indices)
# Calibration table
if calibration.lower() == "none":
# No calibration
cal = np.ones(
(self.frequencies[frequency_mask].size, ma_indices.size)
)
else:
pol_idx = {"NW": [0], "NE": [1]}
cal = read_cal_table(
calibration_file=calibration
)
cal = cal[np.ix_(
freq2sb(self.frequencies[frequency_mask]),
ma_real_indices,
pol_idx[polarization]
)].squeeze(axis=2)
# Load and filter the data
vis = self.get(
frequency_selection=frequency_selection,
time_selection=time_selection,
polarization= "XX" if polarization.upper() == "NW" else "YY",
)[:, :, mask]
# Insert the data in a matrix
tri_x, tri_y = np.tril_indices(ma_indices.size, 0)
vis_matrix = np.zeros(
(
self.time[time_mask].size,
self.frequencies[frequency_mask].size,
ma_indices.size,
ma_indices.size
),
dtype=np.complex
)
vis_matrix[:, :, tri_x, tri_y] = vis
vis_matrix[:, :, tri_y, tri_x] = vis_matrix[:, :, tri_x, tri_y].conj()
# Calibrate the Xcorr with the caltable
for fi in range(vis_matrix.shape[1]):
cal_i = np.expand_dims(cal[fi], axis=1)
cal_i_h = np.expand_dims(cal[fi].T.conj(), axis=0)
mul = np.dot(cal_i, cal_i_h)
vis_matrix[:, fi, :, :] *= mul[np.newaxis, :, :]
# Phase the visibilities towards the phase center
phase = np.ones(
(
self.time[time_mask].size,
self.frequencies[frequency_mask].size,
ma_indices.size,
ma_indices.size
),
dtype=np.complex
)
altaz_pointing = pointing.horizontal_coordinates
if altaz_pointing.size == 1:
# Transit
pass
else:
# Multiple pointings, get the correct value for all times
altaz_pointing = pointing[self.time[time_mask]].horizontal_coordinates
az = altaz_pointing.az.rad
el = altaz_pointing.alt.rad
ground_projection = np.array([
np.cos(el) * np.cos(az),
np.cos(el) * np.sin(az),
np.sin(el)
])
rot = np.radians(-90)
rotation = np.array(
[
[ np.cos(rot), np.sin(rot), 0],
[-np.sin(rot), np.cos(rot), 0],
[ 0, 0, 1]
]
)
ma1_pos = np.dot(
nenufar.antenna_positions[ma1[mask]],
rotation
)
ma2_pos = np.dot(
nenufar.antenna_positions[ma2[mask]],
rotation
)
dphi = np.dot(
ma1_pos - ma2_pos,
ground_projection
).T
wvl = wavelength(self.frequencies[frequency_mask]).to(u.m).value
phase[:, :, tri_x, tri_y] = np.exp(
-2.j*np.pi/wvl[None, :, None] * dphi[:, None, :]
)
phase[:, :, tri_y, tri_x] = phase[:, :, tri_x, tri_y].conj().copy()
data = np.sum((vis_matrix * phase).real, axis=(2, 3))
return BST_Slice(
time=self.time[time_mask],
frequency=self.frequencies[frequency_mask],
value=data.squeeze()
)
# --------------------------------------------------------- #
# ----------------------- Internal ------------------------ #
def _get_freq_mask(self, frequency_selection=None):
""" """
# Frequency selection
frequencies = self.frequencies
if frequency_selection is None:
frequency_selection = f">={frequencies.min()} & <= {frequencies.max()}"
frequency_mask = self._parse_frequency_condition(frequency_selection)(frequencies)
if not np.any(frequency_mask):
log.warning(
"Empty frequency selection, input values should fall "
f"between {frequencies.min()} and {frequencies.max()}, "
f"i.e.: '>={frequencies.min()} & <= {frequencies.max()}'"
)
return frequency_mask
def _get_time_mask(self, time_selection=None):
""" """
# Time selection
if time_selection is None:
time_selection = f">={self.time[0].isot} & <= {self.time[-1].isot}"
time_mask = self._parse_time_condition(time_selection)(self.time)
if not np.any(time_mask):
log.warning(
"Empty time selection, input values should fall "
f"between {self.time[0].isot} and {self.time[-1].isot}, "
f"i.e.: '>={self.time[0].isot} & <= {self.time[-1].isot}'"
)
return time_mask
def _get_cross_idx(self, c1='X', c2='X'):
""" Retrieves visibilities indices for the given cross polarizations
"""
mini_arrays_size = self.mini_arrays.size
corr = np.array(['X', 'Y']*mini_arrays_size)
i_ant1, i_ant2 = np.tril_indices(mini_arrays_size*2, 0)
corr_mask = (corr[i_ant1] == c1) & (corr[i_ant2] == c2)
indices = np.arange(i_ant1.size)[corr_mask]
return indices
# ============================================================= #
# ============================================================= #
# ============================================================= #
# ---------------------------- XST ---------------------------- #
# ============================================================= #
class XST(StatisticsData, Crosslet):
""" """
def __init__(self, file_name):
super().__init__(file_name=file_name)
self.mini_arrays = self._meta_data['ins']['noMROn'][0]
# ============================================================= #
# ============================================================= #
# ============================================================= #
# ------------------------- TV_Image -------------------------- #
# ============================================================= #
class TV_Image:
""" """
def __init__(self,
tv_image: HpxSky,
analog_pointing: SkyCoord,
fov_radius: u.Quantity
):
self.tv_image = tv_image
self.analog_pointing = analog_pointing
self.fov_radius = fov_radius
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
@classmethod
def from_fits(cls, file_name):
""" """
header = fits.getheader(file_name, ext=1)
# Load the image
image = read_map(
file_name,
dtype=None,
partial="PARTIAL" in header["OBJECT"]
)
# Fill NaNs
if "PARTIAL" in header["OBJECT"]:
image[mask_bad(image)] = np.nan
# Recreate the sky
sky = HpxSky(
resolution=Angle(
angle=nside2resol(
header["NSIDE"],
arcmin=True
),
unit=u.arcmin
),
time=Time(header["OBSTIME"]),
frequency=header["FREQ"]*u.MHz,
polarization=np.array([header["STOKES"]]),
value=image.reshape((1, 1, 1, image.size))
)
return cls(
tv_image=sky,
analog_pointing=SkyCoord(
header["AZANA"]*u.deg,
header["ELANA"]*u.deg,
frame=AltAz(
obstime=Time(header["OBSTIME"]),
location=nenufar_position
)
),
fov_radius=header["FOV"]*u.deg/2
)
def save_fits(self, file_name: str, partial: bool = True):
""" """
phase_center_eq = altaz_to_radec(self.analog_pointing)
header = [
("software", 'nenupy'),
("version", nenupy.__version__),
("contact", nenupy.__email__),
("azana", self.analog_pointing.az.deg),
("elana", self.analog_pointing.alt.deg),
("freq", self.tv_image.frequency[0].to(u.MHz).value),
("obstime", self.tv_image.time[0].isot),
("fov", self.fov_radius.to(u.deg).value * 2),
("pc_ra", phase_center_eq.ra.deg),
("pc_dec", phase_center_eq.dec.deg),
("stokes", self.tv_image.polarization[0])
]
map2write = self.tv_image.value[0, 0, 0].copy()
write_map(
filename=file_name,
m=map2write,
nest=False,
coord='C',
overwrite=True,
dtype=self.tv_image.value.dtype,
extra_header=header,
partial=partial
)
log.info(
'HEALPix image of {} cells (nside={}) saved in `{}`.'.format(
map2write.size,
self.tv_image.nside,
file_name
)
)
def save_png(self, figname: str, beam_contours: bool = True, show_sources: bool = True):
""" """
image_center = altaz_to_radec(
SkyCoord(
self.analog_pointing.az,
self.analog_pointing.alt,
frame=AltAz(
obstime=self.tv_image.time[0],
location=nenufar_position
)
)
)
kwargs = {}
if show_sources:
src_names = []
src_position = []
with open(join(dirname(__file__), "nenufar_tv_sources.json")) as src_file:
sources = json.load(src_file)
for name in sources["FixedSources"]:
src = FixedTarget.from_name(name, time=self.tv_image.time[0])
if src.coordinates.separation(image_center) <= 0.8*self.fov_radius:
src_names.append(name)
src_position.append(src.coordinates)
for name in sources["SolarSystemSources"]:
src = SolarSystemTarget.from_name(name, time=self.tv_image.time[0])
if src.coordinates.separation(image_center) <= 0.8*self.fov_radius:
src_names.append(name)
src_position.append(src.coordinates)
if len(src_position) != 0:
kwargs["text"] = (SkyCoord(src_position), src_names, "white")
if beam_contours:
# Simulate the array factor
ma = MiniArray()
af_sky = ma.array_factor(
sky=HpxSky(
resolution=0.2*u.deg,
time=self.tv_image.time[0],
frequency=self.tv_image.frequency[0]
),
pointing=Pointing(
coordinates=image_center,
time=self.tv_image.time[0]
)
)
# Normalize the array factor
af = af_sky[0, 0, 0].compute()
af_normalized = af/af.max()
kwargs["contour"] = (af_normalized, | np.arange(0.5, 1, 0.2) | numpy.arange |
# Utility functions for the course Robot Modelling
# <NAME> (<EMAIL>), sept. 2016
#
# Additional functions added for more functionality
# <NAME> (<EMAIL>), sept. 2018
# <NAME> (<EMAIL>), sept. 2018
###############################################################################
import numpy as np
from numpy import cos, sin
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
"""
    Check whether the input is a valid rotation matrix.
:param R:
:return:
"""
Rt = np.transpose(R.copy())
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def inverse_kinematics_wrist(R):
"""
Calculates the inverse kinematics of the wrist of the robot
:param R:
:return:
"""
minplus = 1
t5 = np.arctan2(minplus * np.sqrt(1 - (R[2, 2]**2)), R[2, 2])
t4 = np.arctan2(minplus * R[1, 2], minplus * R[0, 2])
t6 = np.arctan2(minplus * R[2, 1], minplus * -R[2, 0])
R_check = np.array([[cos(t4) * cos(t5) * cos(t6) - sin(t4) * sin(t6) - R[0, 0], -cos(t4) * cos(t5) * sin(t6) - sin(t4) * cos(t6) - R[0, 1], cos(t4) * sin(t5) - R[0, 2]],
[sin(t4) * cos(t5) * cos(t6) + cos(t4) * sin(t6) - R[1, 0], -sin(t4) * cos(t5) * sin(t6) + cos(t4) * cos(t6) - R[1, 1], sin(t4) * sin(t5) - R[1, 2]],
[-sin(t5) * cos(t6) - R[2, 0], sin(t5) * sin(t6) - R[2, 1], cos(t5) - R[2, 2]]])
return np.array([t4, t5, t6]), R_check
def make_rotation_matrix(axis, angle):
"""
make a rotation matrix based on an angle and specified axis
:param axis: string that specifies over which axis will be rotated
:param angle: rotation angle in radians
:return: rotation matrix
"""
if axis == "x":
return np.array([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == "y":
return np.array([[cos(angle), 0, -sin(angle)],
[0, 1, 0],
[sin(angle), 0, cos(angle)]])
elif axis == "z":
return np.array([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
def make_DH_matrix(DH_parameters):
"""
    Make a homogeneous transformation matrix based on the Denavit-Hartenberg convention.
:param DH_parameters: array of 4 with all DH parameters
:return: DH matrix
"""
from numpy import cos, sin
length = DH_parameters[0]
twist = DH_parameters[1]
offset = DH_parameters[2]
angle = DH_parameters[3]
return np.array([[cos(angle), -sin(angle) * cos(twist), sin(angle) * sin(twist), length * cos(angle)],
[sin(angle), cos(angle) * cos(twist), -cos(angle) * sin(twist), length * sin(angle)],
[0, sin(twist), cos(twist), offset],
[0, 0, 0, 1]])
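# Example (illustrative values): the homogeneous transform of a link with
# length 0.5 m, zero twist, 0.1 m offset and a 90-degree joint angle.
#
#     T = make_DH_matrix([0.5, 0.0, 0.1, np.pi / 2])
#     position = T[:3, 3]       # translation part
#     orientation = T[:3, :3]   # rotation part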
def interpolate(values, precision):
"""Create positionvalues within the given trajectory
precision = amount of subvalues"""
nr_values = len(values)
solution = []
for nr in range(0, nr_values):
if nr < nr_values - 1:
delta_val = np.subtract(values[nr + 1], values[nr])
x_val = np.true_divide(delta_val, precision)
for x in range(0, precision):
solution.append(np.add(values[nr], np.multiply(x_val, x)))
else:
break
solution = np.array(solution)
return solution
def make_homogenious_matrix(rotation, translation):
return np.vstack((np.hstack((rotation, translation)), np.array([0, 0, 0, 1])))
# function for the inverse kinematics of a 3DOF robot
def inverse_algorithm_3DOF(arms, points, elbow_down=False):
"""Inverse kinematics of a scara robot.
Inputs:
arms: 3-element array/list with arm lengths
        points: 3-element array with the (x, y, z) coordinate of the end point
elbow_down (optional): True/False boolean to determine
which solution needs to be returned
Output:
        angles: 4-element list with joint angles in radians(!); the last element is 0
        error: boolean flag, True when the target is out of reach
"""
x = points[0]
y = points[1]
z = points[2]
d1 = arms[0]
d2 = arms[1]
d3 = arms[2]
s = z - d1
r = np.sqrt(x**2 + y**2)
c = np.sqrt(r**2 + s**2)
beta = np.arctan2(s, r)
alpha = np.arccos(np.minimum(1, ((-d3**2 + d2**2 + c**2) / (2 * d2 * c))))
theta1 = np.arctan2(y, x)
upper_cos = (-c**2 + d3**2 + d2**2)
lower_cos = (2 * d3 * d2)
if abs(upper_cos) > abs(lower_cos):
return [0, 0, 0], True
if elbow_down:
theta2 = beta - alpha
theta3 = np.radians(180) - np.arccos(np.minimum(1, (upper_cos / lower_cos)))
else:
theta2 = beta + alpha
theta3 = -(np.radians(180) - np.arccos(np.minimum(1, (upper_cos / lower_cos))))
angles = [theta1, theta2, theta3, 0]
return angles, False
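# Illustrative sketch (added for clarity, not part of the original code):
# solve the 3-DOF inverse kinematics for a reachable target; the second
# return value is True when the point is out of reach. The arm lengths and
# target below are made-up placeholders.
def _example_inverse_3dof():
    arm_lengths = [0.2, 0.3, 0.25]      # base height d1 and link lengths d2, d3
    target = [0.3, 0.1, 0.35]           # (x, y, z) end-effector position
    joint_angles, unreachable = inverse_algorithm_3DOF(arm_lengths, target)
    return joint_angles, unreachable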
def kin_planar_forward(arms, angles):
"""Forward kinematics of a 2-link planar robot.
Inputs:
arms: 2-element array/list with arm lengths
angles: 2-element array/list with angles in radians(!)
Output:
        points: 2-element numpy array with the (x, y) coordinate of the end point
"""
x1 = arms[0] * np.cos(angles[0])
y1 = arms[0] * np.sin(angles[0])
x2 = x1 + arms[1] * np.cos(angles[0] + angles[1])
y2 = y1 + arms[1] * np.sin(angles[0] + angles[1])
points = np.array([x2, y2])
return points
def kin_planar_inverse(arms, points, elbow_down=True):
"""Inverse kinematics of a 2-link planar robot.
Inputs:
arms: 2-element array/list with arm lengths
        points: 2-element array with the (x, y) coordinate of the end point
elbow_down (optional): True/False boolean to determine
which solution needs to be returned
Output:
angles: 2-element array/list with angles in radians(!)
"""
x = points[0]
y = points[1]
a1 = arms[0]
a2 = arms[1]
D = (x ** 2 + y ** 2 - a1 ** 2 - a2 ** 2) / (2 * a1 * a2)
f = np.sqrt(1 - (D ** 2))
if elbow_down:
theta2 = np.arctan2(f, D)
else:
theta2 = np.arctan2(-f, D)
theta1 = np.arctan2(y, x) - np.arctan2((a2 * np.sin(theta2)), (a1 + a2 * np.cos(theta2)))
angles = np.array([theta1, theta2])
return angles
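# Illustrative round-trip sketch (added for clarity, not part of the original
# code): the planar inverse kinematics should reproduce the angles that
# generated a point via the forward kinematics (for the matching elbow branch).
def _example_planar_round_trip():
    arms_2d = [0.4, 0.3]
    angles_in = [0.5, -0.8]
    xy = kin_planar_forward(arms_2d, angles_in)
    angles_out = kin_planar_inverse(arms_2d, xy, elbow_down=False)  # matches theta2 < 0
    return angles_in, angles_out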
def sphere():
import pyqtgraph.opengl as gl
sphere_data= gl.MeshData.sphere(rows=8,
cols=16)
obj = gl.GLMeshItem(meshdata=sphere_data,
smooth=False,
drawFaces=True,
faceColor=(0.2, 0.3, 0.4, 1),
drawEdges=False,
edgeColor=(0.2, 0.3, 0.4, 1))
return obj
# cylinder is a convenience function to create a cylinder shape in
# pyqtgraph/OpenGL, it gives you a number of vertices distributed over the
# surface of the cylinder and triangular shaped faces that cover the whole
# surface of the cylinder
# cylinders are being used to visualize joints
def cylinder(radius, height, N):
"""Calculates vertices and faces for a cylinder for visualisation in
pyqtgraph/OpenGL.
Inputs:
radius: radius of the cylinder
height: height of the cylinder
N: number of segments to approximate the circular shape of the cylinder
Outputs:
vertices: array with on each row the (x,y,z) coordinates of the vertices
faces: array with triangular faces of the cylinder
Note:
The cylinder is a circle in the x,y plane with center at (0,0) that is
extruded along the z-axis.
"""
import scipy.spatial
t = np.linspace(0, 2 * np.pi, N, endpoint=False).reshape(N, 1)
vertices = np.zeros((2 * N, 3))
vertices[0:N, :] = np.hstack((radius * np.cos(t), radius * np.sin(t), np.zeros((N, 1))))
vertices[N:2 * N, :] = vertices[0:N, :] + np.hstack((np.zeros((N, 2)), height * np.ones((N, 1))))
faces = np.zeros((N - 2 + 2 * N + N - 2, 3), dtype=np.uint)
# bottom, makes use of Delaunay triangulation contained in Scipy's
# submodule spatial (which on its turn makes use of the Qhull library)
    faces[0:N - 2, :] = scipy.spatial.Delaunay(
        vertices[0:N, 0:2], furthest_site=True, qhull_options='QJ'
    ).simplices[:, -1::-1]
# sides
for i in range(N - 1):
faces[N - 2 + 2 * i, :] = np.array([i, i + 1, N + i + 1], dtype=np.uint)
        faces[N - 2 + 2 * i + 1, :] = np.array([i, N + i + 1, N + i], dtype=np.uint)
"""
.. module:: reporters
:platform: Unix, Windows
:synopsis: a module for defining OpenMM reporter classes.
.. moduleauthor:: <NAME> <<EMAIL>>
.. _pandas.DataFrame: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _StateDataReporter: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.app.statedatareporter.StateDataReporter.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _CustomCVForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
"""
import sys
import numpy as np
import pandas as pd
from simtk import openmm
from simtk import unit
from simtk.openmm import app
from .computers import PressureComputer
from .computers import _MoleculeTotalizer
from .utils import InputError
class _MultiStream:
def __init__(self, outputs):
self._outputs = list()
for output in outputs:
self._outputs.append(open(output, 'w') if isinstance(output, str) else output)
def __del__(self):
for output in self._outputs:
if output != sys.stdout and output != sys.stderr:
output.close()
def write(self, message):
for output in self._outputs:
output.write(message)
def flush(self):
for output in self._outputs:
output.flush()
class _AtomsMM_Reporter():
"""
Base class for reporters.
"""
def __init__(self, file, reportInterval, **kwargs):
self._reportInterval = reportInterval
self._requiresInitialization = True
self._needsPositions = False
self._needsVelocities = False
self._needsForces = False
self._needEnergy = False
extraFile = kwargs.pop('extraFile', None)
if extraFile is None:
self._out = open(file, 'w') if isinstance(file, str) else file
else:
self._out = _MultiStream([file, extraFile])
self._separator = kwargs.pop('separator', ',')
def _initialize(self, simulation, state):
pass
def _generateReport(self, simulation, state):
pass
def describeNextReport(self, simulation):
"""
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, self._needsPositions, self._needsVelocities, self._needsForces, self._needEnergy)
def report(self, simulation, state):
"""
Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
if self._requiresInitialization:
self._initialize(simulation, state)
self._requiresInitialization = False
self._generateReport(simulation, state)
class ExtendedStateDataReporter(app.StateDataReporter):
"""
An extension of OpenMM's StateDataReporter_ class, which outputs information about a simulation,
such as energy and temperature, to a file.
All original functionalities of StateDataReporter_ are preserved and the following ones are
included:
1. Report the Coulomb contribution of the potential energy (keyword: `coulombEnergy`):
This contribution includes both real- and reciprocal-space terms.
2. Report the atomic virial of a fully-flexible system (keyword: `atomicVirial`):
Considering full scaling of atomic coordinates in a box volume change (i.e. without any
distance constraints), the internal virial of the system is given by
.. math::
W = -\\sum_{i,j} r_{ij} E^\\prime(r_{ij}),
where :math:`E^\\prime(r)` is the derivative of the pairwise interaction potential as a
        function of the distance between two atoms. Such interaction includes van der Waals, Coulomb,
and bond-stretching contributions. Bond-bending and dihedral angles are not considered
because they are invariant to full volume-scaling of atomic coordinates.
3. Report the nonbonded contribution of the atomic virial (keyword: `nonbondedVirial`):
The nonbonded virial is given by
.. math::
W_\\mathrm{nb} = -\\sum_{i,j} r_{ij} E_\\mathrm{nb}^\\prime(r_{ij}),
where :math:`E_\\mathrm{nb}^\\prime(r)` is the derivative of the nonbonded pairwise
potential, which comprises van der Waals and Coulomb interactions only.
4. Report the atomic pressure of a fully-flexible system (keyword: `atomicPressure`):
.. math::
P = \\frac{2 K + W}{3 V},
where :math:`K` is the kinetic energy sum for all atoms in the system. If keyword
`bathTemperature` is employed (see below), the instantaneous kinetic energy is substituted
by its equipartition-theorem average
:math:`\\left\\langle K \\right\\rangle = 3 N_\\mathrm{atoms} k_B T/2`,
where :math:`T` is the heat-bath temperature.
5. Report the molecular virial of a system (keyword: `molecularVirial`):
To compute the molecular virial, only the center-of-mass coordinates of the molecules are
considered to scale in a box volume change, while the internal molecular structure is kept
unaltered. The molecular virial is computed from the nonbonded part of the atomic virial by
using the formulation of Ref. :cite:`Hunenberger_2002`:
.. math::
W_\\mathrm{mol} = W - \\sum_{i} (\\mathbf{r}_i - \\mathbf{r}_i^\\mathrm{cm}) \\cdot \\mathbf{F}_i,
where :math:`\\mathbf{r}_i` is the coordinate of atom i, :math:`\\mathbf{F}_i` is the
resultant pairwise force acting on it (excluding bond-bending and dihedral angles), and
:math:`\\mathbf{r}_i^\\mathrm{cm}` is the center-of-mass coordinate of its containing
molecule.
6. Report the molecular pressure of a system (keyword: `molecularPressure`):
.. math::
P = \\frac{2 K_\\mathrm{mol} + W_\\mathrm{mol}}{3 V},
where :math:`K_\\mathrm{mol}` is the center-of-mass kinetic energy summed for all molecules
in the system. If keyword `bathTemperature` is employed (see below), the instantaneous
kinetic energy is substituted by its equipartition-theorem average
:math:`\\left\\langle K_\\mathrm{mol} \\right\\rangle = 3 N_\\mathrm{mols} k_B T/2`,
where :math:`T` is the heat-bath temperature.
7. Report the center-of-mass kinetic energy (keyword: `molecularKineticEnergy`):
.. math::
K_\\mathrm{mol} = \\frac{1}{2} \\sum_{i=1}^{N_\\mathrm{mol}} M_i v_{\\mathrm{cm}, i}^2,
where :math:`N_\\mathrm{mol}` is the number of molecules in the system, :math:`M_i` is the
total mass of molecule `i`, and :math:`v_{\\mathrm{cm}, i}` is the center-of-mass velocity
of molecule `i`.
8. Report potential energies at multiple global parameter states (keyword: `globalParameterStates`):
Computes and reports the potential energy of the system at a number of provided global
parameter states.
9. Report global parameter values (keyword: `globalParameters`):
Reports the values of specified global parameters.
10. Report derivatives of energy with respect to global parameters (keyword: `energyDerivatives`):
Computes and reports derivatives of the potential energy of the system at the current
state with respect to specified global parameters.
11. Report values of collective variables (keyword: `collectiveVariables`)
Report the values of a set of collective variables.
12. Allow specification of an extra file for reporting (keyword: `extraFile`).
This can be used for replicating a report simultaneously to `sys.stdout` and to a file
using a unique reporter.
Keyword Args
------------
coulombEnergy : bool, optional, default=False
Whether to write the Coulomb contribution of the potential energy to the file.
atomicVirial : bool, optional, default=False
Whether to write the total atomic virial to the file.
nonbondedVirial : bool, optional, default=False
Whether to write the nonbonded contribution to the atomic virial to the file.
atomicPressure : bool, optional, default=False
Whether to write the internal atomic pressure to the file.
molecularVirial : bool, optional, default=False
Whether to write the molecular virial to the file.
molecularPressure : bool, optional, default=False
Whether to write the internal molecular pressure to the file.
molecularKineticEnergy : bool, optional, default=False
Whether to write the molecular center-of-mass kinetic energy to the file.
globalParameterStates : pandas.DataFrame_, optional, default=None
A DataFrame containing context global parameters (column names) and sets of values
thereof. If it is provided, then the potential energy will be reported for every state
these parameters define.
globalParameters : list(str), optional, default=None
A list of global parameter names. If it is provided, then the values of these parameters
will be reported.
energyDerivatives : list(str), optional, default=None
A list of global parameter names. If it is provided, then the derivatives of the
total potential energy with respect to these parameters will be reported. It is
necessary that the calculation of these derivatives has been activated beforehand
(see, for instance, CustomIntegrator_).
collectiveVariables : list(openmm.CustomCVForce), optional, default=None
A list of CustomCVForce_ objects. If it is provided, then the values of all collective
variables associated with these objects will be reported.
pressureComputer : :class:`~atomsmm.computers.PressureComputer`, optional, default=None
A computer designed to determine pressures and virials. This is mandatory if any keyword
related to virial or pressure is set as `True`.
extraFile : str or file, optional, default=None
Extra file to write to, specified as a file name or a file object.
"""
def __init__(self, file, reportInterval, **kwargs):
self._coulombEnergy = kwargs.pop('coulombEnergy', False)
self._atomicVirial = kwargs.pop('atomicVirial', False)
self._nonbondedVirial = kwargs.pop('nonbondedVirial', False)
self._atomicPressure = kwargs.pop('atomicPressure', False)
self._molecularVirial = kwargs.pop('molecularVirial', False)
self._molecularPressure = kwargs.pop('molecularPressure', False)
self._molecularKineticEnergy = kwargs.pop('molecularKineticEnergy', False)
self._globalParameterStates = kwargs.pop('globalParameterStates', None)
self._globalParameters = kwargs.pop('globalParameters', None)
self._energyDerivatives = kwargs.pop('energyDerivatives', None)
self._collectiveVariables = kwargs.pop('collectiveVariables', None)
self._pressureComputer = kwargs.pop('pressureComputer', None)
extra = kwargs.pop('extraFile', None)
if extra is None:
super().__init__(file, reportInterval, **kwargs)
else:
super().__init__(_MultiStream([file, extra]), reportInterval, **kwargs)
self._computing = any([self._coulombEnergy,
self._atomicVirial,
self._nonbondedVirial,
self._atomicPressure,
self._molecularVirial,
self._molecularPressure,
self._molecularKineticEnergy])
if self._computing:
if self._pressureComputer is not None and not isinstance(self._pressureComputer, PressureComputer):
raise InputError('keyword "pressureComputer" requires a PressureComputer instance')
self._needsPositions = True
self._needsForces = any([self._needsForces,
self._molecularVirial,
self._molecularPressure])
self._needsVelocities = any([self._needsVelocities,
self._molecularPressure,
self._atomicPressure,
self._molecularKineticEnergy])
self._backSteps = -sum([self._speed, self._elapsedTime, self._remainingTime])
def _add_item(self, lst, item):
if self._backSteps == 0:
lst.append(item)
else:
lst.insert(self._backSteps, item)
def _constructHeaders(self):
headers = super()._constructHeaders()
if self._coulombEnergy:
self._add_item(headers, 'Coulomb Energy (kJ/mole)')
if self._atomicVirial:
self._add_item(headers, 'Atomic Virial (kJ/mole)')
if self._nonbondedVirial:
self._add_item(headers, 'Nonbonded Virial (kJ/mole)')
if self._atomicPressure:
self._add_item(headers, 'Atomic Pressure (atm)')
if self._molecularVirial:
self._add_item(headers, 'Molecular Virial (kJ/mole)')
if self._molecularPressure:
self._add_item(headers, 'Molecular Pressure (atm)')
if self._molecularKineticEnergy:
self._add_item(headers, 'Molecular Kinetic Energy (kJ/mole)')
if self._globalParameterStates is not None:
for index in self._globalParameterStates.index:
self._add_item(headers, 'Energy[{}] (kJ/mole)'.format(index))
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(headers, name)
if self._energyDerivatives is not None:
for name in self._energyDerivatives:
self._add_item(headers, 'diff(E,{})'.format(name))
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for index in range(force.getNumCollectiveVariables()):
name = force.getCollectiveVariableName(index)
self._add_item(headers, name)
return headers
def _constructReportValues(self, simulation, state):
values = super()._constructReportValues(simulation, state)
if self._computing:
computer = self._pressureComputer
computer.import_configuration(state)
atomicVirial = computer.get_atomic_virial().value_in_unit(unit.kilojoules_per_mole)
if self._coulombEnergy:
coulombVirial = computer.get_coulomb_virial()
self._add_item(values, coulombVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicVirial:
self._add_item(values, atomicVirial)
if self._nonbondedVirial:
nonbondedVirial = computer.get_dispersion_virial() + computer.get_coulomb_virial()
self._add_item(values, nonbondedVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicPressure:
atomicPressure = computer.get_atomic_pressure()
self._add_item(values, atomicPressure.value_in_unit(unit.atmospheres))
if self._molecularVirial or self._molecularPressure:
forces = state.getForces(asNumpy=True)
if self._molecularVirial:
molecularVirial = computer.get_molecular_virial(forces)
self._add_item(values, molecularVirial.value_in_unit(unit.kilojoules_per_mole))
if self._molecularPressure:
molecularPressure = computer.get_molecular_pressure(forces)
self._add_item(values, molecularPressure.value_in_unit(unit.atmospheres))
if self._molecularKineticEnergy:
molKinEng = computer.get_molecular_kinetic_energy()
self._add_item(values, molKinEng.value_in_unit(unit.kilojoules_per_mole))
if self._globalParameterStates is not None:
original = dict()
for name in self._globalParameterStates.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for index, row in self._globalParameterStates.iterrows():
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
self._add_item(values, energy.value_in_unit(unit.kilojoules_per_mole))
for name, value in original.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(values, simulation.context.getParameter(name))
if self._energyDerivatives is not None:
mystate = simulation.context.getState(getParameterDerivatives=True)
derivative = mystate.getEnergyParameterDerivatives()
for name in self._energyDerivatives:
self._add_item(values, derivative[name])
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for cv in force.getCollectiveVariableValues(simulation.context):
self._add_item(values, cv)
return values
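# Illustrative usage sketch (added for clarity, not part of the original
# module): attach an ExtendedStateDataReporter to an existing OpenMM
# Simulation. The file name, report interval, and the global parameter name
# 'lambda_vdw' are hypothetical placeholders.
def _example_attach_extended_reporter(simulation):
    reporter = ExtendedStateDataReporter(
        'state.csv', 1000,
        step=True, potentialEnergy=True, temperature=True,
        globalParameters=['lambda_vdw'],
        extraFile=sys.stdout,
    )
    simulation.reporters.append(reporter)
    return reporter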
class XYZReporter(_AtomsMM_Reporter):
"""
Outputs to an XYZ-format file a series of frames containing the coordinates, velocities,
momenta, or forces on all atoms in a Simulation.
.. note::
        Coordinates are expressed in angstroms, velocities in angstrom/picosecond, momenta in
        dalton*angstrom/picosecond, and forces in dalton*angstrom/picosecond^2.
To use this reporter, create an XYZReporter object and append it to the Simulation's list of
reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def __init__(self, file, reportInterval, **kwargs):
self._output = kwargs.get('output', 'positions')
self._groups = kwargs.get('groups', None)
if self._output == 'positions':
self._unit = unit.angstroms
elif self._output == 'velocities':
self._unit = unit.angstroms/unit.picoseconds
elif self._output == 'momenta':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds
elif self._output == 'forces':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds**2
else:
raise InputError('Unrecognizable keyword value')
super().__init__(file, reportInterval, **kwargs)
self._needsPositions = self._output == 'positions'
self._needsVelocities = self._output in ['velocities', 'momenta']
self._needsForces = self._output == 'forces'
def _initialize(self, simulation, state):
self._symbols = [atom.element.symbol for atom in simulation.topology.atoms()]
sys = simulation.system
self._N = sys.getNumParticles()
if self._output == 'momenta':
mass = [sys.getParticleMass(i).value_in_unit(unit.dalton) for i in range(self._N)]
self._mass = np.vstack([mass, mass, mass]).transpose()*unit.dalton
def _get_values(self, simulation, state):
if self._output == 'positions':
values = state.getPositions(asNumpy=True)
elif self._output == 'velocities':
values = state.getVelocities(asNumpy=True)
elif self._output == 'momenta':
values = self._mass*state.getVelocities(asNumpy=True)
elif self._groups is None:
values = state.getForces(asNumpy=True)
else:
new_state = simulation.context.getState(getForces=True, groups=self._groups)
values = new_state.getForces(asNumpy=True)
return values.value_in_unit(self._unit)
def _write(self, step, N, names, values):
print(N, file=self._out)
pd.DataFrame(index=names, data=values).to_csv(
self._out,
sep='\t',
header=[f'{self._output} in {self._unit} at time step {step}', '', ''],
)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
self._write(simulation.currentStep, self._N, self._symbols, values)
class CenterOfMassReporter(XYZReporter):
"""
Outputs to an XYZ-format file a series of frames containing the center-of-mass coordinates,
center-of-mass velocities, total momenta, or resultant forces on all molecules in a Simulation.
.. note::
        Coordinates are expressed in angstroms, velocities in angstrom/picosecond, momenta in
        dalton*angstrom/picosecond, and forces in dalton*angstrom/picosecond^2.
    To use this reporter, create a CenterOfMassReporter object and append it to the Simulation's
list of reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def _initialize(self, simulation, state):
super()._initialize(simulation, state)
self._mols = _MoleculeTotalizer(simulation.context, simulation.topology)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
if self._output in ['positions', 'velocities']:
cm_values = self._mols.massFrac.dot(values)
else:
cm_values = self._mols.selection.dot(values)
self._write(simulation.currentStep, self._mols.nmols, self._mols.residues, cm_values)
class CustomIntegratorReporter(_AtomsMM_Reporter):
"""
Outputs global and per-DoF variables of a CustomIntegrator instance.
Keyword Args
------------
describeOnly : bool, optional, default=True
Whether to output only descriptive statistics that summarize the activated per-Dof
variables.
"""
def __init__(self, file, reportInterval, **kwargs):
super().__init__(file, reportInterval, **kwargs)
self._describeOnly = kwargs.pop('describeOnly', True)
self._variables = []
for key, value in kwargs.items():
if value is True:
self._variables.append(key)
if not self._variables:
raise InputError("No global or perDof variables have been passed")
def _initialize(self, simulation, state):
integrator = self._integrator = simulation.integrator
if not isinstance(integrator, openmm.CustomIntegrator):
raise Exception("simulation.integrator is not a CustomIntegrator")
self._globals = {}
for index in range(integrator.getNumGlobalVariables()):
variable = integrator.getGlobalVariableName(index)
if variable in self._variables:
self._globals[variable] = index
self._perDof = {}
for index in range(integrator.getNumPerDofVariables()):
variable = integrator.getPerDofVariableName(index)
if variable in self._variables:
self._perDof[variable] = index
if set(self._variables) != set(self._globals) | set(self._perDof):
raise InputError("Unknown variables have been passed")
def _generateReport(self, simulation, state):
for variable, index in self._globals.items():
value = self._integrator.getGlobalVariable(index)
print('{}\n{}'.format(variable, value), file=self._out)
for variable, index in self._perDof.items():
values = self._integrator.getPerDofVariable(index)
titles = ['{}.{}'.format(variable, dir) for dir in ['x', 'y', 'z']]
df = pd.DataFrame(data=np.array(values), columns=titles)
if self._describeOnly:
print(df.describe(), file=self._out)
else:
df.to_csv(self._out, sep='\t')
class ExpandedEnsembleReporter(_AtomsMM_Reporter):
"""
Performs an Expanded Ensemble simulation and reports the energies of multiple states.
Parameters
----------
states : pandas.DataFrame_
A DataFrame containing context global parameters (column names) and sets of values
thereof. The potential energy will be reported for every state these parameters define.
If one of the variables is named as `weight`, then its set of values will be assigned
to every state as an importance sampling weight. Otherwise, all states will have
identical weights. States which are supposed to only have their energies reported, with
no actual visits, can have their weights set up to `-inf`.
temperature : unit.Quantity
The system temperature.
Keyword Args
------------
reportsPerExchange : int, optional, default=1
The number of reports between attempts to exchange the global parameter state, that is,
the exchange interval measured in units of report intervals.
"""
def __init__(self, file, reportInterval, states, temperature, **kwargs):
self._parameter_states = states.copy()
self._nstates = len(states.index)
self._reports_per_exchange = kwargs.pop('reportsPerExchange', 1)
super().__init__(file, reportInterval, **kwargs)
if 'weight' in states:
self._weights = self._parameter_states.pop('weight').values
finite = np.where(np.isfinite(self._weights))[0]
self._first_state = finite[0]
self._last_state = finite[-1]
else:
self._weights = np.zeros(self._nstates)
self._first_state = 0
self._last_state = self._nstates - 1
kT = (unit.MOLAR_GAS_CONSTANT_R*temperature).value_in_unit(unit.kilojoules_per_mole)
self._beta = 1.0/kT
self._nreports = 0
self._overall_visits = np.zeros(self._nstates, dtype=int)
self._downhill_visits = np.zeros(self._nstates, dtype=int)
self._probability_accumulators = np.zeros(self._nstates)
self._downhill = False
self._counting_started = False
self._regime_change = []
def _initialize(self, simulation, state):
headers = ['step', 'state']
for index in self._parameter_states.index:
headers.append('Energy[{}] (kJ/mole)'.format(index))
print(*headers, sep=self._separator, file=self._out)
def _register_visit(self, state):
if self._downhill:
if state == self._first_state:
self._downhill = False
self._regime_change.append(self._nreports)
elif state == self._last_state:
self._downhill = True
self._regime_change.append(self._nreports)
if self._counting_started:
self._overall_visits[state] += 1
if self._downhill:
self._downhill_visits[state] += 1
else:
self._counting_started = self._downhill is True
def _generateReport(self, simulation, state):
energies = np.zeros(self._nstates)
original = dict()
for name in self._parameter_states.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for i, (index, row) in enumerate(self._parameter_states.iterrows()):
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
energies[i] = energy.value_in_unit(unit.kilojoules_per_mole)
self._nreports += 1
exponents = self._weights - self._beta*energies
probabilities = np.exp(exponents - np.amax(exponents))
        probabilities /= np.sum(probabilities)
import numpy as np
class NACA4:
def __init__(self, foil, close_trailing_edge=False):
m = float(foil[0]) / 100. # max camber
p = float(foil[1]) / 10. # chord-wise position of max camber
t = float(foil[2]) / 100. # thickness
self.foil = (m, p, t)
if close_trailing_edge:
self.c4 = -0.1036
else:
self.c4 = -0.1015
def calculate_camber_line(self, x):
x = np.asarray(x)
m, p = self.foil[0], self.foil[1]
if m:
yc = np.asarray(2 * p * x - x ** 2)
sel_a = x <= p
yc[sel_a] *= m / p ** 2
sel_b = np.logical_not(sel_a)
yc[sel_b] = m / (1 - p) ** 2 * (1 - 2 * p + yc[sel_b])
return yc
else:
return np.zeros_like(x)
def calculate_surface_points(self, x):
x = np.asarray(x)
        # standard NACA 4-digit half-thickness distribution; the last
        # coefficient self.c4 is -0.1015 (open) or -0.1036 (closed trailing edge)
        yt = 5 * self.foil[2] * (0.2969 * np.sqrt(x) - 0.1260 * x - 0.3516 * x ** 2
                                 + 0.2843 * x ** 3 + self.c4 * x ** 4)
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import division, print_function
import ast
from copy import copy
from collections import OrderedDict
from math import cos, sin
import numpy as np
from six import string_types
import warnings
from ase.geometry import cellpar_to_cell, complete_cell, get_distances
from matplotlib.colors import rgb2hex
from scipy.interpolate import interp1d
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.sparse_list import SparseArray, SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement, ElementColorDictionary
from pyiron.base.settings.generic import Settings
from scipy.spatial import cKDTree, Voronoi
try:
import spglib
except ImportError:
try:
import pyspglib as spglib
except ImportError:
raise ImportError("The spglib package needs to be installed")
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class Atoms(object):
"""
The Atoms class represents all the information required to describe a structure at the atomic scale. This class is
written in such a way that is compatible with the `ASE atoms class`_. Some of the functions in this module is based
on the corresponding implementation in the ASE package
Args:
elements (list/numpy.ndarray): List of strings containing the elements or a list of
atomistics.structure.periodic_table.ChemicalElement instances
numbers (list/numpy.ndarray): List of atomic numbers of elements
symbols (list/numpy.ndarray): List of chemical symbols
positions (list/numpy.ndarray): List of positions
scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates)
pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes
cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure
Note: Only one of elements/symbols or numbers should be assigned during initialization
Attributes:
indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms
.. _ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html
"""
def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None,
magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None,
calculator=None, info=None, indices=None, elements=None, dimension=None, species=None,
**qwargs):
if symbols is not None:
if elements is None:
elements = symbols
else:
raise ValueError("Only elements OR symbols should be given.")
if tags is not None or momenta is not None or masses is not None or charges is not None \
or celldisp is not None or constraint is not None or calculator is not None or info is not None:
s.logger.debug('Not supported parameter used!')
self._store_elements = dict()
self._species_to_index_dict = None
self.colorLut = ElementColorDictionary().to_lut()
self._is_scaled = False
if cell is not None:
# make it ASE compatible
if np.linalg.matrix_rank(cell) == 1:
cell = np.eye(len(cell)) * cell
else:
cell = np.array(cell)
self._cell = cell
self._species = list()
self.positions= None
self._pse = PeriodicTable()
self._tag_list = SparseArray()
self.indices = np.array([])
self._info = dict()
self.arrays = dict()
self.adsorbate_info = {}
self.bonds = None
self._pbc = False
self.dimension = 3 # Default
self.units = {"length": "A", "mass": "u"}
el_index_lst = list()
element_list = None
if (elements is None) and (numbers is None) and (indices is None):
return
if numbers is not None: # for ASE compatibility
if not (elements is None):
raise AssertionError()
elements = self.numbers_to_elements(numbers)
if elements is not None:
el_object_list = None
if isinstance(elements, str):
element_list = self.convert_formula(elements)
elif isinstance(elements, (list, tuple, np.ndarray)):
if not all([isinstance(el, elements[0].__class__) for el in elements]):
object_list = list()
for el in elements:
if isinstance(el, (str, np.str, np.str_)):
object_list.append(self.convert_element(el))
if isinstance(el, ChemicalElement):
object_list.append(el)
if isinstance(el, Atom):
object_list.append(el.element)
if isinstance(el, (int, np.integer)):
# pse = PeriodicTable()
object_list.append(self._pse.element(el))
el_object_list = object_list
if len(elements) == 0:
element_list = elements
else:
if isinstance(elements[0], (list, tuple, np.ndarray)):
elements = np.array(elements).flatten()
if isinstance(elements[0], string_types):
element_list = elements
elif isinstance(elements[0], ChemicalElement):
el_object_list = elements
elif isinstance(elements[0], Atom):
el_object_list = [el.element for el in elements]
positions = [el.position for el in elements]
elif elements.dtype in [int, np.integer]:
el_object_list = self.numbers_to_elements(elements)
else:
raise ValueError('Unknown static type for element in list: ' + str(type(elements[0])))
if el_object_list is None:
el_object_list = [self.convert_element(el) for el in element_list]
self.set_species(list(set(el_object_list)))
# species_to_index_dict = {el: i for i, el in enumerate(self.species)}
el_index_lst = [self._species_to_index_dict[el] for el in el_object_list]
elif indices is not None:
el_index_lst = indices
self.set_species(species)
if scaled_positions is not None:
if positions is not None:
raise ValueError("either position or scaled_positions can be given")
if cell is None:
raise ValueError('scaled_positions can only be used with a given cell')
positions = np.dot(np.array(cell).T, np.array(scaled_positions).T).T
if positions is None:
self.dimension = 3
if cell is not None:
positions = np.zeros((len(el_index_lst), self.dimension))
self.indices = np.array(el_index_lst)
self.positions = np.array(positions).astype(np.float)
self._tag_list._length = len(positions)
for key, val in qwargs.items():
print('set qwargs (ASE): ', key, val)
setattr(self, key, val)
if len(positions) > 0:
self.dimension = len(positions[0])
else:
self.dimension = 3
if dimension is not None:
self.dimension = dimension
if cell is not None:
if pbc is None:
self.pbc = True # default setting
else:
self.pbc = pbc
self.set_initial_magnetic_moments(magmoms)
@property
def cell(self):
"""
numpy.ndarray: A size 3x3 array which gives the lattice vectors of the cell as [a1, a2, a3]
"""
return self._cell
@cell.setter
def cell(self, value):
if value is None:
self._cell = None
else:
if self._is_scaled:
self.set_cell(value, scale_atoms=True)
else:
self.set_cell(value)
@property
def species(self):
"""
list: A list of atomistics.structure.periodic_table.ChemicalElement instances
"""
return self._species
# @species.setter
def set_species(self, value):
"""
Setting the species list
Args:
value (list): A list atomistics.structure.periodic_table.ChemicalElement instances
"""
if value is None:
return
value = list(value)
self._species_to_index_dict = {el: i for i, el in enumerate(value)}
self._species = value[:]
self._store_elements = {el.Abbreviation: el for el in value}
@property
def info(self):
"""
dict: This dictionary is merely used to be compatible with the ASE Atoms class.
"""
return self._info
@info.setter
def info(self, val):
self._info = val
@property
def pbc(self):
"""
        list: A list of boolean values which gives the periodic boundary conditions along the three axes.
The default value is [True, True, True]
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return self._pbc
@pbc.setter
def pbc(self, val):
self._pbc = val
@property
def elements(self):
"""
numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according
to the ordering of the atoms in the instance
"""
return np.array([self.species[el] for el in self.indices])
def new_array(self, name, a, dtype=None, shape=None):
"""
Adding a new array to the instance. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
            shape (list/tuple): Shape of the array
"""
if dtype is not None:
a = np.array(a, dtype, order='C')
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags['C_CONTIGUOUS']:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""
Get an array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the required array
copy (bool): True if a copy of the array is to be returned
Returns:
An array of a copy of the array
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""
Update array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
            shape (list/tuple): Shape of the array
"""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def add_tag(self, *args, **qwargs):
"""
Add tags to the atoms object.
Examples:
For selective dynamics::
>>> self.add_tag(selective_dynamics=[False, False, False])
"""
self._tag_list.add_tag(*args, **qwargs)
# @staticmethod
def numbers_to_elements(self, numbers):
"""
Convert atomic numbers in element objects (needed for compatibility with ASE)
Args:
numbers (list): List of Element Numbers (as Integers; default in ASE)
Returns:
list: A list of elements as needed for pyiron
"""
# pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags
atom_number_to_element = {}
for i_el in set(numbers):
i_el = int(i_el)
atom_number_to_element[i_el] = self._pse.element(i_el)
return [atom_number_to_element[i_el] for i_el in numbers]
def copy(self):
"""
Returns a copy of the instance
Returns:
pyiron.atomistics.structure.atoms.Atoms: A copy of the instance
"""
return self.__copy__()
def to_hdf(self, hdf, group_name="structure"):
"""
Save the object in a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str):
Group name with which the object should be stored. This same name should be used to retrieve the object
"""
# import time
with hdf.open(group_name) as hdf_structure:
# time_start = time.time()
hdf_structure["TYPE"] = str(type(self))
for el in self.species:
if isinstance(el.tags, dict):
with hdf_structure.open("new_species") as hdf_species:
el.to_hdf(hdf_species)
hdf_structure['species'] = [el.Abbreviation for el in self.species]
hdf_structure["indices"] = self.indices
with hdf_structure.open("tags") as hdf_tags:
for tag in self._tag_list.keys():
tag_value = self._tag_list[tag]
if isinstance(tag_value, SparseList):
tag_value.to_hdf(hdf_tags, tag)
hdf_structure["units"] = self.units
hdf_structure["dimension"] = self.dimension
if self.cell is not None:
with hdf_structure.open("cell") as hdf_cell:
hdf_cell["cell"] = self.cell
hdf_cell["pbc"] = self.pbc
# hdf_structure["coordinates"] = self.positions # "Atomic coordinates"
hdf_structure["positions"] = self.positions # "Atomic coordinates"
# potentials with explicit bonds (TIP3P, harmonic, etc.)
if self.bonds is not None:
hdf_structure["explicit_bonds"] = self.bonds
# print ('time in atoms.to_hdf: ', time.time() - time_start)
def from_hdf(self, hdf, group_name="structure"):
"""
Retrieve the object from a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
            group_name (str): Group name from which the Atoms object is retrieved.
Returns:
pyiron_atomistic.structure.atoms.Atoms: The retrieved atoms class
"""
if "indices" in hdf[group_name].list_nodes():
with hdf.open(group_name) as hdf_atoms:
if "new_species" in hdf_atoms.list_groups():
with hdf_atoms.open("new_species") as hdf_species:
self._pse.from_hdf(hdf_species)
el_object_list = [self.convert_element(el, self._pse) for el in hdf_atoms["species"]]
self.indices = hdf_atoms["indices"]
self._tag_list._length = len(self)
self.set_species(el_object_list)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
self.units = hdf_atoms["units"]
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
# Backward compatibility
position_tag = "positions"
if position_tag not in hdf_atoms.list_nodes():
position_tag = "coordinates"
if "is_absolute" in hdf_atoms.list_nodes():
if not tr_dict[hdf_atoms["is_absolute"]]:
self.set_scaled_positions(hdf_atoms[position_tag])
else:
self.positions = hdf_atoms[position_tag]
else:
self.positions = hdf_atoms[position_tag]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
else:
return self._from_hdf_old(hdf, group_name)
def _from_hdf_old(self, hdf, group_name="structure"):
"""
This function exits merely for the purpose of backward compatibility
"""
with hdf.open(group_name) as hdf_atoms:
self._pse = PeriodicTable()
if "species" in hdf_atoms.list_groups():
with hdf_atoms.open("species") as hdf_species:
self._pse.from_hdf(hdf_species)
chemical_symbols = np.array(hdf_atoms["elements"], dtype=str)
el_object_list = [self.convert_element(el, self._pse) for el in chemical_symbols]
self.set_species(list(set(el_object_list)))
self.indices = [self._species_to_index_dict[el] for el in el_object_list]
self._tag_list._length = len(self)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]:
self.positions = hdf_atoms["coordinates"]
else:
self.set_scaled_positions(hdf_atoms["coordinates"])
self.units = hdf_atoms["units"]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
def center(self, vacuum=None, axis=(0, 1, 2)):
"""
Center atoms in unit cell.
Adopted from ASE code (https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.center)
Args:
vacuum (float): If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be
10 Angstrom of vacuum on each side.
            axis (tuple/list): List or tuple of integers specifying the axis along which the atoms should be centered
"""
# Find the orientations of the faces of the unit cell
c = self.cell
if c is None:
c = np.identity(self.dimension)
self.cell = c
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.linalg.norm(dirs[i]) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.positions
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
            cosphi = np.dot(c[i], dirs[i]) / np.linalg.norm(c[i])
# %load runDM.py
"""
runDM version 1.0 [Python implementation]
Calculate low energy DM-SM couplings from high energy couplings,
taking into account RG evolution due to SM loops. See
arXiv:1605.04917 for further details.
Please contact <NAME> (<EMAIL>) for any questions,
problems, bugs and suggestions.
"""
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy.interpolate import interp1d
import sys
#-------------Initialisation---------------
#------------------------------------------
mZ = 91.1875 #Z-mass in GeV
mN = 0.938 #Nucleon mass in GeV
EvolutionSM = np.zeros([16,16,1401])
EvolutionEMSM = np.zeros([16,16,453])
#Pre-calculated values of t = Log[m_V/m_Z]
t_SM = np.linspace(0, 14, 1401)
t_EMSM = np.append(np.linspace(-4.51, 0.0,452)-0.003, 0)
#Load in the evolution tables
for i in range(1401):
#Truncate to a few decimal places, to prevent rounding errors in the filenames
s = str(np.around(t_SM[i], 3))
if (t_SM[i] == int(t_SM[i])):
s = str(int(t_SM[i]))
EvolutionSM[:,:,i] = np.loadtxt('data/EvolutionSM_t=' + s + '.dat')
for i in range(453):
#Truncate to a few decimal places, to prevent rounding errors in the filenames
s = str(np.around(t_EMSM[i], 3))
if (t_EMSM[i] == int(t_EMSM[i])):
s = str(int(t_EMSM[i]))
EvolutionEMSM[:,:,i] = np.loadtxt('data/EvolutionEMSM_t=' + s + '.dat')
Umatch = np.loadtxt('data/Umatch.dat')
#Correct the value of t_EMSM slightly
# (because Log(1/mZ)~-4.51292)
t_EMSM[:-1] = t_EMSM[:-1]+0.00008
#Define interpolating functions
UevolutionABOVE = interp1d(t_SM, EvolutionSM)
UevolutionBELOW = interp1d(t_EMSM, EvolutionEMSM)
#------------------------------------------
#%% Initialise empty coupling vector
def initCouplings():
"""
initCouplings()
Returns a numpy array with 16 elements, all set to zero,
for use in initialising coupling vectors.
"""
return np.zeros(16)
#%% Generate coupling vectors with preset operator structures
def setBenchmark(benchmarkID):
"""
setBenchmark(benchmarkID)
Returns a numpy array with 16 elements, corresponding to the
vector of couplings defined in Eq. 4 of the runDM manual.
The value of the couplings is defined by the string benchmarkID.
Possible choices for benchmarkID are:
'Higgs', 'UniversalVector', 'UniversalAxial',
'QuarksVector', 'QuarksAxial', 'LeptonsVector',
'LeptonsAxial', 'ThirdVector', 'ThirdAxial'
"""
if (benchmarkID == "Higgs"):
return np.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0])
elif (benchmarkID == "UniversalVector"):
return np.array([1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0])
elif (benchmarkID == "UniversalAxial"):
return np.array([-1.0,1.0,1.0,-1.0,1.0,-1.0,1.0,1.0,-1.0,1.0,-1.0,1.0,1.0,-1.0,1.0,0.0])
elif (benchmarkID == "QuarksVector"):
return np.array([1.0,1.0,1.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,0.0])
elif (benchmarkID == "QuarksAxial"):
return np.array([-1.0,1.0,1.0,0.0,0.0,-1.0,1.0,1.0,0.0,0.0,-1.0,1.0,1.0,0.0,0.0,0.0])
elif (benchmarkID == "LeptonsVector"):
return np.array([0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,0.0])
elif (benchmarkID == "LeptonsAxial"):
return np.array([0.0,0.0,0.0,-1.0,1.0,0.0,0.0,0.0,-1.0,1.0,0.0,0.0,0.0,-1.0,1.0,0.0])
elif (benchmarkID == "ThirdVector"):
return np.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0])
elif (benchmarkID == "ThirdAxial"):
return np.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.0,1.0,1.0,-1.0,1.0,0.0])
else:
print(" Error in runDM.setbenchmark: benchmarkID <<", benchmarkID, ">> not found...")
print(" Options are: 'Higgs', 'UniversalVector', 'UniversalAxial', 'QuarksVector', 'QuarksAxial', 'LeptonsVector', 'LeptonsAxial', 'ThirdVector', 'ThirdAxial'...")
print(" Returning empty coupling vector...")
return np.zeros(16)
#%% Calculate matrix to evolve couplings
def evolutionMat(E1, E2):
"""
evolutionMat(E1, E2)
Calculate 16x16 matrix for evolving coupling vector
between energies E1 and E2 (in GeV). evolutionMat
takes care of the relative values of E1 and E2.
Requires E1, E2 in range [1, 1e8] GeV.
Note that evolutionMat is NOT vectorized - it can only
accept floats for E1 and E2.
Input:
E1 - energy to start from (in GeV)
E2 - energy to run to (in GeV)
Output:
Returns a 16x16 numpy array containing the evolution matrix
"""
#Check to see if E1 or E2 is a list
if ((hasattr(E1, "__len__"))or(hasattr(E2,"__len__"))):
sys.exit(" Error in runDM.evolutionMat: E1 and E2 must both be floats")
t1 = np.log(E1/mZ)
t2 = np.log(E2/mZ)
#Check ranges of t1 and t2
if not(np.log(1.0/mZ) <= t1 <= 14.0):
sys.exit(" Error in runDM.evolutionMat: E1 out of range. Require 1 GeV <= E1 <= 1e8 GeV")
if not(np.log(1.0/mZ) <= t2 <= 14.0):
sys.exit(" Error in runDM.evolutionMat: E2 out of range. Require 1 GeV <= E2 <= 1e8 GeV")
#Both energies below the Z-mass
if ((t1 <= 0)and(t2 <= 0)):
Emat = np.dot(np.dot(linalg.inv(UevolutionBELOW(t2)), UevolutionBELOW(t1)),Umatch)
#Both energies above the Z-mass
if ((t1 >= 0)and(t2 >= 0)):
Emat = np.dot(linalg.inv(UevolutionABOVE(t2)), UevolutionABOVE(t1))
#Energies either side of the Z-mass
if ((t1 >= 0)and(t2 <= 0)):
EmatBELOW = np.dot(linalg.inv(UevolutionBELOW(t2)), UevolutionBELOW(0))
Emat = np.dot(EmatBELOW, np.dot(Umatch, UevolutionABOVE(t1)))
#Energies either side of Z-mass (in wrong order)
if ((t1 < 0)and(t2 > 0)):
sys.exit(" Error in runDM.evolutionMat: E1 < mZ, E2 > mZ not supported - matching is not unique.")
return Emat
#%% Evolve couplings between E1 and E2
def runCouplings(c, E1, E2):
"""
runCouplings(c, E1, E2)
Calculate running of couplings c between two energies
E1 and E2. If E2 > mZ, the output is an array of
couplings in the EW-unbroken phase (Eq. 4 of the manual).
If E2 < mZ, the output is an array of couplings in
the EW-broken phase (Eq. 6 of the manual).
Note that E1 < mZ with E2 > mZ is not allowed.
Input:
c - numpy array with 16 elements, with values corresponding
to those defined in Eq. 4 of the runDM manual
E1 - energy (in GeV) at which c is defined. E1 may be
a scalar or a 1-d numpy array.
E2 - energy (in GeV) to run to. E2 may be a scalar or 1-d
numpy array (with same length as E1).
Output:
Returns array with length 16 in the final dimension
(corresponding either to Eq. 4 or Eq. 6 of the manual).
The full dimensions of the array will be (Len(E1), 16).
"""
#Check length of coupling vector c
if not(hasattr(c, "__len__")):
sys.exit(" Error in runDM.runCouplings: c must be an array with 16 elements")
if (len(c) != 16):
sys.exit(" Error in runDM.runCouplings: c must be an array with 16 elements")
#If E1 and E2 are scalar
if not((hasattr(E1, "__len__"))or(hasattr(E2,"__len__"))):
return np.dot(evolutionMat(E1, E2), c)
#If E1 or E2 are arrays, need to check to make sure correct
#array dimensions are returned
#Both E1, E2 are arrays (check they are same length...)
if ((hasattr(E1, "__len__"))and(hasattr(E2, "__len__"))):
n1 = len(E1)
if (len(E2) != n1):
sys.exit(" Error in runDM.runCouplings: E1 and E2 must have same length (or be scalar)")
else:
result = np.zeros([n1,16])
for i in range(n1):
                result[i, :] = np.dot(evolutionMat(E1[i], E2[i]), c)
#Only E1 is an array
elif (hasattr(E1, "__len__")):
n1 = len(E1)
result = np.zeros([n1,16])
for i in range(n1):
            result[i, :] = np.dot(evolutionMat(E1[i], E2), c)
#Only E2 is an array
elif (hasattr(E2, "__len__")):
n2 = len(E2)
result = np.zeros([n2,16])
for i in range(n2):
result[i, :] = np.dot(evolutionMat(E1, E2[i]), c)
return result
#%% Calculate couplings to light quarks at the nuclear scale
def DDCouplingsQuarks(c, E1):
"""
DDCouplingsQuarks(c, E1)
Calculate vector (V) and axial-vector (A) couplings
to u, d, s quarks at the nuclear energy scale
starting from the high energy coupling vector c,
defined at energy E1 (in GeV).
Input:
c - numpy array with 16 elements, with values corresponding
to those defined in Eq. 4 of the runDM manual
E1 - energy (in GeV) at which c is defined. E1 may be
a scalar or a 1-d numpy array.
Output:
Returns array with length 5 in the final dimension,
corresponding to (CVu, CVd, CAu, CAd, CAs).
The dimensions of the array will be (Len(E1), 5).
"""
#Check length of coupling vector c
if not(hasattr(c, "__len__")):
sys.exit(" Error in runDM.runCouplings: c must be an array with 16 elements")
if (len(c) != 16):
sys.exit(" Error in runDM.runCouplings: c must be an array with 16 elements")
#If E1 is scalar
if not(hasattr(E1, "__len__")):
return runCouplings(c, E1, 1.0)[[0,1,8,9,11]]
#If E1 is an array
else:
n1 = len(E1)
result = np.zeros([n1,5])
for i in range(n1):
result[i,:] = runCouplings(c, E1[i], 1.0)[[0,1,8,9,11]]
return result
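# Illustrative usage sketch (added for clarity, not part of the original
# module): evolve a benchmark coupling vector defined at 1 TeV down to the
# nuclear scale and read off the vector/axial light-quark couplings.
def _example_quark_couplings():
    c_high = setBenchmark('QuarksVector')                  # couplings defined at E1 = 1 TeV
    cVu, cVd, cAu, cAd, cAs = DDCouplingsQuarks(c_high, 1000.0)
    return cVu, cVd, cAu, cAd, cAs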
#%% Calculate non-relativistic couplings to protons
def DDCouplingsProton(c, E1, mx, DMcurrent):
#From quarks to nucleons
#Values from arXiv:1202.1292
deltau_p = 0.84
deltad_p = -0.44
deltas_p = -0.03
#Get quark couplings
cuV, cdV, cuA, cdA, csA = DDCouplingsQuarks(c, E1)
#Calculate non-relativistic proton couplings
#Note the number of operators is shifted by 1
#because python uses zero-indexed arrays
    lambda_p = np.zeros(12)
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for channels."""
from typing import Iterable, Sequence
import numpy as np
import pytest
import cirq
def apply_channel(channel: cirq.SupportsKraus, rho: np.ndarray) -> np.ndarray:
return apply_kraus_operators(cirq.kraus(channel), rho)
def apply_kraus_operators(kraus_operators: Sequence[np.ndarray], rho: np.ndarray) -> np.ndarray:
d_out, d_in = kraus_operators[0].shape
assert rho.shape == (d_in, d_in)
out = np.zeros((d_out, d_out), dtype=np.complex128)
for k in kraus_operators:
out += k @ rho @ k.conj().T
return out
def generate_standard_operator_basis(d_out: int, d_in: int) -> Iterable[np.ndarray]:
for i in range(d_out):
for j in range(d_in):
e_ij = np.zeros((d_out, d_in))
e_ij[i, j] = 1
yield e_ij
def compute_choi(channel: cirq.SupportsKraus) -> np.ndarray:
ks = cirq.kraus(channel)
d_out, d_in = ks[0].shape
d = d_in * d_out
c = np.zeros((d, d), dtype=np.complex128)
for e in generate_standard_operator_basis(d_in, d_in):
c += np.kron(apply_channel(channel, e), e)
return c
def compute_superoperator(channel: cirq.SupportsKraus) -> np.ndarray:
ks = cirq.kraus(channel)
d_out, d_in = ks[0].shape
m = np.zeros((d_out * d_out, d_in * d_in), dtype=np.complex128)
for k, e_in in enumerate(generate_standard_operator_basis(d_in, d_in)):
m[:, k] = np.reshape(apply_channel(channel, e_in), d_out * d_out)
return m
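# Minimal sanity-check sketch (added for illustration, not from the original
# test suite): the Choi matrix of the single-qubit identity channel is the
# unnormalized maximally entangled projector, with trace equal to d = 2.
def test_compute_choi_identity_channel():
    expected = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
    assert np.allclose(compute_choi(cirq.I), expected)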
@pytest.mark.parametrize(
'kraus_operators, expected_choi',
(
([np.eye(2)], np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])),
(cirq.kraus(cirq.depolarize(0.75)), np.eye(4) / 2),
(
[
np.array([[1, 0, 0], [0, 0, 1]]) / np.sqrt(2),
np.array([[1, 0, 0], [0, 0, -1]]) / np.sqrt(2),
],
np.diag([1, 0, 0, 0, 0, 1]),
),
),
)
def test_kraus_to_choi(kraus_operators, expected_choi):
"""Verifies that cirq.kraus_to_choi computes the correct Choi matrix."""
assert np.allclose(cirq.kraus_to_choi(kraus_operators), expected_choi)
@pytest.mark.parametrize(
'choi, error',
(
(np.array([[1, 2, 3], [4, 5, 6]]), "shape"),
(np.eye(2), "shape"),
(np.diag([1, 1, 1, -1]), "positive"),
(
np.array(
[
[0.6, 0.0, -0.1j, 0.1],
[0.0, 0.0, 0.0, 0.0],
[0.1j, 0.0, 0.4, 0.0],
[0.2, 0.0, 0.0, 1.0],
]
),
"Hermitian",
),
),
)
def test_choi_to_kraus_invalid_input(choi, error):
with pytest.raises(ValueError, match=error):
_ = cirq.choi_to_kraus(choi)
@pytest.mark.parametrize(
'choi, expected_kraus',
(
(
# Identity channel
np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]),
(np.eye(2),),
),
(
# S gate
np.array([[1, 0, 0, -1j], [0, 0, 0, 0], [0, 0, 0, 0], [1j, 0, 0, 1]]),
(np.diag([-1j, 1]),),
),
(
# Hadamard
np.array([[1, 1, 1, -1], [1, 1, 1, -1], [1, 1, 1, -1], [-1, -1, -1, 1]]) / 2,
(np.array([[1, 1], [1, -1]]) / np.sqrt(2),),
),
(
# Completely dephasing channel
np.diag([1, 0, 0, 1]),
(np.diag([1, 0]), np.diag([0, 1])),
),
(
# Amplitude damping channel
# fmt: off
np.array(
[
[1, 0, 0, 0.8],
[0, 0.36, 0, 0],
[0, 0, 0, 0],
[0.8, 0, 0, 0.64],
],
),
# fmt: off
(np.diag([1, 0.8]), np.array([[0, 0.6], [0, 0]])),
),
(
# Completely depolarizing channel
np.eye(4) / 2,
(
                np.array([[np.sqrt(0.5)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
class SBiScale(object):
''' A sparse approach to scaling and centering, row-wise and column-wise, for input to a SoftImpute algorithm.
maxit: int
the maximum number of iterations allowed for obtaining the ideal scaling and centering levels.
    thresh: float
the threshold for convergence
row_center, row_scale, col_center, col_scale: bool
a boolean indicating whether or not the task should be completed.
trace: bool
whether or not a verbose output should be provided.
'''
def __init__(self, maxit=20, thresh=1e-9, row_center=True, row_scale=False, col_center=True, col_scale=False, trace=False):
self.maxit = maxit
        self.thresh = thresh
self.row_center = row_center
self.row_scale = row_scale
self.col_center = col_center
self.col_scale = col_scale
self.trace = trace
self.x = None
self.m = None
self.n = None
self.a = None
self.b = None
self.tau = None
self.gamma = None
self.xhat = None
self.critmat = []
def _prepare_suvc(self):
a = self.a.copy()
a = a.reshape(-1,1)
b = self.b.copy()
b = b.reshape(-1,1)
a = np.hstack((a, np.ones(a.shape[0]).reshape(-1,1)))
b = np.hstack((np.ones(b.shape[0]).reshape(-1,1), b))
return a, b
def _pred_one(self, u, v, row, col):
u_data = np.expand_dims(u[row,:], 0)
return float(u_data.dot(v[col, :].T))
def _c_suvc(self, u, v, irow, icol):
nomega = len(irow)
res = np.zeros(nomega)
targets = zip(irow, icol)
for idx, (r,c) in enumerate(targets):
res[idx] = self._pred_one(u, v, r, c)
return res
def _center_scale_I(self):
x = self.x.data
a, b = self._prepare_suvc()
coo_x = coo_matrix(self.x)
irow = coo_x.row
icol = coo_x.col
suvc1 = self._c_suvc(a, b, irow, icol)
suvc2 = self._c_suvc(self.tau.reshape(-1,1), self.gamma.reshape(-1,1), irow, icol)
self.xhat.data = (x-suvc1) / suvc2
return self
def _col_sum_along(self, a, x):
x = (self.x != 0)
a = csc_matrix(a.T)
return a.dot(x).toarray()
def _row_sum_along(self, b, x):
x = (self.x != 0)
return x.dot(b)
def _add_variables(self, x):
self.x = x
self.m = x.shape[0]
self.n = x.shape[1]
self.a = np.zeros(self.m)
self.b = np.zeros(self.n)
self.tau = np.ones(self.m)
self.gamma = np.ones(self.n)
self.xhat = self.x.copy()
return self
def fit(self, x):
''' Fits data to provide ideal scaling/centering levels. Runs until convergence is achieved or maximum iterations are reached.
x: scipy.sparse matrix type
The data to fit.
Returns: scipy.sparse type matrix
The scaled/centered matrix.
'''
self._add_variables(x)
self._center_scale_I()
        for i in range(self.maxit):
# Centering
## Column mean
if self.col_center:
colsums = np.sum(self.xhat, axis=0)
gamma_by_sum = np.multiply(colsums,(self.gamma))
dbeta = gamma_by_sum / self._col_sum_along(1 / self.tau, self.x)
self.b = self.b + dbeta
self.b[np.isnan(self.b)] = 0
self._center_scale_I()
else:
dbeta = 0
## Row Mean
if self.row_center:
rowsums = np.sum(self.xhat, axis=1).T
tau_by_sum = np.multiply(self.tau, rowsums)
dalpha = tau_by_sum / self._row_sum_along(1 / self.gamma, self.x)
self.a = self.a + dalpha
self.a[np.isnan(self.a)] = 0
self._center_scale_I()
else:
dalpha = 0
#Leaving out scaling for now; not required for SoftImputeALS algorithm
dalpha[np.isnan(dalpha)] = 0
dbeta[np.isnan(dbeta)] = 0
convergence_level = np.square(dalpha).sum() + np.square(dbeta).sum()
self.critmat.append([i + 1, convergence_level])
if convergence_level < self.thresh:
break
# Complete solution
        self.xhat.row_center = np.ravel(self.a)
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
import numpy as np
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras.models import model_from_json
sys.stderr = stderr
import pickle
from os import system, name
import pyperclip
import json
import pandas as pd
def main():
parser = argparse.ArgumentParser(
description="""
---------------------------------------------------------------
Description:
        Classify a single str entry or JSON-formatted data with labels
        1. soft: for soft skills like 'team player'
        2. tech: for tech skills like 'python experience'
3. none: for all other type of sentences
---------------------------------------------------------------""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=True)
parser.add_argument('-s','--str', type=str,metavar='', dest='text',
help='may be used to classify a single Text in prompt',
action='store')
parser.add_argument('-j','--json', action='store_true',
help='state that a JSON file will be classified',
dest='fromFile')
parser.add_argument('-p','--path', dest='path',type=str, metavar='',
                        help='path to JSON file (may contain labels)\n' +
"default='tech_soft_none.json'",
action='store', default='tech_soft_none.json')
args = parser.parse_args()
fromFile = args.fromFile
text = args.text
path = args.path.replace('\\','/')
#Load trained model
json_file = open('model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model/model.h5")
#Compile loaded model
loaded_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
#loaded_model.summary()
#Load words used as features (BoW)
with open('model/word_features', 'rb') as f:
word_features = pickle.load(f)
if (text):
print(find_features_text(text,word_features,loaded_model))
if (fromFile):
#Read and format dataset
print("Retrieved from: " + path)
with open(path, 'r',encoding = 'utf8', errors="ignore") as f:
data = json.load(f)
df = pd.DataFrame.from_dict(data["data"])
featuresets = np.array([(find_features(df["text"][index],word_features), df["label"][index]) for index,_ in df.iterrows()])
X = np.array([x[0] for x in featuresets[:]])
Y = loaded_model.predict(X)
predRaw = np.argmax(Y,axis=1)
#print(predRaw[0:10])
labels = ['none', 'soft', 'tech']
data = {}
data['data'] = []
for i, pred in enumerate(predRaw):
data['data'].append({
'text': df['text'][i],
'label': labels[pred]
})
with open('output.json', 'w') as outfile:
json.dump(data, outfile,indent=4)
print("Generated output.json")
#Predict from entry 'doc'
def find_features_text(text,word_features,model):
features = find_features(text,word_features)
    newData = np.asarray(features)
#!/bin/env python
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
"""
Compute a timeseries model data from hdf5 data, saving to files for the Slycat
Web Server to ingest.
This script loads data from a directory containing:
One inputs.hdf5 file containing a single table.
One timeseries-N.hdf5 file for each row in the input table.
"""
import argparse
import collections
import datetime
import h5py
import ipyparallel
import itertools
import json
import numpy
import os
import tarfile
import scipy.cluster.hierarchy
import scipy.spatial.distance
import slycat.hdf5
import json
import time
try:
import cpickle as pickle
except:
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("--directory",
help="Directory containing hdf5 timeseries data (one inputs.hdf5 and multiple sub-directories with multiple timeseries-N.hdf5 files).")
parser.add_argument("--timeseries-name", default=None,
help="Name of the timeseries, i.e. sub-directory name in the input directory.")
parser.add_argument("--cluster-sample-count", type=int, default=1000,
help="Sample count used for the uniform-pla and uniform-paa resampling algorithms. Default: %(default)s")
parser.add_argument("--cluster-sample-type", default="uniform-paa", choices=["uniform-pla", "uniform-paa"],
help="Resampling algorithm type. Default: %(default)s")
parser.add_argument("--cluster-type", default="average", choices=["single", "complete", "average", "weighted"],
help="Hierarchical clustering method. Default: %(default)s")
parser.add_argument("--cluster-metric", default="euclidean", choices=["euclidean"],
help="Hierarchical clustering distance metric. Default: %(default)s")
parser.add_argument("--workdir", default=None,
help="Working directory to store data to be processed during model creation")
parser.add_argument("--hash", default=None, help="Unique identifier for the output folder.")
parser.add_argument("--profile", default=None, help="Name of the IPython profile to use")
parser.add_argument("--log_file", default=None, help="log file path")
arguments = parser.parse_args()
if arguments.timeseries_name is None:
directory_full_path = arguments.directory
else:
directory_full_path = os.path.join(arguments.directory, arguments.timeseries_name)
if not os.path.exists(directory_full_path):
raise Exception("Directory %s does not exists." % directory_full_path)
if arguments.cluster_sample_count < 1:
raise Exception("Cluster sample count must be greater than zero.")
_numSamples = arguments.cluster_sample_count
try:
pool = ipyparallel.Client(profile=arguments.profile)[:]
except:
raise Exception("A running IPython parallel cluster is required to run this script.")
def tardir(path):
# ziph is zipfile handle
with tarfile.open(os.path.join(path, 'slycat-timeseries.tar.gz'), 'w:gz') as tarh:
for root, dirs, files in os.walk(path):
for file in files:
if file != 'slycat-timeseries.tar.gz':
tarh.add(os.path.join(root, file), arcname=file)
# Compute the model.
try:
start_now = datetime.datetime.now()
start_time_str = start_now.strftime("%d/%m/%Y %H:%M:%S")
print("[START] " + start_time_str)
print("Examining and verifying data.")
"""
Find number of timeseries and accurate cluster sample count before starting model
"""
if os.path.isfile(os.path.join(arguments.directory, "inputs.hdf5")):
inputs_path = os.path.join(arguments.directory, "inputs.hdf5")
else:
inputs_path = os.path.join(os.path.dirname(arguments.directory), "inputs.hdf5")
with h5py.File(inputs_path, "r") as file:
array = slycat.hdf5.ArraySet(file)[0]
dimensions = array.dimensions
if len(dimensions) != 1:
raise Exception("Inputs table must have exactly one dimension.")
# size for the dataset, i.e. 5k, 10k, etc...
_numTimeseries = dimensions[0]["end"] - dimensions[0]["begin"]
# initialize a 1-dimensional array of size of the dataset with 0's
timeseries_samples = numpy.zeros(shape=(_numTimeseries))
for timeseries_index in range(_numTimeseries):
with h5py.File(os.path.join(directory_full_path, "timeseries-%s.hdf5" % timeseries_index), "r") as file:
# store all the timeseries sample counts
timeseries_samples[timeseries_index] = len(slycat.hdf5.ArraySet(file)[0].get_data(0)[:])
# reduce the num of samples if fewer timeseries that curr cluster-sample-count
if timeseries_samples.min() < _numSamples:
_numSamples = int(timeseries_samples.min())
print(("Reducing cluster sample count to minimum found in data: %s", _numSamples))
print("Storing clustering parameters.")
dirname = "%s/slycat_timeseries_%s" % (arguments.workdir, arguments.hash)
if not os.path.exists(dirname):
os.makedirs(dirname)
with h5py.File(inputs_path, "r") as file:
array = slycat.hdf5.ArraySet(file)[0]
dimensions = array.dimensions
attributes = array.attributes
if len(attributes) < 1:
raise Exception("Inputs table must have at least one attribute.")
if len(dimensions) != 1:
raise Exception("Inputs table must have exactly one dimension.")
timeseries_count = dimensions[0]["end"] - dimensions[0]["begin"]
"""
Save data to dictionary to be pickled. Slycat server will later un-pickle
the file and use the data for the following commands:
put_model_arrayset(mid, "inputs")
put_model_arrayset_array(mid, "inputs", 0, dimensions, attributes)
"""
arrayset_inputs = dict(aid="inputs", array=0, dimensions=dimensions, attributes=attributes)
with open(os.path.join(dirname, "arrayset_inputs.pickle"), "wb") as arrayset_inputs_pickle:
pickle.dump(arrayset_inputs, arrayset_inputs_pickle)
"""
Fetch data for each of the attributes and pickle to disk. Slycat server will
later un-pickle the files and use the data for the following command:
put_model_arrayset_data(mid, "inputs", "0/%s/..." % attribute, [data])
"""
attributes_array = numpy.empty(shape=(len(attributes),), dtype=object)
for attribute in range(len(attributes)):
print(("Storing input table attribute %s", attribute))
attributes_array[attribute] = array.get_data(attribute)[...]
with open(os.path.join(dirname, "inputs_attributes_data.pickle"), "wb") as attributes_file:
pickle.dump(attributes_array, attributes_file)
# Create a mapping from unique cluster names to timeseries attributes.
clusters = collections.defaultdict(list)
timeseries_samples = numpy.zeros(shape=(timeseries_count))
for timeseries_index in range(timeseries_count):
with h5py.File(os.path.join(directory_full_path, "timeseries-%s.hdf5" % timeseries_index), "r") as file:
attributes = slycat.hdf5.ArraySet(file)[0].attributes[1:] # Skip the timestamps
# Get and store a shallow copy of the data
timeseries_samples[timeseries_index] = len(slycat.hdf5.ArraySet(file)[0].get_data(0)[:])
if len(attributes) < 1:
raise Exception("A timeseries must have at least one attribute.")
for attribute_index, attribute in enumerate(attributes):
# Mapping is created here...
clusters[attribute["name"]].append((timeseries_index, attribute_index))
# Store an alphabetized collection of cluster names in a JSON file
file_clusters = dict(aid="clusters", file=json.dumps(sorted(clusters.keys())), parser="slycat-blob-parser",
timeseries_count=str(timeseries_count))
with open(os.path.join(dirname, "file_clusters.json"), "w") as file_clusters_json:
json.dump(file_clusters, file_clusters_json)
with open(os.path.join(dirname, "file_clusters.out"), "w") as file_clusters_out:
json.dump(sorted(clusters.keys()), file_clusters_out)
def get_time_range(directory, timeseries_index):
"""
Get the minimum and maximum times for the input timeseries and returns the
values as a tuple.
:param directory: working directory for the timeseries
:param timeseries_index:
:returns: timeseries time range as tuple
"""
import h5py
import os
import slycat.hdf5
# We have to open the file with writing enabled in case the statistics cache gets updated.
with h5py.File(os.path.join(directory, "timeseries-%s.hdf5" % timeseries_index), "r+") as file:
statistics = slycat.hdf5.ArraySet(file)[0].get_statistics(0)
return statistics["min"], statistics["max"]
print("Collecting timeseries statistics.")
time_ranges = pool.map_sync(get_time_range, list(itertools.repeat(directory_full_path, timeseries_count)),
list(range(timeseries_count)))
# For each cluster ...
for index, (name, storage) in enumerate(sorted(clusters.items())):
print(("cluster index: %s" % index))
progress_begin = float(index) / float(len(clusters))
progress_end = float(index + 1) / float(len(clusters))
# Rebin each timeseries within the cluster so they share common stop/start times and samples.
print(("Resampling data for %s" % name))
# Get the minimum and maximum times across every series in the cluster.
ranges = [time_ranges[timeseries[0]] for timeseries in storage]
time_min = min(list(zip(*ranges))[0])
time_max = max(list(zip(*ranges))[1])
if arguments.cluster_sample_type == "uniform-pla":
def uniform_pla(directory, min_time, max_time, bin_count, timeseries_index, attribute_index):
"""
Create waveforms using a piecewise linear approximation.
:param directory: working directory for the timeseries
:param min_time:
:param max_time:
:param bin_count:
:param timeseries_index:
:param attribute_index:
:return: computed time series
"""
import h5py
import numpy
import os
import slycat.hdf5
# generate evenly spaced times
bin_edges = numpy.linspace(min_time, max_time, bin_count + 1)
bin_times = (bin_edges[:-1] + bin_edges[1:]) / 2
with h5py.File(os.path.join(directory, "timeseries-%s.hdf5" % timeseries_index), "r") as file:
original_times = slycat.hdf5.ArraySet(file)[0].get_data(0)[:]
original_values = slycat.hdf5.ArraySet(file)[0].get_data(attribute_index + 1)[:]
# interpolate original data with binned times
bin_values = numpy.interp(bin_times, original_times, original_values)
return {
"input-index": timeseries_index,
"times": bin_times,
"values": bin_values,
}
directories = list(itertools.repeat(directory_full_path, len(storage)))
min_times = list(itertools.repeat(time_min, len(storage)))
max_times = list(itertools.repeat(time_max, len(storage)))
bin_counts = list(itertools.repeat(_numSamples, len(storage)))
timeseries_indices = [timeseries for timeseries, attribute in storage]
attribute_indices = [attribute for timeseries, attribute in storage]
waveforms = pool.map_sync(uniform_pla, directories, min_times, max_times, bin_counts, timeseries_indices,
attribute_indices)
elif arguments.cluster_sample_type == "uniform-paa":
def uniform_paa(directory, min_time, max_time, bin_count, timeseries_index, attribute_index):
"""
Create waveforms using a piecewise aggregate approximation.
:param directory: working directory for the timeseries
:param min_time:
:param max_time:
:param bin_count:
:param timeseries_index:
:param attribute_index:
:return: computed time series
"""
import h5py
import numpy
import os
import slycat.hdf5
bin_edges = numpy.linspace(min_time, max_time, bin_count + 1)
bin_times = (bin_edges[:-1] + bin_edges[1:]) / 2
with h5py.File(os.path.join(directory, "timeseries-%s.hdf5" % timeseries_index), "r") as file:
original_times = slycat.hdf5.ArraySet(file)[0].get_data(0)[:]
original_values = slycat.hdf5.ArraySet(file)[0].get_data(attribute_index + 1)[:]
                bin_indices = numpy.digitize(original_times, bin_edges[1:])
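                # (Sketch, not the original code.) A typical way to finish a
                # piecewise aggregate approximation from the bin indices is to
                # average the original values falling into each bin, e.g.:
                #   sums = numpy.bincount(bin_indices, weights=original_values, minlength=bin_count)
                #   counts = numpy.bincount(bin_indices, minlength=bin_count)
                #   bin_values = sums / numpy.maximum(counts, 1)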
import itertools
import unittest
import numpy as np
import nmsbind
def get_exact_cosine(row, data, N=10):
scores = data.dot(row) / np.linalg.norm(data, axis=-1)
best = np.argpartition(scores, -N)[-N:]
    return sorted(zip(best, scores[best] / np.linalg.norm(row)))
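# Usage sketch (not part of the original tests): exact top-10 cosine
# neighbours of the first row of a random matrix. Shapes are illustrative.
if __name__ == "__main__":
    data = np.random.randn(1000, 32).astype(np.float32)
    print(get_exact_cosine(data[0], data, N=10)[:3])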
import torch
from torch.utils.data import Dataset, DataLoader
from torch.distributions.multivariate_normal import MultivariateNormal
import numpy as np
from tqdm import tqdm
import random
def get_rotation(theta):
rad = np.radians(theta)
c, s = np.cos(rad), np.sin(rad)
R = np.array([[c, -s],
[s, c]])
return R
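# Quick check (sketch): a 90 degree rotation sends the x unit vector to the
# y unit vector, up to floating point error.
if __name__ == "__main__":
    print(get_rotation(90) @ np.array([1.0, 0.0]))  # approximately [0., 1.]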
class CircleDataset(Dataset):
def __init__(self, n_samples, n_centers=9, sigma=0.1, ysigma=0.01, include_zero=True,
target_label=1., seed = None, radius=1.):
super().__init__()
if seed != None:
random.seed(seed)
            np.random.seed(seed)
import numpy as np
from numpy.random import seed
seed(1)
import pandas as pd
from math import sqrt
from sklearn.decomposition import PCA
######################################################################
# METRICS
######################################################################
def mse(y, y_hat):
"""
Calculates Mean Squared Error.
MSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MSE
"""
mse = np.mean(np.square(y - y_hat))
return mse
def rmse(y, y_hat):
"""
Calculates Root Mean Squared Error.
RMSE measures the prediction accuracy of a
forecasting method by calculating the squared deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
Finally the RMSE will be in the same scale
as the original time series so its comparison with other
series is possible only if they share a common scale.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: RMSE
"""
rmse = sqrt(np.mean(np.square(y - y_hat)))
return rmse
def mape(y, y_hat):
"""
Calculates Mean Absolute Percentage Error.
MAPE measures the relative prediction accuracy of a
forecasting method by calculating the percentual deviation
of the prediction and the true value at a given time and
    averages these deviations over the length of the series.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: MAPE
"""
mape = np.mean(np.abs(y - y_hat) / np.abs(y))
mape = 100 * mape
return mape
def smape(y, y_hat):
"""
Calculates Symmetric Mean Absolute Percentage Error.
SMAPE measures the relative prediction accuracy of a
forecasting method by calculating the relative deviation
of the prediction and the true value scaled by the sum of the
absolute values for the prediction and true value at a
    given time, then averages these deviations over the length
of the series. This allows the SMAPE to have bounds between
0% and 200% which is desireble compared to normal MAPE that
may be undetermined.
y: numpy array
actual test values
y_hat: numpy array
predicted values
return: SMAPE
"""
    smape = np.mean(np.abs(y - y_hat) / (np.abs(y) + np.abs(y_hat)))
    smape = 200 * smape
    return smape
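# Usage sketch (illustrative values only): the four error metrics above on a
# tiny forecast example.
if __name__ == "__main__":
    y_true = np.array([1.0, 2.0, 3.0])
    y_pred = np.array([1.1, 1.9, 3.3])
    print(mse(y_true, y_pred), rmse(y_true, y_pred), mape(y_true, y_pred), smape(y_true, y_pred))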
from collections import OrderedDict
import weakref
import sys
from openmdao.main.datatypes.api import List, VarTree
from openmdao.main.expreval import ExprEvaluator
from openmdao.main.interfaces import obj_has_interface, ISolver, IDriver
from openmdao.main.variable import make_legal_path
from openmdao.main.vartree import VariableTree
from openmdao.main.depgraph import base_var
from openmdao.util.typegroups import real_types, int_types
from openmdao.util.graph import fix_single_tuple
from numpy import array, ndarray, ndindex, ones
from openmdao.main.mpiwrap import MPI
__missing = object()
class ParameterBase(object):
"""Abstract base class for parameters."""
def __init__(self, target, high=None, low=None,
scaler=None, adder=None, start=None,
fd_step=None, scope=None, name=None,
_expreval=None):
"""If scaler and/or adder are not None, then high, low, and start, if
not None, are assumed to be expressed in unscaled form. If high and low
are not supplied, then their values will be pulled from the target
variable (along with a start value), and are assumed to be in scaled
form, so their values will be unscaled prior to being stored in the
Parameter.
"""
if scaler is None:
scaler = 1.0
if adder is None:
adder = 0.0
self._scaling_required = scaler != 1. or adder != 0.
self.low = low
self.high = high
self.scaler = scaler
self.adder = adder
self.start = start
self.fd_step = fd_step
self.name = name or target
if _expreval is None:
try:
_expreval = ExprEvaluator(target, scope)
except Exception as err:
raise err.__class__("Can't add parameter: %s" % str(err))
if not _expreval.is_valid_assignee():
raise ValueError("Can't add parameter: '%s' is not a valid"
" parameter expression" % _expreval.text)
self._expreval = _expreval
try:
self._metadata = self._expreval.get_metadata()
except AttributeError:
raise AttributeError("Can't add parameter '%s' because it doesn't"
" exist." % target)
# 'raw' metadata is in the form [(varname, metadata)],
# so use [0][1] to get the actual metadata dict
metadata = self._metadata[0][1]
if 'iotype' in metadata and metadata['iotype'] == 'out':
raise RuntimeError("Can't add parameter '%s' because '%s' is an"
" output." % (target, target))
try:
# So, our traits might not have a vartypename?
self.vartypename = metadata['vartypename']
except KeyError:
self.vartypename = None
def __str__(self):
return self._expreval.text
def _transform(self, val):
""" Unscales the variable (parameter space -> var space). """
if self._scaling_required:
return (val + self.adder) * self.scaler
else:
return val
def _untransform(self, val):
""" Scales the variable (var space -> parameter space). """
if self._scaling_required:
return val / self.scaler - self.adder
else:
return val
def _get_scope(self):
"""Return scope of target expression."""
return self._expreval.scope
@property
def target(self):
"""The target of this parameter."""
return self._expreval.text
@property
def targets(self):
"""A one element list containing the target of this parameter."""
return [self._expreval.text]
def initialize(self, scope, param_owner):
"""Set parameter to initial value."""
if self.start is not None:
start = self.start
self.set(start, param_owner)
def set(self, val, param_owner):
"""Assigns the given value to the target referenced by this parameter,
must be overridden."""
raise NotImplementedError('set')
def get_metadata(self, metaname=None):
"""Returns a list of tuples of the form (varname, metadata), with one
entry for each variable referenced by the parameter expression. The
metadata value found in the tuple will be either the specified piece
of metadata, if metaname is provided, or the whole metadata dictionary
for that variable if it is not.
"""
if metaname is None:
return self._metadata[0]
else:
return [(name, self._metadata.get(metaname))
for name, val in self._metadata]
def get_referenced_compnames(self):
"""Return a set of Component names based on the
pathnames of Variables referenced in our target string.
"""
return self._expreval.get_referenced_compnames()
def get_referenced_varpaths(self, refs=False):
"""Return a set of Variable names referenced in our target string."""
return self._expreval.get_referenced_varpaths(copy=False, refs=refs)
def get_config(self):
"""Return configuration arguments."""
return (self.target, self.low, self.high, self.fd_step,
self.scaler, self.adder, self.start, self.name)
class Parameter(ParameterBase):
""" A scalar parameter. """
def __init__(self, target, high=None, low=None,
scaler=None, adder=None, start=None,
fd_step=None, scope=None, name=None,
_expreval=None, _val=None, _allowed_types=None):
"""If scaler and/or adder are not None, then high, low, and start, if
not None, are assumed to be expressed in unscaled form. If high and low
are not supplied, then their values will be pulled from the target
variable (along with a start value), and are assumed to be in scaled
form, so their values will be unscaled prior to being stored in the
Parameter.
"""
super(Parameter, self).__init__(target, high, low,
scaler, adder, start,
fd_step, scope, name,
_expreval)
if scaler is not None:
try:
scaler = float(scaler)
except (TypeError, ValueError):
raise ValueError("Bad value given for parameter's 'scaler'"
" attribute.")
if adder is not None:
try:
adder = float(adder)
except (TypeError, ValueError):
raise ValueError("Bad value given for parameter's 'adder'"
" attribute.")
if _val is None:
try:
_val = self._expreval.evaluate()
except Exception:
raise ValueError("Can't add parameter because I can't evaluate"
" '%s'." % target)
self.valtypename = type(_val).__name__
if self.vartypename == 'Enum':
return # it's an Enum, so no need to set high or low
if _allowed_types is None or 'any' not in _allowed_types:
if not isinstance(_val, real_types) and \
not isinstance(_val, int_types):
raise ValueError("The value of parameter '%s' must be a real or"
" integral type, but its type is '%s'." %
(target, type(_val).__name__))
# metadata is in the form (varname, metadata), so use [1] to get
# the actual metadata dict
metadata = self.get_metadata()[1]
meta_low = metadata.get('low') # this will be None if 'low' isn't there
if meta_low is not None:
if low is None:
self.low = self._untransform(meta_low)
elif low < self._untransform(meta_low):
raise ValueError("Trying to add parameter '%s', but the lower"
" limit supplied (%s) exceeds the built-in"
" lower limit (%s)." % (target, low, meta_low))
elif _allowed_types is None or \
'unbounded' not in _allowed_types and 'any' not in _allowed_types:
if low is None:
raise ValueError("Trying to add parameter '%s', "
"but no lower limit was found and no "
"'low' argument was given. One or the "
"other must be specified." % target)
meta_high = metadata.get('high') # will be None if 'high' isn't there
if meta_high is not None:
if high is None:
self.high = self._untransform(meta_high)
elif high > self._untransform(meta_high):
raise ValueError("Trying to add parameter '%s', but the upper"
" limit supplied (%s) exceeds the built-in"
" upper limit (%s)."
% (target, high, meta_high))
elif _allowed_types is None or \
'unbounded' not in _allowed_types and 'any' not in _allowed_types:
if high is None:
raise ValueError("Trying to add parameter '%s', "
"but no upper limit was found and no "
"'high' argument was given. One or the "
"other must be specified." % target)
if self.low > self.high:
raise ValueError("Parameter '%s' has a lower bound (%s) that"
" exceeds its upper bound (%s)" %
(target, self.low, self.high))
def __eq__(self, other):
if not isinstance(other, Parameter):
return False
return (self._expreval,self.scaler,self.adder,self.low,self.high,self.fd_step,self.start,self.name) == \
(other._expreval,other.scaler,other.adder,other.low,other.high,other.fd_step,other.start,self.name)
def __repr__(self):
return '<Parameter(target=%s,low=%s,high=%s,fd_step=%s,scaler=%s,adder=%s,start=%s,name=%s)>' % \
self.get_config()
@property
def names(self):
"""A one element list containing the name of this parameter."""
return [self.name]
@property
def size(self):
"""Total scalar items in this parameter."""
return 1
def configure(self):
"""Reconfigure from potentially changed target."""
pass
def get_high(self):
"""Returns upper limits as a sequence."""
return [self.high]
def get_low(self):
"""Returns lower limits as a sequence."""
return [self.low]
def get_fd_step(self):
"""Returns finite difference step size as a sequence."""
return [self.fd_step]
def evaluate(self, scope=None):
"""Returns the value of this parameter as a sequence."""
return [self._untransform(self._expreval.evaluate(scope))]
def set(self, val, param_owner):
"""Assigns the given value to the target of this parameter."""
transval = self._transform(val)
try:
param_owner._system.vec['u'][self._expreval.text] = transval
except (KeyError, AttributeError):
self._expreval.set(transval)
def copy(self):
"""Return a copy of this Parameter."""
return Parameter(self._expreval.text,
high=self.high, low=self.low,
scaler=self.scaler, adder=self.adder,
start=self.start,
fd_step=self.fd_step,
scope=self._get_scope(), name=self.name)
def override(self, low=None, high=None,
scaler=None, adder=None, start=None,
fd_step=None, name=None):
"""Called by add_parameter() when the target is this Parameter."""
if low is not None:
self.low = low
if high is not None:
self.high = high
if scaler is not None:
self.scaler = scaler
if adder is not None:
self.adder = adder
if start is not None:
self.start = start
if fd_step is not None:
self.fd_step = fd_step
if name is not None:
self.name = name
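# Illustration (a sketch, not part of the library): the scaling convention
# implemented by ParameterBase._transform and _untransform above. A value in
# parameter space maps to variable space as (val + adder) * scaler, and
# _untransform inverts that mapping exactly.
def _scaling_round_trip_sketch(val=3.0, scaler=2.0, adder=0.5):
    var_space = (val + adder) * scaler        # what _transform computes
    recovered = var_space / scaler - adder    # what _untransform computes
    return abs(recovered - val) < 1e-12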
class ParameterGroup(object):
"""A group of Parameters that are treated as one, i.e., they are all
set to the same value.
"""
def __init__(self, params):
for param in params:
# prevent multiply nested ParameterGroups
if not isinstance(param, (Parameter, ArrayParameter)):
raise ValueError("tried to add a non-Parameter object to a"
" ParameterGroup")
self._params = params[:]
param0 = self._params[0]
self.low = max([x.low for x in self._params])
self.high = min([x.high for x in self._params])
self.start = param0.start
self.scaler = param0.scaler
self.adder = param0.adder
self.fd_step = param0.fd_step
self.name = param0.name
self.typename = param0.valtypename
def __eq__(self, other):
if not isinstance(other, ParameterGroup):
return False
return (self._params,self.low,self.high,self.start,self.scaler,self.adder,self.fd_step,self.name) == \
(other._params,other.low,other.high,other.start,other.scaler,other.adder,other.fd_step,self.name)
def __str__(self):
return "%s" % self.targets
def __repr__(self):
return '<ParameterGroup(targets=%s,low=%s,high=%s,fd_step=%s,scaler=%s,adder=%s,start=%s,name=%s)>' % \
(self.targets, self.low, self.high, self.fd_step, self.scaler,
self.adder, self.start, self.name)
@property
def names(self):
"""A one element list containing the name of this parameter."""
return self._params[0].names
@property
def size(self):
"""Total scalar items in this parameter."""
return self._params[0].size
@property
def target(self):
"""The target of the first parameter in the group."""
return self._params[0].target
@property
def targets(self):
"""A list containing the targets of this parameter."""
return [p.target for p in self._params]
def configure(self):
"""Reconfigure from potentially changed target."""
for param in self._params:
param.configure()
def get_high(self):
"""Returns upper limits as a sequence."""
return self._params[0].get_high()
def get_low(self):
"""Returns lower limits as a sequence."""
return self._params[0].get_low()
def get_fd_step(self):
"""Returns finite difference step size as a sequence."""
return self._params[0].get_fd_step()
def set(self, value, param_owner):
"""Set all targets to the given value."""
for param in self._params:
param.set(value, param_owner)
def evaluate(self, scope=None):
"""Return the value of the first parameter in our target list as a
sequence. Values of all of our targets are assumed to be the same.
"""
return self._params[0].evaluate(scope)
def get_metadata(self, metaname=None):
"""Returns a list of tuples of the form (varname, metadata), with one
entry for each variable referenced by a target expression. The
metadata value found in the tuple will be either the specified piece
of metadata, if metaname is provided, or the whole metadata dictionary
for that variable if it is not.
"""
dct = {'low':self.low,
'high':self.high,
'start':self.start,
'scaler':self.scaler,
'adder':self.adder,
'fd_step':self.fd_step,
'name':self.name}
if metaname is not None:
val = dct.get(metaname, __missing)
if val is __missing:
val = None
return [(p.target, val) for p in self._params]
else:
return [(p.target, dct) for p in self._params]
def get_referenced_compnames(self):
"""Return a set of Component names based on the
pathnames of Variables referenced in our target strings.
"""
result = set()
for param in self._params:
result.update(param.get_referenced_compnames())
return result
def get_referenced_vars_by_compname(self):
"""Return a mapping from component name to referencing parameters."""
result = dict()
for param in self._params:
comp = param.get_referenced_compnames().pop()
try:
result[comp].update([param,])
except KeyError:
result[comp] = set([param,])
return result
def get_referenced_varpaths(self, refs=False):
"""Return a set of Variable names referenced in our target strings."""
result = set()
for param in self._params:
result.update(param.get_referenced_varpaths(refs=refs))
return result
def copy(self):
"""Return a copy of this ParameterGroup."""
return ParameterGroup([p.copy() for p in self._params])
def get_config(self):
"""Return list of configuration argument tuples."""
return [p.get_config() for p in self._params]
def _get_scope(self):
"""Return scope of first parameter in group."""
return self._params[0]._get_scope()
def override(self, low=None, high=None,
scaler=None, adder=None, start=None,
fd_step=None, name=None):
"""Called by add_parameter() when the target is this ParameterGroup."""
if low is not None:
self.low = low
if high is not None:
self.high = high
if scaler is not None:
self.scaler = scaler
if adder is not None:
self.adder = adder
if start is not None:
self.start = start
if fd_step is not None:
self.fd_step = fd_step
if name is not None:
self.name = name
def initialize(self, scope, param_owner):
"""Set parameter to initial value."""
for param in self._params:
param.initialize(scope, param_owner)
class ArrayParameter(ParameterBase):
"""A parameter whose target is an array. If scaler and/or adder are not
None, then high, low, and start, if not None, are assumed to be expressed
in unscaled form. If high and low are not supplied, then their values
will be pulled from the target variable (along with a start value), and are
assumed to be in scaled form, so their values will be unscaled prior to
being stored in the ArrayParameter.
"""
def __init__(self, target, high=None, low=None,
scaler=None, adder=None, start=None,
fd_step=None, scope=None, name=None,
_expreval=None, _val=None, _allowed_types=None):
super(ArrayParameter, self).__init__(target, high, low,
scaler, adder, start,
fd_step, scope, name,
_expreval)
if _val is None:
try:
_val = self._expreval.evaluate()
except Exception:
raise ValueError("Can't add parameter because I can't evaluate"
" '%s'." % target)
self.valtypename = _val.dtype.name
if _val.dtype.kind not in 'fi':
raise TypeError('Only float or int arrays are supported')
dtype = self.dtype = _val.dtype
self.shape = _val.shape
self._size = _val.size
# Use scalar arithmetic for transform/untransform if possible.
if scaler is None:
self._scaler = 1.
else:
_scaler = self._convert_sequence(scaler, dtype)
if isinstance(_scaler, ndarray):
self._scaler = _scaler
else:
self._scaler = float(scaler)
if adder is None:
self._adder = 0.
else:
_adder = self._convert_sequence(adder, dtype)
if isinstance(_adder, ndarray):
self._adder = _adder
else:
self._adder = float(adder)
high = self._convert_sequence(high, dtype)
low = self._convert_sequence(low, dtype)
# metadata is in the form (varname, metadata), so use [1] to get
# the actual metadata dict
metadata = self.get_metadata()[1]
meta_low = self._convert_sequence(metadata.get('low'), dtype)
meta_high = self._convert_sequence(metadata.get('high'), dtype)
highs = []
lows = []
for i in range(_val.size):
_high = self._fetch('high', high, i)
_low = self._fetch('low', low, i)
if meta_low is not None:
_meta_low = self._fetch('meta_low', meta_low, i)
if _low is None:
_low = self._untransform(_meta_low)
elif _low < self._untransform(_meta_low):
raise ValueError("Trying to add parameter '%s', but the"
" lower limit supplied (%s) exceeds the"
" built-in lower limit (%s)."
% (target, _low, _meta_low))
elif _allowed_types is None or \
'unbounded' not in _allowed_types and 'any' not in _allowed_types:
if _low is None:
raise ValueError("Trying to add parameter '%s', "
"but no lower limit was found and no "
"'low' argument was given. One or the "
"other must be specified." % target)
if meta_high is not None:
_meta_high = self._fetch('meta_high', meta_high, i)
if _high is None:
_high = self._untransform(_meta_high)
elif _high > self._untransform(_meta_high):
raise ValueError("Trying to add parameter '%s', but the"
" upper limit supplied (%s) exceeds the"
" built-in upper limit (%s)."
% (target, _high, _meta_high))
elif _allowed_types is None or \
'unbounded' not in _allowed_types and 'any' not in _allowed_types:
if high is None:
raise ValueError("Trying to add parameter '%s', "
"but no upper limit was found and no "
"'high' argument was given. One or the "
"other must be specified." % target)
if _low > _high:
raise ValueError("Parameter '%s' has a lower bound (%s) that"
" exceeds its upper bound (%s)"
% (target, _low, _high))
highs.append(_high)
lows.append(_low)
        self._high = array(highs, dtype)
"""
fockgaussian
============
Provives a simple function to calculate the Fock matrix elements of Gaussian
unitary using loop hafnians.
"""
import numpy as np
from thewalrus import hafnian
from strawberryfields.decompositions import takagi
import strawberryfields.backends.gaussianbackend.gaussiancircuit as gc
# pylint: disable=invalid-name
def tmsq(state, i, j, r):
""" Given a gaussiancircuit object it applies a two mode squeezing operator
by amount r between modes i and j using the decomposition of this operation
in terms of beamsplitters and (single mode) squeezers.
Args:
state (gaussiancircuit): A gaussiancircuit object
i,j (integers): The two modes in which to apply the squeezing operation
r (float): Squeezing parameter
"""
state.beamsplitter(np.pi / 4, 0, i, j)
state.squeeze(-r, 0, i)
state.squeeze(r, 0, j)
state.beamsplitter(-np.pi / 4, 0, i, j)
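# Usage sketch (assumes strawberryfields is available, as imported above):
# apply a two-mode squeezer of strength r = 0.5 between modes 0 and 1 of a
# fresh two-mode Gaussian state and inspect its covariance-related matrix.
if __name__ == "__main__":
    example_state = gc.GaussianModes(2)
    tmsq(example_state, 0, 1, 0.5)
    print(example_state.mmat)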
# pylint: disable=too-many-arguments, too-many-locals
def matelem(l, m, n, U, Up, ls, alpha):
""" Calculates a Fock matrix element <m|W(alpha,U,ls,Up)|n> of the Gaussian
unitary W specified by alpha, U, ls, Up.
Args:
l (integer): Number of modes
m (list): List of integers specifying the input Fock states
n (list): List of integers specifying the output Fock states
U (array): Unitary matrix of size l
Up (array): Unitary matrix of size l
ls (array): Squeezing parameters
alpha (array): Complex displacements
Returns:
(complex): Value of the required matrix element
"""
assert l == len(m)
assert l == len(n)
assert U.shape == (l, l)
assert Up.shape == (l, l)
assert len(ls) == l
assert len(alpha) == l
idl = np.identity(l)
# Define extended unitaries that are identities in the second half of the mode
Ue = np.block([[U, 0 * idl], [0 * idl, idl]])
Uep = np.block([[Up, 0 * idl], [0 * idl, idl]])
# Define the ts of the squeezing parameters
# pylint: disable=assignment-from-no-return
ts = np.arcsinh(np.sqrt(1.0 * np.array(n)))
# Now we generate the circuit in Fig 4.(b)
nmodes = 2 * l
state = gc.GaussianModes(nmodes)
for i, t in enumerate(ts):
tmsq(state, i, i + l, -t)
state.apply_u(Uep)
for i, lval in enumerate(ls):
state.squeeze(-lval, 0, i)
state.apply_u(Ue)
# Shortcircuited Bloch-Messiah using Takagi
Mt = state.mmat
lt, ut = takagi(Mt, 15)
# Define the lambda tilde and the u tilde
lt = -0.5 * np.arcsinh(2 * lt)
ut = ut.conj()
alphat = np.array(list(alpha) + list(np.zeros_like(alpha)))
B = ut @ np.diag(np.tanh(lt)) @ ut.T
zeta = alphat - B @ alphat.conj()
pref = -0.5 * alphat.conj() @ zeta
p = m + n
# Calculating prefactors
R = 1.0 / np.prod((np.tanh(ts) ** n) / np.cosh(ts))
prefns = np.sqrt(np.prod(np.array([np.math.factorial(i) for i in p])))
T = np.exp(pref) / (prefns * np.sqrt(np.prod(np.cosh(lt))))
# Calculating the multiset S_p
sp = []
for k, pval in enumerate(p):
for i in range(pval):
sp.append(k)
# Generate Bp with possibly repeated rows and columns
Bp = B[:, sp][sp, :]
# Generate zetap with possibly repeated entries
zetap = zeta[sp]
# Calculate Bt
    np.fill_diagonal(Bp, zetap)
import numpy as np
from PIL import Image
import time
def cal_distance(a, b, A_padding, B, p_size):
p = p_size // 2
patch_a = A_padding[a[0]:a[0]+p_size, a[1]:a[1]+p_size, :]
patch_b = B[b[0]-p:b[0]+p+1, b[1]-p:b[1]+p+1, :]
temp = patch_b - patch_a
num = np.sum(1 - np.int32(np.isnan(temp)))
dist = np.sum(np.square(np.nan_to_num(temp))) / num
return dist
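# Usage sketch (assumed setup): A is padded with NaN by p_size//2 on each
# side, so the patch distance ignores out-of-image pixels. Values are random
# and purely illustrative.
if __name__ == "__main__":
    p_size = 3
    A = np.random.rand(5, 5, 3)
    B = np.random.rand(5, 5, 3)
    A_padding = np.pad(A, ((1, 1), (1, 1), (0, 0)), mode='constant', constant_values=np.nan)
    print(cal_distance((2, 2), (2, 2), A_padding, B, p_size))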
def reconstruction(f, A, B):
    A_h = np.size(A, 0)
import numpy as np
from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities
def compute_trust_values(dsk, do_plot=False):
"""
Compute trust values following formula 6
k:= number of blendshapes
n:= num_features (num_markers*3)
:param dsk: delta_sk vector (k, n)
:param do_plot: decide if we want to plot the between-correlation matrix
:return: trust values vector (k,)
"""
if len(np.shape(dsk)) != 2:
raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(len(np.shape(dsk))))
# compute between-blendshape correlation
ckl = compute_corr_coef(dsk, dsk)
ckl = np.maximum(ckl, np.zeros(np.shape(ckl)))
if do_plot:
plot_similarities(ckl, "Between blendshapes correlation", vmin=0, vmax=1)
# compute lower triangle
    num_k = np.shape(ckl)[0]
import os
import numpy as np
from scipy.optimize import linear_sum_assignment
from ._base_metric import _BaseMetric
from .. import _timing
class HOTA(_BaseMetric):
"""Class which implements the HOTA metrics.
See: https://link.springer.com/article/10.1007/s11263-020-01375-2
"""
def __init__(self):
super().__init__()
self.plottable = True
self.array_labels = np.arange(0.05, 0.99, 0.05)
self.integer_array_fields = ['HOTA_TP', 'HOTA_FN', 'HOTA_FP']
self.float_array_fields = ['HOTA', 'DetA', 'AssA', 'DetRe', 'DetPr', 'AssRe', 'AssPr', 'LocA', 'RHOTA']
self.float_fields = ['HOTA(0)', 'LocA(0)', 'HOTALocA(0)']
self.fields = self.float_array_fields + self.integer_array_fields + self.float_fields
self.summary_fields = self.float_array_fields + self.float_fields
@_timing.time
def eval_sequence(self, data):
"""Calculates the HOTA metrics for one sequence"""
# Initialise results
res = {}
res_per_gt = {} # metric_name: alpha: gt_id
res_per_pr = {}
for field in self.float_array_fields + self.integer_array_fields:
res[field] = np.zeros((len(self.array_labels)), dtype=np.float)
res_per_gt[field] = np.zeros((len(self.array_labels), data['num_gt_ids']), dtype=np.float)
res_per_pr[field] = np.zeros((len(self.array_labels), data['num_tracker_ids']), dtype=np.float)
for field in self.float_fields:
res[field] = 0
# Return result quickly if tracker or gt sequence is empty
if data['num_tracker_dets'] == 0:
res['HOTA_FN'] = data['num_gt_dets'] * np.ones((len(self.array_labels)), dtype=np.float)
res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float)
res['LocA(0)'] = 1.0
return res
if data['num_gt_dets'] == 0:
res['HOTA_FP'] = data['num_tracker_dets'] * np.ones((len(self.array_labels)), dtype=np.float)
res['LocA'] = np.ones((len(self.array_labels)), dtype=np.float)
res['LocA(0)'] = 1.0
return res
# Variables counting global association
potential_matches_count = np.zeros((data['num_gt_ids'], data['num_tracker_ids']))
gt_id_count = np.zeros((data['num_gt_ids'], 1))
tracker_id_count = np.zeros((1, data['num_tracker_ids']))
# First loop through each timestep and accumulate global track information.
for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(data['gt_ids'], data['tracker_ids'])):
# Count the potential matches between ids in each timestep
# These are normalised, weighted by the match similarity.
similarity = data['similarity_scores'][t]
sim_iou_denom = similarity.sum(0)[np.newaxis, :] + similarity.sum(1)[:, np.newaxis] - similarity
sim_iou = np.zeros_like(similarity)
sim_iou_mask = sim_iou_denom > 0 + np.finfo('float').eps
sim_iou[sim_iou_mask] = similarity[sim_iou_mask] / sim_iou_denom[sim_iou_mask]
potential_matches_count[gt_ids_t[:, np.newaxis], tracker_ids_t[np.newaxis, :]] += sim_iou
# Calculate the total number of dets for each gt_id and tracker_id.
gt_id_count[gt_ids_t] += 1
tracker_id_count[0, tracker_ids_t] += 1
# Calculate overall jaccard alignment score (before unique matching) between IDs
global_alignment_score = potential_matches_count / (gt_id_count + tracker_id_count - potential_matches_count)
matches_counts = [np.zeros_like(potential_matches_count) for _ in self.array_labels]
# Calculate scores for each timestep
for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(data['gt_ids'], data['tracker_ids'])):
# Deal with the case that there are no gt_det/tracker_det in a timestep.
if len(gt_ids_t) == 0:
for a, alpha in enumerate(self.array_labels):
res['HOTA_FP'][a] += len(tracker_ids_t)
res_per_pr['HOTA_FP'][:, tracker_ids_t] += 1
continue
if len(tracker_ids_t) == 0:
for a, alpha in enumerate(self.array_labels):
res['HOTA_FN'][a] += len(gt_ids_t)
res_per_gt['HOTA_FN'][:, gt_ids_t] += 1
continue
# Get matching scores between pairs of dets for optimizing HOTA
similarity = data['similarity_scores'][t]
score_mat = global_alignment_score[gt_ids_t[:, np.newaxis], tracker_ids_t[np.newaxis, :]] * similarity
# Hungarian algorithm to find best matches
match_rows, match_cols = linear_sum_assignment(-score_mat)
# Calculate and accumulate basic statistics
for a, alpha in enumerate(self.array_labels):
actually_matched_mask = similarity[match_rows, match_cols] >= alpha - np.finfo('float').eps
alpha_match_rows = match_rows[actually_matched_mask]
alpha_match_cols = match_cols[actually_matched_mask]
num_matches = len(alpha_match_rows)
res['HOTA_TP'][a] += num_matches
res['HOTA_FN'][a] += len(gt_ids_t) - num_matches
res['HOTA_FP'][a] += len(tracker_ids_t) - num_matches
if num_matches > 0:
res['LocA'][a] += sum(similarity[alpha_match_rows, alpha_match_cols])
matches_counts[a][gt_ids_t[alpha_match_rows], tracker_ids_t[alpha_match_cols]] += 1
n_tp, n_fn = 0, 0
for i, gt_id in enumerate(gt_ids_t):
if i in alpha_match_rows: # score_mat is gt x pr
res_per_gt['HOTA_TP'][a][gt_id] += 1
n_tp += 1
else:
res_per_gt['HOTA_FN'][a][gt_id] += 1
n_fn += 1
for i, pr_id in enumerate(tracker_ids_t):
if i in alpha_match_cols:
res_per_pr['HOTA_TP'][a][pr_id] += 1
else:
res_per_pr['HOTA_FP'][a][pr_id] += 1
if not (res_per_gt['HOTA_FN'].sum(1) == res['HOTA_FN']).all():
print((num_matches, n_tp), (len(gt_ids_t) - num_matches, n_fn))
print(res_per_gt['HOTA_FN'].sum(1), res['HOTA_FN'])
assert (res_per_gt['HOTA_FN'].sum(1) == res['HOTA_FN']).all(), f'{res_per_gt["HOTA_FN"].sum(1)} != {res["HOTA_FN"]}'
assert (res_per_gt['HOTA_TP'].sum(1) == res['HOTA_TP']).all(), f'{res_per_gt["HOTA_TP"].sum(1)} != {res["HOTA_TP"]}'
assert (res_per_pr['HOTA_TP'].sum(1) == res['HOTA_TP']).all(), f'{res_per_pr["HOTA_TP"].sum(1)} != {res["HOTA_TP"]}'
assert (res_per_pr['HOTA_FP'].sum(1) == res['HOTA_FP']).all(), f'{res_per_pr["HOTA_FP"].sum(1)} != {res["HOTA_FP"]}'
# Calculate association scores (AssA, AssRe, AssPr) for the alpha value.
# First calculate scores per gt_id/tracker_id combo and then average over the number of detections.
for a, alpha in enumerate(self.array_labels):
matches_count = matches_counts[a]
ass_a = matches_count / np.maximum(1, gt_id_count + tracker_id_count - matches_count) # ass_iou
# All pred mapped to the same GT track have the same Ass-IOU. AssA is Ass-IOU weighted by track length
            res['AssA'][a] = np.sum(matches_count * ass_a) / np.maximum(1, res['HOTA_TP'][a])
import os
import json
import random
import numpy as np
import tensorflow as tf
from dataset import DataProcessor, get_dataset
class BeerProcessor(DataProcessor):
"""
Processor for the Beer dataset.
"""
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")))
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")))
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines):
examples = []
num_classes = len(self.get_labels())
for (i, line) in enumerate(lines):
if i == 0:
continue
text = self._convert_to_unicode(line[0])
label = int(self._convert_to_unicode(line[1]))
# convert label to one-hot format
one_hot_label = [0] * num_classes
one_hot_label[label] = 1
examples.append({"text": text, "label": one_hot_label})
return examples
def get_beer_dataset(data_dir, max_seq_length, word_threshold, balance=False):
"""
Return tf datasets (train and dev) and language index
for the beer dataset.
Assume train.tsv and dev.tsv are in the dir.
"""
processor = BeerProcessor()
train_examples = processor.get_train_examples(data_dir)
dev_examples = processor.get_dev_examples(data_dir)
print("Dataset: Beer Review")
print("Training samples %d, Validation sampels %d" %
(len(train_examples), len(dev_examples)))
# check the label balance
    train_labels = np.array([0., 0.])
import qctests.Argo_global_range_check
import util.testingProfile
import numpy
from util import obs_utils
##### Argo_global_range_check ---------------------------------------------------
def test_Argo_global_range_check_temperature():
'''
Make sure AGRC is flagging temperature excursions
'''
# should fail despite rounding
p = util.testingProfile.fakeProfile([-2.500000001], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag temperature slightly colder than -2.5 C'
# -2.5 OK
p = util.testingProfile.fakeProfile([-2.5], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
assert numpy.array_equal(qc, truth), 'incorrectly flagging -2.5 C'
# 40 OK
p = util.testingProfile.fakeProfile([40], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
assert numpy.array_equal(qc, truth), 'incorrectly flagging 40 C'
# should fail despite rounding
p = util.testingProfile.fakeProfile([40.0000001], [100], latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag temperature slightly warmer than 40 C'
def test_Argo_global_range_check_pressure():
'''
Make sure AGRC is flagging pressure excursions
'''
# should fail despite rounding
p = util.testingProfile.fakeProfile([5], obs_utils.pressure_to_depth([-5.00000001], 0.0), latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
truth[0] = True
assert numpy.array_equal(qc, truth), 'failed to flag pressure slightly below -5 '
# -5 OK
p = util.testingProfile.fakeProfile([5], obs_utils.pressure_to_depth([-5], 0.0), latitude=0.0)
qc = qctests.Argo_global_range_check.test(p, None)
truth = numpy.zeros(1, dtype=bool)
    assert numpy.array_equal(qc, truth)
import numpy as np
x=[20,25,30,35,40,50,60,65,70,75,80,90]
y=[1.81,1.70,1.65,1.55,1.48,1.40,1.30,1.26,1.24,1.21,1.20,1.18]
x=np.array(x)
x2=x**2
print(x)
print(x2)
n=x.shape[0]
X=np.vstack( (np.ones(n),x,x2)).transpose((1,0))
XT=X.transpose((1,0))
Y = np.array(y)
"""Efficient and general interfaces for sampling tasks for Meta-RL."""
import abc
import copy
import math
import numpy as np
from metarl.sampler.env_update import (ExistingEnvUpdate, NewEnvUpdate,
SetTaskUpdate)
def _sample_indices(n_to_sample, n_available_tasks, with_replacement):
"""Select indices of tasks to sample.
Args:
n_to_sample (int): Number of environments to sample. May be greater
than n_available_tasks.
n_available_tasks (int): Number of available tasks. Task indices will
be selected in the range [0, n_available_tasks).
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
np.ndarray[int]: Array of task indices.
"""
if with_replacement:
return np.random.randint(n_available_tasks, size=n_to_sample)
else:
blocks = []
for _ in range(math.ceil(n_to_sample / n_available_tasks)):
s = np.arange(n_available_tasks)
np.random.shuffle(s)
blocks.append(s)
        return np.concatenate(blocks)[:n_to_sample]
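# Quick check (sketch): asking for 5 indices from 3 tasks without replacement
# cycles through all 3 tasks before any index repeats.
if __name__ == "__main__":
    print(_sample_indices(5, 3, with_replacement=False))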
# -*- coding: utf-8 -*-
import sys
import pickle, time
from copy import deepcopy
from shutil import copy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy as sp
from scipy.linalg import svd
from scipy import optimize
from sklearn.neighbors import KernelDensity
### Generalized Entropy
# Trace Normalization
def matrix_entropy(W):
W=W/np.trace(W)
m = W.shape[1]
u, sv, vh = svd(W)
rank = np.linalg.matrix_rank(W)
p = sv*sv
p = p/np.sum(p)
if (rank==1):
rank=1.000001
entropy = - np.sum(p*np.log(p)) / np.log(rank)
return entropy
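# Usage sketch (shapes are illustrative): entropy of a random Gram matrix.
# Values near 1 indicate a flat singular-value spectrum after trace
# normalization.
if __name__ == "__main__":
    W = np.random.randn(100, 50)
    print(matrix_entropy(W.T @ W))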
# Wigner SemiCircle Plots
def marchenko_pastur_pdf(x_min, x_max, Q, sigma=1):
y=1/Q
    x = np.arange(x_min, x_max, 0.001)
"""
This is focused on matching sources in the catalog to those detected in the cubes
"""
import numpy as np
from scipy.interpolate import interp2d, interp1d
import astropy.units as u
from astropy.table import Table, vstack
from astropy.coordinates import SkyCoord, Angle, SkyOffsetFrame, ICRS, Distance
from astropy.coordinates import match_coordinates_sky, search_around_sky
from stats import comoving_volume, get_kms, has_spec_z, get_co_z, convert_deltaZ_to_kms
transitions = {"1-0": [0.0030, 0.3694, 115.271, 0.2801, 89],
"2-1": [1.0059, 1.7387, 230.538, 1.4277, 1920],
"3-2": [2.0088, 3.1080, 345.796, 2.6129, 3363],
"4-3": [3.0115, 4.4771, 461.041, 3.8030, 4149], }
transitions1 = {"1-0": [0.0030, 0.3694, 115.271, 0.2801, 89],
"2-1": [1.0059, 1.7387, 230.538, 1.4277, 1920],
"3-2": [2.0088, 3.1080, 345.796, 2.6129, 3363],
"4-3": [3.0115, 4.4771, 461.041, 3.8030, 4149],
"5-4": [4.0142, 5.8460, 576.268, 4.9933, 4571],
"6-5": [5.0166, 7.2146, 691.473, 6.1843, 4809],
"7-6": [6.0188, 8.5829, 806.652, 7.3750, 4935],}
transitions1 = {"2-1": [0.0, 0.0873, 230.538, 0.0656, 1.4],
"3-2": [0.2713, 0.6309, 345.796, 0.4858, 314],
"4-3": [0.6950, 1.1744, 461.041, 0.9543, 1028],
"5-4": [1.1186, 1.7178, 576.268, 1.4297, 1759],
"6-5": [1.5422, 2.2612, 691.473, 1.9078, 2376],
"7-6": [1.9656, 2.8044, 806.652, 2.3859, 2864],}
temp = {
"C1mm": [0.8094, 1.3212, 492.161, 1.0828, 1233],
"C1_2-1mm": [1.9755, 2.8171, 809.342, 2.3973, 2875],
"C2": [5.9873, 7.9635, 1900.548, 6.9408, 4431],
"C1": [3.2823, 4.8468, 492.161, 4.1242, 4287],
"C1_2-1": [6.0422, 8.6148, 809.342, 7.4031, 4936],
}
def convert_observed_line_to_restframe():
return NotImplementedError
def calculate_delta_z():
return NotImplementedError
def estimate_redshift():
return NotImplementedError
def construct_fid_mask(catalog):
"""
Constructs the fidelity mask based on my results, not Roberto's
:param catalog:
:return:
"""
line_widths = [i for i in range(3, 21, 2)]
fid_catalog = load_table("fidelity_snr.out", start=0)
fid_limit = 0.4
six_fids = []
for width in line_widths:
f = interp1d(fid_catalog["fbin"], fid_catalog["pure{}".format(width)], kind='slinear')
xdata = np.linspace(5.85, 7.85, 10000)
six_fids.append(xdata[np.argmax(f(xdata) >= fid_limit)])
masks = []
line_widths = [i for i in range(3, 21, 2)]
#six_fids = [6.3, 6.2, 6.1, 6.15, 6.1, 6.20, 6.1, 6.20, 6.05]
# six_fids = [6.35, 6.25, 6.15, 6.15, 6.15, 6.25, 6.15, 6.25, 6.05]
# six_fids = [6.25, 6.2, 6.1, 6.1, 6.1, 6.15, 6.1, 6.15, 6.05]
for index, width in enumerate(line_widths):
print(six_fids[index])
masks.append(catalog[((catalog['width'] == width) & (catalog['rsnrrbin'] >= six_fids[index]))])
total = masks[0]
t_sum = 0
for mask in masks[1:]:
t_sum += len(mask)
total = vstack((total, mask))
print("Total One: {}".format(len(total)))
return total
def match_lines_to_catalog_pilot(lines, catalog, max_redshift=0.3, max_sep=1.0, method='closest'):
aspecs_table = Table(names=(
'RA (J2000)', 'DEC (J2000)', 'Roberto ID', 'Roberto RA', 'Roberto DEC', 'Observed CO (GHz)', 'Restframe CO (GHz)',
'Transition', 'Z (Matched)', 'Z (CO)',
'Spec Z', 'Delta Z', 'Delta V (Km/s)', 'Km/s', 'Separation (Arcsecond)', 'S/N', 'Flux Density at Peak (Jy/beam)',
'Integrated Flux (Jy km/s)', 'Width (Channels)', 'Cosmic Volume (Mpc^3)', 'Log(M*)', 'Error Log(M*)', 'Log(SFR)',
'Error Log(SFR)', 'Catalog Index'),
dtype=(
'f8', 'f8', 'int32', 'f8', 'f8', 'f4', 'f4', 'U6', 'f4', 'f4', 'bool', 'f4', 'f8', 'f8', 'f4',
'f4', 'f4', 'f4', 'int8', 'f4', 'f4', 'f4', 'f4', 'f4', 'int32'))
"""
Steps to do so:
Find separations between line coordinates and catalog coordinates
For those that are within the arcsecond limit, see if the galactic redshift is within the range that ASPECS can find
If so, then get the difference in delta_z to see if that is within the range allowed
If so, then get the properties and put together a whole entry on it
If not, see if line matches to a different CO line within that range
If so, save it out
If not within range, see which line it could go to and use that one
"""
# first step is to do is get the SkyCoords
catalog_ra = 'ra'
catalog_dec = 'dc'
# Only choose ones above SN limit
#lines = lines[lines['rsnrrbin'] >= snr_limit]
line_skycoords = make_skycoords(lines, ra='rra', dec='rdc')
catalog_skycoords = make_skycoords(catalog, ra=catalog_ra, dec=catalog_dec)
#for one in line_skycoords:
# print("{} {}".format(one.ra.to_string(unit=u.hour, sep=':'),one.dec.to_string(unit=u.deg, sep=':')))
catalog_ids = []
print()
# Second step is to calculate the catalog matches
if method == 'all_closest':
# This is for getting all the matches, and only keeping the one with the closest redshift
# Do it where it goes through all matches within a given radius
idxc, idxcatalog, d2d, d3d = search_around_sky(line_skycoords, catalog_skycoords, max_sep * u.arcsecond)
#for index, id in enumerate(idxc):
# print("Matched: {} {} To: {} {} Sep: {}".format(line_skycoords[idxc[index]].ra.to_string(unit=u.hour, sep=':'), line_skycoords[idxc[index]].dec.to_string(unit=u.degree, sep=':'), catalog_skycoords[idxcatalog[index]].ra.to_string(unit=u.hour, sep=':'), catalog_skycoords[idxcatalog[index]].dec.to_string(unit=u.degree, sep=':'), d2d[index]))
# Get the set of chosen lines, all not chosen ones are sent to the other thing
chosen_lines = set(idxc)
full_set = set([i for i in range(len(lines))])
non_matched_set_indexes = full_set - chosen_lines
for index, separation in enumerate(d2d):
matched_line = lines[idxc[index]]
matched_to_galaxy = False
# In order of lines, so then need to keep only best match here:
# Also need to keep it so that match to CO is only called once, and only after matched_line changes
if separation.arcsecond < max_sep:
# Could be a match!
# Get the catalog match
matched_galaxy = catalog[idxcatalog[index]] # index is the index in line_skycoord matched
# idx[index] is then the index into catalog that is matched to this one
for key, values in transitions.items():
if (values[0] - max_redshift) < matched_galaxy['z_1'] < (values[1] + max_redshift):
# Now within range of this transition
rest_frame_ghz = convert_to_rest_frame_ghz(matched_galaxy['z_1'],
matched_line['rfreq'])
delta_z, matched_key = get_delta_z(matched_galaxy['z_1'], rest_frame_ghz)
if np.abs(delta_z) <= max_redshift: # Checks that delta z within the range
# Now check with offset if the z is within the range
if matched_galaxy['z_1'] + delta_z < (120.4) or (1.1) <= matched_galaxy['z_1'] + delta_z <= (
1.8) or (2.2) < matched_galaxy['z_1'] + delta_z < (4.4):
matched_to_galaxy = True
# so with offset, the galaxy is now within the range, is above SNR, and have a transition
# Now get the KMS, if there is a Spec Z, Comoving volume, etc. and add to the table
volume = comoving_volume(values[0], values[1], 42.6036)
spec_z = has_spec_z(matched_galaxy)
co_z = get_co_z(matched_line['rfreq'], matched_key)
kms = 0#get_kms(matched_line['width'], matched_line['rfreq'])
delta_v = convert_deltaZ_to_kms(delta_z, co_z)
add_row = False
prev_match_mask = (np.isclose(np.round(aspecs_table['RA (J2000)'], 10), np.round(line_skycoords[idxc[index]].ra.degree, 10)) & np.isclose(np.round(aspecs_table['DEC (J2000)'], 10), np.round(line_skycoords[idxc[index]].dec.degree, 10)))
import pickle
import numpy as np
import scipy.sparse as sps
import lenskit.matrix as lm
from lenskit.util.test import rand_csr
from pytest import mark, approx, raises
@mark.parametrize('copy', [True, False])
def test_csr_from_sps(copy):
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
smat = sps.csr_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_scipy(smat, copy=copy)
assert csr.nnz == smat.nnz
assert csr.nrows == smat.shape[0]
assert csr.ncols == smat.shape[1]
assert all(csr.rowptrs == smat.indptr)
assert all(csr.colinds == smat.indices)
assert all(csr.values == smat.data)
assert isinstance(csr.rowptrs, np.ndarray)
assert isinstance(csr.colinds, np.ndarray)
assert isinstance(csr.values, np.ndarray)
def test_csr_is_numpy_compatible():
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
smat = sps.csr_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_scipy(smat)
d2 = csr.values * 10
assert d2 == approx(smat.data * 10)
def test_csr_from_coo():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
assert csr.nrows == 4
assert csr.ncols == 3
assert csr.nnz == 4
assert csr.values == approx(vals)
def test_csr_rowinds():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
ris = csr.rowinds()
assert all(ris == rows)
def test_csr_set_values():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
v2 = np.random.randn(4)
csr.values = v2
assert all(csr.values == v2)
def test_csr_set_values_oversize():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
"""
Quasi-inverse matrices for Tau and Galerkin methods
@article{julien09,
title = {Efficient multi-dimensional solution of PDEs using Chebyshev spectral methods},
journal = {Journal of Computational Physics},
volume = {228},
number = {5},
pages = {1480-1503},
year = {2009},
issn = {0021-9991},
doi = {https://doi.org/10.1016/j.jcp.2008.10.043}
}
"""
import numpy as np
from shenfun.matrixbase import SparseMatrix
__all__ = ('QIGmat', 'QITmat', 'QICGmat', 'QICTmat')
# Note to self. Matrices below do not use ck because we use them in
# scalar products not inverted with (T_j, T_k)_w = ck*pi/2 \delta_{kj}
class QImat(SparseMatrix):
def __mul__(self, y):
"""Returns copy of self.__mul__(y) <==> self*y"""
if isinstance(y, SparseMatrix):
return y.__quasi__(self)
return SparseMatrix.__mul__(self, y)
class QIGmat(QImat):
"""Quasi-inverse matrix for the Galerkin method
Parameters
----------
N : int
The number of quadrature points
"""
def __init__(self, N):
k = np.arange(N)
#d = {
# 0: 1/4/(k[2:]*(k[2:]-1)),
# 2: -1/2/(k[2:-2]**2-1),
# 4: 1/4/(k[2:-4]*(k[2:-4]+1))}
#SparseMatrix.__init__(self, d, (N-2, N-2))
d = {
0: 1/4/(k[2:]*(k[2:]-1)),
2: -1/2/(k[2:]**2-1),
4: 1/4/(k[2:-2]*(k[2:-2]+1))}
# Note: truncating the upper diagonals is in agreement with
# \cite{julien09}.
d[2][-2:] = 0
d[4][-2:] = 0
SparseMatrix.__init__(self, d, (N-2, N))
class QITmat(QImat):
"""Quasi-inverse matrix for the Tau method
Parameters
----------
N : int
The number of quadrature points
"""
def __init__(self, N):
k = np.arange(N)
d = {
-2: 1/4/(k[2:]*(k[2:]-1)),
0: np.zeros(N),
2: np.zeros(N-2)}
d[0][2:-2] = -1/2/(k[2:-2]**2-1)
d[2][2:-2] = 1/4/(k[2:-4]*(k[2:-4]+1))
# Truncate, like \cite{julien09}
d[0][-2:] = 0
d[2][-2:] = 0
SparseMatrix.__init__(self, d, (N, N))
class QICGmat(QImat):
"""Quasi-inverse matrix for the Galerkin method
Parameters
----------
N : int
The number of quadrature points
"""
def __init__(self, N):
k = np.arange(N)
d = {
0: 1/(2*(k[:-1]+1)),
2: -1/(2*(k[:-2]+1))}
SparseMatrix.__init__(self, d, (N-1, N))
class QICTmat(QImat):
"""Quasi-inverse matrix for the Tau method.
Parameters
----------
N : int
The number of quadrature points
"""
def __init__(self, N):
k = np.arange(N)
"""
U-matrix visualization
Implementation of the classic U-Matrix as described in
<NAME>., and <NAME>.
"Kohonen's Self-Organizing Feature Maps for Exploratory Data Analysis."
In Proc. Intern. Neural Networks, 1990, pp. 305-308, Kluwer Academic Press, Paris, France.
"""
from visualizations.iVisualization import VisualizationInterface
import numpy as np
import panel as pn
class UMatrix(VisualizationInterface):
def __init__(self, main):
self._main = main
def _activate_controllers(self, ):
reference = pn.pane.Str("<ul><li><b>U-Matrix:</b> <NAME>., and <NAME>. \"Kohonen's Self Organizing Feature Maps for Exploratory Data Analysis.\" In Proc. Intern. Neural Networks, 1990, pp. 305-308, Kluwer Academic Press, Paris, France.</li></ul>")
self._main._controls.append(reference)
self._calculate()
def _deactivate_controllers(self,):
pass
def _calculate(self, ):
U = UMatrix.calculate_UMatrix(self._main._weights, self._main._m, self._main._n, self._main._dim)
self._main._display(plot=U)
@staticmethod
def calculate_UMatrix(weights, m, n, dim):
U = weights.reshape(m, n, dim)
U = np.insert(U, np.arange(1, n)
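# Hedged standalone sketch (not the original method body, which is cut off above):
# the classic U-Matrix assigns every map unit the mean distance between its weight
# vector and those of its 4-neighbours. Function and variable names are illustrative.
def umatrix_sketch(weights, m, n, dim):
    W = weights.reshape(m, n, dim)
    U = np.zeros((m, n))
    for i in range(m):
        for j in range(n):
            dists = []
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < m and 0 <= nj < n:
                    dists.append(np.linalg.norm(W[i, j] - W[ni, nj]))
            U[i, j] = np.mean(dists)
    return U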
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import copy
import numpy as np
import theano.tensor as tt
from scipy.linalg import cholesky
from scipy.special import logsumexp
from scipy.stats import multivariate_normal, median_abs_deviation
from scipy.optimize import minimize, approx_fprime
from theano import function as theano_function
import arviz as az
import jax
import jax.numpy as jnp
from jax.experimental import optimizers as jax_optimizers
import time
import pymc3 as pm
import pymc3.nfmc.posdef as posdef
from pymc3.tuning.scaling import find_hessian
from pymc3.tuning.starting import find_MAP
from pymc3.backends.ndarray import NDArray, point_list_to_multitrace
from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point, modelcontext, set_data
from pymc3.distributions.distribution import draw_values, to_tuple
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
hessian,
)
from pymc3.util import (
check_start_vals,
get_default_varnames,
get_var_name,
update_start_vals,
)
from pymc3.vartypes import discrete_types, typefilter
# SINF code for fitting the normalizing flow.
from pymc3.sinf.GIS import GIS
import torch
# This is a global variable used to store the optimization steps.
# Presumably there's a nicer way to do this.
param_store = []
class NFMC:
"""Sequential type normalizing flow based sampling/global approx."""
def __init__(
self,
draws=500,
init_draws=500,
resampling_draws=500,
init_ess=100,
sample_mode='reinit',
cull_lowp_tol=0.05,
model=None,
init_method='prior',
init_samples=None,
start=None,
init_EL2O='adam',
use_hess_EL2O=False,
mean_field_EL2O=False,
absEL2O=1e-10,
fracEL2O=1e-2,
EL2O_draws=100,
maxiter_EL2O=500,
EL2O_optim_method='L-BFGS-B',
scipy_map_method='L-BFGS-B',
adam_lr=1e-3,
adam_b1=0.9,
adam_b2=0.999,
adam_eps=1.0e-8,
adam_steps=1000,
simulator=None,
model_data=None,
sim_data_cov=None,
sim_size=None,
sim_params=None,
sim_start=None,
sim_optim_method='lbfgs',
sim_tol=0.01,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
init_local=True,
nf_local_iter=0,
max_line_search=100,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
final_iteration=None,
alpha=(0,0),
final_alpha=(0.75,0.75),
optim_iter=1000,
ftol=2.220446049250313e-9,
gtol=1.0e-5,
k_trunc=0.25,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor_min=0.5,
bw_factor_max=2.5,
bw_factor_num=11,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
redraw=True,
):
self.draws = draws
self.init_draws = init_draws
self.resampling_draws = resampling_draws
self.init_ess = init_ess
self.sample_mode = sample_mode
self.cull_lowp_tol = cull_lowp_tol
self.model = model
# Init method params.
self.init_method = init_method
self.init_samples = init_samples
self.start = start
self.init_EL2O = init_EL2O
self.mean_field_EL2O = mean_field_EL2O
self.use_hess_EL2O = use_hess_EL2O
self.absEL2O = absEL2O
self.fracEL2O = fracEL2O
self.EL2O_draws = EL2O_draws
self.maxiter_EL2O = maxiter_EL2O
self.EL2O_optim_method = EL2O_optim_method
self.scipy_map_method = scipy_map_method
self.adam_lr = adam_lr
self.adam_b1 = adam_b1
self.adam_b2 = adam_b2
self.adam_eps = adam_eps
self.adam_steps = adam_steps
self.simulator = simulator
self.model_data = model_data
self.sim_data_cov = sim_data_cov
self.sim_size = sim_size
self.sim_params = sim_params
self.sim_start = sim_start
self.sim_optim_method = sim_optim_method
self.sim_tol = sim_tol
# Local exploration params.
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.init_local = init_local
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.random_seed = random_seed
self.chain = chain
# Set the torch seed.
if self.random_seed != 1:
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
# Separating out so I can keep track. These are SINF params.
assert 0.0 <= frac_validate <= 1.0
self.frac_validate = frac_validate
self.iteration = iteration
self.final_iteration = final_iteration
self.alpha = alpha
self.final_alpha = final_alpha
self.optim_iter = optim_iter
self.ftol = ftol
self.gtol = gtol
self.k_trunc = k_trunc
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
self.bw_factors = np.logspace(bw_factor_min, bw_factor_max, bw_factor_num)
self.edge_bins = edge_bins
self.ndata_wT = ndata_wT
self.MSWD_max_iter = MSWD_max_iter
self.NBfirstlayer = NBfirstlayer
self.logit = logit
self.Whiten = Whiten
self.batchsize = batchsize
self.nocuda = nocuda
self.patch = patch
self.shape = shape
#whether to redraw samples at every iteration, used for BO testing
self.redraw = redraw
self.model = modelcontext(model)
if self.random_seed != -1:
np.random.seed(self.random_seed)
self.variables = inputvars(self.model.vars)
def initialize_var_info(self):
"""Extract variable info for the model instance."""
var_info = OrderedDict()
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
self.var_info = var_info
def initialize_population(self):
"""Create an initial population from the prior distribution."""
population = []
if self.init_samples is None:
init_rnd = sample_prior_predictive(
self.init_draws,
var_names=[v.name for v in self.model.unobserved_RVs],
model=self.model,
)
for i in range(self.init_draws):
point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.prior_samples = np.array(floatX(population))
elif self.init_samples is not None:
self.prior_samples = np.copy(self.init_samples)
self.weighted_samples = np.copy(self.prior_samples)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.get_prior_logp()
self.log_weight = self.posterior_logp - self.prior_logp
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
self.regularize_weights()
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*self.prior_logp
self.log_weight_pq_den = 3*self.prior_logp
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log(np.mean( ( np.exp(self.posterior_logp) - np.exp(self.prior_logp+self.log_evidence_pq) )**2 ))
self.init_weights_cleanup(lambda x: self.prior_logp(x), lambda x: self.prior_dlogp(x))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def setup_logp(self):
"""Set up the prior and likelihood logp functions, and derivatives."""
shared = make_shared_replacements(self.variables, self.model)
self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
self.prior_dlogp_func = logp_forw([gradient(self.model.varlogpt, self.variables)], self.variables, shared)
self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
self.posterior_dlogp_func = logp_forw([gradient(self.model.logpt, self.variables)], self.variables, shared)
self.posterior_hessian_func = logp_forw([hessian(self.model.logpt, self.variables)], self.variables, shared)
self.posterior_logp_nojac = logp_forw([self.model.logp_nojact], self.variables, shared)
self.posterior_dlogp_nojac = logp_forw([gradient(self.model.logp_nojact, self.variables)], self.variables, shared)
self.posterior_hessian_nojac = logp_forw([hessian(self.model.logp_nojact, self.variables)], self.variables, shared)
def get_prior_logp(self):
"""Get the prior log probabilities."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
self.prior_logp = np.array(priors).squeeze()
def get_likelihood_logp(self):
"""Get the likelihood log probabilities."""
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.likelihood_logp = np.array(likelihoods).squeeze()
def get_posterior_logp(self):
"""Get the posterior log probabilities."""
posteriors = [self.posterior_logp_func(sample) for sample in self.nf_samples]
self.posterior_logp = np.array(posteriors).squeeze()
def optim_target_logp(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_func(param_vals)
def optim_target_dlogp(self, param_vals):
return -1.0 * self.posterior_dlogp_func(param_vals)
def optim_target_logp_nojac(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_nojac(param_vals)
def optim_target_dlogp_nojac(self, param_vals):
return -1.0 * self.posterior_dlogp_nojac(param_vals)
def prior_dlogp(self, param_vals):
dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_logp(self, param_vals):
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian(self, param_vals):
hessians = [self.posterior_hessian_func(val) for val in param_vals]
return np.array(hessians).squeeze()
def target_logp_nojac(self, param_vals):
logps = [self.posterior_logp_nojac(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp_nojac(self, param_vals):
dlogps = [self.posterior_dlogp_nojac(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian_nojac(self, param_vals):
hessians = [self.posterior_hessian_nojac(val) for val in param_vals]
return np.array(hessians).squeeze()
def sinf_logq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def sinf_dlogq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_dlogq = self.nf_model.score(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_dlogq.squeeze()
def callback(self, xk):
self.optim_iter_samples = np.append(self.optim_iter_samples, np.array([xk]), axis=0)
def optimize(self, sample):
"""Optimize the prior samples"""
self.optim_iter_samples = np.array([sample])
minimize(self.optim_target_logp, x0=sample, method=self.scipy_map_method,
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
jac=self.optim_target_dlogp, callback=self.callback)
return self.optim_iter_samples
def get_MAP(self, map_method='adam', map_start=None):
"""Get the MAP estimate."""
if map_start is None:
map_start = self.start
if map_method == 'adam':
self.optimization_start()
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(map_start)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) /
max(value, np.float64(self.adam_logp(floatX(update_params)))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
else:
map_dict = find_MAP(start=map_start, model=self.model, method=self.scipy_map_method)
return map_dict
def regularize_weights(self):
"""Apply clipping to importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight))
self.log_weight = np.clip(self.log_weight, a_min=None, a_max=logsumexp(self.log_weight[~inf_weights])
- np.log(len(self.log_weight[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight)))
self.weights = np.exp(self.log_weight)
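        # Added note: this is truncated importance sampling in the spirit of
        # Ionides (2008) -- each weight is capped at (mean weight) * N**k_trunc,
        # which bounds the influence of any single sample on the estimator.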
def regularize_weights_pq(self):
"""Apply clipping to pq importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight_pq))
self.log_weight_pq = np.clip(self.log_weight_pq, a_min=None, a_max=logsumexp(self.log_weight_pq[~inf_weights])
- np.log(len(self.log_weight_pq[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight_pq)))
self.weights_pq = np.exp(self.log_weight_pq)
def calculate_ess(self, logw):
"""Calculate ESS given a set of sample weights"""
logw = logw - logsumexp(logw)
ess = np.exp(-logsumexp(2 * logw) - np.log(logw.shape[0]))
return ess
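        # Worked check (added): for N equally weighted samples the normalised
        # log-weights are all -log(N), logsumexp(2 * logw) = -log(N), and the
        # expression returns exp(log(N) - log(N)) = 1, i.e. the full ESS fraction.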
def calculate_weight_variance(self):
"""Calculates the variance of importance weights for a given q."""
return np.var(self.weights)
def shrink_init(self, mu, sigma):
"""Shrinks the initialization until we acheive some ESS."""
while self.q_ess * self.init_draws < self.init_ess:
previous_q_ess = 1.0 * self.q_ess
print(f'Shrinking initialization to improve ESS. Current ESS: {self.q_ess * self.init_draws}')
sigma = sigma / 2
self.weighted_samples = np.random.multivariate_normal(mu, sigma, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2 * multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weights()
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
return sigma
def init_weights_cleanup(self, logq_func=None, dlogq_func=None):
"""Finish initializing the first importance weights (including possible local exploration)."""
self.sinf_logw = np.copy(self.log_weight)
self.importance_weights = np.copy(self.weights)
if self.init_local:
self.local_exploration(logq_func=logq_func, dlogq_func=dlogq_func,
log_thresh=np.log(self.local_thresh))
self.weighted_samples = np.append(self.weighted_samples, self.local_samples, axis=0)
self.nf_samples = np.append(self.nf_samples, self.local_samples, axis=0)
self.log_weight = np.append(self.log_weight, self.local_log_weight)
self.weights = np.append(self.weights, self.local_weights)
self.sinf_logw = np.copy(self.log_weight)
self.importance_weights = np.copy(self.weights)
def run_sinf(self, bw_factor, train_samples, val_samples=None, train_weights=None, val_weights=None,
final=False):
"""Fit SINF given a set of samples (and weights)."""
if final:
sinf_alpha = self.final_alpha
sinf_iteration = self.final_iteration
elif not final:
sinf_alpha = self.alpha
sinf_iteration = self.iteration
if (val_samples is not None and train_weights is not None and val_weights is not None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
torch.from_numpy(val_samples.astype(np.float32)),
weight_train=torch.from_numpy(train_weights.astype(np.float32)),
weight_validate=torch.from_numpy(val_weights.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is None and train_weights is not None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
weight_train=torch.from_numpy(train_weights.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is not None and train_weights is None and val_weights is None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
torch.from_numpy(val_samples.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
elif (val_samples is None and train_weights is None and val_weights is None):
q = GIS(torch.from_numpy(train_samples.astype(np.float32)),
iteration=self.iteration, alpha=self.alpha, verbose=self.verbose,
K=self.n_component, M=self.interp_nbin,
KDE=self.KDE, b_factor=bw_factor, edge_bins=self.edge_bins,
ndata_A=self.ndata_wT, MSWD_max_iter=self.MSWD_max_iter,
NBfirstlayer=self.NBfirstlayer, Whiten=self.Whiten,
batchsize=self.batchsize, nocuda=self.nocuda)
return q
def get_sim_data(self, point):
"""Generate simulated data using the supplied simulator function."""
size = to_tuple(self.sim_size)
params = draw_values([*self.params], point=point, size=1)
forward_sim = self.simulator(*params)
self.sim_data = forward_sim + np.random.multivariate_normal(mean=np.zeros(len(forward_sim)), cov=self.sim_data_cov)
self.sim_params = np.array([])
for p in params:
self.sim_params = np.append(self.sim_params, p)
if self.sim_params.size == 1:
self.sim_params = np.array([self.sim_params])
self.sim_params = self.sim_params.squeeze()
def simulation_init(self):
"""Initialize the model using a simulation-based init (generalization of the Ensemble Kalman filter). INCOMPLETE!"""
assert self.model_data is not None
self.data_MAP = self.get_MAP(map_method=self.sim_optim_method, start=self.start)
self.data_map_arr = np.array([])
for v in self.variables:
self.data_map_arr = np.append(self.data_map_arr, self.data_MAP[v.name])
self.data_map_arr = self.data_map_arr.squeeze()
if self.sim_start is None:
# Check this - only really want MAP of the hyper-params. Maybe can't have self.sim_start as None.
self.sim_start = self.data_MAP
self.sim_samples = np.empty((0, len(self.data_map_arr)))
self.sim_logp_diff = 1000
sim_iter = 1
while self.sim_logp_diff > self.sim_tol:
print(f'Running simulation init iteration: {sim_iter}.')
self.get_sim_data(point=self.sim_start)
set_data({self.model_data.keys(): self.sim_data}, model=self.model)
self.sim_MAP = self.get_MAP(map_method=self.sim_optim_method, start=self.sim_start)
self.sim_map_arr = np.array([])
for v in self.variables:
self.sim_map_arr = np.append(self.sim_map_arr, self.sim_MAP[v.name])
self.sim_map_arr = self.sim_map_arr.squeeze()
self.map_diff = self.sim_map_arr - self.sim_params
self.sim_update = self.data_map_arr + self.map_diff
self.sim_samples = np.append(self.sim_samples, self.sim_update)
set_data({self.model_data.keys(): self.sim_data}, model=self.model)
self.old_logp = self.get_posterior_logp(self.sim_params.reshape(-1, self.sim_params.size))
self.new_logp = self.get_posterior_logp(self.sim_update.reshape(-1, self.sim_update.size))
self.sim_logp_diff = abs(self.old_logp - self.new_logp) / max(abs(self.old_logp), abs(self.new_logp), 1)
sim_iter += 1
self.mu_map = 1.0 * self.sim_update
self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
self.regularize_weights()
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weights_pq()
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3 +self.log_evidence_pq) )**2 ))
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def optimization_start(self):
"""Setup for optimization starting point."""
disc_vars = list(typefilter(self.variables, discrete_types))
allinmodel(self.variables, self.model)
self.start = copy.deepcopy(self.start)
if self.start is None:
self.start = self.model.test_point
else:
update_start_vals(self.start, self.model.test_point, self.model)
check_start_vals(self.start, self.model)
self.start = Point(self.start, model=self.model)
self.bij = DictToArrayBijection(ArrayOrdering(self.variables), self.start)
self.start = self.bij.map(self.start)
self.adam_logp = self.bij.mapf(self.model.fastlogp_nojac)
self.adam_dlogp = self.bij.mapf(self.model.fastdlogp_nojac(self.variables))
def update_adam(self, step, opt_state, opt_update, get_params):
"""Jax implemented ADAM update."""
params = np.asarray(get_params(opt_state)).astype(np.float64)
value = np.float64(self.adam_logp(floatX(params.squeeze())))
grads = -1 * jnp.asarray(np.float64(self.adam_dlogp(floatX(params.squeeze()))))
opt_state = opt_update(step, grads, opt_state)
update_params = np.asarray(get_params(opt_state)).astype(np.float64)
return value, opt_state, update_params
def adam_map_hess(self):
"""Use ADAM to find the MAP solution."""
self.optimization_start()
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(self.start)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) /
max(value, np.float64(self.adam_logp(floatX(update_params)))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
self.map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
self.mu_map = np.array([])
for v in self.variables:
self.mu_map = np.append(self.mu_map, self.map_dict[v.name])
self.mu_map = self.mu_map.squeeze()
if self.mu_map.size == 1:
self.mu_map = np.array([self.mu_map])
print(f'BIJ rmap = {self.map_dict}')
print(f'ADAM map solution = {self.mu_map}')
if self.mu_map.size == 1:
self.hess_inv = 1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))
else:
self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
if not posdef.isPD(self.hess_inv):
print(f'Autodiff Hessian is not positive semi-definite. Building Hessian with L-BFGS run starting from ADAM MAP.')
self.scipy_opt = minimize(self.optim_target_logp_nojac, x0=self.mu_map, method='L-BFGS-B',
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
jac=self.optim_target_dlogp)
print(f'lbfgs Hessian inverse = {self.scipy_opt.hess_inv.todense()}')
self.hess_inv = self.scipy_opt.hess_inv.todense()
print(f'Final MAP solution = {self.mu_map}')
print(f'Inverse Hessian at MAP = {self.hess_inv}')
self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map.squeeze(), self.hess_inv, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.hess_inv = self.shrink_init(self.mu_map, self.hess_inv)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def local_exploration(self, logq_func=None, dlogq_func=None, log_thresh=None):
"""Perform local exploration."""
if log_thresh is None:
self.high_iw_idx = np.where(self.log_weight >= np.log(self.local_thresh))[0]
else:
self.high_iw_idx = np.where(self.log_weight >= log_thresh)[0]
self.num_local = len(self.high_iw_idx)
if self.sample_mode == 'function_approx':
self.high_iw_samples = self.weighted_samples[self.high_iw_idx, ...]
else:
self.high_iw_samples = self.nf_samples[self.high_iw_idx, ...]
self.high_log_weight = self.log_weight[self.high_iw_idx]
self.high_weights = self.weights[self.high_iw_idx]
print(f'Number of points we perform additional local exploration around = {self.num_local}')
self.local_samples = np.empty((0, np.shape(self.high_iw_samples)[1]))
self.local_log_weight = np.array([])
self.modified_log_weight = np.array([])
self.local_weights = np.array([])
self.modified_weights = np.array([])
for i, sample in enumerate(self.high_iw_samples):
sample = sample.reshape(-1, sample.size)
if self.local_grad:
if dlogq_func is None:
raise Exception('Using gradient-based exploration requires you to supply dlogq_func.')
self.log_weight_grad = self.target_dlogp(sample.astype(np.float64)) - dlogq_func(sample.astype(np.float64))
elif not self.local_grad:
if logq_func is None:
raise Exception('Gradient-free exploration approximates gradients with finite differences, which requires you to supply logq_func.')
self.log_weight_grad = (approx_fprime(sample, self.target_logp, np.finfo(float).eps)
- approx_fprime(sample, logq_func, np.finfo(float).eps))
self.log_weight_grad = np.asarray(self.log_weight_grad).astype(np.float64)
delta = 1.0 * self.local_step_size
proposed_step_inc = sample + delta * self.log_weight_grad
line_search_iter = 0
while (logq_func(proposed_step_inc) - logq_func(sample) <= -np.log(2) or
logq_func(proposed_step_inc) - logq_func(sample) >= 0):
delta = delta / 2.0
proposed_step_inc = sample + delta * self.log_weight_grad
line_search_iter += 1
if line_search_iter >= self.max_line_search:
break
proposed_step_dec = sample - delta * self.log_weight_grad
sample_logp = self.target_logp(sample)
proposed_logp_inc = self.target_logp(proposed_step_inc)
proposed_logp_dec = self.target_logp(proposed_step_dec)
max_logp = max(sample_logp, proposed_logp_inc, proposed_logp_dec)
local_log_w_inc = (self.high_log_weight[i] + proposed_logp_inc - max_logp -
np.log(np.exp(proposed_logp_inc - max_logp) +
np.exp(sample_logp - max_logp) +
np.exp(proposed_logp_dec - max_logp)))
modif_log_w = (self.high_log_weight[i] + sample_logp - max_logp -
np.log(np.exp(proposed_logp_inc - max_logp) +
np.exp(sample_logp - max_logp) +
np.exp(proposed_logp_dec - max_logp)))
local_log_w_dec = (self.high_log_weight[i] + proposed_logp_dec - max_logp -
np.log(np.exp(proposed_logp_dec - max_logp) +
np.exp(sample_logp - max_logp) +
np.exp(proposed_logp_inc - max_logp)))
self.local_log_weight = np.append(self.local_log_weight, local_log_w_inc)
self.local_log_weight = np.append(self.local_log_weight, local_log_w_dec)
self.modified_log_weight = np.append(self.modified_log_weight, modif_log_w)
self.local_weights = np.append(self.local_weights, np.exp(local_log_w_inc))
self.local_weights = np.append(self.local_weights, np.exp(local_log_w_dec))
self.modified_weights = np.append(self.modified_weights, np.exp(modif_log_w))
self.local_samples = np.append(self.local_samples, proposed_step_inc, axis=0)
self.local_samples = np.append(self.local_samples, proposed_step_dec, axis=0)
self.log_weight[self.high_iw_idx] = self.modified_log_weight
self.weights[self.high_iw_idx] = self.modified_weights
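        # Added note: each high-weight sample is replaced by three points (itself
        # plus one gradient ascent step and one descent step); its original weight
        # is redistributed among them in proportion to the target density at each
        # point, so the total importance weight is conserved.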
def initialize_map_hess(self):
"""Initialize using scipy MAP optimization and Hessian."""
self.map_dict, self.scipy_opt = find_MAP(start=self.start, model=self.model, method=self.scipy_map_method, return_raw=True)
self.mu_map = []
for v in self.variables:
self.mu_map.append(self.map_dict[v.name])
self.mu_map = np.array(self.mu_map).squeeze()
if self.mu_map.size == 1:
self.mu_map = np.array([self.mu_map])
if self.init_method == 'lbfgs':
assert self.scipy_map_method == 'L-BFGS-B'
self.hess_inv = self.scipy_opt.hess_inv.todense()
if self.init_method == 'map+laplace':
if self.mu_map.size == 1:
self.hess_inv = np.array([1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))]).reshape(-1, 1)
else:
self.hess_inv = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
print(f'Map+Laplace mean = {self.mu_map}')
print(f'Map+Laplace covariance = {self.hess_inv}')
self.weighted_samples = np.random.multivariate_normal(self.mu_map, self.hess_inv, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.hess_inv = self.shrink_init(self.mu_map, self.hess_inv)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def logq_fr_el2o(self, z, mu, Sigma):
"""Logq for full-rank Gaussian family."""
return jnp.reshape(jax.scipy.stats.multivariate_normal.logpdf(z, mu, Sigma), ())
def get_map_laplace(self):
"""Find the MAP+Laplace solution for the model."""
if self.init_EL2O == 'adam':
self.optimization_start()
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(self.start)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) / max(value, np.float64(self.adam_logp(floatX(update_params)))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
self.map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
else:
self.map_dict = find_MAP(start=self.start, model=self.model, method=self.scipy_map_method)
self.mu_map = np.array([])
for v in self.variables:
self.mu_map = np.append(self.mu_map, self.map_dict[v.name])
self.mu_map = self.mu_map.squeeze()
if self.mu_map.size == 1:
self.mu_map = np.array([self.mu_map])
self.Sigma_map = np.array([1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))]).reshape(-1, 1)
else:
self.Sigma_map = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
print(f'MAP estimate = {self.map_dict}')
print(f'Sigma estimate at MAP = {self.Sigma_map}')
def run_el2o(self):
"""Run the EL2O algorithm, assuming you've got the MAP+Laplace solution."""
self.mu_k = self.mu_map
self.Sigma_k = self.Sigma_map
self.EL2O = [1e10, 1]
self.zk = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=len(self.mu_k))
Niter = 1
while (self.EL2O[-1] > self.absEL2O
and abs((self.EL2O[-1] - self.EL2O[-2]) / self.EL2O[-1]) > self.fracEL2O
and Niter < self.maxiter_EL2O):
self.zk = np.vstack((self.zk, np.random.multivariate_normal(self.mu_k, self.Sigma_k)))
Nk = len(self.zk)
if not self.use_hess_EL2O:
temp1 = 0
temp2 = 0
for k in range(Nk):
temp1 += np.outer(self.zk[k, :] - np.mean(self.zk, axis=0), self.zk[k, :] - np.mean(self.zk, axis=0))
temp2 += np.outer(self.zk[k, :] - np.mean(self.zk, axis=0), self.target_dlogp(self.zk[k, :].reshape(-1, self.zk[k, :].size)))
if self.mean_field_EL2O:
self.Sigma_k = -1 * np.diag(temp2) / np.diag(temp1)
self.Sigma_k = 1.0 / self.Sigma_k
self.Sigma_k = self.Sigma_k * np.eye(self.Sigma_k.size)
elif not self.mean_field_EL2O:
if temp1.size == 1:
self.Sigma_k = -1 * temp2 / temp1
self.Sigma_k = np.array([1.0 / self.Sigma_k]).reshape(-1, 1)
else:
self.Sigma_k = -1 * np.matmul(np.linalg.inv(temp1), temp2)
self.Sigma_k = np.linalg.inv(self.Sigma_k)
elif self.use_hess_EL2O:
self.Sigma_k = np.linalg.inv(np.sum(self.target_hessian(self.zk), axis=0) / Nk)
if self.mean_field_EL2O:
self.Sigma_k = np.diag(self.Sigma_k) * np.eye(len(self.Sigma_k))
temp = 0
for j in range(Nk):
if self.zk[j, :].size == 1:
joint_logp = np.array([self.target_dlogp(self.zk[j, :].reshape(-1, self.zk[j, :].size))])
else:
joint_logp = self.target_dlogp(self.zk[j, :].reshape(-1, self.zk[j, :].size))
temp += np.matmul(self.Sigma_k, joint_logp)
self.mu_k = np.mean(self.zk, axis=0) + temp / Nk
self.EL2O = np.append(self.EL2O, (1 / (len(self.zk)) * (np.sum((self.target_logp(self.zk) -
jax.vmap(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k), in_axes=0)(self.zk))**2) +
np.sum((self.target_dlogp(self.zk) -
jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)), in_axes=0)(self.zk))**2)
)))
Niter += 1
print(f'Final EL2O mu = {self.mu_k}')
print(f'Final EL2O Sigma = {self.Sigma_k}')
self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.Sigma_k = self.shrink_init(self.mu_k, self.Sigma_k)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def run_el2o_optim(self):
"""Runs EL2O, optimizing for the elements of the Cholesky decomposition of the covariance."""
self.mu_k = self.mu_map
self.Sigma_k = self.Sigma_map
self.L_k = cholesky(self.Sigma_k, lower=True)
self.tril_ind = np.tril_indices(len(self.L_k))
if self.mean_field_EL2O:
self.L_k = np.sqrt(np.diag(self.Sigma_k)) * np.eye(len(self.L_k))
self.tril_ind = np.diag_indices_from(self.L_k)
print(len(self.L_k))
#self.const_k = 0
self.EL2O = [1e10, 1]
Ndim = len(self.mu_k)
Niter = 1
while (self.EL2O[-1] > self.absEL2O
and abs((self.EL2O[-1] - self.EL2O[-2]) / self.EL2O[-1]) > self.fracEL2O
and Niter < self.maxiter_EL2O):
print(f"EL2O iteration: {Niter}")
if Niter < 3:
self.zk = np.random.multivariate_normal(self.mu_k, np.matmul(self.L_k, self.L_k.T), size=self.EL2O_draws)
else:
self.zk = np.vstack((self.zk,
np.random.multivariate_normal(self.mu_k, np.matmul(self.L_k, self.L_k.T),
size=self.EL2O_draws)))
#self.zk = self.zk.reshape(-1, Ndim)
eloargs0 = np.copy(self.mu_k)
eloargs0 = np.append(eloargs0, self.L_k[self.tril_ind])
#eloargs0 = np.append(eloargs0, self.const_k)
#eloargs0 = self.L_k[self.tril_ind]
if self.EL2O_optim_method == 'adam':
print('Using Adam for ELO optimization.')
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(eloargs0)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_elo_adam(i, opt_state, opt_update, get_params, self.zk)
target_diff = np.abs((value - np.float64(self.elo_cost(update_params.squeeze(), self.zk))) /
max(value, np.float64(self.elo_cost(update_params.squeeze(), self.zk))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
opt_result = update_params.squeeze()
self.mu_k = opt_result[0:Ndim]
self.L_k[self.tril_ind] = opt_result[Ndim:]
#self.L_k[self.tril_ind] = opt_result
#self.const_k = opt_result[-1]
self.EL2O = np.append(self.EL2O, self.elo_cost(opt_result.squeeze(), self.zk))
print(f'EL2O: {self.elo_cost(opt_result.squeeze(), self.zk)}')
elif self.EL2O_optim_method != 'adam':
opt_result = minimize(self.elo_cost, x0=eloargs0,
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
method=self.EL2O_optim_method, args=(self.zk,),
jac=np.asarray(jax.grad(self.elo_cost)))
self.mu_k = opt_result.x[0:Ndim]
self.L_k[self.tril_ind] = opt_result.x[Ndim:]
#self.L_k[self.tril_ind] = opt_result.x
#self.const_k = opt_result.x[-1]
self.EL2O = np.append(self.EL2O, self.elo_cost(opt_result.x, self.zk))
print(f'EL2O: {self.elo_cost(opt_result.x, self.zk)}')
Niter += 1
self.Sigma_k = np.matmul(self.L_k, self.L_k.T)
print(f'Final EL2O mu = {self.mu_k}')
print(f'Final EL2O Sigma = {self.Sigma_k}')
#Sigma_lam = self.Sigma_k + lam * np.diag(self.Sigma_k) * np.eye(len(self.Sigma_k))
self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.Sigma_k = self.shrink_init(self.mu_k, self.Sigma_k)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
'''
self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
'''
self.all_logq = np.array([])
self.nf_models = []
def elo_cost(self, eloargs, z):
"""EL2O cost function, used for EL2O optimization."""
_mu_k = eloargs[0:z.shape[1]]
_L_k = jnp.zeros((z.shape[1], z.shape[1]))
_L_k = jax.ops.index_update(_L_k, self.tril_ind, eloargs[z.shape[1]:])
#_L_k = jax.ops.index_update(_L_k, self.tril_ind, eloargs)
#_const_k = eloargs[-1]
'''
elo = (1 / len(z)) * (jnp.sum((jnp.asarray(self.target_logp(z)) -
jax.vmap(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T)), in_axes=0)(z)
- _const_k)**2) +
jnp.sum((jnp.asarray(self.target_dlogp(z)) -
jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T))), in_axes=0)(z))**2
))
'''
elo = (1 / len(z)) * jnp.sum((jnp.asarray(self.target_dlogp(z)) -
jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T))), in_axes=0)(z))**2)
return elo
def update_elo_adam(self, step, opt_state, opt_update, get_params, z):
"""Adam update step for EL2O optimization."""
params = np.asarray(get_params(opt_state)).astype(np.float64)
value = np.asarray(self.elo_cost(params.squeeze(), z))
grads = jax.grad(self.elo_cost)(params.squeeze(), z)
opt_state = opt_update(step, grads, opt_state)
update_params = np.asarray(get_params(opt_state)).astype(np.float64)
return value, opt_state, update_params
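    # Illustrative sketch only -- not part of the original fit. It shows one way the
    # elo_cost / update_elo_adam pair above could be driven by an Adam loop. Assumptions:
    # the optimizer triple comes from jax.example_libraries.optimizers (older JAX exposes
    # the same API under jax.experimental.optimizers), and the parameters are packed as
    # mu_k followed by the lower-triangular entries of the Cholesky factor of Sigma_k,
    # mirroring the slicing done inside elo_cost.
    def _example_el2o_adam_loop(self, z, n_steps=100, step_size=1e-2):
        from jax.example_libraries import optimizers
        init_params = np.concatenate([np.asarray(self.mu_k).ravel(),
                                      np.linalg.cholesky(self.Sigma_k)[self.tril_ind]])
        opt_init, opt_update, get_params = optimizers.adam(step_size)
        opt_state = opt_init(init_params)
        for step in range(n_steps):
            # `value` is the current EL2O objective; it can be logged to track convergence
            value, opt_state, params = self.update_elo_adam(step, opt_state, opt_update,
                                                            get_params, z)
        return params  # packed as [mu_k, tril(L_k)], matching elo_cost's layout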
def run_advi(self):
"""Runs mean-field ADVI for initialization."""
if self.init_method == 'advi':
advi = pm.fit(method='advi', model=self.model)
elif self.init_method == 'fullrank_advi':
advi = pm.fit(method='fullrank_advi', model=self.model)
advi_samples = advi.sample(self.init_draws)
print(f'ADVI mean = {advi.mean.eval()}')
print(f'ADVI cov = {advi.cov.eval()}')
population = []
for i in range(self.init_draws):
point = Point({v.name: advi_samples[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.weighted_samples = np.array(floatX(population))
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
advi.cov.eval(), allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
advi.cov.eval(), allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
advi.cov.eval(), allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
        self.evidence_pq = np.exp(self.log_evidence_pq)
import os
import numpy as np
import flopy
import warnings
from io import StringIO
from struct import pack
from tempfile import TemporaryFile
from textwrap import dedent
from flopy.utils.util_array import Util2d, Util3d, Transient2d, Transient3d
from ci_framework import base_test_dir, FlopyTestSetup
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
def test_load_txt_free():
a = np.ones((10,), dtype=np.float32) * 250.0
fp = StringIO("10*250.0")
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
0 1,2,3, 4
5 6, 7, 8 9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32)
a[1, 0] = 2.2
fp = StringIO(
dedent(
"""\
5*1.0
2.2 2*1.0, +1E-00 1.0
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_txt_fixed():
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
01234X
56789
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
fp = StringIO(
dedent(
"""\
0123X
4
5678
9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(4I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.array([[-1, 1, -2, 2, -3], [3, -4, 4, -5, 5]], np.int32)
fp = StringIO(
dedent(
"""\
-1 1-2 2-3
3 -44 -55
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I2)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_block():
a = np.ones((2, 5), dtype=np.int32) * 4
fp = StringIO(
dedent(
"""\
1
1 2 1 5 4
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32) * 4
a[0:2, 1:2] = 9.0
a[0, 2:4] = 6.0
fp = StringIO(
dedent(
"""\
3
1 2 1 5 4.0
1 2 2 2 9.0
1 1 3 4 6.0
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.zeros((2, 5), dtype=np.int32)
a[0, 2:4] = 8
fp = StringIO(
dedent(
"""\
1
1 1 3 4 8
"""
)
)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fa = Util2d.load_block(a.shape, fp, a.dtype)
assert len(w) == 1
assert "blocks do not cover full array" in str(w[-1].message)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_bin():
model_ws = f"{base_dir}_test_load_bin"
test_setup = FlopyTestSetup(test_dirs=model_ws)
def temp_file(data):
# writable file that is destroyed as soon as it is closed
f = TemporaryFile(dir=model_ws)
f.write(data)
f.seek(0)
return f
# INTEGER
a = np.arange(3 * 4, dtype=np.int32).reshape((3, 4)) - 1
fp = temp_file(a.tobytes())
fh, fa = Util2d.load_bin((3, 4), fp, np.int32)
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
# check warning if wrong integer type is used to read 4-byte integers
# e.g. on platforms where int -> int64
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fp.seek(0)
fh, fa = Util2d.load_bin((3, 4), fp, np.int64)
fp.close()
assert len(w) == 1
assert a.dtype == np.int32
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
# REAL
real_header_fmt = "2i2f16s3i"
header_data = (1, 2, 3.5, 4.5, b"Hello", 6, 7, 8)
real_header = pack(real_header_fmt, *header_data)
assert len(real_header) == 44
a = np.arange(10).reshape((2, 5))
fp = temp_file(real_header + pack("10f", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float32, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float32), fa)
assert fa.dtype == np.float32
# DOUBLE PRECISION
dbl_header_fmt = "2i2d16s3i"
dbl_header = pack(dbl_header_fmt, *header_data)
assert len(dbl_header) == 52
    fp = temp_file(dbl_header + pack("10d", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float64, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float64), fa)
assert fa.dtype == np.float64
def test_transient2d():
ml = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ml, nlay=10, nrow=10, ncol=10, nper=3)
t2d = Transient2d(ml, (10, 10), np.float32, 10.0, "fake")
a1 = t2d.array
assert a1.shape == (3, 1, 10, 10), a1.shape
t2d.cnstnt = 2.0
assert np.array_equal(t2d.array, np.zeros((3, 1, 10, 10)) + 20.0)
t2d[0] = 1.0
t2d[2] = 999
assert np.array_equal(t2d[0].array, np.ones((ml.nrow, ml.ncol)))
assert np.array_equal(t2d[2].array, np.ones((ml.nrow, ml.ncol)) * 999)
m4d = t2d.array
t2d2 = Transient2d.from_4d(ml, "rch", {"rech": m4d})
m4d2 = t2d2.array
    assert np.array_equal(m4d, m4d2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 17:25:58 2020
@author: ligk2e
"""
from Bio.Alphabet import generic_dna
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio import AlignIO
from Bio.SubsMat import MatrixInfo
import json
import numpy as np
from collections import Counter
import pandas as pd
import re
from sklearn.decomposition import PCA
from sklearn.preprocessing import Normalizer, StandardScaler,MinMaxScaler,RobustScaler
from sklearn.metrics import roc_curve, auc, precision_recall_curve, confusion_matrix, f1_score,accuracy_score
import collections
import itertools
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split,Subset
def json2fsa(hla):
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope/{}.json'.format(hla),'r') as f:
data = json.load(f)
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa/{}.fsa'.format(hla),'w') as f:
for item in data:
key = list(item.keys())[0]
value = list(item.values())[0]
f.write('>{}\n'.format(key))
f.write('{}\n'.format(value))
def multiple_json2fsa():
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope/inventory_new.txt','r') as f1:
for line in f1:
line = line.rstrip('\n')
json2fsa(line)
'''
run clustal-omega:
download binary: http://www.clustal.org/omega/
chmod 777 ./clustal-omega-1.2.3-macosx
./clustal-omega-1.2.3-macosx -i "/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa/HLA-A*0101.fsa" -o "/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa_aligned/HLA-A*0101.aligned.fasta" --auto -v
run multiple sequentially:
cat /Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope/inventory_new.txt | while read line; do
./clustal-omega-1.2.3-macosx -i "/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa/${line}.fsa" -o "/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa_aligned/${line}.aligned.fasta" --auto -v; done
only hla that has more than 1 paratope will be processed in clustal-omega
'''
def single_paratope(hla):
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa/{}.fsa'.format(hla),'r') as f:
seq = f.readlines()[1].rstrip('\n')
return hla,seq
def matrix2concensus(mat):
final = ''
for j in range(mat.shape[1]):
most = Counter(mat[:,j]).most_common(1)[0][0] # if most_common(2): [('A', 3), ('C', 1)]
if most == '-':
most = Counter(mat[:,j]).most_common(2)[1][0]
final += most
return final
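# Small illustration (made-up 3-sequence alignment, not real HLA data): the consensus
# keeps the most common residue per column and, whenever a gap '-' wins a column,
# falls back to the runner-up residue instead.
def _example_matrix2concensus():
    mat = np.array([list('AC-G'), list('A--G'), list('ACTG')])
    return matrix2concensus(mat)   # -> 'ACTG' (the gap in column 2 is overruled by 'T')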
def msa_paratope(hla):
alignment = AlignIO.read(open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa_aligned/{}.aligned.fasta'.format(hla)),"fasta")
msa = []
for record in alignment:
msa.append(list(record.seq)) # another part is record.id
mat = np.array(msa)
final = matrix2concensus(mat)
return hla,final
def hla_paratope():
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa/inventory_single.txt','r') as single:
singles = single.readlines() # each one will contain '\n'
singles = [item.rstrip('\n') for item in singles]
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla_paratope_fsa_aligned/inventory_msa.txt','r') as multiple:
multiples = multiple.readlines()
multiples = [item.rstrip('\n') for item in multiples]
with open('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla2paratopeTable.txt','w') as f:
for item in singles:
hla,seq = single_paratope(item)
f.write('{0}\t{1}\n'.format(hla,seq))
for item in multiples:
hla,seq = msa_paratope(item)
f.write('{0}\t{1}\n'.format(hla,seq))
########################################################################################################
def clean_series(series): # give a pandas series
if series.dtype == object: # pandas will store str as object since string has variable length, you can use astype('|S')
clean = []
for item in series:
item = item.lstrip(' ') # remove leading whitespace
item = item.rstrip(' ') # remove trailing whitespace
item = item.replace(' ','') # replace all whitespace in the middle
clean.append(item)
else:
clean = series
return pd.Series(clean)
def clean_data_frame(data): # give a pandas dataFrame
peptide_clean = clean_series(data['peptide'])
hla_clean = clean_series(data['HLA'])
immunogenecity_clean = clean_series(data['immunogenecity'])
data_clean = pd.concat([peptide_clean,hla_clean,immunogenecity_clean],axis=1)
data_clean.columns = ['peptide','HLA','immunogenecity']
return data_clean
def convert_hla(hla):
cond = True
hla = hla.replace(':','')
if len(hla) < 9: cond = False # HLA-A3
elif len(hla) == 9: # HLA-A3002
f = hla[0:5] # HLA-A
e = hla[5:] # 3002
hla = f+'*'+e
return hla,cond
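# Quick check of the rule above (hypothetical inputs): a colon-delimited 4-digit allele
# gains the '*' separator, while names that are too short are flagged for removal.
def _example_convert_hla():
    assert convert_hla('HLA-A30:02') == ('HLA-A*3002', True)
    assert convert_hla('HLA-A3') == ('HLA-A3', False)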
def convert_hla_series(df):
new = []
col = []
for i in df['HLA']:
hla,cond = convert_hla(i)
col.append(cond)
if cond == True: new.append(hla)
df = df.loc[pd.Series(col)]
df = df.set_index(pd.Index(np.arange(df.shape[0])))
df['HLA'] = new
return df
def test_no_space(series):
for i in series:
if ' ' in i:
print('damn')
'''
a = pd.read_excel('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/data/data.xlsx')
a1 = clean_data_frame(a)
test_no_space(a1.iloc[:,0])
test_no_space(a1.iloc[:,1])
a1.iloc[:,2].dtype
a2 = convert_hla_series(a1)
a2.to_csv('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/data/data.txt',sep='\t',index=None)
then use:
{ cat data.txt | head -n 1; cat data.txt | tail -n +2 | sort -u -k1,2; } > data_new.txt, only 32669 training data left
ori = pd.read_csv('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/data/data_new.txt',sep='\t')
hla = pd.read_csv('/Users/ligk2e/Desktop/NeoAntigenWorkflow/immunogenecity/hla2paratopeTable.txt',sep='\t',header=None,names=['hla','paratope'])
inventory = hla['hla']
dic_inventory = dict_inventory(inventory)
'''
def dict_inventory(inventory):
dicA,dicB,dicC = {},{},{}
dic = {'A':dicA,'B':dicB,'C':dicC}
for hla in inventory:
type_ = hla[4] # A,B,C
first2 = hla[6:8] # 01
last2 = hla[8:] # 01
try:
dic[type_][first2].append(last2)
except KeyError:
dic[type_][first2] = []
dic[type_][first2].append(last2)
return dic
def rescue_unknown_hla(hla,dic_inventory):
type_ = hla[4]
first2 = hla[6:8]
last2 = hla[8:]
big_category = dic_inventory[type_]
if not big_category.get(first2) == None:
small_category = big_category.get(first2)
distance = [abs(int(last2)-int(i)) for i in small_category]
optimal = min(zip(small_category,distance),key=lambda x:x[1])[0]
return 'HLA-' + str(type_) + '*' + str(first2) + str(optimal)
else:
small_category = list(big_category.keys())
distance = [abs(int(first2)-int(i)) for i in small_category]
optimal = min(zip(small_category,distance),key=lambda x:x[1])[0]
return 'HLA-' + str(type_) + '*' + str(optimal) + str(big_category[optimal][0])
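# Illustrative usage (hypothetical two-allele inventory): an allele absent from the
# paratope table is mapped to the numerically closest allele that shares the same gene
# and two-digit group, falling back to the closest group otherwise.
def _example_rescue_unknown_hla():
    inventory = ['HLA-A*0101', 'HLA-A*0301']
    dic = dict_inventory(inventory)
    return rescue_unknown_hla('HLA-A*0302', dic)   # -> 'HLA-A*0301'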
class dataset(Dataset):
# the output would be ([seq_len,21],[batch]),(),()
def __init__(self,ori,hla,dic_inventory):
self.ori = ori
self.hla = hla
self.dic_inventory = dic_inventory
self.paratope_dic()
self.middle = self.convert()
#self.new = self.padding()
self.new = self.padding_oneside()
#self.new = self.padding_onehot()
def __len__(self):
return len(self.new)
def __getitem__(self,idx):
return self.new[idx]
def padding(self):
len_values = [tup[0].shape[0] for tup in self.middle]
#max_length = max(len_values)
max_length = 50
# padding
bucket = []
for item in self.middle:
length = item[0].shape[0]
gap = max_length - length
if gap % 2 == 0: # even number
gapped_left, gapped_right = gap // 2, gap //2 # will be an int
else: # odd number
if np.random.uniform() < 0.5: # randomly decide which side will have one more padded value
gapped_left = gap // 2
gapped_right = gap - gapped_left
else:
gapped_right = gap // 2
gapped_left = gap - gapped_right
padding_left = torch.empty([gapped_left,20]).fill_(-1.0)
padding_right = torch.empty([gapped_right,20]).fill_(-1.0)
final = torch.cat([padding_left,item[0],padding_right],dim=0)
bucket.append((final,item[1]))
self.max_length = max_length
return bucket
def padding_onehot(self):
len_values = [tup[0].shape[0] for tup in self.middle]
max_length = max(len_values)
#max_length = 48
# padding
bucket = []
for item in self.middle:
length = item[0].shape[0]
gap = max_length - length
if gap % 2 == 0: # even number
gapped_left, gapped_right = gap // 2, gap //2 # will be an int
else: # odd number
if np.random.uniform() < 0.5: # randomly decide which side will have one more padded value
gapped_left = gap // 2
gapped_right = gap - gapped_left
else:
gapped_right = gap // 2
gapped_left = gap - gapped_right
padding_left = torch.empty([gapped_left,20]).fill_(0.05)
padding_right = torch.empty([gapped_right,20]).fill_(0.05)
final = torch.cat([padding_left,item[0],padding_right],dim=0)
bucket.append((final,item[1]))
self.max_length = max_length
return bucket
def padding_oneside(self):
len_values = [tup[0].shape[0] for tup in self.middle]
#max_length = max(len_values)
max_length = 56
# padding
bucket = []
for item in self.middle:
length = item[0].shape[0]
gap = max_length - length
padding_right = torch.empty([gap,21]).fill_(-1.0)
final = torch.cat([item[0],padding_right],dim=0)
bucket.append((final,item[1]))
self.max_length = max_length
return bucket
def paratope_dic(self):
df = self.hla
self.dic = {}
for i in range(df.shape[0]):
hla = df['hla'].iloc[i]
paratope = df['paratope'].iloc[i]
self.dic[hla] = paratope
@staticmethod
def onehot_classic(peptide):
amino = 'ARNDCQEGHILKMFPSTWYV'
encoded = torch.empty([len(peptide),20])
onehot = torch.eye(20)
for i in range(len(peptide)):
encoded[i,:] = onehot[:,amino.index(peptide[i])]
return encoded
@staticmethod
def onehot_adapt(peptide):
amino = 'ARNDCQEGHILKMFPSTWYV'
encoded = torch.empty([len(peptide),20])
onehot = torch.eye(20)
mask = torch.eye(20)
onehot = onehot.masked_fill(mask == 1, 0.9)
onehot = onehot.masked_fill(mask == 0, 0.005)
for i in range(len(peptide)):
encoded[i,:] = onehot[:,amino.index(peptide[i])]
return encoded
@staticmethod
def blosum50_new(peptide):
amino = 'ARNDCQEGHILKMFPSTWYV-'
dic = MatrixInfo.blosum50
matrix = np.zeros([21,21])
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
try:
matrix[i,j] = dic[(amino[i],amino[j])]
except KeyError:
try:
matrix[i,j] = dic[(amino[j],amino[i])]
except:
matrix[i,j] = -1
encoded = torch.empty([len(peptide),21]) # (seq_len,21)
for i in range(len(peptide)):
encoded[i,:] = torch.from_numpy(matrix[:,amino.index(peptide[i])])
return encoded
@staticmethod
def blosum50(peptide):
amino = 'ARNDCQEGHILKMFPSTWYV'
dic = MatrixInfo.blosum50
matrix = np.zeros([20,20])
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
try:
matrix[i,j] = dic[(amino[i],amino[j])]
except KeyError:
matrix[i,j] = dic[(amino[j],amino[i])]
encoded = torch.empty([len(peptide),20]) # (seq_len,20)
for i in range(len(peptide)):
encoded[i,:] = torch.from_numpy(matrix[:,amino.index(peptide[i])])
return encoded
def convert(self):
lis = []
df = self.ori
for i in range(df.shape[0]):
#print(i)
peptide = df['peptide'].iloc[i]
hla_type = df['HLA'].iloc[i]
immuno = df['immunogenecity'].iloc[i]
try:
cat = self.dic[hla_type] + peptide
except KeyError:
hla_type = rescue_unknown_hla(hla_type, self.dic_inventory)
cat = self.dic[hla_type] + peptide
cat = cat.upper()
if 'X' in cat: continue
X = dataset.blosum50_new(cat).float() # 2-d tensor
#X = dataset.onehot_classic(cat).float()
y = torch.tensor(immuno).long() # 0-d tensor
lis.append((X,y))
return lis
def balancedBinaryLoader(dataset,batch_size):
dic = {'0':[],'1':[]}
for i in range(len(dataset)):
X = dataset[i][0]
y = dataset[i][1]
if y == 1: dic['1'].append(i)
elif y == 0: dic['0'].append(i)
sample_size = batch_size // 2 # will be an int, make sure batch_size is an even number
negative = Subset(dataset,dic['0']) # dataset.Subset object
positive = Subset(dataset,dic['1'])
# print(len(positive),type(positive))
negative_loader = DataLoader(negative,batch_size=sample_size,shuffle=True,drop_last=True)
positive_loader = DataLoader(positive,batch_size=sample_size,shuffle=True,drop_last=True)
neg_chunks_X = []
neg_chunks_y = []
for idx,(X,y) in enumerate(negative_loader):
neg_chunks_X.append(X)
neg_chunks_y.append(y)
pos_chunks_X = []
pos_chunks_y = []
for idx,(X,y) in enumerate(positive_loader):
pos_chunks_X.append(X)
pos_chunks_y.append(y)
pos_chunks_X_cycle = pos_chunks_X * 10
pos_chunks_y_cycle = pos_chunks_y * 10
chunks_X_list = []
chunks_y_list = []
for i in range(len(neg_chunks_X)):
chunks_X = torch.cat([neg_chunks_X[i],pos_chunks_X_cycle[i]],dim=0)
chunks_y = torch.cat([neg_chunks_y[i],pos_chunks_y_cycle[i]],dim=0)
chunks_X_list.append(chunks_X)
chunks_y_list.append(chunks_y)
loader = list(zip(chunks_X_list,chunks_y_list)) # zip can only be iterated once
return loader
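# Minimal usage sketch (the ori/hla frames and dic_inventory are assumed to be loaded
# as described in the comments above): build the blosum-encoded dataset and iterate
# over balanced mini-batches.
def _example_balanced_training_batches(ori, hla, dic_inventory, batch_size=64):
    ds = dataset(ori, hla, dic_inventory)
    loader = balancedBinaryLoader(ds, batch_size)
    for X, y in loader:
        # X: [batch_size, 56, 21] paratope+peptide encodings, y: [batch_size] labels;
        # positives are oversampled so each batch is half positive / half negative
        pass
    return loader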
##############################################################################################
'''
all_assay = pd.read_csv('/Users/ligk2e/Desktop/immunogenecity/iedb/all_assay.csv',skiprows=0,header=1)
all_assay_extract = all_assay[['Description','Qualitative Measure','Allele Name']]
all_assay_extract = replace_immunogenecity_df(all_assay_extract)
all_assay_extract = replace_hla_allele_df(all_assay_extract)
all_assay_extract = purge_peptide_df(all_assay_extract)
all_assay_extract.columns = ['peptide','immunogenecity','HLA']
all_assay_extract = all_assay_extract[['peptide','HLA','immunogenecity']]
all_assay_extract.to_csv('/Users/ligk2e/Desktop/immunogenecity/iedb/all_assay.txt',sep='\t',index=None)
{ cat all_assay.txt | head -n 1; cat all_assay.txt | tail -n +2 | sort -u -k1,2; } > all_assay_new.txt
cat all_assay_new.txt | awk 'length($1) < 15 {print $0}' > all_assay_new_filster15.txt
on linux system:
cat all_assay_new_filster15.txt | tail -n +2 | shuf > shuffle_all.txt
{ echo -e "peptide\tHLA\timmunogenecity"; cat shuffle_all.txt | head -n 30000; } > shuffle_training.txt
{ echo -e "peptide\tHLA\timmunogenecity"; cat shuffle_all.txt | tail -n 3444; } > shuffle_testing.txt
'''
def purge_peptide(entry):
cond = True
if '+' in entry:
cond = False
return entry,cond
def purge_peptide_df(df):
col = []
conds = []
for i in range(df.shape[0]):
entry = df.iloc[i]['Description']
entry,cond = purge_peptide(entry)
col.append(entry)
conds.append(cond)
df.update(pd.DataFrame({'Description':col}))
df = df.loc[pd.Series(conds)]
df = df.set_index(pd.Index(np.arange(df.shape[0])))
return df
def replace_immunogenecity(entry):
if entry == 'Positive' or entry == 'Positive-High' or entry == 'Positive-Intermediate' or entry == 'Positive-Low':
entry = 1
else:
entry = 0
return entry
def replace_immunogenecity_df(df):
col = []
for i in range(df.shape[0]):
entry = df.iloc[i]['Qualitative Measure']
entry = replace_immunogenecity(entry)
col.append(entry)
df.update(pd.DataFrame({'Qualitative Measure':col}))
return df
def replace_hla_allele(entry):
cond = True
entry = entry.replace(':','')
if len(entry) < 9:
a = entry[0:5]
b = entry[5:]
if len(b) == 1: b = '0' + b
if 'w' in b: b = b.replace('w','0')
entry = a + '*' + b + '01'
if ' ' in entry: cond = False
return entry,cond
def replace_hla_allele_df(df):
col = []
conds = []
for i in range(df.shape[0]):
entry = df.iloc[i]['Allele Name']
entry,cond = replace_hla_allele(entry)
col.append(entry)
conds.append(cond)
df.update(pd.DataFrame({'Allele Name':col}))
df = df.loc[pd.Series(conds)]
df = df.set_index(pd.Index(np.arange(df.shape[0])))
return df
######################################################################################
# preprocess AA index
'''
csplit aaindex3 '/\/\//+1' {45} # 47 indices in total, repeat 45 more times, means do it 46 times in total, the remaining one will automatically be written into a new file xx46
a=0; for i in xx*; do cat $i | tail -n 21 | head -n 20 > index$a.txt; a=$[$a+1]; done
for i in xx*; do grep -e "^D" $i; done
# index42,index43,index45 are non-symmetrical, discard them
# get [210 40] matrix
result = convert_all_index_before_pca()
execute:
normalize_before_pca()
scale_before_pca()
pca_get_components()
pca_apply_reduction() # [210,25] matrix
'''
def count_character(string,tar):
counter = 0
for char in string:
if char == tar:
counter += 1
return counter
def impute_triangle(int_path):
with open(int_path,'r') as f, open('{0}.tmp.txt'.format(int_path),'w') as f1:
data = f.readlines()
for row in data: # each item in the data array corrspond to a row in lower triangle matrix
row = row.lstrip(' ') # remove leading whitespace
row = re.sub(' +',' ',row) # compress multiple whitespace to one
num_space = count_character(row,' ') # how many whitespace
num_newline = count_character(row,'\n') # how many newline
num = num_space + num_newline # how many items
diff = 20 - num
row = row.rstrip('\n') + ' ' # will be '-3.4 -4.3 '
row += '0.0 '*diff
row = row.rstrip(' ') + '\n' # will be '-3.4 -4.3 0.0 0.0 0.0\n'
f1.write(row)
index = np.loadtxt('{0}.tmp.txt'.format(int_path))
return index
def extract_pair_metircs(index):
amino = 'ARNDCQEGHILKMFPSTWYV'
frame = np.empty([210,1])
counter = -1
for i in range(index.shape[0]):
for j in range(index.shape[1]):
entry = index[i,j]
if not entry==0:
counter += 1
frame[counter] = entry
return frame
def convert_all_index_before_pca():
array = []
for i in range(47):
if not i in [42,43,45]:
if not i in [2,22,23,25]: # Glu is not available, NA, just discard for now
index=impute_triangle('/data/salomonis2/LabFiles/Frank-Li/immunogenecity/AAindex3/index{0}.txt'.format(i))
frame=extract_pair_metircs(index)
array.append(frame)
result = np.concatenate(array,axis=1)
return result # should be [210,40]
def normalize_before_pca(result):
# first figure out what columns have extreme value and needs to be normalizced beforehand
troubles = [0,1,7,9,11,14,15,28,29,30]
for i in troubles:
subject = result[:,i].reshape(1,-1) # Normalizer have to be applied to a matrix, and perform by row, each subject [1,210]
t1 = Normalizer()
new = t1.fit_transform(subject).reshape(-1) # [210,] 1d
result[:,i] = new
return result
def scale_before_pca(result):
t1 = StandardScaler()
new = t1.fit_transform(result)
return new
def pca_get_components(result):
pca= PCA()
pca.fit(result)
result = pca.explained_variance_ratio_
sum_ = 0
for index,var in enumerate(result):
sum_ += var
if sum_ > 0.95:
return index # 25 components
def pca_apply_reduction(result):
pca = PCA(n_components=25) # or strictly speaking ,should be 26, since python is 0-index
new = pca.fit_transform(result)
return new
def wrapper_preprocess():
result = convert_all_index_before_pca()
result = normalize_before_pca(result)
result = scale_before_pca(result)
result = pca_apply_reduction(result)
return result
def sum_to_itself(a):
if a == 0:
return 0
elif a == 1:
return 1
elif a == 2:
return 1+2
else:
return sum_to_itself(a-1) + a
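# Worked check (illustrative): for the pair D -> Q in 'ARNDCQEGHILKMFPSTWYV',
# amino.index('Q') = 5 and amino.index('D') = 3, so the pair lives in row
# sum_to_itself(5) + 3 = 15 + 3 = 18 of the [210, 25] index matrix -- the same
# bookkeeping that CNN_dataset.get_index_value relies on below.
def _example_pair_row_index():
    amino = 'ARNDCQEGHILKMFPSTWYV'
    first, second = amino.index('Q'), amino.index('D')
    return sum_to_itself(first) + second   # -> 18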
class CNN_dataset(Dataset):
def __init__(self,ori,hla,dic_inventory,index): # index is [210,25] matrix
self.ori= ori
self.hla = hla
self.dic_inventory = dic_inventory
self.index = index
self.paratope_dic() # will get self.dic in order to get hla sequence in self.convert function
self.new = self.convert() # self.new [ (tensor(25,pep_max,hla_max),0-d tensor as y), (), () ]
def __len__(self):
return len(self.new)
def __getitem__(self,index):
return self.new[index]
def paratope_dic(self):
df = self.hla
self.dic = {}
for i in range(df.shape[0]):
hla = df['hla'].iloc[i]
paratope = df['paratope'].iloc[i]
self.dic[hla] = paratope
def get_index_value(self,tup): # input will be a tuple (a,b), a, b will be the amino acid one letter character like ('D','Q')
amino = 'ARNDCQEGHILKMFPSTWYV'
first = amino.index(tup[0].upper())
second = amino.index(tup[1].upper())
if first < second:
first,second = second,first # first will be row index, second will be column index in [20*20] matrix
row_index = sum_to_itself(first) + second # the row index in [210,25] matrix
values = self.index[row_index,:]
return values # 1d numpy array
def convert(self):
df = self.ori
peptides = df['peptide']
self.pep_max = max([len(pep) for pep in peptides])
self.hla_max = 46 # hardcode it
self.components = 25 # hardcode it
        # NOTE: each per-sample tensor is allocated inside the row loop below so that
        # every appended (result, y) pair owns its own storage instead of all samples
        # sharing (and repeatedly overwriting) a single tensor.
        '''
        To clarify:
        we initialize an empty 3-d array with the same shape as the desired output,
        so at each 2-d plane, position [i,j] should hold a vector of length 25.
        The question is how to find the corresponding row in the [210,25] matrix.
        Think about it with an example, say we want the row index of D -> Q:
        amino = 'ARNDCQEGHILKMFPSTWYV'
        amino.index('D') returns 3 and amino.index('Q') returns 5. Given how the 210 aa-pairs are
        generated and ordered, the (D->Q) value sits where the row is Q and the column is D, i.e.
        always from the larger index (Q, 5) to the smaller index (D, 3); this becomes clear if you
        draw the 20*20 lower triangle and trace how the 210 pairs are generated one by one.
        Still focusing on D -> Q: row-wise there are 5 amino acids before Q (index of Q is 5),
        contributing 1 + 2 + 3 + 4 + 5 pairs; then within Q's row, 3 amino acids sit in front of D
        (index of D is 3). So (1 + 2 + 3 + 4 + 5) + 3 = 18 items precede (D -> Q), and since Python
        is 0-indexed, (D -> Q) is row 18 of the [210,25] index matrix.
        Generalizing this rule gives the sum_to_itself-based lookup used in get_index_value.
        '''
final = []
        for row in range(df.shape[0]):
            #print(row)
            # fresh per-sample tensor (see NOTE above)
            result = torch.empty([self.components, self.pep_max, self.hla_max])
            peptide = df['peptide'].iloc[row]
hla_type = df['HLA'].iloc[row]
try:
hla_seq = self.dic[hla_type]
except KeyError:
hla_type = rescue_unknown_hla(hla_type, self.dic_inventory)
hla_seq = self.dic[hla_type]
immuno = df['immunogenecity'].iloc[row]
y = torch.tensor(immuno).long()
            '''
            Understanding the padding process below:
            say we have an 8-mer peptide and self.pep_max = 14, so diff_len = 6,
            gap_left = 3, gap_right = 3.
            gap_left_indices: 0,1,2 -- these three positions get padding instead of real encoded values
            gap_right_indices: 11,12,13 -- from range(14), take everything from 14-3=11 onward
            '''
pep_len = len(peptide)
diff_len = self.pep_max - pep_len
gap_left = diff_len // 2
gap_right = diff_len - gap_left
gap_left_indices = list(range(gap_left))
gap_right_indices = list(range(self.pep_max))[self.pep_max-gap_right:]
for i in range(result.shape[1]):
for j in range(result.shape[2]):
hla_index = j
hla_aa = hla_seq[hla_index]
if i in gap_left_indices or i in gap_right_indices:
result[:,i,j] = torch.empty(25).fill_(0.005).float()
elif hla_aa == '-':
result[:,i,j] = torch.empty(25).fill_(0.005).float()
else:
real_peptide_index = i - gap_left # say the i=4, [0,1,2] is gapped, 4-3=1, the second aa in peptide
real_peptide = peptide[real_peptide_index]
if real_peptide == 'X':
result[:,i,j] = torch.empty(25).fill_(0.005).float()
else:
try:
result[:,i,j] = torch.from_numpy(self.get_index_value((real_peptide,hla_aa))).float()
except: print(real_peptide,hla_aa); raise Exception
final.append((result,y))
return final
######################################################################################
def pytorch_training(modelObj,training_dataset,optimizer,criterion,batch_size,num_epochs,outdir):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = modelObj().to(device)
training_loader = DataLoader(training_dataset,batch_size=batch_size,shuffle=True)
num_epochs = num_epochs
for epoch in range(num_epochs):
loss_list = []
acc_list = []
for i in training_loader:
X = i[0].to(device)
y = i[1].to(device)
optimizer.zero_grad()
y_pred = model(X)
print(y_pred)
loss = criterion(y_pred,y)
loss.backward()
optimizer.step()
loss_list.append(loss.item())
num_correct = 0
num_samples = 0
_,predictions = y_pred.max(1)
print(predictions)
print(y)
num_correct += (predictions == y).sum() # will generate a 0-D tensor, tensor(49), float() to convert it
num_samples += predictions.size(0)
acc_list.append(float(num_correct)/float(num_samples)*100)
loss,acc = sum(loss_list)/len(loss_list),sum(acc_list)/len(acc_list)
print('Epoch {0}/{1} loss: {2:6.2f} - accuracy{3:6.2f}%'.format(epoch+1,num_epochs,loss,acc))
torch.save(model.state_dict(),outdir)
###########################################################################################################################
# FChelper dataset
class FC_dataset(Dataset):
def __init__(self,ori,hla,dic_inventory,index): #index [210, 25] matrix
self.ori = ori
self.hla = hla
self.dic_inventory = dic_inventory
self.index = index
self.paratope_dic()
self.new = self.convert() # [ (tensor: [10*46*25=11500] - 1-d tensor, tensor:[] - 0-d tensor), (), () ]
def __len__(self):
return len(self.new)
def __getitem__(self,index):
return self.new[index]
def paratope_dic(self):
df = self.hla
self.dic = {}
for i in range(df.shape[0]):
hla = df['hla'].iloc[i]
paratope = df['paratope'].iloc[i]
self.dic[hla] = paratope
def get_index_value(self,tup): # input will be a tuple (a,b), a, b will be the amino acid one letter character like ('D','Q')
amino = 'ARNDCQEGHILKMFPSTWYV'
first = amino.index(tup[0].upper())
second = amino.index(tup[1].upper())
if first < second:
first,second = second,first # first will be row index, second will be column index in [20*20] matrix
row_index = sum_to_itself(first) + second # the row index in [210,25] matrix
values = self.index[row_index,:]
return values # 1d numpy array
def convert(self):
df = self.ori
peptides = df['peptide']
self.pep_max = max([len(pep) for pep in peptides])
self.hla_max = 46
self.components = 25
final = []
for row in range(df.shape[0]):
#print(row)
peptide = df['peptide'].iloc[row]
hla_type = df['HLA'].iloc[row]
try:
hla_seq = self.dic[hla_type]
except KeyError:
hla_type = rescue_unknown_hla(hla_type,self.dic_inventory)
hla_seq = self.dic[hla_type]
immuno = df['immunogenecity'].iloc[row]
y = torch.tensor(immuno).long()
pep_len = len(peptide)
diff_len = self.pep_max - pep_len
gap = diff_len # either be 0 or 1
gap_indices = list(range(gap)) # either [] or [0]
result = torch.tensor([]).float()
for i in range(self.pep_max):
for j in range(self.hla_max):
if i in gap_indices or peptide[i-gap] == 'X' or hla_seq[j] == '-':
patch = torch.empty([25]).fill_(0.005).float()
result = torch.cat([result,patch],dim=0)
else:
p = peptide[i-gap]
h = hla_seq[j]
patch = torch.from_numpy(self.get_index_value((p,h))).float()
result = torch.cat([result,patch],dim=0)
#print(result[1200:1300])
final.append((result,y))
return final # [ (tensor:[11500],tesnor:[]),(),()
##################################################################################################################
# construct for transformer model, dataset_add3, which add hydrophobicity, bulkiness and polarity into encoding schema
class dataset_add3(Dataset):
# the output would be ([seq_len,24],[batch]),(),()
properties = {
# 'AA':[Hydrophobicity(Kyte-Doolittle),Bulkiness(Zimmerman),'Polarity(Grahtham)']
# source: TCR contact residue hydrophobicity is a hallmark of immunogenic CD8+ T cell epitopes
'A':[1.8,11.5,8], # Alanine #1
'C':[2.5,13.46,5.5], # Cysteine #2
'D':[-3.5,11.68,13], # Aspartic acid #3
'E':[-3.5,13.57,12.3], # Glutamic acid #4
'F':[2.8,19.8,5.2], # Phenylalanine #5
'G':[-0.4,3.4,9], # Glycine #6
'H':[-3.2,13.69,10.4], # histidine #7
'I':[4.5,21.4,5.2], # Isoleicine #8
'K':[-3.9,15.71,11.3], # Lysine #9
'L':[3.8,21.4,4.9], # Leucine #10
'M':[1.9,16.25,5.7], # Methionine #11
'N':[-3.5,12.82,11.6], # Asparagine #12
'P':[-1.6,17.43,8], # Proline #13
'Q':[-3.5,14.45,10.5], # Glutamine #14
'R':[-4.5,14.28,10.5], # Arginine #15
'S':[-0.8,9.47,9.2], # Serine #16
'T':[-0.7,15.77,8.6], # Threonine #17
'V':[4.2,21.57,5.9], # Valine #18
'W':[-0.9,21.67,5.4], # Tryptophan #19
'Y':[-1.3,18.03,6.2], # Tyrosine #20
'-':[-0.49,15.37,8.32]} # other: X, -
def __init__(self,ori,hla,dic_inventory):
self.ori = ori
self.hla = hla
self.dic_inventory = dic_inventory
self.paratope_dic()
self.middle = self.convert()
self.new = self.padding_oneside()
def __len__(self):
return len(self.new)
def __getitem__(self,idx):
return self.new[idx]
def padding_oneside(self):
len_values = [tup[0].shape[0] for tup in self.middle]
#max_length = max(len_values)
max_length = 56
# padding
bucket = []
for item in self.middle:
length = item[0].shape[0]
gap = max_length - length
if gap > 0:
padding_right = torch.from_numpy(self.matrix_whole[:,20]).reshape(1,-1).expand(gap,24).float() # treat as '-', will be [gap,24]
final = torch.cat([item[0],padding_right],dim=0)
else:
final = item[0]
bucket.append((final,item[1]))
self.max_length = max_length
return bucket
def paratope_dic(self):
df = self.hla
self.dic = {}
for i in range(df.shape[0]):
hla = df['hla'].iloc[i]
paratope = df['paratope'].iloc[i]
self.dic[hla] = paratope
def blosum50_new(self,peptide):
amino = 'ARNDCQEGHILKMFPSTWYV-'
dic = MatrixInfo.blosum50
matrix = np.zeros([21,21])
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
try:
matrix[i,j] = dic[(amino[i],amino[j])]
except KeyError:
try:
matrix[i,j] = dic[(amino[j],amino[i])]
except:
matrix[i,j] = -1
mat_3 = np.zeros([3,21]) # contain 3 extra properties
for j in range(mat_3.shape[1]):
aa = amino[j]
mat_3[:,j] = dataset_add3.properties[aa]
self.matrix_whole = np.concatenate([matrix,mat_3],axis=0) # [24,21]
encoded = torch.empty([len(peptide),24]) # (seq_len,24)
for i in range(len(peptide)):
encoded[i,:] = torch.from_numpy(self.matrix_whole[:,amino.index(peptide[i])])
return encoded
def convert(self):
lis = []
df = self.ori
for i in range(df.shape[0]):
#print(i)
peptide = df['peptide'].iloc[i]
hla_type = df['HLA'].iloc[i]
immuno = df['immunogenecity'].iloc[i]
try:
cat = self.dic[hla_type] + peptide
except KeyError:
hla_type = rescue_unknown_hla(hla_type, self.dic_inventory)
cat = self.dic[hla_type] + peptide
cat = cat.upper()
if 'X' in cat: continue
X = self.blosum50_new(cat).float() # 2-d tensor
y = torch.tensor(immuno).long() # 0-d tensor
lis.append((X,y))
return lis
#######################################################################################
# AAindex1, explore new way to embed amino acid
'''
csplit -n 3 aaindex1 '/\/\/+1' {564}
for i in xx*; do a=${i:2:3}; cat $i | tail -n 3 | head -n 2 > index$a.txt; done
'''
def add_X(array):
me = np.mean(array)
array = np.append(array,me)
return array
def read_index(path):
with open(path,'r') as f:
data = f.readlines()
array = []
for line in data:
line = line.lstrip(' ').rstrip('\n')
line = re.sub(' +',' ',line)
items = line.split(' ')
items = [float(i) for i in items]
array.extend(items)
    array = np.array(array)
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, argparse
import csv
from run1 import get_params_office_world, get_params_traffic_world, get_params_craft_world
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y.append(sum(y[-5:])/len(y[-5:]))
y.append(sum(y[-5:]) / len(y[-5:]))
y.append(sum(y[-5:]) / len(y[-5:]))
y.append(sum(y[-5:]) / len(y[-5:]))
y.append(sum(y[-5:]) / len(y[-5:]))
y_smooth = np.convolve(y[0:-5], box, mode='same')
y_smooth[-1] = y_smooth[-6]
y_smooth[-2] = y_smooth[-6]
y_smooth[-3] = y_smooth[-6]
y_smooth[-4] = y_smooth[-6]
y_smooth[-5] = y_smooth[-6]
return y_smooth
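# Tiny usage illustration (made-up reward curve): the box filter has width 5, and the
# tail handling above pins the last five smoothed points to the sixth-from-last value
# so the 'same'-mode convolution boundary does not drag the curve down at the end.
# Note that smooth() appends to the list it is given, so a copy is passed here.
def _example_smooth():
    rewards = [0.0, 0.1, 0.3, 0.6, 0.8, 0.9, 1.0]
    return smooth(list(rewards), 5)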
def export_results_traffic_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[0].num_steps
max_step = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("traffic" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row, 25))
p50_hrl.append(np.percentile(row, 50))
p75_hrl.append(np.percentile(row, 75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row, 25))
p50.append(np.percentile(row, 50))
p75.append(np.percentile(row, 75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row, 25))
p50s.append(np.percentile(row, 50))
p75s.append(np.percentile(row, 75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
if algorithm == "all":
plt.xlim(0,max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '', 'D-DQN','','','HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("traffic" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [np.percentile(row, 75)]
for step in steps:
if step in p25_qdict:
p25_q.append(sum(p25_qdict[step]) / len(p25_qdict[step]))
p50_q.append(sum(p50_qdict[step]) / len(p50_qdict[step]))
p75_q.append(sum(p75_qdict[step]) / len(p75_qdict[step]))
if step in p25_hrldict:
p25_hrl.append(sum(p25_hrldict[step]) / len(p25_hrldict[step]))
p50_hrl.append(sum(p50_hrldict[step]) / len(p50_hrldict[step]))
p75_hrl.append(sum(p75_hrldict[step]) / len(p75_hrldict[step]))
if step in p25dict:
p25.append(sum(p25dict[step]) / len(p25dict[step]))
p50.append(sum(p50dict[step]) / len(p50dict[step]))
p75.append(sum(p75dict[step]) / len(p75dict[step]))
if step in p25sdict:
p25s.append(sum(p25sdict[step]) / len(p25sdict[step]))
p50s.append(sum(p50sdict[step]) / len(p50sdict[step]))
p75s.append(sum(p75sdict[step]) / len(p75sdict[step]))
if step in p25_dqndict:
p25_dqn.append(sum(p25_dqndict[step]) / len(p25_dqndict[step]))
p50_dqn.append(sum(p50_dqndict[step]) / len(p50_dqndict[step]))
p75_dqn.append(sum(p75_dqndict[step]) / len(p75_dqndict[step]))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25) - 1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25) - 1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s) - 1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q) - 1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn) - 1) * step_unit, len(p25_dqn), endpoint=True)
            plt.xlim(0, (len(p25_dqn) - 1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl) - 1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '','','D-DQN','', '', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
def export_results_office_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_office_world('../experiments/office/tests/ground_truth.txt')[0].num_steps
max_step = get_params_office_world('../experiments/office/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("office" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row, 25))
p50_hrl.append(np.percentile(row, 50))
p75_hrl.append(np.percentile(row, 75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row, 25))
p50.append(np.percentile(row, 50))
p75.append(np.percentile(row, 75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row, 25))
p50s.append(np.percentile(row, 50))
p75s.append(np.percentile(row, 75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("office" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [np.percentile(row, 75)]
for step in steps:
if step in p25_qdict:
p25_q.append(sum(p25_qdict[step])/len(p25_qdict[step]))
p50_q.append(sum(p50_qdict[step])/len(p50_qdict[step]))
p75_q.append(sum(p75_qdict[step])/len(p75_qdict[step]))
if step in p25_hrldict:
p25_hrl.append(sum(p25_hrldict[step])/len(p25_hrldict[step]))
p50_hrl.append(sum(p50_hrldict[step])/len(p50_hrldict[step]))
p75_hrl.append(sum(p75_hrldict[step])/len(p75_hrldict[step]))
if step in p25dict:
p25.append(sum(p25dict[step])/len(p25dict[step]))
p50.append(sum(p50dict[step])/len(p50dict[step]))
p75.append(sum(p75dict[step])/len(p75dict[step]))
if step in p25sdict:
p25s.append(sum(p25sdict[step])/len(p25sdict[step]))
p50s.append(sum(p50sdict[step])/len(p50sdict[step]))
p75s.append(sum(p75sdict[step])/len(p75sdict[step]))
if step in p25_dqndict:
p25_dqn.append(sum(p25_dqndict[step]) / len(p25_dqndict[step]))
p50_dqn.append(sum(p50_dqndict[step]) / len(p50_dqndict[step]))
p75_dqn.append(sum(p75_dqndict[step]) / len(p75_dqndict[step]))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25) - 1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25) - 1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s) - 1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn) - 1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn) - 1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q) - 1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl) - 1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
if algorithm == "all":
plt.xlim(0,max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.32), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
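# Hypothetical usage sketch (added for illustration; the enclosing office-world
# export function's name and signature are assumed, since its definition is not
# visible in this excerpt): calling it with algorithm="all" aggregates the
# per-task CSV files, plots the median reward with an interquartile band for
# every algorithm, and writes the figure to ../plotdata/figure.png.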
def export_results_craft_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_craft_world('../experiments/craft/tests/ground_truth.txt')[0].num_steps
max_step = get_params_craft_world('../experiments/craft/tests/ground_truth.txt')[3].total_steps
steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("craft" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row,25))
p50_q.append(np.percentile(row,50))
p75_q.append(np.percentile(row,75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row,25))
p50_hrl.append(np.percentile(row,50))
p75_hrl.append(np.percentile(row,75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row,25))
p50.append(np.percentile(row,50))
p75.append(np.percentile(row,75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row,25))
p50s.append(np.percentile(row,50))
p75s.append(np.percentile(row,75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
if algorithm == "all":
plt.xlim(0,max_step)
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '','','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("craft" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append( | np.percentile(row, 25) | numpy.percentile |
# -*- coding: utf-8 -*-
import helpers as util
import matrices
import math
import numpy as np
from scipy.spatial.distance import cdist
def dfs(M, fns, word):
"""Compute the Document Frequency Score of a word from a document-word count matrix.
"""
word_index = fns.index(word)
# Word count over all documents. It's a Matrix (2d ndarray).
W = M[:, [word_index]]
# The total number of Documents is just the number of rows of the matrix.
n_total_documents = W.shape[0]
# The number of documents where the word appears is the number of nonzero entries in that column
document_frequency = len(W.nonzero()[0])
# Scaled document frequency in relation to the total number of documents
rdfm = document_frequency / n_total_documents
return rdfm
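# Illustrative sketch (not part of the original module): how dfs could be called
# on a tiny document-word count matrix. The matrix and vocabulary are made up.
def _example_dfs_usage():
    M = np.array([[1, 0, 2],
                  [0, 0, 1],
                  [3, 1, 0]])  # 3 documents x 3 words
    fns = ['apple', 'banana', 'cherry']
    # 'apple' has nonzero counts in 2 of the 3 documents, so the score is 2/3.
    return dfs(M, fns, 'apple')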
def nzds(M, fns, word):
"""Computes the Non Zero Dimensions Score for @word.
Computes the count of unique cooccurrences for the given word divided by the total number of words.
The result is the percentage of words that @word cooccurs with.
"""
context_vector = M[fns.index(word)]
n_total_dimensions = len(fns)
n_non_zero_dimensions = len(context_vector.nonzero()[0])
return n_non_zero_dimensions / n_total_dimensions
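# Illustrative sketch (not part of the original module): nzds expects a square
# word-word cooccurrence matrix; the toy matrix below is made up.
def _example_nzds_usage():
    WWC = np.array([[0, 1, 1],
                    [1, 0, 0],
                    [1, 0, 0]])
    fns = ['apple', 'banana', 'cherry']
    # 'apple' cooccurs with 2 of the 3 dimensions, so the score is 2/3.
    return nzds(WWC, fns, 'apple')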
def avnzds (M, fns, word):
"""Computes the Average Context Non Zero Dimensions Score for @word.
Computes the Nzd Score for every word in the context. Then returns the average.
"""
context_vector = M[fns.index(word)]
indices = np.flatnonzero(context_vector)
indices = indices[indices != 2]
M = M[indices]
n_total_dimensions = len(fns)
def ndzs_per_row( cv ):
n_non_zero_dimensions = len(cv.nonzero()[0])
return n_non_zero_dimensions / n_total_dimensions
nzdss = np.apply_along_axis( ndzs_per_row, axis=1, arr=M )
return nzdss.mean()
def tacds(WWC, fns, word, metric = 'cosine'):
"""Computes the Total Average Context Distance Score for @word.
Arguments:
WWC -- Word-Word Cooccurrence Matrix
fns -- labels for the matrix
word -- word to Compute the measure for.
"""
context_vector = WWC[fns.index(word)]
indices = np.flatnonzero(context_vector)
# The Subset of WWC with just the context vector's rows
# So that the average can be Computed more efficiently.
SWWC = WWC[indices,:]
# Compute the cosine distance between each row of SWWC.
# Gives a Square nxn Matrix with n = number of rows in SWWC
CSM = matrices.distance_matrix(SWWC, metric = metric)
# Computes the average cosine distance over all pairs of terms.
# Does NOT count the main diagonal (the distance of each row to itself, which is 0).
# That's what the masking is for.
mask = np.ones(CSM.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return CSM[mask].mean()
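# Usage sketch (illustrative only): tacds averages the pairwise distances between
# the cooccurrence vectors of all context terms of @word, e.g.
#   score = tacds(WWC, fns, 'apple', metric='cosine')
# where WWC is the word-word cooccurrence matrix and fns its row/column labels.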
def acds(WWC, fns, word, metric = 'cosine'):
"""Computes the Average Context Distance Score of each context term's cooccurrence vector
to @word's context vector
"""
context_vector = WWC[fns.index(word)]
indices = np.flatnonzero(context_vector)
# The Subset of the Cooccurrence Matrix with just the terms that appear in some context.
SWWC = WWC[indices,:]
# print(SWWC.shape)
CSM = cdist(SWWC, | np.array([context_vector]) | numpy.array |
import pickle
import random # for shuffling the dataset at the start of an epoch
"""
This version of the NN can also create arbitrary architectures, using the ReLU activation and Dropout for
regularization.
"""
# Import SVHN DATASET
with open("Datasets/processed/pickled_shvt_full_data.pkl", "br") as fh:
data = pickle.load(fh)
train_imgs = data[0].values
test_imgs = data[1].values
train_labels_one_hot = data[2].values
test_labels_one_hot = data[3].values
train_labels = data[4]
test_labels = data[5]
image_size = 28
import numpy as np
import matplotlib.pyplot as plt
#from scipy.special import softmax
#set seed for reproducible results
#np.random.seed(8)
#np.random.RandomState(8)
class billy_nn:
def linear_activation_batch(self,matrix):
return matrix
def relu_activation_batch(self,matrix):
return np.maximum(0,matrix)
def relu_derivative_batch(self, matrix):
matrix[matrix<=0] = 0
matrix[matrix>0] = 1
return matrix
def softmax_activation_batch(self, matrix):
z = matrix - np.max(matrix, axis=-1, keepdims=True)  # subtract the row-wise max to prevent overflow in exp
numerator = np.exp(z)
denominator = np.sum(numerator,1)
denominator = denominator.reshape(matrix.shape[0],-1) # (number of samples, 1)
probs = numerator/denominator
return probs
def __init__(self, architecture = [1024, 100, 10] , bias = False, activation = 'RELU', learning_rate = 0.0015,
regularizer_l2 = False, L2_term = 0.005, dropout = False, dropout_rate = 0.3):
self.bias = bias
self.activation = activation
self.architecture = architecture
self.learning_rate = learning_rate
self.regularizer_l2 = regularizer_l2
self.L2_term = L2_term
self.dropout = dropout
self.dropout_rate = dropout_rate
self.initialize_weights() #initialize weights by taking into account the architecture
def initialize_weights(self):
self.weights = []
self.biases = []
#initialize weights for arbitrary length NN
for _ in range(len(self.architecture)-1):
weight_matrix = np.random.normal(loc=0.0,scale=2/np.sqrt(self.architecture[_]+self.architecture[_+1]),
size=(self.architecture[_],self.architecture[_+1]))
self.weights.append(weight_matrix)
#biases = np.random.normal(loc=0.0, scale=1,size=(self.architecture[i+1]))
def calculate_cost_batch(self, probs, labels):
losses = labels * np.log(probs + 1e-5)  # epsilon guards against log(0) underflow
batch_loss = - losses.sum()
return batch_loss
def train_on_batch(self, batch_samples, batch_labels):
if self.dropout == False:
batch_probs, hidden_activations = self.forward_batch_propagation(batch_samples)
else:
batch_probs, hidden_activations, activation_masks = self.forward_batch_propagation_dropout(batch_samples)
#calculate batch loss
batch_loss = self.calculate_cost_batch( batch_probs, batch_labels )
self.batch_loss = batch_loss
####update weights for the batch, first backpropagate the error, and then update each weight matrix
if self.dropout == False :
self.update_weights_batch( batch_probs, hidden_activations, batch_labels, batch_samples )
else:
self.update_weights_batch_dropout( batch_probs, hidden_activations, batch_labels, batch_samples, activation_masks)
return True
def forward_batch_propagation_dropout(self,batch_samples):
# create activation masks for every hidden layer (no masks are created for the input and output layers)
activation_masks = []
nn_layers = self.architecture[1:-1] # grab the dimensions of the hidden layers, excluding the first and last layers
# nn_layers --> number of masks
for layer in nn_layers:
activation_mask = (np.random.rand(batch_samples.shape[0],layer) < self.dropout_rate) / self.dropout_rate
activation_masks.append(activation_mask)
#activation_masks.insert(0, np.ones(batch_samples.shape)) #add ones matrix for the first transformation
## forward propagation using masks
# 1. linear transformation
# 2. non-linear activation
# 3. activation_mask application
input_batch = batch_samples
hidden_activations = [] #
mask_counter = 0
for weight in self.weights:
trans_batch = np.dot(input_batch, weight) # matrix multiplication, no biases added
if weight.shape[1] == 10: # if we are multiplying by the final weight matrix
#apply softmax activation to the batch
probabilities_batch = self.softmax_activation_batch(trans_batch)
break
elif self.activation == 'RELU':
output_batch = self.relu_activation_batch(trans_batch)
output_batch = output_batch * activation_masks[mask_counter] #dropout
hidden_activations.append(output_batch)
mask_counter += 1
elif self.activation == 'LINEAR':
output_batch = self.linear_activation_batch(trans_batch)
output_batch = output_batch * activation_masks[mask_counter] #dropout
hidden_activations.append(output_batch)
input_batch = output_batch
return probabilities_batch, hidden_activations, activation_masks
def update_weights_batch_dropout(self,batch_probs, hidden_activations, batch_labels, batch_samples ,activation_masks) :
hidden_activations.reverse()
# error to propagate
output_layer_error = batch_probs - batch_labels
weights_list = list(self.weights)
weights_list.reverse()
layer_errors = []
layer_errors.append(output_layer_error.T)
error_l = output_layer_error
# back-prop using the activation masks for dropout
for i in range(len(weights_list)-1):
error_term = np.dot(weights_list[i],error_l.T)
derivative_term = self.relu_derivative_batch(hidden_activations[i].T)
#element-wise multiplication for the full error expression
error_l_minus = error_term * derivative_term
layer_errors.append(error_l_minus)
error_l = error_l_minus.T
activations = list(hidden_activations)
activations.reverse()
activations.insert(0,batch_samples)
activations.reverse()
## weight updates using the hidden activations and layer error
activation_masks.reverse()
activation_masks.append( np.ones(batch_samples.shape))
mask_counter = 0
for i in range(len(layer_errors)):
masked_activation = activations[i] * activation_masks[mask_counter] #mask activations
weight_update = | np.dot(layer_errors[i], masked_activation) | numpy.dot |
'''
This code is based on:
https://github.com/hunkim/DeepRL-Agents
http://karpathy.github.io/2016/05/31/rl/
'''
import numpy as np
import tensorflow as tf
import gym
env = gym.make('CartPole-v0')
hidden_layer_neurons = 24
learning_rate = 1e-2
# Constants defining our neural network
input_size = env.observation_space.shape[0]
output_size = 1 # logistic regression, one p output
X = tf.placeholder(tf.float32, [None, input_size], name="input_x")
# First layer of weights
W1 = tf.get_variable("W1", shape=[input_size, hidden_layer_neurons],
initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(X, W1))
# Second layer of weights
W2 = tf.get_variable("W2", shape=[hidden_layer_neurons, output_size],
initializer=tf.contrib.layers.xavier_initializer())
action_pred = tf.nn.sigmoid(tf.matmul(layer1, W2))
# Y (fake) and advantages (rewards)
Y = tf.placeholder(tf.float32, [None, output_size], name="input_y")
advantages = tf.placeholder(tf.float32, name="reward_signal")
# Loss function: log_likelihood * advantages
#log_lik = -tf.log(Y * action_pred + (1 - Y) * (1 - action_pred)) # using author(awjuliani)'s original cost function (maybe log_likelihood)
log_lik = -Y*tf.log(action_pred) - (1 - Y)*tf.log(1 - action_pred) # using logistic regression cost function
loss = tf.reduce_sum(log_lik * advantages)
# Learning
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
def discount_rewards(r, gamma=0.99):
"""Takes 1d float array of rewards and computes discounted reward
e.g. f([1, 1, 1], 0.99) -> [1, 0.99, 0.9801] -> [1.22 -0.004 -1.22]
http://karpathy.github.io/2016/05/31/rl/ """
d_rewards = np.array([val * (gamma ** i) for i, val in enumerate(r)])
# Normalize/standardize rewards
d_rewards -= d_rewards.mean()
d_rewards /= d_rewards.std()
return d_rewards
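# Illustrative note (added): per the docstring above, discount_rewards first
# scales each reward by gamma**i and then standardizes the result to zero mean
# and unit variance, so the values fed to the policy-gradient loss are centred
# advantages rather than raw returns.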
# Setting up our environment
sess = tf.Session()
sess.run(tf.global_variables_initializer())
max_num_episodes = 5000
for step in range(max_num_episodes):
# Initialize x stack, y stack, and rewards
xs = np.empty(0).reshape(0, input_size)
ys = np.empty(0).reshape(0, 1)
rewards = np.empty(0).reshape(0, 1)
reward_sum = 0
observation = env.reset()
while True:
x = np.reshape(observation, [1, input_size])
# Run the neural net to determine output
action_prob = sess.run(action_pred, feed_dict={X: x})
# Determine the output based on our net, allowing for some randomness
action = 0 if action_prob < np.random.uniform() else 1
# Append the observations and outputs for learning
xs = np.vstack([xs, x])
ys = | np.vstack([ys, action]) | numpy.vstack |
import sys
sys.path.append("../../lib/")
sys.path.append("../../pycomposer/")
import argparse
import math
import time
def get_data(size, composer):
if composer:
import composer_numpy as np
else:
import numpy as np
lats = np.ones(size, dtype="float64") * 0.0698132
lons = np.ones(size, dtype="float64") * 0.0698132
return lats, lons
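# Usage sketch (illustrative only): the benchmark below would be driven by
# something like
#   lats, lons = get_data(size, composer)
#   haversine(lats, lons, composer, threads)
# so the NumPy and composer_numpy code paths operate on identical inputs.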
def haversine(lat2, lon2, composer, threads):
if composer:
import composer_numpy as np
else:
import numpy as np
lat1 = 0.70984286
lon1 = 1.23892197
MILES_CONST = 3959.0
start = time.time()
a = np.zeros(len(lat2), dtype="float64")
dlat = np.zeros(len(lat2), dtype="float64")
dlon = np.zeros(len(lat2), dtype="float64")
end = time.time()
print("Allocation time:", end-start)
start = time.time()
np.subtract(lat2, lat1, out=dlat)
np.subtract(lon2, lon1, out=dlon)
# dlat = sin(dlat / 2.0) ** 2.0
np.divide(dlat, 2.0, out=dlat)
np.sin(dlat, out=dlat)
np.multiply(dlat, dlat, out=dlat)
# a = cos(lat1) * cos(lat2)
lat1_cos = math.cos(lat1)
np.cos(lat2, out=a)
np.multiply(a, lat1_cos, out=a)
# a = a + sin(dlon / 2.0) ** 2.0
np.divide(dlon, 2.0, out=dlon)
np.sin(dlon, out=dlon)
np.multiply(dlon, dlon, out=dlon)
np.multiply(a, dlon, out=a)
np.add(dlat, a, out=a)
c = a
np.sqrt(a, out=a)
np.arcsin(a, out=a)
np.multiply(a, 2.0, out=c)
mi = c
| np.multiply(c, MILES_CONST, out=mi) | numpy.multiply |
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_raises
from sklearn.metrics import pairwise_distances
from ...tools import power, rot_ksamp
from .. import DISCO
class TestDISCO:
@pytest.mark.parametrize(
"n, obs_stat, obs_pvalue",
[(200, 6.621905272534802, 0.001), (100, 2.675357570989666, 0.001)],
)
def test_disco_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = rot_ksamp("linear", n, 1, k=2)
stat, pvalue = DISCO().test(x, y, auto=False)
assert_almost_equal(stat, obs_stat, decimal=1)
assert_almost_equal(pvalue, obs_pvalue, decimal=1)
@pytest.mark.parametrize(
"n, obs_stat, obs_pvalue",
[(100, 2.675357570989666, 0.001)],
)
def test_rep(self, n, obs_stat, obs_pvalue):
x, y = rot_ksamp("linear", n, 1, k=2)
stat, pvalue = DISCO().test(x, y, auto=False, random_state=99)
stat2, pvalue2 = DISCO().test(x, y, auto=False, random_state=99)
assert stat == stat2
assert pvalue == pvalue2
class TestDISCOErrorWarn:
"""Tests errors and warnings derived from MGC."""
def test_diffshape(self):
# raises error if not indep test
x = np.arange(20)
y = np.arange(10)
assert_raises(ValueError, DISCO().statistic, x, y)
assert_raises(ValueError, DISCO().test, x, y)
class TestDISCOTypeIError:
def test_oned(self):
| np.random.seed(123456789) | numpy.random.seed |
"""
Copyright 2017 <NAME>, <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import itertools
import math
import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.ndimage
import matplotlib.colors
import matplotlib.pyplot as plt
from . import solver
from . import project_simplex_box
from . import pgd
import llops as yp
import llops.operators as ops
from llops.solvers import iterative, objectivefunctions
from llops import iFt, Ft
from llops.config import default_backend, default_dtype
eps = 1e-13
def dnf(x):
if len(x) == 0:
return 0
else:
# x = x / np.sum(x)
x_fft = np.fft.fft(x)
sigma_x = np.abs(x_fft) ** 2
return np.sqrt(1 / len(x) * np.sum(np.max(sigma_x) / sigma_x))
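# Illustrative sketch (not part of the original module): a single impulse has a
# flat power spectrum, so its deconvolution noise factor (DNF) is exactly 1,
# the minimum possible value; spectra with deep nulls give much larger values.
def _example_dnf_usage():
    impulse = np.zeros(16)
    impulse[0] = 1.0
    return dnf(impulse)  # == 1.0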
def cond(x):
if len(x) == 0:
return 0
else:
# x = x / np.sum(x)
x_fft = np.fft.fft(x)
sigma_x = np.abs(x_fft)
return np.max(sigma_x) / np.min(sigma_x)
def vector(pulse_count, kernel_length=None,
method='random_phase', n_tests=100, metric='dnf', dtype=None, backend=None):
"""
This is a helper function for solving for a blur vector in terms of it's condition #
"""
# Parse dtype and backend
dtype = dtype if dtype is not None else yp.config.default_dtype
backend = backend if backend is not None else yp.config.default_backend
# Calculate kernel length if not provided
if kernel_length is None:
kernel_length = 2 * pulse_count
# Compute many kernels
kernel_list = []
for _ in range(n_tests):
# Generate blur kernel
if method == 'random_phase':
# Ensure first and last time point are illuminated
indicies = np.random.choice(kernel_length, size=(pulse_count - 2), replace=False)
illum = np.zeros(kernel_length)
illum[indicies] = 1.0
illum[0], illum[-1] = 1.0, 1.0
elif method == 'random':
illum = np.random.uniform(size=kernel_length)
else:
raise ValueError('Invalid kernel generation method %s' % method)
# Append kernel to list
kernel_list.append(illum)
## Choose best kernel
if metric == 'cond':
# Determine kernel with best condition #
metric_best = 1e10
kernel_best = []
for kernel in kernel_list:
kappa = cond(kernel)
if kappa < metric_best:
kernel_best = kernel
metric_best = kappa
elif metric == 'dnf':
# Determine kernel with best dnf
metric_best = 1e10
kernel_best = []
for kernel in kernel_list:
_dnf = dnf(kernel)
if _dnf < metric_best:
kernel_best = kernel
metric_best = _dnf
else:
raise ValueError
# Normalize kernel
kernel_best /= np.sum(kernel_best)
# Cast
kernel_best = yp.cast(kernel_best, dtype, backend)
return (kernel_best, metric_best)
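# Usage sketch (illustrative only, argument values are made up):
#   blur_vector, score = vector(pulse_count=16, kernel_length=32,
#                               method='random_phase', n_tests=100, metric='dnf')
# returns the best of n_tests random candidates together with its DNF score
# (lower is better).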
def kernel(shape, pulse_count, kernel_length=None, method='random_phase',
n_tests=100, metric='dnf', axis=1, position='center'):
# Generate blur vector
blur_vector, _ = vector(pulse_count,
kernel_length=kernel_length,
method=method,
n_tests=n_tests,
metric=metric)
# Generate kernel from vector
return fromVector(blur_vector, shape=shape, axis=axis, position=position)
def generate(shape, blur_kernel_length, method='random_phase', axis=1,
blur_illumination_fraction=0.5, position='center',normalize=True):
# Generate blur kernel
if method == 'constant':
illum = yp.ones(blur_kernel_length) * blur_illumination_fraction
elif method == 'random_phase' or method == 'coded':
illum, _ = genRandInitialization(blur_kernel_length, blur_illumination_fraction)
elif method == 'random' or method == 'uniform':
illum = np.random.uniform(size=blur_kernel_length)
else:
assert False, "method " + method + " unrecognized"
# Generate kernel
kernel = fromVector(illum, shape, axis, position, normalize=normalize)
# Return kernel
return kernel
def fromVector(blur_vector, shape, axis=1, position='center',
normalize=True, reverse=False, interpolation_factor=1.0):
"""Converts a blur vector to a blur kernel."""
# Get length of kernel
blur_kernel_length = yp.size(blur_vector)
# Find correct dimension
ndims = len(shape)
# Expand illum to 2D and ensure it's in the correct direction
blur_vector = yp.expandDims(blur_vector, ndims)
# Reverse blur vector if requested
if reverse:
blur_vector = yp.flip(blur_vector)
# Ensure blur vector is 1D
blur_vector = yp.vec(blur_vector)
# Apply interpolation
if interpolation_factor != 1.0:
interpolated_length = int(np.round(interpolation_factor * len(blur_vector)))
blur_vector = yp.real(yp.iFt(yp.pad(yp.Ft(blur_vector), interpolated_length, center=True)))
# Ensure blur kernel has the correct dimensions
blur_vector = yp.expandDims(blur_vector, ndims)
# Rotate if necessary
if axis == 1:
blur_vector = blur_vector.T
# Position kernel in image
if position == 'center':
kernel = yp.pad(blur_vector, shape, center=True)
elif position == 'center_left':
roll_amount = [0, 0]
roll_amount[axis] = -blur_kernel_length // 2
kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
elif position == 'center_right':
roll_amount = [0, 0]
roll_amount[axis] = blur_kernel_length // 2
kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
elif position == 'origin':
kernel = yp.pad(blur_vector, shape, crop_start=(0, 0))
else:
raise ValueError('Invalid position %s' % position)
# Center kernel after pad. This is a hack.
roll_values = [1] * yp.ndim(kernel)
kernel = yp.roll(kernel, roll_values)
# Normalize kernel
if normalize:
kernel /= yp.scalar(yp.sum(kernel))
return kernel
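# Usage sketch (illustrative only): fromVector embeds a 1D blur vector into a
# 2D kernel of the requested shape, e.g.
#   kernel = fromVector(blur_vector, shape=(64, 64), axis=1, position='center')
# with the kernel normalized to sum to one unless normalize=False.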
######################################################################################################
################################ UTILITIES FOR READING FROM DATA #####################################
######################################################################################################
def blurVectorsFromDataset(dataset, dtype=None, backend=None, debug=False,
use_phase_ramp=False, corrections={}):
"""
This function generates the object size, image size, and blur kernels from
a comptic dataset object.
Args:
dataset: An io.Dataset object
dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
Returns:
object_size: The object size this dataset can recover
image_size: The computed image size of the dataset
blur_kernel_list: A dictionary of blur kernels lists, one key per color channel.
"""
dtype = dtype if dtype is not None else yp.config.default_dtype
backend = backend if backend is not None else yp.config.default_backend
# Calculate effective pixel size if necessary
if dataset.metadata.system.eff_pixel_size_um is None:
dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
(dataset.metadata.objective.mag * dataset.metadata.system.mag)
# Recover and store position and illumination list
blur_vector_roi_list = []
position_list, illumination_list = [], []
frame_segment_map = []
for frame_index in range(len(dataset.frame_list)):
frame_state = dataset.frame_state_list[frame_index]
# Store which segment this measurement uses
frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])
# Extract list of illumination values for each time point
if 'illumination' in frame_state:
illumination_list_frame = []
for time_point in frame_state['illumination']['states']:
illumination_list_time_point = []
for illumination in time_point:
illumination_list_time_point.append(
{'index': illumination['index'], 'value': illumination['value']})
illumination_list_frame.append(illumination_list_time_point)
else:
raise ValueError('Frame %d does not contain illumination information' % frame_index)
# Extract list of positions for each time point
if 'position' in frame_state:
position_list_frame = []
for time_point in frame_state['position']['states']:
position_list_time_point = []
for position in time_point:
if 'units' in position['value']:
if position['value']['units'] == 'mm':
ps_um = dataset.metadata.system.eff_pixel_size_um
position_list_time_point.append(
[1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
elif position['value']['units'] == 'um':
position_list_time_point.append(
[position['value']['y'] / ps_um, position['value']['x'] / ps_um])
elif position['value']['units'] == 'pixels':
position_list_time_point.append([position['value']['y'], position['value']['x']])
else:
raise ValueError('Invalid units %s for position in frame %d' %
(position['value']['units'], frame_index))
else:
# print('WARNING: Could not find position units in metadata, assuming mm')
ps_um = dataset.metadata.system.eff_pixel_size_um
position_list_time_point.append(
[1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
position_list_frame.append(position_list_time_point[0]) # Assuming single time point for now.
# Define positions and position indicies used
positions_used, position_indicies_used = [], []
for index, pos in enumerate(position_list_frame):
for color in illumination_list_frame[index][0]['value']:
if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
position_indicies_used.append(index)
positions_used.append(pos)
# Generate ROI for this blur vector
blur_vector_roi = getPositionListBoundingBox(positions_used)
# Append to list
blur_vector_roi_list.append(blur_vector_roi)
# Crop illumination list to values within the support used
illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])
# Store corresponding positions
position_list.append(positions_used)
# Apply kernel scaling or compression if necessary
if 'scale' in corrections:
for index in range(len(position_list)):
_positions = np.asarray(position_list[index])
for ax in range(yp.shape(_positions)[1]):
_positions[:, ax] = ((_positions[:, ax] - yp.min(_positions[:, ax])) * corrections['scale'] + yp.min(_positions[:, ax]))
position_list[index] = _positions.tolist()
blur_vector_roi_list[index].shape = [corrections['scale'] * sh for sh in blur_vector_roi_list[index].shape]
# Synthesize blur vectors
blur_vector_list = []
for frame_index in range(len(dataset.frame_list)):
# Generate blur vectors
if use_phase_ramp:
kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32')
# Create phase ramp and calculate offset
R = ops.PhaseRamp(kernel_shape, dtype='complex32')
# Generate blur vector
blur_vector = yp.zeros(R.M)
for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
blur_vector += (R * (yp.cast(pos, 'complex32') - offset))
# Take inverse Fourier Transform
blur_vector = yp.abs(yp.cast(yp.iFt(blur_vector)), 0.0)
else:
blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
dtype=dtype, backend=backend)
# Normalize illumination vectors
blur_vector /= yp.scalar(yp.sum(blur_vector))
# Append to list
blur_vector_list.append(blur_vector)
# Subtract minimum of frame_segment_map
frame_segment_map = [segment - min(frame_segment_map) for segment in frame_segment_map]
# Return
return blur_vector_list, blur_vector_roi_list, frame_segment_map, position_list, illumination_list
def blurKernelRecoveryFromStatic(blurred, static, solver='iterative', reg=None, iteration_count=10, system_otf=None, threshold=0.2):
static_mean = np.mean(static)
if static_mean > 1e-4:
static = (static.copy() - static_mean) / static_mean
blurred_mean = np.mean(blurred)
if blurred_mean > 1e-4:
blurred = (blurred.copy() - blurred_mean) / blurred_mean
# if system_otf is not None:
# static = iFt(Ft(static) * system_otf)
if solver == 'iterative':
A = ops.Convolution(blurred.shape, static, mode='windowed')
y = blurred.reshape(-1).astype(np.complex64)
# Initialization: choosing a "good" coefficient value will help in convergence
initialization = np.ones(y.shape, y.dtype)
# Define cost function
objective = objectivefunctions.L2(A, y, l2_reg=reg) #, reg=5e-3)
# Gradient descent implementation
kernel_recovered = iterative.GradientDescent(objective).solve(initialization=initialization,
step_size=1e-3,
nesterov_enabled=True,
iteration_count=iteration_count,
display_type='text',
display_iteration_delta=max((iteration_count // 10),1))
else:
if reg is None:
reg = 0
kernel_recovered = iFt((np.conj(Ft(static)) * Ft(blurred)) / (np.abs(Ft(static)) ** 2 + reg))
# Take real part
kernel_recovered = np.real(kernel_recovered).reshape(static.shape)
# Subtract low-frequency information
kernel_recovered -= sp.ndimage.gaussian_filter(np.real(kernel_recovered.reshape(blurred.shape)), 10)
# Filter by OTF support, threshold
if system_otf is not None:
kernel_recovered = np.real(iFt(Ft(kernel_recovered.reshape(blurred.shape)) * system_otf))
kernel_recovered *= (kernel_recovered > threshold * np.max(kernel_recovered))
return(kernel_recovered)
def registerDatasetImages(dataset, roi=None):
from comptic.registration import registerImage
shift_list = []
image_list = []
for index in range(1, len(dataset.frame_list)):
if roi is not None:
shift_list.append(registerImage(dataset.frame_list[index - 1][roi.slice],
dataset.frame_list[index][roi.slice]))
image_list.append((dataset.frame_list[index - 1][roi.slice], dataset.frame_list[index][roi.slice]))
else:
shift_list.append(registerImage(dataset.frame_list[index - 1], dataset.frame_list[index]))
print(shift_list)
print("Registered image %d of %d, shift was (%d, %d) pixels" %
(index, len(dataset.frame_list), shift_list[-1][0], shift_list[-1]))
return(shift_list, image_list)
def cropAndCenterKernel(kernel_recovered, kernel_size):
# Center maximum value in blur kernel
max_pos = np.unravel_index(np.argmax(kernel_recovered), kernel_recovered.shape)
kernel_centered = np.roll(kernel_recovered, -np.asarray(max_pos) + np.asarray(kernel_recovered.shape) // 2, axis=(0, 1))
# Crop to 2x blur kernel fov
kernel_zeroed = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
kernel_zeroed[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]] = \
kernel_centered[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]]
# Center at middle of blur kernel
p = np.where(kernel_zeroed > 0)
kernel_centered = np.roll(kernel_zeroed, -np.round(np.asarray((np.mean(p[0]), np.mean(p[1])))).astype(int) + np.asarray(kernel_zeroed.shape) // 2, axis=(0, 1))
kernel_size_small = kernel_size //2
# Zero everything outside a resonable shift range
kernel_zeroed_crop = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
kernel_zeroed_crop[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]] = \
kernel_centered[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]]
return(kernel_zeroed_crop)
def plotBlurKernelList(blur_kernel_list, max_count_to_show=5, measurement_list=None, figsize=None):
""" Plots a list of blur kernels and (optionally) corresponding measurements """
count_to_show = min(max_count_to_show, len(blur_kernel_list))
if figsize is None:
plt.figure(figsize=(count_to_show * 2.5, 4 * (1 + int(measurement_list is not None))))
else:
plt.figure(figsize=figsize)
for i in range(count_to_show):
plt.subplot(1 + int(measurement_list is not None), count_to_show, i + 1)
plt.imshow(blur_kernel_list[i], interpolation='bilinear')
plt.title('Blur Kernel ' + str(i))
def illustrateMultiFrameKernel(blur_kernel_list, filename):
""" Function which illustrates a multi-frame blur kernel and saves it to the disk"""
image_c = np.zeros((blur_kernel_list[0].shape[0], blur_kernel_list[0].shape[1], 3))
color_list = ['r', 'g', 'c', 'm', 'w', 'y']
for index, blur_kernel in enumerate(blur_kernel_list):
rgb = matplotlib.colors.to_rgb(color_list[index])
image_c[:, :, 0] += blur_kernel * rgb[0]
image_c[:, :, 1] += blur_kernel * rgb[1]
image_c[:, :, 2] += blur_kernel * rgb[2]
image_c /= np.amax(image_c)
plt.figure()
plt.imshow(image_c, interpolation='bilinear')
plt.xticks([], [])
plt.yticks([], [])
plt.tight_layout()
plt.savefig(filename, transparent=True)
def genSamplingComb(object_size, image_size, dtype=np.complex64):
""" Generates a comb function corresponding with seperation defined by
image_size, centered at the center of object_size """
sampling = np.floor(((np.asarray(object_size) / 2) / np.asarray(image_size)))
sampling_comb = np.zeros(object_size, dtype=dtype)
yy, xx = np.meshgrid(np.arange(-sampling[0], sampling[0] + 1), np.arange(-sampling[1], sampling[1] + 1))
positions_0 = np.hstack((yy.ravel()[:, np.newaxis], xx.ravel()[:, np.newaxis])).astype(np.int)
positions = np.zeros(positions_0.shape, dtype=positions_0.dtype)
positions[:, 0] = object_size[0] // 2 + positions_0[:, 0] * image_size[0]
positions[:, 1] = object_size[1] // 2 + positions_0[:, 1] * image_size[1]
for position in positions:
sampling_comb[position[0], position[1]] = 1
positions -= np.asarray(object_size) // 2
return((sampling_comb, positions))
def genConvolutionSupportList(blur_kernel_list, image_size, threshold=0.05):
"""
This function generates a list of images defining the support of a windowed convolution operation.
"""
object_size = blur_kernel_list[0].shape
W = ops.Crop(object_size, image_size)
kernel_support_mask = []
object_support_mask = []
print(W.dtype)
window_mask = np.abs(W.H * W * np.ones(W.shape[1], dtype=np.complex64)).reshape(object_size)
for blur_kernel in blur_kernel_list:
C = ops.Convolution((blur_kernel > threshold).astype(np.complex64), mode='windowed',
pad_value=0, pad_size=int(object_size[0] / 2))
kernel_support_mask += [((C * (window_mask.reshape(-1).astype(np.complex64))).reshape(object_size) > threshold)]
object_support_mask.append(kernel_support_mask[-1])
for dim in range(kernel_support_mask[-1].ndim):
object_support_mask[-1] = np.flip(object_support_mask[-1], dim)
return (kernel_support_mask, object_support_mask)
def blurKernelFromPositions(object_size, position_list, illum_list, flip_kernels=False, use_phase_ramp=False,
pos_perturbation=None, dtype=default_dtype, backend=default_backend):
"""
This function generates a single blur kernel from a list of positions and illuminations (not multiframe).
"""
# Initialize blur kernels
blur_kernel = np.zeros(object_size, dtype=np.complex64)
for position_index, position in enumerate(position_list):
y = position[0]
x = position[1]
if pos_perturbation is not None:
y = y + pos_perturbation[position_index, 0]
x = x + pos_perturbation[position_index, 1]
if not use_phase_ramp:
x = int(round(x))
y = int(round(y))
# Assign illumination values
if illum_list[position_index] > 0:
if not use_phase_ramp:
blur_kernel[y, x] += illum_list[position_index]
else:
R = ops.PhaseRamp(blur_kernel.shape, dtype=dtype, backend=backend)
x_ = yp.astype(np.asarray((y - object_size[0] // 2, x - object_size[1] // 2)), R.dtype)
ramp = yp.reshape(R * x_, blur_kernel.shape)
blur_kernel += (ramp * illum_list[position_index])
if use_phase_ramp:
blur_kernel = iFt(blur_kernel)
blur_kernel[blur_kernel < 1e-8] = 0.0
if flip_kernels:
blur_kernel = np.fliplr(blur_kernel)
if np.sum(blur_kernel) > 0:
blur_kernel /= np.sum(blur_kernel)
return blur_kernel
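# Usage sketch (illustrative only): blurKernelFromPositions places each
# illumination value at its (y, x) position (optionally via a Fourier phase
# ramp for sub-pixel positions) and normalizes the resulting kernel to sum to
# one, e.g.
#   kernel = blurKernelFromPositions((64, 64), position_list, illum_list)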
def positionListToBlurKernelMap(kernel_size, position_list, return_fourier=True):
"""Function which converts a list of positions in a blur kernel to a full (non-sparse) blur kernel map.
Args:
kernel_size: Size of first two dimensions in blur_kernel_map
position_list: List of x,y tuples which are the locations of each position in the blur kernel.
return_fourier: Optional, enables return of blur kernels in frequency (Fourier) domain.
Returns:
A 2D blur_kernel_map, which has dimensions (kernel_size[0], kernel_size[1], size(position_list,1))
"""
# TODO redundant
print("can this be replaced with blurKernelFromPositions?")
n_positions = np.size(position_list, 0)
blur_kernel_map = np.zeros((n_positions, kernel_size[0], kernel_size[1]))
for pos in np.arange(0, n_positions):
blur_kernel_map[pos, position_list[pos, 0], position_list[pos, 1]] = 1
if return_fourier:
blur_kernel_map = Ft(blur_kernel_map.astype(np.complex64))
return(blur_kernel_map)
def pointListToBlurKernel(kernel_size, position_list, illumination_vector):
"""Converts point list and illuminaiton vector to blur kernel"""
# TODO redundant
print("can this be replaced with blurKernelFromPositions?")
position_count = np.size(position_list, 0)
blur_kernel = np.zeros((kernel_size[0], kernel_size[1]))
assert position_count == len(illumination_vector)
for index, position in enumerate(position_list):
blur_kernel[position[0], position[1]] = illumination_vector[index]
return(blur_kernel)
def colorBlurKernelsToMonochrome(blur_kernel_list_color):
"""
This function converts a list of color blur kernels to monochrome, assuming no optical effects.
Args:
blur_kernel_list_color: A dictionary of blur kernel lists, where each key indicates the illumination color channel of that kernel.
Returns:
A list of blur kernels which is the sum of the lists of each key in blur_kernel_list_color
"""
blur_kernel_list = []
for index, blur_kernel in enumerate(blur_kernel_list_color):
first_channel = list(blur_kernel.keys())[0]
new_kernel = np.zeros(blur_kernel[first_channel].shape, dtype=blur_kernel[first_channel].dtype)
for channel in blur_kernel:
new_kernel += blur_kernel[channel]
blur_kernel_list.append(new_kernel)
return(blur_kernel_list)
def getPositionListBoundingBox(kernel_position_list, use_mean=False):
"""
This function returns the bounding box of a single blur kernel or list of blur kernels, defined as a list of positions
Args:
kernel_position_list: list of points (y,x)
Returns:
A list of the extreme values in the blur kernel in the format [y_min, y_max, x_min, x_max]
"""
bounding_box = [1e10, -1e10, 1e10, -1e10]
assert type(kernel_position_list) in [list, np.ndarray]
# Make a single kernel_position_list a list with one element
if type(kernel_position_list[0][0]) not in [list, np.ndarray, tuple]:
kernel_position_list = [kernel_position_list]
for position in kernel_position_list:
if type(position[0][0]) in [np.ndarray, list, tuple]:
# TODO: This will break if we blur by more than one pixel during each pixel motion
if not use_mean:
max_y, max_x = np.max(np.asarray(position), axis=0)[0]
min_y, min_x = np.min(np.asarray(position), axis=0)[0]
else:
mean_y, mean_x = np.mean(np.asarray(position), axis=0)[0]
else:
if not use_mean:
max_y, max_x = np.max(np.asarray(position), axis=0)
min_y, min_x = np.min(np.asarray(position), axis=0)
else:
mean_y, mean_x = np.mean(np.asarray(position), axis=0)
if not use_mean:
bounding_box = [min(min_y, bounding_box[0]),
max(max_y, bounding_box[1]),
min(min_x, bounding_box[2]),
max(max_x, bounding_box[3])]
else:
bounding_box = [min(mean_y, bounding_box[0]),
max(mean_y, bounding_box[1]),
min(mean_x, bounding_box[2]),
max(mean_x, bounding_box[3])]
# Create ROI object
kernel_support_roi = yp.Roi(start=(int(round(bounding_box[0])), int(round(bounding_box[2]))),
end=(int(round(bounding_box[1])), int(round(bounding_box[3]))))
return(kernel_support_roi)
######################################################################################################
##################################### AUTOCALIBRATION ################################################
######################################################################################################
class BsplineND():
# from http://pythology.blogspot.com/2017/07/nd-b-spline-basis-functions-with-scipy.html
def __init__(self, knots, degree=3, periodic=False):
"""
:param knots: a list of the spline knots with ndim = len(knots)
TODO (sarah) incorporate 2d aspect?
"""
self.ndim = len(knots)
self.splines = []
self.knots = knots
self.degree = degree
for idim, knots1d in enumerate(knots):
nknots1d = len(knots1d)
y_dummy = np.zeros(nknots1d)
knots1d, coeffs, degree = sp.interpolate.splrep(knots1d, y_dummy, k=degree,
per=periodic)
self.splines.append((knots1d, coeffs, degree))
self.ncoeffs = [len(coeffs) for knots, coeffs, degree in self.splines]
def evaluate_independent(self, position):
"""
:param position: a numpy array with size [ndim, npoints]
:returns: a numpy array with size [nspl1, nspl2, ..., nsplN, npts]
with the spline basis evaluated at the input points
"""
ndim, npts = position.shape
values_shape = self.ncoeffs + [npts]
values = np.empty(values_shape)
ranges = [range(icoeffs) for icoeffs in self.ncoeffs]
for icoeffs in itertools.product(*ranges):
values_dim = np.empty((ndim, npts))
for idim, icoeff in enumerate(icoeffs):
coeffs = [1.0 if ispl == icoeff else 0.0 for ispl in
range(self.ncoeffs[idim])]
values_dim[idim] = sp.interpolate.splev(
position[idim],
(self.splines[idim][0], coeffs, self.degree))
values[icoeffs] = np.prod(values_dim, axis=0)
return values
def evaluate(self, position):
assert self.weights is not None, "Must specify coefficients with set_coeffs()"
values = self.evaluate_independent(position)
return self.weights.dot(values)
def set_weights(self, weights):
assert len(weights) == self.ncoeffs[0], "must input correct number of weights"
self.weights = weights
def get_basis_splines(extent, num_basis_fn):
knotsx = np.linspace(0,extent-1,num_basis_fn)
bspline = BsplineND([knotsx])
pointsx1d = | np.linspace(knotsx[0], knotsx[-1], extent) | numpy.linspace |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
# Generate data
X = np.arange(-4.4, 4.4, 0.2)
Y = np.arange(-4.4, 4.4, 0.2)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X**2 + Y**2)
Z = | np.sin(R) | numpy.sin |
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import copy
import math
import numpy as np
from scipy.stats import norm, truncnorm
import chi
class PopulationModel(object):
"""
A base class for population models.
"""
def __init__(self):
super(PopulationModel, self).__init__()
self._transforms_psi = False
def compute_log_likelihood(self, parameters, observations):
"""
Returns the log-likelihood of the population model parameters.
:param parameters: Parameters of the population model.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals. Typically
refers to the values of a mechanistic model parameter for each
individual.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihood of individual parameters and population
parameters.
:rtype: float
"""
raise NotImplementedError
def compute_pointwise_ll(self, parameters, observations):
r"""
Returns the pointwise log-likelihood of the model parameters for
each observation.
:param parameters: Parameters of the population model.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals. Typically
refers to the values of a mechanistic model parameter for each
individual.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihoods for each individual parameter for population
parameters.
:rtype: np.ndarray of length (n,)
"""
raise NotImplementedError
def compute_sensitivities(self, parameters, observations):
r"""
Returns the log-likelihood of the population parameters and its
sensitivities w.r.t. the observations and the parameters.
:param parameters: Parameters of the population model.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals. Typically
refers to the values of a mechanistic model parameter for each
individual.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihood and its sensitivity to individual parameters
as well as population parameters.
:rtype: Tuple[float, np.ndarray of length (n + p,)]
"""
raise NotImplementedError
def get_parameter_names(self):
"""
Returns the names of the population model parameters. If name is
not set, defaults are returned.
"""
raise NotImplementedError
def n_hierarchical_parameters(self, n_ids):
"""
Returns a tuple of the number of individual parameters and the number
of population parameters that this model expects in context of a
:class:`HierarchicalLogLikelihood`, when ``n_ids`` individuals are
modelled.
Parameters
----------
n_ids
Number of individuals.
"""
raise NotImplementedError
def n_parameters(self):
"""
Returns the number of parameters of the population model.
"""
raise NotImplementedError
def transforms_individual_parameters(self):
r"""
Returns a boolean whether the population model models the individual
likelihood parameters directly or a transform of those parameters.
Some population models compute the likelihood of the population
parameters :math:`\theta` based on estimates of the
individual likelihood parameters :math:`\Psi = \{\psi _i \} _{i=1}^n`,
where :math:`n` is the number of individual likelihoods. Here,
the parameters are not transformed and ``False`` is returned.
Other population models, in particular the
:class:`CovariatePopulationModel`, transforms the parameters to a
latent representation
:math:`\Psi \rightarrow \{\eta _i \} _{i=1}^n`.
Here, a transformation of the likelihood parameters is modelled and
``True`` is returned.
"""
return self._transforms_psi
def sample(self, parameters, n_samples=None, seed=None):
r"""
Returns random samples from the population distribution.
The returned value is a NumPy array with shape ``(n_samples,)``.
Parameters
----------
parameters
An array-like object with the parameters of the population model.
n_samples
Number of samples. If ``None``, one sample is returned.
seed
A seed for the pseudo-random number generator.
"""
raise NotImplementedError
def set_parameter_names(self, names=None):
"""
Sets the names of the population model parameters.
Parameters
----------
names
An array-like object with string-convertable entries of length
:meth:`n_parameters`. If ``None``, parameter names are reset to
defaults.
"""
raise NotImplementedError
class CovariatePopulationModel(PopulationModel):
r"""
A CovariatePopulationModel assumes that the individual parameters
:math:`\psi` are distributed according to a population model that is
conditional on the model parameters :math:`\vartheta` and the covariates
:math:`\chi`
.. math::
\psi \sim \mathbb{P}(\cdot | \vartheta, \chi).
Here, covariates identify subpopulations in the population and can vary
from one individual to the next, while the model parameters
:math:`\vartheta` are the same for all individuals.
To simplify this dependence, CovariatePopulationModels make the assumption
that the distribution :math:`\mathbb{P}(\psi | \vartheta, \chi)`
deterministically varies with the covariates, such that the distribution
can be rewritten in terms of a covariate-independent distribution of
inter-individual fluctuations :math:`\eta`
.. math::
\eta \sim \mathbb{P}(\cdot | \theta)
and a set of deterministic relationships for the individual parameters
:math:`\psi` and the new population parameters :math:`\theta`
.. math::
\theta = f(\vartheta) \quad \mathrm{and} \quad
\psi = g(\vartheta , \eta, \chi ).
The ``population_model`` input defines the distribution of :math:`\eta`
and the ``covariate_model`` defines the functions :math:`f` and :math:`g`.
Extends :class:`PopulationModel`.
:param population_model: Defines the distribution of :math:`\eta`.
:type population_model: PopulationModel
:param covariate_model: Defines the covariate model.
:type covariate_model: CovariateModel
"""
def __init__(self, population_model, covariate_model):
super(CovariatePopulationModel, self).__init__()
# Check inputs
if not isinstance(population_model, PopulationModel):
raise TypeError(
'The population model has to be an instance of a '
'chi.PopulationModel.')
if not isinstance(covariate_model, chi.CovariateModel):
raise TypeError(
'The covariate model has to be an instance of a '
'chi.CovariateModel.')
# Check compatibility of population model with covariate model
covariate_model.check_compatibility(population_model)
# Remember models
self._population_model = population_model
self._covariate_model = covariate_model
# Set transform psis to true
self._transforms_psi = True
def compute_individual_parameters(
self, parameters, eta, covariates=None):
r"""
Returns the individual parameters :math:`\psi`.
By default ``covariates`` are set to ``None``, such that model
does not rely on covariates. Each derived :class:`CovariateModel`
needs to make sure that model reduces to sensible values for
this edge case.
:param parameters: Model parameters :math:`\vartheta`.
:type parameters: np.ndarray of length (p,)
:param eta: Inter-individual fluctuations :math:`\eta`.
:type eta: np.ndarray of length (n,)
:param covariates: Individual covariates :math:`\chi`.
:type covariates: np.ndarray of length (n, c)
:returns: Individual parameters :math:`\psi`.
:rtype: np.ndarray of length (n,)
"""
return self._covariate_model.compute_individual_parameters(
parameters, eta, covariates)
def compute_individual_sensitivities(
self, parameters, eta, covariates=None):
r"""
Returns the individual parameters :math:`\psi` and their sensitivities
with respect to the model parameters :math:`\vartheta` and the relevant
fluctuations :math:`\eta`.
:param parameters: Model parameters :math:`\vartheta`.
:type parameters: np.ndarray of length (p,)
:param eta: Inter-individual fluctuations :math:`\eta`.
:type eta: np.ndarray of length (n,)
:param covariates: Individual covariates :math:`\chi`.
:type covariates: np.ndarray of length (n, c)
:returns: Individual parameters :math:`\psi` and sensitivities
(:math:`\partial _{\eta} \psi` ,
:math:`\partial _{\vartheta _1} \psi`, :math:`\ldots`,
:math:`\partial _{\vartheta _p} \psi`).
:rtype: Tuple[np.ndarray, np.ndarray] of shapes (n,) and (1 + p, n)
"""
return self._covariate_model.compute_individual_sensitivities(
parameters, eta, covariates)
def compute_log_likelihood(self, parameters, observations):
r"""
Returns the log-likelihood of the model parameters.
:param parameters: Values of the model parameters :math:`\vartheta`.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals :math:`\eta`.
Typically refers to the inter-individual fluctuations of the
mechanistic model parameter.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihood of individual parameters and population
parameters.
:rtype: float
"""
# Compute population parameters
parameters = self._covariate_model.compute_population_parameters(
parameters)
# Compute log-likelihood
score = self._population_model.compute_log_likelihood(
parameters, observations)
return score
def compute_pointwise_ll(self, parameters, observations):
r"""
Returns the pointwise log-likelihood of the model parameters for
each observation.
:param parameters: Values of the model parameters :math:`\vartheta`.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals :math:`\eta`.
Typically refers to the inter-individual fluctuations of the
mechanistic model parameter.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihoods of individual parameters for population
parameters.
:rtype: np.ndarray of length (n,)
"""
# Compute population parameters
parameters = self._covariate_model.compute_population_parameters(
parameters)
# Compute log-likelihood
score = self._population_model.compute_pointwise_ll(
parameters, observations)
return score
def compute_sensitivities(self, parameters, observations):
r"""
Returns the log-likelihood of the population parameters and its
sensitivities w.r.t. the observations and the parameters.
The sensitivities are computed with respect to the individual
:math:`\eta _i` and the population parameters :math:`\vartheta`
.. math::
\left(
\partial _{\eta _i}\log p(\eta _i | \theta),
\sum _{i,j}\partial _{\theta _j}\log p(\eta _i | \theta_j)
\frac{\partial f_j}{\partial \vartheta _k}\right) .
:param parameters: Parameters of the population model.
:type parameters: List, np.ndarray of length (p,)
:param observations: "Observations" of the individuals. Typically
refers to the values of a mechanistic model parameter for each
individual.
:type observations: List, np.ndarray of length (n,)
:returns: Log-likelihood and its sensitivity to individual parameters
as well as population parameters.
:rtype: Tuple[float, np.ndarray], where array is of shape (n + p,)
"""
# Compute population parameters and sensitivities dtheta/dvartheta
params, dvartheta = \
self._covariate_model.compute_population_sensitivities(
parameters)
# Compute log-likelihood and sensitivities dscore/deta, dscore/dtheta
score, sensitivities = self._population_model.compute_sensitivities(
params, observations)
# Propagate sensitivity of score to vartheta
# i.e. dscore/dvartheta = sum_i dscore/dtheta_i * dtheta_i/dvartheta
# Note dvartheta has shape (p, p') and dtheta has shape (p')
n = len(observations)
deta = sensitivities[:n]
dtheta = sensitivities[n:]
dvartheta = dvartheta @ dtheta
# Stack results
sensitivities = np.hstack((deta, dvartheta))
return (score, sensitivities)
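# Shape sketch (illustrative numbers): with n = 3 observations, p = 4 model
# parameters vartheta and p' = 2 population parameters theta, the intermediate
# `sensitivities` has length n + p' = 5, dvartheta has shape (4, 2), and
# dvartheta @ dtheta gives the length-4 gradient w.r.t. vartheta, so the
# returned array has length n + p = 7.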
def get_covariate_model(self):
"""
Returns the covariate model.
"""
return self._covariate_model
def get_covariate_names(self):
"""
Returns the names of the covariates. If name is
not set, defaults are returned.
"""
return self._covariate_model.get_covariate_names()
def get_parameter_names(self):
"""
Returns the names of the model parameters. If name is
not set, defaults are returned.
"""
return self._covariate_model.get_parameter_names()
def n_hierarchical_parameters(self, n_ids):
"""
Returns a tuple of the number of individual parameters and the number
of population parameters that this model expects in context of a
:class:`HierarchicalLogLikelihood`, when ``n_ids`` individuals are
modelled.
Parameters
----------
n_ids
Number of individuals.
"""
# Get number of individual parameters
n_ids, _ = self._population_model.n_hierarchical_parameters(n_ids)
return (n_ids, self._covariate_model.n_parameters())
def n_covariates(self):
"""
Returns the number of covariates.
"""
return self._covariate_model.n_covariates()
def n_parameters(self):
"""
Returns the number of parameters of the population model.
"""
return self._covariate_model.n_parameters()
def sample(
self, parameters, n_samples=None, seed=None, covariates=None,
return_psi=False):
r"""
Returns random samples from the population distribution.
If ``return_psi=True`` samples from
.. math::
\psi \sim \mathbb{P}(\cdot | \vartheta, \chi)
are returned. By default (``return_psi=False``) samples from
.. math::
\eta \sim \mathbb{P}(\cdot | \theta)
are returned.
:param parameters: Values of the model parameters.
:type parameters: List, np.ndarray of shape (p,)
:param n_samples: Number of samples. If ``None``, one sample is
returned.
:type n_samples: int, optional
:param seed: A seed for the pseudo-random number generator.
:type seed: int, np.random.Generator, optional
:param covariates: Values for the covariates. If ``None``, default
is assumed defined by the :class:`CovariateModel`.
:type covariates: List, np.ndarray of shape (c,)
:param return_psi: Boolean flag that indicates whether the parameters
of the individual likelihoods are returned or the transformed
inter-individual fluctuations.
:type return_psi: bool, optional
:returns: Samples from population model conditional on covariates.
:rtype: np.ndarray of shape (n_samples,)
"""
# Check that covariates has the correct dimensions
if covariates is not None:
covariates = np.array(covariates)
n_covariates = self._covariate_model.n_covariates()
if len(covariates) != n_covariates:
raise ValueError(
'Covariates must be of length n_covariates.')
# Add dimension to fit shape (n, c) for later convenience
covariates = np.reshape(covariates, (1, n_covariates))
# Compute population parameters
eta_dist_params = self._covariate_model.compute_population_parameters(
parameters)
# Sample eta from population model
eta = self._population_model.sample(eta_dist_params, n_samples, seed)
if not return_psi:
return eta
# Compute psi
psi = self._covariate_model.compute_individual_parameters(
parameters, eta, covariates)
return psi
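# Usage sketch (names and values are purely illustrative; assumes `cov_model` is
# a compatible chi.CovariateModel expecting a single covariate):
#
# pop_model = CovariatePopulationModel(GaussianModel(), cov_model)
# eta = pop_model.sample(parameters, n_samples=10, seed=1)
# psi = pop_model.sample(
# parameters, n_samples=10, seed=1, covariates=[1.2], return_psi=True)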
def set_covariate_names(self, names=None, update_param_names=False):
"""
Sets the names of the covariates.
:param names: A list of parameter names. If ``None``, covariate names
are reset to defaults.
:type names: List
:param update_param_names: Boolean flag indicating whether parameter
names should be updated according to new covariate names. By
default parameter names are not updated.
:type update_param_names: bool, optional
"""
self._covariate_model.set_covariate_names(names, update_param_names)
def set_parameter_names(self, names=None):
"""
Sets the names of the population model parameters.
Parameters
----------
names
An array-like object with string-convertable entries of length
:meth:`n_parameters`. If ``None``, parameter names are reset to
defaults.
"""
self._covariate_model.set_parameter_names(names)
class GaussianModel(PopulationModel):
r"""
A population model which assumes that model parameters across individuals
are distributed according to a Gaussian distribution.
A Gaussian population model assumes that a model parameter
:math:`\psi` varies across individuals such that :math:`\psi` is
Gaussian distributed in the population
.. math::
p(\psi |\mu, \sigma) =
\frac{1}{\sqrt{2\pi} \sigma}
\exp\left(-\frac{(\psi - \mu )^2}
{2 \sigma ^2}\right).
Here, :math:`\mu` and :math:`\sigma ^2` are the
mean and variance of the Gaussian distribution.
Any observed individual with parameter :math:`\psi _i` is
assumed to be a realisation of the random variable :math:`\psi`.
Extends :class:`PopulationModel`.
"""
def __init__(self):
super(GaussianModel, self).__init__()
# Set number of parameters
self._n_parameters = 2
# Set default parameter names
self._parameter_names = ['Mean', 'Std.']
@staticmethod
def _compute_log_likelihood(mean, var, observations): # pragma: no cover
r"""
Calculates the log-likelihood using numba speed up.
"""
# Compute log-likelihood score
n_ids = len(observations)
log_likelihood = \
- n_ids * np.log(2 * np.pi * var) / 2 \
- np.sum((observations - mean) ** 2) / (2 * var)
# If score evaluates to NaN, return -infinity
if np.isnan(log_likelihood):
return -np.inf
return log_likelihood
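# Worked example (for illustration): with mean=0, var=1 and observations=[0.0],
# the score reduces to -0.5 * np.log(2 * np.pi) ~ -0.9189, i.e. the log-density
# of a standard normal evaluated at its mean.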
@staticmethod
def _compute_pointwise_ll(mean, var, observations): # pragma: no cover
r"""
Calculates the pointwise log-likelihoods using numba speed up.
"""
# Compute log-likelihood score
log_likelihood = \
- np.log(2 * np.pi * var) / 2 \
- (observations - mean) ** 2 / (2 * var)
# If score evaluates to NaN, return -infinity
mask = np.isnan(log_likelihood)
if np.any(mask):
log_likelihood[mask] = -np.inf
return log_likelihood
return log_likelihood
def _compute_sensitivities(self, mean, var, psi): # pragma: no cover
r"""
Calculates the log-likelihood and its sensitivities using numba
speed up.
Expects:
mean = float
var = float
Shape observations = (n_obs,)
Returns:
log_likelihood: float
sensitivities: np.ndarray of shape (n_obs + 2,)
"""
# Compute log-likelihood score
n_ids = len(psi)
log_likelihood = self._compute_log_likelihood(mean, var, psi)
# If score evaluates to NaN, return -infinity
if | np.isnan(log_likelihood) | numpy.isnan |
from __future__ import absolute_import, division, print_function
import os
import warnings
import numpy as np
from multiprocessing import cpu_count
from astropy.wcs import WCS
from astropy import units as u
from astropy.nddata import StdDevUncertainty
from astropy.utils.console import ProgressBar
from .nikamap import NikaMap
__all__ = ["HalfDifference", "Jackknife", "Bootstrap"]
def compare_header(header_ref, header_target):
"""Crude comparison of two header
Parameters
----------
header_ref : astropy.io.fits.Header
the reference header
header_target : astropy.io.fits.Header
the target header to check
Notes
-----
This will raise assertion error if the two header are not equivalent
"""
wcs_ref = WCS(header_ref)
wcs_target = WCS(header_target)
assert wcs_ref.wcs == wcs_target.wcs, "Different header found"
for key in ["UNIT", "NAXIS1", "NAXIS2"]:
if key in header_ref:
assert header_ref[key] == header_target[key], "Different key found"
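# Minimal usage sketch (file names are placeholders):
#
# from astropy.io import fits
# compare_header(fits.getheader("scan_1.fits"), fits.getheader("scan_2.fits"))
#
# This raises an AssertionError as soon as the WCS or one of the checked keys
# differs.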
def check_filenames(filenames):
"""check filenames existence
Parameters
----------
filenames : list of str
filenames list to be checked
Returns
-------
list of str
curated list of files
"""
_filenames = []
for filename in filenames:
if os.path.isfile(filename):
_filenames.append(filename)
else:
warnings.warn("{} does not exist, removing from list".format(filename), UserWarning)
return _filenames
class MultiScans(object):
"""A class to hold multi single scans from a list of fits files.
This acts as a python lazy iterator and/or a callable
Parameters
----------
filenames : list or `~MultiScans` object
the list of fits files to produce the Jackknifes or an already filled object
ipython_widget : bool, optional
If True, the progress bar will display as an IPython notebook widget.
ignore_header : bool, optional
if True, the check on header is ignored
n : int
the number of iteration for the iterator
Notes
-----
A crude check is made on the wcs of each map when instantiated
"""
def __init__(self, filenames, n=None, ipython_widget=False, ignore_header=False, **kwd):
self.i = 0
self.n = n
self.kwargs = kwd
self.ipython_widget = ipython_widget
if isinstance(filenames, MultiScans):
data = filenames
self.filenames = data.filenames
self.primary_header = data.primary_header
self.header = data.header
self.unit = data.unit
self.shape = data.shape
self.datas = data.datas
self.weights = data.weights
self.time = data.time
self.mask = data.mask
else:
self.filenames = check_filenames(filenames)
nm = NikaMap.read(self.filenames[0], **kwd)
self.primary_header = nm.meta.get("primary_header", None)
self.header = nm.meta["header"]
self.unit = nm.unit
self.shape = nm.shape
# This is a low_mem=False case ...
# TODO: How to refactor that for low_mem=True ?
datas = np.zeros((len(self.filenames),) + self.shape)
weights = np.zeros((len(self.filenames),) + self.shape)
time = np.zeros(self.shape) * u.h
for i, filename in enumerate(ProgressBar(self.filenames, ipython_widget=self.ipython_widget)):
nm = NikaMap.read(filename, **kwd)
try:
compare_header(self.header, nm.meta["header"])
except AssertionError as e:
if ignore_header:
warnings.warn("{} for {}".format(e, filename), UserWarning)
else:
raise ValueError("{} for {}".format(e, filename))
datas[i, :, :] = nm.data
with np.errstate(invalid="ignore", divide="ignore"):
weights[i, :, :] = nm.uncertainty.array ** -2
time += nm.time
# make sure that we do not have nans in the data
datas[i, nm.time == 0] = 0
weights[i, nm.time == 0] = 0
unobserved = time == 0
self.datas = datas
self.weights = weights
self.time = time
self.mask = unobserved
def __len__(self):
# defining the length of the iterator enables ProgressBar on it
return self.n
def __iter__(self):
# Iterators are iterables too.
# Adding this functions to make them so.
return self
def __call__(self):
"""The main method which should be overrided
should return a :class:`nikamap.NikaMap`
"""
pass
def __next__(self):
"""Iterator on the objects"""
if self.n is None or self.i < self.n:
# Produce data until last iter
self.i += 1
data = self.__call__()
else:
raise StopIteration()
return data
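# Usage sketch (illustrative): subclasses implement __call__, so iterating over
# an instance created with n=10 yields 10 realisations, e.g.
#
# for nm in ProgressBar(HalfDifference(filenames, n=10)):
# ... # each `nm` is the NikaMap returned by __call__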
class HalfDifference(MultiScans):
"""A class to create weighted half differences uncertainty maps from a list of scans.
This acts as a python lazy iterator and/or a callable
Parameters
----------
filenames : list
the list of fits files to produce the Jackknifes
ipython_widget : bool, optional
If True, the progress bar will display as an IPython notebook widget.
n : int
the number of jackknife maps to be produced by the iterator
if set to `None`, produce only one weighted average of the maps
parity_threshold : float
mask threshold between 0 and 1 to keep partially jackknifed area
* 1 pure jackknifed
* 0 partially jackknifed, keep all
Notes
-----
A crude check is made on the wcs of each map when instantiated
"""
def __init__(self, filenames, parity_threshold=1, **kwd):
super(HalfDifference, self).__init__(filenames, **kwd)
self.parity_threshold = parity_threshold
# Create weights for Half differences
jk_weights = np.ones(len(self.filenames))
if self.n is not None:
jk_weights[::2] *= -1
if self.n is not None and len(self.filenames) % 2:
warnings.warn("Even number of files, dropping a random file", UserWarning)
jk_weights[-1] = 0
assert np.sum(jk_weights != 0), "Less than 2 existing files in filenames"
self.jk_weights = jk_weights
@property
def parity_threshold(self):
return self._parity
@parity_threshold.setter
def parity_threshold(self, value):
if value is not None and isinstance(value, (int, float)) and 0 <= value <= 1:
self._parity = value
else:
raise TypeError("parity must be between 0 and 1")
def __call__(self):
"""Compute a jackknifed dataset
Returns
-------
:class:`nikamap.NikaMap`
a jackknifed data set
"""
np.random.shuffle(self.jk_weights)
with np.errstate(invalid="ignore", divide="ignore"):
e_data = 1 / np.sqrt(np.sum(self.weights, axis=0))
data = | np.sum(self.datas * self.weights * self.jk_weights[:, np.newaxis, np.newaxis], axis=0) | numpy.sum |
from functools import partial
import numpy as np
from scipy.stats import boxcox
from sklearn.datasets import make_blobs
from sklearn.preprocessing import minmax_scale
from clustermatch.cluster import run_quantile_clustering
def blobs_data_generator01():
"""
Blobs. n_samples=100, n_features=20, centers=3, cluster_std=0.10, center_box=(-1.0, 1.0)
"""
return make_blobs(
n_samples=100,
centers=3,
n_features=20,
cluster_std=0.10,
shuffle=True,
center_box=(-1.0, 1.0)
)
def blobs_data_generator02(seed=None, n_samples=100, n_features=1000):
"""
Blobs. n_samples=100, n_features=1000, centers=3, cluster_std=0.10, center_box=(-1.0, 1.0)
"""
return make_blobs(
n_samples=n_samples,
centers=3,
n_features=n_features,
cluster_std=0.10,
shuffle=True,
center_box=(-1.0, 1.0),
random_state=seed,
)
def _get_array_chunks(data, chunk_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(data), chunk_size):
sl = slice(i, i + chunk_size)
yield sl, data[sl]
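# Example: _get_array_chunks(np.arange(5), 2) yields
# (slice(0, 2), array([0, 1])), (slice(2, 4), array([2, 3])) and
# (slice(4, 6), array([4])).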
def _apply_noise(data, data_noise):
data_n_objects = data.shape[1]
data_n_measures = data.shape[0]
if len(data_noise) == 0:
return data
percentage_objects = data_noise.get('percentage_objects', 0.1)
percentage_measures = data_noise.get('percentage_measures', 0.0)
magnitude = data_noise.get('magnitude', 0.0)
selected_rows = np.random.choice(
data_n_measures,
size=int(data_n_measures * percentage_measures),
replace=False
)
selected_cols = np.random.choice(
data_n_objects,
size=int(data_n_objects * percentage_objects),
replace=False
)
noisy_data = data.copy()
if np.issubdtype(data.dtype, np.number) or all([np.isreal(x) for row in data for x in row]):
if not np.issubdtype(data.dtype, np.number):
data = data.astype(float)
if len(selected_rows) > 0:
noisy_points = np.random.rand(len(selected_rows), data_n_objects)
noisy_points = minmax_scale(noisy_points, axis=1, feature_range=(data.min(), data.max()))
noisy_points = noisy_points * magnitude
noisy_data[selected_rows, :] += noisy_points
if len(selected_cols) > 0:
noisy_points = np.random.rand(data_n_measures, len(selected_cols))
noisy_points = minmax_scale(noisy_points, axis=1, feature_range=(data.min(), data.max()))
noisy_data[:, selected_cols] = noisy_points
else:
assert all([not np.isreal(x) for row in data for x in row])
unique_cat = np.unique(data)
if len(selected_cols) > 0:
# noisy_points = np.random.rand(data_n_measures, len(selected_cols))
noisy_points = np.random.choice(unique_cat, (data_n_measures, len(selected_cols)))
# noisy_points = minmax_scale(noisy_points, axis=1, feature_range=(data.min(), data.max()))
noisy_data[:, selected_cols] = noisy_points
# for i in range(data.shape[0]):
# for j in range(data.shape[1]):
# if np.random.rand() < magnitude:
# noisy_data[i, j] = np.random.choice(unique_cat)
return noisy_data
def _generic_data_transformation(data, sources_transformers, dtype=None, **kwargs):
if len(sources_transformers) == 0:
return data
n_data = data.shape[0]
n_sim_sources = len(sources_transformers)
data_step = int(n_data / n_sim_sources)
t_data = np.empty(data.shape, dtype=data.dtype if dtype is None else dtype)
i = 0
for sl, data_chunk in _get_array_chunks(data, data_step):
transformer = sources_transformers[i % n_sim_sources]
# transform
if callable(transformer):
t_data_chunk = transformer(data_chunk)
else:
t_data_chunk = data_chunk * transformer
t_data[sl] = t_data_chunk
# if not np.issubdtype(t_data_chunk.dtype, np.number):
# is_data_object = True
# data noise
if 'data_noise' in kwargs:
data_noise = kwargs['data_noise']
t_data[sl] = _apply_noise(t_data[sl], data_noise)
i += 1
return t_data
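# Illustrative note: with 100 rows and 5 entries in `sources_transformers`,
# data_step = 20, so rows 0-19 go through the first transformer, rows 20-39
# through the second, and so on; callables are applied to each chunk, while
# plain numbers simply scale it.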
def _create_categorical(data, cats):
n_cats = len(cats)
t_data = np.empty(data.shape, dtype=object)
for data_row_idx, data_row in enumerate(data):
data_row_part = run_quantile_clustering(data_row, n_cats)
t_data[data_row_idx] = np.array([cats[int(x)] for x in data_row_part])
return t_data
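# Illustrative sketch (exact labels depend on how run_quantile_clustering orders
# its partitions): with cats=['low', 'high'], a row such as [0.1, 0.9, 0.2, 0.8]
# is split into two quantile clusters and relabelled, e.g. to
# ['low', 'high', 'low', 'high'].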
def transform_rows_nonlinear_and_categorical01(data, **kwargs):
"""
Nonlinear and categorical row transformation 01. 7 numerical data sources (x^4, log, exp2, 100, x^5, 10000, 0.0001) and 3 categorical (10, 4 and 2 categories).
"""
sources_transformers = [
lambda x: np.power(x, 4),
lambda x: np.log(np.abs(x)),
lambda x: np.exp2(x),
100.0,
lambda x: _create_categorical(x, cats=[
'cat01', 'cat02', 'cat03', 'cat04',
'cat05', 'cat06', 'cat07', 'cat08',
'cat09', 'cat10',
]),
lambda x: np.power(x, 5),
10000.0,
lambda x: _create_categorical(x, cats=['cat01', 'cat02', 'cat03', 'cat04']),
0.0001,
lambda x: _create_categorical(x, cats=['cat01', 'cat02']),
]
return _generic_data_transformation(data, sources_transformers, dtype=object, **kwargs)
def transform_rows_nonlinear_and_categorical02(data, **kwargs):
"""
Nonlinear and categorical row transformation 02. 7 numerical data sources (x^4, log, exp2, log1p, x^5, log10, log2) and 3 categorical (10, 4 and 2 categories).
"""
sources_transformers = [
lambda x: np.power(x, 4),
lambda x: np.log(np.abs(x)),
lambda x: np.exp2(x),
lambda x: _create_categorical(x, cats=[
'cat01', 'cat02', 'cat03', 'cat04',
'cat05', 'cat06', 'cat07', 'cat08',
'cat09', 'cat10',
]),
lambda x: np.log1p(np.abs(x)),
lambda x: np.power(x, 5),
lambda x: _create_categorical(x, cats=['cat01', 'cat02', 'cat03', 'cat04']),
lambda x: np.log10(np.abs(x)),
lambda x: _create_categorical(x, cats=['cat01', 'cat02']),
lambda x: np.log2(np.abs(x)),
]
return _generic_data_transformation(data, sources_transformers, dtype=object, **kwargs)
def transform_rows_full_scaled01(data):
"""
Full row scale. 5 simulated data sources; values: 0.01, 0.1, 10, 100, 1000
"""
sources_transformers = [0.01, 0.1, 10.0, 100.0, 1000.0]
return _generic_data_transformation(data, sources_transformers)
def transform_rows_nonlinear01(data, **kwargs):
"""
Nonlinear row transformation 01. 5 simulated data sources; Functions: exp, x^2, log, expm1, log10
"""
sources_transformers = [
np.exp,
lambda x: np.power(x, 2),
lambda x: np.log(np.abs(x)),
np.expm1,
lambda x: np.log10(np.abs(x)),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear02(data, **kwargs):
"""
Nonlinear row transformation 02. 4 simulated data sources; Functions: x^3, log, log1p, exp2
"""
sources_transformers = [
lambda x: np.power(x, 3),
lambda x: np.log(np.abs(x)),
lambda x: np.log1p(np.abs(x)),
np.exp2,
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear03(data, **kwargs):
"""
Nonlinear row transformation 03. 10 simulated data sources; Functions: x^4, log, exp2, 100, log1p, x^5, 10000, log10, 0.0001, log2
"""
sources_transformers = [
lambda x: np.power(x, 4),
lambda x: np.log(np.abs(x)),
lambda x: np.exp2(x),
100.0,
lambda x: np.log1p(np.abs(x)),
lambda x: np.power(x, 5),
10000.0,
lambda x: np.log10(np.abs(x)),
0.0001,
lambda x: np.log2(np.abs(x)),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear03_01(data, **kwargs):
"""
Nonlinear row transformation 03_01. 10 simulated data sources; Functions: x^2, log, exp2, 100, log1p, x^3, 10000, log10, 0.0001, log2
"""
sources_transformers = [
lambda x: np.power(x, 2),
lambda x: np.log(np.abs(x)),
lambda x: np.exp2(x),
100.0,
lambda x: np.log1p(np.abs(x)),
lambda x: np.power(x, 3),
10000.0,
lambda x: np.log10(np.abs(x)),
0.0001,
lambda x: np.log2(np.abs(x)),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear04(data, **kwargs):
"""
Nonlinear row transformation 04. 10 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), cos(pi*x), x^5, exp2, log10, boxcox(2), boxcox(4), boxcox(6).
"""
sources_transformers = [
1.0,
lambda x: 0.5 * np.power((x+1), 2),
lambda x: np.sin(np.pi * x),
lambda x: np.cos(np.pi * x),
lambda x: np.power(x, 5),
lambda x: np.exp2(x),
lambda x: np.log10(np.abs(x)),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear05(data, **kwargs):
"""
Nonlinear row transformation 05. 10 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), cos(pi*x), x^5, exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6).
"""
sources_transformers = [
1.0,
lambda x: 0.5 * np.power((x+1), 2),
lambda x: np.sin(np.pi * x),
lambda x: np.cos(np.pi * x),
lambda x: np.power(x, 5),
lambda x: np.exp2(x),
lambda x: np.log10(x + (-1.0 * x.min()) + 0.01),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear06(data, **kwargs):
"""
Nonlinear row transformation 06. 12 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), sin(2*pi*x), cos(pi*x), cos(2*pi*x), x^5, exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6).
"""
sources_transformers = [
1.0,
lambda x: 0.5 * np.power((x+1), 2),
lambda x: np.sin(np.pi * x),
lambda x: np.sin(2.0 * np.pi * x),
lambda x: np.cos(np.pi * x),
lambda x: np.cos(2.0 * np.pi * x),
lambda x: np.power(x, 5),
lambda x: np.exp2(x),
lambda x: np.log10(x + (-1.0 * x.min()) + 0.01),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear07(data, **kwargs):
"""
Nonlinear row transformation 07. 12 simulated data sources; Functions: 1.0, 0.5*(x+1)^2, sin(pi*x), -100, cos(pi*x), 0.0001, x^5, exp2, log10(x-x.min()), boxcox(2), boxcox(4), boxcox(6).
"""
sources_transformers = [
1.0,
lambda x: 0.5 * np.power((x+1), 2),
lambda x: np.sin(np.pi * x),
-100.0,
lambda x: np.cos(np.pi * x),
0.0001,
lambda x: np.power(x, 5),
lambda x: np.exp2(x),
lambda x: np.log10(x + (-1.0 * x.min()) + 0.01),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 6.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear08(data, **kwargs):
"""
Nonlinear row transformation 08. 5 simulated data sources; Functions: boxcox(0), boxcox(1), boxcox(2), boxcox(3), boxcox(4).
"""
sources_transformers = [
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 0.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 1.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 3.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear09(data, **kwargs):
"""
Nonlinear row transformation 09. 5 simulated data sources; Functions: x^2, boxcox(1), boxcox(2), boxcox(3), boxcox(4).
"""
sources_transformers = [
lambda x: np.power(x, 2),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 1.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 3.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear10(data, **kwargs):
"""
Nonlinear row transformation 10. 5 simulated data sources; Functions: x^2, log(x), boxcox(2), boxcox(3), boxcox(4).
"""
sources_transformers = [
lambda x: np.power(x, 2),
lambda x: np.log(np.abs(x)),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 2.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 3.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear11(data, **kwargs):
"""
Nonlinear row transformation 11. 5 simulated data sources; Functions: x^2, log(x), x^4, boxcox(3), boxcox(4).
"""
sources_transformers = [
lambda x: np.power(x, 2),
lambda x: np.log(np.abs(x)),
lambda x: np.power(x, 4),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 3.00),
lambda x: boxcox(x + (-1.0 * x.min()) + 0.01, 4.00),
]
return _generic_data_transformation(data, sources_transformers, **kwargs)
def transform_rows_nonlinear12(data, **kwargs):
"""
Nonlinear row transformation 12. 5 simulated data sources; Functions: x^2, log(x), x^4, sin(pi * x), boxcox(4).
"""
sources_transformers = [
lambda x: np.power(x, 2),
lambda x: np.log(np.abs(x)),
lambda x: | np.power(x, 4) | numpy.power |
import numpy as np
from dask.utils import tmpfile
from metagraph.tests.compiler.test_subgraphs import res
from metagraph.core.dask.visualize import visualize, merge_dict_of_dict
def test_merge_dict_of_dict():
base = {"key1": {"attr1": 1, "attr2": 2}, "key2": {"attr1": 11, "attr3": 13,}}
overlay = {
"key1": {"attr1": 100, "attr3": 300,},
"key3": {"attr1": 1000, "attr4": 4000,},
}
result = merge_dict_of_dict(base, overlay)
expected = {
"key1": {"attr1": 100, "attr2": 2, "attr3": 300,},
"key2": {"attr1": 11, "attr3": 13,},
"key3": {"attr1": 1000, "attr4": 4000,},
}
assert result == expected
def test_visualize(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
z2 = scale_func(scale_func(scale_func(a, 2.5), 3.5), 4.5)
merge = res.algos.testing.add(z1, z2)
ans = scale_func(merge, 2.8)
with tmpfile(extension="dot") as fn:
visualize(ans, filename=fn)
with open(fn) as f:
contents = f.read()
# this is an inadequate test, but at least confirms some basics are working
assert "testing.scale" in contents
assert "testing.add" in contents
assert "NumpyVectorType" in contents
with tmpfile(extension="dot") as fn:
visualize(ans, collapse_outputs=True, filename=fn)
with open(fn) as f:
contents = f.read()
assert "testing.scale" in contents
assert "testing.add" in contents
# data nodes should be hidden
assert "NumpyVectorType" not in contents
def test_placeholder_visualize(res):
a = np.arange(100)
scale_func = res.algos.testing.scale
z1 = scale_func(scale_func(scale_func(a, 2.0), 3.0), 4.0)
with tmpfile(extension="dot") as fn:
z1.visualize(filename=fn)
with open(fn) as f:
contents = f.read()
assert "testing.scale" in contents
def test_optimize_and_visualize(res):
a = | np.arange(100) | numpy.arange |
#!/usr/bin/env python3
"""Soil masking Transformer
"""
import argparse
import logging
import os
import numpy as np
from agpypeline import entrypoint, algorithm, geoimage
from agpypeline.environment import Environment
from agpypeline.checkmd import CheckMD
from cv2 import cv2
from osgeo import gdal
# from PIL import Image  # Used by code that's getting deprecated
from skimage import morphology
from configuration import ConfigurationSoilmask
SATURATE_THRESHOLD = 245
MAX_PIXEL_VAL = 255
SMALL_AREA_THRESHOLD = 200
class __internal__:
"""Class for functions intended for internal use only for this file
"""
def __init__(self):
"""Performs initialization of class instance
"""
@staticmethod
def prepare_metadata_for_geotiff(transformer_info: dict = None) -> dict:
"""Create geotiff-embedable metadata from extractor_info and other metadata pieces.
Arguments:
transformer_info: details about the transformer
Return:
A dict containing information to save with an image
"""
extra_metadata = {}
if transformer_info:
extra_metadata["transformer_name"] = str(transformer_info.get("name", ""))
extra_metadata["transformer_version"] = str(transformer_info.get("version", ""))
extra_metadata["transformer_author"] = str(transformer_info.get("author", ""))
extra_metadata["transformer_description"] = str(transformer_info.get("description", ""))
if "repository" in transformer_info and transformer_info["repository"] and \
"repUrl" in transformer_info["repository"]:
extra_metadata["transformer_repo"] = str(transformer_info["repository"]["repUrl"])
else:
extra_metadata["transformer_repo"] = ""
return extra_metadata
@staticmethod
def gen_plant_mask(color_img: np.ndarray, kernel_size: int = 3) -> np.ndarray:
"""Generates an image with plants masked in.
Arguments:
color_img: RGB image to mask
kernel_size: masking kernel size
Return:
An RGB image with plants masked in
"""
r_channel = color_img[:, :, 2]
g_channel = color_img[:, :, 1]
b_channel = color_img[:, :, 0]
sub_img = (g_channel.astype('int') - r_channel.astype('int')) > 1
mask = np.zeros_like(b_channel)
mask[sub_img] = MAX_PIXEL_VAL
blur = cv2.blur(mask, (kernel_size, kernel_size))
pix = np.array(blur)
sub_mask = pix > 128
mask_1 = np.zeros_like(b_channel)
mask_1[sub_mask] = MAX_PIXEL_VAL
return mask_1
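# Illustrative note: the mask is driven by a simple green-excess rule on BGR
# input, e.g. a pixel [40, 80, 50] (B, G, R) has G - R = 30 > 1 and is set to
# MAX_PIXEL_VAL before the blur/threshold step, while [60, 70, 80] with
# G - R = -10 stays 0.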
@staticmethod
def remove_small_area_mask(mask_img: np.ndarray, min_area_size: int) -> np.ndarray:
"""Removes small anomalies in the mask
Arguments:
mask_img: the mask image to remove anomalies from
min_area_size: the size of anomalies to look for
Return:
A new mask image with the anomalies removed
"""
mask_array = mask_img > 0
rel_array = morphology.remove_small_objects(mask_array, min_area_size)
rel_img = np.zeros_like(mask_img)
rel_img[rel_array] = MAX_PIXEL_VAL
return rel_img
@staticmethod
def remove_small_holes_mask(mask_image: np.ndarray, max_hole_size: int) -> np.ndarray:
"""Removes small holes from the mask image
Arguments:
mask_image: the mask image to remove holes from
max_hole_size: the maximum size of holes to remove
Return:
A new mask image with the holes removed
"""
mask_array = mask_image > 0
rel_array = morphology.remove_small_holes(mask_array, max_hole_size)
rel_img = np.zeros_like(mask_image)
rel_img[rel_array] = MAX_PIXEL_VAL
return rel_img
@staticmethod
def saturated_pixel_classification(gray_img: np.ndarray, base_mask: np.ndarray, saturated_mask: np.ndarray,
dilate_size: int = 0) -> np.ndarray:
"""Returns an image with pixes classified for masking
Arguments:
Returns:
A mask image with the pixels classified
"""
# add saturated area into basic mask
saturated_mask = morphology.binary_dilation(saturated_mask, morphology.diamond(dilate_size))
rel_img = np.zeros_like(gray_img)
rel_img[saturated_mask] = MAX_PIXEL_VAL
label_img, num = morphology.label(rel_img, connectivity=2, return_num=True)
rel_mask = base_mask
for idx in range(1, num):
match = (label_img == idx)
if np.sum(match) > 100000: # if the area is too large, do not add it into basic mask
continue
if not (match & base_mask).any():
continue
rel_mask = rel_mask | match
return rel_mask
@staticmethod
def over_saturation_process(rgb_image: np.ndarray, init_mask: np.ndarray, threshold: int = SATURATE_THRESHOLD) -> np.ndarray:
"""Removes over saturated areas from an image
Arguments:
rgb_image: the image to process
init_mask: the initial plant mask to refine
threshold: The saturation threshold value
Return:
A new image with over saturated pixels removed
"""
# connected component analysis for over saturation pixels
gray_img = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
mask_over = gray_img > threshold
mask_0 = gray_img < threshold
src_mask_array = init_mask > 0
mask_1 = src_mask_array & mask_0
mask_1 = morphology.remove_small_objects(mask_1, SMALL_AREA_THRESHOLD)
mask_over = morphology.remove_small_objects(mask_over, SMALL_AREA_THRESHOLD)
rel_mask = __internal__.saturated_pixel_classification(gray_img, mask_1, mask_over, 1)
rel_img = np.zeros_like(gray_img)
rel_img[rel_mask] = MAX_PIXEL_VAL
return rel_img
@staticmethod
def gen_saturated_mask(img: np.ndarray, kernel_size: int) -> np.ndarray:
"""Generates a mask of over saturated pixels
Arguments:
img: the image to generate the mask from
kernel_size: the size of masking kernel
Returns:
The image mask of over saturated pixels
"""
bin_mask = __internal__.gen_plant_mask(img, kernel_size)
bin_mask = __internal__.remove_small_area_mask(bin_mask,
500) # 500 is a parameter for number of pixels to be removed as small area
bin_mask = __internal__.remove_small_holes_mask(bin_mask,
300) # 300 is a parameter for number of pixels to be filled as small holes
bin_mask = __internal__.over_saturation_process(img, bin_mask, SATURATE_THRESHOLD)
bin_mask = __internal__.remove_small_holes_mask(bin_mask, 4000)
return bin_mask
@staticmethod
def gen_mask(img: np.ndarray, kernel_size: int) -> np.ndarray:
"""Generated the mask for plants
Arguments:
img: the image used to mask in plants
kernel_size: the size of the image processing kernel
Return:
A new image mask
"""
bin_mask = __internal__.gen_plant_mask(img, kernel_size)
bin_mask = __internal__.remove_small_area_mask(bin_mask, SMALL_AREA_THRESHOLD)
bin_mask = __internal__.remove_small_holes_mask(bin_mask,
3000) # 3000 is a parameter for number of pixels to be filled as small holes
return bin_mask
@staticmethod
def gen_rgb_mask(img: np.ndarray, bin_mask: np.ndarray) -> np.ndarray:
"""Applies the mask to the image
Arguments:
img: the source image to mask
bin_mask: the mask to apply to the image
Return:
A new image that had the mask applied
"""
rgb_mask = cv2.bitwise_and(img[:, :, 0:3], img[:, :, 0:3], mask=bin_mask)
if img.shape[2] > 3:
rgb_mask = np.concatenate((rgb_mask, img[:, :, 3:]), axis=2)
return rgb_mask
@staticmethod
def check_saturation(img: np.ndarray) -> list:
"""Checks the saturation of an image
Arguments:
img: the image to check
Return:
A list containing the over threshold rate and the under threshold rate
"""
# check how many percent of pix close to 255 or 0
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
over_threshold = gray_img > SATURATE_THRESHOLD
under_threshold = gray_img < 20 # 20 is a threshold to classify low pixel value
masked_count = 0 if img.shape[2] < 4 else np.sum(img[:, :, 3] == 0)
over_rate = float( | np.sum(over_threshold) | numpy.sum |
from rtcom import RealTimeCommunication
import cv2
import numpy as np
from time import sleep
from utils import VideoCapture, write_header, write_line
with RealTimeCommunication("pc") as rtcom:
rtcom.subscribe("rpi","camera") #Request unicast for this specific endpoint.
#(Too much packet drop on multicast for some reason.)
print("Press escape to quit.")
while True:
try:
image_data = rtcom["rpi"]["camera"]
if image_data is not None:
jpg_data = | np.asarray(image_data) | numpy.asarray |
import numpy as np
import matplotlib.pyplot as plt
import EDGE as edge
import collate as c
import pdb
from astropy.io import fits
'''
DEMO_analysis_imlup.py
HOW TO USE THIS SCRIPT:
Open a terminal and go to the location of this script
Launch the interactive mode for python by entering 'ipython' into a terminal
Then write 'run DEMO_analysis_imlup' and press enter.
PURPOSE:
Script that loads in data and models for IM Lup, and then finds the model/wall combination with the lowest chi^2
INPUTS:
In order for this script to run properly, you will need to change the paths to the correct directory.
If using this file as a template for other objects, it is likely that you will need to change it according to your needs.
If you want to save the plot that this script creates, set 'save' to True.
OUTPUTS:
Produces a plot of the model/wall combination with the lowest chi^2, along with a list of all the chi^2 values, model numbers, and best-fitting wall heights.
NOTES:
This script is supposed to act as a simple example of how to use EDGE for analysis, but is definitely not the rule. Significant changes will
likely need to be made in order to analyze your objects.
AUTHOR:
<NAME>, June 19th, 2017
'''
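#For example, following the instructions in the docstring above:
# $ ipython
# In [1]: run DEMO_analysis_imlup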
#Define the object name
obj = 'imlup'
#Set up paths. YOU WILL NEED TO CHANGE THIS!!!
datapath = '/Users/Connor/Desktop/Research/diad/EDGE/DEMO/data/'
modelpath = '/Users/Connor/Desktop/Research/diad/EDGE/DEMO/models/'
figpath = '/Users/Connor/Desktop/Research/diad/EDGE/DEMO/'
#-------------------------------------------------
#For the purposes of this example, you are not required to change anything below this line
#However, you should be able to understand what the code is doing before doing your own analysis
#-------------------------------------------------
#Define the jobs
jobs = np.arange(3)+1
#Define list of wall heights to try
altinh = [1,2,3,4,5]
#Load in the data from the fits file
targ = edge.loadObs(obj, datapath = datapath)
#Create a blank list to append onto later
chi2 = []
#Begin looping over each job
for job in jobs:
#Convert the job number into the right format. In this case, using a fill of 3
job = str(job).zfill(3)
#Load in the header. It will be used to check if jobs have failed.
hdu = fits.open(modelpath+obj+'_'+job+'.fits')
#Load in the model
model = edge.TTS_Model(obj, job, dpath = modelpath)
#Check to see if the model failed and if it did, move onto the next model.
try:
failed = hdu[0].header['FAILED']
pass
except KeyError:
#Create a blank array to append onto later for fitting the best wall height
chiwall = []
#Initialize the model. For a pre-transitional disk, this command would be more complicated
model.dataInit()
#Loop over each wall height to find the best fitting wall
for alt in altinh:
#Calculate the total emission from all the components of the disk + star
model.calc_total(altinh = alt, verbose = 0)
#If you are running your code with the filter deconvolution, uncomment this
#model.calc_filters(obj = targ)
#Append the chi2 vlaue and the height of the wall
chiwall.append([alt, edge.model_rchi2(targ, model)])
#Convert the list into an array
chiwall = np.array(chiwall)
#Find the best fitting wall based on its chi^2
bestwall = chiwall[ | np.argmin(chiwall[:,1]) | numpy.argmin |
# pylint: disable=missing-function-docstring, missing-module-docstring/
import pytest
import numpy as np
from numpy.random import randint
from pyccel.epyccel import epyccel
from modules import arrays
#==============================================================================
# TEST: 1D ARRAYS OF INT-32
#==============================================================================
def test_array_int32_1d_scalar_add(language):
f1 = arrays.array_int32_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_sub(language):
f1 = arrays.array_int32_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_mul(language):
f1 = arrays.array_int32_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_div(language):
f1 = arrays.array_int32_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_idiv(language):
f1 = arrays.array_int32_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add(language):
f1 = arrays.array_int32_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub(language):
f1 = arrays.array_int32_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_mul(language):
f1 = arrays.array_int32_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_idiv(language):
f1 = arrays.array_int32_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add_augassign(language):
f1 = arrays.array_int32_1d_add_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub_augassign(language):
f1 = arrays.array_int32_1d_sub_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_1(language):
f1 = arrays.array_int_1d_initialization_1
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_2(language):
f1 = arrays.array_int_1d_initialization_2
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_3(language):
f1 = arrays.array_int_1d_initialization_3
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
def test_array_int32_2d_C_scalar_add(language):
f1 = arrays.array_int32_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_sub(language):
f1 = arrays.array_int32_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_mul(language):
f1 = arrays.array_int32_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_idiv(language):
f1 = arrays.array_int32_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_add(language):
f1 = arrays.array_int32_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_sub(language):
f1 = arrays.array_int32_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_mul(language):
f1 = arrays.array_int32_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_idiv(language):
f1 = arrays.array_int32_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
def test_array_int32_2d_F_scalar_add(language):
f1 = arrays.array_int32_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_sub(language):
f1 = arrays.array_int32_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_mul(language):
f1 = arrays.array_int32_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_idiv(language):
f1 = arrays.array_int32_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_add(language):
f1 = arrays.array_int32_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_sub(language):
f1 = arrays.array_int32_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_mul(language):
f1 = arrays.array_int32_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_idiv(language):
f1 = arrays.array_int32_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 1D ARRAYS OF INT-64
#==============================================================================
def test_array_int_1d_scalar_add(language):
f1 = arrays.array_int_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_sub(language):
f1 = arrays.array_int_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_mul(language):
f1 = arrays.array_int_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_idiv(language):
f1 = arrays.array_int_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_add(language):
f1 = arrays.array_int_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_sub(language):
f1 = arrays.array_int_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_mul(language):
f1 = arrays.array_int_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_idiv(language):
f1 = arrays.array_int_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
def test_array_int_2d_C_scalar_add(language):
f1 = arrays.array_int_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_sub(language):
f1 = arrays.array_int_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_mul(language):
f1 = arrays.array_int_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_idiv(language):
f1 = arrays.array_int_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_add(language):
f1 = arrays.array_int_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_sub(language):
f1 = arrays.array_int_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_mul(language):
f1 = arrays.array_int_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_idiv(language):
f1 = arrays.array_int_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_initialization(language):
f1 = arrays.array_int_2d_C_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int)
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
def test_array_int_2d_F_scalar_add(language):
f1 = arrays.array_int_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_sub(language):
f1 = arrays.array_int_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_mul(language):
f1 = arrays.array_int_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_idiv(language):
f1 = arrays.array_int_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_add(language):
f1 = arrays.array_int_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_sub(language):
f1 = arrays.array_int_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
    x2 = np.copy(x1)
import numpy as np
import scipy.integrate as integ
import sympy as sym
import torch
import torch.autograd as auto
import deepymod_torch.VE_params as VE_params
######## MAJOR FUNCTIONALITY ########
# Data generation using Boltzmann superposition integrals.
def calculate_strain_stress(input_type, time_array, input_lambda, d_input_lambda, E_mods, viscs):
'''
    Main function for generating the accurate viscoelastic response to a provided manipulation for a given mechanical model.
Uses the principle of Boltzmann superposition and as such is only valid for linear viscoelasticity.
In addition, only GMMs can be used in this framework to calculate responses to strain manipulations and...
... only GKMs can be used to calculate responses to stress manipulations.
As such, the model parameters provided will be interpreted as defining a GMM if the specified input_type is 'Strain' and...
... the model parameters provided will be interpreted as defining a GKM if the specified input_type is 'Stress'.
Solutions are obtained using numerical integration from the SciPy package.
Parameters
input_type: string
Must be 'Strain' or 'Stress'. Defines the manipulation type and mechanical model.
time_array: Nx1 array
Time series previously defined.
        More time points do not guarantee greater accuracy but do increase computation time.
input_lambda: function
Returns the result of an analytical definition for the manipulation for a given time point.
d_input_lambda: function
Returns the result of the first derivative of the expression used to define the manipulation profile for any time point.
E_mods: list
The elastic moduli partially defining the mechanical model being manipulated.
All but the first value are paired with a corresponding viscosity.
viscs: list
The viscosities partially defining the mechanical model being manipulated.
Always one element shorter than E_mods.
Returns
response_array: array of same shape as time_array
'''
# Relaxation and creep functions occupy identical positions in mathematics. Whichever is needed depending on input_type...
# ... is created as a lambda function with input time, and explicit use of model parameters.
relax_creep_lambda = relax_creep(E_mods, viscs, input_type)
start_time_point = time_array[0]
integrand_lambda = lambda x, t: relax_creep_lambda(t-x)*d_input_lambda(x) # x is t', or dummy variable of integration.
integral_lambda = lambda t: integ.quad(integrand_lambda, start_time_point, t, args=(t))[0] # integral to perform at each time point.
response_array = np.array([])
input_array = np.array([])
for time_point in time_array:
# Term outside integral, corrects for discontinuity between assumed zero manipulation history and beginning of here defined manipulation.
first_term = input_lambda(start_time_point)*relax_creep_lambda(time_point-start_time_point)
# Integral term. Response to here defined manipulation.
second_term = integral_lambda(time_point)
response_array = np.append(response_array, first_term + second_term)
response_array = response_array.reshape(time_array.shape)
return response_array
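# Hedged usage sketch (illustrative only): the moduli, viscosity and ramp rate below are
# hypothetical values chosen purely to show the calling convention for a strain-controlled
# two-parameter GMM; this helper is not part of the original module.
def _example_ramp_strain_response():
    time_array = np.linspace(0, 10, 200).reshape(-1, 1)
    ramp_rate = 0.01                         # hypothetical constant strain rate
    input_lambda = lambda t: ramp_rate * t   # strain history epsilon(t)
    d_input_lambda = lambda t: ramp_rate     # its time derivative
    E_mods = [1e3, 5e2]                      # hypothetical moduli: equilibrium spring + one Maxwell arm
    viscs = [1e2]                            # hypothetical viscosity of that arm
    return calculate_strain_stress('Strain', time_array, input_lambda, d_input_lambda, E_mods, viscs)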
def relax_creep(E_mods, viscs, input_type):
'''
    Incorporates the mechanical model definition and manipulation type into the kernel function used within the Boltzmann superposition integral.
    The function returned is either the relaxation function (input_type='Strain') or the creep function (input_type='Stress'); the result is used analogously in either case.
If the input_type is 'Strain' then the parameters are assumed to refer to a Maxwell model, whereas
if the input_type is 'Stress' then the parameters are assumed to refer to a Kelvin model.
Parameters
E_mods: list
The elastic moduli partially defining the mechanical model being manipulated.
All but the first value are paired with a corresponding viscosity.
viscs: list
The viscosities partially defining the mechanical model being manipulated.
Always one element shorter than E_mods.
input_type: string
Must be 'Strain' or 'Stress'. Defines the manipulation type and mechanical model.
Returns
relax_creep_lambda: lambda function
With single parameter of time.
'''
# Converted to arrays for easy computation of relevant tau (characteristic times) values
    E_mods_1plus_array = np.array(E_mods[1:])
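    # Hedged sketch of the kind of kernel this function returns (an illustrative assumption,
    # not the original implementation): with tau_i = eta_i / E_i, a GMM relaxation modulus and
    # a GKM creep compliance are commonly written as Prony series, e.g.
    #   taus = np.array(viscs) / E_mods_1plus_array
    #   if input_type == 'Strain':   # relaxation function of a GMM
    #       relax_creep_lambda = lambda t: E_mods[0] + np.sum(E_mods_1plus_array * np.exp(-t / taus))
    #   else:                        # creep function of a GKM
    #       relax_creep_lambda = lambda t: 1 / E_mods[0] + np.sum((1 - np.exp(-t / taus)) / E_mods_1plus_array)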
import functools
from collections import OrderedDict
from itertools import product
import numpy as np
import pandas as pd
from estimagic import batch_evaluators
from estimagic.config import DEFAULT_N_CORES
from estimagic.differentiation import finite_differences
from estimagic.differentiation.generate_steps import generate_steps
from estimagic.differentiation.richardson_extrapolation import richardson_extrapolation
from estimagic.optimization.utilities import namedtuple_from_kwargs
def first_derivative(
func,
params,
func_kwargs=None,
method="central",
n_steps=1,
base_steps=None,
scaling_factor=1,
lower_bounds=None,
upper_bounds=None,
step_ratio=2,
min_steps=None,
f0=None,
n_cores=DEFAULT_N_CORES,
error_handling="continue",
batch_evaluator="joblib",
return_func_value=False,
key=None,
):
"""Evaluate first derivative of func at params according to method and step options.
Internally, the function is converted such that it maps from a 1d array to a 1d
array. Then the Jacobian of that function is calculated. The resulting derivative
estimate is always a :class:`numpy.ndarray`.
    The parameters and the function output can be pandas objects (Series or DataFrames
    with a "value" column). In that case the output of first_derivative is also a pandas
    object with an appropriate index and columns.
    For a detailed description of all options that influence the step size, as well as an
    explanation of how steps are adjusted to bounds in case of a conflict,
    see :func:`~estimagic.differentiation.generate_steps.generate_steps`.
Args:
func (callable): Function of which the derivative is calculated.
params (numpy.ndarray, pandas.Series or pandas.DataFrame): 1d numpy array or
:class:`pandas.DataFrame` with parameters at which the derivative is
calculated. If it is a DataFrame, it can contain the columns "lower_bound"
and "upper_bound" for bounds. See :ref:`params`.
func_kwargs (dict): Additional keyword arguments for func, optional.
method (str): One of ["central", "forward", "backward"], default "central".
n_steps (int): Number of steps needed. For central methods, this is
the number of steps per direction. It is 1 if no Richardson extrapolation
is used.
        base_steps (numpy.ndarray, optional): 1d array of the same length as params.
base_steps * scaling_factor is the absolute value of the first (and possibly
only) step used in the finite differences approximation of the derivative.
If base_steps * scaling_factor conflicts with bounds, the actual steps will
be adjusted. If base_steps is not provided, it will be determined according
to a rule of thumb as long as this does not conflict with min_steps.
scaling_factor (numpy.ndarray or float): Scaling factor which is applied to
            base_steps. If it is a numpy.ndarray, it needs to be as long as params.
scaling_factor is useful if you want to increase or decrease the base_step
relative to the rule-of-thumb or user provided base_step, for example to
benchmark the effect of the step size. Default 1.
lower_bounds (numpy.ndarray): 1d array with lower bounds for each parameter. If
params is a DataFrame and has the columns "lower_bound", this will be taken
as lower_bounds if now lower_bounds have been provided explicitly.
upper_bounds (numpy.ndarray): 1d array with upper bounds for each parameter. If
params is a DataFrame and has the columns "upper_bound", this will be taken
as upper_bounds if no upper_bounds have been provided explicitly.
step_ratio (float, numpy.array): Ratio between two consecutive Richardson
            extrapolation steps in the same direction. Default 2.0. Has to be larger
than one. The step ratio is only used if n_steps > 1.
min_steps (numpy.ndarray): Minimal possible step sizes that can be chosen to
accommodate bounds. Must have same length as params. By default min_steps is
            equal to base_steps, i.e. step size is not decreased beyond what is optimal
according to the rule of thumb.
f0 (numpy.ndarray): 1d numpy array with func(x), optional.
n_cores (int): Number of processes used to parallelize the function
evaluations. Default 1.
error_handling (str): One of "continue" (catch errors and continue to calculate
derivative estimates. In this case, some derivative estimates can be
missing but no errors are raised), "raise" (catch errors and continue
            to calculate derivative estimates at first but raise an error if all
evaluations for one parameter failed) and "raise_strict" (raise an error
as soon as a function evaluation fails).
batch_evaluator (str or callable): Name of a pre-implemented batch evaluator
(currently 'joblib' and 'pathos_mp') or Callable with the same interface
as the estimagic batch_evaluators.
return_func_value (bool): If True, return a tuple with the derivative and the
function value at params. Default False. This is useful when using
first_derivative during optimization.
key (str): If func returns a dictionary, take the derivative of
func(params)[key].
Returns:
derivative (numpy.ndarray, pandas.Series or pandas.DataFrame): The estimated
first derivative of func at params. The shape of the output depends on the
dimension of params and func(params):
- f: R -> R leads to shape (1,), usually called derivative
- f: R^m -> R leads to shape (m, ), usually called Gradient
- f: R -> R^n leads to shape (n, 1), usually called Jacobian
- f: R^m -> R^n leads to shape (n, m), usually called Jacobian
float, dict, numpy.ndarray or pandas.Series: The function value at params, only
returned if return_func_value is True.
"""
lower_bounds, upper_bounds = _process_bounds(lower_bounds, upper_bounds, params)
# handle keyword arguments
func_kwargs = {} if func_kwargs is None else func_kwargs
partialed_func = functools.partial(func, **func_kwargs)
# convert params to numpy, but keep label information
params_index = (
params.index if isinstance(params, (pd.DataFrame, pd.Series)) else None
)
x = params["value"].to_numpy() if isinstance(params, pd.DataFrame) else params
x = np.atleast_1d(x).astype(float)
if np.isnan(x).any():
raise ValueError("The parameter vector must not contain NaNs.")
# generate the step array
steps = generate_steps(
x=x,
method=method,
n_steps=n_steps,
target="first_derivative",
base_steps=base_steps,
scaling_factor=scaling_factor,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
step_ratio=step_ratio,
min_steps=min_steps,
)
# generate parameter vectors at which func has to be evaluated as numpy arrays
evaluation_points = []
for step_arr in steps:
for i, j in product(range(n_steps), range(len(x))):
if np.isnan(step_arr[i, j]):
evaluation_points.append(np.nan)
else:
point = x.copy()
point[j] += step_arr[i, j]
evaluation_points.append(point)
# convert the numpy arrays to whatever is needed by func
evaluation_points = _convert_evaluation_points_to_original(
evaluation_points, params
)
# we always evaluate f0, so we can fall back to one-sided derivatives if
# two-sided derivatives fail. The extra cost is negligible in most cases.
if f0 is None:
evaluation_points.append(params)
# do the function evaluations, including error handling
batch_error_handling = "raise" if error_handling == "raise_strict" else "continue"
raw_evals = _nan_skipping_batch_evaluator(
func=partialed_func,
arguments=evaluation_points,
n_cores=n_cores,
error_handling=batch_error_handling,
batch_evaluator=batch_evaluator,
)
# extract information on exceptions that occurred during function evaluations
exc_info = "\n\n".join([val for val in raw_evals if isinstance(val, str)])
raw_evals = [val if not isinstance(val, str) else np.nan for val in raw_evals]
# store full function value at params as func_value and a processed version of it
# that we need to calculate derivatives as f0
if f0 is None:
f0 = raw_evals[-1]
raw_evals = raw_evals[:-1]
func_value = f0
f0 = f0[key] if isinstance(f0, dict) else f0
f_was_scalar = np.isscalar(f0)
out_index = f0.index if isinstance(f0, pd.Series) else None
f0 = np.atleast_1d(f0)
# convert the raw evaluations to numpy arrays
raw_evals = _convert_evals_to_numpy(raw_evals, key)
# apply finite difference formulae
evals = np.array(raw_evals).reshape(2, n_steps, len(x), -1)
evals = np.transpose(evals, axes=(0, 1, 3, 2))
evals = namedtuple_from_kwargs(pos=evals[0], neg=evals[1])
jac_candidates = {}
for m in ["forward", "backward", "central"]:
jac_candidates[m] = finite_differences.jacobian(evals, steps, f0, m)
# get the best derivative estimate out of all derivative estimates that could be
# calculated, given the function evaluations.
orders = {
"central": ["central", "forward", "backward"],
"forward": ["forward", "backward"],
"backward": ["backward", "forward"],
}
if n_steps == 1:
jac = _consolidate_one_step_derivatives(jac_candidates, orders[method])
else:
richardson_candidates = _compute_richardson_candidates(
jac_candidates, steps, n_steps
)
jac = _consolidate_extrapolated(richardson_candidates)
# raise error if necessary
if error_handling in ("raise", "raise_strict") and np.isnan(jac).any():
raise Exception(exc_info)
# results processing
derivative = jac.flatten() if f_was_scalar else jac
derivative = _add_index_to_derivative(derivative, params_index, out_index)
res = (derivative, func_value) if return_func_value else derivative
return res
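# Hedged usage sketch (illustrative, not part of the library): a minimal call of
# first_derivative on a simple quadratic, based only on the signature documented above.
# The helper name and the sample point are hypothetical.
def _example_gradient_of_sum_of_squares():
    params = np.array([1.0, 2.0, 3.0])
    gradient = first_derivative(func=lambda x: (x ** 2).sum(), params=params)
    # For f(x) = sum(x_i ** 2) the exact gradient is 2 * x, so this should be close to [2., 4., 6.].
    return gradient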
def _process_bounds(lower_bounds, upper_bounds, params):
lower_bounds = np.atleast_1d(lower_bounds) if lower_bounds is not None else None
upper_bounds = np.atleast_1d(upper_bounds) if upper_bounds is not None else None
if isinstance(params, pd.DataFrame):
if lower_bounds is None and "lower_bound" in params.columns:
lower_bounds = params["lower_bound"].to_numpy()
if upper_bounds is None and "upper_bound" in params.columns:
upper_bounds = params["upper_bound"].to_numpy()
return lower_bounds, upper_bounds
def _convert_evaluation_points_to_original(evaluation_points, params):
if np.isscalar(params):
res = [p[0] if isinstance(p, np.ndarray) else p for p in evaluation_points]
elif isinstance(params, pd.DataFrame):
res = []
for point in evaluation_points:
if isinstance(point, np.ndarray):
pandas_point = params.copy(deep=True)
pandas_point["value"] = point
res.append(pandas_point)
else:
res.append(point)
elif isinstance(params, pd.Series):
res = [
pd.Series(p, index=params.index) if isinstance(p, np.ndarray) else p
for p in evaluation_points
]
else:
res = evaluation_points
return res
def _convert_evals_to_numpy(raw_evals, key):
"""harmonize the output of the function evaluations.
The raw_evals might contain dictionaries of which we only need one entry, scalar
np.nan where we need arrays filled with np.nan or pandas objects. The processed
evals only contain numpy arrays.
"""
# get rid of dictionaries
evals = [val[key] if isinstance(val, dict) else val for val in raw_evals]
# get rid of pandas objects
evals = [np.array(val) if isinstance(val, pd.Series) else val for val in evals]
# find out the correct output shape
try:
array = next(x for x in evals if hasattr(x, "shape") or isinstance(x, dict))
out_shape = array.shape
except StopIteration:
out_shape = "scalar"
# convert to correct output shape
if out_shape == "scalar":
evals = [np.atleast_1d(val) for val in evals]
else:
for i in range(len(evals)):
if isinstance(evals[i], float) and np.isnan(evals[i]):
                evals[i] = np.full(out_shape, np.nan)
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import sys
import random
# from contact_point_dataset_torch_multi_label import MyDataset
from simple_dataset import MyDataset
import os
import time
import argparse
from functools import partial
import time
from torch.utils.data import DataLoader
import torch
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
import datetime
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from train_helper import *
from bullet_helper import *
from rotation_lib import *
import s1_model_multi_label as s1_model
import s3_classifier_model as s3_model
from s2_utils import *
import s3_replay_buffer_pose as ReplayBuffer
import ES_multithread
import multiprocessing
from scipy.special import softmax
def restore_model_s1(epoch, save_top_dir, sess):
ckpt_path = os.path.join(save_top_dir,str(epoch)+'model.ckpt')
variables = slim.get_variables_to_restore()
variables_to_restore = [v for v in variables if 's2a_' not in v.name and ('s1_' in v.name)]
# for v in variables_to_restore:
# print(v.name)
saver = tf.train.Saver(variables_to_restore)
print("restoring from %s" % ckpt_path)
saver.restore(sess, ckpt_path)
def calc_pose_cem_init(args, pc_o_all, pc_h_all, transl_s1_all, aa_s1_all):
b_size = transl_s1_all.shape[0]
cem_init_transl = np.zeros((b_size, 3))
cem_init_aa = np.zeros((b_size, 3))
cem_rotation_center_o = np.zeros((b_size, 3))
#pc_tmp_o_all = np.zeros((b_size, args.s3_num_cp, 3))
#pc_tmp_h_all = np.zeros((b_size, args.s3_num_cp, 3))
#pc_tmp_rotated_s1_o_all = np.zeros((b_size, args.s3_num_cp, 3))
for bi in range(b_size):
transl_s1 = transl_s1_all[bi]
aa_s1 = aa_s1_all[bi]
cem_init_transl[bi] = transl_s1
cem_init_aa[bi] = aa_s1
#cem_rotation_center_o[bi] = np.mean(pc_o_all[bi], axis=0)
# debug info
#pc_tmp_o_all[bi] = pc_tmp_o
#pc_tmp_rotated_s1_o_all[bi] = pc_tmp_rotated_s1_o
#pc_tmp_h_all[bi] = pc_tmp_h
info_dict = {
#'s3_partial_pc_o': np.copy(pc_tmp_o_all),
#'s3_partial_pc_rotated_s1_o': np.copy(pc_tmp_rotated_s1_o_all),
#'s3_partial_pc_h': np.copy(pc_tmp_h_all),
'cem_rotation_center_o': np.copy(cem_rotation_center_o),
}
return cem_init_transl, cem_init_aa, cem_rotation_center_o, info_dict
from s3_rl_collect import cem_transform_pc_batch, cem_eval, bullet_check
import s3_bullet_checker as bullet_checker
import s3_bullet_checker_eval as bullet_checker_eval
def train(args, train_set, train_loader, test_set, test_loader, writer, result_folder, file_name, extra_dict=None):
model_folder = os.path.join(result_folder, 'models')
can_write = not (writer is None)
# stage 1
pc_o_pl, pc_h_pl, z_pl, gt_transl_pl, gt_aa_pl, pose_mult_pl = s1_model.placeholder_inputs(args.batch_size, 4096, args)
pred_transl_tf, pred_aa_tf, end_points_s1 = s1_model.get_model(pc_o_pl, pc_h_pl, z_pl)
loss_transl_tf, loss_aa_tf, min_pose_idx_tf = s1_model.get_loss(pred_transl_tf, pred_aa_tf, gt_transl_pl, gt_aa_pl, pose_mult_pl, float(args.loss_transl_const), end_points_s1)
loss_s1_tf = float(args.loss_transl_const) * loss_transl_tf + loss_aa_tf
#stage 3
pc_combined_pl, gt_succ_label_pl = s3_model.placeholder_inputs(args.batch_size, 4096, with_normal=False, args=args)
pred_succ_cla_score_tf, end_points = s3_model.get_model(pc_combined_pl)
pred_succ_cla_tf = tf.math.argmax(pred_succ_cla_score_tf, axis=-1)
# loss_succ_cla_tf = s3_model.get_loss(pred_succ_cla_score_tf, gt_succ_label_pl, end_points)
# loss_tf = loss_listnet_tf + loss_ce_tf
# loss_tf = tf.boolean_mask(loss_tf, non_nan_mask_pl)
# loss_tf = tf.reduce_mean(loss_tf)
# print('loss tf', loss_tf)
# train_op = tf.train.AdamOptimizer(learning_rate=args.learning_rate).minimize(loss_tf)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(init_op)
saver = tf.train.Saver(max_to_keep=1000)
loss_tracker = LossTracker()
loss_tracker_test = LossTracker()
epoch_init = 0
if not train_loader is None:
epoch_iter = len(train_loader)
if args.pretrain_s1:
pretrain_s1_folder = os.path.abspath(os.path.join(result_folder, '..', '..', args.pretrain_s1_folder, args.pretrain_s1_model_name, 'models'))
restore_model_s1(args.pretrain_s1_epoch, pretrain_s1_folder, sess)
if args.pretrain_s3:
pretrain_s3_folder = os.path.abspath(os.path.join(result_folder, '..', '..', args.pretrain_s3_folder, args.pretrain_s3_model_name, 'models'))
if args.pretrain_s3_folder_dir != '':
pretrain_s3_folder = args.pretrain_s3_folder_dir
restore_model_s3(args.pretrain_s3_epoch, pretrain_s3_folder, sess)
else:
print(args.s3_model_dir)
tmp = restore_model_s3_second_last(args.s3_model_dir, sess)
assert tmp # make sure that one model is restored
if args.restore_model_epoch != -1:
epoch_init = args.restore_model_epoch
restore_model_folder = os.path.abspath(os.path.join(result_folder, '..', args.restore_model_name, 'models'))
restore_model_generic(epoch_init, restore_model_folder, saver, sess)
total_ct = 0
cem = ES_multithread.Searcher(
action_dim=6,
max_action=float(args.cem_max_transl),
max_action_aa=float(args.cem_max_aa),
sigma_init=float(args.cem_sigma_init_transl),
sigma_init_aa=float(args.cem_sigma_init_aa),
pop_size=args.cem_pop_size,
damp=float(args.cem_damp_transl),
damp_limit=float(args.cem_damp_limit_transl),
damp_aa=float(args.cem_damp_aa),
damp_limit_aa=float(args.cem_damp_limit_aa),
parents=args.cem_parents,
)
fcl_hook_dict = extra_dict['fcl_hook_dict']
fcl_object_dict = extra_dict['fcl_object_dict']
if not args.no_bullet_check:
pool = multiprocessing.Pool(processes=args.batch_size)
p_list = pool.map(partial(p_init_multithread, gui=args.bullet_gui), range(args.batch_size))
if not args.run_test:
saved_buffer_name_list = [get_2nd_last_dir(args.s3_buffer_dir)]
replay_buffer = ReplayBuffer.combine_replay_buffer(saved_buffer_name_list, train_set, args.home_dir_data, preload_data=False)
eval_folder_dir = os.path.join(result_folder, 'eval')
mkdir_if_not(eval_folder_dir)
# for epoch_i in range(args.max_epochs):
epoch_i = 0
tested = False
while True:
if epoch_i == args.max_epochs:
break
loss_tracker.reset()
run_test = False
if (not args.no_eval) and (((epoch_i + 1) % args.eval_epoch_freq == 0) or args.run_test):
if not tested:
run_test = True
epoch_i -= 1
tested = True
if tested:
tested = False
if args.run_test:
epoch_i = 0
if not run_test:
loader = train_loader
dataset = train_set
else:
loader = test_loader
dataset = test_set
info_dict_all = {}
for i, batch_dict in enumerate(loader):
total_ct += 1
if not args.no_bullet_check:
if (total_ct + 1) % 20 == 0:
print('reset')
p_list = pool.map(partial(p_reset_multithread, p_list=p_list, gui=args.bullet_gui), range(args.batch_size))
log_it = ((total_ct % args.log_freq ) == 0) and can_write
pc_o = batch_dict['input1']
pc_h = batch_dict['input2']
b_size = pc_o.shape[0]
object_urdf = batch_dict['urdf_o']
hook_urdf = batch_dict['urdf_h']
result_file_name = batch_dict['result_file_name']
fcl_hook_model = [fcl_hook_dict[name] for name in batch_dict['hook_name']]
fcl_object_model = [fcl_object_dict[name] for name in batch_dict['object_name']]
if run_test:
eval_sample_n = args.eval_sample_n
else:
eval_sample_n = 1
info_dict = {}
bullet_succ = np.zeros((b_size, eval_sample_n))
for i_eval in range(args.eval_sample_n):
z = np.random.normal(size=(pc_o.shape[0],1,32))
feed_dict_s1 = {
pc_o_pl: pc_o[:, :, :3],
pc_h_pl: pc_h[:, :, :3],
z_pl: z,
}
pred_transl, pred_aa = sess.run([
pred_transl_tf, pred_aa_tf
], feed_dict=feed_dict_s1)
# stage 3
cem_init_transl, cem_init_aa, cem_rotation_center_o, cem_info_dict = calc_pose_cem_init(
args=args,
pc_o_all=pc_o[:, :, :3],
pc_h_all=pc_h[:, :, :3],
transl_s1_all=pred_transl,
aa_s1_all=pred_aa,
)
pc_o_cem_init = transform_pc_batch(pc_o[:, :, :3], cem_init_transl, cem_init_aa)
cem_eval_partial = partial(cem_eval,
pc_o=pc_o_cem_init,
pc_h=pc_h[:, :, :3],
rotation_center_o=cem_rotation_center_o,
sess_tf=sess,
pc_combined_pl=pc_combined_pl,
pred_succ_cla_score_tf=pred_succ_cla_score_tf,
)
cem_search_info_dict = {
'cem_elite_pose': np.zeros((b_size, args.cem_n_iter, 6)),
'cem_elite_pose_scores': np.zeros((b_size, args.cem_n_iter))
}
cem_max_score = np.zeros((b_size))
cem_out_transl = np.zeros((b_size, 3))
cem_out_aa = np.zeros((b_size, 3))
cem_out_pose = np.zeros((b_size, 6))
for ii in range(args.cem_run_n):
_, cem_out_pose_tmp, cem_score_tmp, cem_search_info_dict_tmp = cem.search(
b_size,
np.array([[0, 0, 0, 1e-6, 0, 0]] * b_size),
cem_eval_partial,
n_iter=args.cem_n_iter,
elitism=True,
visualize=False,
visualize_func=None,
)
for jj in range(b_size):
cur_score = cem_score_tmp[jj]
if cem_max_score[jj] < cur_score:
cem_max_score[jj] = cur_score
cem_out_transl[jj] = cem_out_pose_tmp[jj, :3]
cem_out_aa[jj] = cem_out_pose_tmp[jj, 3:]
cem_out_pose[jj] = cem_out_pose_tmp[jj]
cem_search_info_dict['cem_elite_pose'][jj] = cem_search_info_dict_tmp['cem_elite_pose'][jj]
cem_search_info_dict['cem_elite_pose_scores'][jj] = cem_search_info_dict_tmp['cem_elite_pose_scores'][jj]
# convert the cem output pose to object pose
pc_o_cem = cem_transform_pc_batch(pc_o_cem_init[:, :, :3], cem_rotation_center_o, cem_out_transl, cem_out_aa)
final_pred_transl, final_pred_aa = best_fit_transform_batch(pc_o[:, :, :3], pc_o_cem)
if not args.no_bullet_check:
bullet_check_one_pose = bullet_checker_eval.check_one_pose_simple if args.use_bullet_checker else bullet_checker.check_one_pose_simple
# bullet check
bullet_check_func = partial(
bullet_check,
bullet_check_one_pose=bullet_check_one_pose,
transl=final_pred_transl,
aa=final_pred_aa,
p_list=p_list,
result_file_name=result_file_name,
hook_urdf=hook_urdf,
object_urdf=object_urdf,
fcl_hook_model=fcl_hook_model,
fcl_object_model=fcl_object_model,
gui=args.bullet_gui,
)
for bi, (flag_tmp, bullet_final_transl, bullet_final_quat) in enumerate(pool.imap(bullet_check_func, range(b_size))):
succ = 1. if flag_tmp else 0.
bullet_succ[bi, i_eval] = succ
hook_name, object_name = split_result_file_name(result_file_name[bi])
if not run_test:
replay_buffer.add(object_name, hook_name, np.append(final_pred_transl[bi], final_pred_aa[bi]), succ)
for ii in range(b_size):
if i_eval == 0:
info_dict[result_file_name[ii]] = []
info_dict[result_file_name[ii]].append({
'z': z[ii].tolist(),
's1_transl': pred_transl[ii].tolist(),
's1_aa': pred_aa[ii].tolist(),
'cem_init_transl': cem_init_transl[ii].tolist(),
'cem_init_aa': cem_init_aa[ii].tolist(),
'succ': bullet_succ[ii, i_eval],
'final_pred_transl': final_pred_transl[ii].tolist(),
'final_pred_aa': final_pred_aa[ii].tolist(),
'cem_out_transl': cem_out_pose[ii, :3].tolist(),
'cem_out_aa': cem_out_pose[ii, 3:].tolist(),
# 'cem_elite_pose': cem_elite_pose[ii].tolist()
})
for tmp_key in cem_info_dict:
info_dict[result_file_name[ii]][-1][tmp_key] = cem_info_dict[tmp_key][ii].tolist()
for tmp_key in cem_search_info_dict:
info_dict[result_file_name[ii]][-1][tmp_key] = cem_search_info_dict[tmp_key][ii].tolist()
info_dict_all.update(info_dict)
loss_dict = {
'bullet_succ_acc': np.mean(bullet_succ),
'bullet_succ_acc_max': np.mean(np.max(bullet_succ, axis=-1))
}
loss_tracker.add_dict(loss_dict)
if log_it:
write_tb(loss_dict, writer, 'test' if run_test else 'train', total_ct)
print('epoch {} iter {}/{} {}'.format(epoch_i, i, epoch_iter, loss_dict_to_str(loss_dict)))
loss_dict_epoch = loss_tracker.stat()
print('cumulative, epoch {} {} {}'.format(epoch_i, 'test' if run_test else 'train', loss_dict_to_str(loss_dict_epoch)))
# periodically save buffer
if int(total_ct) % args.s3_buffer_freq == 0 and (not run_test):
print('save buffer', args.s3_buffer_dir, replay_buffer.buffer_succ.size, replay_buffer.buffer_fail.size, )
replay_buffer.save_pkl(args.s3_buffer_dir, replay_buffer)
# save info dict
info_dict_dir = os.path.join(eval_folder_dir, '{}_eval_epoch_{}_ct_{}_{}.json'.format(file_name, str(epoch_i + 1), int(total_ct), 'test' if run_test else 'train'))
save_json(info_dict_dir, info_dict_all)
# periodically load s3 model
if int(total_ct) % args.s3_model_freq == 0 and (not run_test):
restore_model_s3_second_last(args.s3_model_dir, sess)
if (total_ct % args.model_save_freq == 0) and not args.no_save and (not run_test):
save_model_generic(epoch_init + total_ct, model_folder, saver, sess)
if total_ct % 5 == 0 or total_ct == epoch_iter:
# save info dict
info_dict_dir = os.path.join(eval_folder_dir, '{}_eval_epoch_{}_{}.json'.format(file_name, str(epoch_i + 1), 'test' if run_test else 'train'))
save_json(info_dict_dir, info_dict_all)
loss_dict_epoch = loss_tracker.stat()
if can_write:
write_tb(loss_dict_epoch, writer, 'test_epoch' if run_test else 'train_epoch', total_ct)
print('epoch {} {} {}'.format(epoch_i, 'test' if run_test else 'train', loss_dict_to_str(loss_dict_epoch)))
epoch_i += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument('--pointset_dir', default='/scr2/')
parser.add_argument('--bohg4', action='store_true')
parser.add_argument('--no_vis', action='store_true')
parser.add_argument('--model_name', default='s3_rl_collect')
parser.add_argument('--comment', default='')
parser.add_argument('--exp_name', default='exp_s3')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--log_freq', type=int, default=2)
parser.add_argument('--train_list', default='train_list')
parser.add_argument('--test_list', default='test_list')
parser.add_argument('--restrict_object_cat', default='')
parser.add_argument('--run_test', action='store_true')
parser.add_argument('--no_save', action='store_true')
parser.add_argument('--overfit', action='store_true')
parser.add_argument('--restore_model_name', default='')
parser.add_argument('--restore_model_epoch', type=int, default=-1)
parser.add_argument('--max_epochs', type=int, default=10000)
parser.add_argument('--eval_epoch_freq', type=int, default=2)
parser.add_argument('--eval_sample_n', type=int, default=1)
parser.add_argument('--model_save_freq', type=int, default=3000)
parser.add_argument('--no_eval', action='store_true')
parser.add_argument('--loss_transl_const', default=1)
parser.add_argument('--data_one_pose', action='store_true')
parser.add_argument('--data_vary_scale', action='store_true')
parser.add_argument('--data_more_pose', action='store_true')
parser.add_argument('--data_vary_scale_more_pose', action='store_true')
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--learning_rate', type=float, default=1e-4)
#s1 argument
parser.add_argument('--z_dim', type=int, default=32)
#s2 argument
parser.add_argument('--top_k_o', type=int, default=128)
parser.add_argument('--top_k_h', type=int, default=128)
parser.add_argument('--n_gt_sample', type=int, default=128)
parser.add_argument('--top_k_corr', type=int, default=256)
parser.add_argument('--pose_loss_l2', action='store_true')
#s3 argument
parser.add_argument('--s3_num_cp', type=int, default=3)
parser.add_argument('--cem_run_n', type=int, default=1)
parser.add_argument('--cem_n_iter', type=int, default=10)
parser.add_argument('--cem_max_transl', default=0.02)
parser.add_argument('--cem_max_aa', default=0.5)
parser.add_argument('--cem_sigma_init_transl', default=1e-2)
parser.add_argument('--cem_sigma_init_aa', default=1e-1)
parser.add_argument('--cem_pop_size', type=int, default=32)
parser.add_argument('--cem_damp_transl', default=0.005)
parser.add_argument('--cem_damp_limit_transl', default=1e-2)
parser.add_argument('--cem_damp_aa', default=0.1)
parser.add_argument('--cem_damp_limit_aa', default=0.1)
parser.add_argument('--cem_parents', type=int, default=10)
parser.add_argument('--bullet_gui', action='store_true')
parser.add_argument('--s3_train_folder_dir', default='/juno/downloads/new_hang_training/')
parser.add_argument('--s3_train_name', default='s3')
parser.add_argument('--s3_device_name', default='bohg4')
parser.add_argument('--s3_buffer_dir', default='')
parser.add_argument('--s3_model_dir', default='')
parser.add_argument('--no_fcl', action='store_true')
parser.add_argument('--s3_buffer_freq', default=1000, type=int)
parser.add_argument('--s3_model_freq', default=1000, type=int)
parser.add_argument('--use_bullet_checker', action='store_true')
parser.add_argument('--pretrain_s1', action='store_true')
parser.add_argument('--pretrain_s1_folder', default='exp_s1')
parser.add_argument('--pretrain_s1_model_name', default='Feb17_23-51-13_s1_matching_model_loss_transl_const_1000')
parser.add_argument('--pretrain_s1_epoch', default=40500, type=int)
parser.add_argument('--pretrain_s3', action='store_true')
parser.add_argument('--pretrain_s3_folder', default='exp_s3')
parser.add_argument('--pretrain_s3_folder_dir', default='')
parser.add_argument('--pretrain_s3_model_name', default='Feb19_14-26-47_s3_classifier_model_new_data')
parser.add_argument('--pretrain_s3_epoch', default=750000, type=int)
parser.add_argument('--lin', action='store_true')
parser.add_argument('--no_bullet_check', action='store_true')
parser.add_argument('--seed',default=3323,type=int)
parser.add_argument('--parallel_n', default=-1, type=int)
parser.add_argument('--parallel_id', default=-1, type=int)
args = parser.parse_args()
args.data_more_pose = True
print("args.seed",args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
    np.random.seed(args.seed)
from mlgames.board import Board
import numpy as np
BOARD_SIZE = 9
STARTING_WALLS = 10
def a(arr):
    return np.array(arr)
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
numCols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
from skfda import FDataGrid
from skfda._utils import _check_estimator
from skfda.datasets import (make_multimodal_samples, make_multimodal_landmarks,
make_sinusoidal_process)
from skfda.exploratory.stats import mean
from skfda.preprocessing.registration import (
normalize_warping, invert_warping, landmark_shift_deltas, landmark_shift,
landmark_registration_warping, landmark_registration, ShiftRegistration)
from skfda.preprocessing.registration.validation import (
AmplitudePhaseDecomposition, LeastSquares,
SobolevLeastSquares, PairwiseCorrelation)
from skfda.representation.basis import Fourier
from skfda.representation.interpolation import SplineInterpolation
import unittest
from sklearn.exceptions import NotFittedError
import numpy as np
class TestWarping(unittest.TestCase):
"""Test warpings functions"""
def setUp(self):
"""Initialization of samples"""
self.time = np.linspace(-1, 1, 50)
interpolation = SplineInterpolation(3, monotone=True)
self.polynomial = FDataGrid([self.time**3, self.time**5],
self.time, interpolation=interpolation)
def test_invert_warping(self):
inverse = invert_warping(self.polynomial)
# Check if identity
id = self.polynomial.compose(inverse)
np.testing.assert_array_almost_equal([self.time, self.time],
id.data_matrix[..., 0],
decimal=3)
def test_standard_normalize_warping(self):
"""Test normalization to (0, 1)"""
normalized = normalize_warping(self.polynomial, (0, 1))
# Test new domain range (0, 1)
np.testing.assert_array_equal(normalized.domain_range, [(0, 1)])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(0, 1, 50))
np.testing.assert_array_almost_equal(
normalized(0)[..., 0], [[0.], [0.]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_standard_normalize_warping_default_value(self):
"""Test normalization """
normalized = normalize_warping(self.polynomial)
# Test that the default keeps the original domain range (-1, 1)
np.testing.assert_array_equal(normalized.domain_range, [(-1, 1)])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(-1, 1, 50))
np.testing.assert_array_almost_equal(
normalized(-1)[..., 0], [[-1], [-1]])
np.testing.assert_array_almost_equal(
normalized(1)[..., 0], [[1.], [1.]])
def test_normalize_warping(self):
"""Test normalization to (a, b)"""
a = -4
b = 3
domain = (a, b)
normalized = normalize_warping(self.polynomial, domain)
# Test new domain range (a, b)
np.testing.assert_array_equal(normalized.domain_range, [domain])
np.testing.assert_array_almost_equal(normalized.grid_points[0],
np.linspace(*domain, 50))
np.testing.assert_array_equal(normalized(a)[..., 0], [[a], [a]])
np.testing.assert_array_equal(normalized(b)[..., 0], [[b], [b]])
def test_landmark_shift_deltas(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
shifts = landmark_shift_deltas(fd, landmarks).round(3)
np.testing.assert_almost_equal(shifts, [0.25, -0.25, -0.231])
def test_landmark_shift(self):
fd = make_multimodal_samples(n_samples=3, random_state=1)
landmarks = make_multimodal_landmarks(n_samples=3, random_state=1)
landmarks = landmarks.squeeze()
original_modes = fd(landmarks.reshape((3, 1, 1)),
aligned=False)
# Test default location
fd_registered = landmark_shift(fd, landmarks)
center = (landmarks.max() + landmarks.min()) / 2
reg_modes = fd_registered(center)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test callable location
fd_registered = landmark_shift(fd, landmarks, location=np.mean)
center = np.mean(landmarks)
reg_modes = fd_registered(center)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test integer location
fd_registered = landmark_shift(fd, landmarks, location=0)
center = np.mean(landmarks)
reg_modes = fd_registered(0)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
# Test array location
fd_registered = landmark_shift(fd, landmarks, location=[0, 0.1, 0.2])
reg_modes = fd_registered([[0], [.1], [.2]], aligned=False)
np.testing.assert_almost_equal(reg_modes, original_modes, decimal=2)
def test_landmark_registration_warping(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
# Default location
warping = landmark_registration_warping(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=1)
# Fixed location
center = [.3, .6]
warping = landmark_registration_warping(fd, landmarks, location=center)
np.testing.assert_almost_equal(
warping(center)[..., 0], landmarks, decimal=3)
def test_landmark_registration(self):
fd = make_multimodal_samples(n_samples=3, n_modes=2, random_state=9)
landmarks = make_multimodal_landmarks(n_samples=3, n_modes=2,
random_state=9)
landmarks = landmarks.squeeze()
original_values = fd(landmarks.reshape(3, 2), aligned=False)
# Default location
fd_reg = landmark_registration(fd, landmarks)
center = (landmarks.max(axis=0) + landmarks.min(axis=0)) / 2
np.testing.assert_almost_equal(fd_reg(center), original_values,
decimal=2)
# Fixed location
center = [.3, .6]
fd_reg = landmark_registration(fd, landmarks, location=center)
np.testing.assert_array_almost_equal(fd_reg(center), original_values,
decimal=2)
class TestShiftRegistration(unittest.TestCase):
"""Test shift registration"""
def setUp(self):
"""Initialization of samples"""
self.fd = make_sinusoidal_process(n_samples=2, error_std=0,
random_state=1)
self.fd.extrapolation = "periodic"
def test_fit_transform(self):
reg = ShiftRegistration()
# Test fit transform with FDataGrid
fd_reg = reg.fit_transform(self.fd)
# Check attributes fitted
self.assertTrue(hasattr(reg, 'deltas_'))
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(hasattr(reg, 'n_iter_'))
self.assertTrue(isinstance(fd_reg, FDataGrid))
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
# Test with Basis
fd = self.fd.to_basis(Fourier())
reg.fit_transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_array_almost_equal(deltas, [-0.022, 0.03])
def test_fit_and_transform(self):
"""Test wrapper of shift_registration_deltas"""
fd = make_sinusoidal_process(n_samples=2, error_std=0, random_state=10)
reg = ShiftRegistration()
response = reg.fit(self.fd)
# Check attributes and returned value
self.assertTrue(hasattr(reg, 'template_'))
self.assertTrue(response is reg)
fd_registered = reg.transform(fd)
deltas = reg.deltas_.round(3)
np.testing.assert_allclose(deltas, [0.071, -0.072])
def test_inverse_transform(self):
reg = ShiftRegistration()
fd = reg.fit_transform(self.fd)
fd = reg.inverse_transform(fd)
np.testing.assert_array_almost_equal(fd.data_matrix,
self.fd.data_matrix, decimal=3)
def test_raises(self):
reg = ShiftRegistration()
# Test not fitted
with np.testing.assert_raises(NotFittedError):
reg.transform(self.fd)
reg.fit(self.fd)
reg.set_params(restrict_domain=True)
# Test use fit or transform with restrict_domain=True
with np.testing.assert_raises(AttributeError):
reg.transform(self.fd)
with np.testing.assert_raises(AttributeError):
reg.fit(self.fd)
# Test inverse_transform without previous transformation
with np.testing.assert_raises(AttributeError):
reg.inverse_transform(self.fd)
reg.fit_transform(self.fd)
# Test inverse transform with different number of sample
with np.testing.assert_raises(ValueError):
reg.inverse_transform(self.fd[:1])
fd = make_multimodal_samples(dim_domain=2, random_state=0)
with np.testing.assert_raises(ValueError):
reg.fit_transform(fd)
reg.set_params(initial=[0.])
# Wrong initial estimation
with np.testing.assert_raises(ValueError):
reg.fit_transform(self.fd)
from presamples.utils import *
import numpy as np
import os
import pytest
basedir = os.path.dirname(os.path.abspath(__file__))
def test_name_conflicts():
assert check_name_conflicts(['ABC', 'DEF']) is None
with pytest.raises(NameConflicts):
check_name_conflicts(['ABC', 'CDEF'])
def test_convert_parameter_dict_to_presamples():
data = {
'b': np.arange(5),
'a': np.arange(5, 10)
}
expected = (
['a', 'b'],
np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])
)
result = convert_parameter_dict_to_presamples(data)
assert expected[0] == result[0]
assert np.allclose(expected[1], result[1])
def test_convert_parameter_dict_to_presamples_error():
data = {
'b': np.arange(5),
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from preshed.counter import PreshCounter
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, TruncatedSVD
import spacy
from vecgram.parallel import get_n_jobs
def get_embeddings(vocab):
max_rank = max(lex.rank for lex in vocab if lex.has_vector)
vectors = np.ndarray((max_rank + 1, vocab.vectors_length), dtype=np.float32)
for lex in vocab:
if lex.has_vector:
vectors[lex.rank] = lex.vector
return vectors
def process_embeddings(embeddings, n_components, random_state=123):
"""Process the word embeddings by subtracting off the common
mean and directions using the method in
"All-but-the-Top: Simple but Effective Postprocessing for Word Representations"
https://arxiv.org/pdf/1702.01417.pdf.
"""
# subtract off the mean (no need to keep for test time
# since the embedding is fixed)
scaler = StandardScaler(with_std=False, copy=False)
embeddings = scaler.fit_transform(embeddings)
# perform a truncated svd
svd = TruncatedSVD(n_components=n_components, random_state=random_state)
projections = svd.fit_transform(embeddings)
embeddings -= np.dot(projections, svd.components_)
return embeddings
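# Usage sketch (illustrative only; `nlp` is an assumed, already-loaded spaCy pipeline):
#     vectors = get_embeddings(nlp.vocab)
#     cleaned = process_embeddings(vectors, n_components=3)
# The post-processing removes the common mean vector and the top principal
# directions, which is the motivation given in the "All-but-the-Top" paper cited above.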
def get_features(docs, n_docs, max_length=100):
Xs = np.zeros((n_docs, max_length), dtype=np.int32)
counts = PreshCounter()
for i, doc in enumerate(docs):
doc.count_by
for j, token in enumerate(doc[:max_length]):
if token.has_vector:
Xs[i, j] = token.rank
counts.inc(token.rank, 1)
else:
Xs[i, j] = 0
return Xs, counts
class SpacyWordVectorTransformer(BaseEstimator, TransformerMixin):
def __init__(self,
max_document_length=100,
language='en',
batch_size=10000,
post_process='pca',
n_components_threshold='auto',
n_jobs=1):
self.max_document_length = max_document_length
self.language = language
self.batch_size = batch_size
self.post_process = post_process
self.n_components_threshold = n_components_threshold
self.n_jobs = get_n_jobs(n_jobs)
self.nlp_ = None
self.embeddings_ = None
self.vocabulary_ = None
def fit(self, X, y=None):
self.nlp_ = spacy.load(self.language, parser=False, tagger=False, entity=False)
self.embeddings_ = get_embeddings(self.nlp_.vocab)
if self.post_process == 'pca':
if self.n_components_threshold == 'auto':
k = int(self.embeddings_.shape[1] / 100.)
else:
k = self.n_components_threshold
self.embeddings_ = process_embeddings(self.embeddings_, n_components=k)
return self
def transform(self, X):
n_docs = len(X)
doc_pipeline = self.nlp_.pipe(X, batch_size=self.batch_size, n_threads=self.n_jobs)
Xs, counts = get_features(doc_pipeline, n_docs, max_length=self.max_document_length)
self.vocabulary_ = counts
return Xs
class AverageWordEmbedding(BaseEstimator, TransformerMixin):
"""Embed a sentence using an average of word vectors in the sentence."""
def __init__(self,
language='en',
batch_size=10000,
n_jobs=1):
self.language = language
self.batch_size = batch_size
self.n_jobs = get_n_jobs(n_jobs)
self.nlp_ = None
self.embeddings_ = None
def fit(self, X, y=None):
self.nlp_ = spacy.load(self.language, parser=False, tagger=False, entity=False)
self.embeddings_ = get_embeddings(self.nlp_.vocab)
return self
def transform(self, X):
n_docs = len(X)
doc_pipeline = self.nlp_.pipe(X, batch_size=self.batch_size, n_threads=self.n_jobs)
Xs = np.zeros((n_docs, self.nlp_.vocab.vectors_length), dtype=np.float32)
for i, doc in enumerate(doc_pipeline):
n_tokens = 0
for j, token in enumerate(doc):
if token.has_vector:
Xs[i, :] += token.vector
n_tokens += 1
Xs[i, :] /= n_tokens
return Xs
class SimpleSentenceEmbedding(SpacyWordVectorTransformer):
def __init__(self,
alpha=1e-3,
max_document_length=100,
language='en',
batch_size=10000,
n_jobs=1):
self.alpha = alpha
super(SimpleSentenceEmbedding, self).__init__(
max_document_length=max_document_length,
language=language,
batch_size=batch_size,
n_jobs=n_jobs)
def transform(self, X, y=None):
Xs = super(SimpleSentenceEmbedding, self).transform(X)
Es = np.zeros((Xs.shape[0], self.embeddings_.shape[1]), dtype=np.float32)
import math
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
try:
from pymatgen.util.coord import lattice_points_in_supercell
except ImportError:
from pymatgen.util.coord_utils import lattice_points_in_supercell
from supercellor.lib.optimal_supercell import (
utils, fort_optimal_supercell_hnf, fort_optimal_supercell_bec)
EPSILON = 1e-6 # The precision when comparing floats!
def rotate(R, lattice_vecs, frac_coords):
new_lattice_vecs = np.dot(lattice_vecs.T, R).T
new_frac_coords = np.dot(frac_coords, R)
return new_lattice_vecs, new_frac_coords
def get_angles(cell_matrix):
a, b, c = [v/np.linalg.norm(v) for v in cell_matrix]
k_a = np.arccos(np.dot(b, c))
k_b = np.arccos(np.dot(a, c))
k_c = np.arccos(np.dot(a, b))
return k_a, k_b, k_c
def standardize_cell(structure, wrap=True):
"""
Standardizes a structure. If a, b, c are the cell vectors, the standard structure returned
will have the property that norm(a) <= norm(b) <= norm(c).
Also, the cell matrix will be triangular.
:param structure: A pymatgen structure instance
:param bool wrap: Whether to wrap the positions into the cell after standardizing, defaults to True.
:returns: A standardized cell
"""
frac_coords = np.empty((len(structure.sites), 3))
cellvecs = structure._lattice.matrix
for i, site in enumerate(structure.sites):
frac_coords[i,:] = site.frac_coords
# The code below sorts the cell and positions by lattice vector length,
# shortest first:
veclens = sorted([(np.linalg.norm(v), i) for i, v in enumerate(cellvecs)])
M1 = np.zeros((3,3))
for row, (_, idx) in enumerate(veclens):
M1[idx, row] = 1
cellvecs, frac_coords = rotate(M1, cellvecs, frac_coords)
k_a, k_b, k_c = get_angles(cellvecs)
right_angle = 0.5*np.pi
if ((k_a <= right_angle and k_b <= right_angle and k_c <= right_angle ) or
(k_a > right_angle and k_b > right_angle and k_c > right_angle )):
M2 = np.eye(3)
elif ((k_a <= right_angle and k_b > right_angle and k_c > right_angle ) or
(k_a > right_angle and k_b <= right_angle and k_c <= right_angle )):
M2 = np.diag([1,-1,-1])
elif ((k_a > right_angle and k_b <= right_angle and k_c > right_angle ) or
(k_a <= right_angle and k_b > right_angle and k_c <= right_angle )):
M2 = np.diag([-1,1,-1])
elif ((k_a > right_angle and k_b > right_angle and k_c <= right_angle ) or
(k_a <= right_angle and k_b <= right_angle and k_c > right_angle )):
M2 = np.diag([-1,-1,1])
else:
raise RuntimeError("Unrecognized case for k_a={}, k_b={}, k_c={}".format(k_a,k_b, k_c))
cellvecs, frac_coords = rotate(M2, cellvecs, frac_coords)
# Now applying the rules layed out in http://arxiv.org/abs/1506.01455 (see Sec. 5.1)
# to get the standardized conventional triclinic cell.
# Since not all cells give to me are triclinic < -> <= with respect to
# the paper, it's not standardized based on definition of that paper.
metric = np.dot(cellvecs, cellvecs.T)
a = np.sqrt(metric[0, 0])
b = np.sqrt(metric[1, 1])
c = np.sqrt(metric[2, 2])
alpha = np.arccos(metric[1, 2] / b / c)
beta = np.arccos(metric[0][2] / a / c)
gamma = np.arccos(metric[0][1] / a / b)
cg = np.cos(gamma)
cb = np.cos(beta)
ca = np.cos(alpha)
sg = np.sin(gamma)
cellvecs = np.zeros((3,3))
cellvecs[0, 0] = a
cellvecs[1, 0] = b * cg
cellvecs[2, 0] = c * cb
cellvecs[1, 1] = b * sg
cellvecs[2, 1] = c * (ca - cb * cg) / sg
cellvecs[2, 2] = c * np.sqrt(1 - ca * ca - cb * cb - cg * cg + 2 * ca * cb * cg) / sg
# And some checks:
if (cellvecs[0,0] < -EPSILON or abs(cellvecs[0,1]) > EPSILON or abs(cellvecs[0,2]) > EPSILON):
raise ValueError("First lattice vector not aligned with x-axis")
if (cellvecs[1,1] < -EPSILON or abs(cellvecs[1,2]) > EPSILON):
raise ValueError("Second lattice vector not in X-Y plane in first quadrant")
if (cellvecs[2,2] < 0):
raise ValueError("Third lattice vector not in positive Z-direction")
if np.linalg.norm(cellvecs[0]) - np.linalg.norm(cellvecs[1]) > EPSILON:
raise ValueError("Second lattice vector is smaller than first one")
if np.linalg.norm(cellvecs[1]) - np.linalg.norm(cellvecs[2]) > EPSILON:
raise ValueError("Third lattice vector is smaller than second one")
ENABLE_MULTIPROCESSING = True
from dsl import cpp_trace_param_automata
def generate_public_submission():
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from xgboost import XGBClassifier
import pdb
# data_path = Path('.')
data_path = Path('.')
if not (data_path / 'test').exists():
data_path = Path('../input/abstraction-and-reasoning-challenge')
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
def plot_result(test_input, test_prediction,
input_shape):
"""
Plots the actual target grid next to the model prediction,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
test_input = test_input.reshape(input_shape[0], input_shape[1])
axs[0].imshow(test_input, cmap=cmap, norm=norm)
axs[0].axis('off')
axs[0].set_title('Actual Target')
test_prediction = test_prediction.reshape(input_shape[0], input_shape[1])
axs[1].imshow(test_prediction, cmap=cmap, norm=norm)
axs[1].axis('off')
axs[1].set_title('Model Prediction')
plt.tight_layout()
plt.show()
def plot_test(test_prediction, task_name):
"""
Plots a single test prediction grid,
using the same color scheme as the ARC app
"""
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
fig, axs = plt.subplots(1, 1, figsize=(15, 15))
axs.imshow(test_prediction, cmap=cmap, norm=norm)
axs.axis('off')
axs.set_title(f'Test Prediction {task_name}')
plt.tight_layout()
plt.show()
# https://www.kaggle.com/inversion/abstraction-and-reasoning-starter-notebook
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
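# Worked example (following the string replacements above):
#     flattener([[1, 2], [3, 4]]) -> '|12|34|'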
sample_sub1 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub1 = sample_sub1.set_index('output_id')
sample_sub1.head()
def get_moore_neighbours(color, cur_row, cur_col, nrows, ncols):
if cur_row <= 0:
top = -1
else:
top = color[cur_row - 1][cur_col]
if cur_row >= nrows - 1:
bottom = -1
else:
bottom = color[cur_row + 1][cur_col]
if cur_col <= 0:
left = -1
else:
left = color[cur_row][cur_col - 1]
if cur_col >= ncols - 1:
right = -1
else:
right = color[cur_row][cur_col + 1]
return top, bottom, left, right
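# Worked example (out-of-grid neighbours are encoded as -1):
#     get_moore_neighbours([[1, 2], [3, 4]], 0, 0, 2, 2) -> (-1, 3, -1, 2)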
def get_tl_tr(color, cur_row, cur_col, nrows, ncols):
if cur_row == 0:
top_left = -1
top_right = -1
else:
if cur_col == 0:
top_left = -1
else:
top_left = color[cur_row - 1][cur_col - 1]
if cur_col == ncols - 1:
top_right = -1
else:
top_right = color[cur_row - 1][cur_col + 1]
return top_left, top_right
def make_features(input_color, nfeat):
nrows, ncols = input_color.shape
feat = np.zeros((nrows * ncols, nfeat))
cur_idx = 0
for i in range(nrows):
for j in range(ncols):
feat[cur_idx, 0] = i
feat[cur_idx, 1] = j
feat[cur_idx, 2] = input_color[i][j]
feat[cur_idx, 3:7] = get_moore_neighbours(input_color, i, j, nrows, ncols)
feat[cur_idx, 7:9] = get_tl_tr(input_color, i, j, nrows, ncols)
feat[cur_idx, 9] = len(np.unique(input_color[i, :]))
feat[cur_idx, 10] = len(np.unique(input_color[:, j]))
feat[cur_idx, 11] = (i + j)
feat[cur_idx, 12] = len(np.unique(input_color[i - local_neighb:i + local_neighb,
j - local_neighb:j + local_neighb]))
cur_idx += 1
return feat
def features(task, mode='train'):
num_train_pairs = len(task[mode])
feat, target = [], []
global local_neighb
for task_num in range(num_train_pairs):
input_color = np.array(task[mode][task_num]['input'])
target_color = task[mode][task_num]['output']
nrows, ncols = len(task[mode][task_num]['input']), len(task[mode][task_num]['input'][0])
target_rows, target_cols = len(task[mode][task_num]['output']), len(task[mode][task_num]['output'][0])
if (target_rows != nrows) or (target_cols != ncols):
print('Number of input rows:', nrows, 'cols:', ncols)
print('Number of target rows:', target_rows, 'cols:', target_cols)
not_valid = 1
return None, None, 1
imsize = nrows * ncols
# offset = imsize*task_num*3 #since we are using three types of aug
feat.extend(make_features(input_color, nfeat))
target.extend(np.array(target_color).reshape(-1, ))
return np.array(feat), np.array(target), 0
# mode = 'eval'
mode = 'test'
if mode == 'eval':
task_path = evaluation_path
elif mode == 'train':
task_path = training_path
elif mode == 'test':
task_path = test_path
all_task_ids = sorted(os.listdir(task_path))
nfeat = 13
local_neighb = 5
valid_scores = {}
model_accuracies = {'ens': []}
pred_taskids = []
for task_id in all_task_ids:
task_file = str(task_path / task_id)
with open(task_file, 'r') as f:
task = json.load(f)
feat, target, not_valid = features(task)
if not_valid:
print('ignoring task', task_file)
print()
not_valid = 0
continue
xgb = XGBClassifier(n_estimators=10, n_jobs=-1)
xgb.fit(feat, target, verbose=-1)
# training on input pairs is done.
# test predictions begins here
num_test_pairs = len(task['test'])
for task_num in range(num_test_pairs):
cur_idx = 0
input_color = np.array(task['test'][task_num]['input'])
nrows, ncols = len(task['test'][task_num]['input']), len(
task['test'][task_num]['input'][0])
feat = make_features(input_color, nfeat)
print('Made predictions for ', task_id[:-5])
preds = xgb.predict(feat).reshape(nrows, ncols)
if (mode == 'train') or (mode == 'eval'):
ens_acc = (np.array(task['test'][task_num]['output']) == preds).sum() / (nrows * ncols)
model_accuracies['ens'].append(ens_acc)
pred_taskids.append(f'{task_id[:-5]}_{task_num}')
# print('ensemble accuracy',(np.array(task['test'][task_num]['output'])==preds).sum()/(nrows*ncols))
# print()
preds = preds.astype(int).tolist()
# plot_test(preds, task_id)
sample_sub1.loc[f'{task_id[:-5]}_{task_num}',
'output'] = flattener(preds)
if (mode == 'train') or (mode == 'eval'):
df = pd.DataFrame(model_accuracies, index=pred_taskids)
print(df.head(10))
print(df.describe())
for c in df.columns:
print(f'for {c} no. of complete tasks is', (df.loc[:, c] == 1).sum())
df.to_csv('ens_acc.csv')
sample_sub1.head()
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
eval_tasks = sorted(os.listdir(evaluation_path))
T = training_tasks
Trains = []
for i in range(400):
task_file = str(training_path / T[i])
task = json.load(open(task_file, 'r'))
Trains.append(task)
E = eval_tasks
Evals = []
for i in range(400):
task_file = str(evaluation_path / E[i])
task = json.load(open(task_file, 'r'))
Evals.append(task)
cmap = colors.ListedColormap(
['#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:greed, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
# plt.show()
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(4 * n, 8), dpi=50)
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
for i, t in enumerate(task["train"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Train-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Train-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
for i, t in enumerate(task["test"]):
t_in, t_out = np.array(t["input"]), np.array(t["output"])
axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
axs[0][fig_num].set_title(f'Test-{i} in')
axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
axs[1][fig_num].set_title(f'Test-{i} out')
axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
fig_num += 1
plt.tight_layout()
plt.show()
def plot_picture(x):
plt.imshow(np.array(x), cmap=cmap, norm=norm)
plt.show()
def Defensive_Copy(A):
n = len(A)
k = len(A[0])
L = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
L[i, j] = 0 + A[i][j]
return L.tolist()
def Create(task, task_id=0):
n = len(task['train'])
Input = [Defensive_Copy(task['train'][i]['input']) for i in range(n)]
Output = [Defensive_Copy(task['train'][i]['output']) for i in range(n)]
Input.append(Defensive_Copy(task['test'][task_id]['input']))
return Input, Output
def Recolor(task):
Input = task[0]
Output = task[1]
Test_Picture = Input[-1]
Input = Input[:-1]
N = len(Input)
for x, y in zip(Input, Output):
if len(x) != len(y) or len(x[0]) != len(y[0]):
return -1
Best_Dict = -1
Best_Q1 = -1
Best_Q2 = -1
Best_v = -1
# v ranges from 0 to 3. This gives an extra flexibility of measuring distance from any of the 4 corners
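# Concretely: v == 0 keeps both axes (top-left corner), v == 1 flips rows and
# columns (bottom-right), v == 2 flips only columns (top-right) and v == 3
# flips only rows (bottom-left), matching the modulo expressions below.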
Pairs = []
for t in range(15):
for Q1 in range(1, 8):
for Q2 in range(1, 8):
if Q1 + Q2 == t:
Pairs.append((Q1, Q2))
for Q1, Q2 in Pairs:
for v in range(4):
if Best_Dict != -1:
continue
possible = True
Dict = {}
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
color2 = y[i][j]
if color1 != color2:
rule = (p1, p2, color1)
if rule not in Dict:
Dict[rule] = color2
elif Dict[rule] != color2:
possible = False
if possible:
# Let's see if we actually solve the problem
for x, y in zip(Input, Output):
n = len(x)
k = len(x[0])
for i in range(n):
for j in range(k):
if v == 0 or v == 2:
p1 = i % Q1
else:
p1 = (n - 1 - i) % Q1
if v == 0 or v == 3:
p2 = j % Q2
else:
p2 = (k - 1 - j) % Q2
color1 = x[i][j]
rule = (p1, p2, color1)
if rule in Dict:
color2 = 0 + Dict[rule]
else:
color2 = 0 + y[i][j]
if color2 != y[i][j]:
possible = False
if possible:
Best_Dict = Dict
Best_Q1 = Q1
Best_Q2 = Q2
Best_v = v
if Best_Dict == -1:
return -1 # meaning that we didn't find a rule that works for the traning cases
# Otherwise there is a rule: so let's use it:
n = len(Test_Picture)
k = len(Test_Picture[0])
answer = np.zeros((n, k), dtype=int)
for i in range(n):
for j in range(k):
if Best_v == 0 or Best_v == 2:
p1 = i % Best_Q1
else:
p1 = (n - 1 - i) % Best_Q1
if Best_v == 0 or Best_v == 3:
p2 = j % Best_Q2
else:
p2 = (k - 1 - j) % Best_Q2
color1 = Test_Picture[i][j]
rule = (p1, p2, color1)
if (p1, p2, color1) in Best_Dict:
answer[i][j] = 0 + Best_Dict[rule]
else:
answer[i][j] = 0 + color1
return answer.tolist()
sample_sub2 = pd.read_csv(data_path / 'sample_submission.csv')
sample_sub2.head()
def flattener(pred):
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
example_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# display(example_grid)
print(flattener(example_grid))
Solved = []
Problems = sample_sub2['output_id'].values
Proposed_Answers = []
test_paths_my = {task.stem: json.load(task.open()) for task in test_path.iterdir()}
test_task_ids = np.sort(list(test_paths_my.keys()))
print(Problems, len(Problems))
task_number_my = dict(zip(test_task_ids, np.arange(100)))
for i in range(len(Problems)):
output_id = Problems[i]
task_id = output_id.split('_')[0]
pair_id = int(output_id.split('_')[1])
f = str(test_path / str(task_id + '.json'))
with open(f, 'r') as read_file:
task = json.load(read_file)
n = len(task['train'])
Input = [Defensive_Copy(task['train'][j]['input']) for j in range(n)]
Output = [Defensive_Copy(task['train'][j]['output']) for j in range(n)]
Input.append(Defensive_Copy(task['test'][pair_id]['input']))
solution = Recolor([Input, Output])
pred = ''
if solution != -1:
Solved.append(i)
pred1 = flattener(solution)
pred = pred + pred1 + ' '
if pred == '':
pred = flattener(example_grid)
Proposed_Answers.append(pred)
sample_sub2['output'] = Proposed_Answers
sample_sub1 = sample_sub1.reset_index()
sample_sub1 = sample_sub1.sort_values(by="output_id")
sample_sub2 = sample_sub2.sort_values(by="output_id")
out1 = sample_sub1["output"].astype(str).values
out2 = sample_sub2["output"].astype(str).values
merge_output = []
for o1, o2 in zip(out1, out2):
o = o1.strip().split(" ")[:1] + o2.strip().split(" ")[:2]
o = " ".join(o[:3])
merge_output.append(o)
sample_sub1["output"] = merge_output
sample_sub1["output"] = sample_sub1["output"].astype(str)
# test_paths_my = { task.stem: json.load(task.open()) for task in test_path.iterdir() }
# test_task_ids = np.sort(list(test_paths_my.keys()))
# task_number_my = dict(zip(test_task_ids, np.arange(100)))
submission = sample_sub1.copy()
submission.to_csv("public_submission.csv", index=False)
#generate_public_submission()
import numpy as np
from tqdm.notebook import tqdm
from PIL import Image, ImageDraw
import time
from collections import defaultdict
import os
import json
import random
import copy
import networkx as nx
from pathlib import Path
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from itertools import product
import pandas as pd
import multiprocessing
import subprocess
# from moviepy.editor import ImageSequenceClip
# from moviepy.editor import clips_array, CompositeVideoClip
# from moviepy.video.io.html_tools import html_embed, HTML2
# def display_vid(vid, verbose=False, **html_kw):
# """
# Display a moviepy video clip, useful for removing loadbars
# """
# rd_kwargs = {
# 'fps': 10, 'verbose': verbose
# }
# if not verbose:
# rd_kwargs['logger'] = None
# return HTML2(html_embed(vid, filetype=None, maxduration=60,
# center=True, rd_kwargs=rd_kwargs, **html_kw))
data_path = Path('../input/abstraction-and-reasoning-challenge/')
# data_path = Path('.') # Artyom: it's better use symlinks locally
cmap_lookup = [
'#000000', '#0074D9', '#FF4136', '#2ECC40', '#FFDC00',
'#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'
]
cmap_lookup = [np.array([int(x[1:3], 16), int(x[3:5], 16), int(x[5:], 16)]) for x in cmap_lookup]
def cmap(x):
"""
Translate a task matrix to a color coded version
arguments
x : a h x w task matrix
returns
a h x w x 3 matrix with colors instead of numbers
"""
y = np.zeros((*x.shape, 3))
y[x < 0, :] = np.array([112, 128, 144])
y[x > 9, :] = np.array([255, 248, 220])
for i, c in enumerate(cmap_lookup):
y[x == i, :] = c
return y
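# Worked example: cmap(np.array([[3, -1]])) yields a (1, 2, 3) array holding the
# green RGB [46, 204, 64] for value 3 and slate grey [112, 128, 144] for the
# out-of-range value -1.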
def draw_one(x, k=20):
"""
Create a PIL image from a task matrix, the task will be
drawn using the default color coding with grid lines
arguments
x : a task matrix
k = 20 : an up scaling factor
returns
a PIL image
"""
img = Image.fromarray(cmap(x).astype(np.uint8)).resize((x.shape[1] * k, x.shape[0] * k), Image.NEAREST)
draw = ImageDraw.Draw(img)
for i in range(x.shape[0]):
draw.line((0, i * k, img.width, i * k), fill=(80, 80, 80), width=1)
for j in range(x.shape[1]):
draw.line((j * k, 0, j * k, img.height), fill=(80, 80, 80), width=1)
return img
def vcat_imgs(imgs, border=10):
"""
Concatenate images vertically
arguments:
imgs : an array of PIL images
border = 10 : the size of space between images
returns:
a PIL image
"""
h = max(img.height for img in imgs)
w = sum(img.width for img in imgs)
res_img = Image.new('RGB', (w + border * (len(imgs) - 1), h), color=(255, 255, 255))
offset = 0
for img in imgs:
res_img.paste(img, (offset, 0))
offset += img.width + border
return res_img
def plot_task(task):
n = len(task["train"]) + len(task["test"])
fig, axs = plt.subplots(2, n, figsize=(n * 4, 8))
plt.subplots_adjust(wspace=0, hspace=0)
fig_num = 0
def go(ax, title, x):
ax.imshow(draw_one(x), interpolation='nearest')
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for i, t in enumerate(task["train"]):
go(axs[0][fig_num], f'Train-{i} in', t["input"])
go(axs[1][fig_num], f'Train-{i} out', t["output"])
fig_num += 1
for i, t in enumerate(task["test"]):
go(axs[0][fig_num], f'Test-{i} in', t["input"])
try:
go(axs[1][fig_num], f'Test-{i} out', t["output"])
except:
go(axs[1][fig_num], f'Test-{i} out', np.zeros_like(t["input"]))
fig_num += 1
plt.tight_layout()
plt.show()
def real_trace_param_automata(input, params, n_iter, n_hidden):
"""
Execute a parametrized automata on `input` and return the resulting output grid
arguments:
input : the input grid
params : tuple of (global_rules, ca_rules, split_rule, merge_rule)
n_iter : number of iterations to perform
n_hidden : number of hidden grids, if set to 0 the hidden grids are set to None
returns:
the output grid after applying the global rules, the cellular automata rules and the merge rule
"""
# hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
#
# global_rules, ca_rules = params
#
# trace = [(input, hidden)]
#
# for rule in global_rules:
#
# output, hidden = apply_rule(input, hidden, rule)
# trace.append((output, hidden))
# input = output
#
# its = range(n_iter)
#
# for i_it in its:
# output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
# trace.append((output, hidden))
#
# if (input.shape == output.shape) and (output == input).all():
# break
# input = output
hidden = np.zeros((n_hidden, *input.shape)) if n_hidden > 0 else None
global_rules, ca_rules, split_rule, merge_rule = params
grids = apply_split_rule(input, hidden, split_rule)
#print(grids[0][0])
for rule in global_rules:
for i, (inp, hid) in enumerate(grids):
if rule['macro_type'] == 'global_rule':
if rule['apply_to'] == 'all' or \
(rule['apply_to'] == 'index' and i == rule['apply_to_index']%len(grids) or
(rule['apply_to'] == 'last' and i == len(grids) - 1)):
grids[i] = apply_rule(inp, hid, rule)
elif rule['macro_type'] == 'global_interaction_rule':
grids = apply_interaction_rule(grids, rule)
#print(grids[0][0])
#1/0
for i, (input, hidden) in enumerate(grids):
for _ in range(n_iter):
output, hidden = compute_parametrized_automata(input, hidden, ca_rules)
if np.array_equal(input, output):
break
input = output
grids[i] = (output, hidden)
output = apply_merge_rule(grids, merge_rule, split_rule)
return output
def apply_interaction_rule(grids, rule):
if rule['type'] == 'align_pattern':
# index_from = rule['index_from'] % len(grids)
# index_to = rule['index_to'] % len(grids)
# allow_rotation = rule['allow_rotation']
if len(grids) > 5:
return grids
for index_from in range(len(grids)):
for index_to in range(index_from+1, len(grids)):
input_i = grids[index_from][0]
input_j = grids[index_to][0]
# print(np.max(input_i>0, axis=1))
# print(np.max(input_i>0, axis=1).shape)
# print(np.arange(input_i.shape[0]).shape)
#1/0
i_nonzero_rows = np.arange(input_i.shape[0])[np.max(input_i>0, axis=1)]
i_nonzero_columns = np.arange(input_i.shape[1])[np.max(input_i>0, axis=0)]
j_nonzero_rows = np.arange(input_j.shape[0])[np.max(input_j>0, axis=1)]
j_nonzero_columns = np.arange(input_j.shape[1])[np.max(input_j>0, axis=0)]
if i_nonzero_rows.shape[0] == 0 or i_nonzero_columns.shape[0] == 0 or \
j_nonzero_rows.shape[0] == 0 or j_nonzero_columns.shape[0] == 0:
continue
i_minrow = np.min(i_nonzero_rows)
i_mincol = np.min(i_nonzero_columns)
i_maxrow = np.max(i_nonzero_rows) + 1
i_maxcol = np.max(i_nonzero_columns) + 1
j_minrow = np.min(j_nonzero_rows)
j_mincol = np.min(j_nonzero_columns)
j_maxrow = np.max(j_nonzero_rows) + 1
j_maxcol = np.max(j_nonzero_columns) + 1
figure_to_align = input_i[i_minrow:i_maxrow, i_mincol:i_maxcol]
figure_target = input_j[j_minrow:j_maxrow, j_mincol:j_maxcol]
best_fit = 0
best_i_fit, best_j_fit = -1, -1
#print(figure_to_align)
#print(figure_target)
if figure_to_align.shape[0] < figure_target.shape[0] or figure_to_align.shape[1] < figure_target.shape[1]:
continue
#1/0
else:
for i_start in range((figure_to_align.shape[0] - figure_target.shape[0])+1):
for j_start in range((figure_to_align.shape[1] - figure_target.shape[1])+1):
fig_1 = figure_to_align[i_start:(i_start + figure_target.shape[0]), j_start:(j_start + figure_target.shape[1])]
if np.logical_and(np.logical_and(figure_target > 0, figure_target!=rule['allow_color']), figure_target != fig_1).any():
continue
fit = np.sum(figure_target==fig_1)
if fit > best_fit:
best_i_fit, best_j_fit = i_start, j_start
best_fit = fit
if best_fit == 0:
continue
imin = j_minrow-best_i_fit
imax = j_minrow-best_i_fit + figure_to_align.shape[0]
jmin = j_mincol - best_j_fit
jmax = j_mincol - best_j_fit + figure_to_align.shape[1]
begin_i = max(imin, 0)
begin_j = max(jmin, 0)
end_i = min(imax, input_j.shape[0])
end_j = min(jmax, input_j.shape[1])
i_fig_begin = (begin_i-imin)
i_fig_end = figure_to_align.shape[0]-(imax-end_i)
j_fig_begin = (begin_j-jmin)
j_fig_end = figure_to_align.shape[1]-(jmax-end_j)
if rule['fill_with_color'] == 0:
input_j[begin_i:end_i, begin_j:end_j] = figure_to_align[i_fig_begin:i_fig_end, j_fig_begin:j_fig_end]
else:
for i, j in product(range(end_i-begin_i + 1), range(end_j-begin_j + 1)):
if input_j[begin_i + i, begin_j + j] == 0:
input_j[begin_i + i, begin_j + j] = rule['fill_with_color'] * (figure_to_align[i_fig_begin + i, j_fig_begin + j])
return grids
def trace_param_automata(input, params, n_iter, n_hidden):
# expected = real_trace_param_automata(input, params, n_iter, n_hidden)
#
# testcase = {'input': input, 'params': params}
# print(str(testcase).replace('\'', '"').replace('array(', '').replace(')', ''))
output = cpp_trace_param_automata(input, params, n_iter)
# if not np.array_equal(expected, output):
# print('cpp result is wrong')
# print('input:')
# print(input)
# print('expected:')
# print(expected)
# print('got:')
# print(output)
#
# diff = [[str(g) if e != g else '-' for e, g in zip(exp_row, got_row)]
# for exp_row, got_row in zip(expected, output)]
# diff_lines = [' '.join(line) for line in diff]
# diff_str = '[[' + ']\n ['.join(diff_lines)
#
# print('diff:')
# print(diff_str)
# print('rules')
# print(params)
#
# assert False
return [[output]]
# def vis_automata_trace(states, loadbar=False, prefix_image=None):
# """
# Create a video from an array of automata states
#
# arguments:
# states : array of automata steps, returned by `trace_automata()`
# loadbar = True: weather display loadbars
# prefix_image = None: image to add to the beginning of each frame
# returns
# a moviepy ImageSequenceClip
# """
# frames = []
# if loadbar:
# states = tqdm(states, desc='Frame')
# for i, (canvas, hidden) in enumerate(states):
#
# frame = []
# if prefix_image is not None:
# frame.append(prefix_image)
# frame.append(draw_one(canvas))
# frames.append(vcat_imgs(frame))
#
# return ImageSequenceClip(list(map(np.array, frames)), fps=10)
# def vis_automata_paramed_task(tasks, parameters, n_iter, n_hidden, vis_only_ix=None):
# """
# Visualize the automata steps during the task solution
# arguments:
# tasks : the task to be solved by the automata
# step_fn : automata transition function as passed to `trace_automata()`
# n_iter : number of iterations to perform
# n_hidden : number of hidden girds
# """
#
# n_vis = 0
#
# def go(task, n_vis, test=False):
#
# if vis_only_ix is not None and vis_only_ix != n_vis:
# return
# trace = trace_param_automata(task['input'], parameters, n_iter, n_hidden)
# if not test:
# vid = vis_automata_trace(trace, prefix_image=draw_one(task['output']))
# else:
# vid = vis_automata_trace(trace, prefix_image=draw_one(np.zeros_like(task['input'])))
#
# # display(display_vid(vid))
#
# for task in (tasks['train']):
# n_vis += 1
# go(task, n_vis)
#
# for task in (tasks['test']):
# n_vis += 1
# go(task, n_vis, True)
training_path = data_path / 'training'
evaluation_path = data_path / 'evaluation'
test_path = data_path / 'test'
training_tasks = sorted(os.listdir(training_path))
evaluation_tasks = sorted(os.listdir(evaluation_path))
test_tasks = sorted(os.listdir(test_path))
def load_data(p, phase=None):
"""
Load task data
"""
if phase in {'training', 'test', 'evaluation'}:
p = data_path / phase / p
task = json.loads(Path(p).read_text())
dict_vals_to_np = lambda x: {k: np.array(v) for k, v in x.items()}
assert set(task) == {'test', 'train'}
res = dict(test=[], train=[])
for t in task['train']:
assert set(t) == {'input', 'output'}
res['train'].append(dict_vals_to_np(t))
for t in task['test']:
if phase == 'test':
assert set(t) == {'input'}
else:
assert set(t) == {'input', 'output'}
res['test'].append(dict_vals_to_np(t))
return res
nbh = lambda x, i, j: {
(ip, jp) : x[i+ip, j+jp]
for ip, jp in product([1, -1, 0], repeat=2)
if 0 <= i+ip < x.shape[0] and 0 <= j+jp < x.shape[1] and (not (ip==0 and jp==0))
}
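# Worked example (only in-bounds offsets are returned and the centre cell is excluded):
#     nbh(np.array([[1, 2], [3, 4]]), 0, 0) -> {(1, 1): 4, (1, 0): 3, (0, 1): 2}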
def get_random_split_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['nothing', 'color_figures', 'figures', 'macro_multiply'])
if rule['type'] in ['color_figures', 'figures']:
rule['sort'] = random.choice(['biggest', 'smallest'])
if rule['type'] == 'macro_multiply':
rule['k1'] = np.random.randint(config['mink1'], config['maxk1']+1)
rule['k2'] = np.random.randint(config['mink2'], config['maxk2']+1)
return rule
def get_random_merge_rule(all_colors, best_candidates={}, temp=0, config={}, r_type=None):
rule = {}
rule['type'] = random.choice(['cellwise_or', 'output_first', 'output_last'])
return rule
def apply_split_rule(input, hidden, split_rule):
if split_rule['type'] == 'nothing':
return [(input, hidden)]
if split_rule['type'] == 'macro_multiply':
ks = split_rule['k1'] * split_rule['k2']
grids = [(np.copy(input), copy.copy(hidden)) for _ in range(ks)]
"""
Tests for functions in ensemble_tools.py.
Authors: <NAME> & <NAME>
Note that functions that start with an underscore (_) are designed for local
use only by the primary functions within ensemble_tools.py. Therefore, testing
of those local scripts does not include checking for irrational inputs that
would cause meaningless results and/or an exception since such inputs are
checked for earlier in the primary functions.
"""
import numpy as np
import pytest
import sys
from ensemble.ensemble_tools import (
_gumbel_cdf,
_probability_from_members,
probability_from_members,
_prob_from_outside_rank_gumbel,
_prob_from_outside_rank_exp,
_deterministic_event_prob,
probability_from_members,
prob_between_values,
ensemble_verification_rank,
_validate_arg_type
)
# Define ensemble datasets to test with
MEMBERS_ALPHA = np.array([[0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.5, 0.5, 0.5, 1.0]])
MEMBERS_BRAVO = np.array([[-1.0, -1.0, 0.0, 0.0, 0.1, 0.2, 0.5, 0.5, 1.0, 1.0]])
MEMBERS_CHARLIE = np.array([[7, 7, 7, 7, 7, 7, 7, 7, 7, 7]])
# Set the roundoff decimals for testing precision
ROUNDOFF = 5
# ------------------------------------------------------------------------------
# _gumbel_cdf
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("members, x, expected",
[(MEMBERS_ALPHA[0], 1.1, 0.97949),
(MEMBERS_ALPHA[0], 10.0, 1.0),
(MEMBERS_ALPHA[0], -10.0, 0.0),
])
def test_gumbel_cdf(members, x, expected):
print("I AM TESTING _gumbel_cdf")
assert np.round(_gumbel_cdf(members, x), ROUNDOFF) == expected
# ------------------------------------------------------------------------------
# _prob_from_outside_rank_gumbel
# ------------------------------------------------------------------------------
@pytest.mark.parametrize("threshold, members, threshold_position, expected",
[(1000, MEMBERS_ALPHA, 'above_all_members', 0.0),
(-1000, MEMBERS_ALPHA, 'below_all_members', 1),
(1.1, MEMBERS_ALPHA, 'mouse_burgers', 1e32),
(1.1, MEMBERS_ALPHA, 'above_all_members', 'between_0_1')])
def test_prob_from_outside_rank_gumbel(
threshold, members, threshold_position, expected
):
prob = _prob_from_outside_rank_gumbel(threshold, members, threshold_position)
if isinstance(expected, float) or isinstance(expected, int) :
assert(np.round(prob, ROUNDOFF) == expected)
import numpy as np
from scipy.integrate import odeint, solve_ivp
import lmfit
from .. import util
from .base_model import BaseModel
import math
class SeicrdRlcModelResult:
def __init__(self, t, population, susceptible, exposed_normal, exposed_over, infectious, critical, recovered_normal, recovered_critical, dead_normal, dead_over, mortality_rate, r0_normal, kapasitas_rs, r0_over, r0_overall, test_coverage, sanity_check_mode=util.SANITY_CHECK_CORRECT):
self.t = t
self.population = population
self.susceptible = susceptible
self.exposed_normal = util.sanity_clamp(exposed_normal, sanity_check_mode)
self.exposed_over = util.sanity_clamp(exposed_over, sanity_check_mode)
self.infectious = util.sanity_clamp(infectious, sanity_check_mode)
self.critical = util.sanity_clamp(critical, sanity_check_mode)
self.recovered_normal = util.sanity_clamp(recovered_normal, sanity_check_mode)
self.recovered_critical = util.sanity_clamp(recovered_critical, sanity_check_mode)
self.dead_normal = util.sanity_clamp(dead_normal, sanity_check_mode)
self.dead_over = util.sanity_clamp(dead_over, sanity_check_mode)
self.mortality_rate = util.sanity_clamp(mortality_rate, sanity_check_mode)
self.r0_normal = r0_normal
self.kapasitas_rs = kapasitas_rs
self.r0_over = r0_over
self.r0_overall = r0_overall
self.test_coverage = test_coverage
def exposed(self):
return self.exposed_normal + self.exposed_over
def critical_cared(self):
return SeicrdRlcModel.critical_cared(self.critical, self.kapasitas_rs)
def critical_over(self):
return SeicrdRlcModel.critical_over(self.critical, self.kapasitas_rs)
def infectious_all(self):
return self.infectious + self.critical
def recovered(self):
return self.recovered_normal + self.recovered_critical
def dead(self):
return self.dead_normal + self.dead_over
def over(self):
return self.critical_over() + self.dead_over
def infectious_scaled(self):
return self.test_coverage * (self.infectious + self.critical_over())
def critical_cared_scaled(self):
return self.critical_cared()
def infectious_all_scaled(self):
return self.infectious_scaled() + self.critical_cared_scaled()
def recovered_scaled(self):
return self.test_coverage * self.recovered_normal + self.recovered_critical
def dead_scaled(self):
return self.test_coverage * self.dead_over + self.dead_normal
def infected_scaled(self):
return self.infectious_all_scaled() + self.recovered_scaled() + self.dead_scaled()
def daily_susceptible(self):
return util.delta(self.susceptible)
def daily_exposed_normal(self):
return util.delta(self.exposed_normal)
def daily_exposed_over(self):
return util.delta(self.exposed_over)
def daily_exposed(self):
return util.delta(self.exposed())
def daily_infectious(self):
return util.delta(self.infectious)
def daily_critical_cared(self):
return util.delta(self.critical_cared())
def daily_critical_over(self):
return util.delta(self.critical_over())
def daily_critical(self):
return util.delta(self.critical)
def daily_infectious_all(self):
return util.delta(self.infectious_all())
def daily_recovered_normal(self):
return util.delta(self.recovered_normal)
def daily_recovered_critical(self):
return util.delta(self.recovered_critical)
def daily_recovered(self):
return util.delta(self.recovered())
def daily_dead_normal(self):
return util.delta(self.dead_normal)
def daily_dead_over(self):
return util.delta(self.dead_over)
def daily_dead(self):
return util.delta(self.dead())
def daily_over(self):
return util.delta(self.over())
def daily_infectious_scaled(self):
return util.delta(self.infectious_scaled())
def daily_critical_cared_scaled(self):
return util.delta(self.critical_cared_scaled())
def daily_infectious_all_scaled(self):
return util.delta(self.infectious_all_scaled())
def daily_recovered_scaled(self):
return util.delta(self.recovered_scaled())
def daily_dead_scaled(self):
return util.delta(self.dead_scaled())
def daily_infected_scaled(self):
return util.delta(self.infected_scaled())
def get_dataset(self, d, shift=0):
# TODO
ret = None
if d == "infectious":
ret = self.infectious_scaled()
elif d == "critical_cared":
ret = self.critical_cared_scaled()
elif d == "infectious_all":
ret = self.infectious_all_scaled()
elif d == "recovered":
ret = self.recovered_scaled()
elif d == "dead":
ret = self.dead_scaled()
elif d == "infected":
ret = self.infected_scaled()
else:
raise ValueError("Invalid dataset: " + str(d))
return np.array(ret) if not shift else util.shift_array(ret, shift)
def get_datasets(self, datasets, shift=0):
return {k:self.get_dataset(k, shift) for k in datasets}
def get_datasets_values(self, datasets, shift=0):
return np.array([self.get_dataset(k, shift) for k in datasets])
class SeicrdRlcModel(BaseModel):
params = ["infectious_rate",
"critical_chance", "critical_rate",
"recovery_rate_normal", "recovery_rate_critical",
"death_chance_normal", "death_rate_normal",
"death_chance_over", "death_rate_over",
"exposed_rate_over", "k", "kapasitas_rs_mul",
"test_coverage_0", "test_coverage_increase", "test_coverage_max"]
def __init__(self, kabko):
super().__init__(kabko)
self.prev_dydt = None
def critical_cared(critical, kapasitas_rs):
ret = critical[:]
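        # e.g. with kapasitas_rs = 100 (an arbitrary illustrative value), a
        # critical series [20, 150, 300] is capped to [20, 100, 100]: only
        # cases up to hospital capacity receive care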
        return np.clip(ret, a_min=None, a_max=kapasitas_rs)
#
# generate mock data for PyCALI.
#
import numpy as np
from numpy import fft
import pycali
import matplotlib.pyplot as plt
import copy
def convolve_fft(con, resp):
"""
    Convolve the continuum with a response function using FFT.
"""
resp_pad = np.zeros(con.shape[0])
resp_pad[:resp.shape[0]] = resp
con_fft = fft.rfft(con)
resp_fft = fft.rfft(resp_pad)
conv_fft = con_fft * resp_fft
conv = fft.irfft(conv_fft, n = con.shape[0])
return conv
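# Illustrative sketch (not part of the original script): a quick consistency
# check for convolve_fft, using only numpy's fft module imported above. The
# array sizes and the exponential response are arbitrary choices.
def _check_convolve_fft():
    con = np.random.randn(512)
    resp = np.exp(-np.arange(32) / 8.0)
    # the rfft-based result should equal the full complex-FFT circular
    # convolution for real-valued input
    resp_pad = np.zeros(con.shape[0])
    resp_pad[:resp.shape[0]] = resp
    direct = np.real(fft.ifft(fft.fft(con) * fft.fft(resp_pad)))
    return np.allclose(convolve_fft(con, resp), direct)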
def generate_mock_data():
"""
generate mock data
"""
# DRW parameters
sigma = 0.3
tau = 50.0
# time nodes for continuum
tg = np.linspace(-200.0, 600.0, 2000)
tmx, tmy = np.meshgrid(tg, tg)
# covariance
Cov = sigma * sigma * np.exp(-np.abs(tmx-tmy)/tau)
Mat = np.linalg.cholesky(Cov)
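    # Sampling note: since Mat @ Mat.T == Cov, the product Mat @ u with
    # u ~ N(0, I) is a Gaussian draw whose covariance is Cov (a damped random
    # walk); the +1.0 below sets the mean continuum level.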
u = np.random.randn(tg.shape[0])
fs = np.matmul(Mat, u) + 1.0
# errors, around 0.015
fe = np.random.randn(tg.shape[0])*0.005+0.015
con = np.stack((tg, fs, fe), axis=-1)
# now emission line
# first transfer function, Gaussian
dt = con[1, 0] - con[0, 0]
ntau = 200
resp = np.zeros(ntau)
resp2 = np.zeros(ntau)
tau = np.array(np.arange(ntau))*dt
resp[:] = np.exp(-0.5 * (tau-30.0)**2/(10.0)**2)
    resp[:] /= np.sum(resp[:])
"""
Augmenters that somehow change the size of the images.
List of augmenters:
* :class:`Resize`
* :class:`CropAndPad`
* :class:`Crop`
* :class:`Pad`
* :class:`PadToFixedSize`
* :class:`CenterPadToFixedSize`
* :class:`CropToFixedSize`
* :class:`CenterCropToFixedSize`
* :class:`CropToMultiplesOf`
* :class:`CenterCropToMultiplesOf`
* :class:`PadToMultiplesOf`
* :class:`CenterPadToMultiplesOf`
* :class:`CropToPowersOf`
* :class:`CenterCropToPowersOf`
* :class:`PadToPowersOf`
* :class:`CenterPadToPowersOf`
* :class:`CropToAspectRatio`
* :class:`CenterCropToAspectRatio`
* :class:`PadToAspectRatio`
* :class:`CenterPadToAspectRatio`
* :class:`CropToSquare`
* :class:`CenterCropToSquare`
* :class:`PadToSquare`
* :class:`CenterPadToSquare`
* :class:`KeepSizeByResize`
"""
from __future__ import print_function, division, absolute_import
import re
import functools
import numpy as np
import cv2
import imgaug as ia
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from .. import parameters as iap
def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):
if prevent_zero_size:
top, right, bottom, left = _crop_prevent_zero_size(
shape[0], shape[1], top, right, bottom, left)
height, width = shape[0:2]
x1 = left
x2 = width - right
y1 = top
y2 = height - bottom
# these steps prevent negative sizes
# if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis
# note that if height/width of arr is zero, then y2==y1 or x2==x1, which
# is still valid, even if height/width is zero and results in a zero-sized
# axis
x2 = max(x2, x1)
y2 = max(y2, y1)
return x1, y1, x2, y2
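# Illustrative example (not from the library docs): for a (10, 20) image with
# top=2, right=3, bottom=1, left=4, this yields x1=4, y1=2, x2=17, y2=9,
# i.e. the crop arr[2:9, 4:17].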
def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,
prevent_zero_size=prevent_zero_size)
return arr[y1:y2, x1:x2, ...]
def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant",
pad_cval=0, keep_size=False):
height, width = arr.shape[0:2]
image_cr = _crop_arr_(arr, *croppings)
image_cr_pa = pad(
image_cr,
top=paddings[0], right=paddings[1],
bottom=paddings[2], left=paddings[3],
mode=pad_mode, cval=pad_cval)
if keep_size:
image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))
return image_cr_pa
def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0.0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,
paddings_img, pad_mode="constant",
pad_cval=None, keep_size=False):
if isinstance(augmentable, ia.HeatmapsOnImage):
arr_attr_name = "arr_0to1"
pad_cval = pad_cval if pad_cval is not None else 0.0
else:
assert isinstance(augmentable, ia.SegmentationMapsOnImage), (
"Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." % (
type(augmentable)))
arr_attr_name = "arr"
pad_cval = pad_cval if pad_cval is not None else 0
arr = getattr(augmentable, arr_attr_name)
arr_shape_orig = arr.shape
augm_shape = augmentable.shape
croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape)
paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)
croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],
*croppings_proj)
arr_cr = _crop_arr_(arr,
croppings_proj[0], croppings_proj[1],
croppings_proj[2], croppings_proj[3])
arr_cr_pa = pad(
arr_cr,
top=paddings_proj[0], right=paddings_proj[1],
bottom=paddings_proj[2], left=paddings_proj[3],
mode=pad_mode,
cval=pad_cval)
setattr(augmentable, arr_attr_name, arr_cr_pa)
if keep_size:
augmentable = augmentable.resize(arr_shape_orig[0:2])
else:
augmentable.shape = _compute_shape_after_crop_and_pad(
augmentable.shape, croppings_img, paddings_img)
return augmentable
def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):
# using the trbl function instead of croppings_img has the advantage
# of incorporating prevent_zero_size, dealing with zero-sized input image
# axis and dealing the negative crop amounts
x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)
crop_left = x1
crop_top = y1
shape_orig = kpsoi.shape
shifted = kpsoi.shift_(
x=-crop_left+paddings_img[3],
y=-crop_top+paddings_img[0])
shifted.shape = _compute_shape_after_crop_and_pad(
shape_orig, croppings_img, paddings_img)
if keep_size:
shifted = shifted.on_(shape_orig)
return shifted
def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)
new_shape = list(old_shape)
new_shape[0] = y2 - y1 + paddings[0] + paddings[2]
new_shape[1] = x2 - x1 + paddings[1] + paddings[3]
return tuple(new_shape)
def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,
crop_left):
remaining_height = height - (crop_top + crop_bottom)
remaining_width = width - (crop_left + crop_right)
if remaining_height < 1:
regain = abs(remaining_height) + 1
regain_top = regain // 2
regain_bottom = regain // 2
if regain_top + regain_bottom < regain:
regain_top += 1
if regain_top > crop_top:
diff = regain_top - crop_top
regain_top = crop_top
regain_bottom += diff
elif regain_bottom > crop_bottom:
diff = regain_bottom - crop_bottom
regain_bottom = crop_bottom
regain_top += diff
crop_top = crop_top - regain_top
crop_bottom = crop_bottom - regain_bottom
if remaining_width < 1:
regain = abs(remaining_width) + 1
regain_right = regain // 2
regain_left = regain // 2
if regain_right + regain_left < regain:
regain_right += 1
if regain_right > crop_right:
diff = regain_right - crop_right
regain_right = crop_right
regain_left += diff
elif regain_left > crop_left:
diff = regain_left - crop_left
regain_left = crop_left
regain_right += diff
crop_right = crop_right - regain_right
crop_left = crop_left - regain_left
return (
max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),
max(crop_left, 0))
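# Illustrative example (not from the library docs): for height=4 with
# crop_top=3 and crop_bottom=2 the remaining height would be -1, so one row
# is handed back on each side, leaving crops of (2, 1) and a 1px-high result.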
def _project_size_changes(trbl, from_shape, to_shape):
if from_shape[0:2] == to_shape[0:2]:
return trbl
height_to = to_shape[0]
width_to = to_shape[1]
height_from = from_shape[0]
width_from = from_shape[1]
top = trbl[0]
right = trbl[1]
bottom = trbl[2]
left = trbl[3]
# Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap
# is exactly half the size of an image and the size change on an axis is
# an odd value. Then the projected value would end up being <something>.5
# and the rounding would always round up to the next integer. If both
# sides then have the same change, they are both rounded up, resulting
# in more change than expected.
# E.g. image height is 8, map height is 4, change is 3 at the top and 3 at
# the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded
# up to 2.0. Hence, the maps are changed by 4 (100% of the map height,
# vs. 6 for images, which is 75% of the image height).
top = _int_r(height_to * (top/height_from) - 1e-4)
right = _int_r(width_to * (right/width_from) + 1e-4)
bottom = _int_r(height_to * (bottom/height_from) + 1e-4)
left = _int_r(width_to * (left/width_from) - 1e-4)
return top, right, bottom, left
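# Worked example of the 1e-4 nudge above (illustrative only): with
# from_shape=(8, ...), to_shape=(4, ...) and trbl=(3, 0, 3, 0), both raw
# projections are 4*(3/8) = 1.5; subtracting/adding 1e-4 rounds the top to 1
# and the bottom to 2, so the map changes by 3 rows (75% of its height),
# matching the 6-of-8 (75%) change on the image.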
def _int_r(value):
    return int(np.round(value))
from numpy.core.numeric import ones
from pandas._config.config import set_option
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.factory import Factory
from kivy.properties import ObjectProperty, StringProperty, AliasProperty
from kivy.uix.popup import Popup
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
from kivy.clock import Clock
from kivy.event import EventDispatcher
# Loading this in is slow here, so might be nice to put elsewhere...
# Can I bring up other python programs to load this in?
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as pl
from matplotlib import rcParams
from matplotlib import rc
import pandas
from time import time
import aims_loader as aimsLoad
import aims_analysis as aims
import aims_classification as classy
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.screenmanager import Screen
from os.path import dirname, join
from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty, BooleanProperty,\
ListProperty
import os
import re
# Evidently these lines will remove a weird multi-touch emulator from kivy:
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
# Jesus, this thing really was built for phone apps...
class Root(Screen):
# REALLY Sucks to use global variables, but I'm unsure how else to
# pass all of these across functions
global N; N = 4
global LFile; LFile = ['']
fullscreen = BooleanProperty(False)
def on_pre_enter(self):
global loadLabel
global loadButton
if 'loadLabel' not in globals():
global LFile; LFile = ['']
N = 4
a=0
# Need to re-read through this shit to see what's going on...
for j in np.arange(int(N)):
# No idea what this lambda function does, but it lets us bind show load to the button
if molecule == 'ig':
xxname = 'File '
xname = ''
else:
xxname = 'FASTA '
xname = 'FASTA '
if LFile == ['']:
if j > 0:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
else:
if j > len(LFile):
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
# What an absolute fucking nightmare solution the line above... works though
# Need to make sure we don't overwrite the labels every time we load shit in
if j >= len(LFile):
label = Label(text=xname +'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
if LFile[int(j)] != '':
label = Label(text=LFile[int(j)], size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
label = Label(text=xname+'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
if a == 0:
loadButton = [button]
loadLabel = [label]
else:
loadButton = loadButton + [button]
loadLabel = loadLabel + [label]
a = a + 1
for i in loadButton:
FloatLayout.add_widget(self, i)
for k in loadLabel:
FloatLayout.add_widget(self, k)
def get_path(self):
return(os.getcwd())
def dismiss_popup(self):
self._popup.dismiss()
# This is getting more and more confusing. Apologies for whoever has to go through this.
# this is basically just a dummy function to keep track of which FASTA file is where...
def do_thing(self,win2 = 1):
global FASTA_L
# What a wild way for all of this to work... BUT IT DOES
if str(type(win2)) == "<class 'kivy.uix.button.Button'>":
FASTA_L = int(win2.text[-1])-1
else:
FASTA_L = win2
# Basically just recreate the entire screen every time we want to
# add more fastas. Kind of a pain, but it works.
def check_one(self):
global onlyONE
global LFile
if self.tbutt1.state == 'down':
for i in loadButton[1:]:
i.disabled = True
onlyONE = True
if LFile[0] == '':
return
else:
if len(LFile) > 1:
LFile = [LFile[0]]
self.next1_1.disabled = False
elif self.tbutt2.state == 'down':
onlyONE = False
if LFile != ['']:
loadButton[1].disabled = False
if len(LFile) >= 2:
return
else:
self.next1_1.disabled = True
return
# win is how we will try to keep track of using the right button...
def show_load(self, win = 2):
content = LoadDialog(load=self.load, cancel=self.dismiss_popup, fas1 = self.do_thing(win2 = win))
self._popup = Popup(title="Load file", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def load(self, path, filename):
global LFile
path1 = os.path.join(path, filename[0])
# So FASTA_L should tell you WHERE
# The loadfile is coming from
while FASTA_L+1 > len(LFile):
LFile = LFile + ['']
LFile[FASTA_L] = path1
# Need to have two separate options because we move from Kivy defined buttons to
# python defined buttons. I handle those slightly differently.
loadLabel[FASTA_L].text = path1
if len(loadButton) > FASTA_L+1:
loadButton[FASTA_L+1].disabled = False
self.check_one()
if len(LFile) >= 2:
self.next1_1.disabled = False
self.dismiss_popup()
def make_path(self):
global dir_name
self.text1 = self.v_input1.text
dir_name = self.text1
def reset_loadScrn(self):
# need to do something to let people know they need to re-enter everything...
global N
global loadButton
global loadLabel
global LFile
self.lessF.disabled = True
self.next1_1.disabled = True
for i in loadButton:
FloatLayout.remove_widget(self,i)
for j in loadLabel:
FloatLayout.remove_widget(self,j)
# Remove any extra entries, just in case...
while len(LFile) > N:
LFile = LFile[:-1]
def more_fastas(self):
global N
global loadButton
global loadLabel
# Alright so this works...
self.lessF.disabled = False
N = N + 1
a = 0
for i in loadButton:
FloatLayout.remove_widget(self,i)
for j in loadLabel:
FloatLayout.remove_widget(self,j)
for j in np.arange(int(N)):
# No idea what this lambda function does, but it lets us bind show load to the button
if molecule == 'ig':
xxname = 'File '
xname = ''
else:
xxname = 'FASTA '
xname = 'FASTA '
if LFile == ['']:
if j > 0:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
else:
if j > len(LFile):
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
# What an absolute fucking nightmare solution the line above... works though
# Need to make sure we don't overwrite the labels every time we load shit in
if j >= len(LFile):
label = Label(text=xname +'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
if LFile[int(j)] != '':
label = Label(text=LFile[int(j)], size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
label = Label(text=xname+'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
if a == 0:
loadButton = [button]
loadLabel = [label]
else:
loadButton = loadButton + [button]
loadLabel = loadLabel + [label]
a = a + 1
for i in loadButton:
FloatLayout.add_widget(self, i)
for k in loadLabel:
FloatLayout.add_widget(self, k)
def less_fastas(self):
global N
global loadButton
global loadLabel
# Alright so this works...
N = N - 1
a = 0
# WHAT A MESS THIS IS, BUT IT WORKS!
if N == 4:
self.lessF.disabled = True
for i in loadButton:
FloatLayout.remove_widget(self,i)
for j in loadLabel:
FloatLayout.remove_widget(self,j)
for j in np.arange(int(N)):
# No idea what this lambda function does, but it lets us bind show load to the button
if molecule == 'ig':
xxname = 'File '
xname = ''
else:
xxname = 'FASTA '
xname = 'FASTA '
if LFile == ['']:
if j > 0:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
else:
if j > len(LFile):
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
disabled = True)
else:
button = Button(text='Load '+ xxname + str(j+1), size_hint=(0.2, 0.075),
pos_hint={'center_x':.15, 'center_y':.75-int(a)*0.6/N},
on_release=lambda x = int(j):self.show_load(win = x),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
# What an absolute fucking nightmare solution the line above... works though
# Need to make sure we don't overwrite the labels every time we load shit in
if j >= len(LFile):
label = Label(text=xname +'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
if LFile[int(j)] != '':
label = Label(text=LFile[int(j)], size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
else:
label = Label(text=xname+'File ' + str(j+1) + ' Path', size_hint=(0.2, 0.075),
pos_hint={'center_x':.62, 'center_y':.75-int(a)*0.6/N},font_name='app_data/Poppins-Light.ttf')
if a == 0:
loadButton = [button]
loadLabel = [label]
else:
loadButton = loadButton + [button]
loadLabel = loadLabel + [label]
a = a + 1
for i in loadButton:
FloatLayout.add_widget(self, i)
for k in loadLabel:
FloatLayout.add_widget(self, k)
def add_widget(self, *args):
if 'content' in self.ids:
return self.ids.content.add_widget(*args)
return super(Root, self).add_widget(*args)
# Alright for our alignment entries, let's try breaking all of
# this code up into another class...
class aligner(Screen):
# Need to redefine a different type of loading from the wild one above.
def dismiss_popup(self):
self._popup.dismiss()
def show_load(self, *args):
content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Load file", content=content,size_hint=(0.9, 0.9))
self._popup.open()
def drop_buts(self):
x = self.align_button1; FloatLayout.remove_widget(self, x)
x = self.align_button2; FloatLayout.remove_widget(self, x)
global align_label
global alignEnt
global thing_loaded
align_label = Label(text = '')
alignEnt = ''
thing_loaded = False
# This is our special load function just for this page, so
# do everything inside this one function
#... probably if I did this more often, I wouldn't need to assign so many
# global variables... whoops.
def load(self, path, filename):
global align_label
global alignEnt
global thing_loaded
pos_path = os.path.join(path, filename[0])
matPos = pandas.read_csv(pos_path,header=0).values
# Check that they have at least two entries
if len(matPos) < 2:
if align_label.text == 'ERROR: Matrix only has 1 row. Must have at least 2':
FloatLayout.remove_widget(self,align_label)
align_label = Label(text='ERROR: Matrix only has 1 row. Must have at least 2', size_hint=(0.4, 0.2),
pos_hint={'center_x':.5, 'center_y':.5}, color = (1, 0, 0, 1))
FloatLayout.add_widget(self, align_label)
button_moved = Button(text='Load Matrix', size_hint=(None, None),
size =('150dp','48dp'),pos_hint={'center_x':.4, 'center_y':0.1}, on_release=self.show_load,
background_down='app_data/butt_down.png',background_normal='app_data/butt_up.png',
color=(0, 0.033, 0.329, 1), border=(0, 0, 0, 0))
button2_moved = Button(text='Resize Entries', size_hint=(None, None),
size =('150dp','48dp'),pos_hint={'center_x':.6, 'center_y':0.1}, on_release=self.load_aligners,
background_down='app_data/butt_down.png',background_normal='app_data/butt_up.png',
color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
FloatLayout.add_widget(self, button2_moved)
FloatLayout.add_widget(self, button_moved)
# Check that they have the right number of columns, or else the program will crash
# LEAVE THIS BE FOR NOW, BUT ACTUALLY IMPLEMENT ASAP
#elif len(matPos[0]) != 6:
# if align_label.text == 'ERROR: Matrix only has 1 row. Must have at least 2':
# FloatLayout.remove_widget(self,align_label)
# align_label = Label(text='ERROR: Matrix only has 1 row. Must have at least 2', size_hint=(0.4, 0.2),
# pos_hint={'center_x':.5, 'center_y':.5}, color = (1, 0, 0, 1))
else:
if align_label.text == 'ERROR: Matrix only has 1 row. Must have at least 2':
FloatLayout.remove_widget(self,align_label)
align_label.text = ''
# REALIZING NOW I DONT HAVE ANY FUNCTIONS TO REMOVE THESE BUTTONS,
# PROBABLY DOESN'T MATTER, BUT THEORETICALLY COULD CAUSE A CRASH?
button_moved = Button(text='Load Matrix', size_hint=(None, None),
size =('150dp','48dp'),pos_hint={'center_x':.4, 'center_y':0.1}, on_release=self.show_load,
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
button2_moved = Button(text='Resize Entries', size_hint=(None, None),background_down='app_data/butt_down.png',
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0),
size =('150dp','48dp'),pos_hint={'center_x':.6, 'center_y':0.1}, on_release=self.load_aligners)
FloatLayout.add_widget(self, button2_moved)
FloatLayout.add_widget(self, button_moved)
# If we're running this script again, we've almost certainly gotta remove it...
if alignEnt != '':
for entry in alignEnt:
for j in entry:
FloatLayout.remove_widget(self, j)
for j in np.arange(len(matPos)):
if matPos[j,0] != '':
name = matPos[j,0]
else:
name = 'FASTA ' + str(j+1)
textinput0 = TextInput(text=name, multiline=False, size_hint = (None, None),write_tab =False,
pos_hint = {'center_x': 0.1, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='125dp')
textinput1 = TextInput(text=str(matPos[j,1]), multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.25, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput2 = TextInput(text=str(matPos[j,2]), multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.4, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput3 = TextInput(text=str(matPos[j,3]), multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.55, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput4 = TextInput(text=str(matPos[j,4]), multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.7, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput5 = TextInput(text=str(matPos[j,5]), multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.85, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
if int(j) == 0:
alignEnt = [[textinput0,textinput1,textinput2,textinput3,textinput4,textinput5]]
else:
alignEnt = alignEnt + [[textinput0,textinput1,textinput2,textinput3,textinput4,textinput5]]
# Before adding in all of the new ones
for entry in alignEnt:
for j in entry:
FloatLayout.add_widget(self, j)
thing_loaded = True
self.dismiss_popup()
def load_aligners(self, *args):
global align_label
global alignEnt
global thing_loaded
self.next1_2.disabled = False
# Literally just here so we can have these messages or not
if align_label.text == 'ERROR: MUST LOAD AT LEAST TWO FILES':
FloatLayout.remove_widget(self,align_label)
align_label.text = ''
button_moved = Button(text='Load Matrix', size_hint=(None, None),
size =('150dp','48dp'),pos_hint={'center_x':.4, 'center_y':0.1}, on_release=self.show_load,
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
button2_moved = Button(text='Resize Entries', size_hint=(None, None),
size =('150dp','48dp'),pos_hint={'center_x':.6, 'center_y':0.1}, on_release=self.load_aligners,
background_normal='app_data/butt_up.png',color=(0, 0.033, 0.329, 1),border=(0, 0, 0, 0))
FloatLayout.add_widget(self, button2_moved)
FloatLayout.add_widget(self, button_moved)
# If we're running this script again, we've almost certainly gotta remove it...
if alignEnt != '':
for entry in alignEnt:
for j in entry:
FloatLayout.remove_widget(self, j)
for j in np.arange(len(LFile)):
if LFile[j] != '':
startname = [y.end() for y in re.finditer('/',LFile[j])]
endname = [y.start() for y in re.finditer('.fasta',LFile[j])]
if len(endname) == 0:
endname = [int(len(LFile[j])-1)]
name = LFile[j][int(startname[len(startname)-1]):int(endname[0])]
else:
name = 'File ' + str(j+1)
textinput0 = TextInput(text=name, multiline=False, size_hint = (None, None),write_tab =False,
pos_hint = {'center_x': 0.1, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='125dp')
textinput1 = TextInput(text='', multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.25, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput2 = TextInput(text='', multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.4, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput3 = TextInput(text='', multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.55, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput4 = TextInput(text='', multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.7, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
textinput5 = TextInput(text='', multiline=False, size_hint = (None, None),write_tab = False,
pos_hint = {'center_x': 0.85, 'center_y': 0.60-int(j)*0.5/N}, height = '32dp',width='42dp')
if int(j) == 0:
alignEnt = [[textinput0,textinput1,textinput2,textinput3,textinput4,textinput5]]
else:
alignEnt = alignEnt + [[textinput0,textinput1,textinput2,textinput3,textinput4,textinput5]]
# Before adding in all of the new ones
for entry in alignEnt:
for j in entry:
FloatLayout.add_widget(self, j)
thing_loaded = True
# So now we're done with our loading stuff, let's wrap things up and get ready to move on...
def make_path(self):
global dir_name
self.text1 = self.input1.text
dir_name = self.text1
def check_aligns(self):
global mat_coords
global labels
global check_run
# Really idiot-proof this thing, in case someone is just buzzing through the screens
if 'thing_loaded' in globals():
if thing_loaded:
# Convert our weird little kivy objects into a numpy array
x,y = np.shape(alignEnt)
for row in np.arange(x):
for column in np.arange(y):
if column == 0:
mat_pre = alignEnt[row][column].text
else:
mat_pre = np.hstack((mat_pre,alignEnt[row][column].text))
if row == 0:
mat_coords = mat_pre
else:
mat_coords = np.vstack((mat_coords,mat_pre))
if molecule == 'mhc' and onlyONE:
labels = mat_coords[0]
else:
labels = mat_coords[:,0]
# When you go back this far, make sure we recreate the checkbox screen
if 'check_run' in globals():
# So intelligently, if you define something as a global variable
# but don't "fill" it, it won't register as being in globals.
del check_run
class LoadDialog(FloatLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
fas1 = ObjectProperty(None)
def get_path(self):
return(os.getcwd())
# let's try to have all of my nice python functions in
# the "analysis" screen instance:
class Analysis(Screen):
def get_matrix(self):
# Ok so here is where I will actually call my python code
# For now, print all the shit
#########################################
# DEFINE PLOT PARAMETERS:
font = {'family' : 'Arial',
'weight' : 'bold',
'size' : 16}
COLOR = 'black'
rcParams['text.color'] = 'black'
rcParams['axes.labelcolor'] = COLOR
rcParams['xtick.color'] = COLOR
rcParams['ytick.color'] = COLOR
rc('font', **font)
# Custom colormap code from: https://stackoverflow.com/questions/49367144/modify-matplotlib-colormap
import matplotlib as mpl
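        # The block below prepends a 64-entry white-to-jet ramp to the jet
        # colormap, so the lowest values fade to white (used later so that
        # unclustered points from OPTICS/DBSCAN plot as white).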
upper = mpl.cm.jet(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
global cmap
cmap = np.vstack(( lower, upper ))
cmap = mpl.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
##########################################
global seq_MIf
global seqNameF
global seq_final
global labels
this_dir = os.getcwd()
paths = LFile
# mat_coords is predefined from above function
######### PRE LOAD DATA SO TESTING CAN GO FASTER!!!! #############
# x1 = np.array(['cd1','HLA-A','UFA','UAA','UDA']); x2 = np.array(['124','170','22','2','2'])
# x3 = np.array(['167','210','66','49','49']); x4 = np.array(['209','260','105','93','93'])
# x5 = np.array(['262','306','158','152','152']); x6 = np.array(['303','348','199','193','193'])
# mat_coords = np.transpose(np.vstack((x1,x2,x3,x4,x5,x6)))
# paths = ['/Users/boughter/Desktop/AIMS/gui/AIMS/mhc_testData/cd1_seqs.fasta',
# '/Users/boughter/Desktop/AIMS/gui/AIMS/mhc_testData/hlaA_seqs.fasta',
# '/Users/boughter/Desktop/AIMS/gui/AIMS/mhc_testData/cd1_ufa_genes.fasta',
# '/Users/boughter/Desktop/AIMS/gui/AIMS/mhc_testData/UAA_seqs.fasta',
# '/Users/boughter/Desktop/AIMS/gui/AIMS/mhc_testData/UDA_seqs.fasta']
#################### DEBUG VERSION ONLY #########################
AA_num_key = aims.get_props()[1]
# Labels will always be predefined for Abs
# Predefined, but poorly formatted... reformat here
global align
global labels
global LOOPnum
global exp_drop
global mat_size
global ID
if molecule == 'mhc':
if onlyONE:
data = mat_coords[1:]
else:
data = mat_coords[:,1:]
for i in np.arange(len(paths)):
if molecule == 'mhc':
#if any(data[:,i] == ['','','','','']):
# self.img1.source = this_dir + '/app_data/error_pos.png'
# return()
# turn data into an integer.
if onlyONE:
int_dat = [int(x) for x in data]
seq,seq_key = aimsLoad.mhc_loader(paths[i],int_dat,labels[i])
else:
int_dat = [int(x) for x in data[i]]
seq,seq_key = aimsLoad.mhc_loader(paths[i],int_dat,labels[i])
else:
# My first ever error handling! Works pretty well (for now)
# Probably need more "exceptions" here for formatting errors
try:
seq = aimsLoad.Ig_loader(paths[i],labels[i],loops=LOOPnum,drop_degens=exp_drop)
except pandas.errors.ParserError:
# Alrighty we want a popup here on this screen
popup = Popup(title='ERROR (Click Anywhere to Dismiss)',
content=Label(text='Wrong # loops, go back and redefine'),
size_hint=(None, None), size=(600, 600))
popup.open()
return
ID_pre = np.ones(np.shape(seq)[1])
if i == 0:
seq_final = seq
seq_size = np.shape(seq)[1]
seqNameF = labels[i]
ID = i*ID_pre
if molecule == 'mhc':
seq_keyF = seq_key
mat_size = aims.get_sequence_dimension(np.array(seq))[0]
else:
seq_final = pandas.concat([seq_final,seq],axis = 1)
seqNameF = np.vstack((seqNameF,labels[i]))
seq_size = np.vstack((seq_size,np.shape(seq)[1]))
ID = np.hstack((ID, i*ID_pre))
if molecule == 'mhc':
seq_keyF = np.hstack((seq_keyF,seq_key))
mat_size2 = aims.get_sequence_dimension(np.array(seq))[0]
if type(mat_size) != int:
max_lenp=np.zeros(len(mat_size))
for i in np.arange(len(mat_size)):
max_lenp[i]=int(max(mat_size[i],mat_size2[i]))
else:
max_lenp = int(max(mat_size,mat_size2))
mat_size = max_lenp
# Alright if we have gotten this far, bring the "next" button back online
self.next1_3.disabled = False
# Obviously need to define this somewhere in the software
if molecule == 'ig':
if self.alnc.active:
align = 'center'
elif self.alnb.active:
align = 'bulge'
elif self.alnl.active:
align = 'left'
elif self.alnr.active:
align = 'right'
else:
align = 'center'
seq_MI = aims.gen_tcr_matrix(np.array(seq_final),key = AA_num_key, giveSize = mat_size, alignment = align)
# Convert our MI matrix to a pandas dataframe
seq_MIf = pandas.DataFrame(np.transpose(seq_MI),columns = seq_final.columns)
fig, ax = pl.subplots(1, 1,squeeze=False,figsize=(16,8))
ax[0,0].imshow(seq_MI, interpolation='nearest', aspect='auto',cmap=cm.jet)
pl.close()
# Need to try to draw lines separating the distinct groups...
seq_locs = 0
if onlyONE:
pass
else:
for i in np.arange(len(seq_size)-1):
seq_locs = seq_locs + seq_size[i]
ax[0,0].plot(np.arange(len(np.transpose(seq_MI))),np.ones(len(np.transpose(seq_MI)))*seq_locs,'white',linewidth = 3)
# Alright now we want to change xlabel to actually talk about the features...
ax[0,0].set_ylabel('Sequence Number')
global xtick_loc
if type(mat_size) != int:
for i in np.arange(len(mat_size)):
if i == 0:
xtick_loc = [mat_size[i]/2]
else:
pre_loc = sum(mat_size[:i])
xtick_loc = xtick_loc + [mat_size[i]/2 + pre_loc]
ax[0,0].set_xticks(xtick_loc)
else:
xtick_loc = [mat_size/2]
ax[0,0].set_xticks(xtick_loc)
if molecule == 'mhc':
ax[0,0].set_xticklabels(['Strand 1','Helix 1','Strand 2','Helix 2'])
else:
if LOOPnum == 6:
ax[0,0].set_xticklabels(['CDR1L','CDR2L','CDR3L','CDR1H','CDR2H','CDR3H'])
elif LOOPnum == 2:
ax[0,0].set_xticklabels(['CDR3H','CDR3L'])
elif LOOPnum == 3:
ax[0,0].set_xticklabels(['CDR1','CDR2','CDR3'])
elif LOOPnum == 1:
ax[0,0].set_xticklabels(['CDR Loop'])
Numclones = np.shape(seq_MIf)[1]
if type(mat_size) != int:
for i in np.arange(len(mat_size)-1):
ax[0,0].plot( (mat_size[i] + sum(mat_size[:i]) - 0.5) * np.ones(Numclones),np.arange(Numclones),'white',linewidth = 3)
# NOTE need to make sure that I do something so that files are not overwritten...
        if not os.path.exists(this_dir + '/' + dir_name):
            os.mkdir(this_dir + '/' + dir_name)
fig.savefig(this_dir + '/' + dir_name + '/matrix.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/matrix.png',format='png',dpi=500)
# save raw data at all steps now...
np.savetxt(this_dir + '/' + dir_name + '/raw_matrix.dat',seq_MIf,fmt='%.3f')
if molecule == 'mhc':
np.savetxt(this_dir + '/' + dir_name + '/sequence_key.txt',seq_keyF,fmt='%s')
self.img1.source = this_dir + '/' + dir_name + '/matrix.png'
def reduce_dim(self):
self.clust_butt.disabled = False
global chosen_dset
full_big.index = seq_MIf.columns
if self.dat1.active:
dChoice = 'net'
elif self.dat2.active:
dChoice = 'parsed'
if dChoice == 'full':
chosen_dset = full_big
elif dChoice == 'parsed':
chosen_dset = parsed_mat
elif dChoice == 'net':
first = True
for i in seqNameF:
pre_label = i[0]
index = [column for column in seq_MIf.columns if pre_label in column]
pre_array = np.array(full_big.loc[index])
dat_size = len(index)
seq_bigF = pre_array.reshape(dat_size,61,len(seq_MIf))
if first:
big_reshape = seq_bigF
first = False
else:
big_reshape = np.vstack((big_reshape,seq_bigF))
chosen_dset = pandas.DataFrame(np.average(big_reshape,axis=2))
chosen_dset.index = seq_MIf.columns
final_chosen = np.transpose(chosen_dset)
#### For datasets with > 2 entries need to allow those to pick and choose:
global plot_label
if onlyONE:
plot_label = seqNameF
ID_new = ID
pass
elif len(labels) > 2:
aaa = 0; bbb = 0
for i in range(len(checked)):
# Don't love this solution but it works... so...
pre_label = seqNameF[i][0]
index = [column for column in seq_MIf.columns if pre_label in column]
# Alright so our new matrix, checked is "i" rows by three
# 0 is the "group 1", 1 is the "group 2"
if checked[i,0]:
if aaa == 0:
for_clust = final_chosen[index]
ID_new = aaa * np.ones(np.shape(for_clust)[1])
plot_label = pre_label
aaa = 1
else:
for_clust = pandas.concat([for_clust,final_chosen[index]],axis=1)
ID_new = np.hstack((ID_new, aaa * np.ones(np.shape(final_chosen[index])[1])))
plot_label = np.hstack((plot_label,pre_label))
aaa = aaa + 1
elif checked[i,1]:
if bbb == 0:
no_clust = final_chosen[index]
bbb = 1
else:
no_clust = pandas.concat([no_clust,final_chosen[index]],axis=1)
bbb = bbb + 1
# Then just redefine "for_clust" as "chosen_dset" for consistency with labels <=2
chosen_dset = np.transpose(for_clust)
else:
plot_label = np.hstack((seqNameF[0][0],seqNameF[1][0]))
ID_new = ID
global dim
if self.pca2.active:
reduce = 'pca'; dim = '2d'
elif self.pca3.active:
reduce = 'pca'; dim = '3d'
elif self.umap2.active:
reduce = 'umap'; dim = '2d'
elif self.umap3.active:
reduce = 'umap'; dim = '3d'
else:
return
if reduce == 'pca':
from sklearn.decomposition import PCA
pca = PCA(n_components=3, svd_solver='full')
final=pca.fit_transform(chosen_dset)
transform = pandas.DataFrame(np.transpose(final),columns = chosen_dset.index)
#print(pca.explained_variance_ratio_)
elif reduce == 'umap':
import umap
reducer = umap.UMAP(n_components=3, n_neighbors = 25, n_jobs=1, random_state = 47)
final = reducer.fit_transform(chosen_dset)
transform = pandas.DataFrame(np.transpose(final),columns = chosen_dset.index)
elif reduce == 'tsne':
from sklearn.manifold import TSNE
tsne = TSNE(n_components = 3, random_state = 47, n_jobs = 1)
final=tsne.fit_transform(chosen_dset)
transform = pandas.DataFrame(np.transpose(final),columns = chosen_dset.index)
global clust_input
clust_input = np.array(np.transpose(transform))
# A bit of an odd solution to how we add in a legend to the PCA/UMAP
from matplotlib.lines import Line2D
cmap = pl.get_cmap('rainbow')
global cmap_discrete
cmap_discrete = cmap(np.linspace(0, 1, len(seqNameF)))
for i in np.arange(len(seqNameF)):
if i == 0:
custom_lines = [Line2D([0], [0], color='w', marker='o',markerfacecolor=cmap_discrete[i], markersize= 10)]
else:
custom_lines = custom_lines + [Line2D([0], [0], color='w', marker='o',markerfacecolor = cmap_discrete[i], markersize= 10)]
if dim == '2d':
fig = pl.figure(figsize = (14, 10))
pl.scatter(clust_input[:,0],clust_input[:,1],c = ID_new, cmap = 'rainbow')
pl.legend(custom_lines, plot_label)
pl.xlabel('AX1'); pl.ylabel('AX2')
elif dim == '3d':
from mpl_toolkits import mplot3d
fig = pl.figure(figsize = (14, 10))
ax3d = fig.add_subplot(111, projection='3d')
ax3d.scatter(clust_input[:,0],clust_input[:,1],clust_input[:,2],c = ID_new, cmap ='rainbow')
pl.legend(custom_lines, plot_label)
ax3d.set_xlabel('AX1',labelpad=20)
ax3d.set_ylabel('AX2',labelpad=20)
ax3d.set_zlabel('AX3',labelpad=10)
this_dir = os.getcwd()
fig.savefig(this_dir + '/' + dir_name + '/'+reduce+'_'+dim+'_fig.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/'+reduce+'_'+dim+'_fig.png',format='png',dpi=500)
np.savetxt(this_dir + '/' + dir_name + '/'+reduce+'_'+dim+'.dat',clust_input,fmt='%.3f')
#NOTE DELETE THIS BELOW LINE ASAP
self.img6.source = this_dir + '/' + dir_name + '/'+reduce+'_'+dim+'_fig.png'
pl.close()
def cluster_seqs(self):
global cluster_dset
import sklearn.cluster as cluster
global clust
global NClust
self.next1_5.disabled = False
if self.kmean.active:
clust = 'kmean'
NClust = int(self.kclust.text)
clusts = cluster.KMeans(n_clusters=NClust).fit_predict(clust_input)
elif self.optics.active:
clust = 'optics'
OPsamp = int(self.opticnum.text)
clusts = cluster.OPTICS(min_samples=OPsamp).fit_predict(clust_input)
elif self.dbscan.active:
clust = 'dbscan'
DBrad = float(self.dbrad.text)
clusts = cluster.DBSCAN(eps=DBrad).fit_predict(clust_input)
else:
return
cluster_dset = pandas.DataFrame(clusts,columns=['cluster'])
# Want the min cluster to be white for non kmean cluster, as these are unclustered
if dim == '2d':
fig = pl.figure(figsize = (14, 10))
if clust == 'kmean':
pl.scatter(clust_input[:,0],clust_input[:,1],c = clusts, cmap = 'rainbow')
else:
pl.scatter(clust_input[:,0],clust_input[:,1],c = clusts, cmap = cmap)
pl.xlabel('AX1'); pl.ylabel('AX2')
elif dim == '3d':
from mpl_toolkits import mplot3d
fig = pl.figure(figsize = (14, 10))
ax3d = fig.add_subplot(111, projection='3d')
if clust == 'kmean':
ax3d.scatter(clust_input[:,0],clust_input[:,1],clust_input[:,2],c = clusts, cmap='rainbow')
else:
ax3d.scatter(clust_input[:,0],clust_input[:,1],clust_input[:,2],c = clusts, cmap=cmap)
ax3d.set_xlabel('AX1',labelpad=20)
ax3d.set_ylabel('AX2',labelpad=20)
ax3d.set_zlabel('AX3',labelpad=10)
this_dir = os.getcwd()
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_'+dim+'_fig.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_'+dim+'_fig.png',format='png',dpi=500)
np.savetxt(this_dir + '/' + dir_name + '/'+clust+'_'+dim+'.dat',clusts,fmt='%.3f')
pl.close()
self.img6.source = this_dir + '/' + dir_name + '/'+clust+'_'+dim+'_fig.png'
def analyze_clusts(self):
self.next1_6.disabled = False
fig, ax = pl.subplots(1, 1,squeeze=False,figsize=(16,10))
global fin_clustL
fin_clustL = len(cluster_dset['cluster'].drop_duplicates())
cluster_bins = np.zeros((fin_clustL,len(seqNameF)))
kmean = True
for i in np.sort(cluster_dset['cluster'].drop_duplicates()):
pre_clust = seq_MIf[chosen_dset.index[cluster_dset[cluster_dset['cluster'] == i].index]]
clustID = np.transpose(pandas.DataFrame(i*np.ones(np.shape(pre_clust)[1])))
clustID.columns = pre_clust.columns
pre_clustF = pandas.concat([pre_clust,clustID],axis=0)
for j in np.arange(len(seqNameF)):
index = [column for column in clustID.columns if seqNameF[j][0] in column]
cluster_bins[i,j] = np.shape(clustID[index])[1]
# Do not plot the unclustered data
if i == -1:
kmean = False
continue
if i == 0:
clustered = pre_clustF
else:
clustered = pandas.concat([clustered, pre_clustF],axis = 1)
ax[0,0].plot(np.arange(len(seq_MIf)),np.ones(len(seq_MIf))*(np.shape(clustered)[1]),'white',linewidth = 3)
ax[0,0].imshow(np.transpose(np.array(clustered))[:,:-1], interpolation='nearest', aspect='auto',cmap=cm.jet)
ax[0,0].set_ylabel('Sequence Number')
this_dir = os.getcwd()
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_mat_fig.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_mat_fig.png',format='png',dpi=500)
self.img7.source = this_dir + '/' + dir_name + '/'+clust+'_mat_fig.png'
pl.close()
# Skip all of this stuff if only looking at ONE file... can't get cluster membership then
if onlyONE:
return
else:
fig, ax = pl.subplots(1, 1,squeeze=False,figsize=(16,10))
# Same deal, need to ignore the unclustered (last entry)
clustmax = 0
if kmean:
for i in np.arange(len(labels)):
newmax = max(cluster_bins[:,i])
if newmax > clustmax:
clustmax = newmax
pad = i*0.8/len(labels)
pl.bar(np.arange(fin_clustL)+pad,cluster_bins[:,i],width=0.8/len(labels),alpha = 0.5, color = cmap_discrete[i])
else:
for i in np.arange(len(labels)):
newmax = max(cluster_bins[:-1,i])
if newmax > clustmax:
clustmax = newmax
pad = i*0.8/len(labels)
pl.bar(np.arange(fin_clustL-1)+pad,cluster_bins[:-1,i],width=0.8/len(labels),alpha = 0.5, color = cmap_discrete[i])
pl.xlabel('Cluster #')
pl.ylabel('Count')
pl.legend(labels)
if clust == 'kmean':
subt_clust = 1
else:
subt_clust = 2
for i in np.arange(fin_clustL-subt_clust):
place = int(i) + 0.9 - 0.4/len(labels)
pl.plot(place*np.ones(int(clustmax)),np.arange(int(clustmax)),'black')
unclust_str = ('Number Unclustered: '+str(sum(cluster_bins[-1,:])))
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_bars_fig.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/'+clust+'_bars_fig.png',format='png',dpi=500)
self.img7_5.source = this_dir + '/' + dir_name + '/'+clust+'_bars_fig.png'
pl.close()
def prep7(self):
this_dir = os.getcwd()
# Select out our clusters of interest for downstream analysis
global sub1_MI; global sub2_MI
global sub1_seqs; global sub2_seqs
global sel1; global sel2
global skip7
        # This is defined to be sure that the unclustered group (label -1 from
        # OPTICS/DBSCAN) is not treated as a selectable cluster below
if clust == 'kmean':
subt_clust = 1
else:
subt_clust = 2
if self.ori_binary.active:
if onlyONE:
self.ori_binary.active = False
sel1 = int(self.clust1.text)
sel2 = int(self.clust2.text)
if sel1 > fin_clustL-subt_clust or sel2 > fin_clustL-subt_clust:
sel1 = 0; sel2 = 1
sub1_MI = seq_MIf[seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel1].index]]
sub2_MI = seq_MIf[seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel2].index]]
# Alright now get the sequences
sub1_seqs = np.transpose(seq_final[sub1_MI.columns])
sub2_seqs = np.transpose(seq_final[sub2_MI.columns])
sub1_seqs.to_csv(this_dir + '/' + dir_name +'/clust2seq_'+str(sel1)+'.txt',header=None,index=None)
sub2_seqs.to_csv(this_dir + '/' + dir_name +'/clust2seq_'+str(sel2)+'.txt',header=None,index=None)
# AVOID the binary clusters screen
skip7 = True
popup = Popup(title='ERROR (Click Anywhere to Dismiss)',
content=Label(text='Why would you click that button? Revert to cluster#'),
size_hint=(None, None), size=(800, 800))
popup.open()
return
# Bring back our "binary clusters" screen
skip7 = False
return
else:
sel1 = int(self.clust1.text)
sel2 = int(self.clust2.text)
if sel1 > fin_clustL-subt_clust or sel2 > fin_clustL-subt_clust:
sel1 = 0; sel2 = 1
popup = Popup(title='ERROR (Click Anywhere to Dismiss)',
content=Label(text='Selected cluster out of range, default to clust0, clust1'),
size_hint=(None, None), size=(800, 800))
popup.open()
# AVOID the binary clusters screen
skip7 = True
sub1_MI = seq_MIf[seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel1].index]]
sub2_MI = seq_MIf[seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel2].index]]
# Alright now get the sequences
sub1_seqs = np.transpose(seq_final[sub1_MI.columns])
sub2_seqs = np.transpose(seq_final[sub2_MI.columns])
sub1_seqs.to_csv(this_dir + '/' + dir_name +'/clust2seq_'+str(sel1)+'.txt',header=None,index=None)
sub2_seqs.to_csv(this_dir + '/' + dir_name +'/clust2seq_'+str(sel2)+'.txt',header=None,index=None)
def get_pos_props(self):
self.next1_8.disabled = False
this_dir = os.getcwd()
full_big.index = seq_MIf.columns
global sels
global labels_new
# Skip7 is our sign of "use the original labels"
if skip7:
# define the IDs by the datasets already subsected
sel1_pre = seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel1].index]
sel2_pre = seq_MIf.columns[cluster_dset[cluster_dset['cluster'] == sel2].index]
sel_pre = np.hstack((sel1_pre,sel2_pre))
# So I believe this should just make a big vector with IDs that matches the vector length
ID_vect = [sel1] * len(sel1_pre) + [sel2] * len(sel2_pre)
sels = np.transpose(pandas.DataFrame((sel_pre,ID_vect)))
sels.columns = ['selection','ID']
labels_new =['Cluster '+str(sel1), 'Cluster '+str(sel2)]
else:
# try to sort of cheat in creating the labels here...
labels_new = [''] * len(group_a_id)
first = True
for i in np.arange(len(group_a_id)):
# lda_checked is now a "dump this data" variable
if len(group_a_id) > 2:
if lda_checked[i]:
continue
if first:
sel_pre = [column for column in seq_MIf.columns if seqNameF[i][0] in column]
ID_vect = [int(group_a_id[i][0])] * len(sel_pre)
labels_new[int(group_a_id[i][0])] = seqNameF[i][0]
first = False
else:
temp_sel = [column for column in seq_MIf.columns if seqNameF[i][0] in column]
sel_pre = np.hstack((sel_pre,temp_sel))
ID_vect = ID_vect + [int(group_a_id[i][0])] * len(temp_sel)
if labels_new[int(group_a_id[i][0])] == '':
labels_new[int(group_a_id[i][0])] = seqNameF[i][0]
else:
labels_new[int(group_a_id[i][0])] = labels_new[int(group_a_id[i][0])] + ' + ' + seqNameF[i][0]
sels = np.transpose(pandas.DataFrame((sel_pre,ID_vect)))
sels.columns = ['selection','ID']
labels_new = [a for a in labels_new if a != '']
# Alright we need to change the way we plot these too:
fig, ax = pl.subplots(2, 1,squeeze=False,figsize=(16,8))
for j in sels['ID'].drop_duplicates():
findex = sels[sels['ID'] == j]['selection']
pre_array = np.array(full_big.loc[findex])
dat_size = len(findex)
seq_bigF = pre_array.reshape(dat_size,61,len(seq_MIf))
# If we need the sequences, we can call them this same way but
# with the variable seq_final rather than full_big
prop1 = 1
plotProp1 = np.average(seq_bigF[:,prop1,:],axis = 0)
ax[0,0].plot(plotProp1,marker='o',linewidth=2.5,color=cmap_discrete[j])
prop2 = 2
plotProp2 = np.average(seq_bigF[:,prop2,:],axis = 0)
ax[1,0].plot(plotProp2,marker='o',linewidth=2.5,color=cmap_discrete[j])
np.savetxt(this_dir + '/' + dir_name + '/position_sensitive_mat'+str(j)+'.dat',pre_array,fmt='%.3f')
ax[0,0].set_ylabel('Charge')
ax[1,0].set_ylabel('Hydrophobicity')
ax[1,0].set_xlabel('Sequence Position')
pl.legend(labels_new)
if molecule == 'mhc':
ax[0,0].set_xticks(xtick_loc)
ax[0,0].set_xticklabels(['Strand 1','Helix 1','Strand 2','Helix 2'])
else:
if LOOPnum == 6:
ax[0,0].set_xticks(xtick_loc)
ax[0,0].set_xticklabels(['CDR1L','CDR2L','CDR3L','CDR1H','CDR2H','CDR3H'])
elif LOOPnum == 3:
ax[0,0].set_xticks(xtick_loc)
ax[0,0].set_xticklabels(['CDR1','CDR2','CDR3'])
elif LOOPnum == 2:
ax[0,0].set_xticks(xtick_loc)
ax[0,0].set_xticklabels(['CDR3H','CDR3L'])
elif LOOPnum == 1:
ax[0,0].set_xticks(xtick_loc)
ax[0,0].set_xticklabels(['CDR Loop'])
# Since there's only two now, just easier to hard code these...
ax[0,0].set_ylabel('Normalized Charge')
ax[1,0].set_ylabel('Normalized Hydrophobicity')
fig.savefig(this_dir + '/' + dir_name + '/pos_prop.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/pos_prop.png',format='png',dpi=500)
self.img8.source = this_dir + '/' + dir_name + '/pos_prop.png'
pl.close()
def get_clone_pos_props(self):
self.next1_9.disabled = False
this_dir = os.getcwd()
# Generate the position sensitive charge across all clones in the dataset
num_figs = int(np.ceil(len(sels['ID'].drop_duplicates())/2))
fig, axs = pl.subplots(num_figs, 2,squeeze=False,figsize=(20,8))
fig_track = 0; track2 = 0
aa = 0
for j in sels['ID'].drop_duplicates():
findex = sels[sels['ID'] == j]['selection']
pre_array = np.array(full_big.loc[findex])
dat_size = len(findex)
seq_bigF = pre_array.reshape(dat_size,61,len(seq_MIf))
# If we need the sequences, we can call them this same way but
# with the variable seq_final rather than full_big
x = axs[fig_track,track2].imshow(seq_bigF[:,1,:],interpolation='nearest', aspect='auto',cmap=cm.PiYG)
# NEED TO CHANGE THIS SO IT ISNT DEFINED FOR EVERY FIGURE
axs[fig_track,track2].set_title(labels_new[aa] + ' - Charge')
aa += 1
fig.colorbar(x, ax=axs[fig_track,track2])
if fig_track == num_figs:
axs[fig_track,track2].set_xlabel('Sequence Position')
axs[fig_track,track2].set_ylabel('Sequence Number')
if track2 == 0:
track2 = 1
else:
fig_track += 1
track2 = 0
fig.savefig(this_dir + '/' + dir_name + '/clone_pos_prop.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/clone_pos_prop.png',format='png',dpi=500)
self.img9.source = this_dir + '/' + dir_name + '/clone_pos_prop.png'
pl.close()
def get_props(self):
self.next1_10.disabled = False
this_dir = os.getcwd()
        # Compute the average biophysical properties across all clones in the dataset
fig, ax = pl.subplots(1, 1,squeeze=False,figsize=(16,8))
x_axis = np.array([-0.2,0.9,2,3.1])
# Need to have some kind of color wheel to replace this...
# We want to exclude prop0 (the simple 1-21 AA representation entries)
aa = 0
for j in sels['ID'].drop_duplicates():
findex = sels[sels['ID'] == j]['selection']
pre_array = np.array(full_big.loc[findex])
dat_size = len(findex)
seq_bigF = pre_array.reshape(dat_size,61,len(seq_MIf))
# If we need the sequences, we can call them this same way but
# with the variable seq_final rather than full_big
plotProp1 = np.average(np.average(seq_bigF[:,1,:],axis = 1))
plotProp2 = np.average(np.average(seq_bigF[:,2,:],axis = 1))
plotProp3 = np.average(np.average(seq_bigF[:,3,:],axis = 1))
plotProp4 = np.average(np.average(seq_bigF[:,4,:],axis = 1))
stdProp1 = np.std(np.average(seq_bigF[:,1,:],axis = 1))
stdProp2 = np.std(np.average(seq_bigF[:,2,:],axis = 1))
stdProp3 = np.std(np.average(seq_bigF[:,3,:],axis = 1))
stdProp4 = np.std(np.average(seq_bigF[:,4,:],axis = 1))
plotIT = np.hstack((plotProp1, plotProp2,plotProp3,plotProp4))
stdIT = np.hstack((stdProp1, stdProp2,stdProp3,stdProp4))
ax[0,0].bar(x_axis+aa*1/len(labels_new), plotIT,
yerr = stdIT,alpha = 0.5, width = 1/len(labels_new),color=cmap_discrete[j])
aa += 1
ax[0,0].legend(labels_new)
ax[0,0].set_xticks([0.2,1.3,2.4,3.5])
ax[0,0].set_xticklabels(['Charge','Hydrophobicity','Bulkiness','Flexibility'])
ax[0,0].set_xlabel('Biophysical Property')
ax[0,0].set_ylabel('Normalized Property Value')
fig.savefig(this_dir + '/' + dir_name + '/avg_props.pdf',format='pdf',dpi=500)
fig.savefig(this_dir + '/' + dir_name + '/avg_props.png',format='png',dpi=500)
self.img10.source = this_dir + '/' + dir_name + '/avg_props.png'
pl.close()
def do_lda(self):
this_dir = os.getcwd()
numVects = int(self.inputLDA.text)
findex1 = sels[sels['ID'] == sels['ID'].drop_duplicates().values[0]]['selection']
findex2 = sels[sels['ID'] == sels['ID'].drop_duplicates().values[1]]['selection']
pre_array1 = np.array(full_big.loc[findex1])
pre_array2 = np.array(full_big.loc[findex2])
sub1_seqs = np.transpose(seq_final[findex1])
sub2_seqs = np.transpose(seq_final[findex2])
pg1 = np.transpose(sub1_seqs.values); num1 = np.shape(pg1)[1]
        pg2 = np.transpose(sub2_seqs.values); num2 = np.shape(pg2)[1]
import numpy as np
import copy
from spt3g import core
from spt3g.gcp import ACUStatus, ACUState, TrackerStatus, TrackerState, TrackerPointing, CalFile
@core.usefulfunc
def UnitValue(caldict_entry):
'''Turn unit name into floating point unit value'''
try:
uname = caldict_entry['UnitName']
if uname and uname != 'None':
try:
if '/' in uname:
unames = list(filter(None,uname.split('/')))
uvalue1 = getattr(core.G3Units,
list(filter(None,unames[0].split(' ')))[0])
uvalue2 = getattr(core.G3Units,
list(filter(None,unames[1].split(' ')))[0])
uvalue = uvalue1 / uvalue2
else:
uvalue = getattr(core.G3Units, uname)
except AttributeError:
uvalue = 1.
core.log_warn('No entry in G3Units for ' + uname + '. Setting UnitValue to 1.0\n')
else:
uvalue = 1.
except KeyError:
uvalue = 1.
return uvalue
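# Illustrative sketch (not part of spt3g): resolving a compound unit string
# with UnitValue. The 'deg' and 's' attribute names are assumed to exist in
# core.G3Units; if they do not, UnitValue falls back to 1.0 with a warning.
def _demo_unit_value():
    entry = {'UnitName': 'deg/s', 'Offset': 0.0, 'ReciprocalFactor': 1.0}
    return UnitValue(entry)  # core.G3Units.deg / core.G3Units.s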
@core.usefulfunc
def CalibrateValue(data, caldict_entry):
'''Apply gain / offset units from G3 cal file to register'''
uvalue = UnitValue(caldict_entry)
g3type = type(data)
# make a copy
if np.size(data) == 1:
data = data.value
data2 = np.array(data, dtype='float64')
thisdtype = data2.dtype
# calibrate units
data2 += np.array(caldict_entry['Offset'], dtype=thisdtype)
data2 *= np.array(uvalue / caldict_entry['ReciprocalFactor'], dtype=thisdtype)
if not data2.shape:
data2 = data2.tolist()
# if a register has units, it can't be an int anymore. well, actually,
# it can't be an int if we're adding floats to it or multiplying it by
# floats either, so convert everything that has an entry in the cal file
# to float/double.
if g3type == core.G3VectorInt:
return core.G3VectorDouble(data2)
elif g3type == core.G3MapInt:
return core.G3MapDouble(data2)
elif g3type == core.G3Int:
return core.G3Double(data2)
else:
return g3type(data2)
@core.indexmod
def CalibrateFrame(f, calibration_file=None):
'''Apply gain / offset / units from G3 cal file'''
if f.type != core.G3FrameType.GcpSlow:
return
try:
if f['Calibrated'] == True:
print('Already calibrated!\n')
return
except KeyError:
f['Calibrated'] = True
cf = CalFile.CalFileReader()
cd = cf.readCalFile(calibration_file)
for board in f.keys():
if board == 'Calibrated':
continue
cboard = copy.deepcopy(f[board])
for rmap in cboard.keys():
for reg in cboard[rmap].keys():
try:
rcd = cd[board][rmap][reg]
except KeyError:
continue
rsize = np.size(cboard[rmap][reg])
rshape = np.shape(cboard[rmap][reg])
if rsize > 1 and len(rshape) > 1:
for i in range(rshape[0]):
try:
rcdi = rcd[i]
except KeyError:
rcdi = rcd
cboard[rmap][reg][i] = CalibrateValue(cboard[rmap][reg][i], rcdi)
else:
try:
rcdi = rcd[0]
except KeyError:
rcdi = rcd
cboard[rmap][reg] = CalibrateValue(cboard[rmap][reg], rcdi)
del f[board]
f[board] = cboard
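# Usage sketch (editor's addition, not part of the original module; the file
# names are placeholders): CalibrateFrame is an indexmod, so it can be added to
# a G3Pipeline like any other frame-processing module.
#
#   pipe = core.G3Pipeline()
#   pipe.Add(core.G3Reader, filename='raw_gcp_data.g3')
#   pipe.Add(CalibrateFrame, calibration_file='gcp_cal_file')
#   pipe.Add(core.G3Writer, filename='calibrated_gcp_data.g3')
#   pipe.Run()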
@core.indexmod
def UnpackACUData(f):
'''Extracts ACU status information to ACUStatus key in frame'''
if f.type != core.G3FrameType.GcpSlow:
return
board = f['antenna0']['acu']
a = ACUStatus()
a.time = f['antenna0']['frame']['utc']
a.az_pos = board['az_pos'].value
a.el_pos = board['el_pos'].value
a.az_rate = board['az_rate'].value
a.el_rate = board['el_rate'].value
# 'new_*' registers not actually filled by GCP; ignore them
a.px_checksum_error_count = board['px_checksum_error_count'].value
a.px_resync_count = board['px_resync_count'].value
a.px_resync_timeout_count = board['px_resync_timeout_count'].value
a.px_resyncing = board['px_resyncing'].value
a.px_timeout_count = board['px_timeout_count'].value
a.restart_count = board['restart_count'].value
a.state = ACUState(board['state'].value)
a.status = board['acu_status'].value
try:
a.error = board['acu_error'].value
except KeyError:
        # This register was added some time in early 2018. In order to read
        # older data, just set the error code to 0.
a.error = 0
f['ACUStatus'] = a
@core.indexmod
def UnpackTrackerMinimal(f, rewrite_source_from_feature_bits=True):
'''
Construct SourceName and ObservationId keys from frame.
    If rewrite_source_from_feature_bits is True (the default), will try to
    rewrite the source name to "elnod", "calibrator", or "noise" if
    DecryptFeatureBit() has been run and that value is present in the
    feature bit list.
'''
if f.type != core.G3FrameType.GcpSlow:
return
# Grab the GCP source name. If it is "current", fill in something more
# helpful from the feature bits if possible.
source = f['antenna0']['tracker']['source'].value
if rewrite_source_from_feature_bits and 'GCPFeatureBits' in f:
if 'elnod' in f['GCPFeatureBits']:
source = 'elnod'
if 'calibrator' in f['GCPFeatureBits']:
source = 'calibrator'
if 'noise' in f['GCPFeatureBits']:
source = 'noise'
if 'debug' in f['GCPFeatureBits']:
source = 'debug-forced-scanify'
if 'every_pixel_on_src' in f['GCPFeatureBits']:
source = source + '-pixelraster' # NB: Do NOT use in-place +=
f['SourceName'] = source
# And observation ID, if present
if 'obs_id' in f['antenna0']['tracker']:
f['ObservationID'] = f['antenna0']['tracker']['obs_id']
@core.indexmod
def UnpackTrackerData(f, rewrite_source_from_feature_bits=True):
'''
Extracts tracker status information to frame into the TrackerStatus key,
along with the observation processing handled by UnpackTrackerMinimal.
    If rewrite_source_from_feature_bits is True (the default), will try to
    rewrite the source name to "elnod", "calibrator", or "noise" if
    DecryptFeatureBit() has been run and that value is present in the
    feature bit list.
'''
if f.type != core.G3FrameType.GcpSlow:
return
UnpackTrackerMinimal(f, rewrite_source_from_feature_bits)
board = f['antenna0']['tracker']
t = TrackerStatus()
# List comprehensions are due to funny business with G3VectorFrameObject
t.time = [tm for tm in board['utc'][0]]
# Measured values
t.az_pos = np.asarray(board['actual'][0])
t.el_pos = np.asarray(board['actual'][1])
# XXX units for rates seem to be wrong. I think this is in encoder counts
t.az_rate = np.asarray(board['actual_rates'][0], dtype = float)
t.el_rate = np.asarray(board['actual_rates'][1], dtype = float)
# Expected values
t.az_command = np.asarray(board['expected'][0])
t.el_command = np.asarray(board['expected'][1])
t.az_rate_command = np.asarray(board['expected_rates'][0], dtype = float)
t.el_rate_command = np.asarray(board['expected_rates'][1], dtype = float)
# Status params
if isinstance(board['state'][0], core.G3String):
# If state is all zero (LACKING), for example due to an ACU glitch,
# the ARC reader may decide that the 8-bit array field is a string.
# Treat it as one.
t.state = [TrackerState(0) for s in board['inControl'][0]]
else:
t.state = [TrackerState(s) for s in board['state'][0]]
t.acu_seq = board['acu_seq'][0]
t.in_control = core.BoolVector(board['inControl'][0])
t.in_control_int = core.IntVector(board['inControl'][0])
t.scan_flag = core.BoolVector(board['scan_flag'][0])
t.lst = np.asarray(board['lst'][0], dtype=float)
t.source_acquired = np.asarray(board['off_source'][0])
t.source_acquired_threshold = np.asarray(board['source_acquired_threshold'])
t.tracker_mode = np.asarray(board['mode'][0])
t.tracker_lacking = | np.asarray(board['lacking'][0]) | numpy.asarray |
from __future__ import with_statement
from collections import defaultdict
import numpy as np
import scipy
import pickle
def choice_hack(data, p=None, size = 1):
weights = p
# all choices are at equal probability if no weights given
    if weights is None:
        weights = [1.0 / float(len(data)) for x in range(len(data))]
if not | np.sum(weights) | numpy.sum |
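# Editor's aside (assumes a NumPy recent enough to support the `p` keyword of
# np.random.choice): choice_hack hand-rolls weighted sampling; with a modern
# NumPy the same draw can be written directly, and is uniform when p is None.
import numpy as np
data = ['rock', 'paper', 'scissors']
weights = [0.2, 0.3, 0.5]
print(np.random.choice(data, size=1, p=weights))  # weighted draw
print(np.random.choice(data, size=1))             # uniform draw when no weights given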
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
import numpy as np
def winning_hand(hand):
my_hand = max(hand)
if my_hand == hand[0]:
return [0,0,1]
if my_hand == hand[1]:
return [1,0,0]
if my_hand == hand[2]:
return [0,1,0]
def opponent_hand():
return np.random.permutation([1,0,0])
def array_to_hand_name(array):
arr = array[0]
arr_max = max(arr)
if arr_max == arr[0]:
return "rock"
elif arr_max == arr[1]:
return "scissors"
elif arr_max == arr[2]:
return "paper"
return ""
inputs = | np.array([]) | numpy.array |
import numpy as np
from mesonh_atm.mesonh_atmosphere import MesoNHAtmosphere
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
import modules.cloud as ModCloud
#Old Data without advection
path = "/net/skyscanner/volume1/data/mesoNH/ARM_OneHour3600files_No_Horizontal_Wind/"
mfiles = [path+"U0K10.1.min{:02d}.{:03d}_diaKCL.nc".format(minute, second)
for minute in range(1, 60)
for second in range(1, 61)]
mtstep = 1
atm = MesoNHAtmosphere(mfiles, 1)
font = {'size' : 26}
plt.rc('font', **font)
#######################################################################
########################### cloud example #############################
#######################################################################
# Example Data of two variables with the coordinates of a rough bounding box of a cloud
# RCT = liquid water content, WT = vertical wind
lwc_data=atm.data['RCT'][449:599,75:125,60:200,110:250]
zwind_data=atm.data['WT'][449:599,75:125,60:200,110:250]
ids,counter,clouds=ModCloud.cloud_segmentation(lwc_data)
clouds=list(set(clouds.values()))
length_point_clds = np.ndarray((0,1))
for each_cloud in clouds:
print(len(each_cloud.points))
temp = len(each_cloud.points)
length_point_clds = np.vstack((length_point_clds,temp))
# Get cloud with the biggest amount of points in the bounding box
cloud = clouds[np.argmax(length_point_clds)]
cloud.calculate_attributes(lwc_data,zwind_data)
lwc_cloud = np.zeros(lwc_data.shape)
for point in cloud.points:
lwc_cloud[point] = 1
#Coordinates of the rough bounding box of the example cloud
xr = np.arange(0.005 + 60*0.01, 0.005 + 200*0.01,0.01)
yr = np.arange(0.005 + 110*0.01, 0.005 + 250*0.01,0.01)
all_Zs = atm.data["VLEV"][:,0,0]
zr = all_Zs[75:125]
tr = np.arange(449,599)
origin_xy = [60,110]
zspan = np.arange(0,16)
# Plotting three different cross-sections including the center of geometry COG and the center of masses
# of the vertical wind and liquid water content
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,15].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,15].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[15]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,19].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,19].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[19]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,30].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,30].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud, z={}km, t={}s".format(np.round(float(zr[30]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
# Center of masses and Geometry, for each cross-section
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("x coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,0]*0.01 + 0.005 + origin_xy[0]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Surface(100$m^2$)")
plt.plot(zr,cloud.area_cs_tz[0],linewidth=3)
plt.title('Surface Area of Cloud, t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("time(s)")
plt.ylabel("Volume(1000 $m^3$)")
plt.plot(tr,cloud.volumen_t,linewidth=3)
plt.title('Volume of Cloud')
####### Visualizing max vertical wind as a function of z
zwind_maxz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_max = np.max(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_maxz = np.vstack((zwind_maxz,zwind_max))
####### Visualizing mean vertical wind as a function of z
zwind_meanz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_mean = np.mean(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_meanz = np.vstack((zwind_meanz,zwind_mean))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Max zwind(m/s)")
plt.plot(zr[4:],zwind_maxz,linewidth=3)
plt.title('Max Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Mean Zwind (m/s)")
plt.plot(zr[4:],zwind_meanz,linewidth=3)
plt.title('Mean Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
################# Variance behaviour of vertical wind in dependence of z
zwind_varz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_var = zwind_data[0,z][lwc_cloud[0,z]>0].var()
zwind_varz = np.vstack((zwind_varz,zwind_var))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Variance Zwind")
plt.plot(zr[4:],zwind_varz,linewidth=3)
plt.title('Variance of Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
##########################################
############# Variogram Analysis #########
##########################################
##############################################################
##### creating moving bounding box that follows center #######
##############################################################
xbound_max = int(np.max(cloud.xsize_t))
ybound_max = int(np.max(cloud.ysize_t))
zbound_max = int( | np.max(cloud.zsize_t) | numpy.max |
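# Editor's sketch (hypothetical helper, not taken from the original script): once
# the maximum x/y/z extents above are known, a fixed-size window can follow an
# integer center (for example the rounded 2D center of geometry) so that every
# cross-section of the cloud fits inside the same bounding box.
import numpy as np

def moving_window(field_2d, center_xy, half_x, half_y):
    """Return a window of field_2d around center_xy, clipped to the array bounds."""
    cx, cy = int(round(center_xy[0])), int(round(center_xy[1]))
    x0, x1 = max(cx - half_x, 0), min(cx + half_x, field_2d.shape[0])
    y0, y1 = max(cy - half_y, 0), min(cy + half_y, field_2d.shape[1])
    return field_2d[x0:x1, y0:y1]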
import numpy as np
from PIL import Image
from torchvision.transforms import functional as F
import json
import warnings
from copy import deepcopy
from math import inf
import random
from .transforms import Compose
PIL_RESIZE_MODE = {'bilinear': Image.BILINEAR, 'nearest': Image.NEAREST}
class ScaleMatchFactory(object):
@staticmethod
def create(cfg_SM):
SM = cfg_SM
if cfg_SM.TYPE == 'ScaleMatch':
sm = ScaleMatch(
anno_file=SM.TARGET_ANNO_FILE, bins=SM.BINS, except_rate=SM.EXCEPT_RATE,
default_scale=SM.DEFAULT_SCALE, scale_range=SM.SCALE_RANGE, out_scale_deal=SM.OUT_SCALE_DEAL,
mode=SM.SCALE_MODE, use_log_bins=SM.USE_LOG_SCALE_BIN)
elif cfg_SM.TYPE == 'MonotonicityScaleMatch':
sm = MonotonicityScaleMatch(
SM.SOURCE_ANNO_FILE, SM.TARGET_ANNO_FILE, bins=SM.BINS, except_rate=SM.EXCEPT_RATE,
default_scale=SM.DEFAULT_SCALE, scale_range=SM.SCALE_RANGE, out_scale_deal=SM.OUT_SCALE_DEAL,
mode=SM.SCALE_MODE, use_log_bins=SM.USE_LOG_SCALE_BIN, mu_sigma=SM.MU_SIGMA)
elif cfg_SM.TYPE == 'GaussianScaleMatch':
sm = GaussianScaleMatch(
SM.SOURCE_ANNO_FILE, SM.MU_SIGMA, SM.BINS, SM.EXCEPT_RATE, SM.SCALE_RANGE, SM.DEFAULT_SCALE,
SM.OUT_SCALE_DEAL, SM.USE_LOG_SCALE_BIN, SM.SCALE_MODE, SM.GAUSSIAN_SAMPLE_FILE,
SM.USE_MEAN_SIZE_IN_IMAGE, SM.MIN_SIZE
)
else:
raise ValueError("cfg.DATALOADER.SCALE_MATCH.TYPE must be chose in ['ScaleMatch', 'MonotonicityScaleMatch'"
"'GaussianScaleMatch'], but {} got".format(cfg_SM.TYPE))
if len(cfg_SM.REASPECT) > 0:
rs = ReAspect(cfg_SM.REASPECT)
sm = Compose([rs, sm])
return sm
class ScaleMatch(object):
"""
    ScaleMatch faces two problems in use:
    1) It may generate a scale that is too small, which can drive the loss to NaN.
    2) It may generate a scale that is too big, which can lead to running out of memory.
    We find that a bigger batch size can ease problem 1).
    There are four ways to handle these problems:
    1) clip the sampled scale to a specified scale_range
    2) change the SM target distribution through its scale mean and variance
    3) use MonotonicityScaleMatch
    4) use a chosen scale as the warm-up scale
"""
def __init__(self, distribute=None, sizes=None, # param group 1
anno_file=None, bins=100, except_rate=-1., # param group 2
scale_range=(0., 2.), default_scale=1.0, max_sample_try=5, out_scale_deal='clip', use_log_bins=False,
mode='bilinear', debug_no_image_resize=False, debug_close_record=True):
assert anno_file is not None or (distribute is not None and sizes is not None)
if anno_file is not None:
if except_rate < 0:
except_rate = 1./ bins * 2
distribute, sizes = ScaleMatch._get_distribute(json.load(open(anno_file))['annotations'], bins,
except_rate, use_log_bins)
self.distri_cumsum = np.cumsum(distribute)
self.sizes = sizes
self.mode = PIL_RESIZE_MODE[mode]
self.scale_range = scale_range # scale_range[1] to avoid out of memory
self.out_scale_deal = out_scale_deal
assert out_scale_deal in ['clip', 'use_default_scale']
self.max_sample_try = max_sample_try
self.default_scale = default_scale
self.fail_time = 0
self.debug_no_image_resize = debug_no_image_resize
self.debug_record = DebugScaleRecord(debug_close_record)
@staticmethod
def _get_distribute(annotations, bins=100, except_rate=0.1, use_log_bins=False, mu_sigma=(-1, -1)):
"""
        except_rate: exclude len(annotations)*except_rate/2 outlier points each into the head and tail bins
"""
annos = [anno for anno in annotations if not anno['iscrowd']]
if len(annos) > 0 and 'ignore' in annos[0]:
annos = [anno for anno in annos if not anno['ignore']]
sizes = np.sqrt(np.array([anno['bbox'][2] * anno['bbox'][3] for anno in annos]))
sizes = sizes[sizes > 0]
if mu_sigma[0] > 0 and mu_sigma[1] > 0:
print('distribute(mu, sigma): ', np.mean(sizes), np.std(sizes), end='->')
sizes = (sizes - np.mean(sizes)) / np.std(sizes)
sizes = sizes * mu_sigma[1] + mu_sigma[0]
print(np.mean(sizes), np.std(sizes))
sizes = sizes.clip(1)
if use_log_bins:
sizes = np.log(sizes)
sizes = np.sort(sizes)
N = len(sizes)
hist_sizes = sizes[int(N * except_rate / 2): int(N * (1 - except_rate / 2))]
if except_rate > 0:
c, s = np.histogram(hist_sizes, bins=bins-2)
c = np.array([int(N * except_rate / 2)] + c.tolist() + [N - int(N * (1 - except_rate / 2))])
s = [sizes[0]] + s.tolist() + [sizes[-1]]
s = np.array(s)
else:
c, s = np.histogram(hist_sizes, bins=bins)
c = c / len(sizes)
if use_log_bins:
s = | np.exp(s) | numpy.exp |
r"""
"""
import numpy as _np
from numpy.linalg import norm as _norm
from openpnm.utils import logging as _logging
_logger = _logging.getLogger(__name__)
def ctc(target):
r"""
Calculates throat length assuming point-like pores, i.e. center-to-center
distance between pores. Also, this model assumes that pores and throat
centroids are colinear.
Parameters
----------
target : GenericGeometry
Geometry object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
Returns
-------
value : ndarray
Array containing throat length values.
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
cn = network['throat.conns'][throats]
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
value = | _norm(C1 - C2, axis=1) | numpy.linalg.norm |
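# Usage sketch (editor's addition; `geo` and the OpenPNM 2.x calls below are
# assumed, not taken from this module): like other geometry models, ctc is
# attached to a Geometry object with add_model, which evaluates it over that
# object's throats and stores the result under the chosen propname.
#
#   import openpnm as op
#   pn = op.network.Cubic(shape=[3, 3, 3], spacing=1e-4)
#   geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
#   geo.add_model(propname='throat.ctc_length', model=ctc)
#   print(geo['throat.ctc_length'])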