import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
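# For contrast, a hedged example (added, not part of the original test): with
# align=True an itemsize of 8 would be accepted here, since the struct
# alignment (4, from 'i4') divides 8 and both fields sit on aligned offsets:
# np.dtype({'names': ['f0', 'f1'], 'formats': ['i4', 'i1'],
#           'offsets': [0, 4], 'itemsize': 8}, align=True)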
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
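# Added note for clarity: with little-endian layout, 'f1' ('<u2' at offset 0)
# holds the low 16 bits and 'f2' ('<u2' at offset 2) the high 16 bits of the
# 'f0' ('<u4') view, so f0 = 10 + 36 * 2**16 = 2359306, which is what the
# line above checks (36*256*256 == 36 * 2**16).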
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
import cv2
import numpy as np
import open3d
import plotly.express as px
import plotly.graph_objects as go
import plotly.graph_objs as go
def show_img_grayscale(image: np.ndarray, title="") -> None:
if len(image.shape) == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fig = px.imshow(image, color_continuous_scale="gray", title=title)
fig.show()
def show_img(image: np.ndarray, title: str = "") -> None:
px.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB), title=title).show()
def show_stl(mesh: open3d.geometry.TriangleMesh, title: str = "") -> None:
def stl2mesh3d(stl_mesh):
# stl_mesh is read by numpy-stl from an stl file; it is an array of faces/triangles (i.e. three 3d points)
# this function extracts the unique vertices and the lists I, J, K to define a Plotly mesh3d
p, q, r = stl_mesh.vectors.shape # (p, 3, 3)
# the array stl_mesh.vectors.reshape(p*q, r) can contain multiple copies of the same vertex;
# extract unique vertices from all mesh triangles
vertices, ixr = np.unique(
stl_mesh.vectors.reshape(p * q, r), return_inverse=True, axis=0
)
I = np.take(ixr, [3 * k for k in range(p)])
J = np.take(ixr, [3 * k + 1 for k in range(p)])
K = np.take(ixr, [3 * k + 2 for k in range(p)])
return vertices, I, J, K
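# Illustrative note (added, not in the original): for two triangles sharing an
# edge, np.unique(..., return_inverse=True) collapses the 6 stacked corner
# rows into 4 unique vertices, and ixr maps every original corner back to its
# row in `vertices`; taking ixr at positions 3k, 3k+1, 3k+2 then yields the
# per-triangle index lists I, J, K that go.Mesh3d expects.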
vertices, I, J, K = stl2mesh3d(mesh)
x, y, z = vertices.T
colorscale = [[0, "#e5dee5"], [1, "#e5dee5"]]
mesh3D = go.Mesh3d(
x=x,
y=y,
z=z,
i=I,
j=J,
k=K,
flatshading=True,
colorscale=colorscale,
intensity=z,
name=title,
showscale=False,
)
layout = go.Layout(
paper_bgcolor="rgb(1,1,1)",
title_text=title,
title_x=0.5,
font_color="white",
scene_camera=dict(eye=dict(x=1.25, y=-1.25, z=1)),
scene_xaxis_visible=False,
scene_yaxis_visible=False,
scene_zaxis_visible=False,
scene=dict(aspectmode="data"),
)
fig = go.Figure(data=[mesh3D], layout=layout)
fig.data[0].update(
lighting=dict(
ambient=0.18,
diffuse=1,
fresnel=0.1,
specular=1,
roughness=0.1,
facenormalsepsilon=0,
)
)
fig.show()
def triangle_mesh_to_fig(mesh: open3d.geometry.TriangleMesh) -> go.Figure:
"""Takes a open3d TriangleMesh and returns a plotly Mesh3d Figure.
Parameters
----------
mesh : open3d.geometry.TriangleMesh
The open3d Mesh to convert to a Plotly Mesh3d Figure
Returns
-------
go.Figure
The final figure, can be shown using dash.Graph
"""
R = mesh.get_rotation_matrix_from_xyz((-np.pi / 2, 0, np.pi))
mesh.rotate(R, center=(0, 0, 0))
verts = np.asarray(mesh.vertices)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 18:19:27 2020
@author: <NAME>
"""
import logging
import numpy as np
import pandas as pd
from numpy import linalg as LA
from numpy.core.umath_tests import inner1d
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import Rbf
import sys
logger = logging.getLogger(__name__)
class Mesh_Def:
def __init__(self,args,aeroframeSettings,lattice):
"""
*_p : lattice points
*r_p : lattice points reshaped array for ease of use
*_v : horseshoe vortex points
*r_v : horseshoe vortex points reshaped for ease of use
*_c : cell collocation points
*_n : normal vector directions
*_b : bound leg midpoints
f_a : final area of each individual panel
u_* : deformation of the given type of point. Follows the same naming
pattern as for the previous 5 items
"""
self.args = args
self.aeroframeSettings = aeroframeSettings
self.lattice = lattice
# stores lattice shapes (only these two are needed, the others are
# of identical shape)
self.s_p = lattice.p.shape
self.s_v = lattice.v.shape
self.s_c = lattice.c.shape
self.s_b = lattice.bound_leg_midpoints.shape
self.s_n = lattice.n.shape
# stores lattice initial (i_) data
self.i_p = np.copy(lattice.p)
self.ir_p = self.i_p.reshape((self.s_p[0] * self.s_p[1], self.s_p[2]))
self.i_v = np.copy(lattice.v)
self.ir_v = self.i_v.reshape((self.s_v[0] * self.s_v[1], self.s_v[2]))
self.i_c = np.copy(lattice.c)
self.i_n = np.copy(lattice.n)
self.i_b = np.copy(lattice.bound_leg_midpoints)
# stores lattice final (f_) data
self.f_p = np.zeros([self.s_p[0], self.s_p[1], self.s_p[2]])
self.fr_p = np.zeros([self.s_p[0] * self.s_p[1], self.s_p[2]])
self.f_v = np.zeros([self.s_p[0], self.s_p[1], self.s_p[2]])
self.fr_v = np.zeros([self.s_p[0] * self.s_p[1], self.s_p[2]])
self.f_c = np.zeros([self.s_c[0] * self.s_c[1]])
self.f_n = np.zeros([self.s_c[0] * self.s_c[1]])
self.f_b = np.zeros([self.s_c[0] * self.s_c[1]])
self.f_a = np.zeros([self.s_c[0] * self.s_c[1]])
# Cells absolute y coordinates (needed for testing and debug)
self.y_p = np.abs(self.ir_p[:,1])
self.y_v = np.abs(self.ir_v[:,1])
self.y_c = np.abs(self.i_c[:,1])
self.y_b = np.abs(self.i_b[:,1])
self.x_p = np.abs(self.ir_p[:,0])
self.x_v = np.abs(self.ir_v[:,0])
self.x_c = np.abs(self.i_c[:,0])
self.x_b = np.abs(self.i_b[:,0])
# Mesh displacement
self.u_p = np.zeros((self.s_p[0] * self.s_p[1], self.s_p[2]))
self.u_v = np.zeros((self.s_p[0] * self.s_p[1], self.s_p[2]))
self.u_c = np.zeros((self.s_c[0], self.s_c[1]))
self.u_b = np.zeros((self.s_c[0], self.s_c[1]))
# logger.debug(self.u_p)
def mesh_update(self):
"""
Feeds deformed values back to the mesh
Returns
-------
None.
"""
self.fr_p = self.ir_p + self.u_p
self.f_p = self.i_p + self.u_p.reshape(self.s_p[0],
self.s_p[1],
self.s_p[2])
self.fr_v = self.ir_v + self.u_v
self.f_v = self.i_v + self.u_v.reshape(self.s_v[0],
self.s_v[1],
self.s_v[2])
self.f_c = self.i_c + self.u_c
self.f_b = self.i_b + self.u_b
def cantilever(self, y, q, L, E, Ix):
return q*y**2 * (6*L**2 - 4*L*y + y**2) / (24*E*Ix)
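# Added sanity note: this is the standard deflection of a cantilever beam
# under a uniformly distributed load q; at the free end (y = L) it reduces to
# the textbook tip deflection q*L**4 / (8*E*Ix).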
def shape_1(self,settings):
"""
Shape function 1. This function represents a straight line of slope m. Its
only purpose is to test whether the deformation is applied correctly.
"""
csv_save = True
const = 0.0
logger.info("Shape function 1 is selected")
case = settings.settings["aircraft"]
if case == "1_flat_funcActivated.json":
m = 0.0
elif case == "2_dih_funcActivated.json":
m = 0.1
elif case == "3_anh_funcActivated.json":
m = -0.1
elif case == "4_flat_funcActivated.json":
m = 0
elif case == "5_dih_funcActivated.json":
m = 0.1
elif case == "6_anh_funcActivated.json":
m = -0.1
elif case == "7_flat_funcActivated.json":
m = 0
elif case == "8_dih_funcActivated.json":
m = 0.1
elif case == "9_anh_funcActivated.json":
m = -0.1
elif case == "10_flat_funcActivated.json":
m = 0
elif case == "11_dih_funcActivated.json":
m = 0.1
elif case == "12_anh_funcActivated.json":
m = -0.1
elif case == "AircraftMalo-std_funActivated.xml":
m = const
elif case == "B7772VSP_v3.1_funActivated.xml":
m = const
elif case == "BWB_102_VTP1_v3.1_funActivated.xml":
m = const
elif case == "BWB_ACFA_cpacs_v3.1_funActivated.xml":
m = const
elif case == "Circlewing_Test.v_3.1_funActivated.xml":
m = const
elif case == "D150_AGILE_Hangar_funActivated.xml":
m = const
elif case == "Boxwing_AGILE_Hangar_funActivated_v3.1.xml":
m = const
elif case == "Optimale_Tornado_SU2_funActivated.xml":
m = const
elif case == "Optimale.xml":
m = 1e-6
elif case == "EbeeX_d0_q0.xml":
m = 0.1975
elif "Wing_" in case:
logger.debug(int(case[15:-4]))
m = np.linspace(0,0.1,13)
m = m[int(case[15:-4])-1]
else:
logger.warning("Deformation input UNEXPECTED")
h = 0
self.u_p[:,2] = m * self.y_p + h
self.u_v[:,2] = m * self.y_v + h
self.u_c[:,2] = m * self.y_c + h
self.u_b[:,2] = m * self.y_b + h
self.mesh_update()
# logger.debug(self.u_p.shape)
# Saves data to csv. This part is put here for ease of use during the debug
# phase. "csv_save" will be set to False when this phase is done
# TODO set "csv_save" to false when debug is finished
if csv_save:
headers = ["x","y","z","dx","dy","dz"]
points = np.concatenate((self.i_c,self.u_c),axis=1)
filepath = str(settings.paths('f_deformation'))
name = "deformation_data.csv"
dataset = pd.DataFrame(points,columns=headers)
dataset.to_csv(filepath[:-4]+name,index=False,float_format='%.18E')
logger.info("csv file saved")
def shape_2(self,settings):
"""
Shape function 2. This function computes the deflection at each y location
for a cantilever beam of length "L" under a distributed load "q".
The Young's modulus is imposed for steel and "Ix", the second moment of
area, is also imposed.
"""
csv_save = True
case = settings.settings["aircraft"]
logger.info("Shape function 2 is selected")
if case == "EbeeX_d0_q0.xml":
m = 0.0
if case == "Optimale.xml":
m = 0.0
else:
logger.warning("Deformation input UNEXPECTED")
h = 0
self.u_p[:,2] = m * self.y_p + h
self.u_v[:,2] = m * self.y_v + h
self.u_c[:,2] = m * self.y_c + h
self.u_b[:,2] = m * self.y_b + h
cst2 = 1e-1
self.u_p[:,2] = self.u_p[:,2] + cst2*self.y_p**2
self.u_v[:,2] = self.u_v[:,2] + cst2*self.y_v**2
self.u_c[:,2] = self.u_c[:,2] + cst2*self.y_c**2
self.u_b[:,2] = self.u_b[:,2] + cst2*self.y_b**2
self.mesh_update()
if csv_save:
headers = ["x","y","z","dx","dy","dz"]
points = np.concatenate((self.i_c,self.u_c),axis=1)
filepath = str(settings.paths('f_deformation'))
name = "deformation_data.csv"
dataset = pd.DataFrame(points,columns=headers)
dataset.to_csv(filepath[:-4]+name,index=False,float_format='%.18E')
logger.info("csv file saved")
# # [N/m] Distributed load
# q = 2000000
# # [m] Wing span
# L = 1
# # logger.debug("L = " + str(L) )
# # [Pa] Elasticity modulus
# E = 210e9
# # [m**4] Second moment of area
# Ix = 1.330e-6
# # Computes beam deformation
# self.u_p[:,2] = self.cantilever(self.y_p, q, L, E, Ix)
# self.u_v[:,2] = self.cantilever(self.y_v, q, L, E, Ix)
# self.u_c[:,2] = self.cantilever(self.y_c, q, L, E, Ix)
# self.u_b[:,2] = self.cantilever(self.y_b, q, L, E, Ix)
# # logger.debug(self.i_v)
# self.mesh_update()
# s = self.f_v.shape
# var = self.f_v.reshape(s[0]*s[1],s[2]) - self.i_v.reshape(s[0]*s[1],s[2])
# logger.debug(np.max(var[:,0]))
def framatDeformation(self,transform):
x = self.i_c[:,0]
y = self.i_c[:,1]
z = self.i_c[:,2]
logger.debug("x: \n"+str(x))
logger.debug("tranform.uax = \n"+str(transform.aux))
d = transform.displacements
logger.debug("x.shape = "+str(x.shape))
logger.debug("y.shape = "+str(y.shape))
logger.debug("z.shape = "+str(z.shape))
logger.debug("d.shape = "+str(d.shape))
logger.debug(transform.displacements)
# sys.exit()
# s = dataset.shape
# Sorts out which type of FEM simulation was done (beam or shell)
# TODO: separate the airplane in half using the x axis. At the moment
# there is an issue with the center of the airplane.
logger.info("Input deformation data is of type surface")
# interpolates the points (lattice.p)
rbfi = Rbf(x,y,z,d,function='linear',mode="N-D")
self.u_p = rbfi(self.ir_p[:,0],self.ir_p[:,1],self.ir_p[:,2])
# interpolates the vortex horseshoe points (lattice.v)
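# Added clarification: lattice.v stores 4 points per horseshoe; only the two
# inner points (i % 4 == 1 and i % 4 == 2) are interpolated, and each outer
# trailing point copies the displacement of its neighbouring inner point, so
# the trailing legs follow the bound leg deformation.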
for i in range(len(self.ir_v)):
if (i % 4) == 1:
self.u_v[i] = rbfi(self.ir_v[i,0],
self.ir_v[i,1],
self.ir_v[i,2])
self.u_v[i-1] = self.u_v[i]
elif (i % 4) == 2:
self.u_v[i] = rbfi(self.ir_v[i,0],
self.ir_v[i,1],
self.ir_v[i,2])
self.u_v[i+1] = self.u_v[i]
# interpolates the collocation points (lattice.c)
self.u_c = rbfi(self.i_c[:,0],self.i_c[:,1],self.i_c[:,2])
# interpolates the bound leg mid-points (lattice.blm)
self.u_b = rbfi(self.i_b[:,0],self.i_b[:,1],self.i_b[:,2])
# Feed values to the deformed points (f for final).
self.fr_p = self.ir_p + self.u_p
self.f_p = self.i_p + self.u_p.reshape(self.s_p[0],
self.s_p[1],
self.s_p[2])
self.fr_v = self.ir_v + self.u_v
self.f_v = self.i_v + self.u_v.reshape(self.s_v[0],
self.s_v[1],
self.s_v[2])
self.f_c = self.i_c + self.u_c
self.f_b = self.i_b + self.u_b
def CSVDeformation(self):
"""
Loads a displacement file of format .csv and updates the mesh accordingly.
RBF interpolation explained:
https://www.youtube.com/watch?v=OOpfU3CvUkM
Returns
-------
None.
TODO: take into account the potential rotations if the file is constructed
with beams.
"""
logger.debug("=== csv deformation function called ===")
# Path of the deformation file from the current working directory
pathFromCWD = self.aeroframeSettings["deformation_file"]
path = self.args.cwd + "/" + pathFromCWD
logger.debug("Input deformation csv file path is: \n"+str(path))
if path is None:
logger.error("NO DEFORMATION FILE")
try:
dataset = pd.read_csv(path)
dataset = dataset.to_numpy()
x = dataset[:,0]
y = dataset[:,1]
z = dataset[:,2]
d = dataset[:,3:]
s = dataset.shape
# h = list(disp.columns.values)
# N_headers = len(h)
# Sorts out which type of FEM simulation was done (beam or shell)
# TODO: separate the airplane in half using the x axis. At the moment
# there is an issue with the center of the airplane.
if s[1] == 6:
logger.info("Input deformation data is of type surface")
# interpolates the points (lattice.p)
# Previous experience with Optimale aircraft:
# 'multiquadric': GOOD RESULTS with eps = [1e-3;1e-1]
# 'inverse': Does not work with any eps
# 'gaussian': works only with eps too small to capture deformation
# 'linear': GOOD RESULTS, small lack of symmetry
# 'cubic': GOOD RESULTS, small lack of symmetry
# 'quintic': GOOD RESULTS, small lack of symmetry
# 'thin_plate': BEST RESULTS, very little loss of symmetry
rbfi = Rbf(x,y,z,d,function='thin_plate',mode="N-D",epsilon=1e-5)
self.u_p = rbfi(self.ir_p[:,0],self.ir_p[:,1],self.ir_p[:,2])
# interpolates the vortex horseshoe points (lattice.v)
for i in range(len(self.ir_v)):
if (i % 4) == 1:
self.u_v[i] = rbfi(self.ir_v[i,0],
self.ir_v[i,1],
self.ir_v[i,2])
self.u_v[i-1] = self.u_v[i]
elif (i % 4) == 2:
self.u_v[i] = rbfi(self.ir_v[i,0],
self.ir_v[i,1],
self.ir_v[i,2])
self.u_v[i+1] = self.u_v[i]
# interpolates the collocation points (lattice.c)
self.u_c = rbfi(self.i_c[:,0],self.i_c[:,1],self.i_c[:,2])
# interpolates the bound leg mid-points (lattice.blm)
self.u_b = rbfi(self.i_b[:,0],self.i_b[:,1],self.i_b[:,2])
# Feed values to the deformed points (f for final).
self.fr_p = self.ir_p + self.u_p
self.f_p = self.i_p + self.u_p.reshape(self.s_p[0],
self.s_p[1],
self.s_p[2])
self.fr_v = self.ir_v + self.u_v
self.f_v = self.i_v + self.u_v.reshape(self.s_v[0],
self.s_v[1],
self.s_v[2])
except FileNotFoundError:
logger.info("No such deformation file or directiory:\n" + str(path))
logger.info("Simulation is continued without deformation")
sys.exit()
self.f_c = self.i_c + self.u_c
self.f_b = self.i_b + self.u_b
def deformation(self,acceptedNames,transform=None):
"""
This function deforms the mesh, computes the new parameters p, v, c, n
a. The newly computed parameters are then fed back into the lattice
class variable in the stdrun.run function.
The stdrun.run function will then continue to compute the simulation
with the deformed mesh.
Parameters
----------
settings : class variable
Variable of class settings. This variable is used for checking
which simulation should be done especially during the debug and
testing phase. It also provides the path of the current simulation
Returns
-------
None.
"""
logger.info("=== Starts deformation function ===")
# Computes the initial normal vector for each panel. SVD has a
# property that in the vh matrix all the row vectors are orthonormal.
# This provides the reference frame needed to
# compute the angles between old undeformed mesh and the new
# deformed reference frame for the panel.
G = np.concatenate((self.i_c, self.i_c, self.i_c, self.i_c), axis=1)
mat = self.i_p - G.reshape(self.s_p[0],self.s_p[1],self.s_p[2])
u, s, vh_i = LA.svd(mat)
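# Added note: mat holds, for every panel, its 4 corner points centred on the
# collocation point, so the last right-singular vector vh_i[:, 2, :] (the one
# associated with the smallest singular value) is the best-fit plane normal
# of the undeformed panel.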
# user input choice
######################################################################
# Old way of doing things. Might be useful for debugging
#
# if settings.settings["deformation_method"] == "shape_1":
# logger.debug(settings.settings["deformation_method"])
# self.shape_1(settings)
# elif settings.settings["deformation_method"] == "shape_2":
# logger.debug(settings.settings["deformation_method"])
# self.shape_2(settings)
######################################################################
if self.aeroframeSettings["CSD_solver"] in acceptedNames[1]:
self.CSVDeformation()
elif self.aeroframeSettings["CSD_solver"] in acceptedNames[2]:
self.framatDeformation(transform)
else:
logger.error("No shape function selected")
sys.exit()
# Computes the deformed reference frame by using the same SVD property
# as before.
G = np.concatenate((self.f_c, self.f_c, self.f_c, self.f_c), axis=1)
mat = self.f_p - G.reshape(self.s_p[0],self.s_p[1],self.s_p[2])
u, s, vh_f = LA.svd(mat)
# Computes the rotation pivot vector. Equivalent of a hinge axis for
# rotation. This is useful for the quaternion description
rot_g = np.cross(vh_f[:,2,:],vh_i[:,2,:])
rot = rot_g / np.linalg.norm(rot_g,axis=1,)[:,np.newaxis]
rot[np.isnan(rot)] = 0.0
# Computes the angle between the initial and deformed normal vector
# dot product of vector "a" (initial state) and "b" (deformed state).
ab = inner1d(vh_f[:,2,:],vh_i[:,2,:])
a = LA.norm(vh_f[:,2,:], axis=1)
b = LA.norm(vh_i[:,2,:], axis=1)
angle = np.arccos(ab / (a*b))
import numpy as np
import xarray as xr
import pandas as pd
import multiprocessing as mp
class PreprocessData:
"""Class instantiation of PreprocessData:
Here we will be preprocessing data for deep learning model training.
Attributes:
working_directory (str): The path to the directory where the deep learning preprocessing files will be saved and worked from.
stormpatch_path (str): Where the storm patch files were saved.
climate (str): The climate period to derive deep learning data for. Options are ``current`` or ``future``.
threshold1 (int): The UH threshold to use. This value will delineate some form of ``severe`` and ``non-severe`` storm patches.
mask (boolean): Whether the threshold will be applied within the storm patch mask or within the full storm patch. Defaults to ``False``.
num_cpus (int): Number of CPUs to use in a node for parallelizing extractions. Defaults to 36 (Cheyenne compute nodes contain 36).
"""
def __init__(self, working_directory, stormpatch_path, climate, threshold1, mask=False, num_cpus=36):
# class attributes
self.working_directory=working_directory
self.stormpatch_path=stormpatch_path
# sanity check
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# class attributes
self.threshold1=threshold1
# string help
self.mask=mask
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# cpus for parallelizing
self.num_cpus=num_cpus
def generate_time_full(self):
"""Creation of the full time period that will be looped through for extracting storm patch information.
Only considering December-May months due to warm season bias over the central CONUS. The CONUS1 simulations
were run for 2000-2013.
Returns:
Pandas date range (DatetimeIndex).
"""
return pd.date_range('2000-10-01','2013-09-30',freq='MS')[(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==12)|
(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==1)|
(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==2)|
(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==3)|
(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==4)|
(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==5)]
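# Equivalent, more readable form (added as a hedged illustration, assuming
# the same pandas behaviour):
#     dr = pd.date_range('2000-10-01', '2013-09-30', freq='MS')
#     return dr[dr.month.isin([12, 1, 2, 3, 4, 5])]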
def create_data_indices(self, time):
"""Split the loaded data into categories based on the UH threshold chosen and save the first intermediary files. Here we create
the indices of the storm patches that satisfy UH criteria for later use.
Args:
time (DatetimeIndex): Time object from pandas date range.
"""
if not self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.values.max(axis=(1,2)) > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
if self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.where(data.mask).max(axis=(1,2), skipna=True).values > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
def parallelizing_indxs(self):
"""Activate the multiprocessing function to parallelize the functions.
"""
print(f"Starting jobs...")
timearray=self.generate_time_full()
pool1=mp.Pool(self.num_cpus)
for time in timearray:
print(f"Extracting {time.strftime('%Y-%m')} indices...")
pool1.apply_async(self.create_data_indices, args=([time]))
pool1.close()
pool1.join()
print(f"Completed the jobs.")
def generate_time_month(self, month_int):
"""Creation of the time array that will be looped through for extracting storm patch information.
Args:
month_int (int): The month being used for the time array (2000-2013 years).
Returns:
Pandas date range (DatetimeIndex) for the respective month.
"""
return pd.date_range('2000-10-01','2013-09-30',freq='MS')[(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==month_int)]
def apply_exceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that exceeded the threshold.
Args:
data_var (Xarray data array): The variable's data.
data_mask (Xarray data array): The storm patch mask.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Xarray data array of the variable for the storm patches that exceeded the UH threshold.
"""
return data_var.var_grid.sel(levels=level)[data_mask.grid.values,:,:]
def apply_notexceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that did not exceed the threshold.
Args:
data_var (Xarray data array): The variable's data.
data_mask (Xarray data array): The storm patch mask.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Numpy array of the variable for the storm patches that did not exceed the UH threshold.
"""
return np.delete(data_var.var_grid.sel(levels=level).values, data_mask.grid.values, axis=0)
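# Added note: the two helpers above are complementary selections along the
# patch axis; with idx = data_mask.grid.values, arr[idx] keeps the exceedance
# patches while np.delete(arr, idx, axis=0) keeps all the remaining ones.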
def flatten_list(self, array):
"""Function to flatten the created list of Xarray data arrays.
Args:
array (list): The list of Xarray data arrays.
Returns:
Flattened list of Xarray data arrays.
"""
return [j for i in array for j in i.values]
def flatten_arraylist(self, array):
"""Function to flatten the created list of numpy arrays.
Args:
array (list): The list of numpy arrays.
Returns:
Flattened list of numpy arrays.
"""
return [j for i in array for j in i]
def month_translate(self, num):
"""Convert integer month to string month.
Args:
num (int): Input month.
Returns:
out (str): Input month as string.
Raises:
ValueError: If the month is not within the study's range (Dec-May).
"""
var={12:'December',
1:'January',
2:'February',
3:'March',
4:'April',
5:'May'}
try:
out=var[num]
return out
except KeyError:
raise ValueError("Please enter month integer from Dec-May.")
def run_months(self, months=np.array([12,1,2,3,4,5]), uh=True, nouh=True):
"""Function to automate and parallelize the creation of the exceedance/nonexceedance files.
Args:
months (int array): Months to iterate through.
uh (boolean): Whether to compute analysis for threshold exceedances. Defaults to ``True``.
nouh (boolean): Whether to compute analysis for threshold non-exceedances. Defaults to ``True``.
"""
pool2=mp.Pool(self.num_cpus)
for mo in months:
if uh:
print(f"Creating {self.month_translate(mo)} patches of threshold exceedances...")
pool2.apply_async(self.create_files_exceed_threshold, args=([mo]))
if nouh:
print(f"Creating {self.month_translate(mo)} patches of threshold non-exceedances...")
pool2.apply_async(self.create_files_notexceed_threshold, args=([mo]))
pool2.close()
pool2.join()
print(f"Completed the jobs.")
def create_files_exceed_threshold(self, month_int):
"""Create and save files containing the environment patches for storms that exceeded the threshold.
Data files being opened contain the storm patches, not the full CONUS WRF domain.
Args:
month_int (int): Month for analysis.
"""
time_temp=self.generate_time_month(month_int)
data_temp_sev_1=[]; data_temp_sev_3=[]; data_temp_sev_5=[]; data_temp_sev_7=[]; data_evwd_sev_1=[]; data_evwd_sev_3=[]
data_euwd_sev_1=[]; data_euwd_sev_3=[]; data_euwd_sev_5=[]; data_euwd_sev_7=[]; data_evwd_sev_5=[]; data_evwd_sev_7=[]
data_qvap_sev_1=[]; data_qvap_sev_3=[]; data_qvap_sev_5=[]; data_qvap_sev_7=[]; data_dbzs_sev_1=[]; data_maxw_sev_1=[]
data_pres_sev_1=[]; data_pres_sev_3=[]; data_pres_sev_5=[]; data_pres_sev_7=[]; data_ctts_sev_1=[]; data_mask_sev_1=[]
data_wwnd_sev_1=[]; data_wwnd_sev_3=[]; data_wwnd_sev_5=[]; data_wwnd_sev_7=[]; data_uh25_sev_1=[]; data_uh03_sev_1=[]
for time in time_temp:
print(f"opening files for {time.strftime('%Y')}{time.strftime('%m')}")
data_mask=xr.open_mfdataset(
f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc",
combine='by_coords')
data_temp=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_tk_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_evwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_ev_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_euwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_eu_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_qvap=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_qvapor_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_pres=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_p_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_wwnd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_w_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_maxw=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_maxw_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_gen =xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_temp_sev_1.append(self.apply_exceed_mask(data_temp, data_mask, 0))
data_temp_sev_3.append(self.apply_exceed_mask(data_temp, data_mask, 1))
data_temp_sev_5.append(self.apply_exceed_mask(data_temp, data_mask, 2))
data_temp_sev_7.append(self.apply_exceed_mask(data_temp, data_mask, 3))
data_evwd_sev_1.append(self.apply_exceed_mask(data_evwd, data_mask, 0))
data_evwd_sev_3.append(self.apply_exceed_mask(data_evwd, data_mask, 1))
data_evwd_sev_5.append(self.apply_exceed_mask(data_evwd, data_mask, 2))
data_evwd_sev_7.append(self.apply_exceed_mask(data_evwd, data_mask, 3))
data_euwd_sev_1.append(self.apply_exceed_mask(data_euwd, data_mask, 0))
data_euwd_sev_3.append(self.apply_exceed_mask(data_euwd, data_mask, 1))
data_euwd_sev_5.append(self.apply_exceed_mask(data_euwd, data_mask, 2))
data_euwd_sev_7.append(self.apply_exceed_mask(data_euwd, data_mask, 3))
data_qvap_sev_1.append(self.apply_exceed_mask(data_qvap, data_mask, 0))
data_qvap_sev_3.append(self.apply_exceed_mask(data_qvap, data_mask, 1))
data_qvap_sev_5.append(self.apply_exceed_mask(data_qvap, data_mask, 2))
data_qvap_sev_7.append(self.apply_exceed_mask(data_qvap, data_mask, 3))
data_pres_sev_1.append(self.apply_exceed_mask(data_pres, data_mask, 0))
data_pres_sev_3.append(self.apply_exceed_mask(data_pres, data_mask, 1))
data_pres_sev_5.append(self.apply_exceed_mask(data_pres, data_mask, 2))
data_pres_sev_7.append(self.apply_exceed_mask(data_pres, data_mask, 3))
data_wwnd_sev_1.append(self.apply_exceed_mask(data_wwnd, data_mask, 0))
data_wwnd_sev_3.append(self.apply_exceed_mask(data_wwnd, data_mask, 1))
data_wwnd_sev_5.append(self.apply_exceed_mask(data_wwnd, data_mask, 2))
data_wwnd_sev_7.append(self.apply_exceed_mask(data_wwnd, data_mask, 3))
data_maxw_sev_1.append(data_maxw.var_grid[data_mask.grid.values,:,:])
data_dbzs_sev_1.append(data_gen.dbz_grid[data_mask.grid.values,:,:])
data_ctts_sev_1.append(data_gen.ctt_grid[data_mask.grid.values,:,:])
data_uh25_sev_1.append(data_gen.uh25_grid[data_mask.grid.values,:,:])
data_uh03_sev_1.append(data_gen.uh03_grid[data_mask.grid.values,:,:])
data_mask_sev_1.append(data_gen.mask[data_mask.grid.values,:,:])
data_temp_sev_1_patches=self.flatten_list(data_temp_sev_1)
data_temp_sev_3_patches=self.flatten_list(data_temp_sev_3)
data_temp_sev_5_patches=self.flatten_list(data_temp_sev_5)
data_temp_sev_7_patches=self.flatten_list(data_temp_sev_7)
data_evwd_sev_1_patches=self.flatten_list(data_evwd_sev_1)
data_evwd_sev_3_patches=self.flatten_list(data_evwd_sev_3)
data_evwd_sev_5_patches=self.flatten_list(data_evwd_sev_5)
data_evwd_sev_7_patches=self.flatten_list(data_evwd_sev_7)
data_euwd_sev_1_patches=self.flatten_list(data_euwd_sev_1)
data_euwd_sev_3_patches=self.flatten_list(data_euwd_sev_3)
data_euwd_sev_5_patches=self.flatten_list(data_euwd_sev_5)
data_euwd_sev_7_patches=self.flatten_list(data_euwd_sev_7)
data_qvap_sev_1_patches=self.flatten_list(data_qvap_sev_1)
data_qvap_sev_3_patches=self.flatten_list(data_qvap_sev_3)
data_qvap_sev_5_patches=self.flatten_list(data_qvap_sev_5)
data_qvap_sev_7_patches=self.flatten_list(data_qvap_sev_7)
data_pres_sev_1_patches=self.flatten_list(data_pres_sev_1)
data_pres_sev_3_patches=self.flatten_list(data_pres_sev_3)
data_pres_sev_5_patches=self.flatten_list(data_pres_sev_5)
data_pres_sev_7_patches=self.flatten_list(data_pres_sev_7)
data_wwnd_sev_1_patches=self.flatten_list(data_wwnd_sev_1)
data_wwnd_sev_3_patches=self.flatten_list(data_wwnd_sev_3)
data_wwnd_sev_5_patches=self.flatten_list(data_wwnd_sev_5)
data_wwnd_sev_7_patches=self.flatten_list(data_wwnd_sev_7)
data_maxw_sev_1_patches=self.flatten_list(data_maxw_sev_1)
data_dbzs_sev_1_patches=self.flatten_list(data_dbzs_sev_1)
data_ctts_sev_1_patches=self.flatten_list(data_ctts_sev_1)
data_uh25_sev_1_patches=self.flatten_list(data_uh25_sev_1)
data_uh03_sev_1_patches=self.flatten_list(data_uh03_sev_1)
data_mask_sev_1_patches=self.flatten_list(data_mask_sev_1)
data_assemble=xr.Dataset({
'temp_sev_1':(['patch','y','x'], np.array(data_temp_sev_1_patches)), 'temp_sev_3':(['patch','y','x'], np.array(data_temp_sev_3_patches)),
'temp_sev_5':(['patch','y','x'], np.array(data_temp_sev_5_patches)), 'temp_sev_7':(['patch','y','x'], np.array(data_temp_sev_7_patches)),
'evwd_sev_1':(['patch','y','x'], np.array(data_evwd_sev_1_patches)), 'evwd_sev_3':(['patch','y','x'], np.array(data_evwd_sev_3_patches)),
'evwd_sev_5':(['patch','y','x'], np.array(data_evwd_sev_5_patches)), 'evwd_sev_7':(['patch','y','x'], np.array(data_evwd_sev_7_patches)),
'euwd_sev_1':(['patch','y','x'], np.array(data_euwd_sev_1_patches)), 'euwd_sev_3':(['patch','y','x'], np.array(data_euwd_sev_3_patches)),
'euwd_sev_5':(['patch','y','x'], np.array(data_euwd_sev_5_patches)), 'euwd_sev_7':(['patch','y','x'], np.array(data_euwd_sev_7_patches)),
'qvap_sev_1':(['patch','y','x'], np.array(data_qvap_sev_1_patches)), 'qvap_sev_3':(['patch','y','x'], np.array(data_qvap_sev_3_patches)),
'qvap_sev_5':(['patch','y','x'], np.array(data_qvap_sev_5_patches)), 'qvap_sev_7':(['patch','y','x'], np.array(data_qvap_sev_7_patches)),
'pres_sev_1':(['patch','y','x'], np.array(data_pres_sev_1_patches)), 'pres_sev_3':(['patch','y','x'], np.array(data_pres_sev_3_patches)),
'pres_sev_5':(['patch','y','x'], np.array(data_pres_sev_5_patches)), 'pres_sev_7':(['patch','y','x'], np.array(data_pres_sev_7_patches)),
'wwnd_sev_1':(['patch','y','x'], np.array(data_wwnd_sev_1_patches)), 'wwnd_sev_3':(['patch','y','x'], np.array(data_wwnd_sev_3_patches)),
'wwnd_sev_5':(['patch','y','x'], np.array(data_wwnd_sev_5_patches)),
import numpy as np
from gt import weighted_dist, interval_search
def range_search(xb_, q_, rg_):
dist_ = np.abs(xb_ - q_)
return np.nonzero(np.all(dist_ < rg_, axis=1))[0]
def weighted_search(xb_, q_, w_, p_):
if p_ == -1:
distances = np.sum(np.abs(xb_ - q_) > 1 / w_, axis=1)
else:
distances = np.sum(np.multiply(np.abs(q_ - xb_) ** p_, w_ ** p_), axis=1)
return distances
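# Hedged usage sketch (added, not part of the original module): a toy 2-D
# example showing what range_search and weighted_search return. The helper
# name _example_search_usage is hypothetical and only meant for illustration.
def _example_search_usage():
    xb = np.array([[0.10, 0.20], [0.50, 0.90], [0.11, 0.19]])
    q = np.array([0.10, 0.20])
    rg = np.array([0.05, 0.05])
    ids = range_search(xb, q, rg)          # componentwise window -> array([0, 2])
    w = np.ones(2)
    d2 = weighted_search(xb, q, w, 2)      # weighted squared L2 distances
    d_cnt = weighted_search(xb, q, w, -1)  # p == -1: count coords with |diff| > 1/w
    return ids, d2, d_cnt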
def run_():
nb, nq, d = 100000, 100, 16
xb = np.random.uniform(size=(nb, d))
xq = np.random.uniform(size=(nq, d))
rg = np.random.uniform(size=(nq, d))
import gc
import os
import weakref
from dataclasses import dataclass
from typing import List
from unittest import mock
import numpy as np
import pytest
from qtpy.QtGui import QGuiApplication
from qtpy.QtWidgets import QMessageBox
from napari._tests.utils import (
add_layer_by_type,
check_viewer_functioning,
layer_test_data,
skip_local_popups,
)
from napari.settings import get_settings
from napari.utils.interactions import mouse_press_callbacks
from napari.utils.io import imread
from napari.utils.theme import available_themes
def test_qt_viewer(make_napari_viewer):
"""Test instantiating viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
assert viewer.title == 'napari'
assert view.viewer == viewer
assert len(viewer.layers) == 0
assert view.layers.model().rowCount() == 0
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
def test_qt_viewer_with_console(make_napari_viewer):
"""Test instantiating console from viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# Check console is created when requested
assert view.console is not None
assert view.dockConsole.widget() is view.console
def test_qt_viewer_toggle_console(make_napari_viewer):
"""Test instantiating console from viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# Check console has been created when it is supposed to be shown
view.toggle_console_visibility(None)
assert view._console is not None
assert view.dockConsole.widget() is view.console
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
def test_add_layer(make_napari_viewer, layer_class, data, ndim):
viewer = make_napari_viewer(ndisplay=int(np.clip(ndim, 2, 3)))
view = viewer.window.qt_viewer
add_layer_by_type(viewer, layer_class, data)
check_viewer_functioning(viewer, view, data, ndim)
def test_new_labels(make_napari_viewer):
"""Test adding new labels layer."""
# Add labels to empty viewer
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
viewer._new_labels()
assert np.max(viewer.layers[0].data) == 0
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the Simulator plugin
"""
import pytest
import math
import pennylane as qml
import numpy as np
from pennylane_cirq import SimulatorDevice
from pennylane_cirq.simulator_device import z_eigs
import cirq
class TestHelperFunctions:
"""Test the helper functions needed for SimulatorDevice."""
# fmt: off
@pytest.mark.parametrize("n,expected_output", [
(1, [1, -1]),
(2, [1, -1, -1, 1]),
(3, [1, -1, -1, 1, -1, 1, 1, -1]),
])
# fmt: on
def test_z_eigs(self, n, expected_output):
"""Tests that z_eigs returns the proper eigenvalues of an
n-fold tensor product of Pauli Z operators."""
assert np.array_equal(z_eigs(n), expected_output)
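# Added note: these expected eigenvalues are the n-fold Kronecker powers of
# the single-qubit Z spectrum, e.g. np.kron([1, -1], [1, -1]) == [1, -1, -1, 1],
# which is presumably how z_eigs builds them internally.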
class TestDeviceIntegration:
"""Tests that the SimulatorDevice integrates well with PennyLane"""
def test_device_loading(self):
"""Tests that the cirq.simulator device is properly loaded"""
dev = qml.device("cirq.simulator", wires=2)
assert dev.num_wires == 2
assert dev.shots == 1000
assert dev.short_name == "cirq.simulator"
assert isinstance(dev, SimulatorDevice)
@pytest.fixture(scope="function")
def simulator_device_1_wire(shots, analytic):
"""Return a single wire instance of the SimulatorDevice class."""
yield SimulatorDevice(1, shots=shots, analytic=analytic)
@pytest.fixture(scope="function")
def simulator_device_2_wires(shots, analytic):
"""Return a two wire instance of the SimulatorDevice class."""
yield SimulatorDevice(2, shots=shots, analytic=analytic)
@pytest.fixture(scope="function")
def simulator_device_3_wires(shots, analytic):
"""Return a three wire instance of the SimulatorDevice class."""
yield SimulatorDevice(3, shots=shots, analytic=analytic)
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestInternalLogic:
"""Test internal logic of the SimulatorDevice class."""
def test_probability_error(self, simulator_device_1_wire):
"""Test that an error is raised in probability if the
internal state is None."""
simulator_device_1_wire.state = None
with pytest.raises(qml.DeviceError, match="Probability can not be computed because the internal state is None."):
simulator_device_1_wire.probability()
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestApply:
"""Tests that gates are correctly applied"""
# fmt: off
@pytest.mark.parametrize("name,input,expected_output", [
("PauliX", [1, 0], np.array([0, 1])),
("PauliX", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / math.sqrt(2)]),
("PauliY", [1, 0], [0, 1j]),
("PauliY", [1 / math.sqrt(2), 1 / math.sqrt(2)], [-1j / math.sqrt(2), 1j / math.sqrt(2)]),
("PauliZ", [1, 0], [1, 0]),
("PauliZ", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), -1 / math.sqrt(2)]),
("Hadamard", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)]),
("Hadamard", [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1]),
])
# fmt: on
def test_apply_operation_single_wire_no_parameters(
self, simulator_device_1_wire, tol, name, input, expected_output
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_1_wire._obs_queue = []
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply(name, wires=[0], par=[])
simulator_device_1_wire.initial_state = np.array(input, dtype=np.complex64)
simulator_device_1_wire.pre_measure()
assert np.allclose(
simulator_device_1_wire.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output", [
("CNOT", [1, 0, 0, 0], [1, 0, 0, 0]),
("CNOT", [0, 0, 1, 0], [0, 0, 0, 1]),
("CNOT", [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 1 / math.sqrt(2), 0]),
("SWAP", [1, 0, 0, 0], [1, 0, 0, 0]),
("SWAP", [0, 0, 1, 0], [0, 1, 0, 0]),
("SWAP", [1 / math.sqrt(2), 0, -1 / math.sqrt(2), 0], [1 / math.sqrt(2), -1 / math.sqrt(2), 0, 0]),
("CZ", [1, 0, 0, 0], [1, 0, 0, 0]),
("CZ", [0, 0, 0, 1], [0, 0, 0, -1]),
("CZ", [1 / math.sqrt(2), 0, 0, -1 / math.sqrt(2)], [1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)]),
])
# fmt: on
def test_apply_operation_two_wires_no_parameters(
self, simulator_device_2_wires, tol, name, input, expected_output
):
"""Tests that applying an operation yields the expected output state for two wire
operations that have no parameters."""
simulator_device_2_wires._obs_queue = []
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply(name, wires=[0, 1], par=[])
simulator_device_2_wires.initial_state = np.array(input, dtype=np.complex64)
simulator_device_2_wires.pre_measure()
assert np.allclose(
simulator_device_2_wires.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,expected_output,par", [
("BasisState", [0, 0, 1, 0], [[1, 0]]),
("BasisState", [0, 0, 1, 0], [[1, 0]]),
("BasisState", [0, 0, 0, 1], [[1, 1]]),
("QubitStateVector", [0, 0, 1, 0], [[0, 0, 1, 0]]),
("QubitStateVector", [0, 0, 1, 0], [[0, 0, 1, 0]]),
("QubitStateVector", [0, 0, 0, 1], [[0, 0, 0, 1]]),
("QubitStateVector", [1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)], [[1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)]]),
("QubitStateVector", [1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)], [[1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)]]),
])
# fmt: on
def test_apply_operation_state_preparation(
self, simulator_device_2_wires, tol, name, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_2_wires._obs_queue = []
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply(name, wires=[0, 1], par=par)
simulator_device_2_wires.pre_measure()
assert np.allclose(
simulator_device_2_wires.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output,par", [
("PhaseShift", [1, 0], [1, 0], [math.pi / 2]),
("PhaseShift", [0, 1], [0, 1j], [math.pi / 2]),
("PhaseShift", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / math.sqrt(2), 1 / 2 + 1j / 2], [math.pi / 4]),
("RX", [1, 0], [1 / math.sqrt(2), -1j * 1 / math.sqrt(2)], [math.pi / 2]),
("RX", [1, 0], [0, -1j], [math.pi]),
("RX", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 - 1j / 2], [math.pi / 2]),
("RY", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
("RY", [1, 0], [0, 1], [math.pi]),
("RY", [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 1], [math.pi / 2]),
("RZ", [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2]),
("RZ", [0, 1], [0, 1j], [math.pi]),
("RZ", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [math.pi / 2]),
("Rot", [1, 0], [1 / math.sqrt(2) - 1j / math.sqrt(2), 0], [math.pi / 2, 0, 0]),
("Rot", [1, 0], [1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
("Rot", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [0, 0, math.pi / 2]),
("Rot", [1, 0], [-1j / math.sqrt(2), -1 / math.sqrt(2)], [math.pi / 2, -math.pi / 2, math.pi / 2]),
("Rot", [1 / math.sqrt(2), 1 / math.sqrt(2)], [1 / 2 + 1j / 2, -1 / 2 + 1j / 2], [-math.pi / 2, math.pi, math.pi]),
("QubitUnitary", [1, 0], [1j / math.sqrt(2), 1j / math.sqrt(2)], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
("QubitUnitary", [0, 1], [1j / math.sqrt(2), -1j / math.sqrt(2)], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
("QubitUnitary", [1 / math.sqrt(2), -1 / math.sqrt(2)], [0, 1j], [
np.array([
[1j / math.sqrt(2), 1j / math.sqrt(2)],
[1j / math.sqrt(2), -1j / math.sqrt(2)]
])
]),
])
# fmt: on
def test_apply_operation_single_wire_with_parameters(
self, simulator_device_1_wire, tol, name, input, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_1_wire._obs_queue = []
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply(name, wires=[0], par=par)
simulator_device_1_wire.initial_state = np.array(input, dtype=np.complex64)
simulator_device_1_wire.pre_measure()
assert np.allclose(
simulator_device_1_wire.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("name,input,expected_output,par", [
("CRX", [0, 1, 0, 0], [0, 1, 0, 0], [math.pi / 2]),
("CRX", [0, 0, 0, 1], [0, 0, -1j, 0], [math.pi]),
("CRX", [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), 1 / 2, -1j / 2], [math.pi / 2]),
("CRY", [0, 0, 0, 1], [0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)], [math.pi / 2]),
("CRY", [0, 0, 0, 1], [0, 0, -1, 0], [math.pi]),
("CRY", [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
("CRZ", [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2]),
("CRZ", [0, 0, 0, 1], [0, 0, 0, 1j], [math.pi]),
("CRZ", [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [1 / math.sqrt(2), 1 / math.sqrt(2), 0, 0], [math.pi / 2]),
("CRot", [0, 0, 0, 1], [0, 0, 0, 1 / math.sqrt(2) + 1j / math.sqrt(2)], [math.pi / 2, 0, 0]),
("CRot", [0, 0, 0, 1], [0, 0, -1 / math.sqrt(2), 1 / math.sqrt(2)], [0, math.pi / 2, 0]),
("CRot", [0, 0, 1 / math.sqrt(2), 1 / math.sqrt(2)], [0, 0, 1 / 2 - 1j / 2, 1 / 2 + 1j / 2], [0, 0, math.pi / 2]),
("CRot", [0, 0, 0, 1], [0, 0, 1 / math.sqrt(2), 1j / math.sqrt(2)], [math.pi / 2, -math.pi / 2, math.pi / 2]),
("CRot", [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [0, 1 / math.sqrt(2), 0, -1 / 2 + 1j / 2], [-math.pi / 2, math.pi, math.pi]),
("QubitUnitary", [1, 0, 0, 0], [1, 0, 0, 0], [
np.array([
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
])
]),
("QubitUnitary", [0, 1, 0, 0], [0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0], [
np.array([
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
])
]),
("QubitUnitary", [1 / 2, 1 / 2, -1 / 2, 1 / 2], [1 / 2, 0, 1 / math.sqrt(2), 1 / 2], [
np.array([
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
])
]),
])
# fmt: on
def test_apply_operation_two_wires_with_parameters(
self, simulator_device_2_wires, tol, name, input, expected_output, par
):
"""Tests that applying an operation yields the expected output state for single wire
operations that have no parameters."""
simulator_device_2_wires._obs_queue = []
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply(name, wires=[0, 1], par=par)
simulator_device_2_wires.initial_state = np.array(input, dtype=np.complex64)
simulator_device_2_wires.pre_measure()
assert np.allclose(
simulator_device_2_wires.state, np.array(expected_output), **tol
)
# fmt: off
@pytest.mark.parametrize("operation,par,match", [
("BasisState", [[2]], "Argument for BasisState can only contain 0 and 1"),
("BasisState", [[1.2]], "Argument for BasisState can only contain 0 and 1"),
("BasisState", [[0, 0, 1]], "For BasisState, the state has to be specified for the correct number of qubits"),
("BasisState", [[0, 0]], "For BasisState, the state has to be specified for the correct number of qubits"),
("QubitStateVector", [[0, 0, 1]], "For QubitStateVector, the state has to be specified for the correct number of qubits"),
("QubitStateVector", [[0, 0, 1, 0]], "For QubitStateVector, the state has to be specified for the correct number of qubits"),
("QubitStateVector", [[1]], "For QubitStateVector, the state has to be specified for the correct number of qubits"),
("QubitStateVector", [[0.5, 0.5]], "The given state for QubitStateVector is not properly normalized to 1"),
("QubitStateVector", [[1.1, 0]], "The given state for QubitStateVector is not properly normalized to 1"),
("QubitStateVector", [[0.7, 0.7j]], "The given state for QubitStateVector is not properly normalized to 1"),
])
# fmt: on
def test_state_preparation_error(self, simulator_device_1_wire, operation, par, match):
"""Tests that the state preparation routines raise proper errors for wrong parameter values."""
simulator_device_1_wire._obs_queue = []
simulator_device_1_wire.pre_apply()
with pytest.raises(qml.DeviceError, match=match):
simulator_device_1_wire.apply(operation, wires=[0], par=par)
def test_basis_state_not_at_beginning_error(self, simulator_device_1_wire):
"""Tests that application of BasisState raises an error if is not
the first operation."""
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("PauliX", wires=[0], par=[])
with pytest.raises(qml.DeviceError, match="The operation BasisState is only supported at the beginning of a circuit."):
simulator_device_1_wire.apply("BasisState", wires=[0], par=[[0]])
def test_qubit_state_vector_not_at_beginning_error(self, simulator_device_1_wire):
"""Tests that application of QubitStateVector raises an error if is not
the first operation."""
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("PauliX", wires=[0], par=[])
with pytest.raises(qml.DeviceError, match="The operation QubitStateVector is only supported at the beginning of a circuit."):
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[[0, 1]])
@pytest.mark.parametrize("shots,analytic", [(100, False)])
class TestStatePreparationErrorsNonAnalytic:
"""Tests state preparation errors that occur for non-analytic devices."""
def test_basis_state_not_analytic_error(self, simulator_device_1_wire):
"""Tests that application of BasisState raises an error if the device
is not in analytic mode."""
simulator_device_1_wire.pre_apply()
with pytest.raises(qml.DeviceError, match="The operation BasisState is only supported in analytic mode."):
simulator_device_1_wire.apply("BasisState", wires=[0], par=[[0]])
def test_qubit_state_vector_not_analytic_error(self, simulator_device_1_wire):
"""Tests that application of QubitStateVector raises an error if the device
is not in analytic mode."""
dev = qml.device("cirq.simulator", wires=1, shots=1000, analytic=False)
simulator_device_1_wire.pre_apply()
with pytest.raises(qml.DeviceError, match="The operation QubitStateVector is only supported in analytic mode."):
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[[0, 1]])
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestExpval:
"""Tests that expectation values are properly calculated or that the proper errors are raised."""
# fmt: off
@pytest.mark.parametrize("operation,input,expected_output", [
(qml.Identity, [1, 0], 1),
(qml.Identity, [0, 1], 1),
(qml.Identity, [1/math.sqrt(2), -1/math.sqrt(2)], 1),
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], 1),
(qml.PauliX, [1/math.sqrt(2), -1/math.sqrt(2)], -1),
(qml.PauliX, [1, 0], 0),
(qml.PauliY, [1/math.sqrt(2), 1j/math.sqrt(2)], 1),
(qml.PauliY, [1/math.sqrt(2), -1j/math.sqrt(2)], -1),
(qml.PauliY, [1, 0], 0),
(qml.PauliZ, [1, 0], 1),
(qml.PauliZ, [0, 1], -1),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], 0),
(qml.Hadamard, [1, 0], 1/math.sqrt(2)),
(qml.Hadamard, [0, 1], -1/math.sqrt(2)),
(qml.Hadamard, [1/math.sqrt(2), 1/math.sqrt(2)], 1/math.sqrt(2)),
])
# fmt: on
def test_expval_single_wire_no_parameters(self, simulator_device_1_wire, tol, operation, input, expected_output):
"""Tests that expectation values are properly calculated for single-wire observables without parameters."""
op = operation(0, do_queue=False)
simulator_device_1_wire._obs_queue = [op]
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[input])
simulator_device_1_wire.post_apply()
simulator_device_1_wire.pre_measure()
res = simulator_device_1_wire.expval(op.name, wires=[0], par=[])
assert np.isclose(res, expected_output, **tol)
# fmt: off
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [1, 0], 1, [np.array([[1, 1j], [-1j, 1]])]),
(qml.Hermitian, [0, 1], 1, [np.array([[1, 1j], [-1j, 1]])]),
(qml.Hermitian, [1/math.sqrt(2), -1/math.sqrt(2)], 1, [np.array([[1, 1j], [-1j, 1]])]),
])
# fmt: on
def test_expval_single_wire_with_parameters(self, simulator_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that expectation values are properly calculated for single-wire observables with parameters."""
op = operation(par[0], 0, do_queue=False)
simulator_device_1_wire._obs_queue = [op]
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[input])
simulator_device_1_wire.post_apply()
simulator_device_1_wire.pre_measure()
res = simulator_device_1_wire.expval(op.name, wires=[0], par=par)
assert np.isclose(res, expected_output, **tol)
# fmt: off
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Hermitian, [0, 1, 0, 0], -1, [
np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1],
])
]),
(qml.Hermitian, [1/math.sqrt(3), 0, 1/math.sqrt(3), 1/math.sqrt(3)], 5/3, [
np.array([
[1, 1j, 0, 1],
[-1j, 1, 0, 0],
[0, 0, 1, -1j],
[1, 0, 1j, 1]
])
]),
(qml.Hermitian, [0, 0, 0, 1], 0, [
np.array([
[0, 1j, 0, 0],
[-1j, 0, 0, 0],
[0, 0, 0, -1j],
[0, 0, 1j, 0]
])
]),
(qml.Hermitian, [1/math.sqrt(2), 0, -1/math.sqrt(2), 0], 1, [
np.array([
[1, 1j, 0, 0],
[-1j, 1, 0, 0],
[0, 0, 1, -1j],
[0, 0, 1j, 1]
])
]),
(qml.Hermitian, [1/math.sqrt(3), -1/math.sqrt(3), 1/math.sqrt(6), 1/math.sqrt(6)], 1, [
np.array([
[1, 1j, 0, .5j],
[-1j, 1, 0, 0],
[0, 0, 1, -1j],
[-.5j, 0, 1j, 1]
])
]),
(qml.Hermitian, [1/math.sqrt(2), 0, 0, 1/math.sqrt(2)], 1, [
np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
]),
(qml.Hermitian, [0, 1/math.sqrt(2), -1/math.sqrt(2), 0], -1, [
np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]
])
]),
])
# fmt: on
def test_expval_two_wires_with_parameters(self, simulator_device_2_wires, tol, operation, input, expected_output, par):
"""Tests that expectation values are properly calculated for two-wire observables with parameters."""
op = operation(par[0], [0, 1], do_queue=False)
simulator_device_2_wires._obs_queue = [op]
simulator_device_2_wires.pre_apply()
simulator_device_2_wires.apply("QubitStateVector", wires=[0, 1], par=[input])
simulator_device_2_wires.post_apply()
simulator_device_2_wires.pre_measure()
res = simulator_device_2_wires.expval(op.name, wires=[0, 1], par=par)
assert np.isclose(res, expected_output, **tol)
@pytest.mark.parametrize("shots,analytic", [(100, True)])
class TestVar:
"""Tests that variances are properly calculated."""
# fmt: off
@pytest.mark.parametrize("operation,input,expected_output", [
(qml.PauliX, [1/math.sqrt(2), 1/math.sqrt(2)], 0),
(qml.PauliX, [1/math.sqrt(2), -1/math.sqrt(2)], 0),
(qml.PauliX, [1, 0], 1),
(qml.PauliY, [1/math.sqrt(2), 1j/math.sqrt(2)], 0),
(qml.PauliY, [1/math.sqrt(2), -1j/math.sqrt(2)], 0),
(qml.PauliY, [1, 0], 1),
(qml.PauliZ, [1, 0], 0),
(qml.PauliZ, [0, 1], 0),
(qml.PauliZ, [1/math.sqrt(2), 1/math.sqrt(2)], 1),
(qml.Hadamard, [1, 0], 1/2),
(qml.Hadamard, [0, 1], 1/2),
(qml.Hadamard, [1/math.sqrt(2), 1/math.sqrt(2)], 1/2),
])
# fmt: on
def test_var_single_wire_no_parameters(self, simulator_device_1_wire, tol, operation, input, expected_output):
"""Tests that variances are properly calculated for single-wire observables without parameters."""
op = operation(0, do_queue=False)
simulator_device_1_wire._obs_queue = [op]
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[input])
simulator_device_1_wire.post_apply()
simulator_device_1_wire.pre_measure()
res = simulator_device_1_wire.var(op.name, wires=[0], par=[])
assert np.isclose(res, expected_output, **tol)
# fmt: off
@pytest.mark.parametrize("operation,input,expected_output,par", [
(qml.Identity, [1, 0], 0, []),
(qml.Identity, [0, 1], 0, []),
(qml.Identity, [1/math.sqrt(2), -1/math.sqrt(2)], 0, []),
(qml.Hermitian, [1, 0], 1, [[[1, 1j], [-1j, 1]]]),
(qml.Hermitian, [0, 1], 1, [[[1, 1j], [-1j, 1]]]),
(qml.Hermitian, [1/math.sqrt(2), -1/math.sqrt(2)], 1, [[[1, 1j], [-1j, 1]]]),
])
# fmt: on
def test_var_single_wire_with_parameters(self, simulator_device_1_wire, tol, operation, input, expected_output, par):
"""Tests that expectation values are properly calculated for single-wire observables with parameters."""
if par:
op = operation(np.array(*par), 0, do_queue=False)
else:
op = operation(0, do_queue=False)
simulator_device_1_wire._obs_queue = [op]
simulator_device_1_wire.pre_apply()
simulator_device_1_wire.apply("QubitStateVector", wires=[0], par=[input])
simulator_device_1_wire.post_apply()
simulator_device_1_wire.pre_measure()
if par:
res = simulator_device_1_wire.var(op.name, wires=[0], par=[np.array(*par)])
import time
import sys
import math
import itertools
import numpy as np
import shapely.geometry
import shapely.ops
import scenic.simulators.webots.world_parser as world_parser
from scenic.simulators.webots.common import webotsToScenicPosition, webotsToScenicRotation
from scenic.core.workspaces import Workspace
from scenic.core.vectors import PolygonalVectorField
from scenic.core.regions import PolygonalRegion, PolylineRegion, nowhere
from scenic.core.geometry import (normalizeAngle, rotateVector, polygonUnion, cleanChain,
plotPolygon)
from scenic.syntax.veneer import verbosePrint
def polygonWithPoints(points):
polygon = shapely.geometry.Polygon(points)
if not polygon.is_valid: # TODO improve hack?
verbosePrint(f'WARNING: simplifying invalid polygon with points {points}')
polygon = polygon.simplify(0.5)
if not polygon.is_valid:
raise RuntimeError(f'unable to simplify polygon {polygon}')
return polygon
def regionWithPolygons(polygons, orientation=None):
if len(polygons) == 0:
return nowhere
else:
return PolygonalRegion(polygon=polygons, orientation=orientation)
## Classes for WBT nodes we are interested in
class OSMObject:
"""Objects with OSM id tags"""
def __init__(self, attrs):
self.attrs = attrs
self.osmID = attrs['id']
class Road(OSMObject):
"""OSM roads"""
def __init__(self, attrs, driveOnLeft=False):
super().__init__(attrs)
self.driveOnLeft = driveOnLeft
self.translation = attrs['translation']
pts = [np.array(webotsToScenicPosition(p + self.translation)) for p in attrs['wayPoints']]
self.waypoints = tuple(cleanChain(pts, 0.05))
assert len(self.waypoints) > 1, pts
self.width = float(attrs.get('width', 7))
self.lanes = int(attrs.get('numberOfLanes', 2))
if self.lanes < 1:
raise RuntimeError(f'Road {self.osmID} has fewer than 1 lane!')
self.forwardLanes = int(attrs.get('numberOfForwardLanes', 1))
# if self.forwardLanes < 1:
# raise RuntimeError(f'Road {self.osmID} has fewer than 1 forward lane!')
self.backwardLanes = self.lanes - self.forwardLanes
self.hasLeftSidewalk = attrs.get('leftBorder', True)
self.hasRightSidewalk = attrs.get('rightBorder', True)
self.sidewalkWidths = list(attrs.get('roadBorderWidth', [0.8]))
if ((self.hasLeftSidewalk or self.hasRightSidewalk)
and len(self.sidewalkWidths) < 1):
raise RuntimeError(f'Road {self.osmID} has sidewalk with empty width!')
self.startCrossroad = attrs.get('startJunction')
self.endCrossroad = attrs.get('endJunction')
def computeGeometry(self, crossroads, snapTolerance=0.05):
## Approximate bounding polygon and sidewalks
# TODO improve this!!!
lefts, rights = [], []
leftSidewalk, rightSidewalk = [], []
headings = []
sidewalkWidths = itertools.chain(self.sidewalkWidths,
itertools.repeat(self.sidewalkWidths[-1]))
segments = zip(self.waypoints, sidewalkWidths)
for i, segment in enumerate(segments):
point, sidewalkWidth = segment
if i+1 < len(self.waypoints):
nextPt = self.waypoints[i+1]
dx, dy = nextPt - point
heading = normalizeAngle(math.atan2(dy, dx) - (math.pi / 2))
headings.append(heading)
perp = np.array([-dy, dx])
perp /= np.linalg.norm(perp)
else:
pass # use perp from last segment
toEdge = perp * (self.width / 2)
left = point + toEdge
right = point - toEdge
lefts.append(left)
rights.append(right)
toEdge = perp * sidewalkWidth
leftSidewalk.append(left + toEdge)
rightSidewalk.append(right - toEdge)
# Snap to adjacent crossroads if possible
if snapTolerance > 0:
sc = self.startCrossroad
if sc is not None:
if sc not in crossroads:
raise RuntimeError(f'Road {self.osmID} begins at invalid crossroad {sc}')
crossroad = crossroads[sc]
if crossroad.region is not None:
pt = shapely.geometry.Point(lefts[0])
pt = shapely.ops.snap(pt, crossroad.region.polygons, snapTolerance)
lefts[0] = np.array([pt.x, pt.y])
pt = shapely.geometry.Point(rights[0])
pt = shapely.ops.snap(pt, crossroad.region.polygons, snapTolerance)
rights[0] = np.array([pt.x, pt.y])
perp = lefts[0] - rights[0]
toEdge = perp * (self.sidewalkWidths[0] / np.linalg.norm(perp))
leftSidewalk[0] = lefts[0] + toEdge
rightSidewalk[0] = rights[0] - toEdge
ec = self.endCrossroad
if ec is not None:
if ec not in crossroads:
raise RuntimeError(f'Road {self.osmID} ends at invalid crossroad {ec}')
crossroad = crossroads[ec]
if crossroad.region is not None:
pt = shapely.geometry.Point(lefts[-1])
pt = shapely.ops.snap(pt, crossroad.region.polygons, snapTolerance)
lefts[-1] = np.array([pt.x, pt.y])
pt = shapely.geometry.Point(rights[-1])
pt = shapely.ops.snap(pt, crossroad.region.polygons, snapTolerance)
rights[-1] = np.array([pt.x, pt.y])
perp = lefts[-1] - rights[-1]
toEdge = perp * (self.sidewalkWidths[-1] / np.linalg.norm(perp))
leftSidewalk[-1] = lefts[-1] + toEdge
rightSidewalk[-1] = rights[-1] - toEdge
roadPoints = lefts + list(reversed(rights))
self.leftCurb = PolylineRegion(reversed(lefts))
self.rightCurb = PolylineRegion(rights)
self.leftSidewalk = self.rightSidewalk = None
if self.hasLeftSidewalk:
points = lefts + list(reversed(leftSidewalk))
polygon = polygonWithPoints(points)
assert polygon.is_valid, self.waypoints
self.leftSidewalk = PolygonalRegion(polygon=polygon)
if self.hasRightSidewalk:
points = rights + list(reversed(rightSidewalk))
polygon = polygonWithPoints(points)
assert polygon.is_valid, self.waypoints
self.rightSidewalk = PolygonalRegion(polygon=polygon)
## Compute lanes and traffic directions
cells = []
la, ra = lefts[0], rights[0]
gapA = (ra - la) / self.lanes
markerA = ra
laneMarkers = [[] for lane in range(self.lanes)]
for lb, rb, heading in zip(lefts[1:], rights[1:], headings):
# Compute lanes for this segment of road
gapB = (rb - lb) / self.lanes
markerB = rb
for lane, markers in enumerate(laneMarkers):
forward = lane < self.forwardLanes
if self.driveOnLeft:
forward = not forward
nextMarkerA = markerA - gapA
nextMarkerB = markerB - gapB
markers.append(nextMarkerA)
cell = shapely.geometry.Polygon((markerA, markerB, nextMarkerB, nextMarkerA))
heading = heading if forward else normalizeAngle(heading + math.pi)
cells.append((cell, heading))
markerA = nextMarkerA
markerB = nextMarkerB
gapA = gapB
markerA = rb
self.lanes = []
markerB = rb
rightEdge = rights
for lane, markers in enumerate(laneMarkers):
markerB = markerB - gapB
markers.append(markerB)
self.lanes.append(PolygonalRegion(rightEdge + list(reversed(markers))))
rightEdge = markers
self.laneMarkers = laneMarkers[:-1]
self.cells = cells
self.direction = PolygonalVectorField(f'Road{self.osmID}Direction', cells)
roadPolygon = polygonWithPoints(roadPoints)
self.region = PolygonalRegion(polygon=roadPolygon, orientation=self.direction)
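# Summary note (added for clarity): at this point the road exposes curb
# polylines (leftCurb/rightCurb), optional sidewalk regions, one PolygonalRegion
# per lane, the lane-marker chains, the per-cell traffic direction field, and
# finally self.region, the drivable area oriented by that direction field.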
def show(self, plt):
if self.hasLeftSidewalk:
x, y = zip(*self.leftSidewalk.points)
plt.fill(x, y, '#A0A0FF')
if self.hasRightSidewalk:
x, y = zip(*self.rightSidewalk.points)
plt.fill(x, y, '#A0A0FF')
self.region.show(plt, style='r:')
x, y = zip(*self.lanes[0].points)
plt.fill(x, y, color=(0.8, 1.0, 0.8))
for lane, markers in enumerate(self.laneMarkers):
x, y = zip(*markers)
color = (0.8, 0.8, 0) if lane == self.backwardLanes - 1 else (0.3, 0.3, 0.3)
plt.plot(x, y, '--', color=color)
class Crossroad(OSMObject):
"""OSM crossroads"""
def __init__(self, attrs):
super().__init__(attrs)
self.translation = attrs['translation']
points = list(np.array(webotsToScenicPosition(p + self.translation))
for p in attrs['shape'])
if len(points) > 0:
self.points = points
self.region = PolygonalRegion(self.points)
else:
verbosePrint(f'WARNING: Crossroad {self.osmID} has empty shape field!')
self.region = None
def show(self, plt):
if self.region is not None:
x, y = zip(*self.points)
plt.fill(x, y, color=(1, 0.9, 0.9))
plt.plot(x, y, ':', color=(1, 0.5, 0))
class PedestrianCrossing:
"""PedestrianCrossing nodes"""
def __init__(self, attrs):
self.translation = attrs.get('translation', np.array((0, 0, 0)))
""" fiber.py
Module containing classes and functions related to processing fiber
information.
"""
import numpy as np
import vtk
from collections import defaultdict
from . import misc
def tree():
"""
Creates tree to store quantitative information.
INPUT:
none
OUTPUT:
none
"""
return defaultdict(tree)
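# Illustrative note (not part of the original module): because tree() returns a
# defaultdict whose default factory is tree itself, arbitrarily nested keys can
# be assigned without creating the intermediate levels first, e.g.
#   t = tree()
#   t[0][5]['x'] = 1.23   # creates t[0] and t[0][5] on the fly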
def convertFromTuple(fiberTuple):
"""
Converts fiber data in the form of a tuple (from extraction) to a FiberTree.
Output is of class FiberTree
INPUT:
fiberTuple - tuple containing fiber information to be converted
OUTPUT:
fiberTree - fiber information converted to a tree
"""
fiberTree = FiberTree()
fiberTree.no_of_fibers = len(fiberTuple[0])
fiberTree.pts_per_fiber = len(fiberTuple[0][0])
for fidx in range(fiberTree.no_of_fibers):
for pidx in range(fiberTree.pts_per_fiber):
fiberTree.fiberTree[fidx][pidx]['x'] = fiberTuple[0][fidx][pidx]
fiberTree.fiberTree[fidx][pidx]['y'] = fiberTuple[1][fidx][pidx]
fiberTree.fiberTree[fidx][pidx]['z'] = fiberTuple[2][fidx][pidx]
return fiberTree
def calcEndPointSep(fiberData, rejIdx):
"""
Calculates distance between end points
INPUT:
fiberData - fiber tree containing tractography information
rejIdx - indices of outlier
OUTPUT:
DArray - distance between end points
"""
endpt = fiberData.pts_per_fiber - 1
DArray = []
fidxes = [i for i in range(fiberData.no_of_fibers)]
for i in rejIdx:
del fidxes[i]
for fidx in fidxes:
x1 = fiberData.fiberTree[fidx][0]['x']
x2 = fiberData.fiberTree[fidx][endpt]['x']
y1 = fiberData.fiberTree[fidx][0]['y']
y2 = fiberData.fiberTree[fidx][endpt]['y']
z1 = fiberData.fiberTree[fidx][0]['z']
z2 = fiberData.fiberTree[fidx][endpt]['z']
DArray.append(np.sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2))
return DArray
def calcFiberLength(fiberData, rejIdx=[]):
"""
Calculates the fiber length via arc length
NOTE: same function as ufiber module without removing any fibers
INPUT:
fiberData - fiber tree containing tractography information
rejIdx - indices of outlier
OUTPUT:
LArray - array containing length of fibers
"""
no_of_pts = fiberData.pts_per_fiber
if no_of_pts < 2:
print("Not enough samples to determine length of fiber")
raise ValueError
LArray = []
fidxes = [i for i in range(fiberData.no_of_fibers)]
for i in rejIdx:
del fidxes[i]
for fidx in fidxes:
L = 0
for idx in range(1, no_of_pts):
x1 = fiberData.fiberTree[fidx][idx]['x']
x2 = fiberData.fiberTree[fidx][idx - 1]['x']
y1 = fiberData.fiberTree[fidx][idx]['y']
y2 = fiberData.fiberTree[fidx][idx - 1]['y']
z1 = fiberData.fiberTree[fidx][idx]['z']
z2 = fiberData.fiberTree[fidx][idx - 1]['z']
L = L + np.sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
LArray.append(L)
return LArray
def addLDRatio(DArray, LArray, polyData):
"""
Calculates and adds LD Ratio to VTK
INPUT:
DArray - array of distances between end points for fibers
LArray - array of lengths of fibers
polyData - tractography data to add L/D ratio
OUTPUT:
none
"""
LDScalar = vtk.vtkFloatArray()
LDScalar.SetNumberOfComponents(1)
LDScalar.SetName('LDRatio')
LDRatio = np.divide(DArray, LArray)
for fidx in range(len(LArray)):
LDScalar.InsertNextTuple1(LDRatio[fidx])
polyData.GetCellData().AddArray(LDScalar)
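# Note (added for clarity): the ratio stored above is the end-point separation
# divided by the arc length of the same fiber, so values close to 1 indicate
# nearly straight fibers and smaller values indicate more curved ones.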
class FiberTree:
"""
Data pertaining to a group of fibers.
Value returned is of class FiberTree
"""
def __init__(self):
self.fiberTree = tree()
# Info related to fibers
self.no_of_fibers = None
self.pts_per_fiber = None
def _calc_fiber_indices(self, fiberLength, pts_per_fiber):
""" *INTERNAL FUNCTION*
Determine indices to traverse data along a fiber.
Indices include both end points of the fiber plus evenly spaced points
along the line. Module determines which indices are wanted based on
fiber length and desired number of points along the length.
INPUT:
fiberLength - number of points along a fiber
pts_per_fiber - number of desired points along fiber
OUTPUT:
idxList - corresponding new indices to traverse along fiber
"""
# Step length between points along fiber
stepLength = (fiberLength - 1.0) / (pts_per_fiber - 1.0)
# Output indices along fiber
idxList = []
for idx in range(0, pts_per_fiber):
idxList.append(idx * stepLength)
return idxList
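# Worked example (illustrative, assuming a fiber stored with 5 points that is
# resampled to pts_per_fiber=3): stepLength = (5 - 1.0) / (3 - 1.0) = 2.0, so
# idxList = [0.0, 2.0, 4.0] -- both end points plus one evenly spaced point.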
def getFiber(self, fiberIdx):
"""
Extract a single fiber from the group with corresponding data.
Value returned is of class Fiber.
INPUT:
fiberIdx - index of fiber to be extracted
OUTPUT
fiber_x - array of "x" spatial component at each sample
fiber_y - array of "y" spatial component at each sample
fiber_z - array of "z" spatial component at each sample
"""
# Fiber data
fiber_x = np.zeros(self.pts_per_fiber)
fiber_y = np.zeros(self.pts_per_fiber)
fiber_z = np.zeros(self.pts_per_fiber)
for pidx in range(0, self.pts_per_fiber):
fiber_x[pidx] = float(self.fiberTree[fiberIdx][pidx]['x'])
fiber_y[pidx] = float(self.fiberTree[fiberIdx][pidx]['y'])
fiber_z[pidx] = float(self.fiberTree[fiberIdx][pidx]['z'])
return fiber_x, fiber_y, fiber_z
def getFibers(self, fidxes, rejIdx=[]):
"""
Extracts a subset of fibers corresponding to inputted indices.
Returned fibers are of class fiberArray.
INPUT:
fidxes - Indices of subset of fibers to be extracted
OUTPUT:
fiberArray_x - array of "x" spatial component at each sample for
fiber bundle
fiberArray_y - array of "y" spatial component at each sample for
fiber bundle
fiberArray_z - array of "z" spatial component at each sample for
fiber bundle
"""
fiberArray_x = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))
fiberArray_y = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))
fiberArray_z = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))
# Fiber data
idx = 0
fidxes = list(fidxes)
if len(rejIdx) != 0:
for i in rejIdx:
if i > (len(fidxes) - 1):
continue
else:
del fidxes[i]
for fidx in fidxes:
for pidx in range(0, self.pts_per_fiber):
fiberArray_x[idx][pidx] = float(self.fiberTree[fidx][pidx]['x'])
fiberArray_y[idx][pidx] = float(self.fiberTree[fidx][pidx]['y'])
fiberArray_z[idx][pidx] = float(self.fiberTree[fidx][pidx]['z'])
idx += 1
return fiberArray_x, fiberArray_y, fiberArray_z
def addClusterInfo(self, clusterLabels, centroids):
"""
Add and save cluster label to fiber tree storing tractography data.
INPUT:
clusterLabels - array of cluster labels sorted in fiber index order
centroids - array of centroids associated with fiber clusters
OUTPUT:
none
"""
uniqueLabels = np.unique(clusterLabels, return_counts=False)
for label in uniqueLabels:
for fidx in np.where(clusterLabels == label)[0]:
""" Tests related to connecing inputs to outputs."""
import unittest
import numpy as np
from six.moves import cStringIO, range
from six import assertRaisesRegex
from openmdao.api import Problem, Group, IndepVarComp, ExecComp, ExplicitComponent
from openmdao.utils.assert_utils import assert_rel_error
class TestConnections(unittest.TestCase):
def setUp(self):
self.setup_model(None, None)
def setup_model(self, c1meta=None, c3meta=None):
self.p = Problem()
root = self.p.model
if c1meta is None:
c1meta = {}
if c3meta is None:
c3meta = {}
self.G1 = root.add_subsystem("G1", Group())
self.G2 = self.G1.add_subsystem("G2", Group())
self.C1 = self.G2.add_subsystem("C1", ExecComp('y=x*2.0', **c1meta))
self.C2 = self.G2.add_subsystem("C2", IndepVarComp('x', 1.0))
self.G3 = root.add_subsystem("G3", Group())
self.G4 = self.G3.add_subsystem("G4", Group())
self.C3 = self.G4.add_subsystem("C3", ExecComp('y=x*2.0', **c3meta))
self.C4 = self.G4.add_subsystem("C4", ExecComp('y=x*2.0'))
def test_no_conns(self):
self.p.setup()
self.p['G1.G2.C1.x'] = 111.
self.p['G3.G4.C3.x'] = 222.
self.p['G3.G4.C4.x'] = 333.
self.p.final_setup()
self.assertEqual(self.C1._inputs['x'], 111.)
self.assertEqual(self.C3._inputs['x'], 222.)
self.assertEqual(self.C4._inputs['x'], 333.)
def test_inp_inp_explicit_conn_w_src(self):
raise unittest.SkipTest("explicit input-input connections not supported yet")
self.p.model.connect('G3.G4.C3.x', 'G3.G4.C4.x') # connect inputs
self.p.model.connect('G1.G2.C2.x', 'G3.G4.C3.x') # connect src to one of connected inputs
self.p.setup()
self.p['G1.G2.C2.x'] = 999.
self.assertEqual(self.C3._inputs['x'], 0.)
self.assertEqual(self.C4._inputs['x'], 0.)
self.p.run_model()
self.assertEqual(self.C3._inputs['x'], 999.)
self.assertEqual(self.C4._inputs['x'], 999.)
def test_pull_size_from_source(self):
raise unittest.SkipTest("setting input size based on src size not supported yet")
class Src(ExplicitComponent):
def setup(self):
self.add_input('x', 2.0)
self.add_output('y1', np.zeros((3, )))
self.add_output('y2', shape=((3, )))
def solve_nonlinear(self, inputs, outputs, resids):
x = inputs['x']
outputs['y1'] = x * np.array([1.0, 2.0, 3.0])
outputs['y2'] = x * np.array([1.0, 2.0, 3.0])
class Tgt(ExplicitComponent):
def setup(self):
self.add_input('x1')
self.add_input('x2')
self.add_output('y1', 0.0)
self.add_output('y2', 0.0)
def solve_nonlinear(self, inputs, outputs, resids):
x1 = inputs['x1']
x2 = inputs['x2']
outputs['y1'] = np.sum(x1)
outputs['y2'] = np.sum(x2)
p = Problem()
p.model.add_subsystem('src', Src())
p.model.add_subsystem('tgt', Tgt())
p.model.connect('src.y1', 'tgt.x1')
p.model.connect('src.y2', 'tgt.x2')
p.setup()
p.run_model()
self.assertEqual(p['tgt.y1'], 12.0)
self.assertEqual(p['tgt.y2'], 12.0)
def test_pull_size_from_source_with_indices(self):
raise unittest.SkipTest("setting input size based on src size not supported yet")
class Src(ExplicitComponent):
def setup(self):
self.add_input('x', 2.0)
self.add_output('y1', np.zeros((3, )))
self.add_output('y2', shape=((3, )))
self.add_output('y3', 3.0)
def solve_nonlinear(self, inputs, outputs, resids):
""" counts up. """
x = inputs['x']
outputs['y1'] = x * np.array([1.0, 2.0, 3.0])
outputs['y2'] = x * np.array([1.0, 2.0, 3.0])
outputs['y3'] = x * 4.0
class Tgt(ExplicitComponent):
def setup(self):
self.add_input('x1')
self.add_input('x2')
self.add_input('x3')
self.add_output('y1', 0.0)
self.add_output('y2', 0.0)
self.add_output('y3', 0.0)
def solve_nonlinear(self, inputs, outputs, resids):
""" counts up. """
x1 = inputs['x1']
x2 = inputs['x2']
x3 = inputs['x3']
outputs['y1'] = np.sum(x1)
outputs['y2'] = np.sum(x2)
outputs['y3'] = np.sum(x3)
top = Problem()
top.model.add_subsystem('src', Src())
top.model.add_subsystem('tgt', Tgt())
top.model.connect('src.y1', 'tgt.x1', src_indices=(0, 1))
top.model.connect('src.y2', 'tgt.x2', src_indices=(0, 1))
top.model.connect('src.y3', 'tgt.x3')
top.setup()
top.run_model()
self.assertEqual(top['tgt.y1'], 6.0)
self.assertEqual(top['tgt.y2'], 6.0)
self.assertEqual(top['tgt.y3'], 8.0)
def test_inp_inp_conn_no_src(self):
raise unittest.SkipTest("no setup testing yet")
self.p.model.connect('G3.G4.C3.x', 'G3.G4.C4.x')
stream = cStringIO()
self.p.setup(out_stream=stream)
self.p['G3.G4.C3.x'] = 999.
self.assertEqual(self.p.model.G3.G4.C3._inputs['x'], 999.)
self.assertEqual(self.p.model.G3.G4.C4._inputs['x'], 999.)
content = stream.getvalue()
self.assertTrue("The following parameters have no associated unknowns:\n"
"G1.G2.C1.x\nG3.G4.C3.x\nG3.G4.C4.x" in content)
self.assertTrue("The following components have no connections:\n"
"G1.G2.C1\nG1.G2.C2\nG3.G4.C3\nG3.G4.C4\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
def test_diff_conn_input_vals(self):
raise unittest.SkipTest("no checking yet of connected inputs without a src")
# set different initial values
self.C1._inputs['x'] = 7.
self.C3._inputs['x'] = 5.
# connect two inputs
self.p.model.connect('G1.G2.C1.x', 'G3.G4.C3.x')
try:
self.p.setup()
except Exception as err:
self.assertTrue(
"The following sourceless connected inputs have different initial values: "
"[('G1.G2.C1.x', 7.0), ('G3.G4.C3.x', 5.0)]. Connect one of them to the output of "
"an IndepVarComp to ensure that they have the same initial value." in str(err))
else:
self.fail("Exception expected")
def test_diff_conn_input_units(self):
raise unittest.SkipTest("no compatability checking of connected inputs yet")
# set different but compatible units
self.setup_model(c1meta={'x': {'units': 'ft'}}, c3meta={'x': {'units': 'inch'}})
# connect two inputs
self.p.model.connect('G1.G2.C1.x', 'G3.G4.C3.x')
try:
self.p.setup()
except Exception as err:
msg = "The following connected inputs have no source and different units: " \
"[('G1.G2.C1.x', 'ft'), ('G3.G4.C3.x', 'inch')]. " \
"Connect 'G1.G2.C1.x' to a source (such as an IndepVarComp) with defined units."
self.assertTrue(msg in str(err))
else:
self.fail("Exception expected")
def test_diff_conn_input_units_swap(self):
raise unittest.SkipTest("no compatability checking of connected inputs yet")
# set different but compatible units
self.setup_model(c1meta={'x': {'units': 'ft'}}, c3meta={'x': {'units': 'inch'}})
# connect two inputs
self.p.model.connect('G3.G4.C3.x', 'G1.G2.C1.x')
try:
self.p.setup()
except Exception as err:
msg = "The following connected inputs have no source and different units: " \
"[('G1.G2.C1.x', 'ft'), ('G3.G4.C3.x', 'inch')]. " \
"Connect 'G3.G4.C3.x' to a source (such as an IndepVarComp) with defined units."
self.assertTrue(msg in str(err))
else:
self.fail("Exception expected")
def test_diff_conn_input_units_w_src(self):
raise unittest.SkipTest("no compatability checking of connected inputs yet")
p = Problem()
root = p.model
num_comps = 50
root.add_subsystem("desvars", IndepVarComp('dvar1', 1.0))
# add a bunch of comps
for i in range(num_comps):
if i % 2 == 0:
units = "ft"
else:
units = "m"
root.add_subsystem("C%d" % i, ExecComp('y=x*2.0', units={'x': units}))
# connect all of their inputs (which have different units)
for i in range(1, num_comps):
root.connect("C%d.x" % (i-1), "C%d.x" % i)
try:
p.setup()
except Exception as err:
self.assertTrue("The following connected inputs have no source and different units" in
str(err))
else:
self.fail("Exception expected")
# now, connect a source and the error should go away
p.cleanup()
root.connect('desvars.dvar1', 'C10.x')
p.setup()
class TestConnectionsPromoted(unittest.TestCase):
def test_inp_inp_promoted_no_src(self):
p = Problem()
root = p.model
G1 = root.add_subsystem("G1", Group())
G2 = G1.add_subsystem("G2", Group())
G2.add_subsystem("C1", ExecComp('y=x*2.0'))
G2.add_subsystem("C2", ExecComp('y=x*2.0'))
G3 = root.add_subsystem("G3", Group())
G4 = G3.add_subsystem("G4", Group(), promotes=['x'])
G4.add_subsystem("C3", ExecComp('y=x*2.0'), promotes=['x'])
G4.add_subsystem("C4", ExecComp('y=x*2.0'), promotes=['x'])
p.setup()
p.final_setup()
# setting promoted name should set both inputs mapped to that name
with self.assertRaises(Exception) as context:
p['G3.x'] = 999.
self.assertEqual(str(context.exception),
"The promoted name G3.x is invalid because it refers to multiple inputs: "
"[G3.G4.C3.x, G3.G4.C4.x] that are not connected to an output variable.")
def test_inp_inp_promoted_w_prom_src(self):
p = Problem()
root = p.model
G1 = root.add_subsystem("G1", Group(), promotes=['x'])
G2 = G1.add_subsystem("G2", Group(), promotes=['x'])
G2.add_subsystem("C1", ExecComp('y=x*2.0'))
G2.add_subsystem("C2", IndepVarComp('x', 1.0), promotes=['x'])
G3 = root.add_subsystem("G3", Group(), promotes=['x'])
G4 = G3.add_subsystem("G4", Group(), promotes=['x'])
C3 = G4.add_subsystem("C3", ExecComp('y=x*2.0'), promotes=['x'])
C4 = G4.add_subsystem("C4", ExecComp('y=x*2.0'), promotes=['x'])
p.setup()
p.set_solver_print(level=0)
# setting promoted name will set the value into the outputs, but will
# not propagate it to the inputs. That will happen during run_model().
p['x'] = 999.
p.run_model()
self.assertEqual(C3._inputs['x'], 999.)
self.assertEqual(C4._inputs['x'], 999.)
def test_inp_inp_promoted_w_explicit_src(self):
p = Problem()
root = p.model
G1 = root.add_subsystem("G1", Group())
G2 = G1.add_subsystem("G2", Group(), promotes=['x'])
G2.add_subsystem("C1", ExecComp('y=x*2.0'))
G2.add_subsystem("C2", IndepVarComp('x', 1.0), promotes=['x'])
G3 = root.add_subsystem("G3", Group())
G4 = G3.add_subsystem("G4", Group(), promotes=['x'])
C3 = G4.add_subsystem("C3", ExecComp('y=x*2.0'), promotes=['x'])
C4 = G4.add_subsystem("C4", ExecComp('y=x*2.0'), promotes=['x'])
p.model.connect('G1.x', 'G3.x')
p.setup()
p.set_solver_print(level=0)
# setting promoted name will set the value into the outputs, but will
# not propagate it to the inputs. That will happen during run_model().
p['G1.x'] = 999.
p.run_model()
self.assertEqual(C3._inputs['x'], 999.)
self.assertEqual(C4._inputs['x'], 999.)
def test_unit_conv_message(self):
raise unittest.SkipTest("no units yet")
prob = Problem()
root = prob.model
root.add_subsystem("C1", ExecComp('y=x*2.0', units={'x': 'ft'}), promotes=['x'])
root.add_subsystem("C2", ExecComp('y=x*2.0', units={'x': 'inch'}), promotes=['x'])
root.add_subsystem("C3", ExecComp('y=x*2.0', units={'x': 'm'}), promotes=['x'])
try:
prob.setup()
except Exception as err:
msg = "The following connected inputs are promoted to 'x', but have different units: " \
"[('C1.x', 'ft'), ('C2.x', 'inch'), ('C3.x', 'm')]. " \
"Connect 'x' to a source (such as an IndepVarComp) with defined units."
self.assertTrue(msg in str(err))
else:
self.fail("Exception expected")
# Remedy the problem with an Indepvarcomp
prob = Problem()
root = prob.model
root.add_subsystem("C1", ExecComp('y=x*2.0', units={'x': 'ft'}), promotes=['x'])
root.add_subsystem("C2", ExecComp('y=x*2.0', units={'x': 'inch'}), promotes=['x'])
root.add_subsystem("C3", ExecComp('y=x*2.0', units={'x': 'm'}), promotes=['x'])
root.add_subsystem('p', IndepVarComp('x', 1.0, units='cm'), promotes=['x'])
prob.setup()
def test_overlapping_system_names(self):
# This ensures that _setup_connections does not think g1 and g1a are the same system
prob = Problem()
model = prob.model
g1 = model.add_subsystem('g1', Group())
g1a = model.add_subsystem('g1a', Group())
g1.add_subsystem('c', ExecComp('y=x'))
g1a.add_subsystem('c', ExecComp('y=x'))
model.connect('g1.c.y', 'g1a.c.x')
model.connect('g1a.c.y', 'g1.c.x')
prob.setup(check=True)
class TestConnectionsIndices(unittest.TestCase):
def setUp(self):
class ArrayComp(ExplicitComponent):
def setup(self):
self.add_input('inp', val=np.ones((2)))
self.add_input('inp1', val=0)
self.add_output('out', val=np.zeros((2)))
def compute(self, inputs, outputs):
outputs['out'] = inputs['inp'] * 2.
indep_var_comp = IndepVarComp()
indep_var_comp.add_output('blammo', val=3.)
indep_var_comp.add_output('arrout', val=np.ones(5))
prob = Problem()
prob.model.add_subsystem('idvp', indep_var_comp)
prob.model.add_subsystem('arraycomp', ArrayComp())
self.prob = prob
def test_bad_shapes(self):
# Should not be allowed because the source and target shapes do not match
self.prob.model.connect('idvp.blammo', 'arraycomp.inp')
expected = (r"The source and target shapes do not match or are ambiguous for the "
r"connection 'idvp.blammo' to 'arraycomp.inp'."
r" The source shape is \(1.*,\) but the target shape is \(2.*,\).")
with assertRaisesRegex(self, ValueError, expected):
self.prob.setup()
def test_bad_length(self):
# Should not be allowed because the length of src_indices is greater than
# the shape of arraycomp.inp
self.prob.model.connect('idvp.blammo', 'arraycomp.inp', src_indices=[0, 1, 0])
expected = (r"The source indices \[0 1 0\] do not specify a valid shape "
r"for the connection 'idvp.blammo' to 'arraycomp.inp'. "
r"The target shape is \(2.*,\) but indices are \(3.*,\).")
with assertRaisesRegex(self, ValueError, expected):
self.prob.setup()
def test_bad_value(self):
# Should not be allowed because the index value within src_indices is outside
# the valid range for the source
self.prob.model.connect('idvp.arrout', 'arraycomp.inp1', src_indices=[100000])
expected = ("Group (<model>): The source indices do not specify a valid index for the "
"connection 'idvp.arrout' to 'arraycomp.inp1'. "
"Index '100000' is out of range for source dimension of "
"size 5.")
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), expected)
else:
self.fail('Exception expected.')
class TestShapes(unittest.TestCase):
def test_connect_flat_array_to_row_vector(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1',
ExecComp('y=dot(x, A)',
x={'value': np.zeros((1, 10))},
A={'value': np.eye(10)},
y={'value': np.zeros((1, 10))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'], np.arange(10)[np.newaxis, :])
def test_connect_flat_array_to_col_vector(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1',
ExecComp('y=dot(A, x)',
x={'value': np.zeros((10, 1))},
A={'value': np.eye(10)},
y={'value': np.zeros((10, 1))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'], np.arange(10)[:, np.newaxis])
def test_connect_row_vector_to_flat_array(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)[np.newaxis, :]))
p.model.add_subsystem('C1', ExecComp('y=5*x',
x={'value': np.zeros(10)},
y={'value': np.zeros(10)}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'], 5 * np.arange(10))
def test_connect_col_vector_to_flat_array(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)[:, np.newaxis]))
p.model.add_subsystem('C1', ExecComp('y=5*x',
x={'value': np.zeros(10)},
y={'value': np.zeros(10)}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'], 5 * np.arange(10))
def test_connect_flat_to_3d_array(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1', ExecComp('y=5*x',
x={'value': np.zeros((1, 10, 1))},
y={'value': np.zeros((1, 10, 1))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'], 5 * np.arange(10)[np.newaxis, :, np.newaxis])
def test_connect_flat_nd_to_flat_nd(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x',
val=np.arange(10)[np.newaxis, :, np.newaxis,
np.newaxis]))
p.model.add_subsystem('C1', ExecComp('y=5*x',
x={'value': np.zeros((1, 1, 1, 10))},
y={'value': np.zeros((1, 1, 1, 10))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_rel_error(self, p['C1.y'],
5 * np.arange(10)[np.newaxis, np.newaxis, np.newaxis, :])
def test_connect_incompatible_shapes(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.arange(10)[np.newaxis, :,
np.newaxis, np.newaxis]))
p.model.add_subsystem('C1', ExecComp('y=5*x',
x={'value': np.zeros((5, 2))},
y={'value': np.zeros((5, 2))}))
p.model.connect('indep.x', 'C1.x')
with self.assertRaises(Exception) as context:
p.setup()
self.assertEqual(str(context.exception),
"Group (<model>): The source and target shapes do not match or are ambiguous "
"for the connection 'indep.x' to 'C1.x'. The source shape is (1, 10, 1, 1) but "
"the target shape is (5, 2).")
class TestMultiConns(unittest.TestCase):
def test_mult_conns(self):
class SubGroup(Group):
def setup(self):
self.add_subsystem('c1', ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),
promotes=['y', 'x'])
self.add_subsystem('c2', ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),
promotes=['z', 'y'])
prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', 10*np.ones(4))
indeps.add_output('y', np.ones(4))
import numpy as np
class DrawObj:
"""
General class for drawing object by using matplotlib.animation.
Multiple ships can be drawn by using this class.
"""
def __init__(self, ax):
self.ax = ax
self.img = []
self.img.append(ax.plot([], [], color="b"))
self.img.append(ax.plot([], [], color="y"))
def draw_obj_with_angle(
self, center_x_list, center_y_list, shape_list, angle_list, obj="ship"
):
"""Draw square image with angle
Args:
center_x_list (List[float]): list of the center x position of the square
center_y_list (List[float]): list of the center y position of the square
shape_list (List[float]): list of the square's shape(length/2, width/2)
angle_list (List[float]): list of in radians
obj (str: optional): object type, 'ship' or 'square'
Returns:
Image: List of Image
"""
for i in range(len(shape_list)):
if obj == "square":
square_x, square_y, angle_x, angle_y = self.__square_with_angle(
center_x_list[i], center_y_list[i], shape_list[i], angle_list[i]
)
elif obj == "ship":
square_x, square_y, angle_x, angle_y = self.__ship_with_angle(
center_x_list[i], center_y_list[i], shape_list[i], angle_list[i]
)
self.img[i][0].set_xdata(square_x)
self.img[i][0].set_ydata(square_y)
return self.img
def __rotate_pos(self, pos, angle):
"""Transformation the coordinate in the angle
Args:
pos (numpy.ndarray): local state, shape(data_size, 2)
angle (float): rotate angle, in radians
Returns:
rotated_pos (numpy.ndarray): shape(data_size, 2)
"""
rot_mat = np.array(
[[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
)
#! /usr/bin/env python3
import argparse
import glob
import numpy as np
import matplotlib.pyplot as plt
import pickle
from sklearn.preprocessing import quantile_transform as qt
import statsmodels.api as sm
from statsmodels.stats.multitest import fdrcorrection as fdr
from scipy import stats
np.set_printoptions(precision=2, suppress=True, edgeitems=100)
# Gene-TR mapping relation
def getSize(f):
bed = np.loadtxt(f, usecols=[1,2], dtype=int)
return bed[:,1] - bed[:,0]
def getLociList():
lociList = np.loadtxt(args.TRbed, dtype=object, usecols=[0,1,2])
loci2ind = {}
for ind, row in enumerate(lociList):
loci2ind["_".join(row)] = ind
return lociList, loci2ind
def indexGeneList(tissue):
tisGeneList = np.loadtxt(f'{args.expDir}/{tissue}.v8.normalized_expression.bed.gz', dtype=object, skiprows=1, usecols=[3])
tisGene2ind = {}
for ind, gene in enumerate(tisGeneList):
tisGene2ind[gene] = ind
return tisGeneList, tisGene2ind
def getLocusi2tisGenei(tisGene2ind):
locusi2tisGenei = {}
ncomb = 0
for row in TRxGene:
locusname = "_".join(row[:-1])
locusi = loci2ind[locusname]
if row[-1] in tisGene2ind:
if locusi not in locusi2tisGenei:
locusi2tisGenei[locusi] = []
locusi2tisGenei[locusi].append(tisGene2ind[row[-1]])
ncomb += 1
print(f'\t{len(locusi2tisGenei)} TRs')
print(f'\t{ncomb} TR x Gene tests')
return locusi2tisGenei
def getGenei2nloci(locusi2tisGenei):
genei2nloci = {}
for locusi, geneindices in locusi2tisGenei.items():
for genei in geneindices:
if genei not in genei2nloci:
genei2nloci[genei] = 0
genei2nloci[genei] += 1
return genei2nloci
# expression matrix
def loadSNPPCinfo():
if args.SNPPC is None:
return None, None
ndim = 838 # XXX
tmp = np.loadtxt(args.SNPPC, usecols=np.arange(11), dtype=object)[:ndim] # XXX
SNP_PCs = tmp[:,1:].astype(float)
SNP_sampleList = [s.split("-")[-1] for s in tmp[:,0]]
return SNP_PCs, SNP_sampleList
def getTisSNPResTpmMat(tissue, SNP_PCs, SNP_sampleList):
# SNP PCs
tmp = np.loadtxt(f'{args.expDir}/{tissue}.v8.normalized_expression.bed.gz', dtype=object, max_rows=1, comments="!")[4:]
tisSampleList = np.array([s[5:] for s in tmp])
snpSample2ind = {}
for sind, sample in enumerate(SNP_sampleList):
snpSample2ind[sample] = sind
sampleMap_tis2snp = np.zeros(tisSampleList.size, dtype=int)
for ind in range(tisSampleList.size):
sampleMap_tis2snp[ind] = snpSample2ind[tisSampleList[ind]]
tisSNP_PCs = SNP_PCs[sampleMap_tis2snp]
# GTEx PCs
gtexPCs = np.loadtxt(f'{args.covDir}/{tissue}.v8.covariates.txt', dtype=object, skiprows=1)[:,1:].astype(float).T
C = np.hstack((gtexPCs, tisSNP_PCs))
tisTpmMat = np.loadtxt(f'{args.expDir}/{tissue}.v8.normalized_expression.bed.gz', dtype=object, skiprows=1)[:,4:].astype(float).T
tisResTpmMat = (np.eye(C.shape[0]) - C @ np.linalg.inv(C.T @ C) @ C.T) @ tisTpmMat
return tisResTpmMat.T
# genotype matrix
def getGenotypeMat():
genMat = np.zeros([nloci, nwgs], dtype=float)
kmerfnames = glob.glob(f'{args.genDir}/*.tr.kmers')
for fi, fname in enumerate(kmerfnames):
print(".", end='', flush=True)
if fi % 100 == 99: print("")
with open(fname) as f:
locusi = -1 # XXX was -14
kms = 0
for line in f:
if line[0] == ">":
if locusi >= 0:
genMat[locusi, fi] = kms
kms = 0
locusi += 1
else:
kms += int(line.split()[1])
else:
genMat[locusi, fi] = kms
print("done reading genotypes", flush=True)
return genMat
def processBamCov(bamcovmat, mth=1.2, sth=0.1):
ctrlsize = getSize(args.ctrlbed)
badmask = np.zeros_like(ctrlsize, dtype=bool)
### compute coverage for each locus; normalize wrt sample global coverage
pnormcovmat = bamcovmat / (bamcovmat@ctrlsize / np.sum(ctrlsize))[:,None]
### check variance
stds = np.std(pnormcovmat, axis=0)
normstds = stds
badmask = np.logical_or(badmask, normstds > sth)
### check if mean is biased
mnormcov = np.mean(pnormcovmat, axis=0)
badmask = np.logical_or(badmask, mnormcov > mth)
### reject outliers
pctrlsize = ctrlsize[~badmask]
pcovmat = bamcovmat[:,~badmask]
return pcovmat@pctrlsize / np.sum(pctrlsize)
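# Note (added for clarity): the value returned above is one number per sample,
# i.e. the length-weighted mean coverage over the control loci that pass both
# the variance (sth) and mean-bias (mth) filters; it is used below to put each
# sample's TR kmer counts on a common depth scale.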
def correctGenMat():
gtexSex = np.loadtxt(args.phenotype, dtype=object, usecols=[0,1])[1:]
sample2sex = {}
for i in range(gtexSex.shape[0]):
sample = gtexSex[i,0].split("-")[1]
sample2sex[sample] = int(gtexSex[i,1])
print(len(sample2sex))
print(genMat.shape)
wgsSex = np.zeros_like(genomes, dtype=int)
for ind, g in enumerate(genomes):
wgsSex[ind] = sample2sex[g]
covmat = np.loadtxt(f'{args.outDir}/ctrl.cov', dtype=object)
gcov = processBamCov(covmat[:,2:].astype(float))
normGenMat = genMat / gcov
normGenMat[:args.NL1] /= 2
normGenMat[args.NL1:args.NL2] /= wgsSex
print(normGenMat.shape)
return normGenMat
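# Assumption (added note): the division above treats loci [0, NL1) as diploid
# autosomal loci (2 copies) and loci [NL1, NL2) as sex-chromosome loci whose
# copy number equals the per-sample phenotype sex code; this relies on the
# GTEx-style encoding of the sex column and is not validated here.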
def getTissueGenMat(tissue):
genoSample2ind = {}
for ind, sample in enumerate(genomes):
genoSample2ind[sample] = ind
tmp = np.loadtxt(f'{args.expDir}/{tissue}.v8.normalized_expression.bed.gz', dtype=object, max_rows=1, comments="!")[4:]
tisSampleList = np.array([s[5:] for s in tmp])
sampleMap_tis2geno = np.zeros(tisSampleList.shape[0], dtype=int)
for ind, sample in enumerate(tisSampleList):
sampleMap_tis2geno[ind] = genoSample2ind[sample]
return genMat[:,sampleMap_tis2geno]
# eQTL mapping
def runRegressionZ3(tisResTpmMat, tisGenMat, locusi2tisGenei, genei2nloci):
outs = {}
Y_zscore = (tisResTpmMat - np.mean(tisResTpmMat, axis=1)[:,None]) / np.std(tisResTpmMat, axis=1)[:,None]
#!/usr/bin/python
import sys, getopt
import os
import pandas as pd
import numpy as np
import pyquaternion as pyq
from pyquaternion import Quaternion
from scipy import signal
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation as R
def main(argv):
inputfile = ''
calfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:c:o:",["ifile=", "cfile=","ofile="])
except getopt.GetoptError:
print('test.py -i <inputfile> -c <calfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -i <inputfile> -c <calfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-c", "--ifile"):
calfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
# Creating Functions
def orientation_matrix(q0, q1, q2, q3):
# based on https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
r11 = 2 * (q0 ** 2 + q1 ** 2) - 1
r12 = 2 * (q1 * q2 - q0 * q3)
r13 = 2 * (q1 * q3 + q0 * q2)
r21 = 2 * (q1 * q2 + q0 * q3)
r22 = 2 * (q0 ** 2 + q2 ** 2) - 1
r23 = 2 * (q2 * q3 - q0 * q1)
r31 = 2 * (q1 * q3 - q0 * q2)
r32 = 2 * (q2 * q3 + q0 * q1)
r33 = 2 * (q0 ** 2 + q3 ** 2) - 1
return r11, r12, r13, r21, r22, r23, r31, r32, r33
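# Sanity check (illustrative only): the identity quaternion (q0=1, q1=q2=q3=0)
# maps to the identity rotation matrix, i.e.
#   orientation_matrix(1, 0, 0, 0) == (1, 0, 0, 0, 1, 0, 0, 0, 1)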
def compute_relative_orientation(seg, cal):
'''
Calculating the relative orientation between two matrices. This is used for the initial normalization
procedure using the standing calibration
'''
R_11 = np.array([])
R_12 = np.array([])
R_13 = np.array([])
R_21 = np.array([])
R_22 = np.array([])
R_23 = np.array([])
R_31 = np.array([])
R_32 = np.array([])
R_33 = np.array([])
for i in range(seg.shape[0]):
segment = np.asmatrix([
[np.array(seg['o11'])[i], np.array(seg['o12'])[i], np.array(seg['o13'])[i]],
[np.array(seg['o21'])[i], np.array(seg['o22'])[i], np.array(seg['o23'])[i]],
[np.array(seg['o31'])[i], np.array(seg['o32'])[i], np.array(seg['o33'])[i]]
])
segment_cal = np.asmatrix([
[np.array(cal['o11'])[i], np.array(cal['o12'])[i], np.array(cal['o13'])[i]],
[np.array(cal['o21'])[i], np.array(cal['o22'])[i], np.array(cal['o23'])[i]],
[np.array(cal['o31'])[i], np.array(cal['o32'])[i], np.array(cal['o33'])[i]]
])
# normalization
r = np.matmul(segment, segment_cal.T)
new_orientations = np.asarray(r).reshape(-1)
R_11 = np.append(R_11, new_orientations[0])
R_12 = np.append(R_12, new_orientations[1])
R_13 = np.append(R_13, new_orientations[2])
R_21 = np.append(R_21, new_orientations[3])
R_22 = np.append(R_22, new_orientations[4])
R_23 = np.append(R_23, new_orientations[5])
R_31 = np.append(R_31, new_orientations[6])
R_32 = np.append(R_32, new_orientations[7])
R_33 = np.append(R_33, new_orientations[8])
return R_11, R_12, R_13, R_21, R_22, R_23, R_31, R_32, R_33
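# Note (added for clarity): each frame computes R_rel = R_segment @ R_cal.T, so
# when the segment orientation matches the standing-calibration orientation the
# result is the identity matrix; the nine R_ij arrays returned hold this
# relative rotation per frame.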
def compute_joint_angle(df, child, parent):
c = df[df[' jointType'] == child]
p = df[df[' jointType'] == parent]
ml = np.array([])
ap = np.array([])
v = np.array([])
# Compute Rotation Matrix Components
for i in range(c.shape[0]):
segment = np.asmatrix([
[np.array(c['n_o11'])[i], np.array(c['n_o12'])[i], np.array(c['n_o13'])[i]],
[np.array(c['n_o21'])[i], np.array(c['n_o22'])[i], np.array(c['n_o23'])[i]],
[np.array(c['n_o31'])[i], np.array(c['n_o32'])[i], np.array(c['n_o33'])[i]]
])
reference_segment = np.asmatrix([
[np.array(p['n_o11'])[i], np.array(p['n_o12'])[i], np.array(p['n_o13'])[i]],
[np.array(p['n_o21'])[i], np.array(p['n_o22'])[i], np.array(p['n_o23'])[i]],
[np.array(p['n_o31'])[i], np.array(p['n_o32'])[i], np.array(p['n_o33'])[i]]
])
# transformation of segment to reference segment
r = np.matmul(reference_segment.T, segment)
# decomposition to Euler angles
rotations = R.from_matrix(r).as_euler('xyz', degrees=True)
ml = np.append(ml, rotations[0])
ap = np.append(ap, rotations[1])
v = np.append(v, rotations[2])
return ml, ap, v
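# Note (added for clarity): the joint angle is the child segment expressed in
# the parent frame (R_parent.T @ R_child), decomposed into x-y-z Euler angles
# and returned as the (ml, ap, v) component arrays, in degrees.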
def resample_df(d, new_freq=30, method='linear'):
# Resamples data at 30Hz unless otherwise specified
joints_without_quats = [3, 15, 19, 21, 22, 23, 24]
resampled_df = pd.DataFrame(
columns=['# timestamp', ' jointType', ' orientation.X', ' orientation.Y', ' orientation.Z',
' orientation.W', ' position.X', ' position.Y', ' position.Z'])
new_df = pd.DataFrame()
for i in d[' jointType'].unique():
current_df = d.loc[d[' jointType'] == i].copy()
old_times = np.array(current_df['# timestamp'])
new_times = np.arange(min(current_df['# timestamp']), max(current_df['# timestamp']), 1 / new_freq)
o_x = np.array(current_df[' orientation.X'])
o_y = np.array(current_df[' orientation.Y'])
o_z = np.array(current_df[' orientation.Z'])
o_w = np.array(current_df[' orientation.W'])
p_x = np.array(current_df[' position.X'])
p_y = np.array(current_df[' position.Y'])
p_z = np.array(current_df[' position.Z'])
if i in joints_without_quats:
orientation_x = np.repeat(0.0, len(new_times))
orientation_y = np.repeat(0.0, len(new_times))
orientation_z = np.repeat(0.0, len(new_times))
orientation_w = np.repeat(0.0, len(new_times))
else:
if method == "linear":
orientation_x = np.interp(new_times, old_times, o_x)
orientation_y = np.interp(new_times, old_times, o_y)
orientation_z = np.interp(new_times, old_times, o_z)
orientation_w = np.interp(new_times, old_times, o_w)
elif method == 'slerp':
quats = []
for t in range(len(old_times)):
quats.append([o_x[t], o_y[t], o_z[t], o_w[t]])
# Create rotation object
quats_object = R.from_quat(quats)
# Spherical Linear Interpolation
slerp = Slerp(np.array(current_df['# timestamp']), quats_object)
interp_rots = slerp(new_times)
new_quats = interp_rots.as_quat()
# Create new orientation objects
orientation_x = np.array([item[0] for item in new_quats])
orientation_y = np.array([item[1] for item in new_quats])
orientation_z = np.array([item[2] for item in new_quats])
orientation_w = np.array([item[3] for item in new_quats])
else:
raise ValueError("Method must be either linear or spherical (slerp) interpolation.")
position_x = signal.resample(p_x, num=int(max(current_df['# timestamp']) * new_freq))
position_y = signal.resample(p_y, num=int(max(current_df['# timestamp']) * new_freq))
position_z = signal.resample(p_z, num=int(max(current_df['# timestamp']) * new_freq))
new_df['# timestamp'] = pd.Series(new_times)
new_df[' jointType'] = pd.Series(np.repeat(i, len(new_times)))
new_df[' orientation.X'] = pd.Series(orientation_x)
new_df[' orientation.Y'] = pd.Series(orientation_y)
new_df[' orientation.Z'] = pd.Series(orientation_z)
new_df[' orientation.W'] = pd.Series(orientation_w)
new_df[' position.X'] = pd.Series(position_x)
new_df[' position.Y'] = pd.Series(position_y)
new_df[' position.Z'] = pd.Series(position_z)
resampled_df = resampled_df.append(new_df, ignore_index=True)
return resampled_df
def smooth_rotations(o_x, o_y, o_z, o_w):
o_x = np.array(o_x)
o_y = np.array(o_y)
o_z = np.array(o_z)
o_w = np.array(o_w)
trajNoisy = []
for i in range(len(o_x)):
trajNoisy.append([o_x[i], o_y[i], o_z[i], o_w[i]])
trajNoisy = np.array(trajNoisy)
# This code was adapted from https://ww2.mathworks.cn/help/nav/ug/lowpass-filter-orientation-using-quaternion-slerp.html
# As explained in the link above, "The interpolation parameter to slerp is in the closed-interval [0,1], so the output of dist
# must be re-normalized to this range. However, the full range of [0,1] for the interpolation parameter gives poor performance,
# so it is limited to a smaller range hrange centered at hbias."
hrange = 0.4
hbias = 0.4
low = max(min(hbias - (hrange / 2), 1), 0)
high = max(min(hbias + (hrange / 2), 1), 0)
hrangeLimited = high - low
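# With hrange = 0.4 and hbias = 0.4 as set above, low = 0.2, high = 0.6 and
# hrangeLimited = 0.4; a frame-to-frame rotation of d radians therefore maps to
# a slerp parameter of (d / pi) * 0.4 + 0.2.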
# initial filter state is the quaternion at frame 0
y = trajNoisy[0]
qout = []
for i in range(1, len(trajNoisy)):
x = trajNoisy[i]
# x = mathutils.Quaternion(x)
# y = mathutils.Quaternion(y)
# d = x.rotation_difference(y).angle
x = pyq.Quaternion(x)
y = pyq.Quaternion(y)
d = (x.conjugate * y).angle
# Renormalize dist output to the range [low, high]
hlpf = (d / np.pi) * hrangeLimited + low
# y = y.slerp(x, hlpf)
y = Quaternion.slerp(y, x, hlpf).elements
qout.append(np.array(y))
# because a frame of data is lost during this process, I've (arbitrarily) decided to append an extra quaternion at the end of the trial
# that is identical to the n-1th frame. This keeps the length consistent (so there are no issues with merging later) and should not
# negatively impact the data since the last frame is rarely of interest (and the data collector can decide to collect for a split second
# after their trial of interest has completed to attenuate any of these "errors" that may propagate in the analyses)
qout.append(qout[int(len(qout) - 1)])
orientation_x = [item[0] for item in qout]
orientation_y = [item[1] for item in qout]
orientation_z = [item[2] for item in qout]
orientation_w = [item[3] for item in qout]
return orientation_x, orientation_y, orientation_z, orientation_w
def smooth_quaternions(d):
for i in d[' jointType'].unique():
current_df = d.loc[d[' jointType'] == i].copy()
current_df[' orientation.X'], current_df[' orientation.Y'], current_df[' orientation.Z'], current_df[
' orientation.W'] = smooth_rotations(current_df[' orientation.X'], current_df[' orientation.Y'],
current_df[' orientation.Z'], current_df[' orientation.W'])
d[d[' jointType'] == i] = current_df
return d
def compute_segment_angle(df, SEGMENT):
s = df[df[' jointType'] == SEGMENT]
ml = np.array([])
ap = np.array([])
v = np.array([])
# Compute Rotation Matrix Components
for i in range(s.shape[0]):
segment = np.asmatrix([
[np.array(s['n_o11'])[i], np.array(s['n_o12'])[i], np.array(s['n_o13'])[i]],
[np.array(s['n_o21'])[i], np.array(s['n_o22'])[i], np.array(s['n_o23'])[i]],
[np.array(s['n_o31'])[i], np.array(s['n_o32'])[i], np.array(s['n_o33'])[i]]
])
# decomposition to Euler angles
rotations = R.from_matrix(segment).as_euler('xyz', degrees=True)
ml = np.append(ml, rotations[0])
ap = np.append(ap, rotations[1])
v = np.append(v, rotations[2])
return ml, ap, v
dir = os.getcwd()
# Loading Data
print('... Loading data')
cal = pd.read_csv(os.path.join(dir, calfile))
df = pd.read_csv(os.path.join(dir, inputfile))
df['# timestamp'] = df['# timestamp'] * 10 ** -3
cal['# timestamp'] = cal['# timestamp'] * 10 ** -3
df_reoriented = df.copy()
cal_reoriented = cal.copy()
print('... Reorienting LCSs')
# Hips
df_reoriented.loc[df[' jointType'] == 16, ' orientation.X'] = df.loc[df[' jointType'] == 16, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 16, ' orientation.Y'] = df.loc[df[' jointType'] == 16, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 16, ' orientation.Z'] = df.loc[df[' jointType'] == 16, ' orientation.Y']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.X'] = cal.loc[cal[' jointType'] == 16, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Y'] = cal.loc[cal[' jointType'] == 16, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Z'] = cal.loc[cal[' jointType'] == 16, ' orientation.Y']
df_reoriented.loc[df[' jointType'] == 12, ' orientation.X'] = df.loc[df[' jointType'] == 12, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 12, ' orientation.Y'] = df.loc[df[' jointType'] == 12, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 12, ' orientation.Z'] = df.loc[df[' jointType'] == 12, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.X'] = cal.loc[cal[' jointType'] == 12, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Y'] = cal.loc[cal[' jointType'] == 12, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Z'] = cal.loc[cal[' jointType'] == 12, ' orientation.Y'] * -1
# Knees
df_reoriented.loc[df[' jointType'] == 17, ' orientation.X'] = df.loc[df[' jointType'] == 17, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 17, ' orientation.Y'] = df.loc[df[' jointType'] == 17, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 17, ' orientation.Z'] = df.loc[df[' jointType'] == 17, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.X'] = cal.loc[cal[' jointType'] == 17, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Y'] = cal.loc[cal[' jointType'] == 17, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Z'] = cal.loc[cal[' jointType'] == 17, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 13, ' orientation.X'] = df.loc[df[' jointType'] == 13, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 13, ' orientation.Y'] = df.loc[df[' jointType'] == 13, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 13, ' orientation.Z'] = df.loc[df[' jointType'] == 13, ' orientation.Z'] * -1
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.X'] = cal.loc[cal[' jointType'] == 13, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Y'] = cal.loc[cal[' jointType'] == 13, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Z'] = cal.loc[cal[' jointType'] == 13, ' orientation.Z'] * -1
# Ankles
df_reoriented.loc[df[' jointType'] == 18, ' orientation.X'] = df.loc[df[' jointType'] == 18, ' orientation.X'] * -1
df_reoriented.loc[df[' jointType'] == 18, ' orientation.Y'] = df.loc[df[' jointType'] == 18, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 18, ' orientation.Z'] = df.loc[df[' jointType'] == 18, ' orientation.Z']
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.X'] = cal.loc[cal[' jointType'] == 18, ' orientation.X'] * -1
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Y'] = cal.loc[cal[' jointType'] == 18, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Z'] = cal.loc[cal[' jointType'] == 18, ' orientation.Z']
df_reoriented.loc[df[' jointType'] == 14, ' orientation.X'] = df.loc[df[' jointType'] == 14, ' orientation.X']
df_reoriented.loc[df[' jointType'] == 14, ' orientation.Y'] = df.loc[df[' jointType'] == 14, ' orientation.Y'] * -1
df_reoriented.loc[df[' jointType'] == 14, ' orientation.Z'] = df.loc[df[' jointType'] == 14, ' orientation.Z'] * -1
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.X'] = cal.loc[cal[' jointType'] == 14, ' orientation.X']
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Y'] = cal.loc[cal[' jointType'] == 14, ' orientation.Y'] * -1
cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Z'] = cal.loc[cal[' jointType'] == 14, ' orientation.Z'] * -1
# Resampling data to 30Hz
df_reoriented = resample_df(df_reoriented, new_freq=30, method='slerp')
# Smooth Quaternion Rotations
df_reoriented = smooth_quaternions(df_reoriented)
# need to re-sort and reset the index following the resampling
df_reoriented = df_reoriented.sort_values(by=['# timestamp', ' jointType']).reset_index()
df_reoriented['o11'], df_reoriented['o12'], df_reoriented['o13'], df_reoriented['o21'], df_reoriented['o22'], \
df_reoriented['o23'], df_reoriented['o31'], df_reoriented['o32'], df_reoriented['o33'] \
= orientation_matrix(df_reoriented[' orientation.W'], df_reoriented[' orientation.X'],
df_reoriented[' orientation.Y'], df_reoriented[' orientation.Z'])
cal_reoriented['o11'], cal_reoriented['o12'], cal_reoriented['o13'], cal_reoriented['o21'], cal_reoriented['o22'], \
cal_reoriented['o23'], cal_reoriented['o31'], cal_reoriented['o32'], cal_reoriented['o33'] \
= orientation_matrix(cal_reoriented[' orientation.W'], cal_reoriented[' orientation.X'],
cal_reoriented[' orientation.Y'], cal_reoriented[' orientation.Z'])
df_reoriented.set_index(' jointType', inplace=True)
cal_reoriented.set_index(' jointType', inplace=True)
cal_reoriented = cal_reoriented.groupby(' jointType').mean().drop(columns=['# timestamp'])
cal_reoriented = pd.concat([cal_reoriented] * np.int64(df_reoriented.shape[0] / 25))
print('... Normalizing to calibration pose')
# Normalize orientations to calibration pose
df_reoriented['n_o11'], df_reoriented['n_o12'], df_reoriented['n_o13'], df_reoriented['n_o21'], df_reoriented[
'n_o22'], \
df_reoriented['n_o23'], df_reoriented['n_o31'], df_reoriented['n_o32'], df_reoriented['n_o33'] \
= np.array(compute_relative_orientation(cal_reoriented, df_reoriented))
df_reoriented.reset_index(inplace=True)
print('... Computing joint angles')
r_hipFlexion, r_hipAbduction, r_hipV = compute_joint_angle(df_reoriented, child=17, parent=16)
l_hipFlexion, l_hipAbduction, l_hipV = compute_joint_angle(df_reoriented, child=13, parent=12)
r_kneeFlexion, r_kneeAbduction, r_kneeV = compute_joint_angle(df_reoriented, child=18, parent=17)
l_kneeFlexion, l_kneeAbduction, l_kneeV = compute_joint_angle(df_reoriented, child=14, parent=13)
# Note that 16 or 12 can be used for the pelvis (given Kinect's definitions)
pelvis_rotation = compute_segment_angle(df_reoriented, 16)[0]
r_thigh_rotation = compute_segment_angle(df_reoriented, 17)[0]
l_thigh_rotation = compute_segment_angle(df_reoriented, 13)[0]
r_shank_rotation = compute_segment_angle(df_reoriented, 18)[0]
l_shank_rotation = compute_segment_angle(df_reoriented, 14)[0]
new_df = pd.DataFrame({
'frame': np.arange(df_reoriented['# timestamp'].unique().shape[0]),
'timeStamp': df_reoriented['# timestamp'].unique(),
# Below are adjusted for relatively easy anatomical interpretations
'r_hipFlexion' : r_hipFlexion,
'l_hipFlexion' : l_hipFlexion*-1,
'r_hipAbduction' : r_hipAbduction*-1,
'l_hipAbduction' : l_hipAbduction,
'r_hipV' : r_hipV *-1,
'l_hipV' : l_hipV *-1,
'r_kneeFlexion' : r_kneeFlexion*-1,
'l_kneeFlexion' : l_kneeFlexion,
'r_kneeAdduction' : r_kneeAbduction,
'l_kneeAdduction' : l_kneeAbduction*-1,
'r_kneeV' : r_kneeV*-1,
'l_kneeV' : l_kneeV,
# Below are adjusted specifically for use with relative phase analyses
'pelvis_rotation': pelvis_rotation,
'r_thigh_rotation': r_thigh_rotation,
'l_thigh_rotation': l_thigh_rotation*-1,
'r_shank_rotation': r_shank_rotation,
'l_shank_rotation': l_shank_rotation*-1,
# Below are left in the GCS
'r_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.X']),
'r_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Y']),
'r_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Z']),
'l_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.X']),
'l_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Y']),
'l_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Z']),
'r_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.X']),
'r_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Y']),
'r_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Z']),
'l_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.X']),
'l_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Y']),
'l_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Z']),
'r_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.X']),
'r_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Y']),
'r_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Z']),
'l_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.X']),
'l_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Y']),
'l_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Z']),
'r_foot_x': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.X']),
'r_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Y']),
'r_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Z']),
'l_foot_x': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.X']),
'l_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Y']),
'l_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Z']),
'spinebase_x': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.X']),
'spinebase_y': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Y']),
'spinebase_z': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Z']),
'spinemid_x': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.X']),
'spinemid_y': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.Y']),
'spinemid_z': np.array(df_reoriented[df_reoriented[' jointType'] == 1][' position.Z']),
'neck_x': np.array(df_reoriented[df_reoriented[' jointType'] == 2][' position.X']),
'neck_y': np.array(df_reoriented[df_reoriented[' jointType'] == 2][' position.Y']),
"""PMAC Parser
Library for parsing and running PMAC programs
"""
import numpy as np
from pygments.token import Number
from pmacparser.pmac_lexer import PmacLexer
class ParserError(Exception):
"""Parser error exception."""
def __init__(self, message, token):
super(ParserError, self).__init__()
self.message = message
self.line = token.line
def __str__(self):
return '[Line %s] %s' % (self.line, self.message)
class Variables(object):
"""Represents a PMAC Variable (I, M, P, Q)."""
def __init__(self):
self.variable_dict = {}
def get_i_variable(self, var_num):
"""Return the value of the specified I variable."""
return self.get_var('I', var_num)
def get_p_variable(self, var_num):
"""Return the value of the specified P variable."""
return self.get_var('P', var_num)
def get_q_variable(self, var_num):
"""Return the value of the specified Q variable."""
return self.get_var('Q', var_num)
def get_m_variable(self, var_num):
"""Return the value of the specified M variable."""
return self.get_var('M', var_num)
def set_i_variable(self, var_num, value):
"""Set the value of the specified I variable."""
self.set_var('I', var_num, value)
def set_p_variable(self, var_num, value):
"""Set the value of the specified P variable."""
self.set_var('P', var_num, value)
def set_q_variable(self, var_num, value):
"""Set the value of the specified Q variable."""
self.set_var('Q', var_num, value)
def set_m_variable(self, var_num, value):
"""Set the value of the specified M variable."""
self.set_var('M', var_num, value)
def get_var(self, var_type, var_num):
"""Return the value of the specified variable type and number."""
addr = '%s%s' % (var_type, var_num)
if addr in self.variable_dict:
result = self.variable_dict[addr]
else:
result = 0
npvalue = np.array(result)
return npvalue.astype(float)
def set_var(self, var_type, var_num, value):
"""Set the value of the variable type and number with the value specified."""
addr = '%s%s' % (var_type, var_num)
self.variable_dict[addr] = value
def populate_with_dict(self, dictionary):
"""Copy the input dictionary into the local variable dictionary."""
self.variable_dict = dictionary.copy()
def to_dict(self):
"""Return the variables as a dictionary."""
return self.variable_dict
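# Illustrative sketch of how this store behaves (not part of the original
# module; the variable numbers are arbitrary):
#
# v = Variables()
# v.set_p_variable(1, 3.5)
# v.get_p_variable(1) # -> array(3.5); values are returned as float arrays
# v.get_q_variable(99) # -> array(0.0); unset variables read back as 0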
class PMACParser(object):
"""Parses a PMAC program, and runs an emulator for forward kinematic programs
Uses the PMAC Lexer to tokenise a list of strings, and then parses the tokens,
using an input dictionary of variables to evaluate the expressions in the code,
populating a dictionary with the results of the program operations.
It is a modification of the dls_pmacanalyse code developed by <NAME>.
"""
def __init__(self, program_lines):
self.lexer = PmacLexer()
self.lines = program_lines
self.lexer.lex(self.lines)
self.variable_dict = Variables()
self.if_level = 0
self.while_level = 0
self.while_dict = {}
self.pre_process()
def pre_process(self):
"""Evaluate and replace any Constants Expressions (e.g. 4800+17)."""
token = self.lexer.get_token()
while token is not None:
if token.type == Number.ConstantExpression:
token_text = str(token)
token_text = token_text.replace("(", "")
token_text = token_text.replace(")", "")
tokens = token_text.split("+")
int1 = int(tokens[0])
int2 = int(tokens[1])
val = int1 + int2
token.set(str(val), token.line)
token.type = Number
token = self.lexer.get_token()
self.lexer.reset()
def parse(self, variable_dict):
"""Top level kinematic program parser."""
self.variable_dict.populate_with_dict(variable_dict)
token = self.lexer.get_token()
while token is not None:
if token == 'Q':
self.parseQ()
elif token == 'P':
self.parseP()
elif token == 'I':
self.parseI()
elif token == 'M':
self.parseM()
elif token == 'IF':
self.parseIf()
elif token == 'ELSE':
self.parseElse(token)
elif token in ('ENDIF', 'ENDI'):
self.parseEndIf(token)
elif token == 'WHILE':
self.parseWhile(token)
elif token in ('ENDWHILE', 'ENDW'):
self.parseEndWhile(token)
elif token in ('RETURN', 'RET'):
self.parseReturn(token)
else:
raise ParserError('Unexpected token: %s' % token, token)
token = self.lexer.get_token()
self.lexer.reset()
return self.variable_dict.to_dict()
def parseM(self):
"""Parse an M expression - typically an assignment."""
num = self.lexer.get_token()
if num.is_int():
num = num.to_int()
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_m_variable(num, val)
else:
self.lexer.put_token(token)
# Report M variable values (do nothing)
else:
raise ParserError('Unexpected statement: M %s' % num, num)
def parseI(self):
"""Parse an I expression - typically an assignment."""
num = self.lexer.get_token()
if num.is_int():
num = num.to_int()
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_i_variable(num, val)
else:
self.lexer.put_token(token)
# Report I variable values (do nothing)
elif num == '(':
num = self.parseExpression()
self.lexer.get_token(')')
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_i_variable(num, val)
else:
self.lexer.put_token(token)
# Report I variable values (do nothing)
else:
raise ParserError('Unexpected statement: I %s' % num, num)
def parseP(self):
"""Parse a P expression - typically an assignment."""
num = self.lexer.get_token()
if num.is_int():
num = num.to_int()
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_p_variable(num, val)
else:
self.lexer.put_token(token)
# Report P variable values (do nothing)
elif num == '(':
num = self.parseExpression()
self.lexer.get_token(')')
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_p_variable(num, val)
else:
self.lexer.put_token(token)
# Report P variable values (do nothing)
else:
self.lexer.put_token(num)
# Do nothing
def parseQ(self):
"""Parse a Q expression - typically an assignment."""
num = self.lexer.get_token()
if num.is_int():
num = num.to_int()
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_q_variable(num, val)
else:
self.lexer.put_token(token)
# Report Q variable values (do nothing)
elif num == '(':
num = self.parseExpression()
self.lexer.get_token(')')
token = self.lexer.get_token()
if token == '=':
val = self.parseExpression()
self.variable_dict.set_q_variable(num, val)
else:
self.lexer.put_token(token)
# Report Q variable values (do nothing)
else:
self.lexer.put_token(num)
# Do nothing
def parseCondition(self):
"""Parse a condition, return the result of the condition."""
has_parenthesis = True
token = self.lexer.get_token()
if token != '(':
self.lexer.put_token(token)
has_parenthesis = False
value1 = self.parseExpression()
token = self.lexer.get_token()
comparator = token
value2 = self.parseExpression()
if comparator == '=':
result = value1 == value2
elif comparator == '!=':
result = value1 != value2
elif comparator == '>':
result = value1 > value2
elif comparator == '!>':
result = value1 <= value2
elif comparator == '<':
result = value1 < value2
elif comparator == '!<':
result = value1 >= value2
else:
raise ParserError('Expected comparator, got: %s' % comparator, comparator)
# Take ) or AND or OR
token = self.lexer.get_token()
if token == 'AND' or token == 'OR':
self.lexer.put_token(token)
result = self.parseConditionalOR(result)
if has_parenthesis:
self.lexer.get_token(')')
elif token == ')':
if not has_parenthesis:
self.lexer.put_token(token)
elif token != ')':
raise ParserError('Expected ) or AND/OR, got: %s' % token, token)
return result
def parseConditionalOR(self, current_value):
"""Parse a conditional OR token, return the result of the condition."""
result = self.parseConditionalAND(current_value)
token = self.lexer.get_token()
if token == 'OR':
condition_result = self.parseCondition()
result = self.parseConditionalOR(condition_result) or current_value
elif token == 'AND':
self.lexer.put_token(token)
result = self.parseConditionalOR(result)
else:
self.lexer.put_token(token)
return result
def parseConditionalAND(self, current_value):
"""Parse a conditional AND token, return the result of the condition."""
token = self.lexer.get_token()
if token == 'AND':
result = self.parseCondition() and current_value
else:
self.lexer.put_token(token)
result = current_value
return result
def parseIf(self):
"""Parse an IF token, skipping to after the else if necessary."""
condition = self.parseCondition()
if_condition = self.parseConditionalOR(condition)
# Condition could be numpy array, check and throw if not all True or all False
if np.all(if_condition):
if_condition = True
elif not np.any(if_condition):
if_condition = False
else:
raise Exception('IF condition is an array whose elements are not all the same value')
self.if_level += 1
if not if_condition:
this_if_level = self.if_level
token = self.lexer.get_token()
while (token != 'ELSE' and token not in ('ENDIF', 'ENDI')) or this_if_level != self.if_level:
if token in ('ENDIF', 'ENDI'):
self.if_level -= 1
token = self.lexer.get_token()
if token == 'IF':
self.if_level += 1
if token in ('ENDIF', 'ENDI'):
self.parseEndIf(token)
def parseElse(self, token):
"""Parse an ELSE token, skipping to ENDIF if necessary."""
if self.if_level > 0:
this_if_level = self.if_level
while token not in ('ENDIF', 'ENDI') or this_if_level != self.if_level:
if token in ('ENDIF', 'ENDI'):
self.if_level -= 1
token = self.lexer.get_token()
if token == 'IF':
self.if_level += 1
else:
raise ParserError('Unexpected ELSE', token)
def parseEndIf(self, t):
"""Parse an ENDIF token, closing off the current IF level."""
if self.if_level > 0:
self.if_level -= 1
else:
raise ParserError('Unexpected ENDIF/ENDI', t)
def parseWhile(self, token):
"""Parse a WHILE token, skipping to the ENDWHILE the condition is false."""
self.while_level += 1
# Get all tokens up to the ENDWHILE
while_tokens = []
this_while_level = self.while_level
while_tokens.append(token)
while (token not in ('ENDWHILE', 'ENDW')) or this_while_level != self.while_level:
if token in ('ENDWHILE', 'ENDW'):
self.while_level -= 1
token = self.lexer.get_token()
while_tokens.append(token)
if token == 'WHILE':
self.while_level += 1
# Put the tokens back on
self.lexer.put_tokens(while_tokens)
# Get the WHILE
token = self.lexer.get_token()
condition = self.parseCondition()
condition = self.parseConditionalOR(condition)
# Condition could be numpy array, check and throw if not all True or all False
if np.all(condition):
condition = True
elif not np.any(condition):
condition = False
else:
raise Exception('WHILE condition is an array whose elements are not all the same value')
if condition:
self.while_dict[this_while_level] = while_tokens
else:
while (token not in ('ENDWHILE', 'ENDW')) or this_while_level != self.while_level:
if token in ('ENDWHILE', 'ENDW'):
self.while_level -= 1
token = self.lexer.get_token()
while_tokens.append(token)
if token == 'WHILE':
self.while_level += 1
self.while_level -= 1
def parseEndWhile(self, t):
"""Parse an ENDWHILE statement, placing the tokens within the while back on to the list to be executed."""
if self.while_level > 0:
while_tokens = self.while_dict[self.while_level]
# Put the tokens back on
self.lexer.put_tokens(while_tokens)
self.while_level -= 1
else:
raise ParserError('Unexpected ENDWHILE/ENDW', t)
def parseReturn(self, t):
"""Parse a RETURN statement, which can just be ignored."""
pass
def parseExpression(self):
"""Return the result of the expression."""
# Currently supports syntax of the form:
# <expression> ::= <e1> { <sumop> <e1> }
# <e1> ::= <e2> { <multop> <e2> }
# <e2> ::= [ <monop> ] <e3>
# <e3> ::= '(' <expression> ')' | <constant> | 'P'<integer> | 'Q'<integer> | 'I'<integer> | 'M' <integer>
# | <mathop><float>
# <sumop> ::= '+' | '-' | '|' | '^'
# <multop> ::= '*' | '/' | '%' | '&'
# <monop> ::= '+' | '-'
# <mathop> ::= 'SIN' | 'COS' | 'TAB' | 'ASIN' | 'ACOS' | 'ATAN' | 'ATAN2'
# | 'SQRT' | 'ABS' | 'EXT' | 'IN' | 'LN'
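# For example, "P(1)+2*SIN(30)" evaluates as P1 + (2 * sin(30 degrees)) when
# I15 = 0 (degree mode), following the precedence rules sketched above.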
result = self.parseE1()
going = True
while going:
token = self.lexer.get_token()
if token == '+':
result = result + self.parseE1()
elif token == '-':
result = result - self.parseE1()
elif token == '|':
result = np.bitwise_or(np.array(result).astype(int), np.array(self.parseE1()).astype(int))
elif token == '^':
result = np.bitwise_xor(np.array(result).astype(int), np.array(self.parseE1()).astype(int))
else:
self.lexer.put_token(token)
going = False
return result
def parseE1(self):
"""Return the result of a sub-expression containing multiplicative operands."""
result = self.parseE2()
going = True
while going:
token = self.lexer.get_token()
if token == '*':
result = result * self.parseE2()
elif token == '/':
result = result / self.parseE2()
elif token == '%':
result = result % self.parseE2()
elif token == '&':
result = np.bitwise_and(np.array(result).astype(int), np.array(self.parseE2()).astype(int))
else:
self.lexer.put_token(token)
going = False
return result
def parseE2(self):
"""Return the result of a sub-expression containing monadic operands."""
monop = self.lexer.get_token()
if monop not in ['+', '-']:
self.lexer.put_token(monop)
monop = '+'
result = self.parseE3()
if monop == '-':
result = -result
return result
def parseE3(self):
"""Return the result of a sub-expression containing a value.
This could be an I,P,Q or M variable, or a constant or a
parenthesised expression, or a mathematical operation.
"""
token = self.lexer.get_token()
if token == '(':
result = self.parseExpression()
self.lexer.get_token(')')
elif token == 'Q':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
result = self.variable_dict.get_q_variable(value)
elif token == 'P':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
result = self.variable_dict.get_p_variable(value)
elif token == 'I':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
result = self.variable_dict.get_i_variable(value)
elif token == 'M':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
result = self.variable_dict.get_m_variable(value)
elif token == 'SIN':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
I15 = self.variable_dict.get_i_variable(15)
if I15 == 0:
value = np.radians(value)
result = np.sin(value)
elif token == 'COS':
token = self.lexer.get_token()
if token == '(':
value = self.parseExpression()
self.lexer.get_token(')')
else:
value = token
I15 = self.variable_dict.get_i_variable(15)
if I15 == 0:
value = np.radians(value)
# Author: <NAME>
# email: <EMAIL>
import os, sys, numpy as np, pytest
from PIL import Image
import init_paths
from type_check import isimsize, isimage_dimension, iscolorimage_dimension, isgrayimage_dimension, isuintimage, isfloatimage, isnpimage, ispilimage, isimage
def test_isimsize():
input_test = np.zeros((100, 100), dtype='uint8')
input_test = input_test.shape
assert isimsize(input_test)
input_test = [100, 200]
assert isimsize(input_test)
input_test = (100, 200)
assert isimsize(input_test)
input_test = np.array([100, 200])
assert isimsize(input_test)
input_test = np.zeros((100, 100, 3), dtype='float32')
input_test = input_test.shape
assert isimsize(input_test) is False
input_test = [100, 200, 3]
assert isimsize(input_test) is False
input_test = (100, 200, 3)
assert isimsize(input_test) is False
def test_ispilimage():
input_test = Image.fromarray(np.zeros((100, 100, 3), dtype='uint8'))
assert ispilimage(input_test)
input_test = Image.fromarray(np.zeros((100, 100), dtype='uint8'))
assert ispilimage(input_test)
input_test = np.zeros((100, 100), dtype='uint8')
assert ispilimage(input_test) is False
input_test = np.zeros((100, 100), dtype='float32')
assert ispilimage(input_test) is False
def test_iscolorimage_dimension():
input_test = np.zeros((100, 100, 4), dtype='uint8')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 3), dtype='uint8')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 4), dtype='float32')
assert iscolorimage_dimension(input_test)
input_test = np.zeros((100, 100, 3), dtype='float64')
assert iscolorimage_dimension(input_test)
input_test = Image.fromarray(np.zeros((100, 100, 3), dtype='uint8'))
assert iscolorimage_dimension(input_test)
input_test = Image.fromarray(np.zeros((100, 100), dtype='uint8'))
assert iscolorimage_dimension(input_test) is False
input_test = np.zeros((100, 100), dtype='float32')
assert iscolorimage_dimension(input_test) is False
input_test = np.zeros((100, 100, 1), dtype='uint8')
assert iscolorimage_dimension(input_test) is False
input_test = np.zeros((100, 100, 2), dtype='uint8')
""" Utility functions operating on operation matrices """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.linalg as _spl
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
import warnings as _warnings
import collections as _collections
from . import jamiolkowski as _jam
from . import matrixtools as _mt
from . import lindbladtools as _lt
from . import basistools as _bt
from ..objects.basis import Basis as _Basis, ExplicitBasis as _ExplicitBasis, DirectSumBasis as _DirectSumBasis
from ..objects.label import Label as _Label
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
def _flat_mut_blks(i, j, blockDims):
# like _mut(i,j,dim).flatten() but works with basis *blocks*
N = sum(blockDims)
mx = _np.zeros((N, N), 'd'); mx[i, j] = 1.0
ret = _np.zeros(sum([d**2 for d in blockDims]), 'd')
i = 0; off = 0
for d in blockDims:
ret[i:i + d**2] = mx[off:off + d, off:off + d].flatten()
i += d**2; off += d
return ret
def _hack_sqrtm(A):
sqrt, _ = _spl.sqrtm(A, disp=False) # Travis found this scipy function
# to be incorrect in certain cases (we need a workaround)
if _np.any(_np.isnan(sqrt)): # this is sometimes a good fallback when sqrtm doesn't work.
ev, U = _np.linalg.eig(A)
sqrt = _np.dot(U, _np.dot(_np.diag(_np.sqrt(ev)), _np.linalg.inv(U)))
return sqrt
def fidelity(A, B):
"""
Returns the quantum state fidelity between density
matrices A and B given by :
F = Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2
To compute process fidelity, pass this function the
Choi matrices of the two processes, or just call
:function:`entanglement_fidelity` with the operation matrices.
Parameters
----------
A : numpy array
First density matrix.
B : numpy array
Second density matrix.
Returns
-------
float
The resulting fidelity.
"""
evals, U = _np.linalg.eig(A)
if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
# special case when A is rank 1, A = vec * vec^T and sqrt(A) = A
ivec = _np.argmax(evals)
vec = U[:, ivec:(ivec + 1)]
F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(B, vec)).real # vec^T * B * vec
return float(F)
evals, U = _np.linalg.eig(B)
if len([ev for ev in evals if abs(ev) > 1e-8]) == 1:
# special case when B is rank 1 (recall fidelity is symmetric in its args)
ivec = _np.argmax(evals)
vec = U[:, ivec:(ivec + 1)]
F = evals[ivec].real * _np.dot(_np.conjugate(_np.transpose(vec)), _np.dot(A, vec)).real # vec^T * A * vec
return float(F)
#if _np.array_equal(A, B): return 1.0 # HACK - some cases when A and B are perfectly equal sqrtm(A) fails...
sqrtA = _hack_sqrtm(A) # _spl.sqrtm(A)
# test the scipy sqrtm function - sometimes fails when rank deficient
#assert(_np.linalg.norm(_np.dot(sqrtA, sqrtA) - A) < 1e-8)
if _np.linalg.norm(_np.dot(sqrtA, sqrtA) - A) > 1e-8:
evals = _np.linalg.eigvals(A)
_warnings.warn(("sqrtm(A) failure when computing fidelity - beware result. "
"Maybe due to rank defficiency - eigenvalues of A are: %s") % evals)
F = (_mt.trace(_hack_sqrtm(_np.dot(sqrtA, _np.dot(B, sqrtA)))).real)**2 # Tr( sqrt{ sqrt(A) * B * sqrt(A) } )^2
return float(F)
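# Illustrative check (a sketch, not part of the original module): for the pure
# states rho = |0><0| and sigma = |+><+|, fidelity(rho, sigma) = |<0|+>|^2 = 0.5:
#
# rho = _np.array([[1, 0], [0, 0]], complex)
# sigma = 0.5 * _np.array([[1, 1], [1, 1]], complex)
# assert abs(fidelity(rho, sigma) - 0.5) < 1e-9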
def frobeniusdist(A, B):
"""
Returns the frobenius distance between gate
or density matrices A and B given by :
sqrt( sum( (A_ij-B_ij)^2 ) )
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
float
The resulting frobenius distance.
"""
return _mt.frobeniusnorm(A - B)
def frobeniusdist2(A, B):
"""
Returns the square of the frobenius distance between gate
or density matrices A and B given by :
sum( (A_ij-B_ij)^2 )
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
float
The resulting frobenius distance.
"""
return _mt.frobeniusnorm2(A - B)
def residuals(A, B):
"""
Calculate residuals between the elements of two matrices
Parameters
----------
A : numpy array
First matrix.
B : numpy array
Second matrix.
Returns
-------
np.array
residuals
"""
return (A - B).flatten()
def tracenorm(A):
"""
Compute the trace norm of matrix A given by:
Tr( sqrt{ A^dagger * A } )
Parameters
----------
A : numpy array
The matrix to compute the trace norm of.
"""
if _np.linalg.norm(A - _np.conjugate(A.T)) < 1e-8:
#Hermitian, so just sum eigenvalue magnitudes
return _np.sum(_np.abs(_np.linalg.eigvals(A)))
else:
#Sum of singular values (positive by construction)
return _np.sum(_np.linalg.svd(A, compute_uv=False))
def tracedist(A, B):
"""
Compute the trace distance between matrices A and B,
given by:
D = 0.5 * Tr( sqrt{ (A-B)^dagger * (A-B) } )
Parameters
----------
A, B : numpy array
The matrices to compute the distance between.
"""
return 0.5 * tracenorm(A - B)
def diamonddist(A, B, mxBasis='pp', return_x=False):
"""
Returns the approximate diamond norm describing the difference between gate
matrices A and B given by :
D = ||A - B ||_diamond = sup_rho || AxI(rho) - BxI(rho) ||_1
Parameters
----------
A, B : numpy array
The *gate* matrices to use when computing the diamond norm.
mxBasis : Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
return_x : bool, optional
Whether to return a numpy array encoding the state (rho) at
which the maximal trace distance occurs.
Returns
-------
dm : float
Diamond norm
W : numpy array
Only returned if `return_x = True`. Encodes the state rho, such that
`dm = trace( |(J(A)-J(B)).T * W| )`.
"""
mxBasis = _bt.build_basis_for_matrix(A, mxBasis)
#currently cvxpy is only needed for this function, so don't import until here
import cvxpy as _cvxpy
#Check if using version < 1.0
old_cvxpy = bool(tuple(map(int, _cvxpy.__version__.split('.'))) < (1, 0))
# This SDP implementation is a modified version of Kevin's code
#Compute the diamond norm
#Uses the primal SDP from arXiv:1207.5726v2, Sec 3.2
#Maximize 1/2 ( < J(phi), X > + < J(phi).dag, X.dag > )
#Subject to [[ I otimes rho0, X],
# [X.dag, I otimes rho1]] >> 0
# rho0, rho1 are density matrices
# X is linear operator
#Jamiolkowski representation of the process
# J(phi) = sum_ij Phi(Eij) otimes Eij
#< A, B > = Tr(A.dag B)
#def vec(matrix_in):
# # Stack the columns of a matrix to return a vector
# return _np.transpose(matrix_in).flatten()
#
#def unvec(vector_in):
# # Slice a vector into columns of a matrix
# d = int(_np.sqrt(vector_in.size))
# return _np.transpose(vector_in.reshape( (d,d) ))
#Code below assumes *un-normalized* Jamiol-isomorphism, so multiply by
# density mx dimension (`smallDim`) below
JAstd = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
JBstd = _jam.fast_jamiolkowski_iso_std(B, mxBasis)
#Do this *after* the fast_jamiolkowski_iso calls above because these will convert
# A & B to a "single-block" basis representation when mxBasis has multiple blocks.
dim = JAstd.shape[0]
smallDim = int(_np.sqrt(dim))
JAstd *= smallDim # see above comment
JBstd *= smallDim # see above comment
assert(dim == JAstd.shape[1] == JBstd.shape[0] == JBstd.shape[1])
#CHECK: Kevin's jamiolowski, which implements the un-normalized isomorphism:
# smallDim * _jam.jamiolkowski_iso(M, "std", "std")
#def kevins_jamiolkowski(process, representation = 'superoperator'):
# # Return the Choi-Jamiolkowski representation of a quantum process
# # Add methods as necessary to accept different representations
# process = _np.array(process)
# if representation == 'superoperator':
# # Superoperator is the linear operator acting on vec(rho)
# dimension = int(_np.sqrt(process.shape[0]))
# print "dim = ",dimension
# jamiolkowski_matrix = _np.zeros([dimension**2, dimension**2], dtype='complex')
# for i in range(dimension**2):
# Ei_vec= _np.zeros(dimension**2)
# Ei_vec[i] = 1
# output = unvec(_np.dot(process,Ei_vec))
# tmp = _np.kron(output, unvec(Ei_vec))
# print "E%d = \n" % i,unvec(Ei_vec)
# #print "contrib =",_np.kron(output, unvec(Ei_vec))
# jamiolkowski_matrix += tmp
# return jamiolkowski_matrix
#JAstd_kev = jamiolkowski(A)
#JBstd_kev = jamiolkowski(B)
#print "diff A = ",_np.linalg.norm(JAstd_kev/2.0-JAstd)
#print "diff B = ",_np.linalg.norm(JBstd_kev/2.0-JBstd)
#Kevin's function: def diamondnorm( jamiolkowski_matrix ):
jamiolkowski_matrix = JBstd - JAstd
# Here we define a bunch of auxiliary matrices because CVXPY doesn't use complex numbers
K = jamiolkowski_matrix.real # J.real
L = jamiolkowski_matrix.imag # J.imag
if old_cvxpy:
Y = _cvxpy.Variable(dim, dim) # X.real
Z = _cvxpy.Variable(dim, dim) # X.imag
sig0 = _cvxpy.Variable(smallDim, smallDim) # rho0.real
sig1 = _cvxpy.Variable(smallDim, smallDim) # rho1.real
tau0 = _cvxpy.Variable(smallDim, smallDim) # rho1.imag
tau1 = _cvxpy.Variable(smallDim, smallDim) # rho1.imag
else:
Y = _cvxpy.Variable(shape=(dim, dim)) # X.real
Z = _cvxpy.Variable(shape=(dim, dim)) # X.imag
sig0 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho0.real
sig1 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.real
tau0 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.imag
tau1 = _cvxpy.Variable(shape=(smallDim, smallDim)) # rho1.imag
ident = _np.identity(smallDim, 'd')
objective = _cvxpy.Maximize(_cvxpy.trace(K.T * Y + L.T * Z))
constraints = [_cvxpy.bmat([
[_cvxpy.kron(ident, sig0), Y, -_cvxpy.kron(ident, tau0), -Z],
[Y.T, _cvxpy.kron(ident, sig1), Z.T, -_cvxpy.kron(ident, tau1)],
[_cvxpy.kron(ident, tau0), Z, _cvxpy.kron(ident, sig0), Y],
[-Z.T, _cvxpy.kron(ident, tau1), Y.T, _cvxpy.kron(ident, sig1)]]) >> 0,
_cvxpy.bmat([[sig0, -tau0],
[tau0, sig0]]) >> 0,
_cvxpy.bmat([[sig1, -tau1],
[tau1, sig1]]) >> 0,
sig0 == sig0.T,
sig1 == sig1.T,
tau0 == -tau0.T,
tau1 == -tau1.T,
_cvxpy.trace(sig0) == 1.,
_cvxpy.trace(sig1) == 1.]
prob = _cvxpy.Problem(objective, constraints)
try:
prob.solve(solver="CVXOPT")
# prob.solve(solver="ECOS")
# prob.solve(solver="SCS")#This always fails
except _cvxpy.error.SolverError as e:
_warnings.warn("CVXPY failed: %s - diamonddist returning -2!" % str(e))
return (-2, _np.zeros((dim, dim))) if return_x else -2
except:
_warnings.warn("CVXOPT failed (uknown err) - diamonddist returning -2!")
return (-2, _np.zeros((dim, dim))) if return_x else -2
#Validate result
#assert( abs(_np.trace(_np.dot(K.T,Y.value) + _np.dot(L.T,Z.value))-prob.value) < 1e-6 ), \
# "Diamondnorm mismatch"
if return_x:
X = Y.value + 1j * Z.value # encodes state at which maximum trace-distance occurs
return prob.value, X
else:
return prob.value
def jtracedist(A, B, mxBasis='pp'): # Jamiolkowski trace distance: Tr(|J(A)-J(B)|)
"""
Compute the Jamiolkowski trace distance between operation matrices A and B,
given by:
D = 0.5 * Tr( sqrt{ (J(A)-J(B))^2 } )
where J(.) is the Jamiolkowski isomorphism map that maps a operation matrix
to it's corresponding Choi Matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the distance between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
"""
JA = _jam.fast_jamiolkowski_iso_std(A, mxBasis)
JB = _jam.fast_jamiolkowski_iso_std(B, mxBasis)
return tracedist(JA, JB)
def entanglement_fidelity(A, B, mxBasis='pp'):
"""
Returns the "entanglement" process fidelity between gate
matrices A and B given by :
F = Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2
where J(.) is the Jamiolkowski isomorphism map that maps an operation matrix
to its corresponding Choi matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the fidelity between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis of the matrices. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
"""
d2 = A.shape[0]
def isTP(x): return _np.isclose(x[0, 0], 1.0) and all(
[_np.isclose(x[0, i], 0) for i in range(d2)])
def isUnitary(x): return _np.allclose(_np.identity(d2, 'd'), _np.dot(x, x.conjugate().T))
if isTP(A) and isTP(B) and isUnitary(B): # then assume TP-like gates & use simpler formula
TrLambda = _np.trace(_np.dot(A, B.conjugate().T)) # same as using _np.linalg.inv(B)
d2 = A.shape[0]
return TrLambda / d2
JA = _jam.jamiolkowski_iso(A, mxBasis, mxBasis)
JB = _jam.jamiolkowski_iso(B, mxBasis, mxBasis)
return fidelity(JA, JB)
def average_gate_fidelity(A, B, mxBasis='pp'):
"""
Computes the average gate fidelity (AGF) between two gates.
Average gate fidelity (F_g) is related to entanglement fidelity
(F_p), via:
F_g = (d * F_p + 1)/(1 + d),
where d is the Hilbert space dimension. This formula, and the
definition of AGF, can be found in Phys. Lett. A 303 249-252 (2002).
Parameters
----------
A : array or gate
The gate to compute the AGI to B of. E.g., an imperfect
implementation of B.
B : array or gate
The gate to compute the AGI to A of. E.g., the target gate
corresponding to A.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the matrices.
Returns
-------
AGI : float
The AGI of A to B.
"""
d = int(round(_np.sqrt(A.shape[0])))
PF = entanglement_fidelity(A, B, mxBasis=mxBasis)
AGF = (d * PF + 1) / (1 + d)
return float(AGF)
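# e.g. for a single qubit (d = 2), an entanglement fidelity of 0.97 gives
# AGF = (2 * 0.97 + 1) / 3 = 0.98.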
def average_gate_infidelity(A, B, mxBasis="gm"):
"""
Computes the average gate infidelity (AGI) between two gates.
Average gate infidelity is related to entanglement infidelity
(EI) via:
AGI = d * EI / (1 + d),
where d is the Hilbert space dimension. This formula, and the
definition of AGI, can be found in Phys. Lett. A 303 249-252 (2002).
Parameters
----------
A : array or gate
The gate to compute the AGI to B of. E.g., an imperfect
implementation of B.
B : array or gate
The gate to compute the AGI to A of. E.g., the target gate
corresponding to A.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the matrices.
Returns
----------
AGI : float
The AGI of A to B.
"""
return 1 - average_gate_fidelity(A, B, mxBasis)
def entanglement_infidelity(A, B, mxBasis='pp'):
"""
Returns the entanglement infidelity (EI) between gate
matrices A and B given by :
EI = 1 - Tr( sqrt{ sqrt(J(A)) * J(B) * sqrt(J(A)) } )^2
where J(.) is the Jamiolkowski isomorphism map that maps an operation matrix
to its corresponding Choi matrix.
Parameters
----------
A, B : numpy array
The matrices to compute the fidelity between.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The basis of the matrices. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt)
(or a custom basis object).
Returns
-------
EI : float
The EI of A to B.
"""
return 1 - float(entanglement_fidelity(A, B, mxBasis))
def gateset_infidelity(mdl, target_model, itype='EI',
weights=None, mxBasis=None):
"""
Computes the average-over-gates of the infidelity between gates in `mdl`
and the gates in `target_model`. If `itype` is 'EI' then the "infidelity"
is the entanglement infidelity; if `itype` is 'AGI' then the "infidelity"
is the average gate infidelity (AGI and EI are related by a dimension
dependent constant).
This is the quantity to which RB error rates are sometimes claimed to be
directly related.
Parameters
----------
mdl : Model
The model to calculate the average infidelity, to `target_model`, of.
target_model : Model
The model to calculate the average infidelity, to `mdl`, of.
itype : str, optional
The infidelity type. Either 'EI', corresponding to entanglement
infidelity, or 'AGI', corresponding to average gate infidelity.
weights : dict, optional
If not None, a dictionary of floats, whereby the keys are the gates
in `mdl` and the values are, possibly unnormalized, probabilities.
These probabilities correspond to the weighting in the average,
so if the model contains gates A and B and weights[A] = 2 and
weights[B] = 1 then the output is Inf(A)*2/3 + Inf(B)/3 where
Inf(X) is the infidelity (to the corresponding element in the other
model) of X. If None, a uniform-average is taken, equivalent to
setting all the weights to 1.
mxBasis : {"std","gm","pp"} or Basis object, optional
The basis of the models. If None, the basis is obtained from
the model.
Returns
-------
float
The weighted average-over-gates infidelity between the two models.
"""
assert(itype == 'AGI' or itype == 'EI'), \
"The infidelity type must be `AGI` (average gate infidelity) or `EI` (entanglement infidelity)"
if mxBasis is None: mxBasis = mdl.basis
sum_of_weights = 0
I_list = []
for gate in list(target_model.operations.keys()):
if itype == 'AGI':
I = average_gate_infidelity(mdl.operations[gate], target_model.operations[gate], mxBasis=mxBasis)
if itype == 'EI':
I = entanglement_infidelity(mdl.operations[gate], target_model.operations[gate], mxBasis=mxBasis)
if weights is None:
w = 1
else:
w = weights[gate]
I_list.append(w * I)
sum_of_weights += w
assert(sum_of_weights > 0), "The sum of the weights should be positive!"
AI = _np.sum(I_list) / sum_of_weights
return AI
def unitarity(A, mxBasis="gm"):
"""
Returns the "unitarity" of a channel, as defined in Wallman et al,
``Estimating the Coherence of noise'' NJP 17 113020 (2015). The
unitarity is given by (Prop 1 in Wallman et al):
u(A) = Tr( A_u^{\dagger} A_u ) / (d^2 - 1),
where A_u is the unital submatrix of A, and d is the dimension of
the Hilbert space. When A is written in any basis for which the
first element is the normalized identity (e.g., the pp or gm
bases), the unital submatrix of A is the matrix obtained when the
top row and left-hand column are removed from A.
Parameters
----------
A : array or gate
The gate for which the unitarity is to be computed.
mxBasis : {"std","gm","pp"} or a Basis object, optional
The basis of the matrix.
Returns
----------
u : float
The unitarity of the gate A.
"""
d = int(round(_np.sqrt(A.shape[0])))
basisMxs = _bt.basis_matrices(mxBasis, A.shape[0])
if _np.allclose(basisMxs[0], _np.identity(d, 'd')):
B = A
else:
B = _bt.change_basis(A, mxBasis, "gm") # everything should be able to be put in the "gm" basis
unital = B[1:d**2, 1:d**2]
u = _np.trace(_np.dot(_np.conj(_np.transpose(unital)), unital)) / (d**2 - 1)
return u
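# Illustrative values implied by the definition above: a purely unitary channel
# has u = 1, while a channel whose unital block is (1-p) times the identity
# (e.g. uniform depolarization with rate p) gives u = (1-p)**2.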
def fidelity_upper_bound(operationMx):
"""
Get an upper bound on the fidelity of the given
operation matrix with any unitary operation matrix.
The closeness of the result to one tells
how "unitary" the action of operationMx is.
Parameters
----------
operationMx : numpy array
The operation matrix to act on.
Returns
-------
float
The resulting upper bound on fidelity(operationMx, anyUnitaryGateMx)
"""
choi = _jam.jamiolkowski_iso(operationMx, choiMxBasis="std")
choi_evals, choi_evecs = _np.linalg.eig(choi)
maxF_direct = max([_np.sqrt(max(ev.real, 0.0)) for ev in choi_evals]) ** 2
iMax = _np.argmax([ev.real for ev in choi_evals]) # index of maximum eigenval
closestVec = choi_evecs[:, iMax:(iMax + 1)]
# #print "DEBUG: closest evec = ", closestUnitaryVec
# new_evals = _np.zeros( len(closestUnitaryVec) ); new_evals[iClosestU] = 1.0
# # gives same result:
# closestUnitaryJmx = _np.dot(choi_evecs, _np.dot( _np.diag(new_evals), _np.linalg.inv(choi_evecs) ) )
closestJmx = _np.kron(closestVec, _np.transpose(_np.conjugate(closestVec))) # closest rank-1 Jmx
closestJmx /= _mt.trace(closestJmx) # normalize so trace of Jmx == 1.0
maxF = fidelity(choi, closestJmx)
if not _np.isnan(maxF):
#Uncomment for debugging
#if abs(maxF - maxF_direct) >= 1e-6:
# print "DEBUG: operationMx:\n",operationMx
# print "DEBUG: choiMx:\n",choi
# print "DEBUG choi_evals = ",choi_evals, " iMax = ",iMax
# #print "DEBUG: J = \n", closestUnitaryJmx
# print "DEBUG: eigvals(J) = ", _np.linalg.eigvals(closestJmx)
# print "DEBUG: trace(J) = ", _mt.trace(closestJmx)
# print "DEBUG: maxF = %f, maxF_direct = %f" % (maxF, maxF_direct)
# raise ValueError("ERROR: maxF - maxF_direct = %f" % (maxF -maxF_direct))
assert(abs(maxF - maxF_direct) < 1e-6)
else:
maxF = maxF_direct # case when maxF is nan, due to scipy sqrtm function being buggy - just use direct F
closestOpMx = _jam.jamiolkowski_iso_inv(closestJmx, choiMxBasis="std")
return maxF, closestOpMx
#closestU_evals, closestU_evecs = _np.linalg.eig(closestUnitaryGateMx)
#print "DEBUG: U = \n", closestUnitaryGateMx
#print "DEBUG: closest U evals = ",closestU_evals
#print "DEBUG: evecs = \n",closestU_evecs
def get_povm_map(model, povmlbl):
"""
Constructs a gate-like quantity for the POVM within `model`.
This is done by embedding the `k`-outcome classical output space of the POVM
in the Hilbert-Schmidt space of `k` by `k` density matrices by placing the
classical probability distribution along the diagonal of the density matrix.
Currently, this is only implemented for the case when `k` equals `d`, the
dimension of the POVM's Hilbert space.
Parameters
----------
model : Model
The model supplying the POVM effect vectors and the basis those
vectors are in.
povmlbl : str
The POVM label
Returns
-------
numpy.ndarray
The matrix of the "POVM map" in the `model.basis` basis.
"""
povmVectors = [v.todense()[:, None] for v in model.povms[povmlbl].values()]
if isinstance(model.basis, _DirectSumBasis): # HACK - need to get this to work with general bases
blkDims = [int(_np.sqrt(comp.dim)) for comp in model.basis.component_bases]
else:
blkDims = [int(round(_np.sqrt(model.dim)))] # [d] where density matrix is dxd
nV = len(povmVectors)
#assert(d**2 == model.dim), "Model dimension (%d) is not a perfect square!" % model.dim
#assert( nV**2 == d ), "Can only compute POVM metrics when num of effects == H space dimension"
# I don't think above assert is needed - should work in general (Robin?)
povm_mx = _np.concatenate(povmVectors, axis=1).T # "povm map" ( B(H) -> S_k ) (shape= nV,model.dim)
Sk_embedding_in_std = _np.zeros((model.dim, nV))
for i in range(nV):
Sk_embedding_in_std[:, i] = _flat_mut_blks(i, i, blkDims)
std_to_basis = model.basis.reverse_transform_matrix("std") # _bt.transform_matrix("std", model.basis, blkDims)
assert(std_to_basis.shape == (model.dim, model.dim))
return _np.dot(std_to_basis, _np.dot(Sk_embedding_in_std, povm_mx))
def povm_fidelity(model, targetModel, povmlbl):
"""
Computes the process (entanglement) fidelity between POVM maps.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return entanglement_fidelity(povm_mx, target_povm_mx, targetModel.basis)
def povm_jtracedist(model, targetModel, povmlbl):
"""
Computes the Jamiolkowski trace distance between POVM maps using :func:`jtracedist`.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return jtracedist(povm_mx, target_povm_mx, targetModel.basis)
def povm_diamonddist(model, targetModel, povmlbl):
"""
Computes the diamond distance between POVM maps using :func:`diamonddist`.
Parameters
----------
model, targetModel : Model
Models containing the two POVMs to compare.
povmlbl : str
The POVM label
Returns
-------
float
"""
povm_mx = get_povm_map(model, povmlbl)
target_povm_mx = get_povm_map(targetModel, povmlbl)
return diamonddist(povm_mx, target_povm_mx, targetModel.basis)
#decompose operation matrix into axis of rotation, etc
def decompose_gate_matrix(operationMx):
"""
Compute how the action of an operation matrix
is decomposed into fixed points, axes of rotation,
angles of rotation, and decays. Also determines
whether a gate appears to be valid and/or unitary.
Parameters
----------
operationMx : numpy array
The operation matrix to act on.
Returns
-------
dict
A dictionary describing the decomposed action. Keys are:
'isValid' : bool
whether decomposition succeeded
'isUnitary' : bool
whether operationMx describes unitary action
'fixed point' : numpy array
the fixed point of the action
'axis of rotation' : numpy array or nan
the axis of rotation
'decay of diagonal rotation terms' : float
decay of diagonal terms
'rotating axis 1' : numpy array or nan
1st axis orthogonal to axis of rotation
'rotating axis 2' : numpy array or nan
2nd axis orthogonal to axis of rotation
'decay of off diagonal rotation terms' : float
decay of off-diagonal terms
'pi rotations' : float
angle of rotation in units of pi radians
"""
op_evals, op_evecs = _np.linalg.eig(_np.asarray(operationMx))
# fp_eigenvec = None
# aor_eval = None; aor_eigenvec = None
# ra_eval = None; ra1_eigenvec = None; ra2_eigenvec = None
TOL = 1e-4 # 1e-7
unit_eval_indices = [i for (i, ev) in enumerate(op_evals) if abs(ev - 1.0) < TOL]
#unit_eval_indices = [ i for (i,ev) in enumerate(op_evals) if ev > (1.0-TOL) ]
conjpair_eval_indices = []
for (i, ev) in enumerate(op_evals):
if i in unit_eval_indices: continue # don't include the unit eigenvalues in the conjugate pair count
# don't include existing conjugate pairs
if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue
for (j, ev2) in enumerate(op_evals[i + 1:]):
if abs(ev - _np.conjugate(ev2)) < TOL:
conjpair_eval_indices.append((i, j + (i + 1)))
break # don't pair i-th eigenvalue with any other (pairs should be disjoint)
real_eval_indices = [] # indices of real eigenvalues that are not units or a part of any conjugate pair
complex_eval_indices = [] # indices of complex eigenvalues that are not units or a part of any conjugate pair
for (i, ev) in enumerate(op_evals):
if i in unit_eval_indices: continue # don't include the unit eigenvalues
if any([(i in conjpair) for conjpair in conjpair_eval_indices]): continue # don't include the conjugate pairs
if abs(ev.imag) < TOL: real_eval_indices.append(i)
else: complex_eval_indices.append(i)
#if len(real_eval_indices + unit_eval_indices) > 0:
# max_real_eval = max([ op_evals[i] for i in real_eval_indices + unit_eval_indices])
# min_real_eval = min([ op_evals[i] for i in real_eval_indices + unit_eval_indices])
#else:
# max_real_eval = _np.nan
# min_real_eval = _np.nan
#
#fixed_points = [ op_evecs[:,i] for i in unit_eval_indices ]
#real_eval_axes = [ op_evecs[:,i] for i in real_eval_indices ]
#conjpair_eval_axes = [ (op_evecs[:,i],op_evecs[:,j]) for (i,j) in conjpair_eval_indices ]
#
#ret = { }
nQubits = _np.log2(operationMx.shape[0]) / 2
if nQubits == 1:
#print "DEBUG: 1 qubit decomp --------------------------"
#print " --> evals = ", op_evals
#print " --> unit eval indices = ", unit_eval_indices
#print " --> conj eval indices = ", conjpair_eval_indices
#print " --> unpaired real eval indices = ", real_eval_indices
#Special case: if have two conjugate pairs, check if one (or both) are real
# and break the one with the largest (real) value into two unpaired real evals.
if len(conjpair_eval_indices) == 2:
iToBreak = None
if abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL and \
abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL:
iToBreak = _np.argmax([_np.real(op_evals[conjpair_eval_indices[0][0]]), _np.real(op_evals[conjpair_eval_indices[1][0]])])  # compare the eigenvalues themselves, not their indices
elif abs(_np.imag(op_evals[conjpair_eval_indices[0][0]])) < TOL: iToBreak = 0
elif abs(_np.imag(op_evals[conjpair_eval_indices[1][0]])) < TOL: iToBreak = 1
if iToBreak is not None:
real_eval_indices.append(conjpair_eval_indices[iToBreak][0])
real_eval_indices.append(conjpair_eval_indices[iToBreak][1])
del conjpair_eval_indices[iToBreak]
#Find eigenvector corresponding to fixed point (or closest we can get). This
# should be a unit eigenvalue with identity eigenvector.
if len(unit_eval_indices) > 0:
#Find linear least squares solution within possibly degenerate unit-eigenvalue eigenspace
# of eigenvector closest to identity density mx (the desired fixed point), then orthogonalize
# the remaining eigenvectors w.r.t this one.
A = _np.take(op_evecs, unit_eval_indices, axis=1)
b = _np.array([[1], [0], [0], [0]], 'd') # identity density mx
x = _np.dot(_np.linalg.pinv(_np.dot(A.T, A)), _np.dot(A.T, b))
fixedPtVec = _np.dot(A, x) # fixedPtVec / _np.linalg.norm(fixedPtVec)
fixedPtVec = fixedPtVec[:, 0]
iLargestContrib = _np.argmax(_np.abs(x)) # index of gate eigenvector which contributed the most
for ii, i in enumerate(unit_eval_indices):
if ii == iLargestContrib:
op_evecs[:, i] = fixedPtVec
iFixedPt = i
else:
op_evecs[:, i] = op_evecs[:, i] - _np.vdot(fixedPtVec, op_evecs[:, i]) * fixedPtVec
for jj, j in enumerate(unit_eval_indices[:ii]):
if jj == iLargestContrib: continue
op_evecs[:, i] = op_evecs[:, i] - _np.vdot(op_evecs[:, j], op_evecs[:, i]) * op_evecs[:, j]
op_evecs[:, i] /= _np.linalg.norm(op_evecs[:, i])
elif len(real_eval_indices) > 0:
# just take eigenvector corresponding to the largest real eigenvalue?
#iFixedPt = real_eval_indices[ _np.argmax( [ op_evals[i] for i in real_eval_indices ] ) ]
# ...OR take eigenvector corresponding to a real unpaired eigenvalue closest to identity:
idmx = _np.array([1, 0, 0, 0], 'd')  # identity density mx (as a flat vector)
iFixedPt = real_eval_indices[_np.argmin([_np.linalg.norm(op_evecs[:, i] - idmx) for i in real_eval_indices])]
else:
#No unit or real eigenvalues => two complex conjugate pairs or unpaired complex evals --> bail out
return {'isValid': False, 'isUnitary': False, 'msg': "All evals are complex."}
#Find eigenvector corresponding to axis of rotation: find the *largest* unpaired real/unit eval
indsToConsider = (unit_eval_indices + real_eval_indices)[:]
del indsToConsider[indsToConsider.index(iFixedPt)] # don't consider fixed pt evec
if len(indsToConsider) > 0:
iRotAxis = indsToConsider[_np.argmax([op_evals[i] for i in indsToConsider])]
else:
#No unit or real eigenvalues => an unpaired complex eval --> bail out
return {'isValid': False, 'isUnitary': False, 'msg': "Unpaired complex eval."}
#There are only 2 eigenvalues left -- hopefully a conjugate pair giving rotation
inds = list(range(4))
del inds[inds.index(iFixedPt)]
del inds[inds.index(iRotAxis)]
if abs(op_evals[inds[0]] - _np.conjugate(op_evals[inds[1]])) < TOL:
iConjPair1, iConjPair2 = inds
else:
return {'isValid': False, 'isUnitary': False, 'msg': "No conjugate pair for rotn."}
return {'isValid': True,
'isUnitary': bool(len(unit_eval_indices) >= 2),
'fixed point': op_evecs[:, iFixedPt],
'axis of rotation': op_evecs[:, iRotAxis],
'rotating axis 1': op_evecs[:, iConjPair1],
'rotating axis 2': op_evecs[:, iConjPair2],
'decay of diagonal rotation terms': 1.0 - abs(op_evals[iRotAxis]),
'decay of off diagonal rotation terms': 1.0 - abs(op_evals[iConjPair1]),
'pi rotations': _np.angle(op_evals[iConjPair1]) / _np.pi,
'msg': "Success"}
else:
return {'isValid': False,
'isUnitary': False,
'msg': "Unsupported number of qubits: %d" % nQubits}
def state_to_dmvec(psi):
"""
Compute the vectorized density matrix which acts as the state `psi`.
This is just the outer product map |psi> => |psi><psi| with the
output flattened, i.e. `dot(psi, conjugate(psi).T)`.
Parameters
----------
psi : numpy array
The state vector.
Returns
-------
numpy array
The vectorized density matrix.
"""
psi = psi.reshape((psi.size, 1)) # convert to (N,1) shape if necessary
dm = _np.dot(psi, _np.conjugate(psi.T))
return dm.flatten()
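# Illustrative usage sketch (hypothetical helper, not part of the original module):
# vectorizing the single-qubit pure state |0> gives the flattened |0><0| matrix.
def _example_state_to_dmvec():
    psi = _np.array([1.0, 0.0])              # |0> state vector
    dmvec = state_to_dmvec(psi)              # flatten(|0><0|) == [1, 0, 0, 0]
    assert _np.allclose(dmvec, [1.0, 0.0, 0.0, 0.0])
    return dmvec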
def dmvec_to_state(dmvec, tol=1e-6):
"""
Compute the pure state describing the action of density matrix vector `dmvec`.
If `dmvec` represents a mixed state, ValueError is raised.
Parameters
----------
dmvec : numpy array
The vectorized density matrix, assumed to be in the standard (matrix
unit) basis.
tol : float, optional
tolerance for determining whether an eigenvalue is zero.
Returns
-------
numpy array
The pure state, as a column vector of shape = (N,1)
"""
d2 = dmvec.size; d = int(round(_np.sqrt(d2)))
dm = dmvec.reshape((d, d))
evals, evecs = _np.linalg.eig(dm)
k = None
for i, ev in enumerate(evals):
if abs(ev) > tol:
if k is None: k = i
else: raise ValueError("Cannot convert mixed dmvec to pure state!")
if k is None: raise ValueError("Cannot convert zero dmvec to puse state!")
psi = evecs[:, k] * _np.sqrt(evals[k])
psi.shape = (d, 1)
return psi
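# Illustrative usage sketch (hypothetical helper): a rank-1 density-matrix vector is
# converted back to a pure state; the result is a (2,1) column, defined up to phase.
def _example_dmvec_to_state():
    dmvec = _np.array([1.0, 0.0, 0.0, 0.0])  # flattened |0><0|
    psi = dmvec_to_state(dmvec)
    assert psi.shape == (2, 1) and _np.isclose(abs(psi[0, 0]), 1.0)
    return psi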
def unitary_to_process_mx(U):
"""
Compute the super-operator which acts on (row)-vectorized
density matrices from a unitary operator (matrix) U which
acts on state vectors. This super-operator is given by
the tensor product of U and conjugate(U), i.e. kron(U,U.conj).
Parameters
----------
U : numpy array
The unitary matrix which acts on state vectors.
Returns
-------
numpy array
The super-operator process matrix.
"""
# U -> kron(U,Uc) since U rho U_dag -> kron(U,Uc)
# since AXB --row-vectorize--> kron(A,B.T)*vec(X)
return _np.kron(U, _np.conjugate(U))
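# Illustrative usage sketch (hypothetical helper): for the Pauli-X unitary the
# process matrix is kron(X, conj(X)) == kron(X, X), a 4x4 permutation matrix.
def _example_unitary_to_process_mx():
    X = _np.array([[0, 1], [1, 0]], 'complex')
    superop = unitary_to_process_mx(X)       # acts on row-vectorized density matrices
    assert _np.allclose(superop, _np.kron(X, X))
    return superop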
def process_mx_to_unitary(superop):
"""
Compute the unitary corresponding to the (unitary-action!)
super-operator `superop` which acts on (row)-vectorized
density matrices. The super-operator must be of the form
`kron(U,U.conj)` or an error will be thrown.
Parameters
----------
superop : numpy array
The superoperator matrix which acts on vectorized
density matrices (in the 'std' matrix-unit basis).
Returns
-------
numpy array
The unitary matrix which acts on state vectors.
"""
d2 = superop.shape[0]; d = int(round(_np.sqrt(d2)))
U = _np.empty((d, d), 'complex')
for i in range(d):
densitymx_i = _np.zeros((d, d), 'd'); densitymx_i[i, i] = 1.0 # |i><i|
UiiU = _np.dot(superop, densitymx_i.flat).reshape((d, d)) # U|i><i|U^dag
if i > 0:
j = 0
            densitymx_ij = _np.zeros((d, d), 'd'); densitymx_ij[i, j] = 1.0  # |i><j|
UijU = _np.dot(superop, densitymx_ij.flat).reshape((d, d)) # U|i><j|U^dag
Uj = U[:, j]
Ui = _np.dot(UijU, Uj)
else:
##method1: use random state projection
#rand_state = _np.random.rand(d)
#projected_rand_state = _np.dot(UiiU, rand_state)
#assert(_np.linalg.norm(projected_rand_state) > 1e-8)
#projected_rand_state /= _np.linalg.norm(projected_rand_state)
#Ui = projected_rand_state
#method2: get eigenvector corresponding to largest eigenvalue (more robust)
evals, evecs = _np.linalg.eig(UiiU)
imaxeval = _np.argmax(_np.abs(evals))
#TODO: assert other eigenvalues are much smaller?
Ui = evecs[:, imaxeval]
Ui /= _np.linalg.norm(Ui)
U[:, i] = Ui
return U
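# Illustrative usage sketch (hypothetical helper): the unitary recovered from a
# process matrix may differ from the original by a global phase, so we compare
# the re-computed process matrix instead of the unitaries themselves.
def _example_process_mx_to_unitary():
    X = _np.array([[0, 1], [1, 0]], 'complex')
    superop = unitary_to_process_mx(X)
    U = process_mx_to_unitary(superop)
    assert _np.allclose(unitary_to_process_mx(U), superop)
    return U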
def spam_error_generator(spamvec, target_spamvec, mxBasis, typ="logGTi"):
"""
    Construct an error generator from a SPAM vector and its target.
Computes the value of the error generator given by
`errgen = log( diag(spamvec / target_spamvec) )`, where division is
element-wise. This results in a (non-unique) error generator matrix
`E` such that `spamvec = exp(E) * target_spamvec`.
Note: This is currently of very limited use, as the above algorithm fails
whenever `target_spamvec` has zero elements where `spamvec` doesn't.
Parameters
----------
spamvec : ndarray
The SPAM vector.
target_spamvec : ndarray
The target SPAM vector.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
typ : {"logGTi"}
The type of error generator to compute. Allowed values are:
- "logGTi" : errgen = log( diag(spamvec / target_spamvec) )
Returns
-------
errgen : ndarray
The error generator.
"""
# Compute error generator for rho: rho = exp(E)rho0 => rho = A*rho0 => A = diag(rho/rho0)
assert(typ == "logGTi"), "Only logGTi type is supported so far"
d2 = len(spamvec)
errgen = _np.zeros((d2, d2), 'd') # type assumes this is density-mx evolution
diags = []
for a, b in zip(spamvec, target_spamvec):
if _np.isclose(b, 0.0):
if _np.isclose(a, b): d = 1
else: raise ValueError("Cannot take spam_error_generator")
else:
d = a / b
diags.append(d)
errgen[_np.diag_indices(d2)] = diags
return _spl.logm(errgen)
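# Illustrative usage sketch (hypothetical helper): a SPAM vector whose Z-component
# has decayed by 10% relative to its target yields a diagonal error generator with
# log(0.9) in the corresponding diagonal entry. The Pauli-product representation of
# |0><0| used below, (1,0,0,1)/sqrt(2), is an assumption of this example.
def _example_spam_error_generator():
    target = _np.array([1.0, 0.0, 0.0, 1.0]) / _np.sqrt(2)
    noisy = _np.array([1.0, 0.0, 0.0, 0.9]) / _np.sqrt(2)
    errgen = spam_error_generator(noisy, target, 'pp')
    assert _np.isclose(errgen[3, 3], _np.log(0.9))
    return errgen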
def error_generator(gate, target_op, mxBasis, typ="logG-logT"):
"""
Construct the error generator from a gate and its target.
Computes the value of the error generator given by
errgen = log( inv(target_op) * gate ), so that
gate = target_op * exp(errgen).
Parameters
----------
gate : ndarray
The operation matrix
target_op : ndarray
The target operation matrix
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
typ : {"logG-logT", "logTiG", "logGTi"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
- "logGTi" : errgen = log( dot(gate,inv(target_op)) )
Returns
-------
errgen : ndarray
The error generator.
"""
TOL = 1e-8
if typ == "logG-logT":
try:
logT = _mt.unitary_superoperator_matrix_log(target_op, mxBasis)
except AssertionError: # if not unitary, fall back to just taking the real log
logT = _mt.real_matrix_log(target_op, "raise", TOL) # make a fuss if this can't be done
logG = _mt.approximate_matrix_log(gate, logT)
# Both logG and logT *should* be real, so we just take the difference.
if _np.linalg.norm(_np.imag(logG)) < TOL and \
_np.linalg.norm(_np.imag(logT)) < TOL:
return _np.real(logG - logT)
#Otherwise, there could be branch cut issues or worse, so just
# raise an error for now (maybe return a dummy if needed elsewhere?)
raise ValueError("Could not construct a real logarithms for the "
"'logG-logT' generator. Perhaps you should use "
"the 'logTiG' or 'logGTi' generator instead?")
elif typ == "logTiG":
target_op_inv = _spl.inv(target_op)
try:
errgen = _mt.near_identity_matrix_log(_np.dot(target_op_inv, gate), TOL)
except AssertionError: # not near the identity, fall back to the real log
_warnings.warn(("Near-identity matrix log failed; falling back "
"to approximate log for logTiG error generator"))
errgen = _mt.real_matrix_log(_np.dot(target_op_inv, gate), "warn", TOL)
if _np.linalg.norm(errgen.imag) > TOL:
_warnings.warn("Falling back to approximate log for logTiG error generator")
errgen = _mt.approximate_matrix_log(_np.dot(target_op_inv, gate),
_np.zeros(gate.shape, 'd'), TOL=TOL)
elif typ == "logGTi":
target_op_inv = _spl.inv(target_op)
try:
errgen = _mt.near_identity_matrix_log(_np.dot(gate, target_op_inv), TOL)
except AssertionError as e: # not near the identity, fall back to the real log
_warnings.warn(("Near-identity matrix log failed; falling back "
"to approximate log for logGTi error generator:\n%s") % str(e))
errgen = _mt.real_matrix_log(_np.dot(gate, target_op_inv), "warn", TOL)
if _np.linalg.norm(errgen.imag) > TOL:
_warnings.warn("Falling back to approximate log for logGTi error generator")
errgen = _mt.approximate_matrix_log(_np.dot(gate, target_op_inv),
_np.zeros(gate.shape, 'd'), TOL=TOL)
else:
raise ValueError("Invalid error-generator type: %s" % typ)
if _np.linalg.norm(_np.imag(errgen)) > TOL:
raise ValueError("Could not construct a real generator!")
#maybe this is actually ok, but a complex error generator will
# need to be plotted differently, etc -- TODO
return _np.real(errgen)
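# Illustrative usage sketch (hypothetical helper): for a slightly depolarized gate
# whose target is the identity superoperator, the "logGTi" error generator
# satisfies gate == expm(errgen) * target (here target is the identity).
def _example_error_generator():
    target = _np.identity(4, 'd')
    gate = _np.diag([1.0, 0.95, 0.95, 0.95])
    errgen = error_generator(gate, target, 'pp', typ="logGTi")
    assert _np.allclose(_spl.expm(errgen), gate)
    return errgen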
def operation_from_error_generator(error_gen, target_op, typ="logG-logT"):
"""
Construct a gate from an error generator and a target gate.
    Inverts the computation done in :func:`error_generator` and
returns the value of the gate given by
gate = target_op * exp(error_gen).
Parameters
----------
error_gen : ndarray
The error generator matrix
target_op : ndarray
The target operation matrix
typ : {"logG-logT", "logTiG"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
Returns
-------
ndarray
The operation matrix.
"""
if typ == "logG-logT":
return _spl.expm(error_gen + _spl.logm(target_op))
elif typ == "logTiG":
return _np.dot(target_op, _spl.expm(error_gen))
elif typ == "logGTi":
return _np.dot(_spl.expm(error_gen), target_op)
else:
raise ValueError("Invalid error-generator type: %s" % typ)
def std_scale_factor(dim, projection_type):
"""
    Returns the multiplicative scaling that should be applied to the output of
    :func:`std_error_generators`, before using them as projectors, in order to
    compute the "standard" reported projection onto that type of error (i.e.
    the coefficient of the standard generator terms built from un-normalized Paulis).
Parameters
----------
dim : int
The dimension of the error generators; also the associated gate
dimension. This must be a perfect square, as `sqrt(dim)`
is the dimension of density matrices. For a single qubit, dim == 4.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type/class of error generators to get the scaling for.
Returns
-------
float
"""
d2 = dim
d = int(_np.sqrt(d2))
if projection_type == "hamiltonian":
scaleFctr = 1.0 / (d * _np.sqrt(2))
# so projection is coefficient of Hamiltonian term (w/un-normalized Paulis)
elif projection_type == "stochastic":
scaleFctr = 1.0 / d
# so projection is coefficient of P*rho*P stochastic term in generator (w/un-normalized Paulis)
elif projection_type == "affine":
scaleFctr = 1.0 # so projection is coefficient of P affine term in generator (w/un-normalized Paulis)
else:
raise ValueError("Invalid projection_type argument: %s"
% projection_type)
return scaleFctr
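# Illustrative usage sketch (hypothetical helper): for a single qubit (dim == 4, so
# d == 2) the Hamiltonian scaling is 1/(2*sqrt(2)) and the stochastic scaling is 1/2.
def _example_std_scale_factor():
    assert _np.isclose(std_scale_factor(4, "hamiltonian"), 1.0 / (2 * _np.sqrt(2)))
    assert _np.isclose(std_scale_factor(4, "stochastic"), 0.5)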
def std_error_generators(dim, projection_type, projection_basis):
"""
Compute the gate error generators for a standard set of errors which
correspond to "Hamiltonian"- or "Stochastic"-type errors in terms of the
elements of the specified basis.
Parameters
----------
dim : int
The dimension of the error generators to be returned. This is also the
associated gate dimension, and must be a perfect square, as `sqrt(dim)`
is the dimension of density matrices. For a single qubit, dim == 4.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type of error generators to construct. If "hamiltonian", then the
Hamiltonian generators which take a density matrix rho -> -i*[ H, rho ]
for Pauli-product matrix H. If "stochastic", then the Stochastic error
generators which take rho -> P*rho*P for Pauli-product matrix P. If
"affine", then the affine generators which take rho -> P.
projection_basis : {'std', 'gm', 'pp', 'qt'}
Which basis is used to construct the error generators. Allowed
values are Matrix-unit (std), Gell-Mann (gm),
Pauli-product (pp) and Qutrit (qt).
Returns
-------
generators : numpy.ndarray
An array of shape (#basis-elements,dim,dim). `generators[i]` is the
generator corresponding to the ith basis matrix in the
*std* (matrix unit) basis. (Note that in most cases #basis-elements
== dim, so the size of `generators` is (dim,dim,dim) ). Each
generator is normalized so that as a vector it has unit Frobenius norm.
"""
d2 = dim
d = int(_np.sqrt(d2))
#Get a list of the basis matrices
mxs = _bt.basis_matrices(projection_basis, d2)
assert(len(mxs) <= d2) # OK if there are fewer basis matrices (e.g. for bases w/multiple blocks)
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
lindbladMxs = _np.empty((len(mxs), d2, d2), 'complex')
for i, basisMx in enumerate(mxs):
if projection_type == "hamiltonian":
lindbladMxs[i] = _lt.hamiltonian_to_lindbladian(basisMx) # in std basis
elif projection_type == "stochastic":
lindbladMxs[i] = _lt.stochastic_lindbladian(basisMx) # in std basis
elif projection_type == "affine":
lindbladMxs[i] = _lt.affine_lindbladian(basisMx) # in std basis
else:
raise ValueError("Invalid projection_type argument: %s"
% projection_type)
norm = _np.linalg.norm(lindbladMxs[i].flat)
if not _np.isclose(norm, 0):
lindbladMxs[i] /= norm # normalize projector
assert(_np.isclose(_np.linalg.norm(lindbladMxs[i].flat), 1.0))
return lindbladMxs
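# Illustrative usage sketch (hypothetical helper): the single-qubit Hamiltonian
# generators in the Pauli-product basis form an array of four 4x4 matrices (the
# identity element gives a zero generator, which is left un-normalized).
def _example_std_error_generators():
    gens = std_error_generators(4, "hamiltonian", "pp")
    assert gens.shape == (4, 4, 4)
    return gens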
def std_errgen_projections(errgen, projection_type, projection_basis,
mxBasis="gm", return_generators=False,
return_scale_fctr=False):
"""
Compute the projections of a gate error generator onto generators
for a standard set of errors constructed from the elements of a
specified basis.
Parameters
----------
errgen: : ndarray
The error generator matrix to project.
projection_type : {"hamiltonian", "stochastic", "affine"}
The type of error generators to project the gate error generator onto.
If "hamiltonian", then use the Hamiltonian generators which take a density
matrix rho -> -i*[ H, rho ] for Pauli-product matrix H. If "stochastic",
then use the Stochastic error generators which take rho -> P*rho*P for
Pauli-product matrix P (recall P is self adjoint). If "affine", then
use the affine error generators which take rho -> P (superop is |P>><<1|).
projection_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
return_generators : bool, optional
If True, return the error generators projected against along with the
        projection values themselves.
return_scale_fctr : bool, optional
        If True, also return the scaling factor that was used to multiply the
projections onto *normalized* error generators to get the returned
values.
Returns
-------
projections : numpy.ndarray
An array of length equal to the number of elements in the
        basis used to construct the projectors. Typically this is
        also the dimension of the gate (e.g. 4 for a single qubit).
generators : numpy.ndarray
Only returned when `return_generators == True`. An array of shape
(#basis-els,op_dim,op_dim) such that `generators[i]` is the
generator corresponding to the i-th basis element. Note
        that these matrices are in the *std* (matrix unit) basis.
scale : float
        Only returned when `return_scale_fctr == True`. A multiplicative
scaling constant that *has already been applied* to `projections`.
"""
if isinstance(mxBasis, _Basis):
errgen_std = _bt.change_basis(errgen, mxBasis, mxBasis.equivalent('std'))
#expand operation matrix so it acts on entire space of dmDim x dmDim density matrices
errgen_std = _bt.resize_std_mx(errgen_std, 'expand', mxBasis.equivalent('std'),
mxBasis.simple_equivalent('std'))
else:
errgen_std = _bt.change_basis(errgen, mxBasis, "std")
d2 = errgen_std.shape[0]
d = int(_np.sqrt(d2))
# nQubits = _np.log2(d)
    #Get a list of the d2 generators (in correspondence with the
# Pauli-product matrices given by _basis.pp_matrices(d) ).
lindbladMxs = std_error_generators(d2, projection_type, projection_basis) # in std basis
    assert(len(lindbladMxs) <= d2)  # can be fewer projection matrices (== length of projection_basis)
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
projections = _np.empty(len(lindbladMxs), 'd')
for i, lindbladMx in enumerate(lindbladMxs):
proj = _np.real_if_close(_np.vdot(errgen_std.flatten(), lindbladMx.flatten()), tol=1000)
# # DEBUG - for checking why perfect gates gave weird projections --> log ambiguity
# print("DB: rawproj(%d) = " % i, proj)
# errgen_pp = errgen.copy() #_bt.change_basis(errgen_std,"std","pp")
# lindbladMx_pp = _bt.change_basis(lindbladMx,"std","pp")
# if proj > 1.0:
# for k in range(errgen_std.shape[0]):
# for j in range(errgen_std.shape[1]):
# if abs(errgen_pp[k,j].conjugate() * lindbladMx_pp[k,j]) > 1e-2:
# print(" [%d,%d]: + " % (k,j), errgen_pp[k,j].conjugate(),
# "*", lindbladMx_pp[k,j],
# "=", (errgen_pp[k,j].conjugate() * lindbladMx_pp[i,j]))
#assert(_np.isreal(proj)), "non-real projection: %s" % str(proj) #just a warning now
if not _np.isreal(proj):
_warnings.warn("Taking abs() of non-real projection: %s" % str(proj))
proj = abs(proj)
projections[i] = proj
scaleFctr = std_scale_factor(d2, projection_type)
projections *= scaleFctr
lindbladMxs /= scaleFctr # so projections * generators give original
ret = [projections]
if return_generators: ret.append(lindbladMxs)
if return_scale_fctr: ret.append(scaleFctr)
return ret[0] if len(ret) == 1 else tuple(ret)
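# Illustrative usage sketch (hypothetical helper): projecting the zero error
# generator onto the single-qubit stochastic generators gives all-zero projections.
def _example_std_errgen_projections():
    errgen = _np.zeros((4, 4), 'd')
    projs = std_errgen_projections(errgen, "stochastic", "pp", mxBasis="pp")
    assert _np.allclose(projs, 0)
    return projs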
def _assert_shape(ar, shape, sparse=False):
""" Asserts ar.shape == shape ; works with sparse matrices too """
if not sparse or len(shape) == 2:
assert(ar.shape == shape), \
"Shape mismatch: %s != %s!" % (str(ar.shape), str(shape))
else:
if len(shape) == 3: # first "dim" is a list
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or ar[0].shape == (shape[1], shape[2])), \
"Shape mismatch: %s != %s!" % (str(ar[0].shape), str(shape[1:]))
elif len(shape) == 4: # first 2 dims are lists
assert(len(ar) == shape[0]), \
"Leading dim mismatch: %d != %d!" % (len(ar), shape[0])
assert(shape[0] == 0 or len(ar[0]) == shape[1]), \
"Second dim mismatch: %d != %d!" % (len(ar[0]), shape[1])
assert(shape[0] == 0 or shape[1] == 0 or ar[0][0].shape == (shape[2], shape[3])), \
"Shape mismatch: %s != %s!" % (str(ar[0][0].shape), str(shape[2:]))
else:
raise NotImplementedError("Number of dimensions must be <= 4!")
def lindblad_error_generators(dmbasis_ham, dmbasis_other, normalize,
other_mode="all"):
"""
Compute the superoperator-generators corresponding to Lindblad terms.
This routine computes the Hamiltonian and Non-Hamiltonian ("other")
superoperator generators which correspond to the terms of the Lindblad
expression:
L(rho) = sum_i( h_i [A_i,rho] ) +
sum_ij( o_ij * (B_i rho B_j^dag -
0.5( rho B_j^dag B_i + B_j^dag B_i rho) ) )
where {A_i} and {B_i} are bases (possibly the same) for Hilbert Schmidt
(density matrix) space with the identity element removed so that each
A_i and B_i are traceless. If we write L(rho) in terms of superoperators
H_i and O_ij,
L(rho) = sum_i( h_i H_i(rho) ) + sum_ij( o_ij O_ij(rho) )
then this function computes the matrices for H_i and O_ij using the given
density matrix basis. Thus, if `dmbasis` is expressed in the standard
basis (as it should be), the returned matrices are also in this basis.
    If these elements are used as projectors it may be useful to normalize
them (by setting `normalize=True`). Note, however, that these projectors
are not all orthogonal - in particular the O_ij's are not orthogonal to
one another.
Parameters
----------
dmbasis_ham : list
A list of basis matrices {B_i} *including* the identity as the first
element, for the returned Hamiltonian-type error generators. This
argument is easily obtained by call to :func:`pp_matrices` or a
similar function. The matrices are expected to be in the standard
basis, and should be traceless except for the identity. Matrices
should be NumPy arrays or SciPy CSR sparse matrices.
dmbasis_other : list
A list of basis matrices {B_i} *including* the identity as the first
element, for the returned Stochastic-type error generators. This
argument is easily obtained by call to :func:`pp_matrices` or a
similar function. The matrices are expected to be in the standard
basis, and should be traceless except for the identity. Matrices
should be NumPy arrays or SciPy CSR sparse matrices.
normalize : bool
Whether or not generators should be normalized so that
numpy.linalg.norm(generator.flat) == 1.0 Note that the generators
will still, in general, be non-orthogonal.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error generators to construct.
Allowed values are: `"diagonal"` (only the diagonal Stochastic
generators are returned; that is, the generators corresponding to the
`i==j` terms in the Lindblad expression.), `"diag_affine"` (diagonal +
affine generators), and `"all"` (all generators).
Returns
-------
ham_generators : numpy.ndarray or list of SciPy CSR matrices
        If dense matrices were given, an array of shape (d-1,d,d), where d is
the size of the basis, i.e. d == len(dmbasis). `ham_generators[i]`
gives the matrix for H_i. If sparse matrices were given, a list
of shape (d,d) CSR matrices.
other_generators : numpy.ndarray or list of lists of SciPy CSR matrices
        If dense matrices were given, an array of shape (d-1,d-1,d,d),
(2,d-1,d,d), or (d-1,d,d), where d is the size of the basis, for
`other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
respectively. For instance, in the `"all"` case,
`other_generators[i,j]` gives the matrix for O_ij. If sparse matrices
        were given, all but the final 2 dimensions are lists (e.g. the
`"all"` case returns a list of lists of shape (d,d) CSR matrices).
"""
if dmbasis_ham is not None:
ham_mxs = dmbasis_ham # list of basis matrices (assumed to be in std basis)
ham_nMxs = len(ham_mxs) # usually == d2, but not necessary (e.g. w/maxWeight)
else:
ham_nMxs = 0
if dmbasis_other is not None:
other_mxs = dmbasis_other # list of basis matrices (assumed to be in std basis)
other_nMxs = len(other_mxs) # usually == d2, but not necessary (e.g. w/maxWeight)
else:
other_nMxs = 0
if ham_nMxs > 0:
d = ham_mxs[0].shape[0]
sparse = _sps.issparse(ham_mxs[0])
elif other_nMxs > 0:
d = other_mxs[0].shape[0]
sparse = _sps.issparse(other_mxs[0])
else:
d = 0 # will end up returning no generators
sparse = False
d2 = d**2
normfn = _spsl.norm if sparse else _np.linalg.norm
identityfn = (lambda d: _sps.identity(d, 'd', 'csr')) if sparse else _np.identity
if ham_nMxs > 0 and other_nMxs > 0:
assert(other_mxs[0].shape[0] == ham_mxs[0].shape[0]), \
"Bases must have the same dimension!"
if ham_nMxs > 0:
assert(_np.isclose(normfn(ham_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
"The first matrix in 'dmbasis_ham' must be the identity"
hamLindbladTerms = [None] * (ham_nMxs - 1) if sparse else \
_np.empty((ham_nMxs - 1, d2, d2), 'complex')
for i, B in enumerate(ham_mxs[1:]): # don't include identity
hamLindbladTerms[i] = _lt.hamiltonian_to_lindbladian(B, sparse) # in std basis
if normalize:
norm = normfn(hamLindbladTerms[i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
hamLindbladTerms[i] /= norm # normalize projector
assert(_np.isclose(normfn(hamLindbladTerms[i]), 1.0))
else:
hamLindbladTerms = None
if other_nMxs > 0:
assert(_np.isclose(normfn(other_mxs[0] - identityfn(d) / _np.sqrt(d)), 0)),\
"The first matrix in 'dmbasis_other' must be the identity"
if other_mode == "diagonal":
otherLindbladTerms = [None] * (other_nMxs - 1) if sparse else \
_np.empty((other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
otherLindbladTerms[i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
if normalize:
norm = normfn(otherLindbladTerms[i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[i] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[i]), 1.0))
elif other_mode == "diag_affine":
            otherLindbladTerms = [[None] * (other_nMxs - 1) for _ in range(2)] if sparse else \
                _np.empty((2, other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
otherLindbladTerms[0][i] = _lt.nonham_lindbladian(Lm, Lm, sparse)
otherLindbladTerms[1][i] = _lt.affine_lindbladian(Lm, sparse)
if normalize:
for k in (0, 1):
norm = normfn(otherLindbladTerms[k][i]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[k][i] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[k][i]), 1.0))
else: # other_mode == "all"
otherLindbladTerms = \
[[None] * (other_nMxs - 1) for i in range(other_nMxs - 1)] if sparse else \
_np.empty((other_nMxs - 1, other_nMxs - 1, d2, d2), 'complex')
for i, Lm in enumerate(other_mxs[1:]): # don't include identity
for j, Ln in enumerate(other_mxs[1:]): # don't include identity
#print("DEBUG NONHAM LIND (%d,%d)" % (i,j)) #DEBUG!!!
otherLindbladTerms[i][j] = _lt.nonham_lindbladian(Lm, Ln, sparse)
if normalize:
norm = normfn(otherLindbladTerms[i][j]) # same as norm(term.flat)
if not _np.isclose(norm, 0):
otherLindbladTerms[i][j] /= norm # normalize projector
assert(_np.isclose(normfn(otherLindbladTerms[i][j]), 1.0))
#I don't think this is true in general, but appears to be true for "pp" basis (why?)
#if j < i: # check that other[i,j] == other[j,i].C, i.e. other is Hermitian
# assert(_np.isclose(_np.linalg.norm(
# otherLindbladTerms[i][j]-
# otherLindbladTerms[j][i].conjugate()),0))
else:
otherLindbladTerms = None
#Check for orthogonality - otherLindblad terms are *not* orthogonal!
#N = otherLindbladTerms.shape[0]
#for i in range(N):
# for j in range(N):
# v1 = otherLindbladTerms[i,j].flatten()
# for k in range(N):
# for l in range(N):
# if k == i and l == j: continue
# v2 = otherLindbladTerms[k,l].flatten()
# if not _np.isclose(0, _np.vdot(v1,v2)):
# print("%d,%d <-> %d,%d dot = %g [%g]" % (i,j,k,l,_np.vdot(v1,v2),_np.dot(v1,v2)))
# #print("v1 = ",v1)
# #print("v2 = ",v2)
# # assert(False)
# #assert(_np.isclose(0, _np.vdot(v1,v2)))
#Check hamiltonian error gens are orthogonal to others
#N = otherLindbladTerms.shape[0]
#for i,hlt in enumerate(hamLindbladTerms):
# v1 = hlt.flatten()
# for j in range(N):
# for k in range(N):
# v2 = otherLindbladTerms[j,k].flatten()
# assert(_np.isclose(0, _np.vdot(v1,v2)))
return hamLindbladTerms, otherLindbladTerms
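# Illustrative usage sketch (hypothetical helper): building the single-qubit
# Hamiltonian and "other" Lindblad generators from the dense Pauli-product basis
# matrices. The identity is excluded from the outputs, leaving 3 Hamiltonian and
# 3x3 "other" generators, each a 4x4 superoperator in the std basis.
def _example_lindblad_error_generators():
    pp_mxs = _bt.basis_matrices('pp', 4)     # I, X, Y, Z (normalized, std-basis matrices)
    ham_gens, other_gens = lindblad_error_generators(pp_mxs, pp_mxs,
                                                     normalize=True, other_mode="all")
    assert ham_gens.shape == (3, 4, 4) and other_gens.shape == (3, 3, 4, 4)
    return ham_gens, other_gens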
def lindblad_errgen_projections(errgen, ham_basis,
other_basis, mxBasis="gm",
normalize=True, return_generators=False,
other_mode="all", sparse=False):
"""
Compute the projections of a gate error generator onto generators
for the Lindblad-term errors when expressed in the given
"projection basis".
Parameters
----------
errgen: : ndarray
The error generator matrix to project.
ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct the Hamiltonian-type Lindblad error
        generators. Allowed values are Matrix-unit (std), Gell-Mann (gm),
        Pauli-product (pp), and Qutrit (qt), list of numpy arrays, or a
        custom basis object.
other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
        The basis used to construct the Stochastic-type Lindblad error
        generators. Allowed values are Matrix-unit (std), Gell-Mann (gm),
        Pauli-product (pp), and Qutrit (qt), list of numpy arrays, or a
        custom basis object.
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source basis. Allowed values are Matrix-unit (std),
Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
normalize : bool, optional
Whether or not the generators being projected onto are normalized, so
that numpy.linalg.norm(generator.flat) == 1.0. Note that the generators
will still, in general, be non-orthogonal.
return_generators : bool, optional
If True, return the error generators projected against along with the
        projection values themselves.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections to obtain.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
sparse : bool, optional
Whether to create sparse or dense basis matrices when strings
are given as `ham_basis` and `other_basis`
Returns
-------
ham_projections : numpy.ndarray
An array of length d-1, where d is the dimension of the gate,
giving the projections onto the Hamiltonian-type Lindblad terms.
other_projections : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
of the gate, for `other_mode` equal to `"all"`, `"diag_affine"`, or
`"diagonal"`, respectively. Values give the projections onto the
non-Hamiltonian-type Lindblad terms.
ham_generators : numpy.ndarray
The Hamiltonian-type Lindblad term generators, as would be returned
from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
Shape is (d-1,d,d), and `ham_generators[i]` is in the standard basis.
other_generators : numpy.ndarray
The Stochastic-type Lindblad term generators, as would be returned
from `lindblad_error_generators(pp_matrices(sqrt(d)), normalize)`.
Shape is (d-1,d-1,d,d), (2,d-1,d,d), or (d-1,d,d) for `other_mode`
equal to `"all"`, `"diag_affine"`, or `"diagonal"`, respectively,
and `other_generators[i]` is in the std basis.
"""
errgen_std = _bt.change_basis(errgen, mxBasis, "std")
if _sps.issparse(errgen_std):
errgen_std_flat = errgen_std.tolil().reshape(
(errgen_std.shape[0] * errgen_std.shape[1], 1)).tocsr() # b/c lil's are only type that can reshape...
else:
errgen_std_flat = errgen_std.flatten()
    errgen_std = None  # unused below, and sparse reshape doesn't copy, so mark as None
d2 = errgen.shape[0]
d = int(_np.sqrt(d2))
#nQubits = _np.log2(d)
    #Get a list of the generators in correspondence with the
# specified basis elements.
if isinstance(ham_basis, _Basis):
hamBasisMxs = ham_basis.elements
elif isinstance(ham_basis, str):
hamBasisMxs = _bt.basis_matrices(ham_basis, d2, sparse=sparse)
else:
hamBasisMxs = ham_basis
if isinstance(other_basis, _Basis):
otherBasisMxs = other_basis.elements
elif isinstance(other_basis, str):
otherBasisMxs = _bt.basis_matrices(other_basis, d2, sparse=sparse)
else:
otherBasisMxs = other_basis
hamGens, otherGens = lindblad_error_generators(
hamBasisMxs, otherBasisMxs, normalize, other_mode) # in std basis
if hamBasisMxs is not None:
bsH = len(hamBasisMxs) # basis size (not necessarily d2)
else: bsH = 0
if otherBasisMxs is not None:
bsO = len(otherBasisMxs) # basis size (not necessarily d2)
else: bsO = 0
if bsH > 0: sparse = _sps.issparse(hamBasisMxs[0])
elif bsO > 0: sparse = _sps.issparse(otherBasisMxs[0])
else: sparse = False # default?
assert(_np.isclose(d * d, d2)) # d2 must be a perfect square
if bsH > 0:
_assert_shape(hamGens, (bsH - 1, d2, d2), sparse)
if bsO > 0:
if other_mode == "diagonal":
_assert_shape(otherGens, (bsO - 1, d2, d2), sparse)
elif other_mode == "diag_affine":
_assert_shape(otherGens, (2, bsO - 1, d2, d2), sparse)
else: # other_mode == "all"
_assert_shape(otherGens, (bsO - 1, bsO - 1, d2, d2), sparse)
#Perform linear least squares solve to find "projections" onto each otherGens element - defined so that
# sum_i projection_i * otherGen_i = (errgen_std-ham_errgen) as well as possible.
#ham_error_gen = _np.einsum('i,ijk', hamProjs, hamGens)
#other_errgen = errgen_std - ham_error_gen #what's left once hamiltonian errors are projected out
#Do linear least squares soln to expressing errgen_std as a linear combo
# of the lindblad generators
if bsH > 0:
if not sparse:
H = hamGens.reshape((bsH - 1, d2**2)).T # ham generators == columns
Hdag = H.T.conjugate()
#Do linear least squares: this is what takes the bulk of the time
hamProjs = _np.linalg.solve(_np.dot(Hdag, H), _np.dot(Hdag, errgen_std_flat))
hamProjs.shape = (hamGens.shape[0],)
else:
rows = [hamGen.tolil().reshape((1, d2**2)) for hamGen in hamGens]
H = _sps.vstack(rows, 'csr').transpose()
Hdag = H.copy().transpose().conjugate()
#Do linear least squares: this is what takes the bulk of the time
if _mt.safenorm(errgen_std_flat) < 1e-8: # protect against singular RHS
hamProjs = _np.zeros(bsH - 1, 'd')
else:
hamProjs = _spsl.spsolve(Hdag.dot(H), Hdag.dot(errgen_std_flat))
if _sps.issparse(hamProjs): hamProjs = hamProjs.toarray().flatten()
hamProjs.shape = (bsH - 1,)
else:
hamProjs = None
if bsO > 0:
if not sparse:
if other_mode == "diagonal":
O = otherGens.reshape((bsO - 1, d2**2)).T # other generators == columns
elif other_mode == "diag_affine":
O = otherGens.reshape((2 * (bsO - 1), d2**2)).T # other generators == columns
else:
O = otherGens.reshape(((bsO - 1)**2, d2**2)).T # other generators == columns
Odag = O.T.conjugate()
#Do linear least squares: this is what takes the bulk of the time
otherProjs = _np.linalg.solve(_np.dot(Odag, O), _np.dot(Odag, errgen_std_flat))
if other_mode == "diagonal":
otherProjs.shape = (otherGens.shape[0],)
elif other_mode == "diag_affine":
otherProjs.shape = (2, otherGens.shape[1])
else:
otherProjs.shape = (otherGens.shape[0], otherGens.shape[1])
else:
if other_mode == "diagonal":
rows = [oGen.tolil().reshape((1, d2**2)) for oGen in otherGens]
O = _sps.vstack(rows, 'csr').transpose() # other generators == columns
else: # "diag_affine" or "all"
rows = [oGen.tolil().reshape((1, d2**2)) for oGenRow in otherGens for oGen in oGenRow]
O = _sps.vstack(rows, 'csr').transpose() # other generators == columns
Odag = O.copy().transpose().conjugate() # TODO: maybe conjugate copies data?
#Do linear least squares: this is what takes the bulk of the time
if _mt.safenorm(errgen_std_flat) < 1e-8: # protect against singular RHS
if other_mode == "diagonal": otherProjs = _np.zeros(bsO - 1, 'd')
elif other_mode == "diag_affine": otherProjs = _np.zeros((2, bsO - 1), 'd')
else: otherProjs = _np.zeros((bsO - 1, bsO - 1), 'd')
else:
otherProjs = _spsl.spsolve(Odag.dot(O), Odag.dot(errgen_std_flat))
if _sps.issparse(otherProjs): otherProjs = otherProjs.toarray().flatten()
if other_mode == "diagonal":
otherProjs.shape = (bsO - 1,)
elif other_mode == "diag_affine":
otherProjs.shape = (2, bsO - 1)
else: # other_mode == "all"
otherProjs.shape = (bsO - 1, bsO - 1)
else:
otherProjs = None
#check err gens are linearly independent -- but can take a very long time, so comment out!
#assert(_np.linalg.matrix_rank(H,1e-7) == H.shape[1])
#assert(_np.linalg.matrix_rank(O,1e-7) == O.shape[1])
#if False: # further check against older (slower) version
# M = _np.concatenate( (hamGens.reshape((bs-1,d2**2)).T, otherGens.reshape(((bs-1)**2,d2**2)).T), axis=1)
# assert(_np.linalg.matrix_rank(M,1e-7) == M.shape[1]) #check err gens are linearly independent
# Mdag = M.T.conjugate()
# print("DB D: %.1f" % (time.time()-t)); t = time.time()
# projs = _np.linalg.solve(_np.dot(Mdag,M), _np.dot(Mdag,errgen_std_flat))
# hamProjs_chk = projs[0:(bs-1)]
# otherProjs_chk = projs[(bs-1):]
# assert(_np.linalg.norm(hamProjs-hamProjs_chk) < 1e-6)
# assert(_np.linalg.norm(otherProjs-otherProjs_chk) < 1e-6)
if return_generators:
return hamProjs, otherProjs, hamGens, otherGens
else:
return hamProjs, otherProjs
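# Illustrative usage sketch (hypothetical helper): projecting a zero error generator
# onto the single-qubit Lindblad generators yields zero Hamiltonian and "other"
# projections.
def _example_lindblad_errgen_projections():
    errgen = _np.zeros((4, 4), 'd')
    hamProjs, otherProjs = lindblad_errgen_projections(errgen, 'pp', 'pp', mxBasis='pp')
    assert _np.allclose(hamProjs, 0) and _np.allclose(otherProjs, 0)
    return hamProjs, otherProjs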
def projections_to_lindblad_terms(hamProjs, otherProjs, ham_basis, other_basis,
other_mode="all", return_basis=True):
"""
Converts the projections of an error generator onto basis elements into
the Lindblad-term dictionary and basis used to individually specify
Lindblad terms.
Parameters
----------
hamProjs : numpy.ndarray
An array of length d-1, where d is the dimension of the projected error
generator, giving the projections onto the Hamiltonian-type Lindblad
terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the dimension
of the projected error generator, for `other_mode` equal to `"all"`,
`"diag_affine"`, or `"diagonal"`, respectively. Values give the
projections onto the non-Hamiltonian-type Lindblad terms.
ham_basis: {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct `hamProjs`. Allowed values are Matrix-unit
(std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt), list of
numpy arrays, or a custom basis object.
other_basis : {'std', 'gm', 'pp', 'qt'}, list of matrices, or Basis object
The basis used to construct `otherProjs`. Allowed values are
Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp), and Qutrit (qt),
list of numpy arrays, or a custom basis object.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
return_basis : bool, optional
Whether to return a :class:`Basis` containing the elements
corresponding to labels within the returned `Ltermdict`.
Returns
-------
Ltermdict : dict
Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
`"A"` (Affine). Hamiltonian and Affine terms always have a single basis
label (so key is a 2-tuple) whereas Stochastic tuples have 1 basis label
to indicate a *diagonal* term and otherwise have 2 basis labels to
specify off-diagonal non-Hamiltonian Lindblad terms. Basis labels
are taken from `ham_basis` and `other_basis`. Values are complex
coefficients (the projections).
basis : Basis
A single basis containing all the basis labels used in `Ltermdict` (and
*only* those elements). Only returned when `return_basis == True`.
"""
assert(not (ham_basis is None and other_basis is None)), \
"At least one of `ham_basis` and `other_basis` must be non-None"
# Make None => length-0 arrays so iteration code works below (when basis is None)
if hamProjs is None: hamProjs = _np.empty(0, 'd')
if otherProjs is None:
otherProjs = _np.empty(0, 'd') if other_mode == "diagonal" \
else _np.empty((0, 0), 'd')
# Construct a pair of dictionaries describing all of the
# Lindblad-terms:
# Ltermdict keys= ('H',basisLbl), ('S',basisLbl), or ('S',bLbl1,bLbl2)
# vals= coefficients of these terms (projections from errgen)
# basisdict keys= basis labels (just has to match Ltermdict keys)
# vals= basis matrices - can be either sparse or dense
Ltermdict = _collections.OrderedDict()
basisdict = _collections.OrderedDict()
if return_basis:
def set_basis_el(blbl, bel):
""" Sets an elment of basisdict, checking for consistency """
if blbl in basisdict:
assert(_mt.safenorm(basisdict[blbl] - bel) < 1e-8), "Ambiguous basis el label %s" % blbl
else:
basisdict[blbl] = bel
else:
def set_basis_el(blbl, bel):
pass
#Add Hamiltonian error elements
if ham_basis is not None:
ham_lbls = ham_basis.labels
ham_mxs = ham_basis.elements # can be sparse
assert(len(ham_mxs[1:]) == len(hamProjs))
for coeff, lbl, bmx in zip(hamProjs, ham_lbls[1:], ham_mxs[1:]): # skip identity
Ltermdict[('H', lbl)] = coeff
set_basis_el(lbl, bmx)
else:
ham_lbls = []
#Add "other" error elements
if other_basis is not None:
other_lbls = other_basis.labels
other_mxs = other_basis.elements # can be sparse
if other_mode == "diagonal":
assert(len(other_mxs[1:]) == len(otherProjs))
for coeff, lbl, bmx in zip(otherProjs, other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('S', lbl)] = coeff
set_basis_el(lbl, bmx)
elif other_mode == "diag_affine":
assert((2, len(other_mxs[1:])) == otherProjs.shape)
for coeff, lbl, bmx in zip(otherProjs[0], other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('S', lbl)] = coeff
set_basis_el(lbl, bmx)
for coeff, lbl, bmx in zip(otherProjs[1], other_lbls[1:], other_mxs[1:]): # skip identity
Ltermdict[('A', lbl)] = coeff
set_basis_el(lbl, bmx)
else:
assert((len(other_mxs[1:]), len(other_mxs[1:])) == otherProjs.shape)
for i, (lbl1, bmx1) in enumerate(zip(other_lbls[1:], other_mxs[1:])): # skip identity
set_basis_el(lbl1, bmx1)
for j, (lbl2, bmx2) in enumerate(zip(other_lbls[1:], other_mxs[1:])): # skip identity
set_basis_el(lbl2, bmx2)
Ltermdict[('S', lbl1, lbl2)] = otherProjs[i, j]
else:
other_lbls = []
#Turn basisdict into a Basis to return
if return_basis:
if ham_basis == other_basis:
basis = ham_basis
elif ham_basis is None or set(ham_lbls).issubset(set(other_lbls)):
basis = other_basis
elif other_basis is None or set(other_lbls).issubset(set(ham_lbls)):
basis = ham_basis
else:
            #Create an ExplicitBasis using the matrices in basisdict plus the identity
sparse = True; real = True
if ham_basis is not None:
elshape = ham_basis.elshape
sparse = sparse and ham_basis.sparse
real = real and ham_basis.real
if other_basis is not None:
elshape = other_basis.elshape
sparse = sparse and other_basis.sparse
real = real and other_basis.real
d = elshape[0]
Id = _sps.identity(d, 'complex', 'csr') / _np.sqrt(d) if sparse \
else _np.identity(d, 'complex') / _np.sqrt(d)
lbls = ['I'] + list(basisdict.keys())
mxs = [Id] + list(basisdict.values())
basis = _ExplicitBasis(mxs, lbls, name=None,
real=real, sparse=sparse)
return Ltermdict, basis
else:
return Ltermdict
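# Illustrative usage sketch (hypothetical helper): turning projection arrays back
# into a Lindblad-term dictionary. `pp_basis` is assumed to be a 1-qubit
# Pauli-product Basis object whose labels are ('I', 'X', 'Y', 'Z').
def _example_projections_to_lindblad_terms(pp_basis):
    hamProjs = _np.array([0.0, 0.0, 0.01])        # only a Hamiltonian-Z term
    otherProjs = _np.zeros((3, 3), 'complex')     # no stochastic terms
    Ltermdict, basis = projections_to_lindblad_terms(hamProjs, otherProjs,
                                                     pp_basis, pp_basis, other_mode="all")
    assert _np.isclose(Ltermdict[('H', pp_basis.labels[3])], 0.01)
    return Ltermdict, basis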
def lindblad_terms_to_projections(Ltermdict, basis, other_mode="all"):
"""
Convert a set of Lindblad terms into a dense matrix/grid of projections.
Essentially the inverse of :function:`projections_to_lindblad_terms`.
Parameters
----------
Ltermdict : dict
        A dictionary specifying which Lindblad terms are present in the gate
        parameterization. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` is `"H"` (Hamiltonian), `"S"` (Stochastic), or
`"A"` (Affine). Hamiltonian and Affine terms always have a single basis
label (so key is a 2-tuple) whereas Stochastic tuples with 1 basis label
indicate a *diagonal* term, and are the only types of terms allowed when
        `other_mode != "all"`. Otherwise, Stochastic term tuples can include 2
basis labels to specify "off-diagonal" non-Hamiltonian Lindblad terms.
Basis labels can be strings or integers. Values are complex
coefficients (error rates).
basis : Basis, optional
A basis mapping the labels used in the keys of `Ltermdict` to
basis matrices (e.g. numpy arrays or Scipy sparse matrices). The
first element of this basis should be an identity element, and
will be propagated to the returned `ham_basis` and `other_basis`.
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian terms are allowed in `Ltermdict`.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`
(all generators).
Returns
-------
hamProjs : numpy.ndarray
An array of length `basisdim-1`, giving the projections onto a
full set of the Hamiltonian-type Lindblad terms (onto each element of
`ham_basis`).
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d=`basisdim`
for `other_mode` equal to `"all"`, `"diag_affine"`, or `"diagonal"`,
respectively. Values give the projections onto the non-Hamiltonian
-type Lindblad terms.
ham_basis: Basis
The basis used to construct `hamProjs`.
other_basis : Basis
The basis used to construct `otherProjs`.
hamBasisIndices : OrderedDict
        A dictionary mapping some or all of the basis labels of `basisdict`
to the integers 0 to `len(ham_basis)`. These are indices into
`hamProjs`, giving the projection associated with each Hamiltonian
basis element.
otherBasisIndices : OrderedDict
        A dictionary mapping some or all of the basis labels of `basisdict`
to the integers 0 to `len(other_basis)`. These are row and column
indices into `otherProjs`, giving the projection associated with each
pair of "other" basis elements (or single basis element if
`other_mode!="all"`).
"""
#Separately enumerate the (distinct) basis elements used for Hamiltonian
# and non-Hamiltonian error terms
#print("DB: lindblad term to proj: \n",Ltermdict,"\n",basis)
hamBasisLabels = []
otherBasisLabels = []
for termLbl, coeff in Ltermdict.items():
if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:]) # e.g. "HXX" => ('H','XX')
termType = termLbl[0]
if termType == "H": # Hamiltonian
assert(len(termLbl) == 2), "Hamiltonian term labels should have form ('H',<basis element label>)"
if termLbl[1] not in hamBasisLabels:
hamBasisLabels.append(termLbl[1])
elif termType == "S": # Stochastic
if other_mode in ("diagonal", "diag_affine"):
assert(len(termLbl) == 2), "Stochastic term labels should have form ('S',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
else:
assert(len(termLbl) == 3), "Stochastic term labels should have form ('S',<bel1>, <bel2>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
if termLbl[2] not in otherBasisLabels:
otherBasisLabels.append(termLbl[2])
elif termType == "A": # Affine
assert(other_mode == "diag_affine"), "Affine labels are only allowed in an affine mode"
assert(len(termLbl) == 2), "Affine term labels should have form ('A',<basis element label>)"
if termLbl[1] not in otherBasisLabels:
otherBasisLabels.append(termLbl[1])
#Construct bases
# Note: the lists of basis matrices shouldn't contain the identity, since
# the terms above shouldn't contain identity terms - but `basis` should
    # contain an identity element as its first element, so add this identity el
# to non-empty bases (empty bases stay empty!) to be consistent with the
# rest of the framework (bases *have* Ids)
sparse = basis.sparse
if set(hamBasisLabels) == set(basis.labels):
ham_basis = basis
else:
Id = basis[0]
ham_basis_mxs = [basis[bl] for bl in hamBasisLabels]
if len(ham_basis_mxs) > 0:
ham_basis = _ExplicitBasis([Id] + ham_basis_mxs, ['I'] + hamBasisLabels,
name=None, real=True, sparse=sparse)
else:
ham_basis = _ExplicitBasis(ham_basis_mxs, name=None, real=True, sparse=sparse)
if set(otherBasisLabels) == set(basis.labels):
other_basis = basis
else:
Id = basis[0]
other_basis_mxs = [basis[bl] for bl in otherBasisLabels]
if len(other_basis_mxs) > 0:
other_basis = _ExplicitBasis([Id] + other_basis_mxs, ['I'] + otherBasisLabels,
name=None, real=True, sparse=sparse)
else:
other_basis = _ExplicitBasis(other_basis_mxs, name=None, real=True, sparse=sparse)
bsH, bsO = len(ham_basis), len(other_basis)
#print("DB: constructed ham_basis = ",ham_basis)
#print("DB: other basis = ",other_basis)
#Create projection (term coefficient) arrays - or return None if
# the corresponding basis is empty (as per our convention)
hamProjs = _np.zeros(bsH - 1, 'complex') if bsH > 0 else None
if bsO > 0:
if other_mode == "diagonal": # OK if this runs for 'auto' too since then len(otherBasisIndices) == 0
otherProjs = _np.zeros(bsO - 1, 'complex')
elif other_mode == "diag_affine":
otherProjs = _np.zeros((2, bsO - 1), 'complex')
else:
otherProjs = _np.zeros((bsO - 1, bsO - 1), 'complex')
else: otherProjs = None
#Fill arrays
hamBasisIndices = {lbl: i - 1 for i, lbl in enumerate(ham_basis.labels)} # -1 to compensate for identity as
otherBasisIndices = {lbl: i - 1 for i, lbl in enumerate(other_basis.labels)} # first element (not in projections).
for termLbl, coeff in Ltermdict.items():
if isinstance(termLbl, str): termLbl = (termLbl[0], termLbl[1:]) # e.g. "HXX" => ('H','XX')
termType = termLbl[0]
if termType == "H": # Hamiltonian
k = hamBasisIndices[termLbl[1]] # index of coefficient in array
hamProjs[k] = coeff
elif termType == "S": # Stochastic
if other_mode == "diagonal":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[k] = coeff
elif other_mode == "diag_affine":
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[0, k] = coeff
else: # other_mode == "all"
k = otherBasisIndices[termLbl[1]] # index of row in "other" coefficient matrix
j = otherBasisIndices[termLbl[2]] # index of col in "other" coefficient matrix
otherProjs[k, j] = coeff
elif termType == "A": # Affine
assert(other_mode == "diag_affine")
k = otherBasisIndices[termLbl[1]] # index of coefficient in array
otherProjs[1, k] = coeff
return hamProjs, otherProjs, ham_basis, other_basis
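# Illustrative usage sketch (hypothetical helper): converting a term dictionary with
# a single Hamiltonian-Z term into projection arrays. `pp_basis` is assumed to be a
# 1-qubit Pauli-product Basis object containing a 'Z' element.
def _example_lindblad_terms_to_projections(pp_basis):
    Ltermdict = {('H', 'Z'): 0.01}
    hamProjs, otherProjs, ham_basis, other_basis = \
        lindblad_terms_to_projections(Ltermdict, pp_basis, other_mode="all")
    assert _np.isclose(hamProjs[0], 0.01)         # 'Z' is the only non-identity ham label
    return hamProjs, otherProjs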
def lindblad_projections_to_paramvals(hamProjs, otherProjs, param_mode="cptp",
other_mode="all", truncate=True):
"""
Construct the array of Lindblad-gate parameter values from the separate
arrays of Hamiltonian and non-Hamiltonian Lindblad-term projections.
    When `param_mode == "cptp"`, this function handles parameterizing the
    projections so that the (real) parameter values correspond to projections
    for a valid CPTP gate (e.g. by parameterizing the Cholesky decomposition
    of `otherProjs` instead of `otherProjs` itself). This function is closely
    related to implementation details of the LindbladOp class.
Parameters
----------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1), (2,d-1), or (d-1,), where d is the gate
dimension, for `other_mode` equal to `"all"`,`"diag_affine"`, or
`"diagonal"`, respectively. Values give the projections onto a full
set of non-Hamiltonian-type Lindblad terms.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
        Describes how values in `hamProjs` and `otherProjs` relate to the
returned parameter values. Allowed values are:
`"unconstrained"` (projs are independent unconstrained parameters),
`"cptp"` (independent parameters but constrained so map is CPTP),
`"reldepol"` (all non-Ham. diagonal projs take the *same* value),
`"depol"` (same as `"reldepol"` but projs must be *positive*)
other_mode : {"diagonal", "diag_affine", "all"}
Which non-Hamiltonian Lindblad error projections `otherProjs` includes.
Allowed values are: `"diagonal"` (only the diagonal Stochastic),
`"diag_affine"` (diagonal + affine generators), and `"all"`.
truncate : bool, optional
Whether to truncate the projections onto the Lindblad terms in
order to meet constraints (e.g. to preserve CPTP) when necessary.
If False, then an error is thrown when the given projections
cannot be parameterized as specified.
Returns
-------
numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
values for `other_mode` equal to `"all"`, `"diag_affine"`, or
`"diagonal"`, respectively.
"""
if hamProjs is not None:
assert(_np.isclose(_np.linalg.norm(hamProjs.imag), 0)), \
"Hamiltoian projections (coefficients) are not all real!"
hamParams = hamProjs.real
else:
hamParams = _np.empty(0, 'd')
if otherProjs is not None:
if other_mode == "diagonal":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a *single-element* 1D vector of the sqrt of each diagonal el
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0]) for v in otherProjs])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
otherProj = _np.mean(otherProjs.clip(1e-16, 1e100))
otherParams = _np.array(_np.sqrt(_np.real(otherProj)), 'd') # shape (1,)
elif param_mode == "cptp": # otherParams is a 1D vector of the sqrts of diagonal els
assert(truncate or all([v >= -1e-12 for v in otherProjs])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
otherProjs = otherProjs.clip(1e-16, 1e100)
otherParams = _np.sqrt(otherProjs.real) # shape (bsO-1,)
else: # "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
otherParams = otherProjs.real # shape (bsO-1,)
elif other_mode == "diag_affine":
assert(_np.isclose(_np.linalg.norm(_np.imag(otherProjs)), 0)), \
"Diagonal stochastic and affine projections (coefficients) are not all real!"
if param_mode == "depol": # otherParams is a single depol value + unconstrained affine coeffs
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
assert(truncate or all([_np.isclose(v, otherProjs[0, 0]) for v in otherProjs[0]])), \
"Diagonal lindblad coefficients are not equal (truncate == False)!"
depolProj = _np.mean(otherProjs[0, :].clip(1e-16, 1e100))
otherParams = _np.concatenate(([_np.sqrt(_np.real(depolProj))],
otherProjs[1].real)) # shape (1+(bsO-1),)
elif param_mode == "cptp": # Note: does not constrained affine coeffs to CPTP
assert(truncate or all([v >= -1e-12 for v in otherProjs[0]])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
diagParams = _np.sqrt(_np.real(otherProjs[0, :]).clip(1e-16, 1e100)) # shape (bsO-1,)
otherParams = _np.concatenate((diagParams, otherProjs[1].real)) # diag + affine params
else: # param_mode == "unconstrained": otherParams is a 1D vector of the real diagonal els of otherProjs
otherParams = otherProjs.real # shape (2,bsO-1)
else: # other_mode == "all"
assert(_np.isclose(_np.linalg.norm(otherProjs - otherProjs.T.conjugate()), 0)
), "Other projection/coefficient mx is not Hermitian!"
assert(param_mode != "depol"), "`depol` is not supported when `other_mode == 'all'`"
bsO = otherProjs.shape[0] + 1 # +1 to keep convention that this is the basis (w/Identity) size
otherParams = _np.empty((bsO - 1, bsO - 1), 'd')
if param_mode == "cptp": # otherParams mx stores Cholesky decomp
#push any slightly negative evals of otherProjs positive so that
# the Cholesky decomp will work.
evals, U = _np.linalg.eig(otherProjs)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"Lindblad coefficients are not CPTP (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(otherProjs)
                # if Lmx not positive definite, try again with 1e-12 (same lines as above)
except _np.linalg.LinAlgError: # pragma: no cover
pos_evals = evals.clip(1e-12, 1e100) # pragma: no cover
otherProjs = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui)) # pragma: no cover
Lmx = _np.linalg.cholesky(otherProjs) # pragma: no cover
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
otherParams[i, i] = Lmx[i, i].real
for j in range(i):
otherParams[i, j] = Lmx[i, j].real
otherParams[j, i] = Lmx[i, j].imag
else: # param_mode == "unconstrained": otherParams mx stores otherProjs (hermitian) directly
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(otherProjs[i, i])) < IMAG_TOL)
otherParams[i, i] = otherProjs[i, i].real
for j in range(i):
otherParams[i, j] = otherProjs[i, j].real
otherParams[j, i] = otherProjs[i, j].imag
else:
otherParams = _np.empty(0, 'd')
assert(not _np.iscomplexobj(hamParams)) # params should always
assert(not _np.iscomplexobj(otherParams)) # be *real*
return _np.concatenate((hamParams, otherParams.flat))
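# Illustrative usage sketch (hypothetical helper): packing Hamiltonian projections
# and a Hermitian, positive-definite "other" coefficient matrix into a real
# parameter vector under the CPTP parameterization (3 + 9 = 12 parameters for a
# single qubit).
def _example_lindblad_projections_to_paramvals():
    hamProjs = _np.array([0.01, 0.0, 0.0])
    otherProjs = 0.01 * _np.identity(3, 'complex')
    paramvals = lindblad_projections_to_paramvals(hamProjs, otherProjs,
                                                  param_mode="cptp", other_mode="all")
    assert paramvals.shape == (12,) and not _np.iscomplexobj(paramvals)
    return paramvals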
def paramvals_to_lindblad_projections(paramvals, ham_basis_size,
other_basis_size, param_mode="cptp",
other_mode="all", Lmx=None):
"""
Construct the separate arrays of Hamiltonian and non-Hamiltonian
Lindblad-term projections from the array of Lindblad-gate parameter values.
This function essentially performs the inverse of
:function:`lindblad_projections_to_paramvals`.
Parameters
----------
paramvals : numpy.ndarray
A 1D array of real parameter values consisting of d-1 Hamiltonian
        values followed by either (d-1)^2, 2*(d-1), or just d-1 non-Hamiltonian
        values for `other_mode` equal to `"all"`, `"diag_affine"`, or
        `"diagonal"`, respectively.
ham_basis_size, other_basis_size : int
The number of elements in the Hamiltonian and non-Hamiltonian
bases used to construct `paramvals`. As such, `ham_basis_size`
gives the offset into `paramvals` where the non-Hamiltonian
parameters begin.
param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
Specifies how the Lindblad-term coefficients are mapped to the set of
(real) parameter values. This really just applies to the "other"
(non-Hamiltonian) coefficients. "unconstrained" means that ranging
over the parameter values lets the coefficient-matrix vary over all
matrices, "cptp" restricts this to postitive matrices. "depol"
maps all of the coefficients to the *same, positive* parameter (only
available for "diagonal" and "diag_affine" other-modes), and "reldepol"
does the same thing but without the positivity constraint.
other_mode : {"all", "diagonal", "diag_affine"}
Specifies the structure of the matrix of other (non-Hamiltonian)
coefficients. If d is the gate dimension, "all" means a (d-1,d-1)
matrix is used; "diagonal" means just the (d2-1,) diagonal of this
matrix is used; "diag_affine" means the coefficients are in a (2,d2-1)
array with the diagonal-term coefficients being the first row and the
affine coefficients being the second row.
Lmx : ndarray, optional
Scratch space that is used to store the lower-triangular
Cholesky decomposition matrix that is used to construct
the "other" projections when there is a CPTP constraint.
Returns
-------
hamProjs : numpy.ndarray
An array of length d-1, where d is the gate dimension, giving the
projections onto a full set of the Hamiltonian-type Lindblad terms.
otherProjs : numpy.ndarray
An array of shape (d-1,d-1) or (d-1,) or (2,d-1) where d is the gate
dimension, giving the projections onto a full set of non-Hamiltonian
-type Lindblad terms (see `other_mode` above).
"""
bsH = ham_basis_size
bsO = other_basis_size
if Lmx is None:
Lmx = _np.zeros((bsO - 1, bsO - 1), 'complex') if bsO > 0 else None
# self.paramvals = [hamCoeffs] + [otherParams]
# where hamCoeffs are *real* and of length d2-1 (self.dim == d2)
if bsH > 0:
hamCoeffs = paramvals[0:bsH - 1]
nHam = bsH - 1
else:
hamCoeffs = None
nHam = 0
#build up otherCoeffs based on param_mode and other_mode
if bsO > 0:
if other_mode == "diagonal":
otherParams = paramvals[nHam:]
expected_shape = (1,) if (param_mode in ("depol", "reldepol")) else (bsO - 1,)
assert(otherParams.shape == expected_shape)
if param_mode in ("depol", "reldepol"):
otherParams = otherParams[0] * _np.ones(bsO - 1, 'd') # replicate single param bsO-1 times
if param_mode in ("cptp", "depol"):
otherCoeffs = otherParams**2 # Analogous to L*L_dagger
else: # "unconstrained"
otherCoeffs = otherParams
elif other_mode == "diag_affine":
if param_mode in ("depol", "reldepol"):
otherParams = paramvals[nHam:].reshape((1 + bsO - 1,))
otherCoeffs = _np.empty((2, bsO - 1), 'd') # leave as real type b/c doesn't have complex entries
if param_mode == "depol":
otherCoeffs[0, :] = otherParams[0]**2
else:
otherCoeffs[0, :] = otherParams[0]
otherCoeffs[1, :] = otherParams[1:]
else:
otherParams = paramvals[nHam:].reshape((2, bsO - 1))
if param_mode == "cptp":
otherCoeffs = otherParams.copy()
otherCoeffs[0, :] = otherParams[0]**2
else: # param_mode == "unconstrained"
#otherCoeffs = _np.empty((2,bsO-1),'complex')
otherCoeffs = otherParams
else: # other_mode == "all"
otherParams = paramvals[nHam:].reshape((bsO - 1, bsO - 1))
if param_mode == "cptp":
# otherParams is an array of length (bs-1)*(bs-1) that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = otherParams[i,i]
# Lmx[i,j] = otherParams[i,j] + 1j*otherParams[j,i] (i > j)
for i in range(bsO - 1):
Lmx[i, i] = otherParams[i, i]
for j in range(i):
Lmx[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
#The matrix of (complex) "other"-coefficients is built by
# assuming Lmx is its Cholesky decomp; means otherCoeffs
# is pos-def.
# NOTE that the Cholesky decomp with all positive real diagonal
# elements is *unique* for a given positive-definite otherCoeffs
# matrix, but we don't care about this uniqueness criterion and so
# the diagonal els of Lmx can be negative and that's fine -
# otherCoeffs will still be posdef.
otherCoeffs = _np.dot(Lmx, Lmx.T.conjugate())
#DEBUG - test for pos-def
#evals = _np.linalg.eigvalsh(otherCoeffs)
#DEBUG_TOL = 1e-16; #print("EVALS DEBUG = ",evals)
#assert(all([ev >= -DEBUG_TOL for ev in evals]))
else: # param_mode == "unconstrained"
#otherParams holds otherCoeff real and imaginary parts directly
otherCoeffs = _np.empty((bsO - 1, bsO - 1), 'complex')
for i in range(bsO - 1):
otherCoeffs[i, i] = otherParams[i, i]
for j in range(i):
otherCoeffs[i, j] = otherParams[i, j] + 1j * otherParams[j, i]
otherCoeffs[j, i] = otherParams[i, j] - 1j * otherParams[j, i]
else:
otherCoeffs = None
return hamCoeffs, otherCoeffs
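#Illustrative round-trip sketch (not part of the library API): convert a small, made-up
# set of Lindblad projections to parameter values and back again.  This assumes the
# companion `lindblad_projections_to_paramvals` function defined earlier in this module
# accepts (hamProjs, otherProjs, param_mode, other_mode), as suggested by the docstring
# reference above; the numeric values are purely for demonstration.
def _example_lindblad_paramval_roundtrip():
    bsH = bsO = 4  # e.g. the 1-qubit Pauli basis size (including the identity)
    hamProjs = _np.array([0.01, -0.02, 0.005])                  # (bsH-1,) real values
    otherProjs = _np.diag([1e-3, 2e-3, 5e-4]).astype(complex)   # Hermitian, positive definite
    pvals = lindblad_projections_to_paramvals(hamProjs, otherProjs,
                                               param_mode="cptp", other_mode="all")
    hp, op = paramvals_to_lindblad_projections(pvals, bsH, bsO,
                                               param_mode="cptp", other_mode="all")
    assert _np.allclose(hp, hamProjs) and _np.allclose(op, otherProjs)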
#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with
# calls to this one and unitary_to_processmx
def rotation_gate_mx(r, mxBasis="gm"):
"""
Construct a rotation operation matrix.
Build the operation matrix corresponding to the unitary
`exp(-i * (r[0]/2*PP[0]*sqrt(d) + r[1]/2*PP[1]*sqrt(d) + ...) )`
where `PP` is the array of Pauli-product matrices
obtained via `pp_matrices(d)`, where `d = sqrt(len(r)+1)`.
The division by 2 is by convention, and the sqrt(d) is to
essentially un-normalise the matrices returned by `pp_matrices`
so that they are equal to products of the *standard* Pauli matrices.
Parameters
----------
r : tuple
A tuple of coefficients, one per non-identity
Pauli-product basis element
mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
a d^2 x d^2 operation matrix in the specified basis.
"""
d = int(round(_np.sqrt(len(r) + 1)))
assert(d**2 == len(r) + 1), "Invalid number of rotation angles"
#get Pauli-product matrices (in std basis)
pp = _bt.basis_matrices('pp', d**2)
assert(len(r) == len(pp[1:]))
#build unitary (in std basis)
ex = _np.zeros((d, d), 'complex')
for rot, pp_mx in zip(r, pp[1:]):
ex += rot / 2.0 * pp_mx * _np.sqrt(d)
U = _spl.expm(-1j * ex)
stdGate = unitary_to_process_mx(U)
ret = _bt.change_basis(stdGate, 'std', mxBasis)
return ret
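#Example sketch (values chosen purely for illustration): a single-qubit pi/2 rotation
# about X, returned as a 4x4 superoperator in the Pauli-product basis.
def _example_rotation_gate_mx():
    return rotation_gate_mx([_np.pi / 2, 0., 0.], mxBasis="pp")  # r = (X, Y, Z) coefficients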
def project_model(model, targetModel,
projectiontypes=('H', 'S', 'H+S', 'LND'),
genType="logG-logT"):
"""
Construct one or more new models by projecting the error generator of
`model` onto some sub-space then reconstructing.
Parameters
----------
model : Model
The model whose error generator should be projected.
targetModel : Model
The set of target (ideal) gates.
projectiontypes : tuple of {'H','S','H+S','LND','LNDCP'}
Which projections to use. The length of this tuple gives the
number of `Model` objects returned. Allowed values are:
- 'H' = Hamiltonian errors
- 'S' = Stochastic Pauli-channel errors
- 'H+S' = both of the above error types
- 'LND' = errgen projected to a normal (CPTP) Lindbladian
- 'LNDF' = errgen projected to an unrestricted (full) Lindbladian
genType : {"logG-logT", "logTiG"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
Returns
-------
projected_models : list of Models
Elements are projected versions of `model` corresponding to
the elements of `projectiontypes`.
Nps : list of parameter counts
Integer parameter counts for each model in `projected_models`.
Useful for computing the expected log-likelihood or chi2.
"""
opLabels = list(model.operations.keys()) # operation labels
basis = model.basis
#The projection basis needs to be a basis for density matrices
# (i.e. 2x2 mxs in 1Q case) rather than superoperators (4x4 mxs
# in 1Q case) - which is what model.basis is. So, we just extract
# a builtin basis name for the projection basis.
if basis.name in ('pp', 'gm', 'std', 'qt'):
proj_basis_name = basis.name
else:
proj_basis_name = 'pp' # model.basis is weird so just use paulis as projection basis
if basis.name != targetModel.basis.name:
raise ValueError("Basis mismatch between model (%s) and target (%s)!"
% (model.basis.name, targetModel.basis.name))
# Note: set to "full" parameterization so we can set the gates below
# regardless of what parameterization the original model had.
gsDict = {}; NpDict = {}
for p in projectiontypes:
gsDict[p] = model.copy()
gsDict[p].set_all_parameterizations("full")
NpDict[p] = 0
errgens = [error_generator(model.operations[gl],
targetModel.operations[gl],
targetModel.basis, genType)
for gl in opLabels]
for gl, errgen in zip(opLabels, errgens):
if ('H' in projectiontypes) or ('H+S' in projectiontypes):
hamProj, hamGens = std_errgen_projections(
errgen, "hamiltonian", proj_basis_name, basis, True)
#ham_error_gen = _np.einsum('i,ijk', hamProj, hamGens)
ham_error_gen = _np.tensordot(hamProj, hamGens, (0, 0))
ham_error_gen = _bt.change_basis(ham_error_gen, "std", basis)
if ('S' in projectiontypes) or ('H+S' in projectiontypes):
stoProj, stoGens = std_errgen_projections(
errgen, "stochastic", proj_basis_name, basis, True)
#sto_error_gen = _np.einsum('i,ijk', stoProj, stoGens)
sto_error_gen = _np.tensordot(stoProj, stoGens, (0, 0))
sto_error_gen = _bt.change_basis(sto_error_gen, "std", basis)
if ('LND' in projectiontypes) or ('LNDF' in projectiontypes):
HProj, OProj, HGens, OGens = \
lindblad_errgen_projections(
errgen, proj_basis_name, proj_basis_name, basis, normalize=False,
return_generators=True)
#Note: return values *can* be None if an empty/None basis is given
#lnd_error_gen = _np.einsum('i,ijk', HProj, HGens) + \
# _np.einsum('ij,ijkl', OProj, OGens)
lnd_error_gen = _np.tensordot(HProj, HGens, (0, 0)) + \
_np.tensordot(OProj, OGens, ((0, 1), (0, 1)))
lnd_error_gen = _bt.change_basis(lnd_error_gen, "std", basis)
targetOp = targetModel.operations[gl]
if 'H' in projectiontypes:
gsDict['H'].operations[gl] = operation_from_error_generator(
ham_error_gen, targetOp, genType)
NpDict['H'] += len(hamProj)
if 'S' in projectiontypes:
gsDict['S'].operations[gl] = operation_from_error_generator(
sto_error_gen, targetOp, genType)
NpDict['S'] += len(stoProj)
if 'H+S' in projectiontypes:
gsDict['H+S'].operations[gl] = operation_from_error_generator(
ham_error_gen + sto_error_gen, targetOp, genType)
NpDict['H+S'] += len(hamProj) + len(stoProj)
if 'LNDF' in projectiontypes:
gsDict['LNDF'].operations[gl] = operation_from_error_generator(
lnd_error_gen, targetOp, genType)
NpDict['LNDF'] += HProj.size + OProj.size
if 'LND' in projectiontypes:
evals, U = _np.linalg.eig(OProj)
pos_evals = evals.clip(0, 1e100) # clip negative eigenvalues to 0
OProj_cp = _np.dot(U, _np.dot(_np.diag(pos_evals), _np.linalg.inv(U)))
#OProj_cp is now a pos-def matrix
#lnd_error_gen_cp = _np.einsum('i,ijk', HProj, HGens) + \
# _np.einsum('ij,ijkl', OProj_cp, OGens)
lnd_error_gen_cp = _np.tensordot(HProj, HGens, (0, 0)) + \
_np.tensordot(OProj_cp, OGens, ((0, 1), (0, 1)))
lnd_error_gen_cp = _bt.change_basis(lnd_error_gen_cp, "std", basis)
gsDict['LND'].operations[gl] = operation_from_error_generator(
lnd_error_gen_cp, targetOp, genType)
NpDict['LND'] += HProj.size + OProj.size
#Removed attempt to contract H+S to CPTP by removing positive stochastic projections,
# but this doesn't always return the gate to being CPTP (maybe b/c of normalization)...
#sto_error_gen_cp = _np.einsum('i,ijk', stoProj.clip(None,0), stoGens)
# # (only negative stochastic projections OK)
#sto_error_gen_cp = _tools.std_to_pp(sto_error_gen_cp)
#gsHSCP.operations[gl] = _tools.operation_from_error_generator(
# ham_error_gen, targetOp, genType) #+sto_error_gen_cp
#DEBUG!!!
#print("DEBUG: BEST sum neg evals = ",_tools.sum_of_negative_choi_evals(model))
#print("DEBUG: LNDCP sum neg evals = ",_tools.sum_of_negative_choi_evals(gsDict['LND']))
#Check for CPTP where expected
#assert(_tools.sum_of_negative_choi_evals(gsHSCP) < 1e-6)
#assert(_tools.sum_of_negative_choi_evals(gsDict['LND']) < 1e-6)
#Collect and return requested results:
ret_gs = [gsDict[p] for p in projectiontypes]
ret_Nps = [NpDict[p] for p in projectiontypes]
return ret_gs, ret_Nps
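#Usage sketch (illustrative only): `mdl` and `target_mdl` are assumed to be compatible
# pyGSTi Model objects, e.g. a GST estimate and the corresponding ideal target model.
def _example_project_model(mdl, target_mdl):
    projected_models, n_params = project_model(mdl, target_mdl,
                                               projectiontypes=('H', 'S', 'H+S'),
                                               genType="logG-logT")
    return projected_models[0], n_params[0]  # model retaining only Hamiltonian errors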
def get_a_best_case_gauge_transform(gate_mx, target_gate_mx, returnAll=False):
"""
Returns a gauge transformation that maps `gate_mx` into a matrix that is
co-diagonal with `target_gate_mx`, i.e. they share a common set of eigenvectors.
Gauge transformations effectively change the basis of all the gates in a model.
From the perspective of a single gate, a gauge transformation leaves its
eigenvalues the same and changes its eigenvectors. This function finds a *real*
transformation that transforms the eigenspaces of `gate_mx` so that there exists
a set of eigenvectors which diagonalize both `gate_mx` and `target_gate_mx`.
Parameters
----------
gate_mx, target_gate_mx : numpy.ndarray
The gate and target-gate matrices.
returnAll : bool, optional
If true, also return the matrices of eigenvectors
for `Ugate` for gate_mx and `Utgt` for target_gate_mx such
that `U = dot(Utgt, inv(Ugate))` is real.
Returns
-------
U : numpy.ndarray
A gauge transformation such that if `epgate = U * gate_mx * U_inv`,
then `epgate` (which has the same eigenvalues as `gate_mx`), can be
diagonalized with a set of eigenvectors that also diagonalize
`target_gate_mx`. Furthermore, `U` is real.
Ugate, Utgt : numpy.ndarray
only if `returnAll == True`. See above.
"""
# A complication that must be dealt with is that
# the eigenvalues of `target_gate_mx` can be degenerate,
# and so matching up eigenvalues can't be done *just* based on value.
# Our algorithm consists of two steps:
# 1) match gate & target eigenvalues based on value, ensuring conjugacy
# relationships between eigenvalues are preserved.
# 2) for each eigenvalue/vector of `gate`, project the eigenvector onto
# the eigenspace of `tgt_gate` corresponding to the matched eigenvalue.
# (treat conj-pair eigenvalues of `gate` together).
# we want a matrix that gauge-transforms gate_mx into a matrix as
# close to target_gate_mx as possible, i.e. that puts gate_mx's
# eigenvalues in the eigenspaces of target_gate_mx. This is done
# by Ubest = _np.dot(Utgt, inv(Uop)), but there are often degrees
# of freedom in Uop because of its degeneracies. Also, we want Ubest
# to be *real*, so we need to ensure the conjugacy structure of Utgt
# and Uop match...
assert(_np.linalg.norm(gate_mx.imag) < 1e-8)
assert(_np.linalg.norm(target_gate_mx.imag) < 1e-8)
if True: # NEW approach that gives sorted eigenvectors
def get_eigenspace_pairs(mx, TOL=1e-6):
evals, U = _np.linalg.eig(mx) # so mx = U * evals * Uinv
espace_pairs = {}; conj_pair_indices = []
#Pass 1: real evals and positive-imaginary-element-of-conjugate pair evals
# (these are the representatives of "eigenspace pairs")
for i, ev in enumerate(evals):
if ev.imag < -TOL:
conj_pair_indices.append(i); continue # save for pass2
#see if ev is already in espace_pairs
for k, v in espace_pairs.items():
if abs(k - ev) < TOL:
espace_pairs[k]['indices'].append(i)
espace_pairs[k]['conj_pair_indices'].append(None)
#espace_pairs[k]['evecs'].append(U[:,i])
break
else:
espace_pairs[ev] = {'indices': [i], 'conj_pair_indices': [None]}
#Pass 2: negative-imaginary-part elements of evals that occur in conjugate pairs
for i in conj_pair_indices:
ev_pos = _np.conjugate(evals[i])
for k, v in espace_pairs.items(): # ev_pos *should* be in espace_pairs
if abs(k - ev_pos) < TOL:
#found the correct eigenspace-pair to add this eval & evec to,
# now figure out where to put this index based on conjugacy relationships,
# i.e. U[:,esp['indices'][i]] is always conjugate to U[:,esp['conj_pair_indices'][i]]
for jj, j in enumerate(espace_pairs[k]['indices']):
if espace_pairs[k]['conj_pair_indices'][jj] is None: # an empty slot
espace_pairs[k]['conj_pair_indices'][jj] = i
U[:, i] = U[:, j].conj()
break
else:
raise ValueError("Nowhere to place a conjugate eigenvector %d-dim eigenbasis for %s!"
% (len(espace_pairs[k]['indices']), str(k)))
break
else:
raise ValueError("Expected to find %s as an espace-pair representative in %s"
% (str(ev_pos), str(espace_pairs.keys())))
#if not (_np.allclose(mx, _np.dot(U, _np.dot(_np.diag(evals), _np.linalg.inv(U))))):
# import bpdb; bpdb.set_trace()
return evals, U, espace_pairs
def standard_diag(mx, TOL=1e-6):
evals, U, espairs = get_eigenspace_pairs(mx)
std_evals = []
std_evecs = []
sorted_rep_evals = sorted(list(espairs.keys()), key=lambda x: (x.real, x.imag))
for ev in sorted_rep_evals: # iterate in sorted order just for definitiveness
info = espairs[ev]
dim = len(info['indices']) # dimension of this eigenspace (and its pair, if there is one)
#Ensure real-eigenvalue blocks have real eigenvectors
if abs(ev.imag) < TOL:
#find linear combinations of the eigenvectors that are real
Usub = U[:, info['indices']]
if _np.linalg.norm(Usub.imag) > TOL:
# Im part of Usub * combo = Usub.real*combo.imag + Usub.imag*combo.real
combo_real_imag = _mt.nullspace(_np.concatenate((Usub.imag, Usub.real), axis=1))
combos = combo_real_imag[0:dim, :] + 1j * combo_real_imag[dim:, :]
if combos.shape[1] != dim:
raise ValueError(("Can only find %d (< %d) *real* linear combinations of"
" vectors in eigenspace for %s!") % (combos.shape[1], dim, str(ev)))
U[:, info['indices']] = _np.dot(Usub, combos)
assert(_np.linalg.norm(U[:, info['indices']].imag) < TOL)
#Add real eigenvalues and vectors
std_evals.extend([ev] * dim)
std_evecs.extend([U[:, i] for i in info['indices']])
else: # complex eigenvalue case - should have conjugate pair info
#Ensure blocks for conjugate-pairs of eigenvalues follow one after another and
# corresponding eigenvectors (e.g. the first of each block) are conjugate pairs
# (this is already done in the eigenspace construction)
assert(len(info['conj_pair_indices']) == dim)
std_evals.extend([ev] * dim)
std_evals.extend([_np.conjugate(ev)] * dim)
std_evecs.extend([U[:, i] for i in info['indices']])
std_evecs.extend([U[:, i] for i in info['conj_pair_indices']])
return _np.array(std_evals), _np.array(std_evecs).T
#Create "gate_tilde" which has the eigenvectors of gate_mx around the matched eigenvalues of target_gate_mx
# Doing this essentially decouples the problem of eigenvalue matching from the rest of the task -
# after gate_tilde is created, it and target_gate_mx have exactly the *same* eigenvalues.
evals_tgt, Utgt = _np.linalg.eig(target_gate_mx)
evals_gate, Uop = _np.linalg.eig(gate_mx)
pairs = _mt.minweight_match_realmxeigs(evals_gate, evals_tgt)
replace_evals = _np.array([evals_tgt[j] for _, j in pairs])
gate_tilde = _np.dot(Uop, _np.dot(_np.diag(replace_evals), _np.linalg.inv(Uop)))
#Create "standard diagonalizations" of gate_tilde and target_gate_mx, which give
# sort the eigenvalues and ensure eigenvectors occur in *corresponding* conjugate pairs
# (e.g. even when evals +1j and -1j have multiplicity 4, the first 4-D eigenspace, the
evals_tgt, Utgt = standard_diag(target_gate_mx)
evals_tilde, Uop = standard_diag(gate_tilde)
assert(_np.allclose(evals_tgt, evals_tilde))
#Update Utgt so that Utgt * inv_Uop is close to the identity
kite = _mt.get_kite(evals_tgt) # evals are grouped by standard_diag, so this works
D_prior_to_proj = _np.dot(_np.linalg.inv(Utgt), Uop)
#print("D prior to projection to ",kite," kite:"); _mt.print_mx(D_prior_to_proj)
D = _mt.project_onto_kite(D_prior_to_proj, kite)
start = 0
for i, k in enumerate(kite):
slc = slice(start, start + k)
dstart = start + k
for kk in kite[i + 1:]:
if k == kk and _np.isclose(evals_tgt[start], evals_tgt[dstart].conj()): # conjugate block!
dslc = slice(dstart, dstart + kk)
# enforce block conjugacy needed to retain Uproj conjugacy structure
D[dslc, dslc] = D[slc, slc].conj()
break
dstart += kk
start += k
Utgt = _np.dot(Utgt, D) # update Utgt
Utrans = _np.dot(Utgt, _np.linalg.inv(Uop))
assert(_np.linalg.norm(_np.imag(Utrans)) < 1e-7)
Utrans = Utrans.real # _np.real_if_close(Utrans, tol=1000)
if returnAll:
return Utrans, Uop, Utgt, evals_tgt
else:
return Utrans
evals_tgt, Utgt = _np.linalg.eig(target_gate_mx)
evals_gate, Uop = _np.linalg.eig(gate_mx)
from gym_kuka_mujoco.utils.kinematics import forwardKin, inverseKin, identity_quat
from gym_kuka_mujoco.utils.quaternion import mat2Quat
import numpy as np
def hole_insertion_samples(sim, nsamples=10, range=(0, 0.05)):
# The points to be transformed.
pos = np.array([0., 0., 0.])
peg_body_id = sim.model.body_name2id('peg')
tip_site_id = sim.model.site_name2id('peg_tip')
tip_body_pos = sim.model.site_pos[tip_site_id]
# The desired world coordinates
hole_id = sim.model.body_name2id('hole')
world_pos_desired, _ = forwardKin(sim, np.zeros(3), identity_quat, hole_id)
world_pos_delta = np.zeros((nsamples, 3))
# Compute accuracy for both the original data and its low-dimensional representation
# X: feature matrix, L: labels
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from munkres import Munkres
import csv
from numpy import savetxt
from pandas import DataFrame
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
import os
import glob
from matplotlib.backends.backend_pdf import PdfPages
from MantelTest import Mantel
from hub_toolbox.distances import euclidean_distance
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import numba
from sklearn import neighbors
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
def kNN_acc(X, L):
X_train, X_test, Y_train, Y_test = train_test_split(X, L, random_state=0)
knc = KNeighborsClassifier(n_neighbors=1)
knc.fit(X_train, Y_train)
Y_pred = knc.predict(X_test)
score = knc.score(X_test, Y_test)
return score
def kNN_acc_kfold(X, y, n_neighbors=1):
"""
Returns the average 10-fold validation accuracy of a NN classifier trained on the given embeddings
Args:
X (np.array): feature matrix of size n x d
y (np.array): label matrix of size n x 1
n_neighbors (int): number of nearest neighbors to be used for inference
Returns:
score (float): Accuracy of the NN classifier
"""
kf = KFold(n_splits=10)
kf.get_n_splits(X)
scores = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = neighbors.KNeighborsClassifier(n_neighbors)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
scores.append(accuracy_score(y_test, y_pred))
return np.average(scores)
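# Example sketch with synthetic data (illustration only): two well-separated Gaussian
# blobs should yield near-perfect 10-fold 1-NN accuracy.
def _example_kNN_acc_kfold():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 5), rng.randn(50, 5) + 5.0])
    y = np.concatenate([np.zeros(50, dtype=int), np.ones(50, dtype=int)])
    return kNN_acc_kfold(X, y, n_neighbors=1)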
def visualize(X, L, cmap='Spectral', s=10):
sns.set(context="paper", style="white")
fig, ax = plt.subplots(figsize=(12, 10))
color = L.astype(int)
plt.scatter(
X[:, 0], X[:, 1], c=color, cmap=cmap, s=s
)
plt.setp(ax, xticks=[], yticks=[])
# plt.title("MNIST data embedded into two dimensions by UMAP", fontsize=18)
plt.show()
def save_visualization(X, L, cmap='viridis', s=0.1, dir='./fig_vis/', dataset = 'F-MNIST', hub_org = 'org', i=0):
sns.set(context="paper", style="white")
fig, ax = plt.subplots(figsize=(12, 10))
color = L.astype(int)
plt.scatter(
X[:, 0], X[:, 1], c=color, cmap=cmap, s=s
)
plt.setp(ax, xticks=[], yticks=[])
if hub_org == 'org':
model = 'UMAP'
else:
model = 'HR-UMAP'
# plt.title(dataset + " data by " + model, fontsize=18)
# # Initialize the PDF file
# pp = PdfPages(dir + dataset + '_' + model + str(i+1) + '.pdf')
#
# # Save the figure
# plt.savefig(pp, format='pdf')
#
# # Close the PDF file
# pp.close()
plt.savefig(dir + dataset + '_' + model + str(i+1) + '.png')
def kmeans_acc_ari_ami(X, L):
"""
Calculate clustering accuracy. Require scikit-learn installed
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
accuracy, in [0,1]
"""
n_clusters = len(np.unique(L))
kmeans = KMeans(n_clusters=n_clusters, n_init=20)
y_pred = kmeans.fit_predict(X)
y_pred = y_pred.astype(np.int64)
y_true = L.astype(np.int64)
assert y_pred.size == y_true.size
y_pred = y_pred.reshape((1, -1))
y_true = y_true.reshape((1, -1))
# D = max(y_pred.max(), L.max()) + 1
# w = np.zeros((D, D), dtype=np.int64)
# for i in range(y_pred.size):
# w[y_pred[i], L[i]] += 1
# # from sklearn.utils.linear_assignment_ import linear_assignment
# from scipy.optimize import linear_sum_assignment
# row_ind, col_ind = linear_sum_assignment(w.max() - w)
#
# return sum([w[i, j] for i in row_ind for j in col_ind]) * 1.0 / y_pred.size
if len(np.unique(y_pred)) == len(np.unique(y_true)):
C = len(np.unique(y_true))
cost_m = np.zeros((C, C), dtype=float)
for i in np.arange(0, C):
a = np.where(y_pred == i)
# print(a.shape)
a = a[1]
l = len(a)
for j in np.arange(0, C):
yj = np.ones((1, l)).reshape(1, l)
yj = j * yj
cost_m[i, j] = np.count_nonzero(yj - y_true[0, a])
mk = Munkres()
best_map = mk.compute(cost_m)
(_, h) = y_pred.shape
for i in np.arange(0, h):
c = y_pred[0, i]
v = best_map[c]
v = v[1]
y_pred[0, i] = v
acc = 1 - (np.count_nonzero(y_pred - y_true) / h)
else:
acc = 0
# print(y_pred.shape)
y_pred = y_pred[0]
y_true = y_true[0]
ari, ami = adjusted_rand_score(y_true, y_pred), adjusted_mutual_info_score(y_true, y_pred)
return acc, ari, ami
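# Example sketch with synthetic data (illustration only): clustering accuracy, ARI and
# AMI for two well-separated 2-D blobs, which k-means should recover almost exactly.
def _example_kmeans_acc_ari_ami():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 8.0])
    L = np.concatenate([np.zeros(50), np.ones(50)])
    return kmeans_acc_ari_ami(X, L)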
@numba.jit()
def mantel_test(X, L, embed, describe = True):
sss = StratifiedShuffleSplit(n_splits=50, test_size=1000, random_state=0)
sss.get_n_splits(X, L)
label_type = list(set(L))
r_lst = np.array([])
p_lst = np.array([])
for _, idx in sss.split(X, L):
# print('Index: ', idx)
# X_test = X[idx]
# y_train =
X_high, L_hl = X[idx], L[idx]
X_low = embed[idx]
# print(X_high.shape, L_high.shape)
# print(X_low.shape, L_low.shape)
label_idx = []
for _, i in enumerate(label_type):
l_idx = np.where(L_hl == i)
label_idx.append(l_idx)
# print(label_type)
# label_idx
X_high_lst = []
X_low_lst = []
# for _, i in enumerate(label_type):
# X_high_lst.append(X_high[label_idx[i]])
for i, _ in enumerate(label_type):
centroid = np.mean(X_high[label_idx[i]], axis=0)
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import itertools
import random
import numpy as np
import pytest
import sympy
import cirq
class PlusGate(cirq.Gate):
"""A qudit gate that increments a qudit state mod its dimension."""
def __init__(self, dimension, increment=1):
self.dimension = dimension
self.increment = increment % dimension
def _qid_shape_(self):
return (self.dimension,)
def _unitary_(self):
inc = (self.increment - 1) % self.dimension + 1
u = np.empty((self.dimension, self.dimension))
u[inc:] = np.eye(self.dimension)[:-inc]
u[:inc] = np.eye(self.dimension)[-inc:]
return u
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import os
import placentagen as pg
import csv
D0_list = [150,150,172.8,172.8,170.8,135.8,43,230,255.6,301.6,304.1,108.1,85.7,60.7,235.6,156.5,255.4,164.8,100.8,64.9]
Cpass_list = [1.168,1.168,1.416,1.416,1.386,1.067,0.316,1.697,1.417,1.672,1.857,0.843,0.655,0.371,1.802,1.043,1.719,1.141,0.687,0.459]
Cpassdash_list = [7.24,7.24,7.901,7.901,10.568,10.516,11.247,5.298,5.628,5.324,5.226,24.279,36.785,21.035,6.782,8.293,14.354,13.828,12.606,13.431]
Cact_list = [1.108, 1.103, 1.499, 1.858, 1.514, 1.202, 0.392, 3.995, 2.649, 1.395, 3.748, 1.665, 1.024, 0.654,
0.908, 3.491, 1.564, 1.36, 1.131, 0.405]
D0_list_act = [150,172.8,170.8,135.8,43,156.5,255.4,164.8,100.8,64.9]
Cmyo_list = [7.479,8.871,8.462,7.973,24.934,9.018,4.674,7.508,15.977,22.252]
expt_pressure = np.array([10.,30.,50.,70.,90.]) # defined in mmHg
passive_diameter_preg = np.array([76.258, 122.33566667, 145.152, 137.5625, 144.64166667])
passive_se_preg = np.array([10.8693589, 10.23274183, 13.36969036, 11.7338111, 12.88427201])
passive_diameter = np.array([54.11314286, 74.08128571, 88.831, 89.99828571, 86.769])
passive_se = np.array([3.71311161,5.78277879,9.940847,9.98130157,12.93325597])
active_diameter_preg = np.array([92.70733333,113.74933333,121.8715,107.93166667,101.19983333])
active_se_preg = np.array([8.36576993,6.12886374,15.68328409,15.01816237,19.29603708])
active_diameter = np.array([65.587,74.17528571,79.87185714,83.58714286,80.92285714])
active_se = np.array([5.52633482,5.86497481,7.06835057,7.71278033,9.02834107])
num_plot= 101
def main():
## Create a directory to output figures
export_directory = 'output'
if not os.path.exists(export_directory):
os.makedirs(export_directory)
passive_file = 'data/PassiveFits.csv'
active_file = 'data/ActiveFits.csv'
shear_file = 'data/FlowFits.csv'
file = open(passive_file)
passive_data = csv.reader(file)
header = next(passive_data)
rows = []
for row in passive_data:
rows.append(row)
file.close()
D0 = float(rows[0][0])
Cpass = float(rows[1][0])
Cpassdash = float(rows[2][0])
Cpass_preg = float(rows[4][0])
Cpassdash_preg = float(rows[5][0])
D0_preg = float(rows[3][0])
file = open(active_file)
active_data = csv.reader(file)
header = next(active_data)
rows = []
for row in active_data:
rows.append(row)
print(rows)
file.close()
Cact = float(rows[0][0])
Cactdash = float(rows[1][0])
Cactdashdash = float(rows[2][0])
Cmyo = float(rows[3][0])
Cdashdashtone = float(rows[4][0])
Cact_preg = float(rows[5][0])
Cactdash_preg = float(rows[6][0])
Cactdashdash_preg = float(rows[7][0])
Cmyo_preg = float(rows[8][0])
Cdashdashtone_preg = float(rows[9][0])
file = open(shear_file)
shear_data = csv.reader(file)
header = next(shear_data)
rows = []
for row in shear_data:
rows.append(row)
print(rows)
file.close()
Cshear = float(rows[0][0])
Cshear1 = float(rows[1][0])
shear_offset1 = float(rows[2][0])
shear_offset2 = float(rows[3][0])
Cshear_preg = float(rows[4][0])
Cshear1_preg = float(rows[5][0])
shear_offset1_preg = float(rows[6][0])
shear_offset2_preg = float(rows[7][0])
print("Non-pregnant D0 (um) ", D0)
print("Non-pregnant Cpass (N.m)", Cpass/1000.)
print("Non-pregnant Cpassdash (no units)",Cpassdash)
print("Non-pregnant Cact (N.m) ", Cact / 1000.)
print("Non-pregnant Cactdash (no units) ", Cactdash)
print("non-pregnant Cactdashdash (no units)", Cactdashdash)
print("non-pregnant Cmyo (m/N)", Cmyo * 1000.)
print("non-pregnant C'tone (no units)", Cdashdashtone)
print("non-pregnant Cshear (no units)", Cshear)
print("non-pregnant Cshear1 (no units)", Cshear1)
print("non-pregnant tau1 (no units)", shear_offset1)
print("non-pregnant tau2 (no units)", shear_offset2)
print("-------------------------------------")
print("pregnant D0 (um) ", D0_preg)
print("pregnant Cpass (N.m)", Cpass_preg/1000.)
print("pregnant Cpassdash (no units)",Cpassdash_preg)
print("pregnant Cact (N.m) ", Cact_preg / 1000.)
print("pregnant Cactdash (no units) ", Cactdash_preg)
print("pregnant Cactdashdash (no units)", Cactdashdash_preg)
print("pregnant Cmyo (m/N)", Cmyo_preg * 1000.)
print("pregnant C'tone (no units)", Cdashdashtone_preg)
print("pregnant Cshear (no units)", Cshear_preg)
print("pregnant Cshear1 (no units)", Cshear1_preg)
print("pregnant tau1 (no units)", shear_offset1_preg)
print("pregnant tau2 (no units)", shear_offset2_preg)
new_passive_d = np.zeros((num_plot, 1))
new_passive_d_preg = np.zeros((num_plot, 1))
new_active_d = np.zeros((num_plot, 1))
new_active_d_preg = np.zeros((num_plot, 1))
fit_passive_params = [D0, Cpass, Cpassdash]
fit_passive_params_preg = [D0_preg, Cpass_preg, Cpassdash_preg]
dummy_myo_params = [0., 0., 0., 0., 0.]
fit_myo_params = [Cact, Cactdash,Cactdashdash,Cmyo,Cdashdashtone]
fit_myo_params_preg = [Cact_preg, Cactdash_preg, Cactdashdash_preg, Cmyo_preg, Cdashdashtone_preg]
flow_params = [Cshear,Cshear1,shear_offset1,shear_offset2]
flow_params_preg = [Cshear_preg,Cshear1_preg,shear_offset1_preg,shear_offset2_preg]
dummy_flow_params = [0., 0., 0., 0.]
dummy_fixed_flow_params = [0., 0., 0.]
new_pressure = np.linspace(10, 90, num_plot) * 133. / 1000.
for i in range(0, num_plot):
new_passive_d[i] = pg.diameter_from_pressure(fit_passive_params,
dummy_myo_params, dummy_flow_params, dummy_fixed_flow_params, new_pressure[i],
True)
new_passive_d_preg[i] = pg.diameter_from_pressure(fit_passive_params_preg,
dummy_myo_params, dummy_flow_params, dummy_fixed_flow_params, new_pressure[i],
True)
new_active_d[i] = pg.diameter_from_pressure(fit_passive_params,
fit_myo_params, dummy_flow_params, dummy_fixed_flow_params,
new_pressure[i],
True)
new_active_d_preg[i] = pg.diameter_from_pressure(fit_passive_params_preg,
fit_myo_params_preg, dummy_flow_params, dummy_fixed_flow_params,
new_pressure[i],
True)
###############################################
#Plot passive results against experimental data
###############################################
plt.errorbar(expt_pressure, passive_diameter_preg, passive_se_preg, marker='s', ls='--', color='#F01D7F',
label="Experimental data (pregnant)", capsize=5.)
plt.errorbar(expt_pressure, passive_diameter, passive_se, marker='o', ls='--', color='.5',
label="Experimental data (non-pregnant)", capsize=5.)
plt.ylim((0, 250.))
plt.xlim((0., 100.))
plt.plot(np.linspace(10, 90, num_plot), new_passive_d_preg, '#F01D7F', label="Model fit (pregnant)")
plt.plot(np.linspace(10, 90, num_plot), new_passive_d, '0.5', label="Model fit (non-pregnant)")
plt.xlabel('Pressure (mmHg)')
plt.ylabel('Inner diameter ($\mu$m)')
plt.legend()
plt.savefig(export_directory + '/PassiveFitsNonNormalised.png')
plt.close()
plt.errorbar(expt_pressure, passive_diameter_preg / passive_diameter_preg[0], passive_se_preg / passive_diameter_preg[0],
marker='s', ls='--', color='#F01D7F', label="Experimental data (pregnant)", capsize=5.)
plt.errorbar(expt_pressure, passive_diameter / passive_diameter[0],
passive_se / passive_diameter[0], marker='o', ls='--', color='.5',
label="Experimental data (non-pregnant)", capsize=5.)
plt.ylim((0, 2.5))
plt.xlim((0., 100.))
plt.plot(np.linspace(10, 90, num_plot), new_passive_d_preg / passive_diameter_preg[0], '#F01D7F', label="Model fit (pregnant)")
plt.plot(np.linspace(10, 90, num_plot), new_passive_d / passive_diameter[0], '0.5', label="Model fit (non-pregnant)")
plt.xlabel('Pressure (mmHg)')
plt.ylabel('Inner diameter / Diameter at 10mmHg')
plt.legend()
plt.savefig(export_directory + '/PassiveFitsNormalisedTo10mmHg.png')
plt.close()
##################################################################
#Plot active model results against experimental data
#################################################################
plt.errorbar(expt_pressure, passive_diameter_preg, passive_se_preg, marker='s',
markerfacecolor='none', ls='--', color='#F01D7F', label="Passive data (pregnant)", capsize=5.)
plt.errorbar(expt_pressure, passive_diameter, passive_se, marker='o',
markerfacecolor='none', ls='--', color='.5', label="Passive data (non-pregnant)", capsize=5.)
plt.errorbar(expt_pressure, active_diameter_preg, active_se_preg, marker='s', color='#F01D7F',
label="Active data (pregnant)", capsize=5.)
plt.errorbar(expt_pressure, active_diameter, active_se, marker='o', color='.5',
label="Active data (non-pregnant)", capsize=5.)
plt.ylim((0, 250.))
plt.xlim((0., 100.))
plt.xlabel('Pressure (mmHg)')
plt.ylabel('Inner diameter ($\mu$m)')
plt.legend()
plt.savefig(export_directory + '/ExperimentalDataActiveNoFlow.png')
plt.close()
plt.ylim((0, 250.))
plt.xlim((0., 100.))
plt.xlabel('Pressure (mmHg)')
plt.ylabel('Inner diameter ($\mu$m)')
plt.plot(np.linspace(10, 90, num_plot), new_passive_d_preg, color='#F01D7F', linestyle='--',
label="Passive model fit (pregnant)")
plt.plot(np.linspace(10, 90, num_plot), new_passive_d, color='0.5', linestyle='--',
label="Passive model fit (non-pregnant)")
plt.plot(np.linspace(10, 90, num_plot), new_active_d_preg, color='#F01D7F', label="Active model fit (pregnant)")
plt.plot(np.linspace(10, 90, num_plot), new_active_d, color='0.5', label="Active model fit (non-pregnant)")
plt.errorbar(expt_pressure, active_diameter_preg, active_se_preg, marker='s', ls=':', color='#F01D7F',
label="Experimental data (pregnant)", capsize=5.)
plt.errorbar(expt_pressure, active_diameter, active_se, marker='o', ls=':', color='0.5',
label="Experimental data (non-pregnant)", capsize=5.)
plt.savefig(export_directory + '/ActiveNoFlowFits.png')
plt.close()
##########################################################
#Plot Cpass Comparisons
############################################################
x = np.linspace(30, 320, 3)
y = 0.0059 * x + 0.1892
plt.plot(D0_list, Cpass_list, color='k', marker='x', linestyle='None', label="Carlson and Secombe (2005)")
plt.plot(x, y, color='k', ls=':', label='Fit to Carlson and Secombe data')
plt.plot(D0, Cpass/1000., marker='o', color='#F01D7F', linestyle='None', label='Rat radial (non-pregnant)')
plt.plot(D0_preg, Cpass_preg/1000., marker='s', color='#F01D7F', linestyle='None', label='Rat radial (pregnant)')
plt.annotate("", xy=(0.975*D0_preg,0.975*Cpass_preg/1000.), xytext=(D0, Cpass/1000.), arrowprops=dict(headwidth=5, headlength=5, width=0.1,color='#F01D7F'))
matplotlib.pyplot.text(48., 0.75, "R=0.94", fontsize=12)
plt.xlabel('D$_0$ ($\mu$m)', fontsize=16)
plt.ylabel('C$_{pass}$ (N/m)', fontsize=16)
plt.legend()
plt.savefig(export_directory + '/CpassComparison.png')
plt.close()
##########################################################
#Plot Cpass Comparisons
############################################################
x = np.linspace(30, 320, 3)
"""Luminosity Function Constructor and Modeller
This script allows the user to construct and model Galaxian Luminosity Functions using the 1/Vmax estimator and Schechter function.
Rest-frame magnitudes and spatial variance on the counts can be obtained.
Plotting function for easy visualisation are included.
This file can also be imported as a module and contains the following
functions:
* get_maggy - converts magnitudes into maggies
* get_maggy_inv_var - returns inverse variances on maggies
* get_obs_maggies_file - saves file of calculated maggies and inverse variances
* get_rec_maggies_files - saves file of reconstructed maggies at input redshift
* get_rest_maggy_ratio_file - saves file of calculated rest-frame maggy ratios
* get_rest_mag - converts apparent magnitudes into rest-frame magnitudes
* get_maggy_ratio_file - saves file of calculated reconstructed maggy ratios
* get_all_maggy_ratios_file - consolidates files of calculated maggy ratios
* get_volume - returns comoving volume of input survey area and redshift
* get_binned_phi - bins and weights galaxy counts per magnitude by 1/Vmax
* get_patch_centers - saves file of centers of equal patches over survey area
* get_patch_labels - divides survey into equal patches
* get_binned_phi_error - returns spatial variance of the luminosity function
* get_plot - plots magnitude-binned and 1/Vmax weighted luminosity function
* filter_plot_by_colour - plots luminosity functions filtered by galaxy colour
* SchechterMagModel - single Schechter function in terms of magnitude
* DoubleSchechterMagModel - double Schechter function in terms of magnitude
* get_gof - returns reduced chi squared estimate of goodness of fit
* get_schechter_phi - best fits single Schechter function on data
* get_double_schechter_phi - best fits double Schechter function on data
"""
# -----------------------
# Package Imports
# -----------------------
# import kcorrect
import numpy as np
from typing import Tuple
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import kmeans_radec
from kmeans_radec import KMeans, kmeans_sample
from astropy.io import ascii
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=0.3)
# -----------------------
# Methods
# -----------------------
def get_maggy(app_mag_list: np.ndarray) -> np.ndarray:
"""
Converts magnitudes into maggies.
Parameters
----------
app_mag_list : np.ndarray
apparent magnitude of each data point (galaxy)
Returns
-------
np.ndarray
all corresponding maggy values
"""
maggies_list = 10**(app_mag_list / (-2.5))
return maggies_list
def get_maggy_inv_var(maggies_list: np.ndarray,
app_mag_err_list: np.ndarray) -> np.ndarray:
"""
Returns inverse variances on maggies using maggies and magnitude errors.
Parameters
----------
maggies_list : np.ndarray
maggy value of each data point (galaxy)
app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitude values
Returns
-------
np.ndarray
all correspoding maggy inverse variance values
"""
inv_var_list = (0.4 * np.log(10) * maggies_list * app_mag_err_list)**(-2)
return inv_var_list
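# Example sketch (made-up values): convert two apparent magnitudes to maggies and obtain
# the corresponding inverse variances from 0.05 mag errors.
def _example_maggy_conversion():
    app_mags = np.array([19.5, 21.0])
    app_mag_errs = np.array([0.05, 0.05])
    maggies = get_maggy(app_mags)                        # 10**(-mag/2.5)
    inv_vars = get_maggy_inv_var(maggies, app_mag_errs)
    return maggies, inv_vars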
def get_obs_maggies_file(obs_maggies_outfile_name: str,
bands: str,
redshift_list: np.ndarray,
u_app_mag_list: np.ndarray,
g_app_mag_list: np.ndarray,
r_app_mag_list: np.ndarray,
i_app_mag_list: np.ndarray,
Z_app_mag_list: np.ndarray,
Y_app_mag_list=np.empty(0),
J_app_mag_list=np.empty(0),
H_app_mag_list=np.empty(0),
Ks_app_mag_list=np.empty(0),
u_app_mag_err_list=np.empty(0),
g_app_mag_err_list=np.empty(0),
r_app_mag_err_list=np.empty(0),
i_app_mag_err_list=np.empty(0),
Z_app_mag_err_list=np.empty(0),
Y_app_mag_err_list=np.empty(0),
J_app_mag_err_list=np.empty(0),
H_app_mag_err_list=np.empty(0),
Ks_app_mag_err_list=np.empty(0)):
'''
Calculates maggy and inverse variance values from apparent magnitude and their error values
and saves the values in a space delimited csv file with columns (without headers):
redshift u_maggy g_maggy r_maggy... u_inv_var g_inv_var r_inv_var...
File is required to be used with the get_rec_maggies_files function
or other kcorrect_python functions that best-fit SED coefficients.
WARNING: pre-existing file with same name will be over-written.
Parameters
----------
obs_maggies_outfile_name : str
name/path of file with '.csv' extention to save maggies and respective inverse variance values in
bands : str
'ugriz' or 'ugriZYJHKs' - refer source code if using other bands
redshift_list : np.ndarray
redshift of each data point (galaxy)
u_app_mag_list : np.ndarray
all corresponding apparent magnitudes in u-band
g_app_mag_list : np.ndarray
all corresponding apparent magnitudes in g-band
r_app_mag_list : np.ndarray
all corresponding apparent magnitudes in r-band
i_app_mag_list : np.ndarray
all corresponding apparent magnitudes in i-band
Z_app_mag_list : np.ndarray
all corresponding apparent magnitudes in Z-band
Y_app_mag_list : np.ndarray
all corresponding apparent magnitudes in Y-band
J_app_mag_list : np.ndarray
all corresponding apparent magnitudes in J-band
H_app_mag_list : np.ndarray
all corresponding apparent magnitudes in H-band
Ks_app_mag_list : np.ndarray
all corresponding apparent magnitudes in Ks-band
u_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in u-band
g_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in g-band
r_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in r-band
i_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in i-band
Z_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in Z-band
Y_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in Y-band
J_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in J-band
H_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in H-band
Ks_app_mag_err_list : np.ndarray
all corresponding errors on apparent magnitudes in Ks-band
'''
if bands == 'ugriz':
maggy_inv_var_table = np.column_stack(
(redshift_list, get_maggy(u_app_mag_list),
get_maggy(g_app_mag_list), get_maggy(r_app_mag_list),
get_maggy(i_app_mag_list), get_maggy(Z_app_mag_list),
get_maggy_inv_var(get_maggy(u_app_mag_list), u_app_mag_err_list),
get_maggy_inv_var(get_maggy(g_app_mag_list), g_app_mag_err_list),
get_maggy_inv_var(get_maggy(r_app_mag_list), r_app_mag_err_list),
get_maggy_inv_var(get_maggy(i_app_mag_list), i_app_mag_err_list),
get_maggy_inv_var(get_maggy(Z_app_mag_list), Z_app_mag_err_list)))
ascii.write(maggy_inv_var_table,
obs_maggies_outfile_name,
overwrite=True,
format='no_header',
names=[
'redshift', 'u_maggy', 'g_maggy', 'r_maggy', 'i_maggy',
'z_maggy', 'u_inv_var', 'g_inv_var', 'r_inv_var',
'i_inv_var', 'z_inv_var'
])
print(
'\tRedshifts, and ' + bands +
' maggies and their inverse variances calculated, stacked and saved in '
+ obs_maggies_outfile_name + '.')
elif bands == 'ugriZYJHKs':
maggy_inv_var_table = np.column_stack(
(redshift_list, get_maggy(u_app_mag_list),
get_maggy(g_app_mag_list), get_maggy(r_app_mag_list),
get_maggy(i_app_mag_list), get_maggy(Z_app_mag_list),
get_maggy(Y_app_mag_list), get_maggy(J_app_mag_list),
get_maggy(H_app_mag_list), get_maggy(Ks_app_mag_list),
get_maggy_inv_var(get_maggy(u_app_mag_list), u_app_mag_err_list),
get_maggy_inv_var(get_maggy(g_app_mag_list), g_app_mag_err_list),
get_maggy_inv_var(get_maggy(r_app_mag_list), r_app_mag_err_list),
get_maggy_inv_var(get_maggy(i_app_mag_list), i_app_mag_err_list),
get_maggy_inv_var(get_maggy(Z_app_mag_list), Z_app_mag_err_list),
get_maggy_inv_var(get_maggy(Y_app_mag_list), Y_app_mag_err_list),
get_maggy_inv_var(get_maggy(J_app_mag_list), J_app_mag_err_list),
get_maggy_inv_var(get_maggy(H_app_mag_list), H_app_mag_err_list),
get_maggy_inv_var(get_maggy(Ks_app_mag_list), Ks_app_mag_err_list)))
ascii.write(maggy_inv_var_table,
obs_maggies_outfile_name,
overwrite=True,
format='no_header',
names=[
'redshift', 'u_maggy', 'g_maggy', 'r_maggy', 'i_maggy',
'Z_maggy', 'Y_maggy', 'J_maggy', 'H_maggy', 'Ks_maggy',
'u_inv_var', 'g_inv_var', 'r_inv_var', 'i_inv_var',
'Z_inv_var', 'Y_inv_var', 'J_inv_var', 'H_inv_var',
'Ks_inv_var'
])
print(
'\tRedshifts, and ' + bands +
' maggies and their inverse variances calculated, stacked and saved in '
+ obs_maggies_outfile_name + '.')
else:
print('\tOnly valid for bands ugriz or ugriZYJHKs.')
print(
'\tCheck the source code for basic structure of this function that creates the required file if using other bands.'
)
# def get_rec_maggies_files(obs_maggies_file_path: str,
# n_bands: int,
# rec_z_list: np.ndarray,
# rec_maggies_outfile_affix='',
# survey='sdss',
# band_z_shift=0.0,
# template_vmatrix_file_path='vmatrix.default.dat',
# template_lambda_file_path='lambda.default.dat',
# filters_list_file_path='sdss_filters.dat'):
# '''
# Reconstructs the observed maggy values at required redshift values
# by best-fitting galaxy SEDs on data using templates and filter transmission curves,
# and saves the reconstructed maggy values in a space delimited csv file with columns (without headers):
# redshift rec_u_maggy rec_g_maggy rec_r_maggy...
# File is required to be used with the get_maggy_ratio_file or get_rest_maggy_ratio_file functions.
# WARNING: pre-existing file with same name will be over-written.
# Parameters
# ----------
# obs_maggies_file_path : str
# path of '.csv' file with the observed maggies and respective inverse variance values. File can be obtained from the get_obs_maggies_file function
# n_bands : int
# number of bands used in the survey (and present in the obs_maggies_file)
# rec_z_list : np.ndarray
# redshift values required to reconstruct maggies at
# rec_maggies_outfile_affix : str
# output file identifier - reconstructed maggies will be saved in 'maggies_at_z[redshift-value]_[identifier].csv'
# survey : str
# name of survey being used. Set as 'sdss' by default - do not change if sdss-ugriz are being used
# band_z_shift : float
# redshift value to shift the bandpasses/filters by, default is set at 0.0 i.e. no shift
# template_vmatrix_file_path : str
# path of '.dat' file with vmatrix of SED templates - must change if survey parameter is not 'sdss'
# template_lambda_file_path : str
# path of '.dat' file with lambda of SED templates - must change if survey parameter is not 'sdss'
# filters_list_file_path : str
# path of '.dat' file with the list of '.dat' files corresponding to each band and containing its filter transmission curve - must change if survey parameter is not 'sdss'
# '''
# if survey == 'sdss':
# kcorrect.load_templates()
# print('\tTemplates loaded.')
# kcorrect.load_filters(band_shift=band_z_shift)
# print('\tFilters loaded.')
# else:
# kcorrect.load_templates(v=template_vmatrix_file_path,
# l=template_lambda_file_path)
# print('\tTemplates loaded.')
# kcorrect.load_filters(filters_list_file_path, band_shift=band_z_shift)
# print('\tFilters loaded.')
# maggy_inv_var_table = np.genfromtxt(obs_maggies_file_path, delimiter=' ')
# print('\tRead ' + obs_maggies_file_path + '.')
# for rec_z in rec_z_list:
# rec_maggies_outfile_name = 'maggies_at_z' + str(rec_z) + '_' + rec_maggies_outfile_affix + '.csv'
# rec_maggies_stack = []
# for i in range(len(maggy_inv_var_table[:, 0])):
# redshift = maggy_inv_var_table[i, 0]
# maggies = maggy_inv_var_table[i, 1:(n_bands + 1)]
# maggies_inv_var = maggy_inv_var_table[i, (n_bands + 1):((2 * n_bands) + 1)]
# coeffs = kcorrect.fit_nonneg(redshift, maggies, maggies_inv_var)
# rec_maggies_row = kcorrect.reconstruct_maggies(coeffs, redshift=rec_z)
# rec_maggies_stack.append(rec_maggies_row)
# rec_maggies_table = np.array(rec_maggies_stack)
# ascii.write(rec_maggies_table,
# rec_maggies_outfile_name,
# overwrite=True,
# format='no_header')
# print('\t' + rec_maggies_outfile_name + ' saved.')
# print('\tMaggies reconstructed at all redshifts in input array rec_z_list.')
def get_rest_maggy_ratio_file(ID_list: np.ndarray,
obs_maggies_file_path: str,
rest_maggies_file_path: str,
band_index: int,
rest_maggy_ratio_outfile_affix=''):
'''
Calculates rest-frame maggy ratios i.e. (obs_maggy/rest_maggy),
and saves the maggy ratio values in a csv file with 3 space delimited columns, of headers:
ID rest_z maggy_ratio
File can be unpacked and used with get_rest_mag function to calculate rest-frame magnitudes.
WARNING: pre-existing file with same name will be over-written.
Parameters
----------
ID_list: np.ndarray
ID of each data point (galaxy)
obs_maggies_file_path : str
path of '.csv' file with the observed maggies and respective inverse variance values. File can be obtained from the get_obs_maggies_file function
rest_maggies_file_path : str
path of '.csv' file with the reconstructed maggies at redshift zero. File can be obtained from the get_rec_maggies_files function by setting rec_z_list to np.array([0.0])
band_index : int
band number of required maggy ratio (e.g. 3 for r maggy in ugriz bands)
rest_maggy_ratio_outfile_affix : str
output file identifier - rest-frame maggy ratios will be saved in 'rest_maggy_ratios_[identifier].csv'
'''
obs_maggies_table = np.genfromtxt(obs_maggies_file_path, delimiter=' ')
rest_maggies_table = np.genfromtxt(rest_maggies_file_path, delimiter=' ')
rest_z_list = rest_maggies_table[:, 0]
obs_maggies_list = obs_maggies_table[:, band_index]
rest_maggies_list = rest_maggies_table[:, band_index]
rest_maggy_ratios_list = obs_maggies_list / rest_maggies_list
rest_maggy_ratio_outfile_name = 'rest_maggy_ratios_' + rest_maggy_ratio_outfile_affix + '.csv'
rest_maggy_ratios_table = np.column_stack(
(ID_list, rest_z_list, rest_maggy_ratios_list))
ascii.write(rest_maggy_ratios_table,
rest_maggy_ratio_outfile_name,
overwrite=True,
names=['ID', 'rest_z', 'maggy_ratio'])
print('\t' + rest_maggy_ratio_outfile_name + ' created.')
def get_rest_mag(redshift_list: np.ndarray,
app_mag_list: np.ndarray,
maggy_ratio_list: np.ndarray) -> np.ndarray:
"""
Converts apparent magnitudes into rest-frame magnitudes.
It uses the apparent magnitudes, redshifts and maggy ratios.
Parameters
----------
redshift_list : np.ndarray
redshift of each data point (galaxy)
app_mag_list : np.ndarray
all corresponding apparent magnitudes
maggy_ratio_list : np.ndarray
all corresponding maggy ratios
Returns
-------
np.ndarray
all corresponding rest-frame magnitudes
"""
# calculate luminosity distance
lum_dist_list = cosmo.luminosity_distance(redshift_list).value
print('\tLuminosity distance calculated.')
# calculate abs mag
abs_mag_list = app_mag_list - (5 * np.log10(lum_dist_list)) - 25
print('\tAbsolute magnitude calculated.')
# calculate K corrections
Kcorr_list = -2.5 * np.log10(maggy_ratio_list)
print('\tK-corrections calculated.')
# calculate rest mag
rest_mag_list = abs_mag_list - Kcorr_list
print('\tRest-frame magnitude calculated.')
return rest_mag_list
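# Example sketch (illustrative numbers only): rest-frame magnitudes for three galaxies
# from their redshifts, apparent magnitudes and hypothetical rest-frame maggy ratios
# (the ratios would normally come from get_rest_maggy_ratio_file).
def _example_get_rest_mag():
    redshifts = np.array([0.1, 0.2, 0.3])
    app_mags = np.array([18.2, 19.1, 19.8])
    maggy_ratios = np.array([0.95, 0.90, 0.85])
    return get_rest_mag(redshifts, app_mags, maggy_ratios)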
def get_maggy_ratio_file(ID_list: np.ndarray,
rest_maggies_file_path: str,
rec_maggies_file_path: str,
rec_z: float,
band_index: int,
maggy_ratio_outfile_affix=''):
'''
Calculates reconstructed maggy ratios i.e. (rec_maggy/rest_maggy),
and saves the maggy ratio values in a csv file with 3 space delimited columns, of headers:
ID rec_z maggy_ratio
WARNING: pre-existing file with same name will be over-written.
Parameters
----------
ID_list: np.ndarray
ID of each data point (galaxy)
rest_maggies_file_path : str
path of '.csv' file with the reconstructed maggies at redshift zero. File can be obtained from the get_rec_maggies_files function by setting rec_z_list to np.array([0.0])
rec_maggies_file_path : str
path of '.csv' file with the reconstructed maggies at required reconstruction redshift (rec_z). File can be obtained from the get_rec_maggies_files function by setting rec_z_list to np.array([rec_z])
rec_z : float
redshift value where maggies have been reconstructed at
band_index : int
band number of required maggy ratio (e.g. 3 for r maggy in ugriz bands)
maggy_ratio_outfile_affix : str
output file identifier - maggy ratios will be saved in 'maggy_ratios_at_z[redshift-value]_[identifier].csv'
'''
rec_maggies_table = np.genfromtxt(rec_maggies_file_path, delimiter=' ')
rest_maggies_table = np.genfromtxt(rest_maggies_file_path, delimiter=' ')
rec_z_list = rec_maggies_table[:, 0]
rec_maggies_list = rec_maggies_table[:, band_index]
rest_maggies_list = rest_maggies_table[:, band_index]
maggy_ratios_list = rec_maggies_list / rest_maggies_list
maggy_ratio_outfile_name = 'maggy_ratios_at_z' + str(rec_z) + '_' + maggy_ratio_outfile_affix + '.csv'
maggy_ratios_table = np.column_stack(
(ID_list, rec_z_list, maggy_ratios_list))
ascii.write(maggy_ratios_table,
maggy_ratio_outfile_name,
overwrite=True,
names=['ID', 'rec_z', 'maggy_ratio'])
print('\t' + maggy_ratio_outfile_name + ' saved.')
def get_all_maggy_ratios_file(rec_z_list: np.ndarray,
ID_list: np.ndarray,
band_index: int,
maggies_and_out_files_affix=''):
'''
Calculates reconstructed maggy ratios i.e. (rec_maggy/rest_maggy)
and saves the maggy ratio values at each redshift value in rec_z_list
in a separate csv file with 3 space delimited columns, of headers:
ID rec_z maggy_ratio
Finally, consolidates all maggy ratios by joining the above files in the order of rec_z_list
in a single csv file with 3 space delimited columns, of headers:
ID rec_z maggy_ratio
File with all maggy ratios can be used to calculate z-max.
WARNING: pre-existing file with same name will be over-written.
Parameters
----------
rec_z_list : np.ndarray
redshift values where maggies have been reconstructed at - array must have 0.0 redshift value at index 0
ID_list : np.ndarray
ID of each data point (galaxy)
band_index : int
band number of required maggy ratio (e.g. 3 for r maggy in ugriz bands)
maggies_and_out_files_affix : str
output file identifier - values will be saved in 'maggy_ratios_at_z[redshift-value]_[identifier].csv' and 'all_maggy_ratios_[identifier].csv' - must be the same string as rec_maggies_outfile_affix parameter used in get_rec_maggies_files function
'''
rest_maggies_file_name = 'maggies_at_z' + str(rec_z_list[0]) + '_' + maggies_and_out_files_affix + '.csv'
for rec_z in rec_z_list:
rec_maggies_file_name = 'maggies_at_z' + str(rec_z) + '_' + maggies_and_out_files_affix + '.csv'
get_maggy_ratio_file(ID_list,
rest_maggies_file_name,
rec_maggies_file_name,
rec_z,
band_index,
maggy_ratio_outfile_affix=maggies_and_out_files_affix)
print('\tMaggy ratios calculated at all redshifts in input array rec_z_list.')
all_maggy_ratios_outfile_name = 'all_maggy_ratios_' + maggies_and_out_files_affix + '.csv'
rest_maggy_ratio_file_name = 'maggy_ratios_at_z' + str(rec_z_list[0]) + '_' + maggies_and_out_files_affix + '.csv'
all_maggy_ratios_file = open(all_maggy_ratios_outfile_name, 'w')
# first file:
for line in open(rest_maggy_ratio_file_name):
all_maggy_ratios_file.write(line)
# now the rest:
for i in range(len(rec_z_list) - 1):
maggy_ratio_file_name = 'maggy_ratios_at_z' + str(rec_z_list[i + 1]) + '_' + maggies_and_out_files_affix + '.csv'
maggy_ratio_file = open(maggy_ratio_file_name)
maggy_ratio_lines = maggy_ratio_file.readlines()[1:] # skip the header
for line in maggy_ratio_lines:
all_maggy_ratios_file.write(line)
maggy_ratio_file.close()
all_maggy_ratios_file.close()
print('\tAll maggy ratios consolidated in file ' + all_maggy_ratios_outfile_name + '.')
def get_volume(survey_area: float,
redshift_list: np.ndarray) -> np.ndarray:
"""
Returns comoving volume of input survey area and redshift.
Parameters
----------
survey_area : float
survey area in sq. deg.
redshift_list : np.ndarray
redshift of each data point (galaxy)
Returns
-------
np.ndarray
all corresponding comoving volumes
"""
# calculate comoving distance
com_dist_list = cosmo.comoving_distance(redshift_list).value
print('\tComoving distance calculated.')
# convert survey area to steradian
survey_steradian = survey_area * ((np.pi / 180.)**2)
print('\tSurvey area converted.')
# calculate comoving volume
vol_list = (com_dist_list**3) * (survey_steradian / 3)
print('\tComoving volume calculated.')
return vol_list
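# Worked example (illustrative values): a survey area of 180 sq. deg. corresponds to a
# solid angle of 180 * (pi/180)**2 ~ 0.0548 sr, so a galaxy at comoving distance d (Mpc)
# encloses a comoving volume of roughly 0.0548 * d**3 / 3 Mpc^3, e.g.
#
#   vols = get_volume(180.0, np.array([0.1, 0.2]))   # ~ (Omega / 3) * d(z)**3 for each z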
def get_binned_phi(rest_mag_list: np.ndarray,
Vmax_list: np.ndarray,
n_mag_bins: int) -> np.ndarray:
"""
    Bins and weights galaxy counts per magnitude, implementing the 1/Vmax estimator.
Returns phi using rest-frame magnitude, maximum observed volume and the number of bins.
Parameters
----------
rest_mag_list : np.ndarray
rest-frame magnitude of each data point (galaxy)
Vmax_list : np.ndarray
all corresponding maximum volumes
n_mag_bins: int
number of magnitude bins required
Returns
-------
np.ndarray
mid-magnitude (i.e. x) value of each bin
np.ndarray
magnitude-width/2 (i.e. x-error) value of each bin
np.ndarray
phi (i.e. y) value of each bin (with h = 0.7)
"""
    # get bin_edges for dividing the rest_mags into n_mag_bins
counts, bin_edges = np.histogram(rest_mag_list, bins=n_mag_bins)
# sort rest_mag and Vmax lists per increasing mag
sorted_index = np.argsort(rest_mag_list)
sorted_Vmax_list = np.array(Vmax_list)[sorted_index]
sorted_rest_mag_list = np.sort(rest_mag_list)
# create empty lists for mid_M, phi and M_err
mid_M_list = np.empty(n_mag_bins)
M_err_list = np.empty(n_mag_bins)
phi_list = np.empty(n_mag_bins)
# loop over each bin
for i in range(n_mag_bins):
# find min and max M of bin
max_M = bin_edges[i + 1]
min_M = bin_edges[i]
# add mid_M to list
mid_M_list[i] = (min_M + max_M) / 2
        # add M_err to list (half the bin width; assumes absolute, i.e. negative, magnitudes)
        M_err_list[i] = (abs(min_M) - abs(max_M)) / 2
        # find indices up to max_M
up_lim_indices = np.where(sorted_rest_mag_list <= max_M)[0]
# limit M and Vmax corresponding to max_M
up_lim_rest_mag_list = sorted_rest_mag_list[up_lim_indices]
up_lim_Vmax_list = sorted_Vmax_list[up_lim_indices]
        # find indices from min_M to max_M value of bin
if i != 0:
lim_indices = np.where(up_lim_rest_mag_list > min_M)[0]
else:
lim_indices = np.where(up_lim_rest_mag_list >= min_M)[0]
# limit Vmax corresponding from min_M to max_M
Vmax_values = up_lim_Vmax_list[lim_indices]
# calculate 1/Vmax
phi_values = np.reciprocal(Vmax_values)
# sum 1/Vmax all in this bin
phi = sum(phi_values)
# convert 1/Vmax to phi and add to list
h = 0.7
phi_list[i] = phi * ((h)**3) / M_err_list[i]
return mid_M_list, M_err_list, phi_list
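# A minimal sketch of the 1/Vmax estimator on toy data (all values illustrative):
#
#   M = np.array([-22.3, -21.1, -20.5, -19.8])      # rest-frame magnitudes
#   Vmax = np.array([4.0e6, 2.5e6, 1.1e6, 0.4e6])    # maximum volumes in Mpc^3
#   mid_M, M_err, phi = get_binned_phi(M, Vmax, n_mag_bins=2)
#   # each phi value is sum(1/Vmax) over the bin, scaled by h^3 and divided by
#   # M_err (the half bin-width computed above), giving a density per magnitude.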
def get_patch_centers(uniform_random_RA_list: np.ndarray,
uniform_random_DEC_list: np.ndarray,
n_patches: int,
survey='kids',
max_iterations=int(100),
tolerance=1.0e-5,
patch_centers_outfile_affix=''):
"""
Divides the input uniform random survey into equally distributed and equally sized patches.
Calculates n_patches centers [RA,Dec] from RA, Dec and number of patches and saves in a csv file
with 2 space delimited columns (without headers):
RA Dec
Function does not overwrite any existing file with the same name. File need not be updated with every run.
Parameters
----------
uniform_random_RA_list : np.ndarray
RA values of each data point (galaxy) in a uniform random catalogue
uniform_random_DEC_list : np.ndarray
all corresponding Dec values in the uniform random catalogue
n_patches : int
number of equal survey area patches required
survey : str, optional
survey name - only change if survey area covers/connects over 320 degrees RA and does not connect over 360 to 0 degrees RA
max_iterations : int, optional
maximum number of iterations to run
tolerance : float, optional
relative change in the average distance to centers, signifies convergence
patch_centers_outfile_affix : str
output file identifier - values will be saved in 'patch_centers_tol[tolerance]_[identifier].csv'
"""
    # MAKE SURE ALL PATCHES ARE STITCHED ON SKY
# works for most surveys - GAMA, KiDS - check rest
if survey == 'kids':
corrected_uniform_random_RA_list = np.where(
uniform_random_RA_list > 320., uniform_random_RA_list - 360.,
uniform_random_RA_list)
# use if a survey patch covers/connects over 320 degrees RA
# and does not connect over 360 to 0 degree RA
if survey != 'kids':
corrected_uniform_random_RA_list = uniform_random_RA_list
# STACK RA AND DEC AS uniform_random_X
uniform_random_X = np.column_stack(
(corrected_uniform_random_RA_list, uniform_random_DEC_list))
# DIVIDE uniform_random_X INTO EQUAL n_patches
uniform_random_km = kmeans_sample(uniform_random_X,
n_patches,
maxiter=max_iterations,
tol=tolerance)
center_guesses = uniform_random_km.centers
ra_guesses = center_guesses[:, 0]
dec_guesses = center_guesses[:, 1]
centers_table = np.column_stack((ra_guesses, dec_guesses))
patch_centers_outfile_name = 'patch_centers_tol' + str(tolerance) + '_' + patch_centers_outfile_affix + '.csv'
ascii.write(centers_table,
patch_centers_outfile_name,
overwrite=False,
format='no_header')
print('Patch center guesses saved in '+ patch_centers_outfile_name)
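# Illustrative call (assumes kmeans_sample from the kmeans_radec package is imported at
# the top of this module, which is not shown here):
#
#   get_patch_centers(uniform_random_RA, uniform_random_DEC, n_patches=10,
#                     survey='kids', tolerance=0.01,
#                     patch_centers_outfile_affix='test')
#   # -> writes 'patch_centers_tol0.01_test.csv' with one "RA Dec" row per patch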
def get_patch_labels(RA_list: np.ndarray,
DEC_list: np.ndarray,
n_patches: int,
patch_centers_file_path: str,
survey='kids',
numba_installed=True,
plot_savename='none') -> np.ndarray:
"""
Divides survey into equally distributed and equally sized patches. Returns labels for patches from RA, Dec, number of patches and patch center guesses file.
WARNING: does not display plot, must specify plot_savename parameter to save plot
Parameters
----------
RA_list : np.ndarray
RA values of each data point (galaxy)
DEC_list : np.ndarray
all corresponding Dec values
n_patches : int
number of equal survey area patches required
patch_centers_file_path : str
path of '.csv' file with (n_patches x 2) patch center guesses (RA, Dec). File can be obtained from the get_patch_centers function
survey : str, optional
survey name - only change if survey area covers/connects over 320 degrees RA and does not connect over 360 to 0 degrees RA
numba_installed : bool, optional
mark as False if numba is not installed
plot_savename : str, optional
name and extension to save plot as, plot will not be saved if not changed
Returns
-------
np.ndarray
array of patch assignment label for each data point
"""
# MAKE SURE ALL PATCHES ARE STITCHED ON SKY
# works for most surveys - GAMA, KiDS - check rest
if survey == 'kids':
corrected_RA_list = np.where(RA_list > 320., RA_list - 360., RA_list)
# use if a survey patch covers/connects over 320 degrees RA
# and does not connect over 360 to 0 degree RA
if survey != 'kids':
corrected_RA_list = RA_list
# STACK RA AND DEC AS X
X = np.column_stack((corrected_RA_list, DEC_list))
#UNPACK PATCH CENTER GUESSES
centers_table = np.genfromtxt(patch_centers_file_path, delimiter=' ')
ra_guesses = centers_table[ : , 0]
dec_guesses = centers_table[ : , 1]
center_guesses = np.column_stack((ra_guesses, dec_guesses))
# FIND LABELS TO DIVIDE X INTO EQUAL n_patches
if numba_installed:
km = KMeans(center_guesses, method='fast')
else:
km = KMeans(center_guesses)
labels = km.find_nearest(X)
# VISUALISE ON PLOT
if plot_savename != 'none':
colors = cm.tab20(np.linspace(0, 1, n_patches))
plt.figure(figsize=(10, 10))
plt.suptitle("Galaxy Patches", fontsize=20)
# get patch counts on histogram
plt.subplot(211)
plt.grid(True)
N, b, p = plt.hist(labels, bins=n_patches)
for n in range(n_patches):
p[n].set_facecolor(colors[n])
plt.xlabel("Label", fontsize=20)
plt.ylabel("Count", fontsize=20)
# get patches on sky
plt.subplot(212)
plt.grid(True)
for n in range(n_patches):
subset_indices = np.where(labels == n)
plt.scatter(corrected_RA_list[subset_indices],
DEC_list[subset_indices],
color=colors[n],
s=1)
# if 'gama' in datasetname:
# plt.xlim(120, 240)
# plt.ylim(-10, 10)
# if 'kids' in datasetname:
# plt.xlim(-50, 250)
# plt.ylim(-40, 10)
plt.xlabel("RA(J2000)/ deg", fontsize=20)
plt.ylabel("Dec(J2000)/ deg", fontsize=20)
plt.savefig(plot_savename, dpi=300)
return labels
def get_binned_phi_error(rest_mag_list: np.ndarray,
Vmax_list: np.ndarray,
labels: np.ndarray,
n_patches: int,
n_mag_bins: int) -> np.ndarray:
"""
Spatial variance on galaxy number density per magnitude.
Returns error on phi from rest-frame magnitude, maximum observed volume, labels, number of patches and number of bins.
Parameters
----------
rest_mag_list : np.ndarray
rest-frame magnitude of each data point (galaxy)
Vmax_list : np.ndarray
all corresponding maximum volumes
labels : np.ndarray
all corresponding survey patch assignment labels
n_patches : int
number of equal survey area patches required
n_mag_bins : int
number of magnitude bins required
Returns
-------
np.ndarray
phi error (i.e. y-error) value of each bin
"""
# GET PHI VALUES USING ONLY VALUES IN EACH PATCH
patch_phis = []
for n in range(n_patches):
patch_indices = np.where(labels == n)
patch_M = rest_mag_list[patch_indices]
patch_Vmax = Vmax_list[patch_indices] / n_patches
mid_M_list, M_err_list, phi_list = get_binned_phi(
patch_M, patch_Vmax, n_mag_bins)
patch_phis.append(phi_list)
# STANDARD ERRORS ON PHI VALUES BETWEEN EACH PATCH
phi_err_list = np.std(patch_phis, axis=0)
return phi_err_list
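# Note: the error bars are the standard deviation of the per-patch phi estimates,
# e.g. for 2 patches and 2 magnitude bins (illustrative numbers only):
#
#   patch_phis = [np.array([1.0e-3, 2.0e-3]), np.array([1.2e-3, 1.6e-3])]
#   np.std(patch_phis, axis=0)   # -> array([1.0e-4, 2.0e-4])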
def get_plot(rest_mag_list: np.ndarray,
Vmax_list: np.ndarray,
n_mag_bins: int,
RA_list: np.ndarray,
DEC_list: np.ndarray,
n_patches: int,
patch_centers_file_path: str,
survey='kids',
numba_installed=True,
plot_savename='none') -> np.ndarray:
"""
Plots the 1/Vmax weighted luminosity function from data, binned by magnitude.
WARNING: does not display plot, must specify plot_savename parameter to save plot
Parameters
----------
rest_mag_list : np.ndarray
rest-frame magnitude of each data point (galaxy)
Vmax_list : np.ndarray
all corresponding maximum volumes
n_mag_bins : int
number of magnitude bins required
RA_list : np.ndarray
all corresponding RA values
DEC_list : np.ndarray
all corresponding Dec values
n_patches : int
number of equal survey area patches required
patch_centers_file_path : str
path of '.csv' file with (n_patches x 2) patch center guesses (RA, Dec). File can be obtained from the get_patch_centers function
survey : str, optional
survey name - only change if survey area covers/connects over 320 degrees RA and does not connect over 360 to 0 degrees RA
numba_installed : bool, optional
mark as False if numba is not installed
plot_savename : str, optional
name and extension to save plot as, plot will not be saved if not changed
Returns
-------
np.ndarray
mid-magnitude (i.e. x) value of each bin
np.ndarray
magnitude-width/2 (i.e. x-error) value of each bin
np.ndarray
phi (i.e. y) value of each bin (with h = 0.7)
np.ndarray
phi error (i.e. y-error) value of each bin
"""
# phi
M_list, M_err_list, phi_list = get_binned_phi(rest_mag_list, Vmax_list, n_mag_bins)
# patches
labels = get_patch_labels(RA_list, DEC_list, n_patches, patch_centers_file_path, survey, numba_installed)
# phi errors
phi_err_list = get_binned_phi_error(rest_mag_list, Vmax_list, labels, n_patches, n_mag_bins)
if plot_savename != 'none':
plt.figure(figsize=(10, 10))
# plot data
plt.errorbar(M_list,
phi_list,
xerr=M_err_list,
yerr=phi_err_list,
fmt='gx',
mec='k',
label='galaxies:' + str(len(rest_mag_list)))
plt.yscale('log')
# plt.xlim(-26,-12)
# plt.ylim(1e-8,0.9)
plt.xlabel("rest-frame magnitude/ $(M_{r})_{cal}$/ mag", fontsize=20)
        plt.ylabel(
            r"number density / $\Phi (M_{r})/ h_{70}^{3}Mpc^{-3}mag^{-1}$",
            fontsize=20)
# plt.title(plot_savename, fontsize=20)
plt.grid(True)
plt.legend(loc='upper left')
plt.savefig(plot_savename, dpi=300)
return M_list, M_err_list, phi_list, phi_err_list
def filter_plot_by_colour(dichotomy_slope: float,
dichotomy_intercept: float,
rest_mag_list: np.ndarray,
higher_band_rest_mag_list: np.ndarray,
Vmax_list: np.ndarray,
n_mag_bins: int,
RA_list: np.ndarray,
DEC_list: np.ndarray,
n_patches: int,
patch_centers_file_path: str,
survey='kids',
numba_installed=True,
plot_savename='none') -> np.ndarray:
"""
Plots the 1/Vmax weighted luminosity function from data, binned by magnitude and filtered by galaxy colours. The galaxy colours are filtered by red and blue with the help of the input colour dichotomy line parameters. The colour dichotomy line parameters can be inferred from a CMD plot.
WARNING: does not display plot, must specify plot_savename parameter to save plot
Parameters
----------
dichotomy_slope : float
slope of the colour dichotomy line
dichotomy_intercept : float
intercept of the colour dichotomy line
rest_mag_list : np.ndarray
rest-frame magnitude of each data point (galaxy)
higher_band_rest_mag_list : np.ndarray
rest-frame magnitudes of each data point (galaxy) from a higher wavelength band
Vmax_list : np.ndarray
all corresponding maximum volumes
n_mag_bins : int
number of magnitude bins required
RA_list : np.ndarray
        all corresponding RA values
DEC_list : np.ndarray
all corresponding Dec values
n_patches : int
number of patches required
patch_centers_file_path : str
path of '.csv' file with (n_patches x 2) patch center guesses (RA, Dec). File can be obtained from the get_patch_centers function
survey : str, optional
survey name - only change if survey area covers/connects over 320 degrees RA and does not connect over 360 to 0 degrees RA
numba_installed : bool, optional
mark as False if numba is not installed
plot_savename : str, optional
name and extension to save plot as, plot will not be saved if not changed
Returns
-------
np.ndarray
all galaxies' LF's mid-magnitude (i.e. x) value of each bin
np.ndarray
all galaxies' LF's magnitude-width/2 (i.e. x-error) value of each bin
np.ndarray
all galaxies' LF's phi (i.e. y) value of each bin (with h = 0.7)
np.ndarray
all galaxies' LF's phi error (i.e. y-error) value of each bin
np.ndarray
red galaxies' LF's mid-magnitude (i.e. x) value of each bin
np.ndarray
red galaxies' LF's magnitude-width/2 (i.e. x-error) value of each bin
np.ndarray
red galaxies' LF's phi (i.e. y) value of each bin (with h = 0.7)
np.ndarray
red galaxies' LF's phi error (i.e. y-error) value of each bin
np.ndarray
blue galaxies' LF's mid-magnitude (i.e. x) value of each bin
np.ndarray
blue galaxies' LF's magnitude-width/2 (i.e. x-error) value of each bin
np.ndarray
blue galaxies' LF's phi (i.e. y) value of each bin (with h = 0.7)
np.ndarray
blue galaxies' LF's phi error (i.e. y-error) value of each bin
"""
colour_mag_list = higher_band_rest_mag_list - rest_mag_list
dichotomy_line = dichotomy_slope * rest_mag_list + dichotomy_intercept
red_index = np.where(colour_mag_list >= dichotomy_line)[0]
blue_index = np.where(colour_mag_list < dichotomy_line)[0]
# all
M_list, M_err_list, phi_list, phi_err_list = get_plot(
rest_mag_list, Vmax_list, n_mag_bins, RA_list, DEC_list, n_patches,
patch_centers_file_path, survey, numba_installed)
# red
red_M_list, red_M_err_list, red_phi_list, red_phi_err_list = get_plot(
rest_mag_list[red_index], Vmax_list[red_index], n_mag_bins,
RA_list[red_index], DEC_list[red_index], n_patches, patch_centers_file_path,
survey, numba_installed)
# blue
blue_M_list, blue_M_err_list, blue_phi_list, blue_phi_err_list = get_plot(
rest_mag_list[blue_index], Vmax_list[blue_index], n_mag_bins,
RA_list[blue_index], DEC_list[blue_index], n_patches, patch_centers_file_path,
survey, numba_installed)
if plot_savename != 'none':
plt.figure(figsize=(10, 10))
# plot all data
plt.errorbar(M_list,
phi_list,
xerr=M_err_list,
yerr=phi_err_list,
fmt='gx',
mec='k',
label='all:' + str(len(rest_mag_list)))
# plot red data
plt.errorbar(red_M_list,
red_phi_list,
xerr=red_M_err_list,
yerr=red_phi_err_list,
fmt='rx',
mec='k',
label='red:' + str(len(rest_mag_list[red_index])))
# plot blue data
plt.errorbar(blue_M_list,
blue_phi_list,
xerr=blue_M_err_list,
yerr=blue_phi_err_list,
fmt='bx',
mec='k',
label='blue:' + str(len(rest_mag_list[blue_index])))
plt.yscale('log')
# plt.xlim(-26,-12)
# plt.ylim(1e-8,0.9)
plt.xlabel("rest-frame r-magnitude/ $(M_{r})_{cal}$/ mag", fontsize=20)
        plt.ylabel(
            r"number density / $\Phi (M_{r})/ h_{70}^{3}Mpc^{-3}mag^{-1}$",
            fontsize=20)
# plt.title(plot_savename, fontsize=20)
plt.grid(True)
plt.legend(loc='upper left')
plt.savefig(plot_savename, dpi=300)
return M_list, M_err_list, phi_list, phi_err_list, red_M_list, red_M_err_list, red_phi_list, red_phi_err_list, blue_M_list, blue_M_err_list, blue_phi_list, blue_phi_err_list
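# Sketch of the colour split used above (illustrative): a galaxy is flagged red when
# its colour lies on or above the dichotomy line, blue otherwise.
#
#   colour = higher_band_rest_mag - rest_mag                 # e.g. (g - r)
#   line = dichotomy_slope * rest_mag + dichotomy_intercept
#   is_red = colour >= line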
def SchechterMagModel(M_list: np.ndarray,
M_star: float,
phi_star: float,
alpha: float) -> np.ndarray:
"""
Single Schechter luminosity function in terms of magnitude from 3 free parameters of the model.
Parameters
----------
M_list : np.ndarray
array of magnitudes (i.e. x)
M_star : float
model parameter M_star
phi_star : float
model parameter phi_star
alpha : float
model parameter alpha
Returns
-------
np.ndarray
array of Schechter modelled phi (i.e. y)
"""
# FACTOR
factor = (2 / 5) * np.log(10)
# POWER
Mstar_Mlist = M_star - M_list
power = (2 / 5) * Mstar_Mlist
# PART 1
power1 = -10**(power)
part1 = np.exp(power1)
# PART 2
index = alpha + 1
power2 = power * index
part2 = phi_star * 10**(power2)
# PHI(M)
phi_list = factor * part1 * part2
return phi_list
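# Quick sanity check (follows directly from the expression above): at M = M_star the
# exponent 0.4*(M_star - M) vanishes, so
#
#   SchechterMagModel(np.array([M_star]), M_star, phi_star, alpha)
#   # -> 0.4 * ln(10) * phi_star * exp(-1), independent of alpha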
def DoubleSchechterMagModel(M_list: np.ndarray,
M_star: float,
phi_star1: float,
alpha1: float,
phi_star2: float,
alpha2: float) -> np.ndarray:
"""
Double Schechter luminosity function in terms of magnitude from 5 free parameters of the model.
Parameters
----------
M_list : np.ndarray
array of magnitudes (i.e. x)
M_star : float
model parameter M_star
phi_star1 : float
model parameter phi_star1
alpha1 : float
model parameter alpha1
phi_star2 : float
model parameter phi_star2
alpha2 : float
model parameter alpha2
Returns
-------
np.ndarray
array of Double Schechter modelled phi (i.e. y)
"""
# FACTOR
factor = (2 / 5) * np.log(10)
# POWER
Mstar_Mlist = M_star - M_list
power = (2 / 5) * Mstar_Mlist
# PART 1
power1 = -10**(power)
    part1 = np.exp(power1)
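    # NOTE: assumed completion of the truncated function body, mirroring
    # SchechterMagModel above (verify against the original source).
    # PART 2
    index1 = alpha1 + 1
    index2 = alpha2 + 1
    part2 = phi_star1 * 10**(power * index1) + phi_star2 * 10**(power * index2)
    # PHI(M)
    phi_list = factor * part1 * part2
    return phi_list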
from compression_tools.pruning.helper_functions import get_layer_index
import tensorflow as tf
import numpy as np
def delete_conv2d_output(new_model_param, layer_bias, index, filter, soft_prune=False):
if not soft_prune:
new_model_param[index][0] = np.delete(new_model_param[index][0], filter, axis=3)
if layer_bias:
new_model_param[index][1] = np.delete(new_model_param[index][1], filter, axis=0)
else:
new_model_param[index][0][:, :, :, filter] = float(0)
if layer_bias:
new_model_param[index][1][filter] = float(0)
def delete_conv2d_intput(new_model_param, index, filter, soft_prune=False):
if not soft_prune:
new_model_param[index][0] = np.delete(new_model_param[index][0], filter, axis=2)
# else:
# initializer = tf.keras.initializers.GlorotNormal(seed=None)
# new_model_param[index][0][:, :, filter, :] = initializer(
# shape=new_model_param[index][0][:, :, filter, :].shape)
def delete_dense_input(new_model_param, index, filter, soft_prune=False):
if not soft_prune:
new_model_param[index][0] = np.delete(new_model_param[index][0], filter, axis=0)
# else:
# initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
# new_model_param[index][0][filter, :] = initializer(
# shape=new_model_param[index][0][filter, :].shape)
def delete_dense_output(new_model_param, index, filter, layer_bias, soft_prune=False):
if not soft_prune:
new_model_param[index][0] = np.delete(new_model_param[index][0], filter, axis=1)
if layer_bias:
new_model_param[index][1] = np.delete(new_model_param[index][1], filter, axis=0)
# else:
# initializer = tf.keras.initializers.RandomNormal(stddev=0.01)
# new_model_param[index][0][:, filter] = initializer(
# shape=new_model_param[index][0][:, filter].shape)
# if layer_bias:
# new_model_param[index][1] = 0
def delete_bn_output(new_model_param, index, filter, soft_prune=False):
if not soft_prune:
new_model_param[index][0] = np.delete(new_model_param[index][0], filter)
new_model_param[index][1] = np.delete(new_model_param[index][1], filter)
new_model_param[index][2] = np.delete(new_model_param[index][2], filter)
new_model_param[index][3] = np.delete(new_model_param[index][3], filter)
# else:
# new_model_param[index][0][filter] = float(1)
# new_model_param[index][1][filter] = float(0)
# new_model_param[index][2][filter] = float(0)
# new_model_param[index][3][filter] = float(1)
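# The helpers above all rely on np.delete to drop whole channels from a weight tensor;
# a minimal sketch of the convention (shapes are illustrative):
#
#   kernel = np.zeros((3, 3, 16, 32))            # Conv2D kernel (h, w, in, out)
#   pruned = np.delete(kernel, [4, 7], axis=3)   # drop output channels 4 and 7
#   pruned.shape                                 # -> (3, 3, 16, 30)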
def get_down_left_layer(layer_index_dic, layer_index):
left_layer = layer_index_dic[layer_index].outbound_nodes[0].layer
return left_layer
def get_down_right_layer(layer_index_dic, layer_index):
right_layer = layer_index_dic[layer_index].outbound_nodes[1].layer
return right_layer
def get_up_layers(layer_index_dic, layer_index):
up_layers = layer_index_dic[layer_index].inbound_nodes[0].inbound_layers
return up_layers
def get_up_left_layer(layer_index_dic, layer_index):
left_layer = layer_index_dic[layer_index].inbound_nodes[0].inbound_layers[1]
return left_layer
def get_up_right_layer(layer_index_dic, layer_index):
right_layer = layer_index_dic[layer_index].inbound_nodes[0].inbound_layers[0]
return right_layer
def up_delete_until_conv2D(layer_index_dic, new_model_param, layer_index, filter, soft_prune=False):
layer = layer_index_dic[layer_index]
while(not isinstance(layer, tf.compat.v1.keras.layers.Conv2D)
or isinstance(layer, tf.keras.layers.DepthwiseConv2D)):
if isinstance(layer, tf.keras.layers.BatchNormalization):
left_end_index = get_layer_index(layer_index_dic, layer)
            if(new_model_param[left_end_index][0].shape[0] == layer.output_shape[3]):
delete_bn_output(new_model_param, left_end_index, filter, soft_prune)
elif isinstance(layer, tf.keras.layers.DepthwiseConv2D):
left_end_index = get_layer_index(layer_index_dic, layer)
delete_conv2d_intput(new_model_param, left_end_index, filter, soft_prune)
new_model_param[left_end_index][1] = np.delete(new_model_param[left_end_index][1], filter, axis=0)
layer = layer.inbound_nodes[0].inbound_layers
conv_end_index = get_layer_index(layer_index_dic, layer)
layer_bias = layer.use_bias
delete_conv2d_output(new_model_param, layer_bias, conv_end_index, filter, soft_prune)
def delete_filter_after(
new_model_param,
layer_index,
layer_index_dic,
filter,
soft_prune=False,
layer_type="conv2D"):
    ''' Delete channel parameters related to the pruned channels.
    Args:
        new_model_param: (float list) weights or parameters in each layer
        layer_index: (int) index of the layer to be pruned
        layer_index_dic: (dictionary) {layer_index: layer}
        filter: indices of the pruned channels
        soft_prune: whether or not to use the soft-prune strategy
            (zero out weights instead of deleting them)
    '''
current_layer = layer_index_dic[layer_index]
layer_bias = current_layer.use_bias
if layer_type == "Dense":
following_layer = current_layer.outbound_nodes[0].layer
if isinstance(following_layer, tf.keras.layers.Dense):
following_index = get_layer_index(layer_index_dic, following_layer)
delete_dense_output(new_model_param, layer_index, filter, layer_bias, soft_prune)
delete_dense_input(new_model_param, following_index, filter, soft_prune)
return new_model_param
else:
return new_model_param
elif layer_type == "conv2D":
while(not isinstance(current_layer.outbound_nodes[0].layer, tf.keras.layers.Flatten) and\
not isinstance(current_layer.outbound_nodes[0].layer, tf.keras.layers.Dense) and\
not isinstance(current_layer.outbound_nodes[0].layer, tf.keras.layers.AveragePooling2D)):
following_layer = current_layer.outbound_nodes[0].layer
following_index = get_layer_index(layer_index_dic, following_layer)
if isinstance(following_layer, tf.keras.layers.BatchNormalization):
if(new_model_param[following_index][0].shape[0] == following_layer.output_shape[3]):
delete_bn_output(new_model_param, following_index, filter, soft_prune)
# The conv2D layer is before a branch
if(len(following_layer.outbound_nodes) == 2 and
len(following_layer.inbound_nodes[0].flat_input_ids) == 1):
branch_layer_index = following_index
# Delete the output channels of current Conv2D
delete_conv2d_output(new_model_param, layer_bias, layer_index, filter, soft_prune)
# Delete the input channels of following left Conv2D
left_down_conv_layer = get_down_left_layer(layer_index_dic, branch_layer_index)
left_down_conv_layer_index = get_layer_index(layer_index_dic, left_down_conv_layer)
if isinstance(left_down_conv_layer, tf.keras.layers.Conv2D):
delete_conv2d_intput(
new_model_param, left_down_conv_layer_index, filter, soft_prune)
# Delete the input channels of following right Conv2D
# elif isinstance(left_down_conv_layer, tf.keras.layers.GlobalAveragePooling2D):
# dense_layer_index = get_layer_index(
# layer_index_dic, left_down_conv_layer.outbound_nodes[0].layer)
# delete_dense_input(new_model_param, dense_layer_index, filter, soft_prune)
# Delete the input channels of following right Conv2D
right_down_conv_layer = get_down_right_layer(layer_index_dic, branch_layer_index)
right_down_conv_layer_index = get_layer_index(
layer_index_dic, right_down_conv_layer)
if isinstance(right_down_conv_layer, tf.keras.layers.Conv2D):
delete_conv2d_intput(
new_model_param, right_down_conv_layer_index, filter, soft_prune)
# elif isinstance(right_down_conv_layer, tf.keras.layers.GlobalMaxPooling2D):
# dense_layer_index = get_layer_index(
# layer_index_dic, right_down_conv_layer.outbound_nodes[0].layer)
# delete_dense_input(new_model_param, dense_layer_index, filter, soft_prune)
# No Conv layer, direct connect to add layer
add_layer_index = get_layer_index(layer_index_dic, right_down_conv_layer)
if(not isinstance(left_down_conv_layer, tf.keras.layers.Conv2D)):
add_layer_index = get_layer_index(layer_index_dic, left_down_conv_layer)
else:
ct_flag1 = True
while(ct_flag1):
# delete output channels of last conv2D layer on the other branch
if(add_layer_index==right_down_conv_layer_index):
left_end_index = get_layer_index(
layer_index_dic,
right_down_conv_layer.inbound_nodes[0].inbound_layers[1])
up_delete_until_conv2D(
layer_index_dic, new_model_param, left_end_index, filter, soft_prune)
else:
right_end_index = get_layer_index(
layer_index_dic,
left_down_conv_layer.inbound_nodes[0].inbound_layers[1])
up_delete_until_conv2D(
layer_index_dic, new_model_param, right_end_index, filter, soft_prune)
act_layer_id = add_layer_index+1
left_down_conv_layer = get_down_left_layer(
layer_index_dic, act_layer_id)
right_down_conv_layer = get_down_right_layer(
layer_index_dic, act_layer_id)
next_add_layer = left_down_conv_layer
if(isinstance(left_down_conv_layer, tf.keras.layers.Conv2D) and\
isinstance(right_down_conv_layer, tf.keras.layers.Conv2D)):
ct_flag1 = False
if(isinstance(left_down_conv_layer, tf.keras.layers.Conv2D)):
left_down_conv_layer_index = get_layer_index(
layer_index_dic, left_down_conv_layer)
delete_conv2d_intput(
new_model_param, left_down_conv_layer_index, filter, soft_prune)
else:
next_add_layer = left_down_conv_layer
if(isinstance(right_down_conv_layer, tf.keras.layers.Conv2D)):
right_down_conv_layer_index = get_layer_index(
layer_index_dic, right_down_conv_layer)
delete_conv2d_intput(
new_model_param, right_down_conv_layer_index, filter, soft_prune)
else:
next_add_layer = left_down_conv_layer
right_down_conv_layer = next_add_layer
return new_model_param
# This conv2D layer is followed by a conv2D
elif isinstance(following_layer, tf.compat.v1.keras.layers.Conv2D):
# # print("This Conv2D is before a Conv2D")
# Delete the input channels of following Conv2D
delete_conv2d_intput(new_model_param, following_index, filter, soft_prune)
if (not isinstance(following_layer, tf.keras.layers.DepthwiseConv2D)):
# Delete the output channels of current Conv2D
delete_conv2d_output(
new_model_param, layer_bias, layer_index, filter, soft_prune)
return new_model_param
else:
new_model_param[following_index][1] = np.delete(new_model_param[following_index][1], filter, axis=0)
# return new_model_param
# This conv2D layer is followed by an Add layer
elif isinstance(following_layer, tf.keras.layers.Add):
add_layer = following_layer
up_layer = get_up_layers(layer_index_dic, layer_index)
# right layer
if len(up_layer.outbound_nodes) == 2:
# # print("This is a conv2D at right up position of a add layer")
# 1. Delete the output channels of current Conv2D
delete_conv2d_output(
new_model_param, layer_bias, layer_index, filter, soft_prune)
final_flag = False
if True:
next_layer = following_layer.outbound_nodes[0].layer
while(len(next_layer.outbound_nodes) != 2):
next_layer = next_layer.outbound_nodes[0].layer
if(isinstance(next_layer, tf.keras.layers.AveragePooling2D)):
end_layer = next_layer
layer_index = get_layer_index(layer_index_dic, end_layer)
while(not isinstance(end_layer, tf.keras.layers.Dense)):
end_layer = end_layer.outbound_nodes[0].layer
dense_layer = end_layer
dense_layer_index = get_layer_index(layer_index_dic, dense_layer)
layer_output = layer_index_dic[dense_layer_index-2]
layer_output_shape = layer_output.output_shape
shape = (layer_output_shape[1]*layer_output_shape[2])
filters = []
channels = layer_output_shape[3]
new_filter = filter[0]
for s in range(shape):
filters = np.concatenate([filters, new_filter])
new_filter = new_filter+channels
filters = [int(i) for i in filters]
delete_dense_input(new_model_param, dense_layer_index, filters, soft_prune)
final_flag = True
break
# return new_model_param
# Start of next block
end_layer = next_layer
continue_flag = True
while(continue_flag):
next_layer_index = get_layer_index(layer_index_dic, end_layer)
if final_flag:
continue_flag = False
before_layer = end_layer
else:
before_layer = end_layer.inbound_nodes[0].inbound_layers
if len(end_layer.outbound_nodes) == 2:
# 1. Delete the input channels of following LEFT Conv2D
left_down_conv_layer = get_down_left_layer(
layer_index_dic, next_layer_index)
if isinstance(
left_down_conv_layer, tf.compat.v1.keras.layers.Conv2D):
left_down_conv_layer_index = get_layer_index(
layer_index_dic, left_down_conv_layer)
delete_conv2d_intput(
new_model_param, left_down_conv_layer_index,
filter, soft_prune)
# 2. RIGHT Conv2D
right_down_conv_layer = get_down_right_layer(
layer_index_dic, next_layer_index)
if isinstance(
right_down_conv_layer, tf.compat.v1.keras.layers.Conv2D):
continue_flag = False
right_down_conv_layer_index = get_layer_index(
layer_index_dic, right_down_conv_layer)
delete_conv2d_intput(
new_model_param, right_down_conv_layer_index,
filter, soft_prune)
if(isinstance(
right_down_conv_layer, tf.compat.v1.keras.layers.Conv2D) and\
isinstance(
left_down_conv_layer, tf.compat.v1.keras.layers.Conv2D)):
continue_flag = False
final_flag = True
while(not isinstance(before_layer, tf.keras.layers.Add)):
before_layer = before_layer.inbound_nodes[0].inbound_layers
# 3. left end output
left_end_index = get_layer_index(
layer_index_dic, before_layer.inbound_nodes[0].inbound_layers[1])
up_delete_until_conv2D(
layer_index_dic, new_model_param,
left_end_index, filter, soft_prune)
# find the next block
if not final_flag:
end_layer = end_layer.outbound_nodes[1].layer
while(len(end_layer.outbound_nodes) != 2):
if(isinstance(end_layer, tf.keras.layers.Dense)):
dense_layer_index = get_layer_index(
layer_index_dic, end_layer)
delete_dense_input(
new_model_param, dense_layer_index, filter, soft_prune)
final_flag = True
fore_layer = end_layer.inbound_nodes[0].inbound_layers
while(not isinstance(fore_layer, tf.keras.layers.Add)):
fore_layer = fore_layer.inbound_nodes[0].inbound_layers
end_layer = fore_layer
break
end_layer = end_layer.outbound_nodes[0].layer
return new_model_param
else:
# # print("LAST LEFT")
# add_layer = following_layer
# next_layer = add_layer.outbound_nodes[0].layer
# while(len(next_layer.outbound_nodes) == 1):
# if isinstance(next_layer, tf.keras.layers.AveragePooling2D):
# dense_layer_index = get_layer_index(layer_index_dic, next_layer)
# # # print("Got a Dense Layer at the end")
# break
# next_layer = next_layer.outbound_nodes[0].layer
return new_model_param
current_layer = following_layer
# # print("Got a Dense Layer at the end")
# Delete the output channels of current Conv2D
if isinstance(current_layer.outbound_nodes[0].layer, tf.keras.layers.AveragePooling2D):
delete_conv2d_output(new_model_param, layer_bias, layer_index, filter, soft_prune)
while(not isinstance(current_layer.outbound_nodes[0].layer, tf.keras.layers.Dense)):
current_layer = current_layer.outbound_nodes[0].layer
dense_layer = current_layer.outbound_nodes[0].layer
dense_layer_index = get_layer_index(layer_index_dic, dense_layer)
layer_output = layer_index_dic[dense_layer_index-2]
layer_output_shape = layer_output.output_shape
shape = (layer_output_shape[1]*layer_output_shape[2])
filters = []
channels = layer_output_shape[3]
new_filter = filter[0]
for s in range(shape):
            filters = np.concatenate([filters, new_filter])
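            # NOTE: assumed completion of the truncated loop/function, mirroring the
            # AveragePooling2D -> Dense handling earlier in this function.
            new_filter = new_filter + channels
        filters = [int(i) for i in filters]
        delete_dense_input(new_model_param, dense_layer_index, filters, soft_prune)
        return new_model_param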
import numpy as np
import pytest
from climateforcing.utci import (
mean_radiant_temperature,
universal_thermal_climate_index,
)
def test_utci_array():
EXPECTED_RESULT = 273.15 + np.array([19.60850656, 21.2151128])
TEST_RESULT = universal_thermal_climate_index(
{
"tas": np.array([295, 296]),
"sfcWind": np.array([6.0, 6.0]),
"hurs": np.array([100, 100]),
},
np.array([303, 304]),
)
assert np.allclose(TEST_RESULT, EXPECTED_RESULT)
def test_utci_huss():
EXPECTED_RESULT = 273.15 + np.array([19.81876334, 20.98888025])
TEST_RESULT = universal_thermal_climate_index(
{
"tas": np.array([295, 296]),
"sfcWind": np.array([6.0, 6.0]),
"huss": np.array([0.0167, 0.0167]), # approx 100% RH at 295K, 1000hPa
},
np.array([303, 304]),
)
assert np.allclose(TEST_RESULT, EXPECTED_RESULT)
def test_utci_huss_ps():
EXPECTED_RESULT = 273.15 + np.array([17.86596553, 17.84009998])
TEST_RESULT = universal_thermal_climate_index(
{
"tas": np.array([295, 296]),
"sfcWind": np.array([6.0, 6.0]),
"huss": np.array([0.012, 0.010]),
"ps": np.array([96000, 75000]),
},
np.array([303, 304]),
)
assert np.allclose(TEST_RESULT, EXPECTED_RESULT)
def test_utci_raises():
with pytest.raises(ValueError):
universal_thermal_climate_index(
{
"tas": np.array([295, 296]),
"sfcWind": np.array([6.0, 6.0]),
"hurs": np.array([100, 100]),
"huss": np.array([0.006, 0.006]),
},
np.array([303, 304]),
)
with pytest.raises(ValueError):
universal_thermal_climate_index({}, np.array([303, 304]))
def test_mrt_array():
EXPECTED_RESULT = np.array([313.33875095746555, 291.2519186818613])
TEST_RESULT = mean_radiant_temperature(
{
"rlds": np.array([150, 50]),
"rlus": np.array([350, 150]),
"rsdsdiff": np.array([400, 200]),
"rsus": np.array([100, 50]),
"rsds": | np.array([700, 400]) | numpy.array |
import unittest
from unittest.mock import patch
from gym_powerworld.envs import voltage_control_env
# noinspection PyProtectedMember
from gym_powerworld.envs.voltage_control_env import LOSS, \
MinLoadBelowMinGenError, MaxLoadAboveMaxGenError, OutOfScenariosError, \
MIN_V, MAX_V, MIN_V_SCALED, MAX_V_SCALED, _scale_voltages
import os
import pandas as pd
import numpy as np
import numpy.testing as np_test
import logging
import warnings
from esa import SAW, PowerWorldError
from gym.spaces import Discrete
import shutil
# Get full path to this directory.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Cases are within this directory.
CASE_DIR = os.path.join(THIS_DIR, 'cases')
# IEEE 14 bus
DIR_14 = os.path.join(CASE_DIR, 'ieee_14')
PWB_14 = os.path.join(DIR_14, 'IEEE 14 bus.pwb')
AXD_14 = os.path.join(DIR_14, 'IEEE 14 bus.axd')
CONTOUR = os.path.join(DIR_14, 'contour.axd')
# Case with 3 gens modeled as condensers:
PWB_14_CONDENSERS = os.path.join(DIR_14, 'IEEE 14 bus condensers.pwb')
# Case with min and max MW limits on all 5 generators.
PWB_14_LIMITS = os.path.join(DIR_14, 'IEEE 14 bus limits.pwb')
# IL 200
PWB_200 = os.path.join(CASE_DIR, 'il_200', 'ACTIVSg200.pwb')
# TX 2000
PWB_2000 = os.path.join(CASE_DIR, 'tx_2000',
'ACTIVSg2000_AUG-09-2018_Ride_mod.PWB')
# Define some constants related to the IEEE 14 bus case.
N_GENS_14 = 5
N_LOADS_14 = 11
LOAD_MW_14 = 259.0
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusTestCase(unittest.TestCase):
"""Test initializing the environment with the 14 bus model."""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 2
cls.min_load_factor = 0.5
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 9
cls.gen_voltage_range = (0.9, 1.1)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 100
cls.env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer
)
# For easy comparison with the original case, get a fresh SAW
# object. Do not make any changes to this, use only "get" type
# methods.
cls.saw = SAW(PWB_14, early_bind=True)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.saw.exit()
cls.env.close()
def test_branches_to_open(self):
"""Ensure branches_to_open is the right shape and is in the
appropriate range.
"""
self.assertIsNotNone(self.env.branches_to_open)
self.assertEqual((self.num_scenarios,),
self.env.branches_to_open.shape)
self.assertTrue(self.env.branches_to_open.min() >= 0)
self.assertTrue(
self.env.branches_to_open.max()
< self.env.branch_init_data.shape[0])
def test_saw_load_state(self):
"""Ensure that calling saw.LoadState() works (testing that
saw.SaveState() has already been called).
"""
# NOTE: This changes the state of self.env.saw, which can cause
# issues in other tests.
self.assertIsNone(self.env.saw.LoadState())
def test_gen_key_fields(self):
"""Ensure the gen key fields are correct. Hard coding style."""
self.assertListEqual(['BusNum', 'GenID'], self.env.gen_key_fields)
def test_gen_init_fields(self):
self.assertListEqual(
self.env.gen_key_fields + self.env.GEN_INIT_FIELDS,
self.env.gen_init_fields)
def test_gen_obs_fields(self):
self.assertListEqual(self.env.gen_key_fields + self.env.GEN_OBS_FIELDS,
self.env.gen_obs_fields)
def test_gen_init_data(self):
self.assertIsInstance(self.env.gen_init_data, pd.DataFrame)
self.assertListEqual(self.env.gen_init_fields,
self.env.gen_init_data.columns.tolist())
def test_num_gens(self):
        # 14 bus case has 5 generators.
self.assertEqual(5, self.env.num_gens)
def test_zero_negative_gen_mw_limits(self):
"""Ensure the _zero_negative_gen_mw_limits function works as
intended.
"""
# First, ensure it has been called.
self.assertTrue((self.env.gen_init_data['GenMWMin'] >= 0).all())
# Now, patch gen_init_data and saw and call the function.
gen_copy = self.env.gen_init_data.copy(deep=True)
gen_copy['GenMWMin'] = -10
# I wanted to use self.assertLogs, but that has trouble working
# with nested context managers...
with patch.object(self.env, '_gen_init_data', new=gen_copy):
with patch.object(self.env, 'saw') as p:
self.env._zero_negative_gen_mw_limits()
# The gen_copy should have had its GenMWMin values zeroed out.
self.assertTrue((gen_copy['GenMWMin'] == 0).all())
# change_parameters_multiple_element_df should have been
# called.
p.change_and_confirm_params_multiple_element.assert_called_once()
# Ensure the change was reflected in PowerWorld.
gens = self.env.saw.GetParametersMultipleElement(
'gen', ['BusNum', 'GenID', 'GenMWMin'])
self.assertTrue((gens['GenMWMin'] == 0).all())
# Finally, (this could have been done first, but oh well), make
# sure that the case started with negative GenMWMin values.
gens_orig = self.saw.GetParametersMultipleElement(
'gen', ['BusNum', 'GenID', 'GenMWMin'])
self.assertTrue((gens_orig['GenMWMin'] < 0).any())
def test_gen_mw_capacity(self):
# The generators are all set to a ridiculous maximum of 10 GW.
self.assertEqual(5 * 10000.0, self.env.gen_mw_capacity)
def test_gen_mvar_produce_capacity(self):
self.assertEqual(50. + 40. + 24. + 24.,
round(self.env.gen_mvar_produce_capacity, 2))
def test_gen_mvar_consume_capacity(self):
self.assertEqual(-40. - 6. - 6.,
round(self.env.gen_mvar_consume_capacity, 2))
def test_load_key_fields(self):
# Hard coding!
self.assertListEqual(self.env.load_key_fields, ['BusNum', 'LoadID'])
def test_load_init_fields(self):
self.assertListEqual(self.env.load_init_fields,
self.env.load_key_fields
+ self.env.LOAD_INIT_FIELDS)
def test_load_obs_fields(self):
self.assertListEqual(
self.env.load_obs_fields,
self.env.load_key_fields + self.env.LOAD_OBS_FIELDS)
def test_load_init_data(self):
self.assertIsInstance(self.env.load_init_data, pd.DataFrame)
self.assertListEqual(self.env.load_init_data.columns.tolist(),
self.env.load_init_fields)
def test_num_loads(self):
self.assertEqual(11, self.env.num_loads)
def test_zero_i_z_loads(self):
"""Patch the environment's load_init_data and ensure the method is
working properly.
"""
data = self.env.load_init_data.copy(deep=True)
data[voltage_control_env.LOAD_I_Z] = 1
with patch.object(self.env, '_load_init_data', new=data):
with patch.object(self.env, 'saw') as p:
self.env._zero_i_z_loads()
self.assertTrue((data[voltage_control_env.LOAD_I_Z] == 0).all().all())
p.change_and_confirm_params_multiple_element.assert_called_once()
def test_bus_key_fields(self):
self.assertListEqual(['BusNum'], self.env.bus_key_fields)
def test_bus_obs_fields(self):
self.assertListEqual(self.env.bus_key_fields + self.env.BUS_OBS_FIELDS,
self.env.bus_obs_fields)
def test_bus_init_data(self):
self.assertIsInstance(self.env.bus_init_data, pd.DataFrame)
self.assertListEqual(self.env.bus_init_fields,
self.env.bus_init_data.columns.tolist())
def test_num_buses(self):
self.assertEqual(14, self.env.num_buses)
def test_max_load_mw(self):
# System loading obtained from PowerWorld's Case Summary
# dialogue.
self.assertEqual(round(self.env.max_load_mw, 2),
self.max_load_factor * LOAD_MW_14)
def test_check_max_load_exception(self):
"""Ensure that an exception is thrown if maximum loading exceeds
maximum generation.
"""
with patch.object(self.env, 'max_load_mw', 10):
with patch.object(self.env, 'gen_mw_capacity', 9.9):
with self.assertRaisesRegex(MaxLoadAboveMaxGenError,
'The given max_load'):
self.env._check_max_load(2)
def test_check_max_load_warning(self):
"""Ensure we get a warning if the generation is in excess of
2x maximum load.
"""
with self.assertLogs(logger=self.env.log, level='WARNING'):
self.env._check_max_load(2)
def test_min_load_mw(self):
# System loading obtained from PowerWorld's Case Summary
# dialogue.
self.assertEqual(round(self.env.min_load_mw, 2),
self.min_load_factor * LOAD_MW_14)
def test_check_min_load(self):
# Get generator data.
gens = self.env.gen_init_data.copy(deep=True)
# Increase all minimum generation.
gens['GenMWMin'] = 10
# Patch:
with patch.object(self.env, '_gen_init_data', gens):
with patch.object(self.env, 'min_load_mw', 9.9):
with self.assertRaisesRegex(MinLoadBelowMinGenError,
'The given min_load'):
self.env._check_min_load(2)
def test_total_load_mw(self):
# Ensure it's 1D.
self.assertEqual(len(self.env.total_load_mw.shape), 1)
# Check shape.
self.assertEqual(self.env.total_load_mw.shape[0],
self.env.num_scenarios)
# Ensure all loads are less than the maximum.
np_test.assert_array_less(self.env.total_load_mw, self.env.max_load_mw)
# Ensure all loads are greater than the minimum.
np_test.assert_array_less(self.env.min_load_mw, self.env.total_load_mw)
def test_loads_mw(self):
# Check shape
self.assertEqual(self.env.loads_mw.shape,
(self.num_scenarios, self.env.num_loads))
# Ensure the individual loads match total loading.
np_test.assert_allclose(self.env.loads_mw.sum(axis=1),
self.env.total_load_mw, rtol=1e-6)
def test_loads_mvar(self):
# Check shape.
self.assertEqual(self.env.loads_mvar.shape,
(self.num_scenarios, self.env.num_loads))
# Ensure that portion of negative var loads (leading power
# factor) is close to the lead_pf_probability.
neg_portion = (self.env.loads_mvar < 0).sum().sum() \
/ (self.num_scenarios * self.env.num_loads)
# Ensure we're within 0.75 * prob and 1.25 * prob. This seems
# reasonable.
self.assertLessEqual(neg_portion, 1.25 * self.lead_pf_probability)
self.assertGreaterEqual(neg_portion, 0.75 * self.lead_pf_probability)
def test_load_power_factors(self):
"""Ensure all loads have a power factor greater than the min."""
# Ensure all power factors are valid. pf = P / |S|
s_mag = np.sqrt(np.square(self.env.loads_mw)
+ np.square(self.env.loads_mvar))
# Suppress numpy warnings - we'll be replacing NaNs.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pf = self.env.loads_mw / s_mag
# For sake of testing, set loads with 0 power to have a
# power factor of 1.
pf[np.isnan(pf)] = 1
np_test.assert_array_less(self.min_load_pf, pf)
def test_loads_on_match_probability(self):
"""Ensure the proportion of loads which are on matches the
load_on_probability to a reasonable tolerance.
"""
# First, ensure the zeros match up between loads_mw and loads_mvar.
mw_0 = self.env.loads_mw == 0
np.testing.assert_array_equal(mw_0, self.env.loads_mvar == 0)
# Now, ensure the total portion of loads that are "on" is close
# to the load_on_probability.
# noinspection PyUnresolvedReferences
portion = (~mw_0).sum().sum() \
/ (self.num_scenarios * self.env.num_loads)
# Ensure we're within 0.75 * prob and 1.25 * prob. This seems
# reasonable.
self.assertLessEqual(portion, 1.25 * self.load_on_probability)
self.assertGreaterEqual(portion, 0.75 * self.load_on_probability)
def test_gen_mw(self):
# Start with shape.
self.assertEqual(self.env.gen_mw.shape,
(self.num_scenarios, self.env.num_gens))
# Ensure total generation is close to total load plus losses.
np_test.assert_allclose(self.env.gen_mw.sum(axis=1),
self.env.total_load_mw * (1 + LOSS), rtol=1e-6)
# TODO: Since the generators in this case have ridiculously high
# maximums, I'm not going to bother testing that all gens are
# within their bounds. When we move to a more realistic case,
# e.g. the Texas 2000 bus case, we need to test that.
#
# # Ensure generator outputs are within bounds.
# for gen_idx, row in enumerate(env.gen_init_data.itertuples()):
# gen_output = env.gen_mw[:, gen_idx]
# # noinspection PyUnresolvedReferences
# self.assertTrue((gen_output <= row.GenMWMax).all())
# # noinspection PyUnresolvedReferences
# self.assertTrue((gen_output >= row.GenMWMin).all())
def test_gen_v(self):
# Shape.
self.assertEqual(self.env.gen_v.shape,
(self.env.num_scenarios, self.env.num_gens))
# Values.
self.assertTrue(
((self.env.gen_v >= self.gen_voltage_range[0]).all()
and
(self.env.gen_v <= self.gen_voltage_range[1]).all()
)
)
def test_action_space(self):
self.assertIsInstance(self.env.action_space, Discrete)
# Plus 1 because no-op action
self.assertEqual(self.env.action_space.n,
self.env.num_gens * self.num_gen_voltage_bins + 1)
def test_gen_bins(self):
# Hard coding!
np.testing.assert_allclose(
np.array([0.9, 0.925, 0.95, 0.975, 1.0, 1.025, 1.05, 1.075, 1.1]),
self.env.gen_bins)
def test_gen_action_array(self):
# Minus 1 because no-op action.
self.assertEqual(self.env.action_space.n - 1,
self.env.gen_action_array.shape[0])
self.assertEqual(2, self.env.gen_action_array.shape[1])
# Initialize array for comparison. Again, -1 due to no-op.
a = np.zeros(shape=(self.env.action_space.n - 1, 2), dtype=int)
# Put generator bus numbers in column 0. No need to worry about
# multiple generators at the same bus for this case.
a[:, 0] = np.array(
self.env.gen_init_data['BusNum'].tolist()
* self.num_gen_voltage_bins)
# Write a crappy, simple, loop to put the indices of the
# generator voltage levels in.
b = []
for i in range(self.num_gen_voltage_bins):
for _ in range(self.env.num_gens):
b.append(i)
a[:, 1] = np.array(b)
np.testing.assert_array_equal(a, self.env.gen_action_array)
def test_num_obs(self):
"""Ensure the number of observations matches the expected number
"""
# 14 buses + 3 * 5 gens + 3 * 11 loads
self.assertEqual(14 + 3 * 5 + 3 * 11, self.env.num_obs)
def test_observation_space(self):
"""Ensure the observation space has the appropriate properties.
"""
# Test shape.
self.assertEqual(self.env.observation_space.shape, (self.env.num_obs,))
# Test bounds. Bus voltages should have a high of 2, and the
# rest should have a high of 1.
self.assertTrue((self.env.observation_space.high[
0:self.env.num_buses] == 2.).all())
self.assertTrue((self.env.observation_space.high[
self.env.num_buses:] == 1.).all())
self.assertTrue((self.env.observation_space.low == 0.).all())
def test_observation_attributes(self):
"""After initialization, several observation related attributes
should be initialized to None.
"""
self.assertIsNone(self.env.gen_obs_data)
self.assertIsNone(self.env.load_obs_data)
self.assertIsNone(self.env.bus_obs_data)
self.assertIsNone(self.env.gen_obs_data_prev)
self.assertIsNone(self.env.load_obs_data_prev)
self.assertIsNone(self.env.bus_obs_data_prev)
def test_action_count(self):
"""After initialization, the action count should be 0."""
self.assertEqual(0, self.env.action_count)
def test_reward_matches(self):
"""For this simple initialization, the rewards should be the
same as the class constant.
"""
self.assertDictEqual(self.env.rewards, self.env.REWARDS)
def test_override_reward(self):
"""Ensure overriding a portion of the rewards behaves as
expected.
"""
# Create a new env, but use new rewards.
env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=10,
max_load_factor=self.max_load_factor,
min_load_factor=self.min_load_factor,
num_gen_voltage_bins=5,
rewards={'v_delta': 1000})
# Loop and assert.
for key, value in env.rewards.items():
if key == 'v_delta':
self.assertNotEqual(env.REWARDS[key], value)
else:
self.assertEqual(env.REWARDS[key], value)
# Ensure the keys are the same.
self.assertListEqual(list(env.rewards.keys()),
list(env.REWARDS.keys()))
def test_bad_reward_key(self):
"""Ensure an exception is raised if a bad reward key is given.
"""
with self.assertRaisesRegex(KeyError, 'The given rewards key, v_detl'):
_ = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=10,
max_load_factor=self.max_load_factor,
min_load_factor=self.min_load_factor,
rewards={'v_detla': 1000})
def test_log_columns(self):
"""Ensure the log columns are as they should be."""
self.assertListEqual(
['episode', 'action_taken', 'reward']
+ [f'bus_{x+1}_v' for x in range(14)]
+ [f'gen_{x}_{y}' for x, y in zip([1, 2, 3, 6, 8], [1] * 5)],
self.env.log_columns
)
def test_log_array(self):
self.assertEqual(self.env.log_array.shape,
# 14 + 3 --> num buses plus ep, action, reward,
# and num gens.
(self.log_buffer, 14 + 3 + 5))
def test_no_op_action(self):
# Cover several data types because the action comes directly
# from a neural network, which could have different data types.
self.assertEqual(0, self.env.no_op_action)
self.assertEqual(0.0, self.env.no_op_action)
self.assertEqual(np.float64(0), self.env.no_op_action)
self.assertEqual(np.float32(0), self.env.no_op_action)
self.assertEqual(np.int(0.0), self.env.no_op_action)
def test_last_action(self):
self.assertIsNone(self.env.last_action)
def test_gen_var_lim_zero_arr(self):
"""Ensure the gen_var_lim_zero_arr is as expected."""
# The swing bus (which is listed first) and generator at bus 3
# should have 0 limits.
mask = np.ones(self.env.gen_var_lim_zero_arr.shape[0], dtype=bool)
mask[0] = False
mask[2] = False
self.assertTrue(self.env.gen_var_lim_zero_arr[~mask].all())
self.assertFalse(self.env.gen_var_lim_zero_arr[mask].any())
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusLimitsTestCase(unittest.TestCase):
"""Test initializing the environment with the 14 bus model with
limits added.
"""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
# Create a ton of scenarios so the generator dispatch is
# thoroughly exercised.
cls.num_scenarios = 100000
cls.max_load_factor = 1.44
cls.min_load_factor = 0.5
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 9
cls.gen_voltage_range = (0.9, 1.1)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 100
cls.env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14_LIMITS, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer
)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.env.close()
# noinspection PyUnresolvedReferences
def test_gens_in_bounds(self):
self.assertTrue(
(self.env.gen_mw
<= self.env.gen_init_data['GenMWMax'].to_numpy()).all()
)
self.assertTrue(
(self.env.gen_mw
>= self.env.gen_init_data['GenMWMin'].to_numpy()).all()
)
def test_gen_meets_load(self):
np.testing.assert_allclose(self.env.total_load_mw * (1 + LOSS),
self.env.gen_mw.sum(axis=1))
# noinspection DuplicatedCode
class DiscreteVoltageControlEnv14BusResetTestCase(unittest.TestCase):
"""Test the reset method of the environment."""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 2
cls.min_load_factor = 0.5
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 9
cls.gen_voltage_range = (0.9, 1.1)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.env = voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype
)
# For easy comparison with the original case, get a fresh SAW
# object. Do not make any changes to this, use only "get" type
# methods.
cls.saw = SAW(PWB_14, early_bind=True)
# Extract generator data needed for testing the reset method.
cls.gens = cls.saw.GetParametersMultipleElement(
ObjectType='gen',
ParamList=cls.env.gen_key_fields + cls.env.GEN_RESET_FIELDS)
# Extract generator data needed for testing the reset method.
cls.loads = cls.saw.GetParametersMultipleElement(
ObjectType='load',
ParamList=cls.env.load_key_fields + cls.env.LOAD_RESET_FIELDS
)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.saw.exit()
cls.env.close()
def setUp(self) -> None:
"""Reset the scenario index for each run, and restore the
case.
"""
self.env.scenario_idx = 0
self.env.saw.LoadState()
def test_scenario_idx_increments(self):
"""Ensure subsequent calls to reset update the scenario index.
"""
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
with patch.object(self.env.saw,
'change_parameters_multiple_element_df'):
self.env.reset()
self.assertEqual(1, self.env.scenario_idx)
self.env.reset()
self.assertEqual(2, self.env.scenario_idx)
self.env.reset()
self.assertEqual(3, self.env.scenario_idx)
def test_action_count_reset(self):
"""Ensure subsequent calls to reset reset the action_count."""
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
with patch.object(self.env.saw,
'change_parameters_multiple_element_df'):
self.env.action_count = 10
self.env.reset()
self.assertEqual(0, self.env.action_count)
self.env.action_count = 17
self.env.reset()
self.assertEqual(0, self.env.action_count)
self.env.action_count = 1
self.env.reset()
self.assertEqual(0, self.env.action_count)
def test_load_state_called(self):
"""Ensure the SAW object's LoadState method is called in reset.
"""
        # Patch the changing of parameters so that we'll get a
        # consistent incrementing of the index (no failed power flow).
with patch.object(self.env.saw,
'change_parameters_multiple_element_df'):
with patch.object(
self.env.saw, 'LoadState',
side_effect=self.env.saw.LoadState) as p:
self.env.reset()
p.assert_called_once()
def test_gens_and_loads_set_correctly(self):
"""Ensure that the appropriate generators get opened and closed,
and that the power levels get set correctly in the case for both
generators and loads.
"""
# There are 5 generators in the 14 bus case. In the base case,
# only gens at buses 1 and 2 are providing active power, but
# the others are "Closed" and thus regulating their voltage.
# We'll patch the environment's gen_mw to have all gens on
# and sharing the load evenly except the generator at bus 2.
# We'll also patch all gens to be regulating to 1.05 per unit.
p = LOAD_MW_14 / 4
gen_mw_row = np.array([p, 0, p, p, p])
gen_mw = self.env.gen_mw.copy()
gen_mw[0, :] = gen_mw_row
gen_v_row = np.array([1.05] * 5)
gen_v = self.env.gen_v.copy()
gen_v[0, :] = gen_v_row
# Extract the original loading, but we'll bump one load by 1 MW
# and 1 MVAR and decrement another by 1 MW and 1 MVAR.
loads_mw_row = self.loads['LoadSMW'].to_numpy()
loads_mw_row[3] += 1
loads_mw_row[4] -= 1
loads_mw = self.env.loads_mw.copy()
loads_mw[0, :] = loads_mw_row
loads_mvar_row = self.loads['LoadSMVR'].to_numpy()
loads_mvar_row[3] += 1
loads_mvar_row[4] -= 1
loads_mvar = self.env.loads_mvar.copy()
loads_mvar[0, :] = loads_mvar_row
# Patch the scenario index, generator output, and loading. Then
# reset the environment.
with patch.object(self.env, 'gen_mw', new=gen_mw):
with patch.object(self.env, 'gen_v', new=gen_v):
with patch.object(self.env, 'loads_mw', new=loads_mw):
with patch.object(self.env, 'loads_mvar', new=loads_mvar):
# Patch branches_to_open so a line does not get
# opened.
with patch.object(self.env, 'branches_to_open',
new=None):
self.env.reset()
# Pull the generator data from PowerWorld and ensure that both
# the status and output match up.
gen_reset_data = self.env.saw.GetParametersMultipleElement(
ObjectType='gen',
ParamList=self.env.gen_key_fields + self.env.GEN_RESET_FIELDS)
# All gens except for the 2nd should be closed.
status = ['Closed'] * 5
status[1] = 'Open'
self.assertListEqual(status, gen_reset_data['GenStatus'].tolist())
# Excluding the slack, generator MW output should exactly match
# what was commanded.
np.testing.assert_allclose(
gen_mw_row[1:], gen_reset_data['GenMW'].to_numpy()[1:])
# The slack should be equal to within our assumed line losses.
np.testing.assert_allclose(
gen_mw_row[0], gen_reset_data['GenMW'].to_numpy()[0],
rtol=LOSS, atol=0
)
# Generator voltage setpoints should match.
np.testing.assert_allclose(
gen_v_row, gen_reset_data['GenVoltSet'].to_numpy()
)
# Pull the load data from PowerWorld and ensure that both the
# MW and MVAR outputs match up.
load_init_data = self.env.saw.GetParametersMultipleElement(
ObjectType='load',
ParamList=self.env.load_key_fields + self.env.LOAD_RESET_FIELDS
)
np.testing.assert_allclose(
loads_mw_row, load_init_data['LoadSMW'].to_numpy()
)
np.testing.assert_allclose(
loads_mvar_row, load_init_data['LoadSMVR'].to_numpy()
)
def test_failed_power_flow(self):
"""Ensure that if the power flow fails to solve, we move on
to the next scenario.
"""
# Patch SolvePowerFlow so that the second call fails, while
# the first, third, and fourth succeed.
with patch.object(
self.env.saw, 'SolvePowerFlow',
side_effect=[None, PowerWorldError('failure'), None,
None]):
self.env.reset()
# Our first attempt should fail, and the second should succeed.
# The index is always bumped at the end of each iteration, so
# it should end up at 2 (starts at 0, bumped to 1 after first
# failed iteration, bumped to 2 after second successful
# iteration).
self.assertEqual(2, self.env.scenario_idx)
def test_hit_max_iterations(self):
"""Exception should be raised once all scenarios are exhausted.
"""
# We want every other power flow solve to fail.
side_effect = [None, PowerWorldError('failure')] * 10
with patch.object(self.env.saw, 'SolvePowerFlow',
side_effect=side_effect):
with patch.object(self.env, 'num_scenarios', new=5):
with self.assertRaisesRegex(
OutOfScenariosError,
'We have gone through all scenarios'):
self.env.reset()
def test_reset_returns_proper_observation(self):
"""Ensure a single call to reset calls _get_observation and
returns the observation.
"""
with patch.object(self.env, '_get_observation',
side_effect=self.env._get_observation) as p:
obs = self.env.reset()
# _get_observation should be called once only. Note if we get
# into a bad state where the voltages are too low, it may
# be called more than once. Bad test design due to the fact
# we can't just spin up new ESA instances for each test.
p.assert_called_once()
self.assertIsInstance(obs, np.ndarray)
self.assertEqual(obs.shape, self.env.observation_space.shape)
def test_extra_reset_actions_called(self):
with patch.object(self.env, '_set_gens_for_scenario') as p:
self.env.reset()
p.assert_called_once()
def test_set_gens_for_scenario_called(self):
with patch.object(self.env, '_set_gens_for_scenario') as p:
self.env.reset()
p.assert_called_once()
def test_set_loads_for_scenario_called(self):
with patch.object(self.env, '_set_loads_for_scenario') as p:
with patch.object(self.env, '_solve_and_observe'):
self.env.reset()
p.assert_called_once()
def test_solve_and_observe_called(self):
with patch.object(self.env, '_solve_and_observe') as p:
self.env.reset()
p.assert_called_once()
def test_current_reward_cleared(self):
self.env.current_reward = 10
self.env.reset()
self.assertTrue(np.isnan(self.env.current_reward))
import numpy as np
from numba import njit
import scipy as sp
import scipy.optimize as spo
from netket.stats import (
statistics as _statistics,
mean as _mean,
sum_inplace as _sum_inplace,
)
from netket.utils import (
MPI_comm as _MPI_comm,
n_nodes as _n_nodes,
node_number as _rank
)
from mpi4py import MPI
from netket.machine import QGPSLinExp
class SupervisedLearning():
def __init__(self, machine):
self.machine = machine
def mean_squared_error(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
local_error = np.sum(weightings * abs(np.exp(self.machine.log_val(basis)) - target_amplitudes)**2)
else:
local_error = 0.0
return _MPI_comm.allreduce(local_error)
def mean_squared_error_der(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = np.exp(self.machine.log_val(basis))
der_log = self.machine.der_log(basis)
residuals = (estimates - target_amplitudes).conj()*weightings
der = 2 * np.einsum("ij,i,i->j", der_log, estimates, residuals)
if self.machine.has_complex_parameters:
der = np.concatenate((der.real, (1.j*der).real))
else:
if self.machine.has_complex_parameters:
der = np.zeros(2*self.machine._npar)
else:
der = np.zeros(self.machine._npar)
return _sum_inplace(der)
def mean_squared_error_hess(self, basis, target_amplitudes, weightings):
if len(target_amplitudes) > 0:
estimates = np.exp(self.machine.log_val(basis))
der = self.machine.der_log(basis)
proper_der = (der.T * estimates)
hess_el = self.machine.hess(basis)
wfn_hess_first_term = hess_el.T * estimates
wfn_hess_sec_term = np.einsum("ij,jk->ikj", proper_der, der)
wfn_hess = wfn_hess_first_term + wfn_hess_sec_term
residuals = (estimates-target_amplitudes).conj()*weightings
hess_first_term = (np.dot(wfn_hess, residuals))
hess_sec_term = np.matmul(proper_der*weightings, proper_der.T.conj())
if self.machine.has_complex_parameters:
hess_first_term = np.block([[hess_first_term.real, (1.j*hess_first_term).real],[(1.j*hess_first_term).real,-hess_first_term.real]])
hess_sec_term = np.block([[hess_sec_term, -1.j*hess_sec_term],[1.j*hess_sec_term, hess_sec_term]])
else:
if self.machine.has_complex_parameters:
hess = np.zeros(2*self.machine._npar)
else:
hess = np.zeros(self.machine._npar)
hess = 2 * (hess_first_term + hess_sec_term)
return _sum_inplace(hess)
def overlap(self, basis, target_amplitudes, weightings):
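# Normalised overlap (fidelity) between the machine wave function psi and
# the target amplitudes phi over the sampled basis:
#   F = |sum_x w_x psi(x) phi*(x)|^2 / (sum_x w_x |psi(x)|^2 * sum_x w_x |phi(x)|^2)
# with all sums accumulated across MPI ranks.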
assert(len(target_amplitudes) > 0)
predictions = np.exp(self.machine.log_val(basis))
overlap = abs(_MPI_comm.allreduce(np.sum(weightings * predictions * target_amplitudes.conj())))**2
norm = _MPI_comm.allreduce(np.sum(weightings * abs(predictions)**2)) * _MPI_comm.allreduce(np.sum(weightings * abs(target_amplitudes)**2))
return overlap/norm
def overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = np.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _sum_inplace(np.einsum("i,ij->j", (weightings * estimates * target_amplitudes), der))
norm1 = _MPI_comm.allreduce(np.sum(weightings * estimates * target_amplitudes))
overlap2 = _sum_inplace(np.einsum("i,ij->j", (weightings * abs(estimates)**2), der))
norm2 = _MPI_comm.allreduce(np.sum(weightings * abs(estimates)**2))
derivative = overlap1/norm1 - overlap2/norm2
overlap = self.overlap(basis, target_amplitudes, weightings)
if self.machine.has_complex_parameters:
derivative = np.concatenate((derivative.real, derivative.imag))
return overlap * derivative.real
def neg_log_overlap_der(self, basis, target_amplitudes, weightings):
assert(len(target_amplitudes) > 0)
estimates = np.exp(self.machine.log_val(basis)).conj()
der = self.machine.der_log(basis).conj()
overlap1 = _sum_inplace(np.einsum("i,ij->j", (weightings * estimates * target_amplitudes), der))
norm1 = _MPI_comm.allreduce(np.sum(weightings * estimates * target_amplitudes))
overlap2 = _sum_inplace(np.einsum("i,ij->j", (weightings * abs(estimates)**2), der))
norm2 = _MPI_comm.allreduce(np.sum(weightings * abs(estimates)**2))
derivative = -overlap1/norm1 + overlap2/norm2
if self.machine.has_complex_parameters:
derivative = np.concatenate((derivative.real, derivative.imag))
return derivative.real
def bayes_loss(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
return beta/2 * self.mean_squared_error(basis, target_amplitudes, weightings) + 0.5 * np.sum((parameters**2) * alpha)
def grad_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
der = beta/2 * self.mean_squared_error_der(basis, target_amplitudes, weightings)
der += parameters * alpha
return der
def hess_bayes(self, basis, target_amplitudes, weightings, beta, alpha):
parameters = self.machine.parameters
if self.machine.has_complex_parameters:
parameters = np.concatenate((parameters.real, parameters.imag))
hess = beta/2 * self.mean_squared_error_hess(basis, target_amplitudes, weightings)
hess += np.diag(alpha)
return hess
def get_bias(self, target_amplitudes, weightings=None, dtype=complex):
if len(target_amplitudes) > 0:
if weightings is None:
local_sum = np.sum(np.log(target_amplitudes))
n_terms = len(target_amplitudes)
else:
local_sum = np.sum(np.log(target_amplitudes)*weightings)
n_terms = np.sum(weightings)
else:
local_sum = 0.
n_terms = 1
return _MPI_comm.allreduce(local_sum)/_MPI_comm.allreduce(n_terms)
class QGPSLearning(SupervisedLearning):
def __init__(self, machine, init_alpha=1.0, bond_min_id=0, bond_max_id=None, complex_expand=True):
super().__init__(machine)
self.K = None
self.weights = None
self.site_prod = None
self.confs = None
self.ref_site = None
self.bond_min_id = bond_min_id
if bond_max_id is None:
self.bond_max_id = self.machine._epsilon.shape[1]
else:
self.bond_max_id = bond_max_id
self.n_bond = self.bond_max_id - self.bond_min_id
self.complex_expand = complex_expand
self.local_dim = self.machine.hilbert._local_size
if self.complex_expand and self.machine.dtype==complex:
self.alpha_mat = np.ones((self.machine._epsilon.shape[0], self.local_dim*2*self.n_bond))*init_alpha
else:
self.alpha_mat = np.ones((self.machine._epsilon.shape[0], self.local_dim*self.n_bond))*init_alpha
self.alpha_cutoff = 1.e10
self.kern_cutoff = 1.e-15
self.sinv_fallback = True
self.alpha_convergence_tol = 1.e-15
@staticmethod
@njit()
def kernel_mat_inner(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
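# Builds the kernel matrix: for each sample i and bond dimension x the site
# products are summed over the symmetry operations t and routed to column
# 2*x or 2*x+1 according to the sign of the (symmetry-transformed) spin at
# the reference site.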
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
if sym_spin_flip_sign[t] * confs[i, Smap[t, ref_site]] < 0.0:
K[i, 2*x] += site_prod[i, x, t]
else:
K[i, 2*x+1] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast(epsilon, bond_min, bond_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_min, bond_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def update_site_prod_fast(epsilon, bond_min, bond_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = np.finfo(np.double).eps
for (x, w) in enumerate(range(bond_min, bond_max)):
if abs(epsilon[ref_site, w, 0]) > 1.e4 * eps and abs(epsilon[ref_site, w, 1]) > 1.e4 * eps:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site]] < 0:
site_product[i, x, t] /= epsilon[ref_site, w, 0]
else:
site_product[i, x, t] /= epsilon[ref_site, w, 1]
if sym_spin_flip_sign[t] * confs[i, Smap[t,ref_site_old]] < 0:
site_product[i, x, t] *= epsilon[ref_site_old, w, 0]
else:
site_product[i, x, t] *= epsilon[ref_site_old, w, 1]
else:
for i in range(confs.shape[0]):
for t in range(Smap.shape[0]):
site_product[i, x, t] = 1.0
for j in range(confs.shape[1]):
if j != ref_site:
if sym_spin_flip_sign[t] * confs[i, Smap[t,j]] < 0:
site_product[i, x, t] *= epsilon[j, w, 0]
else:
site_product[i, x, t] *= epsilon[j, w, 1]
return site_product
@staticmethod
@njit()
def kernel_mat_inner_fermion(site_prod, ref_site, confs, Smap, sym_spin_flip_sign, K):
K.fill(0.0)
for i in range(site_prod.shape[0]):
for x in range(site_prod.shape[1]):
for t in range(site_prod.shape[2]):
index = round(confs[i, Smap[t, ref_site]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
K[i, 4*x + index] += site_prod[i, x, t]
return K
@staticmethod
@njit()
def compute_site_prod_fast_fermion(epsilon, bond_min, bond_max, ref_site, confs, Smap, sym_spin_flip_sign, site_product):
site_product.fill(1.0)
for i in range(confs.shape[0]):
for (x, w) in enumerate(range(bond_min, bond_max)):
for t in range(Smap.shape[0]):
for j in range(confs.shape[1]):
if j != ref_site:
index = round(confs[i, Smap[t, j]])
if sym_spin_flip_sign[t] < 0.0:
if index == 1:
index = 2
elif index == 2:
index = 1
site_product[i, x, t] *= epsilon[j, w, index]
return site_product
@staticmethod
@njit()
def update_site_prod_fast_fermion(epsilon, bond_min, bond_max, ref_site, ref_site_old, confs, Smap, sym_spin_flip_sign, site_product):
eps = np.finfo(np.double).eps
for (x, w) in enumerate(range(bond_min, bond_max)):
if np.min(np.abs(epsilon[ref_site, w, :])) > 1.e4 * eps:
import argparse
import fnmatch
import os
import shutil
import h5py as h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
import sunrgbd
import wrgbd51
from alexnet_model import AlexNet
from basic_utils import Models, RunSteps
from densenet_model import DenseNet
from main import init_save_dirs
from resnet_models import ResNet
from vgg16_model import VGG16Net
def get_rnn_model(params):
if params.net_model == Models.AlexNet:
model_rnn = AlexNet(params)
elif params.net_model == Models.VGGNet16:
model_rnn = VGG16Net(params)
elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
model_rnn = ResNet(params)
else: # params.net_model == Models.DenseNet121:
model_rnn = DenseNet(params)
return model_rnn
def calc_scores(l123_preds, test_labels, model_rnn):
model_rnn.test_labels = test_labels
avg_res, true_preds, test_size = model_rnn.calc_scores(l123_preds)
conf_mat = confusion_matrix(test_labels, l123_preds)
return avg_res, true_preds, test_size, conf_mat
def show_sunrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
columns = sunrgbd.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=16)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=16)
# plt.ylabel('True Label')
# plt.xlabel('Predicted Label')
plt.show()
# plt.savefig('sunrgb_confusion_matrix.eps', format='eps', dpi=1000)
def calc_scores_conf_mat(svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(svm_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
l3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
print('Running Layer-[{}+{}+{}] Confidence Average Fusion...'.format(l1, l2, l3))
print('SVM confidence scores of {}, {} and {} are average fused'.format(l1, l2, l3))
print('SVM confidence average fusion')
l123_avr_confidence = np.mean(np.array([l1_conf_scores, l2_conf_scores, l3_conf_scores]), axis=0)
l123_preds = np.argmax(l123_avr_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Fusion result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(rgb_svm_path, 'r') as f:
rgb1_conf_scores = np.asarray(f[l1])
rgb2_conf_scores = np.asarray(f[l2])
rgb3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
with h5py.File(depth_svm_path, 'r') as f:
depth1_conf_scores = np.asarray(f[l1])
depth2_conf_scores = np.asarray(f[l2])
depth3_conf_scores = np.asarray(f[l3])
f.close()
rgb_l123_sum_confidence = np.sum(np.array([rgb1_conf_scores, rgb2_conf_scores, rgb3_conf_scores]), axis=0)
depth_l123_sum_confidence = np.sum(np.array([depth1_conf_scores, depth2_conf_scores, depth3_conf_scores]), axis=0)
print('Weighted Average SVM confidence scores of [RGB({}+{}+{})+Depth({}+{}+{})] are taken'.format(l1, l2, l3, l1, l2, l3))
print('SVMs confidence weighted fusion')
w_rgb, w_depth = model_rnn.calc_modality_weights((rgb_l123_sum_confidence, depth_l123_sum_confidence))
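# The per-sample modality weights w_rgb and w_depth are broadcast over the
# class dimension, so every test instance gets its own RGB/depth weighting
# before the summed confidence scores are combined.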
rgbd_l123_wadd_confidence = np.add(rgb_l123_sum_confidence * w_rgb[:, np.newaxis],
depth_l123_sum_confidence * w_depth[:, np.newaxis])
l123_preds = np.argmax(rgbd_l123_wadd_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Combined Weighted Confidence result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_main(params):
root_path = '../../data/sunrgbd/'
svm_conf_paths = root_path + params.features_root + params.proceed_step + '/svm_confidence_scores/'
rgb_svm_path = svm_conf_paths + params.net_model + '_RGB_JPG.hdf5'
depth_svm_path = svm_conf_paths + params.net_model + '_Depth_Colorized_HDF5.hdf5'
if params.data_type == 'rgb':
calc_scores_conf_mat(rgb_svm_path)
elif params.data_type == 'depth':
calc_scores_conf_mat(depth_svm_path)
else:
sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path)
def individual_class_scores(total_conf_mat):
num_ctgs = len(total_conf_mat)
cm_sum = np.sum(total_conf_mat, axis=1, keepdims=True)
cm_perc = total_conf_mat / cm_sum.astype(float) * 100
individual_scores = cm_perc.diagonal()
categories = wrgbd51.get_class_names(range(num_ctgs))
i = 0
for category, category_score in zip(categories, individual_scores):
print(f'{category:<15} {category_score:>10.1f}')
def show_wrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
columns = wrgbd51.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 10}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=12)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=12)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
def wrgb_scores_conf_mat(params, svm_conf_paths):
model_rnn = get_rnn_model(params)
if params.data_type == 'rgb':
params.proceed_step = RunSteps.FIX_RECURSIVE_NN
data_type_ex = 'crop'
params.data_type = 'crop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'rgb'
else:
params.proceed_step = RunSteps.FINE_RECURSIVE_NN
data_type_ex = 'depthcrop'
params.data_type = 'depthcrop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'depths'
all_splits_scores = []
for split in range(1, 11):
conf_file = params.net_model + '_' + data_type_ex + '_split_' + str(split) + '.hdf5'
svm_conf_file_path = svm_conf_paths + conf_file
with h5py.File(svm_conf_file_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
l3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
f.close()
# print('Running Layer-[{}+{}+{}] Confidence Average Fusion...'.format(l1, l2, l3))
# print('SVM confidence scores of {}, {} and {} are average fused'.format(l1, l2, l3))
# print('SVM confidence average fusion')
l123_avr_confidence = np.mean(np.array([l1_conf_scores, l2_conf_scores, l3_conf_scores]), axis=0)
l123_preds = np.argmax(l123_avr_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
# print('Fusion result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
all_splits_scores.append((avg_res, true_preds, test_size, conf_mat))
total_avg_res = 0.0
total_true_preds = 0.0
total_test_size = 0.0
total_conf_mat = np.zeros(shape=(51, 51), dtype=float)
for avg_res, true_preds, test_size, conf_mat in all_splits_scores:
total_avg_res += avg_res
total_true_preds += true_preds
total_test_size += test_size
total_conf_mat += conf_mat
print('Average score is {0:.1f}% ({1}/{2})'.format(total_avg_res / 10, total_true_preds, total_test_size))
individual_class_scores(total_conf_mat)
# show_wrgbd_conf_mat(total_conf_mat)
def wrgbd_combined_scores_conf_mat(params, svm_conf_paths):
model_rnn = get_rnn_model(params)
params.proceed_step = RunSteps.FIX_RECURSIVE_NN
rgb_data_type_ex = 'crop'
params.data_type = 'crop'
rgb_l1, rgb_l2, rgb_l3 = model_rnn.get_best_trio_layers()
params.proceed_step = RunSteps.FINE_RECURSIVE_NN
depth_data_type_ex = 'depthcrop'
params.data_type = 'depthcrop'
depth_l1, depth_l2, depth_l3 = model_rnn.get_best_trio_layers()
params.data_type = 'rgbd'
all_splits_scores = []
for split in range(1, 11):
rgb_conf_file = params.net_model + '_' + rgb_data_type_ex + '_split_' + str(split) + '.hdf5'
rgb_svm_conf_file_path = svm_conf_paths + rgb_conf_file
with h5py.File(rgb_svm_conf_file_path, 'r') as f:
rgb1_conf_scores = np.asarray(f[rgb_l1])
rgb2_conf_scores = np.asarray(f[rgb_l2])
#
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the pyxir TF executor"""
import unittest
import numpy as np
import pyxir as px
from pyxir.shapes import TensorShape
from pyxir.runtime import base
from pyxir.graph.layer import xlayer
from pyxir.graph.io import xlayer_io
try:
import tensorflow as tf
from pyxir.runtime.tensorflow.x_2_tf_registry import *
from pyxir.runtime.tensorflow.ops.tf_l0_input_and_other import *
from pyxir.runtime.tensorflow.ops.tf_l2_convolutions import *
except ModuleNotFoundError:
raise unittest.SkipTest("Skipping Tensorflow related test because Tensorflow is not available")
class TestTfL2Convolutions(unittest.TestCase):
def test_conv2d(self):
tf.compat.v1.reset_default_graph()
K = np.reshape(np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]],
dtype=np.float32),
(2, 1, 2, 2))
B = np.array([0, 0], dtype=np.float32)
X = xlayer.XLayer(
name='test_conv2d',
type=['Convolution'],
shapes=[1, 2, 3, 3],
sizes=[18],
bottoms=['input'],
tops=[],
data=xlayer.ConvData(K, B),
attrs={
'data_layout': 'NCHW',
'kernel_layout': 'OIHW',
'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
'strides': [1, 1],
'dilation': [1, 1],
'groups': 1
},
targets=[]
)
input_shapes = {
'input': TensorShape([1, 1, 4, 4])
}
inputs = {
'input': np.ones((1, 1, 4, 4), dtype=np.float32)
}
params = {
'test_conv2d_kernel': np.reshape(np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]],
dtype=np.float32),
(2, 1, 2, 2)),
'test_conv2d_biases': np.array([0, 0], dtype=np.float32)
}
layers = base.get_conv2d_layer(ConvLayer,
ConstantLayer)(
X, input_shapes, params)
assert(len(layers) == 3)
inputs.update(params)
for layer in layers:
# print("-----------------------")
# print("Run layer: {}".format(layer.name))
inpts = [inputs[name] for name in layer.inputs]
outpt = layer.forward_exec(inpts)
# print("Output:", outpt.shape, outpt)
inputs[layer.name] = outpt
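# With an all-ones 4x4 input, no padding and unit strides, every 2x2
# window simply sums the kernel entries: 1+2+3+4 = 10 for the first
# output channel and 5+6+7+8 = 26 for the second, over a 3x3 output.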
expected_outpt = np.array([[[[10., 10., 10.],
[10., 10., 10.],
[10., 10., 10.]],
[[26., 26., 26.],
[26., 26., 26.],
[26., 26., 26.]]]])
np.testing.assert_array_equal(outpt, expected_outpt)
def test_conv2d_tfl(self):
tf.compat.v1.reset_default_graph()
K = np.transpose(np.reshape(np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]],
dtype=np.float32),
(2, 1, 2, 2)),
(0, 2, 3, 1))
B = np.array([0, 0], dtype=np.float32)
X = xlayer.XLayer(
name='test_conv2d_tfl',
type=['Convolution'],
shapes=[1, 3, 3, 2],
sizes=[18],
bottoms=['input'],
tops=[],
data=xlayer.ConvData(K, B),
attrs={
'data_layout': 'NHWC',
'kernel_layout': 'OHWI',
'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
'strides': [1, 1],
'dilation': [1, 1],
'groups': 1
},
targets=[]
)
input_shapes = {
'input': TensorShape([1, 4, 4, 1])
}
inputs = {
'input': np.transpose(np.ones((1, 1, 4, 4), dtype=np.float32), (0, 2, 3, 1))
import numpy as np
import os
import sharpy.utils.cout_utils as cout
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
class ForcesContainer(object):
def __init__(self):
self.ts = 0
self.t = 0.0
self.forces = []
self.coords = []
@solver
class AeroForcesCalculator(BaseSolver):
"""AeroForcesCalculator
Calculates the total aerodynamic forces on the frame of reference ``A``.
"""
solver_id = 'AeroForcesCalculator'
solver_classification = 'post-processor'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['folder'] = 'str'
settings_default['folder'] = './output'
settings_description['folder'] = 'Output folder location'
settings_types['write_text_file'] = 'bool'
settings_default['write_text_file'] = False
settings_description['write_text_file'] = 'Write ``txt`` file with results'
settings_types['text_file_name'] = 'str'
settings_default['text_file_name'] = 'aeroforces.txt'
settings_description['text_file_name'] = 'Text file name'
settings_types['screen_output'] = 'bool'
settings_default['screen_output'] = True
settings_description['screen_output'] = 'Show results on screen'
settings_types['unsteady'] = 'bool'
settings_default['unsteady'] = False
settings_description['unsteady'] = 'Include unsteady contributions'
settings_default['coefficients'] = False
settings_types['coefficients'] = 'bool'
settings_description['coefficients'] = 'Calculate aerodynamic coefficients'
settings_types['q_ref'] = 'float'
settings_default['q_ref'] = 1
settings_description['q_ref'] = 'Reference dynamic pressure'
settings_types['S_ref'] = 'float'
settings_default['S_ref'] = 1
settings_description['S_ref'] = 'Reference area'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.settings = None
self.data = None
self.ts_max = 0
self.ts = 0
self.folder = ''
self.caller = None
def initialise(self, data, custom_settings=None, caller=None):
self.data = data
self.settings = data.settings[self.solver_id]
if self.data.structure.settings['unsteady']:
self.ts_max = self.data.ts + 1
else:
self.ts_max = 1
self.ts_max = len(self.data.structure.timestep_info)
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
self.caller = caller
def run(self, online=False):
self.ts = 0
self.calculate_forces()
if self.settings['write_text_file']:
self.folder = (self.settings['folder'] + '/' +
self.data.settings['SHARPy']['case'] + '/' +
'forces/')
# create folder for containing files if necessary
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self.folder += self.settings['text_file_name']
self.file_output()
if self.settings['screen_output']:
self.screen_output()
cout.cout_wrap('...Finished', 1)
return self.data
def calculate_forces(self):
for self.ts in range(self.ts_max):
rot = algebra.quat2rotation(self.data.structure.timestep_info[self.ts].quat)
force = self.data.aero.timestep_info[self.ts].forces
unsteady_force = self.data.aero.timestep_info[self.ts].dynamic_forces
n_surf = len(force)
for i_surf in range(n_surf):
total_steady_force = np.zeros((3,))
import numpy as np
import pymc3 as pm
from sklearn.metrics import r2_score
import theano
import theano.tensor as T
from pymc3_models.exc import PyMC3ModelsError
from pymc3_models.models import BayesianModel
class LinearRegression(BayesianModel):
"""
Linear Regression built using PyMC3.
"""
def __init__(self):
super(LinearRegression, self).__init__()
def create_model(self, sd, dof):
"""
Creates and returns the PyMC3 model.
Note: The size of the shared variables must match the size of the training data.
Otherwise, setting the shared variables later will raise an error.
See http://docs.pymc.io/advanced_theano.html
Returns
-------
the PyMC3 model
"""
model_input = theano.shared(np.zeros([self.num_training_samples, self.num_pred]))
from PIL import Image, ImageFile
import numpy as np
import os
ImageFile.LOAD_TRUNCATED_IMAGES=True
Image.MAX_IMAGE_PIXELS=None
PIXEL_TO_RGB = {
0: [0, 0, 0], # blank
1: [255, 0, 0], # red --> bare-land
2: [0, 255, 0], # green
3: [0, 0, 255], # blue --> vegetation
4: [255, 255, 0], # yellow
5: [255, 0, 255], # pink --> building
6: [0, 255, 255], # cyan --> road
7: [127, 0, 0], # dark red
8: [0, 127, 0], # dark green
9: [0, 0, 127], # dark blue
10: [127, 127, 0], # dark yellow --> warter-body
11: [127, 0, 127], # dark pink
12: [0, 127, 127], # dark cyan
}
RGB_TO_PIXEL = {"_".join([str(v) for v in value]): key for key, value in PIXEL_TO_RGB.items()}
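# e.g. RGB_TO_PIXEL["255_0_0"] == 1 (red -> bare-land) and
# PIXEL_TO_RGB[1] == [255, 0, 0], so the two lookup tables are inverses.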
def gray2RGB(img):
H, W = img.shape
print(">>>>>>>>>>", img)
img_rgb = np.random.randint(0, 256, size=[H, W, 3], dtype=np.uint8)
for i in range(H):
for j in range(W):
k = img[i][j]
rgb = PIXEL_TO_RGB.get(k, [255, 255, 255])
img_rgb[i][j] = rgb
return img_rgb
def RGB2gray(img_rgb):
H, W, C = img_rgb.shape
img = np.zeros((H, W), dtype=np.uint8)
for i in range(H):
for j in range(W):
rgb = img_rgb[i][j]
rgb = "_".join([str(v) for v in rgb])
k = RGB_TO_PIXEL.get(rgb, 255)
img[i][j] = k
return img
def convertGray2RGB(img_path, dest_path=None):
print(">>> img_path={}, dest_path={}".format(img_path, dest_path))
img_data = Image.open(img_path).convert("L")
img_np = np.array(img_data)
print(">>> gray2RGB before, img_np.shape={}".format(img_np.shape))
img_np = gray2RGB(img_np)
print(">>> gray2RGB after, img_np.shape={}".format(img_np.shape))
im = Image.fromarray(img_np)
if dest_path is None:
name = os.path.splitext(img_path)[0]
ext = os.path.splitext(img_path)[1]
dest_path = "{}_rgb{}".format(name, ext)
elif os.path.isdir(dest_path):
basename = os.path.basename(img_path)
name = os.path.splitext(basename)[0]
ext = os.path.splitext(basename)[1]
dest_path = os.path.join(dest_path, "{}_rgb{}".format(name, ext))
print(">>> save to dest_path={} start...".format(dest_path))
im.save(dest_path)
print(">>> save to dest_path={} success.".format(dest_path))
def convertGray2RGB_Muti(img_path, dest_path=None, h_num=1, w_num=1):
print(">>> img_path={}, dest_path={}".format(img_path, dest_path))
img_data = Image.open(img_path).convert("L")
img_np = np.array(img_data)
from erhsh import utils as eut
processor = eut.MutiProcessor(img_np, h_num, w_num, cube_func=gray2RGB)
ret = processor.process()
if dest_path is None:
name = os.path.splitext(img_path)[0]
ext = os.path.splitext(img_path)[1]
dest_path = "{}_rgb{}".format(name, ext)
elif os.path.isdir(dest_path):
basename = os.path.basename(img_path)
name = os.path.splitext(basename)[0]
ext = os.path.splitext(basename)[1]
dest_path = os.path.join(dest_path, "{}_rgb{}".format(name, ext))
to_image = Image.new("RGB", img_np.shape[:2][::-1])
for k, v in ret.items():
h_s, w_s, _, _ = tuple([int(x) for x in k.split("_")])
to_image.paste(Image.fromarray(v), (w_s, h_s))
to_image.save(dest_path)
def convertRGB2Gray(img_path, dest_path=None):
print(">>> img_path={}, dest_path={}".format(img_path, dest_path))
img_data = Image.open(img_path)
img_rgb = np.array(img_data)
import numpy as np
from utils.disp.showphases import show_csignals # , show_signal, magnospec
def get_erps(args):
# args.ave_wind_all = np.zeros([args.channels.shape[0], args.len_uc, args.win_l + args.win_r])
# args.std_wind_all = np.zeros([args.channels.shape[0], args.len_uc, args.win_l + args.win_r])
for cnt in range(args.channels.shape[0]):
signal = args.singled_out_filtered_notched[cnt, :]
# show_signal(signal)
# magnospec(signal, args.fs)
wind_ = np.zeros([len(args.times), args.win_l + args.win_r])
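# For every stimulus onset, cut a window of win_l pre- and win_r
# post-stimulus samples and baseline-correct it by subtracting the mean
# of the pre-stimulus interval.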
for cnti, i in enumerate(args.times):
baseline = np.mean(signal[i - args.win_l: i])
wind_[cnti, :] = signal[i - args.win_l: i + args.win_r] - baseline # baseline, faster
ave_wind = np.zeros([args.len_uc, args.win_l + args.win_r])
std_wind = np.zeros([args.len_uc, args.win_l + args.win_r])
#!/usr/bin/env python
# encoding: utf-8
"""
csystem.py
Created by <NAME> on 2/21/2012.
Copyright (c) NREL. All rights reserved.
"""
from __future__ import print_function
import numpy as np
class DirectionVector(object):
"""Handles rotation of direction vectors to appropriate coordinate systems.
All angles must be in degrees.
"""
def __init__(self, x, y, z, dx=None, dy=None, dz=None):
"""3-Dimensional vector that depends on direction only (not position).
Parameters
----------
x : float or ndarray
x-direction of vector(s)
y : float or ndarray
y-direction of vector(s)
z : float or ndarray
z-direction of vector(s)
"""
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
if dx is None:
dx = {}
dx["dx"] = np.ones_like(self.x)
dx["dy"] = np.zeros_like(self.y)
dx["dz"] = np.zeros_like(self.z)
dy = {}
dy["dx"] = np.zeros_like(self.x)
dy["dy"] = np.ones_like(self.y)
dy["dz"] = np.zeros_like(self.z)
dz = {}
dz["dx"] = np.zeros_like(self.x)
dz["dy"] = np.zeros_like(self.y)
dz["dz"] = np.ones_like(self.z)
self.dx = dx
self.dy = dy
self.dz = dz
@classmethod
def fromArray(cls, array):
"""initialize with NumPy array
Parameters
----------
array : ndarray
construct DirectionVector using array of size 3
"""
return cls(array[0], array[1], array[2])
def toArray(self):
"""convert DirectionVector to NumPy array
Returns
-------
array : ndarray
NumPy array in order x, y, z containing DirectionVector data
"""
return np.c_[self.x, self.y, self.z]
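# Example usage (hypothetical values):
#   v = DirectionVector(1.0, 0.0, 0.0)
#   v.toArray()                                   # array([[1., 0., 0.]])
#   DirectionVector.fromArray([0.0, 1.0, 0.0]).y  # array(1.0)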
def _rotateAboutZ(self, xstring, ystring, zstring, theta, thetaname, reverse=False):
"""
x X y = z. rotate c.s. about z, +theta
all angles in degrees
"""
thetaM = 1.0
if reverse:
thetaM = -1.0
x = getattr(self, xstring)
y = getattr(self, ystring)
z = getattr(self, zstring)
dx = getattr(self, "d" + xstring)
dy = getattr(self, "d" + ystring)
dz = getattr(self, "d" + zstring)
theta = np.radians(theta * thetaM)
c = np.cos(theta)
s = np.sin(theta)
xnew = x * c + y * s
ynew = -x * s + y * c
znew = z
angles = []
for key in dx.keys():
if not key in ["dx", "dy", "dz"]:
angles.append(key)
dxnew = {}
dxnew["dx"] = dx["dx"] * c + dy["dx"] * s
dxnew["dy"] = dx["dy"] * c + dy["dy"] * s
dxnew["dz"] = dx["dz"] * c + dy["dz"] * s
dxnew["d" + thetaname] = (-x * s + y * c) * np.radians(thetaM)
for dangle in angles:
dxnew[dangle] = dx[dangle] * c + dy[dangle] * s
dynew = {}
dynew["dx"] = -dx["dx"] * s + dy["dx"] * c
dynew["dy"] = -dx["dy"] * s + dy["dy"] * c
dynew["dz"] = -dx["dz"] * s + dy["dz"] * c
dynew["d" + thetaname] = (-x * c - y * s) * | np.radians(thetaM) | numpy.radians |
import EncoderFactory
from DatasetManager import DatasetManager
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion
from sklearn.ensemble import RandomForestClassifier
import time
import os
import sys
from sys import argv
import pickle
def create_model(args):
cls = RandomForestClassifier(n_estimators=500, max_features=float(args['max_features']),
max_depth=args['max_depth'], random_state=22,n_jobs=-1)
cls.fit(X_train, y_train)
return cls
dataset_name = argv[1]
optimal_params_filename = argv[2]
results_dir = argv[3]
split_type = "temporal"
train_ratio = 0.8
val_ratio = 0.2
# create results directory
if not os.path.exists(os.path.join(results_dir)):
os.makedirs(os.path.join(results_dir))
print('Preparing data...')
start = time.time()
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
min_prefix_length = 1
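# Cap the prefix length at the 90th percentile of case lengths so that a
# few very long traces do not dominate prefix generation.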
max_prefix_length = int(np.ceil(data.groupby(dataset_manager.case_id_col).size().quantile(0.9)))
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
print(time.time() - start)
print("split into training and test")
# split into training and test
if split_type == "temporal":
train, test = dataset_manager.split_data_strict(data, train_ratio, split=split_type)
else:
train, test = dataset_manager.split_data(data, train_ratio, split=split_type)
train, val = dataset_manager.split_val(train, val_ratio)
print(time.time() - start)
print("generate data where each prefix is a separate instance")
# generate data where each prefix is a separate instance
dt_train_prefixes = dataset_manager.generate_prefix_data(train, min_prefix_length, max_prefix_length)
dt_val_prefixes = dataset_manager.generate_prefix_data(val, min_prefix_length, max_prefix_length)
dt_test_prefixes = dataset_manager.generate_prefix_data(test, min_prefix_length, max_prefix_length)
print(time.time() - start)
print("encode all prefixes")
# encode all prefixes
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in ["static", "agg"]],n_jobs=-1)
arrayPrefixes = [dt_train_prefixes,dt_test_prefixes,dt_val_prefixes]
X_train = feature_combiner.fit_transform(dt_train_prefixes)
X_test = feature_combiner.fit_transform(dt_test_prefixes)
X_val = feature_combiner.fit_transform(dt_val_prefixes)
y_train = dataset_manager.get_label_numeric(dt_train_prefixes)
y_test = dataset_manager.get_label_numeric(dt_test_prefixes)
y_val = dataset_manager.get_label_numeric(dt_val_prefixes)
print(time.time() - start)
print("train the model with pre-tuned parameters")
# train the model with pre-tuned parameters
with open(optimal_params_filename, "rb") as fin:
best_params = pickle.load(fin)
print(time.time() - start)
print("get predictions for test set")
# get predictions for test set
cls = create_model(best_params)
preds_pos_label_idx = np.where(cls.classes_ == 1)
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
import math as m
import matplotlib as mlp
pgf_with_rc_fonts = {
"font.family": "serif",
"font.size": 16,
"legend.fontsize": 16,
"font.sans-serif": ["DejaVu Sans"], # use a specific sans-serif font
}
mlp.rcParams.update(pgf_with_rc_fonts)
def estimate_time(x, y):
angle = np.degrees(np.arctan2(y, x))
rot_time = np.abs(angle / velRot)
# calculate the distance
distance = np.hypot(x, y)
distance_time = distance / velWalk
total_time = distance_time + rot_time # type: np.ndarray
for d1 in range(len(x)):
for d2 in range(len(y)):
total_time[d1, d2] = 1.5 * total_time[d1, d2] * m.exp(-total_time[d1, d2] * 0.1)
if total_time[d1, d2] >= 5:
total_time[d1, d2] = 5
total_time[d1, d2] -= 5
return total_time
if __name__ == "__main__":
# Constants for robot
velRot = 60 # grad pro second
velWalk = 200 # mm pro second
size = 1000
x_val = np.arange(-size, size, 10)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-11-07 at 13:28
@author: cook
"""
from astropy import constants as cc
from astropy import units as uu
import numpy as np
from scipy.optimize import curve_fit
import warnings
import os
from apero import core
from apero.core import math as mp
from apero import lang
from apero.core import constants
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'polar.lsd.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get Logging function
WLOG = core.wlog
# Get function string
display_func = drs_log.display_func
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# =============================================================================
# Define user functions
# =============================================================================
def lsd_analysis_wrapper(params, pobjects, pprops, wprops, **kwargs):
"""
Function to call functions to perform Least Squares Deconvolution (LSD)
analysis on the polarimetry data.
:param params: ParamDict, parameter dictionary of constants
:param pobjects: dict, polar exposure file objects (the 'A_1' exposure is
                 used as the reference file)
:param pprops: ParamDict, parameter dictionary of polar data
:param wprops: ParamDict, parameter dictionary of wavelength data
:param kwargs: additional arguments (overwrite param properties)
:return:
"""
# set function name
func_name = display_func(params, 'lsd_analysis', __NAME__)
# get parameters from params/kwargs
do_lsd = pcheck(params, 'POLAR_LSD_ANALYSIS', 'do_lsd', kwargs, func_name)
wl_lower = pcheck(params, 'POLAR_LSD_WL_LOWER', 'wl_lower', kwargs,
func_name, mapf='list', dtype=float)
    wl_upper = pcheck(params, 'POLAR_LSD_WL_UPPER', 'wl_upper', kwargs,
func_name, mapf='list', dtype=float)
min_depth = pcheck(params, 'POLAR_LSD_MIN_LINEDEPTH', 'min_depth', kwargs,
func_name)
min_lande = pcheck(params, 'POLAR_LSD_MIN_LANDE', 'min_lande', kwargs,
func_name)
max_lande = pcheck(params, 'POLAR_LSD_MAX_LANDE', 'max_lande',kwargs,
func_name)
vinit = pcheck(params, 'POLAR_LSD_VINIT', 'vinit', kwargs, func_name)
vfinal = pcheck(params, 'POLAR_LSD_VFINAL', 'vfinal', kwargs, func_name)
normalize = pcheck(params, 'POLAR_LSD_NORM', 'normalize', kwargs, func_name)
nbinsize1 = pcheck(params, 'POLAR_LSD_NBIN1', 'nbinsize1', kwargs,
func_name)
noverlap1 = pcheck(params, 'POLAR_LSD_NOVERLAP1', 'noverlap1', kwargs,
func_name)
nsigclip1 = pcheck(params, 'POLAR_LSD_NSIGCLIP1', 'nsigclip1', kwargs,
func_name)
nwindow1 = pcheck(params, 'POLAR_LSD_NWINDOW1', 'nwindow1', kwargs,
func_name)
nmode1 = pcheck(params, 'POLAR_LSD_NMODE1', 'nmode1', kwargs, func_name)
nlfit1 = pcheck(params, 'POLAR_LSD_NLFIT1', 'nlfit1', kwargs, func_name)
npoints = pcheck(params, 'POLAR_LSD_NPOINTS', 'npoints', kwargs, func_name)
nbinsize2 = pcheck(params, 'POLAR_LSD_NBIN2', 'nbinsize2', kwargs,
func_name)
noverlap2 = pcheck(params, 'POLAR_LSD_NOVERLAP2', 'noverlap2', kwargs,
func_name)
nsigclip2 = pcheck(params, 'POLAR_LSD_NSIGCLIP1', 'nsigclip1', kwargs,
func_name)
nwindow2 = pcheck(params, 'POLAR_LSD_NWINDOW2', 'nwindow2', kwargs,
func_name)
nmode2 = pcheck(params, 'POLAR_LSD_NMODE2', 'nmode2', kwargs, func_name)
nlfit2 = pcheck(params, 'POLAR_LSD_NLFIT2', 'nlfit2', kwargs, func_name)
# define outputs
lprops = ParamDict()
# ----------------------------------------------------------------------
# log progress
WLOG(params, '', TextEntry('40-021-00004'))
# ----------------------------------------------------------------------
# deal with not running lsd
if not do_lsd:
oargs = [lprops, func_name, do_lsd, wl_lower, wl_upper, min_depth,
min_lande, max_lande,
vinit, vfinal, normalize, nbinsize1, noverlap1, nsigclip1,
nwindow1, nmode1, nlfit1, npoints, nbinsize2, noverlap2,
nsigclip2, nwindow2, nmode2, nlfit2]
return add_outputs(*oargs)
# ----------------------------------------------------------------------
# get lsd mask file name (if set)
lsdmask = kwargs.get('lsdmask', None)
if lsdmask is None:
lsdmask = params['INPUTS'].get('lsdmask', None)
# check that path exists
if lsdmask is not None:
# make sure path is absolute
lsdmask = os.path.abspath(lsdmask)
# check that lsd mask exists
if not os.path.exists(lsdmask):
# warn user we are not using LSD mask
# TODO: move to language DB
wmsg = 'LSD mask "{0}" does not exist - using defaults'
wargs = [lsdmask]
WLOG(params, 'warning', wmsg.format(*wargs))
# set lsdmask to None
lsdmask = None
# ----------------------------------------------------------------------
# get data from pprops
pol = pprops['POL']
polerr = pprops['POLERR']
null = pprops['NULL2']
stokesi = pprops['STOKESI']
stokesierr = pprops['STOKESIERR']
# get data from wprops
wavemap = wprops['WAVEMAP']
# get first file as reference
pobj = pobjects['A_1']
# ----------------------------------------------------------------------
# get temperature from file
temperature = pobj.infile.get_key('KW_OBJ_TEMP', dtype=float,
required=False)
# deal with no temperature
if temperature is None and lsdmask is None:
eargs = [pobj.filename, params['KW_OBJTEMP'][0], func_name]
WLOG(params, 'warning', TextEntry('09-021-00008', args=eargs))
# return outputs
oargs = [lprops, func_name, False, wl_lower, wl_upper, min_depth,
vinit, vfinal, normalize, nbinsize1, noverlap1, nsigclip1,
nwindow1, nmode1, nlfit1, npoints, nbinsize2, noverlap2,
nsigclip2, nwindow2, nmode2, nlfit2]
return add_outputs(*oargs)
# ----------------------------------------------------------------------
# load the spectral lines
# ----------------------------------------------------------------------
out = load_lsd_spectral_lines(params, temperature, wl_lower, wl_upper,
min_depth, min_lande, max_lande, lsdmask)
sp_filename, wavec, zn, depth, weight = out
# ----------------------------------------------------------------------
# get wavelength ranges covering spectral lines in the ccf mask
# ----------------------------------------------------------------------
fwave_lower, fwave_upper = get_wl_ranges(wavec, vinit, vfinal)
# ----------------------------------------------------------------------
# prepare polarimetry data
# ----------------------------------------------------------------------
# bunch normalisation params into nparams
nparams = dict(binsize=nbinsize1, overlap=noverlap1, sigmaclip=nsigclip1,
window=nwindow1, mode=nmode1, use_linear_fit=nlfit1)
# prepare data
out = prepare_polarimetry_data(params, wavemap, stokesi, stokesierr, pol,
polerr, null, fwave_lower, fwave_upper,
normalize, nparams)
spfile, lsd_wave, lsd_stokesi, lsd_stokesierr, lsd_pol = out[:5]
lsd_polerr, lsd_null = out[5:]
# ----------------------------------------------------------------------
# call function to perform lsd analysis
# ----------------------------------------------------------------------
# bunch normalisation params into nparams
nparams = dict(binsize=nbinsize2, overlap=noverlap2, sigmaclip=nsigclip2,
window=nwindow2, mode=nmode2, use_linear_fit=nlfit2)
# run lsd analysis
out = lsd_analysis(lsd_wave, lsd_stokesi, lsd_stokesierr, lsd_pol,
lsd_polerr, lsd_null, wavec, depth, weight, vinit,
vfinal, npoints, nparams)
# ----------------------------------------------------------------------
# push into storage
lprops['LSD_WAVE'] = lsd_wave
lprops['LSD_VELOCITIES'] = out[0]
lprops['LSD_STOKES_I'] = out[1]
lprops['LSD_STOKES_I_ERR'] = lsd_stokesierr
lprops['LSD_STOKES_I_MODEL'] = out[2]
lprops['LSD_STOKES_I_FIT_RV'] = out[3]
lprops['LSD_STOKES_FIT_RESOL'] = out[4]
lprops['LSD_POL'] = lsd_pol
lprops['LSD_POLERR'] = lsd_polerr
lprops['LSD_POL_MEAN'] = out[5]
lprops['LSD_POL_STD'] = out[6]
lprops['LSD_POL_MEDIAN'] = out[7]
lprops['LSD_POL_MED_ABS_DEV'] = out[8]
lprops['LSD_STOKES_VQU'] = out[9]
lprops['LSD_STOKES_VQU_MEAN'] = out[10]
lprops['LSD_STOKES_VQU_STD'] = out[11]
lprops['LSD_NULL'] = out[12]
lprops['LSD_NULL_MEAN'] = out[13]
lprops['LSD_NULL_STD'] = out[14]
lprops['LSD_MASK'] = spfile
# set source
keys = ['LSD_WAVE', 'LSD_VELOCITIES', 'LSD_STOKES_I', 'LSD_STOKES_I_ERR',
'LSD_STOKES_I_MODEL', 'LSD_STOKES_I_FIT_RV', 'LSD_STOKES_FIT_RESOL',
'LSD_POL', 'LSD_POLERR', 'LSD_POL_MEAN', 'LSD_POL_STD',
'LSD_POL_MEDIAN', 'LSD_POL_MED_ABS_DEV', 'LSD_STOKES_VQU',
'LSD_STOKES_VQU_MEAN', 'LSD_STOKES_VQU_STD', 'LSD_NULL',
'LSD_NULL_MEAN', 'LSD_NULL_STD', 'LSD_MASK']
lprops.set_sources(keys, func_name)
# return lsd properties
oargs = [lprops, func_name, do_lsd, wl_lower, wl_upper, min_depth,
vinit, vfinal, normalize, nbinsize1, noverlap1, nsigclip1,
nwindow1, nmode1, nlfit1, npoints, nbinsize2, noverlap2,
nsigclip2, nwindow2, nmode2, nlfit2]
return add_outputs(*oargs)
# =============================================================================
# Define worker functions
# =============================================================================
def load_lsd_spectral_lines(params, temperature, wl_lower, wl_upper,
min_depth, min_lande, max_lande, lsdmask=None):
"""
Function to load spectral lines data for LSD analysis.
:param params: ParamDict, parameter dictionary of constants
:param temperature: float, effective temperature used to select the mask
:param wl_lower: list of floats, lower bounds of the wavelength ranges
:param wl_upper: list of floats, upper bounds of the wavelength ranges
:param min_depth: float, minimum line depth accepted
:param min_lande: float, minimum Lande g-factor accepted
:param max_lande: float, maximum Lande g-factor accepted
:param lsdmask: string or None, optional path to an LSD mask file
:returns: tuple of
    sp_filename: string, selected filename with CCF lines
    wavec: numpy array (1D), central wavelengths
    zn: numpy array (1D), atomic numbers (Z)
    depth: numpy array (1D), line depths
    weight: numpy array (1D), line weights = wavec * depth * lande
"""
# set function name
func_name = display_func(params, 'load_lsd_spectral_lines', __NAME__)
# ----------------------------------------------------------------------
# get temperature data
sp_data, sp_filename = drs_data.load_sp_mask_lsd(params, temperature,
filename=lsdmask)
# get flag for lines
flagf = np.array(sp_data['flagf'] == 1)
# get data and mask by flag
wavec = sp_data['wavec'][flagf]
znum = sp_data['znum'][flagf]
depth = sp_data['depth'][flagf]
lande = sp_data['lande'][flagf]
# ----------------------------------------------------------------------
# set up mask for wl ranges
wl_mask = np.zeros(len(wavec), dtype=bool)
# loop over spectral ranges to select only spectral lines within ranges
for it in range(len(wl_lower)):
wl_mask |= (wavec > wl_lower[it]) & (wavec < wl_upper[it])
# apply mask to data
wavec = wavec[wl_mask]
zn = znum[wl_mask]
depth = depth[wl_mask]
lande = lande[wl_mask]
# ----------------------------------------------------------------------
# PS. Below it applies a line depth mask, however the cut in line depth
# should be done according to the SNR. This will be studied and implemented
# later. <NAME>, Aug 10 2018.
# create mask to cutoff lines with lande g-factor without sensible values
gmask = (lande > min_lande) & (lande < max_lande)
# apply mask to the data
wavec = wavec[gmask]
zn = zn[gmask]
depth = depth[gmask]
lande = lande[gmask]
# create mask to cut lines with depth lower than POLAR_LSD_MIN_LINEDEPTH
dmask = np.where(depth > min_depth)
# apply mask to the data
wavec = wavec[dmask]
zn = zn[dmask]
depth = depth[dmask]
lande = lande[dmask]
# calculate weights for calculation of polarimetric Z-profile
weight = wavec * depth * lande
weight = weight / np.max(weight)
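# i.e. w_i = wavec_i * depth_i * lande_i, rescaled so that max(w) = 1
# (the usual LSD weighting for the polarimetric Z-profile).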
# return variables
return sp_filename, wavec, zn, depth, weight
def get_wl_ranges(wavec, vinit, vfinal):
"""
Function to generate a list of spectral ranges covering all spectral
lines in the CCF mask, where the width of each individual range is
defined by the LSD velocity vector
:param wavec: numpy array (1D), central wavelengths
:param vinit: initial velocity for LSD profile
:param vfinal: final velocity for LSD profile
:returns: the wavelength ranges tuple of lower and upper bounds
"""
# calculate the velocity difference
vdiff = vfinal - vinit
# define the spectral ranges
d_wave = wavec * vdiff / (2 * speed_of_light)
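# Doppler relation: delta_lambda / lambda = delta_v / c, so each line gets
# a half-width of lambda * (vfinal - vinit) / (2 c) on either side of its
# centre, covering the full LSD velocity span.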
wave_lower = wavec - d_wave
wave_upper = wavec + d_wave
# merge overlapping regions
current_lower, current_upper = wave_lower[0], wave_upper[0]
# storage for outputs
final_wave_lower, final_wave_upper = [], []
# loop through limits and merge
for it in range(len(wave_lower)):
# if lower is less than current upper change the current upper value
if wave_lower[it] <= current_upper:
current_upper = wave_upper[it]
# else append to final bounds
else:
final_wave_lower.append(current_lower)
final_wave_upper.append(current_upper)
# update the current bounds
current_lower, current_upper = wave_lower[it], wave_upper[it]
# append last bounds
final_wave_lower.append(current_lower)
final_wave_upper.append(current_upper)
# return wlranges
return final_wave_lower, final_wave_upper
def prepare_polarimetry_data(params, wavemap, stokesi, stokesierr, pol, polerr,
null, fwave_lower, fwave_upper, normalize=True,
nparams=None):
"""
Function to prepare polarimetry data for LSD analysis.
:param params: ParamDict, parameter dictionary of constants
:param wavemap: numpy array (2D), wavelength data
:param stokesi: numpy array (2D), Stokes I data
:param stokesierr: numpy array (2D), errors of Stokes I
:param pol: numpy array (2D), degree of polarization data
:param polerr: numpy array (2D), errors of degree of polarization
:param null: numpy array (2D), null polarization data
:param fwave_lower: list of floats, lower bounds of the line regions
:param fwave_upper: list of floats, upper bounds of the line regions
:param normalize: bool, normalize Stokes I data by its continuum
:param nparams: dict or None, continuum-fit parameters for normalization
:returns: updated data (mask filename, wave, stokesi, stokesierr, pol,
          polerr, null)
"""
# get the dimensions from wavemap
nord, nbpix = wavemap.shape
# get the wavelength mask (per order)
# TODO: Question: Why do we need this?
owltable, owlfilename = drs_data.load_order_mask(params)
owl_lower = owltable['lower']
owl_upper = owltable['upper']
# ------------------------------------------------------------------
# storage for lsd
lsd_wave, lsd_stokesi, lsd_stokesierr = [], [], []
lsd_pol, lsd_polerr, lsd_null = [], [], []
# ------------------------------------------------------------------
# loop over each order
for order_num in range(nord):
# ------------------------------------------------------------------
# mask the nan values
nanmask = np.isfinite(stokesi[order_num]) & np.isfinite(pol[order_num])
# ------------------------------------------------------------------
# mask by wavelength
wavemask = wavemap[order_num] > owl_lower[order_num]
wavemask &= wavemap[order_num] < owl_upper[order_num]
# ------------------------------------------------------------------
# combine masks
mask = nanmask & wavemask
# ------------------------------------------------------------------
# test if we still have valid elements
if np.sum(mask) == 0:
continue
# ------------------------------------------------------------------
# normalise if required
if normalize and nparams is not None:
# add x and y to nparams
nparams['x'] = wavemap[order_num][mask]
nparams['y'] = stokesi[order_num][mask]
# calculate continuum
continuum, _, _ = mp.continuum(**nparams)
# normalize stokesi
flux = stokesi[order_num][mask] / continuum
else:
flux = stokesi[order_num][mask]
# ------------------------------------------------------------------
# append to lsd storage
lsd_wave += list(wavemap[order_num][mask])
lsd_stokesi += list(flux)
lsd_stokesierr += list(stokesierr[order_num][mask])
lsd_pol += list(pol[order_num][mask])
lsd_polerr += list(polerr[order_num][mask])
lsd_null += list(null[order_num][mask])
# ----------------------------------------------------------------------
# sort by wavelength
sortmask = np.argsort(lsd_wave)
lsd_wave = np.array(lsd_wave)[sortmask]
lsd_stokesi = np.array(lsd_stokesi)[sortmask]
lsd_stokesierr = np.array(lsd_stokesierr)[sortmask]
lsd_pol = np.array(lsd_pol)[sortmask]
lsd_polerr = np.array(lsd_polerr)[sortmask]
lsd_null = np.array(lsd_null)[sortmask]
# ----------------------------------------------------------------------
# combine mask
lsdmask = np.zeros(len(lsd_wave), dtype=bool)
# loop over spectral ranges to select only spectral regions of interest
for it in range(len(fwave_lower)):
# create wavelength mask to limit wavelength range
wavemask = lsd_wave > fwave_lower[it]
wavemask &= lsd_wave < fwave_upper[it]
# add to lsdmask
lsdmask |= wavemask
# ----------------------------------------------------------------------
# apply mask to lsd data
lsd_wave = lsd_wave[lsdmask]
lsd_stokesi = lsd_stokesi[lsdmask]
lsd_stokesierr = lsd_stokesierr[lsdmask]
lsd_pol = lsd_pol[lsdmask]
lsd_polerr = lsd_polerr[lsdmask]
lsd_null = lsd_null[lsdmask]
# ----------------------------------------------------------------------
# return data
return (owlfilename, lsd_wave, lsd_stokesi, lsd_stokesierr, lsd_pol,
lsd_polerr, lsd_null)
def lsd_analysis(lsd_wave, lsd_stokesi, lsd_stokesierr, lsd_pol, lsd_polerr,
lsd_null, wavec, depths, weight, vinit, vfinal, npoints,
nparams):
# create velocity vector for output LSD profile
velocities = np.linspace(vinit, vfinal, npoints)
# ----------------------------------------------------------------------
# create line pattern matrix for flux LSD
mmf, mmp = line_pattern_matrix(lsd_wave, wavec, depths, weight, velocities)
# ----------------------------------------------------------------------
# calculate flux LSD profile
stokesi = calculate_lsd_profile(lsd_stokesi, lsd_stokesierr,
velocities, mmf, normalize=False)
# ----------------------------------------------------------------------
# fit gaussian to the measured flux LSD profile
out = fit_gauss_lsd_profile(velocities, stokesi)
stokesi_model, fit_rv, fit_resol = out
# ----------------------------------------------------------------------
# calculate polarimetry LSD profile
stokes_vqu = calculate_lsd_profile(lsd_pol, lsd_polerr, velocities, mmp,
nparams)
# ----------------------------------------------------------------------
# calculate null polarimetry LSD profile
null = calculate_lsd_profile(lsd_null, lsd_polerr, velocities, mmp,
nparams)
# ----------------------------------------------------------------------
# calculate statistical quantities
# for pol
pol_mean = mp.nanmean(lsd_pol)
pol_std = mp.nanstd(lsd_pol)
pol_median = mp.nanmedian(lsd_pol)
pol_medabsdev = mp.nanmedian(abs(lsd_pol - pol_median))
# for stokesi
stokesvqu_mean = mp.nanmean(stokes_vqu)
stokesvqu_std = mp.nanstd(stokes_vqu)
# for null
null_mean = mp.nanmean(null)
null_std = mp.nanstd(null)
# return all lsd values
return (velocities, stokesi, stokesi_model, fit_rv, fit_resol, pol_mean,
pol_std, pol_median, pol_medabsdev, stokes_vqu, stokesvqu_mean,
stokesvqu_std, null, null_mean, null_std)
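# (Added note, not in the original source) pol_medabsdev above is the raw
# median absolute deviation of the degree of polarization; if a robust
# stand-in for the standard deviation is wanted, the usual Gaussian scaling
# is sigma ~= 1.4826 * MAD.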
def line_pattern_matrix(wl, wlc, depth, weight, vels):
"""
Function to calculate the line pattern matrix M given in Eq (4) of paper
Donati et al. (1997), MNRAS 291, 658-682
:param wl: numpy array (1D), input wavelength data (size n = spectrum size)
:param wlc: numpy array (1D), central wavelengths (size = number of lines)
:param depth: numpy array (1D), line depths (size = number of lines)
:param weight: numpy array (1D), line polar weights (size = number of lines)
:param vels: numpy array (1D), LSD profile velocity vector (size = m)
:return: mmf, mmp
mmf: numpy array (2D) of size n x m, line pattern matrix for flux LSD.
mmp: numpy array (2D) of size n x m, line pattern matrix for polar LSD.
"""
# set number of points and velocity (km/s) limits in LSD profile
mnum, vinit, vfinal = len(vels), vels[0], vels[-1]
# set number of spectral points
num = len(wl)
# initialize line pattern matrix for flux LSD
mmf = np.zeros((num, mnum))
# initialize line pattern matrix for polar LSD
mmp = np.zeros((num, mnum))
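# (Added note, not in the original source) the matrices are presumably filled
# using the Doppler relation between a spectral point and a line centre,
# v = c * (wl - wlc) / wlc, with each nonzero element carrying the line depth
# (flux matrix mmf) or the polar weight (mmp) at the matching velocity bin
# between vinit and vfinal.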
from keras.utils import Sequence
import pandas as pd
import numpy as np
import random
import math
import pysam
from dragonn.utils import ltrdict
import threading
def dinuc_shuffle(seq):
#get list of dinucleotides
nucs=[]
for i in range(0,len(seq),2):
nucs.append(seq[i:i+2])
#generate a random permutation
random.shuffle(nucs)
return ''.join(nucs)
def revcomp(seq):
seq=seq[::-1].upper()
comp_dict=dict()
comp_dict['A']='T'
comp_dict['T']='A'
comp_dict['C']='G'
comp_dict['G']='C'
rc=[]
for base in seq:
if base in comp_dict:
rc.append(comp_dict[base])
else:
rc.append(base)
return ''.join(rc)
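# Quick worked examples (illustrative only, not part of the original module):
# revcomp('AAGG') returns 'CCTT' and revcomp('ACGT') returns 'ACGT' (it is its
# own reverse complement); dinuc_shuffle('AACCGGTT') returns a random
# permutation of the dinucleotides ['AA', 'CC', 'GG', 'TT'], e.g. 'GGAATTCC'.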
def open_data_file(data_path,tasks,num_to_read=None):
if data_path.endswith('.hdf5'):
if (tasks is None) and (num_to_read is not None):
data=pd.read_hdf(data_path,start=0,stop=num_to_read)
elif (tasks is not None) and (num_to_read is None):
data=pd.read_hdf(data_path,columns=tasks)
elif (tasks is None) and (num_to_read is None):
data=pd.read_hdf(data_path)
else:
data=pd.read_hdf(data_path,columns=tasks,start=0,stop=num_to_read)
else:
#treat as bed file
if (tasks is None) and (num_to_read is not None):
data=pd.read_csv(data_path,header=0,sep='\t',index_col=[0,1,2],nrows=num_to_read)
elif (tasks is None) and (num_to_read is None):
data=pd.read_csv(data_path,header=0,sep='\t',index_col=[0,1,2])
else:
data=pd.read_csv(data_path,header=0,sep='\t',nrows=1)
chrom_col=data.columns[0]
start_col=data.columns[1]
end_col=data.columns[2]
if num_to_read is None:
data=pd.read_csv(data_path,header=0,sep='\t',usecols=[chrom_col,start_col,end_col]+tasks,index_col=[0,1,2])
else:
data=pd.read_csv(data_path,header=0,sep='\t',usecols=[chrom_col,start_col,end_col]+tasks,index_col=[0,1,2],nrows=num_to_read)
return data
#use wrappers for keras Sequence generator class to allow batch shuffling upon epoch end
class DataGenerator(Sequence):
def __init__(self,data_path,ref_fasta,batch_size=128,add_revcomp=True,tasks=None,shuffled_ref_negatives=False,upsample=True,upsample_ratio=0.1,upsample_thresh=0.5,num_to_read=None):
self.lock = threading.Lock()
self.batch_size=batch_size
#decide if reverse complement should be used
self.add_revcomp=add_revcomp
if add_revcomp==True:
self.batch_size=int(batch_size/2)
#determine whether negative set should consist of the shuffled refs.
# If so, split batch size in 2, as each batch will be augmented with shuffled ref negatives
# in ratio equal to positives
self.shuffled_ref_negatives=shuffled_ref_negatives
if self.shuffled_ref_negatives==True:
self.batch_size=int(self.batch_size/2)
#open the reference file
self.ref_fasta=ref_fasta
self.data=open_data_file(data_path,tasks,num_to_read)
self.indices=np.arange(self.data.shape[0])
num_indices=self.indices.shape[0]
self.add_revcomp=add_revcomp
#set variables needed for upsampling the positives
self.upsample=upsample
if self.upsample==True:
self.upsample_ratio=upsample_ratio
self.upsample_thresh=upsample_thresh
self.ones = self.data.loc[(self.data >= self.upsample_thresh).any(axis=1)]
self.zeros = self.data.loc[(self.data < self.upsample_thresh).all(axis=1)]
self.pos_batch_size = int(self.batch_size * self.upsample_ratio)
self.neg_batch_size = self.batch_size - self.pos_batch_size
self.pos_indices=np.arange(self.ones.shape[0])
self.neg_indices=np.arange(self.zeros.shape[0])
#wrap the positive and negative indices to reach size of self.indices
num_pos_wraps=math.ceil(num_indices/self.pos_indices.shape[0])
num_neg_wraps=math.ceil(num_indices/self.neg_indices.shape[0])
self.pos_indices=np.repeat(self.pos_indices,num_pos_wraps)[0:num_indices]
np.random.shuffle(self.pos_indices)
self.neg_indices=np.repeat(self.neg_indices,num_neg_wraps)[0:num_indices]
np.random.shuffle(self.neg_indices)
def __len__(self):
return math.ceil(self.data.shape[0]/self.batch_size)
def __getitem__(self,idx):
with self.lock:
self.ref=pysam.FastaFile(self.ref_fasta)
if self.shuffled_ref_negatives==True:
return self.get_shuffled_ref_negatives_batch(idx)
elif self.upsample==True:
return self.get_upsampled_positives_batch(idx)
else:
return self.get_basic_batch(idx)
def get_shuffled_ref_negatives_batch(self,idx):
#get seq positions
inds=self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
bed_entries=self.data.index[inds]
#get sequences
seqs=[self.ref.fetch(i[0],i[1],i[2]) for i in bed_entries]
if self.add_revcomp==True:
#add in the reverse-complemented sequences for training.
seqs_rc=[revcomp(s) for s in seqs]
seqs=seqs+seqs_rc
#generate the corresponding negative set by dinucleotide-shuffling the sequences
seqs_shuffled=[dinuc_shuffle(s) for s in seqs]
seqs=seqs+seqs_shuffled
#one-hot-encode the fasta sequences
seqs=np.array([[ltrdict.get(x,[0,0,0,0]) for x in seq] for seq in seqs])
x_batch=np.expand_dims(seqs,1)
y_batch=np.asarray(self.data.iloc[inds])
if self.add_revcomp==True:
y_batch=np.concatenate((y_batch,y_batch),axis=0)
y_shape=y_batch.shape
y_batch=np.concatenate((y_batch,np.zeros(y_shape)))
return (x_batch,y_batch)
def get_upsampled_positives_batch(self,idx):
#get seq positions
pos_inds=self.pos_indices[idx*self.pos_batch_size:(idx+1)*self.pos_batch_size]
pos_bed_entries=self.ones.index[pos_inds]
neg_inds=self.neg_indices[idx*self.neg_batch_size:(idx+1)*self.neg_batch_size]
neg_bed_entries=self.zeros.index[neg_inds]
#print(neg_inds[0:10])
#bed_entries=pos_bed_entries+neg_bed_entries
#get sequences
pos_seqs=[self.ref.fetch(i[0],i[1],i[2]) for i in pos_bed_entries]
neg_seqs=[self.ref.fetch(i[0],i[1],i[2]) for i in neg_bed_entries]
seqs=pos_seqs+neg_seqs
if self.add_revcomp==True:
#add in the reverse-complemented sequences for training.
seqs_rc=[revcomp(s) for s in seqs]
seqs=seqs+seqs_rc
#one-hot-encode the fasta sequences
seqs=np.array([[ltrdict.get(x,[0,0,0,0]) for x in seq] for seq in seqs])
x_batch=np.expand_dims(seqs,1)
#extract the positive and negative labels at the current batch of indices
y_batch_pos=self.ones.iloc[pos_inds]
y_batch_neg=self.zeros.iloc[neg_inds]
y_batch=np.concatenate((y_batch_pos,y_batch_neg),axis=0)
#add in the labels for the reverse complement sequences, if used
if self.add_revcomp==True:
y_batch=np.concatenate((y_batch,y_batch),axis=0)
return (x_batch,y_batch)
def get_basic_batch(self,idx):
#get seq positions
inds=self.indices[idx*self.batch_size:(idx+1)*self.batch_size]
bed_entries=self.data.index[inds]
#get sequences
seqs=[self.ref.fetch(i[0],i[1],i[2]) for i in bed_entries]
if self.add_revcomp==True:
#add in the reverse-complemented sequences for training.
seqs_rc=[revcomp(s) for s in seqs]
seqs=seqs+seqs_rc
#one-hot-encode the fasta sequences
seqs=np.array([[ltrdict.get(x,[0,0,0,0]) for x in seq] for seq in seqs])
x_batch=np.expand_dims(seqs,1)
#extract the labels at the current batch of indices
y_batch=np.asarray(self.data.iloc[inds])
#add in the labels for the reverse complement sequences, if used
if self.add_revcomp==True:
y_batch=np.concatenate((y_batch,y_batch),axis=0)
return (x_batch,y_batch)
def on_epoch_end(self):
#if upsampling is being used, shuffle the positive and negative indices
if self.upsample==True:
np.random.shuffle(self.pos_indices)
np.random.shuffle(self.neg_indices)
else:
np.random.shuffle(self.indices)
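# Minimal usage sketch (illustrative; the file paths and the compiled `model`
# are placeholders, not from the original code). The generator is a
# keras.utils.Sequence, so Keras calls __getitem__ per batch and
# on_epoch_end (above) to reshuffle between epochs:
#
# train_gen = DataGenerator(data_path='labels.hdf5', ref_fasta='hg19.fa',
#                           batch_size=128, upsample=True, upsample_ratio=0.1)
# model.fit_generator(train_gen, epochs=10, workers=4, use_multiprocessing=False)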
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from data_iterator import *
from state import *
from dialog_encdec import *
from utils import *
import time
import traceback
import sys
import argparse
import cPickle
import logging
import search
import pprint
import numpy
import collections
import signal
import math
import gc
import os
import os.path
# For certain clusters (e.g. Guillumin) we use flag 'DUMP_EXPERIMENT_LOGS_TO_DISC'
# to force dumping log outputs to file.
if 'DUMP_EXPERIMENT_LOGS_TO_DISC' in os.environ:
if os.environ['DUMP_EXPERIMENT_LOGS_TO_DISC'] == '1':
sys.stdout = open('Exp_Out.txt', 'a')
sys.stderr = open('Exp_Err.txt', 'a')
from os import listdir
from os.path import isfile, join
import matplotlib
matplotlib.use('Agg')
import pylab
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
logger = logging.getLogger(__name__)
### Unique RUN_ID for this execution
RUN_ID = str(time.time())
### Additional measures can be set here
measures = ["train_cost", "train_misclass", "train_kl_divergence_cost", "train_posterior_gaussian_mean_variance", "valid_cost", "valid_misclass", "valid_posterior_gaussian_mean_variance", "valid_kl_divergence_cost", "valid_emi"]
def init_timings():
timings = {}
for m in measures:
timings[m] = []
return timings
def save(model, timings, train_iterator, post_fix = ''):
print("Saving the model...")
# ignore keyboard interrupt while saving
start = time.time()
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
model.state['train_iterator_offset'] = train_iterator.get_offset() + 1
model.state['train_iterator_reshuffle_count'] = train_iterator.get_reshuffle_count()
model.save(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + post_fix + 'model.npz')
cPickle.dump(model.state, open(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + post_fix + 'state.pkl', 'w'))
numpy.savez(model.state['save_dir'] + '/' + model.state['run_id'] + "_" + model.state['prefix'] + post_fix + 'timing.npz', **timings)
signal.signal(signal.SIGINT, s)
print("Model saved, took {}".format(time.time() - start))
def load(model, filename, parameter_strings_to_ignore):
print("Loading the model...")
# ignore keyboard interrupt while saving
start = time.time()
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
model.load(filename, parameter_strings_to_ignore)
signal.signal(signal.SIGINT, s)
print("Model loaded, took {}".format(time.time() - start))
def main(args):
logging.basicConfig(level = logging.DEBUG,
format = "%(asctime)s: %(name)s: %(levelname)s: %(message)s")
state = eval(args.prototype)()
timings = init_timings()
auto_restarting = False
if args.auto_restart:
assert not args.save_every_valid_iteration
assert len(args.resume) == 0
directory = state['save_dir']
if not directory[-1] == '/':
directory = directory + '/'
auto_resume_postfix = state['prefix'] + '_auto_model.npz'
if os.path.exists(directory):
directory_files = [f for f in listdir(directory) if isfile(join(directory, f))]
resume_filename = ''
for f in directory_files:
if len(f) > len(auto_resume_postfix):
if f[len(f) - len(auto_resume_postfix):len(f)] == auto_resume_postfix:
if len(resume_filename) > 0:
print('ERROR: FOUND MULTIPLE MODELS IN DIRECTORY:', directory)
assert False
else:
resume_filename = directory + f[0:len(f)-len('__auto_model.npz')]
if len(resume_filename) > 0:
logger.debug("Found model to automatically resume: %s" % resume_filename)
auto_restarting = True
# Setup training to automatically resume training with the model found
args.resume = resume_filename + '__auto'
# Disable training from reinitialization any parameters
args.reinitialize_decoder_parameters = False
args.reinitialize_latent_variable_parameters = False
else:
logger.debug("Could not find any model to automatically resume...")
if args.resume != "":
logger.debug("Resuming %s" % args.resume)
state_file = args.resume + '_state.pkl'
timings_file = args.resume + '_timing.npz'
if os.path.isfile(state_file) and os.path.isfile(timings_file):
logger.debug("Loading previous state")
state = cPickle.load(open(state_file, 'r'))
timings = dict(numpy.load(open(timings_file, 'r')))
for x, y in timings.items():
timings[x] = list(y)
# Increment seed to make sure we get newly shuffled batches when training on large datasets
state['seed'] = state['seed']
else:
raise Exception("Cannot resume, cannot find files!")
logger.debug("State:\n{}".format(pprint.pformat(state)))
logger.debug("Timings:\n{}".format(pprint.pformat(timings)))
if args.force_train_all_wordemb == True:
state['fix_pretrained_word_embeddings'] = False
model = DialogEncoderDecoder(state)
rng = model.rng
valid_rounds = 0
save_model_on_first_valid = False
if args.resume != "":
filename = args.resume + '_model.npz'
if os.path.isfile(filename):
logger.debug("Loading previous model")
parameter_strings_to_ignore = []
if args.reinitialize_decoder_parameters:
parameter_strings_to_ignore += ['Wd_']
parameter_strings_to_ignore += ['bd_']
save_model_on_first_valid = True
if args.reinitialize_latent_variable_parameters:
parameter_strings_to_ignore += ['latent_utterance_prior']
parameter_strings_to_ignore += ['latent_utterance_approx_posterior']
parameter_strings_to_ignore += ['kl_divergence_cost_weight']
parameter_strings_to_ignore += ['latent_dcgm_encoder']
save_model_on_first_valid = True
load(model, filename, parameter_strings_to_ignore)
else:
raise Exception("Cannot resume, cannot find model file!")
if 'run_id' not in model.state:
raise Exception('Backward compatibility not ensured! (need run_id in state)')
else:
# assign new run_id key
model.state['run_id'] = RUN_ID
logger.debug("Compile trainer")
if not state["use_nce"]:
if ('add_latent_gaussian_per_utterance' in state) and (state["add_latent_gaussian_per_utterance"]):
logger.debug("Training using variational lower bound on log-likelihood")
else:
logger.debug("Training using exact log-likelihood")
train_batch = model.build_train_function()
else:
logger.debug("Training with noise contrastive estimation")
train_batch = model.build_nce_function()
eval_batch = model.build_eval_function()
gamma_bounding = model.build_gamma_bounding_function()
random_sampler = search.RandomSampler(model)
beam_sampler = search.BeamSampler(model)
logger.debug("Load data")
train_data, \
valid_data, = get_train_iterator(state)
train_data.start()
# Start looping through the dataset
step = 0
patience = state['patience']
start_time = time.time()
train_cost = 0
train_kl_divergence_cost = 0
train_posterior_gaussian_mean_variance = 0
train_misclass = 0
train_done = 0
train_dialogues_done = 0.0
prev_train_cost = 0
prev_train_done = 0
ex_done = 0
is_end_of_batch = True
start_validation = False
batch = None
while (step < state['loop_iters'] and
(time.time() - start_time)/60. < state['time_stop'] and
patience >= 0):
# Flush to log files
sys.stderr.flush()
sys.stdout.flush()
### Sampling phase
if step % 200 == 0:
# First generate stochastic samples
for param in model.params:
print("%s = %.4f" % (param.name, numpy.sum(param.get_value() ** 2) ** 0.5))
samples, costs = random_sampler.sample([[]], n_samples=1, n_turns=3)
print("Sampled : {}".format(samples[0]))
### Training phase
batch = train_data.next()
# Train finished
if not batch:
# Restart training
logger.debug("Got None...")
break
logger.debug("[TRAIN] - Got batch %d,%d" % (batch['x'].shape[1], batch['max_length']))
x_data = batch['x']
x_data_reversed = batch['x_reversed']
max_length = batch['max_length']
x_cost_mask = batch['x_mask']
x_reset = batch['x_reset']
ran_gaussian_const_utterance = batch['ran_var_gaussian_constutterance']
ran_uniform_const_utterance = batch['ran_var_uniform_constutterance']
ran_decoder_drop_mask = batch['ran_decoder_drop_mask']
is_end_of_batch = False
if numpy.sum(numpy.abs(x_reset)) < 1:
# Print when we reach the end of an example (e.g. the end of a dialogue or a document)
# Knowing when the training procedure reaches the end is useful for diagnosing training problems
# print('END-OF-BATCH EXAMPLE!')
is_end_of_batch = True
if state['use_nce']:
y_neg = rng.choice(size=(10, max_length, x_data.shape[1]), a=model.idim, p=model.noise_probs).astype('int32')
c, kl_divergence_cost, posterior_gaussian_mean_variance = train_batch(x_data, x_data_reversed, y_neg, max_length, x_cost_mask, x_reset, ran_gaussian_const_utterance, ran_uniform_const_utterance, ran_decoder_drop_mask)
else:
latent_piecewise_utterance_variable_approx_posterior_alpha = 0.0
latent_piecewise_utterance_variable_prior_alpha = 0.0
kl_divergences_between_piecewise_prior_and_posterior = 0.0
kl_divergences_between_gaussian_prior_and_posterior = 0.0
latent_piecewise_posterior_sample = 0.0
posterior_gaussian_mean_variance = 0.0
if model.add_latent_piecewise_per_utterance and model.add_latent_gaussian_per_utterance:
c, kl_divergence_cost, posterior_gaussian_mean_variance, latent_piecewise_utterance_variable_approx_posterior_alpha, latent_piecewise_utterance_variable_prior_alpha, kl_divergences_between_piecewise_prior_and_posterior, kl_divergences_between_gaussian_prior_and_posterior, latent_piecewise_posterior_sample = train_batch(x_data, x_data_reversed, max_length, x_cost_mask, x_reset, ran_gaussian_const_utterance, ran_uniform_const_utterance, ran_decoder_drop_mask)
elif model.add_latent_gaussian_per_utterance:
c, kl_divergence_cost, posterior_gaussian_mean_variance, kl_divergences_between_gaussian_prior_and_posterior = train_batch(x_data, x_data_reversed, max_length, x_cost_mask, x_reset, ran_gaussian_const_utterance, ran_uniform_const_utterance, ran_decoder_drop_mask)
elif model.add_latent_piecewise_per_utterance:
c, kl_divergence_cost, kl_divergences_between_piecewise_prior_and_posterior = train_batch(x_data, x_data_reversed, max_length, x_cost_mask, x_reset, ran_gaussian_const_utterance, ran_uniform_const_utterance, ran_decoder_drop_mask)
else:
c = train_batch(x_data, x_data_reversed, max_length, x_cost_mask, x_reset, ran_gaussian_const_utterance, ran_uniform_const_utterance, ran_decoder_drop_mask)
kl_divergence_cost = 0.0
gamma_bounding()
# Print batch statistics
print('cost_sum', c)
print('cost_mean', c / float(numpy.sum(x_cost_mask)))
if model.add_latent_piecewise_per_utterance or model.add_latent_gaussian_per_utterance:
print('kl_divergence_cost_sum', kl_divergence_cost)
print('kl_divergence_cost_mean', kl_divergence_cost / float(len(numpy.where(x_data == model.eos_sym)[0])))
if model.add_latent_gaussian_per_utterance:
print('posterior_gaussian_mean_variance', posterior_gaussian_mean_variance)
print('kl_divergences_between_gaussian_prior_and_posterior', numpy.sum(kl_divergences_between_gaussian_prior_and_posterior), numpy.min(kl_divergences_between_gaussian_prior_and_posterior), numpy.max(kl_divergences_between_gaussian_prior_and_posterior))
if model.add_latent_piecewise_per_utterance:
print('kl_divergences_between_piecewise_prior_and_posterior', numpy.sum(kl_divergences_between_piecewise_prior_and_posterior), numpy.min(kl_divergences_between_piecewise_prior_and_posterior), numpy.max(kl_divergences_between_piecewise_prior_and_posterior))
if numpy.isinf(c) or numpy.isnan(c):
logger.warn("Got NaN cost .. skipping")
gc.collect()
continue
train_cost += c
train_kl_divergence_cost += kl_divergence_cost
train_posterior_gaussian_mean_variance += posterior_gaussian_mean_variance
train_done += batch['num_preds']
train_dialogues_done += batch['num_dialogues']
this_time = time.time()
if step % state['train_freq'] == 0:
elapsed = this_time - start_time
# Keep track of training cost for the last 'train_freq' batches.
current_train_cost = train_cost/train_done
if prev_train_done >= 1 and abs(train_done - prev_train_done) > 0:
current_train_cost = float(train_cost - prev_train_cost)/float(train_done - prev_train_done)
if numpy.isinf(c) or numpy.isnan(c):
current_train_cost = 0
prev_train_cost = train_cost
prev_train_done = train_done
h, m, s = ConvertTimedelta(this_time - start_time)
# We need to catch exceptions due to high numbers in exp
try:
print(".. %.2d:%.2d:%.2d %4d mb # %d bs %d maxl %d acc_cost = %.4f acc_word_perplexity = %.4f cur_cost = %.4f cur_word_perplexity = %.4f acc_mean_word_error = %.4f acc_mean_kl_divergence_cost = %.8f acc_mean_posterior_variance = %.8f" % (h, m, s,\
state['time_stop'] - (time.time() - start_time)/60.,\
step, \
batch['x'].shape[1], \
batch['max_length'], \
float(train_cost/train_done), \
math.exp(float(train_cost/train_done)), \
current_train_cost, \
math.exp(current_train_cost), \
float(train_misclass)/float(train_done), \
float(train_kl_divergence_cost/train_done), \
float(train_posterior_gaussian_mean_variance/train_dialogues_done)))
except:
pass
### Inspection phase
if (step % 20 == 0):
if model.add_latent_gaussian_per_utterance and model.add_latent_piecewise_per_utterance:
try:
print('posterior_gaussian_mean_combination', model.posterior_mean_combination.W.get_value())
except:
pass
print('latent_piecewise_utterance_variable_approx_posterior_alpha', numpy.mean(latent_piecewise_utterance_variable_approx_posterior_alpha), latent_piecewise_utterance_variable_approx_posterior_alpha)
print('latent_piecewise_utterance_variable_prior_alpha', numpy.mean(latent_piecewise_utterance_variable_prior_alpha), latent_piecewise_utterance_variable_prior_alpha)
print('latent_piecewise_utterance_variable_alpha_diff', (latent_piecewise_utterance_variable_approx_posterior_alpha-latent_piecewise_utterance_variable_prior_alpha))
print('latent_piecewise_posterior_sample', numpy.min(latent_piecewise_posterior_sample), numpy.max(latent_piecewise_posterior_sample), latent_piecewise_posterior_sample[0, 0, :])
print('ran_uniform_const_utterance', numpy.min(ran_uniform_const_utterance), numpy.max(ran_uniform_const_utterance), ran_uniform_const_utterance[0, 0, :])
if model.utterance_decoder_gating.upper() == 'GRU' and model.decoder_bias_type.upper() == 'ALL':
Wd_s_q = model.utterance_decoder.Wd_s_q.get_value()
Wd_s_q_len = Wd_s_q.shape[0]
print('model.utterance_decoder Wd_s_q full', numpy.mean(numpy.abs(Wd_s_q)), numpy.mean(Wd_s_q**2))
if model.add_latent_gaussian_per_utterance and model.add_latent_piecewise_per_utterance:
Wd_s_q_gaussian = Wd_s_q[Wd_s_q_len-2*model.latent_piecewise_per_utterance_dim:Wd_s_q_len-model.latent_piecewise_per_utterance_dim, :]
Wd_s_q_piecewise = Wd_s_q[Wd_s_q_len-model.latent_piecewise_per_utterance_dim:Wd_s_q_len, :]
print('model.utterance_decoder Wd_s_q gaussian', numpy.mean(numpy.abs(Wd_s_q_gaussian)), numpy.mean(Wd_s_q_gaussian**2))
print('model.utterance_decoder Wd_s_q piecewise', numpy.mean(numpy.abs(Wd_s_q_piecewise)), numpy.mean(Wd_s_q_piecewise**2))
print('model.utterance_decoder Wd_s_q piecewise/gaussian', numpy.mean(numpy.abs(Wd_s_q_piecewise))/numpy.mean(numpy.abs(Wd_s_q_gaussian)), numpy.mean(Wd_s_q_piecewise**2)/numpy.mean(Wd_s_q_gaussian**2))
elif model.add_latent_gaussian_per_utterance:
Wd_s_q_piecewise = Wd_s_q[Wd_s_q_len-model.latent_piecewise_per_utterance_dim:Wd_s_q_len, :]
print('model.utterance_decoder Wd_s_q piecewise', numpy.mean(numpy.abs(Wd_s_q_piecewise)), numpy.mean(Wd_s_q_piecewise**2))
elif model.add_latent_piecewise_per_utterance:
Wd_s_q_gaussian = Wd_s_q[Wd_s_q_len-model.latent_piecewise_per_utterance_dim:Wd_s_q_len, :]
print('model.utterance_decoder Wd_s_q gaussian', numpy.mean(numpy.abs(Wd_s_q_gaussian)), numpy.mean(Wd_s_q_gaussian**2))
if model.utterance_decoder_gating.upper() == 'BOW' and model.decoder_bias_type.upper() == 'ALL':
Wd_bow_W_in = model.utterance_decoder.Wd_bow_W_in.get_value()
Wd_bow_W_in_len = Wd_bow_W_in.shape[0]
print('model.utterance_decoder Wd_bow_W_in full', numpy.mean(numpy.abs(Wd_bow_W_in)), numpy.mean(Wd_bow_W_in**2))
if model.add_latent_gaussian_per_utterance and model.add_latent_piecewise_per_utterance:
Wd_bow_W_in_gaussian = Wd_bow_W_in[Wd_bow_W_in_len-2*model.latent_piecewise_per_utterance_dim:Wd_bow_W_in_len-model.latent_piecewise_per_utterance_dim, :]
Wd_bow_W_in_piecewise = Wd_bow_W_in[Wd_bow_W_in_len-model.latent_piecewise_per_utterance_dim:Wd_bow_W_in_len, :]
print('model.utterance_decoder Wd_bow_W_in gaussian', numpy.mean(numpy.abs(Wd_bow_W_in_gaussian)), numpy.mean(Wd_bow_W_in_gaussian**2))
print('model.utterance_decoder Wd_bow_W_in piecewise', numpy.mean(numpy.abs(Wd_bow_W_in_piecewise)), numpy.mean(Wd_bow_W_in_piecewise**2))
print('model.utterance_decoder Wd_bow_W_in piecewise/gaussian', numpy.mean(numpy.abs(Wd_bow_W_in_piecewise))/numpy.mean(numpy.abs(Wd_bow_W_in_gaussian)), numpy.mean(Wd_bow_W_in_piecewise**2)/numpy.mean(Wd_bow_W_in_gaussian**2))
elif model.add_latent_gaussian_per_utterance:
Wd_bow_W_in_piecewise = Wd_bow_W_in[Wd_bow_W_in_len-model.latent_piecewise_per_utterance_dim:Wd_bow_W_in_len, :]
print('model.utterance_decoder Wd_bow_W_in piecewise', numpy.mean(numpy.abs(Wd_bow_W_in_piecewise)), numpy.mean(Wd_bow_W_in_piecewise**2))
#this program simulates the flight of a multistage rocket from lift-off to orbit insertion
#input parameters are:
#the empty mass of the rocket
#the fuel mass of the rocket
#the engine expansion ratio, exit area, chamber pressure, propellant molecular mass
#the mass flow of the rocket engine
#the drag coefficient of the rocket
#the drag surface of the rocket
#the input variables are read from a data file that is loaded at the beginning of the run
#the effects of wind and turbulence are neglected, as are the curvature of the earth and celestial perturbations
#the results of the simulation are the graphs:
#y-position vs x-position
#y-position vs time
#velocity vs time
#acceleration vs time
#dynamic pressure vs time
#Mach number vs time
#import all necessary functions
from math import sqrt, atan2, sin, cos, pi
import xlrd
import numpy as np
import matplotlib.pyplot as plt
import modules.aerodynamics as aero
import modules.thrust as th
import modules.controls as ctl
import modules.fdm as fdm
import modules.guidance as guid
from modules.isatmos import press
class vehicle:
def __init__(self,fm,epsilon,Tc,Mw,kappa,At,pc,pratio,
cd,S,nengines):
self.fm = fm
self.epsilon = epsilon
self.Tc = Tc
self.Mw = Mw
self.kappa = kappa
self.At = At
self.pc = pc
self.pratio = pratio
self.cd = cd
self.S = S
self.nengines = nengines
#read out data source
#filename=raw_input("Please enter the data source file name.")
filename="launcher_data/Saturn5.xls"
table = xlrd.open_workbook(filename)
sheet = table.sheet_by_name('Sheet')
#define rocket technical data lists
oem = []
fm = []
Tc = []
pc = []
epsilon = []
Mw = []
kappa = []
Ae = []
nengines = []
cd = []
S = []
t_ctl = []
sepdur = []
dt = sheet.cell_value(3,1)
maxt = sheet.cell_value(4,1)
nstages = int(sheet.cell_value(1,1))
payld = sheet.cell_value(2,1)
targetalt = sheet.cell_value(5,1)
targetvel = sheet.cell_value(6,1)
for i in range(nstages):
oem.append(sheet.cell_value(8,1+i))
fm.append(sheet.cell_value(9,1+i))
Tc.append(sheet.cell_value(10,1+i))
pc.append(sheet.cell_value(11,1+i))
epsilon.append(sheet.cell_value(12,1+i))
Mw.append(sheet.cell_value(13,1+i))
kappa.append(sheet.cell_value(14,1+i))
Ae.append(sheet.cell_value(15,1+i))
nengines.append(sheet.cell_value(16,1+i))
cd.append(sheet.cell_value(17,1+i))
S.append(sheet.cell_value(18,1+i))
t_ctl.append(sheet.cell_value(19,1+i))
sepdur.append(sheet.cell_value(20,1+i))
At = np.divide(Ae,epsilon)
pratio = nstages * [0]
for p in range(nstages):
pratio[p] = th.pratio(epsilon[p],kappa[p])
ttab=[0]
mtab=[sum(oem)+sum(fm)+payld]
Ttab=[0]
Dtab=[0]
vtab=[0]
atab=[0]
xtab=[0]
ytab=[0]
thtab = [0]
qtab=[0]
Mtab=[0]
m = sum(oem)+sum(fm)+payld
I = 1.
cg = 0.
t = 0
x = 0.
y = 0.
theta = 90.*np.pi/180
vx = 0.
vy = 0.
th_dot = 0.
ax = 0.
ay = 0.
th_ddot = 0.
g0 = 9.81
T = 0.
M = 0.
D = 0.
q = 0.
state = np.array([x,y,theta,vx,vy,th_dot,ax,ay,th_ddot,m,I,cg,T,M,D,q,fm[0]])
startstage = []
reference = targetalt
for i in range(nstages):
print("Simulating stage:",i+1)
if i>0:
fm[i-1]=0
oem[i-1]=0
state[9] = sum(oem)+sum(fm)+payld
state[16] = fm[i]
stage = vehicle(fm[i],epsilon[i],Tc[i],Mw[i],kappa[i],At[i],pc[i],pratio[i],cd[i],S[i],nengines[i])
startstage.append(t)
sep = False
while y>=0 and t<2000 and not sep:
t = t + dt
ttab.append(t)
throttle, T_angle, sep = ctl.control(state,reference)
#A,B = fdm.linearize(stage,state,throttle,T_angle,dt)
state = fdm.fdm(stage,state,throttle,T_angle,dt)
y = state[1]
fm[i] = state[16]
mtab.append(state[9])
xtab.append(state[0])
ytab.append(y)
Ttab.append(state[12])
vtab.append(np.sqrt(state[3]**2+state[4]**2))
qtab.append(state[15])
thtab.append(state[2])
atab.append(np.sqrt(state[6]**2+state[7]**2))
Mtab.append(state[13])
Dtab.append(state[14])
ttab = np.array(ttab)/60
plt.subplot(331)
plt.plot(ttab, np.array(ytab)/1000)
plt.title("flight profile: altitude [km] vs time [min]")
plt.subplot(332)
plt.plot(np.array(xtab)/1000, np.array(ytab)/1000, 'b')
plt.title("flight profile: altitude [km] vs ground range [km]")
plt.subplot(338)
plt.plot(ttab, np.array(Ttab)/1000000)
plt.title("Thrust [MN] vs time [min]")
plt.subplot(334)
plt.plot(ttab, np.array(vtab)/1000)
plt.title("velocity [km/s] vs time [min]")
plt.subplot(336)
plt.plot(ttab, np.array(qtab)/1000)
plt.title("dynamic pressure [kPa] vs time [min]")
plt.subplot(335)
plt.plot(ttab, np.array(thtab)*180/np.pi,'b')
plt.title("pitch angle [deg] vs time [min]")
plt.subplot(337)
plt.plot(ttab, np.array(atab)/g0+1)
plt.title("acceleration [g0] vs time [min]")
plt.subplot(333)
plt.plot(ttab, np.array(Mtab))
plt.title("Mach number [-] vs time [min]")
plt.subplot(339)
plt.plot(ttab, np.array(Dtab)/1000)
plt.title("Drag [kN] vs time [min]")
print('Orbit Insertion Velocity: ', round(targetvel), 'm/s')
print('Horizontal Velocity: ', round(state[3]), 'm/s')
print('Vertical Veloctity: ', round(state[4]), 'm/s')
print('Orbit Height: ', round(state[1]/1000), 'km')
plt.show()
# Required Total Energy as function of orbit altitude
mu = 398600.4418
Re = 6371.0
Vmax = targetvel/1000
hmax = targetalt/1000
rid = np.linspace(Re,Re+hmax)
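# Note (added for clarity, not in the original source): with mu in km^3/s^2
# and radii in km, the specific orbital energy of a circular orbit of radius
# r is E = v^2/2 - mu/r = -mu/(2*r), so the required energy as a function of
# orbit altitude h is E(h) = -mu/(2*(Re + h)), which the radius grid rid above
# presumably feeds into.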
""" functinos for new stuff like non-thermal and prompyt dissoc.
"""
import numpy
import automol
import mess_io
import mechanalyzer
from mechlib.amech_io import reader
def set_prod_density_param(rgts, pesgrp_num, pes_param_dct):
""" Figure out if densities should be calculated
"""
if pes_param_dct is not None:
# print('prod density test')
all_peds = pes_param_dct['peds']
pes_peds = all_peds[pesgrp_num]
calc_dens = (False, False)
for ped in pes_peds:
ped_spc = ped.split('_')[1] # Get a string with prds
# print('ped_spc', ped_spc)
# print('rgts', rgts)
if all(rgt in ped_spc for rgt in rgts):
calc_dens = (True, True)
break
else:
calc_dens = tuple(False for _ in rgts)
return calc_dens
def energy_dist_params(pesgrp_num, pes_param_dct, hot_enes_dct, label_dct):
""" set values to determine input parameters for handling
energy distributions in MESS calculations
maybe just call this before the writer and pass to make_pes_str
"""
if pes_param_dct is not None:
# Grab the desired PED and hot enes for the PES in the group
all_peds = pes_param_dct['peds']
pes_peds = all_peds[pesgrp_num]
# Set the PEDs
if any(pes_peds):
ped_spc_lst = tuple()
for ped in pes_peds:
_ped = ped.split('_')
ped_spc_lst += (f'{label_dct[_ped[0]]}_{label_dct[_ped[1]]}',)
ped_str = ' '.join(ped_spc_lst)
print(f'Species for PED: {ped_str}')
else:
ped_spc_lst = None
# Set the Hot Energies section
if hot_enes_dct is not None:
_hot_enes_dct = {label_dct[spc]: enes
for spc, enes in hot_enes_dct.items()}
hot_str = ' '.join(_hot_enes_dct.keys())
print(f'Species for Hot: {hot_str}')
else:
_hot_enes_dct = None
# Set the micro params for writing k(E)s
# When to set this
micro_out_params = (0.1, 320.0, 0.1)
print(f'Ranges for k(E) calculations: {micro_out_params}')
else:
ped_spc_lst = None
_hot_enes_dct = None
micro_out_params = None
return ped_spc_lst, _hot_enes_dct, micro_out_params
def set_hot_enes(pesgrp_num, reacs, prods,
chnl_enes, pes_param_dct,
ene_range=None):
""" Determine what hot energies should be for the requested
species.
Returns a dictionary where keys are the the mechanism names
for the side of the reaction the hot spc appears and the values
are the energies to set in the mechanism file. {side: ene_lst}
"""
if ene_range is None:
ene_range = numpy.arange(0.0, 226.0, 1.0)
import numpy as np
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import layers
class MaskedConv2D(layers.Layer):
def __init__(self, mask_type, kernel, filters):
super(MaskedConv2D, self).__init__()
self.kernel = kernel
self.filters = filters
self.mask_type = mask_type
# this only runs once
def build(self, input_shape):
self.w = self.add_weight(
shape=[
self.kernel,
self.kernel,
input_shape[-1],
self.filters
],
initializer='glorot_normal',
trainable=True,
name='w'
)
self.b = self.add_weight(
shape=(self.filters,),
initializer='zeros',
trainable=True,
name='b'
)
# create our mask
mask = np.ones(self.kernel**2, dtype=np.float32)
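# (Added note, not in the original file) In a PixelCNN-style masked
# convolution the mask zeroes the weights of "future" pixels in raster-scan
# order: a type 'A' mask also blocks the centre pixel (first layer), while a
# type 'B' mask keeps it (subsequent layers). For a 3x3 kernel the flattened
# type 'A' mask would be [1, 1, 1, 1, 0, 0, 0, 0, 0] and the type 'B' mask
# [1, 1, 1, 1, 1, 0, 0, 0, 0].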
from numpy import sin,cos,deg2rad,rad2deg,arctan2,sqrt
import numpy
import numexpr
def cv_coord(a,b,c,fr=None,to=None,degr=False):
if degr:
degrad = deg2rad
raddeg = rad2deg
else:
degrad = lambda x: x
raddeg = lambda x: x
if fr=='sph':
x=c*cos(degrad(a))*cos(degrad(b))
y=c*sin(degrad(a))*cos(degrad(b))
z=c*sin(degrad(b))
elif fr=='rect':
x=a
y=b
z=c
elif fr is None:
raise Exception('You must specify the input coordinate system')
else:
raise Exception('Unknown input coordinate system')
if to=='rect':
return (x,y,z)
elif to=='sph':
ra = raddeg(arctan2(y,x))
dec = raddeg(arctan2(z,sqrt(x**2+y**2)))
rad = sqrt(x**2+y**2+z**2)
return (ra,dec,rad)
elif to is None:
raise Exception('You must specify the output coordinate system')
else:
raise Exception('Unknown output coordinate system')
def torect(ra,dec):
x=numexpr.evaluate('cos(ra/57.295779513082323)*cos(dec/57.295779513082323)')
y=numexpr.evaluate('sin(ra/57.295779513082323)*cos(dec/57.295779513082323)')
z=numexpr.evaluate('sin(dec/57.295779513082323)')
return x,y,z
def fromrect(x,y,z):
ra=numexpr.evaluate('arctan2(y,x)*57.295779513082323')
dec=numexpr.evaluate('57.295779513082323*arctan2(z,sqrt(x**2+y**2))')
return ra,dec
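# Round-trip sanity check (illustrative only): torect/fromrect convert between
# spherical (ra, dec) in degrees and a unit vector in rectangular coordinates;
# torect(0.0, 90.0) is approximately (0, 0, 1) and fromrect(1.0, 0.0, 0.0)
# is approximately (0.0, 0.0).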
def sphere_rotate(ra, dec, rapol, decpol, ra0):
""" rotate ra,dec to a new spherical coordinate system where the pole is
at rapol,decpol and the zeropoint is at ra=ra0
"""
x,y,z=torect(ra,dec)
tmppol=cv_coord(rapol,decpol,1,degr=True,fr='sph',to='rect') #pole axis
tmpvec1=cv_coord(ra0,0,1,degr=True,fr='sph',to='rect') #x axis
tmpvec1=numpy.array(tmpvec1)
from gymenv_v2 import make_multiple_env
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import wandb
wandb.login()
run=wandb.init(project="finalproject", entity="ieor-4575", tags=["test"])
#run=wandb.init(project="finalproject", entity="ieor-4575", tags=["training-hard"])
#run=wandb.init(project="finalproject", entity="ieor-4575", tags=["test"])
### TRAINING
# Setup: You may generate your own instances on which you train the cutting agent.
custom_config = {
"load_dir" : 'instances/randomip_n60_m60', # this is the location of the randomly generated instances (you may specify a different directory)
"idx_list" : list(range(20)), # take the first 20 instances from the directory
"timelimit" : 50, # the maximum horizon length is 50
"reward_type" : 'obj' # DO NOT CHANGE reward_type
}
# Easy Setup: Use the following environment settings. We will evaluate your agent with the same easy config below:
easy_config = {
"load_dir" : 'instances/train_10_n60_m60',
"idx_list" : list(range(10)),
"timelimit" : 50,
"reward_type" : 'obj'
}
# Hard Setup: Use the following environment settings. We will evaluate your agent with the same hard config below:
hard_config = {
"load_dir" : 'instances/train_100_n60_m60',
"idx_list" : list(range(99)),
"timelimit" : 50,
"reward_type" : 'obj'
}
test_config = {
"load_dir" : 'instances/test_100_n60_m60',
"idx_list" : list(range(99)),
"timelimit" : 50,
"reward_type" : 'obj'
}
class LSTM_net(nn.Module):
def __init__(self, input_size, hidden_size, bidirectional=False):
super(LSTM_net, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.bidirectional = bidirectional
self.lstm = nn.LSTM(input_size, hidden_size,
bidirectional=bidirectional, batch_first=True)
def forward(self, input):
hidden = self.init_hidden()
inputs = torch.FloatTensor(input).view(1, -1, self.input_size)
output, _ = self.lstm(inputs)
# output[-1] is same as last hidden state
output = output[-1].reshape(-1, self.hidden_size)
return output
def init_hidden(self):
return (torch.zeros(1 + int(self.bidirectional), 1, self.hidden_size),
torch.zeros(1 + int(self.bidirectional), 1, self.hidden_size))
class Attention_Net(nn.Module):
def __init__(self, input_size, hidden_size, hidden_size2):
super(Attention_Net, self).__init__()
# constrain and cuts dimension
self.input_size = int(input_size)
self.hidden_size = int(hidden_size)
self.hidden_size2 = int(hidden_size2)
self.lstm1 = LSTM_net(input_size, hidden_size)
self.lstm2 = LSTM_net(input_size, hidden_size)
self.linear1 = nn.Linear(self.hidden_size, self.hidden_size2)
self.linear2 = nn.Linear(self.hidden_size2, self.hidden_size2)
self.tanh = nn.Tanh()
def forward(self, constraints, cuts):
constraints = torch.FloatTensor(constraints)
cuts = torch.FloatTensor(cuts)
# lstm
A_embed = self.lstm1.forward(constraints)
D_embed = self.lstm2.forward(cuts)
# dense
A = self.linear2(self.tanh(self.linear1(A_embed)))
D = self.linear2(self.tanh(self.linear1(D_embed)))
# attention
logits = torch.sum(torch.mm(D, A.T), axis=1)
return logits
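# (Added shape note, not in the original source) with constraints of shape
# (m, input_size) and cuts of shape (k, input_size), A is (m, hidden_size2),
# D is (k, hidden_size2), torch.mm(D, A.T) is (k, m), and the row-sum yields
# one logit per candidate cut, which Policy.compute_prob softmaxes over cuts.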
# Policy network is adapted from lab4 with small modifications
class Policy(object):
def __init__(self, input_size, hidden_size, hidden_size2, lr):
self.model = Attention_Net(input_size, hidden_size, hidden_size2)
# DEFINE THE OPTIMIZER
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
def compute_prob(self, constraints, cuts):
constraints = torch.FloatTensor(constraints)
cuts = torch.FloatTensor(cuts)
prob = torch.nn.functional.softmax(self.model(constraints, cuts), dim=-1)
return prob.cpu().data.numpy()
def _to_one_hot(self, y, num_classes):
"""
convert an integer vector y into one-hot representation
"""
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1)
def train(self, constraints, cuts, actions, Qs):
"""
constraints: list of numpy arrays (constraint matrices, one per step)
cuts: list of numpy arrays (candidate cut matrices, one per step)
actions: numpy array (chosen cut indices)
Qs: numpy array (Q values)
"""
actions = torch.LongTensor(actions)
Qs = torch.FloatTensor(Qs)
total_loss = 0
# for a bunch of constraints and cuts, need to go one by one
for i in range(len(constraints)):
curr_constraints = constraints[i]
curr_cuts = cuts[i]
curr_action = actions[i]
# COMPUTE probability vector pi(s) for all s in states
logits = self.model(curr_constraints, curr_cuts)
prob = torch.nn.functional.softmax(logits, dim=-1)
# Compute probaility pi(s,a) for all s,a
action_onehot = self._to_one_hot(curr_action, curr_cuts.shape[0])
prob_selected = torch.sum(prob * action_onehot, axis=-1)
# FOR ROBUSTNESS
prob_selected += 1e-8
loss = -torch.mean(Qs[i] * torch.log(prob_selected))
# BACKWARD PASS
self.optimizer.zero_grad()
loss.backward()
# UPDATE
self.optimizer.step()
total_loss += loss.detach().cpu().data.numpy()
return total_loss
def discounted_rewards(r, gamma):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r)
running_sum = 0
for i in reversed(range(0,len(r))):
discounted_r[i] = running_sum * gamma + r[i]
running_sum = discounted_r[i]
return list(discounted_r)
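# Worked example (illustrative): discounted_rewards([1., 1., 1.], 0.9)
# computes returns back-to-front, giving [2.71, 1.9, 1.0]
# (last step 1.0, then 1 + 0.9*1.0 = 1.9, then 1 + 0.9*1.9 = 2.71).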
def normalization(A, b, E, d):
all_coeff = np.concatenate((A, E), axis=0)
from __future__ import division
from builtins import range
from past.utils import old_div
from math import cos, sin, sqrt, atan2, acos, asin, pi
import csv
import os
import numpy as np
from proteus import AuxiliaryVariables, Archiver, Comm, Profiling
from proteus.Profiling import logEvent as logEvent
from collections import OrderedDict
class RigidBody(AuxiliaryVariables.AV_base, object):
"""
Auxiliary variable used to calculate attributes of an associated shape
class instance acting as a rigid body. To set a shape as a rigid body, use
shape.setRigidBody(). The class instance is created automatically when
shape.setRigidBody() has been called and after calling assembleDomain().
Parameters
----------
shape: proteus.mprans.SpatialTools.Shape_RANS
Class instance of the shape associated to the rigid body calculations.
cfl_target: Optional[float]
UNUSED (to implement), sets the maximum displacement of the body
allowed per time step.
dt_init: float
first time step of the simulation.
"""
def __init__(self, shape, cfl_target=0.9, dt_init=0.001, substeps=20):
self.Shape = shape
self.nd = nd = shape.Domain.nd
# if isinstance(shape, (Rectangle, Cuboid)):
# shape._setInertiaTensor()
self.substeps = substeps
self.dt_init = dt_init
self.cfl_target = 0.9
self.rotation_matrix = np.eye(3)
self.h = np.array([0., 0., 0.])
self.barycenter = np.zeros(3)
self.i_start = None # will be retrieved from setValues() of Domain
self.i_end = None # will be retrieved from setValues() of Domain
self.It = self.Shape.It
self.record_dict = OrderedDict()
# variables
self.position = np.zeros(3)
self.last_position = np.array([0., 0., 0.])
self.velocity = np.zeros(3, 'd')
self.last_velocity = np.zeros(3, 'd')
self.acceleration = np.zeros(3, 'd')
self.last_acceleration = np.zeros(3, 'd')
self.rotation = np.eye(3)
self.last_rotation = np.eye(3)
self.ang_disp = np.zeros(3, 'd')
self.last_ang_disp = np.zeros(3, 'd')
self.ang_vel = np.zeros(3, 'd')
self.last_ang_vel = np.zeros(3, 'd')
self.ang_acc = np.zeros(3, 'd')
self.last_ang_acc = np.zeros(3, 'd')
self.F = np.zeros(3, 'd')
self.M = np.zeros(3, 'd')
self.last_F = np.zeros(3, 'd')
self.last_M = np.zeros(3, 'd')
self.ang = 0.
self.barycenter = self.Shape.barycenter
self.mass = 0.
self.pivot = np.zeros(3)
self.last_pivot = np.zeros(3)
self.init_barycenter = self.Shape.barycenter.copy()
self.InputMotion = False
# variables for checking numerical method
self.ux = 0.0
self.uy = 0.0
self.uz = 0.0
self.last_ux = 0.0
self.last_uy = 0.0
self.last_uz = 0.0
# gravity
if 'RigidBody' not in shape.auxiliaryVariables:
shape._attachAuxiliaryVariable('RigidBody', self)
def attachModel(self, model, ar):
"""
Attaches model to auxiliary variable
"""
self.model = model
self.ar = ar
self.writer = Archiver.XdmfWriter()
self.nd = model.levelModelList[-1].nSpace_global
m = self.model.levelModelList[-1]
flagMax = max(m.mesh.elementBoundaryMaterialTypes)
# flagMin = min(m.mesh.elementBoundaryMaterialTypes)
self.nForces = flagMax + 1
return self
def calculate_init(self):
"""
Function called automatically at the very beginning of the simulation
by proteus.
"""
nd = self.nd
self.position[:] = self.Shape.barycenter.copy()
self.last_position[:] = self.position
self.rotation[:nd, :nd] = self.Shape.coords_system
self.last_rotation[:nd, :nd] = self.Shape.coords_system
self.rotation_euler = getEulerAngles(self.rotation)
self.last_rotation_euler = getEulerAngles(self.last_rotation)
def calculate(self):
"""
Function called automatically at each time step by proteus.
"""
# store previous values
self._store_last_values()
# for first time step
try:
self.dt = self.model.levelModelList[-1].dt_last
except:
self.dt = self.dt_init
# get forces and moments
self.F[:] = self.getTotalForces() * self.free_x
self.M[:] = self.getTotalMoments() * self.free_r
# calculate new properties with substepping
self.step(self.dt)
# record variables in .csv file
if self.record_dict:
self._recordValues()
# print in proteus log file
self._logTrace()
def _store_last_values(self):
"""
Store values of previous time step for displacement calculation
"""
self.last_position[:] = self.position
self.last_velocity[:] = self.velocity
self.last_acceleration[:] = self.acceleration
self.last_rotation[:] = self.rotation
self.last_rotation_euler[:] = self.rotation_euler
self.last_ang_disp[:] = self.ang_disp
self.last_ang_vel[:] = self.ang_vel
self.last_ang_acc[:] = self.ang_acc
self.last_F[:] = self.F
self.last_M[:] = self.M
self.last_pivot = self.pivot
self.last_ux = self.ux
self.last_uy = self.uy
self.last_uz = self.uz
def getPressureForces(self):
"""
Gives the pressure forces applied on each segments/facets of the rigid
body
Returns
-------
F_p: array_like
pressure forces (x, y, z) as provided by Proteus
"""
i0, i1 = self.i_start, self.i_end
F_p = self.model.levelModelList[-1].coefficients.netForces_p[i0:i1, :]
return F_p
def getShearForces(self):
"""
Gives the shear forces applied on each segments/facets of the rigid
body
Returns
-------
F_v: array_like
shear forces (x, y, z) as provided by Proteus
"""
i0, i1 = self.i_start, self.i_end
F_v = self.model.levelModelList[-1].coefficients.netForces_v[i0:i1, :]
return F_v
def getGravityForce(self):
"""
Returns
-------
Fg: array_like
gravity force
"""
nd = self.nd
if nd == 2:
Fg = self.mass * np.array([0., -9.81, 0.])
if nd == 3:
Fg = self.mass * np.array([0., 0., -9.81])
return Fg
def getMoments(self):
"""
Gives the moments applied on each segments/facets of the rigid body
Returns
-------
M: array_like
moments (x, y, z) as provided by Proteus
"""
i0, i1 = self.i_start, self.i_end
M = self.model.levelModelList[-1].coefficients.netMoments[i0:i1, :]
return M
def getTotalMoments(self):
"""
Gives the total moments applied the rigid body
Returns
-------
M_t: array_like
total moments (x, y, z) as provided by Proteus
"""
M = self.getMoments()
M_t = np.sum(M, axis=0)
return M_t
def getTotalForces(self):
"""
Gives the total forces applied the rigid body: shear, pressure and
gravity forces
Returns
-------
F_t: array_like
total forces (x, y, z) as provided by Proteus
"""
F_p = self.getPressureForces()
F_v = self.getShearForces()
F_g = self.getGravityForce()
F_t = np.sum(F_p + F_v, axis=0) + F_g
return F_t
def getAcceleration(self):
"""
Returns
-------
a: array_like
acceleration of current time step
"""
a = old_div(self.F, self.mass)
return a
def getAngularAcceleration(self):
if sum(self.M) != 0:
self.inertia = self.getInertia(self.M, self.Shape.barycenter)
assert self.inertia != 0, 'Zero inertia: inertia tensor (It) ' \
'was not set correctly!'
self.ang_acc = old_div(self.M[:], self.inertia)
else:
self.inertia = None
self.ang_acc = np.array([0., 0., 0.])
return self.ang_acc
def getDisplacement(self, dt):
# acceleration from force
self.acceleration = self.getAcceleration()
# substeps for smoother motion between timesteps
dt_sub = old_div(dt, float(self.substeps))
# Forward_Euler
if self.scheme == 'Forward_Euler':
for i in range(self.substeps):
self.h[:], self.velocity[:] = forward_euler(p0=self.h, v0=self.velocity,
a=self.acceleration, dt=dt_sub)
# Runge_Kutta
elif self.scheme == 'Runge_Kutta':
# settings
Kx, Ky, Kz = self.Kx, self.Ky, self.Kz
Cx, Cy, Cz = self.Cx, self.Cy, self.Cz
Fx, Fy, Fz = self.F
mass = self.mass
# initial condition
ux0 = self.last_position[0] - self.init_barycenter[0] # x-axis displacement
uy0 = self.last_position[1] - self.init_barycenter[1] # y-axis displacement
uz0 = self.last_position[2] - self.init_barycenter[2] # z-axis displacement
vx0 = self.last_velocity[0] # x-axis velocity
vy0 = self.last_velocity[1] # y-axis velocity
vz0 = self.last_velocity[2] # z-axis velocity
ax0 = old_div((Fx - Cx * vx0 - Kx * ux0), mass) # x-axis acceleration
ay0 = old_div((Fy - Cy * vy0 - Ky * uy0), mass) # y-axis acceleration
az0 = old_div((Fz - Cz * vz0 - Kz * uz0), mass) # z-axis acceleration
# solving numerical scheme
ux, vx, ax = runge_kutta(u0=ux0, v0=vx0, a0=ax0, dt=dt_sub, substeps=self.substeps, F=Fx, K=Kx, C=Cx, m=mass, velCheck=False)
uy, vy, ay = runge_kutta(u0=uy0, v0=vy0, a0=ay0, dt=dt_sub, substeps=self.substeps, F=Fy, K=Ky, C=Cy, m=mass, velCheck=False)
uz, vz, az = runge_kutta(u0=uz0, v0=vz0, a0=az0, dt=dt_sub, substeps=self.substeps, F=Fz, K=Kz, C=Cz, m=mass, velCheck=False)
# used for storing values of displacements through timesteps
self.ux = ux
self.uy = uy
self.uz = uz
# final values
self.h[:] = np.array([self.ux - ux0, self.uy - uy0, self.uz - uz0])
self.velocity = np.array([vx, vy, vz])
self.acceleration = np.array([ax, ay, az])
return self.h
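# (Added note, not from the original source) forward_euler and runge_kutta are
# helpers imported elsewhere in the module; the forward-Euler substep used
# above is assumed to be the standard explicit update, roughly
#     p = p0 + v0 * dt_sub
#     v = v0 + a * dt_sub
# applied self.substeps times with dt_sub = dt / substeps.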
def getAngularDisplacement(self, dt):
# angular acceleration from moment
self.ang_acc = self.getAngularAcceleration()
dt_sub = old_div(dt, float(self.substeps))
# Forward_Euler
if self.scheme == 'Forward_Euler':
for i in range(self.substeps):
# rotation
self.ang_disp, self.ang_vel[:] = forward_euler(p0=self.ang_disp, v0=self.ang_vel,
a=self.ang_acc, dt=dt_sub)
# Runge_Kutta
elif self.scheme == 'Runge_Kutta':
# settings
Krot = self.Krot
Crot = self.Crot
Fx, Fy, Fz = self.F
# check for differenece between barycenter and pivot
self.rp = (self.pivot - self.Shape.barycenter)
rpx, rpy, rpz = self.rp
Mpivot = np.array([(rpy * Fz - rpz * Fy), -(rpx * Fz - rpz * Fx), (rpx * Fy - rpy * Fx)]) # moment transformation calculated in pivot
Mp = self.M - Mpivot # moment transformation
self.inertia = self.getInertia(Mp, self.pivot)
inertia = self.inertia
# initial condition
rz0 = atan2(self.last_rotation[0, 1], self.last_rotation[0, 0]) # angular displacement
vrz0 = self.last_ang_vel[2] # angular velocity
arz0 = old_div((Mp[2] - Crot * vrz0 - Krot * rz0), inertia) # angular acceleration
# solving numerical scheme
rz, vrz, arz = runge_kutta(u0=rz0, v0=vrz0, a0=arz0, dt=dt_sub, substeps=self.substeps, F=Mp[2], K=Krot, C=Crot, m=inertia, velCheck=False)
# final values
self.ang_disp[2] = rz - atan2(self.last_rotation[0, 1], self.last_rotation[0, 0])
self.ang_vel[2] = vrz
self.ang_acc[2] = arz
return self.ang_disp
def setSprings(self, springs, Kx, Ky, Krot, Cx, Cy, Crot, Kz=0.0, Cz=0.0):
"""
Sets a system of uniform springs to model soil's reactions (for moving bodies)
Parameters
----------
spring: string
If True, spring module is switched on.
Kx: float
horizontal stiffness
Ky: float
vertical stiffness
Krot: float
rotational stiffness
Cx: float
horizontal damping parameter
Cy: float
vertical damping parameter
Crot: float
rotational damping parameter
"""
self.springs = springs
self.Kx = Kx
self.Ky = Ky
self.Kz = Kz
self.Krot = Krot
self.Cx = Cx
self.Cy = Cy
self.Cz = Cz
self.Crot = Crot
def setPivot(self, pivot=None):
"""
Sets pivot centre of rotation for the angular calculation
Parameters
----------
pivot: array
"""
self.pivot = pivot
def setNumericalScheme(self, scheme):
"""
Sets the numerical scheme used to solve motion.
Parameters
----------
scheme: string
If Runge_Kutta, runge kutta scheme is applied.
If Forward_Euler, forward euler scheme is applied.
"""
self.scheme = scheme
def inputMotion(self, InputMotion=False, pivot=None,
At=[0., 0., 0], Tt=[0., 0., 0],
Ar=[0., 0., 0], Tr=[0., 0., 0]):
"""
Sets motion as an input. It's imposed rather than calculated.
Parameters
----------
InputMotion: bool
If True, motion as input is applied.
pivot: list
Centre of rotation. If only translation, write barycenter's coordinates
At: list
Amplitude of translational motion
Tt: list
Period of translational motion
Ar: list
Amplitude of rotational motion
Tr: list
Period of rotational motion
"""
self.InputMotion = InputMotion
if pivot is None:
self.pivot = self.Shape.barycenter
else:
self.pivot = np.array(pivot)
self.At = np.array(At)
self.Tt = np.array(Tt)
self.Ar = np.array(Ar)
self.Tr = np.array(Tr)
def imposeSinusoidalMotion(self):
"""
Motion is imposed rather than calculated.
"""
t = self.model.stepController.t_model_last
Tra = np.array([0., 0., 0.])
Rot = np.array([0., 0., 0.])
for ii in [0, 1, 2]:
At, Tt = self.At[ii], self.Tt[ii]
Ar, Tr = self.Ar[ii], self.Tr[ii]
if Tt == 0.0:
Wt = 0.0
else:
Wt = 2. * 3.14 / Tt
if Tr == 0.0:
Wr = 0.0
else:
Wr = 2. * 3.14 / Tr
Dt = At * sin(Wt * t)
Dr = Ar * sin(Wr * t)
# motion update
Tra[ii] = Dt - (self.last_position[ii] - self.init_barycenter[ii])
Rot[ii] = Dr - (self.last_rotation_euler[ii])
return Tra, Rot
def step(self, dt):
"""
Step for rigid body calculations in Python
Parameters
----------
dt: float
time step
"""
nd = self.nd
# reinitialise displacement values
self.h[:] = np.zeros(3)
self.ang_disp[:] = np.zeros(3)
# Calculate or impose motion of the rigid body
if self.InputMotion == True:
# sinusoidal motion imposed
self.h[:], self.ang_disp[:] = self.imposeSinusoidalMotion()
else:
# Translational motion calculation
self.h[:] = self.getDisplacement(dt)
# Rotational motion calculation
self.ang_disp[:] = self.getAngularDisplacement(dt)
# translate
self.Shape.translate(self.h[:nd])
# rotate
self.ang = np.linalg.norm(self.ang_disp[:])
if nd == 2 and self.ang_vel[2] < 0:
self.ang = -self.ang
if self.ang != 0.:
self.Shape.rotate(self.ang, self.ang_vel, self.Shape.barycenter)
self.rotation[:nd, :nd] = self.Shape.coords_system
self.rotation_matrix[:] = np.dot(np.linalg.inv(self.last_rotation),
self.rotation)
self.rotation_euler[:] = getEulerAngles(self.rotation)
else:
self.rotation_matrix[:] = np.eye(3)
self.barycenter[:] = self.Shape.barycenter
self.position[:] = self.Shape.barycenter
def setConstraints(self, free_x, free_r):
"""
Sets constraints on the Shape (for moving bodies)
Parameters
----------
free_x: array_like
Translational constraints.
free_r: array_like
Rotational constraints.
"""
self.free_x = np.array(free_x)
self.free_r = np.array(free_r)
def setMass(self, mass):
"""
Set mass of the shape.
Parameters
----------
mass: float
mass of the body
"""
self.mass = float(mass)
def setInertiaTensor(self, It):
"""
Set the inertia tensor of the shape
Parameters
----------
It: array_like, float
Inertia tensor of the body (3x3 array in 3D, float in 2D)
Notes
-----
The inertia tensor should not be already scaled with the mass of the
shape.
"""
It = np.array(It)
if self.nd == 2:
assert isinstance(It, float), 'the inertia tensor of a 2D shape ' \
'must be a float'
if self.nd == 3:
assert It.shape == (3, 3), 'the inertia tensor of a 3D shape ' \
'must have a (3, 3) shape'
self.It = It
def getInertia(self, vec=(0., 0., 1.), pivot=None):
"""
Gives the inertia of the shape from an axis and a pivot
Parameters
----------
vec: array_like
Vector around which the body rotates.
pivot: Optional[array_like]
Pivotal point around which the body rotates. If not set, it will
be the barycenter coordinates
Returns
-------
I: float
inertia of the mass
Notes
-----
The inertia is calculated relative to the coordinate system of the
shape (self.coords_system). If the shape was not initialised with a
position corresponding to its inertia tensor (e.g. shape was already
rotated when initialised), set the coordinate system accordingly
before calling this function
"""
assert self.It is not None, 'No inertia tensor! (' + self.name + ')'
if pivot is None:
pivot = self.barycenter
# Pivot coords relative to shape centre of mass
pivot = pivot - np.array(self.barycenter)
# making unity vector/axis of rotation
vec = vx, vy, vz = np.array(vec)
length_vec = np.sqrt(vx**2 + vy**2 + vz**2)
vec = old_div(vec, length_vec)
if self.nd == 2:
I = self.It * self.mass
elif self.nd == 3:
# vector relative to original position of shape:
vec = np.dot(vec, np.linalg.inv(self.coords_system))
cx, cy, cz = vec
            # getting the tensor for calculating moment of inertia
# from arbitrary axis
vt = np.array([[cx**2, cx * cy, cx * cz],
[cx * cy, cy**2, cy * cz],
[cx * cz, cy * cz, cz**2]])
# total moment of inertia
I = np.einsum('ij,ij->', self.mass * self.It, vt)
return I
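    # Worked sketch (hypothetical tensor, not from the original tests): for a unit
    # cube of mass m with It = (1/6) * np.eye(3), any unit axis vec gives
    #   I = np.einsum('ij,ij->', m * It, np.outer(vec, vec)) = m / 6
    # because the outer-product tensor vt contracts a diagonal inertia tensor to
    # the same value along every axis.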
def setRecordValues(self, filename=None, all_values=False, pos=False,
rot=False, ang_disp=False, F=False, M=False,
inertia=False, vel=False, acc=False, ang_vel=False, ang_acc=False):
"""
Sets the rigid body attributes that are to be recorded in a csv file
during the simulation.
Parameters
----------
filename: Optional[string]
Name of file, if not set, the file will be named as follows:
'record_[shape.name].csv'
all_values: bool
Set to True to record all values listed below.
time: bool
Time of recorded row (default: True).
pos: bool
Position of body (default: False. Set to True to record).
rot: bool
Rotation of body (default: False. Set to True to record).
        ang_disp: bool
            Angular displacement calculated during the rigid body calculation
            step and applied on the body to make it rotate (default: False.
            Set to True to record).
F: bool
Forces applied on body (default: False. Set to True to record).
M: bool
Moments applied on body (default: False. Set to True to record).
inertia: bool
Inertia of body (default: False. Set to True to record).
vel: bool
Velocity of body (default: False. Set to True to record).
acc: bool
Acceleration of body (default: False. Set to True to record).
        ang_vel: bool
Angular velocity of body (default: False. Set to True to record).
ang_acc: bool
Angular acceleration of body (default: False. Set to True to record).
Notes
-----
To add another value manually, add to dictionary self.record_dict:
key: header of the column in .csv
value: list of length 2: [variable name, index within variable]
(if no index, use None)
        e.g. self.record_dict['m'] = ['mass', None]
"""
if all_values is True:
pos = rot = F = M = acc = vel = ang_acc = ang_vel = True
if pos is True:
self.record_dict['x'] = ['last_position', 0]
self.record_dict['y'] = ['last_position', 1]
self.record_dict['z'] = ['last_position', 2]
if rot is True:
self.record_dict['rx'] = ['last_rotation_euler', 0]
self.record_dict['ry'] = ['last_rotation_euler', 1]
self.record_dict['rz'] = ['last_rotation_euler', 2]
if F is True:
self.record_dict['Fx'] = ['F', 0]
self.record_dict['Fy'] = ['F', 1]
self.record_dict['Fz'] = ['F', 2]
if M is True:
self.record_dict['Mx'] = ['M', 0]
self.record_dict['My'] = ['M', 1]
self.record_dict['Mz'] = ['M', 2]
if acc is True:
self.record_dict['ax'] = ['acceleration', 0]
self.record_dict['ay'] = ['acceleration', 1]
self.record_dict['az'] = ['acceleration', 2]
if vel is True:
self.record_dict['vx'] = ['velocity', 0]
self.record_dict['vy'] = ['velocity', 1]
self.record_dict['vz'] = ['velocity', 2]
if ang_acc is True:
self.record_dict['ang_ax'] = ['ang_acc', 0]
self.record_dict['ang_ay'] = ['ang_acc', 1]
self.record_dict['ang_az'] = ['ang_acc', 2]
if ang_vel is True:
self.record_dict['ang_vx'] = ['ang_vel', 0]
self.record_dict['ang_vy'] = ['ang_vel', 1]
self.record_dict['ang_vz'] = ['ang_vel', 2]
if inertia is True:
self.record_dict['inertia'] = ['inertia', None]
if filename is None:
self.record_filename = 'record_' + self.name + '.csv'
else:
self.record_filename = filename + '.csv'
self.record_file = os.path.join(Profiling.logDir, self.record_filename)
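    # Usage sketch (hypothetical call, not from the original tutorials):
    #   body.setRecordValues(filename='caisson', pos=True, F=True, M=True)
    # writes t, x, y, z, Fx, Fy, Fz, Mx, My, Mz to 'caisson.csv' in the Proteus
    # log directory at every recording step.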
def _recordValues(self):
"""
Records values of rigid body attributes at each time step in a csv file.
"""
comm = Comm.get()
if comm.isMaster():
t_last = self.model.stepController.t_model_last
dt_last = self.model.levelModelList[-1].dt_last
t = t_last - dt_last
values_towrite = [t]
if t == 0:
headers = ['t']
for key in self.record_dict:
headers += [key]
with open(self.record_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(headers)
for key, val in list(self.record_dict.items()):
if val[1] is not None:
values_towrite += [getattr(self, val[0])[val[1]]]
else:
values_towrite += [getattr(self, val[0])]
with open(self.record_file, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(values_towrite)
def _logTrace(self):
# log values
t_previous = self.model.stepController.t_model_last - self.dt
t_current = self.model.stepController.t_model_last
h = self.h
last_pos, pos = self.last_position, self.position
last_vel, vel = self.last_velocity, self.velocity
rot = getEulerAngles(self.rotation)
rot_x, rot_y, rot_z = rot[0], rot[1], rot[2]
F = self.F
M = self.M
logEvent("================================================================")
logEvent("=================== Rigid Body Calculation =====================")
logEvent("================================================================")
logEvent("Name: " + repr(self.Shape.name))
logEvent("================================================================")
logEvent("[proteus] t=%1.5fsec to t=%1.5fsec" %
(t_previous, t_current))
logEvent("[proteus] dt=%1.5fsec" % (self.dt))
logEvent("[body] ============== Pre-calculation attributes ==============")
logEvent("[proteus] t=%1.5fsec" % (t_previous))
logEvent("[proteus] F=(% 12.7e, % 12.7e, % 12.7e)" % (F[0], F[1], F[2]))
logEvent("[proteus] M=(% 12.7e, % 12.7e, % 12.7e)" % (M[0], M[1], M[2]))
logEvent("[body] pos=(% 12.7e, % 12.7e, % 12.7e)" %
(last_pos[0], last_pos[1], last_pos[2]))
logEvent("[body] vel=(% 12.7e, % 12.7e, % 12.7e)" %
(last_vel[0], last_vel[1], last_vel[2]))
logEvent("[body] ===============Post-calculation attributes ==============")
logEvent("[body] t=%1.5fsec" % (t_current))
logEvent("[body] h=(% 12.7e, % 12.7e, % 12.7e)" % (h[0], h[1], h[2]))
logEvent("[body] pos=(% 12.7e, % 12.7e, % 12.7e)" %
(pos[0], pos[1], pos[2]))
logEvent("[body] vel=(% 12.7e, % 12.7e, % 12.7e)" %
(vel[0], vel[1], vel[2]))
logEvent("[body] rot=(% 12.7e, % 12.7e, % 12.7e)" %
(rot_x, rot_y, rot_z))
logEvent("================================================================")
class CaissonBody(RigidBody):
"""
Sub-class to create a caisson rigid body.
"""
def __init__(self, shape, substeps):
super(CaissonBody, self).__init__(shape, substeps)
        # friction module parameter used for switching to dynamic motion cases
self.sliding = False
self.sliding_last = False
self.friction = False
self.overturning = False
# friction and overturning parameters to be initialised
self.pivot_friction = np.zeros(3)
self.last_pivot_friction = np.zeros(3)
self.Ftan = 0.0
self.last_Ftan = 0.0
self.Mp = np.zeros(3, 'd')
self.last_Mp = np.zeros(3, 'd')
self.rp = np.zeros(3, 'd')
        self.last_rp = np.zeros(3, 'd')
from __future__ import unicode_literals
from django.db import models
import datetime as dt
from django.contrib.auth.mixins import LoginRequiredMixin
from django.dispatch import receiver
from django.db.models.signals import (post_save,pre_save,)
# from PIL import Image
from django.core.files import File
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
from phonenumber_field.modelfields import PhoneNumberField
import numpy as np
from django.db.models import Avg, Max, Min
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
first_name = models.CharField(max_length = 60,null=True,blank=True)
last_name = models.CharField(max_length = 60,null=True,blank=True)
pic = CloudinaryField('pic',null=True)
bio = models.TextField(null=True,blank=True)
likes = models.IntegerField(default=0)
email = models.EmailField(null=True)
phone_number = PhoneNumberField(null=True)
def get_total_likes(self):
        return self.likes
@classmethod
def update_profile(cls, id, email, phone_number, first_name, last_name, bio, pic):
profile = cls.objects.filter(id = id).update(pic = pic, id = id, first_name=first_name, last_name=last_name,bio=bio,phone_number=phone_number, email=email)
        return profile
def __str__(self):
return str(self.user.username)
class Meta:
ordering = ['first_name']
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
def create_profile(sender, instance, created, **kwargs):
if created: Profile.objects.create(user=instance)
post_save.connect(create_profile, sender = User)
class Project(models.Model):
title = models.CharField(max_length = 60)
pic = CloudinaryField('pic',null=True)
description = models.TextField()
link = models.URLField(max_length = 300)
@classmethod
def search_projects(cls, search_term):
projects = cls.objects.filter(title__icontains=search_term)
return projects
def save_project(self):
self.save()
def delete_project(self):
self.delete()
@classmethod
    def update_project(cls, id, description):
        update = cls.objects.filter(id = id).update(description = description)
# return update
@classmethod
def get_all_projects(cls):
projects = cls.objects.all()
return projects
@classmethod
def get_project_by_id(cls,id):
project = cls.objects.filter(id= id).all()
return project
def average_design(self):
design_ratings = list(map(lambda x: x.design_rating, self.reviews.all()))
return np.mean(design_ratings)
def average_usability(self):
usability_ratings = list(map(lambda x: x.usability_rating, self.reviews.all()))
return np.mean(usability_ratings)
def average_content(self):
content_ratings = list(map(lambda x: x.content_rating, self.reviews.all()))
        return np.mean(content_ratings)
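# Usage sketch (hypothetical review data, not from the original app): if a project
# has three reviews with design ratings 4, 5 and 3, then
#   project.average_design() == np.mean([4, 5, 3]) == 4.0
# and the usability and content averages are computed the same way over their fields.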
from typing import Tuple
import numpy as np
import pytest
from ertk.utils.array import (
batch_arrays_by_length,
check_3d,
clip_arrays,
flat_to_inst,
frame_array,
frame_arrays,
inst_to_flat,
make_array_array,
pad_array,
pad_arrays,
shuffle_multiple,
transpose_time,
)
def test_make_array_array_vlen():
    arr = make_array_array([np.arange(i) for i in range(3)])
from core.nnlib import *
import unittest
import numpy as np
import pytest
import numpy.testing as ntest
from numpy._pytesttester import PytestTester
class TestActivations:
def test_leakyrelu_forward(self):
X = np.array([1, 0, -1], dtype=np.float32)
leakage = 0.1
comp = np.array(
[i if i > 0 else i * leakage for i in X], dtype=np.float32)
relu = LeakyRelu(leakage_ratio=leakage)
ntest.assert_array_equal(comp, relu(X))
X = np.zeros((10, 10), dtype=np.float32)
ntest.assert_array_equal(np.zeros((10, 10), dtype=np.float32), relu(X))
def test_leakyrelu_der(self):
X = np.array([1, 0, -1], dtype=np.float32)
leakage = 0.1
relu = LeakyRelu(leakage_ratio=leakage)
ntest.assert_array_equal(
np.array([1 if i >= 0 else leakage for i in X],
dtype=np.float32),
relu.derivative(X))
X = np.ones((10, 10), np.float32)
ntest.assert_array_equal(X, relu.derivative(X))
def test_softmax(self):
        X = np.array([[1, 0, -1], [1, 1, 0]], dtype=np.float32)
import numpy as np
from scipy import linalg
from core.utils import fidelidad_vec, dot_prod_vec
def bases_2_3(a, b, fase):
"""
    Basic bases in dimensions 2 and 3.
    IN
        a: float. Coefficient multiplying |0>.
        b: float. Coefficient multiplying |1>.
        fase: float. Phase multiplying |1>.
    OUT
        B_2: 2 x 2 array.
        B_3: 3 x 3 array.
"""
    # ensure that the bases are complex.
a = a + 0.*1.j
b = b + 0.*1.j
B_3 = np.array([[a, np.exp(1j*fase)*b, 0.*1.j],
[a*b, - np.exp(1j*fase)*a*a, np.exp(1j*fase)*b],
[b*b, - np.exp(1j*fase)*b*a, -np.exp(1j*fase)*a]]).T
B_2 = np.array([[a, np.exp(1j*fase)*b],
[b, - np.exp(1j*fase)*a]]).T
return B_2, B_3
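# Illustrative check (sketch, not part of the original module): choosing
#   a, b, fase = np.cos(0.3), np.sin(0.3), 0.0
# makes the columns of B_2 orthonormal, i.e.
#   np.allclose(B_2.conj().T @ B_2, np.eye(2)) is True,
# so each column of the returned matrices is one state of a measurement basis.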
def bases_separables_vec(dim, v_a, v_b, v_fase):
"""
    Generates log2(dim) x n_bases separable bases.
    IN:
        dim: int. Dimension of the state to be reconstructed.
        v_a, v_b: arrays. Coefficients of the states of the measured bases.
        v_fase: array. Relative angles of the basis states.
    OUT
        base_0: n_qubits x n_qubits x n_qubits array. Diagonal basis.
        bases_sep: n_qubits x n_qubits x n_qubits x n_qubits x n_bases array.
                   Provides the matrices whose tensor products (taken over the
                   third index "m") build the separable bases.
"""
n_qubits = int(np.log2(dim))
n_bases = v_fase.shape[0]
b_0 = np.array([[1, 0], [0, 1]], dtype="complex")
base_0 = np.dstack([b_0]*n_qubits)
bases_sep_vec = np.stack([np.stack([np.stack([b_0]*n_qubits,
axis=-1)]*n_qubits,
axis=-1)]*n_bases, axis=-1)
for k in range(n_bases):
B_2, B_3 = bases_2_3(v_a[k], v_b[k], v_fase[k])
for j in range(n_qubits):
for m in range(n_qubits-j-1, n_qubits):
bases_sep_vec[:, :, m, j, k] = B_2
return base_0, bases_sep_vec[:, :, :, ::-1, :]
def tomography_vec(prob_diag_vec, prob_sep_vec, bases_sep_vec):
"""
    Three-bases tomography for states of arbitrary dimension.
    IN
        prob_diag_vec: array of length dim. Contains the measurements in the
                       standard basis.
        prob_sep_vec: dim x n_qubits x n_bases array. Contains the measurements
                      of the separable bases (n_bases sets of n_qubits
                      separable bases).
        bases_sep_vec: 2 x 2 x n_qubits x n_qubits x n_bases array. Qubit bases
                       whose tensor product over the third dimension yields the
                       separable bases to be measured.
    OUT
        psi_sis: dim x 1 array. State of the system.
"""
dim, n_qubits, n_bases = prob_sep_vec.shape
    # start by filling all the leaves
psi_list = [np.zeros((2**(j+1), 2**(n_qubits-j)), dtype="complex")
for j in range(n_qubits)]
psi_list.append(np.zeros((2**n_qubits, 1), dtype="complex"))
for k in range(2**(n_qubits-1)):
psi_list[0][:, 2*k] = np.array([np.sqrt(prob_diag_vec[2*k]), 0])
psi_list[0][:, 2*k+1] = np.array([0, np.sqrt(prob_diag_vec[2*k + 1])])
for lv in range(n_qubits-1, -1, -1):
for k in range(2**lv):
psi_j = psi_list[n_qubits-lv-1][:, 2*k]
psi_k = psi_list[n_qubits-lv-1][:, 2*k+1]
n_qubits_eff = n_qubits - lv
slice = 2**(n_qubits_eff)
prob = prob_sep_vec[slice*k:slice*(k+1), lv, :].reshape(-1, order="F")
proyectores = bases_sep_vec[:, :, lv:, lv, :]
psi_n = block_n_vec(psi_j, psi_k, prob, proyectores, n_qubits_eff,
n_bases)
pad = np.zeros(psi_n.shape[0])
if lv != 0:
if k%2 == 0:
psi_n = np.concatenate([psi_n, pad])
else:
psi_n = np.concatenate([pad, psi_n])
psi_list[n_qubits-lv][:, k] = psi_n
psi_sis = psi_list[-1]
return psi_sis
def block_n_vec(psi_j, psi_k, prob, proyectores, n_qubits_eff, n_bases):
"""
    Reconstructs a substate in dimension dim using substates in
    dimensions k and j.
    IN
        psi_j, psi_k: arrays. Substates to be coupled.
        prob: array of length slice*n_bases. One probability per projector.
        proyectores: dim x slice*n_bases array. Projectors of the measured bases.
    OUT
        psi_n: array. Substate of the union of psi_k and psi_j.
"""
    # if either substate is zero there is nothing to compute
if np.all(psi_k == 0) | np.all(psi_j == 0):
return psi_k + psi_j
n_eqs_bas = 2**proyectores.shape[2]
dot_j = np.zeros((n_eqs_bas*n_bases), dtype="complex")
dot_k = np.zeros((n_eqs_bas*n_bases), dtype="complex")
for r in range(n_bases):
dot_j[r*n_eqs_bas:(r+1)*n_eqs_bas] = dot_prod_vec(
psi_j,
proyectores[:, :, :, r],
n_qubits_eff
)
dot_k[r*n_eqs_bas:(r+1)*n_eqs_bas] = dot_prod_vec(
psi_k,
proyectores[:, :, :, r],
n_qubits_eff
)
p_tilde = (prob - np.abs(dot_j)**2
- np.abs(dot_k)**2)
X = dot_k*(dot_j.conj())
    eqs = np.zeros((n_eqs_bas*n_bases, 2))
from __future__ import division
import os
import numpy as np
import fnmatch
import re
from lxml import etree
import torch
import cPickle
import argparse
def fetch_iamondb(data_path):
strokes_path = os.path.join(data_path, "lineStrokes")
ascii_path = os.path.join(data_path, "ascii")
train_files_path = os.path.join(data_path, "train.txt")
valid_files_path = os.path.join(data_path, "valid.txt")
if not os.path.exists(strokes_path) or not os.path.exists(ascii_path):
raise ValueError("You must download the data from IAMOnDB, and"
"unpack in %s" % data_path)
if not os.path.exists(train_files_path) or not os.path.exists(valid_files_path):
raise ValueError("Cannot find concatenated train.txt and valid.txt"
"files! See the README in %s" % data_path)
partial_path = data_path
def construct_ascii_path(f):
primary_dir = f.split("-")[0]
if f[-1].isalpha():
sub_dir = f[:-1]
else:
sub_dir = f
file_path = os.path.join(ascii_path, primary_dir, sub_dir, f + ".txt")
return file_path
def construct_stroke_paths(f):
primary_dir = f.split("-")[0]
if f[-1].isalpha():
sub_dir = f[:-1]
else:
sub_dir = f
files_path = os.path.join(strokes_path, primary_dir, sub_dir)
#Dash is crucial to obtain correct match!
files = fnmatch.filter(os.listdir(files_path), f + "-*.xml")
files = [os.path.join(files_path, fi) for fi in files]
files = sorted(files, key=lambda x: int(x.split(os.sep)[-1].split("-")[-1][:-4]))
return files
train_npy_x = os.path.join(partial_path, "train_npy_x.npy")
train_npy_y = os.path.join(partial_path, "train_npy_y.npy")
valid_npy_x = os.path.join(partial_path, "valid_npy_x.npy")
valid_npy_y = os.path.join(partial_path, "valid_npy_y.npy")
if not os.path.exists(train_npy_x):
train_names = [f.strip()
for f in open(train_files_path, mode='r').readlines()]
valid_names = [f.strip()
for f in open(valid_files_path, mode='r').readlines()]
train_ascii_files = [construct_ascii_path(f) for f in train_names]
valid_ascii_files = [construct_ascii_path(f) for f in valid_names]
train_stroke_files = [construct_stroke_paths(f) for f in train_names]
valid_stroke_files = [construct_stroke_paths(f) for f in valid_names]
train_set = (zip(train_stroke_files, train_ascii_files),
train_npy_x, train_npy_y)
valid_set = (zip(valid_stroke_files, valid_ascii_files),
valid_npy_x, valid_npy_y)
for se, x_npy_file, y_npy_file in [train_set, valid_set]:
x_set = []
y_set = []
se = list(se)
for n, (strokes_files, ascii_file) in enumerate(se):
if n % 100 == 0:
print("Processing file %i of %i" % (n, len(se)))
with open(ascii_file) as fp:
cleaned = [t.strip() for t in fp.readlines()
if t != '\r\n'
and t != '\n'
and t != ' \r\n']
# Try using CSR
idx = [n for
n, li in enumerate(cleaned) if li == "CSR:"][0]
cleaned_sub = cleaned[idx + 1:]
corrected_sub = []
for li in cleaned_sub:
# Handle edge case with %%%%% meaning new line?
if "%" in li:
li2 = re.sub('\%\%+', '%', li).split("%")
li2 = [l.strip() for l in li2]
corrected_sub.extend(li2)
else:
corrected_sub.append(li)
n_one_hot = 57
y = [np.zeros((len(li), n_one_hot), dtype='int16')
for li in corrected_sub]
# A-Z, a-z, space, apostrophe, comma, period
charset = list(range(65, 90 + 1)) + list(range(97, 122 + 1)) + [32, 39, 44, 46]
tmap = {k: n + 1 for n, k in enumerate(charset)}
# 0 for UNK/other
tmap[0] = 0
def tokenize_ind(line):
t = [ord(c) if ord(c) in charset else 0 for c in line]
r = [tmap[i] for i in t]
return r
for n, li in enumerate(corrected_sub):
y[n][np.arange(len(li)), tokenize_ind(li)] = 1
x = []
for stroke_file in strokes_files:
with open(stroke_file) as fp:
tree = etree.parse(fp)
root = tree.getroot()
# Get all the values from the XML
# 0th index is stroke ID, will become up/down
s = np.array([[i, int(Point.attrib['x']),
int(Point.attrib['y'])]
for StrokeSet in root
for i, Stroke in enumerate(StrokeSet)
for Point in Stroke])
# flip y axis
s[:, 2] = -s[:, 2]
# Get end of stroke points
c = s[1:, 0] != s[:-1, 0]
                    ci = np.where(c == True)
#!/usr/bin/env python
import abc
import numpy as np
# third party imports
from .dataset import DataSet
from .geodict import GeoDict
class Grid(DataSet):
"""
An abstract class to represent lat/lon gridded datasets. Grids are
assumed to be pixel-registered - that is, grid coordinates
represent the value at the *center* of the cells.
"""
@abc.abstractmethod # should be a classmethod when instantiated
def getFileGeoDict(filename):
"""
Abstract method to return the bounding box, resolution, and shape of a file in whatever Grid format.
:param filename:
The path to the filename of whatever grid format this is being implemented in.
:returns:
A geodict specifying the bounding box, resolution, and shape of the data in a file.
"""
raise NotImplementedError
@abc.abstractmethod # should be a classmethod when instantiated
def getBoundsWithin(filename, geodict):
"""
Abstract method to return a geodict for this file that is guaranteed to be inside the input geodict defined, without resampling.
:param filename:
The name of the file whose resolution/extent should be used.
:param geodict:
The geodict which is used as the base for finding the bounds for this file guaranteed to be inside of this geodict.
:raises NotImplementedError:
Always in base class
"""
raise NotImplementedError
@classmethod
def _getPadding(cls, geodict, paddict, padvalue):
# get pad left columns - go outside specified bounds if not exact edge
pxmin, pxmax, pymin, pymax = (
paddict.xmin, paddict.xmax, paddict.ymin, paddict.ymax)
gxmin, gxmax, gymin, gymax = (
geodict.xmin, geodict.xmax, geodict.ymin, geodict.ymax)
dx, dy = (geodict.dx, geodict.dy)
ny, nx = (geodict.ny, geodict.nx)
padleftcols = int(np.ceil((gxmin - pxmin) / dx))
padrightcols = int(np.ceil((pxmax - gxmax) / dx))
padbottomrows = int(np.ceil((gymin - pymin) / dy))
padtoprows = int(np.ceil((pymax - gymax) / dy))
# if any of these are negative, set them to zero
if padleftcols < 0:
padleftcols = 0
if padrightcols < 0:
padrightcols = 0
if padbottomrows < 0:
padbottomrows = 0
if padtoprows < 0:
padtoprows = 0
leftpad = np.ones((ny, padleftcols)) * padvalue
rightpad = np.ones((ny, padrightcols)) * padvalue
nx += padrightcols + padleftcols
bottompad = np.ones((padbottomrows, nx)) * padvalue
        toppad = np.ones((padtoprows, nx)) * padvalue
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to load, preprocess and train on CIFAR-10."""
from absl import app
from absl import flags
from absl import logging
import functools
import os
import pickle
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from do_wide_and_deep_networks_learn_the_same_things.resnet_cifar import ResNet_CIFAR
from do_wide_and_deep_networks_learn_the_same_things.shake_shake import build_shake_shake_model
tf.enable_v2_behavior()
FLAGS = flags.FLAGS
#Define training hyperparameters
flags.DEFINE_integer('batch_size', 128, 'Batch size')
flags.DEFINE_float('learning_rate', 0.01, 'Learning rate')
flags.DEFINE_integer('epochs', 300, 'Number of epochs to train for')
flags.DEFINE_float('weight_decay', 0.0001, 'L2 regularization')
#Define model & data hyperparameters
flags.DEFINE_integer('depth', 56, 'No. of layers to use in the ResNet model')
flags.DEFINE_integer(
'width_multiplier', 1,
'How much to scale the width of the standard ResNet model by')
flags.DEFINE_integer(
'copy', 0,
'If the same model configuration has been run before, train another copy with a different random initialization'
)
flags.DEFINE_string('base_dir', None,
'Where the trained model will be saved')
flags.DEFINE_string('data_path', '',
'Directory where CIFAR subsampled dataset is stored')
flags.DEFINE_string('dataset_name', 'cifar10',
                    'Name of dataset used (CIFAR-10 or CIFAR-100)')
flags.DEFINE_boolean('use_residual', True,
'Whether to include residual connections in the model')
flags.DEFINE_boolean('randomize_labels', False,
'Whether to randomize labels during training')
flags.DEFINE_string('pretrain_dir', '',
'Directory where the pretrained model is saved')
flags.DEFINE_boolean(
'partial_init', False,
'Whether to initialize only the first few layers with pretrained weights')
flags.DEFINE_boolean('shake_shake', False, 'Whether to use shake shake model')
flags.DEFINE_boolean('distort_color', False,
'Whether to apply color distortion augmentation')
flags.DEFINE_integer('epoch_save_freq', 0, 'Frequency at which ckpts are saved')
flags.DEFINE_boolean(
'save_image', False,
'Whether to save metadata of images used for each minibatch')
def find_stack_markers(model):
"""Finds the layers where a new stack starts."""
stack_markers = []
old_shape = None
for i, layer in enumerate(model.layers):
if i == 0:
continue
if 'conv' in layer.name:
conv_weights_shape = layer.get_weights()[0].shape
if conv_weights_shape[-1] != conv_weights_shape[-2] and conv_weights_shape[
0] != 1 and conv_weights_shape[-2] % 16 == 0:
stack_markers.append(i)
assert (len(stack_markers) == 2)
return stack_markers
def random_apply(transform_fn, image, p):
"""Randomly apply with probability p a transformation to an image"""
if tf.random.uniform([]) < p:
return transform_fn(image)
else:
return image
def color_distortion(image, s=1.0):
"""Color distortion data augmentation"""
# image is a tensor with value range in [0, 1].
# s is the strength of color distortion.
def color_jitter(x):
# one can also shuffle the order of following augmentations
# each time they are applied.
x = tf.image.random_brightness(x, max_delta=0.8 * s)
x = tf.image.random_contrast(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)
x = tf.image.random_saturation(x, lower=1 - 0.8 * s, upper=1 + 0.8 * s)
x = tf.image.random_hue(x, max_delta=0.2 * s)
x = tf.clip_by_value(x, 0, 1)
return x
def color_drop(x):
x = tf.image.rgb_to_grayscale(x)
x = tf.tile(x, [1, 1, 3])
return x
# randomly apply transformation with probability p.
image = random_apply(color_jitter, image, p=0.8)
image = random_apply(color_drop, image, p=0.2)
return image
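# Usage sketch (assumed input, not from the original training script): for a float32
# image tensor with values in [0, 1],
#   distorted = color_distortion(image, s=0.5)
# applies the jitter chain with probability 0.8 and the grayscale drop with
# probability 0.2, with all jitter strengths scaled by s.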
def preprocess_data(image, label, is_training):
"""CIFAR data preprocessing"""
image = tf.image.convert_image_dtype(image, tf.float32)
if is_training:
crop_padding = 4
image = tf.pad(image, [[crop_padding, crop_padding],
[crop_padding, crop_padding], [0, 0]], 'REFLECT')
image = tf.image.random_crop(image, [32, 32, 3])
image = tf.image.random_flip_left_right(image)
if FLAGS.distort_color:
image = color_distortion(image, s=1.0)
else:
image = tf.image.resize_with_crop_or_pad(image, 32, 32) # central crop
return image, label
def preprocess_data_with_id(data, is_training):
"""CIFAR data preprocessing when image ids are included in the data loader"""
image = data['image']
image = tf.image.convert_image_dtype(image, tf.float32)
if is_training:
crop_padding = 4
image = tf.pad(image, [[crop_padding, crop_padding],
[crop_padding, crop_padding], [0, 0]], 'REFLECT')
image = tf.image.random_crop(image, [32, 32, 3])
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.resize_with_crop_or_pad(image, 32, 32) # central crop
return data['id'], image, data['label']
def load_train_data(batch_size,
data_path='',
dataset_name='cifar10',
n_data=50000,
randomize_labels=False,
as_supervised=True):
"""Load CIFAR training data"""
if not data_path:
train_dataset = tfds.load(
name=dataset_name, split='train', as_supervised=as_supervised)
else:
if 'tiny' in data_path: # load about 1/16 of the data
train_dataset = tfds.load(
name=dataset_name, split='train[:6%]', as_supervised=as_supervised)
elif 'half' in data_path: # load half of the data
train_dataset = tfds.load(
name=dataset_name, split='train[:50%]', as_supervised=as_supervised)
else: # load 1/4 of the data
train_dataset = tfds.load(
name=dataset_name, split='train[:25%]', as_supervised=as_supervised)
if randomize_labels:
all_labels = []
all_images = []
for images, labels in train_dataset:
all_labels.extend([labels.numpy()])
all_images.append(images.numpy()[np.newaxis, :, :, :])
all_images = np.vstack(all_images)
np.random.seed(FLAGS.copy)
np.random.shuffle(all_labels)
train_dataset = tf.data.Dataset.from_tensor_slices(
(tf.convert_to_tensor(all_images, dtype=tf.float32),
tf.convert_to_tensor(all_labels, dtype=tf.int64)))
train_dataset = train_dataset.shuffle(buffer_size=n_data)
if as_supervised:
train_dataset = train_dataset.map(
functools.partial(preprocess_data, is_training=True))
else:
train_dataset = train_dataset.map(
functools.partial(preprocess_data_with_id, is_training=True))
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
return train_dataset
def load_test_data(batch_size,
shuffle=False,
data_path='',
dataset_name='cifar10',
n_data=10000,
as_supervised=True):
"""Load CIFAR test data"""
if 'random' in dataset_name:
np.random.seed(0)
    test_labels = np.zeros((n_data,), dtype=np.int64)
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.patches
import matplotlib.gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import seaborn as sns
import networkx as nx
from scedar.eda import mtype
from collections import OrderedDict
sns.set(style="ticks")
def labs_to_cmap(labels, return_lut=False, shuffle_colors=False,
random_state=None):
np.random.seed(random_state)
# Each label has its own index and color
mtype.check_is_valid_labs(labels)
labels = np.array(labels)
uniq_lab_arr = np.unique(labels)
num_uniq_labs = len(uniq_lab_arr)
uniq_lab_inds = list(range(num_uniq_labs))
lab_col_list = list(sns.hls_palette(num_uniq_labs))
if shuffle_colors:
np.random.shuffle(lab_col_list)
lab_cmap = mpl.colors.ListedColormap(lab_col_list)
# Need to keep track the order of unique labels, so that a labeled
# legend can be generated.
# Map unique label indices to unique labels
uniq_lab_lut = dict(zip(range(num_uniq_labs), uniq_lab_arr))
# Map unique labels to indices
uniq_ind_lut = dict(zip(uniq_lab_arr, range(num_uniq_labs)))
# a list of label indices
lab_ind_arr = np.array([uniq_ind_lut[x] for x in labels])
# map unique labels to colors
# Used to generate legends
lab_col_lut = dict(zip([uniq_lab_lut[i]
for i in range(len(uniq_lab_arr))],
lab_col_list))
# norm separates cmap to difference indices
# https://matplotlib.org/tutorials/colors/colorbar_only.html
lab_norm = mpl.colors.BoundaryNorm(uniq_lab_inds + [lab_cmap.N],
lab_cmap.N)
if return_lut:
return lab_cmap, lab_norm, lab_ind_arr, lab_col_lut, uniq_lab_lut
else:
return lab_cmap, lab_norm
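# Usage sketch (hypothetical labels, not from the original test suite):
#   cmap, norm, ind_arr, col_lut, lab_lut = labs_to_cmap(
#       ["a", "b", "a", "c"], return_lut=True)
# gives ind_arr == [0, 1, 0, 2]; col_lut maps each unique label to an RGB tuple,
# which is what cluster_scatter below uses to build a labeled legend.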
def cluster_scatter(projection2d, labels=None,
selected_labels=None,
plot_different_markers=False,
label_markers=None,
shuffle_label_colors=False, gradient=None,
xlim=None, ylim=None,
title=None, xlab=None, ylab=None,
figsize=(20, 20), add_legend=True, n_txt_per_cluster=3,
alpha=1, s=0.5, random_state=None, **kwargs):
"""Scatter plot for clustering illustration
Args:
projection2d (2 col numeric array): (n, 2) matrix to plot
labels (list of labels): labels of n samples
selected_labels (list of labels): selected labels to plot
plot_different_markers (bool): plot different markers for samples with
different labels
label_markers (list of marker shapes): passed to matplotlib plot
shuffle_label_colors (bool): shuffle the color of labels to avoid
similar colors show up in close clusters
gradient (list of number): color gradient of n samples
title (str)
xlab (str): x axis label
ylab (str): y axis label
figsize (tuple of two number): (width, height)
add_legend (bool)
n_txt_per_cluster (number): the number of text to plot per cluster.
Could be 0.
alpha (number)
s (number): size of the points
random_state (int): random seed to shuffle features
        **kwargs: passed to matplotlib plot
Return:
matplotlib figure of the created scatter plot
"""
kwargs = kwargs.copy()
# randomly:
# - select labels for annotation if required
# - shuffle colors if required
np.random.seed(random_state)
# check projection2d
projection2d = np.array(projection2d, dtype="float")
if (projection2d.ndim != 2) or (projection2d.shape[1] != 2):
raise ValueError("projection2d matrix should have shape "
"(n_samples, 2). {}".format(projection2d))
# check gradient length
if gradient is not None:
        gradient = np.array(gradient)
import cv2 as cv
import numpy as np
import SimpleITK as sitk
from cellori.netmap import get_touch_map
from pathlib import Path
from scipy import special
from skimage import feature, filters, measure, morphology, segmentation
class Cellori:
"""Cellori class object that takes the path to an image file or an image array.
Parameters
----------
image : str or numpy.ndarray
The path to an ND2 or TIFF file or a ``numpy.ndarray`` of an image that has already been loaded.
nd2_overlap : float, optional, default 0.1
The overlap percentage used by StitchWell for ND2 stitching. If ``None``, the value will be determined by
automatic overlap calculation. This value is ignored if ``image`` is not the path to an ND2 file.
nd2_stitch_channel : float, optional, default 0
The index of the channel used by StitchWell for automatic overlap calculation during ND2 stitching. This
value is ignored if automatic overlap calculation is not applicable.
nuclei_channel : int, optional, default 0
The index of the channel containing the nuclei for segmentation. This value is ignored if ``image`` has a
single channel.
Raises
------
ValueError
If ``image`` is an invalid image path or array.
ValueError
If ``nuclei_channel`` is not specified for an ``image`` with multiple channels.
ValueError
If ``image`` has invalid dimensions.
"""
def __init__(self, image, **kwargs):
self.all_coords = None
self.default_nuclei_diameter = None
self.default_sigma = None
self.masks = None
if isinstance(image, np.ndarray):
self.image = image
elif Path(image).is_file():
if image.endswith('.nd2'):
from stitchwell import StitchWell
nd2_overlap = kwargs.get('nd2_overlap', 0.1)
nd2_stitch_channel = kwargs.get('nd2_stitch_channel', 0)
self.image = StitchWell(image).stitch(0, nd2_overlap, nd2_stitch_channel)
elif image.endswith(('.tif', '.tiff')):
from tifffile import imread
self.image = imread(image)
else:
raise ValueError("Invalid image.")
if self.image.ndim != 2:
if self.image.ndim == 3:
nuclei_channel = kwargs.get('nuclei_channel')
if nuclei_channel is not None:
self.image = self.image[nuclei_channel]
else:
raise ValueError("Nuclei channel not specified.")
else:
raise ValueError("Invalid image dimensions.")
global_thresh = filters.threshold_otsu(self.image[self.image > 0])
self.global_binary = self.image > global_thresh
background = np.ma.masked_array(self.image, self.global_binary)
self.background_std = np.std(background)
markers = measure.label(self.global_binary)
watershed = sitk.MorphologicalWatershedFromMarkers(sitk.GetImageFromArray(np.zeros(self.image.shape)),
sitk.GetImageFromArray(markers), markWatershedLine=False)
watershed = sitk.GetArrayFromImage(watershed)
self.watershed_labeled = measure.label(watershed)
self.watershed_regions = measure.regionprops(self.watershed_labeled, cache=False)
def gui(self, estimate_parameters=True):
"""Initiates the Cellori GUI.
Parameters
----------
estimate_parameters : bool, optional, default True
Whether or not to run automatic parameter detection.
Returns
-------
masks : numpy.ndarray
Labeled array of the same size as the original image with background pixels as 0 and cells as 1, 2, 3,
..., N. The most recent segmentation result is returned, if any.
coords : numpy.ndarray
Array of size (N, 2) with the coordinates of cell nuclei. The most recent segmentation result is
returned, if any.
image : numpy.ndarray
Array of the image for use in post-processing. The most recent segmentation result is returned, if any.
"""
from cellori.run_gui import run_gui
if estimate_parameters:
self._estimate_parameters()
else:
self.default_sigma = 1
self.default_nuclei_diameter = 6
self.masks = None
run_gui(self)
if self.masks is not None:
masks = self.masks
coords = self.all_coords
image = self.image
return masks, coords, image
def segment(self, segmentation_mode='combined', threshold_locality=0.5, sigma=None, nuclei_diameter=None,
coordinate_format='indices'):
"""Segments the image using the Cellori algorithm.
Parameters
----------
segmentation_mode : {'combined', 'intensity', 'morphology'}, optional, default 'combined'
* ‘combined’: Use a combined maxima metric that incorporates both intensity and morphology.
* ‘intensity’: Use an intensity-only maxima metric.
* ‘morphology’: Use a morphology-only maxima metric.
threshold_locality : float, optional, default 0.5
Fractional weight on local intensity used in thresholding. The value must be between 0 (global
thresholding) and 1 (local thresholding).
        sigma : float, optional, default None
Gaussian sigma used for denoising. If ``None``, the value will be determined by automatic parameter
detection.
nuclei_diameter : int, optional, default None
Estimated lower bound of nuclei diameters. Any objects smaller than this threshold will not be
considered for segmentation. If ``None``, the value will be determined by automatic parameter detection.
coordinate_format : {'xy', 'indices'}, optional, default 'indices'
* ‘xy’: Format coordinates for plotting on standard XY axes.
* ‘indices’: Format coordinates as indices of the original image array.
Returns
-------
masks : numpy.ndarray
Labeled array of the same size as the original image with background pixels as 0 and cells as 1, 2, 3,
..., N.
coords : numpy.ndarray
Array of size (N, 2) with the coordinates of cell nuclei.
image : numpy.ndarray
Array of the image for use in post-processing.
Raises
------
ValueError
If ``segmentation_mode`` is an invalid segmentation mode.
ValueError
If ``coordinate_format`` is an invalid coordinate format.
"""
if segmentation_mode not in ['combined', 'intensity', 'morphology']:
raise ValueError("Invalid segmentation mode.")
if (sigma is None) or (nuclei_diameter is None):
self._estimate_parameters()
if sigma is None:
sigma = self.default_sigma
if nuclei_diameter is None:
nuclei_diameter = self.default_nuclei_diameter
masks, coords = self._segment(self.image, self.watershed_labeled, segmentation_mode, threshold_locality, sigma,
nuclei_diameter)
if coordinate_format == 'xy':
coords = self._indices_to_xy(coords)
elif coordinate_format != 'indices':
raise ValueError("Invalid coordinate format.")
image = self.image
return masks, coords, image
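    # Usage sketch (hypothetical file path, not from the original docs):
    #   masks, coords, image = Cellori('nuclei.tif').segment(
    #       segmentation_mode='combined', threshold_locality=0.5)
    # returns coords as (row, col) array indices unless coordinate_format='xy'
    # is requested.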
def _segment(self, image, watershed_labeled, segmentation_mode, threshold_locality, sigma, nuclei_diameter,
origin=None):
"""(For internal use) Get masks and nuclei coordinates using the Cellori algorithm.
Parameters
----------
image : numpy.ndarray
Array of the image to be segmented.
watershed_labeled : numpy.ndarray
Array of labeled watershed regions.
segmentation_mode : {'combined', 'intensity', 'morphology'}
* ‘combined’: Use a combined maxima metric that incorporates both intensity and morphology.
* ‘intensity’: Use an intensity-only maxima metric.
* ‘morphology’: Use a morphology-only maxima metric.
threshold_locality : float
Fractional weight on local intensity used in thresholding. The value must be between 0 (global
thresholding) and 1 (local thresholding).
sigma : float
Gaussian sigma used for denoising. If ``None``, the value will be determined by automatic parameter
detection.
nuclei_diameter : int
Estimated lower bound of nuclei diameters. Any objects smaller than this threshold will not be
considered for segmentation. If ``None``, the value will be determined by automatic parameter detection.
origin : tuple, optional, default None
Origin coordinates of the GUI preview region.
Returns
-------
masks : numpy.ndarray
Labeled array of the same size as the original image with background pixels as 0 and cells as 1, 2, 3,
..., N.
coords : numpy.ndarray
Array of size (N, 2) with the coordinates of cell nuclei.
"""
self.min_area = np.pi * (nuclei_diameter / 2) ** 2
coords, binary = self._find_nuclei(image, watershed_labeled, segmentation_mode, sigma, threshold_locality,
nuclei_diameter, origin)
masks = self._get_masks(binary, coords)
masks, coords = self._merge_correct(masks)
return masks, coords
def _find_nuclei(self, image, watershed_labeled, segmentation_mode, sigma, threshold_locality, nuclei_diameter,
origin):
"""(For internal use) Find nuclei using the Cellori algorithm.
Parameters
----------
image : numpy.ndarray
Array of the image to be segmented.
watershed_labeled : numpy.ndarray
Array of labeled watershed regions.
segmentation_mode : {'combined', 'intensity', 'morphology'}
* ‘combined’: Use a combined maxima metric that incorporates both intensity and morphology.
* ‘intensity’: Use an intensity-only maxima metric.
* ‘morphology’: Use a morphology-only maxima metric.
threshold_locality : float
Fractional weight on local intensity used in thresholding. The value must be between 0 (global
thresholding) and 1 (local thresholding).
sigma : float
Gaussian sigma used for denoising. If ``None``, the value will be determined by automatic parameter
detection.
nuclei_diameter : int
Estimated lower bound of nuclei diameters. Any objects smaller than this threshold will not be
considered for segmentation. If ``None``, the value will be determined by automatic parameter detection.
origin : tuple
Origin coordinates of the GUI preview region.
Returns
-------
coords : numpy.ndarray
Array of size (N, 2) with the coordinates of cell nuclei.
binary : numpy.ndarray
Binarized array of the same size as the original image.
"""
block_size = 2 * round(nuclei_diameter) + 1
binary = np.zeros(image.shape, dtype=bool)
if origin is None:
watershed_regions = self.watershed_regions
origin = (0, 0)
else:
watershed_regions = measure.regionprops(watershed_labeled, cache=False)
for region in watershed_regions:
indices = [region.bbox[0], region.bbox[2], region.bbox[1], region.bbox[3]]
image_crop = image[indices[0]:indices[1], indices[2]:indices[3]]
global_binary_crop = self.global_binary[indices[0] + origin[0]:indices[1] + origin[0],
indices[2] + origin[1]:indices[3] + origin[1]]
binary_crop = self._conditional_local_threshold(image_crop, region.image, global_binary_crop, block_size,
threshold_locality, self.background_std)
binary_crop = np.where(region.image, binary_crop, 0)
binary_current = binary[indices[0]:indices[1], indices[2]:indices[3]]
binary[indices[0]:indices[1], indices[2]:indices[3]] = np.where(binary_crop, True, binary_current)
binary = morphology.remove_small_objects(binary, self.min_area)
binary = morphology.remove_small_holes(binary, self.min_area)
binary_labeled = morphology.label(binary)
regions = measure.regionprops(binary_labeled, cache=False)
coords = np.empty(0)
for region in regions:
indices = [region.bbox[0], region.bbox[2], region.bbox[1], region.bbox[3]]
if segmentation_mode == 'combined' or segmentation_mode == 'intensity':
image_crop = self.image[indices[0] + origin[0]:indices[1] + origin[0],
indices[2] + origin[1]:indices[3] + origin[1]]
image_crop = np.where(region.image, image_crop, 0)
if segmentation_mode == 'combined' or segmentation_mode == 'morphology':
binary_crop = binary[indices[0]:indices[1], indices[2]:indices[3]]
binary_distance = cv.distanceTransform(binary_crop.astype(np.uint8), cv.DIST_L2, 0)
if segmentation_mode == 'combined':
binary_distance = self._normalize(binary_distance)
image_crop = self._normalize(image_crop)
metric = image_crop + binary_distance
elif segmentation_mode == 'intensity':
metric = image_crop
elif segmentation_mode == 'morphology':
metric = binary_distance
metric = filters.gaussian(metric, sigma, preserve_range=True)
maxima = feature.peak_local_max(metric, min_distance=round(nuclei_diameter / 4), threshold_rel=0.5,
exclude_border=False)
coords = np.append(coords, maxima + region.bbox[:2])
coords = np.reshape(coords, (-1, 2))
return coords, binary
@staticmethod
def _get_masks(binary, coords):
"""(For internal use) Get masks using the watershed algorithm.
Parameters
----------
binary : numpy.ndarray
Binarized array of the same size as the original image.
coords : numpy.ndarray
Array of size (N, 2) with the coordinates of cell nuclei.
Returns
-------
masks : numpy.ndarray
Labeled array of the same size as the original image with background pixels as 0 and cells as 1, 2, 3,
..., N.
"""
markers = np.zeros(binary.shape, dtype=bool)
markers[tuple(np.rint(coords).astype(np.uint).T)] = True
markers = measure.label(markers)
masks = segmentation.watershed(binary, markers, mask=binary)
return masks
def _merge_correct(self, masks):
"""(For internal use) Correct for oversegmentation via rule-based region merging.
Parameters
----------
masks : numpy.ndarray
Labeled array of the same size as the original image with background pixels as 0 and cells as 1, 2, 3,
..., N.
Returns
-------
masks : numpy.ndarray
Labeled array of the image after region merging corrections.
"""
masks = measure.label(masks)
regions = measure.regionprops(masks, cache=False)
idx = get_touch_map(masks)
cells = np.unique(list(idx.keys()))
if len(cells) > 0:
corrected_masks = masks.copy()
merged_cells = list()
cell_map = {cell: cell for cell in cells}
for cell in cells:
merge_candidates = [merge_candidate for merge_candidate in idx[cell] if
merge_candidate not in merged_cells]
merged_cells.append(cell)
if len(merge_candidates) == 0:
continue
cell_region = regions[cell - 1]
cell_indices = np.array(
[cell_region.bbox[0], cell_region.bbox[2], cell_region.bbox[1], cell_region.bbox[3]])
for merge_candidate in merge_candidates:
merge_candidate_region = regions[merge_candidate - 1]
merge_candidate_indices = np.array(
[merge_candidate_region.bbox[0], merge_candidate_region.bbox[2], merge_candidate_region.bbox[1],
merge_candidate_region.bbox[3]])
merge_indices = np.array([min(cell_indices[0], merge_candidate_indices[0]),
max(cell_indices[1], merge_candidate_indices[1]),
min(cell_indices[2], merge_candidate_indices[2]),
max(cell_indices[3], merge_candidate_indices[3])])
merge_crop = masks[merge_indices[0]:merge_indices[1], merge_indices[2]:merge_indices[3]]
merge_test = np.where((merge_crop == cell) | (merge_crop == merge_candidate), 1, 0)
merge_region = measure.regionprops(merge_test, cache=False)[0]
average_solidity = (cell_region.solidity + merge_candidate_region.solidity) / 2
if (4 * cell_region.area < self.min_area) | (4 * merge_candidate_region.area < self.min_area) | (
(merge_region.area < 4 * self.min_area) & (
merge_region.solidity > 0.975 * average_solidity)):
new_cell = min(cell_map[cell], cell_map[merge_candidate])
corrected_masks[merge_indices[0]:merge_indices[1], merge_indices[2]:merge_indices[3]][
merge_test > 0] = new_cell
cell_map[merge_candidate] = new_cell
masks = corrected_masks
regions = measure.regionprops(masks)
coords = np.array([region.centroid for region in regions])
return masks, coords
@staticmethod
def _conditional_local_threshold(image, mask, global_binary, block_size, k_max, c):
"""(For internal use) Calculate conditional local threshold.
Parameters
----------
image : numpy.ndarray
Array of the image to be thresholded.
mask : numpy.ndarray
Array marking the region to be thresholding.
global_binary : numpy.ndarray
Binarized array of the image using global thresholding.
block_size : int
Odd size of pixel neighborhood which is used for local thresholding (e.g., 3, 5, 7, ..., 21).
k_max : float
Maximum fractional weight on local intensity used in thresholding. The value must be between 0 (global
thresholding) and 1 (local thresholding).
c : float
Global constant subtracted from local threshold array.
Returns
-------
threshold : numpy.ndarray
Array of local threshold values. All pixels in the input image higher than the corresponding pixel in
the threshold array are considered foreground.
"""
image_masked = np.float64(image) * mask
background = image_masked * np.invert(global_binary)
mask = np.float64(mask)
image_blurred = cv.blur(image_masked, (block_size, block_size))
mask = cv.blur(mask, (block_size, block_size))
smooth = image_blurred / (mask + 1e-15)
threshold = image_masked > smooth + c
if (k_max > 0) & (np.sum(background) > 0):
k = k_max * (1 - np.sqrt(np.sum(threshold) / np.sum(mask)))
bg_std = np.std(background[background > 0])
offset = k * bg_std + (1 - k) * c
threshold = image_masked > smooth + offset
threshold = threshold * mask
return threshold
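    # Worked sketch (illustrative numbers, not from the original paper): with
    # k_max = 0.5 and a thresholded foreground fraction of 25%,
    #   k = 0.5 * (1 - sqrt(0.25)) = 0.25
    # so the offset added to the smoothed image blends 25% of the local background
    # standard deviation with 75% of the global constant c.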
def _estimate_parameters(self):
"""(For internal use) Estimate parameters for segmentation.
"""
global_binary = cv.morphologyEx(self.global_binary.astype(np.uint8), cv.MORPH_ERODE, np.ones((3, 3)))
global_binary = segmentation.clear_border(global_binary)
foreground_labeled = measure.label(global_binary)
regions = measure.regionprops(foreground_labeled, cache=False)
equivalent_diameters = np.array([region.equivalent_diameter for region in regions])
self.default_nuclei_diameter = np.around(np.median(equivalent_diameters), 2)
n = round(self.default_nuclei_diameter / 4)
        self.default_sigma = np.around(2 ** (2 * n) / (special.comb(2 * n, n) * np.sqrt(2 * np.pi)), 2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the catalog module.
"""
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Gaussian2D
from astropy.table import QTable
import astropy.units as u
from numpy.testing import assert_allclose, assert_equal, assert_raises
import numpy as np
import pytest
from ..catalog import SourceCatalog
from ..core import SegmentationImage
from ..detect import detect_sources
from ...aperture import CircularAperture, EllipticalAperture
from ...datasets import make_gwcs, make_wcs, make_noise_image
from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB, HAS_SCIPY # noqa
@pytest.mark.skipif('not HAS_SCIPY')
class TestSourceCatalog:
def setup_class(self):
xcen = 51.
ycen = 52.7
major_sigma = 8.
minor_sigma = 3.
theta = np.pi / 6.
g1 = Gaussian2D(111., xcen, ycen, major_sigma, minor_sigma,
theta=theta)
g2 = Gaussian2D(50, 20, 80, 5.1, 4.5)
g3 = Gaussian2D(70, 75, 18, 9.2, 4.5)
g4 = Gaussian2D(111., 11.1, 12.2, major_sigma, minor_sigma,
theta=theta)
g5 = Gaussian2D(81., 61, 42.7, major_sigma, minor_sigma, theta=theta)
g6 = Gaussian2D(107., 75, 61, major_sigma, minor_sigma, theta=-theta)
g7 = Gaussian2D(107., 90, 90, 4, 2, theta=-theta)
yy, xx = np.mgrid[0:101, 0:101]
self.data = (g1(xx, yy) + g2(xx, yy) + g3(xx, yy) + g4(xx, yy)
+ g5(xx, yy) + g6(xx, yy) + g7(xx, yy))
threshold = 27.
self.segm = detect_sources(self.data, threshold, npixels=5)
self.error = make_noise_image(self.data.shape, mean=0, stddev=2.,
seed=123)
self.background = np.ones(self.data.shape) * 5.1
self.mask = np.zeros(self.data.shape, dtype=bool)
self.mask[0:30, 0:30] = True
self.wcs = make_wcs(self.data.shape)
self.cat = SourceCatalog(self.data, self.segm, error=self.error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24)
unit = u.nJy
self.unit = unit
self.cat_units = SourceCatalog(self.data << unit, self.segm,
error=self.error << unit,
background=self.background << unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24)
@pytest.mark.parametrize('with_units', (True, False))
def test_catalog(self, with_units):
props1 = ('background_centroid', 'background_mean', 'background_sum',
'bbox', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx',
'cxy', 'cyy', 'ellipticity', 'elongation', 'fwhm',
'equivalent_radius', 'gini', 'kron_radius', 'maxval_xindex',
'maxval_yindex', 'minval_xindex', 'minval_yindex',
'perimeter', 'sky_bbox_ll', 'sky_bbox_lr', 'sky_bbox_ul',
'sky_bbox_ur', 'sky_centroid_icrs', 'local_background',
'segment_flux', 'segment_fluxerr', 'kron_flux',
'kron_fluxerr')
props2 = ('centroid', 'covariance', 'covariance_eigvals',
'cutout_centroid', 'cutout_maxval_index',
'cutout_minval_index', 'inertia_tensor', 'maxval_index',
'minval_index', 'moments', 'moments_central', 'background',
'background_ma', 'convdata', 'convdata_ma', 'data',
'data_ma', 'error', 'error_ma', 'segment', 'segment_ma')
props = tuple(self.cat.default_columns) + props1 + props2
if with_units:
cat1 = self.cat_units.copy()
cat2 = self.cat_units.copy()
else:
cat1 = self.cat.copy()
cat2 = self.cat.copy()
# test extra properties
cat1.circular_photometry(5.0, name='circ5')
cat1.kron_photometry((2.0, 1.0), name='kron2')
cat1.fluxfrac_radius(0.5, name='r_hl')
segment_snr = cat1.segment_flux / cat1.segment_fluxerr
cat1.add_extra_property('segment_snr', segment_snr)
props = list(props)
props.extend(cat1.extra_properties)
idx = 1
# evaluate (cache) catalog properties before slice
obj = cat1[idx]
for prop in props:
assert_equal(getattr(cat1, prop)[idx], getattr(obj, prop))
# slice catalog before evaluating catalog properties
obj = cat2[idx]
obj.circular_photometry(5.0, name='circ5')
obj.kron_photometry((2.0, 1.0), name='kron2')
obj.fluxfrac_radius(0.5, name='r_hl')
segment_snr = obj.segment_flux / obj.segment_fluxerr
obj.add_extra_property('segment_snr', segment_snr)
for prop in props:
assert_equal(getattr(obj, prop), getattr(cat1, prop)[idx])
@pytest.mark.parametrize('with_units', (True, False))
def test_catalog_detection_cat(self, with_units):
"""
Test aperture-based properties with an input detection catalog.
"""
error = 2.0 * self.error
data2 = self.data + error
if with_units:
cat1 = self.cat_units.copy()
cat2 = SourceCatalog(data2 << self.unit, self.segm,
error=error << self.unit,
background=self.background << self.unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24, detection_cat=None)
cat3 = SourceCatalog(data2 << self.unit, self.segm,
error=error << self.unit,
background=self.background << self.unit,
mask=self.mask, wcs=self.wcs,
localbkg_width=24, detection_cat=cat1)
else:
cat1 = self.cat.copy()
cat2 = SourceCatalog(data2, self.segm, error=error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24,
detection_cat=None)
cat3 = SourceCatalog(data2, self.segm, error=error,
background=self.background, mask=self.mask,
wcs=self.wcs, localbkg_width=24,
detection_cat=cat1)
assert_equal(cat1.kron_radius, cat3.kron_radius)
# assert not equal
with assert_raises(AssertionError):
assert_equal(cat1.kron_radius, cat2.kron_radius)
with assert_raises(AssertionError):
assert_equal(cat2.kron_flux, cat3.kron_flux)
with assert_raises(AssertionError):
assert_equal(cat2.kron_fluxerr, cat3.kron_fluxerr)
with assert_raises(AssertionError):
assert_equal(cat1.kron_flux, cat3.kron_flux)
with assert_raises(AssertionError):
assert_equal(cat1.kron_fluxerr, cat3.kron_fluxerr)
flux1, fluxerr1 = cat1.circular_photometry(1.0)
flux2, fluxerr2 = cat2.circular_photometry(1.0)
flux3, fluxerr3 = cat3.circular_photometry(1.0)
with assert_raises(AssertionError):
assert_equal(flux2, flux3)
with assert_raises(AssertionError):
assert_equal(fluxerr2, fluxerr3)
with assert_raises(AssertionError):
assert_equal(flux1, flux2)
with assert_raises(AssertionError):
assert_equal(fluxerr1, fluxerr2)
flux1, fluxerr1 = cat1.kron_photometry((2.0, 1.0))
flux2, fluxerr2 = cat2.kron_photometry((2.0, 1.0))
flux3, fluxerr3 = cat3.kron_photometry((2.0, 1.0))
with assert_raises(AssertionError):
assert_equal(flux2, flux3)
with assert_raises(AssertionError):
assert_equal(fluxerr2, fluxerr3)
with assert_raises(AssertionError):
assert_equal(flux1, flux2)
with assert_raises(AssertionError):
assert_equal(fluxerr1, fluxerr2)
radius1 = cat1.fluxfrac_radius(0.5)
radius2 = cat2.fluxfrac_radius(0.5)
radius3 = cat3.fluxfrac_radius(0.5)
with assert_raises(AssertionError):
assert_equal(radius2, radius3)
with assert_raises(AssertionError):
assert_equal(radius1, radius2)
cat4 = cat3[0:1]
assert len(cat4.kron_radius) == 1
def test_minimal_catalog(self):
cat = SourceCatalog(self.data, self.segm)
obj = cat[4]
props = ('background', 'background_ma', 'error', 'error_ma')
for prop in props:
assert getattr(obj, prop) is None
props = ('background_mean', 'background_sum', 'background_centroid',
'segment_fluxerr', 'kron_fluxerr')
for prop in props:
assert np.isnan(getattr(obj, prop))
assert obj.local_background_aperture is None
assert obj.local_background == 0.
def test_slicing(self):
self.cat.to_table() # evaluate and cache several properties
obj1 = self.cat[0]
assert obj1.nlabels == 1
obj1b = self.cat.get_label(1)
assert obj1b.nlabels == 1
obj2 = self.cat[0:1]
assert obj2.nlabels == 1
assert len(obj2) == 1
obj3 = self.cat[0:3]
obj3b = self.cat.get_labels((1, 2, 3))
assert_equal(obj3.label, obj3b.label)
obj4 = self.cat[[0, 1, 2]]
assert obj3.nlabels == 3
assert obj3b.nlabels == 3
assert obj4.nlabels == 3
assert len(obj3) == 3
assert len(obj4) == 3
obj5 = self.cat[[3, 2, 1]]
labels = [4, 3, 2]
obj5b = self.cat.get_labels(labels)
assert_equal(obj5.label, obj5b.label)
assert obj5.nlabels == 3
assert len(obj5) == 3
assert_equal(obj5.label, labels)
obj6 = obj5[0]
assert obj6.label == labels[0]
mask = self.cat.label > 3
obj7 = self.cat[mask]
assert obj7.nlabels == 4
assert len(obj7) == 4
with pytest.raises(TypeError):
obj1 = self.cat[0]
obj2 = obj1[0]
def test_iter(self):
labels = []
for obj in self.cat:
labels.append(obj.label)
assert len(labels) == len(self.cat)
def test_table(self):
columns = ['label', 'xcentroid', 'ycentroid']
tbl = self.cat.to_table(columns=columns)
assert len(tbl) == 7
assert tbl.colnames == columns
def test_invalid_inputs(self):
# test 1D arrays
img1d = np.arange(4)
segm = SegmentationImage(img1d)
with pytest.raises(ValueError):
SourceCatalog(img1d, segm)
wrong_shape = np.ones((3, 3))
with pytest.raises(ValueError):
SourceCatalog(wrong_shape, self.segm)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm, error=wrong_shape)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm, background=wrong_shape)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm, mask=wrong_shape)
with pytest.raises(ValueError):
segm = SegmentationImage(wrong_shape)
SourceCatalog(self.data, segm)
with pytest.raises(TypeError):
SourceCatalog(self.data, wrong_shape)
with pytest.raises(TypeError):
obj = SourceCatalog(self.data, self.segm)[0]
len(obj)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm, localbkg_width=-1)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm, localbkg_width=3.4)
with pytest.raises(ValueError):
apermask_method = 'invalid'
SourceCatalog(self.data, self.segm,
apermask_method=apermask_method)
with pytest.raises(ValueError):
kron_params = (2.5, 0.0, 3.0)
SourceCatalog(self.data, self.segm, kron_params=kron_params)
with pytest.raises(ValueError):
kron_params = (-2.5, 0.0)
SourceCatalog(self.data, self.segm, kron_params=kron_params)
with pytest.raises(ValueError):
kron_params = (2.5, -4.0)
SourceCatalog(self.data, self.segm, kron_params=kron_params)
def test_invalid_units(self):
unit = u.uJy
wrong_unit = u.km
with pytest.raises(ValueError):
SourceCatalog(self.data << unit, self.segm,
error=self.error << wrong_unit)
with pytest.raises(ValueError):
SourceCatalog(self.data << unit, self.segm,
background=self.background << wrong_unit)
# all array inputs must have the same unit
with pytest.raises(ValueError):
SourceCatalog(self.data << unit, self.segm, error=self.error)
with pytest.raises(ValueError):
SourceCatalog(self.data, self.segm,
background=self.background << unit)
def test_wcs(self):
mywcs = make_wcs(self.data.shape)
cat = SourceCatalog(self.data, self.segm, wcs=mywcs)
obj = cat[0]
assert obj.sky_centroid is not None
assert obj.sky_centroid_icrs is not None
assert obj.sky_bbox_ll is not None
assert obj.sky_bbox_ul is not None
assert obj.sky_bbox_lr is not None
assert obj.sky_bbox_ur is not None
@pytest.mark.skipif('not HAS_GWCS')
def test_gwcs(self):
mywcs = make_gwcs(self.data.shape)
cat = SourceCatalog(self.data, self.segm, wcs=mywcs)
obj = cat[1]
assert obj.sky_centroid is not None
assert obj.sky_centroid_icrs is not None
assert obj.sky_bbox_ll is not None
assert obj.sky_bbox_ul is not None
assert obj.sky_bbox_lr is not None
assert obj.sky_bbox_ur is not None
def test_nowcs(self):
cat = SourceCatalog(self.data, self.segm, wcs=None)
obj = cat[2]
assert obj.sky_centroid is None
assert obj.sky_centroid_icrs is None
assert obj.sky_bbox_ll is None
assert obj.sky_bbox_ul is None
assert obj.sky_bbox_lr is None
assert obj.sky_bbox_ur is None
def test_to_table(self):
cat = SourceCatalog(self.data, self.segm)
assert len(cat) == 7
tbl = cat.to_table()
assert isinstance(tbl, QTable)
assert len(tbl) == 7
obj = cat[0]
assert obj.nlabels == 1
tbl = obj.to_table()
assert len(tbl) == 1
def test_masks(self):
"""
Test masks, including automatic masking of all non-finite (e.g.,
NaN, inf) values in the data array.
"""
data = np.copy(self.data)
error = np.copy(self.error)
background = np.copy(self.background)
data[:, 55] = np.nan
data[16, :] = np.inf
error[:, 55] = np.nan
error[16, :] = np.inf
background[:, 55] = np.nan
background[16, :] = np.inf
cat = SourceCatalog(data, self.segm, error=error,
background=background, mask=self.mask)
props = ('xcentroid', 'ycentroid', 'area', 'orientation',
'segment_flux', 'segment_fluxerr', 'kron_flux',
'kron_fluxerr', 'background_mean')
obj = cat[0]
for prop in props:
assert np.isnan(getattr(obj, prop))
objs = cat[1:]
for prop in props:
assert np.all(np.isfinite(getattr(objs, prop)))
# test that mask=None is the same as mask=np.ma.nomask
cat1 = SourceCatalog(data, self.segm, mask=None)
cat2 = SourceCatalog(data, self.segm, mask=np.ma.nomask)
assert cat1[0].xcentroid == cat2[0].xcentroid
def test_repr_str(self):
cat = SourceCatalog(self.data, self.segm)
assert repr(cat) == str(cat)
lines = ('Length: 7', 'labels: [1 2 3 4 5 6 7]')
for line in lines:
assert line in repr(cat)
def test_kernel(self):
kernel = np.array([[1., 2, 1], [2, 4, 2], [1, 2, 100]])
kernel /= kernel.sum()
cat1 = SourceCatalog(self.data, self.segm, kernel=None)
cat2 = SourceCatalog(self.data, self.segm, kernel=kernel)
assert not np.array_equal(cat1.xcentroid, cat2.xcentroid)
assert not np.array_equal(cat1.ycentroid, cat2.ycentroid)
def test_detection_cat(self):
data2 = self.data - 5
cat1 = SourceCatalog(data2, self.segm)
cat2 = SourceCatalog(data2, self.segm, detection_cat=self.cat)
assert len(cat2.kron_aperture) == len(cat2)
assert not np.array_equal(cat1.kron_radius, cat2.kron_radius)
assert not np.array_equal(cat1.kron_flux, cat2.kron_flux)
assert_allclose(cat2.kron_radius, self.cat.kron_radius)
assert not np.array_equal(cat2.kron_flux, self.cat.kron_flux)
with pytest.raises(TypeError):
SourceCatalog(data2, self.segm, detection_cat=np.arange(4))
with pytest.raises(ValueError):
segm = self.segm.copy()
segm.remove_labels((6, 7))
cat = SourceCatalog(self.data, segm)
SourceCatalog(self.data, self.segm, detection_cat=cat)
def test_kron_minradius(self):
kron_params = (2.5, 10.0)
cat = SourceCatalog(self.data, self.segm, mask=self.mask,
apermask_method='none', kron_params=kron_params)
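        # With a large minimum Kron radius (kron_params[1] = 10), small sources are expected to
        # fall back to a circular aperture, while a fully masked source gets no aperture at all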
assert cat.kron_aperture[0] is None
assert isinstance(cat.kron_aperture[2], EllipticalAperture)
assert isinstance(cat.kron_aperture[4], CircularAperture)
def test_kron_masking(self):
apermask_method = 'none'
cat1 = SourceCatalog(self.data, self.segm,
apermask_method=apermask_method)
apermask_method = 'mask'
cat2 = SourceCatalog(self.data, self.segm,
apermask_method=apermask_method)
apermask_method = 'correct'
cat3 = SourceCatalog(self.data, self.segm,
apermask_method=apermask_method)
idx = 2 # source with close neighbors
assert cat1[idx].kron_flux > cat2[idx].kron_flux
assert cat3[idx].kron_flux > cat2[idx].kron_flux
assert cat1[idx].kron_flux > cat3[idx].kron_flux
def test_kron_negative(self):
cat = SourceCatalog(self.data - 10, self.segm)
assert np.all(np.isnan(cat.kron_radius.value))
assert np.all(np.isnan(cat.kron_flux))
def test_kron_photometry(self):
flux1, fluxerr1 = self.cat.kron_photometry((2.5, 1.0))
assert_allclose(flux1, self.cat.kron_flux)
assert_allclose(fluxerr1, self.cat.kron_fluxerr)
flux1, fluxerr1 = self.cat.kron_photometry((1.0, 1.0), name='kron1')
flux2, fluxerr2 = self.cat.kron_photometry((2.0, 1.0), name='kron2')
        assert_allclose(flux1, self.cat.kron1_flux)
from qutiepy import *
import numpy as np
from scipy.linalg import expm
import warnings
warnings.filterwarnings('ignore')
"""
Ax = b
"""
def main(debug=False):
A = np.array([[0.707,0.707],
[0.707,-0.707]])
k = np.linalg.cond(A)
print("k = ", k)
bBits = int(np.log2(A.shape[0]))
bAmps = [1, 0]
b = register(bBits)
b.setAmps(bAmps)
answer = np.linalg.solve(A, b.amps).astype(float)
t = 6 # bits in phi
T = 2 ** t # states in phi
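    # Clock-register initial state (as in the HHL algorithm): amplitudes proportional to sin(pi*(tau+1/2)/T)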
amps = np.flip(np.sqrt(2/T) * np.array([np.sin((np.pi*(tau+0.5)/T)) for tau in range(T)]))
phi0 = register(t)
phi0.setAmps(amps)
phi0b = prod(phi0, b)
t0 = 1
### HAMILTONIAN SIMULATION
hamMatTerms = []
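    # Build U = sum_tau |tau><tau| (x) exp(i*A*tau*t0/T), the conditional evolution used for phase estimation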
    for tau in range(T): # one term per clock basis state |tau>
        tautau = np.zeros((T, T))
        tautau[tau, tau] = 1 # T x T projector onto |tau>
        oper = expm(1j*tau*t0*A/T) # evolution acting on the b register
        term = np.kron(tautau, oper)
import unittest
from functools import partial
from pathlib import Path
import numpy as np
from brainbox.core import Bunch
from oneibl.one import ONE
from ibllib.qc import task_metrics as qcmetrics
from brainbox.behavior.wheel import cm_to_rad
class TestTaskMetrics(unittest.TestCase):
def setUp(self):
self.data = self.load_fake_bpod_data()
self.wheel_gain = 4
wheel_data = self.load_fake_wheel_data(self.data, wheel_gain=self.wheel_gain)
self.data.update(wheel_data)
@staticmethod
def load_fake_bpod_data(n=5):
"""Create fake extractor output of bpodqc.load_data
:param n: the number of trials
:return: a dict of simulated trial data
"""
trigg_delay = 1e-4 # an ideal delay between triggers and measured times
        resp_feedback_delay = 1e-3 # delay between response and feedback
stimOff_itiIn_delay = 5e-3 # delay between stimOff and itiIn
N = partial(np.random.normal, (n,)) # Convenience function for norm dist sampling
choice = np.ones((n,), dtype=int)
choice[[1, 3]] = -1 # a couple of incorrect trials
choice[0] = 0 # a nogo trial
# One trial of each type incorrect
correct = choice != 0
correct[np.argmax(choice == 1)] = 0
correct[np.argmax(choice == -1)] = 0
quiescence_length = 0.2 + np.random.standard_exponential(size=(n,))
iti_length = 0.5 # inter-trial interval
# trial lengths include quiescence period, a couple small trigger delays and iti
        trial_lengths = quiescence_length + resp_feedback_delay + (trigg_delay * 4) + iti_length
# add on 60s for nogos + feedback time (1 or 2s) + ~0.5s for other responses
trial_lengths += (choice == 0) * 60 + (~correct + 1) + (choice != 0) * N(0.5)
start_times = np.concatenate(([0], np.cumsum(trial_lengths)[:-1]))
end_times = np.cumsum(trial_lengths) - 1e-2
data = {
"phase": np.random.uniform(low=0, high=2 * np.pi, size=(n,)),
"quiescence": quiescence_length,
"choice": choice,
"correct": correct,
"intervals": np.c_[start_times, end_times],
"itiIn_times": end_times - iti_length + stimOff_itiIn_delay,
"position": np.ones_like(choice) * 35
}
data["stimOnTrigger_times"] = start_times + data["quiescence"] + 1e-4
data["stimOn_times"] = data["stimOnTrigger_times"] + 1e-1
data["goCueTrigger_times"] = data["stimOn_times"] + 1e-3
data["goCue_times"] = data["goCueTrigger_times"] + trigg_delay
data["response_times"] = end_times - (
            resp_feedback_delay + iti_length + (~correct + 1)
)
data["feedback_times"] = data["response_times"] + resp_feeback_delay
data["stimFreeze_times"] = data["response_times"] + 1e-2
data["stimFreezeTrigger_times"] = data["stimFreeze_times"] - trigg_delay
data["feedbackType"] = np.vectorize(lambda x: -1 if x == 0 else x)(data["correct"])
outcome = data["feedbackType"].copy()
outcome[data["choice"] == 0] = 0
data["outcome"] = outcome
# Delay of 1 second if correct, 2 seconds if incorrect
data["stimOffTrigger_times"] = data["feedback_times"] + (~correct + 1)
data["stimOff_times"] = data["stimOffTrigger_times"] + trigg_delay
        # Error tone times are NaN on correct trials (the error cue only fires on incorrect trials)
outcome_times = np.vectorize(lambda x, y: x + 1e-2 if y else np.nan)
data["errorCueTrigger_times"] = outcome_times(data["feedback_times"], ~data["correct"])
data["errorCue_times"] = data["errorCueTrigger_times"] + trigg_delay
data["valveOpen_times"] = outcome_times(data["feedback_times"], data["correct"])
data["rewardVolume"] = ~np.isnan(data["valveOpen_times"]) * 3.0
return data
@staticmethod
def load_fake_wheel_data(trial_data, wheel_gain=4):
        # Load a wheel fragment: a numpy array of the form [timestamps, positions] for a wheel
        # movement during one trial. Positions are from an X1 Bpod rotary encoder, in radians.
wh_path = Path(__file__).parent.joinpath('..', 'fixtures', 'qc').resolve()
wheel_frag = np.load(wh_path.joinpath('wheel.npy'))
resolution = np.mean(np.abs(np.diff(wheel_frag[:, 1]))) # pos diff between samples
# abs displacement, s, in mm required to move 35 visual degrees
POS_THRESH = 35
s_mm = np.abs(POS_THRESH / wheel_gain) # don't care about direction
# convert abs displacement to radians (wheel pos is in rad)
pos_thresh = cm_to_rad(s_mm * 1e-1)
# index of threshold cross
pos_thresh_idx = np.argmax(np.abs(wheel_frag[:, 1]) > pos_thresh)
def qt_wheel_fill(start, end, t_step=0.001, p_step=None):
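            """Fill [start, end) with a random walk of wheel samples: candidate timestamps
            every t_step seconds, with the position changing by +/- p_step at each kept sample."""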
if p_step is None:
p_step = 2 * np.pi / 1024
t = np.arange(start, end, t_step)
p = np.random.randint(-1, 2, len(t))
t = t[p != 0]
p = p[p != 0].cumsum() * p_step
return t, p
        wheel_data = [] # List of generated wheel data fragments
movement_times = [] # List of generated first movement times
def add_frag(t, p):
"""Add wheel data fragments to list, adjusting positions to be within one sample of
one another"""
last_samp = getattr(add_frag, 'last_samp', (0, 0))
p += last_samp[1]
if np.abs(p[0] - last_samp[1]) == 0:
p += resolution
wheel_data.append((t, p))
add_frag.last_samp = (t[-1], p[-1])
for i in np.arange(len(trial_data['choice'])):
# Iterate over trials generating wheel samples for the necessary periods
# trial start to stim on; should be below quiescence threshold
stimOn_trig = trial_data['stimOnTrigger_times'][i]
trial_start = trial_data['intervals'][i, 0]
t, p = qt_wheel_fill(trial_start, stimOn_trig, .5, resolution)
if len(t) > 0: # Possible for no movement during quiescence
add_frag(t, p)
# stim on to trial end
trial_end = trial_data['intervals'][i, 1]
if trial_data['choice'][i] == 0:
# Add random wheel movements for duration of trial
goCue = trial_data['goCue_times'][i]
t, p = qt_wheel_fill(goCue, trial_end, .1, resolution)
add_frag(t, p)
movement_times.append(t[0])
else:
# Align wheel fragment with response time
response_time = trial_data['response_times'][i]
t = wheel_frag[:, 0] + response_time - wheel_frag[pos_thresh_idx, 0]
p = np.abs(wheel_frag[:, 1]) * trial_data['choice'][i]
assert t[0] > add_frag.last_samp[0]
movement_times.append(t[1])
add_frag(t, p)
# Fill in random movements between end of response and trial end
t, p = qt_wheel_fill(t[-1] + 0.01, trial_end, p_step=resolution)
add_frag(t, p)
# Stitch wheel fragments and assert no skips
wheel_data = np.concatenate(list(map(np.column_stack, wheel_data)))
assert np.all(np.diff(wheel_data[:, 0]) > 0), "timestamps don't strictly increase"
np.testing.assert_allclose(np.abs(np.diff(wheel_data[:, 1])), resolution)
assert len(movement_times) == trial_data['intervals'].shape[0]
return {
'wheel_timestamps': wheel_data[:, 0],
'wheel_position': wheel_data[:, 1],
'firstMovement_times': np.array(movement_times)
}
def test_check_stimOn_goCue_delays(self):
metric, passed = qcmetrics.check_stimOn_goCue_delays(self.data)
self.assertTrue(np.allclose(metric, 0.0011), "failed to return correct metric")
# Set incorrect timestamp (goCue occurs before stimOn)
self.data["goCue_times"][-1] = self.data["stimOn_times"][-1] - 1e-4
metric, passed = qcmetrics.check_stimOn_goCue_delays(self.data)
n = len(self.data["stimOn_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_response_feedback_delays(self):
metric, passed = qcmetrics.check_response_feedback_delays(self.data)
self.assertTrue(np.allclose(metric, 0.001), "failed to return correct metric")
# Set incorrect timestamp (feedback occurs before response)
self.data["feedback_times"][-1] = self.data["response_times"][-1] - 1e-4
metric, passed = qcmetrics.check_response_feedback_delays(self.data)
n = len(self.data["feedback_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_response_stimFreeze_delays(self):
metric, passed = qcmetrics.check_response_stimFreeze_delays(self.data)
self.assertTrue(np.allclose(metric, 1e-2), "failed to return correct metric")
# Set incorrect timestamp (stimFreeze occurs before response)
self.data["stimFreeze_times"][-1] = self.data["response_times"][-1] - 1e-4
metric, passed = qcmetrics.check_response_stimFreeze_delays(self.data)
n = len(self.data["feedback_times"]) - np.sum(self.data["choice"] == 0)
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_positive_feedback_stimOff_delays(self):
metric, passed = qcmetrics.check_positive_feedback_stimOff_delays(self.data)
self.assertTrue(
np.allclose(metric[self.data["correct"]], 1e-4), "failed to return correct metric"
)
# Set incorrect timestamp (stimOff occurs just after response)
id = np.argmax(self.data["correct"])
self.data["stimOff_times"][id] = self.data["response_times"][id] + 1e-2
metric, passed = qcmetrics.check_positive_feedback_stimOff_delays(self.data)
expected = (self.data["correct"].sum() - 1) / self.data["correct"].sum()
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_negative_feedback_stimOff_delays(self):
err_trial = ~self.data["correct"]
metric, passed = qcmetrics.check_negative_feedback_stimOff_delays(self.data)
values = np.abs(metric[err_trial])
self.assertTrue(np.allclose(values, 1e-2), "failed to return correct metric")
# Set incorrect timestamp (stimOff occurs 1s after response)
id = np.argmax(err_trial)
self.data["stimOff_times"][id] = self.data["response_times"][id] + 1
metric, passed = qcmetrics.check_negative_feedback_stimOff_delays(self.data)
expected = (err_trial.sum() - 1) / err_trial.sum()
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_error_trial_event_sequence(self):
metric, passed = qcmetrics.check_error_trial_event_sequence(self.data)
self.assertTrue(np.all(metric == ~self.data['correct']), "failed to return correct metric")
self.assertTrue(np.all(passed))
# Set incorrect timestamp (itiIn occurs before errorCue)
err_trial = ~self.data["correct"]
(id,) = np.where(err_trial)
self.data["intervals"][id[0], 0] = np.inf
self.data["errorCue_times"][id[1]] = 0
metric, passed = qcmetrics.check_error_trial_event_sequence(self.data)
expected = (err_trial.sum() - 2) / err_trial.sum()
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_correct_trial_event_sequence(self):
metric, passed = qcmetrics.check_correct_trial_event_sequence(self.data)
self.assertTrue(np.all(metric == self.data['correct']), "failed to return correct metric")
self.assertTrue(np.all(passed))
# Set incorrect timestamp
correct = self.data["correct"]
id = np.argmax(correct)
self.data["intervals"][id, 0] = np.inf
metric, passed = qcmetrics.check_correct_trial_event_sequence(self.data)
expected = (correct.sum() - 1) / correct.sum()
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_trial_length(self):
metric, passed = qcmetrics.check_trial_length(self.data)
self.assertTrue(np.all(metric), "failed to return correct metric")
# Set incorrect timestamp
self.data["goCue_times"][-1] = 0
metric, passed = qcmetrics.check_trial_length(self.data)
n = len(self.data["goCue_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_goCue_delays(self):
metric, passed = qcmetrics.check_goCue_delays(self.data)
self.assertTrue(np.allclose(metric, 1e-4), "failed to return correct metric")
# Set incorrect timestamp
self.data["goCue_times"][1] = self.data["goCueTrigger_times"][1] + 0.1
metric, passed = qcmetrics.check_goCue_delays(self.data)
n = len(self.data["goCue_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_errorCue_delays(self):
metric, passed = qcmetrics.check_errorCue_delays(self.data)
err_trial = ~self.data["correct"]
self.assertTrue(np.allclose(metric[err_trial], 1e-4), "failed to return correct metric")
# Set incorrect timestamp
id = np.argmax(err_trial)
self.data["errorCue_times"][id] = self.data["errorCueTrigger_times"][id] + 0.1
metric, passed = qcmetrics.check_errorCue_delays(self.data)
n = err_trial.sum()
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_stimOn_delays(self):
metric, passed = qcmetrics.check_stimOn_delays(self.data)
self.assertTrue(np.allclose(metric, 1e-1), "failed to return correct metric")
# Set incorrect timestamp
self.data["stimOn_times"][-1] = self.data["stimOnTrigger_times"][-1] + 0.2
metric, passed = qcmetrics.check_stimOn_delays(self.data)
n = len(self.data["stimOn_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_stimOff_delays(self):
metric, passed = qcmetrics.check_stimOff_delays(self.data)
self.assertTrue(np.allclose(metric, 1e-4), "failed to return correct metric")
# Set incorrect timestamp
self.data["stimOff_times"][-1] = self.data["stimOffTrigger_times"][-1] + 0.2
metric, passed = qcmetrics.check_stimOff_delays(self.data)
n = len(self.data["stimOff_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_stimFreeze_delays(self):
metric, passed = qcmetrics.check_stimFreeze_delays(self.data)
self.assertTrue(np.allclose(metric, 1e-4), "failed to return correct metric")
# Set incorrect timestamp
self.data["stimFreeze_times"][-1] = self.data["stimFreezeTrigger_times"][-1] + 0.2
metric, passed = qcmetrics.check_stimFreeze_delays(self.data)
n = len(self.data["stimFreeze_times"])
expected = (n - 1) / n
self.assertEqual(np.nanmean(passed), expected, "failed to detect dodgy timestamp")
def test_check_reward_volumes(self):
metric, passed = qcmetrics.check_reward_volumes(self.data)
self.assertTrue(all(x in {0.0, 3.0} for x in metric), "failed to return correct metric")
self.assertTrue(np.all(passed))
# Set incorrect volume
id = np.array([np.argmax(self.data["correct"]), np.argmax(~self.data["correct"])])
self.data["rewardVolume"][id] = self.data["rewardVolume"][id] + 1
metric, passed = qcmetrics.check_reward_volumes(self.data)
self.assertTrue(np.mean(passed) == 0.6, "failed to detect incorrect reward volumes")
def test_check_reward_volume_set(self):
metric, passed = qcmetrics.check_reward_volume_set(self.data)
self.assertTrue(all(x in {0.0, 3.0} for x in metric), "failed to return correct metric")
self.assertTrue(passed)
# Add a new volume to the set
id = np.argmax(self.data["correct"])
self.data["rewardVolume"][id] = 2.3
metric, passed = qcmetrics.check_reward_volume_set(self.data)
self.assertFalse(passed, "failed to detect incorrect reward volume set")
# Set 0 volumes to new value; set length still 2 but should fail anyway
self.data["rewardVolume"][~self.data["correct"]] = 2.3
metric, passed = qcmetrics.check_reward_volume_set(self.data)
self.assertFalse(passed, "failed to detect incorrect reward volume set")
def test_check_audio_pre_trial(self):
# Create Sound sync fake data that is OK
BNC2_OK = {
"times": self.data["goCue_times"] + 1e-1,
"polarities": np.array([1, -1, 1, -1, 1]),
}
# Create Sound sync fake data that is NOT OK
BNC2_NOK = {
"times": self.data["goCue_times"] - 1e-1,
"polarities": np.array([1, -1, 1, -1, 1]),
}
metric, passed = qcmetrics.check_audio_pre_trial(self.data, audio=BNC2_OK)
self.assertTrue(~np.all(metric))
self.assertTrue(np.all(passed))
metric, passed = qcmetrics.check_audio_pre_trial(self.data, audio=BNC2_NOK)
self.assertTrue(np.all(metric))
self.assertTrue(~np.all(passed))
def test_check_wheel_freeze_during_quiescence(self):
metric, passed = qcmetrics.check_wheel_freeze_during_quiescence(self.data)
self.assertTrue(np.all(passed))
# Make one trial move more
n = 1 # Index of trial to manipulate
t1 = self.data['intervals'][n, 0]
t2 = self.data['stimOnTrigger_times'][n]
ts, pos = (self.data['wheel_timestamps'], self.data['wheel_position'])
wh_idx = np.argmax(ts > t1)
if ts[wh_idx] > self.data['stimOnTrigger_times'][n]:
# No sample during quiescence; insert one
self.data['wheel_timestamps'] = np.insert(ts, wh_idx, t2 - .001)
self.data['wheel_position'] = np.insert(pos, wh_idx, np.inf)
else: # Otherwise make one sample infinite
self.data['wheel_position'][wh_idx] = np.inf
metric, passed = qcmetrics.check_wheel_freeze_during_quiescence(self.data)
self.assertFalse(passed[n])
self.assertTrue(metric[n] > 2)
def test_check_wheel_move_before_feedback(self):
metric, passed = qcmetrics.check_wheel_move_before_feedback(self.data)
nogo = self.data['choice'] == 0
self.assertTrue(np.all(passed[~nogo]))
self.assertTrue(np.isnan(metric[nogo]).all())
self.assertTrue(np.isnan(passed[nogo]).all())
# Remove wheel data around feedback for choice trial
assert self.data['choice'].any(), 'no choice trials in test data'
n = np.argmax(self.data['choice'] != 0) # Index of choice trial
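        # The XOR keeps only wheel samples outside the +/- 1 s window around the feedback time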
mask = np.logical_xor(self.data['wheel_timestamps'] > self.data['feedback_times'][n] - 1,
self.data['wheel_timestamps'] < self.data['feedback_times'][n] + 1)
self.data['wheel_timestamps'] = self.data['wheel_timestamps'][mask]
self.data['wheel_position'] = self.data['wheel_position'][mask]
metric, passed = qcmetrics.check_wheel_move_before_feedback(self.data)
self.assertFalse(passed[n] or metric[n] != 0)
def test_check_wheel_move_during_closed_loop(self):
gain = self.wheel_gain or 4
metric, passed = qcmetrics.check_wheel_move_during_closed_loop(self.data, gain)
nogo = self.data['choice'] == 0
self.assertTrue(np.all(passed[~nogo]))
self.assertTrue(np.isnan(metric[nogo]).all())
self.assertTrue(np.isnan(passed[nogo]).all())
# Remove wheel data for choice trial
assert self.data['choice'].any(), 'no choice trials in test data'
n = np.argmax(self.data['choice'] != 0) # Index of choice trial
mask = np.logical_xor(self.data['wheel_timestamps'] < self.data['goCue_times'][n],
self.data['wheel_timestamps'] > self.data['response_times'][n])
self.data['wheel_timestamps'] = self.data['wheel_timestamps'][mask]
self.data['wheel_position'] = self.data['wheel_position'][mask]
metric, passed = qcmetrics.check_wheel_move_during_closed_loop(self.data, gain)
self.assertFalse(passed[n])
def test_check_wheel_integrity(self):
metric, passed = qcmetrics.check_wheel_integrity(self.data, re_encoding='X1')
self.assertTrue(np.all(passed))
# Insert some violations and verify that they're caught
idx = np.random.randint(self.data['wheel_timestamps'].size, size=2)
self.data['wheel_timestamps'][idx[0] + 1] -= 1
self.data['wheel_position'][idx[1]] -= 1
metric, passed = qcmetrics.check_wheel_integrity(self.data, re_encoding='X1')
self.assertFalse(passed[idx].any())
def test_check_n_trial_events(self):
metric, passed = qcmetrics.check_n_trial_events(self.data)
self.assertTrue(np.all(passed == 1.) and np.all(metric))
# Change errorCueTriggers
id = np.argmax(self.data['correct'])
self.data['errorCueTrigger_times'][id] = self.data['intervals'][id, 0] + np.random.rand()
_, passed = qcmetrics.check_n_trial_events(self.data)
self.assertFalse(passed[id])
# Change another event
id = id - 1 if id > 0 else id + 1
self.data['goCue_times'][id] = self.data['intervals'][id, 1] + np.random.rand()
_, passed = qcmetrics.check_n_trial_events(self.data)
self.assertFalse(passed[id])
def test_check_detected_wheel_moves(self):
metric, passed = qcmetrics.check_detected_wheel_moves(self.data)
self.assertTrue(np.all(self.data['firstMovement_times'] == metric))
self.assertTrue(np.all(passed))
# Change a movement time
id = np.argmax(self.data['choice'] != 0)
self.data['firstMovement_times'][id] = self.data['goCue_times'][id] - 0.3
_, passed = qcmetrics.check_detected_wheel_moves(self.data)
self.assertEqual(0.75, np.nanmean(passed))
# Change the min_qt
_, passed = qcmetrics.check_detected_wheel_moves(self.data, min_qt=0.3)
self.assertTrue(np.all(passed))
@unittest.skip("not implemented")
def test_check_stimulus_move_before_goCue(self):
pass # TODO Nicco?
def test_check_stimOff_itiIn_delays(self):
metric, passed = qcmetrics.check_stimOff_itiIn_delays(self.data)
self.assertTrue(np.nanmean(passed))
# No go should be NaN
id = np.argmax(self.data['choice'] == 0)
self.assertTrue(np.isnan(passed[id]), 'No go trials should be excluded')
# Change a trial
id = np.argmax(self.data['choice'] != 0)
self.data['stimOff_times'][id] = self.data['itiIn_times'][id] + 1e-4
_, passed = qcmetrics.check_stimOff_itiIn_delays(self.data) # recompute
self.assertEqual(0.75, np.nanmean(passed))
def test_check_iti_delays(self):
metric, passed = qcmetrics.check_iti_delays(self.data)
# We want the metric to return positive values that are close to 0.1, given the test data
self.assertTrue(np.allclose(metric[:-1], 1e-2, atol=0.001),
"failed to return correct metric")
self.assertTrue(np.isnan(metric[-1]), "last trial should be NaN")
self.assertTrue(np.all(passed))
# Mess up a trial
id = 2
self.data["intervals"][id + 1, 0] += 0.5 # Next trial starts 0.5 sec later
metric, passed = qcmetrics.check_iti_delays(self.data)
n_trials = len(self.data["stimOff_times"]) - 1 # Last trial NaN here
expected = (n_trials - 1) / n_trials
        self.assertEqual(expected, np.nanmean(passed))
@unittest.skip("not implemented")
def test_check_frame_frequency(self):
pass # TODO Miles
@unittest.skip("not implemented")
def test_check_frame_updates(self):
pass # TODO Nicco?
class TestHabituationQC(unittest.TestCase):
"""Test HabituationQC class
    NB: For complete coverage this should be run alongside the integration tests
"""
def setUp(self):
eid = '8dd0fcb0-1151-4c97-ae35-2e2421695ad7'
one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
username='test_user', password='<PASSWORD>')
self.qc = qcmetrics.HabituationQC(eid, one=one)
self.qc.extractor = Bunch({'data': self.load_fake_bpod_data()}) # Dummy extractor obj
@staticmethod
def load_fake_bpod_data(n=5):
"""Create fake extractor output of bpodqc.load_data
:param n: the number of trials
:return: a dict of simulated trial data
"""
trigg_delay = 1e-4 # an ideal delay between triggers and measured times
iti_length = 0.5 # the so-called 'inter-trial interval'
blank_length = 1. # the time between trial start and stim on
stimCenter_length = 1. # the length of time the stimulus is in the center
# the lengths of time between stim on and stim center
stimOn_length = np.random.normal(size=(n,)) + 10
# trial lengths include couple small trigger delays and iti
trial_lengths = blank_length + stimOn_length + 1e-1 + stimCenter_length
start_times = np.concatenate(([0], np.cumsum(trial_lengths)[:-1]))
end_times = np.cumsum(trial_lengths) - 1e-2
data = {
"phase": np.random.uniform(low=0, high=2 * np.pi, size=(n,)),
"stimOnTrigger_times": start_times + blank_length,
"intervals": np.c_[start_times, end_times],
"itiIn_times": end_times - iti_length,
"position": np.random.choice([-1, 1], n, replace=True) * 35,
"feedbackType": | np.ones(n) | numpy.ones |
"""Define functions to create the triplet loss with online triplet mining."""
import logging
import time
import numpy as np
import tensorflow as tf
from pyxtools import calc_distance_pairs
def _pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
# Get the dot product between all embeddings
# shape (batch_size, batch_size)
dot_product = tf.matmul(embeddings, tf.transpose(embeddings))
# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
# This also provides more numerical stability (the diagonal of the result will be exactly 0).
# shape (batch_size,)
square_norm = tf.diag_part(dot_product)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = tf.expand_dims(square_norm, 1) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances = tf.maximum(distances, 0.0)
if not squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = tf.to_float(tf.equal(distances, 0.0))
distances = distances + mask * 1e-16
distances = tf.sqrt(distances)
# Correct the epsilon added: set the distances on the mask to be exactly 0.0
distances = distances * (1.0 - mask)
return distances
def _get_anchor_positive_triplet_mask(labels):
"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
# Check that i and j are distinct
indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
indices_not_equal = tf.logical_not(indices_equal)
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
# Combine the two masks
mask = tf.logical_and(indices_not_equal, labels_equal)
return mask
def _get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
mask = tf.logical_not(labels_equal)
return mask
def _get_triplet_mask(labels):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
indices_not_equal = tf.logical_not(indices_equal)
i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
# Check if labels[i] == labels[j] and labels[i] != labels[k]
label_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
i_equal_j = tf.expand_dims(label_equal, 2)
i_equal_k = tf.expand_dims(label_equal, 1)
valid_labels = tf.logical_and(i_equal_j, tf.logical_not(i_equal_k))
# Combine the two masks
mask = tf.logical_and(distinct_indices, valid_labels)
return mask
def batch_all_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
We generate all the valid triplets and average the loss over the positive ones.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
triplet_loss: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
# shape (batch_size, batch_size, 1)
anchor_positive_dist = tf.expand_dims(pairwise_dist, 2)
assert anchor_positive_dist.shape[2] == 1, "{}".format(anchor_positive_dist.shape)
# shape (batch_size, 1, batch_size)
anchor_negative_dist = tf.expand_dims(pairwise_dist, 1)
assert anchor_negative_dist.shape[1] == 1, "{}".format(anchor_negative_dist.shape)
# Compute a 3D tensor of size (batch_size, batch_size, batch_size)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
# Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
# and the 2nd (batch_size, 1, batch_size)
triplet_loss = anchor_positive_dist - anchor_negative_dist + margin
# Put to zero the invalid triplets
# (where label(a) != label(p) or label(n) == label(a) or a == p)
mask = _get_triplet_mask(labels)
mask = tf.to_float(mask)
triplet_loss = tf.multiply(mask, triplet_loss)
# Remove negative losses (i.e. the easy triplets)
triplet_loss = tf.maximum(triplet_loss, 0.0)
# Count number of positive triplets (where triplet_loss > 0)
valid_triplets = tf.to_float(tf.greater(triplet_loss, 1e-16))
num_positive_triplets = tf.reduce_sum(valid_triplets)
num_valid_triplets = tf.reduce_sum(mask)
fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)
# Get final mean triplet loss over the positive valid triplets
triplet_loss = tf.reduce_sum(triplet_loss) / (num_positive_triplets + 1e-16)
return triplet_loss, fraction_positive_triplets
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
triplet_loss: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = _get_anchor_positive_triplet_mask(labels)
mask_anchor_positive = tf.to_float(mask_anchor_positive)
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = tf.multiply(mask_anchor_positive, pairwise_dist)
# shape (batch_size, 1)
hardest_positive_dist = tf.reduce_max(anchor_positive_dist, axis=1, keepdims=True)
tf.summary.scalar("hardest_positive_dist", tf.reduce_mean(hardest_positive_dist))
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = _get_anchor_negative_triplet_mask(labels)
mask_anchor_negative = tf.to_float(mask_anchor_negative)
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist = tf.reduce_max(pairwise_dist, axis=1, keepdims=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist = tf.reduce_min(anchor_negative_dist, axis=1, keepdims=True)
tf.summary.scalar("hardest_negative_dist", tf.reduce_mean(hardest_negative_dist))
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
triplet_loss = tf.maximum(hardest_positive_dist - hardest_negative_dist + margin, 0.0)
# Get final mean triplet loss
triplet_loss = tf.reduce_mean(triplet_loss)
return triplet_loss
def _get_pair_sum_loss(xi, margin, squared=False):
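    """Sum of hinge losses for one group of four embeddings xi[0..3]: the distance d(x0, x1) is
    compared, with the given margin, against d(x0, x2), d(x0, x3), d(x1, x2) and d(x1, x3)."""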
d12 = tf.linalg.norm(xi[0] - xi[1] + 1e-16)
d13 = tf.linalg.norm(xi[0] - xi[2] + 1e-16)
d14 = tf.linalg.norm(xi[0] - xi[3] + 1e-16)
d23 = tf.linalg.norm(xi[1] - xi[2] + 1e-16)
d24 = tf.linalg.norm(xi[1] - xi[3] + 1e-16)
if squared:
d12 = tf.square(d12)
d13 = tf.square(d13)
d14 = tf.square(d14)
d23 = tf.square(d23)
d24 = tf.square(d24)
return tf.maximum(d12 - d13 + margin, 0.0) + \
tf.maximum(d12 - d14 + margin, 0.0) + \
tf.maximum(d12 - d23 + margin, 0.0) + \
tf.maximum(d12 - d24 + margin, 0.0)
def test_numpy():
import numpy as np
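    # NumPy re-implementation of _get_pair_sum_loss (margin 0.5), presumably for cross-checking the TF version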
def calc_loss(x) -> float:
loss_list = []
for i in range(x.shape[0]):
d12 = np.linalg.norm(x[i][0] - x[i][1])
d13 = np.linalg.norm(x[i][0] - x[i][2])
d14 = np.linalg.norm(x[i][0] - x[i][3])
d23 = np.linalg.norm(x[i][1] - x[i][2])
d24 = np.linalg.norm(x[i][1] - x[i][3])
loss_list.append(np.maximum(d12 - d13 + 0.5, 0.0))
loss_list.append(np.maximum(d12 - d14 + 0.5, 0.0))
            loss_list.append(np.maximum(d12 - d23 + 0.5, 0.0))
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': | np.array([0.050316962184345455, 0.9292276112117481]) | numpy.array |
from pioneer.common import platform
from pioneer.das.api import categories
from pioneer.das.api.samples.sample import Sample
import cv2
import numpy as np
class Poly2d(Sample):
def __init__(self, index, datasource, virtual_raw = None, virtual_ts = None):
super(Poly2d, self).__init__(index, datasource, virtual_raw, virtual_ts)
def colored_image(self, resolution:tuple=None):
polygons = self.raw
_,_,ds_type = platform.parse_datasource_name(self.datasource.label)
poly_source = categories.get_source(ds_type)
image = np.zeros((polygons['resolution'][0],polygons['resolution'][1],3), dtype=np.uint8)
for poly in polygons['data']:
name, color = categories.get_name_color(poly_source,poly['classes'])
color = np.array(color)/255
cv2.fillPoly(image, [poly['polygon']], color)
return self.resize_mask(image, resolution) if resolution is not None else image
def mask_category(self, category:str, resolution:tuple=None, confidence_threshold:float=0.5):
polygons = self.raw
_,_,ds_type = platform.parse_datasource_name(self.datasource.label)
poly_source = categories.get_source(ds_type)
mask = | np.zeros((polygons['resolution'][0],polygons['resolution'][1]), dtype=np.uint8) | numpy.zeros |
from numpy import sqrt, pi, angle, fft, fix, zeros, roll, dot, mean, \
array, size, diag, tile, ones, asarray, polyfit, polyval, arange, \
percentile, ceil, float64
from thunder.rdds.series import Series
from thunder.utils.common import loadMatVar, checkParams
class TimeSeries(Series):
"""
Distributed collection of time series data.
Backed by an RDD of key-value pairs where the key is an identifier
and the value is a one-dimensional array. The common index
specifies the time of each entry in the array.
Parameters
----------
rdd : RDD of (tuple, array) pairs
RDD containing the series data.
index : array-like
Time indices, must be same length as the arrays in the input data.
Defaults to arange(len(data)) if not provided.
dims : Dimensions
Specify the dimensions of the keys (min, max, and count), can
avoid computation if known in advance.
See also
--------
Series : base class for Series data
"""
# use superclass __init__
@property
def _constructor(self):
return TimeSeries
def triggeredAverage(self, events, lag=0):
"""
Construct an average time series triggered on each of several events,
considering a range of lags before and after the event
Parameters
----------
events : array-like
List of events to trigger on
lag : int
Range of lags to consider, will cover (-lag, +lag)
"""
events = asarray(events)
m = zeros((lag*2+1, len(self.index)))
for i, shift in enumerate(range(-lag, lag+1)):
fillInds = events + shift
fillInds = fillInds[fillInds >= 0]
fillInds = fillInds[fillInds < len(self.index)]
m[i, fillInds] = 1
if lag == 0:
newIndex = 0
else:
newIndex = range(-lag, lag+1)
scale = m.sum(axis=1)
rdd = self.rdd.mapValues(lambda x: dot(m, x) / scale)
return self._constructor(rdd, index=newIndex).__finalize__(self)
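# Illustrative sketch (not part of the original library): assuming `ts` is a
# TimeSeries with 100 time points, the call below averages the 11-sample windows
# centred on frames 10, 40 and 70, and the result is indexed by lags -5..5.
#   avg = ts.triggeredAverage(events=[10, 40, 70], lag=5)
#   assert list(avg.index) == list(range(-5, 6))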
def blockedAverage(self, blockLength):
"""
Average blocks of a time series together, e.g. because they correspond
to trials of some repeated measurement or process
Parameters
----------
blockLength : int
Length of trial, must divide evenly into total length of time series
"""
n = len(self.index)
if divmod(n, blockLength)[1] != 0:
raise Exception('Trial length, %g, must evenly divide length of time series, %g'
% (blockLength, n))
if n == blockLength:
raise Exception('Trial length, %g, cannot be length of entire time series, %g'
% (blockLength, n))
m = tile(diag(ones((blockLength,))), [n/blockLength, 1]).T
newIndex = range(0, blockLength)
scale = n / blockLength
rdd = self.rdd.mapValues(lambda x: dot(m, x) / scale)
return self._constructor(rdd, index=newIndex).__finalize__(self)
def subsample(self, sampleFactor=2):
"""
Subsample time series by an integer factor
Parameters
----------
sampleFactor : positive integer, optional, default=2
"""
if sampleFactor < 0:
raise Exception('Factor for subsampling must be positive, got %g' % sampleFactor)
s = slice(0, len(self.index), sampleFactor)
newIndex = self.index[s]
return self._constructor(
self.rdd.mapValues(lambda v: v[s]), index=newIndex).__finalize__(self)
def fourier(self, freq=None):
"""
Compute statistics of a Fourier decomposition on time series data
Parameters
----------
freq : int
Digital frequency at which to compute coherence and phase
"""
def get(y, freq):
y = y - mean(y)
nframes = len(y)
ft = fft.fft(y)
ft = ft[0:int(fix(nframes/2))]
ampFt = 2*abs(ft)/nframes
amp = ampFt[freq]
ampSum = sqrt(sum(ampFt**2))
co = amp / ampSum
ph = -(pi/2) - angle(ft[freq])
if ph < 0:
ph += pi * 2
return array([co, ph])
if freq >= int(fix( | size(self.index) | numpy.size |
# -*- coding: utf-8 -*-
## @package som_cm.som
#
# Implementation of SOM.
# @author tody
# @date 2015/08/14
import os
import numpy as np
import matplotlib.pyplot as plt
from som_cm.np.norm import normVectors
## SOM parameter.
class SOMParam:
# @param h image grid size.
# @param L0 initial parameter for learning restraint.
# @param lmbd decay constant for the learning restraint (scaled by the number of samples).
# @param sigma0 initial neighborhood radius, given as a fraction of the grid size.
# @param dimension target dimension for SOM.
def __init__(self, h=32, L0=0.16, lmbd=0.6, sigma0=0.3, dimension=2):
self.h = h
self.L0 = L0
self.lmbd = lmbd
self.sigma0 = sigma0
self.dimension = dimension
## Implementation of SOM.
#
# SOM with numpy functions.
# - Compute nodes as n x 3 vector.
# - Avoid the loops for x and y.
# - xy coordinates are cached as n x 2 vector.
class SOM:
## Constructor
# @param samples training samples.
# @param param SOM parameter.
def __init__(self, samples, param=SOMParam()):
self._h = param.h
self._dimension = param.dimension
self._samples = samples
self._L0 = param.L0
self._nodes = self._initialNode(param.h, param.dimension)
num_samples = self.numSamples()
self._lmbd = param.lmbd * num_samples
self._sigma0 = param.sigma0 * param.h
self._computePositions(param.h, param.dimension)
self._t = 0
## Return the number of training samples.
def numSamples(self):
return len(self._samples)
## Return the current node image.
def nodeImage(self):
if self._dimension == 1:
return self._nodeImage1D()
else:
return self._nodeImage2D()
## Return the current time step t.
def currentStep(self):
return self._t
## Return if the training is finished.
def finished(self):
return self._t == self.numSamples()
## Process all training process.
def trainAll(self):
while self._t < len(self._samples):
self._train(self._t)
self._t += 1
## Process training step t to t+1.
def trainStep(self):
if self._t < len(self._samples):
self._train(self._t)
self._t += 1
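# Illustrative sketch (hypothetical usage, not part of the original module):
# train a 2D SOM on random RGB samples and display the learned colour map.
#   samples = np.random.rand(1000, 3)
#   som = SOM(samples, SOMParam(h=16))
#   som.trainAll()
#   plt.imshow(som.nodeImage()); plt.show()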
def _nodeImage1D(self):
h = 10
w = self._h
node_image = np.zeros((h, w, 3))
for y in range(h):
node_image[y, :, :] = self._nodes[:, :]
return node_image
def _nodeImage2D(self):
return self._nodes.reshape(self._h, self._h, 3)
## Initial node.
def _initialNode(self, h, dimension):
if dimension == 1:
return self._initialNode1D(h)
else:
return self._initialNode2D(h)
def _initialNode1D(self, h):
return np.random.rand(h, 3)
def _initialNode2D(self, h):
return np.random.rand(h, h, 3).reshape(-1, 3)
## Compute position.
def _computePositions(self, h, dimension):
if dimension == 1:
self._computePositions1D(h)
else:
self._computePositions2D(h)
def _computePositions1D(self, h):
x = np.arange(h)
self._positions = x
def _computePositions2D(self, h):
x = np.arange(h)
y = np.arange(h)
xs, ys = np.meshgrid(x, y)
xs = xs.flatten()
ys = ys.flatten()
self._positions = np.array([xs, ys]).T
## Train process.
def _train(self, t):
sample = self._samples[t]
# bmu
bmu_id = self._bmu(sample)
bmu_position = self._positions[bmu_id]
# update weight
D = normVectors(self._positions - bmu_position)
L = self._learningRestraint(t)
T = self._neighborhoodFunction(t, D)
# update nodes
for ci in range(3):
self._nodes[:, ci] += L * T * (sample[ci] - self._nodes[:, ci])
## BMU: best matching unit.
# Return the unit of minimum distance from the sample.
def _bmu(self, sample):
norms = normVectors(self._nodes - sample)
bmu_id = np.argmin(norms)
return bmu_id
## Neighborhood function: exp (-D^2 / 2 sigma^2)
def _neighborhoodFunction(self, t, D):
sigma = self._sigma0 * np.exp(-t / self._lmbd)
Theta = | np.exp(-D ** 2 / (2 * sigma ** 2)) | numpy.exp |
from flask import Flask, request
import os
import cv2
import dlib
import numpy as np
from keras.models import load_model
import os
# ---------------------------------
# LOADING MODEL
loadedModal = load_model('main.model')
# ---------------------------------
# CROPPING IMAGE FUNCTIONS
detector = dlib.get_frontal_face_detector()
new_path = './crops/'
def MyRec(rgb, x, y, w, h, v=20, color=(200, 0, 0), thikness=2):
"""To draw stylish rectangle around the objects"""
cv2.line(rgb, (x, y), (x+v, y), color, thikness)
cv2.line(rgb, (x, y), (x, y+v), color, thikness)
cv2.line(rgb, (x+w, y), (x+w-v, y), color, thikness)
cv2.line(rgb, (x+w, y), (x+w, y+v), color, thikness)
cv2.line(rgb, (x, y+h), (x, y+h-v), color, thikness)
cv2.line(rgb, (x, y+h), (x+v, y+h), color, thikness)
cv2.line(rgb, (x+w, y+h), (x+w, y+h-v), color, thikness)
cv2.line(rgb, (x+w, y+h), (x+w-v, y+h), color, thikness)
def save(img, name, bbox, width=48, height=48):
x, y, w, h = bbox
imgCrop = img[y:h, x: w]
# we need this line to reshape the images
imgCrop = cv2.resize(imgCrop, (width, height))
cv2.imwrite(name, imgCrop)
def faces(image, imageFilename):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray)
# detect the face
for counter, face in enumerate(faces):
print(counter)
x1, y1 = face.left(), face.top()
x2, y2 = face.right(), face.bottom()
cv2.rectangle(image, (x1, y1), (x2, y2), (220, 255, 220), 1)
MyRec(image, x1, y1, x2 - x1, y2 - y1, 10, (0, 250, 0), 3)
save(gray, "./images/" + imageFilename, (x1, y1, x2, y2))
newImg = cv2.imread("./images/" + imageFilename)
print("done saving")
return newImg
# ---------------------------------
# API
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Muce - Facial Emotion Recognition Service'
@app.route('/upload', methods=['POST'])
def upload():
try:
image = request.files['image']
if not image:
return {'message': 'No image uploaded!'}, 404
image.save(os.path.join("images/", image.filename))
inputImg = cv2.imread("./images/" + image.filename)
cropedImg = faces(inputImg, image.filename)
cropedImg = | np.array([cropedImg]) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import eigh
from scipy.optimize import fsolve
# Input Parameters
a=0.35 # Dimension of square column
area=np.square(a) # Area of C/S of column in m^2
I=np.power(a,4)/12 # Moment of inertia in m^4
ndof=3 # Number of degrees of freedom in the system
h=3 # Interfloor space in m
E=25e9 # Young's modulus of concrete in N/m^2
# Mass of each floor
m1=400000
m2=300000
m3=200000
# Loading the Response Spectra from Question 2
PeriodArray=np.load('DispResponseSpectra.npy')[:,0]
PeakDispArray=np.load('DispResponseSpectra.npy')[:,1]
PeakAcclnArray=np.load('AccResponseSpectra.npy')[:,1]
# Lumped mass matrix
M=np.diag([m1,m2,m3]) # Mass matrix
print('Mass matrix (kg):\n'+str(M)+'\n')
# Lateral stiffness
k=12*E*I/np.power(h,3) # Stiffness for one column per floor
k=6*k # Stiffness per floor
K=np.matrix([[2*k,-k,0],[-k,2*k,-k],[0,-k,k]]) #Stiffness
print('Stiffness matrix (N/m):\n'+str(K)+'\n')
print('Moment of inertia (m^4): '+str(I)+'\n')
# MODULE 1: using eigenvalue solution-Exact solution------------------
print('**************************EIGENVALUE SOLUTION*********************\n' )
OmegaSquare,EigVector=np.linalg.eig(K*np.linalg.inv(M))
OmegaExact=np.sqrt(OmegaSquare) # Natural frequency
print('Omega (1/s): '+str(OmegaExact))
V1=EigVector[:,0]/EigVector[0,0] # Scale the modal shape
V2=EigVector[:,1]/EigVector[0,1]
V3=EigVector[:,2]/EigVector[0,2]
# np.insert transpose the matrix, will use transpose again
V1plot=np.transpose(np.insert(V1,0,0)) #inserting zero at the start of array for plotting
V2plot=np.transpose(np.insert(V2,0,0))
V3plot=np.transpose(np.insert(V3,0,0))
xArray=np.arange(np.shape(V1plot)[0])
# Mode plots
fig,ax=plt.subplots(1, 3,sharey=True)
ax[0].grid(color='k', linestyle='-', linewidth=1)
ax[0].plot(V3plot,xArray)
ax[0].set_yticklabels([])
ax[0].set_xlabel('1st mode')
ax[1].grid(color='k', linestyle='-', linewidth=1)
ax[1].plot(V2plot,xArray)
ax[1].set_xlabel('2nd mode')
ax[2].grid(color='k', linestyle='-', linewidth=1)
ax[2].plot(V1plot,xArray)
ax[2].set_xlabel('3rd mode')
plt.savefig('Modes_exact.jpg',dpi=200)
OmegaExact=np.flip(OmegaExact)
T=2*np.pi/OmegaExact # time period
# Displacement calculation
mBar2=np.zeros((ndof,1))
mBar2[0,0]=(np.matmul(np.matmul(np.transpose(V1),M),V1)) # Modal mass
mBar2[1,0]=(np.matmul(np.matmul(np.transpose(V2),M),V2)) # Modal mass
mBar2[2,0]=(np.matmul(np.matmul(np.transpose(V3),M),V3))
Sa2=np.zeros(ndof)
Su2=np.zeros(ndof)
T=2*np.pi/OmegaExact # time period
# Using linear interpolation to find displacement corresponding to time period (T)
for j in range(0,int(np.shape(Sa2)[0])): # for each Sa2
for i in range(0,int(np.shape(PeriodArray)[0])): # searching over period
if PeriodArray[i]>T[j]: # Value after T i.e. T(i+2)
T3=PeriodArray[i]
T1=PeriodArray[i-1]
Y3=PeakDispArray[i]
Y1=PeakDispArray[i-1]
A3=PeakAcclnArray[i]
A1=PeakAcclnArray[i-1]
Su2[j]=Y1+(Y3-Y1)/(T3-T1)*(T[j]-T1) # Peak displacement corresponding to T in the response spectra
Sa2[j]=A1+(A3-A1)/(T3-T1)*(T[j]-T1)
break
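# Note (illustrative alternative, not in the original script): for periods inside the
# tabulated range and a PeriodArray sorted in increasing order, the manual search above
# is equivalent to
#   Su2[j] = np.interp(T[j], PeriodArray, PeakDispArray)
#   Sa2[j] = np.interp(T[j], PeriodArray, PeakAcclnArray)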
# Load participation factor
d=np.matrix([[1],[1],[1]]) # Earthquake input direction vector
l2=np.matmul(np.matmul(np.transpose(V1),M),d)/mBar2
print('Load participation factor: \n'+str(l2)+'\n')
# Maximum Displacement vectors
uMax2_1=l2[0,0]*Su2[0]*V1
uMax2_2=l2[1,0]*Su2[1]*V2
uMax2_3=l2[2,0]*Su2[2]*V3
# Total maximum displacement using SRSS
uMaxExact=np.zeros(ndof)
uMaxExact[0]=np.square(uMax2_1[0,0])+ np.square(uMax2_2[0,0])+np.square(uMax2_3[0,0])
uMaxExact[1]=np.square(uMax2_1[1,0])+ np.square(uMax2_2[1,0])+np.square(uMax2_3[1,0])
uMaxExact[2]=np.square(uMax2_1[2,0])+ np.square(uMax2_2[2,0])+np.square(uMax2_3[2,0])
print('Maximum Displacement (m): '+str(uMaxExact)+'\n')
# Maximum floor force vector
F=np.zeros(ndof)
F2_1=float(l2[0,0]*Sa2[0])*np.matmul(M,V1)
F2_2=float(l2[1,0]*Sa2[1])*np.matmul(M,V2)
F2_3=float(l2[2,0]*Sa2[2])*np.matmul(M,V3)
# Using SRSS
F[0]=np.square(F2_1[0,0])+ np.square(F2_2[0,0])+np.square(F2_3[0,0])
F[1]=np.square(F2_1[1,0])+ np.square(F2_2[1,0])+np.square(F2_3[1,0])
F[2]=np.square(F2_1[2,0])+ np.square(F2_2[2,0])+np.square(F2_3[2,0])
print('Shear forces (N): '+str(F)+'\n')
# Base shear
VbExact=np.sum(F)
print('Base shear force (N): '+str(VbExact)+'\n')
# Overturning moment
z=np.arange(h,h*ndof+1,h) # Height of floors
MbExact=np.sum(z[:]*F[:])
print('Overturning moment (N-m): '+str(MbExact)+'\n')
# MODULE 2: Using linearly increasing mode---------------------------
print('*********************LINEARLY INCREASING MODE*********************\n' )
V1=np.matrix([[1],[2],[3]]) # Linearly increasing mode
print('V1:\n'+str(V1)+'\n' )
mBar=float(np.matmul(np.matmul(np.transpose(V1),M),V1)) # Modal mass
kBar=float(np.matmul(np.matmul(np.transpose(V1),K),V1)) # Modal stiffness
Omega=np.sqrt(kBar/mBar) # Omega approx
T=2*np.pi/Omega # time period
# Using linear interpolation to find displacement corresponding to time period (T)
for i in range(0,int(np.shape(PeriodArray)[0])):
if PeriodArray[i]>T: # Value after T i.e. T(i+2)
T3=PeriodArray[i]
T1=PeriodArray[i-1]
Y3=PeakDispArray[i]
Y1=PeakDispArray[i-1]
A3=PeakAcclnArray[i]
A1=PeakAcclnArray[i-1]
break
Su=Y1+(Y3-Y1)/(T3-T1)*(T-T1) # Peak displacement corresponding to T in the response spectra
Sa=A1+(A3-A1)/(T3-T1)*(T-T1)
# Load participation factor
d=np.matrix([[1],[1],[1]]) # Earthquake input direction vector
l1=float(np.matmul(np.matmul(np.transpose(V1),M),d)/mBar)
print('Load participation factor: \n'+str(l1)+'\n')
# Maximum Displacement
uMax=l1*Su*V1 # Umax for each floor
print('Maximum Displacement (m): '+str(uMax)+'\n')
# Base shear
totalMass=m1+m2+m3 # Total mass of the structure
Vb=totalMass*Sa # Base Shear force
print('Base shear force (N): '+str(Vb)+'\n')
# Floor shear force
z=np.arange(h,h*ndof+1,h) # Height of floors
F1=np.zeros(ndof)
m=np.array([m1,m2,m3]) # Array is mass
denominator=np.dot(m,z)
for i in range(0,int(np.shape(F)[0])):
F1[i]=Vb*m[i]*z[i]/denominator
print('Shear forces (N): '+str(F1)+'\n')
# Overturning moment
Mb1=np.sum(z[:]*F1[:])
print('Overturning moment (N-m): '+str(Mb1)+'\n')
# MODULE 3: Using two Ritz vectors------------------------
print('********************TWO RITZ VECTOR WITH SRSS********************\n' )
r1= np.matrix([[1],[2],[3]]) # Linearly increasing mode
r2=np.matrix([[1],[4],[9]]) # Quadratically increasing mode
print('R1:\n'+str(r1)+'\n')
print('R2:\n'+str(r2)+'\n')
R=np.append(r1,r2,1)
M_Hat=np.matmul(np.matmul(np.transpose(R),M),R)
K_Hat=np.matmul(np.matmul(np.transpose(R),K),R)
OmegaSquare2,EigVector2=np.linalg.eig(K_Hat*np.linalg.inv(M_Hat))
Omega2=np.sqrt(OmegaSquare2) # Natural frequency
x1=EigVector2[:,0]/EigVector2[0,0] # Scale the modal shape
x2=EigVector2[:,1]/EigVector2[0,1]
V1=np.matmul(R,x1)
V2=np.matmul(R,x2)
mBar2=np.zeros((2,1))
mBar2[0,0]=(np.matmul(np.matmul(np.transpose(V1),M),V1)) # Modal mass
mBar2[1,0]=(np.matmul(np.matmul(np.transpose(V2),M),V2)) # Modal mass
Sa2=np.zeros(2)
Su2=np.zeros(2)
T=2*np.pi/Omega2 # time period
# Using linear interpolation to find displacement corresponding to time period (T)
for j in range(0,int(np.shape(Sa2)[0])):
for i in range(0,int(np.shape(PeriodArray)[0])):
if PeriodArray[i]>T[j]: # Value after T i.e. T(i+2)
T3=PeriodArray[i]
T1=PeriodArray[i-1]
Y3=PeakDispArray[i]
Y1=PeakDispArray[i-1]
A3=PeakAcclnArray[i]
A1=PeakAcclnArray[i-1]
Su2[j]=Y1+(Y3-Y1)/(T3-T1)*(T[j]-T1) # Peak displacement corresponding to T in the response spectra
Sa2[j]=A1+(A3-A1)/(T3-T1)*(T[j]-T1)
break
# Load participation factor
d=np.matrix([[1],[1],[1]]) # Earthquake input direction vector
l2=np.matmul(np.matmul(np.transpose(V1),M),d)/mBar2
print('Load participation factor: \n'+str(l2)+'\n')
# Maximum Displacement vectors
uMax2_1=l2[0,0]*Su2[0]*V1
uMax2_2=l2[1,0]*Su2[1]*V2
# Total maximum displacement using SRSS
uMax2=np.zeros(ndof)
uMax2[0]=np.square(uMax2_1[0,0])+ np.square(uMax2_2[0,0])
uMax2[1]=np.square(uMax2_1[1,0])+ np.square(uMax2_2[1,0])
uMax2[2]=np.square(uMax2_1[2,0])+ np.square(uMax2_2[2,0])
print('Maximum Displacement (m): '+str(uMax2)+'\n')
# Maximum floor force vector
F2=np.zeros(ndof)
F2_1=float(l2[0,0]*Sa2[0])*np.matmul(M,V1)
F2_2=float(l2[1,0]*Sa2[1])*np.matmul(M,V2)
# Using SRSS
F2[0]=np.square(F2_1[0,0])+ | np.square(F2_2[0,0]) | numpy.square |
"""
Implement a Vanilla NBP and NRW model for MC simulation of the NTE.
Code for the article "Monte Carlo Methods for the Neutron Transport Equation.
By <NAME>, <NAME>, <NAME>, <NAME>.
This file contains the code to produce the plots for the 2D version
of the NTE.
MIT License
Copyright (c) <NAME>, 2020.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.filters import gaussian_filter
class circle:
"""Shape to determine area of scatter/branching."""
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def time_in_circle(self, pos, theta, v):
"""Compute entry, exit times in circle for given trajectory."""
a = v**2
b = 2*v*((pos[0] - self.centre[0])*np.cos(theta)
+ (pos[1] - self.centre[1])*np.sin(theta))
c = ((pos[0] - self.centre[0])**2 + (pos[1] - self.centre[1])**2
- self.radius**2)
det = b**2 - 4*a*c
if det < 0:
self.entry_time = -100
self.exit_time = -100
elif det == 0:
self.entry_time = (-b - np.sqrt(det))/(2*a)
self.exit_time = self.entry_time
else:
if -b - np.sqrt(det) < 0:
if -b + np.sqrt(det) > 0:
self.entry_time = 0
self.exit_time = (-b + np.sqrt(det))/(2*a)
else:
self.entry_time = -100
self.exit_time = -100
else:
self.entry_time = (-b - | np.sqrt(det) | numpy.sqrt |
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
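# Illustrative call (hypothetical, not part of the test suite): draw 1000 source
# positions influenced by a Poisson(100) number of SIS halos.
#   x, y, g1, g2, k = generate_shear_field(1000, 100, np.random.RandomState(42))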
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
| np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor) | numpy.testing.assert_allclose |
from baselines.common import Dataset, explained_variance, fmt_row, zipsame
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from baselines.common.mpi_adam import MpiAdam
from baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
from tensorboardX import SummaryWriter
from baselines.ppo1.gcn_policy import discriminator,discriminator_net,embnet,oracle
import os
import copy
import random
import math
from keras.layers import Input,Reshape,Embedding,GRU,LSTM,Conv1D,LeakyReLU,MaxPooling1D,concatenate,Dropout,Dense,LeakyReLU,TimeDistributed
from keras import regularizers
from keras.optimizers import SGD,Adam
from keras.losses import mean_squared_error
from keras.models import Model
import keras.backend as K
from keras.activations import relu
from keras.engine.topology import Layer
from keras.callbacks import ModelCheckpoint,TensorBoard
num_feat = 10
num_proj = 10
proj_type = "linear"
lambda1 = 20
lambda2 = 10
nepis = 10
def traj_segment_generator(args, pi, env,disease_id, horizon, stochastic, d_step_func, d_final_func, num_episodes,env_ind):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
disease_id = yield
ob,disease_feat,disease_1hop,disease_genes = env.reset(disease_id)
ob_adj = ob['adj']
ob_node = ob['node']
cur_ep_ret = 0 # return in current episode
cur_ep_ret_env = 0
cur_ep_ret_d_step = 0
cur_ep_ret_d_final = 0
cur_ep_len = 0 # len of current episode
cur_ep_len_valid = 0
ep_rets = [] # returns of completed episodes in this segment
ep_rets_d_step = []
ep_rets_d_final = []
ep_rets_env = []
ep_lens = [] # lengths of ...
ep_lens_valid = [] # lengths of ...
ep_rew_final = []
ep_rew_final_stat = []
cur_num_epi = 1
#i = 0
# Initialize history arrays
# obs = np.array([ob for _ in range(horizon)])
#ob_adjs = np.array([ob_adj for _ in range(horizon)])
#ob_nodes = np.array([ob_node for _ in range(horizon)])
#ob_adjs_final = []
#ob_nodes_final = []
#rews = np.zeros(horizon, 'float32')
#vpreds = np.zeros(horizon, 'float32')
#news = np.zeros(horizon, 'int32')
#acs = np.array([ac for _ in range(horizon)])
#prevacs = acs.copy()
ob_adjs = []
ob_nodes = []
ob_adjs_final = []
ob_smi_final = []
ob_nodes_final = []
rews = []
vpreds = []
news = []
acs = []
prevacs = []
while True:
prevac = ac
ac, vpred, debug = pi.act(stochastic, ob,disease_feat)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
#if t > 0 and t % horizon == 0:
if t > 0 and cur_num_epi % num_episodes == num_episodes -1:
#i = 0
ob_adjs = np.array(ob_adjs)
ob_nodes = np.array(ob_nodes)
rews = np.array(rews, dtype=np.float32)
vpreds = np.squeeze(np.array(vpreds, dtype=np.float32))
news = np.array(news, dtype=np.int32)
acs = np.squeeze(np.array(acs))
prevacs = np.squeeze(np.array(prevacs))
yield {"ob_adj" : ob_adjs, "ob_node" : ob_nodes,"ob_adj_final" : np.array(ob_adjs_final), "ob_node_final" : np.array(ob_nodes_final), "rew" : rews, "vpred" : vpreds, "new" : news,"smi":np.array(ob_smi_final),"disease_1hop":disease_1hop,"disease_genes":disease_genes,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens, "ep_lens_valid" : ep_lens_valid, "ep_final_rew":ep_rew_final, "ep_final_rew_stat":ep_rew_final_stat,"ep_rets_env" : ep_rets_env,"ep_rets_d_step" : ep_rets_d_step,"ep_rets_d_final" : ep_rets_d_final}
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
disease_id = yield
ep_rets = []
ep_lens = []
ep_lens_valid = []
ep_rew_final = []
ep_rew_final_stat = []
ep_rets_d_step = []
ep_rets_d_final = []
ep_rets_env = []
ob_adjs_final = []
ob_smi_final = []
ob_nodes_final = []
ob_adjs = []
ob_nodes = []
rews = []
vpreds = []
news = []
acs = []
prevacs = []
cur_num_epi = 1
#i = t % horizon
# obs[i] = ob
ob_adjs.append(ob['adj'])
ob_nodes.append(ob['node'])
vpreds.append(vpred)
news.append(new)
acs.append(ac)
prevacs.append(prevac)
ob, rew_env, new, info,disease_feat,disease_1hop,disease_genes = env.step(ac)
rew_d_step = 0 # default
if rew_env>0: # if action valid
cur_ep_len_valid += 1
# add stepwise discriminator reward
if args.has_d_step==1:
if args.gan_type=='normal' or args.gan_type=='wgan':
rew_d_step = args.gan_step_ratio * (
d_step_func(ob['adj'][np.newaxis, :, :, :], ob['node'][np.newaxis, :, :, :])) / env.max_atom
elif args.gan_type == 'recommend':
rew_d_step = args.gan_step_ratio * (
max(1-d_step_func(ob['adj'][np.newaxis, :, :, :], ob['node'][np.newaxis, :, :, :]),-2)) / env.max_atom
rew_d_final = 0 # default
if new:
if args.has_d_final==1:
if args.gan_type == 'normal' or args.gan_type=='wgan':
rew_d_final = args.gan_final_ratio * (
d_final_func(ob['adj'][np.newaxis, :, :, :], ob['node'][np.newaxis, :, :, :]))
elif args.gan_type == 'recommend':
rew_d_final = args.gan_final_ratio * (
max(1 - d_final_func(ob['adj'][np.newaxis, :, :, :], ob['node'][np.newaxis, :, :, :]),
-2))
#print("reward d step: "+str(rew_d_step))
#print("reward d final: "+str(rew_d_final))
#print(rew_d_step,rew_env,rew_d_final)
rews.append(rew_d_step + rew_env + rew_d_final)
cur_ep_ret += rews[-1]
cur_ep_ret_d_step += rew_d_step
cur_ep_ret_d_final += rew_d_final
cur_ep_ret_env += rew_env
cur_ep_len += 1
if new:
if args.env=='molecule':
with open('molecule_gen/'+args.name_full+'_'+env_ind+'.csv', 'a') as f:
str = ''.join(['{},']*(len(info)+4))[:-1]+'\n'
f.write(str.format(info['smile'],info['smile_code'], disease_id,info['reward_valid'], info['reward_qed'], info['reward_sa'], info['final_stat'], rew_env, rew_d_step, rew_d_final, cur_ep_ret, info['flag_steric_strain_filter'], info['flag_zinc_molecule_filter'], info['stop']))
ob_adjs_final.append(ob['adj'])
ob_smi_final.append(info['smile_code'])
ob_nodes_final.append(ob['node'])
ep_rets.append(cur_ep_ret)
ep_rets_env.append(cur_ep_ret_env)
ep_rets_d_step.append(cur_ep_ret_d_step)
ep_rets_d_final.append(cur_ep_ret_d_final)
ep_lens.append(cur_ep_len)
ep_lens_valid.append(cur_ep_len_valid)
ep_rew_final.append(rew_env)
ep_rew_final_stat.append(info['final_stat'])
cur_ep_ret = 0
cur_ep_len = 0
cur_ep_len_valid = 0
cur_ep_ret_d_step = 0
cur_ep_ret_d_final = 0
cur_ep_ret_env = 0
ob,disease_feat,disease_1hop,disease_genes = env.reset(disease_id)
cur_num_epi += 1
t += 1
#i += 1
def traj_final_generator(pi, env, disease_id,batch_size, stochastic):
ob,disease_feat,disease_1hop,disease_genes = env.reset(disease_id)
ob_adj = ob['adj']
ob_node = ob['node']
ob_adjs = np.array([ob_adj for _ in range(batch_size)])
ob_nodes = np.array([ob_node for _ in range(batch_size)])
for i in range(batch_size):
ob,disease_feat,disease_1hop,disease_genes = env.reset(disease_id)
while True:
ac, vpred, debug = pi.act(stochastic, ob,disease_feat)
ob, rew_env, new, info,disease_feat,disease_1hop,disease_genes = env.step(ac)
np.set_printoptions(precision=2, linewidth=200)
# print('ac',ac)
# print('ob',ob['adj'],ob['node'])
if new:
ob_adjs[i]=ob['adj']
ob_nodes[i]=ob['node']
break
return ob_adjs,ob_nodes
def get_binding(args,seg,loss_func,disease_genes):
disease_1hop = np.array(seg["disease_1hop"])[disease_genes]
num_prot = disease_1hop.shape[0]
binding = np.zeros((len(seg["smi"]),num_prot))
size = 64
binding_thr = args.deepaffinity_thr
num = math.ceil(num_prot/size)
for i in range(len(seg["smi"])):
print(i)
drugs = np.tile(np.expand_dims(np.array(seg["smi"][i]),axis=0),[num_prot,1])
for j in range(num):
if j == num -1:
d_temp = drugs[(num - 1)*size:num_prot,:]
p_temp = disease_1hop[(num - 1)*size:num_prot,:]
binding[i,(num - 1)*size:num_prot] = np.squeeze(loss_func(p_temp,d_temp),axis=-1)
else:
d_temp = drugs[size*j:size*(j+1),:]
p_temp = disease_1hop[size*j:size*(j+1),:]
binding[i,size*j:size*(j+1)] = np.squeeze(loss_func(p_temp,d_temp),axis=-1)
binding[np.where(binding < binding_thr )] = 0
binding[np.where(binding >= binding_thr )] = 1
return binding
def get_classifier_reward(binding1,binding2):
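# (Descriptive note) binding1/binding2 are 0/1 matrices of shape (n_molecules, n_proteins),
# paired row by row:
#   reward  - per-pair fraction of proteins hit by exactly one of the two drugs (XOR)
#   adverse - per-pair fraction of proteins hit by both drugs (AND)
#   d1, d2  - per-drug fraction of proteins hit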
reward = np.sum(np.logical_xor(binding1,binding2),axis=1)/binding1.shape[1]
adverse = np.sum(np.logical_and(binding1,binding2),axis=1)/binding1.shape[1]
d1 = np.sum(binding1,axis=1)/binding1.shape[1]
d2 = np.sum(binding2,axis=1)/binding2.shape[1]
return reward,adverse,d1,d2
def add_vtarg_and_adv(args,seg, seg2, gamma, lam, loss_func1,loss_func2):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
binding1 = get_binding(args,seg,loss_func1,seg["disease_genes"])
binding2 = get_binding(args,seg2,loss_func2,seg2["disease_genes"])
temp_loss,adverse,binding_d1,binding_d2 = get_classifier_reward(binding1,binding2)
print("cls loss:")
print(temp_loss)
cls_weight = args.network_weight
cls_loss = np.zeros_like(seg["rew"])
T = len(seg["rew"])
idx_new = 0
for i in range(T):
if seg["new"][i]:
cls_loss[i] = temp_loss[idx_new]
idx_new +=1
seg["cls_loss"] = temp_loss
seg["adverse"] = adverse
seg["binding"] = binding_d1
seg["rew"] = cls_weight *cls_loss + seg["rew"] * args.others_weight
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
cls_loss2 = np.zeros_like(seg2["rew"])
T2 = len(seg2["rew"])
idx_new2 = 0
for i in range(T2):
if seg2["new"][i]:
cls_loss2[i] = temp_loss[idx_new2]
idx_new2 +=1
seg2["cls_loss"] = temp_loss
seg2["adverse"] = adverse
seg2["binding"] = binding_d2
seg2["rew"] = cls_weight * cls_loss2 + seg2["rew"] * args.others_weight
new2 = np.append(seg2["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred2 = np.append(seg2["vpred"], seg2["nextvpred"])
seg2["adv"] = gaelam2 = np.empty(T2, 'float32')
rew2 = seg2["rew"]
lastgaelam2 = 0
for t in reversed(range(T2)):
nonterminal2 = 1-new2[t+1]
delta2 = rew2[t] + gamma * vpred2[t+1] * nonterminal2 - vpred2[t]
gaelam2[t] = lastgaelam2 = delta2 + gamma * lam * nonterminal2 * lastgaelam2
seg2["tdlamret"] = seg2["adv"] + seg2["vpred"]
def learn(args, env1, env2, policy_fn,
num_disease,disease_id,
*,
timesteps_per_actorbatch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant', # annealing for stepsize parameters (epsilon and adam)
writer=None
):
# Setup losses and stuff
# ----------------------------------------
# ----------------------------------------
ob_space1 = env1.observation_space
ac_space1 = env1.action_space
disease_dim1 = env1.disease_feat.shape[1]
pi1 = policy_fn("pi1", ob_space1, ac_space1, disease_dim1) # Construct network for new policy
oldpi1 = policy_fn("oldpi1", ob_space1, ac_space1, disease_dim1) # Network for old policy
atarg1 = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret1 = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
lrmult1 = tf.placeholder(name='lrmult1', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
clip_param1 = clip_param * lrmult1 # Annealed clipping parameter epsilon
# ----------------------------------------
ob_space2 = env2.observation_space
ac_space2 = env2.action_space
disease_dim2 = env2.disease_feat.shape[1]
pi2 = policy_fn("pi2", ob_space2, ac_space2, disease_dim2) # Construct network for new policy
oldpi2 = policy_fn("oldpi2", ob_space2, ac_space2, disease_dim2) # Network for old policy
atarg2 = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret2 = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
lrmult2 = tf.placeholder(name='lrmult2', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
clip_param2 = clip_param * lrmult2 # Annealed clipping parameter epsilon
# ----------------------------------------
# ----------------------------------------
ob1 = {}
ob1['adj'] = U.get_placeholder_cached(name="adj1")
ob1['node'] = U.get_placeholder_cached(name="node1")
ob_gen1 = {}
ob_gen1['adj'] = U.get_placeholder(shape=[None, ob_space1['adj'].shape[0], None, None], dtype=tf.float32,name='adj_gen1')
ob_gen1['node'] = U.get_placeholder(shape=[None, 1, None, ob_space1['node'].shape[2]], dtype=tf.float32,name='node_gen1')
ob_real1 = {}
ob_real1['adj'] = U.get_placeholder(shape=[None,ob_space1['adj'].shape[0],None,None],dtype=tf.float32,name='adj_real1')
ob_real1['node'] = U.get_placeholder(shape=[None,1,None,ob_space1['node'].shape[2]],dtype=tf.float32,name='node_real1')
disease_dim = 16
ac1 = tf.placeholder(dtype=tf.int64, shape=[None,4], name='ac_real1')
disease = U.get_placeholder(shape=[None,disease_dim ], dtype=tf.float32,name='disease')
ob2 = {}
ob2['adj'] = U.get_placeholder_cached(name="adj2")
ob2['node'] = U.get_placeholder_cached(name="node2")
ob_gen2 = {}
ob_gen2['adj'] = U.get_placeholder(shape=[None, ob_space2['adj'].shape[0], None, None], dtype=tf.float32,name='adj_gen2')
ob_gen2['node'] = U.get_placeholder(shape=[None, 1, None, ob_space2['node'].shape[2]], dtype=tf.float32,name='node_gen2')
ob_real2 = {}
ob_real2['adj'] = U.get_placeholder(shape=[None,ob_space2['adj'].shape[0],None,None],dtype=tf.float32,name='adj_real2')
ob_real2['node'] = U.get_placeholder(shape=[None,1,None,ob_space2['node'].shape[2]],dtype=tf.float32,name='node_real2')
ac2 = tf.placeholder(dtype=tf.int64, shape=[None,4], name='ac_real2')
# ----------------------------------------
# ----------------------------------------
prot_data_class = Input(shape=(152,))
drug1_data_class = Input(shape=(100,))
drug2_data_class = Input(shape=(100,))
linear1 = oracle(prot_data_class,drug1_data_class)
linear2 = oracle(prot_data_class,drug2_data_class)
class_model1 = Model(inputs=[prot_data_class,drug1_data_class],outputs=[linear1])
class_model2 = Model(inputs=[prot_data_class,drug2_data_class],outputs=[linear2])
loss1 = linear1
loss2 = linear2
loss_class_func1 = U.function([prot_data_class,drug1_data_class],loss1)
loss_class_func2 = U.function([prot_data_class,drug2_data_class],loss2)
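# The binding oracle is shared by both pipelines: `oracle` scores a
# (protein, drug) pair, and loss_class_func1/2 wrap those scores as callable
# functions so get_binding() can evaluate them on numpy batches.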
## PPO loss
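# Clipped surrogate objective (PPO), as implemented for both policies below:
#   L^CLIP = E_t[ min( r_t(theta) * A_t, clip(r_t(theta), 1 - eps, 1 + eps) * A_t ) ]
# with r_t(theta) = pi(a_t|s_t) / pi_old(a_t|s_t); the total loss adds an
# entropy bonus (pol_entpen) and a squared-error value-function loss (vf_loss).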
kloldnew1 = oldpi1.pd.kl(pi1.pd)
ent1 = pi1.pd.entropy()
meankl1 = tf.reduce_mean(kloldnew1)
meanent1 = tf.reduce_mean(ent1)
pol_entpen1 = (-entcoeff) * meanent1
pi_logp1 = pi1.pd.logp(ac1)
oldpi_logp1 = oldpi1.pd.logp(ac1)
ratio_log1 = pi1.pd.logp(ac1) - oldpi1.pd.logp(ac1)
ratio1 = tf.exp(pi1.pd.logp(ac1) - oldpi1.pd.logp(ac1)) # pnew / pold
surr11 = ratio1 * atarg1 # surrogate from conservative policy iteration
surr21 = tf.clip_by_value(ratio1, 1.0 - clip_param1, 1.0 + clip_param1) * atarg1 #
pol_surr1 = - tf.reduce_mean(tf.minimum(surr11, surr21)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss1 = tf.reduce_mean(tf.square(pi1.vpred - ret1))
total_loss1 = pol_surr1 + pol_entpen1 + vf_loss1
losses1 = [pol_surr1, pol_entpen1, vf_loss1, meankl1, meanent1]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
# ----------------------------------------
kloldnew2 = oldpi2.pd.kl(pi2.pd)
ent2 = pi2.pd.entropy()
meankl2 = tf.reduce_mean(kloldnew2)
meanent2 = tf.reduce_mean(ent2)
pol_entpen2 = (-entcoeff) * meanent2
pi_logp2 = pi2.pd.logp(ac2)
oldpi_logp2 = oldpi2.pd.logp(ac2)
ratio_log2 = pi2.pd.logp(ac2) - oldpi2.pd.logp(ac2)
ratio2 = tf.exp(pi2.pd.logp(ac2) - oldpi2.pd.logp(ac2)) # pnew / pold
surr12 = ratio2 * atarg2 # surrogate from conservative policy iteration
surr22 = tf.clip_by_value(ratio2, 1.0 - clip_param2, 1.0 + clip_param2) * atarg2 #
pol_surr2 = - tf.reduce_mean(tf.minimum(surr12, surr22)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss2 = tf.reduce_mean(tf.square(pi2.vpred - ret2))
total_loss2 = pol_surr2 + pol_entpen2 + vf_loss2
losses2 = [pol_surr2, pol_entpen2, vf_loss2, meankl2, meanent2]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
# ----------------------------------------
# ----------------------------------------
## Expert loss
loss_expert1 = -tf.reduce_mean(pi_logp1)
## Discriminator loss
# loss_d_step, _, _ = discriminator(ob_real, ob_gen,args, name='d_step')
# loss_d_gen_step,_ = discriminator_net(ob_gen,args, name='d_step')
# loss_d_final, _, _ = discriminator(ob_real, ob_gen,args, name='d_final')
# loss_d_gen_final,_ = discriminator_net(ob_gen,args, name='d_final')
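# Step-level discriminator losses. In the code below, real samples use smoothed
# labels of 0.9 (one-sided label smoothing) and generated samples use 0. The
# 'normal' and 'recommend' variants differ only in the generator target
# (zeros vs. the smoothed real label), while 'wgan' delegates both the
# discriminator and generator losses to discriminator().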
if args.gan_type=='normal':
step_pred_real1, step_logit_real1 = discriminator_net(ob_real1, num_feat,args, name='d_step')
step_pred_gen1, step_logit_gen1 = discriminator_net(ob_gen1, num_feat,args, name='d_step')
loss_d_step_real1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_real1, labels=tf.ones_like(step_logit_real1)*0.9))
loss_d_step_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen1, labels=tf.zeros_like(step_logit_gen1)))
loss_d_step1 = loss_d_step_real1 + loss_d_step_gen1
loss_g_step_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen1, labels=tf.zeros_like(step_logit_gen1)))
elif args.gan_type=='recommend':
step_pred_real1, step_logit_real1 = discriminator_net(ob_real1, num_feat,args, name='d_step')
step_pred_gen1, step_logit_gen1 = discriminator_net(ob_gen1, num_feat,args, name='d_step')
loss_d_step_real1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_real1, labels=tf.ones_like(step_logit_real1)*0.9))
loss_d_step_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen1, labels=tf.zeros_like(step_logit_gen1)))
loss_d_step1 = loss_d_step_real1 + loss_d_step_gen1
loss_g_step_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen1, labels=tf.ones_like(step_logit_gen1)*0.9))
elif args.gan_type=='wgan':
loss_d_step1, loss_g_step_gen1, _ = discriminator(ob_real1, ob_gen1, num_feat, num_proj, proj_type, lambda1, lambda2, args, name='d_step')
#loss_d_step = loss_d_step*-1
#loss_g_step_gen,_ = discriminator_net(ob_gen,args, name='d_step')
# ----------------------------------------
loss_expert2 = -tf.reduce_mean(pi_logp2)
## Discriminator loss
# loss_d_step, _, _ = discriminator(ob_real, ob_gen,args, name='d_step')
# loss_d_gen_step,_ = discriminator_net(ob_gen,args, name='d_step')
# loss_d_final, _, _ = discriminator(ob_real, ob_gen,args, name='d_final')
# loss_d_gen_final,_ = discriminator_net(ob_gen,args, name='d_final')
if args.gan_type=='normal':
step_pred_real2, step_logit_real2 = discriminator_net(ob_real2, num_feat, args, name='d_step')
step_pred_gen2, step_logit_gen2 = discriminator_net(ob_gen2, num_feat, args, name='d_step')
loss_d_step_real2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_real2, labels=tf.ones_like(step_logit_real2)*0.9))
loss_d_step_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen2, labels=tf.zeros_like(step_logit_gen2)))
loss_d_step2 = loss_d_step_real2 + loss_d_step_gen2
loss_g_step_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen2, labels=tf.zeros_like(step_logit_gen2)))
elif args.gan_type=='recommend':
step_pred_real2, step_logit_real2 = discriminator_net(ob_real2, num_feat, args, name='d_step')
step_pred_gen2, step_logit_gen2 = discriminator_net(ob_gen2, num_feat, args, name='d_step')
loss_d_step_real2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_real2, labels=tf.ones_like(step_logit_real2)*0.9))
loss_d_step_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen2, labels=tf.zeros_like(step_logit_gen2)))
loss_d_step2 = loss_d_step_real2 + loss_d_step_gen2
loss_g_step_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=step_logit_gen2, labels=tf.ones_like(step_logit_gen2)*0.9))
elif args.gan_type=='wgan':
loss_d_step2, loss_g_step_gen2, _ = discriminator(ob_real2, ob_gen2, num_feat, num_proj, proj_type, lambda1, lambda2, args, name='d_step')
#loss_d_step = loss_d_step*-1
#loss_g_step_gen,_ = discriminator_net(ob_gen,args, name='d_step')
# ----------------------------------------
# ----------------------------------------
# loss_d_step = loss_d_step2 + loss_d_step1
# loss_g_step_gen = loss_g_step_gen2 + loss_g_step_gen1
# ----------------------------------------
# ----------------------------------------
final_pred_real1, final_logit_real1 = discriminator_net(ob_real1, num_feat, args, name='d_final')
final_pred_gen1, final_logit_gen1 = discriminator_net(ob_gen1, num_feat, args, name='d_final')
loss_d_final_real1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_real1, labels=tf.ones_like(final_logit_real1)*0.9))
loss_d_final_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen1, labels=tf.zeros_like(final_logit_gen1)))
loss_d_final1 = loss_d_final_real1 + loss_d_final_gen1
if args.gan_type == 'normal':
loss_g_final_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen1, labels=tf.zeros_like(final_logit_gen1)))
elif args.gan_type == 'recommend':
loss_g_final_gen1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen1, labels=tf.ones_like(final_logit_gen1)*0.9))
elif args.gan_type=='wgan':
loss_d_final1, loss_g_final_gen1, _ = discriminator(ob_real1, ob_gen1, num_feat, num_proj, proj_type, lambda1, lambda2, args, name='d_final')
#loss_d_final = loss_d_final*-1
#loss_g_final_gen,_ = discriminator_net(ob_gen,args, name='d_final')
# ----------------------------------------
final_pred_real2, final_logit_real2 = discriminator_net(ob_real2, num_feat, args, name='d_final')
final_pred_gen2, final_logit_gen2 = discriminator_net(ob_gen2, num_feat, args, name='d_final')
loss_d_final_real2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_real2, labels=tf.ones_like(final_logit_real2)*0.9))
loss_d_final_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen2, labels=tf.zeros_like(final_logit_gen2)))
loss_d_final2 = loss_d_final_real2 + loss_d_final_gen2
if args.gan_type == 'normal':
loss_g_final_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen2, labels=tf.zeros_like(final_logit_gen2)))
elif args.gan_type == 'recommend':
loss_g_final_gen2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=final_logit_gen2, labels=tf.ones_like(final_logit_gen2)*0.9))
elif args.gan_type=='wgan':
loss_d_final2, loss_g_final_gen2, _ = discriminator(ob_real2, ob_gen2, num_feat, num_proj, proj_type, lambda1, lambda2, args, name='d_final')
#loss_d_final = loss_d_final*-1
#loss_g_final_gen,_ = discriminator_net(ob_gen,args, name='d_final')
# ----------------------------------------
# ----------------------------------------
# loss_d_final = loss_d_final2 + loss_d_final1
# loss_g_final_gen = loss_g_final_gen2 + loss_g_final_gen1
# ----------------------------------------
# ----------------------------------------
var_list_pi1 = pi1.get_trainable_variables()
var_list_pi_stop1 = [var for var in var_list_pi1 if ('emb' in var.name) or ('gcn' in var.name) or ('stop' in var.name)]
var_list_pi2 = pi2.get_trainable_variables()
var_list_pi_stop2 = [var for var in var_list_pi2 if ('emb' in var.name) or ('gcn' in var.name) or ('stop' in var.name)]
var_list_d_step = [var for var in tf.global_variables() if 'd_step' in var.name]
var_list_d_final = [var for var in tf.global_variables() if 'd_final' in var.name]
var_list_classifier = [var for var in tf.global_variables() if 'class' in var.name]
# ----------------------------------------
# ----------------------------------------
## debug
debug={}
## loss update function
lossandgrad_ppo1 = U.function([ob1['adj'], ob1['node'], ac1, pi1.ac_real, oldpi1.ac_real, atarg1, ret1, lrmult1,disease], losses1 + [U.flatgrad(total_loss1, var_list_pi1)])
lossandgrad_expert1 = U.function([ob1['adj'], ob1['node'], ac1, pi1.ac_real,disease], [loss_expert1, U.flatgrad(loss_expert1, var_list_pi1)])
lossandgrad_expert_stop1 = U.function([ob1['adj'], ob1['node'], ac1, pi1.ac_real,disease], [loss_expert1, U.flatgrad(loss_expert1, var_list_pi_stop1)])
# ----------------------------------------
lossandgrad_ppo2 = U.function([ob2['adj'], ob2['node'], ac2, pi2.ac_real, oldpi2.ac_real, atarg2, ret2, lrmult2,disease], losses2 + [U.flatgrad(total_loss2, var_list_pi2)])
lossandgrad_expert2 = U.function([ob2['adj'], ob2['node'], ac2, pi2.ac_real,disease], [loss_expert2, U.flatgrad(loss_expert2, var_list_pi2)])
lossandgrad_expert_stop2 = U.function([ob2['adj'], ob2['node'], ac2, pi2.ac_real,disease], [loss_expert2, U.flatgrad(loss_expert2, var_list_pi_stop2)])
# ----------------------------------------
# ----------------------------------------
lossandgrad_d_step1 = U.function([ob_real1['adj'], ob_real1['node'], ob_gen1['adj'], ob_gen1['node']], [loss_d_step1, U.flatgrad(loss_d_step1, var_list_d_step)])
lossandgrad_d_final1 = U.function([ob_real1['adj'], ob_real1['node'], ob_gen1['adj'], ob_gen1['node']], [loss_d_final1, U.flatgrad(loss_d_final1, var_list_d_final)])
loss_g_gen_step_func1 = U.function([ob_gen1['adj'], ob_gen1['node']], loss_g_step_gen1)
loss_g_gen_final_func1 = U.function([ob_gen1['adj'], ob_gen1['node']], loss_g_final_gen1)
# ----------------------------------------
lossandgrad_d_step2 = U.function([ob_real2['adj'], ob_real2['node'], ob_gen2['adj'], ob_gen2['node']], [loss_d_step2, U.flatgrad(loss_d_step2, var_list_d_step)])
lossandgrad_d_final2 = U.function([ob_real2['adj'], ob_real2['node'], ob_gen2['adj'], ob_gen2['node']], [loss_d_final2, U.flatgrad(loss_d_final2, var_list_d_final)])
loss_g_gen_step_func2 = U.function([ob_gen2['adj'], ob_gen2['node']], loss_g_step_gen2)
loss_g_gen_final_func2 = U.function([ob_gen2['adj'], ob_gen2['node']], loss_g_final_gen2)
# ----------------------------------------
# ----------------------------------------
adam_pi1 = MpiAdam(var_list_pi1, epsilon=adam_epsilon)
adam_pi_stop1 = MpiAdam(var_list_pi_stop1, epsilon=adam_epsilon)
adam_pi2 = MpiAdam(var_list_pi2, epsilon=adam_epsilon)
adam_pi_stop2 = MpiAdam(var_list_pi_stop2, epsilon=adam_epsilon)
# ----------------------------------------
# ----------------------------------------
adam_d_step = MpiAdam(var_list_d_step, epsilon=adam_epsilon)
adam_d_final = MpiAdam(var_list_d_final, epsilon=adam_epsilon)
# ----------------------------------------
# ----------------------------------------
assign_old_eq_new1 = U.function([],[], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi1.get_variables(), pi1.get_variables())])
#
# compute_losses_expert = U.function([ob['adj'], ob['node'], ac, pi.ac_real],
# loss_expert)
compute_losses1 = U.function([ob1['adj'], ob1['node'], ac1, pi1.ac_real, oldpi1.ac_real, atarg1, ret1, lrmult1,disease], losses1)
# ----------------------------------------
assign_old_eq_new2 = U.function([],[], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi2.get_variables(), pi2.get_variables())])
#
# compute_losses_expert = U.function([ob['adj'], ob['node'], ac, pi.ac_real],
# loss_expert)
compute_losses2 = U.function([ob2['adj'], ob2['node'], ac2, pi2.ac_real, oldpi2.ac_real, atarg2, ret2, lrmult2,disease], losses2)
# ----------------------------------------
# ----------------------------------------
# Prepare for rollouts
# ----------------------------------------
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
# ----------------------------------------
# ----------------------------------------
lenbuffer1 = deque(maxlen=100) # rolling buffer for episode lengths
lenbuffer_valid1 = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer1 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_env1 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_d_step1 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_d_final1 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_final1 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_final_stat1 = deque(maxlen=100) # rolling buffer for episode rewards
# ----------------------------------------
lenbuffer2 = deque(maxlen=100) # rolling buffer for episode lengths
lenbuffer_valid2 = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer2 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_env2 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_d_step2 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_d_final2 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_final2 = deque(maxlen=100) # rolling buffer for episode rewards
rewbuffer_final_stat2 = deque(maxlen=100) # rolling buffer for episode rewards
# ----------------------------------------
# ----------------------------------------
classifier_buffer = deque(maxlen=1)
classifier_adverse_buffer = deque(maxlen=1)
classifier_binding1_buffer = deque(maxlen=1)
classifier_binding2_buffer = deque(maxlen=1)
# ----------------------------------------
# ----------------------------------------
#disease_count = 0
#disease_list = list(range(num_disease))
#random.shuffle(disease_list)
seg_gen1 = traj_segment_generator(args, pi1, env1,disease_id, timesteps_per_actorbatch, True, loss_g_gen_step_func1, loss_g_gen_final_func1, nepis,'1')
seg_gen2 = traj_segment_generator(args, pi2, env2,disease_id, timesteps_per_actorbatch, True, loss_g_gen_step_func2, loss_g_gen_final_func2, nepis,'2')
U.initialize()
#saver_classifier = tf.train.Saver(var_list_classifier)
#saver_classifier.restore(tf.get_default_session(), "/scratch/user/mostafa_karimi/rlproj/checkpoint_classifier/classifier_iter_99450")
class_model1.load_weights('./ckpt/weights.best.hdf5')
class_model2.load_weights('./ckpt/weights.best.hdf5')
assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
if args.load==1:
try:
fname1 = './ckpt/' + args.name_full_load1
fname2 = './ckpt/' + args.name_full_load2
sess = tf.get_default_session()
# sess.run(tf.global_variables_initializer())
saver1 = tf.train.Saver(var_list_pi1)
saver2 = tf.train.Saver(var_list_pi2)
saver1.restore(sess, fname1)
saver2.restore(sess, fname2)
iters_so_far = int(fname1.split('_')[-2])+1
print('model restored!', fname1, 'iters_so_far:', iters_so_far,flush=True)
print('model restored!', fname2, 'iters_so_far:', iters_so_far,flush=True)
except:
print(fname1, fname2, 'ckpt not found, start with iters 0')
adam_pi1.sync()
adam_pi_stop1.sync()
adam_pi2.sync()
adam_pi_stop2.sync()
adam_d_step.sync()
adam_d_final.sync()
counter = 0
level = 0
## start training
while True:
#if disease_count == len(disease_list):
# disease_count = 0
# random.shuffle(disease_list)
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
elif max_seconds and time.time() - tstart >= max_seconds:
break
if schedule == 'constant':
cur_lrmult = 1.0
elif schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
else:
raise NotImplementedError
# logger.log("********** Iteration %i ************"%iters_so_far)
seg_gen1.__next__()
seg1 = seg_gen1.send(disease_id)
seg_gen2.__next__()
seg2 = seg_gen2.send(disease_id)
add_vtarg_and_adv(args,seg1, seg2, gamma, lam, loss_class_func1,loss_class_func2)
print("iter: ",iters_so_far,flush=True)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob_adj1, ob_node1, ac1, atarg1, tdlamret1 = seg1["ob_adj"], seg1["ob_node"], seg1["ac"], seg1["adv"], seg1["tdlamret"]
vpredbefore1 = seg1["vpred"] # predicted value function before update
atarg1 = (atarg1 - atarg1.mean()) / atarg1.std() # standardized advantage function estimate
d1 = Dataset(dict(ob_adj=ob_adj1, ob_node=ob_node1, ac=ac1, atarg=atarg1, vtarg=tdlamret1), shuffle=not pi1.recurrent)
optim_batchsize1 = optim_batchsize or ob_adj1.shape[0]
ob_adj2, ob_node2, ac2, atarg2, tdlamret2 = seg2["ob_adj"], seg2["ob_node"], seg2["ac"], seg2["adv"], seg2["tdlamret"]
vpredbefore2 = seg2["vpred"] # predicted value function before update
atarg2 = (atarg2 - atarg2.mean()) / atarg2.std() # standardized advantage function estimate
d2 = Dataset(dict(ob_adj=ob_adj2, ob_node=ob_node2, ac=ac2, atarg=atarg2, vtarg=tdlamret2), shuffle=not pi2.recurrent)
optim_batchsize2 = optim_batchsize or ob_adj2.shape[0]
# inner training loop, train policy
for i_optim in range(optim_epochs):
loss_expert1=0
loss_expert_stop1=0
g_expert1=0
g_expert_stop1=0
loss_expert2=0
loss_expert_stop2=0
g_expert2=0
g_expert_stop2=0
loss_d_step1 = 0
loss_d_final1 = 0
g_ppo1 = 0
g_d_step1 = 0
g_d_final1 = 0
loss_d_step2 = 0
loss_d_final2 = 0
g_ppo2 = 0
g_d_step2 = 0
g_d_final2 = 0
_, _,disease_feat = env1.get_expert(optim_batchsize1,disease_id)
pretrain_shift = 5
## Expert
if iters_so_far>=args.expert_start and iters_so_far<=args.expert_end+pretrain_shift:
ob_expert1, ac_expert1,_ = env1.get_expert(optim_batchsize1,disease_id)
loss_expert1, g_expert1 = lossandgrad_expert1(ob_expert1['adj'], ob_expert1['node'], ac_expert1, ac_expert1,disease_feat)
loss_expert1 = np.mean(loss_expert1)
ob_expert2, ac_expert2,_ = env2.get_expert(optim_batchsize2,disease_id)
loss_expert2, g_expert2 = lossandgrad_expert2(ob_expert2['adj'], ob_expert2['node'], ac_expert2, ac_expert2,disease_feat)
loss_expert2 = np.mean(loss_expert2)
## PPO
if iters_so_far>=args.rl_start and iters_so_far<=args.rl_end:
assign_old_eq_new1() # set old parameter values to new parameter values
batch1 = d1.next_batch(optim_batchsize1)
assign_old_eq_new2() # set old parameter values to new parameter values
batch2 = d2.next_batch(optim_batchsize2)
# ppo
# if args.has_ppo==1:
if iters_so_far >= args.rl_start+pretrain_shift: # start the generator after the discriminator has been trained for a while
*newlosses1, g_ppo1 = lossandgrad_ppo1(batch1["ob_adj"], batch1["ob_node"], batch1["ac"], batch1["ac"], batch1["ac"], batch1["atarg"], batch1["vtarg"], cur_lrmult,disease_feat)
losses_ppo1=newlosses1
*newlosses2, g_ppo2 = lossandgrad_ppo2(batch2["ob_adj"], batch2["ob_node"], batch2["ac"], batch2["ac"], batch2["ac"], batch2["atarg"], batch2["vtarg"], cur_lrmult,disease_feat)
losses_ppo2=newlosses2
if args.has_d_step==1 and i_optim>=optim_epochs//2:
# update step discriminator
ob_expert1, _,_ = env1.get_expert(optim_batchsize1,disease_id,curriculum=args.curriculum,level_total=args.curriculum_num,level=level)
loss_d_step1, g_d_step1 = lossandgrad_d_step1(ob_expert1["adj"], ob_expert1["node"], batch1["ob_adj"], batch1["ob_node"])
adam_d_step.update(g_d_step1, optim_stepsize * cur_lrmult)
loss_d_step1 = np.mean(loss_d_step1)
ob_expert2, _,_ = env2.get_expert(optim_batchsize2,disease_id,curriculum=args.curriculum,level_total=args.curriculum_num,level=level)
loss_d_step2, g_d_step2 = lossandgrad_d_step2(ob_expert2["adj"], ob_expert2["node"], batch2["ob_adj"], batch2["ob_node"])
adam_d_step.update(g_d_step2, optim_stepsize * cur_lrmult)
loss_d_step2 = np.mean(loss_d_step2)
if args.has_d_final==1 and i_optim>=optim_epochs//4*3:
# update final discriminator
ob_expert1, _ ,_ = env1.get_expert(optim_batchsize1, disease_id,is_final=True,
curriculum=args.curriculum,level_total=args.curriculum_num, level=level)
seg_final_adj1, seg_final_node1 = traj_final_generator(pi1, copy.deepcopy(env1),disease_id, optim_batchsize1, True)
# update final discriminator
loss_d_final1, g_d_final1 = lossandgrad_d_final1(ob_expert1["adj"], ob_expert1["node"], seg_final_adj1, seg_final_node1)
# loss_d_final, g_d_final = lossandgrad_d_final(ob_expert["adj"], ob_expert["node"], ob_adjs, ob_nodes)
adam_d_final.update(g_d_final1, optim_stepsize * cur_lrmult)
# logger.log(fmt_row(13, np.mean(losses, axis=0)))
ob_expert2, _,_ = env2.get_expert(optim_batchsize2, disease_id,is_final=True,
curriculum=args.curriculum,level_total=args.curriculum_num, level=level)
seg_final_adj2, seg_final_node2 = traj_final_generator(pi2, copy.deepcopy(env2),disease_id, optim_batchsize2, True)
# update final discriminator
loss_d_final2, g_d_final2 = lossandgrad_d_final2(ob_expert2["adj"], ob_expert2["node"], seg_final_adj2, seg_final_node2)
# loss_d_final, g_d_final = lossandgrad_d_final(ob_expert["adj"], ob_expert["node"], ob_adjs, ob_nodes)
adam_d_final.update(g_d_final2, optim_stepsize * cur_lrmult)
#print("gradient1 PPO: "+str(0.2*g_ppo1)+ " Expert: "+str(0.05*g_expert1))
#print("gradient2 PPO: "+str(0.2*g_ppo2)+ " Expert: "+str(0.05*g_expert2))
#print("step size: "+ str(optim_stepsize)+" and "+str(cur_lrmult))
adam_pi1.update(0.2*g_ppo1+0.05*g_expert1, optim_stepsize * cur_lrmult)
adam_pi2.update(0.2*g_ppo2+0.05*g_expert2, optim_stepsize * cur_lrmult)
losses1 = []
for batch1 in d1.iterate_once(optim_batchsize1):
newlosses1 = compute_losses1(batch1["ob_adj"], batch1["ob_node"], batch1["ac"], batch1["ac"], batch1["ac"], batch1["atarg"], batch1["vtarg"], cur_lrmult,disease_feat)
losses1.append(newlosses1)
meanlosses1,_,_ = mpi_moments(losses1, axis=0)
losses2 = []
for batch2 in d2.iterate_once(optim_batchsize2):
newlosses2 = compute_losses2(batch2["ob_adj"], batch2["ob_node"], batch2["ac"], batch2["ac"], batch2["ac"], batch2["atarg"], batch2["vtarg"], cur_lrmult,disease_feat)
losses2.append(newlosses2)
meanlosses2,_,_ = mpi_moments(losses2, axis=0)
if writer is not None:
writer.add_scalar("loss_expert1", loss_expert1, iters_so_far)
writer.add_scalar("loss_expert_stop1", loss_expert_stop1, iters_so_far)
writer.add_scalar("loss_d_step1", loss_d_step1, iters_so_far)
writer.add_scalar("loss_d_final1", loss_d_final1, iters_so_far)
writer.add_scalar('grad_expert_min1', np.amin(g_expert1), iters_so_far)
writer.add_scalar('grad_expert_max1', np.amax(g_expert1), iters_so_far)
writer.add_scalar('grad_expert_norm1', np.linalg.norm(g_expert1), iters_so_far)
writer.add_scalar('grad_expert_stop_min1', np.amin(g_expert_stop1), iters_so_far)
writer.add_scalar('grad_expert_stop_max1', np.amax(g_expert_stop1), iters_so_far)
writer.add_scalar('grad_expert_stop_norm1', np.linalg.norm(g_expert_stop1), iters_so_far)
writer.add_scalar('grad_rl_min1', np.amin(g_ppo1), iters_so_far)
writer.add_scalar('grad_rl_max1', np.amax(g_ppo1), iters_so_far)
writer.add_scalar('grad_rl_norm1', np.linalg.norm(g_ppo1), iters_so_far)
writer.add_scalar('g_d_step_min1', np.amin(g_d_step1), iters_so_far)
writer.add_scalar('g_d_step_max1', np.amax(g_d_step1), iters_so_far)
writer.add_scalar('g_d_step_norm1', np.linalg.norm(g_d_step1), iters_so_far)
writer.add_scalar('g_d_final_min1', np.amin(g_d_final1), iters_so_far)
writer.add_scalar('g_d_final_max1', np.amax(g_d_final1), iters_so_far)
import numpy as np
from pathlib import Path
from scipy.ndimage import fourier_shift
from skimage._shared import testing
from skimage._shared.testing import assert_equal
from skimage.data import camera
from skimage.feature.register_translation import register_translation
from skimage.feature.masked_register_translation import (
masked_register_translation, cross_correlate_masked)
from skimage.io import imread
# Location of test images
# These images are taken from Dirk Padfield's MATLAB package
# available on his website: www.dirkpadfield.com
IMAGES_DIR = Path(__file__).parent / 'data'
def test_masked_registration_vs_register_translation():
"""masked_register_translation should give the same results as
register_translation in the case of trivial masks."""
reference_image = camera()
shift = (-7, 12)
shifted = np.real(np.fft.ifft2(fourier_shift(
np.fft.fft2(reference_image), shift)))
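# fourier_shift applies the translation as a phase ramp in the frequency
# domain, so `shifted` is `reference_image` translated by `shift` (with
# wrap-around at the borders).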
trivial_mask = np.ones_like(reference_image)
nonmasked_result, *_ = register_translation(reference_image, shifted)
masked_result = masked_register_translation(
reference_image, shifted, trivial_mask, overlap_ratio=1 / 10)
assert_equal(nonmasked_result, masked_result)
def test_masked_registration_random_masks():
"""masked_register_translation should be able to register translations
between images even with random masks."""
# Seed the random number generator for reproducible results
np.random.seed(23)
reference_image = camera()
shift = (-7, 12)
shifted = np.real(np.fft.ifft2(fourier_shift(
np.fft.fft2(reference_image), shift)))
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 05 14:05:24 2013
Aug 15 2020: add brunnermunzel, rank_compare_2indep
Author: <NAME>
"""
from statsmodels.compat.python import lzip
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_)
from scipy import stats
import pytest
from statsmodels.stats.contingency_tables import (
mcnemar, cochrans_q, SquareTable)
from statsmodels.sandbox.stats.runs import (Runs,
runstest_1samp, runstest_2samp)
from statsmodels.sandbox.stats.runs import mcnemar as sbmcnemar
from statsmodels.stats.nonparametric import (
rank_compare_2indep, rank_compare_2ordinal, prob_larger_continuous,
cohensd2problarger)
from statsmodels.tools.testing import Holder
def _expand_table(table):
'''expand a 2 by 2 contingency table to observations
'''
return np.repeat([[1, 1], [1, 0], [0, 1], [0, 0]], table.ravel(), axis=0)
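# For example, a table [[a, b], [c, d]] expands to a rows of [1, 1], b rows of
# [1, 0], c rows of [0, 1] and d rows of [0, 0].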
def test_mcnemar_exact():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
f_obs4 = np.array([[101, 30], [60, 33]])
f_obs5 = np.array([[101, 10], [30, 33]])
f_obs6 = np.array([[101, 10], [10, 33]])
#vassar college online computation
res1 = 0.000004
res2 = 0.378688
res3 = 0.089452
res4 = 0.00206
res5 = 0.002221
res6 = 1.
stat = mcnemar(f_obs1, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res1], decimal=6)
stat = mcnemar(f_obs2, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res2], decimal=6)
stat = mcnemar(f_obs3, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res3], decimal=6)
stat = mcnemar(f_obs4, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [30, res4], decimal=6)
stat = mcnemar(f_obs5, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res5], decimal=6)
stat = mcnemar(f_obs6, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res6], decimal=6)
def test_mcnemar_chisquare():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
#> mcn = mcnemar.test(matrix(c(101, 121, 59, 33),nrow=2))
res1 = [2.067222e01, 5.450095e-06]
res2 = [0.7751938, 0.3786151]
res3 = [2.87769784, 0.08981434]
stat = mcnemar(f_obs1, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res1, rtol=1e-6)
stat = mcnemar(f_obs2, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res2, rtol=1e-6)
stat = mcnemar(f_obs3, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res3, rtol=1e-6)
# test correction = False
res1 = [2.135556e01, 3.815136e-06]
res2 = [0.9379845, 0.3327967]
res3 = [3.17266187, 0.07488031]
res = mcnemar(f_obs1, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res1, rtol=1e-6)
res = mcnemar(f_obs2, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res2, rtol=1e-6)
res = mcnemar(f_obs3, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res3, rtol=1e-6)
def test_mcnemar_vectorized(reset_randomstate):
ttk = np.random.randint(5,15, size=(2,2,3))
with pytest.deprecated_call():
res = sbmcnemar(ttk, exact=False)
with pytest.deprecated_call():
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False) for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
with pytest.deprecated_call():
res = sbmcnemar(ttk, exact=False, correction=False)
with pytest.deprecated_call():
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False, correction=False)
for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
import numpy as np
from river import base
from river.utils import dict2numpy
from .base_neighbors import BaseNeighbors
class KNNRegressor(BaseNeighbors, base.Regressor):
"""k-Nearest Neighbors regressor.
This non-parametric regression method keeps track of the last
`window_size` training samples. Predictions are obtained by
aggregating the values of the closest n_neighbors stored-samples with
respect to a query sample.
Parameters
----------
n_neighbors
The number of nearest neighbors to search for.
window_size
The maximum size of the window storing the last observed samples.
leaf_size
scipy.spatial.cKDTree parameter. The maximum number of samples that can
be stored in one leaf node, which determines from which point the algorithm
will switch for a brute-force approach. The bigger this number the faster
the tree construction time, but the slower the query time will be.
p
p-norm value for the Minkowski metric. When `p=1`, this corresponds to the
Manhattan distance, while `p=2` corresponds to the Euclidean distance.
Valid values are in the interval $[1, +\\infty)$
aggregation_method
The method to aggregate the target values of neighbors.
| 'mean'
| 'median'
| 'weighted_mean'
kwargs
Other parameters passed to scipy.spatial.cKDTree.
Notes
-----
This estimator is not optimal for a mixture of categorical and numerical
features. This implementation treats all features from a given stream as
numerical.
Examples
--------
>>> from river import datasets
>>> from river import evaluate
>>> from river import metrics
>>> from river import neighbors
>>> from river import preprocessing
>>> dataset = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... neighbors.KNNRegressor(window_size=50)
... )
>>> metric = metrics.MAE()
>>> evaluate.progressive_val_score(dataset, model, metric)
MAE: 0.441308
"""
_MEAN = "mean"
_MEDIAN = "median"
_WEIGHTED_MEAN = "weighted_mean"
def __init__(
self,
n_neighbors: int = 5,
window_size: int = 1000,
leaf_size: int = 30,
p: float = 2,
aggregation_method: str = "mean",
**kwargs
):
super().__init__(
n_neighbors=n_neighbors, window_size=window_size, leaf_size=leaf_size, p=p, **kwargs
)
if aggregation_method not in {self._MEAN, self._MEDIAN, self._WEIGHTED_MEAN}:
raise ValueError(
"Invalid aggregation_method: {}.\n"
"Valid options are: {}".format(
aggregation_method, {self._MEAN, self._MEDIAN, self._WEIGHTED_MEAN}
)
)
self.aggregation_method = aggregation_method
self.kwargs = kwargs
def _unit_test_skips(self):
return {"check_emerging_features", "check_disappearing_features"}
def learn_one(self, x, y):
"""Update the model with a set of features `x` and a real target value `y`.
Parameters
----------
x
A dictionary of features.
y
A numeric target.
Returns
-------
self
Notes
-----
For the K-Nearest Neighbors regressor, fitting the model is the
equivalent of inserting the newer samples in the observed window,
and if the `window_size` is reached, removing older results.
"""
x_arr = dict2numpy(x)
self.data_window.append(x_arr, y)
return self
def predict_one(self, x):
"""Predict the target value of a set of features `x`.
Search the KDTree for the `n_neighbors` nearest neighbors.
Parameters
----------
x
A dictionary of features.
Returns
-------
The prediction.
"""
if self.data_window.size == 0:
# Not enough information available, return default prediction
return 0.0
x_arr = dict2numpy(x)
dists, neighbor_idx = self._get_neighbors(x_arr)
target_buffer = self.data_window.targets_buffer
# If the closest neighbor has a distance of 0, then return its output
if dists[0][0] == 0:
return target_buffer[neighbor_idx[0][0]]
if self.data_window.size < self.n_neighbors: # Select only the valid neighbors
neighbor_vals = [
target_buffer[index]
for cnt, index in enumerate(neighbor_idx[0])
if cnt < self.data_window.size
]
dists = [dist for cnt, dist in enumerate(dists[0]) if cnt < self.data_window.size]
else:
neighbor_vals = [target_buffer[index] for index in neighbor_idx[0]]
dists = dists[0]
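# Aggregate the neighbors' target values according to the configured method:
# plain mean, median, or (when selected) a distance-weighted mean.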
if self.aggregation_method == self._MEAN:
return np.mean(neighbor_vals)
elif self.aggregation_method == self._MEDIAN:
return np.median(neighbor_vals)
import HaPPPy
import unittest
import numpy as np
import scipy.integrate
import math
from math import sqrt
from scipy import constants
hbar = constants.hbar
e = constants.e
m = 3
intersection = 0
sigma = 1
n_i = 200
l_i = 50
target_n = 1000
target_l = 200
step_n = 200
step_l = 50
unit_gauss = 1
omega = 1.875537349*(10**13)
class OneBodyTestSuite(unittest.TestCase):
"""A test class to the OneBody module.
First of all, i would like to test the import and if the constants are choosen.
These are the Main target of my first few tests.
"""
def test_input(self):
""" This test should confirm, that the ammount of gridpoints are positive."""
self.assertTrue((n_i > 0),msg ="You have to choose a positive ammount of gridpoints.")
def test_OneBody_exists(self):
""" This test, checks wether the One Body module exists.
"""
self.assertTrue(hasattr(HaPPPy, 'OneBody'),msg ="One Body Module doesn't exist")
def test_ammount_cutted_eigenvalues(self):
""" This confirm that the user didn't want to get more cutted eigenvalues then eigenvalues exists."""
self.assertTrue((m <= n_i),msg =" You want to get more cutted eigenvalues, then eigenvalues.")
""" Now the test start to check the kinetic matrices from the chosen potential."""
def test_OneBody_Harmonic_kinetic(self):
""" This test check the kinetic Matrix in the harmonic potential by building diagonalarrays."""
for n in range (n_i,target_n,step_n):
for l in range (l_i,target_l,step_l):
OBSolver = HaPPPy.OneBody.OneBodySolver(l,n,m)
_,_,_,kin_mat,_,_,_,_ = OBSolver.calcualteHarmonicPotential(intersection)
diagonalarray_harm_kin_main = np.diagonal(kin_mat, 0)
""" all elements in the diagonalarray of the main "diagonale" should be positive."""
self.assertTrue(np.all(diagonalarray_harm_kin_main > 0))
"""Defines a polyhedron."""
import numpy as np
import rowan
from scipy.sparse.csgraph import connected_components
from .base_classes import Shape3D
from .convex_polygon import ConvexPolygon, _is_convex
from .polygon import Polygon, _is_simple
from .sphere import Sphere
from .utils import _generate_ax, _set_3d_axes_equal, translate_inertia_tensor
try:
import miniball
MINIBALL = True
except ImportError:
MINIBALL = False
def _face_to_edges(face, reverse=False):
"""Convert a face into a sequence of edges (tuples).
Args:
face (array-like):
A face composed of vertex indices.
reverse (bool):
Whether to return the edges in reverse.
Returns:
list[tuple[int, int]]:
A list of edges where each is a tuple of a pair of vertices.
"""
shift = 1 if reverse else -1
return list(zip(*np.stack((face, np.roll(face, shift)))))
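# For example, _face_to_edges([0, 1, 2]) gives [(0, 1), (1, 2), (2, 0)],
# i.e. the face traversed as a cycle of directed edges.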
class Polyhedron(Shape3D):
"""A three-dimensional polytope.
A polyhedron is defined by a set of vertices and a set of faces
composed of the vertices. On construction, the faces are reordered
counterclockwise with respect to an outward normal. The polyhedron
provides various standard geometric calculations, such as volume and
surface area. Most features of the polyhedron can be accessed via
properties, including the plane equations defining the faces and the
neighbors of each face.
.. note::
For the purposes of calculations like moments of inertia, the
polyhedron is assumed to be of constant, unit density.
Args:
vertices (:math:`(N, 3)` :class:`numpy.ndarray`):
The vertices of the polyhedron.
faces (list(list)):
The faces of the polyhedron.
faces_are_convex (bool, optional):
Whether or not the faces of the polyhedron are all convex.
This is used to determine whether certain operations like
coplanar face merging are allowed (Default value: False).
Example:
>>> cube = coxeter.shapes.ConvexPolyhedron(
... [[1, 1, 1], [1, -1, 1], [1, 1, -1], [1, -1, -1],
... [-1, 1, 1], [-1, -1, 1], [-1, 1, -1], [-1, -1, -1]])
>>> cube = coxeter.shapes.Polyhedron(
... vertices=cube.vertices, faces=cube.faces)
>>> bounding_sphere = cube.bounding_sphere
>>> import numpy as np
>>> assert np.isclose(bounding_sphere.radius, np.sqrt(3))
>>> cube.center
array([0., 0., 0.])
>>> cube.circumsphere
<coxeter.shapes.sphere.Sphere object at 0x...>
>>> cube.faces
[array([4, 5, 1, 0], dtype=int32), array([0, 2, 6, 4], dtype=int32),
array([6, 7, 5, 4], dtype=int32), array([0, 1, 3, 2], dtype=int32),
array([5, 7, 3, 1], dtype=int32), array([2, 3, 7, 6], dtype=int32)]
>>> cube.gsd_shape_spec
{'type': 'Mesh', 'vertices': [[1.0, 1.0, 1.0], [1.0, -1.0, 1.0],
[1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [-1.0, 1.0, 1.0],
[-1.0, -1.0, 1.0], [-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]], 'faces':
[array([4, 5, 1, 0], dtype=int32), array([0, 2, 6, 4], dtype=int32),
array([6, 7, 5, 4], dtype=int32), array([0, 1, 3, 2], dtype=int32),
array([5, 7, 3, 1], dtype=int32), array([2, 3, 7, 6], dtype=int32)]}
>>> assert np.allclose(
... cube.inertia_tensor,
... np.diag([16. / 3., 16. / 3., 16. / 3.]))
>>> assert np.isclose(cube.iq, np.pi / 6.)
>>> cube.neighbors
[array([1, 2, 3, 4]), array([0, 2, 3, 5]), array([0, 1, 4, 5]),
array([0, 1, 4, 5]), array([0, 2, 3, 5]), array([1, 2, 3, 4])]
>>> cube.normals
array([[ 0., 0., 1.],
[ 0., 1., -0.],
[-1., 0., 0.],
[ 1., -0., 0.],
[ 0., -1., 0.],
[ 0., 0., -1.]])
>>> cube.num_faces
6
>>> cube.num_vertices
8
>>> assert np.isclose(cube.surface_area, 24.0)
>>> cube.vertices
array([[ 1., 1., 1.],
[ 1., -1., 1.],
[ 1., 1., -1.],
[ 1., -1., -1.],
[-1., 1., 1.],
[-1., -1., 1.],
[-1., 1., -1.],
[-1., -1., -1.]])
>>> assert np.isclose(cube.volume, 8.0)
"""
def __init__(self, vertices, faces, faces_are_convex=None):
self._vertices = np.array(vertices, dtype=np.float64)
self._faces = [face for face in faces]
if faces_are_convex is None:
faces_are_convex = all(len(face) == 3 for face in faces)
self._faces_are_convex = faces_are_convex
self._find_equations()
self._find_neighbors()
def _find_equations(self):
"""Find the plane equations of the polyhedron faces."""
self._equations = np.empty((len(self.faces), 4))
for i, face in enumerate(self.faces):
# The direction of the normal is selected such that vertices that
# are already ordered counterclockwise will point outward.
normal = np.cross(
self.vertices[face[2]] - self.vertices[face[1]],
self.vertices[face[0]] - self.vertices[face[1]],
)
normal /= np.linalg.norm(normal)
self._equations[i, :3] = normal
# Sign conventions chosen to match scipy.spatial.ConvexHull
# We use ax + by + cz + d = 0 (not ax + by + cz = d)
self._equations[i, 3] = -normal.dot(self.vertices[face[0]])
def _find_neighbors(self):
"""Find neighbors of faces."""
self._neighbors = [[] for _ in range(self.num_faces)]
for i, j, _ in self._get_face_intersections():
self._neighbors[i].append(j)
self._neighbors[j].append(i)
self._neighbors = [np.array(neigh) for neigh in self._neighbors]
def _get_face_intersections(self):
"""Get pairs of faces and their common edges.
This function yields a generator of tuples of the form (face, neighbor,
(vertex1, vertex2)) indicating neighboring faces and their common
edge.
"""
# First enumerate all edges of each neighbor. We include both
# directions of the edges for comparison.
face_edges = [
set(_face_to_edges(f) + _face_to_edges(f, True)) for f in self.faces
]
for i in range(self.num_faces):
for j in range(i + 1, self.num_faces):
common_edges = face_edges[i].intersection(face_edges[j])
if len(common_edges) > 0:
# Can never have multiple intersections, but we should have
# the same edge show up twice (forward and reverse).
assert len(common_edges) == 2
common_edge = list(common_edges)[0]
yield (i, j, (common_edge[0], common_edge[1]))
@property
def gsd_shape_spec(self):
"""dict: Get a :ref:`complete GSD specification <shapes>`.""" # noqa: D401
return {
"type": "Mesh",
"vertices": self._vertices.tolist(),
"faces": self._faces,
}
def merge_faces(self, atol=1e-8, rtol=1e-5):
"""Merge coplanar faces to a given tolerance.
Whether or not faces should be merged is determined using
:func:`numpy.allclose` to compare the plane equations of neighboring
faces. Connected components of mergeable faces are then merged into
a single face. This method can be safely called many times with
different tolerances, however, the operation is destructive in the
sense that merged faces cannot be recovered. Users wishing to undo a
merge to attempt a less expansive merge must build a new polyhedron.
Args:
atol (float):
Absolute tolerance for :func:`numpy.allclose`.
rtol (float):
Relative tolerance for :func:`numpy.allclose`.
"""
if not self._faces_are_convex:
# Can only sort faces if they are guaranteed to be convex.
raise ValueError(
"Faces cannot be merged unless they are convex because the "
"correct ordering of vertices in a face cannot be determined "
"for nonconvex faces."
)
# Construct a graph where connectivity indicates merging, then identify
# connected components to merge.
merge_graph = np.zeros((self.num_faces, self.num_faces))
import functools
import numpy as np
from scipy.ndimage import map_coordinates
def uv_meshgrid(w, h):
uv = np.stack(np.meshgrid(range(w), range(h)), axis=-1)
uv = uv.astype(np.float64)
uv[..., 0] = ((uv[..., 0] + 0.5) / w - 0.5) * 2 * np.pi
uv[..., 1] = ((uv[..., 1] + 0.5) / h - 0.5) * np.pi
return uv
@functools.lru_cache()
def _uv_tri(w, h):
uv = uv_meshgrid(w, h)
sin_u = np.sin(uv[..., 0])
cos_u = np.cos(uv[..., 0])
tan_v = np.tan(uv[..., 1])
return sin_u, cos_u, tan_v
def uv_tri(w, h):
sin_u, cos_u, tan_v = _uv_tri(w, h)
return sin_u.copy(), cos_u.copy(), tan_v.copy()
def coorx2u(x, w=1024):
return ((x + 0.5) / w - 0.5) * 2 * np.pi
def coory2v(y, h=512):
return ((y + 0.5) / h - 0.5) * np.pi
def u2coorx(u, w=1024):
return (u / (2 * np.pi) + 0.5) * w - 0.5
def v2coory(v, h=512):
return (v / np.pi + 0.5) * h - 0.5
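# Pixel/angle conventions used throughout: column x in [0, w) maps to
# longitude u in (-pi, pi) and row y in [0, h) maps to latitude v in
# (-pi/2, pi/2), with u2coorx/v2coory as the inverse mappings.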
def uv2xy(u, v, z=-50):
c = z / np.tan(v)
x = c * np.cos(u)
y = c * np.sin(u)
return x, y
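# uv2xy intersects the viewing ray for (u, v) with a horizontal plane at
# height z and returns its (x, y) coordinates on that plane.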
def pano_connect_points(p1, p2, z=-50, w=1024, h=512):
if p1[0] == p2[0]:
return np.array([p1, p2], np.float32)
u1 = coorx2u(p1[0], w)
v1 = coory2v(p1[1], h)
u2 = coorx2u(p2[0], w)
v2 = coory2v(p2[1], h)
x1, y1 = uv2xy(u1, v1, z)
x2, y2 = uv2xy(u2, v2, z)
if abs(p1[0] - p2[0]) < w / 2:
pstart = np.ceil(min(p1[0], p2[0]))
pend = np.floor(max(p1[0], p2[0]))
else:
pstart = np.ceil(max(p1[0], p2[0]))
pend = np.floor(min(p1[0], p2[0]) + w)
coorxs = (np.arange(pstart, pend + 1) % w).astype(np.float64)
vx = x2 - x1
vy = y2 - y1
us = coorx2u(coorxs, w)
ps = (np.tan(us) * x1 - y1) / (vy - np.tan(us) * vx)
cs = np.sqrt((x1 + ps * vx) ** 2 + (y1 + ps * vy) ** 2)
vs = np.arctan2(z, cs)
coorys = v2coory(vs)
return np.stack([coorxs, coorys], axis=-1)
def pano_stretch(img, mask, corners, kx, ky, order=1):
'''
img: [H, W, C]
corners: [N, 2] in image coordinate (x, y) format
kx: Stretching along front-back direction
ky: Stretching along left-right direction
order: Interpolation order. 0 for nearest-neighbor. 1 for bilinear.
'''
# Process image
sin_u, cos_u, tan_v = uv_tri(img.shape[1], img.shape[0])
u0 = np.arctan2(sin_u * kx / ky, cos_u)
import os
import time
import matplotlib.pyplot as plt
from utils.file import load_from_json
from utils.model import load_lenet
import numpy as np
from utils.model import load_pool
from utils.metrics import error_rate, get_corrections
from models.athena import Ensemble, ENSEMBLE_STRATEGY
def generate_ae(model, data, labels, attack_configs, save=False, output_dir=None):
"""
Generate adversarial examples
:param model: WeakDefense. The targeted model.
:param data: array. The benign samples to generate adversarial for.
:param labels: array or list. The true labels.
:param attack_configs: dictionary. Attacks and corresponding settings.
:param save: boolean. True, if save the adversarial examples.
:param output_dir: str or path. Location to save the adversarial examples.
It cannot be None when save is True.
:return:
"""
img_rows, img_cols = data.shape[1], data.shape[2]
num_attacks = attack_configs.get("num_attacks")
data_loader = (data, labels)
if len(labels.shape) > 1:
labels = np.asarray([np.argmax(p) for p in labels])
# generate attacks one by one
for id in range(num_attacks):
key = "configs{}".format(id)
data_adv = generate(model=model,
data_loader=data_loader,
attack_args=attack_configs.get(key)
)
# predict the adversarial examples
predictions = model.predict(data_adv)
predictions = np.asarray([np.argmax(p) for p in predictions])
err = error_rate(y_pred=predictions, y_true=labels)
print(">>> error rate:", err)
# plotting some examples
num_plotting = min(data.shape[0], 2)
for i in range(num_plotting):
img = data_adv[i].reshape((img_rows, img_cols))
plt.imshow(img, cmap='gray')
title = '{}: {}->{}'.format(attack_configs.get(key).get("description"),
labels[i],
predictions[i]
)
plt.title(title)
plt.show()
plt.close()
# save the adversarial example
if save:
if output_dir is None:
raise ValueError("Cannot save images to a none path.")
# save with a random name
file = os.path.join(output_dir, "{}.npy".format(time.monotonic()))
print("Save the adversarial examples to file [{}].".format(file))
np.save(file, data_adv)
model_configs = load_from_json("./md.json")
data_configs = load_from_json("./dt.json")
attack_configs = load_from_json("./at.json")
# load the targeted model
model_file = os.path.join(model_configs.get("dir"), model_configs.get("um_file"))
target = load_lenet(file=model_file, wrap=True)
# load the benign samples
data_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
data_bs = np.load(data_file)
# load the corresponding true labels
label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
labels = np.load(label_file)
# generate adversarial examples for a small subset
data_bs = data_bs[:10]
labels = labels[:10]
generate_ae(model=target, data=data_bs, labels=labels, attack_configs=attack_configs)
# copied from tutorials/eval_model.py
def evaluate(trans_configs, model_configs,
data_configs, save=False, output_dir=None):
"""
Apply transformation(s) on images.
:param trans_configs: dictionary. The collection of the parameterized transformations to test.
in the form of
{ configsx: {
param: value,
}
}
The key of a configuration is 'configs'x, where 'x' is the id of corresponding weak defense.
:param model_configs: dictionary. Defines model related information.
Such as, location, the undefended model, the file format, etc.
:param data_configs: dictionary. Defines data related information.
Such as, location, the file for the true labels, the file for the benign samples,
the files for the adversarial examples, etc.
:param save: boolean. Save the transformed sample or not.
:param output_dir: path or str. The location to store the transformed samples.
It cannot be None when save is True.
:return:
"""
# Load the baseline defense (PGD-ADT model)
baseline = load_lenet(file=model_configs.get('jsma_trained'), trans_configs=None,
use_logits=False, wrap=False)
# get the undefended model (UM)
file = os.path.join(model_configs.get('dir'), model_configs.get('um_file'))
undefended = load_lenet(file=file,
trans_configs=trans_configs.get('configs0'),
wrap=True)
print(">>> um:", type(undefended))
# load weak defenses into a pool
pool, _ = load_pool(trans_configs=trans_configs,
model_configs=model_configs,
active_list=True,
wrap=True)
# create an AVEP ensemble from the WD pool
wds = list(pool.values())
print(">>> wds:", type(wds), type(wds[0]))
ensemble = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)
# load the benign samples
bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
x_bs = np.load(bs_file)
img_rows, img_cols = x_bs.shape[1], x_bs.shape[2]
# load the corresponding true labels
label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
labels = np.load(label_file)
# get indices of benign samples that are correctly classified by the targeted model
print(">>> Evaluating UM on [{}], it may take a while...".format(bs_file))
pred_bs = undefended.predict(x_bs)
corrections = get_corrections(y_pred=pred_bs, y_true=labels)
# Evaluate AEs.
results = {}
ae_list = data_configs.get('ae_files')
ae_file = os.path.join(data_configs.get('dir'), ae_list[4])
x_adv = np.load(ae_file)
""" Base divergence estimators. """
from numpy import mean, log, absolute, sqrt, floor, sum, arange, vstack, \
dot, abs
from scipy.spatial.distance import cdist, pdist
from ite.cost.x_initialization import InitX, InitKnnK, InitKnnKiTi, \
InitKnnKAlpha, InitKnnKAlphaBeta, \
InitKernel, InitEtaKernel
from ite.cost.x_verification import VerEqualDSubspaces, \
VerEqualSampleNumbers, \
VerEvenSampleNumbers
from ite.shared import knn_distances, estimate_d_temp2, estimate_i_alpha,\
estimate_d_temp3, volume_of_the_unit_ball,\
estimate_d_temp1
class BDKL_KnnK(InitKnnK, VerEqualDSubspaces):
""" Kullback-Leibler divergence estimator using the kNN method (S={k}).
Initialization is inherited from 'InitKnnK', verification comes from
'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDKL_KnnK()
>>> co2 = ite.cost.BDKL_KnnK(knn_method='cKDTree', k=5, eps=0.1)
>>> co3 = ite.cost.BDKL_KnnK(k=4)
"""
def estimation(self, y1, y2):
""" Estimate KL divergence.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated KL divergence.
References
----------
<NAME>. Estimation of Information Theoretic Measures
for Continuous Random Variables. Advances in Neural Information
Processing Systems (NIPS), pp. 1257-1264, 2008.
<NAME>, <NAME>, and <NAME>. A class of
Renyi information estimators for multidimensional densities.
Annals of Statistics, 36(5):2153-2182, 2008.
<NAME>, <NAME>, and <NAME>. Divergence
estimation for multidimensional densities via k-nearest-neighbor
distances. IEEE Transactions on Information Theory, 55:2392-2405,
2009.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
# sizes:
num_of_samples1, dim = y1.shape
num_of_samples2 = y2.shape[0]
# computation:
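# kNN (fixed k) Kullback-Leibler estimator implemented below:
#   D_hat = (dim / N1) * sum_i log( nu_k(i) / rho_k(i) ) + log( N2 / (N1 - 1) )
# where rho_k(i) is the k-NN distance of y1[i] within y1 and nu_k(i) is its
# k-NN distance to the sample y2.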
distances_y1y1 = knn_distances(y1, y1, True, self.knn_method,
self.k, self.eps, 2)[0]
distances_y2y1 = knn_distances(y2, y1, False, self.knn_method,
self.k, self.eps, 2)[0]
d = dim * mean(log(distances_y2y1[:, -1] /
distances_y1y1[:, -1])) + \
log(num_of_samples2/(num_of_samples1-1))
return d
class BDEnergyDist(InitX, VerEqualDSubspaces):
""" Energy distance estimator using pairwise distances of the samples.
Initialization is inherited from 'InitX', verification comes from
'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co = ite.cost.BDEnergyDist()
"""
def estimation(self, y1, y2):
""" Estimate energy distance.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated energy distance.
References
----------
<NAME> and <NAME>. A new test for multivariate
normality. Journal of Multivariate Analysis, 93:58-80, 2005.
(metric space of negative type)
<NAME> and <NAME>. Testing for equal
distributions in high dimension. InterStat, 5, 2004. (R^d)
<NAME> and <NAME>. On a new multivariate
two-sample test. Journal of Multivariate Analysis, 88, 190-206,
2004. (R^d)
<NAME>. N-Distances and Their Applications. Charles
University, Prague, 2005. (N-distance)
<NAME> and <NAME> and <NAME>. A
characterization of distributions by mean values of statistics
and certain probabilistic metrics. Journal of Soviet
Mathematics, 1992 (N-distance, general case).
Examples
--------
d = co.estimation(y1, y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
# Euclidean distances:
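# Plug-in estimate of the energy distance
# D(P, Q) = 2 E||Y1 - Y2|| - E||Y1 - Y1'|| - E||Y2 - Y2'||;
# the within-sample means below are V-statistics (divided by n^2, zero diagonal included).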
num_of_samples1, num_of_samples2 = y1.shape[0], y2.shape[0]
mean_dist_y1y1 = 2 * sum(pdist(y1)) / num_of_samples1**2
mean_dist_y2y2 = 2 * sum(pdist(y2)) / num_of_samples2**2
mean_dist_y1y2 = mean(cdist(y1, y2))
d = 2 * mean_dist_y1y2 - mean_dist_y1y1 - mean_dist_y2y2
return d
class BDBhattacharyya_KnnK(InitKnnK, VerEqualDSubspaces):
""" Bhattacharyya distance estimator using the kNN method (S={k}).
Partial initialization comes from 'InitKnnK', verification is
inherited from 'VerEqualDSubspaces' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, knn_method='cKDTree', k=3, eps=0,
pxdx=True):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
knn_method : str, optional
kNN computation method; 'cKDTree' or 'KDTree'.
k : int, >= 1, optional
k-nearest neighbors (default is 3).
eps : float, >= 0
The k^th returned value is guaranteed to be no further than
(1+eps) times the distance to the real kNN (default is 0).
pxdx : boolean, optional
If pxdx == True, then we rewrite the Bhattacharyya distance
as \int p^{1/2}(x)q^{1/2}(x)dx = \int p^{-1/2}(x)q^{1/2}(x)
p(x)dx. [p(x)dx] Else, the Bhattacharyya distance is
rewritten as \int p^{1/2}(x)q^{1/2}(x)dx =
\int q^{-1/2}(x)p^{1/2}(x) q(x)dx. [q(x)dx]
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDBhattacharyya_KnnK()
>>> co2 = ite.cost.BDBhattacharyya_KnnK(k=4)
"""
# initialize with 'InitKnnK':
super().__init__(mult=mult, knn_method=knn_method, k=k, eps=eps)
# other attributes (pxdx,_a,_b):
self.pxdx, self._a, self._b = pxdx, -1/2, 1/2
def estimation(self, y1, y2):
""" Estimate Bhattacharyya distance.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated Bhattacharyya distance.
References
----------
<NAME> and <NAME> and <NAME> and <NAME>. Support Distribution Machines. Technical Report, 2012.
"http://arxiv.org/abs/1202.0302" (estimation of d_temp2)
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
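# Bhattacharyya coefficient BC = \int p^{1/2}(x) q^{1/2}(x) dx, estimated as
# E_p[p^{-1/2} q^{1/2}] (pxdx=True) or E_q[q^{-1/2} p^{1/2}] (pxdx=False);
# the distance is then -log(BC).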
if self.pxdx:
d_ab = estimate_d_temp2(y1, y2, self)
else:
d_ab = estimate_d_temp2(y2, y1, self)
# absolute() to avoid possible 'log(negative)' values due to the
# finite number of samples:
d = -log(absolute(d_ab))
return d
class BDBregman_KnnK(InitKnnKAlpha, VerEqualDSubspaces):
""" Bregman distance estimator using the kNN method (S={k}).
Initialization comes from 'InitKnnKAlpha', verification is inherited
from 'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDBregman_KnnK()
>>> co2 = ite.cost.BDBregman_KnnK(alpha=0.9, k=5, eps=0.1)
"""
def estimation(self, y1, y2):
""" Estimate Bregman distance.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated Bregman distance.
References
----------
<NAME>, <NAME>, and <NAME>. A class of
Renyi information estimators for multidimensional densities.
Annals of Statistics, 36(5):2153-2182, 2008.
<NAME>. Generalized projections for non-negative functions.
Acta Mathematica Hungarica, 68:161-185, 1995.
<NAME>. The relaxation method of finding the common points
of convex sets and its application to the solution of problems in
convex programming. USSR Computational Mathematics and
Mathematical Physics, 7:200-217, 1967.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
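# Nonsymmetric Bregman distance of order alpha (cf. the references above):
# D_alpha(p, q) = \int q^alpha + \int p^alpha / (alpha - 1)
#                 - alpha / (alpha - 1) * \int p(x) q^{alpha-1}(x) dx,
# with the three integrals estimated below by i_alpha_y2, i_alpha_y1 and d_temp3.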
i_alpha_y1 = estimate_i_alpha(y1, self)
i_alpha_y2 = estimate_i_alpha(y2, self)
d_temp3 = estimate_d_temp3(y1, y2, self)
d = i_alpha_y2 + i_alpha_y1 / (self.alpha - 1) -\
self.alpha / (self.alpha - 1) * d_temp3
return d
class BDChi2_KnnK(InitKnnK, VerEqualDSubspaces):
""" Chi-square distance estimator using the kNN method (S={k}).
Partial initialization comes from 'InitKnnK', verification is
inherited from 'VerEqualDSubspaces' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, knn_method='cKDTree', k=3, eps=0,
pxdx=True):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
knn_method : str, optional
kNN computation method; 'cKDTree' or 'KDTree'.
k : int, >= 1, optional
k-nearest neighbors (default is 3).
eps : float, >= 0
The k^th returned value is guaranteed to be no further than
(1+eps) times the distance to the real kNN (default is 0).
pxdx : boolean, optional
If pxdx == True, then we rewrite the Pearson chi-square
divergence as \int p^2(x)q^{-1}(x)dx - 1 =
\int p^1(x)q^{-1}(x) p(x)dx - 1. [p(x)dx]
Else, the Pearson chi-square divergence is rewritten as
\int p^2(x)q^{-1}(x)dx - 1= \int q^{-2}(x)p^2(x) q(x)dx -1.
[q(x)dx]
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDChi2_KnnK()
>>> co2 = ite.cost.BDChi2_KnnK(k=4)
"""
# initialize with 'InitKnnK':
super().__init__(mult=mult, knn_method=knn_method, k=k, eps=eps)
# other attributes (pxdx,_a,_b):
self.pxdx = pxdx
if pxdx:
self._a, self._b = 1, -1
else:
self._a, self._b = -2, 2
def estimation(self, y1, y2):
""" Estimate Pearson chi-square divergence.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated Pearson chi-square divergence.
References
----------
<NAME>, <NAME>, <NAME>, and <NAME>. Support distribution machines. Technical Report,
Carnegie Mellon University, 2012. http://arxiv.org/abs/1202.0302.
(estimation of d_temp2)
<NAME>. On the criterion that a given system of deviations
from the probable in the case of a correlated system of variables is
such that it can be reasonably supposed to have arisen from random
sampling. Philosophical Magazine (Series 5), 50:157-172, 1900.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
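# Pearson chi-square divergence: chi^2(p, q) = \int p^2(x) / q(x) dx - 1; the
# integral is estimated as E_p[p q^{-1}] (pxdx=True) or E_q[q^{-2} p^2] (pxdx=False).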
if self.pxdx:
d = estimate_d_temp2(y1, y2, self) - 1
else:
d = estimate_d_temp2(y2, y1, self) - 1
return d
class BDHellinger_KnnK(InitKnnK, VerEqualDSubspaces):
""" Hellinger distance estimator using the kNN method (S={k}).
Partial initialization comes from 'InitKnnK', verification is
inherited from 'VerEqualDSubspaces' (see
'ite.cost.x_initialization.py', 'ite.cost.x_verification.py').
"""
def __init__(self, mult=True, knn_method='cKDTree', k=3, eps=0,
pxdx=True):
""" Initialize the estimator.
Parameters
----------
mult : bool, optional
'True': multiplicative constant relevant (needed) in the
estimation. 'False': estimation up to 'proportionality'.
(default is True)
knn_method : str, optional
kNN computation method; 'cKDTree' or 'KDTree'.
k : int, >= 1, optional
k-nearest neighbors (default is 3).
eps : float, >= 0
The k^th returned value is guaranteed to be no further than
(1+eps) times the distance to the real kNN (default is 0).
pxdx : boolean, optional
If pxdx == True, then we rewrite the Bhattacharyya coefficient
(underlying the Hellinger distance) as \int p^{1/2}(x)q^{1/2}(x)dx =
\int p^{-1/2}(x)q^{1/2}(x) p(x)dx. [p(x)dx]
Else, the Bhattacharyya coefficient is rewritten as
\int p^{1/2}(x)q^{1/2}(x)dx =
\int q^{-1/2}(x)p^{1/2}(x) q(x)dx. [q(x)dx]
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDHellinger_KnnK()
>>> co2 = ite.cost.BDHellinger_KnnK(k=4)
"""
# initialize with 'InitKnnK':
super().__init__(mult=mult, knn_method=knn_method, k=k, eps=eps)
# other attributes (pxdx,_a,_b):
self.pxdx, self._a, self._b = pxdx, -1/2, 1/2
def estimation(self, y1, y2):
""" Estimate Hellinger distance.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated Hellinger distance.
References
----------
<NAME>, <NAME>, <NAME>, and <NAME>. Support distribution machines. Technical Report,
Carnegie Mellon University, 2012. http://arxiv.org/abs/1202.0302.
(estimation of d_temp2)
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
# D_ab (Bhattacharyya coefficient):
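# Hellinger distance: H(p, q) = sqrt(1 - BC), where BC = \int p^{1/2}(x) q^{1/2}(x) dx
# is the Bhattacharyya coefficient estimated by d_ab below.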
if self.pxdx:
d_ab = estimate_d_temp2(y1, y2, self)
else:
d_ab = estimate_d_temp2(y2, y1, self)
# absolute() to avoid possible 'sqrt(negative)' values due to the
# finite number of samples:
d = sqrt(absolute(1 - d_ab))
return d
class BDKL_KnnKiTi(InitKnnKiTi, VerEqualDSubspaces):
""" Kullback-Leibler divergence estimator using the kNN method.
In the kNN method: S_1={k_1}, S_2={k_2}; ki-s depend on the number of
samples.
Initialization is inherited from 'InitKnnKiTi', verification comes
from 'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDKL_KnnKiTi()
>>> co2 = ite.cost.BDKL_KnnKiTi(knn_method='cKDTree', eps=0.1)
"""
def estimation(self, y1, y2):
""" Estimate KL divergence.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated KL divergence.
References
----------
<NAME>, <NAME>, and <NAME>. Divergence
estimation for multidimensional densities via k-nearest-neighbor
distances. IEEE Transactions on Information Theory, 55:2392-2405,
2009.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
# sizes:
num_of_samples1, dim = y1.shape
num_of_samples2 = y2.shape[0]
# ki-s depend on the number of samples:
k1 = int(floor(sqrt(num_of_samples1)))
k2 = int(floor(sqrt(num_of_samples2)))
# computation:
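# Same kNN-based estimator as in BDKL_KnnK, but with sample-size dependent
# neighbour counts k1, k2; the additive bias-correction term becomes
# log(k1 / k2 * n2 / (n1 - 1)).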
dist_k1_y1y1 = knn_distances(y1, y1, True, self.knn_method, k1,
self.eps, 2)[0]
dist_k2_y2y1 = knn_distances(y2, y1, False, self.knn_method, k2,
self.eps, 2)[0]
d = dim * mean(log(dist_k2_y2y1[:, -1] / dist_k1_y1y1[:, -1])) +\
log(k1 / k2 * num_of_samples2 / (num_of_samples1 - 1))
return d
class BDL2_KnnK(InitKnnK, VerEqualDSubspaces):
""" L2 divergence estimator using the kNN method (S={k}).
Initialization is inherited from 'InitKnnK', verification comes from
'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDL2_KnnK()
>>> co2 = ite.cost.BDL2_KnnK(knn_method='cKDTree', k=5, eps=0.1)
>>> co3 = ite.cost.BDL2_KnnK(k=4)
"""
def estimation(self, y1, y2):
""" Estimate L2 divergence.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated L2 divergence.
References
----------
<NAME>, <NAME>, <NAME>. Nonparametric
divergence estimators for Independent Subspace Analysis. European
Signal Processing Conference (EUSIPCO), pages 1849-1853, 2011.
<NAME>, <NAME>, <NAME>. Nonparametric
Divergence: Estimation with Applications to Machine Learning on
Distributions. Uncertainty in Artificial Intelligence (UAI), 2011.
<NAME> and <NAME>. On the Estimation of
alpha-Divergences. International Conference on Artificial
Intelligence and Statistics (AISTATS), pages 609-617, 2011.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
# sizes:
num_of_samples1, dim = y1.shape
num_of_samples2 = y2.shape[0]
c = volume_of_the_unit_ball(dim)
dist_k_y1y1 = knn_distances(y1, y1, True, self.knn_method, self.k,
self.eps, 2)[0][:, -1]
dist_k_y2y1 = knn_distances(y2, y1, False, self.knn_method, self.k,
self.eps, 2)[0][:, -1]
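# L2 divergence \int (p - q)^2 = \int p^2 - 2 \int p q + \int q^2; the three
# terms below are kNN-based plug-in estimates of these integrals (the last one
# written as E_p[q^2 / p]), cf. the references above.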
term1 = \
mean(dist_k_y1y1**(-dim)) * (self.k - 1) /\
((num_of_samples1 - 1) * c)
term2 = \
mean(dist_k_y2y1**(-dim)) * 2 * (self.k - 1) /\
(num_of_samples2 * c)
term3 = \
mean((dist_k_y1y1**dim) / (dist_k_y2y1**(2 * dim))) *\
(num_of_samples1 - 1) * (self.k - 2) * (self.k - 1) /\
(num_of_samples2**2 * c * self.k)
l2 = term1 - term2 + term3
# absolute() to avoid possible 'sqrt(negative)' values due to the
# finite number of samples:
d = sqrt(absolute(l2))
return d
class BDRenyi_KnnK(InitKnnKAlpha, VerEqualDSubspaces):
""" Renyi divergence estimator using the kNN method (S={k}).
Initialization comes from 'InitKnnKAlpha', verification is inherited
from 'VerEqualDSubspaces' (see 'ite.cost.x_initialization.py',
'ite.cost.x_verification.py').
The Renyi divergence (D_{R,alpha}) equals the Kullback-Leibler
divergence (D) in the limit: D_{R,alpha} -> D as alpha -> 1.
Examples
--------
>>> import ite
>>> co1 = ite.cost.BDRenyi_KnnK()
>>> co2 = ite.cost.BDRenyi_KnnK(alpha=0.9, k=5, eps=0.1)
"""
def estimation(self, y1, y2):
""" Estimate Renyi divergence.
Parameters
----------
y1 : (number of samples1, dimension)-ndarray
One row of y1 corresponds to one sample.
y2 : (number of samples2, dimension)-ndarray
One row of y2 corresponds to one sample.
Returns
-------
d : float
Estimated Renyi divergence.
References
----------
<NAME>, <NAME>, <NAME>. Nonparametric
divergence estimators for Independent Subspace Analysis. European
Signal Processing Conference (EUSIPCO), pages 1849-1853, 2011.
<NAME>, <NAME>. On the Estimation of
alpha-Divergences. International conference on Artificial
Intelligence and Statistics (AISTATS), pages 609-617, 2011.
<NAME>, <NAME>, <NAME>. Nonparametric
Divergence: Estimation with Applications to Machine Learning on
Distributions. Uncertainty in Artificial Intelligence (UAI), 2011.
Examples
--------
d = co.estimation(y1,y2)
"""
# verification:
self.verification_equal_d_subspaces(y1, y2)
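# Renyi divergence: D_alpha(p||q) = log( \int p^alpha(x) q^{1-alpha}(x) dx ) / (alpha - 1);
# d_temp1 below is the kNN-based estimate of the integral.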
d_temp1 = estimate_d_temp1(y1, y2, self)
d = log(d_temp1) / (self.alpha - 1)
return d
import numpy as np
from typing import *
from numpy.typing import ArrayLike
from scipy.spatial import Delaunay
from tallem.utility import ask_package_install, package_exists
def flywing():
''' Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis) '''
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])
return([arr1, arr2])
def gaussian_blob(n_pixels: int, r: float):
'''
Generates a closure which, given a 2D location *mu=(x,y)*, generates a white blob
with [normalized] radius 0 < r <= 1 in a (n_pixels x n_pixels) image.
If *mu* is in [0,1] x [0,1], the center of the white blob should be visible
If *mu* has both of its coordinates outside of [0,1]x[0,1], the blob may be partially visible
If *mu* has both of its coordinates outside of [-r, 1+r]x[-r, 1+r], then the image should be essentially black
The returned closure relies entirely on autograd's numpy wrapper to do the image generation. Thus, the resulting
function can be differentiated (w.r.t *mu*) using the reverse-mode differentiation process that *autograd* provides.
This function also returns the global normalizing constant needed to normalize the pixel intensities in [0,1],
for plotting or other purposes.
Return: (blob, c) where
- blob := differentiable closure which, given a vector (x,y), generates the blob image as a flat vector.
- c := maximum value of the intensity of any given pixel for any choice of *mu*.
'''
import autograd.numpy as auto_np
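# 3.090232 ~= Phi^{-1}(0.999) for the standard normal, i.e. the radius r is
# treated as roughly 3.09 standard deviations of the Gaussian blob.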
sd = r/3.090232
sigma = sd**2
sigma_inv = 1.0/sigma
denom = np.sqrt(((2*auto_np.pi)**2) * (sigma**2))
def blob(mu): # mu can be anywhere; center of image is [0.5, 0.5]
loc = auto_np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels)
x,y = auto_np.meshgrid(loc, loc)
grid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom
return(auto_np.ravel(grid).flatten())
return(blob, auto_np.exp(0)/denom)
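# Minimal usage sketch (pixel count, radius and center are illustrative):
# blob, c = gaussian_blob(n_pixels=17, r=0.25)
# plot_image(blob([0.25, 0.75]).reshape(17, 17), max_val=c)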
def plot_image(P, figsize=(8,8), max_val = "default"):
if max_val == "default": max_val = np.max(P)
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
plt.imshow(P, cmap='gray', vmin=0, vmax=max_val)
fig.gca().axes.get_xaxis().set_visible(False)
fig.gca().axes.get_yaxis().set_visible(False)
def plot_images(P, shape, max_val = "default", figsize=(8,8), layout = None):
'''
P := numpy array where each row is a grayscale image
shape := the shape to reshape each row of P prior to plotting
'''
import matplotlib.pyplot as plt
if max_val == "default":
max_val = np.max(P)
if P.ndim == 1:
fig = plt.figure(figsize=figsize)
plt.imshow(P.reshape(shape), cmap='gray', vmin=0, vmax=max_val)
ax = fig.gca()
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
return(fig, ax)
else:
assert layout is not None, "missing layout"
fig, axs = plt.subplots(*layout, figsize=figsize)
axs = axs.flatten()
for i, (img, ax) in enumerate(zip(P, axs)):
#fig.add_subplot(layout[0], layout[1], i+1)
plt.axis("off")
ax.imshow(P[i,:].reshape(shape), cmap='gray', vmin=0, vmax=max_val, aspect='auto')
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.1)
return(fig, axs)
def scatter2D(P, layout = None, figsize=(8,8), **kwargs):
import matplotlib.pyplot as plt
if isinstance(P, np.ndarray):
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.scatter(*P.T, **kwargs)
return(fig, ax)
elif isinstance(P, Iterable):
assert layout is not None, "missing layout"
assert len(P) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig = plt.figure(figsize=figsize)
for i, p in enumerate(P):
ax = fig.add_subplot(layout[0], layout[1], i+1)
ax.scatter(*p.T, **kwargs)
return(fig, ax)
def scatter3D(P, angles = None, layout = None, figsize=(8,8), **kwargs):
import matplotlib.pyplot as plt
if isinstance(P, np.ndarray):
import numbers
if angles is not None:
if isinstance(angles, numbers.Integral):
angles = np.linspace(0, 360, angles, endpoint=False)
assert len(angles) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig, ax = plt.subplots(*layout, figsize=figsize)
for i, theta in enumerate(angles):
ax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')
ax.scatter3D(*P.T, **kwargs)
ax.view_init(30, theta)
else:
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection='3d')
ax.scatter3D(*P.T, **kwargs)
elif isinstance(P, Iterable):
import numbers
assert layout is not None, "missing layout"
if angles is None:
angles = np.repeat(60, len(P))
elif isinstance(angles, numbers.Integral):
angles = np.linspace(0, 360, len(P), endpoint=False)  # view_init expects degrees, as in the ndarray branch above
assert len(angles) == np.prod(layout)
if "fig" in kwargs.keys() and "ax" in kwargs.keys():
fig, ax = kwargs["fig"], kwargs["ax"]
kwargs.pop('fig', None)
kwargs.pop('ax', None)
else:
fig, ax = plt.subplots(*layout, figsize=figsize)
for i, p in enumerate(P):
ax = fig.add_subplot(layout[0], layout[1], i+1, projection='3d')
ax.scatter3D(*p.T, **kwargs)
ax.view_init(30, angles[i])
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
return(fig, ax)
def rotating_disk(n_pixels: int, r: float, sigma: float = 1.0):
from scipy.ndimage import gaussian_filter
import numpy as np
I = np.zeros(shape=(n_pixels, n_pixels))
p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
z = np.array([r, 0.0]).reshape((2,1))
d = np.array([0.5, 0.5]).reshape((2,1))
x,y = np.meshgrid(p, p)
def disk_image(theta: float):
R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
c = (R @ z) + d # center of disk in [0,1]^2
D = np.flipud(np.sqrt((x - c[0])**2 + (y - c[1])**2))
D[D <= r] = -1.0
D[D > r] = 0.0
D[D == -1.0] = 1.0
return(np.ravel(gaussian_filter(D, sigma=sigma)).flatten())
return(disk_image, 1.0)
def white_bars(n_pixels: int, r: float, sigma: float = 1.0):
'''
Returns a parameterization that yields a white vertical bar at various orientations in an image.
Fixed parameters:
n_pixels := number of pixels to make square image
r := constant between [0,1] indicating how wide to make the bar
sigma := kernel parameter for gaussian blur
Returns:
bar := closure w/ parameters theta in [0, pi] and d (signed center offset) in [-1, 1]
c := normalizing constant for plotting
'''
from scipy.ndimage import gaussian_filter
import numpy as np
w = r
p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
x,y = np.meshgrid(p,p)
c = np.array([0.5, 0.5]) # center of image
def bar(theta: float, d: float):
assert np.all(np.bitwise_and(d >= -1.0, d <= 1.0)), "d must be in the range [-1, 1]"
assert np.all(np.bitwise_and(theta >= 0.0, theta <= np.pi)), "theta must be in the range [0, pi]"
u = np.array([ 1.0, np.tan(theta) ])
u = u / np.linalg.norm(u)
c = np.array([0.5, 0.5]) # center of image
d = d * (np.sqrt(2) / 2) # scale where to place center of bar
if theta > np.pi/2:
d = -d
b = c + d*u # center of bar
D = np.zeros(np.prod(x.shape))
for i, (xi,yi) in enumerate(zip(x.flatten(),y.flatten())):
p = np.array([xi,yi])
D[i] = np.dot(p-b, u)# np.linalg.norm(z-v)
I = abs(D.reshape((n_pixels, n_pixels))).T
I[I > w] = 1
I = 1.0 - I
return(gaussian_filter(I, sigma=sigma))
c = np.max(bar(0.0, 0.0))
return(bar, c)
# u = np.array([ 1.0, np.tan(theta) ])
# u = u / np.linalg.norm(u)
# d = np.array([-di if ti <= np.pi/2 else di for di,ti in zip(d, theta)])*(np.sqrt(2) / 2)
# U = np.c_[np.repeat(1.0, len(theta)), theta]
# U = U / np.linalg.norm(U, axis = 1, keepdims = True)
# B = c + d.reshape((len(d), 1)) * U # center of bars
# D = [abs((x - b[0])*u[0] + (y - b[1])*u[1]).T for (u, b) in zip(U, B)]
# # b = c + d*u # center of bar
# # D = (x - b[0])*u[0] + (y - b[1])*u[1]
# # I = abs(D.reshape((n_pixels, n_pixels))).T
# images = np.zeros((B.shape[0], n_pixels**2))
# for i, img in enumerate(D):
# img[img > w] = 1
# img = 1.0 - img
# images[i,:] = np.ravel(gaussian_filter(img, sigma=sigma).flatten())
# return(images)
# from scipy.ndimage import gaussian_filter
# import numpy as np
# w = r*np.sqrt(2)
# p = np.linspace(0, 1, n_pixels, False) + 1/(2*n_pixels) # center locations of pixels, in normalized space
# x,y = np.meshgrid(p,p)
# def bar(y_offset: float, theta: float):
# assert y_offset >= 0.0 and y_offset <= 1.0
# assert theta >= 0.0 and theta <= np.pi
# # z = np.array([0.5, y_offset]) # intercept
# # dist_to_line = np.cos(theta)*(z[1] - y) - np.sin(theta)*(z[0]-x)
# # dist_to_line = ((y - y_offset)/np.tan(theta))*np.sin(theta)
# # Z = np.array([np.array([xi,yi]) for xi,yi in zip(x.flatten(),y.flatten())])
# # fig,ax = scatter2D(Z, c="blue")
# # fig,ax = scatter2D(np.array(P), c="red", fig=fig, ax=ax)
# # fig,ax = scatter2D(np.array([0.5, 0.5]), c="green", fig=fig, ax=ax)
# # fig,ax = scatter2D(np.c_[x.flatten(), x.flatten()*m + b], c="purple", fig=fig, ax=ax)
# m, b = np.tan(theta), y_offset
# #pt = np.array([1.0, m + b])
# z1 = np.array([0.50, b])
# z2 = np.array([1.0, 1.0*m + b])
# pt = z2 - z1
# d = []
# P = []
# for xi,yi in zip(x.flatten(),y.flatten()):
# u = pt / np.linalg.norm(pt)
# v = np.array([xi,yi])
# z = u*np.dot(v-np.array([0.5, b]), u)+np.array([0.5, b])
# d.append(np.linalg.norm(z-v))
# P.append(z)
# dist_to_line = np.array(d)
# # fig, ax = scatter2D(np.array(P))
# I = abs(dist_to_line.reshape((n_pixels, n_pixels)))
# I = np.flipud(I) # make origin lower-left, not top-left
# # I = (np.sqrt(2)/2)*(I/np.max(I))
# I[I > w] = np.sqrt(2)
# I = np.sqrt(2) - I ## invert intensity
# # I[I < (np.sqrt(2) - w)] = 0.0
# # B = I.copy()
# # I[I <= w] = -1.0
# # I[I > w] = 0.0
# # I[I == -1.0] = np.max(B[B <= w]) - B[B <= w] # 1.0
# return(gaussian_filter(I, sigma=sigma))
# c = np.max(bar(0.0, 0.0))
# return(bar, c)
# def _gaussian_pixel(d, n_pixels):
# from scipy.stats import norm
# sigma = d/3.0
# Sigma = auto_np.diag([sigma, sigma])
# sigma_inv = auto_np.linalg.inv(Sigma)[0,0]
# denom = np.sqrt(((2*np.pi)**2) * auto_np.linalg.det(Sigma))
# normal_constant = norm.pdf(0, loc=0, scale=sigma)
# def blob(mu): # generates blob at location mu
# # mu = mu.reshape((2, 1))
# # np.exp(-0.5 * ((x - mu).T @ SigmaI @ (x - mu))).flatten()
# #x, y = auto_np.meshgrid(auto_np.arange(n_pixels), auto_np.arange(n_pixels))
# loc = auto_np.linspace(0, 1, n_pixels, False) + (1/(2*n_pixels))
# x,y = auto_np.meshgrid(loc, loc)
# grid = auto_np.exp(-0.5*(sigma_inv * ((x-mu[0])**2 + (y-mu[1])**2)))/denom
# #grid = auto_np.exp(-0.5*((x - mu[0])**2 + (y - mu[1])**2))/denom
# #return(auto_np.ravel(grid).flatten())
# return(grid/normal_constant)
# return(blob)
# plot_image(gaussian_pixel2(1/32, 11)([-0.5, 0.5]))
def white_dot(n_pixels: int, r: float, n: Optional[int], method: Optional[str] = "grid", mu: Optional[ArrayLike] = None):
'''
Generates a grayscale image data set where white blobs are placed on a (n_pixels x n_pixels) grid
using a multivariate normal density whose standard deviation sigma (in both directions) is roughly sigma=r/3.
If 'n' is specified, then 'n' samples are generated from a larger space s([-r, 1+r]^2) where s(*)
denotes the scaling of the interval [-r, 1+r] by 'n_pixels'.
Parameters:
n_pixels := number of pixels wide/tall to make the resulting images
r := relative radius of dot (in (0, 1])
n := (optional) number of samples desired
method := (optional) how to generate samples in the parameter space. Can be either "grid" or "random".
mu := (optional) locations of dot centers to generate the dots at
Returns:
samples := generated image samples
params := (x,y,i) parameters associated with each sample,
f := closure for generating more samples. See gaussian_blob() for more details.
c := normalizing constant. See gaussian_blob() for more details.
'''
assert r > 0 and r <= 1.0, "r must be in the range 0 < r <= 1.0"
assert n is None or isinstance(n, (int, tuple)), "n must be an integer or a tuple of integers"
ask_package_install("autograd")
import numpy as np
import autograd.numpy as auto_np
## First generate the closure to make the images
blob, c = gaussian_blob(n_pixels, r)
if not(mu is None):
samples = np.vstack([blob(auto_np.array([x,y])) for x,y in mu])
params = mu
elif method == "random":
## Generate uniformly random locations (in domain)
assert n is not None, "'n' must be supplied if 'mu' is not."
n1, n2 = (n, n) if isinstance(n, int) else (n[0], n[1])
samples, params = [], []
X, Y = np.random.uniform(size=n1,low=-r,high=1+r), np.random.uniform(size=n1,low=-r,high=1+r)
for x,y in zip(X, Y):
samples.append(blob(auto_np.array([x,y])))
params.append([x, y, 1.0])
NP = blob(auto_np.array([0.5, 0.5]))
for t in np.random.uniform(size=n2, low=0.0, high=1.0):
samples.append(t*NP)
params.append([0.5, 0.5, 1-t])
## Vertically stack
samples, params = np.vstack(samples), np.vstack(params)
elif method == "grid":
assert n is not None, "'n' must be supplied if 'mu' is not."
if isinstance(n, int):
n1, n2 = (n, n)
else:
n1, n2 = (n[0], n[1])
ng = int(np.floor(np.sqrt(n1)))
samples, params = [], []
for x in np.linspace(0.0-r,1.0+r,ng):
for y in np.linspace(0.0-r,1.0+r,ng):
samples.append(blob(auto_np.array([x, y])))
params.append([x, y, 1.0])
## Generate the pole
NP = blob(auto_np.array([0.5, 0.5]))
for t in np.linspace(0, 1, n2):
samples.append(t*NP)
params.append([0.5, 0.5, 1-t])
## Vertically stack
samples, params = np.vstack(samples), np.vstack(params)
## Return the data
return(samples, params, blob, c)
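# Minimal usage sketch (parameter values are illustrative):
# samples, params, blob, c = white_dot(n_pixels=17, r=0.35, n=(25, 5), method="grid")
# plot_images(samples[:9], shape=(17, 17), max_val=c, layout=(3, 3))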
def mobius_band(n_polar=66, n_wide=9, scale_band=0.25):
'''
Generates stratified samples on a Mobius band embedded in R^3
To get uniform samples, N = (n_polar*n_wide) uniformly spaced coordinates are generated initially
from the intrinsic space of M. These points are converted to their extrinsic (3D) coordinates and
are then triangulated using a Delaunay triangulation. Finally, using the Delaunay triangles as strata,
a stratified sampling scheme is employed by sampling randomly from each triangle using its barycentric
coordinates. This stratification ensures the samples are both sufficiently random and sufficiently "uniformly
spaced" around the band.
Returns:
- M := (n x 3) matrix of embedding coordinates
- B := (n x 2) matrix of intrinsic coordinates
In the intrinsic coordinates, B[:,0] is the width parameter and B[:,1] is the angular coordinate
'''
## Generate random (deterministic) polar coordinates around Mobius Band
np.random.seed(0)
s = np.linspace(-scale_band, scale_band, 2*n_wide)
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.metrics import pairwise_distances
from graphs.construction import neighbor_graph
from graphs.construction.incremental import incremental_neighbor_graph
from graphs.mini_six import zip_longest, range
np.set_printoptions(precision=3, suppress=True)
"""
Test for file IO
"""
import pickle
from pickle import PicklingError
import re
import pytest
import numpy as np
from bioptim import InterpolationType, OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("n_threads", [1, 2])
@pytest.mark.parametrize("use_sx", [False, True])
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK, OdeSolver.COLLOCATION])
def test_pendulum(ode_solver, use_sx, n_threads):
bioptim_folder = TestUtils.bioptim_folder()
pendulum = TestUtils.load_module(bioptim_folder + "/examples/getting_started/pendulum.py")
ode_solver = ode_solver()
if isinstance(ode_solver, OdeSolver.IRK) and use_sx:
with pytest.raises(NotImplementedError, match="use_sx=True and OdeSolver.IRK are not yet compatible"):
pendulum.prepare_ocp(
biorbd_model_path=bioptim_folder + "/examples/getting_started/models/pendulum.bioMod",
final_time=2,
n_shooting=10,
n_threads=n_threads,
use_sx=use_sx,
ode_solver=ode_solver,
)
return
ocp = pendulum.prepare_ocp(
biorbd_model_path=bioptim_folder + "/examples/getting_started/models/pendulum.bioMod",
final_time=1,
n_shooting=30,
n_threads=n_threads,
use_sx=use_sx,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
if isinstance(ode_solver, OdeSolver.RK8):
np.testing.assert_almost_equal(f[0, 0], 41.57063948309302)
elif isinstance(ode_solver, OdeSolver.IRK):
np.testing.assert_almost_equal(f[0, 0], 65.8236055171619)
elif isinstance(ode_solver, OdeSolver.COLLOCATION):
np.testing.assert_almost_equal(f[0, 0], 46.667345680854794)
else:
np.testing.assert_almost_equal(f[0, 0], 41.58259426)
# Check constraints
g = np.array(sol.constraints)
if ode_solver.is_direct_collocation:
np.testing.assert_equal(g.shape, (600, 1))
np.testing.assert_almost_equal(g, np.zeros((600, 1)))
else:
np.testing.assert_equal(g.shape, (120, 1))
np.testing.assert_almost_equal(g, np.zeros((120, 1)))
# Check some of the results
states, controls = sol.states, sol.controls
q, qdot, tau = states["q"], states["qdot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
# initial and final controls
if isinstance(ode_solver, OdeSolver.RK8):
np.testing.assert_almost_equal(tau[:, 0], np.array((6.03763589, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-13.59527556, 0)))
elif isinstance(ode_solver, OdeSolver.IRK):
np.testing.assert_almost_equal(tau[:, 0], np.array((5.40765381, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-25.26494109, 0)))
elif isinstance(ode_solver, OdeSolver.COLLOCATION):
np.testing.assert_almost_equal(tau[:, 0], np.array((5.78386563, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-18.22245512, 0)))
else:
np.testing.assert_almost_equal(tau[:, 0], np.array((6.01549798, 0)))
np.testing.assert_almost_equal(tau[:, -2], np.array((-13.68877181, 0)))
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("n_threads", [1, 2])
@pytest.mark.parametrize("use_sx", [False, True])
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.RK8, OdeSolver.IRK, OdeSolver.COLLOCATION])
def test_pendulum_save_and_load(n_threads, use_sx, ode_solver):
bioptim_folder = TestUtils.bioptim_folder()
pendulum = TestUtils.load_module(bioptim_folder + "/examples/getting_started/example_save_and_load.py")
ode_solver = ode_solver()
if isinstance(ode_solver, OdeSolver.IRK):
if use_sx:
with pytest.raises(NotImplementedError, match="use_sx=True and OdeSolver.IRK are not yet compatible"):
pendulum.prepare_ocp(
biorbd_model_path=bioptim_folder + "/examples/getting_started/models/pendulum.bioMod",
final_time=1,
n_shooting=30,
n_threads=n_threads,
use_sx=use_sx,
ode_solver=ode_solver,
)
else:
ocp = pendulum.prepare_ocp(
biorbd_model_path=bioptim_folder + "/examples/getting_started/models/pendulum.bioMod",
final_time=1,
n_shooting=30,
n_threads=n_threads,
use_sx=use_sx,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
# Check constraints
g = np.array(sol.constraints)
np.testing.assert_equal(g.shape, (120, 1))
np.testing.assert_almost_equal(g, np.zeros((120, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
# save and load
TestUtils.save_and_load(sol, ocp, True)
# simulate
TestUtils.simulate(sol)
else:
ocp = pendulum.prepare_ocp(
biorbd_model_path=bioptim_folder + "/examples/getting_started/models/pendulum.bioMod",
final_time=1,
n_shooting=30,
n_threads=n_threads,
use_sx=use_sx,
ode_solver=ode_solver,
)
sol = ocp.solve()
# Check objective function value
is_collocation = isinstance(ode_solver, OdeSolver.COLLOCATION) and not isinstance(ode_solver, OdeSolver.IRK)
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
if isinstance(ode_solver, OdeSolver.RK8):
np.testing.assert_almost_equal(f[0, 0], 9.821989132327003)
elif is_collocation:
pass
else:
np.testing.assert_almost_equal(f[0, 0], 9.834017207589055)
# Check constraints
g = np.array(sol.constraints)
if is_collocation:
np.testing.assert_equal(g.shape, (600, 1))
np.testing.assert_almost_equal(g, np.zeros((600, 1)))
else:
np.testing.assert_equal(g.shape, (120, 1))
np.testing.assert_almost_equal(g, np.zeros((120, 1)))
# Check some of the results
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(q[:, -1], np.array((0, 3.14)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((0, 0)))
__author__ = 'Chronis'
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import time
# UTILITY FUNCTIONS
def myfft2(wave_in):
return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(wave_in)))
import numpy as np
import matplotlib.pyplot as plt
state_colormap = 'viridis' # other sequential colormaps: 'plasma', 'gray'
change_colormap = 'coolwarm' # other diverging colormaps: 'bwr', 'RdBu'
def visualise(data, initial_conditions = [], include_end_state = False,
state_colormap = state_colormap, change_colormap = change_colormap):
Q_map = data['Q_map']
Q_V = data['Q_V']
grids = data['grids']
s_grid = grids['states']
a_grid = grids['actions']
s_min = s_grid[0]
s_max = s_grid[-1]
a_min = a_grid[0]
a_max = a_grid[-1]
initial_state = np.repeat(np.array([s_grid]), len(a_grid), axis=0)
"""
Module to search for the supergroup symmetry
"""
from copy import deepcopy
from random import sample
import itertools
import numpy as np
from scipy.optimize import minimize
from pymatgen.core.operations import SymmOp
import pymatgen.analysis.structure_matcher as sm
import pyxtal.symmetry as sym
from pyxtal.lattice import Lattice
from pyxtal.wyckoff_site import atom_site
from pyxtal.operations import apply_ops
from pyxtal.wyckoff_split import wyckoff_split
def new_solution(A, refs):
"""
check if A is already in the reference solutions
"""
for B in refs:
match = True
for a, b in zip(A, B):
a.sort()
b.sort()
if a != b:
match = False
break
if match:
return False
return True
def find_mapping_per_element(sites1, sites2,max_num=720):
"""
search for all mappings for a given splitter
Args:
sites1 (list): e.g., l layer ['4a', '8b', '4c']
sites2 (list): e.g., 2 layers [['4a'], ['8b', '4c']]
max_num (int): maximum number of atomic mapping
Returns:
unique solutions: e.g. 3 layers: [[[0], [1,2]]]
"""
unique_letters=list(set(sites1))
site1_letter_indices=[]
for letter in unique_letters:
site1_letter_indices.append([i for i, x in enumerate(sites1) if x==letter])
site2_letter_bins=[]
for lbin in sites2:
site2_letter_bins.append([unique_letters.index(x) for x in lbin])
combo_list=[]
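# for each letter-bin in sites2, enumerate every way of drawing the required
# number of indices of each letter from sites1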
for s in site2_letter_bins:
ls=list(set(s))
rs=[s.count(r) for r in ls]
p=[]
for i, l in enumerate(ls):
combo=itertools.combinations(site1_letter_indices[l],rs[i])
combo=[list(x) for x in combo]
p.append(deepcopy(combo))
pr=p[0]
for i in range(1,len(p)):
pr=itertools.product(pr,p[i])
pr=[sum(list(x),[]) for x in pr]
combo_list.append(pr)
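# combine the per-bin choices; keep only assignments in which no sites1 index
# is used more than once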
unique_solutions=[[x] for x in combo_list[0]]
for i in range(1,len(combo_list)):
unique_solutions=[x+[y] for x in unique_solutions for y in combo_list[i] if len(set(sum(x,[])).intersection(y))==0]
return unique_solutions
# deprecated mapping function
# print('sites1=',sites1)
# print('sites2=',sites2)
# unique_solutions = []
# solution_template = [[None]*len(site2) for site2 in sites2]
# assigned_ids = []
# # first identify the unique assignment
# for i, site2 in enumerate(sites2):
# wp_letters = set(site2)
# if len(wp_letters) == len(site2): #site2: ['a', 'b'] or ['a']
# for j, s2 in enumerate(site2):
# ids = [id for id, s1 in enumerate(sites1) if s1==s2]
# if len(ids) == 1:
# solution_template[i][j] = ids[0]
# assigned_ids.append(ids[0])
# elif len(wp_letters)==1: #site2: ['a','a'] or ['a','a','a']
# ids = [id for id, s1 in enumerate(sites1) if s1==list(wp_letters)[0]]
# if len(ids) == len(site2):
# solution_template[i] = ids
# assigned_ids.extend(ids)
# elif len(wp_letters)==2: #site2: ['a','a','b']
# count = 0
# for j in range(2):
# ids = [id for id, s1 in enumerate(sites1) if s1==list(wp_letters)[j]]
# if len(ids) == site2.count(list(wp_letters)[j]):
# solution_template[i][count:count+len(ids)] = ids
# assigned_ids.extend(ids)
# count += len(ids)
# #raise NotImplementedError("unsupported:", site2)
# #print(assigned_ids)
# ids = [id for id, site in enumerate(sites1) if id not in assigned_ids]
# all_permutations = list(itertools.permutations(ids))
# if len(all_permutations) > max_num:
# print("Warning: ignore some mapping: ", str(len(all_permutations)-max_num))
# print(solution_template)
# all_permutations = sample(all_permutations, max_num)
# print('allpermuataions')
# for x in all_permutations:
# print(x)
# #print(solution_template)
# for perm in all_permutations:
# solution = deepcopy(solution_template)
# perm = list(perm)
# valid = True
# count = 0
# for i, sol in enumerate(solution):
# if None in sol:
# for j, s2 in enumerate(sites2[i]):
# if sol[j] is None:
# if s2 == sites1[perm[count]]:
# solution[i][j] = deepcopy(perm[count])
# count += 1
# else:
# valid = False
# break
# if not valid:
# break
# if valid and new_solution(solution, unique_solutions):
# unique_solutions.append(solution)
# print('unique_solutions')
# for x in unique_solutions:
# print(x)
# return unique_solutions
def find_mapping(atom_sites, splitter, max_num=720):
"""
search for all mappings for a given splitter
Args:
atom_sites: list of wyc object
splitter: wyc_splitter object
max_num (int): maximum number of atomic mapping
Returns:
unique solutions
"""
eles = set([site.specie for site in atom_sites])
# loop over the mapping for each element
# then propogate the possible mapping via itertools.product
lists = []
for ele in eles:
# ids of atom sites
site_ids = [id for id, site in enumerate(atom_sites) if site.specie==ele]
# ids to be assigned
wp2_ids = [id for id, e in enumerate(splitter.elements) if e==ele]
letters1 = [atom_sites[id].wp.letter for id in site_ids]
letters2 = []
for id in wp2_ids:
wp2 = splitter.wp2_lists[id]
letters2.append([wp.letter for wp in wp2])
#print(ele, letters1, letters2)
res = find_mapping_per_element(letters1, letters2, max_num=720)
lists.append(res)
mappings = list(itertools.product(*lists))
# resort the mapping
ordered_mappings = []
for mapping in mappings:
ordered_mapping = [None]*len(splitter.wp2_lists)
for i, ele in enumerate(eles):
site_ids = [id for id, site in enumerate(atom_sites) if site.specie==ele]
count = 0
for j, wp2 in enumerate(splitter.wp2_lists):
if splitter.elements[j] == ele:
ordered_mapping[j] = [site_ids[m] for m in mapping[i][count]]
count += 1
#print("res", ordered_mapping)
ordered_mappings.append(ordered_mapping)
#if len(ordered_mappings)==0: import sys; sys.exit()
return ordered_mappings
def search_G1(G, rot, tran, pos, wp1, op):
if np.linalg.det(rot) < 1:
shifts = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,0],[1,0,1],[1,1,1]])
else:
shifts = np.array([[0,0,0]])
diffs = []
coords = []
for shift in shifts:
res = np.dot(rot, pos + shift) + tran.T
tmp = sym.search_cloest_wp(G, wp1, op, res)
diff = res - tmp
diff -= np.round(diff)
dist = np.linalg.norm(diff)
diffs.append(dist)
coords.append(tmp)
if dist < 1e-1:
break
diffs = np.array(diffs)
minID = np.argmin(diffs)
tmp = coords[minID]
tmp -= np.round(tmp)
return tmp, np.min(diffs)
def search_G2(rot, tran, pos1, pos2, cell=None, ortho=True):
"""
apply symmetry operation on pos1 when it involves cell change.
e.g., when the transformation is (a+b, a-b, c),
trial translation needs to be considered to minimize the
difference between the transformed pos1 and reference pos2
"""
pos1 -= np.round(pos1)
if np.linalg.det(rot) < 1:
shifts = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,0],[1,0,1],[1,1,1]])
elif not ortho:
shifts = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,0],[1,0,1],[1,1,1]])
else:
shifts = np.array([[0,0,0]])
shifts = np.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,0],[1,0,1],[1,1,1]])
dists = []
for shift in shifts:
res = np.dot(rot, pos1 + shift + tran.T)
diff = res - pos2
diff -= np.round(diff)
dist = np.linalg.norm(diff)
dists.append(dist)
if dist < 1e-1:
break
#print("TTTTTTTTT", dists, np.linalg.det(rot))
dists = np.array(dists)
dist = np.min(dists)
shift = shifts[np.argmin(dists)]
pos = np.dot(rot, pos1 + shift + tran.T)
diff = pos - pos2
diff -= np.round(diff)
if cell is not None:
diff = np.dot(diff, cell)
dist = np.linalg.norm(diff)
return pos, dist
def find_xyz(G2_op, coord, quadrant=[0,0,0]):
"""
Finds the x,y,z free parameter values for positions in the G_2 basis.
Args:
G2_op: a symmetry operation in G2
coord: the coordinate that matches G2_op
quadrant: a 3 item list (ex:[1,1,-1]) that contains information
on the orientation of the molecule
Returns:
G2_holder: The corresponding x,y,z parameters written in the G2 basis
"""
if np.all(quadrant==[0,0,0]):
for i,n in enumerate(coord):
if n>=0.:
quadrant[i]=1
else:
quadrant[i]=-1
#prepare the rotation matrix and translation vector seperately
G2_holder=[1,1,1]
G2_op=np.array(G2_op.as_dict()['matrix'])
rot_G2=G2_op[:3,:3].T
tau_G2=G2_op[:3,3]
b=coord-tau_G2
for k in range(3):
b[k]=b[k]%quadrant[k]
#eliminate any unused free parameters in G2
#The goal is to reduce the symmetry operations to be a full rank matrix
#any free parameter that is not used has its spot deleted from the rotation matrix and translation vector
for i,x in reversed(list(enumerate(rot_G2))):
if set(x)=={0.}:
G2_holder[i]=0
rot_G2=np.delete(rot_G2,i,0)
quadrant=np.delete(quadrant,i)
#eliminate any leftover empty rows to have full rank matrix
rot_G2=rot_G2.T
for i,x in reversed(list(enumerate(rot_G2))):
if set(x)=={0.}:
rot_G2=np.delete(rot_G2,i,0)
b=np.delete(b,i)
while len(rot_G2)!=0 and len(rot_G2)!=len(rot_G2[0]):
rot_G2=np.delete(rot_G2,len(rot_G2)-1,0)
b=np.delete(b,len(b)-1)
#Must come back later and add Schwarz Inequality check to eliminate any dependent vectors
#solves a linear system to find the free parameters
if set(G2_holder)=={0.}:
return np.array(G2_holder)
else:
try:
G2_basis_xyz=np.linalg.solve(rot_G2,b)
for i in range(len(quadrant)):
G2_basis_xyz[i]=G2_basis_xyz[i]%quadrant[i]
# print("!ST G2 HOLDER")
for i in range(G2_holder.count(1)):
G2_holder[G2_holder.index(1)]=G2_basis_xyz[i]
# print('second G## holder')
return np.array(G2_holder)
except:
raise RuntimeError('unable to find free parameters using this operation')
def new_structure(struc, refs):
"""
check if struc is already in the reference solutions
"""
g1 = struc.group.number
pmg1 = struc.to_pymatgen()
for ref in refs:
g2 = ref.group.number
if g1 == g2:
pmg2 = ref.to_pymatgen()
if sm.StructureMatcher().fit(pmg1, pmg2):
return False
return True
def get_best_match(positions, ref, cell):
"""
find the best match with the reference from a set of positions
Args:
positions: N*3 array
ref: 1*3 array
Returns:
position: matched position
id: matched id
"""
diffs = positions - ref
diffs -= np.round(diffs)
diffs = np.dot(diffs, cell)
dists = np.linalg.norm(diffs, axis=1)
id = np.argmin(dists)
return positions[id], dists[id]
def check_freedom(G, solutions):
"""
check if the solutions are valid
a special WP such as (0,0,0) cannot be occupied twice
"""
valid_solutions = []
#G = sym.Group(G)
for solution in solutions:
sites = []
for s in solution:
sites.extend(s)
if G.is_valid_combination(sites):
valid_solutions.append(solution)
return valid_solutions
def check_lattice(G, trans, struc, tol=1.0, a_tol=10):
"""
check if the lattice mismatch is big
used to save some computational cost
"""
matrix = np.dot(trans.T, struc.lattice.get_matrix())
l1 = Lattice.from_matrix(matrix)
l2 = Lattice.from_matrix(matrix, ltype=G.lattice_type)
(a1,b1,c1,alpha1,beta1,gamma1)=l1.get_para(degree=True)
(a2,b2,c2,alpha2,beta2,gamma2)=l2.get_para(degree=True)
abc_diff = np.abs(np.array([a2-a1, b2-b1, c2-c1])).max()
ang_diff = np.abs(np.array([alpha2-alpha1, beta2-beta1, gamma2-gamma1])).max()
#print(l1, l2)
if abc_diff > tol or ang_diff > a_tol:
return False
else:
return True
def check_compatibility(G, relation, sites, elements):
"""
find the compatible splitter to let the atoms of subgroup H fit the group G.
Args:
G: the target space group with high symmetry
relation: a dictionary to describe the relation between G and H
"""
#G = sym.Group(G)
#results = {}
wyc_list = [(str(x.multiplicity)+x.letter) for x in G]
wyc_list.reverse()
good_splittings_list=[]
# A lot of integer math below.
# The goal is to find all the integer combinations of supergroup
# wyckoff positions with the same number of atoms
# each element is solved one at a time
for i in range(len(elements)):
site = np.unique(sites[i])
site_counts = [sites[i].count(x) for x in site]
possible_wyc_indices = []
# the sum of all positions should be fixed.
total_units = 0
for j, x in enumerate(site):
total_units += int(x[:-1])*site_counts[j]
# collect all possible supergroup transitions
# make sure all sites are included in the split
for j, split in enumerate(relation):
# print(j, split)
if np.all([x in site for x in split]):
possible_wyc_indices.append(j)
# for the case of 173 ['2b'] -> 176
# print(possible_wyc_indices) [2, 3, 5]
# a vector to represent the possible combinations of positions
# when the site is [6c, 2b]
# the split from [6c, 6c] to [12i] will be counted as [2,0].
# a entire split from [6c, 6c, 6c, 2b] will need [3, 1]
possible_wycs = [wyc_list[x] for x in possible_wyc_indices]
blocks = [np.array([relation[j].count(s) for s in site]) for j in possible_wyc_indices]
block_units = [sum([int(x[:-1])*block[j] for j,x in enumerate(site)]) for block in blocks]
# print(possible_wycs) # ['2c', '2d', '4f']
# print(blocks) # [array([1]), array([1]), array([2])]
# print(block_units) # [2, 2, 4]
# the position_block_units stores the total sum multiplicty
# from the available G's wyckoff positions.
# below is a brute force search for the valid combinations
combo_storage = [np.zeros(len(block_units))]
good_list = []
# print(combo_storage)
# print(block_units)
# print(blocks)
# print(possible_wycs)
# print(total_units)
# print(site_counts)
while len(combo_storage)!=0:
holder = []
for j, x in enumerate(combo_storage):
for k in range(len(block_units)):
trial = np.array(deepcopy(x)) # trial solution
trial[k] += 1
if trial.tolist() in holder:
continue
sum_units = np.dot(trial, block_units)
if sum_units > total_units:
continue
elif sum_units < total_units:
holder.append(trial.tolist())
else:
tester = np.zeros(len(site_counts))
for l, z in enumerate(trial):
tester += z*blocks[l]
if np.all(tester == site_counts):
G_sites = []
for l, number in enumerate(trial):
if number==0:
continue
elif number==1:
G_sites.append(possible_wycs[l])
else:
for i in range(int(number)):
G_sites.append(possible_wycs[l])
if G_sites not in good_list:
good_list.append(G_sites)
combo_storage=holder
if len(good_list)==0:
# print("cannot find the valid split, quit the search asap")
return None
else:
good_splittings_list.append(good_list)
# if len(good_splittings_list[0])==1:
# print(good_splittings_list[0])
return good_splittings_list
def search_paths(H, G, max_layers=5):
"""
Search function throws away paths that take a roundabout route. If
path1: a>>e>>f>>g and
path2: a>>b>>c>>e>>f>>g,
path 2 will not be counted, as there is already a shorter path from a to e.
Args:
H: starting structure IT group number
G: final supergroup IT Group number
max_layers: the number of supergroup calculations needed.
Return:
list of possible paths ordered from smallest to biggest
"""
layers={}
layers[0]={'groups':[G], 'subgroups':[]}
final=[]
traversed=[]
# searches for every subgroup of the groups from the previous layer.
# Stores the possible groups of each layer and their subgroups
# in a dictionary to avoid redundant calculations.
# Starts from G and goes down to H
for l in range(1,max_layers+1):
previous_layer_groups=layers[l-1]['groups']
groups=[]
subgroups=[]
for g in previous_layer_groups:
subgroup_numbers=np.unique(sym.Group(g).get_max_subgroup_numbers())
# If a subgroup list has been found with H, will trace
# a path through the dictionary to build the path
if H in subgroup_numbers:
paths=[[g]]
for j in reversed(range(l-1)):
holder=[]
for path in paths:
tail_number=path[-1]
indices=[]
for idx, numbers in enumerate(layers[j]['subgroups']):
if tail_number in numbers:
indices.append(idx)
for idx in indices:
holder.append(path+[layers[j]['groups'][idx]])
paths=deepcopy(holder)
final.extend(paths)
subgroups.append([])
#will continue to generate a layer of groups if the path to H has not been found.
else:
subgroups.append(subgroup_numbers)
[groups.append(x) for x in subgroup_numbers if (x not in groups) and (x not in traversed)]
traversed.extend(groups)
layers[l]={'groups':deepcopy(groups),'subgroups':[]}
layers[l-1]['subgroups']=deepcopy(subgroups)
return final
def new_path(path, paths):
"""
check if struc is already in the reference solutions
"""
for ref in paths:
if path[:len(ref)] == ref:
return False
return True
class supergroups():
"""
Class to search for the feasible transition to a given super group
Args:
struc: pyxtal structure
G (int): the desired super group number
path: the path to connect G and H, e.g, [62, 59, 74]
d_tol (float): tolerance for largest atomic displacement
show (bool): whether or not show the detailed process
"""
def __init__(self, struc, G=None, path=None, d_tol=1.0, max_per_G=100, show=False):
self.struc0 = struc
self.show = show
self.d_tol = d_tol
self.max_per_G = max_per_G
if path is None:
paths = search_paths(struc.group.number, G, max_layers=5)
else:
paths = [path]
print("{:d} paths will be checked".format(len(paths)))
self.strucs = None
failed_paths = []
for i, p in enumerate(paths):
status = "path{:2d}: {:s}, ".format(i, str(p))
#print(status)
if new_path(p, failed_paths):
strucs, w_path, valid = self.struc_along_path(p)
status += "stops at: {:s}".format(str(w_path))
if valid:
self.strucs = strucs
if len(strucs) > len(p):
self.path = [self.struc0.group.number] + p
else:
self.path = p
break
else:
failed_paths.append(w_path)
else:
status += "skipped..."
#print(status)
def __str__(self):
s = "\nTransition to super group: "
if self.strucs is None:
s += "Unsuccessful, check your input"
else:
s += "{:d}".format(self.path[0])
for i, p in enumerate(self.path[1:]):
s += " -> {:d}[{:5.3f}]".format(p, self.strucs[i+1].disp)
s += '\n'
for struc in self.strucs:
s += str(struc)
return s
def __repr__(self):
return str(self)
def struc_along_path(self, path):
"""
search for the super group structure along a given path
"""
strucs = []
G_strucs = [self.struc0]
working_path = []
for G in path:
working_path.append(G)
H = G_strucs[0].group.number
#print(G, H)
#if G != H:
if sym.get_point_group(G) == sym.get_point_group(H):
group_type = 'k'
else:
group_type = 't'
for G_struc in G_strucs:
my = supergroup(G_struc, [G], group_type)
solutions = my.search_supergroup(self.d_tol, self.max_per_G)
new_G_strucs = my.make_supergroup(solutions, show_detail=self.show)
if len(new_G_strucs) > 0:
strucs.append(G_struc)
G_strucs = new_G_strucs
break
if len(new_G_strucs) == 0:
break
# add the final struc
if len(new_G_strucs) > 0:
ds = [st.disp for st in new_G_strucs]
minID = np.argmin(np.array(ds))
strucs.append(new_G_strucs[minID])
valid = True
else:
valid = False
return strucs, working_path, valid
class supergroup():
"""
Class to find the structure with supergroup symmetry
Args:
struc: pyxtal structure
G: list of possible supergroup numbers, default is None
group_type: `t` or `k`
"""
def __init__(self, struc, G=None, group_type='t'):
# initilize the necesary parameters
self.group_type = group_type
self.error = False
# extract the supergroup information
self.wyc_supergroups = struc.group.get_min_supergroup(group_type, G)
# list of all alternative wycsets
strucs = struc.get_alternatives()
for struc in strucs:
# group the elements, sites, positions
elements = []
sites = []
for at_site in struc.atom_sites:
e = at_site.specie
site = str(at_site.wp.multiplicity) + at_site.wp.letter
if e not in elements:
elements.append(e)
sites.append([site])
else:
id = elements.index(e)
sites[id].append(site)
# search for the compatible solutions
solutions = []
for idx in range(len(self.wyc_supergroups['supergroup'])):
G = sym.Group(self.wyc_supergroups['supergroup'][idx])
relation = self.wyc_supergroups['relations'][idx]
id = self.wyc_supergroups['idx'][idx]
trans = np.linalg.inv(self.wyc_supergroups['transformation'][idx][:,:3])
if check_lattice(G, trans, struc):
#print(G, relation)
results = check_compatibility(G, relation, sites, elements)
if results is not None:
sols = list(itertools.product(*results))
trials = check_freedom(G, sols)
sol = {'group': G, 'id': id, 'splits': trials}
solutions.append(sol)
if len(solutions) > 0:
# exit if one solution is found
break
if len(solutions) == 0:
self.solutions = []
self.error = True
#print("No compatible solution exists")
else:
#print(struc)
self.sites = sites
self.elements = elements
self.struc = struc
self.solutions = solutions
self.cell = struc.lattice.matrix
def search_supergroup(self, d_tol=1.0, max_per_G=2500):
"""
search for valid supergroup transition
Args:
d_tol (float): tolerance for atomic displacement
max_per_G (int): maximum number of possible solution for each G
Returns:
valid_solutions: dictionary
"""
#self.d_tol = d_tol
valid_solutions = []
if len(self.solutions) > 0:
# extract the valid
for sols in self.solutions:
G, id, sols = sols['group'], sols['id'], sols['splits']
if len(sols) > max_per_G:
print("Warning: ignore some solutions: ", len(sols)-max_per_G)
sols = sample(sols, max_per_G)
#sols = [[['2f'], ['1a'], ['4n']]]
#sols=[(['8c'], ['4a', '4b'], ['4b', '8c', '8c'])]
for sol in sols:
#print(sol)
mae, disp, mapping, sp = self.get_displacement(G, id, sol, d_tol*1.1)
#print(G.number, sol, mae, disp)
if mae < d_tol:
valid_solutions.append((sp, mapping, disp, mae))
return valid_solutions
def make_supergroup(self, solutions, once=False, show_detail=False):
"""
create supergroup structures based on the list of solutions
Args:
solutions: list of tuples (splitter, mapping, disp)
show_detail (bool): print out the detail
Returns:
list of pyxtal structures
"""
G_strucs = []
if len(solutions) > 0:
if once:
disps = np.array([sol[-1] for sol in solutions])
ID = np.argmin(disps)
solutions = [solutions[ID]]
for solution in solutions:
(sp, mapping, disp, mae) = solution
lat1 = np.dot(np.linalg.inv(sp.R[:3,:3]).T, self.struc.lattice.matrix)
lattice = Lattice.from_matrix(lat1, ltype=sp.G.lattice_type)
details = self.symmetrize(sp, mapping, disp)
coords_G1, coords_G2, coords_H1, elements = details
G_struc = self.struc.copy()
G_struc.group = sp.G
G_sites = []
for i, wp in enumerate(sp.wp1_lists):
pos = coords_G1[i]
pos -= np.floor(pos)
pos1 = sym.search_matched_position(sp.G, wp, pos)
if pos1 is not None:
site = atom_site(wp, pos1, sp.elements[i])
G_sites.append(site)
else:
print(">>>>>>>>>>>>>>")
print(self.struc.group.number)
print(pos)
print(wp)
print(">>>>>>>>>>>>>>")
raise RuntimeError("cannot assign the right wp")
G_struc.atom_sites = G_sites
G_struc.source = 'supergroup {:6.3f}'.format(mae)
G_struc.lattice = lattice
G_struc.numIons *= int(round(np.abs(np.linalg.det(sp.R[:3,:3]))))
G_struc._get_formula()
G_struc.disp = mae
if new_structure(G_struc, G_strucs):
G_strucs.append(G_struc)
if show_detail:
G = sp.G.number
self.print_detail(G, coords_H1, coords_G2, elements, disp)
print(G_struc)
return G_strucs
def get_displacement(self, G, split_id, solution, d_tol):
"""
        For a given solution, search for the possible supergroup structure
Args:
G: group object
split_id (int): integer
solution (list): e.g., [['2d'], ['6h'], ['2c', '6h', '12i']]
d_tol (float): tolerance
Returns:
            mae: mean absolute atomic displacement
disp: overall cell translation
"""
sites_G = []
elements = []
muls = []
for i, e in enumerate(self.elements):
sites_G.extend(solution[i])
elements.extend([e]*len(solution[i]))
muls.extend([int(sol[:-1]) for sol in solution[i]])
# resort the sites_G by multiplicity
ids = np.argsort(np.array(muls))
elements = [elements[id] for id in ids]
sites_G = [sites_G[id] for id in ids]
#print(G, self.struc.group.number, sites_G)
splitter = wyckoff_split(G, split_id, sites_G, self.group_type, elements)
mappings = find_mapping(self.struc.atom_sites, splitter)
dists = []
disps = []
masks = []
if len(mappings) > 0:
for mapping in mappings:
dist, disp, mask = self.symmetrize_dist(splitter, mapping, None, None, d_tol)
dists.append(dist)
disps.append(disp)
masks.append(mask)
dists = | np.array(dists) | numpy.array |
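# Standalone illustration of the multiplicity sort performed in
# get_displacement() above: Wyckoff labels such as '12i' are ordered by their
# leading multiplicity before being handed to the splitter.
import numpy as np
sites = ['6h', '2d', '12i']
muls = [int(s[:-1]) for s in sites]          # [6, 2, 12]
order = np.argsort(np.array(muls))           # [1, 0, 2]
print([sites[i] for i in order])             # ['2d', '6h', '12i']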
import pyopencl as cl
import numpy as np
import matplotlib.pyplot as plt
kernel_disp = '''
void mattranspose(__global float* mat) {
float temp;
temp = mat[1];
mat[1] = mat[3];
mat[3] = temp;
temp = mat[2];
mat[2] = mat[6];
mat[6] = temp;
temp = mat[5];
mat[5] = mat[7];
mat[7] = temp;
//return mat;
}
float3 matmulvec(__global float* mat, float3 vec) {
float3 out;
out.x = mat[0] * vec.x + mat[1] * vec.y + mat[2] * vec.z;
out.y = mat[3] * vec.x + mat[4] * vec.y + mat[5] * vec.z;
out.z = mat[6] * vec.x + mat[7] * vec.y + mat[8] * vec.z;
return out;
}
__kernel void difference(__global float *in_out, __global float *in, float dz) {
int xid = get_global_id(1);
int zid = get_global_id(0);
int xsiz = get_global_size(1);
int id = xid + zid * xsiz;
in_out[id] = (-in[id] + in_out[id]) / dz;//should this change sign???
}
__kernel void displacement(__global float *image_out,
__global float *c2d, __global float *d2c,
float pix2nm, float3 u, float3 g, float3 b, float3 b_unit, float3 b_edge_d,
float b_screw, float dt, float dz, float nu,
float phi, float psi, float theta) {
// x-coordinate
int xid = get_global_id(1);
// z-coordinate
int zid = get_global_id(0);
// x-dimension
int xsiz = get_global_size(1);
// z-dimension
int zsiz = get_global_size(0);
// position of this pixel in the 1D array
int id = xid + zid * xsiz;
// vector from dislocation to this pixel, in nm
float xR = ((float) xid + 0.5f - (float) xsiz / 2);// / pix2nm;
float til = sin(phi) + (float) xsiz * tan(psi)/( 2* (float) zsiz);
float yR = (((float) zid + 0.5f + dz - (float) zsiz / 2)*til*dt );// / pix2nm + xR*tan(theta);
float3 r_d = {xR, yR, 0.0f};
float r_mag = sqrt(dot(r_d, r_d));
//cos(theta) & sin(theta) relative to Burgers vector
float ct = dot(r_d, b_unit) / r_mag; //-ve of v2.1
float3 tz = {0.0f,0.0f,1.0f};
float st = dot(tz, cross(b_unit, r_d) / r_mag);
//Screw displacement in dislocation frame
float d_screw = b_screw * ( atan(r_d.y / (r_d.x)) - M_PI_F * (r_d.x < 0) ) / (2.0f * M_PI_F);
float3 screw_1 = (float3) {0.0f, 0.0f, d_screw};
//edge part 1 in dislocation frame
float3 edge_1 = b_edge_d * ct * st / (2.0f * M_PI_F * (1.0f - nu));//error fixed from v2.1, 2 not 4
//edge part 2 in dislocation frame
float3 edge_2 = cross(b, u) * (((2.0f - 4.0f * nu) * log(r_mag) + (ct * ct - st * st)) / (8.0f * M_PI_F * (1.0f - nu)));
float3 r_sum = matmulvec(d2c, (screw_1 + edge_1 + edge_2) );
//
// image_out[id] += r_sum.x;
image_out[id] += dot(g, r_sum);
}
'''
kernel_image = '''
float2 complexconj(float2 a) {
return (float2) {a.x, -a.y};
}
float2 complexmul(float2 a, float2 b) {
return (float2) { a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x };
}
float4 matmulvec(float2* mat, float4 vec) {
float4 out;
out.xy = complexmul(mat[0], vec.xy) + complexmul(mat[1], vec.zw);
out.zw = complexmul(mat[2], vec.xy) + complexmul(mat[3], vec.zw);
return out;
}
__kernel void amplitude2intensity(__global float4 *in_out) {
int xid = get_global_id(0);
int yid = get_global_id(1);
int xsiz = get_global_size(1);
int id = xid * xsiz + yid;
float2 f_0 = in_out[id].xy;
float2 f_1 = in_out[id].zw;
float2 bf = complexmul(f_0, complexconj(f_0));
float2 df = complexmul(f_1, complexconj(f_1));
in_out[id] = (float4) {bf, df};
}
__kernel void propagate_wave(__global float4 *in_out, __global float *sxz,
float3 g, float3 b, float3 nS,
int zsiz, float t, float dt, float pix2nm,
int zlen, float s, float2 x_g, float x_0_i,
float phi, float psi, float theta, int k) {
float eps = 0.000000000001f;
int xid = get_global_id(0);
int xsiz = get_global_size(0);
int yid = get_global_id(1);
int ysiz = get_global_size(1);
// position of the current pixel in the linear array
int id = xid * ysiz + yid;
//top of the foil for this pixel
float top = (zsiz-zlen)*( (float) yid / (float) ysiz);
int h = (int) top;
float m = top - h;
//position of pixels in linear array
int id_2 = (h + k) * xsiz + xid;
int id_3 = (h + k + 1) * xsiz + xid;
float s_local = s + (1.0f - m) * sxz[id_2] + m * sxz[id_3];
float alpha = 0.0f;
//if (xid > firs && xid < las && h + k - (int) (z_range / 2.0f) == 0.0f)
// alpha = 2.0f * M_PI_F * dot(g, b);
// Howie-Whelan bit
float x_g_r = x_g.x;
float x_g_i = x_g.y;
s_local += eps;
float s_local2 = s_local * s_local;
float recip_x_g_r = 1.0f / x_g_r;
float recip_x_0_i_2 = 0.5f / x_0_i;
float gamma_term = sqrt(s_local2 + recip_x_g_r * recip_x_g_r);
float2 gamma = (float2) { (s_local - gamma_term) * 0.5f, (s_local + gamma_term) * 0.5f };
float q_term = 0.5f / (x_g_i * sqrt(1.0f + (s_local * x_g_r) * (s_local * x_g_r)));
float2 q = (float2) { recip_x_0_i_2 - q_term, recip_x_0_i_2 + q_term };
float beta_2 = 0.5f * acos( s_local * x_g_r / sqrt( 1.0f + s_local2 * x_g_r * x_g_r ) );
float2 sin_beta_2 = (float2) {sin(beta_2), 0.0f};
float2 cos_beta_2 = (float2) {cos(beta_2), 0.0f};
float2 exp_alpha = (float2) {cos(alpha), sin(alpha)};
float2 big_c[4] = {cos_beta_2, sin_beta_2, -sin_beta_2.x * exp_alpha, cos_beta_2.x * exp_alpha};
float2 big_c_t[4] = {big_c[0], big_c[2], big_c[1], big_c[3]};
float2 big_g_g = 2.0f * M_PI_F * gamma * dt ;
float2 big_g_q = -2.0f * M_PI_F * q * dt ;
float2 big_g_0 = exp(big_g_q.x) * (float2) { cos(big_g_g.x), sin(big_g_g.x) };
float2 big_g_3 = exp(big_g_q.y) * (float2) { cos(big_g_g.y), sin(big_g_g.y) };
float2 big_g[4] = {big_g_0, 0.0f, 0.0f, big_g_3};
float4 out = matmulvec(big_c, matmulvec(big_g, matmulvec(big_c_t, in_out[id])));
// float4 out = {top,0.0f,yid, 0.0f};
in_out[id] = out;
}
'''
class ClHowieWhelan:
def __init__(self):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
self.disp_r_prog = cl.Program(self.ctx, kernel_disp).build()
self.image_prog = cl.Program(self.ctx, kernel_image).build()
self.sxz_buf = None
def calculate_deviations(self, xsiz, zsiz, pix2nm, dt, u, g, b, c2d, d2c, nu, phi, psi, theta):
#fills up sxz array
#set up buffers to pass between python and C++
buf_size = xsiz * (zsiz+1) * 4
mf = cl.mem_flags
self.sxz_buf = cl.Buffer(self.ctx, mf.READ_WRITE, buf_size)
out_buf_2 = cl.Buffer(self.ctx, mf.READ_WRITE, buf_size)
c2d_buf = cl.Buffer(self.ctx, mf.READ_ONLY, c2d.size * 4)
d2c_buf = cl.Buffer(self.ctx, mf.READ_ONLY, d2c.size * 4)
shape = np.array([(zsiz+1), xsiz], dtype=np.int32)
# set up contiguous buffers
c2d_ = np.ascontiguousarray(c2d.ravel().astype(np.float32))
cl.enqueue_copy(self.queue, c2d_buf, c2d_)
d2c_ = np.ascontiguousarray(d2c.ravel().astype(np.float32))
cl.enqueue_copy(self.queue, d2c_buf, d2c_)
# fill with zeros as we add to initial buffer (makes handling the 2 bs easier)
cl.enqueue_fill_buffer(self.queue, self.sxz_buf, np.float32(0.0), 0, buf_size)
cl.enqueue_fill_buffer(self.queue, out_buf_2, np.float32(0.0), 0, buf_size)
# the actual calculation
#small change in z-coordinate to get derivative
dz = 0.0
#calculate x-z array of displacements
self.displace_r(shape, self.sxz_buf, c2d_buf, d2c_buf, pix2nm, u, g, b, c2d, nu, phi, psi, theta, dt, dz)
# calculate second array at a small z-shift
dz = 0.01
self.displace_r(shape, out_buf_2, c2d_buf, d2c_buf, pix2nm, u, g, b, c2d, nu, phi, psi, theta, dt, dz)
# subtract one from the other to get the gradient
dz_32 = np.float32(dz*dt)
self.disp_r_prog.difference(self.queue, shape, None, self.sxz_buf, out_buf_2, dz_32)
def displace_r(self, shape, out_buf, c2d_buf, d2c_buf, pix2nm, u, g, b, c2d, nu, phi, psi, theta, dt, dz):
        #this simply converts the variables into float32 type and sends them off to the OpenCL kernel
b_screw = np.dot(b, u)
b_edge = c2d @ (b - b_screw * u) # NB a vector
b_unit = b_edge / (np.dot(b_edge, b_edge) ** 0.5)
gD = c2d @ g
# float3 is actually a float4 in disguise?
nu_32 = np.float32(nu)
dt_32 = np.float32(dt)
dz_32 = np.float32(dz)
b_screw_32 = np.float32(b_screw)
pix2nm_32 = np.float32(pix2nm)
phi_32 = np.float32(phi)
psi_32 = np.float32(psi)
theta_32 = np.float32(theta)
u_32 = np.append(u, 0.0).astype(np.float32)
g_32 = np.append(g, 0.0).astype(np.float32)
b_32 = np.append(b, 0.0).astype(np.float32)
b_unit_32 = np.append(b_unit, 0.0).astype(np.float32)
b_edge_32 = np.append(b_edge, 0.0).astype(np.float32)
self.disp_r_prog.displacement(self.queue, shape, None, out_buf,
c2d_buf, d2c_buf,
pix2nm_32, u_32, g_32, b_32, b_unit_32, b_edge_32, b_screw_32,
dt_32, dz_32, nu_32, phi_32, psi_32, theta_32)
def get_sxz_buffer(self, xsiz, zsiz):
output = | np.zeros((zsiz, xsiz), dtype=np.float32) | numpy.zeros |
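# Hedged sketch (not from the original class) of reading the deviation buffer
# back into numpy once calculate_deviations() has run; `hw` is an assumed
# ClHowieWhelan instance and the shape mirrors buf_size above:
#   out = np.zeros((zsiz + 1, xsiz), dtype=np.float32)
#   cl.enqueue_copy(hw.queue, out, hw.sxz_buf)
# The buffer holds the x-z map built from the two displacement passes in
# calculate_deviations(), i.e. the finite-difference derivative used as the
# local deviation parameter in the propagation kernel.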
import numpy as np
import torch
import torch.optim as optim
from dataset import *
from train import train, loss_func, test
from model import NN, CNN
def main():
dataset = 'cifar10'
ite = 10
num_nu_data = 1000
num_de_data = 1000
Net = CNN
learning_rate = 1e-4
epoch = 500
batchsize = 256
seed = 2020
train_loss_pu = np.zeros((ite, epoch))
test_loss_pu = np.zeros((ite, epoch))
test_auc_pu = np.zeros((ite, epoch))
mean_dr_pu = np.zeros((ite, epoch))
train_loss_ulsif = np.zeros((ite, epoch))
test_loss_ulsif = np.zeros((ite, epoch))
test_auc_ulsif = np.zeros((ite, epoch))
mean_dr_ulsif = np.zeros((ite, epoch))
train_loss_kliep = np.zeros((ite, epoch))
test_loss_kliep = np.zeros((ite, epoch))
test_auc_kliep = np.zeros((ite, epoch))
mean_dr_kliep = np.zeros((ite, epoch))
train_loss_nnpu = np.zeros((ite, epoch))
test_loss_nnpu = np.zeros((ite, epoch))
test_auc_nnpu = np.zeros((ite, epoch))
mean_dr_nnpu = np.zeros((ite, epoch))
train_loss_nnulsif = np.zeros((ite, epoch))
test_loss_nnulsif = np.zeros((ite, epoch))
test_auc_nnulsif = np.zeros((ite, epoch))
mean_dr_nnulsif = np.zeros((ite, epoch))
train_loss_boundedulsif = np.zeros((ite, epoch))
test_loss_boundedulsif = np.zeros((ite, epoch))
test_auc_boundedulsif = np.zeros((ite, epoch))
mean_dr_boundedulsif = np.zeros((ite, epoch))
for i in range(ite):
np.random.seed(seed)
x_train, t_train, x_test, t_test = load_dataset(dataset)
dim = x_train.shape[1]
perm = np.random.permutation(len(x_train))
x_train_de = x_train[perm[:num_de_data]]
x_train_nu = x_train[t_train==1]
perm = np.random.permutation(len(x_train_nu))
x_train_nu = x_train_nu[perm[:num_nu_data]]
x_data = | np.concatenate([x_train_nu, x_train_de], axis=0) | numpy.concatenate |
#!/usr/bin/env python3
from collections import defaultdict
from heapq import heappush, heappop
import sys
import numpy as np
sys.setrecursionlimit(10**6)
input = sys.stdin.buffer.readline
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
def debug(*x):
print(*x)
def get_longest(start, values, next, head, longest):
ret = longest[start]
if ret != -1:
return ret
ret = 0
p = head[start]
while p:
v = values[p]
x = get_longest(v, values, next, head, longest) + 1
if x > ret:
ret = x
p = next[p]
longest[start] = ret
return ret
def solve(N, M, data):
longest = np.repeat(-1, N + 1)
values = | np.zeros(M + 1, np.int32) | numpy.zeros |
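# Self-contained illustration of the array-based adjacency list that
# get_longest() walks above (index 0 is the "no more edges" sentinel; the
# arrays play the roles of values/next/head in the original code):
import numpy as np
N, M = 3, 2                                  # 3 nodes, edges 1->2 and 2->3
values = np.zeros(M + 1, np.int32)
nxt = np.zeros(M + 1, np.int32)
head = np.zeros(N + 1, np.int32)
for k, (u, v) in enumerate([(1, 2), (2, 3)], start=1):
    values[k] = v
    nxt[k] = head[u]
    head[u] = k
longest = np.repeat(-1, N + 1)
print(get_longest(1, values, nxt, head, longest))   # 2, i.e. path 1->2->3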
import numpy as np
def braille():
return {
'a' : np.array([[1, 0], [0, 0], [0, 0]], dtype=bool),
'b' : np.array([[1, 0], [1, 0], [0, 0]], dtype=bool),
'c' : np.array([[1, 1], [0, 0], [0, 0]], dtype=bool),
'd' : np.array([[1, 1], [0, 1], [0, 0]], dtype=bool),
'e' : np.array([[1, 0], [0, 1], [0, 0]], dtype=bool),
'f' : np.array([[1, 1], [1, 0], [0, 0]], dtype=bool),
'g' : | np.array([[1, 1], [1, 1], [0, 0]], dtype=bool) | numpy.array |
# need to have a more uniform method to exchange (pack/unpack) 1D and 2D PROCESSED data with hdf5
# type of data: Data1d, MatrixWithCoordinates (not just simple numpy arrays)
import pylab as plt
import h5py
import numpy as np
import time,datetime
import os,copy,subprocess,re
import json,pickle,fabio
import multiprocessing as mp
from py4xs.slnxs import Data1d,average,filter_by_similarity,trans_mode,estimate_scaling_factor
from py4xs.utils import common_name,max_len,Schilling_p_value
from py4xs.detector_config import create_det_from_attrs
from py4xs.local import det_names,det_model,beamline_name # e.g. "_SAXS": "pil1M_image"
from py4xs.data2d import Data2d,Axes2dPlot,MatrixWithCoords,DataType
from py4xs.utils import run
from itertools import combinations
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import UnivariateSpline as uspline
from scipy.integrate import simpson
def lsh5(hd, prefix='', top_only=False, silent=False, print_attrs=True):
""" list the content of a HDF5 file
hd: a handle returned by h5py.File()
prefix: use to format the output when lsh5() is called recursively
top_only: returns the names of the top-level groups
silent: suppress printouts if True
"""
if top_only:
tp_grps = list(hd.keys())
if not silent:
print(tp_grps)
return tp_grps
for k in list(hd.keys()):
print(prefix, k)
if isinstance(hd[k], h5py.Group):
if print_attrs:
print(list(hd[k].attrs.items()))
lsh5(hd[k], prefix+"=", silent=silent, print_attrs=print_attrs)
def create_linked_files(fn, fnlist):
""" create a new file to links to data in existing files in the fn_list
for now assume that all files have the same detector/qgrid configuration without checking
"""
ff = h5py.File(fn, 'w')
for s in fnlist:
fs = h5py.File(s, "r")
if len(ff.attrs)==0:
for an in fs.attrs:
ff.attrs[an] = fs.attrs[an]
ff.flush()
for ds in lsh5(fs, top_only=True, silent=True):
ff[ds] = h5py.ExternalLink(s, ds)
fs.close()
ff.close()
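# Hedged usage sketch for create_linked_files() above (file names are made up):
#   create_linked_files("merged.h5", ["run1.h5", "run2.h5"])
#   with h5py.File("merged.h5", "r") as f:
#       lsh5(f, top_only=True)   # linked top-level groups resolve transparently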
def integrate_mon(em, ts, ts0, exp):
""" integrate monitor counts
monitor counts are given by em with timestamps ts
ts0 is the timestamps on the exposures, with duration of exp
"""
ffe = interp1d(ts, em)
em0 = []
for t in ts0:
tt = np.concatenate(([t], ts[(ts>t) & (ts<t+exp)], [t+exp]))
ee = ffe(tt)
em0.append(simpson(ee, tt))
return np.asarray(em0)/exp
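# Quick sanity check of integrate_mon() above (illustrative): a constant
# monitor signal should average back to itself over any exposure window.
#   ts = np.linspace(0.0, 10.0, 101)
#   em = np.full_like(ts, 2.0)
#   integrate_mon(em, ts, ts0=np.array([1.0, 4.0]), exp=2.0)   # ~[2.0, 2.0]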
def pack_d1(data, ret_trans=True):
""" utility function to creat a list of [intensity, error] from a Data1d object
or from a list of Data1s objects
"""
if isinstance(data, Data1d):
if ret_trans:
return | np.asarray([data.data,data.err]) | numpy.asarray |
import numpy as np
import scipy as sp
import networkx as nx
import bct
from scipy.spatial import distance
import pandas as pd
"""
beta could be 0.5 or -0.5
"""
def bonachic_centrality_und(CIJ, beta=0.5):
alfa = 1
e = np.ones((1, CIJ.shape[0]))
I = np.identity(CIJ.shape[0])
s = beta*CIJ
g = I - s
r = np.linalg.inv(g)
b = np.dot(np.dot(alfa*e, r), CIJ)
p = np.transpose(b)
return p
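# Worked example for bonachic_centrality_und() above: on a 3-node chain the
# middle node gets the highest score.
#   A = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
#   bonachic_centrality_und(A, beta=0.5)   # -> [[4.], [6.], [4.]]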
"""
binarize
"""
def binarize(w, copy=True):
if copy:
w = w.copy()
w[w != 0] = 1
return w
"""
betweenness_wei
"""
def betweenness_wei(G):
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
for u in range(n):
D = np.tile(np.inf, (n,))
D[u] = 0 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
S = np.ones((n,), dtype=bool) # distance permanence
P = np.zeros((n, n)) # predecessors
Q = np.zeros((n,), dtype=int) # indices
q = n - 1 # order of non-increasing distance
G1 = G.copy()
V = [u]
while True:
S[V] = 0 # distance u->V is now permanent
G1[:, V] = 0 # no in-edges as already shortest
for v in V:
Q[q] = v
q -= 1
W, = np.where(G1[v, :]) # neighbors of v
for w in W:
Duw = D[v] + G1[v, w] # path length to be tested
if Duw < D[w]: # if new u->w shorter than old
D[w] = Duw
NP[w] = NP[v] # NP(u->w) = NP of new path
P[w, :] = 0
P[w, v] = 1 # v is the only predecessor
elif Duw == D[w]: # if new u->w equal to old
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is also predecessor
if D[S].size == 0:
break # all nodes were reached
if np.isinf(np.min(D[S])): # some nodes cannot be reached
Q[:q + 1], = np.where(np.isinf(D)) # these are first in line
break
V, = np.where(D == np.min(D[S]))
DP = np.zeros((n,))
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in | np.where(P[w, :]) | numpy.where |
import time
import numpy as np
import collections
class PrettyTable():
'''
For format output the simplex table
'''
def __init__(self):
# all element is str
self.table = []
return None
def add_row(self, row):
self.table.append(row)
return None
def pretty(self, hlines=[], vlines=[], col_width='c'):
n_row, n_col = len(self.table), len(self.table[0])
for i, e in enumerate(hlines):
if e < 0: hlines[i] = n_row + e - 1
for i, e in enumerate(vlines):
if e < 0: vlines[i] = n_col + e - 1
        # column width: keep the requested layout mode before the name is reused
        width_mode = col_width
        col_width = [0 for j in range(n_col)]
        for row in self.table:
            for j, e in enumerate(row):
                col_width[j] = max(col_width[j], len(e))
        if width_mode in ['c', 'center']:
            col_width = np.array(col_width)
            col_width[1:-1] = np.max(col_width[1:-1])
        elif width_mode in ['e', 'each']:
            pass
        elif width_mode in ['a', 'all']:
            col_width = np.array(col_width)
            col_width[:] = np.max(col_width)
# extra char
extra_width = n_col + 5
doub_line = '=' * (extra_width + np.sum(col_width))
sing_line = '-' * (extra_width + np.sum(col_width))
# head line
cont = doub_line + '\n'
for i, row in enumerate(self.table):
cont_row = ' '
for j, e in enumerate(row):
cont_row = cont_row + e.rjust(col_width[j]) + ' '
# vertical lines
if j in vlines: cont_row = cont_row + '| '
cont = cont + cont_row + '\n'
# horizontal lines
if i in hlines: cont = cont + sing_line + '\n'
# bottom line
cont = cont + doub_line + '\n'
return cont
class RevisedSimplexMethod():
'''
The revised simplex method to solve the LP
'''
def __init__(self, c, A, b, c_0=0., var_names=None, mini=True):
'''
Args:
---
A: nested list or ndarray, shape of (m, n)
constraint coefficient matrix
c: list or ndarray, shape of (n,)
object coefficient vector
b: list or ndarray, shape of (m,)
right-hand side vector
var_names: list, its length should equal to the number of decision variables (n)
decision variables' names
mini: bool
mini=True, is to minimize the object function
mini=False, is to maximize the object function
'''
self.c = np.array(c, dtype=np.float_)
self.A = np.array(A, dtype=np.float_)
self.b = np.array(b, dtype=np.float_)
self.c_0 = -c_0
self.mini = mini
# the number of constraints
self.m = self.A.shape[0]
# the number of decision variables
self.n = self.A.shape[1]
# decision variables' names
self.var_names = var_names
if self.var_names is None:
self.var_names = ['x'+str(i) for i in range(1, self.n+1)]
# indicate the whether is a two-phase simplex method
self.phase = False # True: two-phase
# check matrix A
self.check(self.A)
# the statue of solution
self.statue = None
# logs: for procedure
# number of iteration
self.n_iter = 0
self.logs = {}
self.logs['phase'] = []
self.logs['ent_var'] = []
self.logs['lev_var'] = []
self.logs['basis'] = []
self.logs['non_basis'] = []
self.logs['w'] = []
self.logs['c'] = []
self.logs['A'] = []
self.logs['b'] = []
self.logs['c_0'] = []
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def check(self, A):
'''
Check matrix A is fulfill the conditions of Standard Form of LP
Args:
-----
A: numpy.ndarray, shape of (m,n)
'''
(_m, _n) = A.shape
try:
assert _m <= _n
except AssertionError:
            print('The number of decision variables (n={0}) should not be less than the number of constraints (m={1})!'.format(_n, _m))
raise
try:
assert np.linalg.matrix_rank(A) == _m
except AssertionError:
print('Redundant constraint exists! (Rank(A)={0} < m={1})'.format(np.linalg.matrix_rank(A), _m))
raise
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def record_logs(self, **kwargs):
'''
record some useful logs
Args:
-----
**kwargs:
'''
for key, value in kwargs.items():
self.logs[key].append(value)
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def init_basis(self, A):
'''
From the constraint coefficient matrix, find a BFS
Maybe return not a full basis
Args:
-----
A: numpy.ndarray, shape of (m,n)
Return:
-------
basis:
non_basis:
'''
(_m, _n) = A.shape
basis = []
for i in range(_n):
r = collections.Counter(A[:, i].tolist())
if (r[1.] == 1) and (r[0.] == _m-1):
if not(i in basis):
basis.append(i)
# get the non-basic variable
non_basis = list(range(_n))
for e in basis: non_basis.remove(e)
return basis, non_basis
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def find_ent_var(self, c_D, non_basis, mini):
'''
To find an entering variable x_r from non_basis
Args:
---
c_D: ndarray, shape of (n,)
current object coefficient vector of non-basis
non_basis: list, it's length equal to n-m
the index of non-basic variables
mini: bool
To find the most negative element (mini=True) for minimum LP
or the most positive element (mini=True) for maximum LP
'''
if mini:
index = np.argmin(c_D)
else:
index = np.argmax(c_D)
return non_basis[index]
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def find_lev_var(self, b, p_s, basis):
'''
To find leaving variable from basis
To find the less ratio (b_i/p_is) of p_s for all p_is>0.
Args:
---
b: ndarray, shape of (m,)
the current right-hand vector
p_s: ndarray, shape of (m,)
the current constrain coefficient vector of the entering variable x_r
basis: list, it's length equal to m
the index of basic variables
'''
_m = p_s.shape[0]
lev_var = None
_ratio = np.inf
for i in range(_m):
if p_s[i] <= 0.:
continue
else:
ratio = b[i]/p_s[i]
if ratio < _ratio:
lev_var = basis[i]
_ratio = ratio
return lev_var
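    # Worked ratio test (illustrative): with b = [4, 6] and p_s = [2, 1] the
    # ratios are 4/2 = 2 and 6/1 = 6, so the first basic variable (smallest
    # ratio) is the one returned as the leaving variable.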
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def find_art_lev_var(self, c_D, non_basis):
'''
        To find an entering variable x_r from non_basis among the non-artificial variables
        Used specifically in Phase I, which is always a minimization, so we look for the most negative cost coefficient
Args:
---
c: ndarray, shape of (n,)
current object coefficient vector of non-basis
non_basis: list
the index of non-basic variables
'''
ent_c = 0.
ent_var = None
for i, e in enumerate(non_basis):
            if e < self.n: # is a non-artificial variable
if c_D[i] < ent_c:
ent_c = c_D[i]
ent_var = e
return ent_var
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def init_phase1(self, c, A, b):
'''
        Introduce artificial variables for the two-phase simplex method in Phase I:
The standard form of Phase I:
len: n m 1 1 |len n+m+1 1
| x | s | z | b | | x | b |
-w | 0 | 1 | 0 | | 1 -w | 0 | 0 | 1
-z | c | 0 | 1 | c_0 | 1 => x_B | A | b | m+1
x_B | A | I | 0 | b | m
Args:
-----
A: ndarray, shape of (m,n)
original constraint coefficient
'''
(m, n) = A.shape
# names of artifical variables
self.art_names = ['S'+str(i) for i in range(1, m+1)]
# basic variable for Phase I
ph1_basis = list(range(n, n+m))
# get the non-basic variable
ph1_non_basis = list(range(n+m))
for e in ph1_basis:
ph1_non_basis.remove(e)
# the cost coefficient of w(x) for Phase I
ph1_w = np.zeros(n+m, dtype=np.float_)
ph1_w[n:] = 1
ph1_c = np.concatenate((self.c, np.zeros(m, dtype=np.float_)))
ph1_A = np.block([A, np.identity(m, dtype=np.float_)])
ph1_b = b
return ph1_w, ph1_c, ph1_A, ph1_b, ph1_basis, ph1_non_basis
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def iteration(self, c, A, b, basis, mini, phase):
'''
One iteration of simplex method, where the basis have be found
Args:
-----
c:
A:
b:
basis:
mini:
phase:
'''
(m, n) = A.shape
# get the non-basic variable
non_basis = list(range(n))
for e in basis: non_basis.remove(e)
# Step 0: prepare B, B_inv, D, c_D, c_B, b, c_0
B, D = A[:, basis], A[:, non_basis]
c_B, c_D = c[basis], c[non_basis]
B_inv = np.linalg.inv(B)
x_B = np.dot(B_inv, b)
b = np.dot(B_inv, b)
        c_0 = np.dot(c_B, x_B)  # objective value contributed by the basic variables
# Step 1:calculate current c_D
c_D = (c_D.T - np.dot(np.dot(c_B.T, B_inv), D)).T
# Step 2: Determine the entering variable
enter_var = self.find_ent_var(c_D, non_basis, mini=mini)
# calculate the corresponding p_s
p_s = np.dot(B_inv, A[:, enter_var])
# Step 3: from non-basis variables find a leaving variable x_r
leave_var = self.find_lev_var(b, p_s, basis)
if leave_var is None: # no variable should leave basis
self.statue = 2 # unbounded solution
return enter_var, leave_var, basis, non_basis, c_D
# Step 4: Updata basis and non-basis
basis[basis.index(leave_var)] = enter_var
basis.sort()
non_basis[non_basis.index(enter_var)] = leave_var
non_basis.sort()
return enter_var, leave_var, basis, non_basis, c_D
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def compute(self):
'''
Iteration procedures of revised simplex method
'''
# record the running time
start_time = time.process_time()
# intial basis
basis, non_basis = self.init_basis(self.A)
c, A, b = self.c, self.A, self.b
if len(basis) < self.m:
self.phase = True
ph1_w, ph1_c, ph1_A, ph1_b, ph1_basis, ph1_non_basis = self.init_phase1(c, A, b)
self.ph1_w, self.ph1_c, self.ph1_A, self.ph1_b, self.ph1_basis = ph1_w, ph1_c, ph1_A, ph1_b, ph1_basis
# Phase I:
            while np.any(np.array(ph1_basis) >= self.n): # iterate until all artificial variables have left the basis
# print(ph1_basis, ph1_non_basis, self.n)
# record logs
self.n_iter = self.n_iter + 1
self.record_logs(phase=True, basis=ph1_basis.copy(), non_basis=ph1_non_basis.copy())
# each iteration
ph1_enter_var, ph1_leave_var, ph1_basis, ph1_non_basis, c_D = self.iteration(ph1_w, ph1_A, ph1_b, ph1_basis, mini=True, phase=True)
self.record_logs(ent_var=ph1_enter_var, lev_var=ph1_leave_var)
# Phase I terminated:
ph1_c_B = ph1_c[ph1_basis]
ph1_B_inv = np.linalg.inv(ph1_A[:, ph1_basis])
ph1_c = (self.ph1_c.T - np.dot(np.dot(ph1_c_B.T, ph1_B_inv), ph1_A)).T
ph1_A = np.dot(ph1_B_inv, ph1_A)
ph1_b = np.dot(ph1_B_inv, ph1_b)
# self.ph1_c_0 = self.c_0 - np.dot(np.dot(ph1_c_B.T, ph1_B_inv), self.ph1_b)
# print(ph1_basis, ph1_b, ph1_c[ph1_basis])
self.ph1_c_0 = self.c_0 - np.dot(ph1_c[ph1_basis], ph1_b)
c = ph1_c[:self.n]
A = ph1_A[:, :self.n]
b = ph1_b
basis = ph1_basis.copy()
# Phase II:
while True:
# record logs
self.n_iter = self.n_iter + 1
self.record_logs(phase=False, basis=basis.copy(), non_basis=non_basis.copy())
# each iteration
enter_var, leave_var, basis, non_basis, c_D = self.iteration(c, A, b, basis, mini=self.mini, phase=False)
self.record_logs(ent_var=enter_var, lev_var=leave_var)
# stop criterion
if self.mini:
if np.all(c_D >= 0.):
self.statue = 0 # successfully find the optimal solution
break
else:
if np.all(c_D <= 0.):
self.statue = 0
break
#
if leave_var is None:
break
# record the running time
self.run_time = time.process_time() - start_time
# the optimal BFS
self.opt_basis = self.logs['basis'][-1]
self.opt_non_basis = self.logs['non_basis'][-1]
return None
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def get_optimal(self):
'''
obtain the optimal solutions and objective function value
Return:
-------
sol: ndarray, shape of (n, )
The optimal solutions of the input LP
obj: float,
The optimal objective function value
'''
x_B = np.dot( | np.linalg.inv(self.A[:, self.opt_basis]) | numpy.linalg.inv |
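# Hedged end-to-end sketch of RevisedSimplexMethod above (assumed usage on a
# tiny LP already in standard form: minimise -x1 - x2 subject to
# x1 + s1 = 2 and x2 + s2 = 3):
#   c = [-1., -1., 0., 0.]
#   A = [[1., 0., 1., 0.],
#        [0., 1., 0., 1.]]
#   b = [2., 3.]
#   lp = RevisedSimplexMethod(c, A, b, mini=True)
#   lp.compute()
#   sol, obj = lp.get_optimal()   # expected: x1 = 2, x2 = 3, objective = -5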
#!/usr/bin/python
"""
atom_energy_reporter.py
MIT License
Copyright (c) 2018
Weill Cornell Medicine, Memorial Sloan Kettering Cancer Center, and Authors
Authors:
<NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from openmmtools import testsystems
import mdtraj as md
import netCDF4
from netCDF4 import Dataset
import warnings
import time
# NOTE:
# - currently only the most common energy were implemented
# TODO:
# - implement AMOEBA forces
class AtomEnergyReporter(object):
"""
AtomEnergyReporter outputs information about every atom in a simulation,
including the energy breakdown, to a file.
to use it, create an AtomEnergyReporter, then add it to the list of reporters
of the list of the Simulation. by default the data is written in CSV format.
this module is written in order to implement the algorithm developed by
<NAME> and <NAME> in University of Cambridge.
this calculates the Eq.11 in the paper:
$$
u_{X_a} = \\
    \frac{1}{2}(u_{electrostatic} + u_{Lennard-Jones} + u_{bonded} + u_{Urey-Bradley}) \\
    + \frac{1}{3} u_{angle} \\
    + \frac{1}{4} (u_{dihedral} + u_{improper})
$$
further data analysis is needed
ref:
https://pubs.acs.org/doi/abs/10.1021/acs.jctc.8b00027
"""
def __init__(self, file_path, reportInterval, idxs = None):
"""
        create an AtomEnergyReporter
parameters
----------
file : a string
the file to write to
reportInterval : int
the interval at which to write
"""
self._reportInterval = reportInterval
self.idxs = idxs
self.force_map = {
'AmoebaAngleForce' : self.analyze_amoeba_angle_force,
'AmoebaBondForce' : self.analyze_amoeba_bond_force,
'AmoebaGeneralizedKirkwoodForce' : self.analyze_amoeba_generalized_kirkwood_force,
'AmoebaInPlaneAngleForce' : self.analyze_amoeba_in_plane_angle_force,
'AmoebaMultipoleForce' : self.analyze_amoeba_multipole_force,
'AmoebaOutOfPlaneBendForce' : self.analyze_amoeba_out_of_plane_bend_force,
'AmoebaPiTorsionForce' : self.analyze_amoeba_pi_torsion_force,
'AmoebaStretchBendForce' : self.analyze_amoeba_stretch_bend_force,
'AmoebaTorsionTorsionForce' : self.analyze_amoeba_torsion_torsion_force,
'AmoebaVdwForce' : self.analyze_amoeba_vdw_force,
'AmoebaWcaDispersionForce' : self.analyze_amoeba_wca_dispersion_force,
'AndersenThermostat' : self.analyze_andersen_thermostat,
'CMAPTorsionForce' : self.analyze_cmap_torsion_force,
'CMMotionRemover' : self.analyze_cmm_motion_remover,
'CustomAngleForce' : self.analyze_custom_angle_force,
'CustomBondForce' : self.analyze_custom_bond_force,
'CustomCVForce' : self.analyze_custom_cv_force,
'CustomCentroidBondForce' : self.analyze_centroid_bond_force,
'CustomCompoundBondForce' : self.analyze_custom_compound_bond_force,
'CustomExternalForce' : self.analyze_custom_external_force,
'CustomGBForce' : self.analyze_gb_force,
'CustomHbondForce' : self.analyze_hbond_force,
'CustomManyParticleForce' : self.analyze_custom_many_particle_force,
'CustomNonbondedForce' : self.analyze_custom_nonbonded_force,
'CustomTorsionForce' : self.analyze_custom_torsion_force,
'DrudeForce' : self.analyze_drude_force,
'GBSAOBCForce' : self.analyze_gbsaobc_force,
'GayBerneForce' : self.analyze_gay_berne_force,
'HarmonicAngleForce' : self.analyze_harmonic_angle_force,
'HarmonicBondForce' : self.analyze_harmonic_bond_force,
'MonteCarloAnisotropicBarostat' : self.analyze_monte_carlo_anisotropic_barostat,
'MonteCarloBarostat' : self.analyze_monte_carlo_barostat,
'MonteCarloMembraneBarostat' : self.analyze_monte_carlo_membrane_barostat,
'NonbondedForce' : self.analyze_nonbonded_force,
'PeriodicTorsionForce' : self.analyze_periodic_torsion_force,
'RBTorsionForce' : self.analyze_rb_torsion_force,
'RPMDMonteCarloBarostat' : self.analyze_rpmd_monte_carlo_barostat
}
# create a netCDF4 Dataset to record the energy
self._out = Dataset(file_path ,'w')
self._out.createDimension("time", None)
times = self._out.createVariable("time", "i8", ("time",))
times.unit = str(self._reportInterval)
self.time = 0
# let the analyzer register for once
self.registered = False
def describeNextReport(self, simulation):
"""
adopted from:
openmm/wrappers/python/simtk/openmm/app/statedatareporter.py
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep%self._reportInterval
return (steps, True, False, True, True)
def report(self, simulation, state):
"""
generate a report
parameters
----------
simulation : an OpenMM simulation object
state : an OpenMM state object
"""
# find the small molecule to analyze
if self.registered == False: # if the system is not registered, register the system
if self.idxs == None:
self.find_small_mol(simulation, state)
# set the attributes in Dataset
self._out.description = 'record of an OpenMM run'
            self._out.history = 'created ' + time.ctime(time.time())
# initialize the Dataset
self._out.createDimension("atom", len(self.idxs))
self._out.createVariable("atom", "i8", ("atom", ))
atoms_name = ["idx = %s; mass = %s" % (idx, simulation.system.getParticleMass(idx)) for idx in self.idxs]
self._out.setncattr('atoms_name', atoms_name)
# get the forces
self.forces = simulation.system.getForces()
self.force_idx_mapping = [force for force in self.forces]
forces_name = [force.__class__.__name__ for force in self.forces]
self._out.setncattr('forces_name', forces_name)
# create a force dimension, using idxs
# and initialize the forces
self._out.createDimension("force", len(self.forces))
self._out.createVariable("force", "i8", ("force", ))
# initialize the energy variable
# that stands on the dimensions of: time, atom, and force
self.energy_var = self._out.createVariable("energy", "f4", ("time", "atom", "force"))
self.energy_var.units = 'kJ/mol'
# keep a copy of all the positions
self._out.createDimension("xyz", 3)
self.pos_var = self._out.createVariable("pos", "f4", ("time", "atom", "xyz"))
# keep a copy of the parameters of atoms
param_array = np.zeros((len(self.idxs), 3))
for force in self.forces:
if force.__class__.__name__ == "NonbondedForce":
for idx in self.idxs:
charge, sigma, epsilon = force.getParticleParameters(idx)
param_array[idx, 0], param_array[idx, 1], param_array[idx, 2] = charge._value, sigma._value, epsilon._value
# note that the units here are: elementary charge, nanometer, kilojoule/mole
self._out.setncattr('param_array', param_array)
# set the registered flag to True,
# since you only need to do this once
self.registered = True
# point these objects to the class, and update them
self.simulation = simulation
self.state = state
# get the positions of the small molecules
self.pos = tuple([state.getPositions()[idx] for idx in self.idxs])
pos_matrix = np.array([state.getPositions(asNumpy=True)[idx]._value for idx in self.idxs])
self.pos_var[self.time, :, :] = pos_matrix
# analyze each force in the system
for force_idx, force in enumerate(self.force_idx_mapping):
energy_dict = self.get_energy(force)
if energy_dict == None:
warnings.warn("no force information could be extracted from %s" % force.__class__.__name__)
continue
for atom_idx, energy in energy_dict.items():
self.energy_var[self.time, atom_idx, force_idx] = energy._value
# note that the unit here is kilojoule/mole
# increase the time dimension by one
self.time += 1
def find_small_mol(self, simulation, state):
"""
find the atoms of the smallest molecule, which is most likely to be
the region of greates interest for a simulation
parameters
----------
simulation : an OpenMM Simulation object
state : an OpenMM State object
returns
-------
atoms : a tuple of indicies of atoms that belongs to the small molecule
"""
context = simulation.context
mols = context.getMolecules()
small_mol = sorted([mol for mol in mols if len(mol) > 4],
key = lambda mol : len(mol), reverse = False)[0]
# register the atoms and idxs in the class
self.idxs = small_mol
return small_mol
def get_energy(self, force):
"""
anlyzes force and return the energy,
to be more specific, match the force with a certain type of analysis function
"""
name = str(force.__class__.__name__) # get the name of the force
energy_dict = self.force_map[name](force) # map the force to its specific analyze function and get the energy
return energy_dict
#################################################
# herlper functions to calculate distance, angles,
# and dihedral angels from positions of atoms
#################################################
def dist(self, atom0, atom1):
"""
calculate the distance between two atoms
require that self.pos is defined
parameters
----------
atom0 : the idx of the first atom
atom1 : the idx of the second atom
returns
-------
dist : a float representing the distance between the two atoms
"""
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
dist = np.linalg.norm(pos0 - pos1)
return dist
def angle(self, center_atom, atom0, atom1):
"""
calculate the angle between bond:
center_atom -- atom0
and
center_atom -- atom1
        $ \cos(<v0, v1>) = (v0 \cdot v1) / (|v0||v1|) $
parameters
----------
center_atom : the idx of the center atom
atom0 : the idx of the first atom involved in the angle
atom1 : the idx of the second atom involved in the angle
returns
-------
angle : the value of the angle in rads
"""
# get all the positions
pos_center = self.pos[center_atom]
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
# express the distance in vectors
v0 = np.array(pos0) - np.array(pos_center)
v1 = np.array(pos1) - np.array(pos_center)
# to calculate:
# $ cos(<v0, v1>) = (v0 \dot v1) / |v0||v1| $
v0_dot_v1 = np.dot(v0, v1)
v0_norm = np.linalg.norm(v0)
v1_norm = np.linalg.norm(v1)
angle = np.arccos(np.true_divide(v0_dot_v1, v0_norm * v1_norm))
return Quantity(angle, radian)
def dihedral(self, atom0, atom1, atom2, atom3):
"""
calculate the dihedral between the plane formed by:
atom0, atom1, and atom2
and that by
atom1, atom2, and atom3
$$
        n_A = q_1 \times q_2 \\
        n_B = q_2 \times q_3 \\
        \cos\Phi = (n_A \cdot n_B) / (|n_A|\,|n_B|)
$$
parameters
----------
atom0 : the idx of the first atom involved in the torsion
atom1 : the idx of the second atom involved in the torsion
atom2 : the idx of the thrid atom involved in the torsion
atom3 : the idx of the fourth atom involved in the torsion
returns
-------
angle : the value of the dihedral angle in rads
"""
# get the positions of the atoms
pos0 = self.pos[atom0]
pos1 = self.pos[atom1]
pos2 = self.pos[atom2]
pos3 = self.pos[atom3]
# calculate the vectors between these atoms
q1 = pos1 - pos0
q2 = pos2 - pos1
q3 = pos3 - pos2
# calculate the normal vectors
na = np.cross(q1, q2)
nb = np.cross(q2, q3)
# calculate the dihedral angel
na_dot_nb = | np.dot(na, nb) | numpy.dot |
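# Self-contained numeric check of the dihedral construction described above
# (plain numpy, no OpenMM): four points that form a 90-degree dihedral.
import numpy as np
p0, p1, p2, p3 = (np.array(p) for p in ([0., 1., 0.], [0., 0., 0.],
                                        [1., 0., 0.], [1., 0., 1.]))
q1, q2, q3 = p1 - p0, p2 - p1, p3 - p2
na, nb = np.cross(q1, q2), np.cross(q2, q3)
cos_phi = np.dot(na, nb) / (np.linalg.norm(na) * np.linalg.norm(nb))
print(np.degrees(np.arccos(cos_phi)))   # ~90.0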
###############################################################################
# DehnenBarPotential: Dehnen (2000)'s bar potential
###############################################################################
import numpy
from ..util import conversion
from .Potential import Potential
_degtorad= numpy.pi/180.
class DehnenBarPotential(Potential):
"""Class that implements the Dehnen bar potential (`Dehnen 2000 <http://adsabs.harvard.edu/abs/2000AJ....119..800D>`__), generalized to 3D following `Monari et al. (2016) <http://adsabs.harvard.edu/abs/2016MNRAS.461.3835M>`__
.. math::
\\Phi(R,z,\\phi) = A_b(t)\\,\\cos\\left(2\\,(\\phi-\\Omega_b\\,t)\\right))\\,\\left(\\frac{R}{r}\\right)^2\\,\\times \\begin{cases}
-(R_b/r)^3\\,, & \\text{for}\\ r \\geq R_b\\\\
(r/R_b)^3-2\\,, & \\text{for}\\ r\\leq R_b.
\\end{cases}
where :math:`r^2 = R^2+z^2` is the spherical radius and
.. math::
A_b(t) = A_f\\,\\left(\\frac{3}{16}\\xi^5-\\frac{5}{8}\\xi^3+\\frac{15}{16}\\xi+\\frac{1}{2}\\right)\\,, \\xi = 2\\frac{t/T_b-t_\\mathrm{form}}{T_\mathrm{steady}}-1\\,,\ \mathrm{if}\ t_\\mathrm{form} \\leq \\frac{t}{T_b} \\leq t_\\mathrm{form}+T_\\mathrm{steady}
and
.. math::
A_b(t) = \\begin{cases}
0\\,, & \\frac{t}{T_b} < t_\mathrm{form}\\\\
A_f\\,, & \\frac{t}{T_b} > t_\mathrm{form}+T_\mathrm{steady}
\\end{cases}
where
.. math::
T_b = \\frac{2\pi}{\\Omega_b}
is the bar period and the strength can also be specified using :math:`\\alpha`
.. math::
\\alpha = 3\\,\\frac{A_f}{v_0^2}\\,\\left(\\frac{R_b}{r_0}\\right)^3\,.
"""
normalize= property() # turn off normalize
def __init__(self,amp=1.,omegab=None,rb=None,chi=0.8,
rolr=0.9,barphi=25.*_degtorad,
tform=-4.,tsteady=None,beta=0.,
alpha=0.01,Af=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a Dehnen bar potential
INPUT:
amp - amplitude to be applied to the potential (default:
1., see alpha or Ab below)
barphi - angle between sun-GC line and the bar's major axis
(in rad; default=25 degree; or can be Quantity))
tform - start of bar growth / bar period (default: -4)
            tsteady - time from tform at which the bar is fully grown / bar period (default: -tform/2, such that the perturbation is fully grown at tform/2)
Either provide:
a) rolr - radius of the Outer Lindblad Resonance for a
circular orbit (can be Quantity)
chi - fraction R_bar / R_CR (corotation radius of bar)
alpha - relative bar strength (default: 0.01)
beta - power law index of rotation curve (to
calculate OLR, etc.)
b) omegab - rotation speed of the bar (can be Quantity)
rb - bar radius (can be Quantity)
Af - bar strength (can be Quantity)
OUTPUT:
(none)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
2017-06-23 - Converted to 3D following Monari et al. (2016) - Bovy (UofT/CCA)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo)
barphi= conversion.parse_angle(barphi)
rolr= conversion.parse_length(rolr,ro=self._ro)
rb= conversion.parse_length(rb,ro=self._ro)
omegab= conversion.parse_frequency(omegab,ro=self._ro,vo=self._vo)
Af= conversion.parse_energy(Af,vo=self._vo)
self.hasC= True
self.hasC_dxdv= True
self.isNonAxi= True
self._barphi= barphi
if omegab is None:
self._rolr= rolr
self._chi= chi
self._beta= beta
#Calculate omegab and rb
self._omegab= 1./((self._rolr**(1.-self._beta))/(1.+numpy.sqrt((1.+self._beta)/2.)))
self._rb= self._chi*self._omegab**(1./(self._beta-1.))
self._alpha= alpha
self._af= self._alpha/3./self._rb**3.
else:
self._omegab= omegab
self._rb= rb
self._af= Af
self._tb= 2.*numpy.pi/self._omegab
self._tform= tform*self._tb
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady*self._tb
def _smooth(self,t):
if isinstance(t,numpy.ndarray):
smooth=numpy.ones(len(t))
indx=(t < self._tform)
smooth[indx]=0.
indx=(t < self._tsteady) * (t >= self._tform)
deltat=t[indx]-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth[indx]= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else:
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
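    # Illustrative values of the switch-on function above: smooth(tform) = 0,
    # smooth at the midpoint of the growth interval = 0.5, smooth(tsteady) = 1,
    # so the bar amplitude grows smoothly from zero to its full value A_f.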
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r2= R**2.+z**2.
r= numpy.sqrt(r2)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= ((r[indx]/self._rb)**3.-2.)\
*numpy.divide(R[indx]**2.,r2[indx],numpy.ones_like(R[indx]),
where=R[indx]!=0)
indx=numpy.invert(indx)
out[indx]= -(self._rb/r[indx])**3.*1./(1.+z[indx]**2./R[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r == 0:
return -2.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
elif r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r2
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.\
*1./(1.+z**2./R**2.)
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= -((r[indx]/self._rb)**3.*R[indx]*(3.*R[indx]**2.+2.*z[indx]**2.)-4.*R[indx]*z[indx]**2.)/r[indx]**4.
indx= numpy.invert(indx)
out[indx]= -(self._rb/r[indx])**3.*R[indx]/r[indx]**4.*(3.*R[indx]**2.-2.*z[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.*R*(3.*R**2.+2.*z**2.)-4.*R*z**2.)/r**4.
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R/r**4.*(3.*R**2.-2.*z**2.)
def _phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r2= R**2.+z**2.
r= | numpy.sqrt(r2) | numpy.sqrt |
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
"""
Created on Mon Feb 10 17:24 2020
@author: <NAME>
======================================================================
Purpose: Outputs max trends in gsat for a range of N year periods in
each observational historical record, for use in Figure 3
======================================================================
"""
# Load in required directories
basedir = 'Priestley-Centre/Near_term_warming/observation_data'
savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\
'Figure3/saved_arrays'
# Load in data and apply scaling factors
# to convert from GBST to GSAT
temp_BE = np.loadtxt(basedir+'/BE_Land_and_Ocean.csv',\
delimiter=',')[:,1]*1.087
years_BE = np.loadtxt(basedir+'/BE_Land_and_Ocean.csv',\
delimiter=',')[:,0]
nyear_BE = len(years_BE)
temp_GI = np.loadtxt(basedir+'/GISTEMPv4.csv',\
delimiter=',')[:,1]*1.087
years_GI = np.loadtxt(basedir+'/GISTEMPv4.csv',\
delimiter=',')[:,0]
nyear_GI = len(years_GI)
temp_Ha = np.loadtxt(basedir+'/HadCRUT4.6.csv',\
delimiter=',')[:,1]*1.19
years_Ha = np.loadtxt(basedir+'/HadCRUT4.6.csv',\
delimiter=',')[:,0]
nyear_Ha = len(years_Ha)
temp_CW = np.loadtxt(basedir+'/CWv2_had4sst3.csv',\
delimiter=',')[:,1]*1.087
years_CW = np.loadtxt(basedir+'/CWv2_had4sst3.csv',\
delimiter=',')[:,0]
nyear_CW = len(years_CW)
# Calculate maximum Ny trends
trend_lengths = np.linspace(10,50,41)
max_trends = | np.zeros([41,4]) | numpy.zeros |
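# Hedged sketch of one "maximum N-year trend" evaluation consistent with the
# arrays set up above (the original loop over records and window lengths is
# not shown here):
#   N = 10
#   slopes = [stats.linregress(years_BE[i:i + N], temp_BE[i:i + N]).slope
#             for i in range(nyear_BE - N + 1)]
#   max_trend = np.max(slopes) * 10.0   # K per decade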
#! /usr/bin/python3
import os
import time
import numpy as np
import math as m
import matplotlib.pyplot as plt
from matplotlib import animation
from xarray import open_mfdataset
# mesonhFiles = '/home/pnarvor/work/nephelae/data/MesoNH-2019-02/REFHR.1.ARMCu.4D.nc'
# mesonhFiles = '/home/pnarvor/work/nephelae/data/nephelae-remote/MesoNH02/bomex_hf.nc'
mesonhFiles = '/home/pnarvor/work/nephelae/data/skyscanner-remote/Nephelae/MesoNH-2019-10/L12zo.1.BOMEX.OUT.040.nc'
atm = open_mfdataset(mesonhFiles)
wt = atm.WT
print("Shape :", wt.shape)
# t0 = 40
t0 = 0
z0 = 40
borderSize = 10
snBorders = np.empty([wt.shape[2], 2*borderSize])
snBorders[:, :borderSize] = wt[t0,z0,:,-borderSize:]
snBorders[:,-borderSize:] = wt[t0,z0,:, :borderSize]
toRemove = 6
wt0 = wt[t0,z0,:,:-toRemove]
snBorders0 = np.empty([wt0.shape[0], 2*borderSize])
snBorders0[:, :borderSize] = wt0[:,-borderSize:]
snBorders0[:,-borderSize:] = wt0[:, :borderSize]
pad = 0
bottom = np.array(wt[t0,z0,:,:(toRemove + pad)]).squeeze()
top = np.array(wt[t0,z0,:,-(toRemove + pad):]).squeeze()
diff0 = top - bottom
print("Residual top/bottom:", np.linalg.norm(diff0.ravel()))
pad = 0
left = np.array(wt[t0,z0,:(toRemove + pad) ,:]).squeeze()
right = | np.array(wt[t0,z0,-(toRemove + pad):,:]) | numpy.array |
# general imports
import numpy as np
import sys
import copy
import matplotlib.pyplot as plt
# local modul imports
from pemfc import channel
from pemfc import fluid
from pemfc import species
from pemfc import interpolation as ip
np.set_printoptions(threshold=sys.maxsize, linewidth=10000,
precision=9, suppress=True)
np.seterr(all='raise')
nodes = 20
mass_flow_hydrogen = 0.0001 # kg/s
mass_flow_air = 0.0001
mass_flow_water = 0.002
wall_temp = None # 380
# wall_temp_1 = 380.0
# wall_temp_2 = 420.0
heat_flux = 1000.0 # W/m²
inlet_temperature = 293.15
outlet_pressure = 101325.0
length = 0.1
width = 0.001
height = 0.001
hydrogen_dict = {
'name': 'Hydrogen',
'fluid_components': {'H2': 'gas'},
'inlet_composition': 1.0,
'temp_init': inlet_temperature,
'press_init': outlet_pressure,
'nodes': nodes
}
air_dict = {
'name': 'Air',
'fluid_components': {'O2': 'gas', 'N2': 'gas'},
'inlet_composition': [0.21, 0.79],
'temp_init': inlet_temperature,
'press_init': outlet_pressure,
'nodes': nodes
}
water_dict = {
'name': 'Water',
'fluid_components': None,
'inlet_composition': None,
'liquid_props':
species.ConstantProperties('Liquid',
specific_heat=4000.0,
density=1000.0,
viscosity=1e-3,
thermal_conductivity=0.6),
'temp_init': inlet_temperature,
'press_init': outlet_pressure,
'nodes': nodes
}
channel_dict = {
'name': 'Channel',
'length': length,
'cross_sectional_shape': 'rectangular',
'p_out': outlet_pressure,
'temp_in': inlet_temperature,
'flow_direction': 1,
'width': width,
'height': height,
'bend_number': 0,
'bend_friction_factor': 500,
'additional_friction_fractor': 0.01
}
hydrogen = fluid.factory(hydrogen_dict)
air = fluid.factory(air_dict)
water = fluid.factory(water_dict)
fluids = [hydrogen, air, water]
channel_dicts = [copy.deepcopy(channel_dict) for i in range(3)]
channels = [channel.Channel(channel_dicts[i], fluids[i]) for i in range(3)]
error = 1e5
iter_max = 50
temp_old = np.asarray([channel.temperature for channel in channels])
mass_flows = [mass_flow_hydrogen, mass_flow_air, mass_flow_water]
delta_temp = 30.0
for i in range(iter_max):
if error < 1e-4:
break
error = 0.0
for j, channel in enumerate(channels):
channel.update(mass_flow_in=mass_flows[j], heat_flux=heat_flux,
wall_temp=wall_temp)
mass_flows[j] = np.sum(channel.heat) \
/ (np.average(channel.fluid.specific_heat) * delta_temp)
error += np.sum(np.abs(((temp_old[j] - channel.temperature) / channel.temperature)))
temp_old[j, :] = channel.temperature
x = ip.interpolate_1d(channels[0].x)
for channel in channels:
plt.plot(x, channel.temp_ele,
label='Fluid Temperature - ' + channel.fluid.name)
plt.plot(x, channel.wall_temp,
label='Wall Temperature - ' + channel.fluid.name)
plt.xlabel('Channel Location [m]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.show()
for channel in channels:
plt.plot(x, channel.heat,
label='Heat Transfer - ' + channel.fluid.name)
plt.xlabel('Channel Location [m]')
plt.ylabel('Heat [W]')
plt.legend()
plt.show()
print('Pumping Power:')
for i, channel in enumerate(channels):
print(channel.fluid.name + ': ',
np.average(channel.vol_flow) * (channel.pressure[channel.id_in]
- channel.pressure[channel.id_out]))
print('Mass Flows:')
for i, channel in enumerate(channels):
print(channel.fluid.name + ': ', | np.average(channel.mass_flow_total) | numpy.average |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from numpy import cos, sin
from scipy.optimize import minimize
from scipy.optimize import Bounds
from scipy.optimize import LinearConstraint
class Rover():
def __init__(self,l1, l2, l3, l4, alpha, beta, gamma, wheel_rad = 0.4, body_len = None, body_wid = None):
self.l1 = l1
self.l2 = l2
self.l3 = l3
self.l4 = l4
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.wheel_rad = wheel_rad
self.body_len = body_len
self.body_wid = body_wid
def set_terrain(self, terr):
self.terrain = terr
def set_inertias(self, mass, g):
self.mass = mass
self.g = g
def z_center(self, x):
if not hasattr(self, 'terrain'):
print("No terrain specified")
z_gnd = 0.0
grad = 0.0
else:
z_gnd = self.terrain.heightAt(x)
grad = self.terrain.gradient(x)
z_center = z_gnd + self.wheel_rad * np.cos(np.arctan(grad))
return z_center
def func_th2(self, th2, x2, z2):
l3 = self.l3
l4 = self.l4
beta = self.beta
z_center = self.z_center
x3 = x2 + l3*np.cos(th2) + l4*np.cos(np.pi - beta - th2)
z3_gnd = z_center(x3)
z3_kin = z2 + l3*np.sin(th2) - l4*np.sin(np.pi - beta - th2)
return z3_gnd - z3_kin
def func_th1(self, th1, xb, zb):
l1 = self.l1
l2 = self.l2
alpha = self.alpha
z_center = self.z_center
x1 = xb - l2*np.cos(np.pi - alpha - th1) - l1*np.cos(th1)
z1_gnd = z_center(x1)
z1_kin = zb + l2*np.sin(np.pi - alpha - th1) - l1*np.sin(th1)
return z1_gnd - z1_kin
def find_angles(self, x2):
z2 = self.z_center(x2)
th2_guess = np.deg2rad(50) # guess
th2 = fsolve(self.func_th2, th2_guess, args=(x2, z2))[0]
xb = x2 + self.l3*np.cos(th2)
zb = z2 + self.l3*np.sin(th2)
th1_guess = np.deg2rad(50) # guess
th1 = fsolve(self.func_th1, th1_guess, args=(xb, zb))[0]
return th1, th2
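    # Hedged note on the closure above: each fsolve call drives the residual
    # "ground height minus kinematically implied height" to zero, first for
    # the rear linkage angle th2 and then, given th2, for the front angle th1.
    #   th1, th2 = rover.find_angles(x2=1.5)   # x2 in the same units as l1..l4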
def find_geom(self, x2):
l1 = self.l1
l2 = self.l2
l3 = self.l3
l4 = self.l4
alpha = self.alpha
beta = self.beta
th1, th2 = self.find_angles(x2)
z2 = self.z_center(x2)
xb = x2 + l3*np.cos(th2)
zb = z2 + l3* | np.sin(th2) | numpy.sin |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 11:42:57 2015
@author: jmilli
"""
import sys
from sympy import Symbol, nsolve
import math
import numpy as np
import matplotlib.pyplot as plt
#from scipy import ndimage
sys.path.append('/Users/jmilli/Dropbox/lib_py/image_utilities')
import rotation_images as rot
from numpy.linalg import eig, inv
import matplotlib.pylab as plt
def ellipse_points(a,b,R,precision=0.2*math.pi/180, step_R=0.1,plot=True):
"""
    Function ellipse_points finds points on an ellipse, equally spaced
Arguments:
1. a: semi-major axis of the ellipse
2. b: semi-minor axis of the ellipse
3. R: spacing between points
Optional arguments:
4. precision: the precision in radians on spacing
5. step_R: the step in spacing between each iteration
"""
x = Symbol('x')
y = Symbol('y')
ellipse = (x/a)**2 + (y/b)**2 - 1
t_final=math.pi/2
iter_nb=0
continue_loop = True
while continue_loop:
iter_nb += 1
if iter_nb > 1:
print('Iterations: {0:.0f}, deviation at final position {1:4.2f} degrees'.format(iter_nb-1,(t-t_final)*180/math.pi))
t=0 #math.pi/10
x_sol=[a*math.cos(t)]
y_sol=[b*math.sin(t)]
t_sol=[t]
while t < t_final-precision:
x0 = a*math.cos(t)
y0 = b*math.sin(t)
cercle = (x-x0)**2 + (y-y0)**2 -R**2
trynextguess=True
nbguessiter=0
while (trynextguess and nbguessiter < 10):
try:
derivative= [-a*math.sin(t),b*math.cos(t)]
direction = R/np.linalg.norm(derivative)*np.array(derivative)
guess = np.array([x0,y0])+direction
sol=nsolve((ellipse,cercle), (x, y), (guess[0],guess[1]))
trynextguess=False
except ValueError as e:
nbguessiter += 1
print(e)
print('Initial guess changed. We retry: {0:4.0f} iterations'.format(
nbguessiter))
t+=math.atan(R/4/a)
#print(sol)
t = math.acos(float(sol[0])/a)
t_sol.append(t)
x_sol.append(a*math.cos(t))
y_sol.append(b*math.sin(t))
if math.fabs(t-t_final) < precision:
continue_loop = False
else:
R-=step_R
print('Number of iterations: {0:4.0f}'.format(iter_nb))
print('Deviation in degrees at final position = {0:4.2f}'.format(
(t-t_final)*180/math.pi))
print('Spacing between points = {0:4.2f}'.format(R))
if plot:
nb_points = 100
theta = np.arange(0,math.pi/2,math.pi/2/nb_points)
x_ellipse = np.array([a*math.cos(i) for i in theta])
y_ellipse = np.array([b*math.sin(i) for i in theta])
plt.plot(x_sol,y_sol, 'ro')
plt.plot(x_ellipse,y_ellipse)
plt.plot([0,a],[0,0])
plt.plot([0,0],[0,b])
plt.axis([0,a, 0, b])
        plt.axis('equal')  # added to enforce an equal aspect ratio
plt.show()
return t_sol
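# Illustrative call (hypothetical numbers, not part of the original script): sample a
# 100x50 ellipse roughly every 5 units along its first quadrant; the spacing R is reduced
# by step_R until the last point lands within `precision` of the quarter-ellipse endpoint.
def _demo_ellipse_points():
    return ellipse_points(100., 50., 5., plot=False)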
def elliptical_mask(size,a,b,epsilon=2.,delta=2.,yc=None,xc=None,theta=0):
"""
    Function elliptical_mask builds an elliptical ring mask. Two ellipses of semi-major
    axes a-delta and a+delta and of semi-minor axes b-epsilon and b+epsilon are built.
    The function returns the indices of the pixels lying between the 2 ellipses
    (conceptually, a mask that is 1 between the 2 ellipses and 0 everywhere else).
    Arguments:
    1. size: size of the (square) image in pixels
    2. a: semi-major axis of the ellipse
    3. b: semi-minor axis of the ellipse
    Optional arguments:
    4. epsilon: half-width of the ring along the semi-minor axis. By default it is 2px
    5. delta: half-width of the ring along the semi-major axis. By default it is 2px
    6. yc: the center of the ellipse in y. By default, size/2
    7. xc: the center of the ellipse in x. By default, size/2
    8. theta: the position angle of the semi-major axis of the ellipse, measured
    anti-clockwise from the horizontal
    Output
    id_inner_ellipse: indices of the pixels nested between the 2 ellipses
"""
x1 = np.arange(0,size)
y1 = np.arange(0,size)
x,y = np.meshgrid(y1,x1)
    if yc is None:
        yc = size/2
    if xc is None:
        xc = size/2
ellipse_ext = (x-xc)**2/(a+delta)**2+(y-yc)**2/(b+epsilon)**2-1
ellipse_int = (x-xc)**2/(a-delta)**2+(y-yc)**2/(b-epsilon)**2-1
if theta != 0:
ellipse_ext = rot.frame_rotate(ellipse_ext,-theta)
ellipse_int = rot.frame_rotate(ellipse_int,-theta)
id_inner_ellipse = np.where((ellipse_ext < 0) * (ellipse_int > 0))
return id_inner_ellipse
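# Sketch of how elliptical_mask might be used (assumed sizes, purely illustrative):
# turn the returned pixel indices into a 1-valued ring in a 200x200 image.
def _demo_elliptical_mask():
    size = 200
    idx = elliptical_mask(size, 60., 30., epsilon=2., delta=2.)
    mask = np.zeros((size, size))
    mask[idx] = 1.
    return mask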
def elliptical_mask_advanced(size,a1,b1,a2,b2,xc1=None,yc1=None,yc2=None,
xc2=None,theta1=0,theta2=0):
"""
    Function elliptical_mask_advanced builds an elliptical ring mask. Two ellipses of
    semi-major axes a1 and a2 and of semi-minor axes b1 and b2 are built.
    The function returns the indices of the pixels lying between the 2 ellipses and
    the indices of the pixels lying outside that region.
    Arguments:
    1. size: the size of the image
    2. a1: semi-major axis of the inner ellipse
    3. b1: semi-minor axis of the inner ellipse
    4. a2: semi-major axis of the outer ellipse
    5. b2: semi-minor axis of the outer ellipse
    Optional arguments:
    6. yc1, yc2: the y center of the inner/outer ellipse. By default, size/2
    7. xc1, xc2: the x center of the inner/outer ellipse. By default, size/2
    8. theta1, theta2: the position angle of the semi-major axis of the inner/outer
    ellipse, measured anti-clockwise from the horizontal
    Output
    id_inner_ellipse: indices of the pixels nested between the 2 ellipses
    id_outer_ellipse: indices of the pixels outside the 2 ellipses
"""
x1 = np.arange(0,size)
y1 = np.arange(0,size)
x,y = np.meshgrid(y1,x1)
    if yc1 is None:
        yc1 = size/2
    if xc1 is None:
        xc1 = size/2
    if yc2 is None:
        yc2 = size/2
    if xc2 is None:
        xc2 = size/2
ellipse_int = (x-xc1)**2/a1**2+(y-yc1)**2/b1**2-1
ellipse_ext = (x-xc2)**2/a2**2+(y-yc2)**2/b2**2-1
if theta1 != 0:
ellipse_int = rot.frame_rotate(ellipse_int,-theta1)
if theta2 != 0:
ellipse_ext = rot.frame_rotate(ellipse_ext,-theta2)
id_inner_ellipse = np.where((ellipse_ext < 0) * (ellipse_int > 0))
id_outer_ellipse = np.where((ellipse_ext > 0) + (ellipse_int < 0))
return id_inner_ellipse,id_outer_ellipse
def ellipse_polynomial_coeff(a,b,x0,y0,pa):
"""
This function returns the polynomial coefficient of an ellipse which is
parametrized through a semi-major axis a, a semi-minor axis b, an offset
(x0,y0) and a position angle pa measured from North counter-clockwise. The
output is an array called coeff such that the ellipse equation is
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5] = 0
with coeff[5]=1
"""
trigo_pa=-pa-math.pi/2
cosa=np.cos(trigo_pa)
sina=np.sin(trigo_pa)
coeff=np.zeros(6)
coeff[0]=a**2*cosa**2+b**2*sina**2
coeff[1]=2*cosa*sina*(b**2-a**2)
coeff[2]=a**2*sina**2+b**2*cosa**2
coeff[3]=a**2*(-2*cosa**2*x0+2*cosa*sina*y0)+b**2*(-2*cosa*sina*y0 - 2*sina**2*x0)
coeff[4]=a**2*(2*cosa*sina*x0 - 2*sina**2*y0)+b**2*(- 2*cosa**2*y0 - 2*cosa*sina*x0)
coeff[5]=-a**2*b**2+a**2*(cosa**2*x0**2 - 2*cosa*sina*x0*y0 + sina**2*y0**2)+b**2*(cosa**2*y0**2+sina**2*x0**2+ 2*cosa*sina*x0*y0)
return coeff/coeff[5]
###############################################################################
###############################################################################
## Algebraic solution for an ellipse fitting
## from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
###############################################################################
###############################################################################
def fitEllipse(x,y):
"""
    This function fits the conic
    a[0]*x**2 + a[1]*x*y + a[2]*y**2 + a[3]*x + a[4]*y + a[5] = 0
    to a set of points (x,y) in the least-squares sense and returns the coefficients a.
"""
x = x[:,np.newaxis]
y = y[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = eig(np.dot(inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
return a
def ellipse_center(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the offset of the ellipse center in this new frame.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0], coeff[1]/2., coeff[2], coeff[3]/2., coeff[4]/2., coeff[5]
delta = b*b-a*c
    if delta == 0:
        print('Warning: the ellipse is degenerate: delta=0 (single point)')
x0=(c*d-b*f)/delta
y0=(a*f-b*d)/delta
return np.array([x0,y0])
def ellipse_angle_of_rotation(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the position angle of the ellipse.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0] , coeff[1]/2, coeff[2], coeff[3]/2, coeff[4]/2, coeff[5]
if (a == c):
print('Warning: the ellipse is degenerate to a circle, position angle set to 0 by default')
return 0
return 0.5*np.arctan(2*b/(a-c))
#def ellipse_axis_length( a ):
# b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
# up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
# down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
# down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
# res1=np.sqrt(up/down1)
# res2=np.sqrt(up/down2)
# return np.array([res1, res2])
def ellipse_axis_length(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the semi-major and semi-minor axis of the ellipse.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0] , coeff[1]/2, coeff[2], coeff[3]/2, coeff[4]/2, coeff[5]
up = 2*(a*f**2+c*d**2+g*b**2-2*b*d*f-a*c*g)
# print((a-c)*(a-c))
down1=(b**2-a*c)*( np.sqrt((a-c)**2+4*b**2)-(a+c))
down2=(b**2-a*c)*(-np.sqrt((a-c)**2+4*b**2)-(a+c))
# print(down1,down2)
res1=np.sqrt(up/down1)
res2=np.sqrt(up/down2)
return np.array([res1, res2])
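# End-to-end sketch (synthetic points, illustrative only) of the algebraic fit: generate
# noisy points on an axis-aligned ellipse, fit the conic coefficients, then recover the
# centre, position angle and axis lengths with the helpers above.
def _demo_fit_ellipse():
    phi = np.linspace(0, 2 * np.pi, 100)
    x = 10. * np.cos(phi) + 2. + 0.05 * np.random.randn(100)
    y = 4. * np.sin(phi) - 1. + 0.05 * np.random.randn(100)
    coeff = fitEllipse(x, y)
    return ellipse_center(coeff), ellipse_angle_of_rotation(coeff), ellipse_axis_length(coeff)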
###############################################################################
###############################################################################
## Least square fit
###############################################################################
###############################################################################
def chi2(param_model, theta, rho, rho_error):
"""
    This function defines a chi squared between measurements given as (theta,rho)
    and an ellipse parametrized in the sky plane by param_model=(x0, y0, a, b, alpha).
    The error of each point is defined as the difference between the distance of the
    data point from the ellipse center and that of the ellipse point at the same theta.
"""
x0, y0, a, b, alpha = param_model
x = rho*np.cos(theta)
y = rho*np.sin(theta)
distance_data_to_ell_center = np.sqrt((x-x0)**2+(y-y0)**2)
p=(y0-y)/(x0-x)
phi = np.arctan(a/b*(p*np.cos(alpha)-np.sin(alpha))/(p*np.sin(alpha)+np.cos(alpha)))
distance_ell_to_ell_center = np.sqrt( a**2*np.cos(phi)**2+b**2*np.sin(phi)**2)
sigma2 = rho_error**2
return np.sum((distance_data_to_ell_center-distance_ell_to_ell_center)**2/sigma2)
def chi2_from_deprojected_ellipse(orbital_param_model, theta, rho, rho_error):
"""
    This function defines a chi squared between measurements given as (theta,rho)
    and an ellipse parametrized in the orbital plane by (a,e,itilt,omega,Omega).
    The angles must be expressed in radians.
    The error of each point is defined as the difference between the distance of the
    data point from the ellipse center and that of the ellipse point at the same theta.
"""
# a,e,itilt,omega,Omega=orbital_param_model
a,b,x0,y0,alpha=projected_param_from_ellipse_param(*orbital_param_model[0:6],verbose=False)
skyplane_param_model=x0,y0,a,b,alpha
return chi2(skyplane_param_model, theta, rho, rho_error)
###############################################################################
###############################################################################
## Deprojection of the ellipse
###############################################################################
###############################################################################
def deprojection_from_poly_coeff(coeff,verbose=True):
"""
    This function takes as input the ellipse polynomial coefficients coeff such that
    coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5] = 0
    and returns the deprojected parameters of the ellipse as [a, e, omega, Omega, i]:
    a = semi-major axis
    e = eccentricity
    omega = argument of pericenter
    Omega = longitude of ascending node
    i = inclination
"""
# This nomenclature is from Smart 1930
A=coeff[0]/coeff[5]
H=coeff[1]/2./coeff[5]
B=coeff[2]/coeff[5]
G=coeff[3]/2./coeff[5]
F=coeff[4]/2./coeff[5]
tan2Omega=(2*(H-F*G))/(F**2-G**2+A-B)
# print(' tan(2Omega)={0:5.2f}'.format(tan2Omega))
Omega=(np.arctan(tan2Omega))/2
tan2ioverp2=2*(H-F*G)/np.sin(2*Omega)
if tan2ioverp2 < 0:
Omega=(np.arctan(tan2Omega)+math.pi)/2
tan2ioverp2=2*(H-F*G)/np.sin(2*Omega)
if verbose:
print('Warning: increase Omega by pi/2 to avoid inconsistency')
p=np.sqrt(2/(F**2+G**2-A-B-tan2ioverp2))
itilt=np.arctan(p*np.sqrt(tan2ioverp2))
denom_tanomega=G*np.cos(Omega)+F*np.sin(Omega)
# print(' denom tan(omega)={0:5.2f}'.format(denom_tanomega))
if denom_tanomega != 0:
omega=np.arctan((F*np.cos(Omega)-G*np.sin(Omega))*np.cos(itilt)/(G*np.cos(Omega)+F*np.sin(Omega)))
else:
omega=0
e=-p/np.cos(omega)*(G*np.cos(Omega)+F*np.sin(Omega))
true_a=p/(1-e**2)
if verbose:
a,b=ellipse_axis_length(coeff)
itilt_before=np.arccos(np.min([a,b])/np.max([a,b]))
pa=ellipse_angle_of_rotation(coeff)
x0,y0=ellipse_center(coeff)
offset_distance=np.sqrt(x0**2+y0**2)
omega_before=np.arctan(y0/x0) #+270
e_before=offset_distance/(b)
print('Parameters of the ellipse before deprojection')
print(' a={0:5.2f}'.format(np.max([a,b])))
print(' e={0:5.3f}'.format(e_before))
print(' offset={0:5.2f}'.format(offset_distance))
print(' direction of offset={0:5.2f} deg (from W ccw)'.format(np.rad2deg(omega_before)))
print(' Omega={0:5.2f} deg'.format(np.rad2deg(pa)))
print(' i={0:5.2f} deg'.format(np.rad2deg(itilt_before)))
print('Parameters of the ellipse after deprojection')
print(' a={0:5.2f}'.format(true_a))
print(' e={0:5.3f}'.format(e))
print(' p={0:5.3f}'.format(p))
print(' omega={0:5.2f} deg'.format(np.rad2deg(omega)))
print(' Omega={0:5.2f} deg'.format(np.rad2deg(Omega)))
print(' i={0:5.2f} deg'.format(np.rad2deg(itilt)))
return [true_a, e, omega, Omega,itilt]
def deprojection_from_ellipse_param(a,b,x0,y0,pa,verbose=True):
"""
    This function takes as input the ellipse parameters
    a, b, x0, y0, pa (pa in radians) and
returns the deprojected parameters of the ellipse :
a = semi-major axis
e = eccentricity
omega = argument of pericenter in radian
Omega = longitude of ascending node in radian
i = inclination in radian
"""
coeff=ellipse_polynomial_coeff(a,b,x0,y0,pa)
print(coeff)
return deprojection_from_poly_coeff(coeff,verbose=verbose)
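# Illustrative call (assumed disc parameters): deproject a sky-plane ellipse given by
# (a, b, x0, y0, pa), with pa in radians, into [a, e, omega, Omega, i].
def _demo_deprojection():
    return deprojection_from_ellipse_param(2.0, 1.0, 0.1, -0.05, np.deg2rad(30.), verbose=False)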
#coeff = projected_coeff_from_ellipse_param(a,e,i,omega,Omega)
def projected_coeff_from_ellipse_param(a,e,i,omega,Omega):
"""
    This function takes as input the true orbital parameters of an ellipse (a,e,i,
    omega,Omega), the angles being in radians,
    and projects them onto the plane of the sky. It returns the polynomial
    coefficients of the ellipse in the plane of the sky (notation from Smart 1930)
defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
"""
n3 = np.cos(i)
    cosomega = np.cos(omega)
import datetime
import numpy as np
import matplotlib.pyplot as plt
from numpy.lib.function_base import append
import sympy as sp
from multiprocessing import Pool
import os
import cppsolver as cs
from tqdm import tqdm
from ..filter import Magnet_UKF, Magnet_KF
from ..solver import Solver, Solver_jac
class Simu_Data:
def __init__(self, gt, snr, result):
self.gt = gt
self.snr = snr
self.result = result
def __len__(self):
return self.gt.shape[0]
def store(self):
np.savez('result/test.npz', gt=self.gt, data=self.result)
class expression:
def __init__(self, mag_count=1):
if mag_count == 1:
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy],
VecB, 'numpy')
elif mag_count == 2:
x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs = sp.symbols(
'x0, y0, z0, M0, theta0, phy0, x1, y1, z1, M1, theta1, phy1, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
x = [x0, x1]
y = [y0, y1]
z = [z0, z1]
M = [M0, M1]
theta = [theta0, theta1]
phy = [phy0, phy1]
VecB = G
for i in range(mag_count):
vecR = sp.Matrix(
[xs - x[i], ys - y[i], zs - z[i]]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0] ** 2 + vecR[1] ** 2 + vecR[2] ** 2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecMi = 1e-7 * sp.exp(M[i]) * sp.Matrix([sp.sin(theta[i]) * sp.cos(
phy[i]), sp.sin(theta[i]) * sp.sin(phy[i]), sp.cos(theta[i])])
VecBi = 3 * vecR * (VecMi.T * vecR) / \
dis ** 5 - VecMi / dis ** 3
VecB += VecBi
VecB = 1e6 * VecB
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x0, y0, z0, M0, theta0, phy0, x1, y1,
z1, M1, theta1, phy1],
VecB, 'numpy')
class Result_Handler:
def __init__(self, simu_data, scale):
self.track_result = []
self.simu_data = simu_data
self.scale = scale
def __add__(self, new):
self.track_result.append(new)
return self
def get_gt_result(self):
a = self.simu_data.gt
b = []
for i in range(len(self.track_result)):
b.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
b = np.stack(b)
return [a, b]
def cal_loss(self):
dist = []
loss = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
point_estimate = np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
])
dist.append(np.linalg.norm(point_gt, 2))
loss.append(np.linalg.norm(point_gt - point_estimate, 2))
dist = 1e2 * np.array(dist)
loss = 1e2 * np.array(loss)
return [self.scale, dist, loss]
def gt_and_route(self):
dist = []
route = []
for i in range(len(self.simu_data)):
point_gt = self.simu_data.gt[i]
dist.append(np.linalg.norm(point_gt, 2))
route.append(np.array([
self.track_result[i]['X0'], self.track_result[i]['Y0'],
self.track_result[i]['Z0']
]))
dist = np.array(dist)
route = np.stack(route, axis=0)
idx = np.argsort(dist)
gt = self.simu_data.gt[idx]
route = route[idx]
return [gt, route]
# plt.plot(dist, loss, label='scale = {}'.format(self.scale))
# plt.legend()
# print('debug')
class Simu_Test:
def __init__(self, start, stop, scales, pSensor=None, resolution=100):
self.scales = scales
self.M = 2.7
self.build_route(start, stop, resolution)
if pSensor is None:
self.build_psensor()
else:
self.pSensor = pSensor
# self.build_expression()
self.params = {
'm': np.log(self.M),
'theta': 0,
'phy': 0,
'gx': 50 / np.sqrt(2) * 1e-6,
'gy': 50 / np.sqrt(2) * 1e-6,
'gz': 0,
}
def build_expression(self):
x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs = sp.symbols(
'x, y, z, M, theta, phy, gx, gy, gz, xs, ys, zs', real=True)
G = sp.Matrix([[gx], [gy], [gz]])
# theta2 = sp.tanh(theta)
# phy2 = sp.tanh(phy)
vecR = sp.Matrix([xs - x, ys - y, zs - z]).reshape(3, 1)
# vecR = sp.Matrix([x, y, z]).reshape(3, 1)
dis = sp.sqrt(vecR[0]**2 + vecR[1]**2 + vecR[2]**2)
# VecM = M*sp.Matrix([sp.sin(theta2)*sp.cos(phy2),
# sp.sin(theta2)*sp.sin(phy2), sp.cos(theta2)])
VecM = 1e-7 * sp.exp(M) * sp.Matrix([
sp.sin(theta) * sp.cos(phy),
sp.sin(theta) * sp.sin(phy),
sp.cos(theta)
])
VecB = 3 * vecR * (VecM.T * vecR) / dis**5 - VecM / dis**3 + G
VecB *= 1e6
# convert to function for faster evaluation
self.VecB = sp.lambdify(
[gx, gy, gz, xs, ys, zs, x, y, z, M, theta, phy], VecB, 'numpy')
def build_route(self, start, stop, resolution):
# linear route
theta = 90 / 180.0 * np.pi
route = np.linspace(start, stop, resolution)
route = np.stack([route * np.cos(theta), route * np.sin(theta)]).T
route = np.pad(route, ((0, 0), (1, 0)),
mode='constant',
constant_values=0)
self.route = 1e-2 * route
# curvy route
tmp = np.linspace(start, stop, resolution)
route = np.stack([np.sin((tmp-start)/(stop-start) * np.pi * 5),
np.cos((tmp-start)/(stop-start) * np.pi * 5), tmp], axis=0).T
self.route = 1e-2 * route
def build_psensor(self):
self.pSensor = 1e-2 * np.array([
[1, 1, 1],
[-1, 1, 1],
[-1, -1, 1],
[1, -1, 1],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
])
def simulate_process(self, scale):
print(scale)
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.cal_loss()
def gt_and_result(self):
pSensori = 1 * self.pSensor
simu = self.estimate_B(pSensori)
simu.store()
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
return results.get_gt_result()
def compare_noise_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
if choice == 1:
simu = self.estimate_B(pSensori)
elif choice == 0:
simu = self.estimate_B_even_noise(pSensori)
elif choice == 2:
simu = self.estimate_B_singular_noise(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def compare_3_noise(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['Even Noise', 'Raw Noise', 'Single Noise']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/butterfly.jpg', dpi=900)
def compare_noise_type(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.calculate_process(scale)
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(1, )))
results.append(
pool.apply_async(self.compare_noise_type_thread, args=(2, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['ALL Noise', 'Only Noise', 'Only Precision']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
plt.savefig('result/compare_noise_type.jpg', dpi=900)
def compare_noise_type_thread(self, choice):
scale = 5
pSensori = scale * self.pSensor
simu = self.estimate_B(pSensori, choice)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, scale)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [choice, dist, loss]
def simulate(self, loop=1):
results = []
pool = Pool()
for scale in self.scales:
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.simulate_process(scale)
results.append(
pool.apply_async(self.simulate_process, args=(scale, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label='scale = {} cm'.format(int(key) * 2))
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_scale/{}.jpg'.format(name), dpi=900)
def simu_readings(self, pSensor):
simu = self.estimate_B(pSensor, noise_type=3)
simu.store()
def simu_gt_and_result(self, pSensor, route, path, name):
pSensori = pSensor
simu = self.estimate_B(pSensori, route=route)
# simu.store()
# params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
# self.M), 1e-2 * route[0, 0], 1e-2 * (route[0, 1]), 1e-2 * (route[0,
# 2]), 0, 0])
model = Solver_jac(1, route[0, 0], route[0, 1], route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
gt_ang = []
rec_ang = []
results = Result_Handler(simu, 1)
for i in tqdm(range(simu.result.shape[0])):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
gt_ang.append(np.array([0, 0, 1]))
t1 = result['theta0'].value
t2 = result['phy0'].value
rec_ang.append(
np.array(
[np.sin(t1) * np.cos(t2),
np.sin(t1) * np.sin(t2),
np.cos(t1)]))
[gt, route] = results.gt_and_route()
gt_ang = np.stack(gt_ang)
rec_ang = np.stack(rec_ang)
if not os.path.exists(path):
os.makedirs(path)
np.savez(os.path.join(path, name), gt=gt * 1e2, result=route *
1e2, gt_ang=gt_ang, result_ang=rec_ang)
def compare_layout_thread(self, index, pSensori):
overall_noise = np.random.randn(3)
simu = self.estimate_B(pSensori)
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, 1)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
[tmp, dist, loss] = results.cal_loss()
return [index, dist, loss]
def compare_layouts(self, pSensors, loop=1):
results = []
pool = Pool()
for index, pSensor in enumerate(pSensors):
# self.calculate_process(scale)
# test(self, scale)
for i in range(loop):
# self.calculate_process(scale)
# self.compare_layout_thread(index, pSensor)
results.append(
pool.apply_async(self.compare_layout_thread,
args=(index, pSensor)))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
# msg = ['Plane Layout(MIT)', 'Our Current Layout', 'Cube Layout']
msg = ['Best Layout', 'Current Layout']
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# plt.savefig('result/compare_layout/{}.jpg'.format(name), dpi=900)
plt.show()
def estimate_B(
self,
pSensor,
route=None,
noise_type=0,
overall_noise=None):
# noise type: 0: noise+precision, 1:only noise, 2: only precision
# 3:none
result = []
exp = expression()
if route is None:
route = self.route
for i in range(route.shape[0]):
routei = route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = 0.8 * np.random.randn(result.shape[0])
Noise_y = 0.8 * np.random.randn(result.shape[0])
Noise_z = 1.2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
if noise_type != 3:
if noise_type != 2:
result += Noise
if overall_noise is not None:
result += overall_noise
# add sensor resolution
if noise_type != 1:
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
SNR = 10 * np.log(signal_power / noise_power)
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(route, SNR, result)
def estimate_B_even_noise(self, pSensor):
result = []
exp = expression()
for i in range(self.route.shape[0]):
routei = self.route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise_y = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise_z = np.sqrt(2) / 2 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
result += Noise
# add sensor resolution
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
SNR = 10 * np.log(signal_power / noise_power)
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(self.route, SNR, result)
def compare_method_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori)
if choice == 0:
model = Solver_jac(1, self.route[0, 0], self.route[0, 1],
self.route[0, 2])
model.fit_params['m0'].value = np.log(self.M)
model.fit_params['m0'].vary = False
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = model.solve(datai, pSensori,
not model.fit_params['m0'].vary)
results += result
if choice == 1:
sensor_count = pSensori.shape[0]
my_filter = Magnet_UKF(
1, pSensori, R_std=[0.8, 0.8, 1.5] * sensor_count)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.ukf.x[0] = self.params['gx']
my_filter.ukf.x[1] = self.params['gy']
my_filter.ukf.x[2] = self.params['gz']
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.ukf.x[3] = self.route[0, 0]
my_filter.kf.x[5] = self.route[0, 1]
my_filter.ukf.x[5] = self.route[0, 1]
my_filter.kf.x[7] = self.route[0, 2]
my_filter.ukf.x[7] = self.route[0, 2]
my_filter.kf.x[9] = self.params['theta']
my_filter.ukf.x[9] = self.params['theta']
my_filter.kf.x[11] = self.params['phy']
my_filter.ukf.x[11] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1)
result = my_filter.update(datai)
results += result
if choice == 2: # simple kf
sensor_count = pSensori.shape[0]
my_filter = Magnet_KF(1, pSensori, R_std=[
0.8, 0.8, 1.5] * sensor_count)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.kf.x[5] = self.route[0, 1]
my_filter.kf.x[7] = self.route[0, 2]
my_filter.kf.x[9] = self.params['theta']
my_filter.kf.x[11] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1, 3)
result = my_filter.update(datai)
results += result
if choice == 3: # simple kf
sensor_count = pSensori.shape[0]
my_filter = Magnet_KF(
1, pSensori, R_std=[0.8, 0.8, 1.5] * sensor_count, ord=3)
my_filter.lm_model.fit_params['m0'].value = np.log(self.M)
my_filter.lm_model.fit_params['m0'].vary = False
my_filter.lm_model.fit_params['X0'].value = self.route[0, 0]
my_filter.lm_model.fit_params['Y0'].value = self.route[0, 1]
my_filter.lm_model.fit_params['Z0'].value = self.route[0, 2]
my_filter.kf.x[0] = self.params['gx']
my_filter.kf.x[1] = self.params['gy']
my_filter.kf.x[2] = self.params['gz']
my_filter.kf.x[3] = self.route[0, 0]
my_filter.kf.x[6] = self.route[0, 1]
my_filter.kf.x[9] = self.route[0, 2]
my_filter.kf.x[12] = self.params['theta']
my_filter.kf.x[15] = self.params['phy']
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
my_filter.predict()
datai = simu.result[i].reshape(-1, 3)
result = my_filter.update(datai)
results += result
return results.cal_loss()
def compare_method(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_method_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['LM', 'MY UKF', "KF on LM results", "KF on LM results ord=3"]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
plt.savefig('result/compare_method/{}.jpg'.format(name), dpi=600)
def compare_softiron(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_softiron_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_softiron_thread, args=(1, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['origin', 'Add softiron', ]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
root = 'result/compare_softiron'
if not os.path.exists(root):
os.makedirs(root)
plt.savefig(os.path.join(root, '{}.jpg'.format(name)), dpi=600)
def compare_softiron_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori)
if choice == 0:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
if choice == 1:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
soft_iron_param = 0.05 * np.random.randn(
simu.result.size//simu.result.shape[0])+1
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1)
datai *= soft_iron_param
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
return results.cal_loss()
def compare_hardiron(self, loop):
results = []
pool = Pool()
for i in range(loop):
# self.compare_method_thread(1)
results.append(
pool.apply_async(self.compare_hardiron_thread, args=(0, )))
results.append(
pool.apply_async(self.compare_hardiron_thread, args=(1, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(2, )))
# results.append(
# pool.apply_async(self.compare_method_thread, args=(3, )))
pool.close()
pool.join()
# print('debug')
loss_dict = {}
dist_dict = {}
for result in results:
[scale, dist, loss] = result.get()
if not str(scale) in loss_dict.keys():
loss_dict[str(scale)] = loss
dist_dict[str(scale)] = dist
else:
loss_dict[str(scale)] += loss
msg = ['origin', 'Add hardiron', ]
for key in dist_dict.keys():
plt.plot(dist_dict[key],
loss_dict[key] / loop,
label=msg[int(key)])
plt.legend()
plt.ylabel('Error(cm)')
plt.xlabel('Distance(cm)')
name = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
root = 'result/compare_hardiron'
if not os.path.exists(root):
os.makedirs(root)
plt.savefig(os.path.join(root, '{}.jpg'.format(name)), dpi=600)
def compare_hardiron_thread(self, choice):
pSensori = 5 * self.pSensor
simu = self.estimate_B(pSensori, noise_type=0)
if choice == 0:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1, 3)
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
if choice == 1:
init_param = np.array([0, 0, 0, np.log(
self.M), self.route[0, 0], self.route[0, 1], self.route[0, 2], 0, 0])
param = init_param.copy()
results = Result_Handler(simu, choice)
            hard_iron_param = 5.0 * np.random.randn(
                simu.result.size//simu.result.shape[0])+1
for i in range(simu.result.shape[0]):
datai = simu.result[i].reshape(-1)
                datai += hard_iron_param
result = cs.solve_1mag(
datai.reshape(-1), pSensori.reshape(-1), param)
param = result.copy()
results += {'X0': param[4], 'Y0': param[5], 'Z0': param[6]}
return results.cal_loss()
def estimate_B_singular_noise(self, pSensor):
result = []
exp = expression()
for i in range(self.route.shape[0]):
routei = self.route[i]
tmp = []
for j in range(pSensor.shape[0]):
param = [
self.params['gx'], self.params['gy'], self.params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], routei[0],
routei[1], routei[2], self.params['m'],
self.params['theta'], self.params['phy']
]
tmp.append(exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0).reshape(-1)
result.append(tmp)
result = np.concatenate(result, axis=0).reshape(-1, 3)
Noise_x = np.sqrt(1.5) * np.random.randn(result.shape[0])
Noise_y = 0 * np.random.randn(result.shape[0])
Noise_z = 0 * np.random.randn(result.shape[0])
Noise = np.stack([Noise_x, Noise_y, Noise_z]).T
result += Noise
# add sensor resolution
result = np.floor(result * 100.0)
result = result - np.mod(result, 15)
result = 1e-2 * result
# compute SNR
G = 1e6 * np.array(
[self.params['gx'], self.params['gy'], self.params['gz']])
signal_power = np.sum(np.power(result - Noise, 2), 1)
noise_power = np.sum(np.power(G + Noise, 2), 1)
SNR = 10 * np.log(signal_power / noise_power)
result = result.reshape(-1, pSensor.size)
SNR = SNR.reshape(-1, pSensor.shape[0])
# print('Debug')
return Simu_Data(self.route, SNR, result)
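# Hypothetical driver (illustrative sketch, not part of the original module): run the
# sensor-array scale comparison for a magnet moved from 10 cm to 30 cm away. Note that
# this relies on the package-relative imports above (Solver_jac, Magnet_UKF, cppsolver)
# and on the 'result/' output directories being available.
def _demo_simu_test():
    simu = Simu_Test(10, 30, scales=[3, 5], resolution=50)
    simu.simulate(loop=1)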
def simulate_2mag_3type_thread(pSensor, params, typ, i):
tmp = []
for j in range(pSensor.shape[0]):
param = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
params['Y0'], params['Z0'], params['m'],
params['theta0'], params['phy0'], params['X1'],
params['Y1'], params['Z1'], params['m'],
params['theta1'], params['phy1'],
]
tmp.append(simulate_2mag_3type.exp.VecB(*param).squeeze())
tmp = np.concatenate(tmp, axis=0)
tmp = tmp.reshape(-1)
print(i, ' finished ')
return [tmp, typ]
def simulate_2mag_3type_delta_thread(pSensor, params, typ, i):
tmp = []
for j in range(pSensor.shape[0]):
param = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'],
params['Y0'], params['Z0'], params['m'],
params['theta0'], params['phy0'], params['X1'],
params['Y1'], params['Z1'], params['m'],
params['theta1'], params['phy1'],
]
# the result after a short period of time
r = 1 * 1e-2 * np.random.rand()
theta = np.random.rand() * np.pi
phy = np.random.rand() * 2 * np.pi
dx0 = r * np.sin(theta) * np.cos(phy)
dy0 = r * np.sin(theta) * np.sin(phy)
dz0 = r * np.cos(theta)
r = 1 * 1e-2 * np.random.rand()
theta = np.random.rand() * np.pi
phy = np.random.rand() * 2 * np.pi
dx1 = r * np.sin(theta) * np.cos(phy)
dy1 = r * np.sin(theta) * np.sin(phy)
dz1 = r * np.cos(theta)
param2 = [
params['gx'], params['gy'], params['gz'],
pSensor[j][0], pSensor[j][1], pSensor[j][2], params['X0'] + dx0,
params['Y0'] + dy0, params['Z0'] + dz0, params['m'],
params['theta0'], params['phy0'], params['X1'] + dx1,
params['Y1'] + dy1, params['Z1'] + dz1, params['m'],
params['theta1'], params['phy1'],
]
aaa = np.concatenate(
[simulate_2mag_3type.exp.VecB(*param).squeeze(),
simulate_2mag_3type.exp.VecB(*param2).squeeze() -
simulate_2mag_3type.exp.VecB(*param).squeeze()],
axis=0)
tmp.append(aaa)
print(aaa.shape)
tmp = np.concatenate(tmp, axis=0)
tmp = tmp.reshape(-1)
print(i, ' finished ')
return [tmp, typ]
def simulate_2mag_3type(pSensor, size=1000, cls=3, edge=20):
size = int(size)
results = []
types = []
simulate_2mag_3type.exp = expression(2)
pool = Pool()
pool_results = []
i = 0
# for i in range(size * cls):
while(i < size * cls):
# G's Spherical Coordinates
t1 = np.pi * np.random.rand()
t2 = 2 * np.pi * np.random.rand()
# P1's Spherical Coordinates
        tt1 = np.pi * np.random.rand()
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.interpolate
from scipy.spatial import distance
from scipy import ndimage
from PIL import Image, ImageDraw
from skimage import measure
from skimage import morphology
from matplotlib.colors import LinearSegmentedColormap
import time, sys
import numba
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
def update_progress(progress):
"""progress bar from https://stackoverflow.com/questions/3160699/python-progress-bar
update_progress() : Displays or updates a console progress bar
Accepts a float between 0 and 1. Any int will be converted to a float.
A value under 0 represents a 'halt'.
A value at 1 or bigger represents 100%"""
barLength = 20 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
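# Minimal illustration (hypothetical loop) of the console progress bar defined above:
def _demo_update_progress(n_steps=50):
    for step in range(n_steps):
        update_progress((step + 1) / n_steps)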
class Channel:
"""class for Channel objects"""
def __init__(self,x,y,z,W,D):
"""initialize Channel object
x, y, z - coordinates of centerline
W - channel width
D - channel depth"""
self.x = x
self.y = y
self.z = z
self.W = W
self.D = D
class Cutoff:
"""class for Cutoff objects"""
def __init__(self,x,y,z,W,D):
"""initialize Cutoff object
x, y, z - coordinates of centerline
W - channel width
D - channel depth"""
self.x = x
self.y = y
self.z = z
self.W = W
self.D = D
class ChannelBelt3D:
"""class for 3D models of channel belts"""
def __init__(self, model_type, topo, strat, facies, facies_code, dx, channels):
"""model_type - can be either 'fluvial' or 'submarine'
topo - set of topographic surfaces (3D numpy array)
strat - set of stratigraphic surfaces (3D numpy array)
facies - facies volume (3D numpy array)
facies_code - dictionary of facies codes, e.g. {0:'oxbow', 1:'point bar', 2:'levee'}
dx - gridcell size (m)
channels - list of channel objects that form 3D model"""
self.model_type = model_type
self.topo = topo
self.strat = strat
self.facies = facies
self.facies_code = facies_code
self.dx = dx
self.channels = channels
def plot_xsection(self, xsec, colors, ve):
"""method for plotting a cross section through a 3D model; also plots map of
basal erosional surface and map of final geomorphic surface
xsec - location of cross section along the x-axis (in pixel/ voxel coordinates)
colors - list of RGB values that define the colors for different facies
ve - vertical exaggeration"""
strat = self.strat
dx = self.dx
fig1 = plt.figure(figsize=(20,5))
ax1 = fig1.add_subplot(111)
r,c,ts = np.shape(strat)
Xv = dx * np.arange(0,r)
for xloc in range(xsec,xsec+1,1):
for i in range(0,ts-1,3):
X1 = np.concatenate((Xv, Xv[::-1]))
Y1 = np.concatenate((strat[:,xloc,i], strat[::-1,xloc,i+1]))
Y2 = np.concatenate((strat[:,xloc,i+1], strat[::-1,xloc,i+2]))
Y3 = np.concatenate((strat[:,xloc,i+2], strat[::-1,xloc,i+3]))
if self.model_type == 'submarine':
ax1.fill(X1, Y1, facecolor=colors[2], linewidth=0.5, edgecolor=[0,0,0]) # oxbow mud
ax1.fill(X1, Y2, facecolor=colors[0], linewidth=0.5, edgecolor=[0,0,0]) # point bar sand
ax1.fill(X1, Y3, facecolor=colors[1], linewidth=0.5) # levee mud
if self.model_type == 'fluvial':
ax1.fill(X1, Y1, facecolor=colors[0], linewidth=0.5, edgecolor=[0,0,0]) # levee mud
ax1.fill(X1, Y2, facecolor=colors[1], linewidth=0.5, edgecolor=[0,0,0]) # oxbow mud
ax1.fill(X1, Y3, facecolor=colors[2], linewidth=0.5) # channel sand
ax1.set_xlim(0,dx*(r-1))
ax1.set_aspect(ve, adjustable='datalim')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.contourf(strat[:,:,ts-1],100,cmap='viridis')
ax2.contour(strat[:,:,ts-1],100,colors='k',linestyles='solid',linewidths=0.1,alpha=0.4)
ax2.plot([xloc, xloc],[0,r],'k',linewidth=2)
ax2.axis([0,c,0,r])
ax2.set_aspect('equal', adjustable='box')
ax2.set_title('final geomorphic surface')
ax2.tick_params(bottom=False,top=False,left=False,right=False,labelbottom=False,labelleft=False)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.contourf(strat[:,:,0],100,cmap='viridis')
ax3.contour(strat[:,:,0],100,colors='k',linestyles='solid',linewidths=0.1,alpha=0.4)
ax3.plot([xloc, xloc],[0,r],'k',linewidth=2)
ax3.axis([0,c,0,r])
ax3.set_aspect('equal', adjustable='box')
ax3.set_title('basal erosional surface')
ax3.tick_params(bottom=False,top=False,left=False,right=False,labelbottom=False,labelleft=False)
return fig1,fig2,fig3
class ChannelBelt:
"""class for ChannelBelt objects"""
def __init__(self, channels, cutoffs, cl_times, cutoff_times):
"""initialize ChannelBelt object
channels - list of Channel objects
cutoffs - list of Cutoff objects
cl_times - list of ages of Channel objects
cutoff_times - list of ages of Cutoff objects"""
self.channels = channels
self.cutoffs = cutoffs
self.cl_times = cl_times
self.cutoff_times = cutoff_times
def migrate(self,nit,saved_ts,deltas,pad,crdist,Cf,kl,kv,dt,dens,t1,t2,t3,aggr_factor,*D):
"""function for computing migration rates along channel centerlines and moving the centerlines accordingly
inputs:
nit - number of iterations
saved_ts - which time steps will be saved
deltas - distance between nodes on centerline
pad - padding (number of nodepoints along centerline)
crdist - threshold distance at which cutoffs occur
Cf - dimensionless Chezy friction factor
kl - migration rate constant (m/s)
kv - vertical slope-dependent erosion rate constant (m/s)
dt - time step (s)
dens - density of fluid (kg/m3)
t1 - time step when incision starts
t2 - time step when lateral migration starts
t3 - time step when aggradation starts
aggr_factor - aggradation factor
D - channel depth (m)"""
channel = self.channels[-1] # first channel is the same as last channel of input
x = channel.x; y = channel.y; z = channel.z
W = channel.W;
if len(D)==0:
D = channel.D
else:
D = D[0]
k = 1.0 # constant in HK equation
xc = [] # initialize cutoff coordinates
# determine age of last channel:
if len(self.cl_times)>0:
last_cl_time = self.cl_times[-1]
else:
last_cl_time = 0
dx, dy, dz, ds, s = compute_derivatives(x,y,z)
slope = np.gradient(z)/ds
# padding at the beginning can be shorter than padding at the downstream end:
pad1 = int(pad/10.0)
if pad1<5:
pad1 = 5
omega = -1.0 # constant in curvature calculation (Howard and Knutson, 1984)
gamma = 2.5 # from Ikeda et al., 1981 and Howard and Knutson, 1984
for itn in range(nit): # main loop
update_progress(itn/nit)
x, y = migrate_one_step(x,y,z,W,kl,dt,k,Cf,D,pad,pad1,omega,gamma)
x,y,z,xc,yc,zc = cut_off_cutoffs(x,y,z,s,crdist,deltas) # find and execute cutoffs
x,y,z,dx,dy,dz,ds,s = resample_centerline(x,y,z,deltas) # resample centerline
slope = np.gradient(z)/ds
# for itn<t1, z is unchanged
if (itn>t1) & (itn<=t2): # incision
if np.min(np.abs(slope))!=0:
z = z + kv*dens*9.81*D*slope*dt
else:
z = z - kv*dens*9.81*D*dt*0.00001
if (itn>t2) & (itn<=t3): # lateral migration
if np.min(np.abs(slope))!=0:
z = z + kv*dens*9.81*D*slope*dt - kv*dens*9.81*D*np.median(slope)*dt
else:
z = z # no change in z
if (itn>t3): # aggradation
if np.min(np.abs(slope))!=0:
z = z + kv*dens*9.81*D*slope*dt - aggr_factor*kv*dens*9.81*D*np.mean(slope)*dt
else:
z = z + aggr_factor*dt
if len(xc)>0: # save cutoff data
self.cutoff_times.append(last_cl_time+(itn+1)*dt/(365*24*60*60.0))
cutoff = Cutoff(xc,yc,zc,W,D) # create cutoff object
self.cutoffs.append(cutoff)
# saving centerlines:
if np.mod(itn,saved_ts)==0:
self.cl_times.append(last_cl_time+(itn+1)*dt/(365*24*60*60.0))
channel = Channel(x,y,z,W,D) # create channel object
self.channels.append(channel)
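    # Illustrative driver (a sketch, not part of the original class): assuming the usual
    # meanderpy-style helpers (generate_initial_channel, compute_derivatives,
    # migrate_one_step, cut_off_cutoffs, resample_centerline) are available in this
    # module, a model might be run as:
    #   ch = generate_initial_channel(W=200., D=6.0, Sl=0.0, deltas=50., pad=100, n_bends=30)
    #   chb = ChannelBelt(channels=[ch], cutoffs=[], cl_times=[0.0], cutoff_times=[])
    #   chb.migrate(nit=1000, saved_ts=20, deltas=50., pad=100, crdist=400., Cf=0.02,
    #               kl=60/(365*24*60*60.0), kv=1e-12, dt=2*0.05*365*24*60*60.0,
    #               dens=1000, t1=500, t2=700, t3=800, aggr_factor=2.0)
    #   fig = chb.plot('strat', pb_age=0, ob_age=0)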
def plot(self,plot_type,pb_age,ob_age,*end_time):
"""plot ChannelBelt object
plot_type - can be either 'strat' (for stratigraphic plot) or 'morph' (for morphologic plot)
pb_age - age of point bars (in years) at which they get covered by vegetation
ob_age - age of oxbow lakes (in years) at which they get covered by vegetation
end_time (optional) - age of last channel to be plotted (in years)"""
cot = np.array(self.cutoff_times)
sclt = np.array(self.cl_times)
if len(end_time)>0:
cot = cot[cot<=end_time]
sclt = sclt[sclt<=end_time]
times = np.sort(np.hstack((cot,sclt)))
times = np.unique(times)
order = 0 # variable for ordering objects in plot
# set up min and max x and y coordinates of the plot:
xmin = np.min(self.channels[0].x)
xmax = np.max(self.channels[0].x)
ymax = 0
for i in range(len(self.channels)):
ymax = max(ymax, np.max(np.abs(self.channels[i].y)))
ymax = ymax+2*self.channels[0].W # add a bit of space on top and bottom
ymin = -1*ymax
# size figure so that its size matches the size of the model:
fig = plt.figure(figsize=(20,(ymax-ymin)*20/(xmax-xmin)))
if plot_type == 'morph':
pb_crit = len(times[times<times[-1]-pb_age])/float(len(times))
ob_crit = len(times[times<times[-1]-ob_age])/float(len(times))
green = (106/255.0,159/255.0,67/255.0) # vegetation color
pb_color = (189/255.0,153/255.0,148/255.0) # point bar color
ob_color = (15/255.0,58/255.0,65/255.0) # oxbow color
pb_cmap = make_colormap([green,green,pb_crit,green,pb_color,1.0,pb_color]) # colormap for point bars
ob_cmap = make_colormap([green,green,ob_crit,green,ob_color,1.0,ob_color]) # colormap for oxbows
plt.fill([xmin,xmax,xmax,xmin],[ymin,ymin,ymax,ymax],color=(106/255.0,159/255.0,67/255.0))
for i in range(0,len(times)):
if times[i] in sclt:
ind = np.where(sclt==times[i])[0][0]
x1 = self.channels[ind].x
y1 = self.channels[ind].y
W = self.channels[ind].W
xm, ym = get_channel_banks(x1,y1,W)
if plot_type == 'morph':
if times[i]>times[-1]-pb_age:
plt.fill(xm,ym,facecolor=pb_cmap(i/float(len(times)-1)),edgecolor='k',linewidth=0.2)
else:
plt.fill(xm,ym,facecolor=pb_cmap(i/float(len(times)-1)))
else:
order = order+1
plt.fill(xm,ym,sns.xkcd_rgb["light tan"],edgecolor='k',linewidth=0.25,zorder=order)
if times[i] in cot:
ind = np.where(cot==times[i])[0][0]
for j in range(0,len(self.cutoffs[ind].x)):
x1 = self.cutoffs[ind].x[j]
y1 = self.cutoffs[ind].y[j]
xm, ym = get_channel_banks(x1,y1,self.cutoffs[ind].W)
if plot_type == 'morph':
plt.fill(xm,ym,color=ob_cmap(i/float(len(times)-1)))
else:
order = order+1
plt.fill(xm,ym,sns.xkcd_rgb["ocean blue"],edgecolor='k',linewidth=0.25,zorder=order)
x1 = self.channels[len(sclt)-1].x
y1 = self.channels[len(sclt)-1].y
xm, ym = get_channel_banks(x1,y1,self.channels[len(sclt)-1].W)
order = order+1
plt.fill(xm,ym,color=(16/255.0,73/255.0,90/255.0),zorder=order) #,edgecolor='k')
plt.axis('equal')
plt.xlim(xmin,xmax)
plt.ylim(ymin,ymax)
return fig
def create_movie(self,xmin,xmax,plot_type,filename,dirname,pb_age,ob_age,scale,*end_time):
"""method for creating movie frames (PNG files) that capture the plan-view evolution of a channel belt through time
movie has to be assembled from the PNG file after this method is applied
xmin - value of x coodinate on the left side of frame
xmax - value of x coordinate on right side of frame
        plot_type - can be either 'strat' (for stratigraphic plot) or 'morph' (for morphologic plot)
filename - first few characters of the output filenames
dirname - name of directory where output files should be written
pb_age - age of point bars (in years) at which they get covered by vegetation (if the 'morph' option is used for 'plot_type')
ob_age - age of oxbow lakes (in years) at which they get covered by vegetation (if the 'morph' option is used for 'plot_type')
scale - scaling factor (e.g., 2) that determines how many times larger you want the frame to be, compared to the default scaling of the figure
"""
sclt = np.array(self.cl_times)
if len(end_time)>0:
sclt = sclt[sclt<=end_time]
channels = self.channels[:len(sclt)]
ymax = 0
for i in range(len(channels)):
ymax = max(ymax, np.max(np.abs(channels[i].y)))
ymax = ymax+2*channels[0].W # add a bit of space on top and bottom
ymin = -1*ymax
for i in range(0,len(sclt)):
fig = self.plot(plot_type,pb_age,ob_age,sclt[i])
fig_height = scale*fig.get_figheight()
fig_width = (xmax-xmin)*fig_height/(ymax-ymin)
fig.set_figwidth(fig_width)
fig.set_figheight(fig_height)
fig.gca().set_xlim(xmin,xmax)
fig.gca().set_xticks([])
fig.gca().set_yticks([])
plt.plot([xmin+200, xmin+200+5000],[ymin+200, ymin+200], 'k', linewidth=2)
plt.text(xmin+200+2000, ymin+200+100, '5 km', fontsize=14)
fname = dirname+filename+'%03d.png'%(i)
fig.savefig(fname, bbox_inches='tight')
plt.close()
def build_3d_model(self,model_type,h_mud,levee_width,h,w,bth,dcr,dx,delta_s,starttime,endtime,xmin,xmax,ymin,ymax):
"""method for building 3D model from set of centerlines (that are part of a ChannelBelt object)
Inputs:
model_type - model type ('fluvial' or 'submarine')
h_mud - maximum thickness of overbank mud
levee_width - width of overbank mud
h - channel depth
w - channel width
bth - thickness of channel sand (only used in submarine models)
dcr - critical channel depth where sand thickness goes to zero (only used in submarine models)
dx - cell size in x and y directions
        delta_s - sampling distance along centerlines
starttime - age of centerline that will be used as the first centerline in the model
endtime - age of centerline that will be used as the last centerline in the model
xmin,xmax,ymin,ymax - x and y coordinates that define the model domain; if xmin is set to zero,
a plot of the centerlines is generated and the model domain has to be defined by clicking its upper
left and lower right corners
Returns: a ChannelBelt3D object
"""
sclt = np.array(self.cl_times)
ind1 = np.where(sclt>=starttime)[0][0]
ind2 = np.where(sclt<=endtime)[0][-1]
sclt = sclt[ind1:ind2+1]
channels = self.channels[ind1:ind2+1]
        cot = np.array(self.cutoff_times)
import pandas as pd
import thermotar as th
import numpy as np
import matplotlib.pyplot as plt
import warnings
from scipy import interpolate
import thermotar as th
from thermotar.sub_modules.potential_chunk import Potential
# fit within specified range to specified orer polynomial
def ranged_poly_fit(y,x,n=3,xl=None,xh=None,**kwargs):
'''
    Fit y against x with a polynomial of order n, restricted to the range xl <= x <= xh.
    Essentially just shorthand for selecting the range and calling np.polyfit.
    If xl and xh are not specified, the fit is over the full range of x.
'''
    if xl is None: xl = x.min()
    if xh is None: xh = x.max()
select = (x >= xl) & (x <= xh)
xs = x.loc[select]
ys = y.loc[select]
return np.polyfit(xs,ys,n,**kwargs)
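# Sketch (synthetic data, illustrative only): fit a cubic to a noisy parabola, restricted
# to 2 <= x <= 8. Inputs are pandas Series because the function uses .loc on them.
def _demo_ranged_poly_fit():
    x = pd.Series(np.linspace(0., 10., 200))
    y = pd.Series((x - 5.) ** 2 + 0.1 * np.random.randn(200))
    return ranged_poly_fit(y, x, n=3, xl=2., xh=8.)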
def get_poly_min(fit,xh=None,xl=None):
'''
    For a given set of polynomial coefficients (highest order first, as returned by
    np.polyfit), calculate the location of the minimum analytically.
    Only the global minimum of the critical points in the range [xl, xh] is returned,
    and only critical points that are genuine minima (positive second derivative) are kept.
    TODO: validate that the value at the edge is not lower than the value of the minimum -
    a minimum at the edge suggests that there isn't a true inversion.
'''
poln = np.poly1d(fit) # create a polynomial from the coefficients
crit_points = poln.deriv().r #roots of the derivative of the polynomial
# filter crit points to be real
crit_points_real = crit_points[crit_points.imag==0].real
# filter further to ensure they are minima not maxima or points of inflection.
if xh and xl:
select = (crit_points_real <= xh) & (crit_points_real >= xl)
crit_points_real = crit_points_real[select]
# filter last so that
crit_points_real = crit_points_real[poln.deriv(2)(crit_points_real) > 0] # NB 2nd derivative is strictly greater than so that inflection points aren't found
# y_crits
y_crits = poln(crit_points_real) # evaluate the polynomial at the critical points
y_min = y_crits.min() # find the critical points with the lowest value of y
### Old Implementation
#y = np.polyval(fit
#y_min = np.min(y)
x_min = np.asscalar(crit_points_real[y_crits==y_min]) # go back to finding which on is the minimum
return x_min,y_min
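# Worked example (a sketch): for p(x) = x**2 - 4*x + 5 the only real critical point is x = 2
# and the second derivative there is positive, so the minimum is (2.0, 1.0):
# fit = np.array([1.0, -4.0, 5.0])
# x_min, y_min = get_poly_min(fit)  # -> (2.0, 1.0)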
def basic_min(x,y):
'''
Find the minimum of y using a simple .min() and return the corresponding x value(s).
'''
y_min = np.min(y)
x_min = x[y == y_min]
return x_min,y_min
def choose_temp_range(df ,ptp = 200, pot_name = 'phi_tot',temp_name ='temp' ):
'''
Take a chunk, find the temperature at the absolute minimum of the potential and return the lower and upper limits of a window of width ptp centred on that minimum.
These limits are intended for the ranged polynomial fit/interpolation performed later.
'''
T = df[temp_name] # get the temperature data
pot = df[pot_name] # get the potential data
T_min, _pot_min = basic_min(T,pot) # find the temperature corresponding to the absolute lowest value of the potential
T_min = np.asscalar(T_min)
Tl = T_min - ptp/2 # upper and lower limits of an interval ptp wide centred about T_min
Th = T_min + ptp/2
return Tl, Th
def find_min(y,x, n, xl=None,xh=None,grid = 100000,err = False,validate = True):
'''
Find the minimum of one series with respect to another using a polynomial fit.
y = y data
x = x data
n = polynomial order to use
Optional inputs:
xl, xh = range to fit over (defaults to the full range of x)
grid = legacy grid size kept for backwards compatibility; the minimum is found analytically
from the fitted coefficients via get_poly_min rather than by evaluating on a grid.
Maybe also validate that the value at the edge is not the same as the value of the minimum -
if the minimum is at the edge, this suggests that there isn't a true inversion.
'''
if not xh: xh = np.max(x)
if not xl: xl = np.min(x)
fit = ranged_poly_fit(y,x,n=n,xl=xl,xh=xh )
#xs = np.linspace(xl,xh,grid)
try:
x_min,y_min = get_poly_min(fit,xl=xl,xh=xh) # to do, find more precise analytical minimum.
except ValueError:
x_min,y_min = (np.nan, np.nan)
return x_min,y_min, fit
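# Illustrative usage (a sketch; x and y are pandas Series):
# x = pd.Series(np.linspace(0.0, 10.0, 101)); y = (x - 5.0)**2 + 0.1
# x_min, y_min, fit = find_min(y, x, n=2)  # -> (~5.0, ~0.1, quadratic coefficients)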
def find_phi_min(chunk,n,potential_name = 'phi_tot', temp_name = 'temp',temp_range = 300,temp_centre = None,show_plots = False,grid=100000,verbose = False,plot_markers = 10):
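'''
Find the minimum of the potential of a thermotar chunk as a function of temperature.
Fits an n-th order polynomial to chunk.data[potential_name] vs chunk.data[temp_name]
over a window of width temp_range (centred on temp_centre if given, otherwise on the raw
minimum found by choose_temp_range) and returns (T_min, phi_min, fit).
'''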
temps = chunk.data[temp_name]
phis = chunk.data[potential_name]
if not temp_centre and (temp_range is not None):
Tl,Th = choose_temp_range(chunk.data, ptp = temp_range,pot_name = potential_name, temp_name=temp_name)
elif temp_range is not None:
Tl,Th = (temp_centre - temp_range/2,temp_centre+temp_range/2)
else:
Tl,Th = (temps.min(),temps.max())
# don't over-extend the range, otherwise an incorrect minimum may be found!!!!
if Th > temps.max(): Th = temps.max()
if Tl < temps.min(): Tl = temps.min()
if verbose: print(f'Fitting a {n}-order polynomial between T = {Tl:.3f},{Th:.3f} K.')
T_min,phi_min,fit = find_min(phis,temps,n,xl=Tl,xh=Th,grid=grid)
if verbose: print(f'Minimum found at T = {T_min:.3f} ')
if show_plots:
Ts = np.linspace(Tl,Th,grid)
plt.plot(Ts,np.polyval(fit,Ts),c='b',label =f'{n} order fit ',ls = '--')
plt.plot(temps,phis,'ro' ,markevery = plot_markers,label='data')
plt.plot(T_min,phi_min,'ko')
plt.xlabel(r'$T$/K')
plt.ylabel(r'$\phi$/V')
plt.legend()
plt.show()
return T_min,phi_min,fit
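# Illustrative usage (a sketch; `chunk` is assumed to be a thermotar chunk whose .data
# frame has 'temp' and 'phi_tot' columns, as used above):
# T_min, phi_min, fit = find_phi_min(chunk, n=4, temp_range=300, show_plots=True)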
def find_x_intercept(y,x,offset=0, xmin=None,xmax=None,interp_grid = None, interp_modde = 'linear'):
'''
Find the x intercept of a set of data on a finite grid.
Uses a scipy tool to find the closest match to zero (+ offset), then returns the corresponding x value.
The search can be restricted to a range to avoid spurious intercepts, for example from noise in the data.
interp_grid is there to interpolate if need be; if used, the y data are interpolated between xmin and xmax with the specified number of points.
'''
# If not specified, set to maximum and minimum value of range
if not xmin: xmin = np.min(x)
#%%
import os
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
import argparse
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision.utils
import numpy as np
import os.path
from scipy.io import loadmat
from model import *
from utils import *
from args_python import *
from matplotlib import pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
import hdf5storage
EulerN=3
QuaternionN=4
ScaleSpaceAndGainN=2
class CustomDataset(Dataset):
"""TensorDataset with support of transforms.
"""
def __init__(self, tensors, transform=None):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
self.transform = transform
def __getitem__(self, index):
x = self.tensors[0][index]
if self.transform:
x = self.transform(x)
y = self.tensors[1][index]
return x, y
def __len__(self):
return self.tensors[0].size(0)
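# Illustrative usage (a sketch; tensor shapes below are placeholders):
# images = torch.randn(100, 1, 64, 64)
# labels = torch.randn(100, EulerN + ScaleSpaceAndGainN)
# dataset = CustomDataset((images, labels), transform=None)
# loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)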
#%%
def train(args, model, device, train_loader, optimizer, epoch, writer, Rbeta, zipped_vals, scheduler):
model.train()
run_loss = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
R_est = euler2R(output[:,0:EulerN])
R_target = euler2R(target[:,0:EulerN])
gt, pred, rot_loss = getlossrotation(False, R_est, R_target)
gain_scale_loss = getlossspacescale(output[:,EulerN],target[:,EulerN]) + getlossgain(output[:,EulerN+1],target[:,EulerN+1])
loss = rot_loss + gain_scale_loss
if args.test:
print("Ground truth : {} \n Predicted values : {}".format(torch.transpose(gt,1,2), pred))
break
run_loss += loss.item()
loss.backward()
optimizer.step()
scheduler.step()
if (batch_idx+1) % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.8f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx * len(data) / len(train_loader.dataset), run_loss/args.log_interval)) #
# grid = torchvision.utils.make_grid(data)
writer.add_scalar('training_loss', run_loss/args.log_interval, epoch*len(train_loader)+batch_idx)
# writer.add_image('images', grid)
writer.add_graph(model, data)
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
writer.add_histogram(tag, value.detach().cpu().numpy(), batch_idx+1)
run_loss = 0.0
def validate(args, model, device, val_loader, Rbeta, zipped_vals):
model.eval()
val_loss = 0.0
with torch.no_grad():
for data, target in val_loader:
data, target = data.to(device), target.to(device)
output = model(data)
R_est = euler2R(output[:,0:EulerN])
R_target = euler2R(target[:,0:EulerN])
gt, pred, rot_loss = getlossrotation(False, R_est, R_target)
gain_scale_loss = getlossspacescale(output[:,EulerN],target[:,EulerN]) + getlossgain(output[:,EulerN+1],target[:,EulerN+1])
loss_val = rot_loss + gain_scale_loss
val_loss += loss_val
val_loss /= len(val_loader)
print('\nValidation set: Average loss: {:.8f}\n'.format(val_loss.item()))
if args.test:
print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt,1,2), pred))
return val_loss
def test(args, model, device, test_loader, Rbeta, zipped_vals, data_stat):
if args.get_pred_only:
model.eval()
test_out_list = []
with torch.no_grad():
for data in test_loader:
data = data[0].to(device)
output = model(data)
test_out_list.append(output.cpu().numpy())
save_mat = np.concatenate(test_out_list)
hdf5storage.savemat(args.pred_folder+'/pred_labels.mat', {'labeldata':save_mat})
else:
model.eval()
test_loss = 0.0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
R_est = euler2R(output[:,0:EulerN])
R_target = euler2R(target[:,0:EulerN])
gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
gain_scale_loss = getlossspacescale(output[:,EulerN],target[:,EulerN]) + getlossgain(output[:,EulerN+1],target[:,EulerN+1])
loss_test = rot_loss + gain_scale_loss
test_loss += loss_test
test_loss /= len(test_loader)
print('\nTest set: Average loss: {:.8f}\n'.format(test_loss.item()))
print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt,1,2), pred))
# value = torch.add(torch.matmul(pred,gt),-1*torch.eye(3))
# print("Loss value for these sample {}".format(torch.norm(value,p='fro',dim=(2, 3))))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch 3D angle regression from 2D images')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 30)')
parser.add_argument('--no-cuda', action='store_false', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--UseQuaternionNotEuler', action='store_true', default=False, help='give this flag in order to use the Quaternion representation, otherwise the Euler angles representation will be used')
parser.add_argument('--ScaleSpaceMin', type=float, default=0.8, help='minimum value of the space scaling')
parser.add_argument('--ScaleSpaceMax', type=float, default=1.2, help='maximum value of the space scaling')
parser.add_argument('--GainMin', type=float, default=0.8, help='minimum value of the gain')
parser.add_argument('--GainMax', type=float, default=1.2, help='maximum value of the gain')
parser.add_argument('--RootDirectory4Data', default='./', help='the name of the root director for the data')
parser.add_argument('--arch', default='VGG',help='the architecture to use. options are VGG, MLP for now. Can add more')
parser.add_argument('--carve_val', action='store_false', default=True, help='Whether validation set has to be carved out from the training set. Default is true')
parser.add_argument('--test', action='store_true', default=False, help='Whether train or test mode. Default is train mode.')
parser.add_argument('--get_pred_only', action='store_true', default=False, help='Get only predictions from images')
parser.add_argument('--pred_folder', default='./', help='Directory of file with test images.')
args = parser.parse_args()
# args=Args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
trainingdirectory = args.RootDirectory4Data+"/"+"training"
trainingimagefile="imagefile.mat"
traininglabelfile="labelfile.mat"
train_images = hdf5storage.loadmat(os.path.join(trainingdirectory, trainingimagefile))['imagedata']
train_labels = hdf5storage.loadmat(os.path.join(trainingdirectory, traininglabelfile))['labeldata']
if args.carve_val:
print("Carving out validation set from training set")
train_images, val_images, train_labels, val_labels = train_test_split(train_images, train_labels, test_size=0.1, random_state=42)
else:
print("Loading validation set")
validationdirectory = args.RootDirectory4Data+"/"+"validation"
validationimagefile="imagefile.mat"
validationlabelfile="labelfile.mat"
val_images = hdf5storage.loadmat(os.path.join(validationdirectory, validationimagefile))['imagedata']
val_labels = hdf5storage.loadmat(os.path.join(validationdirectory, validationlabelfile))['labeldata']
train_images = np.expand_dims(train_images,1)
val_images = np.expand_dims(val_images,1)
import unittest
import numpy as np
from controlinverilog import mechatronics
from controlinverilog.state_space import StateSpace
import controlinverilog as civ
import matplotlib.pyplot as plt
class TestLtiSystem(unittest.TestCase):
def test_sequence(self):
sys1 = get_system1()
norm_infc = mechatronics.norm_hinf_discrete(*sys1.cofs)
norm_2c = mechatronics.norm_h2_discrete(*sys1.cofs[:3])
self.assertTrue(abs(norm_infc - 12.9998) / 12.9998 < 0.01)
self.assertTrue(abs(norm_2c - 1.3232) / 1.3232 < 0.01)
# print('sysa Inf Norm: %g' % norm_infc)
# print('sysa 2 Norm: %g' % norm_2c)
sysa = get_system2()
norm_infc = mechatronics.norm_hinf_continuous(*sysa.cofs)
norm_2c = mechatronics.norm_h2_continuous(*sysa.cofs[:3])
self.assertTrue(abs(norm_infc - 0.9999) < 0.0001)
self.assertTrue(abs(norm_2c - 75.1826) / 75.1826 < 0.0001)
# print('sysa Inf Norm: %g' % norm_infc)
# print('sysa 2 Norm: %g' % norm_2c)
sysd = sysa.cont2shift(1 / 122.88e6)
norm_infc = mechatronics.norm_hinf_discrete(*sysd.cofs)
norm_2c = mechatronics.norm_h2_discrete(*sysd.cofs[:3])
self.assertTrue(abs(norm_infc - 0.9999) < 0.0001)
self.assertTrue(abs(norm_2c - 0.00678229) / 0.00678229 < 0.0001)
# print('sysd Inf Norm: %g' % norm_infc)
# print('sysd 2 Norm: %g' % norm_2c)
def test_gramians(self):
sysa = get_system2()
A, B, C, D = sysa.cofs
Wc = mechatronics.controllability_gramian_continuous(A, B)
Wo = mechatronics.observability_gramian_continuous(A, C)
cWc = np.array(
[[6.999969e-01, 1.114907e-05, 2.368522e-06, -1.562850e-06],
[1.114907e-05, 2.353370e-01, -2.539451e-06, 6.104784e-07],
[2.368522e-06, -2.539451e-06, 3.775594e-02, 2.332329e-07],
[-1.562850e-06, 6.104784e-07, 2.332329e-07, 2.452912e-03]])
cWo = np.array(
[[6.999969e-01, -1.114907e-05, 2.368522e-06, 1.562850e-06],
[-1.114907e-05, 2.353370e-01, 2.539451e-06, 6.104784e-07],
[2.368522e-06, 2.539451e-06, 3.775594e-02, -2.332329e-07],
[1.562850e-06, 6.104784e-07, -2.332329e-07, 2.452912e-03]])
self.assertTrue(np.all(np.isclose(cWc, Wc, rtol=1e-5)))
self.assertTrue(np.all(np.isclose(cWo, Wo, rtol=1e-5)))
sysd = sysa.cont2shift(1 / 122.88e6)
A, B, C, D = sysd.cofs
Wc = mechatronics.controllability_gramian_discrete(A, B)
Wo = mechatronics.observability_gramian_discrete(A, C)
dWc = np.array(
[[5.696589e-09, 9.073142e-14, 1.927509e-14, -1.271850e-14],
[9.073142e-14, 1.915178e-09, -2.066611e-14, 4.968086e-15],
[1.927509e-14, -2.066611e-14, 3.072586e-10, 1.898054e-15],
[-1.271850e-14, 4.968086e-15, 1.898054e-15, 1.996185e-11]])
dWo = np.array(
[[8.601562e+07, -1.369998e+03, 2.910440e+02, 1.920430e+02],
[-1.369998e+03, 2.891821e+07, 3.120477e+02, 7.501562e+01],
[2.910440e+02, 3.120477e+02, 4.639449e+06, -2.865966e+01],
[1.920430e+02, 7.501562e+01, -2.865966e+01, 3.014138e+05]])
self.assertTrue(np.all(np.isclose(dWc, Wc, rtol=1e-5)))
self.assertTrue(np.all(np.isclose(dWo, Wo, rtol=1e-5)))
def test_cont2shift(self):
sysa = get_system2()
sysd = sysa.cont2shift(1 / 122.88e6)
Ad = np.array([[1, 0.000113, 4.483e-05, -1.69e-05],
[-0.000113, 0.9999, -0.0002071, 6.131e-05],
[4.483e-05, 0.0002071, 0.9997, 0.0002602],
[1.69e-05, 6.131e-05, -0.0002602, 0.9993]])
Bd = np.array([[-5.879e-07], [-7.271e-07], [4.579e-07], [1.632e-07]])
import sys
sys.path.insert(0, '../')
import matplotlib.pyplot as plt
import cv2
import json
import numpy as np
from mv3dpose.tracking import Track
from os.path import isdir, join, isfile
from os import listdir, makedirs
import mv3dpose.geometry.camera as camera
import shutil
from mpl_toolkits.mplot3d import Axes3D
from tqdm import tqdm
import math
dataset_dir = '/home/user/dataset'
dataset_json = join(dataset_dir, 'dataset.json')
vid_dir = join(dataset_dir, 'videos')
cam_dir = join(dataset_dir, 'cameras')
trk_dir = join(dataset_dir, 'tracks3d')
assert isdir(trk_dir), "the tracks must be extracted!"
assert isdir(cam_dir), "could not find cameras!"
assert isdir(vid_dir), "could not find videos!"
# ~~~~~ LOAD SETTINGS ~~~~~
Settings = json.load(open(dataset_json))
n_cameras = Settings['n_cameras']
valid_frames = Settings['valid_frames']
img_file_type = 'png'
if 'image_extension' in Settings:
img_file_type = Settings['image_extension']
print('CAMERAS', n_cameras)
print("#frames", len(valid_frames))
tracks = [json.load(open(join(trk_dir, f))) for f in sorted(listdir(trk_dir))]
print("#tracks", len(tracks))
# -- create lookups --
tracks_by_frame = {}
pose_by_track_and_frame = {}
for frame in valid_frames:
assert frame not in tracks_by_frame
tracks_by_frame[frame] = []
for tid, track in enumerate(tracks):
frames = track['frames']
poses = track['poses']
for i, t in enumerate(frames):
if t > frame:
break
elif t == frame:
tracks_by_frame[frame].append(tid)
pose_by_track_and_frame[tid, frame] = poses[i]
def get_cmap(n, name='hsv'):
return plt.cm.get_cmap(name, n)
# colors = get_cmap(len(tracks))
n_tracks = len(tracks)
if n_tracks > 11:
# colors = np.random.random(size=(n_tracks, 1, 3))
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# colors = [
# 'tab:blue',
# 'tab:orange',
# 'tab:green',
# 'tab:red',
# 'tab:purple',
# 'red',
# 'blue',
# 'green',
# 'navy',
# 'maroon',
# 'darkgreen'
# ]
else:
colors = [
'tab:blue',
'tab:orange',
'tab:green',
'tab:red',
'tab:purple',
'red',
'blue',
'green',
'navy',
'maroon',
'darkgreen'
]
# colors = [
# 'red', # 0
# 'blue', # 1
# 'green', # 2
# 'yellow', # 3
# 'green', # 4
# 'blue', # 5
# 'white', # 6
# 'hotpink', # 7
# 'magenta', # 8
# 'lime', # 9
# 'peru' # 10
# ][:n_tracks]
# colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# ~~~~~~~~~~~~~
# C A M E R A S
# ~~~~~~~~~~~~~
print('\n[load cameras]')
try:
Calib = [] # { n_frames x n_cameras }
for t in tqdm(valid_frames):
calib = []
Calib.append(calib)
for cid in range(n_cameras):
local_camdir = join(cam_dir, 'camera%02d' % cid)
assert isdir(local_camdir)
cam_fname = join(local_camdir, 'frame%09d.json' % t)
assert isfile(cam_fname), cam_fname
cam = camera.Camera.load_from_file(cam_fname)
calib.append(cam)
except AssertionError:
print('\tnew version of cameras is used...')
class CamerasPerFrame:
def __init__(self, cam_dir, n_cameras, valid_frames):
self.n_cameras = n_cameras
self.n_frames = len(valid_frames)
self.first_frame = valid_frames[0]
self.cameras = []
for cid in range(n_cameras):
camfile = join(cam_dir, 'camera%02d.json' % cid)
with open(camfile, 'r') as f:
cam_as_dict_list = json.load(f)
cam_as_object_list = []
for cam in cam_as_dict_list:
start_frame = cam['start_frame']
end_frame = cam['end_frame']
K = np.array(cam['K'])
rvec = np.array(cam['rvec'])
tvec = np.array(cam['tvec'])
distCoef = np.array(cam['distCoef'])
w = int(cam['w'])
h = int(cam['h'])
cam = camera.ProjectiveCamera(K, rvec, tvec, distCoef, w, h)
cam_as_object_list.append({
"start_frame": start_frame,
"end_frame": end_frame,
"cam": cam
})
self.cameras.append(cam_as_object_list)
def __getitem__(self, frame):
"""
:param frame: frame, starting at 0!
"""
frame += self.first_frame
cameras = [1] * self.n_cameras
for cid, cam_as_object_list in enumerate(self.cameras):
for cam in cam_as_object_list:
start_frame = cam['start_frame']
end_frame = cam['end_frame']
if start_frame <= frame <= end_frame:
cameras[cid] = cam['cam']
break
for cam in cameras:
assert cam != 1
return cameras
def __len__(self):
return self.n_frames
Calib = CamerasPerFrame(cam_dir, n_cameras, valid_frames)
# ====================================
# ~~~~ PLOT FRAMES ~~~~
# ====================================
output_dir = join(dataset_dir, 'visualization')
if isdir(output_dir):
shutil.rmtree(output_dir)
LIMBS = [
(0, 1), (0, 15), (0, 14), (15, 17), (14, 16),
(1, 2), (2, 3), (3, 4),
(1, 5), (5, 6), (6, 7),
(2, 8), (5, 11), (8, 11),
(8, 9), (9, 10), (10, 21), (21, 22), (22, 23),
(11, 12), (12, 13), (13, 18), (18, 19), (19, 20)
]
makedirs(output_dir)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
for i, frame in tqdm(enumerate(valid_frames)):
if True:
cameras = [1, 2, 5]
n_cameras = len(cameras)
else:
cameras = range(n_cameras)
fig = plt.figure(figsize=(16, 12))
H = 2 if n_cameras < 8 else 3
W = int(math.ceil(n_cameras / H))
fname = join(output_dir, 'frame%09d.png' % i)
tracks_on_frame = tracks_by_frame[frame]
for camnbr, cid in enumerate(cameras):
camera_img_dir = join(vid_dir, 'camera%02d' % cid)
# img_file = join(camera_img_dir, 'frame%09d.png' % frame)
img_file = join(camera_img_dir, ('frame%09d.' % frame) + img_file_type)
assert isfile(img_file), img_file
im = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
h, w, _ = im.shape
cam = Calib[i][cid]
ax = fig.add_subplot(H, W, camnbr+1)
ax.axis('off')
ax.set_xlim([0, w])
ax.set_ylim([h, 0])
ax.imshow(im)
for tid in tracks_on_frame:
color = colors[tid%len(colors)]
pose3d = pose_by_track_and_frame[tid, frame]
# we need to mask over None
assert len(pose3d) == 24
mask = [True] * 24
for jid in range(24):
if pose3d[jid] is None:
pose3d[jid] = [0, 0, 0]
mask[jid] = False
else:
mm = np.mean(pose3d[jid])
if isclose(0., mm):
mask[jid] = False
pose3d = np.array(pose3d, dtype=np.float32)
pose2d = cam.projectPoints(pose3d)
for jid in range(24):
if mask[jid]:
x, y = pose2d[jid]
ax.scatter(x, y, color=color)
for a, b in LIMBS:
if mask[a] and mask[b]:
x1, y1 = pose2d[a]
x2, y2 = pose2d[b]
if n_tracks > 11:
# ax.plot([x1, x2], [y1, y2], c=np.squeeze(color))
ax.plot([x1, x2], [y1, y2], color=color)
else:
ax.plot([x1, x2], [y1, y2], color=color)
# 3D plot ================
if True: # no 3D plot pls
ax = fig.add_subplot(H, W, n_cameras + 1, projection='3d')
# ax.axis('off')
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
ax.set_zlim([0, 3.5])
ax.set_xlabel('X')
ax.set_ylabel('Y')
for tid in tracks_on_frame:
color = colors[tid%len(colors)]
pose3d = pose_by_track_and_frame[tid, frame]
mask = [True] * 24
for jid in range(24):
if pose3d[jid] is None:
mask[jid] = False
else:
x, y, z = pose3d[jid]
if np.isclose(x, .0) and np.isclose(y, .0):
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
this is the object that holds the feedback table for SN1a
.masses gives a list of masses
.metallicities gives a list of possible yield metallicities
.elements gives the elements considered in the yield table
.table gives a dictionary where the yield table for a specific metallicity can be queried
.table[0.02] gives a yield table.
Keys of this object are ['Mass','mass_in_remnants','elements']
Mass is in units of Msun
'mass_in_remnants' is in units of Msun but carries a '-' sign (it is given as a negative value)
'elements' gives the yield in Msun normalised to Mass, i.e. the integral over all elements is unity
"""
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = np.genfromtxt(localpath + 'input/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.metallicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Thielemann(self):
"""
Thielemann 2003 yields as compiled in Travaglio 2004
"""
y = np.genfromtxt(localpath + 'input/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
metallicity_list = [0.02]
self.metallicities = metallicity_list
self.masses = [1.37409]
names = y.dtype.names
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields building up on Nomoto84
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
metallicity_list = [0.02,0.0]
self.metallicities = metallicity_list
self.masses = [1.38]
y = np.genfromtxt(localpath + 'input/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes:### without radioactive isotopes it should be used this way because the radioactive nuclides are already calculated in here
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear whether they are double-counted, probably not, but the remnant mass is too big)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
if metallicity == 0.02:
model = 'W7'
elif metallicity == 0.0:
model = 'W70'
else:
print('this metallicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0) metallicity')
additional_keys = ['Mass', 'mass_in_remnants']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = np.where(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.append(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -sum(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari(self):
'''
Loading the yield table from Portinari1998.
'''
self.metallicities = [0.0004,0.004,0.008,0.02,0.05]
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/0.02.txt',names=True)
self.masses = list(x['Mass'])
self.elements = list(x.dtype.names[3:])
yield_tables_final_structure = {}
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/%s.txt' %(metallicity),names=True)
for item in self.elements:
yield_tables_final_structure_subtable[item] = np.divide(x[item],x['Mass'])
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(x['Mass'] - x['ejected_mass'], x['Mass'])
for i,item in enumerate(self.masses):
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_tables_final_structure_subtable['mass_in_remnants'][i] + sum(list(yield_tables_final_structure_subtable[self.elements][i])))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def francois(self):
'''
Loading the yield table of Francois et al. 2004. Taken from tables 1 and 2 of the paper, with O, H and He added from WW95 tables 5A and 5B,
where all elements are for Z=Zsun and the values for Msun > 40 are kept the same as for Msun = 40.
Values from 11-25 Msun use case A from WW95 and 30-40 Msun use case B.
'''
y = np.genfromtxt(localpath + 'input/yields/Francois04/francois_yields.txt',names=True)
self.elements = list(y.dtype.names[1:])
self.masses = y[y.dtype.names[0]]
self.metallicities = [0.02]
######### going from absolute ejected masses to relative ejected masses, normalised by the initial mass of the star
for i,item in enumerate(y.dtype.names[1:]):
y[item] = np.divide(y[item],y['Mass'])
yield_tables = {}
for i,item in enumerate(self.metallicities):
yield_tables[item] = y
self.table = yield_tables
def chieffi04(self):
'''
Loading the yield table of chieffi04.
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = tables[metallicity_index]
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
for j,jtem in enumerate(self.masses):
yield_tables_final_structure_subtable['mass_in_remnants'][j] = yields_for_one_metallicity[str(jtem)][1] / float(jtem) # ,yield_tables_final_structure_subtable['Mass'][i])
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
for t,ttem in enumerate(element_position):
if ttem == item:
yield_tables_final_structure_subtable[item][j] += yields_for_one_metallicity[str(jtem)][t+3] / float(jtem)
# remnant + yields of all elements is less than the total mass. In the next loop the wind mass is calculated.
name_list = list(yield_tables_final_structure_subtable.dtype.names[3:]) + ['mass_in_remnants']
for i in range(len(yield_tables_final_structure_subtable)):
tmp = []
for j,jtem in enumerate(name_list):
tmp.append(yield_tables_final_structure_subtable[jtem][i])
tmp = sum(tmp)
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1 - tmp
yield_tables_final_structure[self.metallicities[metallicity_index]] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def chieffi04_net(self):
'''
Loading the yield table of chieffi04 corrected for Anders & Grevesse 1989 solar scaled initial yields
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[self.metallicities[metallicity_index]] = np.load(DATADIR + '/chieffi_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
#############################################
def Nugrid(self):
'''
Loading the NuGrid sn2 stellar yields from "NuGrid stellar data set I. Stellar yields from H to Bi for stars with metallicities Z = 0.02 and Z = 0.01".
The wind yields need to be added to the *exp* explosion yields.
No r-process contribution, but s- and p-process from AGB and massive stars.
Both delayed and rapid SN explosion post-processing are included. 'rapid' is not consistent with very massive stars, so we use the 'delayed' yield set.
Mass in remnants is not totally consistent with the paper table: [ 6.47634087, 2.67590435, 1.98070676] vs. [6.05,2.73,1.61], see table 4.
Same with z=0.02, but the other elements are implemented in the right way: [ 3.27070753, 8.99349996, 6.12286813, 3.1179861 , 1.96401573] vs. [3,8.75,5.71,2.7,1.6].
There is a switch to change between the two different methods (rapid/delayed explosion).
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float)]
tdtype2 = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float),('3200',float),('6000',float)]
expdtype = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('25_rapid',float)]
expdtype2 = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('32_delay',float),('32_rapid',float),('60_delay',float)]
yield_tables = {}
self.metallicities = [0.02,0.01]
which_sn_model_to_use = 'delay' # 'rapid'
for i,metallicity_index in enumerate([2,1]):
if i == 0:
z = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_delay'] += z['2500']
y['32_%s' %(which_sn_model_to_use)] += z['3200']
y['60_delay'] += z['6000']
else:
z = np.genfromtxt(localpath +'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_%s' %(which_sn_model_to_use)] += z['2500']
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(y['element1']):
element_list2.append(item.decode('utf8'))
y = rcfuncs.append_fields(y,'element',element_list2,usemask = False)
yield_tables[self.metallicities[i]] = y
self.elements = list(yield_tables[0.02]['element'])
# For python 3 the bytes need to be changed into strings
self.masses = np.array((15,20,25,32,60))
######
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicicty][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
if metallicity == 0.02:
base = np.zeros(len(self.masses))
else:
base = np.zeros(len(self.masses)-2)
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
if metallicity == 0.02:
yield_tables_final_structure_subtable['Mass'] = self.masses
else:
yield_tables_final_structure_subtable['Mass'] = self.masses[:-2]
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
if metallicity == 0.02:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(5)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_delay']
temp1[3] = line_of_one_element['32_%s' %(which_sn_model_to_use)]
temp1[4] = line_of_one_element['60_delay']
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses)
else:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(3)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_%s' %(which_sn_model_to_use)]
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses[:-2])
if metallicity == 0.02:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-sum(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-sum(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-sum(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure_subtable[final_mass_name_tag][3] = (1-sum(yield_tables_final_structure_subtable[self.elements][3]))
yield_tables_final_structure_subtable[final_mass_name_tag][4] = (1-sum(yield_tables_final_structure_subtable[self.elements][4]))
else:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-sum(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-sum(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-sum(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def one_parameter(self, elements, element_fractions):
"""
This function was introduced in order to find best-fit yield sets where each element has just a single yield (no metallicity or mass dependence).
One potential problem is that sn2 feedback has a large fraction of Neon (~0.01); the next one missing is Argon, but that only contributes about 0.05%. This might spoil the metallicity derivation a bit.
Another problem: He and the remnant mass fraction are not constrained by the APOGEE data. Maybe these can be constrained externally by yield sets, the cosmic abundance standard, or solar abundances.
"""
self.metallicities = [0.01]
self.masses = np.array([10])
self.elements = elements
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicicty][element]
yield_tables_final_structure = {}
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_table = np.core.records.fromarrays(list_of_arrays,names=names)
yield_table['Mass'] = self.masses
yield_table['mass_in_remnants'] = 0.1
yield_table['unprocessed_mass_in_winds'] = 1 - yield_table['mass_in_remnants']
for i,item in enumerate(self.elements[1:]):
yield_table[item] = element_fractions[i+1]
yield_table['H'] = -sum(element_fractions[1:])
yield_tables_final_structure[self.metallicities[0]] = yield_table
self.table = yield_tables_final_structure
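# Illustrative usage (a sketch; the element names and fractions below are placeholders):
# sn2 = SN2_feedback()
# sn2.one_parameter(['H', 'He', 'C', 'O', 'Fe'], [0.75, 0.26, 0.005, 0.01, 0.001])
# sn2.table[0.01]   # single 10 Msun mass bin with one constant yield per element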
def Nomoto2013(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicicty][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables_dict[metallicity]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(yields_for_one_metallicity['M']):
element_list2.append(item.decode('utf8'))
yields_for_one_metallicity = rcfuncs.append_fields(yields_for_one_metallicity,'element',element_list2,usemask = False)
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
#yield_tables_final_structure_subtable['mass_in_remnants'] = yields_for_one_metallicity['M']
temp1 = np.zeros(len(self.masses))
temp1[0] = yields_for_one_metallicity[0][21]
temp1[1] = yields_for_one_metallicity[0][22]
temp1[2] = yields_for_one_metallicity[0][23]
temp1[3] = yields_for_one_metallicity[0][24]
temp1[4] = yields_for_one_metallicity[0][25]
temp1[5] = yields_for_one_metallicity[0][26]
temp1[6] = yields_for_one_metallicity[0][27]
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(temp1,self.masses)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==jtem)][0]
temp1 = np.zeros(len(self.masses))
temp1[0] = line_of_one_element[21]
temp1[1] = line_of_one_element[22]
temp1[2] = line_of_one_element[23]
temp1[3] = line_of_one_element[24]
temp1[4] = line_of_one_element[25]
temp1[5] = line_of_one_element[26]
temp1[6] = line_of_one_element[27]
yield_tables_final_structure_subtable[item] += np.divide(temp1,self.masses)
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][0] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][0]-sum(yield_tables_final_structure_subtable[self.elements][0]))#yields_for_one_metallicity[0][21]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][1] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][1]-sum(yield_tables_final_structure_subtable[self.elements][1]))#yields_for_one_metallicity[0][22]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][2] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][2]-sum(yield_tables_final_structure_subtable[self.elements][2]))#yields_for_one_metallicity[0][23]#divided by mass because 'mass in remnant' is also normalised
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][3] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][3]-sum(yield_tables_final_structure_subtable[self.elements][3]))#yields_for_one_metallicity[0][24]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][4] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][4]-sum(yield_tables_final_structure_subtable[self.elements][4]))#yields_for_one_metallicity[0][25]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][5] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][5]-sum(yield_tables_final_structure_subtable[self.elements][5]))#yields_for_one_metallicity[0][26]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][6] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][6]-sum(yield_tables_final_structure_subtable[self.elements][6]))#yields_for_one_metallicity[0][27]#
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def Nomoto2013_net(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicicty][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[metallicity] = np.load(localpath + 'input/yields/Nomoto2013/nomoto_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
#######################
class AGB_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for agb stars.
The different methods load different tables from the literature. They are in the input/yields/ folder.
"""
def Ventura(self):
"""
		Ventura et al. 2013 net yields, provided directly by Paolo Ventura
"""
self.metallicities = [0.04,0.018,0.008,0.004,0.001,0.0003]
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/0.018.txt',names=True)
self.masses = x['Mass']
self.elements = ['H', 'He', 'Li','C','N','O','F','Ne','Na','Mg','Al','Si']
###
yield_tables_final_structure = {}
for metallicity in self.metallicities:
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/%s.txt' %(str(metallicity)),names=True)
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(x['Mass']))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
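			### one record per initial mass; fields are Mass, mass_in_remnants, unprocessed_mass_in_winds and the element yields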
yield_tables_final_structure_subtable['Mass'] = x['Mass']
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(x['mass_in_remnants'],x['Mass'])
for item in self.elements:
if item == 'C':
yield_tables_final_structure_subtable[item] = x['C12']
yield_tables_final_structure_subtable[item] += x['C13']
elif item == 'N':
yield_tables_final_structure_subtable[item] = x['N14']
elif item == 'O':
yield_tables_final_structure_subtable[item] = x['O16']
yield_tables_final_structure_subtable[item] += x['O17']
yield_tables_final_structure_subtable[item] += x['O18']
elif item == 'F':
yield_tables_final_structure_subtable[item] = x['F19']
elif item == 'Ne':
yield_tables_final_structure_subtable[item] = x['NE20']
yield_tables_final_structure_subtable[item] += x['NE22']
elif item == 'Na':
yield_tables_final_structure_subtable[item] = x['NA23']
elif item == 'Mg':
yield_tables_final_structure_subtable[item] = x['MG24']
yield_tables_final_structure_subtable[item] += x['MG25']
yield_tables_final_structure_subtable[item] += x['MG26']
elif item == 'Al':
yield_tables_final_structure_subtable[item] = x['AL26']
yield_tables_final_structure_subtable[item] += x['AL27']
elif item == 'Si':
yield_tables_final_structure_subtable[item] = x['SI28']
else:
yield_tables_final_structure_subtable[item] = x[item]
for item in self.elements:
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],x['Mass'])
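			### whatever is left over (1 - remnant fraction - summed element fractions) is booked as unprocessed mass in winds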
for i,item in enumerate(x['Mass']):
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_tables_final_structure_subtable['mass_in_remnants'][i] + sum(list(yield_tables_final_structure_subtable[self.elements][i])))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
###
def Nomoto2013(self):
'''
		Nomoto 2013 AGB yields up to 6.5 Msun; these are a copy of Karakas 2010, except that the yields here are given as net yields, which is less convenient
'''
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((1.,1.2,1.5,1.8,1.9,2.0,2.2,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0))#,6.5,7.0,8.0,10.))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
#########################
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
		self.elements = list(indexing.keys()) ## list() so it can be concatenated with additional_keys below (Python 3)
		### restructuring the tables so that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables_dict[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
					################### here we can change the yield that we need for processing; 'ejected_mass' is normalised by the initial mass to obtain relative masses
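					### note (assumption): with numpy under Python 3 the 'M' column holds bytes, so this string comparison may need a decode step similar to Nugrid()/Karakas() below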
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['M']==jtem)][0]
temp1 = np.zeros(len(self.masses))
for s in range(len(self.masses)):
temp1[s] = line_of_one_element[s+2]
yield_tables_final_structure_subtable[item] += np.divide(temp1,self.masses)
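			### the remnant mass fraction follows from mass conservation: 1 minus the summed relative yields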
for t in range(len(self.masses)):
yield_tables_final_structure_subtable[final_mass_name_tag][t] = (1-sum(yield_tables_final_structure_subtable[self.elements][t]))#yields_for_one_metallicity[0][21]#
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def Nugrid(self):
'''
		Loads the NuGrid intermediate-mass stellar yields from 'NuGrid stellar data set I: Stellar yields from H to Bi for stars with metallicities Z = 0.02 and Z = 0.01'
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float)]
yield_tables = {}
self.metallicities = [0.02,0.01]
for i,metallicity_index in enumerate([2,1]):
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
			## Python 3 needs a transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['element1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'element',element_list2,usemask = False)
yield_tables[self.metallicities[i]] = y
self.elements = list(yield_tables[0.02]['element'])
self.masses = np.array((1.65,2.0,3.0,5.0))
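		### the columns named '165', '200', '300' and '500' correspond to the 1.65, 2.0, 3.0 and 5.0 Msun models; the higher-mass columns are not used by this AGB loader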
######
		### restructuring the tables so that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
for i,item in enumerate(self.elements):
				################### here we can change the yield that we need for processing; 'ejected_mass' is normalised by the initial mass to obtain relative masses
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(4)
temp1[0] = line_of_one_element['165']
temp1[1] = line_of_one_element['200']
temp1[2] = line_of_one_element['300']
temp1[3] = line_of_one_element['500']
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses)
			for t in range(len(self.masses)):
				yield_tables_final_structure_subtable[final_mass_name_tag][t] = (1-sum(yield_tables_final_structure_subtable[self.elements][t]))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable[::-1]
self.table = yield_tables_final_structure
######
def Karakas(self):
'''
		Loads the yield table of Karakas 2010; the data are downloaded from Vizier on first use.
'''
import numpy.lib.recfunctions as rcfuncs
DATADIR = localpath + 'input/yields/Karakas2010'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/karakas_yields'.format(DATADIR)
def _download_karakas():
"""
Downloads Karakas yields from Vizier.
"""
#url = 'http://zenodo.org/record/12800/files/dartmouth.h5'
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'
			from urllib.request import urlretrieve ## Python 3: urlretrieve lives in urllib.request
			print('Downloading Karakas 2010 yield tables from Vizier (this should only happen the first time)...')
			if os.path.exists(MASTERFILE):
				os.remove(MASTERFILE)
			urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_karakas()
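		### the Vizier tar ball is unpacked into DATADIR; tablea2.dat to tablea5.dat hold the yields for the four metallicities read below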
tdtype = [('imass',float),('metallicity',float),('fmass',float),('species1','|S4'),('A',int),('net_yield',float),('ejected_mass',float),('initial_wind',float),('average_wind',float),('initial_mass_fraction',float),('production_factor',float)]
metallicity_list = [0.02, 0.008, 0.004 ,0.0001]
self.metallicities = metallicity_list
tables = []
for i,item in enumerate(metallicity_list):
y = np.genfromtxt('%s/tablea%d.dat' %(DATADIR,i+2), dtype = tdtype, names = None)
			## Python 3 needs a transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
tables.append(y)
### easy to extend to other species just make a new list of isotopes (see karakas tables)
### and then also extend the indexing variable.
### The choice for specific elements can be done later when just using specific species
hydrogen_list = ['n','p','d']
helium_list = ['he3','he4']
lithium_list = ['li7','be7','b8']
carbon_list = ['c12','c13','n13']
nitrogen_list = ['n14','n15','c14','o14','o15']
oxygen_list = [ 'o16','o17','o18','f17','f18']
fluorin_list = ['ne19','f19','o19']
neon_list = ['ne20','ne21','ne22','f20','na21','na22']
sodium_list = ['na23','ne23','mg23']
magnesium_list = ['mg24','mg25','mg26','al-6','na24','al25']
aluminium_list = ['mg27','al*6','al27','si27']
silicon_list = ['al28','si28','si29','si30','p29','p30']
phosphorus_list = ['si31','si32','si33','p31']
sulfur_list = ['s32','s33','s34','p32','p33','p34']
chlorine_list = ['s35']
iron_list = ['fe54', 'fe56','fe57','fe58']
manganese_list = ['fe55']
cobalt_list = ['ni59','fe59','co59']
nickel_list = ['ni58','ni60','ni61','ni62','co60','co61','fe60','fe61']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
#indexing['S_el'] = ni_to_bi
self.elements = list(indexing.keys())
		#### little fix for karakas tablea5.dat: 6.0 M_sun is written twice; we keep the first occurrence
#tables[3]['imass'][-77:] = 6.5 # this is the fix if the second 6msun line was interpreted as 6.5 msun
tables[3] = tables[3][:-77]
#### making the general feedback table with yields for the individual elements
### loop for the different metallicities
yield_tables = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
### loop for the different elements
yields_002 = {}
for i,item1 in enumerate(indexing):
unique_masses = len(np.unique(tables[metallicity_index]['imass']))
element = np.zeros((unique_masses,), dtype=[('imass',float),('species','|S4'),('fmass',float),('net_yield',float),('ejected_mass',float),('initial_mass_fraction',float),('initial_wind',float),('average_wind',float),('production_factor',float)])
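				### accumulator record for one element: the isotope rows listed in indexing[item1] are summed into it below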
for j,item in enumerate(indexing[item1]):
cut = np.where(tables[metallicity_index]['species']==item)
temp = tables[metallicity_index][cut]
if j == 0:
element['imass'] = temp['imass']
element['fmass'] = temp['fmass']
element['species'] = temp['species'] ### just for test purposes
element['net_yield'] += temp['net_yield']
element['ejected_mass'] += temp['ejected_mass']
element['initial_mass_fraction'] += temp['initial_mass_fraction']
element['initial_wind'] += temp['initial_wind']
element['average_wind'] += temp['average_wind']
element['production_factor'] += temp['production_factor']
yields_002[item1] = element
yield_tables[metallicity] = yields_002
		self.masses = np.unique(tables[0]['imass']) ## tables a3 and a4 (and maybe a5) are missing 6.5 Msun; it is probably easier to skip the 6.5 Msun entries altogether for interpolation reasons
		### restructuring the tables so that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
yields_for_one_metallicity = yield_tables[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
if metallicity == 0.02: #or metallicity == 0.0001:
base = np.zeros(len(self.masses))
else:
base = np.zeros(len(self.masses)-1)
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = yields_for_one_metallicity[self.elements[0]]['imass']
			yield_tables_final_structure_subtable[final_mass_name_tag] = np.divide(yields_for_one_metallicity[self.elements[0]]['fmass'],yield_tables_final_structure_subtable['Mass'])