import scipy
import numpy as np
from scipy.stats import pearsonr
from scipy.stats import spearmanr
import csv
ranking = np.array([])
GooogleScoreOriginData = np.array([])
GoogleScoreGoogleTranslatedData = np.array([])
GoogleScoreYandexTranslatedData = np.array([])
GoogleScoreBaiduTranslatedData = np.array([])
BaiduPositiveProbabilityOriginData = np.array([])
BaiduPositiveProbabilityGoogleTranslatedData = np.array([])
BaiduPositiveProbabilityYandexTranslatedData = np.array([])
BaiduPositiveProbabilityBaiduTranslatedData = np.array([])
BaiduAnalysisOriginDataGoogleStandard = np.array([])
BaiduAnalysisGoogleTranslatedDataGoogleStandard = np.array([])
BaiduAnalysisYandexTranslatedDataGoogleStandard = np.array([])
BaiduAnalysisBaiduTranslatedDataGoogleStandard = np.array([])
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import fabs
from compas.utilities import pairwise
from compas.geometry.basic import add_vectors
from compas.geometry.basic import subtract_vectors
from compas.geometry.basic import scale_vector
from compas.geometry.basic import cross_vectors
from compas.geometry.basic import dot_vectors
from compas.geometry.basic import length_vector_xy
from compas.geometry.basic import subtract_vectors_xy
from compas.geometry.queries import is_point_on_segment
from compas.geometry.queries import is_point_on_segment_xy
__author__ = ['<NAME>', ]
__copyright__ = 'Copyright 2016 - Block Research Group, ETH Zurich'
__license__ = 'MIT License'
__email__ = '<EMAIL>'
__all__ = [
'intersection_line_line',
'intersection_line_line_xy',
'intersection_segment_segment_xy',
'intersection_circle_circle_xy',
'intersection_line_triangle',
'intersection_line_plane',
'intersection_segment_plane',
'intersection_plane_plane',
'intersection_plane_plane_plane',
# 'intersection_lines',
# 'intersection_lines_xy',
# 'intersection_planes',
# 'intersection_segment_segment',
# 'intersection_circle_circle',
]
def intersection_line_line(l1, l2):
"""Computes the intersection of two lines.
Parameters
----------
l1 : tuple, list
XYZ coordinates of two points defining the first line.
l2 : tuple, list
XYZ coordinates of two points defining the second line.
Returns
-------
list
XYZ coordinates of the two points marking the shortest distance between the lines.
If the lines intersect, these two points are identical.
If the lines are skewed and thus only have an apparent intersection, the two
points are different.
If the lines are parallel, the return value is [None, None].
Examples
--------
>>>
"""
a, b = l1
c, d = l2
ab = subtract_vectors(b, a)
cd = subtract_vectors(d, c)
n = cross_vectors(ab, cd)
n1 = cross_vectors(ab, n)
n2 = cross_vectors(cd, n)
plane_1 = (a, n1)
plane_2 = (c, n2)
i1 = intersection_line_plane(l1, plane_2)
i2 = intersection_line_plane(l2, plane_1)
return [i1, i2]
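# A minimal usage sketch (not part of the original module): two lines through the
# origin, one along X and one along Y, intersect exactly, so the two returned
# points coincide; for skew lines they would differ and mark the shortest connection.
def _example_intersection_line_line():
    l1 = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))
    l2 = ((0.0, 0.0, 0.0), (0.0, 1.0, 0.0))
    x1, x2 = intersection_line_line(l1, l2)
    return x1, x2  # both [0.0, 0.0, 0.0]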
def intersection_line_line_xy(l1, l2):
"""Compute the intersection of two lines, assuming they lie in the XY plane.
Parameters
----------
    l1 : tuple
        XY(Z) coordinates of two points defining the first line.
    l2 : tuple
        XY(Z) coordinates of two points defining the second line.
Returns
-------
None
If there is no intersection point (parallel lines).
list
XYZ coordinates of intersection point if one exists (Z = 0).
Notes
-----
    There is no intersection point only if the lines are parallel [1]_.
References
----------
.. [1] Wikipedia. *Line-line intersection*.
Available at: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
"""
a, b = l1
c, d = l2
x1, y1 = a[0], a[1]
x2, y2 = b[0], b[1]
x3, y3 = c[0], c[1]
x4, y4 = d[0], d[1]
d = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if d == 0.0:
return None
a = (x1 * y2 - y1 * x2)
b = (x3 * y4 - y3 * x4)
x = (a * (x3 - x4) - (x1 - x2) * b) / d
y = (a * (y3 - y4) - (y1 - y2) * b) / d
return x, y, 0.0
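# A minimal sketch (not part of the original module): the diagonals of the unit
# square cross at (0.5, 0.5); the function returns the point with Z = 0.0.
def _example_intersection_line_line_xy():
    diag1 = ((0.0, 0.0), (1.0, 1.0))
    diag2 = ((0.0, 1.0), (1.0, 0.0))
    return intersection_line_line_xy(diag1, diag2)  # (0.5, 0.5, 0.0)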
def intersection_segment_segment(ab, cd, tol=0.0):
"""Compute the intersection of two lines segments.
Parameters
----------
ab : tuple
XYZ coordinates of two points defining a line segment.
cd : tuple
XYZ coordinates of two points defining another line segment.
Returns
-------
None
If there is no intersection point.
list
XYZ coordinates of intersection point if one exists.
"""
    # intersection_line_line returns the two closest points on the underlying lines;
    # for a true intersection these coincide, so the first point is returned below.
    x1, x2 = intersection_line_line(ab, cd)
    if x1 is None or x2 is None:
        return None
    if not is_point_on_segment(x1, ab, tol):
        return None
    if not is_point_on_segment(x2, cd, tol):
        return None
    return x1
def intersection_segment_segment_xy(ab, cd, tol=0.):
"""Compute the intersection of two lines segments, assuming they lie in the XY plane.
Parameters
----------
ab : tuple
XY(Z) coordinates of two points defining a line segment.
cd : tuple
XY(Z) coordinates of two points defining another line segment.
Returns
-------
None
If there is no intersection point.
list
XYZ coordinates of intersection point if one exists.
"""
intx_pt = intersection_line_line_xy(ab, cd)
if not intx_pt:
return None
if not is_point_on_segment_xy(intx_pt, ab, tol):
return None
if not is_point_on_segment_xy(intx_pt, cd, tol):
return None
return intx_pt
def intersection_circle_circle():
raise NotImplementedError
def intersection_circle_circle_xy(circle1, circle2):
"""Calculates the intersection points of two circles in 2d lying in the XY plane.
Parameters
----------
circle1 : tuple
center, radius of the first circle in the xy plane.
circle2 : tuple
center, radius of the second circle in the xy plane.
Returns
-------
points : list of tuples
the intersection points if there are any
None
if there are no intersection points
"""
p1, r1 = circle1[0], circle1[1]
p2, r2 = circle2[0], circle2[1]
d = length_vector_xy(subtract_vectors_xy(p2, p1))
if d > r1 + r2:
return None
if d < abs(r1 - r2):
return None
if (d == 0) and (r1 == r2):
return None
a = (r1 * r1 - r2 * r2 + d * d) / (2 * d)
h = (r1 * r1 - a * a) ** 0.5
cx2 = p1[0] + a * (p2[0] - p1[0]) / d
cy2 = p1[1] + a * (p2[1] - p1[1]) / d
i1 = ((cx2 + h * (p2[1] - p1[1]) / d), (cy2 - h * (p2[0] - p1[0]) / d), 0)
i2 = ((cx2 - h * (p2[1] - p1[1]) / d), (cy2 + h * (p2[0] - p1[0]) / d), 0)
return i1, i2
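# A minimal sketch (not part of the original module): two unit circles with centres
# one unit apart intersect at x = 0.5, y = +/- sqrt(3)/2.
def _example_intersection_circle_circle_xy():
    circle1 = ((0.0, 0.0, 0.0), 1.0)
    circle2 = ((1.0, 0.0, 0.0), 1.0)
    return intersection_circle_circle_xy(circle1, circle2)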
def intersection_line_triangle(line, triangle, epsilon=1e-6):
"""Computes the intersection point of a line (ray) and a triangle
    based on the Möller-Trumbore intersection algorithm
Parameters
----------
line : tuple
Two points defining the line.
triangle : sequence of sequence of float
XYZ coordinates of the triangle corners.
Returns
-------
point : tuple
if the line (ray) intersects with the triangle, None otherwise.
Notes
-----
    The line is treated as a continuous, directed ray and not as a line segment with a start and end point
"""
a, b, c = triangle
v1 = subtract_vectors(line[1], line[0])
p1 = line[0]
# Find vectors for two edges sharing V1
e1 = subtract_vectors(b, a)
e2 = subtract_vectors(c, a)
# Begin calculating determinant - also used to calculate u parameter
p = cross_vectors(v1, e2)
# if determinant is near zero, ray lies in plane of triangle
det = dot_vectors(e1, p)
# NOT CULLING
if(det > - epsilon and det < epsilon):
return None
inv_det = 1.0 / det
# calculate distance from V1 to ray origin
t = subtract_vectors(p1, a)
    # Calculate u parameter and test bound
u = dot_vectors(t, p) * inv_det
# The intersection lies outside of the triangle
if(u < 0.0 or u > 1.0):
return None
    # Prepare to test v parameter
q = cross_vectors(t, e1)
    # Calculate V parameter and test bound
v = dot_vectors(v1, q) * inv_det
# The intersection lies outside of the triangle
if(v < 0.0 or u + v > 1.0):
return None
t = dot_vectors(e2, q) * inv_det
if t > epsilon:
return add_vectors(p1, scale_vector(v1, t))
# No hit
return None
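# A minimal sketch (not part of the original module): a downward ray pierces the
# right triangle spanning the unit axes of the XY plane.
def _example_intersection_line_triangle():
    ray = ((0.25, 0.25, 1.0), (0.25, 0.25, -1.0))
    triangle = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
    return intersection_line_triangle(ray, triangle)  # [0.25, 0.25, 0.0]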
def intersection_line_plane(line, plane, epsilon=1e-6):
"""Computes the intersection point of a line (ray) and a plane
Parameters
----------
line : tuple
Two points defining the line.
plane : tuple
The base point and normal defining the plane.
Returns
-------
point : tuple
if the line (ray) intersects with the plane, None otherwise.
"""
pt1 = line[0]
pt2 = line[1]
p_cent = plane[0]
p_norm = plane[1]
v1 = subtract_vectors(pt2, pt1)
dot = dot_vectors(p_norm, v1)
if fabs(dot) > epsilon:
v2 = subtract_vectors(pt1, p_cent)
fac = -dot_vectors(p_norm, v2) / dot
vec = scale_vector(v1, fac)
return add_vectors(pt1, vec)
return None
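# A minimal sketch (not part of the original module): a vertical line crosses the
# XY plane (base point at the origin, normal along Z) at Z = 0.
def _example_intersection_line_plane():
    line = ((1.0, 1.0, 1.0), (1.0, 1.0, -1.0))
    plane = ((0.0, 0.0, 0.0), (0.0, 0.0, 1.0))
    return intersection_line_plane(line, plane)  # [1.0, 1.0, 0.0]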
def intersection_segment_plane(segment, plane, epsilon=1e-6):
"""Computes the intersection point of a line segment and a plane
Parameters
----------
segment : tuple
Two points defining the line segment.
plane : tuple
The base point and normal defining the plane.
Returns
-------
point : tuple
if the line segment intersects with the plane, None otherwise.
"""
pt1 = segment[0]
pt2 = segment[1]
p_cent = plane[0]
p_norm = plane[1]
v1 = subtract_vectors(pt2, pt1)
dot = dot_vectors(p_norm, v1)
if fabs(dot) > epsilon:
v2 = subtract_vectors(pt1, p_cent)
fac = - dot_vectors(p_norm, v2) / dot
if fac >= 0. and fac <= 1.:
vec = scale_vector(v1, fac)
return add_vectors(pt1, vec)
return None
else:
return None
def intersection_plane_plane(plane1, plane2, epsilon=1e-6):
"""Computes the intersection of two planes
Parameters
----------
plane1 : tuple
The base point and normal (normalized) defining the 1st plane.
plane2 : tuple
The base point and normal (normalized) defining the 2nd plane.
Returns
-------
line : tuple
Two points defining the intersection line. None if planes are parallel.
"""
# check for parallelity of planes
if abs(dot_vectors(plane1[1], plane2[1])) > 1 - epsilon:
return None
vec = cross_vectors(plane1[1], plane2[1]) # direction of intersection line
p1 = plane1[0]
vec_inplane = cross_vectors(vec, plane1[1])
p2 = add_vectors(p1, vec_inplane)
px1 = intersection_line_plane((p1, p2), plane2)
px2 = add_vectors(px1, vec)
return px1, px2
def intersection_plane_plane_plane(plane1, plane2, plane3, epsilon=1e-6):
"""Computes the intersection of three planes
Parameters
----------
plane1 : tuple
The base point and normal (normalized) defining the 1st plane.
    plane2 : tuple
        The base point and normal (normalized) defining the 2nd plane.
    plane3 : tuple
        The base point and normal (normalized) defining the 3rd plane.
Returns
-------
point : tuple
The intersection point. None if two (or all three) planes are parallel.
Notes
-----
    Currently this only computes the intersection point. If, for example, two of the planes
    are parallel, the intersection lines are not computed [1]_.
References
----------
.. [1] http://geomalgorithms.com/Pic_3-planes.gif
"""
line = intersection_plane_plane(plane1, plane2, epsilon)
if not line:
return None
point = intersection_line_plane(line, plane3, epsilon)
if point:
return point
return None
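# A minimal sketch (not part of the original module): the three coordinate planes,
# given as (base point, unit normal) pairs, meet in a single point at the origin.
def _example_intersection_plane_plane_plane():
    yz = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))
    xz = ((0.0, 0.0, 0.0), (0.0, 1.0, 0.0))
    xy = ((0.0, 0.0, 0.0), (0.0, 0.0, 1.0))
    return intersection_plane_plane_plane(yz, xz, xy)  # [0.0, 0.0, 0.0]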
def intersection_lines_numpy(lines):
"""
Examples
--------
.. code-block:: python
lines = []
"""
from numpy import array
from numpy import arange
from numpy import eye
from scipy.linalg import norm
from scipy.linalg import solve
l1 = array([[-2, 0], [0, 1]], dtype=float).T
l2 = array([[0, -2], [1, 0]], dtype=float).T
l3 = array([[5, 0], [0, 7]], dtype=float).T
l4 = array([[3, 0], [0, 20]], dtype=float)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
    Frequency Generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
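# A minimal usage sketch (not part of the original module): sweep from 100 kHz down
# to 0.1 Hz with the default 7 points/decade. f_start is expected to be the higher
# frequency so that f_decades, and therefore the number of points, is positive.
def _example_freq_gen():
    f_range, w_range = freq_gen(f_start=10**5, f_stop=10**-1, pts_decade=7)
    return f_range, w_range  # 42 log-spaced frequencies and their angular equivalents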
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
### Simulation Circuits Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
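# A minimal sketch (not part of the original module) of how cir_RQ recovers a missing
# parameter: giving R, n and the summit frequency fs = 1/(2*pi*(R*Q)**(1/n)) reproduces
# the same spectrum as giving R, Q and n explicitly.
def _example_cir_RQ():
    w = freq_gen(10**5, 10**-1)[1]
    R, Q, n = 100.0, 1e-5, 0.9
    fs = 1.0/(2*np.pi*(R*Q)**(1.0/n))
    Z_explicit = cir_RQ(w, R=R, Q=Q, n=n)
    Z_from_fs = cir_RQ(w, R=R, n=n, fs=fs)
    return np.allclose(Z_explicit, Z_from_fs)  # True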
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definition where n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = Summit frequency of RQ circuit [Hz]
Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
    C = Capacitance of series C [F]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    C = Capacitance of series C [F]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
    Two cases: a) ox and red are both present in solution, so both C_red and D_red are defined; b) in the particular case where initially
    only ox species are present in the solution with bulk concentration C*_ox, the surface concentrations may be calculated as a function
    of the electrode potential following the Nernst equation. Here C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faradays constant [C/mol]
    E = Potential [V]
        if the reduced species is absent == 'none'
    E0 = Formal potential [V]
        if the reduced species is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
    else:
        raise ValueError('define E and E0 when C_red and D_red are not given, otherwise sigma cannot be calculated')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
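# A minimal sketch (not part of the original module): case a) of the docstring above,
# a one-electron couple with equal bulk concentrations and diffusion coefficients of
# the oxidized and reduced species. The resulting Warburg impedance has a -45 degree
# phase at all frequencies.
def _example_Randles_coeff():
    w = freq_gen(10**5, 10**-1)[1]
    Z_Aw = Randles_coeff(w, n_electron=1, A=1.0, D_red=1e-6, D_ox=1e-6,
                         C_red=1e-5, C_ox=1e-5, T=298.15)
    return Z_Aw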
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faradays constant [C/mol]
    E = Potential [V]
        if the reduced species is absent == 'none'
    E0 = Formal potential [V]
        if the reduced species is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a simplified Randles circuit, where the Warburg element is described by the coefficient sigma
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
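# A minimal sketch (not part of the original module): a simplified Randles cell with
# Rs = 20 ohm, Rct = 100 ohm, an ideal double layer (n = 1, Q = 10 uF) and a Warburg
# coefficient sigma = 150 ohm/s^(1/2), evaluated over a frequency sweep.
def _example_cir_Randles_simplified():
    w = freq_gen(10**5, 10**-2)[1]
    Z = cir_Randles_simplified(w, Rs=20.0, R=100.0, n=1.0, sigma=150.0, Q=10e-6)
    return Z.real, -Z.imag  # coordinates for a Nyquist plot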
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that gives a capacitance contribution
    to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
    Modified cir_C_RC_C() circuit that can be used if the electrodes and the bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
    Qe = Interfacial capacitance modeled with a CPE [s^n/ohm]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when the argument of tanh becomes very large (above 10^250), this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
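# A minimal check (not part of the original module): for moderate arguments the helper
# agrees with np.tanh; it is written in terms of exp(-2x) so that very large positive
# arguments saturate to 1 instead of overflowing an intermediate exp(+x) term.
def _example_tanh():
    x = np.linspace(0.1, 20.0, 50)
    return np.allclose(tanh(x), np.tanh(x))  # True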
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that contains both a mobile and an immobile ionic species in the bulk, mixed with an
    ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedances -(RC_e)-, the ionically conducting polymer -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives errors when the argument of sinh becomes very large (above 10^250), this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives errors when the argument of coth becomes very large (above 10^250), this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
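# A minimal check (not part of the original module): the custom sinh/coth helpers accept
# the complex arrays that the transmission-line functions below feed them (x = L/Lam is
# complex); for moderate real arguments they match the numpy equivalents.
def _example_sinh_coth():
    x = np.linspace(0.5, 10.0, 20)
    return np.allclose(sinh(x), np.sinh(x)), np.allclose(coth(x), 1.0/np.tanh(x))  # (True, True)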
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for interfacial RQ element [-]
Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element of the RQ element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
    fs1 = max frequency peak of the interfacial RQ element [Hz]
    n1 = exponent for the interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = Constant phase element of the RQ element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definition where n=1
'''
    n = 1 # an RC element is an RQ element with the exponent fixed at n = 1
    fs = 'none'
    if str(params.keys())[10:].find("R") == -1: #if R == 'none':
        Q = params['C']
        fs = params['fs']
        R = (1/(Q*(2*np.pi*fs)**n))
    if str(params.keys())[10:].find("C") == -1: #elif C == 'none':
        R = params['R']
        fs = params['fs']
        Q = (1/(R*(2*np.pi*fs)**n))
    if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
        R = params['R']
        Q = params['C']
    return cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(j*w)^n)
See Explanation of equations under cir_RQ()
    str(params.keys())[10:] lists the names of the user-defined parameters; if a parameter name is not found (find() == -1), that parameter is treated as 'none' and is calculated from the other parameters
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
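# A minimal sketch (not part of the original module): the fit functions sniff the
# parameter names out of params.keys(), so for a quick stand-alone evaluation a plain
# dict with the same keys behaves like the lmfit Parameters object PyEIS normally
# passes in. Leaving out 'fs' makes the function read R, Q and n directly.
def _example_cir_RQ_fit():
    w = freq_gen(10**5, 10**-1)[1]
    params = {'R': 250.0, 'Q': 1e-5, 'n': 0.9}  # no 'fs' key, so fs is treated as 'none'
    Z_fit = cir_RQ_fit(params, w)
    return np.allclose(Z_fit, cir_RQ(w, R=250.0, Q=1e-5, n=0.9))  # True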
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
        Re = params['Re']
        fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
    # Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
Fit Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TLQ- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific to 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
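# Usage sketch (illustrative only, not part of the original API): cir_RsTLQ_fit() only
# indexes 'params', so a plain dict suffices for a quick forward evaluation; functions
# that inspect params.keys(), and the actual fitting, use an lmfit Parameters object
# (see leastsq_errorfunc() below). All numerical values here are assumed/illustrative,
# and the call relies on the module's own elem_Q()/coth()/sinh() helpers.
def _example_evaluate_RsTLQ():
    '''Sketch: evaluate cir_RsTLQ_fit() over a hypothetical frequency range.'''
    f = np.logspace(-1, 5, 60) #0.1 Hz - 100 kHz (illustrative)
    w = 2*np.pi*f
    params = {'Rs': 20.0, 'L': 1e-4, 'Ri': 80.0, 'Rel': 10.0, 'Q': 1e-3, 'n': 0.9}
    return cir_RsTLQ_fit(params, w) #complex impedance array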
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting routine (lmfit)
iterates over this function until the total sum of errors is minimized.
During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- w: angular frequency
- re: real part of the impedance
- im: imaginary part of the impedance
- circuit:
  The available circuits are shown below; this parameter must be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different weighting options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of 1's, so that the weighting == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
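# Usage sketch (illustrative only, not part of the original API): leastsq_errorfunc() is
# designed to be handed to lmfit's minimize() together with the measured spectrum and one
# of the circuit strings listed above. The starting values below are assumptions made for
# the sake of illustration, not recommended guesses; (w, re, im) are the measured angular
# frequencies and real/imaginary impedances.
def _example_fit_R_TLQ(w, re, im):
    '''Sketch: fit a measured spectrum (w, re, im) to the 'R-TLQ' circuit.'''
    from lmfit import Parameters, minimize #may already be imported at module level
    params = Parameters()
    params.add('Rs', value=20.0, min=0)
    params.add('L', value=1e-4, min=0)
    params.add('Ri', value=80.0, min=0)
    params.add('Rel', value=10.0, min=0)
    params.add('Q', value=1e-3, min=0)
    params.add('n', value=0.9, min=0, max=1)
    fit = minimize(leastsq_errorfunc, params, method='leastsq',
                   args=(w, re, im, 'R-TLQ', 'modulus')) #modulus weighting
    return fit.params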
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
- cycle: Specific cycle numbers can be extracted using the cycle function. Default is 'off', which includes all cycle numbers.
Specific cycles can be extracted by inserting the cycle numbers in brackets, e.g. if cycles 1, 4, and 6 are wanted: cycle=[1,4,6]
- mask: ['high frequency', 'low frequency'], if only a high- or low-frequency limit is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]): #corrects cycle_number except for the first data file
    self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
if len(self.df_raw0) == 0:
    print('No data file(s) were identified')
else:
    self.df_raw = pd.concat(self.df_raw0, axis=0)
self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
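# Usage sketch (illustrative only; the path and file names below are hypothetical):
#
#   ex = EIS_exp(path='C:/data/', data=['EIS_data1.mpt', 'EIS_data2.mpt'],
#                cycle='off', mask=['none', 'none'])
#   ex.Lin_KK(num_RC='auto', legend='on', plot='residuals') #Kramers-Kronig validity check
#
# Masking example: mask=[10**4, 1] would restrict the analysis to 1 Hz - 10 kHz,
# following the ['high frequency', 'low frequency'] convention of the 'mask' input.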
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
- Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
The function performs the KK analysis and by default plots the relative residuals in each subplot
Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
- 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
- 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals together with the experimental data, in Nyquist and Bode plots if desired, see 'bode =' in the description
- nyq_xlim/nyq_ylim: Change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state a [min,max] value
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
'''
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
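# u = 1 - |sum(R<0)|/|sum(R>=0)| (cf. Schönleber et al., 2014); the loop below adds one
# RC element at a time until u falls inside the 0.75-0.88 window, i.e. until the spectrum
# is neither under- nor over-fitted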
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
n_RC = int(self.number_RC[i])
KK_RC_fn = globals().get('KK_RC' + str(n_RC)) #KK_RC2() ... KK_RC80() are module-level functions
if KK_RC_fn is not None:
    self.KK_circuit_fit.append(KK_RC_fn(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
else:
    print('RC simulation circuit not defined')
    print(' Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and writing 'Lin-KK' on the RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and writing 'Lin-KK' on the RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(
|
np.log10(self.df[3].f)
|
numpy.log10
|
import tensorlayer as tl
import tensorflow as tf
import numpy as np
from LoadData import LoadData
from tensorlayer.utils import dict_to_one
import time
from tensorlayer.layers import *
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
def plot_normalized_confusion_matrix(y_test, y):
# renamed: the original def shadowed sklearn's confusion_matrix and called itself
# recursively; plot_confusion_matrix and class_names are assumed to be defined
# elsewhere in the project
print(y_test, y)
cnf_matrix = sk_confusion_matrix(y_test, y)
print(cnf_matrix)
np.set_printoptions(precision=2)
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.savefig("rgb.pdf")
def get_session(gpu_fraction=0.2):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=True)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
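# Usage sketch (assumption: a session created this way is passed to fit() below,
# with the graph variables initialised before training):
# sess = get_session(gpu_fraction=0.2)
# sess.run(tf.global_variables_initializer())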
def minibatches(inputs=None, inputs2=None, targets=None, batch_size=None, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
if shuffle:
excerpt = indices[start_idx:start_idx + batch_size]
else:
excerpt = slice(start_idx, start_idx + batch_size)
yield inputs[excerpt], inputs2[excerpt], targets[excerpt]
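# Usage sketch for the two-input generator above (array shapes are illustrative
# assumptions, not taken from the original training data):
# X1 = np.random.rand(1000, 64, 64, 3)  # first input stream
# X2 = np.random.rand(1000, 128)        # second input stream
# y = np.random.randint(0, 2, size=1000)  # labels
# for a_batch, b_batch, y_batch in minibatches(X1, X2, y, batch_size=100, shuffle=True):
#     ...  # feed a_batch, b_batch and y_batch to the two-branch network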
def fit(sess, network, train_op, cost, X_train, X_train2, y_train, x, x_2, y_, acc=None, batch_size=100,
n_epoch=100, print_freq=5, X_val=None, X_val2=None, y_val=None, eval_train=True,
tensorboard=False, tensorboard_epoch_freq=5, tensorboard_weight_histograms=True, tensorboard_graph_vis=True):
assert X_train.shape[0] >= batch_size, "Number of training examples should be bigger than the batch size"
print("Start training the network ...")
start_time_begin = time.time()
tensorboard_train_index, tensorboard_val_index = 0, 0
confusion =
|
np.zeros((1,2))
|
numpy.zeros
|
import numpy as np
np.random.seed(0)
def main_func(A, U, Z, lam, fun_num=1):
# main functions
if fun_num==0:
# no-regularization
return 0.5*(np.linalg.norm(A-np.matmul(U,Z))**2)
if fun_num==1:
# L2-regularization
return 0.5*(np.linalg.norm(A-np.matmul(U,Z))**2) \
+ lam*0.5*(np.linalg.norm(U)**2) \
+ lam*0.5*(np.linalg.norm(Z)**2)
if fun_num==2:
# L1-regularization
return 0.5*(np.linalg.norm(A-np.matmul(U,Z))**2) \
+ lam*(np.sum(np.abs(U))) + lam*(np.sum(np.abs(Z)))
if fun_num==3:
# No-regularization for some backward compatibility TODO.
return 0.5*(np.linalg.norm(A -np.matmul(U,Z))**2)
def grad(A, U, Z, lam, fun_num=1, option=1):
# Gradients of smooth part of the function
# Essentially function is f+g
# f is non-smooth part
# g is smooth part
# here gradients of g is computed.
# no-regularization
# option 1 gives all gradients
# option 2 gives gradient with respect to U
# option 3 gives gradient with respect to Z
if fun_num in [0,1,2]:
# grad u, grad z
if option==1:
return np.matmul(np.matmul(U,Z)-A, Z.T) , np.matmul(U.T, np.matmul(U,Z)-A)
elif option==2:
return np.matmul(np.matmul(U,Z)-A, Z.T)
elif option==3:
return np.matmul(U.T, np.matmul(U,Z)-A)
else:
pass
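# Finite-difference sanity check for the smooth gradient above (a minimal sketch;
# the sizes of A, U and Z here are assumptions, not values from the original runs):
A_chk = np.random.rand(8, 6)
U_chk = np.random.rand(8, 3)
Z_chk = np.random.rand(3, 6)
GU_chk, GZ_chk = grad(A_chk, U_chk, Z_chk, lam=0.0, fun_num=0)
eps_chk = 1e-6
E_chk = np.zeros_like(U_chk)
E_chk[0, 0] = eps_chk
num_chk = (main_func(A_chk, U_chk + E_chk, Z_chk, 0.0, fun_num=0)
           - main_func(A_chk, U_chk - E_chk, Z_chk, 0.0, fun_num=0)) / (2*eps_chk)
assert np.isclose(num_chk, GU_chk[0, 0], atol=1e-4)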
def abs_func(A, U, Z, U1, Z1, lam, abs_fun_num=1, fun_num=1):
# Denote abs_func = f(x) + g(x^k) + <grad g(x^k), x-x^k>
# x^k is the current iterate denoted by U1, Z1
# This function is just to make the code easy to handle
# There can be other efficient ways to implement TODO
if abs_fun_num == 1:
G0,G1 = grad(A, U1, Z1, lam, fun_num=fun_num)
return main_func(A, U1, Z1, lam, fun_num=1) + np.sum(np.multiply(U-U1,G0)) \
+ np.sum(np.multiply(Z-Z1,G1))
if abs_fun_num == 2:
G0,G1 = grad(A, U1, Z1, lam, fun_num=fun_num)
return main_func(A, U1, Z1, lam, fun_num=fun_num)-lam*(np.sum(np.abs(U1))) - lam*(np.sum(np.abs(Z1)))\
+ lam*(np.sum(np.abs(U))) + lam*(np.sum(np.abs(Z))) + np.sum(np.multiply(U-U1,G0)) + np.sum(np.multiply(Z-Z1,G1))
if abs_fun_num == 3:
G0,G1 = grad(A, U1, Z1, lam, fun_num=fun_num)
return main_func(A, U1, Z1, lam, fun_num=fun_num)-lam*0.5*(np.linalg.norm(U1)**2) - lam*0.5*(np.linalg.norm(Z1)**2)\
+ lam*0.5*(np.linalg.norm(U)**2) + lam*0.5*(np.linalg.norm(Z)**2) \
+ np.sum(np.multiply(U-U1,G0)) + np.sum(np.multiply(Z-Z1,G1))
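# Quick consistency sketch for abs_func: at U = U1, Z = Z1 the linearised model
# reduces to the smooth objective, because the inner-product terms and the
# regulariser differences vanish (matrix sizes are illustrative assumptions):
A_t = np.random.rand(6, 4)
U_t = np.random.rand(6, 2)
Z_t = np.random.rand(2, 4)
assert np.isclose(abs_func(A_t, U_t, Z_t, U_t, Z_t, 0.1, abs_fun_num=3, fun_num=1),
                  main_func(A_t, U_t, Z_t, 0.1, fun_num=1))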
def make_update(U1, Z1,uL_est=1,lam=0,fun_num=1, abs_fun_num=1,breg_num=1, A=1, U2=1,Z2=1, beta=0.0,c_1=1,c_2=1,exp_option=1):
# Main Update Step
if breg_num ==2:
# Calculates CoCaIn BPG-MF, BPG-MF, BPG-MF updates
# Getting gradients to compute P^k, Q^k later
grad_u, grad_z = grad(A, U1, Z1, lam, fun_num=0)
grad_h_1_a = U1*(np.linalg.norm(U1)**2 + np.linalg.norm(Z1)**2)
grad_h_1_b = Z1*(np.linalg.norm(U1)**2 + np.linalg.norm(Z1)**2)
grad_h_2_a = U1
grad_h_2_b = Z1
sym_setting = 0
if abs_fun_num == 3:
# Code for No-Regularization and L2 Regularization
if exp_option==1:
# Code for No-Regularization and L2 Regularization
# No-Regularization is equivalent to L2 Regularization with lam=0
# compute P^k
p_l = (1/uL_est)*grad_u - (c_1*grad_h_1_a + c_2*grad_h_2_a)
# compute Q^k
q_l = (1/uL_est)*grad_z - (c_1*grad_h_1_b + c_2*grad_h_2_b)
if sym_setting == 0: #default option
# solving cubic equation
coeff = [c_1*(np.linalg.norm(p_l)**2 + np.linalg.norm(q_l)**2), 0,(c_2 + (lam/uL_est)), -1]
temp_y = np.roots(coeff)[-1].real
return (-1)*temp_y*p_l, (-1)*temp_y*q_l
else:
p_new = p_l + q_l.T
coeff = [4*c_1*(np.linalg.norm(p_new)**2), 0,2*(c_2 + (lam/uL_est)), -1]
temp_y = np.roots(coeff)[-1].real
return (-1)*temp_y*p_new, (-1)*temp_y*(p_new.T)
elif exp_option==2:
# NMF case.
# Code for No-Regularization and L2 Regularization
if sym_setting == 0:
# compute P^k
p_l = np.maximum(0,-(1/uL_est)*grad_u + (c_1*grad_h_1_a + c_2*grad_h_2_a))
# compute Q^k
q_l = np.maximum(0,-(1/uL_est)*grad_z + (c_1*grad_h_1_b + c_2*grad_h_2_b))
# solving cubic equation
temp_pnrm = np.sqrt((np.linalg.norm(p_l)**2 + np.linalg.norm(q_l)**2))/np.sqrt(2)
# print('temp_pnrm '+ str(temp_pnrm))
# technique to improve the numerical stability
# same update anyway.
coeff = [c_1*2, 0,(c_2 + (lam/uL_est)), -(temp_pnrm)]
temp_y = np.roots(coeff)[-1].real
return temp_y*p_l/temp_pnrm, temp_y*q_l/temp_pnrm
else:
temp_pl = -(1/uL_est)*grad_u + (c_1*grad_h_1_a + c_2*grad_h_2_a)
temp_ql = -(1/uL_est)*grad_z + (c_1*grad_h_1_b + c_2*grad_h_2_b)
# compute P^k
p_new = np.maximum(0,temp_pl+temp_ql.T)
# solving cubic equation
coeff = [4*c_1*(np.linalg.norm(p_new)**2), 0,2*(c_2 + (lam/uL_est)), -1]
temp_y = np.roots(coeff)[-1].real
return temp_y*p_new, temp_y*(p_new.T)
else:
raise ValueError("unknown exp_option: {}".format(exp_option))
if abs_fun_num == 2:
if exp_option==1:
# L1 Regularization simple
# compute P^k
tp_l = (1/uL_est)*grad_u - (c_1*grad_h_1_a + c_2*grad_h_2_a)
p_l = -np.maximum(0, np.abs(-tp_l)-lam*(1/uL_est))*np.sign(-tp_l)
# compute Q^K
tq_l = (1/uL_est)*grad_z - (c_1*grad_h_1_b + c_2*grad_h_2_b)
q_l = -np.maximum(0, np.abs(-tq_l)-lam*(1/uL_est))*np.sign(-tq_l)
# solving cubic equation
coeff = [c_1*(np.linalg.norm(p_l)**2 + np.linalg.norm(q_l)**2), 0,(c_2), -1]
temp_y = np.roots(coeff)[-1].real
return (-1)*temp_y*p_l, (-1)*temp_y*q_l
elif exp_option==2:
# L1 Regularization NMF case
# temporary matrices see update steps in the paper.
nx = np.shape(grad_u)[0]
ny = np.shape(grad_u)[1]
temp_mat1 = np.outer(np.ones(nx),np.ones(ny))
nx = np.shape(grad_z)[0]
ny = np.shape(grad_z)[1]
temp_mat2 = np.outer(np.ones(nx),np.ones(ny))
# compute P^k
tp_l = -(1/uL_est)*grad_u + (c_1*grad_h_1_a + c_2*grad_h_2_a) - (lam/uL_est)*(temp_mat1)
p_l = np.maximum(0,tp_l)
# compute Q^k
tq_l = -(1/uL_est)*grad_z + (c_1*grad_h_1_b + c_2*grad_h_2_b) - (lam/uL_est)*(temp_mat2)
q_l = np.maximum(0,tq_l)
# solving cubic equation
# print(np.linalg.norm(p_l)**2 + np.linalg.norm(q_l)**2)
coeff = [c_1*(np.linalg.norm(p_l)**2 + np.linalg.norm(q_l)**2), 0,(c_2), -1]
temp_y = np.roots(coeff)[-1].real
return temp_y*p_l, temp_y*q_l
else:
pass
if breg_num ==1:
# Update steps for PALM and iPALM
# Code for No-Regularization and L2 Regularization
if abs_fun_num == 3:
# compute extrapolation
U1 = U1+beta*(U1-U2)
grad_u = grad(A, U1, Z1, lam, fun_num=fun_num, option=2)
# compute Lipschitz constant
L2 = np.linalg.norm(np.mat(Z1) * np.mat(Z1.T))
L2 = np.max([L2,1e-4])
# print('L2 val '+ str(L2))
if beta>0:
# since we use convex regularizers
# step-size is less restrictive
step_size = (2*(1-beta)/(1+2*beta))*(1/ L2)
else:
# from PALM paper 1.1 is just a scaling factor
# can be set to any value >1.
step_size = (1/(1.1*L2))
# Update step for No-Regularization and L2 Regularization
U = ((U1 - step_size*grad_u))/(1+ step_size*lam)
# compute extrapolation
Z1 = Z1+beta*(Z1-Z2)
grad_z = grad(A, U, Z1, lam, fun_num=fun_num, option=3)
# compute Lipschitz constant
L1 = np.linalg.norm(np.mat(U.T) * np.mat(U))
L1 = np.max([L1,1e-4])
# print('L1 val '+ str(L1))
if beta>0:
# since we use convex regularizers
# step-size is less restrictive
step_size = (2*(1-beta)/(1+2*beta))*(1/ L1)
else:
# from PALM paper 1.1 is just a scaling factor
# can be set to any value >1.
step_size = 1/(1.1*L1)
# Update step for No-Regularization and L2 Regularization
Z = ((Z1 - step_size*grad_z))/(1+ step_size*lam)
return U,Z
if abs_fun_num == 2:
# Update steps for PALM and iPALM
# Code for L1 Regularization
# compute extrapolation
U1 = U1+beta*(U1-U2)
grad_u = grad(A, U1, Z1, lam, fun_num=fun_num, option=2)
# compute Lipschitz constant
L2 = np.linalg.norm(np.mat(Z1) * np.mat(Z1.T))
L2 = np.max([L2,1e-4])
if beta>0:
# since we use convex regularizers
# step-size is less restrictive
step_size = (2*(1-beta)/(1+2*beta))*(1/ L2)
else:
# from PALM paper 1.1 is just a scaling factor
# can be set to any value >1.
step_size = 1/(1.1*L2)
# compute update step with U
tU1 = ((U1 - step_size*grad_u))
U = np.maximum(0, np.abs(tU1)-lam*(step_size))*np.sign(tU1)
# compute extrapolation
Z1 = Z1+beta*(Z1-Z2)
grad_z = grad(A, U, Z1, lam, fun_num=fun_num, option=3)
# compute Lipschitz constant
L1 = np.linalg.norm(np.mat(U.T) * np.mat(U))
L1 = np.max([L1,1e-4])
if beta>0:
# since we use convex regularizers
# step-size is less restrictive
step_size = (2*(1-beta)/(1+2*beta))*(1/ L1)
else:
# from PALM paper 1.1 is just a scaling factor
# can be set to any value >1.
step_size = 1/(1.1*L1)
# compute update step with z
tZ1 = ((Z1 - step_size*grad_z))
Z = np.maximum(0, np.abs(tZ1)-lam*(step_size))*np.sign(tZ1)
return U,Z
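# Minimal PALM iteration sketch using the Euclidean branch of make_update above
# (breg_num=1, abs_fun_num=3; beta=0.0 disables inertia, i.e. plain PALM).
# The problem sizes and the value of lam are illustrative assumptions only:
A_p = np.random.rand(20, 15)
U_p, Z_p = np.random.rand(20, 5), np.random.rand(5, 15)
U_prev, Z_prev = U_p.copy(), Z_p.copy()
for _ in range(50):
    U_new, Z_new = make_update(U_p, Z_p, lam=0.1, fun_num=1, abs_fun_num=3,
                               breg_num=1, A=A_p, U2=U_prev, Z2=Z_prev, beta=0.0)
    U_prev, Z_prev = U_p, Z_p
    U_p, Z_p = U_new, Z_new
# main_func(A_p, U_p, Z_p, 0.1, fun_num=1) should be non-increasing over these iterations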
def breg( U, Z, U1, Z1, breg_num=1, c_1=1,c_2=1):
if breg_num==1:
# Standard Euclidean distance
temp = 0.5*(np.linalg.norm(U-U1)**2) + 0.5*(np.linalg.norm(Z-Z1)**2)
if abs(temp) <= 1e-10:
# to fix numerical issues
temp = 0
if temp<0:
return 0
return temp
if breg_num==2:
# New Bregman distance as in the paper
# link: https://arxiv.org/abs/1905.09050
grad_h_1_a = U1*(np.linalg.norm(U1)**2 + np.linalg.norm(Z1)**2)
grad_h_1_b = Z1*(np.linalg.norm(U1)**2 +
|
np.linalg.norm(Z1)
|
numpy.linalg.norm
|
import logging
from typing import Sequence, List, Tuple, Optional
import numpy as np
from emukit.core.optimization.acquisition_optimizer import AcquisitionOptimizerBase
from emukit.core import ParameterSpace
from emukit.core.acquisition import Acquisition
from emukit.core.initial_designs import RandomDesign
from ..parameters.string_parameter import StringParameter
from emukit.core.optimization.context_manager import ContextManager
_log = logging.getLogger(__name__)
class StringGeneticProgrammingOptimizer(AcquisitionOptimizerBase):
"""
Optimizes acquisition function using Genetic programming over string spaces
"""
def __init__(self, space: ParameterSpace, dynamic:bool = False, num_evolutions: int = 10,
population_size: int = 5, tournament_prob: float = 0.5,
p_crossover: float = 0.8, p_mutation: float = 0.05
) -> None:
"""
:param space: The parameter space spanning the search problem (has to consist of a single StringParameter).
:param num_evolutions: Maximum number of evolutions.
:param dynamic: allow early stopping to choose the number of steps (chooses between 10 and 100 evolutions)
:param population_size: Population size.
:param tournament_prob: proportion of the population randomly chosen from which to choose a tree to evolve
(larger gives faster convergence but smaller gives better diversity in the population)
:param p_crossover: probability of crossover evolution (if no crossover, the individual is kept unchanged (reproduction))
:param p_mutation: probability of random mutation
"""
super().__init__(space)
# check that the parameter space consists of a single string parameter
if len(space.parameters) != 1 or not isinstance(space.parameters[0], StringParameter):
raise ValueError("StringGeneticProgrammingOptimizer only supports spaces consisting of a single string parameter")
self.space = space
self.p_mutation = p_mutation
self.p_crossover = p_crossover
self.population_size = population_size
self.tournament_prob = tournament_prob
self.dynamic = dynamic
if self.dynamic:
self.num_evolutions = 10
else:
self.num_evolutions = num_evolutions
self.alphabet = self.space.parameters[0].alphabet
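# Construction sketch (hedged: StringParameter's constructor signature is an
# assumption about the project's own parameter module, not part of emukit core):
# space = ParameterSpace([StringParameter("sequence", alphabet=["A", "B", "C"], length=10)])
# optimizer = StringGeneticProgrammingOptimizer(space, population_size=20, num_evolutions=25)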
def _optimize(self, acquisition: Acquisition , context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
"""
See AcquisitionOptimizerBase._optimize for parameter descriptions.
Optimize an acquisition function using a GA
"""
# initialize population of strings
random_design = RandomDesign(self.space)
population = random_design.get_samples(self.population_size)
# calc fitness for the current population
fitness_pop = acquisition.evaluate(population)
standardized_fitness_pop = fitness_pop / sum(fitness_pop)
# initialize best location and score so far
X_max = population[np.argmax(fitness_pop)].reshape(-1,1)
acq_max = np.max(fitness_pop).reshape(-1,1)
iteration_bests=[]
_log.info("Starting local optimization of acquisition function {}".format(type(acquisition)))
for step in range(self.num_evolutions):
_log.info("Performing evolution step {}".format(step))
# evolve populations
population = self._evolve(population,standardized_fitness_pop)
# recalc fitness
fitness_pop = acquisition.evaluate(population)
standardized_fitness_pop = fitness_pop / sum(fitness_pop)
# update best location and score (if found better solution)
acq_pop_max = np.max(fitness_pop)
iteration_bests.append(acq_pop_max)
_log.info("best acqusition score in the new population".format(acq_pop_max))
if acq_pop_max > acq_max[0][0]:
acq_max[0][0] = acq_pop_max
X_max[0] = population[np.argmax(fitness_pop)]
# if dynamic then keep running (stop when no improvement over most recent 10 iterations)
stop = not self.dynamic  # only the dynamic mode keeps evolving past num_evolutions
i = self.num_evolutions
while not stop:
_log.info("Performing evolution step {}".format(step))
# evolve populations
population = self._evolve(population,standardized_fitness_pop)
# recalc fitness
fitness_pop = acquisition.evaluate(population)
standardized_fitness_pop = fitness_pop / sum(fitness_pop)
# update best location and score (if found better solution)
acq_pop_max = np.max(fitness_pop)
iteration_bests.append(acq_pop_max)
_log.info("best acqusition score in the new population".format(acq_pop_max))
if acq_pop_max > acq_max[0][0]:
acq_max[0][0] = acq_pop_max
X_max[0] = population[
|
np.argmax(fitness_pop)
|
numpy.argmax
|
# Copyright 2019 <NAME>, <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
from numpy.testing import (
run_module_suite, assert_equal, assert_almost_equal
)
import normalized_matrix as nm
class TestNormalizedMatrix(object):
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]])]
m = np.matrix([[1.0, 2.0, 1.1, 2.2],
[4.0, 3.0, 3.3, 4.4],
[5.0, 6.0, 3.3, 4.4],
[8.0, 7.0, 1.1, 2.2],
[9.0, 1.0, 3.3, 4.4]])
n_matrix = nm.NormalizedMatrix(s, r, k)
def test_add(self):
n_matrix = self.n_matrix
local_matrix = n_matrix + 2
assert_equal(local_matrix.b, 2)
local_matrix = 3 + n_matrix
assert_equal(local_matrix.b, 3)
def test_sub(self):
n_matrix = self.n_matrix
local_matrix = n_matrix - 2
assert_equal(local_matrix.b, -2)
local_matrix = 3 - n_matrix
assert_equal(local_matrix.a, -1)
assert_equal(local_matrix.b, 3)
def test_mul(self):
n_matrix = self.n_matrix
local_matrix = n_matrix * 2
assert_equal(local_matrix.a, 2)
local_matrix = 3 * n_matrix
assert_equal(local_matrix.a, 3)
def test_div(self):
n_matrix = self.n_matrix
local_matrix = n_matrix / 2
assert_equal(local_matrix.a, 0.5)
local_matrix = 2 / n_matrix
assert_equal(local_matrix.a, 2)
assert_equal(local_matrix.c, -1)
def test_pow(self):
n_matrix = self.n_matrix
local_matrix = n_matrix ** 2
assert_equal(local_matrix.c, 2)
def test_transpose(self):
n_matrix = self.n_matrix
assert_equal(n_matrix.T.T.sum(axis=0), n_matrix.sum(axis=0))
assert_equal(np.array_equal(n_matrix.T.sum(axis=0), n_matrix.sum(axis=0)), False)
def test_inverse(self):
n_matrix = self.n_matrix
assert_almost_equal(n_matrix.I, self.n_matrix.I)
def test_row_sum(self):
n_matrix = self.n_matrix
assert_almost_equal(n_matrix.sum(axis=1), self.m.sum(axis=1))
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(r[1][k[1]])[..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1][k[1]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.matrix(np.hstack([n_matrix.ent_table] + [n_matrix.att_table[0][k[0]], n_matrix.att_table[1][k[1]], sr0, sr1, rr]))
assert_almost_equal(np.sort(n_matrix.sum(axis=1), axis=0), np.sort(m.sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix + 2).sum(axis=1), axis=0), np.sort((m + 2).sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix * 2).sum(axis=1), axis=0), np.sort(m.dot(2).sum(axis=1), axis=0))
# sparse
indptr = np.array([0, 2, 3, 6, 6])
indices = np.array([0, 2, 2, 0, 1, 4])
data1 = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
s = sp.csc_matrix((data1, indices, indptr), shape=(5, 4)).tocoo()
row1 = np.array([0, 1, 1])
col1 = np.array([0, 4, 1])
data1 = np.array([1.0, 2.0, 3.0])
row2 = np.array([1, 1, 0])
col2 = np.array([2, 1, 1])
data2 = np.array([1.1, 2.2, 3.3])
k = [np.array([0, 1, 1, 0, 1]), np.array([1, 0, 0, 1, 1])]
r = [sp.coo_matrix((data1, (row1, col1)), shape=(2, 5)),
sp.coo_matrix((data2, (row2, col2)), shape=(2, 3))]
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0].toarray()[k[0]][:, np.newaxis]) * np.asarray(r[1].toarray()[k[1]])[
..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0].toarray()[k[0]][:, np.newaxis]) * np.asarray(s.toarray())[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1].toarray()[k[1]][:, np.newaxis]) * np.asarray(s.toarray())[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.matrix(np.hstack([n_matrix.ent_table.toarray()] + [n_matrix.att_table[0].toarray()[k[0]],
n_matrix.att_table[1].toarray()[k[1]], sr0, sr1, rr]))
assert_almost_equal(np.sort(n_matrix.sum(axis=1), axis=0), np.sort(m.sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix + 2).sum(axis=1), axis=0), np.sort((m + 2).sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix * 2).sum(axis=1), axis=0), np.sort(m.dot(2).sum(axis=1), axis=0))
# identity
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, identity=True)
m = np.hstack([n_matrix.ent_table] + [np.identity(2)[k[0]], n_matrix.att_table[0][k[0]],
np.identity(2)[k[1]], n_matrix.att_table[1][k[1]]])
assert_almost_equal(np.sort(n_matrix.sum(axis=1), axis=0), np.sort(m.sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix + 2).sum(axis=1), axis=0), np.sort((m + 2).sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix * 2).sum(axis=1), axis=0), np.sort(m.dot(2).sum(axis=1), axis=0))
def test_row_sum_trans(self):
n_matrix = nm.NormalizedMatrix(self.s, self.r, self.k, trans=True)
assert_almost_equal(n_matrix.sum(axis=1), self.m.T.sum(axis=1))
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(r[1][k[1]])[..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1][k[1]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.matrix(np.hstack([n_matrix.ent_table] + [n_matrix.att_table[0][k[0]], n_matrix.att_table[1][k[1]], sr0, sr1, rr]))
assert_almost_equal(np.sort(n_matrix.T.sum(axis=1), axis=0), np.sort(m.T.sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix + 2).T.sum(axis=1), axis=0), np.sort((m + 2).T.sum(axis=1), axis=0))
assert_almost_equal(np.sort((n_matrix * 2).T.sum(axis=1), axis=0), np.sort(m.dot(2).T.sum(axis=1), axis=0))
def test_col_sum(self):
n_matrix = self.n_matrix
assert_almost_equal(n_matrix.sum(axis=0), self.m.sum(axis=0))
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(r[1][k[1]])[..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1][k[1]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.matrix(np.hstack([n_matrix.ent_table] + [n_matrix.att_table[0][k[0]], n_matrix.att_table[1][k[1]], sr0, sr1, rr]))
assert_almost_equal(np.sort(n_matrix.sum(axis=0)), np.sort(m.sum(axis=0)))
assert_almost_equal(np.sort((n_matrix + 2).sum(axis=0)), np.sort((m + 2).sum(axis=0)))
assert_almost_equal(np.sort((n_matrix * 2).sum(axis=0)), np.sort(m.dot(2).sum(axis=0)))
def test_row_col_trans(self):
n_matrix = nm.NormalizedMatrix(self.s, self.r, self.k, trans=True)
assert_almost_equal(n_matrix.sum(axis=0), self.m.T.sum(axis=0))
def test_sum(self):
n_matrix = self.n_matrix
assert_almost_equal(n_matrix.sum(), self.m.sum())
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(r[1][k[1]])[..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1][k[1]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.matrix(np.hstack([n_matrix.ent_table] + [n_matrix.att_table[0][k[0]], n_matrix.att_table[1][k[1]], sr0, sr1, rr]))
assert_almost_equal(n_matrix.sum(), m.sum())
assert_almost_equal((n_matrix+2).sum(), (m+2).sum())
assert_almost_equal((n_matrix*2).sum(), (m*2).sum())
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
n_matrix = nm.NormalizedMatrix(s, r, k, identity=True)
m = np.hstack([n_matrix.ent_table] + [np.identity(2)[k[0]], n_matrix.att_table[0][k[0]],
np.identity(2)[k[1]], n_matrix.att_table[1][k[1]]])
assert_almost_equal(n_matrix.sum(), m.sum())
assert_almost_equal((n_matrix+2).sum(), (m+2).sum())
assert_almost_equal((n_matrix*2).sum(), (m*2).sum())
def test_lmm(self):
# lmm with vector
n_matrix = self.n_matrix
x = np.matrix([[1.0], [2.0], [3.0], [4.0]])
assert_equal(n_matrix * x, self.m * x)
s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
k = [np.array([0, 1, 1, 0, 1]),
np.array([1, 1, 0, 1, 0])]
r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
np.matrix([[5.5, 6.6, 7.7], [8.8, 9.9, 10.10]])]
x = np.matrix(np.arange(1.0, 36.0)).T
n_matrix = nm.NormalizedMatrix(s, r, k, second_order=True)
rr = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(r[1][k[1]])[..., np.newaxis]).reshape(s.shape[0], -1)
sr0 = (np.asarray(r[0][k[0]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
sr1 = (np.asarray(r[1][k[1]][:, np.newaxis]) * np.asarray(s)[..., np.newaxis]).reshape(s.shape[0], -1)
m = np.hstack([n_matrix.ent_table] + [n_matrix.att_table[0][k[0]], n_matrix.att_table[1][k[1]], sr0, sr1, rr])
assert_almost_equal(n_matrix * x, m.dot(x))
assert_almost_equal((n_matrix + 2) * x, (m + 2).dot(x))
assert_almost_equal(np.power(n_matrix, 2) * x, np.power(m, 2).dot(x))
# lmm with matrix
x = np.hstack((np.matrix(np.arange(1.0, 36.0)).T, np.matrix(np.arange(36.0, 71.0)).T))
assert_almost_equal(n_matrix * x, m.dot(x))
assert_almost_equal((n_matrix + 2) * x, (m + 2).dot(x))
assert_almost_equal(
|
np.power(n_matrix, 2)
|
numpy.power
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 14:27:13 2019
@author: matthieubriet
"""
"""
On importe les differents modules necessaires à la realisation du programme
"""
import PIL
from PIL import Image
from PIL import ImageOps
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
"""
etape 0 extraction des videos grace au module opencv
"""
def video_to_image(monfichier):
video=cv2.VideoCapture(monfichier)
j=0
for i in range(800):
a , image= video.read()
cv2.imwrite("%d.jpg" %j, image)
j+=1
#video_to_image("Piece 1 - Appareil 2.mp4")
""" etape 1: Traitement des images """
'Paramètres Piece1'
"On ressence l'ensemble des parametres pour avoir une modification plus aisé du programme"
hauteur_piece=197 # hauteur piece independant de 1 ou 2
"appareil1"
Y1min=2700 #rognage de l'image
Y1max=3400 #rognage de l'image
X1min=2300 #rognage de l'image
X1max=3300 #rognage de l'image
h1=625 # valeur du tableau excel
l1=1430 # valeur tableau excel
alpha1=20 # valeur tableau excel
beta1=30 # valeur tableau excel
f1=52 # valeur tableau excel
hi1=4000 #hauteur de l'image en pixel
ci1=3089 #coordonnee en largeur du trou dans la piece
hp1=1290 # hauteur de la piece en pixel
k1 = (hp1/hauteur_piece)*(l1/f1)
"appareil 2 photo "
Y2min=1900
Y2max=3000 #coordonnées utiles au rognage de l'image
X2min=1100
X2max=2800
h2=550
l2=1295
alpha2=18 #parametres propre à l'appareil 2
beta2=18.8
f2=42
hi2=3456 #hauteur de l'image en pixel
ci2=2550 #coordonnee en largeur du trou dans la piece
hp2=1096 #hauteur de la piece en pixel
k2=(hp2/hauteur_piece)*(l2/f2)
"données video appareil 2"
Ymin=420 #coordonnées utile au rognage de l'image
Ymax=920
Xmin=860
Xmax=1200
hiv=1080 #hauteur de l'image issue des videos en pixel
hpv=408 #coordonnee en largeur du trou dans la piece pour les images issus des vidéos
civ=1030 #hauteur de la piece en pixel pour les images issus des vidéos
kv=(hpv/hauteur_piece)*(l2/f2) #k de la video
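# Illustrative note (not from the original script): the scale factor k appears to convert pixel offsets
# in the image into millimetres in the camera frame -- hp/hauteur_piece is the number of pixels per
# millimetre measured on the part, and l/f looks like the lens magnification taken from the Excel table.
# Rough orders of magnitude under that reading (values for illustration only):
#   k1 = (1290/197)*(1430/52) ~ 6.55 * 27.5 ~ 180
#   k2 = (1096/197)*(1295/42) ~ 5.56 * 30.8 ~ 172
# so a 172-pixel offset on a camera-2 photo corresponds to roughly 1 mm in the camera frame.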
def calcul_normale(a,b,c,d,e,f,g,h,i):
"""
cette fonction nous permet d'éffectuer un produit vectoriel
"""
return [(b-e)*(f-i)-(c-f)*(e-h),(c-f)*(d-g)-(f-i)*(a-d),(a-d)*(f-h)-(b-e)*(d-g)]
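# Quick sanity check (illustrative, not part of the original script), reading the arguments as three
# points P1=(a,b,c), P2=(d,e,f), P3=(g,h,i): the result is the cross product (P1 - P2) x (P2 - P3).
# For P1=(1,0,0), P2=(0,0,0), P3=(0,1,0) this gives [0, 0, -1], matching numpy's own cross product:
# assert calcul_normale(1,0,0, 0,0,0, 0,1,0) == list(np.cross([1, 0, 0], [0, -1, 0]))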
class Piece():
def __init__(self):
self.Mapiece=[]
def ajouter_ligne_a_ma_piece(self,ligne):
if isinstance(ligne,Ligne):
self.Mapiece.append(ligne.Repere)
def STL(self):
"""
cette methode permet d'afficher la representation de la piece sur STL
"""
longueur=0
triangle_tot=[]
n=len(self.Mapiece)
for i in range(n-1): # number of profile lines
longueur=min(len(self.Mapiece[i][0]),len(self.Mapiece[i+1][0]))
for j in range(longueur-1): # avoid running past the end of the line
triangle1=[[self.Mapiece[i][0][j],self.Mapiece[i][1][j],self.Mapiece[i][2][j]],[self.Mapiece[i][0][j+1],self.Mapiece[i][1][j+1],self.Mapiece[i][2][j+1]],[self.Mapiece[i+1][0][j],self.Mapiece[i+1][1][j],self.Mapiece[i+1][2][j]]]
triangle2=[[self.Mapiece[i+1][0][j],self.Mapiece[i+1][1][j],self.Mapiece[i+1][2][j]],[self.Mapiece[i+1][0][j+1],self.Mapiece[i+1][1][j+1],self.Mapiece[i+1][2][j+1]],[self.Mapiece[i][0][j+1],self.Mapiece[i][1][j+1],self.Mapiece[i][2][j+1]]]
triangle_tot.append(triangle1)
triangle_tot.append(triangle2)
longueur2=min(len(self.Mapiece[len(self.Mapiece)-1][0]),len(self.Mapiece[0][0])) # join the first and last lines
for j in range(longueur2-1): # avoid running past the end of the line
triangle1=[[self.Mapiece[n-1][0][j],self.Mapiece[n-1][1][j],self.Mapiece[n-1][2][j]],[self.Mapiece[n-1][0][j+1],self.Mapiece[n-1][1][j+1],self.Mapiece[n-1][2][j+1]],[self.Mapiece[0][0][j],self.Mapiece[0][1][j],self.Mapiece[0][2][j]]]
triangle2=[[self.Mapiece[0][0][j],self.Mapiece[0][1][j],self.Mapiece[0][2][j]],[self.Mapiece[0][0][j+1],self.Mapiece[0][1][j+1],self.Mapiece[0][2][j+1]],[self.Mapiece[n-1][0][j+1],self.Mapiece[n-1][1][j+1],self.Mapiece[n-1][2][j+1]]]
triangle_tot.append(triangle1)
triangle_tot.append(triangle2)
fichier=open("STL.stl",'w')
fichier.write("solid Piece1\n")
for i in triangle_tot:
a=i[0][0][0]
b=i[0][1][0]
c=i[0][2][0]
d=i[1][0][0]
e=i[1][1][0]
f=i[1][2][0]
g=i[2][0][0]
h=i[2][1][0]
i=i[2][2][0]
normale=calcul_normale(a,b,c,d,e,f,g,h,i) # compute the cross product to determine the facet normal
fichier.write("facet normal "+str(normale[0])+" "+str(normale[1])+" "+str(normale[2])+"\n")
fichier.write("\touter loop\n")
fichier.write("\t\tvertex "+str(a)+" "+str(b)+" "+str(c)+"\n")
fichier.write("\t\tvertex "+str(d)+" "+str(e)+" "+str(f)+"\n")
fichier.write("\t\tvertex "+str(g)+" "+str(h)+" "+str(i)+"\n")
fichier.write("\tendloop\n")
fichier.write("endfacet\n")
fichier.write("endsolid Piece1\n")
fichier.close()
Xtot=[]
Ytot=[]
Ztot=[]
class Ligne(Piece):
def __init__(self,tonimage):
self.Image=str(tonimage)
print(self.Image)
a,b=str(tonimage).split(".")
if type_traitement=="video":
val=str(a)#.split("/")[2]
elif type_traitement=="image":
val=str(a).split("/")[2]
if type_traitement=="video":
self.Angle=float(val)*np.pi/180*360/799
elif type_traitement=="image":
self.Angle=float(val)*np.pi/180
self.Matrice=[]
self.ligne=[]
self.Camera=[]
self.Monde=[]
self.Repere=[]
def ImporterImage(self):
image=PIL.Image.open(self.Image,mode='r')
self.Matrice=np.array(image)
def changement_repere_camera(self,Xmin,Xmax,Ymin,Ymax,hi,ci,k):
"""
avec cette fonction on passe dans le repere camera en mm depuis le repere image en pixel
"""
Xcamera=[]
Ycamera=[]
if type_traitement=="video": #Ces filtres nous permettent d'enlever les points trop loin
filtre=0.5
elif type_traitement=="image":
filtre=0.2
for i in range(Ymin,Ymax):
L=[]
for j in range(Xmin,Xmax):
if int(self.Matrice[i,j][0])>int(254): # first filter: we only keep red pixels
L.append(j)
if len(L)>0:
n=len(L)
y=sum(L)
position=int(y/n) # take the mean position of the red pixels when there are several
if len(Xcamera)==0:
Ycamera.append((-i+hi/2)/k) # move the origin of the frame, then convert to mm
Xcamera.append((position-ci)/k)
elif abs((position-ci)/k-Xcamera[-1])<filtre : # second filter to remove spurious points
Ycamera.append((-i+hi/2)/k)
Xcamera.append((position-ci)/k)
self.Camera.append(Xcamera)
self.Camera.append(Ycamera)
#plt.plot(Xcamera,Ycamera,".")
#plt.show()
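# Illustrative example of the conversion above (assumed values, not produced by the script): with the
# camera-2 photo parameters (hi2=3456, ci2=2550, k2 ~ 172), a red pixel detected at row i=1728 with a
# mean column position of 2600 maps to Ycamera = (-1728 + 3456/2)/k2 = 0 mm and
# Xcamera = (2600 - 2550)/k2 ~ 0.29 mm in the camera frame.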
def changement_repere_monde(self,f,beta,alpha,h,l): # alpha = camera tilt angle; beta = angle between the laser and the camera (alpha=20, beta=30, f=52), to be converted to radians
"""
This function projects the camera-frame points onto the laser plane.
To determine the coordinates in the laser plane we solve a system XP=Y, where P is the rotation matrix and Y is the direction vector of the laser plane
"""
Xm=[]
Ym=[]
Zm=[]
alpharad=alpha*np.pi/180 # convert the angles to radians for use in cos and sin
betarad=beta*np.pi/180
n=len(self.Camera[0])
z=[-f]*n
self.Camera.append(z)
P=np.array([[-np.cos(betarad),-np.sin(betarad),0],[np.sin(betarad)*np.sin(alpharad),-np.cos(betarad)*np.sin(alpharad),np.cos(alpharad)],[-np.cos(alpharad)*np.sin(betarad),np.cos(alpharad)*np.cos(betarad),np.sin(alpharad)]])
P2=np.linalg.inv(P) # P2 is the inverse of P (we need P2 to solve the system)
vecteur=[]
for i in range(n):
vecteur=
|
np.array([[self.Camera[0][i]],[self.Camera[1][i]],[-f]])
|
numpy.array
|
from pdb import set_trace as T
import pygame
from pygame.surface import Surface
import numpy as np
import time
from enum import Enum
class Neon(Enum):
def rgb(h):
h = h.lstrip('#')
return tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
RED = rgb('#ff0000')
ORANGE = rgb('#ff8000')
YELLOW = rgb('#ffff00')
GREEN = rgb('#00ff00')
MINT = rgb('#00ff80')
CYAN = rgb('#00ffff')
BLUE = rgb('#0000ff')
PURPLE = rgb('#8000ff')
MAGENTA = rgb('#ff00ff')
WHITE = rgb('#ffffff')
GRAY = rgb('#666666')
BLACK = rgb('#000000')
BLOOD = rgb('#bb0000')
BROWN = rgb('#7a3402')
GOLD = rgb('#eec600') #238 198
SILVER = rgb('#b8b8b8')
FUCHSIA = rgb('#ff0080')
SPRING = rgb('#80ff80')
SKY = rgb('#0080ff')
TERM = rgb('#41ff00')
def rand12():
ind = np.random.randint(0, 12)
return (
Neon.RED, Neon.ORANGE, Neon.YELLOW,
Neon.GREEN, Neon.MINT, Neon.CYAN,
Neon.BLUE, Neon.PURPLE, Neon.MAGENTA,
Neon.FUCHSIA, Neon.SPRING, Neon.SKY)[ind].value
class Container:
def __init__(self, size, border=0, reset=False, key=None):
self.W, self.H = size
self.canvas = Surface((self.W, self.H))
if key is not None:
self.canvas.set_colorkey(key)
self.border = border
self.left, self.right = self.border, self.W-self.border
self.top, self.bottom = self.border, self.H-self.border
def renderBorder(self):
for coords in [
(0, 0, self.W, self.top),
(0, 0, self.left, self.H),
(0, self.bottom, self.W, self.H),
(self.right, 0, self.W, self.H)
]:
pygame.draw.rect(self.canvas, Color.RED, coords)
def reset(self):
self.canvas.fill((Color.BLACK))
if self.border > 0:
self.renderBorder()
def fill(self, color):
self.canvas.fill((color))
def blit(self, container, pos, area=None, flags=0):
w, h = pos
pos = (w+self.border, h+self.border)
if type(container) == Surface:
self.canvas.blit(container, pos, area=area, special_flags=flags)
else:
self.canvas.blit(container.canvas, pos, area=area, special_flags=flags)
if self.border > 0:
self.renderBorder()
def rect(self, color, coords, lw=0):
pygame.draw.rect(self.canvas, color, coords, lw)
def line(self, color, start, end, lw=1):
pygame.draw.line(self.canvas, color, start, end, lw)
def fromTiled(tiles, tileSz):
R, C, three = tiles.shape
ret = np.zeros((R, C), dtype=object)
for r in range(R):
for c in range(C):
ret[r, c] = Surface((tileSz, tileSz))
ret[r, c].fill(tiles[r, c, :])
return ret
def makeMap(tiles, tileSz):
R, C = tiles.shape
surf = Surface((int(R*tileSz), int(C*tileSz)))
for r in range(R):
for c in range(C):
surf.blit(tiles[r, c], (int(r*tileSz), int(c*tileSz)))
return surf
def surfToIso(surf):
ret = pygame.transform.rotate(surf, 45)
W, H = ret.get_width(), ret.get_height()
ret = pygame.transform.scale(ret, (W, H//2))
return ret
def rotate(x, y, theta):
pt = np.array([[x], [y]])
mat = np.array([
[np.cos(theta), -np.sin(theta)],
[
|
np.sin(theta)
|
numpy.sin
|
"""
Copyright 2020 by <NAME>, <NAME> and <NAME>.
Instituto de Matemáticas (UNAM-CU) México
This is free software; you can redistribute it and/or
modify it under the terms of the MIT License,
https://en.wikipedia.org/wiki/MIT_License.
This software has NO WARRANTY, not even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
from __future__ import unicode_literals
import numpy as np
import sympy as sym
import permutation as perm
import torch
import tensorflow as tf
from scipy import linalg as splinalg
from scipy.sparse import csr_matrix, triu
from poisson.poisson import PoissonGeometry
from numpoisson.errors import (MultivectorError, FunctionError,
DiferentialFormError, CasimirError,
DimensionError)
from numpoisson.utils import (dict_mesh_eval, list_mesh_eval,
num_matrix_of, num_vector_of,
zeros_array, validate_dimension)
class NumPoissonGeometry:
""" This class provides some useful tools for Poisson-Nijenhuis calculus on Poisson manifolds."""
def __init__(self, dimension, variable='x'):
# Obtains the dimension
self.dim = validate_dimension(dimension)
# Define what variables the class will work with
self.variable = variable
# Create the symbolic coordinate symbols
self.coords = sym.symbols(f'{self.variable}1:{self.dim + 1}')
# Instantiate the PoissonGeometry package
self.pg = PoissonGeometry(self.dim, self.variable)
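# Note (illustrative, not from the original source): with dimension=3 and the default variable 'x',
# sym.symbols('x1:4') makes self.coords the tuple (x1, x2, x3); this is the coordinate order assumed
# by the mesh evaluations in the methods below.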
def num_bivector(self, bivector, mesh, torch_output=False, tf_output=False, dict_output=False):
""" Evaluates a bivector field into at each point of the mesh.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of a bivector. This value
can be converted to Tensor PyTorch or TensorFlow by setting their respective flag as True in
the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1,2): 'x3', (1,3): '-x2', (2,3): 'x1'}
>>> # Creates a simple mesh
>>> mesh = np.array([[0., 0., 1.]])
>>> # Evaluates the mesh into bivector
>>> npg3.num_bivector(bivector, mesh)
>>> [[[ 0. 1. -0.]
[-1. 0. 0.]
[ 0. -0. 0.]]
>>> npg3.num_bivector(bivector, mesh, tf_output=True)
>>> tf.Tensor(
[[[ 0. 1. -0.]
[-1. 0. 0.]
[ 0. -0. 0.]]], shape=(1, 3, 3), dtype=float64)
>>> npg3.num_bivector(bivector, mesh, torch_output=True)
tensor([[[ 0., 1., -0.],
[-1., 0., 0.],
[ 0., -0., 0.]]], dtype=torch.float64)
>>> npg3.num_bivector(bivector, mesh, dict_output=True)
>>> [{(1, 2): 1.0, (1, 3): -0.0, (2, 3): 0.0}]
"""
len_keys = []
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {bivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {bivector}")
len_keys.append(len(e))
if len(set(len_keys)) > 1:
raise MultivectorError('keys with different lengths')
# Evaluates all point from the mesh in the bivector and save in a np array
dict_list = dict_mesh_eval(bivector, mesh, self.coords)
raw_result = [num_matrix_of(e, self.dim) for e in dict_list]
np_result = np.array(raw_result)
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
return dict_list
# return the result in Numpy array
return np_result
def num_bivector_to_matrix(self, bivector, mesh, torch_output=False, tf_output=False):
""" Evaluates a matrix of a 2-contravariant tensor field or bivector field into a mesh.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of a bivector. This value
can be converted to Tensor PyTorch or TensorFlow by setting their respective flag as True in
the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1,2): 'x3', (1,3): '-x2', (2,3): 'x1'}
>>> # Creates a mesh
>>> mesh = np.array([[0., 0., 1.]])
>>> # Evaluates the mesh into bivector
>>> npg3.num_bivector_to_matrix(bivector, mesh)
>>> [[[ 0. 1. 0.]
[-1. 0. 0.]
[-0. -0. 0.]]]
>>> npg3.num_bivector_to_matrix(bivector, mesh, tf_output=True)
>>> tf.Tensor(
[[[ 0. 1. 0.]
[-1. 0. 0.]
[-0. -0. 0.]]], shape=(1, 3, 3), dtype=float64)
>>> npg3.num_bivector_to_matrix(bivector, mesh, torch_output=True)
>>> tensor([[[ 0., 1., 0.],
[-1., 0., 0.],
[-0., -0., 0.]]], dtype=torch.float64)
"""
len_keys = []
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {bivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {bivector}")
len_keys.append(len(e))
if len(set(len_keys)) > 1:
raise MultivectorError('keys with different lengths')
dict_list = dict_mesh_eval(bivector, mesh, self.coords)
# Converts to bivector from dict format into matrix format
raw_result = [num_matrix_of(e, self.dim) for e in dict_list]
# Evaluates all point from the mesh in the bivector and save in a np array
np_result = np.array(raw_result)
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# TODO add dict_output flag
# return the result in Numpy array
return np_result
def num_sharp_morphism(self, bivector, one_form, mesh, torch_output=False, tf_output=False, dict_output=False):
"""
Evaluates the image of a differential 1-form under the vector bundle morphism 'sharp' P#: T*M -> TM,
defined by P#(alpha) := i_(alpha)P, at the points of a given mesh, where P is a Poisson bivector
field on a manifold M, alpha a 1-form on M and i the interior product of alpha and P.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:one_form:
Is a 1-form differential in a dictionary format with tuple type 'keys' and string type 'values'.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of sharp morphism. This value
can be converted to Tensor PyTorch or TensorFlow by setting their respective flag as True in
the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1, 2): 'x3', (1, 3): '-x2', (2, 3): 'x1'}
>>> # For one form x1*dx1 + x2*dx2 + x3*dx3.
>>> one_form = {(1,): 'x1', (2,): 'x2', (3,): 'x3'}
>>> # Creates a mesh
>>> mesh = np.array([[0., 0., 1.]])
>>> # Evaluates the mesh into bivector
>>> npg3.num_sharp_morphism(bivector, one_form, mesh)
>>> [[[-0.]
[-0.]
[-0.]]]
>>> npg3.num_sharp_morphism(bivector, one_form, mesh, torch_output=True)
>>> tensor([[[-0.],
[-0.],
[-0.]]], dtype=torch.float64)
>>> npg3.num_sharp_morphism(bivector, one_form, mesh, tf_output=True)
>>> tf.Tensor([[[-0.]
[-0.]
[-0.]]], shape=(1, 3, 1), dtype=float64)
>>> npg3.num_sharp_morphism(bivector, one_form, mesh, dict_output=True)
>>> array([{}], dtype=object)
"""
for e in one_form:
if len(e) > 1:
raise DiferentialFormError(F"invalid key {e} in {one_form}")
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form}")
# Converts to one-form from dict format into vector-column format (matrix of dim x 1)
dict_list = dict_mesh_eval(one_form, mesh, self.coords)
one_form_num_vec = [num_vector_of(e, self.dim) for e in dict_list]
# Converts to bivector from dict format into matrix format
bivector_num_mat = self.num_bivector_to_matrix(bivector, mesh)
raw_result = map(lambda e1, e2: (-1) * np.dot(e1, e2), bivector_num_mat, one_form_num_vec)
# Making the product of bivector matrix with vector-column and saving the result in a Numpy array
np_result = np.array(tuple(raw_result))
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dict_list = []
for e in range(len(mesh)):
dictionary = dict(enumerate(np_result[e].flatten(), 1))
remove_zeros = {(e,): dictionary[e] for e in dictionary if dictionary[e] != 0}
if not bool(remove_zeros):
remove_zeros = {}
dict_list.append(remove_zeros)
return np.array(dict_list)
# return the result in Numpy array
return np_result
def num_hamiltonian_vf(self, bivector, function, mesh, torch_output=False, tf_output=False, dict_output=False):
"""
Evaluates the Hamiltonian vector field of a function relative to a Poisson bivector field at
the points of a given mesh. The Hamiltonian vector field is calculated as follows: X_h = P#(dh),
where d is the exterior derivative of h and P#: T*M -> TM is the vector bundle morphism defined
by P#(alpha) := i_(alpha)P, with i is the interior product of alpha and P.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:function:
Is a scalar function h: M --> R given as a string.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of a Hamiltonian vector field.
This value can be converted to Tensor PyTorch or TensorFlow by setting their respective flag as True
in the params
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1, 2): 'x3', (1, 3): '-x2', (2, 3): 'x1'}
>>> # For the Hamiltonian function h(x1,x2,x3) = 1/(x1-x2) + 1/(x1-x3) + 1/(x2-x3)
>>> ham_function = '1/(x1 - x2) + 1/(x1 - x3) + 1/(x2 - x3)'
>>> # Creates a mesh
>>> mesh = np.array([[1., 2., 3.]])
>>> # Evaluates the mesh into bivector
>>> npg3.num_hamiltonian_vf(bivector, ham_function, mesh)
>>> [[[ 2.5]
[-5. ]
[ 2.5]]]
>>> npg3.num_hamiltonian_vf(bivector, ham_function, mesh, torch_output=True)
>>> tensor([[[ 2.5000],
[-5.0000],
[ 2.5000]]], dtype=torch.float64)
>>> npg3.num_hamiltonian_vf(bivector, ham_function, mesh, tf_output=True)
>>> tf.Tensor(
[[[ 2.5]
[-5. ]
[ 2.5]]], shape=(1, 3, 1), dtype=float64)
>>> npg3.num_hamiltonian_vf(bivector, ham_function, mesh, dict_output=True)
>>> array([{(1,): 2.5, (2,): -5.0, (3,): 2.5}], dtype=object)
"""
# Converts the hamiltonian_function from type string to symbolic expression
ff = sym.sympify(function)
# Calculates the differential matrix of Hamiltonian function
d_ff = sym.Matrix(sym.derive_by_array(ff, self.coords))
d_ff = {(i + 1,): d_ff[i] for i in range(self.dim) if sym.simplify(d_ff[i]) != 0}
return self.num_sharp_morphism(
bivector, d_ff, mesh,
tf_output=tf_output, torch_output=torch_output, dict_output=dict_output
)
def num_poisson_bracket(self, bivector, function_1, function_2,
mesh, torch_output=False, tf_output=False):
"""
Calculates the evaluation of the Poisson bracket {f,g} = π(df,dg) = ⟨dg,π#(df)⟩ of two functions f and g on
a Poisson manifold (M,P) at every point of a given mesh, where d is the exterior derivative and
P#: T*M -> TM is the vector bundle morphism defined by P#(alpha) := i_(alpha)P, with i the interior
product of alpha and P.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:function_1/function_2:
Is a scalar function f: M --> R given as a string.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of Poisson bracket.
This value can be converted to Tensor PyTorch or TensorFlow by setting their respective flag as True
in the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1, 2): 'x3', (1, 3): '-x2', (2, 3): 'x1'}
>>> # For f(x1,x2,x3) = x1 + x2 + x3.
>>> function_1 = 'x1 + x2 + x3'
>>> # For g(x1,x2,x3) = '2*x1 + 3*x2 + 4*x3'.
>>> function_2 = '2*x1 + 3*x2 + 4*x3'
>>> # Creates a mesh
>>> mesh = np.array([[5., 10., 0.]])
>>> # Evaluates the mesh into {f,g}
>>> npg3.num_poisson_bracket(bivector, function_1, function_2, mesh)
>>> [-15.]
>>> npg3.num_poisson_bracket(bivector, function_1, function_2, mesh, torch_output=True)
>>> tensor([-15.], dtype=torch.float64)
>>> npg3.num_poisson_bracket(bivector, function_1, function_2, mesh, tf_output=True)
>>> tf.Tensor([-15.], shape=(1,), dtype=float64)
"""
# Convert from string to sympy value the function_2
gg = sym.sympify(function_2)
# Calculates the differential matrix of function_2
d_gg = sym.derive_by_array(gg, self.coords)
# Evaluates the differential matrix of function_2 in each point from a mesh and converts to Numpy array
dgg_num_vec = list_mesh_eval(d_gg, mesh, self.coords)
# Evaluates the Hamiltonian vector field with function_1 in each point from a mesh and converts to Numpy
ham_ff_num_vec = self.num_hamiltonian_vf(bivector, function_1, mesh)
raw_result = map(lambda e1, e2: np.dot(e1, e2)[0], dgg_num_vec, ham_ff_num_vec)
np_result = np.array(tuple(raw_result))
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# TODO add dict_output flag
# return the result in Numpy array
return np_result
def num_curl_operator(self, multivector, function, mesh, torch_output=False, tf_output=False, dict_output=False):
"""
Evaluates the divergence of a multivector field at all the given points of a mesh.
Parameters
==========
:multivector:
Is a multivector field in a dictionary format with tuple type 'keys' and string type 'values'.
:function:
Is a nowhere vanishing function in a string type. If the function is constant you can input the
number type.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:dict_output:
Is a boolean flag to indicates if the result is given in a bivector in dictionary format, its
default value is False.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of the divergence of the multivector
field in matrix format. This value can be converted to Tensor PyTorch, TensorFlow or dictionary format
by setting their respective flag as True in the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 4
>>> npg4 = NumPoissonGeometry(4)
>>> # For bivector 2*x4*Dx1^Dx3 + 2*x3*Dx1^Dx4 - 2*x4*Dx2^Dx3 + 2*x3*Dx2^Dx4 + (x1-x2)*Dx3^Dx4
>>> bivector = {(1, 3): '2*x4', (1, 4): '2*x3', (2, 3): '-2*x4', (2, 4): '2*x3', (3, 4): 'x1 - x2'}
>>> mesh = np.array([[0., 0., 0., 0.]])
>>> # For the nowhere vanishing function 1
>>> function = 1
>>> # Evaluates the divergence of the bivector at the mesh points
>>> npg4.num_curl_operator(bivector, function, mesh)
>>> [[[ 0.]
[ 0.]
[ 0.]
[ 0.]]]
>>> npg4.num_curl_operator(bivector, function, mesh, torch_output=True)
>>> tensor([[[ 0.],
[ 0.],
[ 0.],
[ 0.]]], dtype=torch.float64)
>>> npg4.num_curl_operator(bivector, function, mesh, tf_output=True)
>>> tf.Tensor(
[[[ 0.],
[ 0.],
[ 0.],
[ 0.]]], shape=(1, 3, 1), dtype=float64)
>>> npg4.num_curl_operator(bivector, 1, mesh, dict_output=True)
>>> array([{}], dtype=object)
"""
if sym.simplify(sym.sympify(function)) == 0:
raise FunctionError(F'Function {function} == 0')
if not bool(multivector):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
if isinstance(multivector, str):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
len_keys = []
for e in multivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {multivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {multivector}")
len_keys.append(len(e))
if len(set(len_keys)) > 1:
raise MultivectorError('keys with different lengths')
curl_operator = self.pg.curl_operator(multivector, function)
if not bool(curl_operator):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
curl_operator = {tuple(map(lambda x: x-1, list(e))): curl_operator[e] for e in curl_operator}
deg_curl = len(next(iter(curl_operator)))
permut_curl = []
for e in curl_operator:
permut_curl.append({x.permute(e): (x.sign) * curl_operator[e] for x in perm.Permutation.group(deg_curl)})
for e in permut_curl:
curl_operator.update(e)
dict_eval = dict_mesh_eval(curl_operator, mesh, self.pg.coords)
np_result = []
zero_tensor = zeros_array(deg_curl, self.pg.dim)
for dictt in dict_eval:
copy_zero_tensor = np.copy(zero_tensor)
for e2 in dictt:
copy_zero_tensor[e2] = dictt[e2]
np_result.append(copy_zero_tensor)
np_result = np.array(np_result)
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dict_eval = [{tuple(map(lambda x: x+1, list(e))): dictt[e] for e in dictt} for dictt in dict_eval]
return np.array(dict_eval)
# return the result in Numpy array
return np_result
def num_coboundary_operator(self, bivector, multivector, mesh,
torch_output=False, tf_output=False, dict_output=False):
"""
Evaluates the Schouten-Nijenhuis bracket between a given (Poisson) bivector field and an (arbitrary)
multivector field at all the given points of a mesh.
The Lichnerowicz-Poisson operator is defined as
[P,A](df1,...,df(a+1)) = sum_(i=1)^(a+1) (-1)**(i)*{fi,A(df1,...,î,...,df(a+1))}_P
+ sum(1<=i<j<=a+1) (-1)**(i+j)*A(d{fi,fj}_P,..î..^j..,df(a+1))
where P = Pij*Dxi^Dxj (i < j), A = A^J*Dxj_1^Dxj_2^...^Dxj_a.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:multivector:
Is a multivector field in a dictionary format with tuple type 'keys' and string type 'values'.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:dict_output:
Is a boolean flag to indicates if the result is given in a bivector in dictionary format, its
default value is False.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of the Schouten-Nijenhuis bracket.
This value can be converted to Tensor PyTorch, TensorFlow or dictionary format by setting their respective
flag as True in the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1, 2): 'x3', (1, 3): '-x2', (2, 3): 'x1'}
>>> # Defines a one form W
>>> W = {(1,): 'x1 * exp(-1/(x1**2 + x2**2 - x3**2)**2) / (x1**2 + x2**2)',
(2,): 'x2 * exp(-1/(x1**2 + x2**2 - x3**2)**2) / (x1**2 + x2**2)',
(3,): 'exp(-1 / (x1**2 + x2**2 - x3**2)**2)'}
>>> mesh = np.array([[0., 0., 1.]])
>>> # Evaluates the mesh into {f,g}
>>> npg3.num_coboundary_operator(bivector, W, mesh)
>>> [[[ 0. , 0.36787945, 0. ],
[-0.36787945, 0. , 0. ],
[ 0. , 0. , 0. ]]]
>>> npg3.num_coboundary_operator(bivector, W, mesh, dict_output=True)
>>> [{(1, 2): 0.36787944117144233}]
>>> npg3.num_coboundary_operator(bivector, W, mesh, torch_output=True)
>>> tensor([[[ 0.0000, 0.3679, 0.0000],
[-0.3679, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000]]], dtype=torch.float64)
>>> npg3.num_coboundary_operator(bivector, W, mesh, tf_output=True)
>>> tf.Tensor: shape=(1, 3, 3), dtype=float64, numpy=
array([[[ 0. , 0.36787945, 0. ],
[-0.36787945, 0. , 0. ],
[ 0. , 0. , 0. ]]]
"""
if not bool(bivector) or not bool(multivector):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
if isinstance(multivector, str):
# [P,f] = -X_f, for any function f.
return self.num_hamiltonian_vf(
bivector, f'(-1) * ({multivector})', mesh,
torch_output=torch_output, tf_output=tf_output, dict_output=dict_output
)
len_keys = []
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F'repeated indexes {e} in {bivector}')
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F'invalid key {e} in {bivector}')
len_keys.append(len(e))
if len(set(len_keys)) > 1:
raise MultivectorError('keys with different lengths')
len_keys = []
for e in multivector:
if len(set(e)) < len(e):
raise MultivectorError(F'repeated indexes {e} in {multivector}')
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F'invalid key {e} in {multivector}')
len_keys.append(len(e))
if len(set(len_keys)) > 1:
raise MultivectorError('keys with different lengths')
# Degree of multivector
deg_mltv = len(next(iter(multivector)))
if deg_mltv + 1 > self.pg.dim:
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
image_mltv = self.pg.coboundary_operator(bivector, multivector)
if not bool(image_mltv):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
image_mltv = {tuple(map(lambda x: x-1, list(e))): image_mltv[e] for e in image_mltv}
permut_image = []
for e in image_mltv:
permut_image.append({x.permute(e): (x.sign) * image_mltv[e] for x in perm.Permutation.group(deg_mltv + 1)})
for e in permut_image:
image_mltv.update(e)
dict_eval = dict_mesh_eval(image_mltv, mesh, self.pg.coords)
np_result = []
zero_tensor = zeros_array(deg_mltv + 1, self.pg.dim)
for dictt in dict_eval:
copy_zero_tensor = np.copy(zero_tensor)
for e2 in dictt:
copy_zero_tensor[e2] = dictt[e2]
np_result.append(copy_zero_tensor)
np_result = np.array(np_result)
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dict_eval = [{tuple(map(lambda x: x+1, list(e))): dictt[e] for e in dictt} for dictt in dict_eval]
return np.array(dict_eval)
# return the result in Numpy array
return np_result
def num_one_forms_bracket(self, bivector, one_form_1, one_form_2,
mesh, torch_output=False, tf_output=False, dict_output=False):
"""
Evaluates the Lie bracket of two differential 1-forms induced by a given Poisson bivector field at all
the given points of a mesh.
The bracket is computed as {alpha,beta}_P := i_P#(alpha)(d_beta) - i_P#(beta)(d_alpha) + d_P(alpha,beta)
for 1-forms alpha and beta, where d_alpha and d_beta are the exterior derivatives of alpha and beta,
respectively, i_ the interior product of vector fields on differential forms, P#: T*M -> TM the vector
bundle morphism defined by P#(alpha) := i_(alpha)P, with i the interior product of alpha and P. Note that,
by definition {df,dg}_P = d_{f,g}_P, for any functions f,g on M.
Parameters
==========
:bivector:
Is a Poisson bivector in a dictionary format with tuple type 'keys' and string type 'values'.
:one_form_1/one_form_2:
Is a 1-form differential in a dictionary format with tuple type 'keys' and string type 'values'.
:mesh:
Is a numpy array where each value is a list of float values that represents a point in R^{dim}.
:torch_output/tf_output:
Is a boolean flag to indicates if the result is given in a tensor from PyTorch/TensorFlow, its
default value is False.
Returns
=======
The default result is a NumPy array that contains all the evaluations of {one_form_1, one_form_2}_π.
This value can be converted to Tensor PyTorch, TensorFlow or dictionary format by setting their respective
flag as True in the params.
Example
========
>>> import numpy as np
>>> from poisson import NumPoissonGeometry
>>> # Instance the class to dimension 3
>>> npg3 = NumPoissonGeometry(3)
>>> # For bivector x3*Dx1^Dx2 - x2*Dx1^Dx3 + x1*Dx2^Dx3
>>> bivector = {(1, 2): 'x3', (1, 3): '-x2', (2, 3): 'x1'}
>>> # For one form alpha
>>> one_form_1 = {(1,): '2', (2,): '1', (3,): '2'}
>>> # For one form beta
>>> one_form_2 = {(1,): '1', (2,): '1', (3,): '1'}
>>> # Defines a simple mesh
>>> mesh = np.array([[1., 1., 1.]])
>>> # Evaluates the mesh into {one_form_1, one_form_2}_π
>>> npg3.num_one_forms_bracket(bivector, one_form_1, one_form_2, mesh,)
>>> [[[-1.]
[ 0.]
[ 1.]]]
>>> npg3.num_one_forms_bracket(bivector, one_form_1, one_form_2, mesh, torch_output=True)
>>> tensor([[[-1.],
[ 0.],
[ 1.]]], dtype=torch.float64)
>>> npg3.num_one_forms_bracket(bivector, one_form_1, one_form_2, mesh, tf_output=True)
>>> tf.Tensor(
[[[-1.]
[ 0.]
[ 1.]]], shape=(1, 3, 1), dtype=float64)
>>> npg3.num_one_forms_bracket(bivector, one_form_1, one_form_2, mesh, dict_output=True)
>>> array([{(1,): -1.0, (3,): 1.0}], dtype=object)
"""
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {bivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {bivector}")
if self.pg.is_in_kernel(bivector, one_form_1) and self.pg.is_in_kernel(bivector, one_form_2):
np_result = np.array([])
if torch_output:
return torch.from_numpy(np_result)
if tf_output:
return tf.convert_to_tensor(np_result)
if dict_output:
return np.array({})
return np_result
if self.pg.is_in_kernel(bivector, one_form_1):
form_1_vector = sym.zeros(self.dim + 1, 1)
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {bivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {bivector}")
for e in one_form_1:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_1}")
form_1_vector[int(*e)] = one_form_1[e]
for e in one_form_2:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_2}")
form_1_vector = form_1_vector[1:, :]
jac_form_1 = form_1_vector.jacobian(self.pg.coords)
jac_form_1 = sym.lambdify(self.pg.coords, jac_form_1)
jac_form_1_eval = [jac_form_1(*e) for e in mesh]
sharp_form_2_eval = self.num_sharp_morphism(bivector, one_form_2, mesh)
raw_result = map(lambda e1, e2: np.dot(e1.T - e1, e2), jac_form_1_eval, sharp_form_2_eval)
np_result = np.array(tuple(raw_result))
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dicts = [{(i + 1,): e[i][0] for i in range(self.pg.dim) if e[i][0] != 0} for e in np_result]
return np.array(dicts)
# return the result in Numpy array
return np_result
if self.pg.is_in_kernel(bivector, one_form_2):
form_2_vector = sym.zeros(self.pg.dim + 1, 1)
for e in bivector:
if len(set(e)) < len(e):
raise MultivectorError(F"repeated indexes {e} in {bivector}")
if len(tuple(filter(lambda x: (x <= 0), e))) > 0:
raise MultivectorError(F"invalid key {e} in {bivector}")
for e in one_form_1:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_1}")
for e in one_form_2:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_2}")
form_2_vector[int(*e)] = one_form_2[e]
form_2_vector = form_2_vector[1:, :]
jac_form_2 = form_2_vector.jacobian(self.pg.coords)
jac_form_2 = sym.lambdify(self.pg.coords, jac_form_2)
jac_form_2_eval = [jac_form_2(*e) for e in mesh]
sharp_form_1_eval = self.num_sharp_morphism(bivector, one_form_1, mesh)
raw_result = map(lambda e1, e2: np.dot(e1 - e1.T, e2), jac_form_2_eval, sharp_form_1_eval)
np_result = np.array(tuple(raw_result))
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dicts = [{(i + 1,): e[i][0] for i in range(self.pg.dim) if e[i][0] != 0} for e in np_result]
return np.array(dicts)
# return the result in Numpy array
return np_result
form_1_vector = sym.zeros(self.pg.dim + 1, 1)
form_2_vector = sym.zeros(self.pg.dim + 1, 1)
for e in one_form_1:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_1}")
form_1_vector[int(*e)] = one_form_1[e]
for e in one_form_2:
if e[0] <= 0:
raise DiferentialFormError(F"invalid key {e} in {one_form_2}")
form_2_vector[int(*e)] = one_form_2[e]
form_1_vector = form_1_vector[1:, :]
form_2_vector = form_2_vector[1:, :]
jac_form_1 = form_1_vector.jacobian(self.pg.coords)
jac_form_2 = form_2_vector.jacobian(self.pg.coords)
jac_form_1 = sym.lambdify(self.pg.coords, jac_form_1)
jac_form_2 = sym.lambdify(self.pg.coords, jac_form_2)
jac_form_1_eval = [jac_form_1(*e) for e in mesh]
jac_form_2_eval = [jac_form_2(*e) for e in mesh]
sharp_form_1_eval = self.num_sharp_morphism(bivector, one_form_1, mesh)
sharp_form_2_eval = self.num_sharp_morphism(bivector, one_form_2, mesh)
raw_result_1 = map(lambda e1, e2: np.dot(e1 - e1.T, e2), jac_form_2_eval, sharp_form_1_eval) # T1
raw_result_2 = map(lambda e1, e2: np.dot(e1.T - e1, e2), jac_form_1_eval, sharp_form_2_eval) # T2
sharp_form_1 = self.pg.sharp_morphism(bivector, one_form_1)
sharp_1 = sym.zeros(self.pg.dim + 1, 1)
for e in sharp_form_1:
sharp_1[int(*e)] = sharp_form_1[e]
sharp_1 = sharp_1[1:, :]
pair_form_2_sharp_1 = (form_2_vector.T * sharp_1)[0]
dd_pair_form_2_sharp_1 = sym.Matrix(sym.derive_by_array(pair_form_2_sharp_1, self.pg.coords))
dd_pair_form_2_sharp_1 = sym.lambdify(self.pg.coords, dd_pair_form_2_sharp_1)
dd_pair_f2_s1_eval = [dd_pair_form_2_sharp_1(*e) for e in mesh] # T3
raw_result = map(lambda e1, e2, e3: e1 + e2 + e3, raw_result_1, raw_result_2, dd_pair_f2_s1_eval)
np_result = np.array(tuple(raw_result))
# return the result in a PyTorch tensor if the flag is True
if torch_output:
return torch.from_numpy(np_result)
# return the result in a TensorFlow tensor if the flag is True
if tf_output:
return tf.convert_to_tensor(np_result)
# return the result in dictionary type
if dict_output:
dicts = [{(i + 1,): e[i][0] for i in range(self.pg.dim) if e[i][0] != 0} for e in np_result]
return
|
np.array(dicts)
|
numpy.array
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: remove_unresponsive_and_fluctuating_rings
:platform: Unix
:synopsis: Method working in the sinogram space to remove ring artifacts
caused by dead pixels.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from savu.plugins.plugin import Plugin
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin
import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage import binary_dilation
from scipy.ndimage import uniform_filter1d
from scipy import interpolate
@register_plugin
class RemoveUnresponsiveAndFluctuatingRings(Plugin, CpuPlugin):
def __init__(self):
super(RemoveUnresponsiveAndFluctuatingRings, self).__init__(
"RemoveUnresponsiveAndFluctuatingRings")
def setup(self):
in_dataset, out_dataset = self.get_datasets()
out_dataset[0].create_dataset(in_dataset[0])
in_pData, out_pData = self.get_plugin_datasets()
in_pData[0].plugin_data_setup('SINOGRAM', 'single')
out_pData[0].plugin_data_setup('SINOGRAM', 'single')
def detect_stripe(self, listdata, snr):
"""Algorithm 4 in the paper. To locate stripe positions.
Parameters
----------
listdata : 1D normalized array.
snr : Ratio (>1.0) used to detect stripe locations.
Returns
-------
listmask : 1D binary mask.
"""
numdata = len(listdata)
listsorted = np.sort(listdata)[::-1]
xlist = np.arange(0, numdata, 1.0)
ndrop = np.int16(0.25 * numdata)
(_slope, _intercept) = np.polyfit(
xlist[ndrop:-ndrop - 1], listsorted[ndrop:-ndrop - 1], 1)
numt1 = _intercept + _slope * xlist[-1]
noiselevel = np.abs(numt1 - _intercept)
if noiselevel == 0.0:
raise ValueError(
"The method doesn't work on noise-free data. If you " \
"apply the method on simulated data, please add" \
" noise!")
val1 = np.abs(listsorted[0] - _intercept) / noiselevel
val2 = np.abs(listsorted[-1] - numt1) / noiselevel
listmask = np.zeros_like(listdata)
if val1 >= snr:
upper_thresh = _intercept + noiselevel * snr * 0.5
listmask[listdata > upper_thresh] = 1.0
if val2 >= snr:
lower_thresh = numt1 - noiselevel * snr * 0.5
listmask[listdata <= lower_thresh] = 1.0
return listmask
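# Illustrative sketch (not part of the plugin): on a synthetic noisy profile with one bright column,
# Algorithm 4 above flags that column once its deviation from the fitted linear trend exceeds snr times
# the estimated noise level. For example, with assumed inputs
#   listdata = 1.0 + 0.01 * np.random.normal(size=100); listdata[50] += 5.0
# detect_stripe(listdata, snr=3.0) would typically return a mask with listmask[50] == 1.0 and the
# remaining entries left at 0. A perfectly noise-free profile raises the ValueError above instead.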
def remove_large_stripe(self, matindex, sinogram, snr, size):
"""Algorithm 5 in the paper. To remove large stripes.
Parameters
-----------
sinogram : 2D array.
snr : Ratio (>1.0) used to detect stripe locations.
size : Window size of the median filter.
Returns
-------
sinogram : stripe-removed sinogram.
"""
badpixelratio = 0.05
(nrow, ncol) = sinogram.shape
ndrop = np.int16(badpixelratio * nrow)
sinosorted = np.sort(sinogram, axis=0)
sinosmoothed = median_filter(sinosorted, (1, size))
list1 = np.mean(sinosorted[ndrop:nrow - ndrop], axis=0)
list2 = np.mean(sinosmoothed[ndrop:nrow - ndrop], axis=0)
listfact = np.divide(list1, list2,
out=np.ones_like(list1), where=list2 != 0)
listmask = self.detect_stripe(listfact, snr)
listmask = binary_dilation(listmask, iterations=1).astype(
listmask.dtype)
matfact = np.tile(listfact, (nrow, 1))
sinogram = sinogram / matfact
sinogram1 = np.transpose(sinogram)
matcombine = np.asarray(np.dstack((matindex, sinogram1)))
matsort = np.asarray(
[row[row[:, 1].argsort()] for row in matcombine])
matsort[:, :, 1] = np.transpose(sinosmoothed)
matsortback = np.asarray(
[row[row[:, 0].argsort()] for row in matsort])
sino_corrected =
|
np.transpose(matsortback[:, :, 1])
|
numpy.transpose
|
import glob
import os
import shutil
from functools import lru_cache
import numpy as np
from astropy.table import Table, vstack
import pandas as pd
from astropy.time import Time
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from scipy.ndimage import median_filter
from .utils import NUSTAR_MJDREF, splitext_improved, sec_to_mjd
from .utils import filter_with_region, fix_byteorder, rolling_std
from .utils import measure_overall_trend, cross_two_gtis, get_rough_trend_fun
from .utils import spline_through_data, cubic_interpolation, robust_poly_fit
from astropy.io import fits
import tqdm
from astropy import log
from statsmodels.robust import mad
import copy
import holoviews as hv
from holoviews.operation.datashader import datashade
from holoviews import opts
# import matplotlib.pyplot as plt
hv.extension('bokeh')
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, 'data')
def get_bad_points_db(db_file='BAD_POINTS_DB.dat'):
if not os.path.exists(db_file):
db_file = os.path.join(datadir, 'BAD_POINTS_DB.dat')
return np.genfromtxt(db_file, dtype=np.longdouble)
def flag_bad_points(all_data, db_file='BAD_POINTS_DB.dat'):
"""
Examples
--------
>>> db_file = 'dummy_bad_points.dat'
>>> np.savetxt(db_file, np.array([-1, 3, 10]))
>>> all_data = Table({'met': [0, 1, 2, 3, 4]})
>>> all_data = flag_bad_points(all_data, db_file='dummy_bad_points.dat')
INFO: ...
>>> np.all(all_data['flag'] == [False, False, False, True, False])
True
"""
if not os.path.exists(db_file):
return all_data
log.info("Flagging bad points...")
intv = [all_data['met'][0] - 0.5, all_data['met'][-1] + 0.5]
ALL_BAD_POINTS = np.genfromtxt(db_file)
ALL_BAD_POINTS.sort()
ALL_BAD_POINTS = np.unique(ALL_BAD_POINTS)
ALL_BAD_POINTS = ALL_BAD_POINTS[
(ALL_BAD_POINTS > intv[0]) & (ALL_BAD_POINTS < intv[1])]
idxs = all_data['met'].searchsorted(ALL_BAD_POINTS)
if 'flag' in all_data.colnames:
mask = np.array(all_data['flag'], dtype=bool)
else:
mask = np.zeros(len(all_data), dtype=bool)
for idx in idxs:
if idx >= mask.size:
continue
mask[idx] = True
all_data['flag'] = mask
return all_data
def find_good_time_intervals(temperature_table,
clock_jump_times=None):
start_time = temperature_table['met'][0]
stop_time = temperature_table['met'][-1]
clock_gtis = no_jump_gtis(
start_time, stop_time, clock_jump_times)
if 'gti' not in temperature_table.meta:
temp_gtis = temperature_gtis(temperature_table)
else:
temp_gtis = temperature_table.meta['gti']
gtis = cross_two_gtis(temp_gtis, clock_gtis)
return gtis
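# Illustrative note (assumption about the helper, which is defined elsewhere): cross_two_gtis is expected
# to return the intersection of two lists of good time intervals, e.g. crossing [[0., 100.]] with
# [[50., 150.]] would give [[50., 100.]], so the GTIs returned above are the times that are good both for
# the temperature table and for the no-clock-jump intervals.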
def calculate_stats(all_data):
log.info("Calculating statistics")
r_std = residual_roll_std(all_data['residual_detrend'])
scatter = mad(all_data['residual_detrend'])
print()
print("----------------------------- Stats -----------------------------------")
print()
print(f"Overall MAD: {scatter * 1e6:.0f} us")
print(f"Minimum scatter: ±{np.min(r_std) * 1e6:.0f} us")
print()
print("-----------------------------------------------------------------------")
def load_and_flag_clock_table(clockfile="latest_clock.dat", shift_non_malindi=False):
clock_offset_table = load_clock_offset_table(clockfile,
shift_non_malindi=shift_non_malindi)
clock_offset_table = flag_bad_points(
clock_offset_table, db_file='BAD_POINTS_DB.dat')
return clock_offset_table
def spline_detrending(clock_offset_table, temptable, outlier_cuts=None,
fixed_control_points=None):
tempcorr_idx = np.searchsorted(temptable['met'], clock_offset_table['met'])
tempcorr_idx[tempcorr_idx >= temptable['met'].size] = \
temptable['met'].size - 1
clock_residuals = \
np.array(clock_offset_table['offset'] -
temptable['temp_corr'][tempcorr_idx])
clock_mets = clock_offset_table['met']
if outlier_cuts is not None:
log.info("Cutting outliers...")
better_points = np.array(clock_residuals == clock_residuals,
dtype=bool)
for i, cut in enumerate(outlier_cuts):
mm = median_filter(clock_residuals, 11)
wh = ((clock_residuals[better_points] - mm[better_points]) < outlier_cuts[
i]) | ((clock_residuals[better_points] - mm[better_points]) <
outlier_cuts[0])
better_points[better_points] = ~wh
# Eliminate too recent flags, in the last month of solution.
one_month = 86400 * 30
do_not_flag = clock_mets > clock_mets.max() - one_month
better_points[do_not_flag] = True
clock_offset_table = clock_offset_table[better_points]
clock_residuals = clock_residuals[better_points]
detrend_fun = spline_through_data(
clock_offset_table['met'], clock_residuals, downsample=20,
fixed_control_points=fixed_control_points)
r_std = residual_roll_std(
clock_residuals - detrend_fun(clock_offset_table['met']))
clidx = np.searchsorted(clock_offset_table['met'], temptable['met'])
clidx[clidx == clock_offset_table['met'].size] = \
clock_offset_table['met'].size - 1
temptable['std'] = r_std[clidx]
temptable['temp_corr_trend'] = detrend_fun(temptable['met'])
temptable['temp_corr_detrend'] = \
temptable['temp_corr'] + temptable['temp_corr_trend']
return temptable
def eliminate_trends_in_residuals(temp_table, clock_offset_table,
gtis, debug=False,
fixed_control_points=None):
# good = clock_offset_table['met'] < np.max(temp_table['met'])
# clock_offset_table = clock_offset_table[good]
temp_table['temp_corr_raw'] = temp_table['temp_corr']
tempcorr_idx = np.searchsorted(temp_table['met'],
clock_offset_table['met'])
tempcorr_idx[tempcorr_idx == temp_table['met'].size] = \
temp_table['met'].size - 1
clock_residuals = \
clock_offset_table['offset'] - temp_table['temp_corr'][tempcorr_idx]
# Only use for interpolation Malindi points; however, during the Malindi
# problem in 2013, use the other data for interpolation but subtracting
# half a millisecond
use_for_interpol, bad_malindi_time = \
get_malindi_data_except_when_out(clock_offset_table)
clock_residuals[bad_malindi_time] -= 0.0005
good = (clock_residuals == clock_residuals) & ~clock_offset_table['flag'] & use_for_interpol
clock_offset_table = clock_offset_table[good]
clock_residuals = clock_residuals[good]
for g in gtis:
log.info(f"Treating data from METs {g[0]}--{g[1]}")
start, stop = g
cl_idx_start, cl_idx_end = \
np.searchsorted(clock_offset_table['met'], g)
if cl_idx_end - cl_idx_start == 0:
continue
temp_idx_start, temp_idx_end = \
np.searchsorted(temp_table['met'], g)
table_new = temp_table[temp_idx_start:temp_idx_end]
cltable_new = clock_offset_table[cl_idx_start:cl_idx_end]
met = cltable_new['met']
residuals = clock_residuals[cl_idx_start:cl_idx_end]
met0 = met[0]
met_rescale = (met - met0)/(met[-1] - met0)
_, m, q = measure_overall_trend(met_rescale, residuals)
# p_new = get_rough_trend_fun(met, residuals)
#
# if p_new is not None:
# p = p_new
poly_order = min(met.size // 300 + 1, 2)
p0 = np.zeros(poly_order + 1)
p0[0] = q
if p0.size > 1:
p0[1] = m
log.info(f"Fitting a polinomial of order {poly_order}")
p = robust_poly_fit(met_rescale, residuals, order=poly_order,
p0=p0)
# if poly_order >=2:
import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(met_rescale, residuals)
# plt.plot(met_rescale, p(met_rescale))
# plt.plot(met_rescale, m * (met_rescale) + q)
table_mets_rescale = (table_new['met'] - met0) / (met[-1] - met0)
corr = p(table_mets_rescale)
sub_residuals = residuals - p(met_rescale)
m = (sub_residuals[-1] - sub_residuals[0]) / (met_rescale[-1] - met_rescale[0])
q = sub_residuals[0]
# plt.plot(table_mets_rescale, corr + m * (table_mets_rescale - met_rescale[0]) + q, lw=2)
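        # Add a linear ramp through the first and last residuals left after
        # the polynomial fit, so the total correction passes through the
        # residuals at both ends of the interval.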
corr = corr + m * (table_mets_rescale - met_rescale[0]) + q
table_new['temp_corr'] += corr
if debug:
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(table_new['met'], table_new['temp_corr'], alpha=0.5)
plt.scatter(cltable_new['met'], cltable_new['offset'])
plt.plot(table_new['met'], table_new['temp_corr'])
plt.savefig(f'{int(start)}--{int(stop)}_detr.png')
plt.close(fig)
# plt.show()
# print(f'df/f = {(p(stop) - p(start)) / (stop - start)}')
bti_list = [[g0, g1] for g0, g1 in zip(gtis[:-1, 1], gtis[1:, 0])]
bti_list += [[gtis[-1, 1], clock_offset_table['met'][-1] + 10]]
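    # Bad time intervals (BTIs): the gaps between consecutive GTIs, plus a
    # final interval from the end of the last GTI to just past the last
    # clock-offset measurement.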
btis = np.array(bti_list)
# Interpolate the solution along bad time intervals
for g in btis:
start, stop = g
log.info(f"Treating bad data from METs {start}--{stop}")
temp_idx_start, temp_idx_end = \
np.searchsorted(temp_table['met'], g)
if temp_idx_end - temp_idx_start == 0 and \
temp_idx_end < len(temp_table):
continue
table_new = temp_table[temp_idx_start:temp_idx_end]
cl_idx_start, cl_idx_end = \
np.searchsorted(clock_offset_table['met'], g)
local_clockoff = clock_offset_table[cl_idx_start - 1:cl_idx_end + 1]
clock_off = local_clockoff['offset']
clock_tim = local_clockoff['met']
last_good_tempcorr = temp_table['temp_corr'][temp_idx_start - 1]
last_good_time = temp_table['met'][temp_idx_start - 1]
if temp_idx_end < temp_table['temp_corr'].size:
next_good_tempcorr = temp_table['temp_corr'][temp_idx_end + 1]
next_good_time = temp_table['met'][temp_idx_end + 1]
clock_off = np.concatenate(
([last_good_tempcorr], clock_off, [next_good_tempcorr]))
clock_tim = np.concatenate(
([last_good_time], clock_tim, [next_good_time]))
else:
clock_off = np.concatenate(
([last_good_tempcorr], clock_off))
clock_tim = np.concatenate(
([last_good_time], clock_tim))
next_good_tempcorr = clock_off[-1]
next_good_time = clock_tim[-1]
if cl_idx_end - cl_idx_start < 2:
log.info("Not enough good clock measurements. Interpolating")
m = (next_good_tempcorr - last_good_tempcorr) / \
(next_good_time - last_good_time)
q = last_good_tempcorr
table_new['temp_corr'][:] = \
q + (table_new['met'] - last_good_time) * m
continue
order = np.argsort(clock_tim)
clock_off_fun = interp1d(
clock_tim[order], clock_off[order], kind='linear',
assume_sorted=True)
table_new['temp_corr'][:] = clock_off_fun(table_new['met'])
log.info("Final detrending...")
table_new = spline_detrending(
clock_offset_table, temp_table,
outlier_cuts=[-0.002, -0.001],
fixed_control_points=fixed_control_points)
return table_new
def residual_roll_std(residuals, window=30):
"""
Examples
--------
>>> residuals = np.zeros(5000)
>>> residuals[:4000] = np.random.normal(0, 1, 4000)
>>> roll_std = residual_roll_std(residuals, window=500)
>>> np.allclose(roll_std[:3500], 1., rtol=0.2)
True
>>> np.all(roll_std[4500:] == 0.)
True
"""
r_std = rolling_std(residuals, window)
# r_std = rolling_std(np.diff(residuals), window) / np.sqrt(2)
# return np.concatenate(([r_std[:1], r_std]))
return r_std
def get_malindi_data_except_when_out(clock_offset_table):
"""Select offset measurements from Malindi, unless Malindi is out.
In the time interval between METs 93681591 and 98051312, Malindi was out
of work. For that time interval, we use all clock offset measurements
available. In all other cases, we just use Malindi
Parameters
----------
clock_offset_table : :class:`Table` object
Table containing the clock offset measurements. At least, it has to
contain a 'met' and a 'station' columns.
Returns
-------
use_for_interpol : array of ``bool``
Mask of "trustworthy" time measurements
bad_malindi_time : array of ``bool``
Mask of time measurements during Malindi outage
Example
-------
>>> clocktable = Table({'met': [93681592, 1e8, 1.5e8],
... 'station': ['SNG', 'MLD', 'UHI']})
>>> ufp, bmt = get_malindi_data_except_when_out(clocktable)
>>> assert np.all(ufp == [True, True, False])
>>> assert np.all(bmt == [True, False, False])
"""
# Covers 2012/12/20 - 2013/02/08 Malindi outage
# Also covers 2021/04/28 - 2021/05/06 issues with Malindi clock
no_malindi_intvs = [[93681591, 98051312],[357295300, 357972500]]
clock_mets = clock_offset_table['met']
bad_malindi_time = np.zeros(len(clock_mets), dtype=bool)
for nmi in no_malindi_intvs:
bad_malindi_time = bad_malindi_time | (clock_mets >= nmi[0]) & (
clock_mets < nmi[1])
malindi_stn = clock_offset_table['station'] == 'MLD'
use_for_interpol = \
(malindi_stn | bad_malindi_time)
return use_for_interpol, bad_malindi_time
def _look_for_temptable():
"""
Look for the default temperature table
Examples
--------
>>> import os
>>> tempt = _look_for_temptable() # doctest: +ELLIPSIS
...
>>> tempt.endswith('tp_eps_ceu_txco_tmp.csv')
True
"""
name = 'tp_eps_ceu_txco_tmp.csv'
fullpath = os.path.join(datadir, name)
if not os.path.exists(fullpath):
import shutil
import subprocess as sp
sp.check_call('wget --no-check-certificate https://www.dropbox.com/s/spkn4v018m5fvkf/tp_eps_ceu_txco_tmp.csv?dl=0 -O bu.csv'.split(" "))
shutil.copyfile('bu.csv', fullpath)
return fullpath
def _look_for_clock_offset_file():
"""
Look for the default clock offset table
Examples
--------
>>> import os
>>> tempt = _look_for_clock_offset_file()
>>> os.path.basename(tempt).startswith('nustar_clock_offsets')
True
"""
name = 'nustar_clock_offsets*.dat'
clockoff_files = sorted(glob.glob(os.path.join(datadir, name)))
assert len(clockoff_files) > 0, \
("Clock offset file not found. Have you run get_data.sh in "
"the data directory?")
return clockoff_files[-1]
def _look_for_freq_change_file():
"""
Look for the default frequency change table
Examples
--------
>>> import os
    >>> tempt = _look_for_freq_change_file()
    >>> os.path.basename(tempt).startswith('nustar_freq_changes')
True
"""
name = 'nustar_freq_changes*.dat'
fchange_files = sorted(glob.glob(os.path.join(datadir, name)))
assert len(fchange_files) > 0, \
("Frequency change file not found. Have you run get_data.sh in "
"the data directory?")
return fchange_files[-1]
def read_clock_offset_table(clockoffset_file=None, shift_non_malindi=False):
"""
Parameters
----------
clockoffset_file : str
e.g. 'nustar_clock_offsets-2018-10-30.dat'
Returns
-------
clock_offset_table : `astropy.table.Table` object
"""
if clockoffset_file is None:
clockoffset_file = _look_for_clock_offset_file()
log.info(f"Reading clock offsets from {clockoffset_file}")
clock_offset_table = Table.read(clockoffset_file,
format='csv', delimiter=' ',
names=['uxt', 'met', 'offset', 'divisor',
'station'])
if shift_non_malindi:
log.info("Shifting non-Malindi clock offsets down by 0.5 ms")
all_but_malindi = clock_offset_table['station'] != 'MLD'
clock_offset_table['offset'][all_but_malindi] -= 0.0005
clock_offset_table['mjd'] = sec_to_mjd(clock_offset_table['met'])
clock_offset_table.remove_row(len(clock_offset_table) - 1)
clock_offset_table['flag'] = np.zeros(len(clock_offset_table), dtype=bool)
log.info("Flagging bad points...")
ALL_BAD_POINTS = get_bad_points_db()
for b in ALL_BAD_POINTS:
nearest = np.argmin(np.abs(clock_offset_table['met'] - b))
if np.abs(clock_offset_table['met'][nearest] - b) < 1:
clock_offset_table['flag'][nearest] = True
return clock_offset_table
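# Manual corrections to the commanded-divisor history: 'delete' lists METs of
# known-bad entries (their rows are zeroed and dropped below), 'add' lists
# (uxt, met, divisor) rows that are appended before sorting.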
FREQ_CHANGE_DB= {"delete": [77509247, 78720802],
"add": [(1023848462, 77506869, 24000340),
(1025060017, 78709124, 24000337),
(0, 102576709, 24000339),
(1051488464, 105149249, 24000336),
(0, 182421890, 24000334),
(1157021125, 210681910, 24000337),
( 0, 215657278, 24000333),
(0, 215794126, 24000328),
(1174597307, 228258092, 24000334),
(1174759273, 228420058, 24000334)]}
def no_jump_gtis(start_time, stop_time, clock_jump_times=None):
"""
Examples
--------
>>> gtis = no_jump_gtis(0, 3, [1, 1.1])
>>> np.allclose(gtis, [[0, 1], [1, 1.1], [1.1, 3]])
True
>>> gtis = no_jump_gtis(0, 3)
>>> np.allclose(gtis, [[0, 3]])
True
"""
if clock_jump_times is None:
return [[start_time, stop_time]]
clock_gtis = []
current_start = start_time
for jump in clock_jump_times:
clock_gtis.append([current_start, jump])
current_start = jump
clock_gtis.append([current_start, stop_time])
clock_gtis = np.array(clock_gtis)
return clock_gtis
def temperature_gtis(temperature_table, max_distance=600):
"""
Examples
--------
>>> temperature_table = Table({'met': [0, 1, 2, 10, 11, 12]})
>>> gti = temperature_gtis(temperature_table, 5)
>>> np.allclose(gti, [[0, 2], [10, 12]])
True
>>> temperature_table = Table({'met': [-10, 0, 1, 2, 10, 11, 12, 20]})
>>> gti = temperature_gtis(temperature_table, 5)
>>> np.allclose(gti, [[0, 2], [10, 12]])
True
"""
temp_condition = np.concatenate(
([False], np.diff(temperature_table['met']) > max_distance, [False]))
temp_edges_l = np.concatenate((
[temperature_table['met'][0]],
temperature_table['met'][temp_condition[:-1]]))
temp_edges_h = np.concatenate((
        temperature_table['met'][temp_condition[1:]],
        [temperature_table['met'][-1]]))
temp_gtis = np.array(list(zip(
temp_edges_l, temp_edges_h)))
length = temp_gtis[:, 1] - temp_gtis[:, 0]
return temp_gtis[length > 0]
def read_freq_changes_table(freqchange_file=None, filter_bad=True):
"""Read the table with the list of commanded divisor frequencies.
Parameters
----------
freqchange_file : str
e.g. 'nustar_freq_changes-2018-10-30.dat'
Returns
-------
freq_changes_table : `astropy.table.Table` object
"""
if freqchange_file is None:
freqchange_file = _look_for_freq_change_file()
log.info(f"Reading frequency changes from {freqchange_file}")
freq_changes_table = Table.read(freqchange_file,
format='csv', delimiter=' ',
comment="\s*#",
names=['uxt', 'met', 'divisor'])
log.info("Correcting known bad frequency points")
for time in FREQ_CHANGE_DB['delete']:
bad_time_idx = freq_changes_table['met'] == time
freq_changes_table[bad_time_idx] = [0, 0, 0]
for line in FREQ_CHANGE_DB['add']:
freq_changes_table.add_row(line)
freq_changes_table = freq_changes_table[freq_changes_table['met'] > 0]
freq_changes_table.sort('met')
freq_changes_table['mjd'] = sec_to_mjd(freq_changes_table['met'])
freq_changes_table.remove_row(len(freq_changes_table) - 1)
freq_changes_table['flag'] = \
np.abs(freq_changes_table['divisor'] - 2.400034e7) > 20
if filter_bad:
freq_changes_table = freq_changes_table[~freq_changes_table['flag']]
return freq_changes_table
def _filter_table(tablefile, start_date=None, end_date=None, tmpfile='tmp.csv'):
try:
from datetime import timezone
except ImportError:
# Python 2
import pytz as timezone
if start_date is None:
start_date = 0
if end_date is None:
end_date = 99999
start_date = Time(start_date, format='mjd', scale='utc')
start_str = start_date.to_datetime(timezone=timezone.utc).strftime('%Y:%j')
start_yr, start_day = [float(n) for n in start_str.split(':')]
end_date = Time(end_date, format='mjd', scale='utc')
stop_str = end_date.to_datetime(timezone=timezone.utc).strftime('%Y:%j')
new_str = ""
with open(tablefile) as fobj:
before = True
for i, l in enumerate(fobj.readlines()):
if i == 0:
new_str += l
continue
l = l.strip()
# Now, let's check if the start date is before the start of the
# clock file. It's sufficient to do it for the first 2-3 line(s)
# (3 if there are units)
if i <=2:
try:
yr, day = [float(n) for n in l.split(':')[:2]]
except ValueError:
continue
if start_yr <= yr and start_day <= day:
before = False
if l.startswith(start_str) and before is True:
before = False
if before is False:
new_str += l + "\n"
if l.startswith(stop_str):
break
if new_str == "":
raise ValueError(f"No temperature information is available for the "
"wanted time range in {temperature_file}")
with open(tmpfile, "w") as fobj:
print(new_str, file=fobj)
return tmpfile
def read_csv_temptable(mjdstart=None, mjdstop=None, temperature_file=None):
if mjdstart is not None or mjdstop is not None:
mjdstart_use = mjdstart
mjdstop_use = mjdstop
if mjdstart is not None:
mjdstart_use -= 10
if mjdstop is not None:
mjdstop_use += 10
log.info("Filtering table...")
tmpfile = _filter_table(temperature_file,
start_date=mjdstart_use,
end_date=mjdstop_use, tmpfile='tmp.csv')
log.info("Done")
else:
tmpfile = temperature_file
temptable = Table.read(tmpfile)
temptable.remove_row(0)
log.info("Converting times (it'll take a while)...")
times_mjd = Time(temptable["Time"], scale='utc', format="yday",
in_subfmt="date_hms").mjd
log.info("Done.")
temptable["mjd"] = np.array(times_mjd)
temptable['met'] = (temptable["mjd"] - NUSTAR_MJDREF) * 86400
temptable.remove_column('Time')
temptable.rename_column('tp_eps_ceu_txco_tmp', 'temperature')
temptable["temperature"] = np.array(temptable["temperature"], dtype=float)
if os.path.exists('tmp.csv'):
os.unlink('tmp.csv')
return temptable
def read_saved_temptable(mjdstart=None, mjdstop=None,
temperature_file='temptable.hdf5'):
table = Table.read(temperature_file)
if mjdstart is None and mjdstop is None:
return table
if 'mjd' not in table.colnames:
table["mjd"] = sec_to_mjd(table['met'])
if mjdstart is None:
mjdstart = table['mjd'][0]
if mjdstop is None:
mjdstop = table['mjd'][-1]
good = (table['mjd'] >= mjdstart - 10)&(table['mjd'] <= mjdstop + 10)
if not np.any(good):
raise ValueError(f"No temperature information is available for the "
"wanted time range in {temperature_file}")
return table[good]
def read_fits_temptable(temperature_file):
with fits.open(temperature_file) as hdul:
temptable = Table.read(hdul['ENG_0x133'])
temptable.rename_column('TIME', 'met')
temptable.rename_column('sc_clock_ext_tmp', 'temperature')
for col in temptable.colnames:
if 'chu' in col:
temptable.remove_column(col)
temptable["mjd"] = sec_to_mjd(temptable['met'])
return temptable
def interpolate_temptable(temptable, dt=10):
time = temptable['met']
temperature = temptable['temperature']
new_times = np.arange(time[0], time[-1], dt)
idxs = np.searchsorted(time, new_times)
return Table({'met': new_times, 'temperature': temperature[idxs]})
def read_temptable(temperature_file=None, mjdstart=None, mjdstop=None,
dt=None, gti_tolerance=600):
if temperature_file is None:
temperature_file = _look_for_temptable()
log.info(f"Reading temperature_information from {temperature_file}")
ext = splitext_improved(temperature_file)[1]
if ext in ['.csv']:
temptable = read_csv_temptable(mjdstart, mjdstop, temperature_file)
elif ext in ['.hk', '.hk.gz']:
temptable = read_fits_temptable(temperature_file)
elif ext in ['.hdf5', '.h5']:
temptable = read_saved_temptable(mjdstart, mjdstop,
temperature_file)
temptable = fix_byteorder(temptable)
else:
raise ValueError('Unknown format for temperature file')
temp_gtis = temperature_gtis(temptable, gti_tolerance)
if dt is not None:
temptable = interpolate_temptable(temptable, dt)
else:
good = np.diff(temptable['met']) > 0
good = np.concatenate((good, [True]))
temptable = temptable[good]
temptable.meta['gti'] = temp_gtis
window = np.median(1000 / np.diff(temptable['met']))
window = int(window // 2 * 2 + 1)
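    # The Savitzky-Golay window spans roughly 1000 seconds of data, rounded
    # to an odd number of samples.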
log.info(f"Smoothing temperature with a window of {window} points")
temptable['temperature_smooth'] = \
savgol_filter(temptable['temperature'], window, 3)
temptable['temperature_smooth_gradient'] = \
np.gradient(temptable['temperature_smooth'], temptable['met'],
edge_order=2)
return temptable
@lru_cache(maxsize=64)
def load_temptable(temptable_name):
log.info(f"Reading data from {temptable_name}")
IS_CSV = temptable_name.endswith('.csv')
hdf5_name = temptable_name.replace('.csv', '.hdf5')
if IS_CSV and os.path.exists(hdf5_name):
IS_CSV = False
temptable_raw = read_temptable(hdf5_name)
else:
temptable_raw = read_temptable(temptable_name)
if IS_CSV:
log.info(f"Saving temperature data to {hdf5_name}")
temptable_raw.write(hdf5_name, overwrite=True)
return temptable_raw
@lru_cache(maxsize=64)
def load_freq_changes(freq_change_file):
log.info(f"Reading data from {freq_change_file}")
return read_freq_changes_table(freq_change_file)
@lru_cache(maxsize=64)
def load_clock_offset_table(clock_offset_file, shift_non_malindi=False):
return read_clock_offset_table(clock_offset_file,
shift_non_malindi=shift_non_malindi)
class ClockCorrection():
def __init__(self, temperature_file, mjdstart=None, mjdstop=None,
temperature_dt=10, adjust_absolute_timing=False,
force_divisor=None, label="", additional_days=2,
clock_offset_file=None,
hdf_dump_file='dumped_data.hdf5',
freqchange_file=None,
spline_through_residuals=False):
# hdf_dump_file_adj = hdf_dump_file.replace('.hdf5', '') + '_adj.hdf5'
self.temperature_dt = temperature_dt
self.temperature_file = temperature_file
self.freqchange_file = freqchange_file
# Initial value. it will be changed in the next steps
self.mjdstart = mjdstart
self.mjdstop = mjdstop
self.read_temptable()
if mjdstart is None:
mjdstart = sec_to_mjd(self.temptable['met'].min())
else:
mjdstart = mjdstart - additional_days / 2
self.clock_offset_file = clock_offset_file
self.clock_offset_table = \
read_clock_offset_table(self.clock_offset_file,
shift_non_malindi=True)
if mjdstop is None:
last_met = max(self.temptable['met'].max(),
self.clock_offset_table['met'].max())
mjdstop = sec_to_mjd(last_met)
mjdstop = mjdstop + additional_days / 2
self.mjdstart = mjdstart
self.mjdstop = mjdstop
self.met_start = (self.mjdstart - NUSTAR_MJDREF) * 86400
self.met_stop = (self.mjdstop - NUSTAR_MJDREF) * 86400
if label is None or label == "":
label = f"{self.met_start}-{self.met_stop}"
self.force_divisor = force_divisor
self.adjust_absolute_timing = adjust_absolute_timing
self.hdf_dump_file = hdf_dump_file
self.plot_file = label + "_clock_adjustment.png"
self.clock_jump_times = \
|
np.array([78708320, 79657575, 81043985, 82055671, 293346772])
|
numpy.array
|
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"H_class/Kane/builders" submodule
This sub-package builds 8-band k.p Hamiltonians for infinite nanowires.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import scipy.constants as cons
from MajoranaNanowires.Functions import diagonal, concatenate
#%%
def Kane_2D_builder(N,dis,mu,B=0,
params={},crystal='zincblende',
mesh=0,
sparse='yes'):
"""
    2D 8-band k.p Hamiltonian builder. It obtains the Hamiltonian for a 3D
    wire which is infinite in one direction, described using 8-band k.p theory.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential.
B: float
Magnetic field along the wire's direction.
params: dic or str
Kane/Luttinger parameters of the k.p Hamiltonian. 'InAs', 'InSb',
        'GaAs' and 'GaSb' select the default parameters for these materials.
crystal: {'zincblende','wurtzite','minimal'}
Crystal symmetry along the nanowire growth. 'minimal' is a minimal
        model in which the intra-valence band couplings are ignored.
mesh: mesh
If the discretization is homogeneous, mesh=0. Otherwise, mesh
provides a mesh with the position of the sites in the mesh.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
if (params=={} or params=='InAs') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InSb') and crystal=='minimal':
gamma0, gamma1, gamma2, gamma3 = 1, 0,0,0
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
elif (params=={} or params=='InAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 20.4, 8.3, 9.1
P, m_eff = 919.7, 1.0
EF, Ecv, Evv, Ep = 0, -417, -390, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 34.8, 15.5, 16.5
P, m_eff = 940.2, 1.0
EF, Ecv, Evv, Ep = 0, -235, -810, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaAs') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 6.98, 2.06, 2.93
P, m_eff = 1097.45, 1.0
EF, Ecv, Evv, Ep = 0, -1519, -341, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
Ep=3/(0.063)/(3/np.abs(Ecv)+1/np.abs(Ecv+Evv))
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='GaSb') and (crystal=='zincblende'):
gamma0, gamma1, gamma2, gamma3 = 1, 13.4, 4.7, 6.0
P, m_eff = 971.3, 1.0
EF, Ecv, Evv, Ep = 0, -812, -760, (cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
elif (params=={} or params=='InAs') and (crystal=='wurtzite'):
m_eff = 1.0
D1,D2,D3,D4=100.3,102.3,104.1,38.8
A1,A2,A3,A4,A5,A6,A7=-1.5726,-1.6521,-2.6301,0.5126,0.1172,1.3103,-49.04
B1,B2,B3=-2.3925,2.3155,-1.7231
e1,e2=-3.2005,0.6363
P1,P2=838.6,689.87
alpha1,alpha2,alpha3=-1.89,-28.92,-51.17
beta1,beta2=-6.95,-21.71
gamma1,Ec, Ev=53.06,0,-664.9
elif crystal=='minimal' or crystal=='zincblende':
gamma0, gamma1, gamma2, gamma3 = params['gamma0'], params['gamma1'], params['gamma2'], params['gamma3']
P, m_eff = params['P'], params['m_eff']
EF, Ecv, Evv = params['EF'], params['Ecv'], params['Evv']
if crystal=='zincblende':
Ep=(cons.hbar**2/(2*m_eff*cons.m_e)/cons.e*1e3*(1e9)**2)**(-1)*P**2
gamma1, gamma2, gamma3 = gamma1-np.abs(Ep/(3*Ecv)), gamma2-np.abs(Ep/(6*Ecv)), gamma3-np.abs(Ep/(6*Ecv))
## Make sure that the onsite parameters are arrays:
Nx, Ny = N[0], N[1]
if np.ndim(dis)==0:
dis_x, dis_y = dis, dis
else:
dis_x, dis_y = dis[0], dis[1]
if np.isscalar(mesh):
xi_x, xi_y = np.ones(N), np.ones(N)
elif len(mesh)==2:
xi_x, xi_y = dis_x/mesh[0]*np.ones(N), dis_y/mesh[1]*np.ones(N)
else:
xi_x, xi_y = dis_x/mesh[0], dis_y/mesh[1]
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny))
#Number of bands and sites
m_b = 8 * Nx * Ny
m_s = Nx * Ny
#Obtain the eigenenergies:
tx=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)**2)/cons.e*1e3*(xi_x[1::,:]+xi_x[:-1,:])/2
ty=cons.hbar**2/(2*m_eff*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3*(xi_y[:,1::]+xi_y[:,:-1])/2
txy=cons.hbar**2/(2*m_eff*cons.m_e*(dis_x*1e-9)*(dis_y*1e-9))/cons.e*1e3*np.append(np.zeros((1,Ny)),xi_x[1::,:]+xi_x[:-1,:],axis=0)/2*np.append(np.zeros((Nx,1)),xi_y[:,1::]+xi_y[:,:-1],axis=1)/2
txy=txy[1::,1::]
ax=(xi_x[1::,:]+xi_x[:-1,:])/2/(2*dis_x)
ay=(xi_y[:,1::]+xi_y[:,:-1])/2/(2*dis_y)
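    # tx, ty: finite-difference hopping energies (meV) along x and y; txy is
    # the mixed xy second-derivative coefficient; ax, ay are the
    # first-derivative (momentum) coefficients, each averaged over the
    # possibly non-uniform mesh through xi_x, xi_y.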
e = np.append(2*tx[0,:].reshape(1,Ny),np.append(tx[1::,:]+tx[:-1,:],2*tx[-1,:].reshape(1,Ny),axis=0),axis=0)
em = e - np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
e += np.append(2*ty[:,0].reshape(Nx,1),np.append(ty[:,1::]+ty[:,:-1],2*ty[:,-1].reshape(Nx,1),axis=1),axis=1)
ty=np.insert(ty,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
ay=np.insert(ay,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
txy=np.insert(txy,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
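    # Insert zeros at the boundaries between consecutive x slices so the
    # flattened y-direction couplings on the k=+/-1 diagonals do not wrap
    # from the last y site of one slice to the first y site of the next.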
e, em, mu, tx, ty = e.flatten(), em.flatten(), mu.flatten(), tx.flatten(), ty.flatten()
ax,ay=ax.flatten(),ay.flatten()
if not(B==0):
x, y = np.zeros(N), np.zeros(N)
if np.isscalar(mesh) and mesh==0:
mesh=np.ones((2,Nx,Ny))*dis[0]
for i in range(Nx):
for j in range(Ny):
x[i,j]=np.sum(mesh[0,0:i+1,j])-(Nx-1)*dis_x/2
y[i,j]=np.sum(mesh[1,i,0:j+1])-(Ny-1)*dis_y/2
for i in range(int((Nx-1)/2)):
x[Nx-i-1,:]=-x[i,:]
x[int((Nx-1)/2),:]=0
x=x/np.abs(x[0,0])*(Nx-1)*dis_x/2
for j in range(int((Ny-1)/2)):
y[:,Ny-j-1]=-y[:,j]
y[:,int((Ny-1)/2)]=0
y=y/np.abs(y[0,0])*(Ny-1)*dis_y/2
fact_B=cons.e/cons.hbar*1e-18
Mx, My = -fact_B*y/2*B, fact_B*x/2*B
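        # Symmetric-gauge vector potential terms: (Mx, My) = e*B/(2*hbar) * (-y, x),
        # with fact_B carrying the unit conversion (result in 1/nm).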
Mx_kx, My_ky = (xi_x[1::,:]*Mx[1::,:]+xi_x[:-1,:]*Mx[:-1,:])/2/(2*dis_x), (xi_y[:,1::]*My[:,1::]+xi_y[:,:-1]*My[:,:-1])/2/(2*dis_y)
My_ky=np.insert(My_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mm_kx, Mm_ky = (xi_x[1::,:]*(Mx[1::,:]-1j*My[1::,:])+xi_x[:-1,:]*(Mx[:-1,:]-1j*My[:-1,:]))/2/(2*dis_x), -(xi_y[:,1::]*(Mx[:,1::]+1j*My[:,1::])+xi_y[:,:-1]*(Mx[:,:-1]+1j*My[:,:-1]))/2/(2*dis_y)
Mm_ky=np.insert(Mm_ky,np.arange(Ny-1,(Ny-1)*Nx,(Ny-1)),np.zeros(Nx-1))
Mx, My = Mx.flatten(), My.flatten()
Mx_kx, My_ky = Mx_kx.flatten(), My_ky.flatten()
Mm_kx, Mm_ky = Mm_kx.flatten(), Mm_ky.flatten()
## Built the Hamiltonian:
if crystal=='zincblende':
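        # Each operator below is stored as a (values, (row_indices, col_indices))
        # pair built with the diagonal()/concatenate() helpers; all pairs are
        # later combined and passed to scipy.sparse.csc_matrix.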
T=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
G1=(concatenate((P/np.sqrt(6)*ay,-P/np.sqrt(6)*ay,-1j*P/np.sqrt(6)*ax,1j*P/np.sqrt(6)*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
O1=(concatenate(((-1/np.sqrt(3)*(gamma2+2*gamma3))*em,-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-tx*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(ty*(-1/np.sqrt(3)*(gamma2+2*gamma3))),ty*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),
(1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3))),1j*txy/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)),-1j*txy[0:-1]/2*(-1/np.sqrt(3)*(gamma2+2*gamma3)))),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
if not(B==0):
B_m=((Mx-1j*My),(diagonal(m_s)))
B_s=(((Mx**2+My**2)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k=(concatenate((-2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*My_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mx_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
B_s_m=(((Mx**2-My**2-2*1j*Mx*My)*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3),(diagonal(m_s)))
B_k_m=(concatenate((2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*Mm_ky*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
-2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3,
2*1j*Mm_kx*cons.hbar**2/(2*m_eff*cons.m_e*1e-18)/cons.e*1e3)),concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
### Upper diagonal:
## row 0:
# (0,2)
args=G1[0]
index=(G1[1][0]+0,G1[1][1]+2*m_s)
# (0,4)
args=np.append(args,np.conj(G1[0])*np.sqrt(3))
index=(np.append(index[0],G1[1][1]+0),np.append(index[1],G1[1][0]+4*m_s))
# (0,7)
args=np.append(args,G1[0]*np.sqrt(2))
index=(np.append(index[0],G1[1][0]+0),np.append(index[1],G1[1][1]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-G1[0]*np.sqrt(3))
index=(np.append(index[0],G1[1][0]+m_s), np.append(index[1],G1[1][1]+3*m_s))
# (1,5)
args=np.append(args,-np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s),np.append(index[1],G1[1][0]+5*m_s))
# (1,6)
args=np.append(args,np.sqrt(2)*np.conj(G1[0]))
index=(np.append(index[0],G1[1][1]+m_s), np.append(index[1],G1[1][0]+6*m_s))
## row 2:
# (2,4)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+2*m_s),np.append(index[1],O1[1][1]+4*m_s))
# (2,7)
args=np.append(args,-np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+7*m_s))
## row 3:
# (3,5)
args=np.append(args,O1[0])
index=(np.append(index[0],O1[1][0]+3*m_s),np.append(index[1],O1[1][1]+5*m_s))
# (3,6)
args=np.append(args,-np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+3*m_s),np.append(index[1],O1[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,np.sqrt(2)*np.conj(O1[0]))
index=(np.append(index[0],O1[1][1]+4*m_s),np.append(index[1],O1[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*T[0]*gamma3)
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+6*m_s))
        # If there is a magnetic field:
if not(B==0):
## row 0:
# (0,2)
args=np.append(args,P/np.sqrt(6)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+2*m_s))
# (0,4)
args=np.append(args,P/np.sqrt(2)*B_m[0])
index=(np.append(index[0],B_m[1][0]+0),np.append(index[1],B_m[1][1]+4*m_s))
# (0,7)
args=np.append(args,P/np.sqrt(3)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+0),np.append(index[1],B_m[1][0]+7*m_s))
## row 1:
# (1,3)
args=np.append(args,-P/np.sqrt(2)*np.conj(B_m[0]))
index=(np.append(index[0],B_m[1][1]+m_s),np.append(index[1],B_m[1][0]+3*m_s))
# (1,5)
args=np.append(args,-P/np.sqrt(6)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+5*m_s))
# (1,6)
args=np.append(args,P/np.sqrt(3)*B_m[0])
index=(np.append(index[0],B_m[1][0]+m_s),np.append(index[1],B_m[1][1]+6*m_s))
## row 2:
# (2,7)
args=np.append(args,-np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+7*m_s))
# (2,4)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+2*m_s),np.append(index[1],B_s_m[1][1]+4*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+2*m_s),np.append(index[1],B_k_m[1][1]+4*m_s))
## row 3:
# (3,5)
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_s_m[0])
index=(np.append(index[0],B_s_m[1][0]+3*m_s),np.append(index[1],B_s_m[1][1]+5*m_s))
args=np.append(args,-1/np.sqrt(3)*(gamma2+2*gamma3)*B_k_m[0])
index=(np.append(index[0],B_k_m[1][0]+3*m_s),np.append(index[1],B_k_m[1][1]+5*m_s))
# (3,6)
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+3*m_s),np.append(index[1],B_s_m[1][0]+6*m_s))
args=np.append(args,np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+3*m_s),np.append(index[1],B_k_m[1][0]+6*m_s))
## row 4:
# (4,7)
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_s_m[0]))
index=(np.append(index[0],B_s_m[1][1]+4*m_s),np.append(index[1],B_s_m[1][0]+7*m_s))
args=np.append(args,-np.sqrt(2/3)*(gamma2+2*gamma3)*np.conj(B_k_m[0]))
index=(np.append(index[0],B_k_m[1][1]+4*m_s),np.append(index[1],B_k_m[1][0]+7*m_s))
## row 5:
# (5,6)
args=np.append(args,np.sqrt(2)*gamma3*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,np.sqrt(2)*gamma3*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+6*m_s))
### Lower diagonal:
args=np.append(args,np.conj(args))
index=(np.append(index[0],index[1]),np.append(index[1],index[0]))
### Diagonal:
# (0,0)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+0),np.append(index[1],T[1][1]+0))
# (1,1)
args=np.append(args,T[0])
index=(np.append(index[0],T[1][0]+m_s),np.append(index[1],T[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+2*m_s),np.append(index[1],T[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+3*m_s),np.append(index[1],T[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*T[0])
index=(np.append(index[0],T[1][0]+4*m_s),np.append(index[1],T[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*T[0])
index=(np.append(index[0],T[1][0]+5*m_s),np.append(index[1],T[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+6*m_s),np.append(index[1],T[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*T[0])
index=(np.append(index[0],T[1][0]+7*m_s),np.append(index[1],T[1][1]+7*m_s))
if not(B==0):
# (0,0)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+0),np.append(index[1],B_s[1][1]+0))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+0),np.append(index[1],B_k[1][1]+0))
# (1,1)
args=np.append(args,B_s[0])
index=(np.append(index[0],B_s[1][0]+m_s),np.append(index[1],B_s[1][1]+m_s))
args=np.append(args,B_k[0])
index=(np.append(index[0],B_k[1][0]+m_s),np.append(index[1],B_k[1][1]+m_s))
# (2,2)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+2*m_s),np.append(index[1],B_s[1][1]+2*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+2*m_s),np.append(index[1],B_k[1][1]+2*m_s))
# (3,3)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+3*m_s),np.append(index[1],B_s[1][1]+3*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+3*m_s),np.append(index[1],B_k[1][1]+3*m_s))
# (4,4)
args=np.append(args,-(gamma3+gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+4*m_s),np.append(index[1],B_s[1][1]+4*m_s))
args=np.append(args,-(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+4*m_s),np.append(index[1],B_k[1][1]+4*m_s))
# (5,5)
args=np.append(args,(gamma3-gamma1)*B_s[0])
index=(np.append(index[0],B_s[1][0]+5*m_s),np.append(index[1],B_s[1][1]+5*m_s))
args=np.append(args,(gamma3-gamma1)*B_k[0])
index=(np.append(index[0],B_k[1][0]+5*m_s),np.append(index[1],B_k[1][1]+5*m_s))
# (6,6)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+6*m_s),np.append(index[1],B_s[1][1]+6*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+6*m_s),np.append(index[1],B_k[1][1]+6*m_s))
# (7,7)
args=np.append(args,-gamma1*B_s[0])
index=(np.append(index[0],B_s[1][0]+7*m_s),np.append(index[1],B_s[1][1]+7*m_s))
args=np.append(args,-gamma1*B_k[0])
index=(np.append(index[0],B_k[1][0]+7*m_s),np.append(index[1],B_k[1][1]+7*m_s))
### Built matrix:
H=scipy.sparse.csc_matrix((args,index),shape=(m_b,m_b))
if sparse=='no':
H=H.todense()
### Add potential and band edges:
H[diagonal(m_b)]+=-np.tile(mu,8) + concatenate((EF*np.ones(2*m_s),Ecv*np.ones(4*m_s),(Ecv+Evv)*np.ones(2*m_s)))
elif crystal=='wurtzite':
Kc=(concatenate((e,-tx,-tx,-ty,-ty)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1))))
Kp=(concatenate((ay,-ay,-1j*ax,1j*ax)),
concatenate((diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny))))
Kpc=(concatenate((em,-tx,-tx,ty,ty,-1j*txy[0:-1]/2,1j*txy/2,1j*txy/2,-1j*txy[0:-1]/2)),
concatenate((diagonal(m_s),diagonal(m_s,k=Ny),diagonal(m_s,k=-Ny),diagonal(m_s,k=1),diagonal(m_s,k=-1),diagonal(m_s,k=Ny+1),diagonal(m_s,k=Ny-1,init=1),diagonal(m_s,k=-Ny+1,init=1),diagonal(m_s,k=-Ny-1))))
### Upper diagonal:
## row 0:
# (0,1)
args=-A5*np.conj(Kpc[0])
index=(Kpc[1][1]+0,Kpc[1][0]+m_s)
# (0,2)
args=np.append(args,1j*(A7-alpha1/np.sqrt(2))*np.conj(Kp[0]))
index=(
|
np.append(index[0],Kp[1][1]+0)
|
numpy.append
|
import numpy as np
import torch
import os
from random import shuffle
import datatypes as dt
from data_utils import save_data,save_all
from cardlib import encode,decode,winner,hand_rank,rank
from card_utils import to_2d,suits_to_str,convert_numpy_to_rust,convert_numpy_to_2d,to_52_vector,swap_suits
from create_hands import straight_flushes,quads,full_houses,flushes,straights,trips,two_pairs,one_pairs,high_cards,hero_5_cards,sort_hand
class CardDataset(object):
def __init__(self,params):
self.deck = np.arange(52)
self.suit_types = np.arange(dt.SUITS.LOW,dt.SUITS.HIGH)
self.rank_types = np.arange(dt.RANKS.LOW,dt.RANKS.HIGH)
self.alphabet_suits = ['s','h','d','c']
self.suit_dict = {suit:alpha for suit,alpha in zip(self.suit_types,self.alphabet_suits)}
self.fivecard_indicies = np.arange(5)
self.params = params
def generate_dataset(self,params):
"""
Hands in test set may or may not match hands in training set.
Builds regression datasets with a target of win,loss,tie [1,-1,0]
"""
if params['datatype'] == dt.DataTypes.THIRTEENCARD:
trainX,trainY = self.build_13card(params[dt.Globals.INPUT_SET_DICT['train']],params['encoding'])
valX,valY = self.build_13card(params[dt.Globals.INPUT_SET_DICT['val']],params['encoding'])
elif params['datatype'] == dt.DataTypes.TENCARD:
trainX,trainY = self.build_10card(params[dt.Globals.INPUT_SET_DICT['train']],params['encoding'])
valX,valY = self.build_10card(params[dt.Globals.INPUT_SET_DICT['val']],params['encoding'])
elif params['datatype'] == dt.DataTypes.PARTIAL:
trainX,trainY = self.build_partial(params[dt.Globals.INPUT_SET_DICT['train']])
valX,valY = self.build_partial(params[dt.Globals.INPUT_SET_DICT['val']])
else:
raise ValueError(f"Datatype {params['datatype']} not understood")
trainX,trainY,valX,valY = CardDataset.to_torch([trainX,trainY,valX,valY])
print(f'trainX: {trainX.shape}, trainY {trainY.shape}, valX {valX.shape}, valY {valY.shape}')
return trainX,trainY,valX,valY
def build_13card(self,iterations,encoding):
"""
Generates X = (i,13,2) y = [-1,0,1]
"""
X,y = [],[]
for i in range(iterations):
cards = np.random.choice(self.deck,13,replace=False)
rust_cards = convert_numpy_to_rust(cards)
encoded_cards = [encode(c) for c in rust_cards]
hand1 = encoded_cards[:4]
hand2 = encoded_cards[4:8]
board = encoded_cards[8:]
result = winner(hand1,hand2,board)
if encoding == '2d':
cards2d = convert_numpy_to_2d(cards)
X.append(cards2d)
else:
X.append(cards)
y.append(result)
X = np.stack(X)
y = np.stack(y)[:,None]
return X,y
def build_10card(self,iterations,encoding):
"""
Generates X = (i,10,2) y = [-1,0,1]
"""
X,y = [],[]
for i in range(iterations):
category = np.random.choice(np.arange(9))
hand1 = self.create_handtypes(category)
hand2 = self.create_handtypes(category)
encoded_hand1 = [encode(c) for c in hand1]
encoded_hand2 = [encode(c) for c in hand2]
hand1_rank = rank(encoded_hand1)
hand2_rank = rank(encoded_hand2)
if hand1_rank > hand2_rank:
result = 1
elif hand1_rank < hand2_rank:
result = -1
else:
result = 0
X.append(np.vstack((hand1,hand2)))
y.append(result)
X = np.stack(X)
y = np.stack(y)[:,None]
return X,y
def build_hand_classes(self,params):
"""
Builds categorical targets of hand class.
|Hand Value|Unique|Distinct|
|Straight Flush |40 |10|
|Four of a Kind |624 |156|
|Full Houses |3744 |156|
|Flush |5108 |1277|
|Straight |10200 |10|
|Three of a Kind|54912 |858|
|Two Pair |123552 |858|
|One Pair |1098240 |2860|
|High Card |1302540 |1277|
|TOTAL |2598960 |7462|
"""
for dataset in ['train','val']:
save_path = os.path.join(params['save_dir'],dataset)
xpath = f"{os.path.join(save_path,dataset)}X"
ypath = f"{os.path.join(save_path,dataset)}Y"
X = []
y = []
num_hands = params[dt.Globals.INPUT_SET_DICT[dataset]] // 9
if params['datatype'] == dt.DataTypes.NINECARD:
for category in dt.Globals.HAND_TYPE_DICT.keys():
print('category',category)
for _ in range(num_hands):
                        hand,board,_ = self.create_ninecard_handtypes(category)
shuffled_hand,shuffled_board = CardDataset.shuffle_hand_board(hand,board)
x_input = np.concatenate([shuffled_hand,shuffled_board],axis=0)
X.append(x_input)
y.append(category)
elif params['datatype'] == dt.DataTypes.FIVECARD:
for category in dt.Globals.HAND_TYPE_DICT.keys():
print('category',category)
for _ in range(num_hands):
X.append(self.create_handtypes(category))
y.append(category)
else:
raise ValueError(f"{params['datatype']} datatype not understood")
X = np.stack(X)
y = np.stack(y)
save_data(X,xpath)
save_data(y,ypath)
def create_ninecard_handtypes(self,category):
"""
        Grab 5 cards representing that hand type, then split into hand/board and add cards;
        if the hand type hasn't changed, store the hand.
"""
initial_cards = self.create_handtypes(category)
flat_card_vector = to_52_vector(initial_cards)
remaining_deck = list(set(self.deck) - set(flat_card_vector))
extra_cards_52 = np.random.choice(remaining_deck,4,replace=False)
extra_cards_2d = to_2d(extra_cards_52)
hand = np.concatenate([initial_cards[:2],extra_cards_2d[:2]],axis=0)
board = np.concatenate([initial_cards[2:],extra_cards_2d[2:]],axis=0)
assert(False not in [(s[1] > dt.SUITS.LOW-1 and s[1] < dt.SUITS.HIGH) == True for s in hand]),f'hand outside range {hand}'
assert(False not in [(s[1] > dt.SUITS.LOW-1 and s[1] < dt.SUITS.HIGH) == True for s in board]),f'board outside range {board}'
en_hand = [encode(c) for c in hand]
en_board = [encode(c) for c in board]
hand_strength = hand_rank(en_hand,en_board)
hand_type = CardDataset.find_strength(hand_strength)
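        # Rejection sampling: redraw the four extra cards until the full
        # 9-card hand still evaluates to the requested category.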
while hand_type != category:
extra_cards_52 = np.random.choice(remaining_deck,4,replace=False)
extra_cards_2d = to_2d(extra_cards_52)
hand = np.concatenate([initial_cards[:2],extra_cards_2d[:2]],axis=0)
board = np.concatenate([initial_cards[2:],extra_cards_2d[2:]],axis=0)
assert(False not in [(s[1] > dt.SUITS.LOW-1 and s[1] < dt.SUITS.HIGH) == True for s in hand]),f'hand outside range {hand}'
assert(False not in [(s[1] > dt.SUITS.LOW-1 and s[1] < dt.SUITS.HIGH) == True for s in board]),f'board outside range {board}'
en_hand = [encode(c) for c in hand]
en_board = [encode(c) for c in board]
hand_strength = hand_rank(en_hand,en_board)
hand_type = CardDataset.find_strength(hand_strength)
return hand,board,hand_strength
def build_blockers(self,iterations):
"""
        Board is always a flush. The hand either contains the ace blocker of the board suit or not, and never makes a flush.
"""
X = []
y = []
for _ in range(iterations):
ranks = np.arange(2,14)
board = np.random.choice(ranks,5,replace=False)
board_suits = np.full(5,np.random.choice(self.suit_types))
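            # Board: five distinct ranks, all in one randomly chosen suit (always a flush).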
hand = np.random.choice(ranks,3,replace=False)
board_suit = set(board_suits)
other_suits = set(self.suit_types).difference(board_suit)
hand_suits = np.random.choice(list(other_suits),3)
ace = np.array([14])
ace_suit_choices = [list(board_suit)[0],list(other_suits)[0]]
ace_suit = np.random.choice(ace_suit_choices,1)
hand_ranks = np.hstack((ace,hand))
hand_suits = np.hstack((ace_suit,hand_suits))
hand = np.stack((hand_ranks,hand_suits),axis=-1)
board = np.stack((board,board_suits),axis=-1)
shuffled_hand,shuffled_board = CardDataset.shuffle_hand_board(hand,board)
            result = 1 if ace_suit[0] == list(board_suit)[0] else 0
X_input = np.concatenate([shuffled_hand,shuffled_board],axis=0)
X.append(X_input)
y.append(result)
X = np.stack(X)
y = np.stack(y)[:,None]
return X,y
def build_partial(self,iterations):
"""
        inputs consisting of hand + board during all streets
4+padding all the way to the full 9 cards. Always evaluated vs random hand.
        inputs are sorted for data efficiency
target = {-1,0,1}
"""
X = []
y = []
for category in dt.Globals.HAND_TYPE_DICT.keys():
for _ in range(iterations // 9):
hero_hand,board,_ = self.create_ninecard_handtypes(category)
ninecards = np.concatenate([hero_hand,board],axis=0)
flat_card_vector = to_52_vector(ninecards)
available_cards = list(set(self.deck) - set(flat_card_vector))
flat_vil_hand = np.random.choice(available_cards,4,replace=False)
vil_hand = np.array(to_2d(flat_vil_hand))
en_hand = [encode(c) for c in hero_hand]
en_vil = [encode(c) for c in vil_hand]
en_board = [encode(c) for c in board]
result = winner(en_hand,en_vil,en_board)
                # hand + board at all stages. Shuffle cards so it's more difficult for the network
np.random.shuffle(hero_hand)
pure_hand = np.concatenate([hero_hand,np.zeros((5,2))],axis=0)
np.random.shuffle(hero_hand)
hand_flop = np.concatenate([hero_hand,board[:3],np.zeros((2,2))],axis=0)
np.random.shuffle(hero_hand)
hand_turn = np.concatenate([hero_hand,board[:4],np.zeros((1,2))],axis=0)
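                # The same hand is stored at four stages (hand only, flop,
                # turn, full nine cards), each labelled with the same
                # showdown result.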
X.append(pure_hand)
X.append(hand_flop)
X.append(hand_turn)
X.append(ninecards)
y.append(result)
y.append(result)
y.append(result)
y.append(result)
X = np.stack(X)
y = np.stack(y)[:,None]
return X,y
def build_hand_ranks_five(self,reduce_suits=True,valset=False):
"""
rank 5 card hands
input 5 cards
target = {0-7462}
"""
switcher = {
0: straight_flushes,
1: quads,
2: full_houses,
3: flushes,
4: straights,
5: trips,
6: two_pairs,
7: one_pairs,
8: high_cards
}
# if you want to make up for the samples
repeats = {
0:10,
1:4,
2:10,
3:4,
4:1,
5:1,
6:1,
7:1,
8:1
}
X = []
y = []
for category in dt.Globals.HAND_TYPE_DICT.keys():
if valset:
five_hands = switcher[category]()
for hand in five_hands:
sorted_hand = np.transpose(sort_hand(hand))
en_hand = [encode(c) for c in sorted_hand]
X.append(sorted_hand)
y.append(rank(en_hand))
else:
for _ in range(repeats[category]):
five_hands = switcher[category]()
for hand in five_hands:
hero_hands = hero_5_cards(hand)
for h in hero_hands:
en_hand = [encode(c) for c in h]
X.append(np.transpose(sort_hand(
|
np.transpose(h)
|
numpy.transpose
|
import datetime as dat
import numpy as np
import os
import pandas as pd
import scipy as sp
import scipy.stats as scat
import sys
print(os.getcwd())
c_dir = os.path.join(os.getcwd(), 'model_sources/migliore_etal_2018/MiglioreEtAl2018PLOSCompBiol2018')
res_dir = os.path.join(os.getcwd(), 'results/Tuning Calcium Model/RXD')
os.chdir(c_dir)
from mpl_toolkits.mplot3d import Axes3D
from itertools import cycle
from neuron import h
from os.path import join
from scipy import integrate as spint
from scipy import optimize as spopt
import matplotlib.pyplot as plt
import bokeh.io as bkio
import bokeh.layouts as blay
import bokeh.models as bmod
import bokeh.plotting as bplt
import various_scripts.frequency_analysis as fan
from bokeh.palettes import Category20 as palette
from bokeh.palettes import Category20b as paletteb
from selenium import webdriver
colrs = palette[20] + paletteb[20]
import plot_results as plt_res
#
# module_path = os.getcwd() # os.path.abspath(os.path.join('../..'))
# if module_path not in sys.path:
# sys.path.append(module_path)
import migliore_python as mig_py
mu = u'\u03BC'
delta = u'\u0394'
tau = u'\u03C4'
# chrome_path = r'/usr/local/bin/chromedriver'
# my_opts = webdriver.chrome.options.Options()
# my_opts.add_argument('start-maximized')
# my_opts.add_argument('disable-infobars')
# my_opts.add_argument('--disable-extensions')
# my_opts.add_argument('window-size=1200,1000')
# my_opts.add_argument('--headless')
def single_exponential(x_data, tau):
x_data = np.array(x_data)
return np.exp(-x_data / tau)
def single_exponential_rise(x_data, tau, asym, tshift):
return asym * (1.0 - np.exp(-(x_data - tshift) / tau))
def estimate_decay_constant(t_vals, f_vals, change_tol=0.001):
max_i = np.argmax(f_vals)
plus_i = range(max_i, f_vals.size)
pos_i = np.where(f_vals > 0)
targ_i = np.intersect1d(plus_i, pos_i)
min_val = 0.999 * np.min(f_vals[targ_i])
dec_t = t_vals[targ_i] - t_vals[max_i]
dec_norm = (f_vals[targ_i] - min_val) / (f_vals[max_i] - min_val)
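    # Fit a single decaying exponential to the normalized decay from the
    # peak; the first fitted parameter is the time constant tau.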
opt_params, pcov = sp.optimize.curve_fit(single_exponential, dec_t, dec_norm)
return opt_params
def run_recharge_simulation(param_dict, sim_dur=180000):
t_path = join(os.getcwd(), 'morphologies/mpg141209_A_idA.asc')
t_steps = np.arange(0, sim_dur, 100)
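    # Sample the state every 100 ms over the whole simulation.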
cell = mig_py.MyCell(t_path, True, param_dict)
# Calreticulin Parameter Values
total_car = param_dict['car_total']
KD_car = param_dict['car_KD']
car_kon = param_dict['car_kon']
car_koff = KD_car * car_kon
carca_init = total_car * 0.001 / (KD_car + 0.01)
car_init = total_car - carca_init
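    # Initial partition of the total calreticulin between free (car) and
    # Ca-bound (carca) forms.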
ap_secs = [cell.apical[i] for i in range(10)]
t_secs = cell.somatic + ap_secs
h.CVode().active(True)
h.finitialize(-65.0)
for sec in t_secs:
cell.ca[cell.er].nodes(sec).concentration = 0.001
cell.carca.nodes(sec).concentration = carca_init
cell.car.nodes(sec).concentration = car_init
h.CVode().re_init()
# Create Numpy Arrays For Storing Data
ca_cyt_arr = np.zeros((t_steps.shape[0], 3))
ca_er_arr = np.zeros((t_steps.shape[0], 3))
carca_arr = np.zeros(t_steps.shape)
cbdhca_arr = np.zeros(t_steps.shape)
cbdlca_arr = np.zeros(t_steps.shape)
ogb1ca_arr = np.zeros(t_steps.shape)
# dyeca_arr = np.zeros(t_steps.shape)
#
# e1_arr = np.zeros(t_steps.shape)
# e1_2ca_arr = np.zeros(t_steps.shape)
# e1_2ca_p_arr = np.zeros(t_steps.shape)
# e2_2ca_p_arr = np.zeros(t_steps.shape)
# e2_p_arr = np.zeros(t_steps.shape)
# e2_arr = np.zeros(t_steps.shape)
#
# c1_arr = np.zeros(t_steps.shape)
# c2_arr = np.zeros(t_steps.shape)
# c3_arr = np.zeros(t_steps.shape)
# c4_arr = np.zeros(t_steps.shape)
# o5_arr = np.zeros(t_steps.shape)
# o6_arr = np.zeros(t_steps.shape)
for t_i, t_step in enumerate(t_steps):
h.continuerun(t_step)
ca_cyt_arr[t_i, 0] = cell.ca[cell.cyt].nodes(cell.somatic[0])(0.5)[0].concentration
ca_cyt_arr[t_i, 1] = cell.ca[cell.cyt].nodes(cell.apical[0])(0.5)[0].concentration
ca_cyt_arr[t_i, 2] = cell.ca[cell.cyt].nodes(cell.apical[9])(0.5)[0].concentration
ca_er_arr[t_i, 0] = cell.ca[cell.er].nodes(cell.somatic[0])(0.5)[0].concentration
ca_er_arr[t_i, 1] = cell.ca[cell.er].nodes(cell.apical[0])(0.5)[0].concentration
ca_er_arr[t_i, 2] = cell.ca[cell.er].nodes(cell.apical[9])(0.5)[0].concentration
cbdhca_arr[t_i] = cell.cbdhca[cell.cyt].nodes(cell.somatic[0])(0.5)[0].concentration
cbdlca_arr[t_i] = cell.cbdlca[cell.cyt].nodes(cell.somatic[0])(0.5)[0].concentration
ogb1ca_arr[t_i] = cell.dyeca[cell.cyt].nodes(cell.somatic[0])(0.5)[0].concentration
carca_arr[t_i] = cell.carca[cell.er].nodes(cell.somatic[0])(0.5)[0].concentration
# dyeca_arr[t_i] = dyeca.nodes(soma)(0.5)[0].concentration
#
# e1_arr[t_i] = e1_serca.nodes(soma)(0.5)[0].concentration
# e1_2ca_arr[t_i] = e1_2ca_serca.nodes(soma)(0.5)[0].concentration
# e1_2ca_p_arr[t_i] = e1_2ca_p_serca.nodes(soma)(0.5)[0].concentration
# e2_2ca_p_arr[t_i] = e2_2ca_p_serca.nodes(soma)(0.5)[0].concentration
# e2_p_arr[t_i] = e2_p_serca.nodes(soma)(0.5)[0].concentration
# e2_arr[t_i] = e2_serca.nodes(soma)(0.5)[0].concentration
#
# c1_arr[t_i] = c1_ip3r.nodes(soma)(0.5)[0].concentration
# c2_arr[t_i] = c2_ip3r.nodes(soma)(0.5)[0].concentration
# c3_arr[t_i] = c3_ip3r.nodes(soma)(0.5)[0].concentration
# c4_arr[t_i] = c4_ip3r.nodes(soma)(0.5)[0].concentration
# o5_arr[t_i] = o5_ip3r.nodes(soma)(0.5)[0].concentration
# o6_arr[t_i] = o6_ip3r.nodes(soma)(0.5)[0].concentration
print('Final SERCA States')
# e1 = cell.e1_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# e1_2ca = cell.e1_2ca_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# e1_2ca_p = cell.e1_2ca_p_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# e2_2ca_p = cell.e2_2ca_p_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# e2_p = cell.e2_p_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# e2 = cell.e2_serca.nodes(cell.somatic[0])(0.5)[0].concentration
# total = e1 + e1_2ca + e1_2ca_p + e2_2ca_p + e2_p + e2
#
# print('e1: {}'.format(e1/total))
# print('e1-2ca: {}'.format(e1_2ca / total))
# print('e1-2ca-p: {}'.format(e1_2ca_p / total))
# print('e2-2ca-p: {}'.format(e2_2ca_p / total))
# print('e2-p: {}'.format(e2_p / total))
# print('e2: {}'.format(e2 / total))
result_d = {'t': t_steps,
'sec names': ['Soma', 'Proximal Apical Trunk', 'Distal Apical Trunk'],
'cyt ca': ca_cyt_arr,
'er ca': ca_er_arr,
'carca': carca_arr,
'cbdhca': cbdhca_arr,
'cbdlca': cbdlca_arr,
'ogb1ca': ogb1ca_arr,
}
return result_d
def plot_recharge(result_d):
t_steps = result_d['t'] / 1000.0
ret_figs = []
cyt_ca_fig = bplt.figure(title='Cytosol Calcium vs Time')
ret_figs.append(cyt_ca_fig)
cyt_ca_fig.xaxis.axis_label = 'time (seconds)'
cyt_ca_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
er_ca_fig = bplt.figure(title='Endoplasmic Reticulum Calcium vs Time')
ret_figs.append(er_ca_fig)
er_ca_fig.xaxis.axis_label = 'time (seconds)'
er_ca_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
carca_fig = bplt.figure(title='Bound Calreticulin vs Time')
ret_figs.append(carca_fig)
carca_fig.xaxis.axis_label = 'time (seconds)'
carca_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
carca_fig.line(t_steps, 1000.0*result_d['carca'][:], line_width=3, color=colrs[0], legend='Somatic ER')
cbd_fig = bplt.figure(title='Bound Calbindin-D28k vs Time')
ret_figs.append(cbd_fig)
cbd_fig.xaxis.axis_label = 'time (seconds)'
cbd_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
cbd_fig.line(t_steps, 1000.0 * result_d['cbdhca'][:], line_width=3, color=colrs[0], legend='High')
cbd_fig.line(t_steps, 1000.0 * result_d['cbdlca'][:], line_width=3, color=colrs[2], legend='Low')
ogb1_fig = bplt.figure(title='Bound OGB-1 vs Time')
ret_figs.append(ogb1_fig)
ogb1_fig.xaxis.axis_label = 'time (seconds)'
ogb1_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
ogb1_fig.line(t_steps, 1000.0 * result_d['ogb1ca'][:], line_width=3, color=colrs[0], legend='High')
# for l_i, loc_name in enumerate(result_d['sec names']):
# if 'soma' in loc_name:
# cyt_ca_fig.line(t_steps, 1000.0*result_d['cyt ca'][:, l_i], line_width=4, color='black', legend='model result')
# er_ca_fig.line(t_steps, 1000.0*result_d['er ca'][:, l_i], line_width=4, color='black', legend='model result')
for l_i, loc_name in enumerate(result_d['sec names']):
cyt_ca_fig.line(t_steps, 1000.0*result_d['cyt ca'][:, l_i], line_width=4, color=colrs[l_i], legend=loc_name)
er_ca_fig.line(t_steps, 1000.0*result_d['er ca'][:, l_i], line_width=4, color=colrs[l_i], legend=loc_name)
cyt_ca_fig.legend.location = 'bottom_right'
return ret_figs
def run_current_injection(rxd_sim, param_dict={}, sim_dur=500, c_int=[50, 100], c_amp=1):
"""
    :param rxd_sim: whether to use the rxd-based calcium model when recording concentrations\n
    :param param_dict: dictionary of model parameters passed to the cell constructor\n
:param sim_dur: simulation duration (msec)\n
:param c_int: interval of time current injection pulse is active\n
:param c_amp: amplitude of current injection\n
:return: t_array, v_array, i_array: t_array\n
time array = numpy array of simulation time steps\n
v_array = numpy array potentials for soma[0](0.5) of cell\n
i_array = numpy array of values for the injected current\n
"""
t_path = join(os.getcwd(), 'morphologies/mpg141209_A_idA.asc')
cell = mig_py.MyCell(t_path, rxd_sim, param_dict)
t_curr = h.IClamp(cell.somatic[0](0.5))
t_curr.delay = c_int[0]
t_curr.amp = c_amp
t_curr.dur = c_int[1] - c_int[0]
# apical[9][0.5] distance to soma[0][1.0] = 105.53 um
# Record Values
i_rec = h.Vector().record(t_curr._ref_i)
t = h.Vector().record(h._ref_t)
soma_v = h.Vector().record(cell.somatic[0](0.5)._ref_v)
apic_v = h.Vector().record(cell.apical[9](0.5)._ref_v)
axon_v = h.Vector().record(cell.axonal[0](0.5)._ref_v)
s_ica_l = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cal)
a_ica_l = h.Vector().record(cell.apical[9](0.5)._ref_ica_cal)
s_ica_n = h.Vector().record(cell.somatic[0](0.5)._ref_ica_can)
a_ica_n = h.Vector().record(cell.apical[9](0.5)._ref_ica_can)
s_ica_t = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cat)
a_ica_t = h.Vector().record(cell.apical[9](0.5)._ref_ica_cat)
if not rxd_sim:
s_ca_cyt = h.Vector().record(cell.somatic[0](0.5)._ref_cai)
a_ca_cyt = h.Vector().record(cell.apical[9](0.5)._ref_cai)
else:
s_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.somatic[0])[0]._ref_concentration)
a_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.apical[9])[0]._ref_concentration)
ax_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.axonal[0])[0]._ref_concentration)
s_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.somatic[0])[0]._ref_concentration)
a_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.apical[9])[0]._ref_concentration)
s_dyeca = h.Vector().record(cell.dyeca.nodes(cell.somatic[0])[0]._ref_concentration)
a_dyeca = h.Vector().record(cell.dyeca.nodes(cell.apical[9])[0]._ref_concentration)
s_cbdhca = h.Vector().record(cell.cbdhca.nodes(cell.somatic[0])[0]._ref_concentration)
a_cbdhca = h.Vector().record(cell.cbdhca.nodes(cell.apical[9])[0]._ref_concentration)
s_cbdlca = h.Vector().record(cell.cbdlca.nodes(cell.somatic[0])[0]._ref_concentration)
a_cbdlca = h.Vector().record(cell.cbdlca.nodes(cell.apical[9])[0]._ref_concentration)
s_carca = h.Vector().record(cell.carca.nodes(cell.somatic[0])[0]._ref_concentration)
a_carca = h.Vector().record(cell.carca.nodes(cell.apical[9])[0]._ref_concentration)
s_ip3r = h.Vector().record(cell.ro_ip3r.nodes(cell.somatic[0])[0]._ref_concentration)
a_ip3r = h.Vector().record(cell.ro_ip3r.nodes(cell.apical[9])[0]._ref_concentration)
h.cvode.active(1)
h.v_init = -69.4
h.tstop = sim_dur
h.celsius = 34.0
# h.load_file('negative_init.hoc')
# h.init()
h.stdinit()
print('Running current injection, amplitude = {0} nA'.format(c_amp))
h.continuerun(sim_dur)
print('Final IP3R Values')
print('r: {0}'.format(cell.r_ip3r.nodes(cell.somatic[0]).concentration))
print('ri: {0}'.format(cell.ri_ip3r.nodes(cell.somatic[0]).concentration))
print('ro: {0}'.format(cell.ro_ip3r.nodes(cell.somatic[0]).concentration))
print('rc: {0}'.format(cell.rc_ip3r.nodes(cell.somatic[0]).concentration))
print('rc2: {0}'.format(cell.rc2_ip3r.nodes(cell.somatic[0]).concentration))
print('rc3: {0}'.format(cell.rc3_ip3r.nodes(cell.somatic[0]).concentration))
print('rc4: {0}'.format(cell.rc4_ip3r.nodes(cell.somatic[0]).concentration))
print('Final IP3 Production Numbers')
print('ip3: {0}'.format(cell.ip3.nodes(cell.somatic[0]).concentration))
print('plc: {0}'.format(cell.plc_m1.nodes(cell.somatic[0]).concentration))
print('ga_gtp: {0}'.format(cell.ga_gtp_m1.nodes(cell.somatic[0]).concentration))
print('ga_gtp_plc: {0}'.format(cell.ga_gtp_plc_m1.nodes(cell.somatic[0]).concentration))
print('ip5p: {0}'.format(cell.ip5p.nodes(cell.somatic[0]).concentration))
print('ip5p_ip3: {0}'.format(cell.ip5p_ip3.nodes(cell.somatic[0]).concentration))
print('ip3k: {0}'.format(cell.ip3k.nodes(cell.somatic[0]).concentration))
print('ip3k_2ca: {0}'.format(cell.ip3k_2ca.nodes(cell.somatic[0]).concentration))
print('ip3k_2ca_ip3: {0}'.format(cell.ip3k_2ca_ip3.nodes(cell.somatic[0]).concentration))
res_dict = {'t': np.array(t.as_numpy()),
'i_stim': np.array(i_rec.as_numpy()),
'soma_v': np.array(soma_v),
'soma_cyt': np.array(s_ca_cyt),
'axon_v': np.array(axon_v),
'axon_cyt': np.array(ax_ca_cyt),
'apic9_v': np.array(apic_v),
'apic9_cyt': np.array(a_ca_cyt),
'soma_ica_l': np.array(s_ica_l),
'apic9_ica_l': np.array(a_ica_l),
'soma_ica_n': np.array(s_ica_n),
'apic9_ica_n': np.array(a_ica_n),
'soma_ica_t': np.array(s_ica_t),
'apic9_ica_t': np.array(a_ica_t),
}
if rxd_sim:
res_dict['soma_er'] = np.array(s_ca_er)
res_dict['apic9_er'] = np.array(a_ca_er)
res_dict['soma_ip3r_open'] = np.array(s_ip3r)
res_dict['apic9_ip3r_open'] = np.array(a_ip3r)
res_dict['soma_dyeca'] = np.array(s_dyeca)
res_dict['apic9_dyeca'] = np.array(a_dyeca)
res_dict['soma_cbdhca'] = np.array(s_cbdhca)
        res_dict['apic9_cbdhca'] = np.array(a_cbdhca)
res_dict['soma_cbdlca'] = np.array(s_cbdlca)
        res_dict['apic9_cbdlca'] = np.array(a_cbdlca)
res_dict['soma_carca'] = np.array(s_carca)
res_dict['apic9_carca'] = np.array(a_carca)
return res_dict
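# Usage sketch for the current-injection experiment above. This is illustrative only:
# it assumes the enclosing function is named run_current_injection and takes
# (rxd_sim, param_dict, sim_dur, c_int, c_amp) as its body suggests; the amplitude
# and timing values below are made up for the example.
def _example_single_injection():
    res = run_current_injection(rxd_sim=True, param_dict={}, sim_dur=500,
                                c_int=[50, 60], c_amp=1.0)
    # res is a dict of numpy arrays keyed by recording site
    print('peak somatic potential: {:.2f} mV'.format(res['soma_v'].max()))
    # plot_current_injection (defined later in this file) turns it into bokeh figures
    return plot_current_injection(res, rxd_bool=True, t_inj=50)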
def run_current_injection_series(rxd_sim, param_dict={}, sim_dur=500, pulse_times=[50], pulse_amps=[1.0], pulse_length=10):
t_path = join(os.getcwd(), 'morphologies/mpg141209_A_idA.asc')
cell = mig_py.MyCell(t_path, rxd_sim, param_dict)
t_curr = h.IClamp(cell.somatic[0](0.5))
amp_list = []
amp_time = []
for p_time, p_amp in zip(pulse_times, pulse_amps):
amp_list += [p_amp, 0.0]
amp_time += [p_time, p_time+pulse_length]
c_vec = h.Vector().from_python(amp_list)
c_time = h.Vector().from_python(amp_time)
t_curr.delay = 0
t_curr.dur = 1e9
c_vec.play(t_curr._ref_amp, c_time)
# apical[9][0.5] distance to soma[0][1.0] = 105.53 um
# Record Values
i_rec = h.Vector().record(t_curr._ref_i)
t = h.Vector().record(h._ref_t)
soma_v = h.Vector().record(cell.somatic[0](0.5)._ref_v)
apic_v = h.Vector().record(cell.apical[9](0.5)._ref_v)
axon_v = h.Vector().record(cell.axonal[0](0.5)._ref_v)
s_ica_l = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cal)
a_ica_l = h.Vector().record(cell.apical[9](0.5)._ref_ica_cal)
s_ica_n = h.Vector().record(cell.somatic[0](0.5)._ref_ica_can)
a_ica_n = h.Vector().record(cell.apical[9](0.5)._ref_ica_can)
s_ica_t = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cat)
a_ica_t = h.Vector().record(cell.apical[9](0.5)._ref_ica_cat)
if not rxd_sim:
s_ca_cyt = h.Vector().record(cell.somatic[0](0.5)._ref_cai)
a_ca_cyt = h.Vector().record(cell.apical[9](0.5)._ref_cai)
else:
s_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.somatic[0])[0]._ref_concentration)
a_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.apical[9])[0]._ref_concentration)
ax_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.axonal[0])[0]._ref_concentration)
s_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.somatic[0])[0]._ref_concentration)
a_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.apical[9])[0]._ref_concentration)
s_dyeca = h.Vector().record(cell.dyeca.nodes(cell.somatic[0])[0]._ref_concentration)
a_dyeca = h.Vector().record(cell.dyeca.nodes(cell.apical[9])[0]._ref_concentration)
s_cbdhca = h.Vector().record(cell.cbdhca.nodes(cell.somatic[0])[0]._ref_concentration)
a_cbdhca = h.Vector().record(cell.cbdhca.nodes(cell.apical[9])[0]._ref_concentration)
s_cbdlca = h.Vector().record(cell.cbdlca.nodes(cell.somatic[0])[0]._ref_concentration)
a_cbdlca = h.Vector().record(cell.cbdlca.nodes(cell.apical[9])[0]._ref_concentration)
s_carca = h.Vector().record(cell.carca.nodes(cell.somatic[0])[0]._ref_concentration)
a_carca = h.Vector().record(cell.carca.nodes(cell.apical[9])[0]._ref_concentration)
h.cvode.active(1)
h.v_init = -69.4
h.tstop = sim_dur
h.celsius = 34.0
# h.load_file('negative_init.hoc')
# h.init()
h.stdinit()
print('Running current injection')
h.continuerun(sim_dur)
res_dict = {'t': np.array(t.as_numpy()),
'i_stim': np.array(i_rec.as_numpy()),
'soma_v': np.array(soma_v),
'soma_cyt': np.array(s_ca_cyt),
'apic9_v': np.array(apic_v),
'apic9_cyt': np.array(a_ca_cyt),
'axon_v': np.array(axon_v),
'axon_cyt': np.array(ax_ca_cyt),
'soma_ica_l': np.array(s_ica_l),
'apic9_ica_l': np.array(a_ica_l),
'soma_ica_n': np.array(s_ica_n),
'apic9_ica_n': np.array(a_ica_n),
'soma_ica_t': np.array(s_ica_t),
'apic9_ica_t': np.array(a_ica_t)
}
if rxd_sim:
res_dict['soma_er'] = np.array(s_ca_er)
res_dict['apic9_er'] = np.array(a_ca_er)
res_dict['soma_dyeca'] = np.array(s_dyeca)
res_dict['apic9_dyeca'] = np.array(a_dyeca)
res_dict['soma_cbdhca'] = np.array(s_cbdhca)
res_dict['apic9_cbdhca'] = np.array(a_cbdhca)
res_dict['soma_cbdlca'] = np.array(s_cbdlca)
res_dict['apic9_cbdlca'] = np.array(a_cbdlca)
res_dict['soma_carca'] = np.array(s_carca)
res_dict['apic9_carca'] = np.array(a_carca)
return res_dict
def plot_current_injection(result_d, rxd_bool, t_inj, t_ignore=0):
result_figs = []
t_ig = np.squeeze(np.where(result_d['t'] > t_ignore))
i_inj = np.squeeze(np.where(result_d['t'] >= t_inj))[0]
t_arr = result_d['t'][t_ig] - t_ignore
v_fig = bplt.figure(title='Membrane Potential vs Time')
result_figs.append(v_fig)
v_fig.line(t_arr, result_d['soma_v'][t_ig], line_width=3, color='blue', legend='Soma')
v_fig.line(t_arr, result_d['apic9_v'][t_ig], line_width=3, color='green', legend='Apical Trunk',
line_dash='dashed')
v_fig.xaxis.axis_label = 'time (msec)'
v_fig.yaxis.axis_label = 'potential (mV)'
i_fig = bplt.figure(title='Current Injection vs Time')
result_figs.append(i_fig)
i_fig.line(t_arr, result_d['i_stim'][t_ig], line_width=3, color='blue', legend='Soma')
i_fig.xaxis.axis_label = 'time (msec)'
i_fig.yaxis.axis_label = 'current (nA)'
cai_fig = bplt.figure(title='Intracellular Calcium vs Time')
result_figs.append(cai_fig)
cai_fig.line(t_arr, result_d['soma_cyt'][t_ig] * 1000.0, line_width=3, color='blue', legend='Soma')
cai_fig.line(t_arr, result_d['apic9_cyt'][t_ig] * 1000.0, line_width=3, color='green', legend='Apical Trunk',
line_dash='dashed')
cai_fig.xaxis.axis_label = 'time (msec)'
cai_fig.yaxis.axis_label = '[Ca] ({}M)'.format(mu)
ica_fig = bplt.figure(title='Calcium Currents vs Time')
result_figs.append(ica_fig)
ica_fig.line(t_arr, result_d['soma_ica_l'][t_ig], line_width=3, color=colrs[0], legend='Soma - L')
ica_fig.line(t_arr, result_d['apic9_ica_l'][t_ig], line_width=3, color=colrs[1], legend='Apical Trunk - L')
ica_fig.line(t_arr, result_d['soma_ica_n'][t_ig], line_width=3, color=colrs[2], legend='Soma - N')
ica_fig.line(t_arr, result_d['apic9_ica_n'][t_ig], line_width=3, color=colrs[3], legend='Apical Trunk - N')
ica_fig.line(t_arr, result_d['soma_ica_t'][t_ig], line_width=3, color=colrs[4], legend='Soma - T')
ica_fig.line(t_arr, result_d['apic9_ica_t'][t_ig], line_width=3, color=colrs[5], legend='Apical Trunk - T')
ica_fig.xaxis.axis_label = 'time (msec)'
ica_fig.yaxis.axis_label = 'current (mA/cm^2)'
if rxd_bool:
caer_fig = bplt.figure(title='Endoplasmic Reticulum Calcium vs Time')
result_figs.append(caer_fig)
caer_fig.line(t_arr, result_d['soma_er'][t_ig] * 1000.0, line_width=3, color='blue', legend='Soma')
caer_fig.line(t_arr, result_d['apic9_er'][t_ig] * 1000.0, line_width=3, color='green', legend='Apical Trunk')
caer_fig.xaxis.axis_label = 'time (msec)'
caer_fig.yaxis.axis_label = '[Ca]_ER ({}M)'.format(mu)
cbd_fig = bplt.figure(title='Bound Calbindin D28k vs Time')
result_figs.append(cbd_fig)
cbd_fig.line(t_arr, result_d['soma_cbdhca'][t_ig] * 1000.0, line_width=3, color='blue', legend='Soma - HA')
cbd_fig.line(t_arr, result_d['apic9_cbdhca'][t_ig] * 1000.0, line_width=3, color='green',
legend='Apical Trunk - HA')
cbd_fig.line(t_arr, result_d['soma_cbdlca'][t_ig] * 1000.0, line_width=3, color='blue', legend='Soma - LA',
line_dash='dashed')
cbd_fig.line(t_arr, result_d['apic9_cbdlca'][t_ig] * 1000.0, line_width=3, color='green',
legend='Apical Trunk- LA', line_dash='dashed')
cbd_fig.xaxis.axis_label = 'time (msec)'
cbd_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
dye_fig = bplt.figure(title='Change in Fluorescence vs Time')
result_figs.append(dye_fig)
dF_s = 100 * (result_d['soma_dyeca'][t_ig] - result_d['soma_dyeca'][i_inj]) / result_d['soma_dyeca'][i_inj]
dF_a = 100 * (result_d['apic9_dyeca'][t_ig] - result_d['apic9_dyeca'][i_inj]) / result_d['apic9_dyeca'][i_inj]
dye_fig.line(t_arr, dF_s, line_width=3, color='blue', legend='Soma')
dye_fig.line(t_arr, dF_a, line_width=3, color='green', legend='Apical Trunk')
dye_fig.xaxis.axis_label = 'time (msec)'
dye_fig.yaxis.axis_label = '{}F (%)'.format(delta)
carca_fig = bplt.figure(title='Bound Calreticulin vs Time')
result_figs.append(carca_fig)
carca_fig.line(t_arr, result_d['soma_carca'][t_ig] * 1000.0, line_width=3, color='blue', legend='Soma')
carca_fig.line(t_arr, result_d['apic9_carca'][t_ig] * 1000.0, line_width=3, color='green', legend='Distal Apical Trunk')
carca_fig.xaxis.axis_label = 'time (msec)'
carca_fig.yaxis.axis_label = 'concentration ({}M)'.format(mu)
ip3r_fig = bplt.figure(title='Open IP3R vs Time')
result_figs.append(ip3r_fig)
ip3r_fig.line(t_arr, result_d['soma_ip3r_open'][t_ig]*100, line_width=3, color='blue', legend='Soma')
ip3r_fig.line(t_arr, result_d['apic9_ip3r_open'][t_ig]*100, line_width=3, color='green',
legend='Distal Apical Trunk')
ip3r_fig.xaxis.axis_label = 'time (msec)'
ip3r_fig.yaxis.axis_label = 'Percent Open'
return result_figs
def run_ip3_pulse(t_steps, param_dict={}, pulse_times=[50], pulse_amps=[0.001], pulse_amps_high=[0.001],
pulse_length=10, current_dict={'amp': 0.0, 'dur': 100.0, 'start': 0.0},
im_inhib_dict={'perc': 1.0, 'start': 0.0, 'end': 0.0}):
p_times = []
p_amps_high = []
p_amps_f = []
p_amps_b = []
for p_t, p_a, p_h in zip(pulse_times, pulse_amps, pulse_amps_high):
p_times += [p_t, p_t+pulse_length]
p_amps_high += [p_h, 0]
p_amps_f += [p_a, 0]
p_amps_b += [0, p_a]
print('Pulse Times: {}'.format(p_times))
print('Pulse Amps: {}'.format(p_amps_f))
t_path = join(os.getcwd(), 'morphologies/mpg141209_A_idA.asc')
cell = mig_py.MyCell(t_path, True, param_dict)
if current_dict['amp']:
t_curr = h.IClamp(cell.somatic[0](0.5))
t_curr.amp = current_dict['amp']
t_curr.delay = current_dict['start']
t_curr.dur = current_dict['dur']
n_nodes = len(cell.ca[cell.cyt].nodes)
apical_trunk_inds = [0, 8, 9, 11, 13, 19]
sec_list = [sec for sec in cell.somatic] + [cell.apical[i] for i in apical_trunk_inds]
apic_names = ['apical_{0}'.format(num) for num in apical_trunk_inds]
sec_names = ['soma_0'] + apic_names
h.distance(0, cell.somatic[0](0.5))
node_dists = []
for sec in sec_list:
for seg in sec:
node_dists.append(h.distance(sec(seg.x)))
node_locs = []
for sec_name in sec_names:
sec = cell.get_sec_by_name(sec_name)
for node in cell.ca[cell.cyt].nodes:
if node.satisfies(sec):
node_locs.append('{0}({1:.3})'.format(sec_name, node.x))
# apical[9][0.5] distance to soma[0][1.0] = 105.53 um
# Record Values
t_vec = h.Vector().record(h._ref_t)
s_v = h.Vector().record(cell.somatic[0](0.5)._ref_v)
a0_v = h.Vector().record(cell.apical[0](0.5)._ref_v)
a9_v = h.Vector().record(cell.apical[9](0.5)._ref_v)
s_isk = h.Vector().record(cell.somatic[0](0.5)._ref_ik_kca)
a0_isk = h.Vector().record(cell.apical[0](0.5)._ref_ik_kca)
a9_isk = h.Vector().record(cell.apical[9](0.5)._ref_ik_kca)
s_im = h.Vector().record(cell.somatic[0](0.5)._ref_ik_kmb_inh)
ax_im = h.Vector().record(cell.axonal[0](0.5)._ref_ik_kmb_inh)
s_ica_l = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cal)
a_ica_l = h.Vector().record(cell.apical[9](0.5)._ref_ica_cal)
s_ica_n = h.Vector().record(cell.somatic[0](0.5)._ref_ica_can)
a_ica_n = h.Vector().record(cell.apical[9](0.5)._ref_ica_can)
s_ica_t = h.Vector().record(cell.somatic[0](0.5)._ref_ica_cat)
a_ica_t = h.Vector().record(cell.apical[9](0.5)._ref_ica_cat)
s_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.somatic[0])[0]._ref_concentration)
a0_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.apical[0])[0]._ref_concentration)
a9_ca_cyt = h.Vector().record(cell.ca[cell.cyt].nodes(cell.apical[9])[0]._ref_concentration)
s_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.somatic[0])[0]._ref_concentration)
a0_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.apical[0])[0]._ref_concentration)
a9_ca_er = h.Vector().record(cell.ca[cell.er].nodes(cell.apical[9])[0]._ref_concentration)
s_ip3 = h.Vector().record(cell.ip3.nodes(cell.somatic[0])[0]._ref_concentration)
a0_ip3 = h.Vector().record(cell.ip3.nodes(cell.apical[0])[0]._ref_concentration)
a9_ip3 = h.Vector().record(cell.ip3.nodes(cell.apical[9])[0]._ref_concentration)
s_po = h.Vector().record(cell.ro_ip3r.nodes(cell.somatic[0])[0]._ref_concentration)
a0_po = h.Vector().record(cell.ro_ip3r.nodes(cell.apical[0])[0]._ref_concentration)
a9_po = h.Vector().record(cell.ro_ip3r.nodes(cell.apical[9])[0]._ref_concentration)
cyt_vals = np.zeros((t_steps.size, n_nodes))
er_vals = np.zeros((t_steps.size, n_nodes))
ip3_vals = np.zeros((t_steps.size, n_nodes))
ip3r_open_vals = np.zeros((t_steps.size, n_nodes))
cv_act = 1
print('CVode: {0}'.format(cv_act))
h.cvode.active(cv_act)
h.v_init = -69.4
h.celsius = 34.0
h.stdinit()
print('Running IP3 pulse')
for t_i, t_step in enumerate(t_steps):
h.continuerun(t_step)
if t_step in p_times:
p_i = p_times.index(t_step)
print('time: {0}, ip3 rate: {1}'.format(t_step, p_amps_f[p_i]))
cell.ip3.nodes(cell.cyt).concentration = p_amps_f[p_i]
cell.ip3.nodes(cell.apical[apical_trunk_inds[2]]).concentration = p_amps_high[p_i]
# ip3_f = p_amps_f[p_i]
# ip3_b = p_amps_b[p_i]
#
# cell.ip3_prod.b_rate = ip3_f
# cell.ip3_prod.b_rate = ip3_b
h.CVode().re_init()
# cell.ip3.concentration = ip3_amp
if im_inhib_dict['perc'] < 1.0:
if t_step == im_inhib_dict['start']:
for soma_sec in cell.somatic:
for seg in soma_sec:
seg.perc_test_kmb_inh = im_inhib_dict['perc']
elif t_step == im_inhib_dict['end']:
for soma_sec in cell.somatic:
for seg in soma_sec:
seg.perc_test_kmb_inh = 1.0
cyt_vals[t_i, :] = cell.ca[cell.cyt].nodes.concentration
er_vals[t_i, :] = cell.ca[cell.er].nodes.concentration
ip3_vals[t_i, :] = cell.ip3.nodes.concentration
# ip3r_open_vals[t_i, :] = np.array(cell.o5_ip3r.nodes.concentration) + np.array(cell.o6_ip3r.nodes.concentration)
ip3r_open_vals[t_i, :] = np.array(cell.ro_ip3r.nodes.concentration)
res_dict = {'node names': node_locs,
'node distances': node_dists,
'ip3 vals': ip3_vals,
'cyt vals': cyt_vals,
'er vals': er_vals,
'soma v': np.array(s_v),
'apical0 v': np.array(a0_v),
'apical9 v': np.array(a9_v),
'ip3r open vals': ip3r_open_vals,
'soma cyt time': np.array(s_ca_cyt),
'apical0 cyt time': np.array(a0_ca_cyt),
'apical9 cyt time': np.array(a9_ca_cyt),
'soma er time': np.array(s_ca_er),
'apical0 er time': np.array(a0_ca_er),
'apical9 er time': np.array(a9_ca_er),
'soma ip3 time': np.array(s_ip3),
'apical0 ip3 time': np.array(a0_ip3),
'apical9 ip3 time': np.array(a9_ip3),
'soma im': np.array(s_im),
'axon im': np.array(ax_im),
'soma isk': np.array(s_isk),
'apical0 isk': np.array(a0_isk),
'apical9 isk': np.array(a9_isk),
'soma ica_l': np.array(s_ica_l),
'apic9 ica_l':
|
np.array(a_ica_l)
|
numpy.array
|
# %%
import numpy as np
import numpy.fft as fft
import numpy.linalg as la
import pywt
# Generic Operator
class genericOperator(object):
pass
# Implementation of a simple linear operator
class OperatorLinear(genericOperator):
def __init__(self, mat, samplingSet=None, basisSet=None):
self.__mat = mat
self.__shape = mat.shape
# Define input and output shape for post-multiplication
self.inShape = self.__mat.shape[1]
self.outShape = self.__mat.shape[0]
# Define input and output shape for post-multiplication
self.wavShape = self.__mat.shape[1]
self.imShape = self.__mat.shape[0]
        # Assign sampling set (always set the attribute so that eval() can test it against None)
        if isinstance(samplingSet, list):
            samplingSet = np.array(samplingSet)
            tmp = np.zeros(self.outShape, dtype=bool)
            tmp[samplingSet] = True
            self.samplingSet = tmp
        else:
            self.samplingSet = samplingSet
        # Assign basis set (always set the attribute so that eval() can test it against None)
        if isinstance(basisSet, list):
            basisSet = np.array(basisSet)
            tmp = np.zeros(self.inShape, dtype=bool)
            tmp[basisSet] = True
            self.basisSet = tmp
        else:
            self.basisSet = basisSet
def eval(self, x, mode=1):
if(mode==1):
# Direct map
if(self.basisSet is not None):
tmp = np.dot(self.__mat[:,self.basisSet],x[self.basisSet])
else:
tmp = np.dot(self.__mat,x)
if(self.samplingSet is not None):
return tmp[self.samplingSet]
else:
return tmp
elif(mode==2):
# Adjoint map
if(self.samplingSet is not None):
tmp = np.dot(np.conjugate(self.__mat.T[:,self.samplingSet]),x[self.samplingSet])
else:
tmp = np.dot(np.conjugate(self.__mat.T),x)
if(self.basisSet is not None):
return tmp[self.basisSet]
else:
return tmp
def adjoint(self, x):
return self.eval(x, mode=2)
@property
def shape(self):
"The shape of the operator."
return self.__shape
def input_size(self):
return self.__shape[1]
@property
def T(self):
"Transposed of the operator."
return OperatorLinear(np.conjugate(self.__mat.T))
def __matmul__(self, x):
return np.dot(self.__mat,x)
def colRestrict(self,idxSet=None):
return OperatorLinear(self.__mat[:,idxSet])
def norm(self):
return np.linalg.norm(self.__mat,2)
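# Minimal usage sketch for OperatorLinear (illustrative values only). It relies on
# the default samplingSet=None / basisSet=None and checks the defining property of
# the adjoint implemented by eval(mode=2): <y, A x> == <A* y, x>.
def _example_operator_linear():
    np.random.seed(0)
    mat = np.random.normal(size=(6, 4)) + 1j * np.random.normal(size=(6, 4))
    A = OperatorLinear(mat)
    x = np.random.normal(size=4) + 1j * np.random.normal(size=4)
    y = np.random.normal(size=6) + 1j * np.random.normal(size=6)
    lhs = np.vdot(y, A.eval(x))      # <y, A x>, vdot conjugates its first argument
    rhs = np.vdot(A.adjoint(y), x)   # <A* y, x>
    print('adjoint mismatch:', abs(lhs - rhs))
    print('operator norm:', A.norm())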
# %%
# Find the name of the wavelet associated to the adjoint of
# the wavelet transform
def getAdjointWavelet(waveletName):
if waveletName is None or waveletName == 'None':
return None
if len(waveletName) > 4 and waveletName[0:4] == 'bior':
return 'rbio' + waveletName[4::]
if len(waveletName) > 4 and waveletName[0:4] == 'rbio':
return 'bior' + waveletName[4::]
return waveletName
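# Quick sanity check for the adjoint-name mapping above (illustrative only):
# orthogonal wavelets map to themselves, 'bior'/'rbio' families swap prefixes.
def _example_adjoint_wavelet_names():
    for _name in [None, 'haar', 'db4', 'bior2.6', 'rbio2.8']:
        print(_name, '->', getAdjointWavelet(_name))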
def getWaveletTransformShape(imShape, waveletName, waveletLevel=None):
if waveletName is None or waveletName == 'None':
return imShape
x = np.zeros(imShape)
Wx = pywt.wavedec2(x, waveletName, mode='zero', level=waveletLevel)
return pywt.coeffs_to_array(Wx)[0].shape
def getWaveletTransformSlices(imShape, waveletName, waveletLevel=None):
if waveletName is None or waveletName == 'None':
return None
x = np.zeros(imShape)
Wx = pywt.wavedec2(x, waveletName, mode='zero', level=waveletLevel)
return pywt.coeffs_to_array(Wx)[1]
def getWaveletReconstructionShape(imShape, waveletName, waveletLevel=None):
if waveletName is None or waveletName == 'None':
return imShape
Wx = pywt.wavedec2(np.zeros(imShape), waveletName, mode='zero', level=waveletLevel)
return pywt.waverec2(Wx, wavelet=waveletName, mode='zero').shape
# OperatorWaveletToFourier
# This implements the linear map that takes as inputs
# the wavelet coefficients of a complex image, and
# outputs the Fourier coefficients of the image. The
# map supports restricting the support of the wavelet
# coefficients, and subsampling the Fourier coefficients
class OperatorWaveletToFourier(genericOperator):
def __init__(self, imShape, samplingSet=None, basisSet=None, isTransposed=False, waveletName=None, waveletLevel=None):
# Check for boundary case
if waveletName == 'None':
waveletName = None
# Parameters
self.imShape = imShape
self.waveletName = waveletName
self.waveletNameAdj = getAdjointWavelet(waveletName)
self.waveletLevel = waveletLevel
self.wavShape = getWaveletTransformShape(imShape, waveletName, waveletLevel)
self.wavSlices = getWaveletTransformSlices(imShape, waveletName, waveletLevel)
self.isTransposed = isTransposed
self._norm = None
# Test if the image shape produces a consistent reconstruction
# using pyWavelets or not
xrecShape = getWaveletReconstructionShape(imShape, waveletName, waveletLevel)
self.waveletCrop = [ xrecShape[0] != imShape[0],
xrecShape[1] != imShape[1] ]
# Validate shapes
if samplingSet is not None:
# The sampling set should represent a 2D complex image
if isinstance(samplingSet, list):
samplingSet = np.array(samplingSet)
if(samplingSet.ndim < 2):
tmp = np.zeros(self.imShape,dtype=bool).flatten()
tmp[samplingSet] = True
samplingSet = tmp.reshape(self.imShape)
# Check if the size is correct
if samplingSet.shape[0] != self.imShape[0] or samplingSet.shape[1] != self.imShape[1]:
raise ValueError('The sampling array does not match the shape of the image.')
if basisSet is not None:
# If the basisSet is given in terms of an indexset and not a binary mask, convert it to a binary mask
# Basis Set should refer to a 2D Wavelet coefficient representation
if isinstance(basisSet, list):
basisSet = np.array(basisSet)
if(basisSet.ndim < 2):
tmp = np.zeros(self.wavShape,dtype=bool).flatten()
tmp[basisSet] = True
basisSet = tmp.reshape(self.wavShape)
# Check if the size is correct
if basisSet.shape[0] != self.wavShape[0] or basisSet.shape[1] != self.wavShape[1]:
raise ValueError('The basis indices do not match the shape of the wavelet transform.')
# Restriction of the support of the wavelet coefficients
self.basisSet = basisSet
if basisSet is None:
basisShape = self.wavShape
else:
basisShape = (np.count_nonzero(basisSet),)
# Subsampling of Fourier coefficients
self.samplingSet = samplingSet
if samplingSet is None:
samplingShape = self.imShape
else:
samplingShape = (np.count_nonzero(samplingSet),)
# Input and output shapes
if isTransposed:
self.inShape = samplingShape
self.outShape = basisShape
else:
self.inShape = basisShape
self.outShape = samplingShape
# The method eval is the only one that should use the self.isTransposed flag.
def eval(self, x, mode=1):
# print("shape of x: ",x.shape)
# Verify if the instance is transposed
if self.isTransposed:
if mode == 1:
mode = 2
else:
mode = 1
# Evaluate forward map
if mode == 1:
# Check input dimension
if(x.shape != self.wavShape):
                raise ValueError('ERROR: Input for direct application of the operator does not have the correct size.')
# Verify if the support of the wavelet transform is restricted
if self.waveletName is None:
if self.basisSet is None:
_x = x
else:
_x = np.zeros(self.wavShape, dtype=np.complex)
_x[self.basisSet] = x[self.basisSet]
else:
if self.basisSet is None:
_w = pywt.array_to_coeffs(x, self.wavSlices, output_format='wavedec2')
else:
# Remove the wavelet coefficients not in the basis set
x[np.logical_not(self.basisSet)] = 0.0
_w = pywt.array_to_coeffs(x, self.wavSlices, output_format='wavedec2')
# Compute image from wavelet coefficients
_x = pywt.waverec2(_w, wavelet=self.waveletName, mode='zero')
# Verify if reconstruction is consistent
if self.waveletCrop[0]:
_x = _x[0:self.imShape[0], :]
if self.waveletCrop[1]:
_x = _x[:, 0:self.imShape[1]]
# Verify if there is subsampling of the Fourier coefficients
if self.samplingSet is None:
return fft.fft2(_x, norm='ortho')
else:
_f = fft.fft2(_x, norm='ortho')
_f[np.logical_not(self.samplingSet)] = 0.0 # Set the Fourier coefficients outside the set to zero
# return _f[self.samplingSet]
return _f
# Evaluate adjoint
if mode == 2:
# Check input dimension
if(x.shape != self.imShape):
                raise ValueError('ERROR: Input for inverse application of the operator does not have the correct size.')
# Verify if there is subsampling of the Fourier coefficients
if self.samplingSet is None:
_im = fft.ifft2(x, norm='ortho')
else:
_f = np.zeros(self.imShape, dtype=np.complex)
_f[self.samplingSet] = x[self.samplingSet]
# _f = x
# _f[np.logical_not(self.samplingSet)] = 0.0
_im = fft.ifft2(_f, norm='ortho')
if self.waveletName is None:
_w = _im
else:
_w = pywt.wavedec2(_im, wavelet=self.waveletNameAdj, mode='zero', level=self.waveletLevel)
_w = pywt.coeffs_to_array(_w)[0]
# Verify if the support of the wavelet transform is restricted
if self.basisSet is None:
return _w
else:
_w[np.logical_not(self.basisSet)] = 0.0
# return _w[self.basisSet]
return _w
def adjoint(self, x):
return self.eval(x, mode=2)
def norm(self, maxItns=1E3, absTol=1E-6, relTol=1E-9):
if self._norm is not None:
return self._norm
# Initialize variables
x = np.random.normal(size=self.wavShape) + 1j * np.random.normal(size=self.wavShape)
x = x / la.norm(x)
s = 0
ds = np.inf
itn = 0
stop = False
# Power iteration loop
while not stop:
# Evaluate xp = A'A x
xp = self.adjoint(self.eval(x))
# Evaluate x' A'Ax to estimate sigma_max^2
sp = np.sqrt(np.real(np.sum(np.conj(x.flatten()) * xp.flatten())))
            ds = np.abs(sp - s)
            itn += 1
            if ds < absTol or ds < relTol * s or itn > maxItns:
                stop = True
            # Normalize singular vector
            x = xp / la.norm(xp)
            s = sp
        # Cache the estimate so repeated calls do not rerun the power iteration
        self._norm = s
        return s
def getImageFromWavelet(self, x):
if(x.shape != self.wavShape):
raise ValueError('ERROR: Invalid input size for getImageFromWavelet.')
if self.waveletName is None:
if self.basisSet is None:
_x = x
else:
_x = np.zeros(self.imShape, dtype=np.complex)
_x[self.basisSet] = x[:]
else:
if self.basisSet is None:
_w = pywt.array_to_coeffs(x, self.wavSlices, output_format='wavedec2')
else:
# Set to zero wavelet coefficients not in the basis set
x[np.logical_not(self.basisSet)] = 0.0
_w = pywt.array_to_coeffs(x, self.wavSlices, output_format='wavedec2')
# Compute image from wavelet coefficients
_x = pywt.waverec2(_w, wavelet=self.waveletName, mode='zero')
# Verify if reconstruction is consistent
if self.waveletCrop[0]:
_x = _x[0:self.imShape[0], :]
if self.waveletCrop[1]:
_x = _x[:, 0:self.imShape[1]]
return _x
def getImageFromFourier(self, y):
if self.samplingSet is None:
_im = fft.ifft2(y, norm='ortho')
else:
y[np.logical_not(self.samplingSet)] = 0.0
_im = fft.ifft2(y, norm='ortho')
return _im
@property
def shape(self):
# This is the shape of the matrix representing
# the operator with vectorized inputs and outputs
return (np.prod(self.outShape), np.prod(self.inShape))
def __matmul__(self, x):
_y = self.eval(np.reshape(x, newshape=self.inShape))
return _y.ravel()
@property
def T(self):
# Instantiate the transpose of the operator
return OperatorWaveletToFourier(imShape=self.imShape, samplingSet=self.samplingSet, basisSet=self.basisSet, isTransposed=not(self.isTransposed), waveletName=self.waveletName, waveletLevel=self.waveletLevel)
    # Create an operator restricted to a subset of the wavelet coefficients
def colRestrict(self, basisSet=None):
# Instantiate operator restricted to some entries
return OperatorWaveletToFourier(imShape=self.imShape, samplingSet=self.samplingSet, basisSet=basisSet, isTransposed=self.isTransposed, waveletName=self.waveletName, waveletLevel=self.waveletLevel)
# Print Operator
def __str__(self):
res = '--- Wavelet to Fourier Operator\n'
res += 'Shape of the original image: %d x %d\n' % (self.imShape[0],self.imShape[1])
res += 'Wavelet name: %s\n' % (self.waveletName)
res += 'Wavelet adjoint name: %s\n' % (self.waveletNameAdj)
if self.waveletLevel is not None:
res += 'Wavelet level: %d\n' % (self.waveletLevel)
else:
res += 'Wavelet level is not defined\n'
res += 'Shape of the wavelet transform: %d x %d\n' % (self.wavShape[0],self.wavShape[1])
res += 'Is operator transposed: ' + str(self.isTransposed) + '\n'
# Sampling and basis sets
if self.samplingSet is not None:
res += 'Sampling set has size: %d x %d\n' % (self.samplingSet.shape[0],self.samplingSet.shape[1])
else:
res += 'Sampling set is not defined\n'
if self.basisSet is not None:
res += 'Basis set has size: %d x %d\n' % (self.basisSet.shape[0],self.basisSet.shape[1])
else:
res += 'Basis set is not defined\n'
# Input and output shapes
res += 'Input shape is: ' + str(self.inShape) + '\n'
res += 'Output shape is: ' + str(self.outShape) + '\n'
return res
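# Minimal usage sketch for OperatorWaveletToFourier (illustrative sizes only).
# With no sampling/basis restriction, an orthogonal wavelet and a power-of-two
# image, applying the operator and then its adjoint should approximately return
# the original wavelet coefficients.
def _example_wavelet_to_fourier():
    A = OperatorWaveletToFourier((64, 64), waveletName='haar')
    w = np.random.normal(size=A.wavShape) + 1j * np.random.normal(size=A.wavShape)
    y = A.eval(w)            # Fourier coefficients of the synthesized image
    w_back = A.adjoint(y)    # back to wavelet coefficients
    print('round-trip error:', la.norm(w_back - w) / la.norm(w))
    print('operator norm estimate:', A.norm())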
# OperatorWaveletToFourierX4
# This implements the linear map that takes as inputs
# the wavelet coefficients of 4 complex images, and
# outputs their Fourier coefficients. The map supports restricting
# the support of the wavelet coefficients, and subsampling
# the Fourier coefficients
class OperatorWaveletToFourierX4(genericOperator):
def __init__(self, imShape, samplingSet=None, basisSet=None, isTransposed=False, waveletName='haar', waveletLevel=None):
# Parameters
self.imShape = imShape
self.waveletName = waveletName
self.waveletNameAdj = getAdjointWavelet(waveletName)
self.waveletLevel = waveletLevel
self.wavShape = getWaveletTransformShape(imShape, waveletName, waveletLevel)
self.wavSlices = getWaveletTransformSlices(imShape, waveletName, waveletLevel)
self.isTransposed = isTransposed
self.samplingSet = samplingSet
self.basisSet = basisSet
self._norm = None
# Generate array of maps
if samplingSet is None and basisSet is None:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=None, basisSet=None, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
if samplingSet is None:
if basisSet.ndim == 3:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=None, basisSet=basisSet[:, :, I], isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=None, basisSet=basisSet, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
if samplingSet.ndim == 3:
if basisSet is None:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet[:, :, I], basisSet=None, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
if basisSet.ndim == 3:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet[:, :, I], basisSet=basisSet[:, :, I], isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet[:, :, I], basisSet=basisSet, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
if basisSet is None:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet, basisSet=None, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
if basisSet.ndim == 3:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet, basisSet=basisSet[:, :, I], isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
else:
self.map = [ OperatorWaveletToFourier(imShape=imShape, samplingSet=samplingSet, basisSet=basisSet, isTransposed=isTransposed, waveletName=waveletName, waveletLevel=waveletLevel) for I in range(4) ]
# Find input shape and output shapes
inShape = [ self.map[I].inShape for I in range(4) ]
self.inSlices = None
if len(inShape[0]) == 2:
self.inShape = (inShape[0][0], inShape[0][1], 4)
else:
self.inShape = np.prod(inShape[0])
self.inSlices = [ [0, 0] for I in range(4) ]
self.inSlices[0][1] = inShape[0][0]
for I in range(1, 4):
self.inSlices[I][0] = self.inSlices[I-1][1]
self.inSlices[I][1] = self.inSlices[I][0] + inShape[I][0]
self.inShape = self.inShape + np.prod(inShape[I])
self.inShape = (self.inShape,)
self._inShape = self.inShape
outShape = [ self.map[I].outShape for I in range(4) ]
self.outSlices = None
if len(outShape[0])== 2:
self.outShape = (outShape[0][0], outShape[0][1], 4)
else:
self.outShape = np.prod(outShape[0])
self.outSlices = [ [0, 0] for I in range(4) ]
self.outSlices[0][1] = outShape[0][0]
for I in range(1, 4):
self.outSlices[I][0] = self.outSlices[I-1][1]
self.outSlices[I][1] = self.outSlices[I][0] + outShape[I][0]
self.outShape = self.outShape + np.prod(outShape[I])
self.outShape = (self.outShape,)
self._outShape = self.outShape
def eval(self, x, mode=1):
if mode == 1:
_y = np.zeros(self._outShape, dtype=np.complex)
if self.inSlices is None:
if self.outSlices is None:
for I in range(4):
_y[:, :, I] = self.map[I].eval(x[:, :, I], mode)
else:
for I in range(4):
_y[self.outSlices[I][0]:self.outSlices[I][1]] = self.map[I].eval(x[:, :, I], mode)
else:
if self.outSlices is None:
for I in range(4):
_y[:, :, I] = self.map[I].eval(x[self.inSlices[I][0]:self.inSlices[I][1]], mode)
else:
for I in range(4):
_y[self.outSlices[I][0]:self.outSlices[I][1]] = self.map[I].eval(x[self.inSlices[I][0]:self.inSlices[I][1]], mode)
return _y
if mode == 2:
_x = np.zeros(self._inShape, dtype=np.complex)
if self.outSlices is None:
if self.inSlices is None:
for I in range(4):
_x[:, :, I] = self.map[I].eval(x[:, :, I], mode)
else:
for I in range(4):
_x[self.inSlices[I][0]:self.inSlices[I][1]] = self.map[I].eval(x[:, :, I], mode)
else:
if self.inSlices is None:
for I in range(4):
_x[:, :, I] = self.map[I].eval(x[self.outSlices[I][0]:self.outSlices[I][1]], mode)
else:
for I in range(4):
_x[self.inSlices[I][0]:self.inSlices[I][1]] = self.map[I].eval(x[self.outSlices[I][0]:self.outSlices[I][1]], mode)
return _x
def adjoint(self, x):
return self.eval(x, mode=2)
def norm(self, maxItns=1E3, absTol=1E-6, relTol=1E-9):
if self._norm is not None:
return self._norm
# Initialize variables
x = np.random.normal(size=self._inShape) + 1j * np.random.normal(size=self._inShape)
x = x / la.norm(x)
s = 0
ds = np.inf
itn = 0
stop = False
# Power iteration loop
while not stop:
# Evaluate xp = A'A x
xp = self.adjoint(self.eval(x))
# Evaluate x' A'Ax to estimate sigma_max^2
sp = np.sqrt(np.real(np.sum(np.conj(x.flatten()) * xp.flatten())))
            ds = np.abs(sp - s)
            itn += 1
            if ds < absTol or ds < relTol * s or itn > maxItns:
                stop = True
            # Normalize singular vector
            x = xp / la.norm(xp)
            s = sp
        # Cache the estimate so repeated calls do not rerun the power iteration
        self._norm = s
        return s
def getImageFromWavelet(self, x):
_im = np.zeros(self.imShape + (4,), dtype=np.complex)
if self.inSlices is None:
for I in range(4):
_im[:, :, I] = self.map[I].getImageFromWavelet(x[:, :, I])
else:
for I in range(4):
_im[:, :, I] = self.map[I].getImageFromWavelet(x[self.inSlices[I][0]:self.inSlices[I][1]])
return _im
def getImageFromFourier(self, y):
_im = np.zeros(self.imShape + (4,), dtype=np.complex)
if self.outSlices is None:
for I in range(4):
_im[:, :, I] = self.map[I].getImageFromFourier(y[:, :, I])
else:
for I in range(4):
_im[:, :, I] = self.map[I].getImageFromFourier(y[self.outSlices[I][0]:self.outSlices[I][1]])
return _im
@property
def shape(self):
# This is the shape of the matrix representing
# the operator with vectorized inputs and outputs
return (np.prod(self._outShape), np.prod(self._inShape))
def __matmul__(self, x):
_y = self.eval(np.reshape(x, newshape=self._inShape))
return _y.ravel()
@property
def T(self):
# Instantiate the transpose of the operator
return OperatorWaveletToFourierX4(imShape=self.imShape, samplingSet=self.samplingSet, basisSet=self.basisSet, isTransposed=not(self.isTransposed), waveletName=self.waveletName, waveletLevel=self.waveletLevel)
    # Create an operator restricted to a subset of the wavelet coefficients
def colRestrict(self, basisSet=None):
# Instantiate operator restricted to some entries
return OperatorWaveletToFourierX4(imShape=self.imShape, samplingSet=self.samplingSet, basisSet=basisSet, isTransposed=self.isTransposed, waveletName=self.waveletName, waveletLevel=self.waveletLevel)
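# Minimal usage sketch for OperatorWaveletToFourierX4 (illustrative sizes only):
# without sampling or basis restrictions the input and output are (H, W, 4) stacks,
# and each of the 4 channels is transformed independently.
def _example_wavelet_to_fourier_x4():
    A4 = OperatorWaveletToFourierX4((64, 64), waveletName='haar')
    w = np.random.normal(size=A4.inShape) + 1j * np.random.normal(size=A4.inShape)
    y = A4.eval(w)
    print('input shape:', w.shape, 'output shape:', y.shape)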
# OperatorFourierLowRank
# This implements the linear map used for low-rank recovery:
# the 4 complex images are handled as a single (prod(imShape) x 4)
# matrix, the (optionally subsampled) 2D Fourier transform is applied
# to each column, and the result is returned in the same matrix layout.
# No wavelet transform or basis restriction is applied.
class OperatorFourierLowRank(OperatorWaveletToFourierX4):
def __init__(self, imShape, samplingSet=None, isTransposed=False):
super().__init__(imShape, samplingSet=samplingSet, basisSet=None, isTransposed=isTransposed, waveletName=None, waveletLevel=None)
self.mtxShape = (np.prod(imShape), 4)
self.arrShape = (imShape[0], imShape[1], 4)
if self.isTransposed:
self.outShape = (np.prod(imShape), 4)
else:
self.inShape = (np.prod(imShape), 4)
def eval(self, x, mode=1):
if self.isTransposed:
if mode == 2:
return super().eval(np.reshape(x, newshape=self.arrShape), mode)
else:
return np.reshape(super().eval(x, mode), newshape=self.mtxShape)
else:
if mode == 1:
return super().eval(np.reshape(x, newshape=self.arrShape), mode)
else:
return np.reshape(super().eval(x, mode), newshape=self.mtxShape)
def __matmul__(self, x):
_y = self.eval(np.reshape(x, newshape=self.inShape))
return _y.ravel()
@property
def T(self):
# Instantiate the transpose of the operator
return OperatorFourierLowRank(imShape=self.imShape, samplingSet=self.samplingSet, isTransposed=not(self.isTransposed))
    # Column restriction is not supported for this operator
def colRestrict(self, basisSet=None):
# Instantiate operator restricted to some entries
raise NotImplementedError('colRestrict is not implemented for OperatorFourierLowRank.')
# %%
def testLinearMap(A, ns=10):
err_mean = 0.0
err_std = 0.0
err_min = np.inf
err_max = -np.inf
err_mtx_eval = -np.inf
err_mtx_adj = -np.inf
err_adj_eval = -np.inf
err_adj_adj = -np.inf
At = A.T
for I in range(ns):
x = np.random.normal(size=A.inShape) + 1j * np.random.normal(size=A.inShape)
y = np.random.normal(size=A.outShape) + 1j * np.random.normal(size=A.outShape)
Ax = A.eval(x)
_Ax = At.adjoint(x)
tAy = A.adjoint(y)
_tAy = At.eval(y)
yAx = np.sum( np.conj(y.ravel()) * Ax.ravel() )
tAyx = np.sum( np.conj(tAy.ravel()) * x.ravel() )
err = np.abs(yAx - tAyx)
        err_mean += err
        err_std += err ** 2
err_max = np.maximum(err_max, err)
err_min = np.minimum(err_min, err)
err_mtx_eval = np.maximum(err_mtx_eval, la.norm(A @ x.ravel() - Ax.ravel()))
err_mtx_adj = np.maximum(err_mtx_adj, la.norm(A.T @ y - tAy.ravel()))
        err_adj_eval = np.maximum(err_adj_eval, la.norm(Ax.ravel() - _Ax.ravel()))
        err_adj_adj = np.maximum(err_adj_adj, la.norm(_tAy.ravel() - tAy.ravel()))
err_mean = err_mean / ns
err_std = np.sqrt(err_std / ns - err_mean ** 2)
return err_mean, err_std, err_min, err_max, err_mtx_eval, err_mtx_adj, err_adj_eval, err_adj_adj
if __name__ == '__main__':
ns = 10
do_sampling = False
imShape = [ (71, 77), (128, 128) ]
    waveletName = [ 'None', 'haar', 'db4', 'sym5', 'coif5', 'dmey', 'bior2.6', 'rbio2.8' ]
for _imShape in imShape:
for _waveletName in waveletName:
print('-------------------------------------------------------')
print('Testing operator for image size {:d} x {:d} and wavelet {:s}'.format(_imShape[0], _imShape[1], _waveletName))
print('-------------------------------------------------------')
print('\n ****** OperatorWaveletToFourier ***********************')
if do_sampling:
delta = np.random.uniform()
rho = np.random.uniform()
print('Sampling set ratio (fraction kept): {:1.3f}'. format(delta))
print('Basis set ratio (fraction kept): {:1.3f}'. format(rho))
samplingSet = np.where(np.random.uniform(size=_imShape) < delta, True, False)
basisSet = np.where(np.random.uniform(size=getWaveletTransformShape(_imShape, _waveletName)) < rho, True, False)
A = OperatorWaveletToFourier(_imShape, samplingSet=samplingSet, basisSet=basisSet, isTransposed=False, waveletName=_waveletName)
inShape = A.inShape
outShape = A.outShape
print(' Input shape: {:d} x 1'.format(inShape[0]))
print(' Output shape: {:d} x 1'.format(outShape[0]))
else:
A = OperatorWaveletToFourier(_imShape, samplingSet=None, basisSet=None, isTransposed=False, waveletName=_waveletName)
inShape = A.inShape
outShape = A.outShape
print(' Input shape: {:d} x {:d}'.format(inShape[0], inShape[1]))
print(' Output shape: {:d} x {:d}'.format(outShape[0], outShape[1]))
print(' Operator norm: {:1.5E}'.format(A.norm()))
print('Testing inner products for {:d} samples...'.format(ns))
err_mean, err_std, err_min, err_max, err_mtx_eval, err_mtx_adj, err_adj_eval, err_adj_adj = testLinearMap(A)
print(' Max. Error : {:1.5E}'.format(err_max))
print(' Min. Error : {:1.5E}'.format(err_min))
print(' Mean Error : {:1.5E}'.format(err_mean))
print(' Std : {:1.5E}'.format(err_std))
print(' Implementation of __matmul__')
print(' Max. Error (eval) : {:1.5E}'.format(err_mtx_eval))
            print(' Max. Error (adj) : {:1.5E}'.format(err_mtx_adj))
print(' Implementation of .T')
print(' Max. Error (eval) : {:1.5E}'.format(err_adj_eval))
            print(' Max. Error (adj) : {:1.5E}'.format(err_adj_adj))
print('\n ****** OperatorWaveletToFourierX4 *********************')
if do_sampling:
delta = np.random.uniform()
rho = np.random.uniform()
print('Sampling set ratio (fraction kept): {:1.3f}'. format(delta))
print('Basis set ratio (fraction kept): {:1.3f}'. format(rho))
samplingSet = np.where(np.random.uniform(size=_imShape) < delta, True, False)
basisSet = np.where(np.random.uniform(size=getWaveletTransformShape(_imShape, _waveletName)) < rho, True, False)
A = OperatorWaveletToFourierX4(_imShape, samplingSet=samplingSet, basisSet=basisSet, isTransposed=False, waveletName=_waveletName)
inShape = A.inShape
outShape = A.outShape
print(' Input shape: {:d} x 1'.format(inShape[0]))
print(' Output shape: {:d} x 1'.format(outShape[0]))
else:
A = OperatorWaveletToFourierX4(_imShape, samplingSet=None, basisSet=None, isTransposed=False, waveletName=_waveletName)
inShape = A.inShape
outShape = A.outShape
print(' Input shape: {:d} x {:d}'.format(inShape[0], inShape[1]))
print(' Output shape: {:d} x {:d}'.format(outShape[0], outShape[1]))
print(' Operator norm: {:1.5E}'.format(A.norm()))
print('Testing inner products for {:d} samples...'.format(ns))
err_mean, err_std, err_min, err_max, err_mtx_eval, err_mtx_adj, err_adj_eval, err_adj_adj = testLinearMap(A)
print(' Max. Error : {:1.5E}'.format(err_max))
print(' Min. Error : {:1.5E}'.format(err_min))
print(' Mean Error : {:1.5E}'.format(err_mean))
print(' Std : {:1.5E}'.format(err_std))
print(' Implementation of __matmul__')
print(' Max. Error (eval) : {:1.5E}'.format(err_mtx_eval))
            print(' Max. Error (adj) : {:1.5E}'.format(err_mtx_adj))
print(' Implementation of .T')
print(' Max. Error (eval) : {:1.5E}'.format(err_adj_eval))
            print(' Max. Error (adj) : {:1.5E}'.format(err_adj_adj))
print('\n ****** OperatorFourierLowRank *************************')
if do_sampling:
delta = np.random.uniform()
rho =
|
np.random.uniform()
|
numpy.random.uniform
|
import ray
import time
import gym
import numpy as np
import tensorflow as tf
import pandas as pd
from collections import deque
from stable_baselines import logger
from stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
from stable_baselines.a2c.utils import total_episode_reward_logger
# get_schedule_fn and safe_mean are used in learn() below; in stable-baselines they
# are defined at module level in stable_baselines/ppo2/ppo2.py
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean
from latent_gce.trajectory_utils import mp_trajectories_to_input, \
mp_collect_input_from_state, mp_mj_collect_input_from_state, mp_mj_collect_input_from_state_return_final
from latent_gce.model import LatentGCEImage, LatentGCEIdentity
class GcePPO(ActorCriticRLModel):
"""
Unsupervised learning using empowerment from Latent-GCE using PPO.
    Adapted from Stable Baselines' PPO2 implementation.
    A construction sketch follows this class definition.
"""
def __init__(self, exp_name, policy, env, emp_trajectory_options, emp_options, gamma=0.99, n_steps=128,
ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5, max_grad_norm=0.5, lam=0.95, nminibatches=4,
noptepochs=4, cliprange=0.2, cliprange_vf=None,
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None, mode='identity'):
super(GcePPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.emp_trajectory_options = emp_trajectory_options
self.emp_options = emp_options
self.exp_name = exp_name
self.timesteps_array = []
self.episode_reward_array = []
self.episode_length_array = []
self.emp_logging_keys = self.emp_options.get('logging')
self.emp_logging_arrays = {}
if self.emp_logging_keys:
for k in self.emp_logging_keys:
self.emp_logging_arrays[k] = []
self.learning_rate = learning_rate
self.cliprange = cliprange
self.cliprange_vf = cliprange_vf
self.n_steps = n_steps
self.ent_coef = ent_coef
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.gamma = gamma
self.lam = lam
self.nminibatches = nminibatches
self.noptepochs = noptepochs
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.graph = None
self.sess = None
self.action_ph = None
self.advs_ph = None
self.rewards_ph = None
self.old_neglog_pac_ph = None
self.old_vpred_ph = None
self.learning_rate_ph = None
self.clip_range_ph = None
self.entropy = None
self.vf_loss = None
self.pg_loss = None
self.approxkl = None
self.clipfrac = None
self.params = None
self._train = None
self.loss_names = None
self.train_model = None
self.act_model = None
self.step = None
self.proba_step = None
self.value = None
self.initial_state = None
self.n_batch = None
self.summary = None
self.episode_reward = None
self.runner = None
self.obs = None
self.no_reset_at_all = False
self.mode = mode
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.act_model
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, self.action_ph, policy.policy
return policy.obs_ph, self.action_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.n_batch = self.n_envs * self.n_steps
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
n_batch_step = None
n_batch_train = None
if issubclass(self.policy, RecurrentActorCriticPolicy):
assert self.n_envs % self.nminibatches == 0, \
"For recurrent policies, " \
"the number of environments run in parallel should be a multiple of nminibatches."
n_batch_step = self.n_envs
n_batch_train = self.n_batch // self.nminibatches
act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
reuse=True, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")
neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())
vpred = train_model.value_flat
# Value function clipping: not present in the original PPO
if self.cliprange_vf is None:
# Default behavior (legacy from OpenAI baselines):
# use the same clipping as for the policy
self.clip_range_vf_ph = self.clip_range_ph
self.cliprange_vf = self.cliprange
elif isinstance(self.cliprange_vf, (float, int)) and self.cliprange_vf < 0:
# Original PPO implementation: no value function clipping
self.clip_range_vf_ph = None
else:
# Last possible behavior: clipping range
# specific to the value function
self.clip_range_vf_ph = tf.placeholder(tf.float32, [], name="clip_range_vf_ph")
if self.clip_range_vf_ph is None:
# No clipping
vpred_clipped = train_model.value_flat
else:
                        # Clip the difference between the old and new value
# NOTE: this depends on the reward scaling
vpred_clipped = self.old_vpred_ph + \
tf.clip_by_value(train_model.value_flat - self.old_vpred_ph,
- self.clip_range_vf_ph, self.clip_range_vf_ph)
vf_losses1 = tf.square(vpred - self.rewards_ph)
vf_losses2 = tf.square(vpred_clipped - self.rewards_ph)
self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
pg_losses = -self.advs_ph * ratio
pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
self.clip_range_ph)
self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
self.clipfrac = tf.reduce_mean(tf.cast(tf.greater(tf.abs(ratio - 1.0),
self.clip_range_ph), tf.float32))
loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef
tf.summary.scalar('entropy_loss', self.entropy)
tf.summary.scalar('policy_gradient_loss', self.pg_loss)
tf.summary.scalar('value_function_loss', self.vf_loss)
tf.summary.scalar('approximate_kullback-leibler', self.approxkl)
tf.summary.scalar('clip_factor', self.clipfrac)
tf.summary.scalar('loss', loss)
with tf.variable_scope('model'):
self.params = tf.trainable_variables()
if self.full_tensorboard_log:
for var in self.params:
tf.summary.histogram(var.name, var)
grads = tf.gradients(loss, self.params)
if self.max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
self._train = trainer.apply_gradients(grads)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
if self.clip_range_vf_ph is not None:
tf.summary.scalar('clip_range_vf', tf.reduce_mean(self.clip_range_vf_ph))
tf.summary.scalar('old_neglog_action_probability', tf.reduce_mean(self.old_neglog_pac_ph))
tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', self.rewards_ph)
tf.summary.histogram('learning_rate', self.learning_rate_ph)
tf.summary.histogram('advantage', self.advs_ph)
tf.summary.histogram('clip_range', self.clip_range_ph)
tf.summary.histogram('old_neglog_action_probability', self.old_neglog_pac_ph)
tf.summary.histogram('old_value_pred', self.old_vpred_ph)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.proba_step = act_model.proba_step
self.value = act_model.value
self.initial_state = act_model.initial_state
tf.global_variables_initializer().run(session=self.sess) # pylint: disable=E1101
self.summary = tf.summary.merge_all()
def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,
writer, states=None, cliprange_vf=None):
"""
Training of PPO2 Algorithm
:param learning_rate: (float) learning rate
:param cliprange: (float) Clipping factor
:param obs: (np.ndarray) The current observation of the environment
:param returns: (np.ndarray) the rewards
        :param masks: (np.ndarray) The last masks for done episodes (used in recurrent policies)
:param actions: (np.ndarray) the actions
:param values: (np.ndarray) the values
:param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions
:param update: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model
:return: policy gradient loss, value function loss, policy entropy,
approximation of kl divergence, updated clipping range, training update operation
:param cliprange_vf: (float) Clipping factor for the value function
"""
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {self.train_model.obs_ph: obs, self.action_ph: actions,
self.advs_ph: advs, self.rewards_ph: returns,
self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,
self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.dones_ph] = masks
if cliprange_vf is not None and cliprange_vf >= 0:
td_map[self.clip_range_vf_ph] = cliprange_vf
if states is None:
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
else:
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + update) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))
else:
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map)
writer.add_summary(summary, (update * update_fac))
else:
policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)
return policy_loss, value_loss, policy_entropy, approxkl, clipfrac
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="GCE-PPO",
reset_num_timesteps=True, dump_log=True):
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
cliprange_vf = get_schedule_fn(self.cliprange_vf)
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
if not self.runner:
self.runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam,
emp_trajectory_options=self.emp_trajectory_options, emp_options=self.emp_options,
mode=self.mode, tensorboard_log=self.tensorboard_log)
runner = self.runner
self.episode_reward = np.zeros((self.n_envs,))
ep_info_buf = deque(maxlen=100)
t_first_start = time.time()
n_updates = total_timesteps // self.n_batch
for update in range(1, n_updates + 1):
assert self.n_batch % self.nminibatches == 0, ("The number of minibatches (`nminibatches`) "
"is not a factor of the total number of samples "
"collected per rollout (`n_batch`), "
"some samples won't be used."
)
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - (update - 1.0) / n_updates
lr_now = self.learning_rate(frac)
cliprange_now = self.cliprange(frac)
cliprange_vf_now = cliprange_vf(frac)
# true_reward is the reward without discount
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
self.obs = obs
self.num_timesteps += self.n_batch
ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_batch + epoch_num *
self.n_batch + start) // batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, writer=writer,
update=timestep, cliprange_vf=cliprange_vf_now))
else: # recurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
for start in range(0, self.n_envs, envs_per_batch):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_envs + epoch_num *
self.n_envs + start) // envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, update=timestep,
writer=writer, states=mb_states,
cliprange_vf=cliprange_vf_now))
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
explained_var = explained_variance(values, returns)
logger.logkv("serial_timesteps", update * self.n_steps)
logger.logkv("n_updates", update)
logger.logkv("total_timesteps", self.num_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0:
logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
self.timesteps_array.append(self.num_timesteps)
self.episode_reward_array.append(safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
self.episode_length_array.append(safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
if self.emp_logging_keys:
for k in self.emp_logging_keys:
logger.logkv(k, safe_mean([ep_info[k] for ep_info in ep_info_buf]))
self.emp_logging_arrays[k].append(safe_mean([ep_info[k] for ep_info in ep_info_buf]))
ep_info_buf.clear()
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if dump_log:
logging_dict = {'Steps': self.timesteps_array,
'Episode Reward': self.episode_reward_array,
'Episode Length': self.episode_length_array}
if self.emp_logging_keys:
for k in self.emp_logging_keys:
logging_dict[k] = self.emp_logging_arrays[k]
df = pd.DataFrame(logging_dict)
df.to_csv(self.tensorboard_log + '/' + self.exp_name + '.csv', index=False)
save_emp_model = self.emp_options.get('save_model')
if save_emp_model:
self.runner.gce_model.save(save_emp_model)
return self
def save(self, save_path, cloudpickle=False):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"lam": self.lam,
"nminibatches": self.nminibatches,
"noptepochs": self.noptepochs,
"cliprange": self.cliprange,
"cliprange_vf": self.cliprange_vf,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
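# Hedged illustration (added note, not part of the original training code): in learn()
# above, get_schedule_fn turns a constant learning_rate/cliprange into a callable of the
# remaining-progress fraction `frac` (1.0 at the first update, decaying towards 0.0), so
# `lr_now = self.learning_rate(frac)` anneals the value over training. A linearly
# decaying schedule could be passed in like this:
def _example_linear_schedule(initial_value=2.5e-4):
    """Return a schedule mapping remaining training progress in [0, 1] to a value."""
    def schedule(frac):
        # frac == 1.0 at the start of training and shrinks as updates proceed
        return frac * initial_value
    return schedule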
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam, emp_trajectory_options, emp_options, mode,
tensorboard_log):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
self.emp_trajectory_options = emp_trajectory_options
self.emp_history = None
self.reward_weight = emp_options['reward_weight']
self.emp_weight = emp_options['emp_weight']
self.emp_history_size = emp_options['buffer_size']
self.emp_obs_selection = emp_options['obs_selection']
self.is_mujoco = emp_options['is_mujoco']
self.action_penalty = emp_options['action_penalty']
self.exp_emp = emp_options.get('exp_emp')
self.logging = emp_options.get('logging')
self.uniform_actions = emp_options.get('uniform_actions')
self.multiplicative_emp = emp_options.get('multiplicative')
lr = emp_options['learning_rate']
obs_raw_dim = env.observation_space.shape[0] * emp_trajectory_options['num_steps_observation']
if mode == 'identity':
self.gce_model = LatentGCEIdentity(obs_raw_dim=obs_raw_dim,
action_raw_dim=env.action_space.shape[0] * emp_trajectory_options['T'],
obs_selection=self.emp_obs_selection,
learning_rate=lr,
log_dir=tensorboard_log)
elif mode == 'pixels':
self.gce_model = LatentGCEImage(env=env,
num_steps_observation=2,
action_raw_dim=env.action_space.shape[0] * emp_trajectory_options['T'],
learning_rate=lr,
state_latent_dimension=32,
action_latent_dimension=32,
log_dir=tensorboard_log)
if self.emp_weight != 0:
ray.init(num_cpus=16)
self.gce_train_loss = 0
self.total_random_steps = 0
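# Hedged illustration of the configuration consumed above (added note; keys are the ones
# read by this runner, the values are placeholders rather than recommended settings):
# emp_options = {
#     'reward_weight': 1.0, 'emp_weight': 0.1, 'buffer_size': 10000,
#     'obs_selection': None, 'is_mujoco': False, 'action_penalty': 0.0,
#     'exp_emp': False, 'logging': None, 'uniform_actions': False,
#     'multiplicative': False, 'learning_rate': 1e-3,
#     'save_model': None,        # read in learn() above
# }
# emp_trajectory_options = {'num_steps_observation': 1, 'T': 1,
#                           'total_steps': 1000}   # 'total_steps' is read in run()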
def run(self):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
final_trajectories_obs = []
final_trajectories_actions = []
final_trajectories_neg_log_prob = []
trajectories_obs = []
trajectories_actions = []
trajectories_neg_log_prob = []
# Save mujoco states for resetting
mb_mujoco_sim = []
for o in self.obs:
trajectories_obs.append([o])
trajectories_actions.append([])
trajectories_neg_log_prob.append([])
for _ in range(self.n_steps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
# Mujoco
if self.is_mujoco and self.uniform_actions:
for e in self.env.envs:
if np.random.rand() < 1 / self.emp_trajectory_options['total_steps']:
qpos = e.env.unwrapped.sim.data.qpos.copy()
qvel = e.env.unwrapped.sim.data.qvel.copy()
mb_mujoco_sim.append(np.array([qpos, qvel]))
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
for idx in range(len(infos)):
maybe_ep_info = infos[idx].get('episode')
if maybe_ep_info is not None:
if self.logging:
for k in self.logging:
maybe_ep_info[k] = infos[idx][k]
ep_infos.append(maybe_ep_info)
maybe_terminal_observation = infos[idx].get('terminal_observation')
if self.emp_weight != 0 and not self.uniform_actions:
trajectories_actions[idx].append(np.copy(actions[idx]))
trajectories_neg_log_prob[idx].append(np.copy(neglogpacs[idx]))
if maybe_terminal_observation is None:
trajectories_obs[idx].append(np.copy(self.obs[idx]))
else:
trajectories_obs[idx].append(np.copy(maybe_terminal_observation))
final_trajectories_obs.append(trajectories_obs[idx])
final_trajectories_actions.append(trajectories_actions[idx])
final_trajectories_neg_log_prob.append(trajectories_neg_log_prob[idx])
trajectories_obs[idx] = [np.copy(self.obs[idx])]
trajectories_actions[idx] = []
trajectories_neg_log_prob[idx] = []
mb_rewards.append(rewards)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards =
|
np.asarray(mb_rewards, dtype=np.float32)
|
numpy.asarray
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import time as Ti
import matplotlib.pyplot as plt
# Gaussian log-likelihood of the residuals (yhat - y) given the inverse covariance and its log-determinant
def log_likelihood(yhat, y, invCov, lndetCov):
diff = yhat - y
N = len(y)
lnlike = -0.5 * (N*np.log(2*np.pi) + lndetCov + np.dot(diff, np.dot(invCov, diff)))
return lnlike
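# Hedged sanity check (added illustration): with an identity covariance the expression
# above reduces to -0.5 * (N*ln(2*pi) + sum(diff**2)), which is easy to verify by hand
# for a two-point example.
def _log_likelihood_example():
    yhat = np.array([1.0, 2.0])
    y = np.array([1.5, 1.5])
    invCov = np.eye(2)   # inverse of the identity covariance
    lndetCov = 0.0       # ln(det(I)) = 0
    # diff = [-0.5, 0.5], so diff . invCov . diff = 0.5
    return log_likelihood(yhat, y, invCov, lndetCov)  # == -0.5 * (2*np.log(2*np.pi) + 0.5)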
def log_uniform_prior(md_vec, lb, ub):
if np.all(md_vec > lb) & np.all(md_vec < ub):
return 0.0
return -np.inf
def log_GaussianTailed_prior(md_vec, inner_lb, inner_ub):
prior_vec = []
for ii, param in enumerate(md_vec):
prior = gaussian_tail(inner_lb[ii], inner_ub[ii], param)
prior_vec.append(prior)
prior_sum = np.sum(prior_vec)
return prior_sum
def gaussian_tail(inner_lb, inner_ub, param, logscale='TRUE'):
# Compute absolute bounds on the parameters
width = inner_ub - inner_lb # width of the uniform part
sig = width * 0.1
lb = inner_lb - 3 * sig
ub = inner_ub + 3 * sig
height = 1./(width + sig * np.sqrt(2*np.pi))
# set up the empty prior vector for one parameter
prior = np.zeros_like(param)
if
|
np.all(param < lb)
|
numpy.all
|
"""
@brief This module generates synthetic images for tool segmentation.
@author <NAME>-<NAME> (<EMAIL>).
@date 11 Mar 2019.
"""
# import random
import os
import cv2
import numpy as np
from keras_preprocessing.image import ImageDataGenerator as KerasGenerator
import albumentations
import scipy.ndimage
import scipy.ndimage.interpolation
import skimage.morphology
import random
import noise.perlin
# My imports
import common
import image
import blending
import geometry
class ToolCC:
"""
@class ToolCC represents a tool connected component where the edge pixels are identified.
"""
def __init__(self, im, mask, ep_mask):
"""
@param[in] im Original image; the same rotation will be applied to the image,
mask, and entrypoints.
@param[in] mask Binary (0/1) mask with positive pixels indicating tool presence.
@param[in] ep_mask Binary mask with positive pixels indicating the border of the tool.
This is expected to be a subset of the 'mask'.
"""
self.im = im
self.mask = mask
self.ep_mask = ep_mask
def rotate(self, deg):
"""
@brief Rotate the tool mask along with the entrypoints.
@param[in] deg Angle of rotation in degrees.
@returns nothing.
"""
if deg == 0:
return
# We get the original shape to make sure the rotated one has the same shape
prev_shape = self.im.shape
# Find centre of mass of the tool
cm_y, cm_x = scipy.ndimage.center_of_mass(self.mask)
cm_y = int(round(cm_y))
cm_x = int(round(cm_x))
# Rotate tool and mask around the centre of mass
im_rot = image.CaffeinatedAbstract.rotate_bound_centre(self.im,
(cm_x, cm_y), deg, cv2.INTER_LANCZOS4)
mask_rot = image.CaffeinatedAbstract.rotate_bound_centre(self.mask,
(cm_x, cm_y), deg, cv2.INTER_NEAREST)
# Find out whether we are dealing with a corner case or not
sides = geometry.entry_sides_in_mask(self.mask)
single_side = False
if len(sides) == 1:
single_side = True
# Crop depending on whether it is a single side or a corner case
if single_side:
ep_mask_rot = image.CaffeinatedAbstract.rotate_bound_centre(
self.ep_mask, (cm_x, cm_y), deg, cv2.INTER_NEAREST)
# Rotate tool that is connected to only one side of the image
self.im, self.mask, self.ep_mask = self.crop_rotated_single_border_case(im_rot,
mask_rot, ep_mask_rot)
elif deg == 180: # This is the only rotation allowed for complex tool configurations
self.im = im_rot
self.mask = mask_rot
self.ep_mask = image.CaffeinatedAbstract.rotate_bound_centre(
self.ep_mask, (cm_x, cm_y), deg, cv2.INTER_NEAREST)
elif sides == set(['left', 'top']) \
or sides == set(['top', 'right']) \
or sides == set(['right', 'bottom']) \
or sides == set(['bottom', 'left']):
# The tool is touching a corner of the image: two crops are needed to re-attach it to
# the border of the image after the random rotation
# Rotate keypoints
p1, p2, p3 = ToolCC.get_corner_keypoints(self.ep_mask, dilate_ksize=3)
rot_mat = ToolCC.get_rotate_bound_centre_matrix(
self.mask.shape[0], self.mask.shape[1], (cm_x, cm_y),
deg, cv2.INTER_NEAREST)
p1 = np.round(np.dot(rot_mat, p1)).astype(np.int)
p2 = np.round(np.dot(rot_mat, p2)).astype(np.int)
p3 = np.round(np.dot(rot_mat, p3)).astype(np.int)
p1_x = p1[0, 0]
p1_y = p1[1, 0]
p2_x = p2[0, 0]
p2_y = p2[1, 0]
p3_x = p3[0, 0]
p3_y = p3[1, 0]
# Define cropping points to leave rotated tool connected to the borders
y_crop_start = 0
y_crop_end = mask_rot.shape[0]
x_crop_start = 0
x_crop_end = mask_rot.shape[1]
min_x = np.min(np.where(mask_rot)[1])
max_x = np.max(np.where(mask_rot)[1])
min_y = np.min(np.where(mask_rot)[0])
max_y = np.max(np.where(mask_rot)[0])
tolerance = 20 # pixels
if p3_y < p1_y and p3_y < p2_y: # /\
if np.abs(p2_y - max_y) < tolerance:
y_crop_start = p1_y
elif np.abs(p1_y - max_y) < tolerance:
y_crop_start = p2_y
else:
y_crop_start = max(p1_y, p2_y)
elif p3_y > p1_y and p3_y > p2_y: # \/
if np.abs(p2_y - min_y) < tolerance:
y_crop_end = p1_y
elif np.abs(p1_y - min_y) < tolerance:
y_crop_end = p2_y
else:
y_crop_end = min(p1_y, p2_y)
elif p3_x < p1_x and p3_x < p2_x: # <
if np.abs(p2_x - max_x) < tolerance:
x_crop_start = p1_x
elif np.abs(p1_x - max_x) < tolerance:
x_crop_start = p2_x
else:
x_crop_start = max(p1_x, p2_x)
elif p3_x > p1_x and p3_x > p2_x: # >
if np.abs(p2_x - min_x) < tolerance:
x_crop_end = p1_x
elif np.abs(p1_x - min_x) < tolerance:
x_crop_end = p2_x
else:
x_crop_end = min(p1_x, p2_x)
# Crop image and mask, create new ep_mask according to the crop
im = im_rot[y_crop_start:y_crop_end, x_crop_start:x_crop_end]
mask = mask_rot[y_crop_start:y_crop_end, x_crop_start:x_crop_end]
ep_mask = np.zeros_like(mask)
ep_mask[0,:] = mask[0,:]
ep_mask[-1,:] = mask[-1,:]
ep_mask[:, 0] = mask[:, 0]
ep_mask[:, -1] = mask[:, -1]
# Pad or crop accordingly to come back to the original image size
new_sides = geometry.entry_sides_in_mask(mask)
self.im, self.mask, self.ep_mask = self.adjust_rotated_image(im,
mask, ep_mask, new_sides.pop())
else:
common.writeln_warn('This tool configuration cannot be rotated.')
# Sanity check: rotation should not change the dimensions of the
# image
assert(self.im.shape[0] == prev_shape[0])
assert(self.im.shape[1] == prev_shape[1])
@staticmethod
def get_rotate_bound_centre_matrix(h, w, centre, deg, interp):
cm_x = centre[0]
cm_y = centre[1]
# Build the rotation matrix
rot_mat = cv2.getRotationMatrix2D((cm_y, cm_x), -deg, 1.0)
rot_mat_hom = np.zeros((3, 3))
rot_mat_hom[:2,:] = rot_mat
rot_mat_hom[2, 2] = 1
# Find the coordinates of the corners in the rotated image
tl = np.array([0, 0, 1]).reshape((3, 1))
tr = np.array([w - 1, 0, 1]).reshape((3, 1))
bl = np.array([0, h - 1, 1]).reshape((3, 1))
br = np.array([w - 1, h - 1, 1]).reshape((3, 1))
tl_rot = np.round(np.dot(rot_mat_hom, tl)).astype(np.int)
tr_rot = np.round(np.dot(rot_mat_hom, tr)).astype(np.int)
bl_rot = np.round(np.dot(rot_mat_hom, bl)).astype(np.int)
br_rot = np.round(np.dot(rot_mat_hom, br)).astype(np.int)
# Compute the size of the new image from the coordinates of the rotated one so that
# we add black bounds around the rotated one
min_x = min([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
max_x = max([tl_rot[0], tr_rot[0], bl_rot[0], br_rot[0]])
min_y = min([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
max_y = max([tl_rot[1], tr_rot[1], bl_rot[1], br_rot[1]])
new_w = max_x + 1 - min_x
new_h = max_y + 1 - min_y
# Correct the translation so that the rotated image lies inside the window
rot_mat[0, 2] -= min_x
rot_mat[1, 2] -= min_y
# Create homogeneous rotation matrix
hom_rot_mat = np.zeros((3, 3), dtype=np.float64)
hom_rot_mat[0:2] = rot_mat
hom_rot_mat[2, 2] = 1
return hom_rot_mat
@staticmethod
def get_corner_keypoints(ep_mask, dilate_ksize=0):
"""
@brief It gets a mask of entrypoints of a tool attached to a corner
and produces a mask with three points, labelling 1 the
shortest side of the triangle, 2 the longest, and 3 the corner.
@param[in] ep_mask Binary mask with those pixels != 0 representing
the points of the tool that are touching the
borders of the image.
@returns three points p1, p2, p3 in homogeneous [x, y, 1]
coordinates.
"""
h = ep_mask.shape[0]
w = ep_mask.shape[1]
min_x = np.min(np.where(ep_mask)[1])
max_x = np.max(np.where(ep_mask)[1])
min_y = np.min(np.where(ep_mask)[0])
max_y = np.max(np.where(ep_mask)[0])
# Amend mask in case of 1-pixel gaps in the entry-point mask
amended_mask = None
if dilate_ksize > 2:
kernel = np.ones((dilate_ksize, dilate_ksize), np.uint8)
amended_mask = cv2.dilate(ep_mask, kernel, iterations = 1)
if max_x < amended_mask.shape[1] - 1:
amended_mask[:, max_x + 1:] = 0
if min_x > 0:
amended_mask[:, :min_x] = 0
if max_y < amended_mask.shape[0] - 1:
amended_mask[max_y + 1:,:] = 0
if min_y > 0:
amended_mask[:min_y,:] = 0
else:
amended_mask = ep_mask
kp_mask = np.zeros_like(amended_mask)
if amended_mask[0, 0] != 0: # Top left corner
kp_mask[0, 0] = 3
if max_x < max_y:
kp_mask[0, max_x] = 1
kp_mask[max_y, 0] = 2
else:
kp_mask[0, max_x] = 2
kp_mask[max_y, 0] = 1
elif amended_mask[0, w - 1] != 0: # Top right corner
kp_mask[0, w - 1] = 3
if w - min_x < max_y:
kp_mask[0, min_x] = 1
kp_mask[max_y, w - 1] = 2
else:
kp_mask[0, min_x] = 2
kp_mask[max_y, w - 1] = 1
elif amended_mask[h - 1, 0] != 0: # Bottom left corner
kp_mask[h - 1, 0] = 3
if h - min_y < max_x:
kp_mask[min_y, 0] = 1
kp_mask[h - 1, max_x] = 2
else:
kp_mask[min_y, 0] = 2
kp_mask[h - 1, max_x] = 1
elif amended_mask[h - 1, w - 1] != 0: # Bottom right corner
kp_mask[h - 1, w - 1] = 3
if h - min_y < w - min_x:
kp_mask[min_y, w - 1] = 1
kp_mask[h - 1, min_x] = 2
else:
kp_mask[min_y, w - 1] = 2
kp_mask[h - 1, min_x] = 1
# Get point coordinates in format [x y 1].T
p1 = np.array([0, 0, 1]).reshape((3, 1))
p2 = np.array([0, 0, 1]).reshape((3, 1))
p3 = np.array([0, 0, 1]).reshape((3, 1))
p1[0:2] = np.flipud(np.array(np.where(kp_mask == 1)))
p2[0:2] = np.flipud(np.array(np.where(kp_mask == 2)))
p3[0:2] = np.flipud(np.array(np.where(kp_mask == 3)))
return p1, p2, p3
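# Worked illustration of the labelling above (added note): for a tool touching the
# top-left corner whose entry pixels run along the top edge up to x = 50 and down the
# left edge to y = 120, p3 = [0, 0, 1]^T (the corner); since max_x (50) < max_y (120),
# the shorter side gets label 1, so p1 = [50, 0, 1]^T and p2 = [0, 120, 1]^T.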
def crop_rotated_single_border_case(self, im_rot, mask_rot, ep_mask_rot):
"""
@brief Crop a rotated tool so that it can be reattached to the border of the image.
@details This function only deals with the case where the tool to be rotated touches only
one corner.
"""
# Find the positions of the min/max x/y coordinates of the entrypoints in the rotated image
min_x = np.min(np.where(ep_mask_rot)[1])
max_x = np.max(np.where(ep_mask_rot)[1])
min_y = np.min(np.where(ep_mask_rot)[0])
max_y = np.max(np.where(ep_mask_rot)[0])
# Compute the four possible crops to keep the tool attached to the
# border
min_x_image = im_rot[:, :min_x]
max_x_image = im_rot[:, max_x:]
min_y_image = im_rot[:min_y,:]
max_y_image = im_rot[max_y:,:]
min_x_mask = mask_rot[:, :min_x]
max_x_mask = mask_rot[:, max_x:]
min_y_mask = mask_rot[:min_y,:]
max_y_mask = mask_rot[max_y:,:]
# Compute the amount of tool pixels that each crop leaves inside the image
images = [min_x_image, max_x_image, min_y_image, max_y_image]
masks = [min_x_mask, max_x_mask, min_y_mask, max_y_mask]
sides = ['right', 'left', 'bottom', 'top']
pixels = [np.nonzero(mask)[0].shape[0] for mask in masks]
# Keep the crop that leaves more tool inside the image
best_idx = np.argmax(pixels)
best_im = images[best_idx]
best_mask = masks[best_idx]
border_side = sides[best_idx]
# Create new mask of entrypoints based on the rotated and cropped image
best_ep_mask = np.zeros_like(best_mask, dtype=np.uint8)
if border_side == 'right':
best_ep_mask[:, -1] = 1
elif border_side == 'left':
best_ep_mask[:, 0] = 1
elif border_side == 'bottom':
best_ep_mask[-1,:] = 1
elif border_side == 'top':
best_ep_mask[0,:] = 1
best_ep_mask *= best_mask
# Pad or crop accordingly to come back to the original image size
new_im, new_mask, new_ep_mask = self.adjust_rotated_image(best_im,
best_mask, best_ep_mask, border_side)
return new_im, new_mask, new_ep_mask
def adjust_rotated_image(self, im, mask, ep_mask, border_side):
im = im.copy()
mask = mask.copy()
ep_mask = ep_mask.copy()
# Compute the coordinates of the tool within the new image
new_tl_y = np.min(np.where(mask)[0])
new_tl_x = np.min(np.where(mask)[1])
new_br_y = np.max(np.where(mask)[0])
new_br_x = np.max(np.where(mask)[1])
new_tool_height = new_br_y + 1 - new_tl_y
new_tool_width = new_br_x + 1 - new_tl_x
# Compute the proportions of vertical free space in the new image
new_top_free_space = new_tl_y
new_bottom_free_space = im.shape[0] - new_br_y
new_vertical_free_space = new_tl_y + (im.shape[0] - new_br_y)
new_top_free_space_prop = float(new_top_free_space) / new_vertical_free_space
new_bottom_free_space_prop = float(new_bottom_free_space) / new_vertical_free_space
# Compute the proportions of horizontal free space in the new image
new_left_free_space = new_tl_x
new_right_free_space = im.shape[1] - new_br_x
new_horizontal_free_space = new_left_free_space + new_right_free_space
new_left_free_space_prop = float(new_left_free_space) / new_horizontal_free_space
new_right_free_space_prop = float(new_right_free_space) / new_horizontal_free_space
if mask.shape[0] > self.mask.shape[0]: # We have to cut height
if border_side == 'top':
cut = self.im.shape[0]
im = im[:cut,:]
mask = mask[:cut,:]
ep_mask = ep_mask[:cut,:]
elif border_side == 'bottom':
cut = im.shape[0] - self.im.shape[0]
im = im[cut:,:]
mask = mask[cut:,:]
ep_mask = ep_mask[cut:,:]
else: # border is left or right, we cut from top and bottom
top_cut = int(round(new_tl_y - (self.im.shape[0] - new_tool_height) * new_top_free_space_prop))
bottom_cut = top_cut + self.im.shape[0]
im = im[top_cut:bottom_cut,:]
mask = mask[top_cut:bottom_cut,:]
ep_mask = ep_mask[top_cut:bottom_cut,:]
else: # We have to pad height
if border_side == 'top':
bpad = self.im.shape[0] - im.shape[0]
im = np.pad(im, ((0, bpad), (0, 0), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((0, bpad), (0, 0)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((0, bpad), (0, 0)), 'constant', constant_values=(0))
elif border_side == 'bottom':
tpad = self.im.shape[0] - im.shape[0]
im = np.pad(im, ((tpad, 0), (0, 0), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((tpad, 0), (0, 0)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((tpad, 0), (0, 0)), 'constant', constant_values=(0))
else: # border is left or right
extra = self.im.shape[0] - im.shape[0]
tpad = int(round(extra * new_top_free_space_prop))
bpad = extra - tpad
im = np.pad(im, ((tpad, bpad), (0, 0), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((tpad, bpad), (0, 0)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((tpad, bpad), (0, 0)), 'constant', constant_values=(0))
if mask.shape[1] > self.mask.shape[1]: # We have to cut width
if border_side == 'left':
cut = self.im.shape[1]
im = im[:, :cut]
mask = mask[:, :cut]
ep_mask = ep_mask[:, :cut]
elif border_side == 'right':
cut = im.shape[1] - self.im.shape[1]
im = im[:, cut:]
mask = mask[:, cut:]
ep_mask = ep_mask[:, cut:]
else: # border is top or bottom, we cut from left and right
left_cut = int(round(new_tl_x - (self.im.shape[1] - new_tool_width) \
* new_left_free_space_prop))
right_cut = left_cut + self.im.shape[1]
im = im[:, left_cut:right_cut]
mask = mask[:, left_cut:right_cut]
ep_mask = ep_mask[:, left_cut:right_cut]
else: # We have to pad width
if border_side == 'left':
rpad = self.im.shape[1] - im.shape[1]
im = np.pad(im, ((0, 0), (0, rpad), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((0, 0), (0, rpad)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((0, 0), (0, rpad)), 'constant', constant_values=(0))
elif border_side == 'right':
lpad = self.im.shape[1] - im.shape[1]
im = np.pad(im, ((0, 0), (lpad, 0), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((0, 0), (lpad, 0)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((0, 0), (lpad, 0)), 'constant', constant_values=(0))
else: # We have to pad left and right
extra = self.im.shape[1] - im.shape[1]
lpad = int(round(extra * new_left_free_space_prop))
rpad = extra - lpad
im = np.pad(im, ((0, 0), (lpad, rpad), (0, 0)), 'constant', constant_values=(0))
mask = np.pad(mask, ((0, 0), (lpad, rpad)), 'constant', constant_values=(0))
ep_mask = np.pad(ep_mask, ((0, 0), (lpad, rpad)), 'constant', constant_values=(0))
return im, mask, ep_mask
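# Hedged usage sketch (added illustration, not part of the original pipeline): a ToolCC
# is built from a BGR image, a binary (0/1) tool mask and a 0/255 mask of the pixels
# where the tool enters the frame; rotate() keeps the image size fixed and re-attaches
# the tool to a border, while configurations it cannot handle are left unrotated with a
# warning.
def _example_rotate_tool(im, mask, ep_mask, deg=90):
    tool = ToolCC(im, mask, ep_mask)
    tool.rotate(deg)
    return tool.im, tool.mask, tool.ep_mask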
class BloodDroplet:
def __init__(self, contour, height, width, min_hsv_hue=0, max_hsv_hue=10, min_hsv_sat=50,
max_hsv_sat=255, max_hsv_val=200):
"""
@param[in] contour Array of floating 2D points in image coordinates.
@param[in] height Height of the image with the blood droplet.
@param[in] width Width of the image with the blood droplet.
"""
contour = np.round(contour).astype(np.int)
# Correct those pixels outside the image plane
contour[contour < 0] = 0
contour[:, 0][contour[:, 0] > width - 1] = width - 1
contour[:, 1][contour[:, 1] > height - 1] = height - 1
# Generate segmentation mask for the blood droplet
self.seg = np.zeros((height, width), dtype=np.uint8)
self.seg[contour[:, 1], contour[:, 0]] = 255
# Get a point inside the contour to use it as filling seed: we use the centroid
cx = np.round(np.mean(contour[:, 0])).astype(np.int)
cy = np.round(np.mean(contour[:, 1])).astype(np.int)
# Fill the holes of the blood droplet contour
cv2.floodFill(self.seg, None, (cx, cy), 255)
# Generate an empty image of the blood sample
self.frame = np.zeros((height, width, 3), dtype=np.uint8)
# Initialise HSV image of the blood droplet
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
self.frame[:,:, 0] = np.random.randint(min_hsv_hue, max_hsv_hue)
self.frame[:,:, 1] = np.random.randint(min_hsv_sat, max_hsv_sat + 1)
min_x = np.min(np.where(self.seg)[1])
max_x = np.max(np.where(self.seg)[1])
min_y = np.min(np.where(self.seg)[0])
max_y = np.max(np.where(self.seg)[0])
drop_h = max_y + 1 - min_y
drop_w = max_x + 1 - min_x
# Generate Perlin noise for the V channel of the HSV image of the blood droplet
# min_scale = 1
# max_scale = 5
# scale = np.random.randint(min_scale, max_scale + 1)
scale = 1
noise = image.perlin2d_smooth(drop_h, drop_w, scale)
# Set the V channel to the Perlin noise
self.frame[min_y:max_y + 1, min_x:max_x + 1, 2] = \
np.round(noise * max_hsv_val).astype(np.uint8)
# Convert image back to BGR and zero those pixels that are not within the droplet mask
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_HSV2BGR)
self.frame[self.seg == 0] = 0
#@staticmethod
# def circle(r, n=100):
# """
# @returns a list of lists of (x, y) point coordinates.
# """
# return [(np.cos(2 * np.pi / n * x) * r, np.sin(2 * np.pi / n * x) * r) for x in range(0, n + 1)]
@staticmethod
def blob(delta=0.01, min_sf=0.1, max_sf=0.5):
"""
@brief This method generates a list of points representing a slightly deformed
circumference.
@param[in] delta Spacing between circle points. Smaller means
more circumference points will be generated.
@param[in] min_sf Minimum scaling factor applied to the noise. A smaller
value will produce blobs closer to a circle. Higher values will
make it closer to a flower.
@param[in] max_sf Maximum scaling factor.
@returns an array of [x, y] points.
"""
noise_gen = noise.perlin.SimplexNoise()
noise_gen.randomize()
points = []
scaling_factor = np.random.uniform(min_sf, max_sf)
for a in np.arange(0, 2 * np.pi, delta).tolist():
xoff = np.cos(a)
yoff = np.sin(a)
r = noise_gen.noise2(scaling_factor * xoff, scaling_factor * yoff) + 1.
x = r * xoff
y = r * yoff
points.append([x, y])
# Normalise contour to zero mean and one std
contour = np.array(points)
contour -= np.mean(contour, axis=0)
contour /= np.std(contour, axis=0)
return contour
@classmethod
def from_circle(cls, cx, cy, radius, height, width):
# Generate the droplet geometry using Perlin noise
drop_geo = np.array(BloodDroplet.blob())
# Transform the blob to have the desired radius
drop_geo *= radius
# Transform the blob to the desired location
drop_geo[:, 0] += cx
drop_geo[:, 1] += cy
return cls(drop_geo, height, width)
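# Hedged usage sketch (added illustration): generate a Perlin-deformed droplet of ~20 px
# radius centred at (cx, cy) and blend it onto a BGR frame with the project's blending
# module, as done in CustomBackend.blood_droplets below.
def _example_blood_droplet(frame, cx, cy):
    h, w = frame.shape[:2]
    droplet = BloodDroplet.from_circle(cx, cy, 20, h, w)
    # droplet.frame holds the coloured droplet, droplet.seg its 0/255 mask
    return blending.blend(droplet.frame, droplet.seg, frame.copy(), h // 2, w // 2, 'gaussian')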
class CustomBackend:
def __init__(self, custom_dic, rs=np.random.RandomState(None)):
self.custom_dic = custom_dic
self.rs = rs
def augment(self, raw_image, raw_label=None):
"""
@brief Each augmentation in the internal dictionary should have a method in this class.
@param[in] raw_image OpenCV/Numpy ndarray containing a BGR image.
@param[in] raw_label OpenCV/Numpy ndarray of shape (height, width) containing the
segmentation label.
@returns a pair of image, label. Both are of type numpy ndarray.
"""
assert(isinstance(raw_image, np.ndarray))
new_image = raw_image
new_label = raw_label
if raw_label is not None:
assert(isinstance(raw_label, np.ndarray))
# Randomise the order of the augmentation effects
keys = np.array(self.custom_dic.keys())
# np.random.shuffle(keys)
keys = keys.tolist()
# Apply all the augmentations that can be applied by this engine
for aug_method in keys:
if aug_method in AugmentationEngine.BACKENDS['custom']:
new_image, new_label = getattr(self, aug_method)(self.custom_dic[aug_method],
new_image, new_label)
else:
common.writeln_warn(aug_method + ' is unknown to the CustomBackend.')
return new_image, new_label
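# Hedged illustration of the dictionary that drives augment() (added note): keys must
# match method names of this class and also be listed in
# AugmentationEngine.BACKENDS['custom']; the parameter values below are placeholders.
# custom_dic = {
#     'tool_gray': 1,            # convert tools to grayscale
#     'tool_rotation': 45,       # random rotation in [-45, +45] degrees
#     'tool_shift': None,        # parameter unused by tool_shift
#     'tool_zoom': (0.8, 1.2),   # scaling factor range
#     'blend_border': 0.5,       # probability of adding a dark border
#     'blood_droplets': 0.5,     # probability of adding droplets
# }
# backend = CustomBackend(custom_dic)
# new_image, new_label = backend.augment(image_bgr, label)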
def tool_gray(self, param, raw_image, raw_label=None):
"""
@brief Converts the image to grayscale adding a tiny bit of uniform noise.
@param[in] param Either zero (does nothing) or one (converts to grayscale).
@param[in] raw_image Numpy ndarray, shape (height, width, 3).
@param[in] raw_label Numpy ndarray, shape (height, width).
@returns a pair of image, label. Shapes are like input shapes.
"""
assert(isinstance(raw_image, np.ndarray))
if raw_label is not None:
assert(isinstance(raw_label, np.ndarray))
new_image = None
new_label = raw_label # This method does not touch the segmentation mask
# Convert tools to grayscale with a bit of noise
if param == 0 or param is False:
# Don't do anything
new_image = raw_image
elif param == 1 or param is True:
new_image = image.CaffeinatedImage.gray_tools(raw_image)
else:
raise ValueError('Error, gray_tools() does not understand the parameter ' + str(param))
return new_image, new_label
def tool_rotation(self, rot_range_deg, raw_image, raw_label, margin=1, dilate_ksize=3):
"""
@brief Performs an independent random rotation on each individual tool in the image.
@param[in] rot_range_deg Range of rotation, e.g. 45 would mean from -45 to +45 degrees.
@returns the pair image, label both with the same rotation applied.
"""
assert(rot_range_deg >= 0 and rot_range_deg <= 360)
if raw_label is None:
raise ValueError('tool_rotate is an augmentation that only works if there is a label.')
# Get a random angle of rotation
ang = np.random.randint(-rot_range_deg, rot_range_deg + 1)
# Convert mask to 0/1
label = np.zeros_like(raw_label, dtype=np.uint8)
label[raw_label != 0] = 1
# Detect entrypoints
ep_mask = geometry.entry_points_in_mask(label, margin, dilate_ksize).astype(np.uint8)
# If there is more than one insertion point (i.e. more than one connected component
# in the insertion mask)
if np.amax(ep_mask) > 1:
# If the tool has several insertion points, only rotations that are multiples of
# 180 degrees can be performed while maintaining a realistic appearance for the tool
ang = np.random.choice(np.arange(0, rot_range_deg * 2, 180))
# Turn mask back into a 0/255 mask
ep_mask[ep_mask == 1] = 255
# Create ToolCC object
tool = ToolCC(raw_image, raw_label, ep_mask)
# Rotate tool
tool.rotate(ang)
return tool.im, tool.mask
def tool_shift(self, param, raw_image, raw_label):
"""
@param[in] param is not used in this augmentation method.
"""
# Find out which sides are being touched by the mask
sides = geometry.entry_sides_in_mask(raw_label)
# if len(sides) > 2:
# common.writeln_warn('The image provided cannot be shifted, it has ' \
# + 'more than two points of contact with the borders.')
new_image = raw_image
new_label = raw_label
# Horizontal shift
if sides == set(['top']) or sides == set(['bottom']) or sides == set(['top', 'bottom']):
if common.randbin():
new_image, new_label = CustomBackend.shift_left(raw_image, raw_label)
else:
new_image, new_label = CustomBackend.shift_right(raw_image, raw_label)
else:
common.writeln_warn('The image provided cannot be shifted horizontally.')
# Vertical shift
if sides == set(['left']) or sides == set(['right']) or sides == set(['left', 'right']):
if common.randbin():
new_image, new_label = CustomBackend.shift_top(raw_image, raw_label)
else:
new_image, new_label = CustomBackend.shift_bottom(raw_image, raw_label)
else:
common.writeln_warn('The image provided cannot be shifted vertically.')
# TODO: it is possible to shift corner cases, but they are not considered in this code
return new_image, new_label
def blend_border(self, param, raw_image, raw_label, max_pad=50, max_std=10, noise_p=0.5, rect_p=0.5):
"""
@param[in] param Probability of adding a border to the image.
@param[in] max_pad Maximum border padding that can be added to the image.
"""
black_im = raw_image
black_label = raw_label
# Add border to the image
h = raw_image.shape[0]
w = raw_image.shape[1]
proba = self.rs.binomial(1, param)
if proba:
# Compute random padding while respecting the form factor of the image
top_pad = self.rs.randint(1, max_pad + 1)
bottom_pad = top_pad
left_pad = int(round(0.5 * (((w * (h + 2 * top_pad)) / h) - w)))
right_pad = left_pad
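# Note on the formula above (added comment): padding top and bottom by `top_pad` changes
# the height to h + 2*top_pad; to keep the original aspect ratio w/h the new width must
# be w*(h + 2*top_pad)/h, and each side receives half of the extra width, i.e.
# left_pad = 0.5*((w*(h + 2*top_pad)/h) - w).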
# Create the black background
black_im = np.zeros((h + top_pad + bottom_pad, w + left_pad + right_pad, 3), dtype=np.uint8)
black_label = np.zeros((h + top_pad + bottom_pad, w + left_pad + right_pad), dtype=np.uint8)
# Add Gaussian noise to the border
if self.rs.binomial(1, noise_p):
size = black_im.shape[0] * black_im.shape[1] * 3
random_std = self.rs.randint(1, max_std)
noise = self.rs.normal(0, random_std, size).reshape((black_im.shape[0], black_im.shape[1], 3))
black_im = np.round(np.clip(black_im + noise, 0, max_std)).astype(np.uint8)
# Cropping from the original image to the new one
if self.rs.binomial(1, rect_p):
# Rectangular crop
black_im[top_pad:-bottom_pad, left_pad:-right_pad] = raw_image
black_label[top_pad:-bottom_pad, left_pad:-right_pad] = raw_label
else:
# Circular crop
half_h = h // 2
half_w = w // 2
radius = self.rs.randint(min(half_h, half_w), max(half_h, half_w) + 1)
color = 255
prev_mask = np.zeros((h, w), dtype=np.uint8)
cv2.circle(prev_mask, (prev_mask.shape[1] // 2, prev_mask.shape[0] // 2), radius, color, -1)
next_mask = np.pad(prev_mask, ((top_pad, bottom_pad), (left_pad, right_pad)), 'constant', constant_values=(0))
black_im[next_mask == color] = raw_image[prev_mask == color]
black_label[next_mask == color] = raw_label[prev_mask == color]
return black_im, black_label
def blood_droplets(self, param, raw_image, raw_label, min_ndrop=5, max_ndrop=10,
min_radius=1, max_radius=32, blending_mode='gaussian'):
new_image = raw_image
new_label = raw_label
# We add blood droplets following the probability given in the command line
proba = self.rs.binomial(1, param)
if proba:
# Get those pixel locations belonging to the tool, so that we choose randomly and place
# droplets inside the tool
tool_pixels = np.nonzero(new_label)
# Generate and blend blood droplets onto the image
ndrop = self.rs.randint(min_ndrop, max_ndrop + 1)
height = new_image.shape[0]
width = new_image.shape[1]
# droplets = []
for i in range(ndrop):
# Generate droplet
chosen_centre = self.rs.randint(tool_pixels[0].shape[0])
cx = tool_pixels[1][chosen_centre]
cy = tool_pixels[0][chosen_centre]
radius = self.rs.randint(min_radius, max_radius)
droplet = BloodDroplet.from_circle(cx, cy, radius, height, width)
# Blend droplet onto the image using the selected blending mode
image_cx = int(round(.5 * new_image.shape[1]))
image_cy = int(round(.5 * new_image.shape[0]))
new_image = blending.blend(droplet.frame, droplet.seg, new_image.copy(), image_cy,
image_cx, blending_mode)
# Generate Perlin noise image
# scale = np.random.randint(min_scale, max_scale + 1)
# perlin = image.perlin2d_smooth(raw_image.shape[0], raw_image.shape[1],
# scale)
# Create image with Perlin noise in the red channel
# blood = np.zeros_like(raw_image)
# blood[:, :, 2] = np.round(perlin * 255.0).astype(np.uint8)
# Compute the mean lightness (Value on HSV) of the original tool
# hsv = cv2.cvtColor(raw_image, cv2.COLOR_BGR2HSV)
# mean = np.mean(hsv[:, :, 2])
# Add blood reflections to the tool
# new_image = np.round(.25 * raw_image + .75 * blood).astype(np.uint8)
# Compute the mean lightness (HSV value) of the new tool
# new_hsv = cv2.cvtColor(new_image, cv2.COLOR_BGR2HSV)
# new_mean = np.mean(new_hsv[:, :, 2])
return new_image, new_label
@staticmethod
def contiguous(sides):
if sides == set(['top', 'right']) \
or sides == set(['right', 'bottom']) \
or sides == set(['bottom', 'left']) \
or sides == set(['left', 'top']):
return True
return False
def tool_zoom(self, factor_range, raw_image, raw_label):
"""
@brief The size of the tool is randomly changed within the 'factor_range' scaling interval.
@param[in] factor_range Range of change of the tool size.
@returns a randomly zoomed image and label.
"""
# Initially, we just take image and label as they are
new_image = None
new_label = None
# Compute the percentage of change
min_range = int(round(factor_range[0] * 100.))
max_range = int(round(factor_range[1] * 100.))
perc = np.random.randint(min_range, max_range + 1)
# Perform zoom operation (can be either zoom in or out)
if perc > 100:
crop_h = int(round(raw_image.shape[0] / (perc / 100.)))
crop_w = int(round(raw_image.shape[1] / (perc / 100.)))
crop_h_half = crop_h // 2
crop_w_half = crop_w // 2
# Choose a random point in the tool as crop centre
tool_pixels = np.nonzero(raw_label)
# Remove those points that would make the crop go out of the image
min_x = crop_w_half
min_y = crop_h_half
max_x = raw_image.shape[1] - crop_w_half
max_y = raw_image.shape[0] - crop_h_half
coords = [[x, y] for x, y in zip(tool_pixels[1].tolist(), tool_pixels[0].tolist()) \
if x >= min_x and x <= max_x and y >= min_y and y <= max_y]
if len(coords):
centre_idx = np.random.randint(len(coords))
cx = coords[centre_idx][0]
cy = coords[centre_idx][1]
else:
possible_centres = []
for x in range(min_x, max_x + crop_w_half, crop_w_half):
for y in range(min_y, max_y + crop_h_half, crop_h_half):
if np.nonzero(raw_label[y - crop_h_half:y + crop_h_half, x - crop_w_half:x + crop_w_half])[0].shape[0] > 0:
possible_centres.append([x, y])
cx, cy = possible_centres[np.random.randint(len(possible_centres))]
# Perform the crop
new_image = raw_image[cy - crop_h_half:cy + crop_h_half,
cx - crop_w_half:cx + crop_w_half]
new_label = raw_label[cy - crop_h_half:cy + crop_h_half,
cx - crop_w_half:cx + crop_w_half]
else:
# Compute the new size
new_h = int(round(raw_label.shape[0] * np.sqrt(perc / 100.)))
new_w = int(round(raw_label.shape[1] * np.sqrt(perc / 100.)))
# Compute the size of the offset (can be a crop or a pad)
offset_h = np.abs(new_h - raw_label.shape[0])
offset_w = np.abs(new_w - raw_label.shape[1])
offset_top, offset_left, offset_bottom, offset_right = \
CustomBackend.compute_padding_offsets(raw_label, offset_h,
offset_w)
new_image, new_label = CustomBackend.pad_top(raw_image, raw_label, offset_top)
new_image, new_label = CustomBackend.pad_bottom(new_image, new_label, offset_bottom)
new_image, new_label = CustomBackend.pad_right(new_image, new_label, offset_right)
new_image, new_label = CustomBackend.pad_left(new_image, new_label, offset_left)
return new_image, new_label
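# Hedged note on tool_zoom semantics (added comment): with factor_range=(0.8, 1.2) a
# percentage perc is drawn from [80, 120]; perc > 100 crops a window of size
# (h/(perc/100), w/(perc/100)) centred on a random tool pixel (zoom in), while
# perc <= 100 pads the image so the tool occupies a smaller fraction of the frame
# (zoom out), keeping it attached to the borders it already touches.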
@staticmethod
def compute_cropping_offsets(raw_label, offset_h, offset_w):
# Compute instrument bounding box
min_x = np.min(np.where(raw_label)[1])
max_x = np.max(np.where(raw_label)[1])
min_y = np.min(np.where(raw_label)[0])
max_y = np.max(np.where(raw_label)[0])
# Compute proportions that are free to the sides of the bounding box
top_prop = float(min_y) / raw_label.shape[0]
bottom_prop = 1. - top_prop
left_prop = float(min_x) / raw_label.shape[1]
right_prop = 1. - left_prop
# Compute offsets according to proportions
offset_top = int(round(offset_h * top_prop))
offset_bottom = offset_h - offset_top
offset_left = int(round(offset_w * left_prop))
offset_right = offset_w - offset_left
# Compute maximum offsets
max_offset_top = min_y
max_offset_bottom = raw_label.shape[0] - max_y
max_offset_left = min_x
max_offset_right = raw_label.shape[1] - max_x
# Now the padding depends on which borders the tool is touching
sides = geometry.entry_sides_in_mask(raw_label)
if len(sides) == 1: # Only one border touched
if 'left' in sides:
offset_left = 0
offset_right = offset_w
offset_top = min(offset_top, max_offset_top)
offset_bottom = min(offset_bottom, max_offset_bottom)
elif 'top' in sides:
offset_top = 0
offset_bottom = offset_h
offset_left = min(offset_left, max_offset_left)
offset_right = min(offset_right, max_offset_right)
elif 'right' in sides:
offset_right = 0
offset_left = offset_w
offset_top = min(offset_top, max_offset_top)
offset_bottom = min(offset_bottom, max_offset_bottom)
elif 'bottom' in sides:
offset_bottom = 0
offset_top = offset_h
offset_left = min(offset_left, max_offset_left)
offset_right = min(offset_right, max_offset_right)
elif len(sides) == 2 and CustomBackend.contiguous(sides): # Two contiguous borders touched
if sides == set(['top', 'right']):
offset_top = 0
offset_right = 0
offset_bottom = offset_h
offset_left = offset_w
elif sides == set(['right', 'bottom']):
offset_right = 0
offset_bottom = 0
offset_left = offset_w
offset_top = offset_h
elif sides == set(['bottom', 'left']):
offset_bottom = 0
offset_left = 0
offset_top = offset_h
offset_right = offset_w
elif sides == set(['left', 'top']):
offset_left = 0
offset_top = 0
offset_right = offset_w
offset_bottom = offset_h
elif len(sides) == 2 and not CustomBackend.contiguous(sides):
if sides == set(['top', 'bottom']):
top_prop = np.random.uniform(0., 1.)
offset_top = int(round(top_prop * offset_h))
offset_bottom = offset_h - offset_top
elif sides == set(['left', 'right']):
left_prop = np.random.uniform(0., 1.)
offset_left = int(round(left_prop * offset_w))
offset_right = offset_w - offset_left
elif len(sides) == 3:
if 'top' not in sides:
offset_top = offset_h
offset_bottom = 0
left_prop = np.random.uniform(0., 1.)
offset_left = int(round(left_prop * offset_w))
offset_right = offset_w - offset_left
elif 'left' not in sides:
offset_left = offset_w
offset_right = 0
top_prop = np.random.uniform(0., 1.)
offset_top = int(round(top_prop * offset_h))
offset_bottom = offset_h - offset_top
elif 'bottom' not in sides:
offset_bottom = offset_h
offset_top = 0
left_prop = np.random.uniform(0., 1.)
offset_left = int(round(left_prop * offset_w))
offset_right = offset_w - offset_left
elif 'right' not in sides:
offset_right = offset_w
offset_left = 0
top_prop = np.random.uniform(0., 1.)
offset_top = int(round(top_prop * offset_h))
offset_bottom = offset_h - offset_top
elif len(sides) == 4:
# Two non-contiguous borders or more than two borders touched
top_prop = np.random.uniform(0., 1.)
offset_top = int(round(top_prop * offset_h))
offset_bottom = offset_h - offset_top
left_prop = np.random.uniform(0., 1.)
offset_left = int(round(left_prop * offset_w))
offset_right = offset_w - offset_left
else:
raise ValueError('Unexpected tool mask. Cannot compute cropping offsets.')
return offset_top, offset_left, offset_bottom, offset_right
@staticmethod
def compute_padding_offsets(raw_label, offset_h, offset_w):
# Compute instrument bounding box
min_x = np.min(np.where(raw_label)[1])
min_y = np.min(np.where(raw_label)[0])
# Compute proportions that are free to the sides of the bounding box
top_prop = float(min_y) / raw_label.shape[0]
bottom_prop = 1. - top_prop
left_prop = float(min_x) / raw_label.shape[1]
right_prop = 1. - left_prop
# Compute offsets according to proportions
offset_top = int(round(offset_h * top_prop))
offset_bottom = offset_h - offset_top
offset_left = int(round(offset_w * left_prop))
offset_right = offset_w - offset_left
# Now the padding depends on which borders the tool is touching
sides = geometry.entry_sides_in_mask(raw_label)
if len(sides) == 1: # Only one border touched
if 'left' in sides:
offset_left = 0
offset_right = offset_w
elif 'top' in sides:
offset_top = 0
offset_bottom = offset_h
elif 'right' in sides:
offset_right = 0
offset_left = offset_w
elif 'bottom' in sides:
offset_bottom = 0
offset_top = offset_h
elif len(sides) == 2 and CustomBackend.contiguous(sides): # Two contiguous borders touched
if sides == set(['top', 'right']):
offset_top = 0
offset_right = 0
offset_bottom = offset_h
offset_left = offset_w
elif sides == set(['right', 'bottom']):
offset_right = 0
offset_bottom = 0
offset_left = offset_w
offset_top = offset_h
elif sides == set(['bottom', 'left']):
offset_bottom = 0
offset_left = 0
offset_top = offset_h
offset_right = offset_w
elif sides == set(['left', 'top']):
offset_left = 0
offset_top = 0
offset_right = offset_w
offset_bottom = offset_h
else:
offset_top = 0
offset_left = 0
offset_bottom = 0
offset_right = 0
common.writeln_warn('Tool touches two non-contiguous sides or more than two sides. Cannot zoom.')
'''
elif len(sides) == 2 and not CustomBackend.contiguous(sides):
if sides == set(['top', 'bottom']):
offset_top = offset_h / 2
offset_bottom = offset_top
elif sides == set(['left', 'right']):
offset_left = offset_w / 2
offset_right = offset_left
elif len(sides) == 3:
if 'top' not in sides:
offset_top = offset_h
offset_bottom = 0
offset_left = offset_w / 2
offset_right = offset_w / 2
elif 'left' not in sides:
offset_left = offset_w
offset_right = 0
offset_top = offset_h / 2
offset_bottom = offset_h / 2
elif 'bottom' not in sides:
offset_bottom = offset_h
offset_top = 0
offset_left = offset_w / 2
offset_right = offset_w / 2
elif 'right' not in sides:
offset_right = offset_w
offset_left = 0
offset_top = offset_h / 2
offset_bottom = offset_h / 2
elif len(sides) == 4:
# Two non-contiguous borders or more than two borders touched
offset_top = offset_h / 2
offset_bottom = offset_h / 2
offset_left = offset_w / 2
offset_right = offset_w / 2
else:
raise ValueError('Unexpected tool mask. Cannot compute \
padding offsets.')
'''
return offset_top, offset_left, offset_bottom, offset_right
@staticmethod
def pad_top(im, label, pad):
new_im = np.pad(im, ((pad, 0), (0, 0), (0, 0)), 'constant', constant_values=(0))
new_label = np.pad(label, ((pad, 0), (0, 0)), 'constant', constant_values=(0))
return new_im, new_label
@staticmethod
def pad_bottom(im, label, pad):
new_im = np.pad(im, ((0, pad), (0, 0), (0, 0)), 'constant', constant_values=(0))
new_label = np.pad(label, ((0, pad), (0, 0)), 'constant', constant_values=(0))
return new_im, new_label
@staticmethod
def pad_left(im, label, pad):
new_im = np.pad(im, ((0, 0), (pad, 0), (0, 0)), 'constant', constant_values=(0))
new_label = np.pad(label, ((0, 0), (pad, 0)), 'constant', constant_values=(0))
return new_im, new_label
@staticmethod
def pad_right(im, label, pad):
new_im = np.pad(im, ((0, 0), (0, pad), (0, 0)), 'constant', constant_values=(0))
new_label = np.pad(label, ((0, 0), (0, pad)), 'constant', constant_values=(0))
return new_im, new_label
@staticmethod
def crop_top(im, label, crop):
if crop != 0:
return im[crop:,:], label[crop:,:]
else:
return im, label
@staticmethod
def crop_bottom(im, label, crop):
if crop != 0:
return im[:-crop,:], label[:-crop,:]
else:
return im, label
@staticmethod
def crop_left(im, label, crop):
if crop != 0:
return im[:, crop:], label[:, crop:]
else:
return im, label
@staticmethod
def crop_right(im, label, crop):
if crop != 0:
return im[:, :-crop], label[:, :-crop]
else:
return im, label
@staticmethod
def shift_left(raw_image, raw_label):
new_image = raw_image
new_label = raw_label
# Find centre of mass of the tool
cm_y, cm_x = scipy.ndimage.center_of_mass(raw_label)
cm_y = int(round(cm_y))
cm_x = int(round(cm_x))
# Compute shift distance
max_shift = cm_x
shift = np.random.randint(max_shift)
# Shift image
if shift != 0:
shape_y, shape_x, shape_z = raw_image.shape
image_x_zeros = np.zeros((shape_y, shift, shape_z), dtype=np.uint8)
label_x_zeros = np.zeros((shape_y, shift), dtype=np.uint8)
new_image = np.concatenate((raw_image[:, shift:], image_x_zeros), axis=1)
new_label = np.concatenate((raw_label[:, shift:], label_x_zeros), axis=1)
return new_image, new_label
@staticmethod
def shift_right(raw_image, raw_label):
new_image = raw_image
new_label = raw_label
# Find centre of mass of the tool
cm_y, cm_x = scipy.ndimage.center_of_mass(raw_label)
cm_y = int(round(cm_y))
cm_x = int(round(cm_x))
# Compute shift distance
max_shift = raw_label.shape[1] - cm_x
shift = np.random.randint(max_shift)
# Shift image
if shift != 0:
shape_y, shape_x, shape_z = raw_image.shape
image_x_zeros =
|
np.zeros((shape_y, shift, shape_z), dtype=np.uint8)
|
numpy.zeros
|
# Copyright (c) 2020-2022 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import pandapipes
import os
import pandas as pd
import numpy as np
from pandapipes.test.pipeflow_internals import internals_data_path
def test_valve():
"""
:return:
:rtype:
"""
net = pandapipes.create_empty_network("net", add_stdtypes=True)
j0 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=5)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=3)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=6)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=9)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=20)
j5 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=45)
j6 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=4)
j7 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15, index=8)
pandapipes.create_ext_grid(net, j0, 5, 283.15, type="p")
pandapipes.create_pipe_from_parameters(net, j0, j1, diameter_m=.1, k_mm=1, length_km=1.)
pandapipes.create_pipe_from_parameters(net, j3, j4, diameter_m=.1, k_mm=1, length_km=.5)
pandapipes.create_pipe_from_parameters(net, j2, j4, diameter_m=.1, k_mm=1, length_km=.5)
pandapipes.create_pipe_from_parameters(net, j5, j4, diameter_m=.1, k_mm=1, length_km=.35)
pandapipes.create_pipe_from_parameters(net, j1, j6, diameter_m=.1, k_mm=1, length_km=.1,
loss_coefficient=9000)
pandapipes.create_pipe_from_parameters(net, j1, j7, diameter_m=.1, k_mm=1, length_km=.1,
loss_coefficient=9000)
pandapipes.create_valve(net, j6, j2, diameter_m=0.1, opened=False)
pandapipes.create_valve(net, j7, j3, diameter_m=0.1, opened=True)
pandapipes.create_sink(net, j5, 0.11667)
pandapipes.create_fluid_from_lib(net, "lgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=10, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4,
tol_v=1e-4)
data = pd.read_csv(os.path.join(internals_data_path, "test_valve.csv"), sep=';')
data_p = data['p'].dropna(inplace=False)
data_v = data['v'].dropna(inplace=False)
res_junction = net.res_junction.p_bar.values
res_pipe = net.res_pipe.v_mean_m_per_s.values
zeros = res_pipe == 0
test_zeros = data_v.values == 0
check_zeros = zeros == test_zeros
assert np.all(check_zeros)
p_diff = np.abs(1 - res_junction / data_p[data_p != 0].values)
v_diff =
|
np.abs(1 - res_pipe[res_pipe != 0] / data_v[data_v != 0].values)
|
numpy.abs
|
from coopihc.base.Space import Space
from coopihc.base.utils import SpaceNotSeparableError
from coopihc.helpers import flatten
from coopihc.base.elements import integer_space, integer_set
import numpy
import json
import pytest
def test_init_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
# prop and attributes
assert s.dtype == numpy.int16
assert s.N == 3
assert s.shape == ()
def test_contains_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert 1 in s
assert [1] in s
assert [[1]] in s
assert numpy.array(1) in s
assert numpy.array([1]) in s
assert numpy.array([[1]]) in s
assert numpy.array(2) in s
assert numpy.array(3) in s
assert numpy.array([1.0]) in s
assert numpy.array([2]) in s
assert 4 not in s
assert -1 not in s
def test_CatSet():
test_init_CatSet()
test_contains_CatSet()
def test_init_Numeric():
s = Space(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
# prop and attributes
assert s.dtype == numpy.float32
assert (s.high == numpy.ones((2, 2))).all()
assert (s.low == -numpy.ones((2, 2))).all()
assert s.shape == (2, 2)
s = Space(low=-numpy.float64(1), high=numpy.float64(1))
def test_contains_Numeric():
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
assert [0.0, 0.0, 0.0, 0.0] not in s
assert [[0.0, 0.0], [0.0, 0.0]] in s
assert numpy.array([0.0, 0.0, 0.0, 0.0]) not in s
assert numpy.array([[0.0, 0.0], [0.0, 0.0]]) in s
assert 1.0 * numpy.ones((2, 2)) in s
assert -1.0 * numpy.ones((2, 2)) in s
assert numpy.ones((2, 2), dtype=numpy.int16) in s
def test_Numeric():
test_init_Numeric()
test_contains_Numeric()
def test_sample_CatSet():
s = Space(array=numpy.arange(1000), seed=123)
q = Space(array=numpy.arange(1000), seed=123)
r = Space(array=numpy.arange(1000), seed=12)
_s, _q, _r = s.sample(), q.sample(), r.sample()
assert _s in s
assert _q in q
assert _r in r
assert _s == _q
assert _s != _r
s = Space(array=numpy.arange(4), seed=123)
scont = {}
for i in range(1000):
_s = s.sample()
scont.update({str(_s): _s})
assert _s in s
assert sorted(scont.values()) == [0, 1, 2, 3]
def test_sample_shortcuts():
s = integer_space(N=3, start=-1, dtype=numpy.int8)
s.sample()
q = integer_set(2)
q.sample()
def test_sample_Numeric():
s = Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=123)
q = Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=123)
r = Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=12)
_s, _q, _r = s.sample(), q.sample(), r.sample()
assert _s in s
assert _q in q
assert _r in r
assert (_s == _q).all()
assert (_s != _r).any()
for i in range(1000):
assert s.sample() in s
def test_sample():
test_sample_CatSet()
test_sample_shortcuts()
test_sample_Numeric()
def test_dtype_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s.dtype == numpy.int16
assert s.sample().dtype == numpy.int16
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16), dtype=numpy.int64)
assert s.dtype == numpy.int64
assert s.sample().dtype == numpy.int64
def test_dtype_Numeric():
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
assert s.dtype == numpy.float64
assert s.sample().dtype == numpy.float64
s = Space(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s.dtype == numpy.float32
assert s.sample().dtype == numpy.float32
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
dtype=numpy.float32,
)
assert s.dtype == numpy.float32
assert s.sample().dtype == numpy.float32
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
dtype=numpy.int16,
)
assert s.dtype == numpy.int16
assert s.sample().dtype == numpy.int16
def test_dtype():
test_dtype_CatSet()
test_dtype_Numeric()
def test_equal_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s == Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s != Space(array=numpy.array([1, 2, 3, 4], dtype=numpy.int16))
def test_equal_Numeric():
s = Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert s == Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert s != Space(low=-1.5 * numpy.ones((2, 2)), high=2 * numpy.ones((2, 2)))
assert s != Space(low=-numpy.ones((1,)), high=numpy.ones((1,)))
def test_equal():
test_equal_CatSet()
test_equal_Numeric()
def test_serialize_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert (
json.dumps(s.serialize())
== '{"space": "CatSet", "seed": null, "array": [1, 2, 3], "dtype": "dtype[int16]"}'
)
def test_serialize_Numeric():
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
assert (
json.dumps(s.serialize())
== '{"space": "Numeric", "seed": null, "low,high": [[[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [1.0, 1.0]]], "shape": [2, 2], "dtype": "dtype[float64]"}'
)
def test_serialize():
test_serialize_CatSet()
test_serialize_Numeric()
def test_iter_CatSet():
pass
def test_iter_Numeric():
s = Space(low=numpy.array([[-1, -2], [-3, -4]]), high=numpy.array([[1, 2], [3, 4]]))
for i, _s in enumerate(s):
if i == 0:
assert _s == Space(low=numpy.array([-1, -2]), high=-numpy.array([-1, -2]))
if i == 1:
assert _s == Space(low=numpy.array([-3, -4]), high=-numpy.array([-3, -4]))
for j, _ss in enumerate(_s):
if i == 0 and j == 0:
assert _ss == Space(low=-numpy.int64(1), high=numpy.int64(1))
elif i == 0 and j == 1:
assert _ss == Space(low=-numpy.int64(2), high=numpy.int64(2))
elif i == 1 and j == 0:
assert _ss == Space(low=-numpy.int64(3), high=numpy.int64(3))
elif i == 1 and j == 1:
assert _ss == Space(low=-numpy.int64(4), high=numpy.int64(4))
def test_iter():
test_iter_CatSet()
test_iter_Numeric()
def test_cartesian_product_CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
q = Space(array=numpy.array([-3, -2, -1], dtype=numpy.int16))
cp, shape = Space.cartesian_product(s, q)
assert (
cp
== numpy.array(
[
[1, -3],
[1, -2],
[1, -1],
[2, -3],
[2, -2],
[2, -1],
[3, -3],
[3, -2],
[3, -1],
]
)
).all()
def test_cartesian_product_Numeric_continuous():
s = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
q = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
cp, _shape = Space.cartesian_product(s, q)
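# A continuous Numeric space cannot be enumerated, so the Cartesian product
# appears to fall back to a single [None, None] placeholder row.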
assert (cp == numpy.array([[None, None]])).all()
def test_cartesian_product_Numeric_discrete():
s = Space(low=-1, high=1, dtype=numpy.int64)
q = Space(low=-3, high=1, dtype=numpy.int64)
cp, _shape = Space.cartesian_product(s, q)
def test_cartesian_product_Numeric():
test_cartesian_product_Numeric_continuous()
test_cartesian_product_Numeric_discrete()
def test_cartesian_product_mix():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
q = Space(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
r = Space(array=numpy.array([5, 6, 7], dtype=numpy.int16))
cp, shape = Space.cartesian_product(s, q, r)
assert (
cp
== numpy.array(
[
[1, None, 5],
[1, None, 6],
[1, None, 7],
[2, None, 5],
[2, None, 6],
[2, None, 7],
[3, None, 5],
[3, None, 6],
[3, None, 7],
]
)
).all()
assert shape == [(), (2, 2), ()]
def test_cartesian_product_single():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
cp, shape = Space.cartesian_product(s)
assert (cp == numpy.array([[1], [2], [3]])).all()
assert shape == [()]
def test_cartesian_product():
test_cartesian_product_CatSet()
test_cartesian_product_Numeric()
test_cartesian_product_mix()
test_cartesian_product_single()
def test__getitem__CatSet():
s = Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
with pytest.raises(SpaceNotSeparableError):
s[0]
assert s[...] == s
assert s[:] == s
def test__getitem__int_interval():
s = Space(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s[0] == Space(
low=-numpy.ones((2,), dtype=numpy.float32),
high=numpy.ones((2,), dtype=numpy.float32),
)
def test__getitem__slice_interval():
s = Space(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s[:, 0] == Space(
low=-
|
numpy.ones((2,), dtype=numpy.float32)
|
numpy.ones
|
# Copyright 2020, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer at the California Institute of Technology.
# This software may be subject to U.S. export control laws.
# By accepting this software, the user agrees to comply with all applicable U.S. export laws and regulations.
# User has the responsibility to obtain export licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign persons.
# Codes last tested 05 April 2020 by MW and IF
import xarray as xr
import os
import numpy as np
from pyresample import kd_tree, geometry
import utm
import time
import math
from pyproj import Proj, transform
import swath_references as ref
from pathlib import Path
########################################################################################################################
# These are functions used throughout all main steps
def create_directory_structure(dataFolder,resolution,fileIndices,projection):
if 'EPSG' in projection:
baseDirectory = 'Resampled_'+str(resolution)+'m_'+projection.split(':')[1]
else:
baseDirectory = 'Resampled_'+str(resolution)+'m'
tmpDir = dataFolder / Path(baseDirectory)
print('Creating ' + str(tmpDir))
if not tmpDir.exists():
try:
tmpDir.mkdir(exist_ok=True)
except:
print('.. could not create ' + str(tmpDir))
# if baseDirectory not in os.listdir(dataFolder):
# os.mkdir(os.path.join(dataFolder,baseDirectory))
for fileIndex in fileIndices:
# if 'OMG_Ice_GLISTIN-A_L3_'+'{:02d}'.format(fileIndex) not in os.listdir(os.path.join(dataFolder,baseDirectory)):
# os.mkdir(os.path.join(dataFolder,baseDirectory,'OMG_Ice_GLISTIN-A_L3_'+'{:02d}'.format(fileIndex)))
tmpDir2 = tmpDir.joinpath('OMG_Ice_GLISTIN-A_L3_' + '{:02d}'.format(fileIndex))
print('Creating ' + str(tmpDir2))
try:
tmpDir2.mkdir(exist_ok=True)
except:
print('.. could not create ' + str(tmpDir2))
def read_metadata_dictionary(dataFolder,fileID):
swathID = ref.fileNameToSwathID(fileID)
# print(' Original data file: '+swathID)
metadata_dictionary={}
metadata_file = dataFolder.joinpath('Raw',fileID.split('_')[-2][:4],'Metadata',swathID+'_metadata.txt')
print(' Reading metadata from ' + str(metadata_file))
with open(str(metadata_file), "r") as f:
# with open(os.path.join(dataFolder,'Raw',fileID.split('_')[-2][:4],'Metadata',swathID+'_metadata.txt'), "r") as f:
lines = f.readlines()
idx = 0
for line in lines:
if line.startswith("GRD Latitude Lines"): # Start of data should start at index 62 but checking just in case
startofdata = idx
break
idx += 1
# Assuming all data is on successive lines and there are 14 data points
for r in range(14):
ln = lines[startofdata + r].split()
# The value will be at the 5th index for the first 6 lines and at the 6th index for the last 8
if r < 2:
metadata_dictionary[' '.join(ln[:3])]=int(ln[5])
elif r>=2 and r<6:
metadata_dictionary[' '.join(ln[:3])] = float(ln[5])
else:
metadata_dictionary[' '.join(ln[:4])] = float(ln[6])
return(metadata_dictionary)
def reproject_point(point, inputCRS, outputCRS):
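# Note: callers pass point as [longitude, latitude]; the coordinates are swapped
# here, presumably to match the (lat, lon) axis order pyproj uses for EPSG:4326.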
x,y = transform(inputCRS, outputCRS, point[1], point[0])
return ([x,y])
########################################################################################################################
# step 1: for a given swath, find an extent which encompasses all available DEMs
def find_common_index_extent(dataFolder,fileIndex,printStatus,useMetadata=False):
if useMetadata:
if printStatus:
print(' Step 1: Finding a common extent for all DEMs with index '+str(fileIndex))
min_lon = 360
max_lon = -360
min_lat = 90
max_lat = -90
addFileData=True
for year in [2016,2017,2018,2019]:
if year==2016:
if fileIndex in ref.fileIndicesMissingIn2016():
addFileData=False
else:
addFileData=True
if addFileData:
fileID = ref.indexAndYearToFileID(fileIndex, year)
metadata_dictionary = read_metadata_dictionary(dataFolder,fileID)
min_swath_lon = metadata_dictionary['GRD Starting Longitude']
max_swath_lon = metadata_dictionary['GRD Starting Longitude'] + metadata_dictionary['GRD Longitude Samples'] * metadata_dictionary['GRD Longitude Spacing']
min_swath_lat = metadata_dictionary['GRD Starting Latitude'] + metadata_dictionary['GRD Latitude Lines'] * metadata_dictionary['GRD Latitude Spacing']
max_swath_lat = metadata_dictionary['GRD Starting Latitude']
min_lon = np.min([min_lon, min_swath_lon])
max_lon = np.max([max_lon, max_swath_lon])
min_lat = np.min([min_lat, min_swath_lat])
max_lat = np.max([max_lat, max_swath_lat])
if printStatus:
print(' Longitude extents -> Min: '+'{:.06f}'.format(min_lon)+' Max: '+'{:.06f}'.format(max_lon))
print(' Latitude extents -> Min: ' + '{:.06f}'.format(min_lat) + ' Max: ' + '{:.06f}'.format(max_lat))
else:
if printStatus:
print(' Step 1: Finding a common extent for all DEMs with index ' + str(fileIndex))
saved_extent = ref.indexToCommonExtent(fileIndex)
min_lon = saved_extent[0]
max_lon = saved_extent[1]
min_lat = saved_extent[2]
max_lat = saved_extent[3]
if printStatus:
print(' Longitude extents -> Min: ' + '{:.06f}'.format(min_lon) + ' Max: ' + '{:.06f}'.format(
max_lon))
print(' Latitude extents -> Min: ' + '{:.06f}'.format(min_lat) + ' Max: ' + '{:.06f}'.format(
max_lat))
return(min_lon,max_lon,min_lat,max_lat)
########################################################################################################################
# step 2: read in the swath and create the input geometry
def read_swath_and_create_geometry(dataFolder,fileIndex,year,printStatus):
if printStatus:
print(' Step 2: Reading in the binary grid and creating the original geometry')
print(' Reading in binary data from file')
fileID = ref.indexAndYearToFileID(fileIndex, year)
metadata_dictionary = read_metadata_dictionary(dataFolder, fileID)
swathID = ref.fileNameToSwathID(fileID)
dataPath = dataFolder.joinpath('Raw', str(year), 'Data', swathID + '.hgt.grd')
g = np.fromfile(str(dataPath), dtype='<f4')
# cast to 2-byte float (float16) to reduce memory use
g = g.astype(np.dtype('<f2'))
grid = np.reshape(g, (metadata_dictionary['GRD Latitude Lines'], metadata_dictionary['GRD Longitude Samples']))
if printStatus:
print(' Preparing original geometry of the swath from metadata')
grid = np.where(grid > grid.min(), grid, np.nan)
min_swath_lon = metadata_dictionary['GRD Starting Longitude']
max_swath_lon = metadata_dictionary['GRD Starting Longitude'] + metadata_dictionary['GRD Longitude Samples'] * \
metadata_dictionary['GRD Longitude Spacing']
min_swath_lat = metadata_dictionary['GRD Starting Latitude'] + metadata_dictionary['GRD Latitude Lines'] * \
metadata_dictionary['GRD Latitude Spacing']
max_swath_lat = metadata_dictionary['GRD Starting Latitude']
lats = np.linspace(min_swath_lat, max_swath_lat, metadata_dictionary['GRD Latitude Lines'])
lons = np.linspace(min_swath_lon, max_swath_lon, metadata_dictionary['GRD Longitude Samples'])
grid = np.flipud(grid)
if printStatus:
# print(" The grid shape is (" + str(len(lats)) + "," + str(len(lons)) + ")")
print(" The grid shape is (" + str(np.shape(grid)[0]) + "," + str(np.shape(grid)[1]) + ")")
# Original Area definition in swath geometry:
Lons, Lats = np.meshgrid(lons, lats)
Lons = np.reshape(Lons, (np.size(Lons), ))
Lats = np.reshape(Lats, (np.size(Lats), ))
grid = np.reshape(grid, (np.size(grid), ))
# Remove NaNs so the resampling averages are not contaminated by missing values
non_nans = np.invert(np.isnan(grid))
if printStatus:
print(' Removed '+str(np.sum(np.isnan(grid)))+' nan points out of '+str(np.size(grid))+' grid points')
Lons = Lons[non_nans]
Lats = Lats[non_nans]
grid = grid[non_nans]
area_original = geometry.SwathDefinition(lons=Lons, lats=Lats)
return(area_original,grid)
########################################################################################################################
# step 3: create the output geometry for the swath
def create_output_geometry(common_index_extent,resolution,projection,printStatus):
if printStatus:
print(' Step 3: Creating the destination geometry')
if 'EPSG' not in projection:
zone = utm.from_latlon(np.mean([common_index_extent[2],common_index_extent[3]]),
np.mean([common_index_extent[0],common_index_extent[1]]))[2]
projection = 'EPSG:326'+str(zone)
if printStatus:
print(' Destination geometry: '+projection)
else:
if printStatus:
print(' Destination geometry: ' + projection)
#######
ll_corner = reproject_point([common_index_extent[0], common_index_extent[2]], 4326, int(projection.split(':')[1]))
lr_corner = reproject_point([common_index_extent[1], common_index_extent[2]], 4326, int(projection.split(':')[1]))
ur_corner = reproject_point([common_index_extent[1], common_index_extent[3]], 4326, int(projection.split(':')[1]))
ul_corner = reproject_point([common_index_extent[0], common_index_extent[3]], 4326, int(projection.split(':')[1]))
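# Pad the projected bounding box by 10 grid cells (10 * resolution).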
buffer_dist = 10*resolution
left_x = np.min([ll_corner[0],ul_corner[0],ur_corner[0],lr_corner[0]])-buffer_dist
right_x =
|
np.max([lr_corner[0],ur_corner[0],ll_corner[0],ul_corner[0]])
|
numpy.max
|
import cv2
import numpy as np
import keras
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from mnist.loader import MNIST
m = MNIST('./data')
#Load the saved model
model = keras.models.load_model('model.h5')
classes = [0,1,2,3,4,5,6,7,8,9]
x_train,y_train = m.load_training()
x_test,y_test = m.load_testing()
x_train = np.asarray(x_train).astype(np.float32)
y_train = np.asarray(y_train).astype(np.float32)
x_test =
|
np.asarray(x_test)
|
numpy.asarray
|
"""
The four-armed bandit - Policy Gradient¶
Policy gradient based agent that solves a two-armed problem.
"""
import tensorflow as tf
import numpy as np
# list of bandits
bandits = [0.2, 0, -0.2, -5]
num_bandits = len(bandits)
def pull_bandit(bandit):
result = np.random.rand(1)
if result > bandit:
return 1
else:
return -1
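# A pull returns +1 when a uniform draw exceeds the bandit value, so the most
# negative bandit (index 3, value -5) has the highest expected reward.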
# the agent
tf.reset_default_graph()
# establish feed-forward
weights = tf.Variable(tf.ones([num_bandits]))
choose_action = tf.argmax(weights, 0)
# training
reward_holder = tf.placeholder(shape=[1], dtype=tf.float32)
action_holder = tf.placeholder(shape=[1], dtype=tf.int32)
responsible_weight = tf.slice(weights, action_holder, [1])
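# Pseudo policy-gradient loss: -log(weight of chosen action) * reward, so
# gradient descent increases the weights of actions that returned positive reward.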
loss = -(tf.log(responsible_weight) * reward_holder)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
update = optimizer.minimize(loss)
total_episodes = 1000
total_reward = np.zeros(num_bandits)
e = 0.1
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
i = 0
while i < total_episodes:
rand = np.random.rand(1)
if rand < e:
action = np.random.randint(num_bandits)
else:
action = sess.run(choose_action)
reward = pull_bandit(bandits[action])
_, resp, ww = sess.run([update, responsible_weight, weights],
feed_dict={reward_holder: [reward], action_holder: [action]})
total_reward[action] += reward
if i % 50 == 0:
print(f"running reward for bandits: {total_reward}")
i += 1
print(f"The agent thinks bandit {np.argmax(ww) + 1} is the most promising.")
if
|
np.argmax(ww)
|
numpy.argmax
|
#! /usr/bin/env python
"""
This module takes a table of exposures, finds all exposures tagged as planetary nebula
exposures, and:
- For each grism
- For each spectral order
- For each file
- Determines which emission lines fall in that order of that file
- Attempts to automatically fit the line location
- Allows the user to override the result of the automatic fit
- Records the resulting fit location and status (good/bad/custom/etc.)
The module then saves a table of the results.
In addition, the module can take the result table above and use it to derive an overall
wavelength fit for each grism/order value of the inputs, and produce yet another output
table giving the results of that fit.
Authors
-------
- <NAME> (all python code)
- <NAME> (original IDL code)
Use
---
This module can be run from the command line (although one of the `abscal.commands` or
`abscal.idl_commands` scripts would be preferred for that), but is mostly intended to be
imported, either by binary scripts or for use from within python::
from abscal.wfc3.reduce_grism_wavelength import wlmeas, wlmake
interim_table = wlmeas(input_table, command_line_arg_namespace, override_dict)
final_table = wlmake(input_table, interim_table, command_line_arg_namespace, override_dict)
The override dict allows for many of the default input parameters to be overridden (as
defaults -- individual per-exposure overrides defined in the data files will still take
priority). There are currently no default parameters that can be overridden in this module.
"""
# *****TODO*****
# The following things could potentially be overridable parameters:
# - wlmeas
# - search region size around emission line
# - order wavelength search ranges (wrang)
import datetime
import glob
import json
import os
import yaml
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as consts
from astropy.io import ascii, fits
from astropy.table import Table, Column, unique
from astropy.time import Time
from copy import deepcopy
from matplotlib.widgets import TextBox
from pathlib import Path
from photutils.detection import DAOStarFinder
from scipy.linalg import lstsq
from scipy.stats import mode
from abscal.common.args import parse
from abscal.common.standard_stars import find_star_by_name
from abscal.common.utils import air2vac, get_data_file, get_defaults, linecen
from abscal.common.utils import smooth_model, tabinv
from abscal.common.exposure_data_table import AbscalDataTable
from abscal.wfc3.reduce_grism_extract import reduce
def wlimaz(root, y_arr, wave_arr, directory, verbose):
"""
Find a line from the IMA zero-read.
If the very bright 10830A line falls in the first order, you can end up
trying to centre a saturated line. In this case, use the _ima.fits file,
which holds all of the individual reads, and measure the line centre from
the zero-read ima file in the first order.
Parameters
----------
root : str
The file name to be checked
y_arr : np.ndarray
The y-values (flux values) from the flt file
wave_arr : np.ndarray
The approximate wavelength values from the flt file
directory : str
The directory where the flt file is located (and where the ima file
should be located)
verbose : bool
If True, print diagnostic information while locating the line
Returns
-------
star_x : float
The x centre of the line
star_y : float
The y centre of the line
"""
file_name = os.path.join(directory, root+"_ima.fits")
with fits.open(file_name) as in_file:
hd = in_file[0].header
nexten = hd['NEXTEND']
ima = in_file[nexten-4].data # zero read
ima = ima[5:1019,5:1019] # trim to match flt
dq = in_file[nexten-2].data # DQ=8 is unstable in Zread
dq = dq[5:1019,5:1019] # trim to match flt
# Get approximate position from preliminary extraction
xlin = tabinv(wave_arr, np.array((10830.,)))
xapprox = np.floor(xlin + .5).astype(np.int32)
if isinstance(xapprox, np.ndarray):
xapprox = xapprox[0]
yapprox = np.floor(y_arr[xapprox] + .5).astype(np.int32)
if isinstance(yapprox, np.ndarray):
yapprox = yapprox[0]
if verbose:
print(type(xapprox), type(yapprox))
msg = "WLIMAZ: 10830 line at approx ({},{})"
print(msg.format(xapprox, yapprox))
# Fix any DQ=8 pixels
ns = 11 # for an 11x11 search area
sbimg = ima[yapprox-ns//2:yapprox+ns//2+1,xapprox-ns//2:xapprox+ns//2+1]
sbdq = dq[yapprox-ns//2:yapprox+ns//2+1,xapprox-ns//2:xapprox+ns//2+1]
bad = np.where((sbdq & 8) != 0)
if len(bad[0]) > 0:
nbad = len(bad[0])
# from wlimaz.pro comment:
# ; I see up to nbad=5 in later data, eg ic6906bzq, but ima NOT zeroed & looks OK
# ; no response from SED abt re-fetching new OTF processings
totbad =
|
np.sum(sbimg[bad])
|
numpy.sum
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import unittest
from extensions.front.caffe.bn import BNToScaleShift
from mo.graph.graph import Node
from mo.utils.unittest.extractors import FakeParam
from mo.utils.unittest.graph import build_graph_with_edge_attrs
class FakeBNProtoLayer:
def __init__(self, val):
self.bn_param = val
class FakeBNBinLayer:
def __init__(self, val):
self.blobs = val
class TestBNReplacer(unittest.TestCase):
def test_bn(self):
bn_pb = FakeBNProtoLayer(FakeParam('eps', 0.0001))
mean = [1, 2.5, 3]
var = [0.5, 0.1, 1.2]
scale = [2.3, 3.4, 4.5]
shift = [0.8, 0.6, 0.4]
bn_bin = FakeBNBinLayer([FakeParam('data', mean),
FakeParam('data', var),
FakeParam('data', scale),
FakeParam('data', shift)])
nodes = {
'node_1': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'},
'bn': {'type': 'BN', 'kind': 'op', 'op': 'BN',
'pb': bn_pb,
'model_pb': bn_bin},
'node_2': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'}}
edges = [
('node_1', 'bn', {'in': 0}),
('bn', 'node_2', {'in': 0})]
graph = build_graph_with_edge_attrs(nodes, edges)
node = Node(graph, 'bn')
replacer = BNToScaleShift()
replacer.replace_op(graph, node)
scale_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'ScaleShift']
self.assertEqual(len(scale_node), 1)
scale_ref =
|
np.array([1.11796412, 3.2272172, 4.74282367])
|
numpy.array
|
# -*- coding: utf-8 -*-
from ....Classes.NodeMat import NodeMat
from ....Classes.ElementMat import ElementMat
from ....definitions import PACKAGE_NAME
from collections import Counter
import numpy as np
def interface(self, other_mesh):
"""Define an Mesh object corresponding to the exact intersection between two Mesh (nodes must be in both meshes,
the nodes tags must be identically defined).
Parameters
----------
self : Mesh
a Mesh object
other_mesh : Mesh
another Mesh object
Returns
-------
"""
# Dynamic import
module = __import__(PACKAGE_NAME + ".Classes." + "Mesh", fromlist=["Mesh"])
new_mesh = getattr(module, "Mesh")()
new_mesh.node = NodeMat()
new_mesh.element["Segment2"] = ElementMat(nb_node_per_element=2)
for key in self.element:
nodes_tags = self.element[key].get_all_node_tags()
other_nodes_tags = other_mesh.element[key].get_all_node_tags()
# Find the nodes on the interface (they are in both in and out)
interface_nodes_tags = np.intersect1d(nodes_tags, other_nodes_tags)
nb_interf_nodes = len(interface_nodes_tags)
tmp_element_tags = np.array([], dtype=int)
tmp_element_tags_other = np.array([], dtype=int)
node2elem_dict = dict()
node2elem_other_dict = dict()
# Find the elements in contact with the interface (they contain the interface nodes)
for ind in range(nb_interf_nodes):
tmp_tag = self.element[key].get_node2element(interface_nodes_tags[ind])
node2elem_dict[interface_nodes_tags[ind]] = tmp_tag
tmp_element_tags = np.concatenate((tmp_element_tags, tmp_tag))
tmp_tag = other_mesh.element[key].get_node2element(
interface_nodes_tags[ind]
)
node2elem_other_dict[interface_nodes_tags[ind]] = tmp_tag
tmp_element_tags_other = np.concatenate((tmp_element_tags_other, tmp_tag))
# Find element tags in contact and number of nodes in contact for each element
tmp_element_tags_unique = np.unique(
tmp_element_tags
) # List of element tag which are in contact with the interface
nb_elem_contact = len(tmp_element_tags_unique)
nb_element_tags_unique = np.zeros((nb_elem_contact, 1), dtype=int)
elem2node_dict = dict()
for ind in range(nb_elem_contact):
# Number of node on the interface for each element from tmp_element_tags_unique
Ipos = np.where(tmp_element_tags_unique[ind] == tmp_element_tags)[0]
nb_element_tags_unique[ind] = len(Ipos)
# Which nodes exactly are concerned; store them in elem2node_dict
nodes_tmp = self.get_node_tags(tmp_element_tags_unique[ind])
nodes_tmp_interf = np.array([], dtype=int)
for ipos in range(len(nodes_tmp)):
if nodes_tmp[ipos] in interface_nodes_tags:
nodes_tmp_interf = np.concatenate(
(nodes_tmp_interf, np.array([nodes_tmp[ipos]], dtype=int))
)
elem2node_dict[tmp_element_tags_unique[ind]] = nodes_tmp_interf
# Build element
seg_elem_pos = np.where(nb_element_tags_unique == 2)[
0
] # Position in the vector tmp_element_tags_unique
seg_elem_tag = tmp_element_tags_unique[
seg_elem_pos
] # Vector of element tags with only 2 nodes on the interface
nb_elem_segm = len(seg_elem_tag)
for i_seg in range(nb_elem_segm):
tag_two_nodes = elem2node_dict[seg_elem_tag[i_seg]]
new_tag = new_mesh.get_new_tag()
new_mesh.element["Segment2"].add_element(tag_two_nodes, new_tag)
# The same operation is applied in the other mesh because in the corners, 1 element will contain 3 nodes,
# and it will not be detected by seg_elem_pos. Applying the same process to the other mesh solves the issue,
# provided add_element ignores elements that are already defined.
tmp_element_tags_other_unique = np.unique(tmp_element_tags_other)
nb_node_other_contact = len(tmp_element_tags_other_unique)
nb_element_tags_other_unique = np.zeros((nb_node_other_contact, 1), dtype=int)
elem2node_other_dict = dict()
for ind in range(nb_node_other_contact):
Ipos = np.where(
tmp_element_tags_other_unique[ind] == tmp_element_tags_other
)[0]
nb_element_tags_other_unique[ind] = len(Ipos)
nodes_tmp = other_mesh.get_node_tags(tmp_element_tags_other_unique[ind])
nodes_tmp_interf =
|
np.array([], dtype=int)
|
numpy.array
|
"""This module contains utilities for methods."""
import logging
from math import ceil
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import elfi.model.augmenter as augmenter
from elfi.clients.native import Client
from elfi.model.elfi_model import ComputationContext
logger = logging.getLogger(__name__)
def arr2d_to_batch(x, names):
"""Convert a 2d array to a batch dictionary columnwise.
Parameters
----------
x : np.ndarray
2d array of values
names : list[str]
List of names
Returns
-------
dict
A batch dictionary
"""
# TODO: support vector parameter nodes
try:
x = x.reshape((-1, len(names)))
except BaseException:
raise ValueError("A dimension mismatch in converting array to batch dictionary. "
"This may be caused by multidimensional "
"prior nodes that are not yet supported.")
batch = {p: x[:, i] for i, p in enumerate(names)}
return batch
def batch_to_arr2d(batches, names):
"""Convert batches into a single numpy array.
Parameters
----------
batches : dict or list
A list of batches or a single batch
names : list
Name of outputs to include in the array. Specifies the order.
Returns
-------
np.array
2d, where columns are batch outputs
"""
if not batches:
return []
if not isinstance(batches, list):
batches = [batches]
rows = []
for batch_ in batches:
rows.append(np.column_stack([batch_[n] for n in names]))
return np.vstack(rows)
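# Illustrative round trip (hypothetical names): for a 2d array x with columns
# 'a' and 'b', batch_to_arr2d(arr2d_to_batch(x, ['a', 'b']), ['a', 'b']) recovers x.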
def ceil_to_batch_size(num, batch_size):
"""Calculate how many full batches in num.
Parameters
----------
num : int
batch_size : int
"""
return int(batch_size * ceil(num / batch_size))
def normalize_weights(weights):
"""Normalize weights to sum to unity."""
w = np.atleast_1d(weights)
if np.any(w < 0):
raise ValueError("Weights must be positive")
wsum = np.sum(weights)
if wsum == 0:
raise ValueError("All weights are zero")
return w / wsum
def compute_ess(weights: Union[None, np.ndarray] = None):
"""Compute the Effective Sample Size (ESS). Weights are assumed to be unnormalized.
Parameters
----------
weights: unnormalized weights
"""
# normalize weights
weights = normalize_weights(weights)
# compute ESS
numer = np.square(np.sum(weights))
denom = np.sum(np.square(weights))
return numer / denom
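# Worked example: four equal weights give ESS = (4w)**2 / (4*w**2) = 4, while a
# single non-zero weight gives ESS = 1.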
def weighted_var(x, weights=None):
"""Unbiased weighted variance (sample variance) for the components of x.
The weights are assumed to be non random (reliability weights).
Parameters
----------
x : np.ndarray
1d or 2d with observations in rows
weights : np.ndarray or None
1d array of weights. None defaults to standard variance.
Returns
-------
s2 : np.array
1d vector of component variances
References
----------
[1] https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
"""
if weights is None:
weights = np.ones(len(x))
V_1 = np.sum(weights)
V_2 = np.sum(weights ** 2)
xbar = np.average(x, weights=weights, axis=0)
numerator = weights.dot((x - xbar) ** 2)
s2 = numerator / (V_1 - (V_2 / V_1))
return s2
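# With unit weights V_1 = n and V_2 = n, so the denominator reduces to n - 1 and
# this matches the usual unbiased sample variance.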
class GMDistribution:
"""Gaussian mixture distribution with a shared covariance matrix."""
@classmethod
def pdf(cls, x, means, cov=1, weights=None):
"""Evaluate the density at points x.
Parameters
----------
x : array_like
Scalar, 1d or 2d array of points where to evaluate, observations in rows
means : array_like
Means of the Gaussian mixture components. It is assumed that means[0] contains
the mean of the first gaussian component.
weights : array_like
1d array of weights of the gaussian mixture components
cov : array_like, float
A shared covariance matrix for the mixture components
"""
means, weights = cls._normalize_params(means, weights)
ndim = np.asanyarray(x).ndim
if means.ndim == 1:
x = np.atleast_1d(x)
if means.ndim == 2:
x = np.atleast_2d(x)
d = np.zeros(len(x))
for m, w in zip(means, weights):
d += w * ss.multivariate_normal.pdf(x, mean=m, cov=cov)
# Cast to correct ndim
if ndim == 0 or (ndim == 1 and means.ndim == 2):
return d.squeeze()
else:
return d
@classmethod
def logpdf(cls, x, means, cov=1, weights=None):
"""Evaluate the log density at points x.
Parameters
----------
x : array_like
Scalar, 1d or 2d array of points where to evaluate, observations in rows
means : array_like
Means of the Gaussian mixture components. It is assumed that means[0] contains
the mean of the first gaussian component.
weights : array_like
1d array of weights of the gaussian mixture components
cov : array_like, float
A shared covariance matrix for the mixture components
"""
return np.log(cls.pdf(x, means=means, cov=cov, weights=weights))
@classmethod
def rvs(cls, means, cov=1, weights=None, size=1, prior_logpdf=None, random_state=None):
"""Draw random variates from the distribution.
Parameters
----------
means : array_like
Means of the Gaussian mixture components
cov : array_like, optional
A shared covariance matrix for the mixture components
weights : array_like, optional
1d array of weights of the gaussian mixture components
size : int or tuple or None, optional
Number or shape of samples to draw (a single sample has the shape of `means`).
If None, return one sample without an enclosing array.
prior_logpdf : callable, optional
Can be used to check validity of random variable.
random_state : np.random.RandomState, optional
"""
random_state = random_state or np.random
means, weights = cls._normalize_params(means, weights)
if size is None:
size = 1
no_wrap = True
else:
no_wrap = False
output = np.empty((size,) + means.shape[1:])
n_accepted = 0
n_left = size
trials = 0
while n_accepted < size:
inds = random_state.choice(len(means), size=n_left, p=weights)
rvs = means[inds]
perturb = ss.multivariate_normal.rvs(mean=means[0] * 0,
cov=cov,
random_state=random_state,
size=n_left)
x = rvs + perturb
# check validity of x
if prior_logpdf is not None:
x = x[np.isfinite(prior_logpdf(x))]
n_accepted1 = len(x)
output[n_accepted: n_accepted + n_accepted1] = x
n_accepted += n_accepted1
n_left -= n_accepted1
trials += 1
if trials == 100:
logger.warning("SMC: It appears to be difficult to find enough valid proposals "
"with prior pdf > 0. ELFI will keep trying, but you may wish "
"to kill the process and adjust the model priors.")
logger.debug('Needed %i trials to find %i valid samples.', trials, size)
if no_wrap:
return output[0]
else:
return output
@staticmethod
def _normalize_params(means, weights):
means = np.atleast_1d(np.squeeze(means))
if means.ndim > 2:
raise ValueError('means.ndim = {} but must be at most 2.'.format(means.ndim))
if weights is None:
weights = np.ones(len(means))
weights = normalize_weights(weights)
return means, weights
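# Sketch of intended use (values are illustrative, not from the source):
# GMDistribution.pdf(0.0, means=[-1.0, 1.0], cov=0.5) evaluates an equally
# weighted two-component mixture at a single point.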
def numgrad(fn, x, h=None, replace_neg_inf=True):
"""Naive numeric gradient implementation for scalar valued functions.
Parameters
----------
fn
x : np.ndarray
A single point in 1d vector
h : float or list
Stepsize or stepsizes for the dimensions
replace_neg_inf : bool
Replace neg inf fn values with gradient 0 (useful for logpdf gradients)
Returns
-------
grad : np.ndarray
1D gradient vector
"""
h = 0.00001 if h is None else h
h = np.asanyarray(h).reshape(-1)
x = np.asanyarray(x, dtype=np.float).reshape(-1)
dim = len(x)
X = np.zeros((dim * 3, dim))
for i in range(3):
Xi = np.tile(x, (dim, 1))
np.fill_diagonal(Xi, Xi.diagonal() + (i - 1) * h)
X[i * dim:(i + 1) * dim, :] = Xi
f = fn(X)
f = f.reshape((3, dim))
if replace_neg_inf:
if np.any(np.isneginf(f)):
return np.zeros(dim)
grad = np.gradient(f, *h, axis=0)
return grad[1, :]
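# Illustrative check (not from the source): numgrad(lambda X: (X ** 2).sum(axis=1),
# np.array([3.0])) should return approximately [6.], the derivative of x**2 at x = 3.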
# TODO: check that there are no latent variables in parameter parents.
# pdfs and gradients wouldn't be correct in those cases as it would require
# integrating out those latent variables. This is equivalent to that all
# stochastic nodes are parameters.
# TODO: could use some optimization
# TODO: support the case where some priors are multidimensional
class ModelPrior:
"""Construct a joint prior distribution over all the parameter nodes in `ElfiModel`."""
def __init__(self, model):
"""Initialize a ModelPrior.
Parameters
----------
model : ElfiModel
"""
model = model.copy()
self.parameter_names = model.parameter_names
self.dim = len(self.parameter_names)
self.client = Client()
# Prepare nets for the pdf methods
self._pdf_node = augmenter.add_pdf_nodes(model, log=False)[0]
self._logpdf_node = augmenter.add_pdf_nodes(model, log=True)[0]
self._rvs_net = self.client.compile(model.source_net, outputs=self.parameter_names)
self._pdf_net = self.client.compile(model.source_net, outputs=self._pdf_node)
self._logpdf_net = self.client.compile(model.source_net, outputs=self._logpdf_node)
def rvs(self, size=None, random_state=None):
"""Sample the joint prior."""
random_state = np.random if random_state is None else random_state
context = ComputationContext(size or 1, seed='global')
loaded_net = self.client.load_data(self._rvs_net, context, batch_index=0)
# Change to the correct random_state instance
# TODO: allow passing random_state to ComputationContext seed
loaded_net.nodes['_random_state'].update({'output': random_state})
del loaded_net.nodes['_random_state']['operation']
batch = self.client.compute(loaded_net)
rvs = np.column_stack([batch[p] for p in self.parameter_names])
if self.dim == 1:
rvs = rvs.reshape(size or 1)
return rvs[0] if size is None else rvs
def pdf(self, x):
"""Return the density of the joint prior at x."""
return self._evaluate_pdf(x)
def logpdf(self, x):
"""Return the log density of the joint prior at x."""
return self._evaluate_pdf(x, log=True)
def _evaluate_pdf(self, x, log=False):
if log:
net = self._logpdf_net
node = self._logpdf_node
else:
net = self._pdf_net
node = self._pdf_node
x = np.asanyarray(x)
ndim = x.ndim
x = x.reshape((-1, self.dim))
batch = self._to_batch(x)
# TODO: we could add a seed value that would load a "random state" instance
# throwing an error if it is used, for instance seed="not used".
context = ComputationContext(len(x), seed=0)
loaded_net = self.client.load_data(net, context, batch_index=0)
# Override
for k, v in batch.items():
loaded_net.nodes[k].update({'output': v})
del loaded_net.nodes[k]['operation']
val = self.client.compute(loaded_net)[node]
if ndim == 0 or (ndim == 1 and self.dim > 1):
val = val[0]
return val
def gradient_pdf(self, x):
"""Return the gradient of density of the joint prior at x."""
raise NotImplementedError
def gradient_logpdf(self, x, stepsize=None):
"""Return the gradient of log density of the joint prior at x.
Parameters
----------
x : float or np.ndarray
stepsize : float or list
Stepsize or stepsizes for the dimensions
"""
x =
|
np.asanyarray(x)
|
numpy.asanyarray
|
import torch
from tqdm import tqdm
from MDRSREID.utils.data_utils.evaluations.HOReID.evaluate import evaluate
from MDRSREID.utils.log_utils.log import score_str
from MDRSREID.utils.data_utils.Distance.numpy_distance import compute_dist
import numpy as np
def get_mAP_CMC(model, feat_dict, cfg, use_gm):
alpha = 0.1 if use_gm else 1.0
topk = 8
APs = []
CMC = []
query_feat_stage1 = feat_dict['query_feat_stage1']
query_feat_stage2 = feat_dict['query_feat_stage2']
query_cam = feat_dict['query_cam']
query_label = feat_dict['query_label']
gallery_feat_stage1 = feat_dict['gallery_feat_stage1']
gallery_feat_stage2 = feat_dict['gallery_feat_stage2']
gallery_cam = feat_dict['gallery_cam']
gallery_label = feat_dict['gallery_label']
distance_stage1 = compute_dist(query_feat_stage1, gallery_feat_stage1, dist_type='sklearn_cosine') # [2210, 17661]
# for sample_index in range(distance_stage1.shape[0]):
for sample_index in tqdm(range(distance_stage1.shape[0]), desc='Compute mAP and CMC', miniters=20, ncols=120, unit=' query_samples'):
a_sample_query_cam = query_cam[sample_index]
a_sample_query_label = query_label[sample_index]
# stage 1, compute distance, return index and topk
a_sample_distance_stage1 = distance_stage1[sample_index]
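# The stage-1 scores appear to be cosine similarities (higher = more similar),
# hence the descending sort below.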
a_sample_index_stage1 = np.argsort(a_sample_distance_stage1)[::-1]
a_sample_topk_index_stage1 = a_sample_index_stage1[:topk]
# stage2: feature extract topk features
a_sample_query_feat_stage2 = query_feat_stage2[sample_index]
topk_gallery_feat_stage2 = gallery_feat_stage2[a_sample_topk_index_stage1]
a_sample_query_feat_stage2 = torch.Tensor(a_sample_query_feat_stage2).cuda().unsqueeze(0).repeat([topk, 1, 1])
topk_gallery_feat_stage2 = torch.Tensor(topk_gallery_feat_stage2).cuda()
with torch.no_grad():
item = {}
item['a_sample_query_feat_stage2'] = a_sample_query_feat_stage2
item['topk_gallery_feat_stage2'] = topk_gallery_feat_stage2
cfg.stage = 'Evaluation'
output = model(item, cfg) # get prob
cfg.stage = 'FeatureExtract'
ver_prob = output['ver_prob']
ver_prob = ver_prob.detach().view([-1]).cpu().data.numpy()
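# Blend the stage-1 score with the stage-2 verification probability; with
# use_gm=False, alpha=1.0 and the result effectively reduces to the stage-1 ranking.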
topk_distance_stage2 = alpha * a_sample_distance_stage1[a_sample_topk_index_stage1] + (1 - alpha) * (1 - ver_prob)
topk_index_stage2 = np.argsort(topk_distance_stage2)[::-1]
topk_index_stage2 = a_sample_topk_index_stage1[topk_index_stage2.tolist()]
a_sample_index_stage2 = np.concatenate([topk_index_stage2, a_sample_index_stage1[topk:]])
#
ap, cmc = evaluate(
a_sample_index_stage2, a_sample_query_cam, a_sample_query_label, gallery_cam, gallery_label, 'cosine')
APs.append(ap)
CMC.append(cmc)
mAP = np.mean(np.array(APs))
min_len = 99999999
for cmc in CMC:
if len(cmc) < min_len:
min_len = len(cmc)
for i, cmc in enumerate(CMC):
CMC[i] = cmc[0: min_len]
CMC = np.mean(
|
np.array(CMC)
|
numpy.array
|
"""
StrongCoupling.py computes the higher-order interaction functions from
Park and Wilson 2020 for $N=2$ models and one Floquet multiplier.
In broad strokes, this library computes functions in the following order:
* Use the equation for $\Delta x$ (15) to produce a hierarchy of
ODEs for $g^{(k)}$ and solve. (Wilson 2020)
* Do the same using (30) and (40) to generate a hierarchy of ODEs
for $Z^{(k)}$ and $I^{(k)}$, respectively. (Wilson 2020)
* Solve for $\phi$ in terms of $\\theta_i$, (13), (14) (Park and Wilson 2020)
* Compute the higher-order interaction functions (15) (Park and Wilson 2020)
Notes:
- ``pA`` requires endpoint=False. make sure corresponding `dxA`s are used.
"""
import copy
import lib.lib_sym as slib
#import lib.lib as lib
from lib import lib
from lib.interp_basic import interp_basic as interpb
from lib.interp2d_basic import interp2d_basic as interp2db
#from lam_vec import lam_vec
#import inspect
import time
import os
import math
#import sys
#import multiprocessing as multip
import tqdm
#from pathos.pools import ProcessPool
from pathos.pools import _ProcessPool
import scipy.interpolate as si
import numpy as np
#import scipy as sp
import sympy as sym
import matplotlib.pyplot as plt
import dill
from sympy import Matrix, symbols, Sum, Indexed, collect, expand
from sympy import sympify as s
from sympy.physics.quantum import TensorProduct as kp
from sympy.utilities.lambdify import lambdify, implemented_function
#import pdoc
imp_fn = implemented_function
#from interpolate import interp1d
#from scipy.interpolate import interp1d#, interp2d
from scipy.interpolate import interp2d
from scipy.integrate import solve_ivp
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
class StrongCoupling(object):
def __init__(self,rhs,coupling,LC_init,var_names,pardict,**kwargs):
"""
See the defaults dict below for allowed kwargs.
All model parameters must follow the convention
'parameter_val'. No other underscores should be used.
The script splits the parameter name at '_' and uses the
string to the left as the sympy parameter name.
Reserved names: ...
rhs: callable.
right-hand side of a model
coupling: callable.
coupling function between oscillators
LC_init: list or numpy array.
initial condition of limit cycle (must be found manually).
XPP is useful, otherwise integrate your RHS for various
initial conditions for long times and extract an initial
condition close to the limit cycle.
var_names: list.
list of variable names as strings
pardict: dict.
dictionary of parameter values. dict['par1_val'] = float.
Make sure to use par_val format, where each parameter name is
followed by _val.
recompute_LC: bool.
If True, recompute limit cycle. If false, load limit cycle if
limit cycle data exists. Otherwise, compute. Default: False.
recompute_monodromy: bool.
If true, recompute kappa, the FLoquet multiplier using the
monodromy matrix. If false, load kappa if data exists,
otherwise compute. Default: False.
recompute_g_sym: bool.
If true, recompute the symbolic equations for g^k. If false,
load the symbolic equations if they exist in storage.
Otherwise, compute. Default: False.
recompute_g: bool.
If true, recompute the ODEs for g^k. If false,
load the data for g^k if they exist in storage.
Otherwise, compute. Default: False.
recompute_het_sym: bool.
If true, recompute the symbolic equations for z^k and i^k.
If false, load the symbolic equations if they exist in
storage. Otherwise, compute. Default: False.
recompute_z: bool.
If true, recompute the ODEs for z^k. If false,
load the data for z^k if they exist in storage.
Otherwise, compute. Default: False.
recompute_i: bool.
If true, recompute the ODEs for i^k. If false,
load the data for i^k if they exist in storage.
Otherwise, compute. Default: False.
recompute_k_sym: bool.
If true, recompute the symbolic equations for K^k. If false,
load the symbolic equations if they exist in storage.
Otherwise, compute. Default: False.
recompute_p_sym: bool.
If true, recompute the symbolic equations for p^k. If false,
load the symbolic equations if they exist in storage.
Otherwise, compute. Default: False.
recompute_h_sym: bool.
If true, recompute the symbolic equations for H^k. If false,
load the symbolic equations if they exist in storage.
Otherwise, compute. Default: False.
recompute_h: bool.
If true, recompute the H functions for H^k. If false,
load the data equations if they exist in storage.
Otherwise, compute. Default: False.
g_forward: list or bool.
If bool, integrate forwards or backwards
when computing g^k. If list, integrate g^k forwards or
backwards based on bool value g_forward[k].
Default: False.
z_forward: list or bool.
Same idea as g_forward for PRCS. Default: False.
i_forward: list or bool.
Same idea as g_forward for IRCS. Default: False.
dense: bool.
If True, solve_ivp uses dense=True and evaluates the solution
along tLC.
dir: str.
Location of data directory. Please choose carefully
because some outputs may be on the order of gigabytes
if NA >= 5000. Write 'home+data_dir/' to save to the folder
'data_dir' in the home directory. Otherwise the script
will use the current working directory by default unless
an absolute path is used. The trailing '/' is
required. Default: None.
trunc_order: int.
Highest order to truncate the expansion. For example,
trunc_order = 3 means the code will compute up to and
including order 3. Default: 3.
NA: int.
Number of partitions to discretize phase when computing p.
Default: 500.
p_iter: int.
Number of periods to integrate when computing the time
integral in p. Default: 10.
max_iter: int.
Number of Newton iterations. Default: 20.
TN: int.
Total time steps when computing g, z, i.
rtol, atol: float.
Relative and absolute tolerance for ODE solvers.
Defaults: 1e-7, 1e-7.
rel_tol: float.
Threshold for use in Newton scheme. Default: 1e-6.
method: string.
Specify the method used in scipy.integrate.solve_ivp.
Default: LSODA.
g_bad_dx: list or bool. If bool, use another variable to increase
the magnitude of the Newton derivative. This can only be
determined after attempting to run simulations and seeing that
the Jacobian for the Newton step is ill-conditioned. If list,
check for ill-conditioning for each order k.
For example, we use g_bad_dx = [False,True,False,...,False]
for the thalamic model. The CGL model only needs
g_bad_dx = False
z_bad_dx: same idea as g_bad_dx for PRCs
i_bad_dx: same idea as g_bad_dx for IRCs
"""
defaults = {
'trunc_order':3,
'trunc_deriv':3,
'TN':20000,
'dir':None,
'NA':500,
'p_iter':10,
'max_iter':100,
'rtol':1e-7,
'atol':1e-7,
'rel_tol':1e-6,
'method':'LSODA',
'g_forward':True,
'z_forward':True,
'i_forward':True,
'g_bad_dx':False,
'z_bad_dx':False,
'i_bad_dx':False,
'dense':False,
'g_jac_eps':1e-3,
'z_jac_eps':1e-3,
'i_jac_eps':1e-3,
'coupling_pars':'',
'recompute_LC':False,
'recompute_monodromy':False,
'recompute_g_sym':False,
'recompute_g':False,
'recompute_het_sym':False,
'recompute_z':False,
'recompute_i':False,
'recompute_k_sym':False,
'recompute_p_sym':False,
'recompute_p':False,
'recompute_h_sym':False,
'recompute_h':False,
'load_all':True,
'processes':2,
'chunksize':10000
}
self.rhs = rhs
self.coupling = coupling
self.LC_init = LC_init
self.rule_par = {}
# if no kwarg for default, use default. otherwise use input kwarg.
for (prop, default) in defaults.items():
value = kwargs.get(prop, default)
setattr(self, prop, value)
assert((type(self.g_forward) is bool) or\
(type(self.g_forward) is list))
assert((type(self.z_forward) is bool) or\
(type(self.z_forward) is list))
assert((type(self.i_forward) is bool) or
(type(self.i_forward) is list))
assert((type(self.g_jac_eps) is float) or\
(type(self.g_jac_eps) is list))
assert((type(self.z_jac_eps) is float) or\
(type(self.z_jac_eps) is list))
assert((type(self.i_jac_eps) is float) or\
(type(self.i_jac_eps) is list))
# update self with model parameters and save to dict
self.pardict_sym = {}
self.pardict_val = {}
for (prop, value) in pardict.items():
# define sympy names, and parameter replacement rule.
if prop.split('_')[-1] == 'val':
parname = prop.split('_')[0]
# save parname_val
setattr(self,prop,value)
# sympy name using parname
symvar = symbols(parname)
setattr(self,parname,symvar)
# define replacement rule for parameters
# i.e. parname (sympy) to parname_val (float/int)
self.rule_par.update({symvar:value})
self.pardict_sym.update({parname:symvar})
self.pardict_val.update({parname:value})
# variable names
self.var_names = var_names
self.dim = len(self.var_names)
# max iter number
self.miter = self.trunc_order+1
# Symbolic variables and functions
self.eye = np.identity(self.dim)
self.psi, self.eps, self.kappa = sym.symbols('psi eps kappa')
# single-oscillator variables and coupling variables.
# single oscillator vars use the names from var_names
# A and B are appended to coupling variables
# to denote oscillator 1 and 2.
self.vars = []
self.A_vars = []
self.B_vars = []
self.dA_vars = []
self.dB_vars = []
#self.A_pair = Matrix([[self.vA,self.hA,self.rA,self.wA,
# self.vB,self.hB,self.rB,self.wB]])
self.A_pair = sym.zeros(1,2*self.dim)
self.B_pair = sym.zeros(1,2*self.dim)
self.dA_pair = sym.zeros(1,2*self.dim)
self.dB_pair = sym.zeros(1,2*self.dim)
self.dx_vec = sym.zeros(1,self.dim)
self.x_vec = sym.zeros(self.dim,1)
#Matrix([[self.dv,self.dh,self.dr,self.dw]])
#Matrix([[self.v],[self.h],[self.r],[self.w]])
for i,name in enumerate(var_names):
# save var1, var2, ..., varN
symname = symbols(name)
setattr(self, name, symname)
self.vars.append(symname)
self.x_vec[i] = symname
# save dvar1, dvar2, ..., dvarN
symd = symbols('d'+name)
setattr(self, 'd'+name, symd)
self.dx_vec[i] = symd
# save var1A, var2A, ..., varNA,
# var1B, var2B, ..., varNB
symA = symbols(name+'A')
symB = symbols(name+'B')
setattr(self, name+'A', symA)
setattr(self, name+'B', symB)
self.A_vars.append(symA)
self.B_vars.append(symB)
self.A_pair[:,i] = Matrix([[symA]])
self.A_pair[:,i+self.dim] = Matrix([[symB]])
self.B_pair[:,i] = Matrix([[symB]])
self.B_pair[:,i+self.dim] = Matrix([[symA]])
symdA = symbols('d'+name+'A')
symdB = symbols('d'+name+'B')
setattr(self, 'd'+name+'A', symdA)
setattr(self, 'd'+name+'B', symdB)
self.dA_vars.append(symdA)
self.dB_vars.append(symdB)
self.dA_pair[:,i] = Matrix([[symdA]])
self.dA_pair[:,i+self.dim] = Matrix([[symdB]])
self.dB_pair[:,i] = Matrix([[symdB]])
self.dB_pair[:,i+self.dim] = Matrix([[symdA]])
self.t = symbols('t')
self.tA, self.tB = symbols('tA tB')
#self.dv, self.dh, self.dr, self.dw = symbols('dv dh dr dw')
# coupling variables
self.thA, self.psiA = symbols('thA psiA')
self.thB, self.psiB = symbols('thB psiB')
# function dicts
# individual functions
self.LC = {}
self.g = {}
self.z = {}
self.i = {}
# for coupling
self.cA = {}
self.cB = {}
self.kA = {}
self.kB = {}
self.pA = {}
self.pB = {}
self.hodd = {}
self.het2 = {}
#from os.path import expanduser
#home = expanduser("~")
# filenames and directories
if self.dir is None:
raise ValueError('Please define a data directory using \
the keyword argument \'dir\'.\
Write dir=\'home+file\' to save to file in the\
home directory. Write dir=\'file\' to save to\
file in the current working directory.')
elif self.dir.split('+')[0] == 'home':
from pathlib import Path
home = str(Path.home())
self.dir = home+'/'+self.dir.split('+')[1]
else:
self.dir = self.dir
print('Saving data to '+self.dir)
if (not os.path.exists(self.dir)):
os.makedirs(self.dir)
if self.coupling_pars == '':
print('NOTE: coupling_pars set to default empty string.\
Please specify coupling_pars in kwargs if\
varying parameters.')
lib.generate_fnames(self,coupling_pars=self.coupling_pars)
# make rhs callable
#self.rhs_sym = self.thal_rhs(0,self.vars,option='sym')
self.rhs_sym = rhs(0,self.vars,self.pardict_sym,
option='sym')
#print('jac sym',self.jac_sym[0,0])
self.load_limit_cycle()
self.A_array,self.dxA = np.linspace(0,self.T,self.NA,
retstep=True,
endpoint=True)
self.Aarr_noend,self.dxA_noend = np.linspace(0,self.T,self.NA,
retstep=True,
endpoint=False)
if self.load_all:
slib.generate_expansions(self)
slib.load_coupling_expansions(self)
slib.load_jac_sym(self)
rule = {**self.rule_LC,**self.rule_par}
# callable jacobian matrix evaluated along limit cycle
self.jacLC = lambdify((self.t),self.jac_sym.subs(rule),
modules='numpy')
# get monodromy matrix
self.load_monodromy()
# get heterogeneous terms for g, floquet e. fun.
self.load_g_sym()
# get g
self.load_g()
# get het. terms for z and i
self.load_het_sym()
# get iPRC, iIRC.
self.load_z()
self.load_i()
self.load_k_sym()
self.load_p_sym()
self.load_p()
self.load_h_sym()
self.load_h()
def monodromy(self,t,z):
"""
calculate right-hand side of system
$\dot \Phi = J\Phi, \Phi(0)=I$,
where $\Phi$ is a matrix solution
jacLC is the jacobian evaluated along the limit cycle
"""
jac = self.jacLC(t)
#LC_vec = np.array([self.LC['lam_v'](t),
# self.LC['lam_h'](t),
# self.LC['lam_r'](t),
# self.LC['lam_w'](t)])
#jac = self.numerical_jac(rhs,self.LC_vec(t))
#print(jac)
n = int(np.sqrt(len(z)))
z = np.reshape(z,(n,n))
#print(n)
dy = np.dot(jac,z)
return np.reshape(dy,n*n)
def numerical_jac(self,fn,x,eps=1e-7):
"""
return numerical Jacobian function
"""
n = len(x)
J = np.zeros((n,n))
PM = np.zeros_like(J)
PP = np.zeros_like(J)
for k in range(n):
epsvec = np.zeros(n)
epsvec[k] = eps
PP[:,k] = fn(0,x+epsvec)
PM[:,k] = fn(0,x-epsvec)
J = (PP-PM)/(2*eps)
return J
def generate_expansions(self):
"""
generate expansions from Wilson 2020
"""
i_sym = sym.symbols('i_sym') # summation index
psi = self.psi
#self.g_expand = {}
for key in self.var_names:
sg = Sum(psi**i_sym*Indexed('g'+key,i_sym),(i_sym,1,self.miter))
sz = Sum(psi**i_sym*Indexed('z'+key,i_sym),(i_sym,0,self.miter))
si = Sum(psi**i_sym*Indexed('i'+key,i_sym),(i_sym,0,self.miter))
self.g['expand_'+key] = sg.doit()
self.z['expand_'+key] = sz.doit()
self.i['expand_'+key] = si.doit()
self.z['vec'] = Matrix([[self.z['expand_v']],
[self.z['expand_h']],
[self.z['expand_r']],
[self.z['expand_w']]])
# rule to replace dv with gv, dh with gh, etc.
self.rule_d2g = {self.d[k]:
self.g['expand_'+k] for k in self.var_names}
#print('self.rule_d2g)',self.rule_d2g)
#print('rule_d2g',self.rule_d2g)
def load_limit_cycle(self):
self.LC['dat'] = []
for key in self.var_names:
self.LC['imp_'+key] = []
self.LC['lam_'+key] = []
print('* Computing LC data...')
file_does_not_exist = not(os.path.isfile(self.LC['dat_fname']))
#print(os.path.isfile(self.LC['dat_fname']))
if self.recompute_LC or file_does_not_exist:
# get limit cycle (LC) period
sol,t_arr = self.generate_limit_cycle()
# save LC data
np.savetxt(self.LC['dat_fname'],sol)
np.savetxt(self.LC['t_fname'],t_arr)
else:
#print('loading LC')
sol = np.loadtxt(self.LC['dat_fname'])
t_arr = np.loadtxt(self.LC['t_fname'])
self.LC['dat'] = sol
self.LC['t'] = t_arr
# define basic variables
self.T = self.LC['t'][-1]
self.tLC = np.linspace(0,self.T,self.TN)#self.LC['t']
self.omega = 2*np.pi/self.T
print('* LC period = '+str(self.T))
# Make LC data callable from inside sympy
imp_lc = sym.zeros(1,self.dim)
for i,key in enumerate(self.var_names):
fn = interpb(self.LC['t'],self.LC['dat'][:,i],self.T)
#fn = interp1d(self.LC['t'],self.LC['dat'][:,i],self.T,kind='cubic')
self.LC['imp_'+key] = imp_fn(key,fn)
self.LC['lam_'+key] = fn
imp_lc[i] = self.LC['imp_'+key](self.t)
#lam_list.append(self.LC['lam_'+key])
self.LC_vec = lambdify(self.t,imp_lc,modules='numpy')
#self.LC_vec = lam_vec(lam_list)
if True:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
print('LC init',end=', ')
for i, ax in enumerate(axs.flat):
key = self.var_names[i]
ax.plot(self.tLC,self.LC['lam_'+key](self.tLC))
print(self.LC['lam_'+key](0),end=', ')
axs[0].set_title('LC')
plt.tight_layout()
plt.savefig('plot_LC.png')
plt.close()
#plt.show(block=True)
# single rule
self.rule_LC = {}
for i,key in enumerate(self.var_names):
self.rule_LC.update({self.vars[i]:self.LC['imp_'+key](self.t)})
# coupling rules
thA = self.thA
thB = self.thB
rule_dictA = {self.A_vars[i]:self.LC['imp_'+key](thA)
for i,key in enumerate(self.var_names)}
rule_dictB = {self.B_vars[i]:self.LC['imp_'+key](thB)
for i,key in enumerate(self.var_names)}
self.rule_LC_AB = {**rule_dictA,**rule_dictB}
def generate_limit_cycle(self):
tol = 1e-13
#T_init = 5.7
eps = np.zeros(self.dim) + 1e-2
epstime = 1e-4
dy = np.zeros(self.dim+1)+10
#T_init = 10.6
# rough init found using XPP
init = self.LC_init
T_init = init[-1]
#np.array([-.64,0.71,0.25,0,T_init])
#init = np.array([-.468,0.6,0.07,0,T_init])
#init = np.array([-.3,
# .7619,
# 0.1463,
# 0,
# T_init])
# run for a while to settle close to limit cycle
sol = solve_ivp(self.rhs,[0,500],init[:-1],
method=self.method,dense_output=True,
rtol=1e-14,atol=1e-14,args=(self.pardict_val,))
tn = len(sol.y.T)
maxidx = np.argmax(sol.y.T[int(.2*tn):,0])+int(.2*tn)
init = np.append(sol.y.T[maxidx,:],T_init)
#init = np.array([-4.65e+01,8.77e-01,4.68e-04,T_init])
#init = np.array([1.,1.,1.,1.,T_init])
counter = 0
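# Newton iteration on (x0, T): drive the return map x0 -> phi_T(x0) back to x0;
# the last row of J uses the vector field at x0 as a phase (anchoring) condition.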
while np.linalg.norm(dy) > tol:
#eps = np.zeros(self.dim)+1e-8
#for i in range(self.dim):
# eps[i] += np.amax(np.abs(sol.y.T[:,i]))*(1e-5)
J = np.zeros((self.dim+1,self.dim+1))
t = np.linspace(0,init[-1],self.TN)
for p in range(self.dim):
pertp = np.zeros(self.dim)
pertm = np.zeros(self.dim)
pertp[p] = eps[p]
pertm[p] = -eps[p]
initp = init[:-1] + pertp
initm = init[:-1] + pertm
# get error in position estimate
solp = solve_ivp(self.rhs,[0,t[-1]],initp,
method=self.method,
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
solm = solve_ivp(self.rhs,[0,t[-1]],initm,
method=self.method,
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
yp = solp.y.T
ym = solm.y.T
J[:-1,p] = (yp[-1,:]-ym[-1,:])/(2*eps[p])
J[:-1,:-1] = J[:-1,:-1] - np.eye(self.dim)
tp = np.linspace(0,init[-1]+epstime,self.TN)
tm = np.linspace(0,init[-1]-epstime,self.TN)
# get error in time estimate
solp = solve_ivp(self.rhs,[0,tp[-1]],initp,
method=self.method,
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
solm = solve_ivp(self.rhs,[0,tm[-1]],initm,
method=self.method,
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
yp = solp.y.T
ym = solm.y.T
J[:-1,-1] = (yp[-1,:]-ym[-1,:])/(2*epstime)
J[-1,:] = np.append(self.rhs(0,init[:-1],self.pardict_val),0)
#print(J)
sol = solve_ivp(self.rhs,[0,init[-1]],init[:-1],
method=self.method,
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
y_final = sol.y.T[-1,:]
#print(np.dot(np.linalg.inv(J),J))
b = np.append(init[:-1]-y_final,0)
dy = np.dot(np.linalg.inv(J),b)
init += dy
print('LC rel. err =',np.linalg.norm(dy))
if False:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for i,ax in enumerate(axs):
key = self.var_names[i]
ax.plot(sol.t,sol.y.T[:,i],label=key)
ax.legend()
axs[0].set_title('LC counter'+str(counter))
plt.tight_layout()
plt.show(block=True)
time.sleep(.1)
counter += 1
# find index of peak voltage and initialize.
peak_idx = np.argmax(sol.y.T[:,0])
#init = np.zeros(5)
#init[-1] = sol.t[-1]
#init[:-1] = np.array([-0.048536698617817,
# 0.256223512263409,
# 0.229445856262051,
# 0.438912900900591])
# run finalized limit cycle solution
sol = solve_ivp(self.rhs,[0,init[-1]],sol.y.T[peak_idx,:],
method=self.method,
t_eval=np.linspace(0,init[-1],self.TN),
rtol=1e-13,atol=1e-13,
args=(self.pardict_val,))
#print('warning: lc init set by hand')
#sol = solve_ivp(self.thal_rhs,[0,init[-1]],init[:-1],
# method='LSODA',
# t_eval=np.linspace(0,init[-1],self.TN),
# rtol=self.rtol,atol=self.atol)
return sol.y.T,sol.t
def load_monodromy(self):
"""
if monodromy data exists, load. if DNE or
recompute required, compute here.
"""
if self.recompute_monodromy\
or not(os.path.isfile(self.monodromy_fname)):
initm = copy.deepcopy(self.eye)
r,c = np.shape(initm)
init = np.reshape(initm,r*c)
sol = solve_ivp(self.monodromy,[0,self.tLC[-1]],init,
t_eval=self.tLC,
method=self.method,
rtol=1e-13,atol=1e-13)
self.sol = sol.y.T
self.M = np.reshape(self.sol[-1,:],(r,c))
np.savetxt(self.monodromy_fname,self.M)
else:
self.M = np.loadtxt(self.monodromy_fname)
self.eigenvalues, self.eigenvectors = np.linalg.eig(self.M)
# get the non-unity Floquet multiplier (second-largest eigenvalue) and its eigenvector
self.min_lam_idx = np.argsort(self.eigenvalues)[-2]
#print(self.min_lam_idx)
#print(self.eigenvalues[self.min_lam_idx])
self.lam = self.eigenvalues[self.min_lam_idx] # floquet mult.
self.kappa_val = np.log(self.lam)/self.T # floquet exponent
if np.sum(self.eigenvectors[:,self.min_lam_idx]) < 0:
self.eigenvectors[:,self.min_lam_idx] *= -1
#print('eigenvalues',self.eigenvalues)
#print('eiogenvectors',self.eigenvectors)
#print(self.eigenvectors)
# print floquet multipliers
einv = np.linalg.inv(self.eigenvectors/2)
#print('eig inverse',einv)
idx = np.argsort(np.abs(self.eigenvalues-1))[0]
#min_lam_idx2 = np.argsort(einv)[-2]
self.g1_init = self.eigenvectors[:,self.min_lam_idx]/2.
self.z0_init = einv[idx,:]
self.i0_init = einv[self.min_lam_idx,:]
#print('min idx for prc',idx,)
#print('Monodromy',self.M)
#print('eigenvectors',self.eigenvectors)
print('g1_init',self.g1_init)
print('z0_init',self.z0_init)
print('i0_init',self.i0_init)
#print('Floquet Multiplier',self.lam)
print('* Floquet Exponent kappa =',self.kappa_val)
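    # A minimal sketch (illustrative only, not used by this class) of how the
    # Floquet multiplier and exponent follow from a monodromy matrix M over one
    # period T; the 2x2 matrix and the period below are hypothetical:
    #
    #     M = np.array([[1.0, 0.0], [0.3, 0.2]])  # hypothetical monodromy matrix
    #     T = 2*np.pi                              # hypothetical period
    #     evals, evecs = np.linalg.eig(M)          # one multiplier ~1 (phase direction)
    #     lam = np.sort(evals)[-2]                 # nontrivial multiplier, as above
    #     kappa = np.log(lam)/T                    # Floquet exponent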
def load_g_sym(self):
# load het. functions h if they exist. otherwise generate.
#self.rule_g0 = {sym.Indexed('gx',0):s(0),sym.Indexed('gy',0):s(0)}
# create dict of gv0=0,gh0=0,etc for substitution later.
self.rule_g0 = {sym.Indexed('g'+name,0):
s(0) for name in self.var_names}
for key in self.var_names:
self.g['sym_'+key] = []
#self.g_sym = {k: [] for k in self.var_names}
# check that files exist
val = 0
for key in self.var_names:
val += not(lib.files_exist(self.g['sym_fnames_'+key]))
if val != 0:
files_do_not_exist = True
else:
files_do_not_exist = False
if self.recompute_g_sym or files_do_not_exist:
print('* Computing g symbolic...')
# create symbolic derivative
sym_collected = slib.generate_g_sym(self)
for i in range(self.miter):
for key in self.var_names:
expr = sym_collected[key].coeff(self.psi,i)
self.g['sym_'+key].append(expr)
dill.dump(self.g['sym_'+key][i],
open(self.g['sym_fnames_'+key][i],'wb'),
recurse=True)
else:
print('* Loading g symbolic...')
for key in self.var_names:
self.g['sym_'+key] = lib.load_dill(self.g['sym_fnames_'+key])
def load_g(self):
"""
load all Floquet eigenfunctions g or recompute
"""
self.g['dat'] = []
for key in self.var_names:
self.g['imp_'+key] = []
self.g['lam_'+key] = []
print('* Computing g...')
for i in range(self.miter):
print(str(i))
fname = self.g['dat_fnames'][i]
file_does_not_exist = not(os.path.isfile(fname))
if self.recompute_g or file_does_not_exist:
het_vec = self.interp_lam(i,self.g,fn_type='g')
data = self.generate_g(i,het_vec)
np.savetxt(self.g['dat_fnames'][i],data)
else:
data = np.loadtxt(fname)
if True:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for j,ax in enumerate(axs):
key = self.var_names[j]
ax.plot(self.tLC,data[:,j],label=key)
ax.legend()
axs[0].set_title('g'+str(i))
print('g'+str(i)+' ini',data[0,:])
print('g'+str(i)+' fin',data[-1,:])
plt.tight_layout()
plt.savefig('plot_g'+str(i)+'.png')
plt.close()
self.g['dat'].append(data)
for j,key in enumerate(self.var_names):
fn = interpb(self.tLC,data[:,j],self.T)
#fn = interp1d(self.tLC,data[:,j],self.T,kind='cubic')
imp = imp_fn('g'+key+'_'+str(i),self.fmod(fn))
self.g['imp_'+key].append(imp)
self.g['lam_'+key].append(fn)
# replacement rules.
thA = self.thA
thB = self.thB
self.rule_g = {} # g function
self.rule_g_AB = {} # coupling
for key in self.var_names:
for i in range(self.miter):
dictg = {sym.Indexed('g'+key,i):self.g['imp_'+key][i](self.t)}
dictA = {Indexed('g'+key+'A',i):self.g['imp_'+key][i](thA)}
dictB = {Indexed('g'+key+'B',i):self.g['imp_'+key][i](thB)}
self.rule_g.update(dictg)
self.rule_g_AB.update(dictA)
self.rule_g_AB.update(dictB)
def generate_g(self,k,het_vec):
"""
generate Floquet eigenfunctions g
        uses Newton's method
"""
if type(self.g_forward) is bool:
backwards = not(self.g_forward)
elif type(self.g_forward) is list:
backwards = not(self.g_forward[k])
else:
raise ValueError('g_forward must be bool or list, not',
type(self.g_forward))
# load kth expansion of g for k >= 0
if k == 0:
# g0 is 0. do this to keep indexing simple.
return np.zeros((self.TN,len(self.var_names)))
if k == 1:
# pick correct normalization
init = copy.deepcopy(self.g1_init)
eps = 1e-4
else:
init = np.zeros(self.dim)
eps = 1e-4
init = lib.run_newton2(self,self._dg,init,k,het_vec,
max_iter=self.max_iter,eps=eps,
rel_tol=self.rel_tol,rel_err=10,
alpha=1,backwards=backwards,
dense=self.dense)
# get full solution
if backwards:
tLC = -self.tLC
else:
tLC = self.tLC
sol = solve_ivp(self._dg,[0,tLC[-1]],
init,args=(k,het_vec),
t_eval=tLC,
method=self.method,
dense_output=True,
rtol=self.rtol,atol=self.atol)
if backwards:
gu = sol.y.T[::-1,:]
else:
gu = sol.y.T
return gu
def load_het_sym(self):
# load het. for z and i if they exist. otherwise generate.
for key in self.var_names:
self.z['sym_'+key] = []
self.i['sym_'+key] = []
# check that files exist
val = 0
for key in self.var_names:
val += not(lib.files_exist(self.z['sym_fnames_'+key]))
val += not(lib.files_exist(self.i['sym_fnames_'+key]))
val += not(lib.files_exist([self.A_fname]))
if val != 0:
files_do_not_exist = True
else:
files_do_not_exist = False
if self.recompute_het_sym or files_do_not_exist:
print('* Computing heterogeneous terms...')
sym_collected = self.generate_het_sym()
for i in range(self.miter):
for key in self.var_names:
expr = sym_collected[key].coeff(self.psi,i)
self.z['sym_'+key].append(expr)
self.i['sym_'+key].append(expr)
dill.dump(self.z['sym_'+key][i],
open(self.z['sym_fnames_'+key][i],'wb'),
recurse=True)
dill.dump(self.i['sym_'+key][i],
open(self.i['sym_fnames_'+key][i],'wb'),
recurse=True)
# save matrix of a_i
dill.dump(self.A,open(self.A_fname,'wb'),recurse=True)
else:
print('* Loading heterogeneous terms...')
self.A, = lib.load_dill([self.A_fname])
for key in self.var_names:
self.z['sym_'+key] = lib.load_dill(self.z['sym_fnames_'+key])
self.i['sym_'+key] = lib.load_dill(self.i['sym_fnames_'+key])
def generate_het_sym(self):
"""
Generate heterogeneous terms for integrating the Z_i and I_i terms.
Returns
-------
        out : dict
            Heterogeneous terms collected by variable name.
"""
# get the general expression for h in z before plugging in g,z.
# column vectors ax ay for use in matrix A = [ax ay]
self.a = {k: sym.zeros(self.dim,1) for k in self.var_names}
#self.ax = Matrix([[0],[0]])
#self.ay = Matrix([[0],[0]])
for i in range(1,self.miter):
print('z,i het sym deriv order=',i)
p1 = lib.kProd(i,self.dx_vec)
p2 = kp(p1,sym.eye(self.dim))
for j,key in enumerate(self.var_names):
print('\t var=',key)
d1 = lib.vec(lib.df(self.rhs_sym[j],self.x_vec,i+1))
self.a[key] += (1/math.factorial(i))*(p2*d1)
self.A = sym.zeros(self.dim,self.dim)
for i,key in enumerate(self.var_names):
self.A[:,i] = self.a[key]
het = self.A*self.z['vec']
# expand all terms
out = {}
rule = {**self.rule_g0,**self.rule_d2g}
rule_trunc = {}
for k in range(self.miter,self.miter+200):
rule_trunc.update({self.psi**k:0})
for i,key in enumerate(self.var_names):
print('z,i het sym subs key=',key)
tmp = het[i].subs(rule)
tmp = sym.expand(tmp,basic=False,deep=True,
power_base=False,power_exp=False,
mul=False,log=False,
multinomial=True)
tmp = tmp.subs(rule_trunc)
tmp = sym.collect(tmp,self.psi).subs(rule_trunc)
tmp = sym.expand(tmp).subs(rule_trunc)
tmp = sym.collect(tmp,self.psi).subs(rule_trunc)
out[key] = tmp
return out
def load_z(self):
"""
load all PRCs z or recompute
"""
self.z['dat'] = []
for key in self.var_names:
self.z['imp_'+key] = []
self.z['lam_'+key] = []
print('* Computing z...')
for i in range(self.miter):
print(str(i))
fname = self.z['dat_fnames'][i]
file_does_not_exist = not(os.path.isfile(fname))
if self.recompute_z or file_does_not_exist:
het_vec = self.interp_lam(i,self.z,fn_type='z')
data = self.generate_z(i,het_vec)
np.savetxt(self.z['dat_fnames'][i],data)
else:
data = np.loadtxt(fname)
if True:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for j,ax in enumerate(axs):
key = self.var_names[j]
ax.plot(self.tLC,data[:,j],label=key)
ax.legend()
print('z'+str(i)+' ini',data[0,:])
print('z'+str(i)+' fin',data[-1,:])
axs[0].set_title('z'+str(i))
plt.tight_layout()
plt.savefig('plot_z'+str(i)+'.png')
plt.close()
#time.sleep(.1)
self.z['dat'].append(data)
for j,key in enumerate(self.var_names):
fn = interpb(self.tLC,data[:,j],self.T)
#fn = interp1d(self.tLC,data[:,j],self.T,kind='cubic')
imp = imp_fn('z'+key+'_'+str(i),self.fmod(fn))
self.z['imp_'+key].append(imp)
self.z['lam_'+key].append(fn)
# coupling
thA = self.thA
thB = self.thB
self.rule_z_AB = {}
for key in self.var_names:
for i in range(self.miter):
dictA = {Indexed('z'+key+'A',i):self.z['imp_'+key][i](thA)}
dictB = {Indexed('z'+key+'B',i):self.z['imp_'+key][i](thB)}
self.rule_z_AB.update(dictA)
self.rule_z_AB.update(dictB)
def generate_z(self,k,het_vec):
if type(self.z_forward) is bool:
backwards = not(self.z_forward)
elif type(self.z_forward) is list:
backwards = not(self.z_forward[k])
else:
raise ValueError('z_forward must be bool or list, not',
type(self.z_forward))
if type(self.z_jac_eps) is float:
eps = self.z_jac_eps
elif type(self.z_jac_eps) is list:
eps= self.z_jac_eps[k]
else:
            raise ValueError('z_jac_eps must be float or list, not',
type(self.z_jac_eps))
if k == 0:
init = copy.deepcopy(self.z0_init)
#init = [-1.389, -1.077, 9.645, 0]
else:
init = np.zeros(self.dim)
init = lib.run_newton2(self,self._dz,init,k,het_vec,
max_iter=self.max_iter,eps=eps,alpha=1,
rel_tol=self.rel_tol,rel_err=10,
backwards=backwards,dense=self.dense)
if backwards:
tLC = -self.tLC
else:
tLC = self.tLC
sol = solve_ivp(self._dz,[0,tLC[-1]],
init,args=(k,het_vec),
method=self.method,dense_output=True,
t_eval=tLC,
rtol=self.rtol,atol=self.atol)
if backwards:
zu = sol.y.T[::-1,:]
else:
zu = sol.y.T
if k == 0:
# normalize
dLC = self.rhs(0,self.LC_vec(0)[0],self.pardict_val)
zu = zu/(np.dot(dLC,zu[0,:]))
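            # (This rescaling enforces the usual PRC normalization
            #  Z(0) . F(x_LC(0)) = 1, so that dtheta/dt = 1 along the limit cycle.)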
return zu
def load_i(self):
"""
        load all IRCs i or recompute
"""
self.i['dat'] = []
for key in self.var_names:
self.i['imp_'+key] = []
self.i['lam_'+key] = []
print('* Computing i...')
for i in range(self.miter):
print(str(i))
fname = self.i['dat_fnames'][i]
file_does_not_exist = not(os.path.isfile(fname))
if self.recompute_i or file_does_not_exist:
het_vec = self.interp_lam(i,self.i,fn_type='i')
data = self.generate_i(i,het_vec)
np.savetxt(self.i['dat_fnames'][i],data)
else:
data = np.loadtxt(fname)
if True:
fig, axs = plt.subplots(nrows=self.dim,ncols=1)
for j,ax in enumerate(axs):
key = self.var_names[j]
ax.plot(self.tLC,data[:,j],label=key)
ax.legend()
print('i'+str(i)+' ini',data[0,:])
print('i'+str(i)+' fin',data[-1,:])
axs[0].set_title('i'+str(i))
plt.tight_layout()
plt.savefig('plot_i'+str(i)+'.png')
plt.close()
self.i['dat'].append(data)
for j,key in enumerate(self.var_names):
fn = interpb(self.tLC,data[:,j],self.T)
#fn = interp1d(self.tLC,data[:,j],self.T,kind='linear')
imp = imp_fn('i'+key+'_'+str(i),self.fmod(fn))
self.i['imp_'+key].append(imp)
self.i['lam_'+key].append(fn)
#lam_temp = lambdify(self.t,self.i['imp_'+key][i](self.t))
# coupling
thA = self.thA
thB = self.thB
self.rule_i_AB = {}
for key in self.var_names:
for i in range(self.miter):
dictA = {Indexed('i'+key+'A',i):self.i['imp_'+key][i](thA)}
dictB = {Indexed('i'+key+'B',i):self.i['imp_'+key][i](thB)}
self.rule_i_AB.update(dictA)
self.rule_i_AB.update(dictB)
def generate_i(self,k,het_vec):
"""
i0 equation is stable in forwards time
i1, i2, etc equations are stable in backwards time.
"""
if type(self.i_forward) is bool:
backwards = not(self.i_forward)
elif type(self.i_forward) is list:
backwards = not(self.i_forward[k])
else:
raise ValueError('i_forward must be bool or list, not',
type(self.i_forward))
if type(self.i_bad_dx) is bool:
exception = self.i_bad_dx
elif type(self.i_bad_dx) is list:
exception = self.i_bad_dx[k]
else:
raise ValueError('i_bad_dx must be bool or list, not',
type(self.i_bad_dx))
if type(self.i_jac_eps) is float:
eps = self.i_jac_eps
elif type(self.i_jac_eps) is list:
eps= self.i_jac_eps[k]
else:
            raise ValueError('i_jac_eps must be float or list, not',
type(self.i_jac_eps))
if k == 0:
init = copy.deepcopy(self.i0_init)
else:
if k == 1:
alpha = 1
else:
alpha = 1
init = np.zeros(self.dim)
init = lib.run_newton2(self,self._di,init,k,het_vec,
max_iter=self.max_iter,rel_tol=self.rel_tol,
eps=eps,alpha=alpha,
backwards=backwards,
exception=exception,
dense=self.dense)
if backwards:
tLC = -self.tLC
else:
tLC = self.tLC
sol = solve_ivp(self._di,[0,tLC[-1]],init,
args=(k,het_vec),
t_eval=tLC,
method=self.method,dense_output=True,
rtol=self.rtol,atol=self.atol)
if backwards:
iu = sol.y.T[::-1,:]
else:
iu = sol.y.T
print('i init',k,iu[0,:])
print('i final',k,iu[-1,:])
if k == 0:
# normalize. classic weak coupling theory normalization
c = np.dot(self.g1_init,iu[0,:])
iu /= c
if k == 1: # normalize
# kill off nonzero v
#if np.sum(self.g['dat'][1][:,-1]) < 1e-20:
# iu[:,-1] = 0
# see Wilson 2020 PRE for normalization formula.
LC0 = []
g10 = []
z00 = []
i00 = []
for varname in self.var_names:
key = 'lam_'+varname
LC0.append(self.LC[key](0))
g10.append(self.g[key][1](0))
z00.append(self.z[key][0](0))
i00.append(self.i[key][0](0))
F = self.rhs(0,LC0,self.pardict_val)
            g1 = np.array(g10)
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
from scipy.integrate import solve_ivp
from tqdm import tqdm
from scipy.io import loadmat
import matplotlib.animation as animation
import os
# Complex Fourier Series
def fs(F):
n = int(len(F)/2)
F = np.fft.rfft(F)
F /= n
F[0]*= 0.5
return F[:-1] # Cutoff frequency removed
# Inverse Complex Fourier Series
def ifs(F):
m = len(F)
G = m*np.concatenate((F,[0.0]))
G[0] *= 2
return np.fft.irfft(G)
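# A small self-check of the Fourier-series pair above (a hedged sketch, not part
# of the solver); call it manually to verify the discretization.
def _fs_roundtrip_check(n=64):
    # fs followed by ifs reproduces a band-limited periodic signal exactly, since
    # only the (discarded) cutoff/Nyquist mode is lost.
    x = 2*np.pi*np.arange(n)/n
    u = np.cos(x) + 0.3*np.sin(2*x)
    return np.allclose(ifs(fs(u)), u)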
# Fourier Interpolation
def fourmat(n,y):
x = 2*np.pi*np.arange(n)/n
w = (-1.)**np.arange(n)
with np.errstate(divide='ignore', invalid='ignore'):
P = 1/np.tan(0.5*(x-y))
P = w*P
P = P/np.sum(P)
P[np.isnan(P)] = 1
return P
# Matrix for barycentric interpolation
def barymat(w,x,X):
P = X - x
with np.errstate(divide='ignore',invalid='ignore'):
P = w/P
P = P/np.sum(P)
P[np.isnan(P)] = 1
return P
# Periodic volume variations
def Vperiodic(t,m=20,p=100,Vavg=2*np.pi,Vamp=np.pi):
a = 2*np.pi/p
A = np.sqrt(1+(m*np.cos(a*t))**2)
V = Vavg + Vamp*np.arctan(m*np.sin(a*t)/A)/np.arctan(m)
Vdot = Vamp*a*m*np.cos(a*t)/A/np.arctan(m)
return V,Vdot
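# A hedged usage sketch of the volume law above (argument values are illustrative only).
def _volume_law_example():
    t = np.linspace(0, 200, 201)
    V, Vdot = Vperiodic(t, m=20, p=100)  # V oscillates about Vavg with amplitude ~Vamp
    return V, Vdot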
class ODEdrop(object):
"""
A class for a drop object.
Attributes
----------
slip: float [1e-3]
Slip length.
V: float [2*np.pi]
Droplet volume. It can be a constant or a function of time.
n: int [100]
Number of discrete points for solving. It must be even.
het: float or callable [1.0]
The heterogeneity profile.
ic: float or callable [1.0]
The initial condition for the radius.
t_end: float [None]
The final time.
Xc,Yc: float [0.0]
The initial coordinates of the centroid.
order: integer [2]
An integer specifying the order of approximation
0: Leading-order expression for the normal speed
1: Correction terms included in JFM2019
2: Correction terms included in PRF2021
flux: list of tuples [None]
Specifies whether a delta-function flux is applied. Each tuple consists of three elements (x,y,s), where (x,y) are the x and y coordinates of the flux position and s is the strength. Avoid placing the flux too near to the contact line.
bim: bool [True]
Do not change at present. For future development.
method: str ['RK45']
The method to be used with solve_ivp. If it is slow use 'LSODA'.
φ,u: array_like
Polar angle.
    solution: OdeSolution
OdeSolution instance containing the solution to the equations; See
documentation for solve_ivp.
events: callable function [None]
Events functionality passed to solve_ivp
Methods
-------
ic(ic), het(het), V(vol)
        Sets the attributes for ic, het and V.
solve()
Computes the solution if t_end is specified. Otherwise it returns the
angle.
drawcl(T,color='b',style='-')
Draws contact line shapes for given values of time in T.
Parameters
----------
T : array_like
The times at which the shapes are to be plotted.
color: string, optional
The color of the plot. The default is 'b'.
style: string, optional
The line style for the plot. The default is '-'
Raises
------
Exception
- If t_end is undefined.
- If solution has not yet been computed.
- If some element of T lies outside the time range.
Returns
-------
None
angle(t)
Returns an array with the angle at a specified time, t.
Parameters
----------
t : float
The time for which the angle is to be returned. The default is None.
Raises
------
Exception
- If t lies outside the solution range
- If solution has yet to be computed
- If t is not a scalar
Returns
-------
angle: array_like
The apparent contact angle.
getcl(t)
Returns the X and Y coordinates of the contact line for prescribed
times.
Parameters
----------
t : float or array_like
The times at which the coordinates are to be returned. An exception
is thrown if some element of t lies outside the solution range.
coord: string
The coordinate system to output the contact line. It can be either 'cartesian' or 'polar'
Returns
-------
X,Y : array_like
Coordinates of the contact line shape if coord = 'cartesian'
X,Y,R: array_like
        Centroid coordinates and radius of contact line if coord='polar'.
resume(t)
Resumes a completed simulation.
Parameters
----------
t : 2-tuple of floats
Interval of integration. The solver should start with the previous
t_end. Otherwise an exception is thrown.
Returns
-------
None.
makegif(file=None,fps=5,duration=10)
Creates a gif saved with the name file, for given fps and duration in
seconds. An exception is thrown if solution has not been computed.
Parameters
----------
file : str
The name of the file. The default is None.
fps : float
The number of frames per second. The default is 5.
    duration : float
The duration of the animation. The default is 10.
Returns
-------
None.
"""
def __init__(self, slip = 1e-3, V=2*np.pi, n = 100, het = 1., ic = 1.0,
order = 2, flux=None, t_end=None, bim=True, Xc=0., Yc=0.,
method='RK45',move_origin=True,events=None):
if (n%2 != 0):
            raise Exception("An even number of points is required")
# Discretization
self.slip = slip
self.n = n
self.m = int(n/2-1)
self.φ = 2*np.pi*np.arange(n)/n
self.u = self.φ
self.bim = bim
self.order = order
self.__logλ = np.log(slip)
# Parse simulation parameters
self.V = V
self.flux = flux
if isinstance(self.flux,tuple):
self.flux = [self.flux]
# Initial condition
self.Xc = Xc
self.Yc = Yc
self.ic = ic
# ODE integrator
self.events = events
self.t_end = t_end
self.method = method
self.solution = None
self.move_origin = move_origin
# Chemical Heterogeneity
self.het = het
# Define a set of private variables for BIM
self.__mm = np.arange(1,self.m+1)
self.__m0 = np.arange(self.m)
if self.bim:
self.__j = np.arange(n)
self.__W = toeplitz(1.0/self.n*np.sum(np.cos((self.__j[:,None]*self.__mm)*np.pi/(self.m+1))/self.__mm,axis=1) \
+ 0.25*(-1.)**self.__j/(self.m+1)**2)
self.__δφ = self.φ - self.φ[:,None]
self.__sin_δφ = np.sin(self.__δφ)
self.__cos_δφ = np.cos(self.__δφ)
self.__sin2_δφ = np.sin(.5*self.__δφ)**2
self.__k = np.fft.rfftfreq(self.n,d=1/self.n)
with np.errstate(divide='ignore', invalid='ignore'):
self.__log_4_sin2_δφ = np.log(4*self.__sin2_δφ)
self.__cosφ = np.cos(self.φ)
self.__sinφ = np.sin(self.φ)
# Simulation parameters using the Low-order model
self.__β0 = np.full(self.m,-self.__logλ)
self.__βm = np.full(self.m,-self.__logλ)
self.__βp = np.full(self.m,-self.__logλ)
self.__βt = np.zeros(self.m)
self.__β00 = - self.__logλ
# Augment parameters following JFM2019
if self.order > 0:
pars = loadmat(os.path.join(os.path.dirname(__file__),"parameters.mat"))
self.__β0 += 1 - pars["beta"][0,:self.m]
self.__βm += 1 - pars["gamma"][0,:self.m]
self.__βp += 1 - 2*pars["beta"][0,:self.m] + pars["gamma"][0,:self.m]
self.__β00 -= 1 + np.log(2)
# Augment parameters following PoF2021
if self.order > 1:
self.__βm += pars["beta_m"][0,:self.m]
self.__βp -= pars["beta_p"][0,:self.m]
self.__βt = pars["beta_0"][0,:self.m]
# Flux functions
if self.flux is not None:
pars = loadmat(os.path.join(os.path.dirname(__file__),"hypergeom.mat"))
self.__Xq = 2*pars["x"][0]-1
self.__Wq = pars["w"][0]
self.__Iq = pars["I"][0,:self.m]
self.__Fq = pars["F"][:self.m,:]
self.__sqxq = pars["sqx"][0]
self.__Wbaryq = pars["wbary"][0]
# Volume property
@property
def V(self):
return self._V
@V.setter
def V(self,value):
if not callable(value):
self._V = lambda t: (value,0)
else:
self._V = value
    # Initial Condition
@property
def ic(self):
return self._ic
@ic.setter
def ic(self,value):
if not callable(value):
self._ic = np.full(self.n,value,dtype='float64')
else:
self._ic = value(self.φ)
self.solution = None
# Heterogeneity profile
@property
def het(self):
return self._g
@het.setter
def het(self,value):
if not callable(value):
self._g = lambda x,y: np.full(x.shape,value,dtype='float64')
else:
self._g = value
self.solution = None
# Evaluate Radius via BIM
def __angle(self,Vo,Ro):
# Scale data to avoid the Γ-contour
self.__scale = 0.8/np.max(Ro)
R = Ro*self.__scale
V = Vo*self.__scale**3
# Derivatives
self.__Rhat = np.fft.rfft(R)
self.__Rhat[-1] = 0
self.__Ru = np.fft.irfft(1j*self.__k*self.__Rhat)
self.__Ruu = np.fft.irfft(-self.__k**2*self.__Rhat)
# Other variables
self.__D = R**2 + self.__Ru**2
self.__sqD = np.sqrt(self.__D)
self.__RRo = R*R[:,None]
self.__x_xo = (R-R[:,None])**2 + 4*self.__RRo*self.__sin2_δφ
self.__x_dot_n = R**2/self.__sqD
# Curvature (times sqD)
self.__K = (R**2 + 2*self.__Ru**2 - R*self.__Ruu)/self.__D
with np.errstate(divide='ignore', invalid='ignore'):
# Normal derivative of Green's function
# to obtain Gn multiply by n/(0.5*π*sqD)
self.__Gn = - (0.25/self.n)*(R**2 - self.__RRo*self.__cos_δφ \
- (R[:,None]*self.__Ru)*self.__sin_δφ)/self.__x_xo
np.fill_diagonal(self.__Gn,-(0.125/self.n)*self.__K)
# Green's function
# 2*π/m * (G - 0.5*log(4*sin(δφ)^2))
self.__Gm = -0.5*(np.log(self.__x_xo)-self.__log_4_sin2_δφ)/self.n
np.fill_diagonal(self.__Gm,-0.5*np.log(self.__D)/self.n)
# Solve and determine the local angle
self.__Wn = np.linalg.solve((self.__Gm + self.__W)*self.__sqD,0.125*R**2 + self.__Gn@(R**2))
self.__kk = 4*V*self.n/(2*np.pi*np.sum((0.25*self.__x_dot_n-self.__Wn)*R**2*self.__sqD))
return self.__kk*(0.5*self.__x_dot_n-self.__Wn)
# Return Radius from fourier harmonics
def __radius(self,Uo):
a_hat = Uo[:self.m+1].astype(np.complex128)
a_hat[1:self.m+1] -= 1j*Uo[self.m+1:]
return ifs(a_hat)
# ODE
def __ode(self,t,U,pbar,state):
dU = np.zeros(2*self.m+3)
# Centroid and harmonics
Xc,Yc = U[-2], U[-1]
bm = (U[1:self.m+1] -1j*U[self.m+1:-2])/U[0]
V,Vdot = self.V(t)
# Contact line radius
a = self.__radius(U[:-2])
# Local contact angle
θs = self._g(Xc + a*self.__cosφ, Yc + a*self.__sinφ)
θs3_hat = fs(θs**3)
# Compute ψ
if self.order == 1:
self.__ψ[0] = np.log(U[0]*np.mean(θs))
elif self.order==2:
self.__ψ = fs(np.log(a*θs))
# Apparent contact angle
if self.bim:
θ = self.__angle(V,a)
θ3_hat = fs(θ**3)
else:
θo = 4*V/(np.pi*U[0]**3)
θm = - bm*self.__m0
θ = θo*ifs(np.concatenate(([1],θm)))
θ3_hat = θo**3*np.concatenate(([1],3*θm))
# Compute mass fluxes
I = 0
if self.flux is not None:
I = np.zeros(self.m+1,dtype=np.cdouble)
for delta in self.flux:
Xo = delta[0]
Yo = delta[1]
So = delta[2]
# Radial distance of delta function
Rd = np.sqrt((Xo-Xc)**2 + (Yo-Yc)**2)
                φd = np.arctan2(Yo-Yc, Xo-Xc)
import numpy as np
import pyeer.eer_info
import pytest
import sklearn.metrics
import audmetric
@pytest.mark.parametrize('truth,prediction,labels,to_string', [
(
np.random.randint(0, 10, size=5),
np.random.randint(0, 10, size=5),
None,
False,
),
(
np.random.randint(0, 10, size=1),
np.random.randint(0, 10, size=1),
list(range(1, 10)),
False,
),
(
np.random.randint(0, 10, size=10),
np.random.randint(0, 10, size=10),
list(range(1, 10)),
False,
),
(
        np.random.randint(0, 10, size=10),
#!/usr/bin/env python
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Nov 27, 2011"
import numpy as np
import math
from monty.dev import deprecated
from pymatgen.core.lattice import Lattice
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
"""
Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0
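# A small illustrative check of the two lookups above (a hedged sketch, not part of
# the original API).
def _coord_lookup_example():
    coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
    matches = find_in_coord_list(coords, [0.5, 0.5, 0.5])  # -> array([1])
    found = in_coord_list(coords, [0.1, 0.1, 0.1])         # -> False
    return matches, found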
def is_coord_subset(subset, superset, atol=1e-8):
"""
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
def coord_list_mapping(subset, superset):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :]),
axis=2))[1]
result = c2[inds]
if not np.allclose(c1, result):
if not is_coord_subset(subset, superset):
raise ValueError("subset is not a subset of superset")
if not result.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
diff = c1[:, None, :] - c2[None, :, :]
diff -= np.round(diff)
inds = np.where(np.all(np.abs(diff) < atol, axis = 2))[1]
    # verify result (it's easier to check the validity of the result than
    # the validity of the inputs)
test = c2[inds] - c1
test -= np.round(test)
if not np.allclose(test, 0):
if not is_coord_subset_pbc(subset, superset):
raise ValueError("subset is not a subset of superset")
if not test.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def get_linear_interpolated_value(x_values, y_values, x):
"""
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
"""
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
def all_distances(coords1, coords2):
"""
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j]
"""
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
def pbc_diff(fcoords1, fcoords2):
"""
Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
"""
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
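# Runnable restatement of the docstring examples above (a hedged sketch).
def _pbc_diff_example():
    d1 = pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9])   # ~ [-0.2, -0.4,  0.2]
    d2 = pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9])  # ~ [-0.4, -0.4,  0.11]
    return d1, d2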
@deprecated(Lattice.get_all_distances)
def pbc_all_distances(lattice, fcoords1, fcoords2):
"""
Returns the distances between two lists of coordinates taking into
account periodic boundary conditions and the lattice. Note that this
computes an MxN array of distances (i.e. the distance between each
point in fcoords1 and every coordinate in fcoords2). This is
different functionality from pbc_diff.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
fcoords1[i] and fcoords2[j] is distances[i,j]
"""
return lattice.get_all_distances(fcoords1, fcoords2)
def pbc_shortest_vectors(lattice, fcoords1, fcoords2):
"""
Returns the shortest vectors between two lists of coordinates taking into
account periodic boundary conditions and the lattice.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
array of displacement vectors from fcoords1 to fcoords2
first index is fcoords1 index, second is fcoords2 index
"""
#ensure correct shape
fcoords1, fcoords2 = np.atleast_2d(fcoords1, fcoords2)
#ensure that all points are in the unit cell
fcoords1 = np.mod(fcoords1, 1)
fcoords2 = np.mod(fcoords2, 1)
#create images, 2d array of all length 3 combinations of [-1,0,1]
r = np.arange(-1, 2)
arange = r[:, None] * np.array([1, 0, 0])[None, :]
brange = r[:, None] * np.array([0, 1, 0])[None, :]
crange = r[:, None] * np.array([0, 0, 1])[None, :]
images = arange[:, None, None] + brange[None, :, None] + \
crange[None, None, :]
images = images.reshape((27, 3))
#create images of f2
shifted_f2 = fcoords2[:, None, :] + images[None, :, :]
cart_f1 = lattice.get_cartesian_coords(fcoords1)
cart_f2 = lattice.get_cartesian_coords(shifted_f2)
#all vectors from f1 to f2
vectors = cart_f2[None, :, :, :] - cart_f1[:, None, None, :]
d_2 = np.sum(vectors ** 2, axis=3)
a, b = np.indices([len(fcoords1), len(fcoords2)])
return vectors[a, b, np.argmin(d_2, axis=2)]
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
    fdist -= np.round(fdist)
# ============================================================================
# ============================================================================
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Author: <NAME>
# E-mail:
# Description: Python implementations of preprocessing techniques.
# Contributors:
# ============================================================================
"""
Module of removal methods in the preprocessing stage:
- Many methods for removing stripe artifact in a sinogram (<-> ring artifact
in a reconstructed image).
- A zinger removal method.
- Blob removal methods.
"""
import numpy as np
import scipy.ndimage as ndi
from scipy import interpolate
import numpy.fft as fft
import algotom.util.utility as util
def remove_stripe_based_sorting(sinogram, size=21, dim=1, **options):
"""
Remove stripe artifacts in a sinogram using the sorting technique,
algorithm 3 in Ref. [1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image.
size : int
Window size of the median filter.
dim : {1, 2}, optional
Dimension of the window.
options : dict, optional
Use another smoothing filter rather than the median filter.
        E.g. options={"method": "gaussian_filter", "para1": (1, 21)}
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
msg = "\n Please use the dictionary format: options={'method':" \
" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}"
sino_sort, sino_index = util.sort_forward(np.float32(sinogram), axis=0)
if len(options) == 0:
if dim == 2:
sino_sort = ndi.median_filter(sino_sort, (size, size))
else:
sino_sort = ndi.median_filter(sino_sort, (1, size))
else:
if not isinstance(options, dict):
raise ValueError(msg)
for opt_name in options:
opt = options[opt_name]
method = tuple(opt.values())[0]
para = tuple(opt.values())[1:]
if method in dir(ndi):
try:
sino_sort = getattr(ndi, method)(sino_sort, *para)
except:
raise ValueError(msg)
else:
if method in dir(util):
try:
sino_sort = getattr(util, method)(sino_sort, *para)
except:
raise ValueError(msg)
else:
raise ValueError("Can't find the method: '{}' in the"
" namespace".format(method))
return util.sort_backward(sino_sort, sino_index, axis=0)
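# Minimal usage sketch of the sorting-based removal above on a synthetic sinogram
# (hedged; the injected stripe and the window size are illustrative only).
def _sorting_removal_example():
    sino = np.random.rand(180, 256).astype(np.float32)
    sino[:, 100] += 0.5                     # inject an artificial vertical stripe
    return remove_stripe_based_sorting(sino, size=21, dim=1)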
def remove_stripe_based_filtering(sinogram, sigma=3, size=21, dim=1, sort=True,
**options):
"""
Remove stripe artifacts in a sinogram using the filtering technique,
algorithm 2 in Ref. [1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image
sigma : int
Sigma of the Gaussian window used to separate the low-pass and
high-pass components of the intensity profile of each column.
size : int
Window size of the median filter.
dim : {1, 2}, optional
Dimension of the window.
sort : bool, optional
Apply sorting if True.
options : dict, optional
Use another smoothing filter rather than the median filter.
        E.g. options={"method": "gaussian_filter", "para1": (1, 21)}.
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
msg = "\n Please use the dictionary format: options={'method':" \
" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}"
window = {"name": "gaussian", "sigma": sigma}
sino_smooth, sino_sharp = util.separate_frequency_component(
np.float32(sinogram), axis=0, window=window)
if sort is True:
sino_smooth, sino_index = util.sort_forward(sino_smooth, axis=0)
if len(options) == 0:
if dim == 2:
sino_smooth = ndi.median_filter(sino_smooth, (size, size))
else:
sino_smooth = ndi.median_filter(sino_smooth, (1, size))
else:
if not isinstance(options, dict):
raise ValueError(msg)
for opt_name in options:
opt = options[opt_name]
method = tuple(opt.values())[0]
if method in dir(ndi):
para = tuple(opt.values())[1:]
try:
sino_smooth = getattr(ndi, method)(sino_smooth, *para)
except:
raise ValueError(msg)
else:
if method in dir(util):
try:
sino_smooth = getattr(util, method)(sino_smooth, *para)
except:
raise ValueError(msg)
else:
raise ValueError("Can't find the method: '{}' in the"
" namespace".format(method))
if sort is True:
sino_smooth = util.sort_backward(sino_smooth, sino_index, axis=0)
return sino_smooth + sino_sharp
def remove_stripe_based_fitting(sinogram, order=2, sigma=10, sort=False,
num_chunk=1, **options):
"""
Remove stripe artifacts in a sinogram using the fitting technique,
algorithm 1 in Ref. [1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image
order : int
Polynomial fit order.
sigma : int
Sigma of the Gaussian window in the x-direction. Smaller is stronger.
sort : bool, optional
Apply sorting if True.
num_chunk : int
Number of chunks of rows to apply the fitting.
options : dict, optional
Use another smoothing filter rather than the Fourier gaussian filter.
        E.g. options={"method": "gaussian_filter", "para1": (1, 21)}.
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
msg = "\n Please use the dictionary format: options={'method':" \
" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}"
(nrow, ncol) = sinogram.shape
pad = min(150, int(0.1 * nrow))
sigmay = np.clip(min(60, int(0.1 * ncol)), 10, None)
if sort is True:
sinogram, sino_index = util.sort_forward(sinogram, axis=0)
sino_fit = util.generate_fitted_image(sinogram, order, axis=0,
num_chunk=num_chunk)
if len(options) == 0:
sino_filt = util.apply_gaussian_filter(sino_fit, sigma, sigmay, pad)
else:
if not isinstance(options, dict):
raise ValueError(msg)
sino_filt = np.copy(sino_fit)
for opt_name in options:
opt = options[opt_name]
method = tuple(opt.values())[0]
if method in dir(ndi):
para = tuple(opt.values())[1:]
try:
sino_filt = getattr(ndi, method)(sino_filt, *para)
except:
raise ValueError(msg)
else:
if method in dir(util):
try:
sino_filt = getattr(util, method)(sino_filt, *para)
except:
raise ValueError(msg)
else:
raise ValueError("Can't find the method: '{}' in the"
" namespace".format(method))
sino_filt = np.mean(np.abs(sino_fit)) * sino_filt / np.mean(
np.abs(sino_filt))
sino_corr = ((sinogram / sino_fit) * sino_filt)
if sort is True:
sino_corr = util.sort_backward(sino_corr, sino_index, axis=0)
return sino_corr
def remove_large_stripe(sinogram, snr=3.0, size=51, drop_ratio=0.1, norm=True,
**options):
"""
Remove large stripe artifacts in a sinogram, algorithm 5 in Ref. [1].
Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image
snr : float
Ratio (>1.0) used to detect stripe locations. Greater is less sensitive.
size : int
Window size of the median filter.
drop_ratio : float, optional
        Ratio of pixels to be dropped, which is used to reduce
        the possibility of false stripe detection.
norm : bool, optional
Apply normalization if True.
options : dict, optional
Use another smoothing filter rather than the median filter.
        E.g. options={"method": "gaussian_filter", "para1": (1, 21)}.
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
msg = "\n Please use the dictionary format: options={'method':" \
" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}"
sinogram = np.copy(np.float32(sinogram))
drop_ratio = np.clip(drop_ratio, 0.0, 0.8)
(nrow, ncol) = sinogram.shape
ndrop = int(0.5 * drop_ratio * nrow)
sino_sort, sino_index = util.sort_forward(sinogram, axis=0)
if len(options) == 0:
sino_smooth = ndi.median_filter(sino_sort, (1, size))
else:
if not isinstance(options, dict):
raise ValueError(msg)
sino_smooth = np.copy(sino_sort)
for opt_name in options:
opt = options[opt_name]
method = tuple(opt.values())[0]
if method in dir(ndi):
para = tuple(opt.values())[1:]
try:
sino_smooth = getattr(ndi, method)(sino_smooth, *para)
except:
raise ValueError(msg)
else:
if method in dir(util):
try:
sino_smooth = getattr(util, method)(sino_smooth, *para)
except:
raise ValueError(msg)
else:
raise ValueError("Can't find the method: '{}' in the"
" namespace".format(method))
list1 = np.mean(sino_sort[ndrop:nrow - ndrop], axis=0)
list2 = np.mean(sino_smooth[ndrop:nrow - ndrop], axis=0)
list_fact = np.divide(list1, list2,
out=np.ones_like(list1), where=list2 != 0)
list_mask = util.detect_stripe(list_fact, snr)
list_mask = np.float32(ndi.binary_dilation(list_mask, iterations=1))
if norm is True:
sinogram = sinogram / np.tile(list_fact, (nrow, 1))
sino_corr = util.sort_backward(sino_smooth, sino_index, axis=0)
xlist_miss = np.where(list_mask > 0.0)[0]
sinogram[:, xlist_miss] = sino_corr[:, xlist_miss]
return sinogram
def remove_dead_stripe(sinogram, snr=3.0, size=51, residual=True):
"""
Remove unresponsive or fluctuating stripe artifacts in a sinogram,
algorithm 6 in Ref. [1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image.
snr : float
Ratio (>1.0) used to detect stripe locations. Greater is less sensitive.
size : int
Window size of the median filter.
residual : bool, optional
Removing residual stripes if True.
Returns
-------
ndarray
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
sinogram = np.copy(sinogram) # Make it mutable
(nrow, _) = sinogram.shape
sino_smooth = np.apply_along_axis(ndi.uniform_filter1d, 0, sinogram, 10)
list_diff = np.sum(np.abs(sinogram - sino_smooth), axis=0)
list_diff_bck = ndi.median_filter(list_diff, size)
nmean = np.mean(np.abs(list_diff_bck))
list_diff_bck[list_diff_bck == 0.0] = nmean
list_fact = list_diff / list_diff_bck
list_mask = util.detect_stripe(list_fact, snr)
list_mask = np.float32(ndi.binary_dilation(list_mask, iterations=1))
list_mask[0:2] = 0.0
list_mask[-2:] = 0.0
xlist = np.where(list_mask < 1.0)[0]
ylist = np.arange(nrow)
mat = sinogram[:, xlist]
finter = interpolate.interp2d(xlist, ylist, mat, kind='linear')
xlist_miss = np.where(list_mask > 0.0)[0]
if len(xlist_miss) > 0:
sinogram[:, xlist_miss] = finter(xlist_miss, ylist)
if residual is True:
sinogram = remove_large_stripe(sinogram, snr, size)
return sinogram
def remove_all_stripe(sinogram, snr=3.0, la_size=51, sm_size=21, drop_ratio=0.1,
dim=1, **options):
"""
Remove all types of stripe artifacts in a sinogram by combining algorithm
6, 5, 4, and 3 in Ref. [1]. Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image.
snr : float
Ratio (>1.0) used to detect stripe locations. Greater is less sensitive.
la_size : int
Window size of the median filter to remove large stripes.
sm_size : int
Window size of the median filter to remove small-to-medium stripes.
drop_ratio : float, optional
        Ratio of pixels to be dropped, which is used to reduce
        the possibility of false stripe detection.
dim : {1, 2}, optional
Dimension of the window.
options : dict, optional
Use another smoothing filter rather than the median filter.
        E.g. options={"method": "gaussian_filter", "para1": (1, 21)}
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1364/OE.26.028396
"""
sinogram = remove_dead_stripe(sinogram, snr, la_size, residual=False)
sinogram = remove_large_stripe(sinogram, snr, la_size, drop_ratio,
**options)
sinogram = remove_stripe_based_sorting(sinogram, sm_size, dim, **options)
return sinogram
def remove_stripe_based_2d_filtering_sorting(sinogram, sigma=3, size=21, dim=1,
**options):
"""
Remove stripes using a 2D low-pass filter and the sorting-based technique,
algorithm in section 3.3.4 in Ref. [1].
Angular direction is along the axis 0.
Parameters
---------
sinogram : array_like
2D array. Sinogram image.
sigma : int
Sigma of the Gaussian window.
size : int
Window size of the median filter.
dim : {1, 2}, optional
Dimension of the window.
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://doi.org/10.1117/12.2530324
"""
(nrow, ncol) = sinogram.shape
pad = min(150, int(0.1 * min(nrow, ncol)))
sino_smooth = util.apply_gaussian_filter(sinogram, sigma, sigma, pad)
sino_sharp = sinogram - sino_smooth
sino_sharp = remove_stripe_based_sorting(sino_sharp, size, dim, **options)
return sino_smooth + sino_sharp
def remove_stripe_based_normalization(sinogram, sigma=15, num_chunk=1,
sort=True, **options):
"""
Remove stripes using the method in Ref. [1].
Angular direction is along the axis 0.
Parameters
----------
sinogram : array_like
2D array. Sinogram image.
sigma : int
Sigma of the Gaussian window.
num_chunk : int
Number of chunks of rows.
sort : bool, optional
Apply sorting (Ref. [2]) if True.
options : dict, optional
Use another smoothing 1D-filter rather than the Gaussian filter.
        E.g. options={"method": "median_filter", "para1": 21}.
Returns
-------
array_like
2D array. Stripe-removed sinogram.
References
----------
.. [1] https://www.mcs.anl.gov/research/projects/X-ray-cmt/rivers/
tutorial.html
.. [2] https://doi.org/10.1364/OE.26.028396
"""
msg = "\n Please use the dictionary format: options={'method':" \
" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}" \
"\n Note that the filter must be a 1D-filter."
(nrow, _) = sinogram.shape
sinogram = np.copy(sinogram)
if sort is True:
sinogram, sino_index = util.sort_forward(sinogram, axis=0)
    list_index = np.array_split(np.arange(nrow), num_chunk)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/14 15:45
# @Author : <NAME>
# @Site :
# @File : pandas_advanced.py
import pandas as pd
import numpy as np
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
df['E'] = np.where(df['D'] >= 0, '>=0', '<0')
df['F'] = np.random.randint(0, 2, 6)
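# A short continuation sketch (assumed, not from the original snippet): group by the
# two helper columns created above.
print(df.groupby(['E', 'F']).size())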
from matplotlib import pyplot as plt
import numpy as np
import scipy.signal
from scipy.io import loadmat
from scipy.linalg import norm
def mat_multiply(ax, bx):
    # 2-D matrix multiplication implemented with explicit loops
try:
ax, bx = np.asmatrix(ax), np.asmatrix(bx)
except Exception as e:
raise e
m, p = ax.shape
q, n = bx.shape
if p == q:
res = np.zeros([m, n])
for i in range(m):
for j in range(n):
for k in range(p):
res[i, j] += (ax[i, k] * bx[k, j])
return res
else:
print(p, q, 'shapes', ax.shape, 'and', bx.shape, 'not aligned')
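# Cross-check of the loop-based multiply above against NumPy (a hedged sketch).
def _mat_multiply_check():
    a = np.arange(6).reshape(2, 3)
    b = np.arange(12).reshape(3, 4)
    return np.allclose(mat_multiply(a, b), a @ b)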
def demo_function_convolution(p0=[1, 2, 3], p1=[4, 5, 6]):
    a = np.array(p0)
import numpy
import SimpleITK as sitk
from radiomics import base, cShape, deprecated
class RadiomicsShape(base.RadiomicsFeaturesBase):
r"""
In this group of features we included descriptors of the three-dimensional size and shape of the ROI. These features
are independent from the gray level intensity distribution in the ROI and are therefore only calculated on the
non-derived image and mask.
Unless otherwise specified, features are derived from the approximated shape defined by the triangle mesh. To build
this mesh, vertices (points) are first defined as points halfway on an edge between a voxel included in the ROI and
one outside the ROI. By connecting these vertices a mesh of connected triangles is obtained, with each triangle
defined by 3 adjacent vertices, which shares each side with exactly one other triangle.
This mesh is generated using a marching cubes algorithm. In this algorithm, a 2x2 cube is moved through the mask
space. For each position, the corners of the cube are then marked 'segmented' (1) or 'not segmented' (0). Treating the
corners as specific bits in a binary number, a unique cube-index is obtained (0-255). This index is then used to
determine which triangles are present in the cube, which are defined in a lookup table.
These triangles are defined in such a way, that the normal (obtained from the cross product of vectors describing 2
out of 3 edges) are always oriented in the same direction. For PyRadiomics, the calculated normals are always pointing
outward. This is necessary to obtain the correct signed volume used in calculation of ``MeshVolume``.
Let:
- :math:`N_v` represent the number of voxels included in the ROI
- :math:`N_f` represent the number of faces (triangles) defining the Mesh.
- :math:`V` the volume of the mesh in mm\ :sup:`3`, calculated by :py:func:`getMeshVolumeFeatureValue`
- :math:`A` the surface area of the mesh in mm\ :sup:`2`, calculated by :py:func:`getMeshSurfaceAreaFeatureValue`
References:
- Lorensen WE, Cline HE. Marching cubes: A high resolution 3D surface construction algorithm. ACM SIGGRAPH Comput
Graph `Internet <http://portal.acm.org/citation.cfm?doid=37402.37422>`_. 1987;21:163-9.
"""
def __init__(self, inputImage, inputMask, **kwargs):
super(RadiomicsShape, self).__init__(inputImage, inputMask, **kwargs)
def _initVoxelBasedCalculation(self):
raise NotImplementedError('Shape features are not available in voxel-based mode')
def _initSegmentBasedCalculation(self):
self.pixelSpacing = numpy.array(self.inputImage.GetSpacing()[::-1])
# Pad inputMask to prevent index-out-of-range errors
self.logger.debug('Padding the mask with 0s')
cpif = sitk.ConstantPadImageFilter()
padding = numpy.tile(1, 3)
try:
cpif.SetPadLowerBound(padding)
cpif.SetPadUpperBound(padding)
except TypeError:
# newer versions of SITK/python want a tuple or list
cpif.SetPadLowerBound(padding.tolist())
cpif.SetPadUpperBound(padding.tolist())
self.inputMask = cpif.Execute(self.inputMask)
# Reassign self.maskArray using the now-padded self.inputMask
self.maskArray = (sitk.GetArrayFromImage(self.inputMask) == self.label)
self.labelledVoxelCoordinates = numpy.where(self.maskArray != 0)
self.logger.debug('Pre-calculate Volume, Surface Area and Eigenvalues')
# Volume, Surface Area and eigenvalues are pre-calculated
# Compute Surface Area and volume
self.SurfaceArea, self.Volume, self.diameters = cShape.calculate_coefficients(self.maskArray, self.pixelSpacing)
# Compute eigenvalues and -vectors
Np = len(self.labelledVoxelCoordinates[0])
coordinates = numpy.array(self.labelledVoxelCoordinates, dtype='int').transpose((1, 0)) # Transpose equals zip(*a)
physicalCoordinates = coordinates * self.pixelSpacing[None, :]
    physicalCoordinates -= numpy.mean(physicalCoordinates, axis=0)
from __future__ import division
import torch
import math
import random
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
import warnings
import cv2
from . import cv2_funcs as F
__all__ = ["Compose", "ToTensor", "Normalize", "Lambda",
"Resize", "CenterCrop", "RandomCrop",
"RandomHorizontalFlip", "RandomResizedCrop",
"Resize", "ResizeShort", "CenterCrop",
"RandomSaturation", "RandomBrightness",
"RandomContrastion", "RandomPrimary"
"MotionBlur", "MedianBlur", "RandomOcclusion"]
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts in-place, i.e., it mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class Resize(object):
"""Resize the input numpy ndarray to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``, bilinear interpolation
"""
def __init__(self, size, interpolation=cv2.INTER_LINEAR):
assert isinstance(size, int) or (isinstance(
size, collections.Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be scaled.
Returns:
numpy ndarray: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, self.interpolation)
class ResizeShort(object):
"""Resize the input numpy ndarray to the given size, make the short edge to given size.
Args:
size (int): Desired output size of shorter edge.
        interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``, bilinear interpolation
"""
def __init__(self, size, interpolation=cv2.INTER_LINEAR):
assert isinstance(size, int) or (isinstance(
size, collections.Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be scaled.
Returns:
numpy ndarray: Rescaled image.
"""
h, w = img.shape[:2]
short_edge = min(h, w)
ratio = self.size / short_edge
h_new, w_new = int(h * ratio), int(w * ratio)
return F.resize(img, (h_new, w_new), self.interpolation)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, self.interpolation)
class CenterCrop(object):
"""Crops the given numpy ndarray at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be cropped.
Returns:
numpy ndarray: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
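# A hedged usage sketch of the transforms defined above (the image path and the
# ImageNet-style mean/std values are assumptions, not taken from this module).
def _example_pipeline(path="example.jpg"):
    img = cv2.imread(path)  # BGR uint8 image
    pipeline = Compose([
        Resize(256),
        CenterCrop(224),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return pipeline(img)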
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class RandomCrop(object):
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (numpy ndarray): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
h, w = img.shape[0:2]
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be cropped.
Returns:
numpy ndarray: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
# pad the width if needed
if self.pad_if_needed and img.shape[1] < self.size[1]:
img = F.pad(
img, (self.size[1] - img.shape[1], 0), self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and img.shape[0] < self.size[0]:
img = F.pad(
img, (0, self.size[0] - img.shape[0]), self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img):
"""random
Args:
img (numpy ndarray): Image to be flipped.
Returns:
numpy ndarray: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomResizedCrop(object):
"""Crop the given numpy ndarray to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: cv2.INTER_LINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=cv2.INTER_LINEAR):
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (numpy ndarray): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.shape[1] and h <= img.shape[0]:
i = random.randint(0, img.shape[0] - h)
j = random.randint(0, img.shape[1] - w)
return i, j, h, w
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return i, j, w, w
def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be cropped and resized.
Returns:
numpy ndarray: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4)
for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4)
for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
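# Illustrative sketch (not part of the original module): drawing crop parameters
# with ``RandomResizedCrop.get_params`` and extracting the patch that would later
# be resized to the target output size.
def _example_random_resized_crop_params():
    img = np.zeros((240, 320, 3), dtype=np.uint8)
    i, j, h, w = RandomResizedCrop.get_params(img, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
    return img[i:i + h, j:j + w]  # random area/aspect-ratio patch, top-left at (i, j)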
class RandomBrightness(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, p=0):
self.p = p
@staticmethod
def get_param(img, p):
if np.random.rand() > 0.5:
return img
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
factor = np.random.uniform()
factor = 1-p + 2*p*factor
hsv[:, :, 2] = np.clip(factor*hsv[:, :, 2], 0, 255).astype(np.uint8)
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img
def __call__(self, img):
img = self.get_param(img, self.p)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
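# Sketch of the brightness jitter implemented above (illustrative only): with
# p = 0.2 the V channel is scaled by a factor drawn uniformly from [0.8, 1.2],
# i.e. factor = 1 - p + 2*p*u for u ~ U[0, 1].
def _example_brightness_factor(p=0.2):
    u = np.random.uniform()
    factor = 1 - p + 2 * p * u
    return factor  # multiply the HSV value channel by this, then clip to [0, 255]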
class RandomSaturation(object):
"""Randomly change the saturation of an image.
"""
def __init__(self, saturation=0):
self.saturation = saturation
@staticmethod
def get_params(img, saturation):
if np.random.rand() > 0.5:
return img
saturation_factor =
|
np.random.uniform()
|
numpy.random.uniform
|
from math import isclose
import numpy as np
import scipy.spatial
class PointData:
def __init__(self, left, point, right):
self.left = left
self.point = point
self.right = right
self.between = between_neighbors(left, point, right)
self.offset = midpoint_projection_offset(left, point, right)
def __eq__(self, other):
if isinstance(self, other.__class__):
return all([
np.array_equal(self.left, other.left),
np.array_equal(self.point, other.point),
np.array_equal(self.right, other.right),
(self.between == other.between),
isclose(self.offset, other.offset),
])
return NotImplemented
def less_or_close(a, b, *args, **kwargs):
# Use isclose for handling effective equivalence
return a < b or isclose(a, b, *args, **kwargs)
def neighbor_window(seq, index, count=1):
if len(seq) < (count + 2):
raise ValueError("seq must have at least 3 elements to have neighbors")
if index < 1 or index > (len(seq) - (count + 1)):
raise IndexError(f"Index must fall between 1 and len(seq) - 2 to have neighbors: (index={index}, seq={seq})")
return seq[index - 1:index + count + 1]
def modified_point_list(seq):
if len(seq) < 3:
raise ValueError("seq must have at least 3 elements to have neighbors")
if not np.array_equal(seq[0], seq[-1]):
raise ValueError("First and last element must match")
return_seq = []
for pnt in tuple(seq) + (seq[1],):
try:
if len(pnt) == 2:
return_seq.append(np.asarray(pnt))
continue
except TypeError:
raise ValueError("each element in seq must have len(2)")
return return_seq
def point_window_iter(seq):
# Iterates over groups of three points, where the input seq
# has first and last the same, then add a final group with the
# first/last element in the middle
elem_wrapped_seq = seq + (seq[1],)
for i in range(1, len(elem_wrapped_seq) - 1):
yield neighbor_window(elem_wrapped_seq, i)
def within_tolerance(value, within, float_tol=1e-9):
if (within < 0):
raise ValueError('Argument "within" cannot be negative')
abs_value = abs(value)
return less_or_close(abs_value, within, rel_tol=float_tol)
def midpoint_projection_offset(pnt1, pnt2, pnt3):
outer_vec = pnt3 - pnt1
norm_outer = np.linalg.norm(outer_vec)
return abs(np.cross(outer_vec, pnt1 - pnt2) / norm_outer)
def between_neighbors(pnt1, pnt2, pnt3):
"""Midpoint projected onto neighboring points line is contained in segment"""
# Make sure the projection of the midpoint lies between the outer points
outer_vec = pnt3 - pnt1
norm_outer = np.linalg.norm(outer_vec)
scalar_proj = np.dot(pnt2 - pnt1, outer_vec / norm_outer)
return (
less_or_close(0, scalar_proj) and less_or_close(scalar_proj, norm_outer)
)
def points_inline(pnt1, pnt2, pnt3, tolerance, float_tol=1e-9):
"""Check if the middle point lies on the line between 1 and 2 withing tolerance"""
mid_offset = midpoint_projection_offset(pnt1, pnt2, pnt3)
    # First check the point is inline within tolerance
is_inline = within_tolerance(mid_offset, tolerance, float_tol)
# Make sure the projection of the midpoint lies between the outer points
is_between = between_neighbors(pnt1, pnt2, pnt3)
return is_inline and is_between
def get_radians(pnt1, pnt2, pnt3):
v1 = pnt1 - pnt2
v2 = pnt3 - pnt2
return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
def orthogonal(pnt1, pnt2, pnt3, tolerance):
rad = get_radians(pnt1, pnt2, pnt3)
return within_tolerance(rad - (np.pi / 2), tolerance)
def same_side(pnt1, line_start, line_end, pnt2):
to_base = get_radians(pnt1, line_start, line_end)
to_pnt2 = get_radians(pnt1, line_start, pnt2)
return to_base > to_pnt2
def point_data_list(point_seq):
for i in range(1, len(point_seq) - 1):
p1, p2, p3 = neighbor_window(point_seq, i)
yield PointData(p1, p2, p3)
def remove_insignificant(point_iter, data_iter, tolerance):
data_seq = list(data_iter)
sig_points = list(point_iter)
while True:
rem_values = [x.offset for x in data_seq if x.between and less_or_close(x.offset, tolerance)]
if rem_values:
next_rmv = min(rem_values)
for index, data in enumerate(data_seq):
if data.between and isclose(data.offset, next_rmv):
break
# Remove then recalculate neighbors
del sig_points[index + 1]
del data_seq[index]
if index == 0:
# Replace last point with new following point
sig_points[-1] = sig_points[1]
if index == len(data_seq):
sig_points[0] = sig_points[index]
if index > 0:
data_seq[index - 1] = PointData(*neighbor_window(sig_points, index))
if index < len(data_seq):
data_seq[index] = PointData(*neighbor_window(sig_points, index + 1))
if index == len(data_seq):
data_seq[index - 1] = PointData(*neighbor_window(sig_points, index))
else:
break
return sig_points
def significant_points(points, tolerance):
point_seq = modified_point_list(points)
data_seq = point_data_list(point_seq)
return remove_insignificant(point_seq, data_seq, tolerance)
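# Illustrative sketch (not part of the original module): on a closed square
# outline with an extra collinear point, significant_points should drop the
# collinear (5, 0) vertex and keep the corners.
def _example_significant_points():
    square = [(0, 0), (5, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
    return significant_points(square, tolerance=0.5)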
def has_box(points, tolerance, angle_tolerance, min_len=10, max_len=80):
sig_points = significant_points(points, tolerance)
# Under 5 and the box is not possible
if len(sig_points) < 5:
return False
for i in range(1, len(sig_points) - 2):
p1, p2, p3, p4 = neighbor_window(sig_points, i, count=2)
mid_dist = distance(p2, p3)
if (orthogonal(p1, p2, p3, angle_tolerance) and
orthogonal(p2, p3, p4, angle_tolerance) and
same_side(p1, p2, p3, p4) and
less_or_close(mid_dist, max_len) and
less_or_close(min_len, mid_dist)):
return True
return False
def distance(pnt1, pnt2):
return np.linalg.norm(pnt2 - pnt1)
def centroid(points):
arr = np.asarray(points)
if np.array_equal(arr[0], arr[-1]):
arr = arr[:-1]
length = arr.shape[0]
sum_x = np.sum(arr[:, 0])
sum_y = np.sum(arr[:, 1])
return np.asarray((sum_x / length, sum_y / length))
def nearest_distances(points, num_nearest=1):
    if num_nearest < 1:
        raise ValueError("num_nearest must be at least 1")
    if len(points) <= num_nearest:
        raise ValueError("num_nearest cannot be larger than len(points) - 1")
arr = np.array(points)
tree = scipy.spatial.KDTree(arr)
res = tree.query(tree.data, num_nearest + 1)
# Return {
# [p_x, p_y].tobytes() : [dist_first_nearest, dist_sec_nearest, ..., dist_nth_nearest]
# } tobytes used as bytestring is hashable
return {
        point.astype(np.float64).tobytes(): dist[1:]
for point, dist in zip(tree.data, res[0])
}
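# Illustrative sketch (not from the original module): distances to the single
# nearest neighbour of each of three points; keys are the float64 byte strings
# built in nearest_distances above.
def _example_nearest_distances():
    pts = [(0.0, 0.0), (1.0, 0.0), (5.0, 0.0)]
    dists = nearest_distances(pts, num_nearest=1)
    key = np.array([0.0, 0.0]).astype(np.float64).tobytes()
    return dists[key]  # array([1.0]): the origin's nearest neighbour is 1.0 away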
def split_list(original, split_indexes):
if not split_indexes:
split_indexes = [0]
if split_indexes[0] != 0:
split_indexes.insert(0, 0)
split_indexes.append(len(original))
for i in range(1, len(split_indexes)):
s, e = neighbor_window(split_indexes, i, 0)
yield original[s:e]
def get_point_index_by_value(points, search_point):
# https://stackoverflow.com/a/18927811
arr = np.array(points)
search_arr = np.array(search_point)
return np.where(np.all(arr == search_arr, axis=1))[0][0]
def get_top_point(points):
arr = np.asarray(points)
return arr[
|
np.lexsort((arr[:,0], arr[:,1]))
|
numpy.lexsort
|
"""
This gives the user functions that are necessary to evaluate
multi-label classification
One error, coverage and average precision are called rank measures
which operate on the scores calculated by the classifier on different labels
Then there are bi-partition measures that operate on the labels that
are produced by the classifier
For bi-partition measure refer to the book
Introduction to information retrieval book by Manning
Pg.282
"""
import numpy as np
def one_error(scores, labels):
"""
Args:
scores: Type:ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
Returns: error
Type: float
"""
assert scores.shape == labels.shape
N, Nc = scores.shape
accuracy = 0.0
num_no_right_classes = 0
for i in range(0, N):
scores_row = scores[i]
label_row = labels[i]
positions_label_row_where_one = np.where(label_row == 1)[0]
if len(positions_label_row_where_one) == 0:
num_no_right_classes += 1
else:
position_max = np.argmax(scores_row)
label = label_row[position_max]
accuracy += label
if N == num_no_right_classes:
accuracy = 1.0
else:
accuracy /= (N - num_no_right_classes)
return 1 - accuracy
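# Illustrative sketch (not part of the original module): for the first row the
# top-scoring class (index 1) is a true label, for the second row it is not,
# so one_error should return 1 - 1/2 = 0.5 here.
def _example_one_error():
    scores = np.array([[0.1, 0.9, 0.2], [0.8, 0.1, 0.3]])
    labels = np.array([[0, 1, 0], [0, 0, 1]])
    return one_error(scores, labels)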
def coverage(scores, labels):
"""
Coverage measures how far along the ranked list of scores
should we traverse to achieve maximum precision
    For example, if the ranked list of scores is given below
[[0.8, 1.0, 0.7, 0.9]]
and the true label for this data point is
[[1, 0, 1, 0]]
Then the rank is 3 i.e
The scores corresponding to the correct labels are 0.8 and 0.7
Now consider the descending order of scores which is
[1.0, 0.9, 0.8, 0.7]
The rank of 0.8 is 2 and that of 0.7 is 3
    So the maximum rank is 3, which is the rank of the data point
In case of tied scores for two labels (irrespective of the scores
corresponding to positive or negative labels) they
are taken to be the maximum one.. See the test cases for example
    Paste the formula below into a LaTeX renderer to view it:
    coverage_S(H) = \frac{1}{m} \sum_{i=1}^{m} \max_{l \in Y_i} rank_f(x_i, l) - 1
H is the hypothesis from the input space to the output space
m is the number of training examples
Y_i is the labels that are assigned to the training example x_i
S = [(x1, Y1)....(xm, Ym)]
every Y is a subset(say 1, 2) of the total label set(say 1, 2, 3, 4)
Args:
scores: Type:ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
Returns: error
Type: float
"""
assert scores.shape == labels.shape
N, Nc = scores.shape
scores_ascending = np.sort(scores, axis=1)
average_rank = 0.0
for i in range(N):
right_labels_index = np.where(labels[i] == 1)[0]
# When there are no right_labels in the datum, max_rank is 0
if len(right_labels_index) == 0:
max_rank = 0
else:
scores_corresponding_to_right_labels = scores[i][right_labels_index]
inverse_rank = np.searchsorted(scores_ascending[i],
scores_corresponding_to_right_labels)
rank = (Nc) - inverse_rank
max_rank = np.max(rank)
average_rank += max_rank
average_rank /= float(N)
return average_rank
def average_precision(scores, labels):
"""
The formula for calculating average precision is as follows
    avgprec_S(H) = \frac{1}{m} \sum_{i=1}^{m} \frac{1}{|Y_i|}
        \sum_{l \in Y_i} \frac{|\{l' \in Y_i : rank_f(x_i, l') \le rank_f(x_i, l)\}|}{rank_f(x_i, l)}
Args:
scores: Type:ndarray
shape: N * N_c
N - Number of training examples
Nc - Number of classes
labels: Type: ndarray
shape: N * N_c
N - Number of training examples
N_c - Number of classes
Returns: precision
Type: float
"""
assert scores.shape == labels.shape
N, Nc = scores.shape
precision = 0.0
scores_ascending = np.sort(scores, axis=1)
for i in range(0, N):
right_labels_index = np.where(labels[i] == 1)[0]
wrong_labels_index = np.where(labels[i] == 0)[0]
scores_positive_labels = scores[i][right_labels_index]
scores_negative_labels = scores[i][wrong_labels_index]
if len(right_labels_index) == 0:
precision = 1.0
else:
inverse_rank_positive_scores = np.searchsorted(scores_ascending[i],
scores_positive_labels)
inverse_rank_negative_scores = np.searchsorted(scores_ascending[i],
scores_negative_labels)
rank_positive_scores = Nc - inverse_rank_positive_scores
rank_negative_scores = Nc - inverse_rank_negative_scores
sum_over_right_labels = 0.0
for each_rank in rank_positive_scores:
sum_over_right_labels += len(np.where(rank_negative_scores <=
each_rank)[0]) / float(each_rank)
precision += (sum_over_right_labels) / float(len(right_labels_index))
precision /= N
return round(precision, 4)
def macro_precision(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
            Nc - Number of classes
true_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
Returns: macros_precision
"""
assert predicted_labels.shape == true_labels.shape
N, Nc = predicted_labels.shape
tp = true_positives(predicted_labels, true_labels).astype("float64")
fp = false_positives(predicted_labels, true_labels).astype("float64")
sum_tp_fp = tp + fp
# If true positives are zero, irrespective of the denominator
# the precision is zero
tp_zero_location = np.where(tp == 0.0)
if len(tp_zero_location[0]) > 0:
sum_tp_fp[tp_zero_location] = 1.0
precision = np.sum(tp / sum_tp_fp) / float(Nc)
return round(precision, 4)
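# Illustrative sketch (not part of the original module; relies on the
# true_positives / false_positives helpers defined further down): class 0 has
# tp=1, fp=1 (precision 0.5) and class 1 has tp=1, fp=0 (precision 1.0),
# so macro_precision should return (0.5 + 1.0) / 2 = 0.75.
def _example_macro_precision():
    predicted = np.array([[1, 0], [1, 1]])
    true = np.array([[1, 0], [0, 1]])
    return macro_precision(predicted, true)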
def macro_recall(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
            Nc - Number of classes
true_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
Returns: macros_precision
"""
assert predicted_labels.shape == true_labels.shape
N, Nc = predicted_labels.shape
tp = true_positives(predicted_labels, true_labels).astype("float64")
fn = false_negatives(predicted_labels, true_labels).astype("float64")
sum_tp_fn = tp + fn
tp_zero_locations = np.where(tp == 0.0)
if len(tp_zero_locations[0]) > 0:
sum_tp_fn[tp_zero_locations] = 1.0
recall = np.sum(tp / sum_tp_fn) / float(Nc)
return round(recall, 4)
def macro_fscore(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
            Nc - Number of classes
true_labels: Type: ndarray
shape: N * Nc
N - Number of training examples
Nc - Number of classes
Returns: macros_precision
"""
assert predicted_labels.shape == true_labels.shape
N, Nc = predicted_labels.shape
tp = true_positives(predicted_labels, true_labels).astype("float64")
fp = false_positives(predicted_labels, true_labels).astype("float64")
fn = false_negatives(predicted_labels, true_labels).astype("float64")
tp_zero_locations = np.where(tp == 0)
denominator = (2 * tp) + fp + fn
if len(tp_zero_locations[0]) > 0:
denominator[tp_zero_locations] = 1
macro_fscore = (1.0 / Nc) * (np.sum((2 * tp) / denominator))
return round(macro_fscore, 4)
def micro_precision(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
Nc - Number of classes
true_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
N_c - Number of classes
Returns: micro precision
Type:float number
"""
assert predicted_labels.shape == true_labels.shape
tp = true_positives(predicted_labels, true_labels).astype("float64")
fp = false_positives(predicted_labels, true_labels).astype("float64")
numerator = np.sum(tp)
denominator = np.sum(tp + fp)
    # numerator and denominator are scalar sums here, so guard 0 / 0 directly
    if numerator == 0.0:
        denominator = 1.0
precision = numerator / denominator
return round(precision, 4)
def micro_recall(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
Nc - Number of classes
true_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
N_c - Number of classes
Returns: micro precision
Type:float number
"""
assert predicted_labels.shape == true_labels.shape
tp = true_positives(predicted_labels, true_labels).astype("float64")
fn = false_negatives(predicted_labels, true_labels).astype("float64")
numerator = np.sum(tp)
denominator = np.sum(tp + fn)
    # numerator and denominator are scalar sums here, so guard 0 / 0 directly
    if numerator == 0.0:
        denominator = 1.0
recall = numerator / denominator
return round(recall, 4)
def micro_fscore(predicted_labels, true_labels):
"""
Args:
predicted_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
Nc - Number of classes
true_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
N_c - Number of classes
Returns: micro precision
Type:float number
"""
assert predicted_labels.shape == true_labels.shape
tp = true_positives(predicted_labels, true_labels).astype(np.float64)
fp = false_positives(predicted_labels, true_labels).astype(np.float64)
fn = false_negatives(predicted_labels, true_labels).astype(np.float64)
numerator = np.sum(2 * tp)
denominator = np.sum((2 * tp) + fp + fn)
    # numerator and denominator are scalar sums here, so guard 0 / 0 directly
    if numerator == 0.0:
        denominator = 1.0
fscore = numerator / denominator
return round(fscore, 4)
def true_positives(predicted_labels, true_labels):
"""
Predicted condition positive
True condition positive
Get the number of true positives for all the classes
Args:
predicted_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
Nc - Number of classes
true_labels: Type: ndarray
shape: N * N_c
N - Number of training examples
N_c - Number of classes
Returns: true_positives for all classes
Type:float number
"""
assert predicted_labels.shape == true_labels.shape
N, Nc = predicted_labels.shape
true_positives_array = []
for i in range(Nc):
location_true_ones =
|
np.where(true_labels[:, i] == 1)
|
numpy.where
|
#!/bin/env python3
import soundcard as sc
import numpy as np
import cv2
import threading
import time
import math
import sys
terminate_program = 0
class AudioOutputThread(threading.Thread):
def __init__(self):
"""
初始化
"""
threading.Thread.__init__(self)
self.data = None
def run(self):
t1 = time.time()
# speakers = sc.all_speakers()
default_speaker = sc.default_speaker()
while terminate_program == 0:
if self.data is None:
time.sleep(0.01)
continue
nd = self.data
self.data = None
print(time.time()-t1, nd.shape)
try:
# speakers[2].play(nd, samplerate=96000)
default_speaker.play(nd, samplerate=96000)
except:
pass
at = AudioOutputThread()
at.start()
data = np.zeros((100000,2), np.float32)
p = 0
cv2.namedWindow("win")
cv2.namedWindow("win2")
cv2.resizeWindow("win", 640, 480)
cv2.resizeWindow("win", 640, 480)
cv2.moveWindow("win",1280,280)
cv2.moveWindow("win2",1280,600)
cv2.waitKey(500)
try:
videofile = sys.argv[1]
except:
videofile = "test.mp4"
cap = cv2.VideoCapture(videofile)
n = 0
fps = 30
video_fps = 30
pframe_samples = math.floor(96000 / fps)
t = time.time()
while cap.isOpened():
res, img = cap.read()
n = n + 1
pts = n / video_fps
pass_time = (time.time() - t)
if pts < pass_time:
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# img = cv2.resize(img, (int(0.6 * img.shape[1]),int(0.6 * img.shape[0])))
edges = cv2.Canny(img,100,200)
ret,thresh = cv2.threshold(edges, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_TC89_L1)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_NONE)
condatas = np.array([], dtype=np.int32)
dsize = 0
for i in range(0, len(contours)):
if contours[i].shape[0] < 3:
continue
dsize += contours[i].shape[0]
condatas=np.append(condatas, contours[i][:,0,:])
condatas = condatas.reshape((-1,2))
voldatas = condatas.astype(np.float32)
wh = np.max(img.shape)
voldatas[:,0] *= 1 / wh * 1.2
voldatas[:,0] -= 0.6
voldatas[:,1] /= wh / -1.2
voldatas[:,1] += 0.6
last_p = p
osc_image =
|
np.zeros((img.shape[0], img.shape[1], 1), np.uint8)
|
numpy.zeros
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import pickle
import os
import scipy.io
import time
import matplotlib.pyplot as plt
import matplotlib as mpl
from svhn2mnist import utils
from sklearn.manifold import TSNE
class Solver(object):
def __init__(self, model, batch_size=128, train_iter=100000,
svhn_dir='svhn2mnist/svhn', mnist_dir='svhn2mnist/mnist', log_dir='logs',
model_save_path='model', trained_model='model/model'):
self.model = model
self.batch_size = batch_size
self.train_iter = train_iter
self.svhn_dir = svhn_dir
self.mnist_dir = mnist_dir
self.log_dir = log_dir
self.model_save_path = model_save_path
self.trained_model = model_save_path + '/model'
self.config = tf.ConfigProto()
self.config.gpu_options.allow_growth = True
def load_mnist(self, image_dir, split='train'):
print('Loading MNIST dataset.')
image_file = 'train.pkl' if split == 'train' else 'test.pkl'
image_dir = os.path.join(image_dir, image_file)
with open(image_dir, 'rb') as f:
mnist = pickle.load(f)
images = mnist['X'] / 127.5 - 1
labels = mnist['y']
return images, np.squeeze(labels).astype(int)
def load_svhn(self, image_dir, split='train'):
print('Loading SVHN dataset.')
image_file = 'train_32x32.mat' if split == 'train' else 'test_32x32.mat'
image_dir = os.path.join(image_dir, image_file)
svhn = scipy.io.loadmat(image_dir)
images = np.transpose(svhn['X'], [3, 0, 1, 2]) / 127.5 - 1
# ~ images= resize_images(images)
labels = svhn['y'].reshape(-1)
labels[np.where(labels == 10)] = 0
return images, labels
def train(self):
# make directory if not exists
if tf.gfile.Exists(self.log_dir):
tf.gfile.DeleteRecursively(self.log_dir)
tf.gfile.MakeDirs(self.log_dir)
print('Training.')
trg_images, trg_labels = self.load_mnist(self.mnist_dir, split='train')
trg_test_images, trg_test_labels = self.load_mnist(self.mnist_dir,
split='test')
src_images, src_labels = self.load_svhn(self.svhn_dir, split='train')
src_test_images, src_test_labels = self.load_svhn(self.svhn_dir,
split='test')
# build a graph
model = self.model
model.build_model()
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
tf.global_variables_initializer().run()
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(logdir=self.log_dir,
graph=tf.get_default_graph())
print('Start training.')
trg_count = 0
t = 0
start_time = time.time()
for step in range(self.train_iter):
trg_count += 1
t += 1
i = step % int(src_images.shape[0] / self.batch_size)
j = step % int(trg_images.shape[0] / self.batch_size)
feed_dict = {model.src_images:
src_images[
i * self.batch_size:(i + 1) * self.batch_size],
model.src_labels:
src_labels[i * self.batch_size:(i + 1) * self.batch_size],
model.trg_images:
trg_images[j * self.batch_size:(j + 1) * self.batch_size],
}
sess.run(model.train_op, feed_dict)
if t % 5000 == 0 or t == 1:
summary, l_c, l_d, src_acc = sess.run(
[model.summary_op, model.class_loss, model.domain_loss,
model.src_accuracy], feed_dict)
summary_writer.add_summary(summary, t)
print(
'Step: [%d/%d] c_loss: [%.6f] d_loss: [%.6f] train acc: [%.2f]' \
% (t, self.train_iter, l_c, l_d, src_acc))
# ~ if t%10000==0:
# ~ print 'Saved.'
with open('time_' + str(model.alpha) + '_' + model.method + '.txt',
"a") as resfile:
resfile.write(str(
(time.time() - start_time) / float(self.train_iter)) + '\n')
saver.save(sess, os.path.join(self.model_save_path, 'model'))
def test(self):
trg_images, trg_labels = self.load_mnist(self.mnist_dir, split='test')
# build a graph
model = self.model
model.build_model()
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
tf.global_variables_initializer().run()
print('Loading model.')
variables_to_restore = slim.get_model_variables()
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, self.trained_model)
trg_acc, trg_entr = sess.run(
fetches=[model.trg_accuracy, model.trg_entropy],
feed_dict={model.trg_images: trg_images[:],
model.trg_labels: trg_labels[:]})
print('test acc [%.3f]' % (trg_acc))
print('entropy [%.3f]' % (trg_entr))
with open('test_' + str(model.alpha) + '_' + model.method + '.txt',
"a") as resfile:
resfile.write(str(trg_acc) + '\t' + str(trg_entr) + '\n')
# ~ print confusion_matrix(trg_labels, trg_pred)
def tsne(self, n_samples=2000):
source_images, source_labels = self.load_svhn(self.svhn_dir,
split='test')
target_images, target_labels = self.load_mnist(self.mnist_dir,
split='test')
model = self.model
model.build_model()
config = tf.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
print('Loading test model.')
variables_to_restore = tf.global_variables()
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, self.trained_model)
target_images = target_images[:n_samples]
target_labels = target_labels[:n_samples]
source_images = source_images[:n_samples]
source_labels = source_labels[:n_samples]
print(source_labels.shape)
assert len(target_labels) == len(source_labels)
src_labels = utils.one_hot(source_labels.astype(int), 10)
trg_labels = utils.one_hot(target_labels.astype(int), 10)
n_slices = int(n_samples / self.batch_size)
fx_src = np.empty((0, model.hidden_repr_size))
fx_trg = np.empty((0, model.hidden_repr_size))
for src_im, trg_im in zip(np.array_split(source_images, n_slices),
|
np.array_split(target_images, n_slices)
|
numpy.array_split
|
"""
Code to glue together chiral merge trees, hyperbolic
delaunay triangluations / equations, and hyperbolic
Voronoi diagrams
"""
import numpy as np
import time
import matplotlib.pyplot as plt
import seaborn as sn
from HypDelaunay import *
from HypMergeTree import *
from MergeTree import *
def mergetree_to_hypmergetree(cmt, constraints, max_trials = 500, z_eps = 1e-4, sol_eps = 1e-7, verbose=True):
"""
Given a chiral merge tree, setup the Delaunay triangulation
and associated equations, solve for the zs/rs, and then
solve for the hyperbolic Voronoi diagram from the zs/rs
Parameters
----------
cmt: MergeTree
A chiral merge tree object from which to construct
a hyperbolic structure
constraints: list of [(variable type ('z' or 'r'),
index (int),
value (float)]
A dictionary of constraints to enforce on the zs and rs.
-1 for r is r_infinity
max_trials: int
Number of random initializations to try
z_eps: float
All zs must be at least this far apart
sol_eps: float
Objective function must have converged to this level
verbose: boolean
Whether to print the solutions
Returns
-------
{
'hd': HyperbolicDelaunay
An object holding the Delaunay triangulation and
methods for constructing, solving, and plotting the equations,
'hmt': HypMergeTree
An object holding the hyperbolic Voronoi diagram corresponding
to the zs/rs solution,
'times': ndarray(max_trials)
The time taken to solve each initial condition,
'n_invalid': int
The number of solutions deemed not to be valid
}
"""
hd = HyperbolicDelaunay()
hd.init_from_mergetree(cmt)
N = len(hd.vertices)
times = np.zeros(max_trials)
np.random.seed(0)
n_invalid = 0
hmt = HypMergeTree()
hmt.z = np.zeros(N-1)
hmt.radii = np.zeros(N)
i = 0
solution_found = False
while i < max_trials and not solution_found:
tic = time.time()
# Setup some random initial conditions
zs0 = np.random.randn(N-1)
rs0 = np.abs(np.random.randn(N))
for (constraint_type, index, value) in constraints:
if constraint_type == 'r':
rs0[index] = value
elif constraint_type == 'z':
zs0[index] = value
vs0 = np.random.randn(N-3)
res = hd.solve_equations(zs0, rs0, vs0, constraints)
times[i] = time.time()-tic
zs, rs, vs = res['zs'], res['rs'], res['vs']
fx_sol = res['fx_sol']
# Check the following conditions
# 1) The zs are in the right order
# 2) Adjacent zs are more than epsilon apart
# 3) The rs are nonzero
# 4) The objective function is less than epsilon
if np.sum(zs[1::] - zs[0:-1] < 0) == 0 and np.sum(zs[1::]-zs[0:-1] < z_eps) == 0 and np.sum(rs < 0) == 0 and fx_sol < sol_eps:
# Copy over the solution to the hyperbolic voronoi diagram
# if it is valid
hmt.z = zs
hmt.radii = rs
solution_found = True
if verbose:
print("zs:", ["%.3g, "*zs.size%(tuple(list(zs)))])
print("rs", ["%.3g, "*rs.size%(tuple(list(rs)))])
print("fx_initial = %.3g"%res['fx_initial'])
print("fx_sol = %.3g\n"%fx_sol)
else:
n_invalid += 1
i += 1
return {'hd':hd, 'hmt':hmt, 'times':times, 'n_invalid':n_invalid}
def plot_solution_grid(cmt, hd, hmt, constraints, symbolic=False,
xlims = None, ylims_voronoi = None,
ylims_masses = None, perturb=0):
"""
Show the original chiral merge tree next to the topological
triangulation, the associated equations, the Voronoi diagram
solution, and the solution expressed as point masses
Parameters
----------
cmt: MergeTree
The original chiral merge tree
hd: HyperbolicDelaunay
An object holding the Delaunay triangulation and
methods for constructing, solving, and plotting the equations
hmt: HypMergeTree
An object holding the hyperbolic Voronoi diagram corresponding
to the zs/rs solution
constraints: list of [(variable type ('z' or 'r'),
index (int),
value (float)]
A dictionary of constraints to enforce on the zs and rs.
-1 for r is r_infinity
symbolic: boolean
Whether to use variables for the edge weights (True), or
whether to display the actual numerical edge weights (False)
xlims: [int, int]
Optional x limits for Voronoi diagram and point masses
ylims_voronoi: [int, int]
Optional y limits for Voronoi diagram
ylims_masses: [int, int]
Opitional y limits for point masses
perturb: boolean
Whether to perturb z positions slightly
"""
plt.subplot(231)
cmt.render(offset=np.array([0, 0]))
plt.title("Chiral Merge Tree")
plt.subplot(232)
hd.render(symbolic=symbolic)
plt.title("Topological Triangulation")
plt.subplot2grid((2, 3), (0, 2), rowspan=1, colspan=1)
plt.text(0, 0, hd.get_equations_tex(constraints=constraints, symbolic=symbolic))
plt.title("Hyperbolic Equations")
plt.axis('off')
plt.subplot(234)
if perturb > 0:
z_orig = np.array(hmt.z)
hmt.z += np.random.rand(hmt.z.size)*perturb
hmt.refreshNeeded = True
hmt.renderVoronoiDiagram()
if perturb > 0:
hmt.z = z_orig
plt.title("Hyperbolic Voronoi Diagram")
if xlims:
plt.xlim(xlims)
if ylims_voronoi:
plt.ylim(ylims_voronoi)
plt.subplot(235)
lengths = hd.get_horocycle_arclens()
z = np.zeros_like(lengths)
z[0:-1] = hmt.z
z[-1] = -1 # Plot the infinity weight at -1
plt.stem(z, lengths)
if xlims:
sxlims = [xlims[0], xlims[1]]
# Make sure infinity shows up
if sxlims[0] > -1.5:
sxlims[0] = -1.5
plt.xlim(sxlims)
if ylims_masses:
plt.ylim(ylims_masses)
plt.xticks(z, ["%.3g"%zi for zi in hmt.z] + ["$\infty$"])
plt.title("Masses (Total Mass %.3g)"%np.sum(lengths))
plt.xlabel("z")
plt.ylabel("Mass")
def test_pentagon_infedges_edgecollapse(constraints = [('z', 0, 0), ('r', -1, 1)]):
"""
Test an edge collapse of a pentagon whose edges
all go to infinity
"""
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([1, 4]))
cmt.root.addChildren([A, B])
C = MergeNode(np.array([0.5, 3]))
D = MergeNode(np.array([2, 3]))
B.addChildren([C, D])
E = MergeNode(np.array([1.5, 2]))
F = MergeNode(np.array([3, 2]))
D.addChildren([E, F])
plt.figure(figsize=(18, 12))
N = 20
for i, Ey in enumerate(np.linspace(2, 3, N)):
E.X[1] = Ey
res = mergetree_to_hypmergetree(cmt, constraints)
plt.clf()
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=[-0.5, 4.5], ylims_voronoi=[0, 6.5], ylims_masses=[0, 5])
plt.savefig("%i.png"%i, bbox_inches='tight')
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([1, 4]))
cmt.root.addChildren([A, B])
C = MergeNode(np.array([0.5, 3]))
D = MergeNode(np.array([2, 3]))
F = MergeNode(np.array([3, 2]))
B.addChildren([C, F])
plt.clf()
res = mergetree_to_hypmergetree(cmt, constraints)
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=[-0.5, 4.5], ylims_voronoi=[0, 6.5], ylims_masses=[0, 5])
plt.savefig("%i.png"%N, bbox_inches='tight')
def test_pentagon_general_edgecollapse(constraints=[('z', 0, 0), ('r', -1, 1)]):
"""
Test an edge collapse of a pentagon whose edges
don't all go to infinity
"""
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([2, 4]))
cmt.root.addChildren([A, B])
C = MergeNode(np.array([-3, 3]))
D = MergeNode(np.array([-0.5, 2.8]))
E = MergeNode(np.array([-4, 2]))
A.addChildren([E, D])
I = MergeNode(np.array([1, 2.6]))
J = MergeNode(np.array([4, 2.3]))
B.addChildren([J, I])
plt.figure(figsize=(18, 12))
N = 20
for i, Iy in enumerate(np.linspace(2.6, 4, N)):
I.X[1] = Iy
res = mergetree_to_hypmergetree(cmt, constraints)
plt.clf()
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=[-0.5, 4.5], ylims_voronoi=[0, 6.5], ylims_masses=[0, 5])
plt.savefig("%i.png"%i, bbox_inches='tight')
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([2, 4]))
C = MergeNode(np.array([-3, 3]))
D = MergeNode(np.array([-0.5, 2.8]))
E = MergeNode(np.array([-4, 2]))
A.addChildren([E, D])
I = MergeNode(np.array([1, 2.6]))
J = MergeNode(np.array([4, 2.3]))
cmt.root.addChildren([A, J])
plt.clf()
res = mergetree_to_hypmergetree(cmt, constraints)
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=[-0.5, 4.5], ylims_voronoi=[0, 6.5], ylims_masses=[0, 5])
plt.savefig("%i.png"%N, bbox_inches='tight')
def test_septagon_general_edgecollapse(constraints=[('z', 0, 0), ('r', -1, 1)]):
"""
    Test an edge collapse of a septagon whose edges
don't all go to infinity
"""
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([2, 4]))
cmt.root.addChildren([A, B])
C = MergeNode(np.array([-3, 3]))
D = MergeNode(np.array([-0.5, 2.8]))
A.addChildren([C, D])
E = MergeNode(np.array([-4, 2]))
F = MergeNode(np.array([-2, 1.6]))
C.addChildren([E, F])
G = MergeNode(np.array([-3, 0]))
H = MergeNode(np.array([-1, 0.5]))
F.addChildren([G, H])
I = MergeNode(np.array([1, 2.6]))
J = MergeNode(np.array([4, 2.3]))
B.addChildren([J, I])
plt.figure(figsize=(18, 12))
N = 20
xlims=[-0.5, 6]
ylims_voronoi=[0, 6.5]
ylims_masses=[0, 5]
for i, Iy in enumerate(np.linspace(2.6, 4, N)):
I.X[1] = Iy
res = mergetree_to_hypmergetree(cmt, constraints)
plt.clf()
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=xlims, ylims_voronoi=ylims_voronoi, ylims_masses=ylims_masses)
plt.savefig("%i.png"%i, bbox_inches='tight')
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([2, 4]))
C = MergeNode(np.array([-3, 3]))
D = MergeNode(np.array([-0.5, 2.8]))
A.addChildren([C, D])
E = MergeNode(np.array([-4, 2]))
F = MergeNode(np.array([-2, 1.6]))
C.addChildren([E, F])
G = MergeNode(np.array([-3, 0]))
H = MergeNode(np.array([-1, 0.5]))
F.addChildren([G, H])
I = MergeNode(np.array([1, 2.6]))
J = MergeNode(np.array([4, 2.3]))
cmt.root.addChildren([A, J])
plt.clf()
res = mergetree_to_hypmergetree(cmt, constraints)
plot_solution_grid(cmt, res['hd'], res['hmt'], constraints, xlims=xlims, ylims_voronoi=ylims_voronoi, ylims_masses=ylims_masses)
plt.savefig("%i.png"%N, bbox_inches='tight')
def test_pentagon_two_small_edges(constraints=[('z', 0, 0), ('r', -1, 1)]):
"""
    Test a pentagon with two small edges whose edges
don't all go to infinity
"""
cmt = MergeTree(TotalOrder2DX)
cmt.root = MergeNode(np.array([0, 5]))
A = MergeNode(np.array([-1, 4]))
B = MergeNode(np.array([2, 4]))
cmt.root.addChildren([A, B])
C = MergeNode(np.array([-3, 3]))
D = MergeNode(np.array([-0.5, 3.9]))
E = MergeNode(np.array([-4, 2]))
A.addChildren([E, D])
I = MergeNode(
|
np.array([1, 2.6])
|
numpy.array
|
"""
Base classes for all discretize meshes
"""
import numpy as np
import properties
import os
import json
from ..utils import mkvc
from ..mixins import InterfaceMixins
class BaseMesh(properties.HasProperties, InterfaceMixins):
"""
BaseMesh does all the counting you don't want to do.
BaseMesh should be inherited by meshes with a regular structure.
"""
_REGISTRY = {}
# Properties
_n = properties.Array(
"number of cells in each direction (dim, )",
dtype=int,
required=True,
shape=('*',)
)
x0 = properties.Array(
"origin of the mesh (dim, )",
dtype=(float, int),
shape=('*',),
required=True,
)
# Instantiate the class
def __init__(self, n=None, x0=None, **kwargs):
if n is not None:
self._n = n # number of dimensions
if x0 is None:
self.x0 = np.zeros(len(self._n))
else:
self.x0 = x0
super(BaseMesh, self).__init__(**kwargs)
# Validators
@properties.validator('_n')
def _check_n_shape(self, change):
if not (
not isinstance(change['value'], properties.utils.Sentinel) and
change['value'] is not None
):
raise Exception("Cannot delete n. Instead, create a new mesh")
change['value'] = np.array(change['value'], dtype=int).ravel()
if len(change['value']) > 3:
raise Exception(
"Dimensions of {}, which is higher than 3 are not "
"supported".format(change['value'])
)
if np.any(change['previous'] != properties.undefined):
# can't change dimension of the mesh
if len(change['previous']) != len(change['value']):
raise Exception(
"Cannot change dimensionality of the mesh. Expected {} "
"dimensions, got {} dimensions".format(
len(change['previous']), len(change['value'])
)
)
# check that if h has been set, sizes still agree
if getattr(self, 'h', None) is not None and len(self.h) > 0:
for i in range(len(change['value'])):
if len(self.h[i]) != change['value'][i]:
raise Exception(
"Mismatched shape of n. Expected {}, len(h[{}]), got "
"{}".format(
len(self.h[i]), i, change['value'][i]
)
)
# check that if nodes have been set for curvi mesh, sizes still
# agree
if (
getattr(self, 'nodes', None) is not None and
len(self.nodes) > 0
):
for i in range(len(change['value'])):
if self.nodes[0].shape[i]-1 != change['value'][i]:
raise Exception(
"Mismatched shape of n. Expected {}, len(nodes[{}]), "
"got {}".format(
self.nodes[0].shape[i]-1, i, change['value'][i]
)
)
@properties.validator('x0')
def _check_x0(self, change):
if not (
not isinstance(change['value'], properties.utils.Sentinel) and
change['value'] is not None
):
raise Exception("n must be set prior to setting x0")
if len(self._n) != len(change['value']):
raise Exception(
"Dimension mismatch. x0 has length {} != len(n) which is "
"{}".format(len(x0), len(n))
)
@property
def dim(self):
"""The dimension of the mesh (1, 2, or 3).
Returns
-------
int
dimension of the mesh
"""
return len(self._n)
@property
def nC(self):
"""Total number of cells in the mesh.
Returns
-------
int
number of cells in the mesh
Examples
--------
.. plot::
:include-source:
import discretize
import numpy as np
mesh = discretize.TensorMesh([np.ones(n) for n in [2,3]])
mesh.plotGrid(centers=True, show_it=True)
print(mesh.nC)
"""
return int(self._n.prod())
@property
def nN(self):
"""Total number of nodes
Returns
-------
int
number of nodes in the mesh
Examples
--------
.. plot::
:include-source:
import discretize
import numpy as np
mesh = discretize.TensorMesh([np.ones(n) for n in [2,3]])
mesh.plotGrid(nodes=True, show_it=True)
print(mesh.nN)
"""
return int((self._n+1).prod())
@property
def nEx(self):
"""Number of x-edges
Returns
-------
nEx : int
"""
return int((self._n + np.r_[0, 1, 1][:self.dim]).prod())
@property
def nEy(self):
"""Number of y-edges
Returns
-------
nEy : int
"""
if self.dim < 2:
return None
return int((self._n + np.r_[1, 0, 1][:self.dim]).prod())
@property
def nEz(self):
"""Number of z-edges
Returns
-------
nEz : int
"""
if self.dim < 3:
return None
return int((self._n + np.r_[1, 1, 0][:self.dim]).prod())
@property
def vnE(self):
"""Total number of edges in each direction
Returns
-------
vnE : numpy.ndarray = [nEx, nEy, nEz], (dim, )
.. plot::
:include-source:
import discretize
import numpy as np
M = discretize.TensorMesh([np.ones(n) for n in [2,3]])
M.plotGrid(edges=True, show_it=True)
"""
return np.array(
[x for x in [self.nEx, self.nEy, self.nEz] if x is not None],
dtype=int
)
@property
def nE(self):
"""Total number of edges.
Returns
-------
nE : int = sum([nEx, nEy, nEz])
"""
return int(self.vnE.sum())
@property
def nFx(self):
"""Number of x-faces
:rtype: int
:return: nFx
"""
return int((self._n + np.r_[1, 0, 0][:self.dim]).prod())
@property
def nFy(self):
"""Number of y-faces
:rtype: int
:return: nFy
"""
if self.dim < 2:
return None
return int((self._n + np.r_[0, 1, 0][:self.dim]).prod())
@property
def nFz(self):
"""Number of z-faces
:rtype: int
:return: nFz
"""
if self.dim < 3:
return None
return int((self._n + np.r_[0, 0, 1][:self.dim]).prod())
@property
def vnF(self):
"""Total number of faces in each direction
:rtype: numpy.ndarray
:return: [nFx, nFy, nFz], (dim, )
.. plot::
:include-source:
import discretize
import numpy as np
M = discretize.TensorMesh([np.ones(n) for n in [2,3]])
M.plotGrid(faces=True, show_it=True)
"""
return np.array(
[x for x in [self.nFx, self.nFy, self.nFz] if x is not None],
dtype=int
)
@property
def nF(self):
"""Total number of faces.
:rtype: int
:return: sum([nFx, nFy, nFz])
"""
return int(self.vnF.sum())
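    # Worked example (illustrative, not from the original source): for a 2D mesh
    # with n = [2, 3], nC = 6 and nN = 3 * 4 = 12; the face counts are
    # nFx = 3 * 3 = 9 and nFy = 2 * 4 = 8, so nF = 17, and the edge counts are
    # nEx = 2 * 4 = 8 and nEy = 3 * 3 = 9, so nE = 17.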
@property
def normals(self):
"""Face Normals
:rtype: numpy.ndarray
:return: normals, (sum(nF), dim)
"""
if self.dim == 2:
nX = np.c_[
np.ones(self.nFx), np.zeros(self.nFx)
]
nY = np.c_[
np.zeros(self.nFy), np.ones(self.nFy)
]
return np.r_[nX, nY]
elif self.dim == 3:
nX = np.c_[
np.ones(self.nFx), np.zeros(self.nFx), np.zeros(self.nFx)
]
nY = np.c_[
np.zeros(self.nFy), np.ones(self.nFy), np.zeros(self.nFy)
]
nZ = np.c_[
np.zeros(self.nFz), np.zeros(self.nFz), np.ones(self.nFz)
]
return np.r_[nX, nY, nZ]
@property
def tangents(self):
"""Edge Tangents
:rtype: numpy.ndarray
:return: normals, (sum(nE), dim)
"""
if self.dim == 2:
tX = np.c_[
np.ones(self.nEx), np.zeros(self.nEx)
]
tY = np.c_[
np.zeros(self.nEy), np.ones(self.nEy)
]
return np.r_[tX, tY]
elif self.dim == 3:
tX = np.c_[
np.ones(self.nEx), np.zeros(self.nEx), np.zeros(self.nEx)
]
tY = np.c_[
|
np.zeros(self.nEy)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 08:10:11 2017
@author: thoma
"""
import numpy as np
import pylab as plt
import skfmm
'''
eikonal: simple wrapper for skfmm using a single source
'''
def eikonal(x=[],y=[],z=[],V=[],S=[]):
import numpy as np
import skfmm
t=[];
dx = float(x[1]-x[0])
phi = -1*np.ones_like(V)
if S.ndim==1:
S=np.array([S]);
ns=1
ns, ndim = S.shape
else:
ns, ndim = S.shape
for i in range(ns):
# get location of source
#print(i)
ix = np.abs(x-S[i,0]).argmin();
if ndim>1:
iy = np.abs(y-S[i,1]).argmin();
if ndim>2:
iz = np.abs(z-S[i,2]).argmin();
if ndim>2:
phi[iy,ix,iz]=1;
elif ndim>1:
phi[iy,ix]=1;
else:
phi[ix]=1;
t_comp = skfmm.travel_time(phi, V, dx)
t.append(t_comp)
return t
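# Illustrative sketch (an assumption, not part of the original file; requires
# scikit-fmm): travel-time map on a 1D grid from a single source at x = 0
# through a constant-velocity medium.
def _example_eikonal_1d():
    x = np.arange(0, 10, 0.1)
    V = 2.0 * np.ones_like(x)   # constant velocity
    S = np.array([[0.0]])       # one source at x = 0
    t = eikonal(x=x, V=V, S=S)
    return t[0]                 # travel times, roughly x / 2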
'''
eikonal_traveltime: simple wrapper for skfmm using a single source
'''
def eikonal_traveltime(x=[],y=[],z=[],V=[],S=[],R=[]):
import numpy as np
#import skfmm
from scipy import interpolate
nr, ndim = np.atleast_2d(R).shape
ns, ndim = np.atleast_2d(S).shape
if ((ns==nr)&(ns>1)):
        print('More than one set of sources and receivers (ns=nr=%d). Using eikonal_traveltime_mul instead.' % ns)
t = eikonal_traveltime_mul(x,y,z,V,S,R)
return t
if (ns>1):
        print('Number of sources larger than 1 (ns=%d)! Use eikonal_traveltime_mul instead.' % ns)
t=np.zeros(nr)
#phi = -1*np.ones_like(V)
#i_source = 0;
#t_map = eikonal(x,y,z,V,S[i_source,:])
t_map = eikonal(x,y,z,V,S)
if ndim==2:
f = interpolate.interp2d(x, y, t_map, kind='cubic');
tt = f(R[:,0].transpose(),R[:,1].transpose())
for i in range(nr):
Rx=R[i,0]
Ry=R[i,1]
tt=f(Rx,Ry)
t[i]=tt[0]
return t
'''
eikonal_traveltime_mul: simple wrapper for skfmm using multiple sources
'''
def eikonal_traveltime_mul(x=[],y=[],z=[],V=[],S=[],R=[]):
nr, ndim = R.shape
ns, ndim = S.shape
# Check that S and R have the same size
if (ns != nr):
        print('Number of sources and receivers is not the same (ns=%d, nr=%d)!' % (ns,nr))
t=np.zeros(nr)
# Find unique sources
Su=np.unique(S, axis=0)
nsu,ndim = Su.shape
# print('Number for unique source locations is %d (out of %d sources). ' % (nsu,ns))
for i in range(nsu):
# print('working with source %03d/%03d, at location [%4g,%4g]' % (i+1,nsu,Su[i,0],Su[i,1]))
Srow = Su[i,0:2]
        # find matching rows
dummy=np.where(np.all(Srow==S,axis=1))
i_index = dummy[0]
t_i = eikonal_traveltime(x,y,z,V,Srow,R[i_index,:])
# update traveltime
t[i_index] = t_i
return t
def example_map():
#%% TRAVELTIME MAP
dx=0.1;
x = np.arange(-1,6,dx)
y = np.arange(-1,13,dx)
x_src = np.array([1, 2.50, 2.5, 1])
y_src = np.array([1, 5, 10, 1])
S=np.array([x_src,y_src]).transpose()
xx,yy = np.meshgrid(x,y)
phi = -1*np.ones_like(xx)
V = 0.1*np.ones_like(xx);
#V[yy>6] = 13;
#V[np.logical_and(np.abs(yy)>7, xx>2.5)] = 5
t_map = eikonal(x,y,[],V,S)
plt.subplot(1,2,2)
plt.pcolor(x,y,t_map[0])
plt.subplot(1,2,1)
plt.pcolor(x,y,V)
    plt.show()
#%% TRAVELTIME S-R
nr=14;
ns=1;
x_rec=5*np.ones([nr])
y_rec = np.linspace(1, 12,nr);
R=
|
np.array([x_rec,y_rec])
|
numpy.array
|
import autoarray as aa
import numpy as np
class TestDataVectorFromData:
def test__simple_blurred_mapping_matrix__correct_data_vector(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([2.0, 3.0, 1.0])).all()
def test__simple_blurred_mapping_matrix__change_image_values__correct_data_vector(
self,
):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([3.0, 1.0, 1.0, 10.0, 1.0, 1.0])
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([4.0, 14.0, 10.0])).all()
def test__simple_blurred_mapping_matrix__change_noise_values__correct_data_vector(
self,
):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
image = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
noise_map = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
data_vector = aa.util.inversion.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=blurred_mapping_matrix,
image=image,
noise_map=noise_map,
)
assert (data_vector == np.array([2.0, 3.0, 1.0])).all()
def test__data_vector_via_transformer_mapping_matrix_method__same_as_blurred_method_using_real_imag_separate(
self,
):
mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
data_real = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
noise_map_real = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
data_vector_real_via_blurred = aa.util.inversion.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=mapping_matrix,
image=data_real,
noise_map=noise_map_real,
)
data_imag = np.array([4.0, 1.0, 1.0, 16.0, 1.0, 1.0])
noise_map_imag = np.array([2.0, 1.0, 1.0, 4.0, 1.0, 1.0])
data_vector_imag_via_blurred = aa.util.inversion.data_vector_via_blurred_mapping_matrix_from(
blurred_mapping_matrix=mapping_matrix,
image=data_imag,
noise_map=noise_map_imag,
)
data_vector_complex_via_blurred = (
data_vector_real_via_blurred + data_vector_imag_via_blurred
)
transformed_mapping_matrix = np.array(
[
[1.0 + 1.0j, 1.0 + 1.0j, 0.0 + 0.0j],
[1.0 + 1.0j, 0.0 + 0.0j, 0.0 + 0.0j],
[0.0 + 0.0j, 1.0 + 1.0j, 0.0 + 0.0j],
[0.0 + 0.0j, 1.0 + 1.0j, 1.0 + 1.0j],
[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
]
)
data = np.array(
[4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j, 16.0 + 16.0j, 1.0 + 1.0j, 1.0 + 1.0j]
)
noise_map = np.array(
[2.0 + 2.0j, 1.0 + 1.0j, 1.0 + 1.0j, 4.0 + 4.0j, 1.0 + 1.0j, 1.0 + 1.0j]
)
data_vector_via_transformed = aa.util.inversion.data_vector_via_transformed_mapping_matrix_from(
transformed_mapping_matrix=transformed_mapping_matrix,
visibilities=data,
noise_map=noise_map,
)
assert (data_vector_complex_via_blurred == data_vector_via_transformed).all()
class TestCurvatureMatrixFromBlurred:
def test__simple_blurred_mapping_matrix(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
noise_map = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
curvature_matrix = aa.util.inversion.curvature_matrix_via_mapping_matrix_from(
mapping_matrix=blurred_mapping_matrix, noise_map=noise_map
)
assert (
curvature_matrix
== np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 1.0]])
).all()
def test__simple_blurred_mapping_matrix__change_noise_values(self):
blurred_mapping_matrix = np.array(
[
[1.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
noise_map =
|
np.array([2.0, 1.0, 1.0, 1.0, 1.0, 1.0])
|
numpy.array
|
import os
import subprocess
import sys
import threading
import shutil
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from sympy import roots
from sympy.abc import x
from fractureSH_gui import Ui_MainWindow
class MyFirstGuiProgram(Ui_MainWindow):
def __init__(self, dialog):
Ui_MainWindow.__init__(self)
self.setupUi(dialog)
        ### Create the layout for plotting
        # figure for Tab1
self.fig = Figure(figsize=(8,3),facecolor='white')
self.fig.subplots_adjust(hspace= 0.40, wspace= 0.60,left=0.10, right=0.98, top=0.88, bottom=0.17)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.widget)
layout = QtWidgets.QVBoxLayout()
self.widget.setLayout(layout)
layout.addWidget(self.canvas)
self.mpl_toolbar = NavigationToolbar(self.canvas, self.widget)
self.fig.text(0.5, 0.1, 'Geofone', va='center')
self.fig.text(0.02, 0.33, 'Tempo(s)', va='center', rotation='vertical')
self.fig.text(0.45, 0.5, 'Ângulo de incidência (graus)', va='center', size= 8)
self.fig.text(0.02, 0.73, 'Coeficiente de reflexão', va='center', rotation='vertical', size=7)
self.axes = self.fig.add_subplot(211)
self.axes2 = self.axes.twiny()
self.axes.grid()
self.axes_time = self.fig.add_subplot(212)
self.axes.tick_params(labelsize=6)
self.axes2.tick_params(labelsize=6)
self.axes_time.tick_params(labelsize=6)
self.axes_time.grid()
        # figure for tab2
self.fig_anray = Figure(figsize=(9,6), facecolor='white')
self.fig_anray2 = Figure(figsize=(9, 6), facecolor='white')
self.fig_anray.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray.text(0.985, 0.6, 'Separação', va='center', rotation='vertical')
self.fig_anray.text(0.5, 0.12, 'Geofone', va='center')
self.fig_anray2.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray2.text(0.5, 0.12, 'Geofone', va='center')
self.canvas_anray = FigureCanvas(self.fig_anray)
self.canvas_anray2 = FigureCanvas(self.fig_anray2)
self.canvas_anray.setParent(self.widget_anray)
self.canvas_anray2.setParent(self.widget_anray2)
layout = QtWidgets.QVBoxLayout()
layout2 = QtWidgets.QVBoxLayout()
self.widget_anray.setLayout(layout)
layout.addWidget(self.canvas_anray)
self.widget_anray2.setLayout(layout2)
layout2.addWidget(self.canvas_anray2)
self.mpl_toolbar = NavigationToolbar(self.canvas_anray, self.widget_anray)
self.mpl_toolbar2 = NavigationToolbar(self.canvas_anray2, self.widget_anray2)
self.fig_anray.subplots_adjust(hspace=0.27, left=0.10, right=0.92, top=0.88, bottom=0.17)
self.fig_anray2.subplots_adjust(hspace=0.27, left=0.10, right=0.98, top=0.93, bottom=0.17)
#subplots
self.axes_anray_tot = self.fig_anray.add_subplot(311)
self.axes_anray2_tot = self.fig_anray2.add_subplot(311)
self.axes_anray_tot2 = self.axes_anray_tot.twinx()
self.axes_anray_tot.set_ylabel("total")
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray2_rad = self.fig_anray2.add_subplot(312)
self.axes_anray2_rad.set_ylabel("transversal")
self.axes_anray_time = self.fig_anray.add_subplot(313)
self.axes_anray2_time = self.fig_anray2.add_subplot(313)
self.axes_anray_trans = self.fig_anray.add_subplot(312)
self.axes_anray_trans.set_ylabel("transversal")
self.axes_anray_trans2 = self.axes_anray_trans.twinx()
self.axes_anray_time.set_ylabel('tempo')
self.axes_anray2_time.set_ylabel('tempo')
self.axes_anray_tot.grid()
self.axes_anray_trans.grid()
self.axes_anray2_rad.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.grid()
self.axes_anray2_time.grid()
self.axes_anray_tot.tick_params(labelsize=6)
self.axes_anray_trans.tick_params(labelsize=6)
self.axes_anray2_rad.tick_params(labelsize=6)
self.axes_anray_tot2.tick_params(labelsize=6)
self.axes_anray2_tot.tick_params(labelsize=6)
self.axes_anray_trans2.tick_params(labelsize=6)
###
# figure for tab 3
self.fig_sismo = Figure(dpi=50, facecolor='white')
self.canvas_sismo = FigureCanvas(self.fig_sismo)
self.canvas_sismo.setParent(self.widget_sismo)
self.fig_sismo.subplots_adjust(wspace=0.11, left=0.05, right=0.98, top=0.93, bottom=0.10)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo.setLayout(layout)
layout.addWidget(self.canvas_sismo)
self.axes_sismo_x = self.fig_sismo.add_subplot(111)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo, self.widget_sismo)
self.fig_sismo.text(0.48, 0.04, 'Distância (m)', va='center', size= 14)
self.fig_sismo.text(0.01, 0.5, 'Tempo (s)', va='center', rotation='vertical', size= 14)
self.fig_sismo.text(0.48, 0.96, 'Transversal', va='center', size= 14)
# self.fig_sismo.text(0.75, 0.96, 'Vertical', va='center', size=14)
# figure for tab 4
self.fig_sismo2 = Figure(dpi=100, facecolor='white')
self.canvas_sismo2 = FigureCanvas(self.fig_sismo2)
self.canvas_sismo2.setParent(self.widget_sismo2)
self.fig_sismo2.set_tight_layout(True)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo2.setLayout(layout)
layout.addWidget(self.canvas_sismo2)
self.axes_sismo2_1 = self.fig_sismo2.add_subplot(211)
self.axes_sismo2_2 = self.fig_sismo2.add_subplot(212)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo2, self.widget_sismo2)
### Set the initial values
self.spinBox_vp1.setValue(2250)
self.spinBox_vs1.setValue(1200)
self.spinBox_p1.setValue(2100)
self.spinBox_vp2.setValue(4500)
self.spinBox_vs2.setValue(2500)
self.spinBox_p2.setValue(2700)
# Velocities from Ruger's model (for testing)
#self.spinBox_vp1.setValue(2433)
#self.spinBox_vs1.setValue(1627)
#self.spinBox_p1.setValue(2405)
#self.spinBox_vp2.setValue(2690)
#self.spinBox_vs2.setValue(1400)
#self.spinBox_p2.setValue(2070)
self.doubleSpinBox_aspect.setValue(0.01)
self.spinBox_fract.setValue(5)
self.doubleSpinBox_bulk.setValue(2.2)
self.doubleSpinBox_shear.setValue(0)
self.spinBox_thick.setValue(100)
self.spinBox_ngeo.setValue(48)
self.spinBox_rmin.setValue(20)
self.spinBox_rstep.setValue(2)
self.size = 0
self.size_plot = 0
self.time_basalto =0
self.time_solo = 0
self.refl_tot_0 = 0
self.refl_tot_30 = 0
self.refl_tot_45 = 0
self.refl_tot_60 = 0
self.refl_tot_90 = 0
self.refl_x_0 = 0
self.refl_x_30 = 0
self.refl_x_45 = 0
self.refl_x_60 = 0
self.refl_x_90 = 0
self.refl_y_0 = 0
self.refl_y_30 = 0
self.refl_y_45 = 0
self.refl_y_60 = 0
self.refl_y_90 = 0
self.refl_z_0 = 0
self.refl_z_30 = 0
self.refl_z_45 = 0
self.refl_z_60 = 0
self.refl_z_90 = 0
self.refl_solo_rad_0 = 0
self.refl_solo_y_0 = 0
self.refl_solo_z_0 = 0
self.refl_solo_x_30 = 0
self.refl_solo_y_30 = 0
self.refl_solo_z_30 = 0
self.refl_solo_x_45 = 0
self.refl_solo_y_45 = 0
self.refl_solo_z_45 = 0
self.refl_solo_x_60 = 0
self.refl_solo_y_60 = 0
self.refl_solo_z_60 = 0
self.refl_solo_x_90 = 0
self.refl_solo_y_90 = 0
self.refl_solo_z_90 = 0
self.solo_fase_rad = 0 # for the soil layer the phases are the same at all azimuths...
self.solo_fase_z = 0
self.hti_fase_rad_0 = 0
self.hti_fase_rad_30 = 0
self.hti_fase_rad_45 = 0
self.hti_fase_rad_60 = 0
self.hti_fase_rad_90 = 0
self.hti_fase_z_0 = 0
self.hti_fase_z_30 = 0
self.hti_fase_z_45 = 0
self.hti_fase_z_60 = 0
self.hti_fase_z_90 = 0
self.dn = 0
self.dt = 0
###
### connect the widget signals to their actions
self.spinBox_vp1.valueChanged.connect(self.vp1)
self.spinBox_vp2.valueChanged.connect(self.vp2)
self.spinBox_vs1.valueChanged.connect(self.plot)
self.spinBox_p1.valueChanged.connect(self.plot)
self.spinBox_vp2.valueChanged.connect(self.weak_calc)
self.spinBox_vs2.valueChanged.connect(self.weak_calc)
self.spinBox_p2.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_aspect.valueChanged.connect(self.weak_calc)
self.spinBox_fract.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_aspect.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_bulk.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_shear.valueChanged.connect(self.weak_calc)
self.verticalSlider_fract.valueChanged.connect(self.weak_calc)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.spinBox_ngeo.valueChanged.connect(self.plot)
self.spinBox_rmin.valueChanged.connect(self.plot)
self.spinBox_rstep.valueChanged.connect(self.plot)
self.spinBox_thick.valueChanged.connect(self.plot)
self.split_box0_90.stateChanged.connect(self.plot)
self.split_box_anray_0_90.stateChanged.connect(self.split)
self.split_box_anray_0_45.stateChanged.connect(self.split)
self.split_box_anray_30_60.stateChanged.connect(self.split)
self.split_box_anray_45_90.stateChanged.connect(self.split)
self.pushButton.clicked.connect(self.anray)
self.checkBox_solo.pressed.connect(self.activate)
self.checkBox_solo.released.connect(self.plot)
self.pushButton_2.pressed.connect(self.plot)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.sismo_button.clicked.connect(self.plot_sismograma)
self.radioButton_0.toggled.connect(self.plot_sismograma_v)
self.radioButton_30.toggled.connect(self.plot_sismograma_v)
self.radioButton_45.toggled.connect(self.plot_sismograma_v)
self.radioButton_60.toggled.connect(self.plot_sismograma_v)
self.radioButton_90.toggled.connect(self.plot_sismograma_v)
self.radioButton_plot_x.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_45.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_45_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_30_60.toggled.connect(self.plot_sismo_azim)
self.checkBox_solo_sismo.clicked.connect(self.sismo_enable)
self.az_tmin.valueChanged.connect(self.plot_sismo_azim)
self.az_tmax.valueChanged.connect(self.plot_sismo_azim)
self.slider_pos()
self.anray_path = os.getcwd()
if not os.path.exists('HTI_SH_model'):
os.makedirs('HTI_SH_model')
def vp1(self):
vp = self.spinBox_vp1.value()
vs = vp/np.sqrt(3)
self.spinBox_vs1.setValue(vs)
def vp2(self):
vp = self.spinBox_vp2.value()
vs = vp/np.sqrt(3)
self.spinBox_vs2.setValue(vs)
def message(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Erro")
msg.setInformativeText("Certifique-se de gerar os arquivos e manter a opção (solo) correspondente na primeira aba.")
msg.exec_()
# Function to activate the soil layer in the calculations
def activate(self):
if self.checkBox_solo.isChecked():
self.solo_espessura.setDisabled(True)
self.solo_vp.setDisabled(True)
self.solo_vs.setDisabled(True)
self.solo_densidade.setDisabled(True)
else:
self.solo_espessura.setEnabled(True)
self.solo_vp.setEnabled(True)
self.solo_vs.setEnabled(True)
self.solo_densidade.setEnabled(True)
self.pushButton_2.setEnabled(True)
# Functions to keep the spinboxes and sliders in sync.
def slider_pos(self):
self.verticalSlider_fract.setValue(self.spinBox_fract.value())
def slider_pos1(self):
self.doubleSpinBox_aspect.setValue(self.verticalSlider_aspect.value() / 10000)
def slider_pos2(self):
self.verticalSlider_DN.setValue(self.doubleSpinBox_DN.value()*1000)
self.verticalSlider_DT.setValue(self.doubleSpinBox_DT.value()*1000)
def slider_pos3(self):
self.doubleSpinBox_DN.setValue(self.verticalSlider_DN.value()/1000)
self.doubleSpinBox_DT.setValue(self.verticalSlider_DT.value()/1000)
self.aniso_parameters()
# Function to compute the fracture weakness parameters
def weak_calc(self):
self.doubleSpinBox_DN.valueChanged.disconnect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.disconnect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.disconnect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.disconnect(self.slider_pos3)
# Adjust the spinbox value according to the slider
self.spinBox_fract.setValue(self.verticalSlider_fract.value())
self.verticalSlider_aspect.setValue(self.doubleSpinBox_aspect.value()*10000)
# crack density (degree of fracturing) and aspect ratio
e = self.spinBox_fract.value() / 100
a = self.doubleSpinBox_aspect.value()
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
g = (vs2 ** 2) / (vp2 ** 2)
# Lame parameter
mu = p2 * (vs2 ** 2)
# bulk and shear modulus
kl = self.doubleSpinBox_bulk.value() * 10 ** 9
ul = self.doubleSpinBox_shear.value() * 10 ** 9
# Hudson-type fracture weaknesses, from Chen 2014 (eq. 2) and Bakulin 2000 (eq. 14)
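# Note: the normal weakness DN grows with crack density e and is reduced by a stiff fracture
# infill (the fill bulk modulus kl and shear modulus ul enter the denominator), while the
# tangential weakness DT depends mainly on crack density; both should stay within [0, 1).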
DN = 4 * e / (3 * g * (1 - g) * (1 + ((kl + (4 / 3) * ul) / (np.pi * (1 - g) * mu * a))))
self.doubleSpinBox_DN.setValue(DN)
self.verticalSlider_DN.setValue(DN*1000)
DT= 16 * e / (3 * (3 - 2 * g) * (1 + ((4 * ul) / (np.pi * (3 - 2 * g) * mu * a))))
self.doubleSpinBox_DT.setValue(DT)
self.verticalSlider_DT.setValue(DT*1000)
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.aniso_parameters()
# Function that computes the anisotropy parameters
def aniso_parameters(self):
self.doubleSpinBox_d.valueChanged.disconnect(self.plot)
self.doubleSpinBox_e.valueChanged.disconnect(self.plot)
self.doubleSpinBox_y.valueChanged.disconnect(self.plot)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
DN_H = self.doubleSpinBox_DN.value()
DT_H = self.doubleSpinBox_DT.value()
# Following Chen 2014 and Bakulin 2000 (eq. 27)
# Lame parameters
lamb = p2 * (vp2 ** 2 - 2 * (vs2 ** 2))
mu = p2 * (vs2 ** 2)
M = lamb + 2 * mu
r = lamb / M
c11 = M * (1 - DN_H)
c33 = M * (1 - (r ** 2) * DN_H)
c13 = lamb * (1 - DN_H)
c44 = mu
c66 = mu * (1 - DT_H)
c55 = c66
c23 = c33 - 2 * c44
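# These appear to be the Voigt stiffnesses of an HTI medium whose symmetry (fracture-normal)
# axis is x, built from the linear-slip weaknesses per the reference cited above; dividing by
# the density p2 and by 1e6 below converts them to (km/s)^2 for the anray input file.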
self.c11 = (c11/p2)/1000000
self.c13 = (c13/p2)/1000000
self.c23 = (c23/p2)/1000000
self.c33 = (c33/p2)/1000000
self.c44 = (c44/p2)/1000000
self.c55 = (c55 /p2)/1000000
# To print the elastic parameters, uncomment the lines below.
# print('A11=', c11/p2)
# print('A13=', c13/p2)
# print('A23=', c23/p2)
# print('A33=', c33/p2)
# print('A44=', c44/p2)
# print('A55=', c55/p2)
self.dn = DN_H
self.dt = DT_H
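# The quantities below are Thomsen/Tsvankin-style anisotropy parameters (epsilon, delta, gamma)
# computed from the stiffnesses; gamma measures the SH-wave velocity contrast between the
# fracture-parallel and fracture-normal directions.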
e2_v = (c11 - c33) / (2 * c33)
self.doubleSpinBox_e.setValue(abs(e2_v))
d2_v = (((c13 + c55) ** 2) - ((c33 - c55) ** 2)) / (2 * c33 * (c33 - c55))
self.doubleSpinBox_d.setValue(abs(d2_v))
y2_v = (c66 - c44) / (2 * c44)
self.doubleSpinBox_y.setValue(abs(y2_v))
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.plot()
# Function that performs the main plotting
def plot(self):
self.axes.cla()
self.axes_time.cla()
# Parameters of the upper medium (1)
vp1 = self.spinBox_vp1.value()
vs1 = self.spinBox_vs1.value()
p1 = self.spinBox_p1.value()
# Parameters of the lower medium (2)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
vs2p = np.sqrt(self.c55 * 1000000)
# Vertical impedance
Z1 = p1 * vs1
Z2 = p2 * vs2
# Shear modulus
G1 = p1 * pow(vs1, 2)
G2 = p2 * pow(vs2, 2)
# differences and averages
deltaZ = Z2 - Z1
medZ = (Z1 + Z2) / 2
deltap = p2 - p1
medp = (p1 + p2) / 2
deltavs = vs2 - vs1
medvs = (vs1 + vs2) / 2
deltavs2 = vs2p - vs1
medvs2 = (vs1 + vs2p) / 2
deltavp = vp2 - vp1
medvp = (vp1 + vp2) / 2
deltad = -self.doubleSpinBox_d.value()
deltae = -self.doubleSpinBox_e.value()
deltay = self.doubleSpinBox_y.value()
rmin = self.spinBox_rmin.value()
rstep = self.spinBox_rstep.value()
thick = self.spinBox_thick.value()
# critical angle of incidence
ang_critico = np.arcsin(vs1 / vs2)
ang_critico_graus = ang_critico * 180 / np.pi
ang_text = str(round(ang_critico_graus,1))
self.label_33.setText('Ângulo crítico = ' + ang_text)
# angle, geophone and traveltime calculation
ngeo = self.spinBox_ngeo.value()
if self.checkBox_solo.isChecked():
v1 = self.solo_vs.value()
v2 = self.spinBox_vs1.value()
p1 = self.solo_espessura.value()
p2 = thick
theta_solo, a = self.geofone_to_angle(ngeo, rmin, rstep, p1)
geo, time1 = self.reflect_travel_time(1, p1, theta_solo, v1, 0, 0, 0)
theta = self.geofone_to_angle_2(ngeo, rmin, rstep, v1, v2, p1, p2)
geo, time2 = self.reflect_travel_time(2, p1, 0, v1, p2, theta, v2)
self.time_basalto = time2
self.time_solo = time1
self.axes_time.plot(geo, time1, color= 'brown', label='Solo')
self.axes_time.plot(geo, time2, color= 'blue', label='Basalto')
else:
theta, a = self.geofone_to_angle(ngeo, rmin, rstep, thick)
geo, time = self.reflect_travel_time(1, thick, theta, vs1, 0, 0, 0)
self.time_basalto = time
self.axes_time.plot(geo, time, color= 'blue', label = 'Basalto')
self.axes_time.grid()
self.axes_time.legend(title='Reflexão')
# Azimuths for the reflection-coefficient calculation
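# Linearized SH-wave reflection coefficients for the two azimuths: an impedance-contrast
# intercept (A1, A2) plus a gradient term in tan^2(theta). The 90-degree curve uses the slow
# shear velocity vs2p derived from c55, and the gradient terms include the splitting
# parameter gamma (deltay).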
A1 = -(deltaZ/ medZ) / 2
A2 = -((deltaZ / medZ) - deltay) / 2
Rspar_0 = A1 + 0.5*((deltavs/medvs)- deltay)*pow(np.tan(theta * np.pi / 180), 2)
Rspar_90 = A2 + 0.5*((deltavs2/medvs2)- deltay)*pow(np.tan(theta * np.pi / 180), 2)
self.axes.grid()
self.axes.plot(theta, Rspar_90, '+', label='90')
self.axes.plot(theta, Rspar_0, '+', label='0')
self.axes2.set_xlim(self.axes.get_xlim())
self.axes2.set_xticks(theta)
self.axes2.set_xticklabels(a)
self.axes2.set_xlabel('Distância (m)', size=6)
for label in self.axes2.xaxis.get_ticklabels()[::2]:
label.set_visible(False)
if self.split_box0_90.isChecked():
dif1= np.zeros(len(Rspar_90))
for i in range(len(Rspar_90)):
if abs(Rspar_90[i]) > abs(Rspar_0[i]):
dif1[i] = abs(Rspar_90[i] - Rspar_0[i]) / abs(Rspar_0[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], Rspar_0[i], 'ro')
self.axes.plot(theta[i], Rspar_90[i], 'ro')
break
else:
dif1[i] = abs(Rspar_0[i] - Rspar_90[i]) / abs(Rspar_90[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], Rspar_90[i], 'ro')
self.axes.plot(theta[i], Rspar_0[i], 'ro')
break
self.axes.legend(title='Azimute')
self.canvas.draw()
# Function to generate anray files for different azimuths (0, 30, 45, 60, 90)
def anray(self):
azimute = np.array([0, 30, 45, 60, 90])
self.anray_file(azimute)
# Function that generates the anray input file for a specific azimuth.
def anray_file(self, azimute):
azh = azimute
self.size = 0
self.progressBar.setValue(self.size)
for h in azh:
self.size = self.size + 10
self.progressBar.setValue(self.size)
file = open('modelo_anray_%s.modelo' %h, 'w')
file.write("'modelo HTI azimute %s'\n" %(h))
file.write("/\n")
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s\n' % (2, 4, 10, 10))
else:
file.write('%s %s %s %s\n' % (2, 3, 10, 10))
# layer 1
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 0))
file.write('%s %s\n' % (0, 0))
# soil layer
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
# layer 2
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
# layer 3
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (2, 2))
#printerplot
file.write('%s %s\n%s %s\n%s %s\n' % (0, 0.5, 0.9, 1.1, 1.9, 2.1))
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (1.9, 2.1))
# specification of elastic parameters and constant density
file.write('%s %s\n' % (0, 1))
# densities
if self.checkBox_solo.isChecked():
file.write('%s '% (self.solo_densidade.value() / 1000))
file.write('%s %s\n' % (self.spinBox_p1.value()/1000, self.spinBox_p2.value()/1000))
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n%s\n' % ((self.solo_vp.value() / 1000) ** 2, (self.solo_vs.value() / 1000) ** 2)) # squared P- and S-wave velocities
# isotropic layer
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n%s\n' % ((self.spinBox_vp1.value()/1000)**2, (self.spinBox_vs1.value()/1000)**2)) # squared P- and S-wave velocities
# anisotropic layer
if self.dn != 0 and self.dt != 0:
file.write('%s %s\n' % (1, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n' % (self.c11)) #A11
file.write('%s\n' % (self.c13)) # A12
file.write('%s\n' % (self.c13)) # A13
file.write('%s\n' % (0)) # A14
file.write('%s\n' % (0)) # A15
file.write('%s\n' % (0)) # A16
file.write('%s\n' % (self.c33)) # A22
file.write('%s\n' % (self.c23)) # A23
file.write('%s\n' % (0)) # A24
file.write('%s\n' % (0)) # A25
file.write('%s\n' % (0)) # A26
file.write('%s\n' % (self.c33)) # A33
file.write('%s\n' % (0)) # A34
file.write('%s\n' % (0)) # A35
file.write('%s\n' % (0)) # A36
file.write('%s\n' % (self.c44)) # A44
file.write('%s\n' % (0)) # A45
file.write('%s\n' % (0)) # A46
file.write('%s\n' % (self.c55)) # A55
file.write('%s\n' % (0)) # A56
file.write('%s\n' % (self.c55)) # A66
else:
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogeneous in x, y, z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n%s\n' % ((self.spinBox_vp2.value() / 1000) ** 2, (self.spinBox_vs2.value() / 1000) ** 2))
#!ICONT,MEP,MOUT,MDIM,METHOD,MREG,ITMAX,IPOL,IPREC,IRAYPL,IPRINT,IAMP,MTRNS,ICOEF,IRT,ILOC,MCOD,MORI
file.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n' % (1, self.spinBox_ngeo.value(), 1, 1, 0, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
#!PROF(1),RMIN,RSTEP,XPRF,YPRF
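# The profile azimuth is written in radians (h deg * pi/180); 90 degrees is written as 1.5 rad,
# just under pi/2 ~ 1.5708, presumably to avoid a profile lying exactly in the symmetry plane.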
if h < 90:
azh = (h/180)*np.pi
else:
azh = 1.5
file.write('%s %s %s %s %s\n' % (azh, self.spinBox_rmin.value()/1000, self.spinBox_rstep.value()/1000, 10, 10))
#!XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,REPS,PREPS
file.write('%s %s %s %s %s %s %s %s\n' % (10, 10, 0, 0, 0.04, 0.0001, 0.0005, 0.0005))
#!AMIN, ASTEP, AMAX
file.write('%s %s %s\n' % (-0.3, 0.005, 1.8))
#!BMIN, BSTEP, BMAX
file.write('%s %s %s\n' % (-0.3, 0.005, 1.8))
#!KC, KREF, ((CODE(I, K), K = 1, 2), I = 1, KREF)
file.write('%s %s %s %s %s %s\n' % (1, 2, 1, 2, 1, 2))
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s %s %s %s %s %s %s\n' % (1, 4, 1, 2, 2, 2, 2, 2, 1, 2))
file.write('%s %s\n' % (0, 0))
file.write('%s/' % (0))
file.close()
self.anray_script(h)
# Function that builds a shell script to run the models and generate the figures
def anray_script(self, azh):
files = open('anray_script%s.sh' %azh, 'w')
files.write('modname=modelo_anray\nanrayinput="$modname"_%s.modelo\n./anray <<FIM\n$anrayinput\nFIM\n\n\n' %(azh))
files.write('cp fort.30 amplitudes_%s.dat\n\n' %azh)
files.write('cp lu2.anray lu2_%s.anray' %azh)
files.close()
subprocess.call('chmod +x anray_script%s.sh' %azh, shell=True)
# run anray for this azimuth; the call blocks until the script finishes
# (the original Thread(target=self.anray_thr(azh)) called the method immediately anyway)
self.anray_thr(azh)
# Function to run the script and wait for it to finish
def anray_thr(self, azh):
FNULL = open(os.devnull, 'w')
cmd = './anray_script%s.sh' % azh  # avoid shadowing the built-in str
p = subprocess.Popen(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
status = p.wait()
shutil.copy2('fort.30', '%s/HTI_SH_model/amplitudes_%s.dat' %(self.anray_path, azh))
shutil.copy2('lu2.anray', '%s/HTI_SH_model/lu2_%s.anray' % (self.anray_path, azh))
shutil.move('modelo_anray_%s.modelo' % azh,'%s/HTI_SH_model/modelo_anray_%s.modelo' % (self.anray_path, azh))
os.remove('%s/anray_script%s.sh' %(self.anray_path, azh))
self.size = self.size + 10
self.progressBar.setValue(self.size)
if self.progressBar.value() == 100:
self.frame_7.setEnabled(True)
self.frame_8.setEnabled(True)
self.frame_11.setEnabled(True)
self.frame_13.setEnabled(True)
self.sismo_button.setEnabled(True)
self.frame_14.setEnabled(True)
self.frame_9.setEnabled(True)
if self.checkBox_solo.isChecked() == False:
self.checkBox_solo_sismo.setChecked(False)
self.checkBox_solo_sismo2.setChecked(False)
self.checkBox_solo_sismo.setEnabled(False)
self.frame_12.setEnabled(False)
self.label_47.setEnabled(False)
else:
self.frame_12.setEnabled(True)
self.checkBox_solo_sismo.setEnabled(True)
self.label_47.setEnabled(True)
self.split()
# Function that plots the components from the anray output and analyses the splitting
def split(self):
self.axes_anray_tot.cla()
self.axes_anray_trans.cla()
self.axes_anray_trans2.cla()
self.axes_anray_tot2.cla()
# self.axes_anray_z.cla()
# self.axes_anray_z2.cla()
self.axes_anray_time.cla()
self.axes_anray2_tot.cla()
# self.axes_anray2_z.cla()
self.axes_anray2_time.cla()
self.axes_anray2_rad.cla()
f_0 = open('amplitudes_0.dat', "r")
f_30 = open('amplitudes_30.dat', "r")
f_45 = open('amplitudes_45.dat', "r")
f_60 = open('amplitudes_60.dat', "r")
f_90= open('amplitudes_90.dat', "r")
time_basalto = []
time_solo=[]
geofone_0 = []
x_0 = []
y_0 = []
z_0 = []
xc_0 = []
yc_0 = []
zc_0 = []
geofone_30 = []
x_30 = []
y_30 = []
z_30 = []
xc_30 = []
yc_30 = []
zc_30 = []
geofone_45 = []
x_45 = []
y_45 = []
z_45 = []
xc_45 = []
yc_45 = []
zc_45 = []
geofone_60 = []
x_60 = []
y_60 = []
z_60 = []
xc_60 = []
yc_60 = []
zc_60 = []
geofone_90 = []
x_90 = []
y_90 = []
z_90 = []
xc_90 = []
yc_90 = []
zc_90 = []
solo_x_0=[]
solo_x_30=[]
solo_x_45 = []
solo_x_60 = []
solo_x_90 = []
solo_y_0=[]
solo_y_30=[]
solo_y_45 = []
solo_y_60 = []
solo_y_90 = []
solo_z_0=[]
solo_z_30=[]
solo_z_45 = []
solo_z_60 = []
solo_z_90 = []
fase_x_0 = []
fase_x_30 = []
fase_x_45 = []
fase_x_60 = []
fase_x_90 = []
fase_y_0 = []
fase_y_30 = []
fase_y_45 = []
fase_y_60 = []
fase_y_90 = []
fase_z_0 = []
fase_z_30 = []
fase_z_45 = []
fase_z_60 = []
fase_z_90 = []
self.axes_anray_tot.set_ylabel("total")
self.axes_anray_trans.set_ylabel("transversal")
self.axes_anray_tot.grid()
self.axes_anray_trans.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray2_rad.set_ylabel("transversal")
self.axes_anray2_tot.grid()
self.axes_anray2_rad.grid()
self.axes_anray2_time.grid()
if self.checkBox_solo.isChecked():
two_layer = True
var = -2
else:
two_layer = False
var = -1
for line in f_0:
coluna = line.split()
if float(coluna[0]) == var:
geofone_0.append(int(coluna[1]))
# real part
x_0.append(float(coluna[3]))
y_0.append(float(coluna[5]))
z_0.append(float(coluna[7]))
# imaginary (complex) part
xc_0.append(float(coluna[4]))
yc_0.append(float(coluna[6]))
zc_0.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == -2:
time_basalto.append(float(coluna[2]))
else :
time_solo.append(float(coluna[2]))
solo_x_0.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_0.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_0.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_0.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_0.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_0.append(np.arctan2(float(coluna[8]), float(coluna[7])))
if two_layer == False:
time_basalto.append(float(coluna[2]))
f_0.close()
geo_0 = np.asarray(geofone_0)
time_basalto = np.asarray(time_basalto)
time_solo = np.asarray(time_solo)
x_0 = np.asarray(x_0)
y_0 = np.asarray(y_0)
z_0 = np.asarray(z_0)
xc_0 = np.asarray(xc_0)
yc_0 = np.asarray(yc_0)
zc_0 = np.asarray(zc_0)
solo_x_0 = np.asarray(solo_x_0)
solo_x_0 = np.fliplr([solo_x_0])[0]
solo_y_0 = np.asarray(solo_y_0)
solo_y_0 = np.fliplr([solo_y_0])[0]
solo_z_0 = np.asarray(solo_z_0)
solo_z_0 = np.fliplr([solo_z_0])[0]
fase_x_0 = np.asarray(fase_x_0)
fase_x_0 = np.fliplr([fase_x_0])[0]
fase_y_0 = np.asarray(fase_y_0)
fase_y_0 = np.fliplr([fase_y_0])[0]
fase_z_0 = np.asarray(fase_z_0)
fase_z_0 = np.fliplr([fase_z_0])[0]
solo_rad_0 = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2)
self.solo_fase_rad = fase_x_0
self.solo_fase_z = fase_z_0
solo_0_tot = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2 + solo_z_0 ** 2)
self.refl_solo_rad_0 = solo_rad_0
self.refl_solo_z_0 = solo_z_0
self.time_basalto = np.fliplr([time_basalto])[0]
self.time_solo = np.fliplr([time_solo])[0]
x0_re = np.fliplr([x_0])[0]
y0_re = np.fliplr([y_0])[0]
z0_re = np.fliplr([z_0])[0]
x0c_re = np.fliplr([xc_0])[0]
y0c_re = np.fliplr([yc_0])[0]
z0c_re = np.fliplr([zc_0])[0]
ampx_0 = np.sqrt(x0_re**2 + x0c_re**2)
ampy_0 = np.sqrt(y0_re **2 + y0c_re ** 2)
ampz_0 = np.sqrt(z0_re **2 + z0c_re ** 2)
phx_0 = np.arctan2(x0c_re, x0_re)
phy_0 = np.arctan2(y0c_re, y0_re)
phz_0 = np.arctan2(z0c_re, z0_re)
self.hti_fase_rad_0 = phx_0
self.hti_fase_z_0 = phz_0
geo0_re = np.fliplr([geo_0])[0]
tot0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2 + ampz_0 ** 2)
trans_0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2)
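# Each component's amplitude is the modulus of its complex value and its phase is arctan2(Im, Re);
# tot0 is the magnitude of the full three-component vector and trans_0 the magnitude of the two
# horizontal components.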
self.axes_anray_tot.plot(geo0_re, tot0, label=0)
self.refl_tot_0 = tot0
self.refl_rad_0 = trans_0
self.refl_z_0 = ampz_0
# self.axes_anray_z.plot(geo0_re, ampz_0, label=0)
self.axes_anray_trans.plot(geo0_re, trans_0, label=0)
if two_layer==True:
self.axes_anray2_tot.plot(geo0_re, solo_0_tot, label=0)
self.axes_anray2_tot.set_ylim([0,2])
self.axes_anray2_rad.plot(geo0_re, solo_rad_0, label=0)
# self.axes_anray2_z.plot(geo0_re, solo_z_0, label=0)
if two_layer == True:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray2_time.plot(geo0_re, self.time_solo, color='brown')
else:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray_time.set_ylabel('tempo (s)')
self.axes_anray2_time.set_ylabel('tempo (s)')
for line in f_30:
coluna = line.split()
if float(coluna[0]) == var:
geofone_30.append(int(coluna[1]))
x_30.append(float(coluna[3]))
y_30.append(float(coluna[5]))
z_30.append(float(coluna[7]))
xc_30.append(float(coluna[4]))
yc_30.append(float(coluna[6]))
zc_30.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == -1:
solo_x_30.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_30.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_30.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_30.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_30.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_30.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_30.close()
geo_30 = np.asarray(geofone_30)
x_30 = np.asarray(x_30)
y_30 = np.asarray(y_30)
z_30 = np.asarray(z_30)
xc_30 = np.asarray(xc_30)
yc_30 = np.asarray(yc_30)
zc_30 = np.asarray(zc_30)
x30_re = np.fliplr([x_30])[0]
y30_re = np.fliplr([y_30])[0]
z30_re = np.fliplr([z_30])[0]
x30c_re = np.fliplr([xc_30])[0]
y30c_re = np.fliplr([yc_30])[0]
z30c_re = np.fliplr([zc_30])[0]
ampx_30 = np.sqrt(x30_re ** 2 + x30c_re ** 2)
ampy_30 = np.sqrt(y30_re ** 2 + y30c_re ** 2)
ampz_30 = np.sqrt(z30_re ** 2 + z30c_re ** 2)
phx_30 = np.arctan2(x30c_re, x30_re)
phy_30 = np.arctan2(y30c_re, y30_re)
phz_30 = np.arctan2(z30c_re, z30_re)
self.hti_fase_rad_30 = phx_30
self.hti_fase_z_30 = phz_30
geo30_re = np.fliplr([geo_30])[0]
tot30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2 + ampz_30 ** 2)
trans_30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2)
solo_x_30 = np.asarray(solo_x_30)
solo_x_30 = np.fliplr([solo_x_30])[0]
solo_y_30 = np.asarray(solo_y_30)
solo_y_30 = np.fliplr([solo_y_30])[0]
solo_z_30 = np.asarray(solo_z_30)
solo_z_30 = np.fliplr([solo_z_30])[0]
solo_30_tot = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2 + solo_z_30 ** 2)
solo_rad_30 = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2)
fase_x_30 = np.asarray(fase_x_30)
fase_x_30 = np.fliplr([fase_x_30])[0]
fase_y_30 = np.asarray(fase_y_30)
fase_y_30 = np.fliplr([fase_y_30])[0]
fase_z_30 = np.asarray(fase_z_30)
fase_z_30 = np.fliplr([fase_z_30])[0]
self.refl_solo_x_30 = solo_rad_30
self.refl_solo_y_30 = solo_y_30
self.refl_solo_z_30 = solo_z_30
self.refl_tot_30 = tot30
self.refl_rad_30 = trans_30
self.refl_y_30 = y30_re
self.refl_z_30 = ampz_30
self.axes_anray_tot.plot(geo30_re, tot30, label=30)
self.axes_anray_trans.plot(geo30_re, trans_30, label=30)
if two_layer == True:
self.axes_anray2_tot.plot(geo30_re, solo_30_tot, label=30)
self.axes_anray2_rad.plot(geo30_re, solo_rad_30, label=30)
for line in f_45:
coluna = line.split()
if float(coluna[0]) == var:
geofone_45.append(int(coluna[1]))
x_45.append(float(coluna[3]))
y_45.append(float(coluna[5]))
z_45.append(float(coluna[7]))
xc_45.append(float(coluna[4]))
yc_45.append(float(coluna[6]))
zc_45.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == -1:
solo_x_45.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_45.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_45.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_45.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_45.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_45.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_45.close()
geo_45 = np.asarray(geofone_45)
x_45 = np.asarray(x_45)
y_45 = np.asarray(y_45)
z_45 = np.asarray(z_45)
xc_45 = np.asarray(xc_45)
yc_45 = np.asarray(yc_45)
zc_45 = np.asarray(zc_45)
x45_re = np.fliplr([x_45])[0]
y45_re = np.fliplr([y_45])[0]
z45_re = np.fliplr([z_45])[0]
x45c_re = np.fliplr([xc_45])[0]
y45c_re = np.fliplr([yc_45])[0]
z45c_re = np.fliplr([zc_45])[0]
ampx_45 = np.sqrt(x45_re ** 2 + x45c_re ** 2)
ampy_45 = np.sqrt(y45_re ** 2 + y45c_re ** 2)
ampz_45 = np.sqrt(z45_re ** 2 + z45c_re ** 2)
phx_45 = np.arctan2(x45c_re, x45_re)
phy_45 = np.arctan2(y45c_re, y45_re)
phz_45 = np.arctan2(z45c_re, z45_re)
self.hti_fase_rad_45 = phx_45
self.hti_fase_z_45 = phz_45
geo45_re = np.fliplr([geo_45])[0]
tot45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2 + ampz_45 ** 2)
trans_45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2)
solo_x_45 =
|
np.asarray(solo_x_45)
|
numpy.asarray
|
"""
Tests for dataset creation
"""
import random
import math
import unittest
import os
import numpy as np
import pytest
import deepchem as dc
try:
import torch # noqa
PYTORCH_IMPORT_FAILED = False
except ImportError:
PYTORCH_IMPORT_FAILED = True
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
def load_multitask_data():
"""Load example multitask data."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
]
input_file = os.path.join(current_dir,
"../../models/tests/multitask_example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
class TestTransformer(dc.trans.Transformer):
def transform_array(self, X, y, w, ids):
return (2 * X, 1.5 * y, w, ids)
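# TestTransformer scales X by 2 and y by 1.5 so the tests below can check that a dataset
# transform touches X and y (via transform_X / transform_y) while leaving w and ids unchanged.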
def test_transform_disk():
"""Test that the transform() method works for DiskDatasets."""
dataset = load_solubility_data()
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
for parallel in (True, False):
transformed = dataset.transform(transformer, parallel=parallel)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_sparsify_and_densify():
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features():
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
# Test case where 2 * n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches():
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test case where 2 * n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names():
"""Test that get_task_names returns correct task_names"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape():
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len():
"""Test that len(dataset) works."""
solubility_dataset = load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard():
"""Test that resharding the dataset works."""
solubility_dataset = load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_complete_shuffle():
shard_sizes = [1, 2, 3, 4, 5]
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b =
|
np.random.rand(sz, 1)
|
numpy.random.rand
|
import numpy as np
import scipy
import scipy.fft as fft
from scipy.ndimage import fourier_shift
def autocorr2d(vals, pad_mode="reflect"):
"""
Compute 2-D autocorrelation of image via the FFT.
Parameters
----------
vals : py:class:`~numpy.ndarray`
2-D image.
pad_mode : str
Desired padding. See NumPy documentation: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
Return
------
autocorr : py:class:`~numpy.ndarray`
"""
(x_dim, y_dim) = vals.shape
(x_padding, y_padding) = (x_dim//2, y_dim//2)
padded_signal =
|
np.pad(vals, ((x_padding, x_padding), (y_padding, y_padding)), pad_mode)
|
numpy.pad
|
from sub_units.utils import Stopwatch, ApproxType
import numpy as np
import pandas as pd
from enum import Enum
pd.plotting.register_matplotlib_converters() # addresses complaints about Timestamp instead of float for plotting x-values
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
from matplotlib import collections as mplc
import scipy as sp
import joblib
from os import path
from tqdm import tqdm
import os
from scipy.optimize import approx_fprime
from functools import lru_cache, partial
from abc import ABC, abstractmethod
import datetime
import arviz as az
import seaborn as sns
import numdifftools
import emcee
import logging
logging.basicConfig(level=logging.INFO)
class WhichDistro(Enum):
norm = 'norm'
laplace = 'laplace'
sphere = 'hypersphere'
norm_trunc = 'norm_trunc'
laplace_trunc = 'laplace_trunc'
def __str__(self):
return str(self.value)
class BayesModel(ABC):
@staticmethod
def FWHM(in_list_locs, in_list_vals):
'''
Calculate full width at half maximum
:param in_list_locs: list of locations
:param in_list_vals: list of values
:return: width of the peak at half of its maximum value
'''
sorted_ind = np.argsort(in_list_locs)
sorted_locs = in_list_locs[sorted_ind]
sorted_vals = in_list_vals[sorted_ind]
peak = max(in_list_vals)
start = None
end = None
# walk the sorted curve and record where it first rises above and later falls below half the peak
for i in range(len(sorted_ind) - 1):
loc = sorted_locs[i]
next_loc = sorted_locs[i + 1]
val = sorted_vals[i]
next_val = sorted_vals[i + 1]
if start is None and val < peak / 2 and next_val >= peak / 2:
start = (loc + next_loc) / 2
if end is None and val >= peak / 2 and next_val < peak / 2:
end = (loc + next_loc) / 2
return end - start
# this fella isn't necessary like other abstractmethods, but optional in a subclass that supports statsmodels solutions
def render_statsmodels_fit(self):
pass
# this fella isn't necessary like other abstractmethods, but optional in a subclass that supports PyMC3 solutions
def render_PyMC3_fit(self):
pass
@abstractmethod
def run_simulation(self, in_params, offset=None):
pass
# this fella isn't necessary like other abstractmethods, but optional in a subclass that supports statsmodels solutions
def run_fits_simplified(self, in_params):
pass
@abstractmethod
def _get_log_likelihood_precursor(self,
in_params,
data_new_tested=None,
data_new_dead=None,
cases_bootstrap_indices=None,
deaths_bootstrap_indices=None,
):
pass
def convert_params_as_list_to_dict(self, in_params, map_name_to_sorted_ind=None):
'''
Helper function to convert params as a list to the dictionary form
:param in_params: params as list
:return: params as dict
'''
if map_name_to_sorted_ind is None:
map_name_to_sorted_ind = self.map_name_to_sorted_ind
# convert from list to dictionary (for compatibility with the least-squares solver)
if type(in_params) != dict and map_name_to_sorted_ind is not None:
params = {key: in_params[ind] for key, ind in map_name_to_sorted_ind.items()}
else:
params = in_params.copy()
return params
def convert_params_as_dict_to_list(self, in_params):
'''
Helper function to convert params as a dict to the list form
:param in_params: params as dict
:return: params as list
'''
if type(in_params) == dict:
p0 = [in_params[name] for name in self.sorted_names]
else:
p0 = in_params.copy()
return p0
def __init__(self,
state_name,
n_bootstraps=1000,
n_likelihood_samples=1000,
load_data_obj=None,
burn_in=20,
sorted_param_names=None,
sorted_init_condit_names=None,
curve_fit_bounds=None,
priors=None,
test_params=None,
static_params=None,
logarithmic_params=None,
extra_params=None,
plot_dpi=300,
opt_force_plot=True,
opt_calc=True,
opt_force_calc=False,
opt_plot=True,
model_type_name=None,
plot_param_names=None,
opt_simplified=False,
log_offset=0.1,
# this kwarg became redundant after I filled in zeros with 0.1 in load_data, leave at 0
opt_smoothing=True,
prediction_window=28 * 3, # predict three months into the future
model_approx_types=[ApproxType.SP_CF, ApproxType.NDT_Hess,
ApproxType.NDT_Jac, ApproxType.BS,
ApproxType.LS, ApproxType.MCMC],
# still haven't gotten ApproxType.SP_LS, ApproxType.SP_min to work
plot_two_vals=None,
override_max_date_str=None,
cases_cnt_threshold=20,
deaths_cnt_threshold=20,
n_samples=500,
**kwargs
):
for key, val in kwargs.items():
print(f'Adding extra params to attributes... {key}: {val}')
setattr(self, key, val)
if load_data_obj is None:
from scrap_code import load_data as load_data_obj
state_data = load_data_obj.get_state_data(state_name, opt_smoothing=opt_smoothing)
max_date_str = datetime.datetime.strftime(state_data['max_date'], '%Y-%m-%d')
self.max_date_str = max_date_str
if hasattr(self, 'moving_window_size'):
self.min_sol_date = state_data['max_date'] - datetime.timedelta(days=self.moving_window_size)
self.cases_cnt_threshold = cases_cnt_threshold
self.deaths_cnt_threshold = deaths_cnt_threshold
self.override_max_date_str = override_max_date_str
self.plot_two_vals = plot_two_vals
self.prediction_window = prediction_window
self.map_approx_type_to_MVN = dict()
self.model_approx_types = model_approx_types
self.opt_smoothing = opt_smoothing  # determines whether to smooth results from load_data_obj.get_state_data
self.opt_plot = opt_plot
self.log_offset = log_offset
self.model_type_name = model_type_name
self.state_name = state_name
self.n_bootstraps = n_bootstraps
self.n_likelihood_samples = n_likelihood_samples
self.burn_in = burn_in
self.max_date = datetime.datetime.strptime(max_date_str, '%Y-%m-%d')
self.static_params = static_params
self.opt_simplified = opt_simplified
self.n_samples = n_samples
self.discovered_MLE_params = list()
if self.opt_smoothing:
smoothing_str = 'smoothed_'
else:
smoothing_str = ''
if override_max_date_str is None:
hyperparameter_max_date_str = datetime.datetime.today().strftime('%Y-%m-%d')
else:
hyperparameter_max_date_str = override_max_date_str
state_lc = state_name.lower().replace(' ', '_').replace(':', '_')
self.all_data_fit_filename = path.join('state_all_data_fits',
f"{state_lc}_{smoothing_str}{model_type_name}_max_date_{hyperparameter_max_date_str.replace('-', '_')}.joblib")
self.bootstrap_filename = path.join('state_bootstraps',
f"{state_lc}_{smoothing_str}{model_type_name}_{n_bootstraps}_bootstraps_max_date_{hyperparameter_max_date_str.replace('-', '_')}.joblib")
self.likelihood_samples_filename_format_str = path.join('state_likelihood_samples',
f"{state_lc}_{smoothing_str}{model_type_name}_{{}}_{n_likelihood_samples}_samples_max_date_{hyperparameter_max_date_str.replace('-', '_')}.joblib")
self.likelihood_samples_from_bootstraps_filename = path.join('state_likelihood_samples',
f"{state_lc}_{smoothing_str}{model_type_name}_{n_bootstraps}_bootstraps_likelihoods_max_date_{hyperparameter_max_date_str.replace('-', '_')}.joblib")
self.PyMC3_filename = path.join('state_PyMC3_traces',
f"{state_lc}_{smoothing_str}{model_type_name}_max_date_{hyperparameter_max_date_str.replace('-', '_')}.joblib")
if opt_simplified:
self.plot_subfolder = f'{hyperparameter_max_date_str.replace("-", "_")}_date_{smoothing_str}{model_type_name}_{"_".join(val.value[1] for val in model_approx_types)}'
else:
self.plot_subfolder = f'{hyperparameter_max_date_str.replace("-", "_")}_date_{smoothing_str}{model_type_name}_{n_bootstraps}_bootstraps_{n_likelihood_samples}_likelihood_samples'
self.plot_subfolder = path.join('state_plots', self.plot_subfolder)
self.plot_filename_base = path.join(self.plot_subfolder,
state_name.lower().replace(' ', '_').replace('.', ''))
if not os.path.exists('state_all_data_fits'):
os.mkdir('state_all_data_fits')
if not os.path.exists('state_bootstraps'):
os.mkdir('state_bootstraps')
if not os.path.exists('state_likelihood_samples'):
os.mkdir('state_likelihood_samples')
if not os.path.exists('state_PyMC3_traces'):
os.mkdir('state_PyMC3_traces')
if not os.path.exists('state_plots'):
os.mkdir('state_plots')
if not os.path.exists(self.plot_subfolder):
os.mkdir(self.plot_subfolder)
if not os.path.exists(self.plot_filename_base):
os.mkdir(self.plot_filename_base)
# I replaced this with the U.S. total so everyone's on the same playing field, otherwise: state_data['sip_date']
self.SIP_date = datetime.datetime.strptime('2020-03-20', '%Y-%m-%d')
self.cases_indices = None
self.deaths_indices = None
self.min_date = state_data['min_date']
self.population = state_data['population']
self.n_count_data = state_data['series_data'][:, 1].size
self.SIP_date_in_days = (self.SIP_date - self.min_date).days
self.max_date_in_days = (self.max_date - self.min_date).days
self.series_data = state_data['series_data'][:self.max_date_in_days, :] # cut off recent days if desired
n_t_vals = self.max_date_in_days + self.prediction_window
self.t_vals = np.linspace(-burn_in, n_t_vals, burn_in + n_t_vals + 1)
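# t_vals runs from -burn_in through the last data day plus the prediction window in one-day
# steps (burn_in + n_t_vals + 1 points over that range gives unit spacing).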
# print('min_date', self.min_date)
# print('max_date_in_days', self.max_date_in_days)
# print('t_vals', self.t_vals)
self.threshold_cases = min(self.cases_cnt_threshold, self.series_data[-1, 1] * 0.1)
print(f"Setting cases threshold to {self.threshold_cases} ({self.series_data[-1, 1]} total)")
try:
self.day_of_threshold_met_case = \
[i for i, x in enumerate(self.series_data[:, 1]) if x >= self.threshold_cases][0]
except IndexError:  # no day reached the case threshold
self.day_of_threshold_met_case = len(self.series_data) - 1
self.threshold_deaths = min(self.deaths_cnt_threshold, self.series_data[-1, 2] * 0.1)
print(f"Setting death threshold to {self.threshold_deaths} ({self.series_data[-1, 2]} total)")
try:
self.day_of_threshold_met_death = \
[i for i, x in enumerate(self.series_data[:, 2]) if x >= self.threshold_deaths][0]
except IndexError:  # no day reached the death threshold
self.day_of_threshold_met_death = len(self.series_data) - 1
data_cum_tested = self.series_data[:, 1].copy()
self.data_new_tested = [data_cum_tested[0]] + [data_cum_tested[i] - data_cum_tested[i - 1] for i in
range(1, len(data_cum_tested))]
data_cum_dead = self.series_data[:, 2].copy()
self.data_new_dead = [data_cum_dead[0]] + [data_cum_dead[i] - data_cum_dead[i - 1] for i in
range(1, len(data_cum_dead))]
delta_t = 17
self.data_new_recovered = [0.0] * delta_t + self.data_new_tested
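# The "recovered" series is simply the new-tested series lagged by delta_t = 17 days,
# i.e. a case is assumed to resolve about 17 days after it is reported.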
self.curve_fit_bounds = curve_fit_bounds
self.priors = priors
self.test_params = test_params
self.all_data_params = test_params
self.sorted_param_names = [name for name in sorted_param_names if name not in static_params]
self.sorted_init_condit_names = sorted_init_condit_names
self.sorted_names = self.sorted_init_condit_names + self.sorted_param_names
self.logarithmic_params = logarithmic_params
self.map_name_to_sorted_ind = {val: ind for ind, val in enumerate(self.sorted_names)}
self.all_samples_as_list = list()
self.all_log_probs_as_list = list()
self.all_propensities_as_list = list()
self.all_random_walk_samples_as_list = list()
self.all_random_walk_log_probs_as_list = list()
self.plot_dpi = plot_dpi
self.opt_force_plot = opt_force_plot
self.opt_calc = opt_calc
self.opt_force_calc = opt_force_calc
self.loaded_bootstraps = False
self.loaded_likelihood_samples = list()
self.loaded_MCMC = list()
self.map_approx_type_to_means = dict()
self.map_approx_type_to_cov = dict()
self.map_approx_type_to_model = dict()
self.extra_params = {key: partial(val, map_name_to_sorted_ind=self.map_name_to_sorted_ind) for key, val in
extra_params.items()}
if plot_param_names is None:
self.plot_param_names = self.sorted_names
else:
self.plot_param_names = plot_param_names
def _errfunc_for_least_squares(self,
in_params,
data_new_tested=None,
data_new_dead=None,
cases_bootstrap_indices=None,
deaths_bootstrap_indices=None,
precursor_func=None,
):
'''
Helper function for scipy.optimize.least_squares
Basically returns log likelihood precursors
:param in_params: dictionary or list of parameters
:param cases_bootstrap_indices: which case indices to include in the likelihood (bootstrap indices when applicable)
:param deaths_bootstrap_indices: which death indices to include in the likelihood (bootstrap indices when applicable)
:return: list: distances and other loss function contributions
'''
in_params_as_dict = self.convert_params_as_list_to_dict(in_params)
if precursor_func is None:
precursor_func = self._get_log_likelihood_precursor
positive_dists, deceased_dists, other_errs, sol, positive_vals, deceased_vals, \
predicted_tested, actual_tested, predicted_dead, actual_dead = precursor_func(
in_params,
data_new_tested=data_new_tested,
data_new_dead=data_new_dead,
cases_bootstrap_indices=cases_bootstrap_indices,
deaths_bootstrap_indices=deaths_bootstrap_indices)
dists = [x / in_params_as_dict['sigma_positive'] for x in positive_dists] + \
[x / in_params_as_dict['sigma_deceased'] for x in deceased_dists]
return dists + other_errs
def get_log_likelihood(self,
in_params,
data_new_tested=None,
data_new_dead=None,
cases_bootstrap_indices=None,
deaths_bootstrap_indices=None,
opt_return_sol=False,
precursor_func=None,
):
'''
Obtain the log likelihood given a set of in_params
:param in_params: dictionary or list of parameters
:param cases_bootstrap_indices: which case indices to include in the likelihood (bootstrap indices when applicable)
:param deaths_bootstrap_indices: which death indices to include in the likelihood (bootstrap indices when applicable)
:return: float: log likelihood
'''
params = self.convert_params_as_list_to_dict(in_params)
if precursor_func is None:
precursor_func = self._get_log_likelihood_precursor
dists_positive, dists_deceased, other_errs, sol, vals_positive, vals_deceased, \
predicted_tested, actual_tested, predicted_dead, actual_dead = precursor_func(
params,
data_new_tested=data_new_tested,
data_new_dead=data_new_dead,
cases_bootstrap_indices=cases_bootstrap_indices,
deaths_bootstrap_indices=deaths_bootstrap_indices)
# positive_norm = sp.stats.norm(loc=0, scale=params['sigma_positive']).logpdf
# deceased_norm = sp.stats.norm(loc=0, scale=params['sigma_deceased']).logpdf
# from https://codereview.stackexchange.com/questions/69718/fastest-computation-of-n-likelihoods-on-normal-distributions
# def my_logpdf_sum(x, loc, scale):
# root2 = np.sqrt(2)
# root2pi = np.sqrt(2 * np.pi)
# prefactor = - x.size * np.log(scale * root2pi)
# summand = -np.square((x - loc) / (root2 * scale))
# return prefactor + summand.sum()
#
# log_likelihood_positive = np.sum(my_logpdf_sum(dist, 0, params['sigma_positive']) for dist in dists_positive)
# log_likelihood_deceased = np.sum(my_logpdf_sum(dist, 0, params['sigma_deceased']) for dist in dists_deceased)
# from https://emcee.readthedocs.io/en/stable/tutorials/line/ you can use
# -0.5 * np.sum((y - model) ** 2 / sigma2 + np.log(sigma2))
log_likelihood_positive = -0.5 * np.sum(
np.power(dists_positive, 2) / np.power(params['sigma_positive'], 2) + np.log(
2 * np.pi * params['sigma_positive'] ** 2))
log_likelihood_deceased = -0.5 * np.sum(
np.power(dists_deceased, 2) / np.power(params['sigma_deceased'], 2) + np.log(
2 * np.pi * params['sigma_deceased'] ** 2))
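# Each block above is the sum of Gaussian log-densities of the residuals,
# -0.5 * sum(d_i^2 / sigma^2 + log(2*pi*sigma^2)), evaluated separately for the
# positive-test and deceased series with their own sigmas.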
log_likelihood_other = -sum(x ** 2 for x in other_errs)
log_likelihood = log_likelihood_positive + log_likelihood_deceased + log_likelihood_other
if opt_return_sol:
return log_likelihood, sol
else:
return log_likelihood
def fit_curve_via_curve_fit(self,
p0,
data_tested=None,
data_dead=None,
tested_indices=None,
deaths_indices=None):
'''
Given initial parameters, fit the curve with scipy's curve_fit method
:param p0: initial parameters
:param data_tested: list of observables (passable since we may want to add jitter)
:param data_dead: list of observables (passable since we may want to add jitter)
:param tested_indices: bootstrap indices when applicable
:param deaths_indices: bootstrap indices when applicable
:return: optimized parameters as dictionary
'''
positive_dists, deceased_dists, other_errs, sol, positive_vals, deceased_vals, \
predicted_tested, actual_tested, predicted_dead, actual_dead = self._get_log_likelihood_precursor(
self.test_params)
inv_deaths_indices = {val: ind for ind, val in enumerate(self.deaths_indices)}
inv_cases_indices = {val: ind for ind, val in enumerate(self.cases_indices)}
ind_use = list()
for x in range(len(predicted_tested) + len(predicted_dead)):
if x > len(predicted_tested) - 1:
if deaths_indices is None or deaths_indices is not None and int(x) - len(
predicted_tested) in [inv_deaths_indices[tmp_ind] for tmp_ind in deaths_indices]:
ind_use.append(int(x))
else:
if tested_indices is None or tested_indices is not None and int(x) in [inv_cases_indices[tmp_ind] for
tmp_ind in tested_indices]:
ind_use.append(int(x))
data_use = actual_tested + actual_dead
data_use = [data_use[i] for i in ind_use]
good_ind = [i for i, x in enumerate(data_use) if np.isfinite(x)]
def curve_fit_func(x_list, *params):
positive_dists, deceased_dists, other_errs, sol, positive_vals, deceased_vals, \
predicted_tested, actual_tested, predicted_dead, actual_dead = self._get_log_likelihood_precursor(
self.convert_params_as_list_to_dict(params))
out_list = list()
for x in x_list:
if x > len(predicted_tested) - 1:
out_list.append(predicted_dead[int(x) - len(predicted_tested)])
else:
out_list.append(predicted_tested[int(x)])
# print('out_list')
# print(out_list)
return out_list
params_as_list, cov = sp.optimize.curve_fit(curve_fit_func,
np.array(ind_use)[good_ind],
np.array(data_use)[good_ind],
p0=self.convert_params_as_dict_to_list(self.test_params),
bounds=(
[self.curve_fit_bounds[name][0] for name in self.sorted_names],
[self.curve_fit_bounds[name][1] for name in self.sorted_names]))
# Calculate the observation error:
positive_dists, deceased_dists, other_errs, sol, positive_vals, deceased_vals, \
predicted_tested, actual_tested, predicted_dead, actual_dead = self._get_log_likelihood_precursor(
self.convert_params_as_list_to_dict(params_as_list))
obs_err = np.sqrt(np.mean(np.array(positive_dists + deceased_dists)[ind_use][good_ind] ** 2))
# Add empirical obs. err. to dict.
params_as_dict = self.convert_params_as_list_to_dict(params_as_list)
params_as_dict['sigma_positive'] = np.mean(obs_err)
params_as_dict['sigma_deceased'] = np.mean(obs_err)
return params_as_dict, cov
def fit_curve_via_least_squares(self,
p0,
data_tested=None,
data_dead=None,
tested_indices=None,
deaths_indices=None):
'''
Given initial parameters, fit the curve with MSE
:param p0: initial parameters
:param data_tested: list of observables (passable since we may want to add jitter)
:param data_dead: list of observables (passable since we may want to add jitter)
:param tested_indices: bootstrap indices when applicable
:param deaths_indices: bootstrap indices when applicable
:return: optimized parameters as dictionary
'''
optimize_test_errfunc = partial(self._errfunc_for_least_squares,
data_new_tested=data_tested,
data_new_dead=data_dead,
cases_bootstrap_indices=tested_indices,
deaths_bootstrap_indices=deaths_indices)
results = sp.optimize.least_squares(optimize_test_errfunc,
p0,
bounds=(
[self.curve_fit_bounds[name][0] for name in self.sorted_names],
[self.curve_fit_bounds[name][1] for name in self.sorted_names]))
print('dir(results) for fit_curve_via_least_squares:')
print(dir(results))
params_as_list = results.x
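# Gauss-Newton approximation: J^T J from the residual Jacobian stands in for the Hessian of the
# squared-error objective, and its inverse gives an approximate (unscaled) parameter covariance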
hess = results.jac.T @ results.jac
cov = np.linalg.inv(hess)
params_as_dict = {key: params_as_list[i] for i, key in enumerate(self.sorted_names)}
return params_as_dict, cov
@staticmethod
def make_PSD(cov):
eigenw, eigenv = np.linalg.eig(cov)
# get rid of negative eigenvalue contributions to make PSD
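# i.e. rebuild cov as the sum over positive eigenvalues of lambda * v v^T, which (for a symmetric
# input) is the nearest positive semi-definite matrix in the Frobenius norm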
cov = np.zeros(cov.shape)
for ind, val in enumerate(eigenw):
vec = eigenv[:, ind]
if val > 0:
tmp_contrib = np.outer(vec, vec)
cov += tmp_contrib * val
return cov
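# Worked example (hypothetical input): make_PSD(np.array([[2.0, 0.0], [0.0, -1.0]])) drops the
# negative-eigenvalue direction and returns [[2.0, 0.0], [0.0, 0.0]]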
def get_covariance_matrix(self, in_params):
p0 = self.convert_params_as_dict_to_list(in_params)
# hess = numdifftools.Hessian(lambda x: np.exp(self.get_log_likelihood(x)))(p0)
hess = numdifftools.Hessian(self.get_log_likelihood)(p0)
# this uses the jacobian approx to the hessian, but I end up with a singular matrix
# jacobian = numdifftools.Jacobian(self.get_log_likelihood)(p0)
# hess = jacobian.T @ jacobian
# eigenw, eigenv = np.linalg.eig(hess)
# print('hess eigenw:')
# print(eigenw)
# print('hess:')
# print(hess)
# hess = self.remove_sigma_entries_from_matrix(hess)
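# Laplace approximation: the covariance is the inverse of the negative Hessian of the log-likelihood
# (the observed information) evaluated at the supplied parameter point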
cov = np.linalg.inv(-hess)
print('p0:')
print(self.convert_params_as_list_to_dict(p0))
print('hess:')
print(hess)
print('cov:')
print(cov)
eigenw, eigenv = np.linalg.eig(cov)
print('orig_eig:')
print(eigenw)
# print('orig diagonal elements of cov:')
# print(self.convert_params_as_list_to_dict(np.diagonal(cov)))
# get rid of negative eigenvalue contributions to make PSD
cov = np.zeros(cov.shape)
for ind, val in enumerate(eigenw):
vec = eigenv[:, ind]
if val > 0:
tmp_contrib = np.outer(vec, vec)
cov += tmp_contrib * val
# cov = self.recover_sigma_entries_from_matrix(cov)
eigenw, eigenv = np.linalg.eig(cov)
# print('new_cov:')
# print(cov)
print('new_eig:')
print(eigenw)
print('new diagonal elements of cov:')
print(self.convert_params_as_list_to_dict(np.diagonal(cov)))
return cov
def fit_curve_via_likelihood(self,
in_params,
tested_indices=None,
deaths_indices=None,
method=None,
print_success=False,
opt_cov=False
):
'''
Given initial parameters, fit the curve by minimizing the negative log likelihood under Gaussian measurement-error PDFs
:param in_params: initial parameters as a dictionary
:param tested_indices: bootstrap indices when applicable
:param deaths_indices: bootstrap indices when applicable
:param method: scipy.optimize.minimize method (defaults to self.optimizer_method)
:param print_success: whether to print the optimizer's success flag
:param opt_cov: whether to also estimate a covariance matrix
:return: optimized parameters as dictionary, plus the covariance estimate (or None)
'''
if method is None:
method = self.optimizer_method
p0 = self.convert_params_as_dict_to_list(in_params)
def get_neg_log_likelihood(p):
return -self.get_log_likelihood(p,
cases_bootstrap_indices=tested_indices,
deaths_bootstrap_indices=deaths_indices
)
bounds_to_use = [self.curve_fit_bounds[name] for name in self.sorted_names]
results = sp.optimize.minimize(get_neg_log_likelihood, p0, bounds=bounds_to_use, method=method)
print('method: ', method)
print('dir(results):', dir(results))
if print_success:
print(f'success? {results.success}')
params_as_list = results.x
params_as_dict = {key: params_as_list[i] for i, key in enumerate(self.sorted_names)}
if opt_cov:
if hasattr(results, 'hess_inv'):
print('Using hessian approx for the covariance!')
cov = results.hess_inv
else:
try:
cov = self.get_covariance_matrix(p0)
print('Re-calculating hessian approx using numdifftools for the covariance!')
except:
print('Error calculating covariance matrix; substituting a near-zero diagonal covariance')
cov = np.diag([1e-12] * len(p0))
else:
cov = None # results.hess_inv # this fella only works for certain methods so avoid for now
# sometimes we will fit to a negative value for observation error due to the symmetry
# (it's only used within a square operation). This is an easy fix:
for param_name in params_as_dict:
if 'sigma' in param_name and params_as_dict[param_name] < 0:
params_as_dict[param_name] *= -1
return params_as_dict, cov
def solve_and_plot_solution(self,
in_params=None,
title=None,
plot_filename_filename='test_plot',
opt_force_plot=False):
'''
Solve ODEs and plot the relevant parts
:param in_params: dictionary of parameters
:param title: optional title for the plot
:param plot_filename_filename: string to add to the plot filename
:param opt_force_plot: render the plot even if the output file already exists
:return: None
'''
if in_params is None:
params = self.all_data_params
else:
params = in_params.copy()
timer = Stopwatch()
for i in range(100):
# run the simulation repeatedly so the timing printout below reports a stable per-run average
sol = self.run_simulation(params)
print(f'time to simulate (ms): {(timer.elapsed_time()) / 100 * 1000}')
new_positive = sol[1]
new_deceased = sol[2]
min_plot_pt = self.burn_in
max_plot_pt = min(len(sol[0]), len(self.series_data) + self.prediction_window + self.burn_in)
data_plot_date_range = [self.min_date + datetime.timedelta(days=1) * i for i in range(len(self.series_data))]
sol_plot_date_range = [self.min_date - datetime.timedelta(days=self.burn_in) + datetime.timedelta(days=1) * i
for i in
range(len(sol[0]))][min_plot_pt:max_plot_pt]
full_output_filename = path.join(self.plot_filename_base, plot_filename_filename)
if self.opt_plot and (not path.exists(full_output_filename) or self.opt_force_plot) or opt_force_plot:
plt.clf()
fig, ax = plt.subplots()
ax.plot(sol_plot_date_range, [sol[0][i] for i in range(min_plot_pt, max_plot_pt)], 'blue',
label='contagious')
min_slice = None
if self.min_sol_date is not None:
for i in range(len(sol_plot_date_range)):
if sol_plot_date_range[i] >= self.min_sol_date:
min_slice = i
break
ax.plot(sol_plot_date_range[slice(min_slice, None)],
new_positive[min_plot_pt: max_plot_pt][slice(min_slice, None)], 'green', label='positive')
ax.plot(sol_plot_date_range[slice(min_slice, None)],
new_deceased[min_plot_pt: max_plot_pt][slice(min_slice, None)], 'red', label='deceased')
ax.plot(data_plot_date_range, self.data_new_tested, '.', color='darkgreen', label='confirmed cases')
ax.plot(data_plot_date_range, self.data_new_dead, '.', color='darkred', label='confirmed deaths')
# this removes the year from the x-axis ticks
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
plt.yscale('log')
plt.xlabel('day')
plt.ylabel('new people each day')
plt.ylim((0.5, max(self.data_new_tested) * 100))
plt.xlim((self.min_date + datetime.timedelta(days=self.day_of_threshold_met_case - 10), None))
plt.legend()
if title is not None:
plt.title(title)
plt.savefig(full_output_filename, dpi=self.plot_dpi)
plt.close()
# for i in range(len(sol)):
# print(f'index: {i}, odeint_value: {sol[i]}, real_value: {[None, series_data[i]]}')
def plot_all_solutions(self, n_samples=None, approx_type=ApproxType.BS, mvn_fit=False, n_sols_to_plot=1000,
offset=0):
'''
Plot all the bootstrap simulation solutions
:param n_sols_to_plot: how many simulations should we sample for the plot?
:return: None
'''
if n_samples is None:
n_samples = self.n_samples
if offset == 0:
offset_str = ''
else:
offset_str = f'_offset_{offset}_days'
key = approx_type.value[1]
if mvn_fit:
key = 'MVN_' + key
output_filename = f'{key}{offset_str}_solutions_discrete.png'
output_filename2 = f'{key}{offset_str}_solutions_filled_quantiles.png'
output_filename3 = f'{key}{offset_str}_solutions_cumulative_discrete.png'
output_filename4 = f'{key}{offset_str}_solutions_cumulative_filled_quantiles.png'
if not self.opt_plot or (all(path.exists(path.join(self.plot_filename_base, x)) for x in \
[output_filename, output_filename2, output_filename3,
output_filename4]) and not self.opt_force_plot):
return
params, _, _, log_probs = self.get_weighted_samples(approx_type=approx_type, mvn_fit=mvn_fit)
param_inds_to_plot = list(range(len(params)))
# Put this in to diagnose plotting
# print(f'\nDiagnosing in plot_all_solutions for approx_type {approx_type}...')
# distro_list = list()
# for param_ind in range(len(params[0])):
# distro_list.append([params[i][param_ind] for i in range(len(params))])
# print(f'{self.sorted_names[param_ind]}: Mean: {np.average(distro_list[param_ind]):.4g}, Std.: {np.std(distro_list[param_ind]):.4g}')
print(f'Rendering solutions for {key}...')
param_inds_to_plot = np.random.choice(param_inds_to_plot, min(n_samples, len(param_inds_to_plot)),
replace=False)
sols_to_plot = [self.run_simulation(in_params=params[param_ind], offset=offset) for param_ind in
tqdm(param_inds_to_plot)]
data_plot_kwargs = {'markersize': 6, 'markeredgewidth': 0.5, 'markeredgecolor': 'black'}
if not self.opt_simplified:
self._plot_all_solutions_sub_distinct_lines_with_alpha(sols_to_plot,
plot_filename_filename=output_filename,
data_plot_kwargs=data_plot_kwargs,
offset=offset)
self._plot_all_solutions_sub_filled_quantiles(sols_to_plot,
plot_filename_filename=output_filename2,
data_plot_kwargs=data_plot_kwargs,
offset=offset)
if not self.opt_simplified:
self._plot_all_solutions_sub_distinct_lines_with_alpha_cumulative(sols_to_plot,
plot_filename_filename=output_filename3,
data_plot_kwargs=data_plot_kwargs)
self._plot_all_solutions_sub_filled_quantiles_cumulative(sols_to_plot,
plot_filename_filename=output_filename4,
data_plot_kwargs=data_plot_kwargs)
def _plot_all_solutions_sub_filled_quantiles(self,
sols_to_plot,
plot_filename_filename=None,
data_markersize=36,
data_plot_kwargs=dict(),
offset=0):
'''
Helper function to plot_all_solutions
:param n_sols_to_plot: how many simulations should we sample for the plot?
:param plot_filename_filename: string to add to the plot filename
:return: None
'''
full_output_filename = path.join(self.plot_filename_base, plot_filename_filename)
if path.exists(full_output_filename) and not self.opt_force_plot or not self.opt_plot or len(sols_to_plot) == 0:
return
print('Printing...', path.join(self.plot_filename_base, plot_filename_filename))
sol = sols_to_plot[0]
fig, ax = plt.subplots()
min_plot_pt = self.burn_in
max_plot_pt = min(len(sol[0]), len(self.series_data) + self.prediction_window + self.burn_in)
data_plot_date_range = [self.min_date + datetime.timedelta(days=1) * i for i in
range(len(self.data_new_tested))]
sol_plot_date_range = [self.min_date - datetime.timedelta(days=self.burn_in) + datetime.timedelta(
days=1) * i for i in
range(len(sol[0]))][min_plot_pt:max_plot_pt]
map_t_val_ind_to_tested_distro = dict()
map_t_val_ind_to_deceased_distro = dict()
for sol in sols_to_plot:
new_tested = sol[1]
# cum_tested = np.cumsum(new_tested)
new_dead = sol[2]
# cum_dead = np.cumsum(new_dead)
for val_ind, val in enumerate(self.t_vals):
if val_ind not in map_t_val_ind_to_tested_distro:
map_t_val_ind_to_tested_distro[val_ind] = list()
if np.isfinite(new_tested[val_ind]):
map_t_val_ind_to_tested_distro[val_ind].append(new_tested[val_ind])
if val_ind not in map_t_val_ind_to_deceased_distro:
map_t_val_ind_to_deceased_distro[val_ind] = list()
if np.isfinite(new_dead[val_ind]):
map_t_val_ind_to_deceased_distro[val_ind].append(new_dead[val_ind])
def safe_percentile(in_list, percent):
if len(in_list) == 0:
return 0
else:
return np.percentile(in_list, percent)
p5_curve = [safe_percentile(map_t_val_ind_to_deceased_distro[val_ind], 5) for val_ind in
range(len(self.t_vals))]
p25_curve = [safe_percentile(map_t_val_ind_to_deceased_distro[val_ind], 25) for val_ind in
range(len(self.t_vals))]
p50_curve = [safe_percentile(map_t_val_ind_to_deceased_distro[val_ind], 50) for val_ind in
range(len(self.t_vals))]
p75_curve = [safe_percentile(map_t_val_ind_to_deceased_distro[val_ind], 75) for val_ind in
range(len(self.t_vals))]
p95_curve = [safe_percentile(map_t_val_ind_to_deceased_distro[val_ind], 95) for val_ind in
range(len(self.t_vals))]
min_slice = None
if self.min_sol_date is not None:
for i in range(len(sol_plot_date_range)):
if sol_plot_date_range[i] >= self.min_sol_date - datetime.timedelta(days=offset):
min_slice = i
break
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p5_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p95_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('red', alpha=0.3),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p25_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p75_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('red', alpha=0.6),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.plot(sol_plot_date_range[slice(min_slice, None)], p50_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
color="darkred")
p5_curve = [safe_percentile(map_t_val_ind_to_tested_distro[val_ind], 5) for val_ind in range(len(self.t_vals))]
p25_curve = [safe_percentile(map_t_val_ind_to_tested_distro[val_ind], 25) for val_ind in
range(len(self.t_vals))]
p50_curve = [safe_percentile(map_t_val_ind_to_tested_distro[val_ind], 50) for val_ind in
range(len(self.t_vals))]
p75_curve = [safe_percentile(map_t_val_ind_to_tested_distro[val_ind], 75) for val_ind in
range(len(self.t_vals))]
p95_curve = [safe_percentile(map_t_val_ind_to_tested_distro[val_ind], 95) for val_ind in
range(len(self.t_vals))]
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p5_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p95_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('green', alpha=0.3),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p25_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p75_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('green', alpha=0.6),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.plot(sol_plot_date_range[slice(min_slice, None)], p50_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
color="darkgreen")
ax.plot(data_plot_date_range, self.data_new_tested, '.', color='darkgreen', label='Infections',
**data_plot_kwargs)
ax.plot(data_plot_date_range, self.data_new_dead, '.', color='darkred', label='Deaths', **data_plot_kwargs)
fig.autofmt_xdate()
# this removes the year from the x-axis ticks
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
# ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.yscale('log')
plt.ylabel('Daily Reported Counts')
plt.xlim((self.min_date + datetime.timedelta(days=self.day_of_threshold_met_case - 10), None))
plt.ylim((0.1, max(self.data_new_tested) * 100))
plt.legend()
# plt.title(f'{state} Data (points) and Model Predictions (lines)')
plt.savefig(full_output_filename, dpi=self.plot_dpi)
plt.close()
def _plot_all_solutions_sub_filled_quantiles_cumulative(self,
sols_to_plot,
plot_filename_filename=None,
opt_predict=True,
data_plot_kwargs=dict()):
'''
Helper function to plot_all_solutions
:param n_sols_to_plot: how many simulations should we sample for the plot?
:param plot_filename_filename: string to add to the plot filename
:return: None
'''
full_output_filename = path.join(self.plot_filename_base, plot_filename_filename)
if path.exists(full_output_filename) and not self.opt_force_plot or not self.opt_plot or len(sols_to_plot) == 0:
return
print('Printing...', path.join(self.plot_filename_base, plot_filename_filename))
sol = sols_to_plot[0]
fig, ax = plt.subplots()
min_plot_pt = self.burn_in
max_plot_pt = min(len(sol[0]), len(self.series_data) + self.prediction_window + self.burn_in)
data_plot_date_range = [self.min_date + datetime.timedelta(days=1) * i for i in
range(len(self.data_new_tested))]
sol_plot_date_range = [self.min_date - datetime.timedelta(days=self.burn_in) + datetime.timedelta(
days=1) * i for i in
range(len(sol[0]))][min_plot_pt:max_plot_pt]
map_t_val_ind_to_tested_distro = dict()
map_t_val_ind_to_deceased_distro = dict()
for sol in sols_to_plot:
if opt_predict:
start_ind_sol = len(self.data_new_tested) + self.burn_in
else:
start_ind_sol = len(self.data_new_tested) + self.burn_in - self.moving_window_size
start_ind_data = start_ind_sol - 1 - self.burn_in
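# build cumulative curves: accumulate the simulated daily counts from the prediction start and anchor
# them to the cumulative totals observed in the data on the preceding day (start_ind_data)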
tested = [max(sol[1][i] - self.log_offset, 0) for i in range(len(sol[1]))]
tested_range = np.cumsum(tested[start_ind_sol:])
dead = [max(sol[2][i] - self.log_offset, 0) for i in range(len(sol[2]))]
dead_range = np.cumsum(dead[start_ind_sol:])
data_tested_at_start = np.cumsum(self.data_new_tested)[start_ind_data]
data_dead_at_start = np.cumsum(self.data_new_dead)[start_ind_data]
tested = [0] * start_ind_sol + [data_tested_at_start + tested_val for tested_val in tested_range]
dead = [0] * start_ind_sol + [data_dead_at_start + dead_val for dead_val in dead_range]
for val_ind, val in enumerate(self.t_vals):
if val_ind not in map_t_val_ind_to_tested_distro:
map_t_val_ind_to_tested_distro[val_ind] = list()
if np.isfinite(tested[val_ind]):
map_t_val_ind_to_tested_distro[val_ind].append(tested[val_ind])
else:
map_t_val_ind_to_tested_distro[val_ind].append(0)
if val_ind not in map_t_val_ind_to_deceased_distro:
map_t_val_ind_to_deceased_distro[val_ind] = list()
if np.isfinite(dead[val_ind]):
map_t_val_ind_to_deceased_distro[val_ind].append(dead[val_ind])
else:
map_t_val_ind_to_deceased_distro[val_ind].append(0)
p5_curve = [np.percentile(map_t_val_ind_to_deceased_distro[val_ind], 5) for val_ind in range(len(self.t_vals))]
p25_curve = [np.percentile(map_t_val_ind_to_deceased_distro[val_ind], 25) for val_ind in
range(len(self.t_vals))]
p50_curve = [np.percentile(map_t_val_ind_to_deceased_distro[val_ind], 50) for val_ind in
range(len(self.t_vals))]
p75_curve = [np.percentile(map_t_val_ind_to_deceased_distro[val_ind], 75) for val_ind in
range(len(self.t_vals))]
p95_curve = [np.percentile(map_t_val_ind_to_deceased_distro[val_ind], 95) for val_ind in
range(len(self.t_vals))]
min_slice = None
if opt_predict:
use_min_sol_date = self.max_date
else:
use_min_sol_date = self.min_sol_date
if use_min_sol_date is not None:
for i in range(len(sol_plot_date_range)):
if sol_plot_date_range[i] >= use_min_sol_date:
min_slice = i
break
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p5_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p95_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('red', alpha=0.3),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p25_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p75_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('red', alpha=0.6),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.plot(sol_plot_date_range[slice(min_slice, None)], p50_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
color="darkred")
p5_curve = [np.percentile(map_t_val_ind_to_tested_distro[val_ind], 5) for val_ind in range(len(self.t_vals))]
p25_curve = [np.percentile(map_t_val_ind_to_tested_distro[val_ind], 25) for val_ind in
range(len(self.t_vals))]
p50_curve = [np.percentile(map_t_val_ind_to_tested_distro[val_ind], 50) for val_ind in
range(len(self.t_vals))]
p75_curve = [np.percentile(map_t_val_ind_to_tested_distro[val_ind], 75) for val_ind in
range(len(self.t_vals))]
p95_curve = [np.percentile(map_t_val_ind_to_tested_distro[val_ind], 95) for val_ind in
range(len(self.t_vals))]
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p5_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p95_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('green', alpha=0.3),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.fill_between(sol_plot_date_range[slice(min_slice, None)],
p25_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
p75_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
facecolor=matplotlib.colors.colorConverter.to_rgba('green', alpha=0.6),
edgecolor=(0, 0, 0, 0) # get rid of the darker edge
)
ax.plot(sol_plot_date_range[slice(min_slice, None)], p50_curve[min_plot_pt:max_plot_pt][slice(min_slice, None)],
color="darkgreen")
ax.plot(data_plot_date_range, np.cumsum(self.data_new_tested), '.', color='darkgreen', label='Infections',
**data_plot_kwargs)
ax.plot(data_plot_date_range, np.cumsum(self.data_new_dead), '.', color='darkred', label='Deaths',
**data_plot_kwargs)
fig.autofmt_xdate()
# this removes the year from the x-axis ticks
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
# ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.yscale('log')
plt.ylabel('Cumulative Reported Counts')
plt.xlim((self.min_date + datetime.timedelta(days=self.day_of_threshold_met_case - 10), None))
plt.ylim((1, sum(self.data_new_tested) * 100))
plt.legend()
# plt.title(f'{state} Data (points) and Model Predictions (lines)')
plt.savefig(full_output_filename, dpi=self.plot_dpi)
plt.close()
def _plot_all_solutions_sub_distinct_lines_with_alpha(self,
sols_to_plot,
n_sols_to_plot=1000,
plot_filename_filename=None,
data_plot_kwargs=dict(),
offset=0):
'''
Helper function to plot_all_solutions
:param n_sols_to_plot: how many simulations should we sample for the plot?
:param plot_filename_filename: string to add to the plot filename
:return: None
'''
if len(sols_to_plot) == 0:
return
full_output_filename = path.join(self.plot_filename_base, plot_filename_filename)
if path.exists(full_output_filename) and not self.opt_force_plot or not self.opt_plot:
return
if n_sols_to_plot > len(sols_to_plot):
n_sols_to_plot = len(sols_to_plot)
sols_to_plot = [sols_to_plot[i] for i in np.random.choice(len(sols_to_plot), n_sols_to_plot, replace=False)]
print('Printing...', path.join(self.plot_filename_base, plot_filename_filename))
sol = sols_to_plot[0]
n_sols = len(sols_to_plot)
fig, ax = plt.subplots()
min_plot_pt = self.burn_in
max_plot_pt = min(len(sol[0]), len(self.series_data) + self.prediction_window + self.burn_in)
data_plot_date_range = [self.min_date + datetime.timedelta(days=1) * i for i in
range(len(self.data_new_tested))]
for sol in sols_to_plot:
new_tested = sol[1]
# cum_tested = np.cumsum(new_tested)
new_dead = sol[2]
# cum_dead = np.cumsum(new_dead)
sol_plot_date_range = [self.min_date - datetime.timedelta(days=self.burn_in) + datetime.timedelta(
days=1) * i for i in
range(len(sol[0]))][min_plot_pt:max_plot_pt]
# ax.plot(plot_date_range[min_plot_pt:], [(sol[i][0]) for i in range(min_plot_pt, len(sol[0))], 'b', alpha=0.1)
# ax.plot(plot_date_range[min_plot_pt:max_plot_pt], [(sol[i][1]) for i in range(min_plot_pt, max_plot_pt)], 'g', alpha=0.1)
min_slice = None
if self.min_sol_date is not None:
for i in range(len(sol_plot_date_range)):
if sol_plot_date_range[i] >= self.min_sol_date - datetime.timedelta(days=offset):
min_slice = i
break
ax.plot(sol_plot_date_range[slice(min_slice, None)],
new_tested[min_plot_pt:max_plot_pt][slice(min_slice, None)], 'g',
alpha=5 / n_sols)
ax.plot(sol_plot_date_range[slice(min_slice, None)],
new_dead[min_plot_pt:max_plot_pt][slice(min_slice, None)], 'r',
alpha=5 / n_sols)
ax.plot(data_plot_date_range, self.data_new_tested, '.', color='darkgreen', label='Infections',
**data_plot_kwargs)
ax.plot(data_plot_date_range, self.data_new_dead, '.', color='darkred', label='Deaths', **data_plot_kwargs)
fig.autofmt_xdate()
# this removes the year from the x-axis ticks
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
# ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.yscale('log')
plt.ylabel('Daily Reported Counts')
plt.xlim((self.min_date + datetime.timedelta(days=self.day_of_threshold_met_case - 10), None))
plt.ylim((0.1, max(self.data_new_tested) * 100))
plt.legend()
# plt.title(f'{state} Data (points) and Model Predictions (lines)')
plt.savefig(full_output_filename, dpi=self.plot_dpi)
plt.close()
def _plot_all_solutions_sub_distinct_lines_with_alpha_cumulative(self,
sols_to_plot,
n_sols_to_plot=1000,
plot_filename_filename=None,
opt_predict=True,
data_plot_kwargs=dict()):
'''
Helper function to plot_all_solutions
:param n_sols_to_plot: how many simulations should we sample for the plot?
:param plot_filename_filename: string to add to the plot filename
:return: None
'''
full_output_filename = path.join(self.plot_filename_base, plot_filename_filename)
if path.exists(full_output_filename) and not self.opt_force_plot or not self.opt_plot or len(sols_to_plot) == 0:
return
if n_sols_to_plot > len(sols_to_plot):
n_sols_to_plot = len(sols_to_plot)
sols_to_plot = [sols_to_plot[i] for i in np.random.choice(len(sols_to_plot), n_sols_to_plot, replace=False)]
print('Printing...', path.join(self.plot_filename_base, plot_filename_filename))
sol = sols_to_plot[0]
n_sols = len(sols_to_plot)
fig, ax = plt.subplots()
min_plot_pt = self.burn_in
max_plot_pt = min(len(sol[0]), len(self.series_data) + self.prediction_window + self.burn_in)
data_plot_date_range = [self.min_date + datetime.timedelta(days=1) * i for i in
range(len(self.data_new_tested))]
for sol in sols_to_plot:
if opt_predict:
start_ind_sol = len(self.data_new_tested) + self.burn_in
else:
start_ind_sol = len(self.data_new_tested) + self.burn_in - self.moving_window_size
start_ind_data = start_ind_sol - 1 - self.burn_in
tested = [max(sol[1][i] - self.log_offset, 0) for i in range(len(sol[1]))]
tested_range = np.cumsum(tested[start_ind_sol:])
dead = [max(sol[2][i] - self.log_offset, 0) for i in range(len(sol[2]))]
dead_range = np.cumsum(dead[start_ind_sol:])
data_tested_at_start = np.cumsum(self.data_new_tested)[start_ind_data]
data_dead_at_start = np.cumsum(self.data_new_dead)[start_ind_data]
tested = [0] * start_ind_sol + [data_tested_at_start + tested_val for tested_val in tested_range]
dead = [0] * start_ind_sol + [data_dead_at_start + dead_val for dead_val in dead_range]
sol_plot_date_range = [self.min_date - datetime.timedelta(days=self.burn_in) + datetime.timedelta(
days=1) * i for i in
range(len(sol[0]))][min_plot_pt:max_plot_pt]
# ax.plot(plot_date_range[min_plot_pt:], [(sol[i][0]) for i in range(min_plot_pt, len(sol[0))], 'b', alpha=0.1)
# ax.plot(plot_date_range[min_plot_pt:max_plot_pt], [(sol[i][1]) for i in range(min_plot_pt, max_plot_pt)], 'g', alpha=0.1)
min_slice = None
if opt_predict:
use_min_sol_date = self.max_date
else:
use_min_sol_date = self.min_sol_date
if use_min_sol_date is not None:
for i in range(len(sol_plot_date_range)):
if sol_plot_date_range[i] >= use_min_sol_date:
min_slice = i
break
ax.plot(sol_plot_date_range[slice(min_slice, None)],
tested[min_plot_pt:max_plot_pt][slice(min_slice, None)], 'g',
alpha=5 / n_sols)
ax.plot(sol_plot_date_range[slice(min_slice, None)],
dead[min_plot_pt:max_plot_pt][slice(min_slice, None)], 'r',
alpha=5 / n_sols)
ax.plot(data_plot_date_range, np.cumsum(self.data_new_tested), '.', color='darkgreen', label='Infections',
**data_plot_kwargs)
ax.plot(data_plot_date_range, np.cumsum(self.data_new_dead), '.', color='darkred', label='Deaths',
**data_plot_kwargs)
fig.autofmt_xdate()
# this removes the year from the x-axis ticks
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
# ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.yscale('log')
plt.ylabel('Cumulative Reported Counts')
plt.xlim((self.min_date + datetime.timedelta(days=self.day_of_threshold_met_case - 10), None))
plt.ylim((1, sum(self.data_new_tested) * 100))
plt.legend()
# plt.title(f'{state} Data (points) and Model Predictions (lines)')
plt.savefig(full_output_filename, dpi=self.plot_dpi)
plt.close()
@staticmethod
def norm_2d(xv, yv, mu=(0, 0), sigma=(1, 1)):
arg = -((xv - mu[0]) ** 2 / sigma[0] + (yv - mu[1]) ** 2 / sigma[1])
vals = np.exp(arg)
return vals
def Emcee(self):
# from https://emcee.readthedocs.io/en/stable/tutorials/line/
n_walkers = 50
n_steps = 5000
scale_param = 1000
filename_str = f'Emcee_{n_walkers}_walkers_{n_steps}_steps_{scale_param}_scale_param'
success = False
try:
print(f'loading from {self.likelihood_samples_filename_format_str.format(filename_str)}...')
tmp_dict = joblib.load(self.likelihood_samples_filename_format_str.format(filename_str))
model = tmp_dict['model']
means_as_list = tmp_dict['means_as_list']
cov = tmp_dict['cov']
success = True
print('...done!')
except:
print('...load failed!... doing calculations...')
if not success:
def emcee_ll(*args):
return self.get_log_likelihood(args[0])
# starting point is a ball around MLE
starting_point = np.array([self.convert_params_as_dict_to_list(self.all_data_params)] * n_walkers)
n_params = len(self.all_data_params)
print('starting with:')
self.pretty_print_params(self.all_data_params)
print('starting_point.shape', starting_point.shape)
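# initialize the walkers in a tight ball around the all-data MLE: relative jitter for log-scale
# parameters, absolute jitter otherwise, with scale_param controlling the spread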
for param_ind, param_name in enumerate(self.sorted_names):
if param_name in self.logarithmic_params:
std_err = self.all_data_params[param_name] / scale_param
else:
std_err = 1 / scale_param
starting_point[:, param_ind] = sp.stats.norm(loc=self.all_data_params[param_name], scale=std_err).rvs(
n_walkers)
C = starting_point - np.mean(starting_point, axis=0)[None, :]
# print(C.T)
print(f'Condition number: {np.linalg.cond(C.astype(float))}')
sampler = emcee.EnsembleSampler(n_walkers, n_params, emcee_ll)
print(self.convert_params_as_dict_to_list(self.all_data_params))
sampler.run_mcmc(starting_point, n_steps, progress=True)
print('Autocorrelation:')
try:
print(sampler.get_autocorr_time())
except:
print("...Whoops on the autocorrelation time!")
samples = sampler.get_chain(flat=True)
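# the flat chain is ordered by step, so dropping its first half discards roughly the first half of
# every walker's trajectory as burn-in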
burn_in = int(len(samples) * 0.5)
samples = samples[burn_in:]
means_as_list = np.average(samples, axis=0)
std_errs_as_list = np.std(samples, axis=0)
# a covariance matrix carries variances (squared standard errors) on its diagonal
cov = np.diag(np.power(std_errs_as_list, 2))
model = sp.stats.multivariate_normal(
mean=means_as_list,
cov=cov, allow_singular=True)
tmp_dict = dict()
tmp_dict['model'] = model
tmp_dict['means_as_list'] = means_as_list
tmp_dict['cov'] = cov
print(f'saving bootstraps to {self.likelihood_samples_filename_format_str.format(filename_str)}...')
joblib.dump(tmp_dict, self.likelihood_samples_filename_format_str.format(filename_str))
print('...done!')
self.map_approx_type_to_model[ApproxType.Emcee] = model
self.map_approx_type_to_means[ApproxType.Emcee] = means_as_list
self.map_approx_type_to_cov[ApproxType.Emcee] = cov
def render_all_data_fit(self,
passed_params=None,
method='curve_fit', # orig, curve_fit
):
'''
Compute the all-data MLE/MAP
:param passed_params: dictionary of parameters to replace the usual fit, if necessary
:return: None
'''
success = False
try:
all_data_dict = joblib.load(self.all_data_fit_filename)
all_data_params = all_data_dict['all_data_params']
all_data_sol = all_data_dict['all_data_sol']
all_data_cov = all_data_dict['all_data_cov']
all_data_params_for_sigma = all_data_dict['all_data_params_for_sigma']
all_data_cov_for_sigma = all_data_dict['all_data_cov_for_sigma']
success = True
self.loaded_all_data_fit = True
except:
self.loaded_all_data_fit = False
if passed_params is not None:
all_data_params = passed_params
all_data_sol = self.run_simulation(passed_params)
all_data_cov = self.get_covariance_matrix(passed_params)
if (not success and self.opt_calc and passed_params is None) or self.opt_force_calc:
print('\n----\nRendering all-data model fits... \n----')
# This is kind of a kludge, I find more reliable fits with fit_curve_exactly_with_jitter
# But it doesn't fit the observation error, which I need for likelihood samples
# So I use it to fit everything BUT observation error, then insert the test_params entries for the sigmas,
# and re-fit using the jankier (via_likelihood) method that fits the observation error
# TODO: make the sigma substitutions empirical, rather than hacky the way I've done it
print('Employing Scipy\'s curve_fit method...')
if passed_params is None:
print('Starting with test parameters:')
self.pretty_print_params(self.test_params, opt_log_likelihood=True)
test_params_as_list = [self.test_params[key] for key in self.sorted_names]
else:
print('Starting with passed parameters:')
self.pretty_print_params(passed_params, opt_log_likelihood=True)
test_params_as_list = [self.convert_params_as_list_to_dict(passed_params)[key] for key in
self.sorted_names]
try:
all_data_params, all_data_cov = self.fit_curve_via_curve_fit(test_params_as_list)
except:
all_data_params, all_data_cov = self.fit_curve_via_likelihood(test_params_as_list,
print_success=True,
opt_cov=True)
print('refitting all-data params to get sigma values')
all_data_params_for_sigma, all_data_cov_for_sigma = self.fit_curve_via_likelihood(all_data_params,
print_success=True,
opt_cov=True)
print('\nOrig params:')
self.pretty_print_params(all_data_params, opt_log_likelihood=True)
print('\nRe-fit params for sigmas:')
self.pretty_print_params(all_data_params_for_sigma, opt_log_likelihood=True)
for ind, name in enumerate(self.sorted_names):
if 'sigma' in name:
all_data_cov[ind, :] = all_data_cov_for_sigma[ind, :]
all_data_cov[:, ind] = all_data_cov_for_sigma[:, ind]
all_data_cov = self.make_PSD(all_data_cov)
print('\nOrig std-errs:')
self.pretty_print_params(np.sqrt(np.diagonal(all_data_cov)))
self.plot_correlation_matrix(self.cov2corr(all_data_cov), filename_str='curve_fit')
print('\nRe-fit std-errs for sigmas:')
self.pretty_print_params(np.sqrt(np.diagonal(all_data_cov_for_sigma)))
self.plot_correlation_matrix(self.cov2corr(all_data_cov_for_sigma), filename_str='numdifftools')
for key in all_data_params:
if 'sigma' in key:
print(f'Stealing value for {key}: {all_data_params_for_sigma[key]}')
all_data_params[key] = all_data_params_for_sigma[key]
print('\nscipy.curve_fit params:')
self.pretty_print_params(all_data_params, opt_log_likelihood=True)
if ApproxType.SP_min in self.model_approx_types:
self.map_approx_type_to_means[ApproxType.SP_min] = self.convert_params_as_dict_to_list(
all_data_params_for_sigma)
self.map_approx_type_to_cov[ApproxType.SP_min] = all_data_cov_for_sigma
self.map_approx_type_to_model[ApproxType.SP_min] = sp.stats.multivariate_normal(
mean=self.convert_params_as_dict_to_list(all_data_params_for_sigma),
cov=all_data_cov_for_sigma, allow_singular=True)
print('\nscipy.minimize params:')
self.pretty_print_params(all_data_params_for_sigma, opt_log_likelihood=True)
if ApproxType.SP_LS in self.model_approx_types:
all_data_params_leastsq, cov_leastsq = self.fit_curve_via_least_squares(
self.convert_params_as_dict_to_list(self.test_params))
self.map_approx_type_to_means[ApproxType.SP_LS] = self.convert_params_as_dict_to_list(
all_data_params_leastsq)
self.map_approx_type_to_cov[ApproxType.SP_LS] = cov_leastsq
self.map_approx_type_to_model[ApproxType.SP_LS] = sp.stats.multivariate_normal(
mean=self.convert_params_as_dict_to_list(all_data_params_leastsq),
cov=cov_leastsq, allow_singular=True)
print('\nscipy.least_squares params:')
self.pretty_print_params(all_data_params_leastsq)
all_data_sol = self.run_simulation(all_data_params)
print('\nParameters when trained on all data (this is our starting point for optimization):')
self.pretty_print_params(all_data_params, opt_log_likelihood=True)
# methods = [
# # 'Nelder-Mead', # ok results, but claims it failed
# # 'Powell', #warnings, bad answer
# # 'CG', #warnings, never stopped
# 'BFGS', # love this one, and it gives you hess_inv!
# # 'Newton-CG', #can't do bounds, failed
# # 'L-BFGS-B', #failed, bad results
# # 'TNC', #bad results
# # 'COBYLA', #failed, bad results
# # 'SLSQP', # failed, bad results
# # 'trust-constr', #warnings, never stopped
# # 'dogleg', #bad results, can't do bounds
# # 'trust-ncg', #failed, can't do bounds
# # 'trust-exact', #failed
# # 'trust-krylov' # failed
# ]
#
# for method in methods:
# print('\ntrying method', method)
# try:
# test_params_as_list = [self.test_params[key] for key in self.sorted_names]
# all_data_params2, cov = self.fit_curve_via_likelihood(test_params_as_list,
# method=method, print_success=True)
# # all_data_params2, _ = self.fit_curve_via_least_squares(test_params_as_list,
# # method=method, print_success=True)
# self.pretty_print_params(all_data_params2, opt_log_likelihood=True)
# except:
# print(f'method {method} failed!')
# Add deterministic parameters to all-data solution
for extra_param, extra_param_func in self.extra_params.items():
all_data_params[extra_param] = extra_param_func(
[all_data_params[name] for name in self.sorted_names])
print(f'saving bootstraps to {self.all_data_fit_filename}...')
joblib.dump({'all_data_sol': all_data_sol, 'all_data_params': all_data_params, 'all_data_cov': all_data_cov,
'all_data_params_for_sigma': all_data_params_for_sigma,
'all_data_cov_for_sigma': all_data_cov_for_sigma},
self.all_data_fit_filename)
print('...done!')
all_data_ll = self.get_log_likelihood(all_data_params)
all_data_for_sigma_ll = self.get_log_likelihood(all_data_params_for_sigma)
if all_data_for_sigma_ll > all_data_ll:
self.discovered_MLE_params.append(all_data_params_for_sigma)
self.all_data_params = all_data_params_for_sigma
self.all_data_sol = all_data_sol
self.all_data_cov = all_data_cov_for_sigma
else:
self.all_data_params = all_data_params
self.all_data_sol = all_data_sol
self.all_data_cov = all_data_cov
self.map_approx_type_to_means[ApproxType.SP_CF] = self.convert_params_as_dict_to_list(all_data_params)
self.map_approx_type_to_cov[ApproxType.SP_CF] = all_data_cov
self.map_approx_type_to_model[ApproxType.SP_CF] = sp.stats.multivariate_normal(
mean=self.convert_params_as_dict_to_list(all_data_params),
cov=all_data_cov, allow_singular=True)
self.map_approx_type_to_means[ApproxType.SP_min] = self.convert_params_as_dict_to_list(
all_data_params_for_sigma)
self.map_approx_type_to_cov[ApproxType.SP_min] = all_data_cov
self.map_approx_type_to_model[ApproxType.SP_min] = sp.stats.multivariate_normal(
mean=self.convert_params_as_dict_to_list(all_data_params_for_sigma),
cov=all_data_cov_for_sigma, allow_singular=True)
def render_additional_covariance_approximations(self, all_data_params):
# Get other covariance approximations here
if ApproxType.NDT_Hess in self.model_approx_types:
approx_type = ApproxType.NDT_Hess
try:
print(f'Trying {approx_type}...')
means = self.convert_params_as_dict_to_list(all_data_params)
hess = numdifftools.Hessian(self.get_log_likelihood)(means)
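# prune rows/columns where the numerical Hessian is non-finite (typically the sigma entries), invert
# on the remaining block, then pad the pruned entries back in below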
good_inds = list()
bad_inds = list(range(hess.shape[0]))
for i in range(hess.shape[0]):
if np.isfinite(hess[i, i]):
good_inds.append(i)
bad_inds.remove(i)
hess = hess[good_inds, :]
hess = hess[:, good_inds]
print('hess:', hess.shape, hess)
cov = np.linalg.inv(-hess)
cov = self.make_PSD(cov)
cov = self.recover_sigma_entries_from_matrix(cov, sigma_inds=bad_inds)
print('cov:', cov.shape, cov)
self.plot_correlation_matrix(self.cov2corr(cov), filename_str=approx_type.value[1])
self.map_approx_type_to_means[approx_type] = means
self.map_approx_type_to_cov[approx_type] = cov
self.map_approx_type_to_model[approx_type] = sp.stats.multivariate_normal(mean=means,
cov=cov, allow_singular=True)
except Exception as ee: # except Exception as ee:
print('Error with', approx_type)
print(' error:', ee)
def jacobian_func(all_data_params):
all_data_params_as_dict = self.convert_params_as_list_to_dict(all_data_params)
positive_dists, deceased_dists, other_errs, sol, positive_vals, deceased_vals, \
predicted_tested, actual_tested, predicted_dead, actual_dead = self._get_log_likelihood_precursor(
all_data_params)
scaled_positive_dists = [x / all_data_params_as_dict['sigma_positive'] for x in positive_dists]
scaled_deceased_dists = [x / all_data_params_as_dict['sigma_deceased'] for x in deceased_dists]
return np.array(scaled_positive_dists + scaled_deceased_dists + other_errs)
if ApproxType.NDT_Jac in self.model_approx_types:
approx_type = ApproxType.NDT_Jac
jac = numdifftools.Jacobian(jacobian_func)(means)
print('jac:', jac.shape, jac)
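# Gauss-Newton / Fisher-information approximation: with residuals scaled by their sigmas, J^T J
# approximates the Hessian of the negative log-likelihood, so its inverse approximates the covariance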
hess = jac.T @ jac
# hess = self.remove_sigma_entries_from_matrix(hess)
print('hes:', hess.shape, hess)
cov = np.linalg.inv(hess)
# cov = self.recover_sigma_entries_from_matrix(cov)
print('cov:', cov.shape, cov)
self.plot_correlation_matrix(self.cov2corr(cov), filename_str=approx_type.value[1])
self.map_approx_type_to_means[approx_type] = means
self.map_approx_type_to_cov[approx_type] = cov
self.map_approx_type_to_model[approx_type] = sp.stats.multivariate_normal(mean=means,
cov=cov, allow_singular=True)
def render_bootstraps(self):
'''
Compute the bootstrap solutions
:return: None
'''
success = False
try:
bootstrap_dict = joblib.load(self.bootstrap_filename)
bootstrap_sols = bootstrap_dict['bootstrap_sols']
bootstrap_params = bootstrap_dict['bootstrap_params']
success = True
self.loaded_bootstraps = True
except:
self.loaded_bootstraps = False
# TODO: Break out all-data fit to its own method, not embedded in render_bootstraps
if (not success and self.opt_calc) or self.opt_force_calc:
bootstrap_sols = list()
bootstrap_params = list()
print('\n----\nRendering bootstrap model fits... now going through bootstraps...\n----')
for bootstrap_ind in tqdm(range(self.n_bootstraps)):
# get bootstrap indices by concatenating cases and deaths
bootstrap_tuples = [('cases', x) for x in self.cases_indices] + [('deaths', x) for x in
self.deaths_indices]
bootstrap_indices_tuples = np.random.choice(list(range(len(bootstrap_tuples))), len(bootstrap_tuples),
replace=True)
cases_bootstrap_indices = [bootstrap_tuples[i][1] for i in bootstrap_indices_tuples if
bootstrap_tuples[i][0] == 'cases']
deaths_bootstrap_indices = [bootstrap_tuples[i][1] for i in bootstrap_indices_tuples if
bootstrap_tuples[i][0] == 'deaths']
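# the resampled tuples are split back into separate case and death index lists so each series only
# receives bootstrap draws from its own observations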
# here is where we select the all-data parameters as our starting point
tmp_params = self.all_data_params
starting_point_as_list = [tmp_params[key] for key in self.sorted_names]
params_as_dict, cov = self.fit_curve_via_likelihood(starting_point_as_list,
# data_tested=tested_jitter,
# data_dead=dead_jitter,
tested_indices=cases_bootstrap_indices,
deaths_indices=deaths_bootstrap_indices
)
sol = self.run_simulation(params_as_dict)
bootstrap_sols.append(sol.copy())
bootstrap_params.append(params_as_dict.copy())
print(f'\nResults for bootstrap #{bootstrap_ind}')
self.pretty_print_params(bootstrap_params[-1])
print(f'saving bootstraps to {self.bootstrap_filename}...')
joblib.dump({'bootstrap_sols': bootstrap_sols, 'bootstrap_params': bootstrap_params},
self.bootstrap_filename)
print('...done!')
print('\nParameters when trained on all data (this is our starting point for optimization):')
[print(f'{key}: {val:.4g}') for key, val in self.all_data_params.items()]
# Add deterministic parameters to bootstraps
for params in bootstrap_params:
for extra_param, extra_param_func in self.extra_params.items():
params[extra_param] = extra_param_func([params[name] for name in self.sorted_names])
self.bootstrap_sols = bootstrap_sols
self.bootstrap_params = bootstrap_params
# add the means of the results to our consideration
mean_of_bootstrap_params = list()
for param_name in bootstrap_params[0]:
mean_of_bootstrap_params.append(
np.mean([self.bootstrap_params[i][param_name] for i in range(len(self.bootstrap_params))]))
proposed_params_list = self.bootstrap_params + [mean_of_bootstrap_params]
bootstrap_log_probs = list()
for proposed_params in proposed_params_list:
bootstrap_log_probs.append(self.get_log_likelihood(proposed_params))
param_ind = np.argmax(bootstrap_log_probs)
max_bootstrap_ll = self.get_log_likelihood(proposed_params_list[param_ind])
all_data_params_ll = self.get_log_likelihood(self.all_data_params)
if max_bootstrap_ll > all_data_params_ll + 1e-3:
# re-run all-data fit using new params
print('-----\n Will re-do all-data fit from render_bootstrap_fits with new MLE at end of run...\n-----')
print(f' log likelihood at all_data_params: {all_data_params_ll:.4g}')
print(f' log likelihood at MLE: {max_bootstrap_ll:.4g}')
print(f' bootstrap index with the MLE: {param_ind}')
if param_ind == len(self.bootstrap_params):
print(f' (this one comes from the mean)')
self.discovered_MLE_params.append(proposed_params_list[param_ind])
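# weight each bootstrap replicate against a flat prior: replicates whose parameters fall outside the
# prior bounds get weight zero (simple rejection), all others keep weight one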
bootstrap_weights = [1] * len(self.bootstrap_params)
for bootstrap_ind in range(len(self.bootstrap_params)):
for param_name, (lower, upper) in self.priors.items():
if param_name in self.static_params:
continue
if lower is not None and self.bootstrap_params[bootstrap_ind][param_name] < lower:
bootstrap_weights[bootstrap_ind] = 0
if upper is not None and self.bootstrap_params[bootstrap_ind][param_name] > upper:
bootstrap_weights[bootstrap_ind] = 0
self.bootstrap_weights = bootstrap_weights
def render_likelihood_samples(self,
n_samples=None
):
'''
Obtain likelihood samples
:param n_samples: how many samples to obtain
:return: None, saves results to object attributes
'''
if n_samples is None:
n_samples = self.n_samples
# label used for the likelihood-samples cache file; this implementation always uses the 'medium' bounds
bounds_to_use_str = 'medium'
success = False
try:
print(
f'\n----\nLoading likelihood samples from {self.likelihood_samples_filename_format_str.format(bounds_to_use_str)}...\n----')
samples_dict = joblib.load(self.likelihood_samples_filename_format_str.format(bounds_to_use_str))
print('...done!')
all_samples_as_list = samples_dict['all_samples_as_list']
all_log_probs_as_list = samples_dict['all_log_probs_as_list']
all_propensities_as_list = samples_dict['all_propensities_as_list']
success = True
self.loaded_likelihood_samples.append(bounds_to_use_str)
except:
pass
if (not success and self.opt_calc) or self.opt_force_calc:
bounds_to_use = self.curve_fit_bounds
if n_samples is None:
n_samples = self.n_likelihood_samples
all_samples = list()
all_log_probs = list()
print('\n----\nRendering likelihood samples...\n----')
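# draw each parameter independently and uniformly within its fit bounds (a flat proposal), then score
# every draw with the full log-likelihood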
for _ in tqdm(range(n_samples)):
indiv_sample_dict = dict()
for param_name in self.sorted_names:
indiv_sample_dict[param_name] = \
np.random.uniform(bounds_to_use[param_name][0], bounds_to_use[param_name][1], 1)[0]
all_samples.append(indiv_sample_dict)
all_log_probs.append(
self.get_log_likelihood(
indiv_sample_dict)) # this is the part that takes a while? Still surprised this takes so long
all_samples_as_list = list()
all_log_probs_as_list = list()
for i in tqdm(range(n_samples)):
if not np.isfinite(all_log_probs[i]):
continue
else:
sample_as_list = np.array([float(all_samples[i][name]) for name in self.map_name_to_sorted_ind])
all_samples_as_list.append(sample_as_list)
all_log_probs_as_list.append(all_log_probs[i])
all_propensities_as_list = [len(all_samples_as_list)] * len(all_samples_as_list)
print(f'saving samples to {self.likelihood_samples_filename_format_str.format("medium")}...')
joblib.dump({'all_samples_as_list': all_samples_as_list,
'all_log_probs_as_list': all_log_probs_as_list,
'all_propensities_as_list': all_propensities_as_list
},
self.likelihood_samples_filename_format_str.format("medium"))
print('...done!')
self.random_likelihood_samples = all_samples_as_list
self.random_likelihood_vals = all_log_probs_as_list
self.random_likelihood_propensities = all_propensities_as_list
self._add_samples(all_samples_as_list, all_log_probs_as_list, all_propensities_as_list, key='likelihood_samples')
def _add_samples(self, samples, vals, propensities, key=False):
print('adding samples...')
print('checking sizes...')
if key == 'likelihood_samples':
print(f'samples: {len(samples)}, vals: {len(vals)}, propensities: {len(propensities)}')
self.all_samples_as_list += samples
self.all_log_probs_as_list += vals
self.all_propensities_as_list += propensities
shuffled_ind = list(range(len(self.all_samples_as_list)))
np.random.shuffle(shuffled_ind)
self.all_samples_as_list = [self.all_samples_as_list[i] for i in shuffled_ind]
self.all_log_probs_as_list = [self.all_log_probs_as_list[i] for i in shuffled_ind]
self.all_propensities_as_list = [self.all_propensities_as_list[i] for i in shuffled_ind]
print('...done!')
elif key == 'random_walk':
print(f'samples: {len(samples)}, vals: {len(vals)}, propensities: {len(propensities)}')
self.all_random_walk_samples_as_list += samples
self.all_random_walk_log_probs_as_list += vals
shuffled_ind = list(range(len(self.all_random_walk_samples_as_list)))
np.random.shuffle(shuffled_ind)
self.all_random_walk_samples_as_list = [self.all_random_walk_samples_as_list[i] for i in shuffled_ind]
self.all_random_walk_log_probs_as_list = [self.all_random_walk_log_probs_as_list[i] for i in shuffled_ind]
print('...done!')
elif key == 'PyMC3':
print(f'samples: {len(samples)}, vals: {len(vals)}, propensities: {len(propensities)}')
self.all_PyMC3_samples_as_list += samples
self.all_PyMC3_log_probs_as_list += vals
shuffled_ind = list(range(len(self.all_PyMC3_samples_as_list)))
|
np.random.shuffle(shuffled_ind)
|
numpy.random.shuffle
|
import logging
import numpy as np
import tensorflow as tf
class Trainer(object):
"""Neural network trainer with custom train()
function. The trainer minimize self.loss using
self.optimizer.
"""
def _random_sample_feed_dictionary(self,
placeholders_dict,
input_values_dict,
batch_size):
"""Build self.feed_dict (a feed dictionary) for neural
network training purpose. The keys are tf.placeholder
and the values are np.array.
Args:
placeholders_dict(str -> tf.placeholder): a dictionary
of placeholders with keys being placeholder names
and values being the corresponding placeholders.
input_values_dict(str -> tf.placeholder): a dictionary
of input values with keys being input value names
and values being the corresponding input values.
"""
data_length = list(input_values_dict.values())[0].shape[0]
rand_ind = np.random.choice(data_length,
batch_size,
replace=True)
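# rows are drawn with replacement, so each mini-batch is an independent uniform sample of the data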
feed_dict = {}
for key in input_values_dict:
assert key in placeholders_dict
feed_dict[placeholders_dict[key]] = input_values_dict[key][rand_ind]
return feed_dict
def train(self,
optimizer,
loss,
placeholders_dict,
input_values_dict,
batch_size,
epochs,
tensorflow_session,
verbose=True,
logging_per=None):
"""Train the model given placeholders, input values,
and other parameters.
Args:
optimizer(tf.train.Optimizer): an optimizer that minimize
the loss when training.
loss(tf.tensor): the compiled loss of the model that it can
be optimized by optimizer.
placeholders_dict(str -> tf.placeholder): a dictionary
of placeholders with keys being placeholder names
and values being the corresponding placeholders.
input_values_dict(str -> np.array): a dictionary
of input values with keys being input value names
and values being the corresponding input values.
batch_size(int): the training batch size.
epochs(int): the training epochs.
tensorflow_session(tf.Session): tensorflow session under
which the variables live.
verbose(boolean): whether to log training loss information
or not.
logging_per(int): log training per unit of epoch. Default
value is max(epochs / 10, 1).
"""
if logging_per is None:
logging_per = np.max([epochs / 10, 1])
# Initialize all tensorflow variables
tensorflow_session.run(tf.global_variables_initializer())
total_loss = 0
for e in range(epochs):
feed_dict = self._random_sample_feed_dictionary(
placeholders_dict,
input_values_dict,
batch_size)
_, loss_value = tensorflow_session.run(
[optimizer, loss],
feed_dict=feed_dict)
total_loss +=
|
np.array(loss_value)
|
numpy.array
|
"""Unit tests for code in model.py.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
"""
from unittest import TestCase
import numpy as np
import pandas as pd
from sklearn.exceptions import NotFittedError
from sklearn.utils.estimator_checks import check_estimator
from statsmodels.distributions.empirical_distribution import ECDF
from proset import ClassifierModel
from proset.model import LOG_OFFSET
from proset.objective import ClassifierObjective
from proset.set_manager import ClassifierSetManager
from test.test_objective import FEATURES, TARGET, COUNTS, WEIGHTS # pylint: disable=wrong-import-order
from test.test_set_manager import BATCH_INFO # pylint: disable=wrong-import-order
MARGINALS = COUNTS / np.sum(COUNTS)
LOG_PROBABILITY = np.log(MARGINALS[TARGET] + LOG_OFFSET)
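# log-probability the constant (marginal-distribution) model assigns to each training label; the score()
# tests below compare against its plain and weighted means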
# pylint: disable=missing-function-docstring, protected-access, too-many-public-methods
class TestClassifierModel(TestCase):
"""Unit tests for class ClassifierModel.
The tests also cover abstract superclass Model.
"""
@staticmethod
def test_estimator():
check_estimator(ClassifierModel())
# no test for __init__() which only assigns public properties
def test_fit_1(self):
model = ClassifierModel(n_iter=1)
model.fit(X=FEATURES, y=TARGET)
self.assertEqual(model.set_manager_.num_batches, 1)
model.fit(X=FEATURES, y=TARGET, warm_start=False)
self.assertEqual(model.set_manager_.num_batches, 1)
def test_fit_2(self):
model = ClassifierModel(n_iter=1)
model.fit(X=FEATURES, y=TARGET)
self.assertEqual(model.set_manager_.num_batches, 1)
model.fit(X=FEATURES, y=TARGET, warm_start=True)
self.assertEqual(model.set_manager_.num_batches, 2)
# more extensive tests of fit() are performed by the sklearn test suite called in test_estimator()
def test_check_hyperparameters_fail_1(self):
message = ""
try:
ClassifierModel._check_hyperparameters(ClassifierModel(n_iter=1.0))
except TypeError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter n_iter must be integer.")
def test_check_hyperparameters_fail_2(self):
message = ""
try:
ClassifierModel._check_hyperparameters(ClassifierModel(n_iter=-1))
except ValueError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter n_iter must not be negative.")
@staticmethod
def test_check_hyperparameters_1():
ClassifierModel._check_hyperparameters(ClassifierModel(n_iter=0))
def test_validate_arrays_fail_1(self):
model = ClassifierModel()
model.n_features_in_ = FEATURES.shape[1] + 1
message = ""
try:
model._validate_arrays(X=FEATURES, y=TARGET, sample_weight=None, reset=False)
except ValueError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter X must have 5 columns.")
def test_validate_arrays_fail_2(self):
message = ""
try:
ClassifierModel()._validate_arrays(
X=FEATURES,
y=TARGET,
sample_weight=np.ones(FEATURES.shape[0] + 1),
reset=False
)
except ValueError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter sample_weight must have one element per row of X if not None.")
def test_validate_arrays_1(self):
model = ClassifierModel()
new_x, new_y, new_weight = model._validate_arrays(X=FEATURES, y=TARGET, sample_weight=None, reset=False)
np.testing.assert_array_equal(new_x, FEATURES)
np.testing.assert_array_equal(new_y, TARGET)
self.assertEqual(new_weight, None)
self.assertTrue(hasattr(model, "label_encoder_"))
np.testing.assert_array_equal(model.classes_, np.unique(TARGET))
def test_validate_arrays_2(self):
model = ClassifierModel()
model.n_features_in_ = FEATURES.shape[1] + 1
string_target = np.array([str(value) for value in TARGET])
new_x, new_y, new_weight = model._validate_arrays(
X=FEATURES,
y=string_target,
sample_weight=WEIGHTS,
reset=True
)
np.testing.assert_array_equal(new_x, FEATURES)
np.testing.assert_array_equal(new_y, TARGET) # converted to integer
np.testing.assert_array_equal(new_weight, WEIGHTS)
self.assertEqual(model.n_features_in_, FEATURES.shape[1])
self.assertTrue(hasattr(model, "label_encoder_"))
np.testing.assert_array_equal(model.classes_, np.unique(string_target))
# function _validate_y() already tested by the above
def test_get_compute_classes_1(self):
# noinspection PyPep8Naming
SetManager, Objective = ClassifierModel._get_compute_classes()
self.assertTrue(SetManager is ClassifierSetManager)
self.assertTrue(Objective is ClassifierObjective)
def test_parse_solver_status_1(self):
result = ClassifierModel._parse_solver_status({"warnflag": 0})
self.assertEqual(result, "converged")
def test_parse_solver_status_2(self):
result = ClassifierModel._parse_solver_status({"warnflag": 1})
self.assertEqual(result, "reached limit on iterations or function calls")
def test_parse_solver_status_3(self):
result = ClassifierModel._parse_solver_status({"warnflag": 2, "task": "error"})
self.assertEqual(result, "not converged (error)")
def test_predict_fail_1(self):
message = ""
try:
ClassifierModel().predict(X=FEATURES)
except NotFittedError as ex:
message = ex.args[0]
self.assertEqual(message, " ".join([
"This ClassifierModel instance is not fitted yet.",
"Call 'fit' with appropriate arguments before using this estimator."
]))
@staticmethod
def test_predict_1():
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
labels, familiarity = model.predict(X=FEATURES, compute_familiarity=True)
np.testing.assert_array_equal(labels, np.argmax(COUNTS) * np.ones(FEATURES.shape[0], dtype=int))
np.testing.assert_array_equal(familiarity, np.zeros(FEATURES.shape[0]))
def test_predict_2(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
labels = model.predict(X=FEATURES, n_iter=np.array([0]))
self.assertEqual(len(labels), 1)
np.testing.assert_array_equal(labels[0], np.argmax(COUNTS) * np.ones(FEATURES.shape[0], dtype=int))
def test_predict_3(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
labels, familiarity = model.predict(X=FEATURES, n_iter=np.array([0]), compute_familiarity=True)
self.assertEqual(len(labels), 1)
self.assertEqual(len(familiarity), 1)
np.testing.assert_array_equal(labels[0], np.argmax(COUNTS) * np.ones(FEATURES.shape[0], dtype=int))
np.testing.assert_array_equal(familiarity[0], np.zeros(FEATURES.shape[0]))
# more extensive tests of predict() are performed by the sklearn test suite called in test_estimator()
# function _compute_prediction() already covered by the above
def test_score_fail_1(self):
message = ""
try:
ClassifierModel().score(X=FEATURES, y=TARGET)
except NotFittedError as ex:
message = ex.args[0]
self.assertEqual(message, " ".join([
"This ClassifierModel instance is not fitted yet.",
"Call 'fit' with appropriate arguments before using this estimator."
]))
def test_score_1(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
score = model.score(X=FEATURES, y=TARGET)
# noinspection PyTypeChecker
self.assertAlmostEqual(score, np.mean(LOG_PROBABILITY))
def test_score_2(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
score = model.score(X=FEATURES, y=TARGET, sample_weight=WEIGHTS, n_iter=np.array([0]))
# noinspection PyTypeChecker
self.assertEqual(score.shape, (1, ))
self.assertAlmostEqual(score[0], np.inner(LOG_PROBABILITY, WEIGHTS) / np.sum(WEIGHTS))
# more extensive tests of score() are performed by the sklearn test suite called in test_estimator()
# function _compute_score() already covered by the above
def test_predict_proba_fail_1(self):
message = ""
try:
ClassifierModel().predict_proba(X=FEATURES)
except NotFittedError as ex:
message = ex.args[0]
self.assertEqual(message, " ".join([
"This ClassifierModel instance is not fitted yet.",
"Call 'fit' with appropriate arguments before using this estimator."
]))
@staticmethod
def test_predict_proba_1():
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
probabilities, familiarity = model.predict_proba(X=FEATURES, compute_familiarity=True)
np.testing.assert_array_equal(probabilities, np.tile(MARGINALS, (FEATURES.shape[0], 1)))
np.testing.assert_array_equal(familiarity, np.zeros(FEATURES.shape[0]))
def test_predict_proba_2(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
probabilities = model.predict_proba(X=FEATURES, n_iter=np.array([0]))
self.assertEqual(len(probabilities), 1)
np.testing.assert_array_equal(probabilities[0], np.tile(MARGINALS, (FEATURES.shape[0], 1)))
def test_predict_proba_3(self):
model = ClassifierModel(n_iter=0) # constant model uses marginal distribution of target for predictions
model.fit(X=FEATURES, y=TARGET)
probabilities, familiarity = model.predict_proba(X=FEATURES, n_iter=np.array([0]), compute_familiarity=True)
self.assertEqual(len(probabilities), 1)
self.assertEqual(len(familiarity), 1)
np.testing.assert_array_equal(probabilities[0], np.tile(MARGINALS, (FEATURES.shape[0], 1)))
np.testing.assert_array_equal(familiarity[0], np.zeros(FEATURES.shape[0]))
# more extensive tests of predict_proba() are performed by the sklearn test suite called in test_estimator()
@staticmethod
def test_export_1():
model = ClassifierModel()
model.fit(X=FEATURES, y=TARGET)
ref_baseline = model._make_baseline_for_export()
batches = model.set_manager_.get_batches()
ref_prototypes = model._make_prototype_report(batches=batches, train_names=None, compute_impact=False)
feature_columns = model._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
ref_features = model._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=False,
scale=np.ones(FEATURES.shape[1]),
offset=np.zeros(FEATURES.shape[1]),
active_features=model.set_manager_.get_active_features(),
include_similarities=False
)
ref_export = pd.concat([ref_prototypes, ref_features], axis=1)
ref_export.sort_values(["batch", "prototype weight"], ascending=[True, False], inplace=True)
ref_export = pd.concat([ref_baseline, ref_export], axis=0)
ref_export.reset_index(drop=True, inplace=True)
result = model.export()
pd.testing.assert_frame_equal(result, ref_export)
def test_check_report_input_fail_1(self):
message = ""
try:
# test only one exception raised by shared.check_feature_names() to ensure it is called; other exceptions
# tested by the unit tests for that function
ClassifierModel._check_report_input(
feature_names=None,
num_features=0.0,
scale=None,
offset=None,
sample_name=None
)
except TypeError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter num_features must be integer.")
def test_check_report_input_fail_2(self):
message = ""
try:
# test only one exception raised by shared.check_scale_offset() to ensure it is called; other exceptions
# tested by the unit tests for that function
ClassifierModel._check_report_input(
feature_names=None,
num_features=1,
scale=np.array([[1.0]]),
offset=None,
sample_name=None
)
except ValueError as ex:
message = ex.args[0]
self.assertEqual(message, "Parameter scale must be a 1D array.")
def test_check_report_input_1(self):
feature_columns, include_original, scale, offset, sample_name = ClassifierModel._check_report_input(
feature_names=None,
num_features=2,
scale=None,
offset=None,
sample_name=None
)
self.assertEqual(feature_columns, [
["X0 weight", "X0 value", "X0 original", "X0 similarity"],
["X1 weight", "X1 value", "X1 original", "X1 similarity"]
])
self.assertFalse(include_original) # no custom scale or offset provided
np.testing.assert_array_equal(scale, np.ones(2))
np.testing.assert_array_equal(offset, np.zeros(2))
self.assertEqual(sample_name, "new sample")
def test_check_report_input_2(self):
feature_columns, include_original, scale, offset, sample_name = ClassifierModel._check_report_input(
feature_names=["Y0", "Y1"],
num_features=2,
scale=np.array([0.5, 2.0]),
offset=np.array([-1.0, 1.0]),
sample_name="test sample"
)
self.assertEqual(feature_columns, [
["Y0 weight", "Y0 value", "Y0 original", "Y0 similarity"],
["Y1 weight", "Y1 value", "Y1 original", "Y1 similarity"]
])
self.assertTrue(include_original) # custom scale and offset provided
np.testing.assert_array_equal(scale, np.array([0.5, 2.0]))
np.testing.assert_array_equal(offset, np.array([-1.0, 1.0]))
self.assertEqual(sample_name, "test sample")
def test_make_prototype_report_1(self):
model = ClassifierModel()
result = model._make_prototype_report(batches=[], train_names=None, compute_impact=False)
self.assertEqual(result.shape, (0, 5))
self.assertEqual(list(result.columns), ["batch", "sample", "sample name", "target", "prototype weight"])
def test_make_prototype_report_2(self):
model = ClassifierModel()
result = model._make_prototype_report(batches=[None], train_names=None, compute_impact=True)
self.assertEqual(result.shape, (0, 7))
self.assertEqual(
list(result.columns),
["batch", "sample", "sample name", "target", "prototype weight", "similarity", "impact"]
)
def test_make_prototype_report_3(self):
model = ClassifierModel()
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches()
ref_batch = model._format_batch(batches[0], batch_index=0, train_names=None)
result = model._make_prototype_report(batches=batches, train_names=None, compute_impact=False)
self.assertEqual(result.shape, (2 * ref_batch.shape[0], 5))
pd.testing.assert_frame_equal(result.iloc[:ref_batch.shape[0]], ref_batch)
ref_batch["batch"] = 2
result = result.iloc[ref_batch.shape[0]:].reset_index(drop=True)
pd.testing.assert_frame_equal(result, ref_batch)
@staticmethod
def test_make_prototype_report_4():
model = ClassifierModel()
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches(features=FEATURES[0:1, :]) + [None]
train_names = ["training {}".format(j) for j in range(np.max(batches[0]["sample_index"]) + 1)]
ref_batch = model._format_batch(batches[0], batch_index=0, train_names=train_names)
result = model._make_prototype_report(batches=batches, train_names=train_names, compute_impact=True)
pd.testing.assert_frame_equal(result, ref_batch)
def test_format_batch_1(self):
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batch = set_manager.get_batches()[0]
num_prototypes = batch["prototypes"].shape[0]
result = ClassifierModel._format_batch(
batch=batch,
batch_index=0,
train_names=None
)
self.assertEqual(result.shape, (num_prototypes, 5))
np.testing.assert_array_equal(result["batch"].values, np.ones(num_prototypes))
np.testing.assert_array_equal(result["sample"].values, batch["sample_index"])
np.testing.assert_array_equal(
result["sample name"].values, np.array(["sample {}".format(j) for j in batch["sample_index"]])
)
np.testing.assert_array_equal(result["target"].values, batch["target"])
np.testing.assert_array_equal(result["prototype weight"].values, batch["prototype_weights"])
def test_format_batch_2(self):
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batch = set_manager.get_batches(features=FEATURES[0:1, :])[0]
num_prototypes = batch["prototypes"].shape[0]
result = ClassifierModel._format_batch(
batch=batch,
batch_index=0,
train_names=["training {}".format(j) for j in range(np.max(batch["sample_index"]) + 1)]
)
self.assertEqual(result.shape, (num_prototypes, 7))
np.testing.assert_array_equal(result["batch"].values, np.ones(num_prototypes))
np.testing.assert_array_equal(result["sample"].values, batch["sample_index"])
np.testing.assert_array_equal(
result["sample name"].values, np.array(["training {}".format(j) for j in batch["sample_index"]])
)
np.testing.assert_array_equal(result["target"].values, batch["target"])
np.testing.assert_array_equal(result["prototype weight"].values, batch["prototype_weights"])
similarity = np.prod(batch["similarities"], axis=1)
np.testing.assert_almost_equal(result["similarity"], similarity)
np.testing.assert_almost_equal(result["impact"], similarity * batch["prototype_weights"])
@staticmethod
def test_make_feature_report_1():
model = ClassifierModel()
num_features = 1
scale = np.ones(num_features)
offset = np.zeros(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
result = model._make_feature_report(
batches=[],
feature_columns=feature_columns,
include_original=False,
scale=scale,
offset=offset,
active_features=np.zeros(0, dtype=int), # no active features means nothing to report
include_similarities=False
)
pd.testing.assert_frame_equal(result, pd.DataFrame())
def test_make_feature_report_2(self):
model = ClassifierModel()
num_features = 3
scale = np.ones(num_features)
offset = np.zeros(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
result = model._make_feature_report(
batches=[None], # no prototypes in batch means data frame has zero rows
feature_columns=feature_columns,
include_original=False,
scale=scale,
offset=offset,
active_features=np.array([0, 2]),
include_similarities=False
)
self.assertEqual(result.shape, (0, 4)) # two columns per active feature
self.assertEqual(list(result.columns), ["X0 weight", "X0 value", "X2 weight", "X2 value"])
def test_make_feature_report_3(self):
model = ClassifierModel()
num_features = 3
scale = np.ones(num_features)
offset = np.zeros(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
result = model._make_feature_report(
batches=[None], # no prototypes in batch means data frame has zero rows
feature_columns=feature_columns,
include_original=True,
scale=scale,
offset=offset,
active_features=np.array([0, 2]),
include_similarities=True
)
self.assertEqual(result.shape, (0, 8)) # four columns per active feature
self.assertEqual(list(result.columns), [
"X0 weight", "X0 value", "X0 original", "X0 similarity",
"X2 weight", "X2 value", "X2 original", "X2 similarity"
])
@staticmethod
def test_make_feature_report_4():
model = ClassifierModel()
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches()
num_features = np.max(batches[0]["active_features"]) + 1
scale = np.ones(num_features)
offset = np.zeros(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
reference = pd.concat([
ClassifierModel._format_feature(
batches=batches,
feature_index=index,
feature_columns=feature_columns,
include_original=False,
scale=scale,
offset=offset,
include_similarities=False
)
for index in batches[0]["active_features"]
], axis=1)
result = model._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=False,
scale=scale,
offset=offset,
active_features=batches[0]["active_features"],
include_similarities=False
)
pd.testing.assert_frame_equal(result, reference)
@staticmethod
def test_make_feature_report_5():
model = ClassifierModel()
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches(features=FEATURES[0:1, :])
num_features = np.max(batches[0]["active_features"]) + 1
scale = 2.0 * np.ones(num_features)
offset = -1.0 * np.ones(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
reference = pd.concat([
ClassifierModel._format_feature(
batches=batches,
feature_index=index,
feature_columns=feature_columns,
include_original=True,
scale=scale,
offset=offset,
include_similarities=True
)
for index in batches[0]["active_features"]
], axis=1)
result = model._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=True,
scale=scale,
offset=offset,
active_features=batches[0]["active_features"],
include_similarities=True
)
pd.testing.assert_frame_equal(result, reference)
@staticmethod
def test_make_feature_report_6():
model = ClassifierModel()
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches(features=FEATURES[0:1, :]) + [None]
num_features = np.max(batches[0]["active_features"]) + 1
scale = 2.0 * np.ones(num_features)
offset = -1.0 * np.ones(num_features)
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=num_features,
scale=scale,
offset=offset,
sample_name=None
)[0]
reference = pd.concat([
ClassifierModel._format_feature(
batches=[batches[0]], # second batch should have no contribution
feature_index=index,
feature_columns=feature_columns,
include_original=True,
scale=scale,
offset=offset,
include_similarities=True
)
for index in batches[0]["active_features"]
], axis=1)
result = model._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=True,
scale=scale,
offset=offset,
active_features=batches[0]["active_features"],
include_similarities=True
)
pd.testing.assert_frame_equal(result, reference)
def test_format_feature_1(self):
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
result = ClassifierModel._format_feature(
batches=[],
feature_index=0,
feature_columns=feature_columns,
include_original=False,
scale=np.ones(FEATURES.shape[1]),
offset=np.zeros(FEATURES.shape[1]),
include_similarities=False
)
self.assertEqual(result.shape, (0, 2))
self.assertEqual(list(result.columns), feature_columns[0][:2])
def test_format_feature_2(self):
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
result = ClassifierModel._format_feature(
batches=[None],
feature_index=0,
feature_columns=feature_columns,
include_original=True,
scale=np.ones(FEATURES.shape[1]),
offset=np.zeros(FEATURES.shape[1]),
include_similarities=True
)
self.assertEqual(result.shape, (0, 4))
self.assertEqual(list(result.columns), feature_columns[0])
def test_format_feature_3(self):
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches()
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
index = batches[0]["active_features"][0]
result = ClassifierModel._format_feature(
batches=batches,
feature_index=index,
feature_columns=feature_columns,
include_original=False,
scale=np.ones(FEATURES.shape[1]),
offset=np.zeros(FEATURES.shape[1]),
include_similarities=False
)
num_prototypes = batches[0]["prototypes"].shape[0]
self.assertEqual(result.shape, (num_prototypes, 2))
np.testing.assert_array_equal(
result["X{} weight".format(index)].values, batches[0]["feature_weights"][0] * np.ones(num_prototypes)
)
np.testing.assert_array_equal(result["X{} value".format(index)].values, batches[0]["prototypes"][:, 0])
def test_format_feature_4(self):
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches(features=FEATURES[0:1, :])
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
index = batches[0]["active_features"][1]
result = ClassifierModel._format_feature(
batches=batches,
feature_index=index,
feature_columns=feature_columns,
include_original=True,
scale=2.0 * np.ones(FEATURES.shape[1]),
offset=-1.0 * np.ones(FEATURES.shape[1]),
include_similarities=True
)
num_prototypes = batches[0]["prototypes"].shape[0]
self.assertEqual(result.shape, (num_prototypes, 4))
np.testing.assert_array_equal(
result["X{} weight".format(index)].values, batches[0]["feature_weights"][1] * np.ones(num_prototypes)
)
np.testing.assert_array_equal(result["X{} value".format(index)].values, batches[0]["prototypes"][:, 1])
np.testing.assert_array_equal(
result["X{} original".format(index)].values, 2.0 * batches[0]["prototypes"][:, 1] - 1.0
)
np.testing.assert_array_equal(result["X{} similarity".format(index)].values, batches[0]["similarities"][:, 1])
def test_format_feature_5(self):
set_manager = ClassifierSetManager(target=TARGET)
set_manager.add_batch(BATCH_INFO)
batches = set_manager.get_batches(features=FEATURES[0:1, :])
feature_columns = ClassifierModel._check_report_input(
feature_names=None,
num_features=FEATURES.shape[1],
scale=None,
offset=None,
sample_name=None
)[0]
index = 0
        for index in range(FEATURES.shape[1]):  # search over feature indices for one that is inactive
if index not in batches[0]["active_features"]:
break
if index in batches[0]["active_features"]: # pragma: no cover
raise RuntimeError("Constant BATCH_INFO from test_set_manager.py has no inactive features.")
result = ClassifierModel._format_feature(
batches=batches,
feature_index=index,
feature_columns=feature_columns,
include_original=True,
scale=2.0 * np.ones(FEATURES.shape[1]),
offset=-1.0 * np.ones(FEATURES.shape[1]),
include_similarities=True
)
num_prototypes = batches[0]["prototypes"].shape[0]
self.assertEqual(result.shape, (num_prototypes, 4))
self.assertTrue(np.all(pd.isna(result["X{} weight".format(index)].values)))
self.assertTrue(np.all(pd.isna(result["X{} value".format(index)].values)))
self.assertTrue(np.all(pd.isna(result["X{} original".format(index)].values)))
self.assertTrue(np.all(pd.isna(result["X{} similarity".format(index)].values)))
def test_make_baseline_for_export_1(self):
model = ClassifierModel(n_iter=0)
model.fit(X=FEATURES, y=TARGET)
result = model._make_baseline_for_export()
classes_int = np.unique(TARGET)
classes_str = [str(label) for label in classes_int]
self.assertEqual(result.shape, (len(classes_int), 5))
self.assertTrue(np.all(pd.isna(result["batch"].values)))
self.assertTrue(np.all(pd.isna(result["sample"].values)))
self.assertEqual(list(result["sample name"]), model._format_class_labels(classes_str))
np.testing.assert_array_equal(result["target"].values, classes_int)
        np.testing.assert_array_equal(result["prototype weight"], MARGINALS)
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import autograd
import numpy as np
import collections
import scipy.optimize
import scipy.stats
# Parameters for a performance model which predicts the per-step time of
# distributed SGD using all-reduce. At a high level, models compute time and
# network time separately, and combines them with some degree of overlap.
# Compute time is modeled as a linear function of the local batch size.
# Network time is modeled using different parameters depending on if the job
# is inter-node (there exists a pair of replicas on different nodes), or
# intra-node (all replicas are on the same node). For both cases, network time
# is modeled as a constant term plus a retrogression term which increases
# linearly with the total number of replicas.
Params = collections.namedtuple("Params", [
# T_compute ~ alpha_c + beta_c * local_bsz
"alpha_c", # Constant term of compute time
"beta_c", # Multiplicative factor of compute time
# If inter-node: T_network ~ alpha_n + beta_n * replicas
"alpha_n", # Constant term of inter-node network time
"beta_n", # Retrogression factor of inter-node network time
# If intra-node: T_network ~ alpha_r + beta_r * replicas
"alpha_r", # Constant term of intra-node network time
"beta_r", # Retrogression factor of intra-node network time
# T_step ~ (T_compute ^ gamma + T_network ^ gamma) ^ (1 / gamma)
# Essentially is a p-norm where p = gamma. When p ~ 1 then
# T_step ~ T_compute + T_network, indicating no overlap between compute
# and network. When p -> infinity then T_step = max(T_compute, T_network),
# indicating perfect overlap. We limit gamma to [1, 10] since 10 is close
# enough to approximate the max function for our purposes.
"gamma", # Models the degree of overlap between compute and network
])
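# Illustrative sketch (not part of the original module): restates the formulas
# described in the comment block above to show how the Params fields combine
# into a predicted per-step time. The actual model used for fitting and
# prediction lives in _predict_log elsewhere in this module.
def _example_step_time(params, replicas, local_bsz, inter_node=True):
    t_compute = params.alpha_c + params.beta_c * local_bsz
    if inter_node:
        t_network = params.alpha_n + params.beta_n * replicas
    else:
        t_network = params.alpha_r + params.beta_r * replicas
    # gamma in [1, 10] interpolates between no overlap (sum) and full overlap (max).
    gamma = params.gamma
    return (t_compute ** gamma + t_network ** gamma) ** (1.0 / gamma)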
class SpeedupFunction(object):
def __init__(self, params, grad_params=None, init_batch_size=None,
max_batch_size=None, local_bsz_bounds=None,
elastic_bsz=False):
self._grad_params = grad_params
self._init_batch_size = init_batch_size
if local_bsz_bounds is not None:
self._max_local_bsz = local_bsz_bounds[1]
self._min_local_bsz = local_bsz_bounds[0]
else:
self._max_local_bsz = None
self._min_local_bsz = None
default_max_batch_size_scale = 100
if max_batch_size is not None:
self._max_batch_size = max_batch_size
elif elastic_bsz:
self._max_batch_size = (default_max_batch_size_scale *
init_batch_size)
else:
self._max_batch_size = init_batch_size
if params is not None:
self._params = Params(*params)
else:
self._params = None
if params is not None and init_batch_size is not None:
base_step_time, _, _ = _predict_log(self._params,
np.array([1]), np.array([1]),
init_batch_size)
base_step_time = base_step_time.item()
self._base_goodput = 1.0 / np.exp(base_step_time)
else:
self._base_goodput = 1.0
self._elastic_bsz = elastic_bsz
# Memoization for fast repeated queries.
self._mem_size = 32
self._mem_speedup = np.full((self._mem_size, self._mem_size), -1.0)
self._mem_local_bsz = np.full((self._mem_size, self._mem_size), -1)
self._mem_speedup[0, 0] = 0.0 # replicas = 0 ==> speedup = 0
self._mem_local_bsz[0, 0] = 0
self._mem_speedup[1, 1] = 1.0 # replicas = 1 ==> speedup = 1
self._mem_local_bsz[1, 1] = self._init_batch_size
def __call__(self, nodes, replicas, return_local_bsz=False):
# nodes and replicas must have the same shape, dtype=int
assert np.shape(nodes) == np.shape(replicas)
assert np.all(np.less_equal(0, nodes))
assert np.all(np.less_equal(nodes, replicas))
assert np.all((nodes > 0) == (replicas > 0))
# Remember if original arguments are scalars.
isscalar = np.isscalar(replicas)
nodes, replicas = np.atleast_1d(nodes, replicas)
# Return values which will be filled out.
ret_speedup = np.full(np.shape(replicas), -1.0)
ret_local_bsz = np.full(np.shape(replicas), -1)
# Fill in any memoized results first.
ret_indices = replicas < self._mem_size
mem_indices = (nodes[ret_indices], replicas[ret_indices])
ret_speedup[ret_indices] = self._mem_speedup[mem_indices]
ret_local_bsz[ret_indices] = self._mem_local_bsz[mem_indices]
# Find the indices which still need to be computed.
indices = ret_speedup < 0
nodes, replicas = nodes[indices], replicas[indices]
# Only compute for unique inputs.
if np.size(replicas) > 0:
(nodes, replicas), unique_indices = np.unique(
np.stack([nodes, replicas]), axis=1, return_inverse=True)
else:
            unique_indices = np.array([], dtype=int)
if np.size(replicas) == 0:
            local_bsz = np.array([], dtype=int)
goodput = np.array([])
elif self._params is None:
local_bsz = np.ceil(self._init_batch_size / replicas).astype(int)
goodput = np.ones(np.shape(replicas))
elif self._elastic_bsz:
max_local_bsz = np.floor(self._max_batch_size / replicas)
min_local_bsz = np.ceil(self._init_batch_size / replicas)
if self._max_local_bsz is not None:
max_local_bsz = np.minimum(self._max_local_bsz, max_local_bsz)
if self._min_local_bsz is not None:
min_local_bsz = np.maximum(self._min_local_bsz, min_local_bsz)
assert np.all(max_local_bsz >= min_local_bsz)
# Sample a bunch of potential local_bsz values
local_bsz = np.geomspace(min_local_bsz, max_local_bsz, num=100)
# Should get broadcast to (num_samples, replicas.size).
goodput = self._goodput(nodes, replicas, local_bsz)
local_bsz = local_bsz[np.argmax(goodput, axis=0),
np.arange(local_bsz.shape[1])]
local_bsz = local_bsz.round().astype(int)
goodput = np.amax(goodput, axis=0)
else:
local_bsz = np.ceil(self._init_batch_size / replicas).astype(int)
log_pred_step_time, _, _ = \
_predict_log(self._params, nodes, replicas, local_bsz)
goodput = 1.0 / np.exp(log_pred_step_time)
speedup = goodput / self._base_goodput
# Undo unique.
nodes = nodes[unique_indices]
replicas = replicas[unique_indices]
speedup = speedup[unique_indices]
local_bsz = local_bsz[unique_indices]
# Fill in computed results.
ret_speedup[indices] = speedup
ret_local_bsz[indices] = local_bsz
# Memoize results.
ret_indices = replicas < self._mem_size
mem_indices = (nodes[ret_indices], replicas[ret_indices])
self._mem_speedup[mem_indices] = speedup[ret_indices]
self._mem_local_bsz[mem_indices] = local_bsz[ret_indices]
if isscalar:
ret_speedup = ret_speedup.item()
ret_local_bsz = ret_local_bsz.item()
return ((ret_speedup, ret_local_bsz)
if return_local_bsz else ret_speedup)
def _goodput(self, nodes, replicas, local_bsz):
log_pred_step_time, _, _ = \
_predict_log(self._params, nodes, replicas, local_bsz)
var, norm = self._grad_params['var'], self._grad_params['norm']
global_bsz = replicas * local_bsz
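        # Descriptive note: the ratio below is a statistical-efficiency gain. With
        # gradient variance estimate `var` and squared gradient norm `norm`, going
        # from the initial batch size up to global_bsz scales the variance term by
        # init_batch_size / global_bsz; np.where guards the degenerate all-zero case.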
gain = np.where(
(var / global_bsz * self._init_batch_size + norm) == 0.0,
1.0,
(var + norm) / (var / global_bsz * self._init_batch_size + norm))
return gain / np.exp(log_pred_step_time)
def params(self):
return self._params
def fit(nodes, replicas, local_bsz, step_time, step_time_compute):
# Fit the performance model given step time and compute time measurements
# for different configurations of nodes, replicas, local_bsz.
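    # Usage sketch (an assumption about the intended call pattern): the five
    # arguments are equal-length sequences with one entry per profiled
    # configuration, e.g. fit(nodes, replicas, local_bsz, step_time, step_time_compute).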
# HACK: We want to use the original numpy module for calls from the
# SpeedupFunction for performance reasons, but also need those functions to
# use autograd.numpy when we want to differentiate them. We patch the
    # global np reference only for the code invoked from this function.
global np # Replace numpy from autograd.
orig_np = np
np = autograd.numpy
replicas = np.array(replicas)
local_bsz = np.array(local_bsz)
    step_time = np.array(step_time)
import numpy as np
from tensorflow.keras.utils import to_categorical
def randomize(x, y):
permutation = np.random.permutation(y.shape[0])
shuffled_x = x[permutation, :]
shuffled_y = y[permutation]
return shuffled_x, shuffled_y
def get_next_batch(x, y, start, end):
x_batch = x[start:end]
y_batch = y[start:end]
return x_batch, y_batch
class TensorFlowDataLoaderTemplate:
def __init__(self):
self.input_train = []
self.target_train = []
self.input_valid = []
self.target_valid = []
self.start = 0
self.end = 0
def epoch_init(self):
self.input_train, self.target_train = randomize(self.input_train, self.target_train)
self.start = 0
self.end = 0
def next_batch(self, batch_size):
self.start = self.end
self.end = self.start + batch_size
input_batch, target_batch = get_next_batch(self.input_train, self.target_train, self.start, self.end)
return input_batch, target_batch
class DataMNIST(TensorFlowDataLoaderTemplate):
def __init__(self, flatten=True):
from tensorflow.keras.datasets import mnist
img_h = img_w = 28
img_size_flat = img_h * img_w
n_channels = 1
(input_train, target_train), (input_test, target_test) = mnist.load_data()
self.input_train = input_train
self.target_train = to_categorical(target_train)
self.input_valid = input_test
self.target_valid = to_categorical(target_test)
if flatten:
self.input_train = self.input_train.reshape((-1, img_size_flat))
self.input_valid = self.input_valid.reshape((-1, img_size_flat))
else:
self.input_train = self.input_train.reshape((-1, img_h, img_w, n_channels))
self.input_valid = self.input_valid.reshape((-1, img_h, img_w, n_channels))
class DataMNISTAE(TensorFlowDataLoaderTemplate):
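    # Denoising-autoencoder variant of MNIST: the flattened images with additive
    # Gaussian noise serve as inputs, the clean flattened images as targets.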
def __init__(self):
from tensorflow.keras.datasets import mnist
img_h = img_w = 28
img_size_flat = img_h * img_w
noise_level = 0.9
(input_train, target_train), (input_test, target_test) = mnist.load_data()
self.input_train = input_train
self.input_valid = input_test
self.input_train = input_train.reshape((-1, img_size_flat))*1.0
self.input_train += noise_level * np.random.normal(loc=0.0, scale=255.0, size=self.input_train.shape)
self.target_train = input_train.reshape((-1, img_size_flat))*1.0
self.input_valid = input_test.reshape((-1, img_size_flat))*1.0
        self.input_valid += noise_level * np.random.normal(loc=0.0, scale=255.0, size=self.input_valid.shape)
import numpy as np
from copy import deepcopy
import abc
from sklearn.utils import shuffle
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
def mnist_transforms():
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
class NumpyDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = torch.from_numpy(data).type(torch.float)
self.target = torch.from_numpy(target).type(torch.long)
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.data)
class DataGenerator(metaclass=abc.ABCMeta):
def __init__(self, limit_per_task=1000):
self.limit_per_task = limit_per_task
self.X_train_batch = []
self.y_train_batch = []
self.current_pos = 0
self.cur_iter = 0
def next_batch(self, size):
self.current_pos += size
if self.current_pos > self.X_train_batch.shape[0]:
return None, None
return self.X_train_batch[self.current_pos - size:self.current_pos], self.y_train_batch[
self.current_pos - size:self.current_pos]
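# Permuted-MNIST continual-learning benchmark: each of max_iter tasks applies its
# own fixed random permutation of the 784 pixel positions to the same MNIST data,
# so the label space is shared across tasks while the input distribution changes.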
class PermutedMnistGenerator(DataGenerator):
def __init__(self, limit_per_task=1000, max_iter=10):
super().__init__(limit_per_task)
train_dataset = datasets.MNIST('data/MNIST/', train=True, transform=mnist_transforms(),
download=True)
test_dataset = datasets.MNIST('data/MNIST/', train=False, transform=mnist_transforms(),
download=True)
train_loader = DataLoader(train_dataset, batch_size=len(train_dataset), shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=len(test_dataset))
self.X_train, self.Y_train = next(iter(train_loader))
self.X_train, self.Y_train = self.X_train.numpy()[:limit_per_task].reshape(-1, 28 * 28), self.Y_train.numpy()[
:limit_per_task]
self.X_test, self.Y_test = next(iter(test_loader))
self.X_test, self.Y_test = self.X_test.numpy().reshape(-1, 28 * 28), self.Y_test.numpy()
self.max_iter = max_iter
self.permutations = []
self.rs = np.random.RandomState(0)
for i in range(max_iter):
perm_inds = list(range(self.X_train.shape[1]))
self.rs.shuffle(perm_inds)
self.permutations.append(perm_inds)
self.X_train_batch.append(self.X_train[:, perm_inds])
self.y_train_batch.append(self.Y_train)
self.X_train_batch = np.vstack(self.X_train_batch)
self.y_train_batch = np.hstack(self.y_train_batch)
def next_task(self):
if self.cur_iter >= self.max_iter:
raise Exception('Number of tasks exceeded!')
else:
perm_inds = self.permutations[self.cur_iter]
next_x_train = deepcopy(self.X_train)
next_x_train = next_x_train[:, perm_inds]
next_y_train = self.Y_train
next_x_test = deepcopy(self.X_test)
next_x_test = next_x_test[:, perm_inds]
next_y_test = self.Y_test
self.cur_iter += 1
return next_x_train, next_y_train, next_x_test, next_y_test
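# Split-MNIST benchmark: the ten digits are paired into five two-class tasks
# (0 vs 1, 2 vs 3, ..., 8 vs 9), drawing limit_per_task examples for each task.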
class SplitMnistGenerator(DataGenerator):
def __init__(self, limit_per_task=1000):
super().__init__(limit_per_task)
train_dataset = datasets.MNIST('data/MNIST', train=True, transform=mnist_transforms(),
download=True)
test_dataset = datasets.MNIST('data/MNIST/', train=False, transform=mnist_transforms(),
download=True)
train_loader = DataLoader(train_dataset, batch_size=len(train_dataset))
test_loader = DataLoader(test_dataset, batch_size=len(test_dataset))
self.X_train, self.Y_train = next(iter(train_loader))
self.X_train, self.Y_train = self.X_train.numpy(), self.Y_train.numpy()
self.X_test, self.Y_test = next(iter(test_loader))
self.X_test, self.Y_test = self.X_test.numpy(), self.Y_test.numpy()
self.sets_0 = [0, 2, 4, 6, 8]
self.sets_1 = [1, 3, 5, 7, 9]
self.max_iter = len(self.sets_0)
self.X_train_batch = []
self.y_train_batch = []
self.inds = []
rs = np.random.RandomState(0)
for i in range(5):
ind = np.where(np.logical_or(self.Y_train == self.sets_0[i], self.Y_train == self.sets_1[i]))[0]
ind = rs.choice(ind, limit_per_task, replace=False)
self.inds.append(ind)
X = self.X_train[ind]
y = self.Y_train[ind]
X, y = shuffle(X, y, random_state=0)
self.X_train_batch.append(X)
self.y_train_batch.append(y)
self.X_train_batch = np.vstack(self.X_train_batch)
self.y_train_batch = np.hstack(self.y_train_batch)
self.current_pos = 0
def next_task(self):
if self.cur_iter >= self.max_iter:
raise Exception('Number of tasks exceeded!')
else:
ind = self.inds[self.cur_iter]
next_x_train = self.X_train[ind]
next_y_train = self.Y_train[ind]
ind = np.where(
np.logical_or(self.Y_test == self.sets_0[self.cur_iter], self.Y_test == self.sets_1[self.cur_iter]))[
0]
next_x_test = self.X_test[ind]
next_y_test = self.Y_test[ind]
self.cur_iter += 1
return next_x_train, next_y_train, next_x_test, next_y_test
class SplitMnistImbalancedGenerator(DataGenerator):
def __init__(self):
super().__init__()
train_dataset = datasets.MNIST('data/MNIST/', train=True, transform=mnist_transforms(),
download=True)
test_dataset = datasets.MNIST('data/MNIST/', train=False, transform=mnist_transforms(),
download=True)
train_loader = DataLoader(train_dataset, batch_size=len(train_dataset))
test_loader = DataLoader(test_dataset, batch_size=len(test_dataset))
self.X_train, self.Y_train = next(iter(train_loader))
self.X_train, self.Y_train = self.X_train.numpy(), self.Y_train.numpy()
self.X_test, self.Y_test = next(iter(test_loader))
self.X_test, self.Y_test = self.X_test.numpy(), self.Y_test.numpy()
self.sets_0 = [0, 2, 4, 6, 8]
self.sets_1 = [1, 3, 5, 7, 9]
self.max_iter = len(self.sets_0)
limit_per_task = 200
self.inds = []
        rs = np.random.RandomState(0)
"""
Example setup and run script for a 3d example with two fractures containing
an injection and production well, respectively.
"""
import logging
from typing import Tuple
import numpy as np
import porepy as pp
import utils
from fracture_propagation_model import THMPropagationModel
logger = logging.getLogger(__name__)
class Example3Model(THMPropagationModel, pp.THM):
"""
This class provides the parameter specification differing from examples 1 and 2.
"""
def _set_fields(self, params):
super()._set_fields(params)
self.length_scale = params["length_scale"]
self.initial_aperture = 3.0e-4 / self.length_scale
self.production_well_key = "production_well"
self.export_fields.append("well")
self.gravity_on = True
size = 1e3 / self.length_scale
self.box = {
"xmin": 0,
"xmax": size,
"ymin": 0,
"ymax": size,
"zmin": 0,
"zmax": size,
}
def _fractures(self):
"""
Define the two fractures.
The first fracture is the one where injection takes place.
"""
s = self.box["xmax"]
z_3 = s / 2
z_2 = 2 / 3 * s
z_1 = 1 / 3 * s
y_1 = 3 / 12 * s
y_2 = 5 / 12 * s
y_3 = 7 / 12 * s
y_4 = 9 / 12 * s
x_1 = 1 / 3 * s
x_2 = 2 / 3 * s
x_3 = 0.5 * s
f_1 = np.array(
[[x_1, x_1, x_2, x_2], [y_1, y_2, y_2, y_1], [z_3, z_3, z_3, z_3]]
)
f_2 = np.array(
[[x_3, x_3, x_3, x_3], [y_3, y_4, y_4, y_3], [z_2, z_2, z_1, z_1]]
)
self.fracs = [f_1, f_2]
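        # Each fracture is a 3x4 array of corner coordinates (rows x, y, z; one
        # column per corner): f_1 lies in the horizontal plane z = s/2, f_2 in the
        # vertical plane x = s/2.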
def create_grid(self):
self._fractures()
x = self.box["xmax"] - self.box["xmin"]
y = self.box["ymax"] - self.box["ymin"]
nx = self.params.get("nx")
ny = self.params.get("ny")
ncells = [nx, ny]
dims = [x, y]
if "zmax" in self.box:
nz = self.params.get("nz")
ncells.append(nz)
dims.append(self.box["zmax"] - self.box["zmin"])
gb = pp.meshing.cart_grid(self.fracs, ncells, physdims=dims)
s = self.box["xmax"]
# The following ugly code refines the grid around the two fractures
z = [0.44, 0.56]
x = [0.40, 0.6]
y0 = 10 / 26
y1 = 16 / 26
x_0 = 1 / 3 - 3 / 36
x_1 = 2 / 3 + 3 / 36
old = np.array([[x_0, x_1], [1 / 4, 3 / 4], [1 / 3, 2 / 3]]) * s
new = np.array([[x[0], x[1]], [y0, y1], [z[0], z[1]]]) * s
utils.adjust_nodes(gb, old, new)
# Ensure one layer of small cells around fractures
k = 0.8
dx = k * x[0] / (nx / 3)
dy = k * 0.25 / (ny / 4)
dz = k * z[0] / (nz / 3)
        old = np.array([[0, x_0], [0, 1 / 4], [0, 1 / 3 + dz]])
from __future__ import print_function
import numpy as np
from scipy import spatial
from . import wcsutils, utils, enmap, coordinates, fft, curvedsky
try: from . import sharp
except ImportError: pass
# Python 2/3 compatibility
try: basestring
except NameError: basestring = str
def thumbnails(imap, coords, r=5*utils.arcmin, res=None, proj="tan", apod=2*utils.arcmin,
order=3, oversample=4, pol=None, oshape=None, owcs=None, extensive=False, verbose=False,
filter=None,pixwin=False):
"""Given an enmap [...,ny,nx] and a set of coords [n,{dec,ra}], extract a set
of thumbnail images [n,...,thumby,thumbx] centered on each set of
coordinates. Each of these thumbnail images is projected onto a local tangent
plane, removing the effect of size and shape distortions in the input map.
If oshape, owcs are specified, then the thumbnails will have this geometry,
which should be centered on [0,0]. Otherwise, a geometry with the given
projection (defaults to "tan" = gnomonic projection) will be constructed,
going up to a maximum radius of r.
The reprojection involved in this operation implies interpolation. The default
    is to use fft rescaling to oversample the input pixels by the given factor, and
then use bicubic spline interpolation to read off the values at the output
pixel centers. The fft oversampling can be controlled with the oversample argument.
Values <= 1 turns this off. The other interpolation step is controlled using the
"order" argument. 0/1/3 corresponds to nearest neighbor, bilinear and bicubic spline
interpolation respectively.
If pol == True, then Q,U will be rotated to take into account the change in
    the local northward direction implied in the reprojection. The default is to
do polarization rotation automatically if the input map has a compatible shape,
e.g. at least 3 axes and a length of 3 for the 3rd last one. TODO: I haven't
tested this yet.
If extensive == True (not the default), then the map is assumed to contain an
extensive field rather than an intensive one. An extensive field is one where
the values in the pixels depend on the size of the pixel. For example, if the
inverse variance in the map is given per pixel, then this ivar map will be
extensive, but if it's given in units of inverse variance per square arcmin
then it's intensive.
For reprojecting inverse variance maps, consider using the wrapper thumbnails_ivar,
which makes it easier to avoid common pitfalls.
If pixwin is True, the pixel window will be deconvolved."""
# FIXME: Specifying a geometry manually is broken - see usage of r in neighborhood_pixboxes below
# Handle arbitrary coords shape
coords = np.asarray(coords)
ishape = coords.shape[:-1]
coords = coords.reshape(-1, coords.shape[-1])
# If the output geometry was not given explicitly, then build one
if oshape is None:
if res is None: res = min(np.abs(imap.wcs.wcs.cdelt))*utils.degree/2
oshape, owcs = enmap.thumbnail_geometry(r=r, res=res, proj=proj)
# Check if we should be doing polarization rotation
pol_compat = imap.ndim >= 3 and imap.shape[-3] == 3
if pol is None: pol = pol_compat
if pol and not pol_compat: raise ValueError("Polarization rotation requested, but can't interpret map shape %s as IQU map" % (str(imap.shape)))
nsrc = len(coords)
if verbose: print("Extracting %d %dx%d thumbnails from %s map" % (nsrc, oshape[-2], oshape[-1], str(imap.shape)))
opos = enmap.posmap(oshape, owcs)
# Get the pixel area around each of the coordinates
rtot = r + apod
apod_pix = utils.nint(apod/(np.min(np.abs(imap.wcs.wcs.cdelt))*utils.degree))
pixboxes = enmap.neighborhood_pixboxes(imap.shape, imap.wcs, coords, rtot)
# Define our output maps, which we will fill below
omaps = enmap.zeros((nsrc,)+imap.shape[:-2]+oshape, owcs, imap.dtype)
for si, pixbox in enumerate(pixboxes):
if oversample > 1:
# Make the pixbox fft-friendly
for i in range(2):
pixbox[1,i] = pixbox[0,i] + fft.fft_len(pixbox[1,i]-pixbox[0,i], direction="above", factors=[2,3,5])
ithumb = imap.extract_pixbox(pixbox)
if extensive: ithumb /= ithumb.pixsizemap()
ithumb = ithumb.apod(apod_pix, fill="median")
if pixwin: ithumb = enmap.apply_window(ithumb, -1)
if filter is not None: ithumb = filter(ithumb)
if verbose:
print("%4d/%d %6.2f %6.2f %8.2f %dx%d" % (si+1, nsrc, coords[si,0]/utils.degree, coords[si,1]/utils.degree, np.max(ithumb), ithumb.shape[-2], ithumb.shape[-1]))
# Oversample using fourier if requested. We do this because fourier
# interpolation is better than spline interpolation overall
if oversample > 1:
fshape = utils.nint(np.array(oshape[-2:])*oversample)
ithumb = ithumb.resample(fshape, method="fft")
# I apologize for the syntax. There should be a better way of doing this
ipos = coordinates.transform("cel", ["cel",[[0,0,coords[si,1],coords[si,0]],False]], opos[::-1], pol=pol)
ipos, rest = ipos[1::-1], ipos[2:]
omaps[si] = ithumb.at(ipos, order=order)
# Apply the polarization rotation. The sign is flipped because we computed the
# rotation from the output to the input
if pol: omaps[si] = enmap.rotate_pol(omaps[si], -rest[0])
if extensive: omaps *= omaps.pixsizemap()
# Restore original dimension
omaps = omaps.reshape(ishape + omaps.shape[1:])
return omaps
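# Usage sketch (illustrative; `imap`, `ivar` and `coords` are assumed to be an
# intensity map, its inverse-variance map and an [n,{dec,ra}] array in radians):
#   cutouts  = thumbnails(imap, coords, r=5*utils.arcmin)
#   ivar_cut = thumbnails_ivar(ivar, coords, r=5*utils.arcmin, extensive=True)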
def thumbnails_ivar(imap, coords, r=5*utils.arcmin, res=None, proj="tan",
oshape=None, owcs=None, extensive=True, verbose=False):
"""Like thumbnails, but for hitcounts, ivars, masks, and other quantities that
should stay positive and local. Remember to set extensive to True if you have an
extensive quantity, i.e. if the values in each pixel would go up if multiple pixels
combined. An example of this is a hitcount map or ivar per pixel. Conversely, if
you have an intensive quantity like ivar per arcmin you should set extensive=False."""
return thumbnails(imap, coords, r=r, res=res, proj=proj, oshape=oshape, owcs=owcs,
order=1, oversample=1, pol=False, extensive=extensive, verbose=verbose,
pixwin=False)
def map2healpix(imap, nside=None, lmax=None, out=None, rot=None, spin=[0,2], method="harm", order=1, extensive=False, bsize=100000, nside_mode="pow2", boundary="constant", verbose=False):
"""Reproject from an enmap to healpix, optionally including a rotation.
imap: The input enmap[...,ny,nx]. Stokes along the -3rd axis if
present.
nside: The nside of the healpix map to generate. Not used if
an output map is passed. Otherwise defaults to the same
resolution as the input map.
lmax: The highest multipole to use in any harmonic-space
operations. Defaults to the input maps' Nyquist limit.
out: An optional array [...,npix] to write the output map to.
The ... part must match the input map, as must the data
type.
rot: An optional coordinate rotation to apply. Either a string
"isys,osys", where isys is the system to transform from,
and osys is the system to transform to. Currently the values
"cel"/"equ" and "gal" are recognized. Alternatively, a tuple of
3 euler zyz euler angles can be passed, in the same convention
as healpy.rotate_alm.
spin: A description of the spin of the entries along the stokes
axis. Defaults to [0,2], which means that the first entry
is spin-0, followed by a spin-2 pair (any non-zero spin
covers a pair of entries). If the axis is longer than
what's covered in the description, then it is repeated as
necessary. Pass spin=[0] to disable any special treatment
of this axis.
method: How to interpolate between the input and output
pixelizations. Can be "harm" (default) or "spline".
"harm" maps between them using spherical harmonics
transforms. This preserves the power spectrum (so no window
function is introduced), and averages noise down when the
output pixels are larger than the input pixels. However, it
can suffer from ringing around very bright features, and an
all-positive input map may end up with small negative values.
"spline" instead uses spline interpolation to look up the
            value in the input map corresponding to each pixel center
in the output map. The spline order is controlled with the
"order" argument. Overall "harm" is best suited for normal
sky maps, while "spline" with order = 0 or 1 is best suited
for hitcount maps and masks.
order: The spline order to use when method="spline".
0 corresponds to nearest neighbor interpolation.
1 corresponds to bilinear interpolation (default)
3 corresponds to bicubic spline interpolation.
0 and 1 are local and do not introduce values outside
the input range, but introduce some aliasing and loss of
power. 3 has less power loss, but still non-zero, and
is vulnerable to ringing.
extensive: Whether the map represents an extensive (as opposed to
intensive) quantity. Extensive quantities have values
proportional to the pixel size, unlike intensive quantities.
Hitcount per pixel is an extensive quantity. Hitcount per
square degree is an intensive quantity, as is a temperature
map. Defaults to False.
bsize: The spline method operates on batches of pixels to save memory.
This controls the batch size, in pixels. Defaults to 100000.
nside_mode: Controls which restrictions apply to nside in the case where
it has to be inferred automatically. Can be "pow2", "mul32" and "any".
"pow2", the default, results in nside being a power of two, as
required by the healpix standard.
"mul32" relaxes this requirement, making a map where nside is a
multiple of 32. This is compatible with most healpix operations,
but not with ud_grade or the nest pixel ordering.
"any" allows for any integer nside.
boundary: The boundary conditions assumed for the input map when
method="spline". Defaults to "constant", which assumes that
anything outsize the map has a constant value of 0. Another
useful value is "wrap", which assumes that the right side
wraps over to the left, and the top to the bottom. See
scipy.ndimage.distance_transform's documentation for other,
less useful values. method="harm" always assumes "constant"
regardless of this setting.
verbose: Whether to print information about what it's doing.
Defaults to False, which doesn't print anything.
Typical usage:
* map_healpix = map2healpix(map, rot="cel,gal")
* ivar_healpix = map2healpix(ivar, rot="cel,gal", method="spline", spin=[0], extensive=True)
"""
# Get the map's typical resolution from cdelt
ires = np.mean(np.abs(imap.wcs.wcs.cdelt))*utils.degree
lnyq = np.pi/ires
if out is None:
if nside is None:
nside = restrict_nside(((4*np.pi/ires**2)/12)**0.5, nside_mode)
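        # nside is inferred from the pixel area: npix ~ 4*pi/ires**2 and
        # nside = sqrt(npix/12). For ~1 arcmin input pixels this is about 3518,
        # which the default "pow2" mode rounds up to 4096.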
out = np.zeros(imap.shape[:-2]+(12*nside**2,), imap.dtype)
npix = out.shape[-1]
opixsize = 4*np.pi/npix
# Might not be safe to go all the way to the Nyquist l, but looks that way to my tests.
if lmax is None: lmax = lnyq
if extensive:
imap = imap * (opixsize / imap.pixsizemap(broadcastable=True)) # not /= to avoid changing original imap
if method in ["harm", "harmonic"]:
# Harmonic interpolation preserves the power spectrum, but can introduce ringing.
# Probably not a good choice for positive-only quantities like hitcounts.
# Coordinate rotation is slow.
alm = curvedsky.map2alm(imap, lmax=lmax, spin=spin)
if rot is not None:
curvedsky.rotate_alm(alm, *rot2euler(rot), inplace=True)
curvedsky.alm2map_healpix(alm, out, spin=spin)
del alm
elif method == "spline":
# Covers both cubic spline interpolation (order=3), linear interpolation (order=1)
# and nearest neighbor (order=0). Harmonic interpolation is preferable to cubic
# splines, but linear and nearest neighbor may be useful. Coordinate rotation may
# be slow.
import healpy
imap_pre = utils.interpol_prefilter(imap, npre=-2, order=order, mode=boundary)
# Figure out if we need to compute polarization rotations
pol = imap.ndim > 2 and any([s != 0 for s,c1,c2 in enmap.spin_helper(spin, imap.shape[-3])])
# Batch to save memory
for i1 in range(0, npix, bsize):
i2 = min(i1+bsize, npix)
opix = np.arange(i1,i2)
pos = healpy.pix2ang(nside, opix)[::-1]
pos[1][:] = np.pi/2-pos[1]
if rot is not None:
# Not sure why the [::-1] is necessary here. Maybe psi,theta,phi vs. phi,theta,psi?
pos = coordinates.transform_euler(inv_euler(rot2euler(rot))[::-1], pos, pol=pol)
# The actual interpolation happens here
vals = imap_pre.at(pos[1::-1], order=order, prefilter=False, mode=boundary)
if rot is not None and imap.ndim > 2:
# Update the polarization to account for the new coordinate system
for s, c1, c2 in enmap.spin_helper(spin, imap.shape[-3]):
vals = enmap.rotate_pol(vals, -pos[2], spin=s, comps=[c1,c2-1], axis=-2)
out[...,i1:i2] = vals
else:
raise ValueError("Map reprojection method '%s' not recognized" % str(method))
return out
def healpix2map(iheal, shape=None, wcs=None, lmax=None, out=None, rot=None, spin=[0,2], method="harm", order=1, extensive=False, bsize=100000, verbose=False):
"""Reproject from healpix to an enmap, optionally including a rotation.
iheal: The input healpix map [...,npix]. Stokes along the -2nd axis if
present.
shape: The (...,ny,nx) shape of the output map. Only the last two entries
are used, the rest of the dimensions are taken from iheal.
Mandatory unless an output map is passed.
    wcs : The world coordinate system object of the output map.
Mandatory unless an output map is passed.
lmax: The highest multipole to use in any harmonic-space
operations. Defaults to 3 times the nside of iheal.
out: An optional enmap [...,ny,nx] to write the output map to.
The ... part must match iheal, as must the data type.
rot: An optional coordinate rotation to apply. Either a string
"isys,osys", where isys is the system to transform from,
and osys is the system to transform to. Currently the values
"cel"/"equ" and "gal" are recognized. Alternatively, a tuple of
3 euler zyz euler angles can be passed, in the same convention
as healpy.rotate_alm.
spin: A description of the spin of the entries along the stokes
axis. Defaults to [0,2], which means that the first entry
is spin-0, followed by a spin-2 pair (any non-zero spin
covers a pair of entries). If the axis is longer than
what's covered in the description, then it is repeated as
necessary. Pass spin=[0] to disable any special treatment
of this axis.
method: How to interpolate between the input and output
pixelizations. Can be "harm" (default) or "spline".
"harm" maps between them using spherical harmonics
transforms. This preserves the power spectrum (so no window
function is introduced), and averages noise down when the
output pixels are larger than the input pixels. However, it
can suffer from ringing around very bright features, and an
all-positive input map may end up with small negative values.
"spline" instead uses spline interpolation to look up the
            value in the input map corresponding to each pixel center
in the output map. The spline order is controlled with the
"order" argument. Overall "harm" is best suited for normal
sky maps, while "spline" with order = 0 or 1 is best suited
for hitcount maps and masks.
order: The spline order to use when method="spline".
0 corresponds to nearest neighbor interpolation.
1 corresponds to bilinear interpolation (default)
Higher order interpolation is not supported - use
method="harm" for that.
extensive: Whether the map represents an extensive (as opposed to
intensive) quantity. Extensive quantities have values
proportional to the pixel size, unlike intensive quantities.
Hitcount per pixel is an extensive quantity. Hitcount per
square degree is an intensive quantity, as is a temperature
map. Defaults to False.
bsize: The spline method operates on batches of pixels to save memory.
This controls the batch size, in pixels. Defaults to 100000.
verbose: Whether to print information about what it's doing.
Defaults to False, which doesn't print anything.
Typical usage:
* map = healpix2map(map_healpix, shape, wcs, rot="gal,cel")
* ivar = healpix2map(ivar_healpix, shape, wcs, rot="gal,cel", method="spline", spin=[0], extensive=True)
"""
iheal = np.asarray(iheal)
npix = iheal.shape[-1]
nside = curvedsky.npix2nside(npix)
ipixsize = 4*np.pi/npix
if out is None:
out = enmap.zeros(iheal.shape[:-1]+shape[-2:], wcs, dtype=iheal.dtype)
else: shape, wcs = out.geometry
if lmax is None: lmax = 3*nside
if method in ["harm", "harmonic"]:
# Harmonic interpolation preserves the power spectrum, but can introduce ringing.
# Probably not a good choice for positive-only quantities like hitcounts.
# Coordinate rotation is slow.
alm = curvedsky.map2alm_healpix(iheal, lmax=lmax, spin=spin)
if rot is not None:
curvedsky.rotate_alm(alm, *rot2euler(rot), inplace=True)
curvedsky.alm2map(alm, out, spin=spin)
del alm
elif method == "spline":
# Covers linear interpolation (order=1) and nearest neighbor (order=0).
# Coordinate rotation may be slow.
import healpy
if order > 1:
raise ValueError("Only order 0 and order 1 spline interpolation supported from healpix maps")
# Figure out if we need to compute polarization rotations
pol = iheal.ndim > 1 and any([s != 0 for s,c1,c2 in enmap.spin_helper(spin, iheal.shape[-2])])
# Batch to save memory
brow = (bsize+out.shape[-1]-1)//out.shape[-1]
for i1 in range(0, out.shape[-2], brow):
i2 = min(i1+brow, out.shape[-2])
pos = out[...,i1:i2,:].posmap().reshape(2,-1)[::-1]
if rot is not None:
# Not sure why the [::-1] is necessary here. Maybe psi,theta,phi vs. phi,theta,psi?
pos = coordinates.transform_euler(inv_euler(rot2euler(rot))[::-1], pos, pol=pol)
pos[1] = np.pi/2 - pos[1]
if order == 0:
# Nearest neighbor. Just read off from the pixels
vals = iheal[...,healpy.ang2pix(nside, pos[1], pos[0])]
else:
# Bilinear interpolation. healpy only supports one component at a time, so loop
vals = np.zeros(iheal.shape[:-1]+pos.shape[-1:], iheal.dtype)
for I in utils.nditer(iheal.shape[:-1]):
vals[I] = healpy.get_interp_val(iheal[I], pos[1], pos[0])
if rot is not None and iheal.ndim > 1:
# Update the polarization to account for the new coordinate system
for s, c1, c2 in enmap.spin_helper(spin, iheal.shape[-2]):
vals = enmap.rotate_pol(vals, -pos[2], spin=s, comps=[c1,c2-1], axis=-2)
out[...,i1:i2,:] = vals.reshape(vals.shape[:-1]+(i2-i1,-1))
else:
raise ValueError("Map reprojection method '%s' not recognized" % str(method))
if extensive:
out *= out.pixsizemap(broadcastable=True)/ipixsize
return out
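# --- Editor's illustration (added; not part of the original module) ----------
# Why the `extensive` branch above multiplies by the pixel-area ratio: an
# extensive quantity such as hits per pixel should be rescaled so the total is
# roughly conserved when output pixels cover a different solid angle than the
# input healpix pixels. A pure-numpy toy version of that idea:
_a_in = 4*np.pi/(12*64**2)                 # healpix pixel area for nside=64
_a_out = np.full((2, 2), 2.0*_a_in)        # pretend output pixels are 2x larger
_hits_in = np.full((2, 2), 10.0)           # 10 hits per input-sized pixel
assert np.allclose(_hits_in*_a_out/_a_in, 20.0)  # counts scale with pixel area
# ------------------------------------------------------------------------------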
def rot2euler(rot):
"""Given a coordinate rotation description, return the [rotz,roty,rotz] euler
angles it corresponds to. The rotation description can either be those angles
directly, or a string of the form isys,osys"""
gal2cel = np.array([57.06793215, 62.87115487, -167.14056929])*utils.degree
if isinstance(rot, basestring):
try: isys, osys = rot.split(",")
except ValueError:
raise ValueError("Rotation string must be of form 'isys,osys', but got '%s'" % str(rot))
R = spatial.transform.Rotation.identity()
# Handle input system
if isys in ["cel","equ"]: pass
elif isys == "gal": R *= spatial.transform.Rotation.from_euler("zyz", gal2cel)
else: raise ValueError("Unrecognized system '%s'" % isys)
# Handle output system
if osys in ["cel","equ"]: pass
elif osys == "gal": R *= spatial.transform.Rotation.from_euler("zyz", gal2cel).inv()
else: raise ValueError("Unrecognized system '%s'" % osys)
return R.as_euler("zyz")
else:
rot = np.asfarray(rot)
return rot
def inv_euler(euler): return [-euler[2], -euler[1], -euler[0]]
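# --- Editor's sanity-check sketch (added; assumes scipy is available) --------
# For "zyz" euler angles [a, b, c] the inverse rotation is obtained by
# reversing the order and flipping the signs, which is exactly what inv_euler
# returns. A quick check with scipy's Rotation (not called automatically):
def _check_inv_euler():
    import numpy as _np
    from scipy.spatial.transform import Rotation as _R
    ang = _np.array([0.3, 0.2, 0.1])
    R_fwd = _R.from_euler("zyz", ang)
    R_inv = _R.from_euler("zyz", inv_euler(ang))
    assert _np.allclose((R_fwd*R_inv).as_matrix(), _np.eye(3))
# ------------------------------------------------------------------------------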
def restrict_nside(nside, mode="mul32", round="ceil"):
"""Given an arbitrary Healpix nside, return one that's restricted in
various ways according to the "mode" argument:
"pow2": Restrict to a power of 2. This is required for compatibility
with the rarely used "nest" pixel ordering in Healpix, and is the standard
in the Healpix world.
"mul32": Restrict to multiple of 32, unless 12*nside**2<=1024.
This is enough to make the maps writable by healpy.
"any": No restriction
The "round" argument controls how any rounding is done. This can be one
of the strings "ceil" (default), "round" or "floor", or you can pass in
a custom function(nside) -> nside.
In all cases, the final nside is converted to an integer and capped
below at 1.
"""
if isinstance(round, basestring):
round = {"floor":np.floor, "round":np.round, "ceil":np.ceil}[round]
if mode == "any": nside = round(nside)
elif mode == "mul32":
if 12*nside**2 > 1024:
nside = round(nside/32)*32
elif mode == "pow2":
nside = 2**round(np.log2(nside))
else:
raise ValueError("Unrecognized nside mode '%s'" % str(mode))
nside = max(1,int(nside))
return nside
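# --- Editor's worked example (added for clarity) ------------------------------
# Expected behaviour of restrict_nside under the default round="ceil". The
# expressions below simply mirror the arithmetic in the function:
assert int(np.ceil(100/32)*32) == 128         # restrict_nside(100, "mul32") -> 128
assert 12*5**2 <= 1024                        # restrict_nside(5, "mul32") -> 5 (unchanged)
assert int(2**np.ceil(np.log2(100))) == 128   # restrict_nside(100, "pow2") -> 128
# -------------------------------------------------------------------------------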
################################
####### Old stuff below ########
################################
def centered_map(imap, res, box=None, pixbox=None, proj='car', rpix=None,
width=None, height=None, width_multiplier=1.,
rotate_pol=True, **kwargs):
"""Reproject a map such that its central pixel is at the origin of a
given projection system (default: CAR).
imap -- (Ny,Nx) enmap array from which to extract stamps
TODO: support leading dimensions
res -- width of pixel in radians
box -- optional bounding box of submap in radians
pixbox -- optional bounding box of submap in pixel numbers
proj -- coordinate system for target map; default is 'car';
'cea' is also accepted (these are the only projections allowed below)
rpix -- optional pre-calculated pixel positions from get_rotated_pixels()
"""
if imap.ndim==2: imap = imap[None,:]
ncomp = imap.shape[0]
proj = proj.strip().lower()
assert proj in ['car', 'cea']
# cut out a stamp assuming CAR ; TODO: generalize?
if box is not None:
pixbox = enmap.skybox2pixbox(imap.shape, imap.wcs, box)
if pixbox is not None:
omap = enmap.extract_pixbox(imap, pixbox)
else:
omap = imap
sshape, swcs = omap.shape, omap.wcs
# central pixel of source geometry
dec, ra = enmap.pix2sky(sshape, swcs, (sshape[0] / 2., sshape[1] / 2.))
dims = enmap.extent(sshape, swcs)
dheight, dwidth = dims
if height is None:
height = dheight
if width is None:
width = dwidth
width *= width_multiplier
tshape, twcs = rect_geometry(
width=width, res=res, proj=proj, height=height)
if rpix is None:
rpix = get_rotated_pixels(sshape, swcs, tshape, twcs, inverse=False,
pos_target=None, center_target=(0., 0.),
center_source=(dec, ra))
rot = enmap.enmap(rotate_map(omap, pix_target=rpix[:2], **kwargs), twcs)
if ncomp==3 and rotate_pol:
rot[1:3] = enmap.rotate_pol(rot[1:3], -rpix[2]) # for polarization rotation if enough components
return rot, rpix
def healpix_from_enmap_interp(imap, **kwargs):
return imap.to_healpix(**kwargs)
def healpix_from_enmap(imap, lmax, nside):
"""Convert an ndmap to a healpix map such that the healpix map is
band-limited up to lmax. Only supports single component (intensity)
currently. The resulting map will be band-limited. Bright sources and
sharp edges could cause ringing. Use healpix_from_enmap_interp if you
are worried about this (e.g. for a mask), but that routine will not ensure
power to be correct to some lmax.
Args:
imap: ndmap of shape (Ny,Nx)
lmax: integer specifying maximum multipole of map
nside: integer specifying nside of healpix map
Returns:
retmap: (Npix,) healpix map as array
"""
from pixell import curvedsky
import healpy as hp
alm = curvedsky.map2alm(imap, lmax=lmax, spin=0)
if alm.ndim > 1:
assert alm.shape[0] == 1
alm = alm[0]
retmap = hp.alm2map(alm.astype(np.complex128), nside, lmax=lmax)
return retmap
def enmap_from_healpix(hp_map, shape, wcs, ncomp=1, unit=1, lmax=0,
rot="gal,equ", first=0, is_alm=False, return_alm=False, f_ell=None):
"""Convert a healpix map to an ndmap using harmonic space reprojection.
The resulting map will be band-limited. Bright sources and sharp edges
could cause ringing. Use enmap_from_healpix_interp if you are worried
about this (e.g. for a mask), but that routine will not ensure power to
be correct to some lmax.
Args:
hp_map: an (Npix,) or (ncomp,Npix,) healpix map, or alms, or a string containing
the path to a healpix map on disk
shape: the shape of the ndmap geometry to project to
wcs: the wcs object of the ndmap geometry to project to
ncomp: the number of components in the healpix map (either 1 or 3)
unit: a unit conversion factor to divide the map by
lmax: the maximum multipole to include in the reprojection
rot: comma-separated string that specifies a coordinate rotation to
perform. Use None to perform no rotation. e.g. default "gal,equ"
to rotate a Planck map in galactic coordinates to the equatorial
coordinates used in ndmaps.
first: if a filename is provided for the healpix map, this specifies
the index of the first FITS field
is_alm: if True, interprets hp_map as alms
return_alm: if True, returns alms also
f_ell: optionally apply a transfer function f_ell(ell) -- this should be
a function of a single variable ell. e.g., lambda x: exp(-x**2/2/sigma**2)
Returns:
res: the reprojected ndmap, or a tuple (ndmap, alms) if return_alm
is True
"""
from pixell import curvedsky
import healpy as hp
dtype = np.float64
if not(is_alm):
assert ncomp == 1 or ncomp == 3, "Only 1 or 3 components supported"
ctype = np.result_type(dtype, 0j)
# Read the input maps
if type(hp_map) == str:
m = np.atleast_2d(hp.read_map(hp_map, field=tuple(
range(first, first + ncomp)))).astype(dtype)
else:
m = np.atleast_2d(hp_map).astype(dtype)
if unit != 1:
m /= unit
# Prepare the transformation
print("Preparing SHT")
nside = hp.npix2nside(m.shape[1])
lmax = lmax or 3 * nside
minfo = sharp.map_info_healpix(nside)
ainfo = sharp.alm_info(lmax)
sht = sharp.sht(minfo, ainfo)
alm =
|
np.zeros((ncomp, ainfo.nelem), dtype=ctype)
|
numpy.zeros
|
import numpy as np
import config
initial_black = np.uint64(0b00010000 << 24 | 0b00001000 << 32)
initial_white = np.uint64(0b00001000 << 24 | 0b00010000 << 32)
class Board:
def __init__(self, black=initial_black, white=initial_white):
self.black = black
self.white = white
self.black_array2d = bit_to_array(self.black, config.board_length).reshape((config.N, config.N))
self.white_array2d = bit_to_array(self.white, config.board_length).reshape((config.N, config.N))
def get_own_and_enemy(self, player):
if player == config.black:
return self.black, self.white
else:
return self.white, self.black
def get_own_and_enemy_array2d(self, player):
if player == config.black:
return self.black_array2d, self.white_array2d
else:
return self.white_array2d, self.black_array2d
def make_move(self, player, move):
if move == config.pass_move:
return Board(self.black, self.white)
bit_move = np.uint64(0b1 << move)
own, enemy = self.get_own_and_enemy(player)
flipped_stones = get_flipped_stones_bit(bit_move, own, enemy)
own |= flipped_stones | bit_move
enemy &= ~flipped_stones
if player == config.black:
return Board(own, enemy)
else:
return Board(enemy, own)
def get_legal_moves(self, player):
own, enemy = self.get_own_and_enemy(player)
legal_moves_without_pass = bit_to_array(get_legal_moves_bit(own, enemy), config.board_length)
if np.sum(legal_moves_without_pass) == 0:
return np.concatenate((legal_moves_without_pass, [1]))
else:
return np.concatenate((legal_moves_without_pass, [0]))
left_right_mask = np.uint64(0x7e7e7e7e7e7e7e7e)
top_bottom_mask = np.uint64(0x00ffffffffffff00)
corner_mask = left_right_mask & top_bottom_mask
def get_legal_moves_bit(own, enemy):
legal_moves = np.uint64(0)
legal_moves |= search_legal_moves_left(own, enemy, left_right_mask, np.uint64(1))
legal_moves |= search_legal_moves_left(own, enemy, corner_mask, np.uint64(9))
legal_moves |= search_legal_moves_left(own, enemy, top_bottom_mask, np.uint64(8))
legal_moves |= search_legal_moves_left(own, enemy, corner_mask, np.uint64(7))
legal_moves |= search_legal_moves_right(own, enemy, left_right_mask, np.uint64(1))
legal_moves |= search_legal_moves_right(own, enemy, corner_mask, np.uint64(9))
legal_moves |= search_legal_moves_right(own, enemy, top_bottom_mask, np.uint64(8))
legal_moves |= search_legal_moves_right(own, enemy, corner_mask, np.uint64(7))
legal_moves &= ~(own | enemy)
return legal_moves
def search_legal_moves_left(own, enemy, mask, offset):
return search_contiguous_stones_left(own, enemy, mask, offset) >> offset
def search_legal_moves_right(own, enemy, mask, offset):
return search_contiguous_stones_right(own, enemy, mask, offset) << offset
def get_flipped_stones_bit(bit_move, own, enemy):
flipped_stones = np.uint64(0)
flipped_stones |= search_flipped_stones_left(bit_move, own, enemy, left_right_mask, np.uint64(1))
flipped_stones |= search_flipped_stones_left(bit_move, own, enemy, corner_mask, np.uint64(9))
flipped_stones |= search_flipped_stones_left(bit_move, own, enemy, top_bottom_mask, np.uint64(8))
flipped_stones |= search_flipped_stones_left(bit_move, own, enemy, corner_mask, np.uint64(7))
flipped_stones |= search_flipped_stones_right(bit_move, own, enemy, left_right_mask, np.uint64(1))
flipped_stones |= search_flipped_stones_right(bit_move, own, enemy, corner_mask, np.uint64(9))
flipped_stones |= search_flipped_stones_right(bit_move, own, enemy, top_bottom_mask, np.uint64(8))
flipped_stones |= search_flipped_stones_right(bit_move, own, enemy, corner_mask, np.uint64(7))
return flipped_stones
def search_flipped_stones_left(bit_move, own, enemy, mask, offset):
flipped_stones = search_contiguous_stones_left(bit_move, enemy, mask, offset)
if own & (flipped_stones >> offset) == np.uint64(0):
return np.uint64(0)
else:
return flipped_stones
def search_flipped_stones_right(bit_move, own, enemy, mask, offset):
flipped_stones = search_contiguous_stones_right(bit_move, enemy, mask, offset)
if own & (flipped_stones << offset) ==
|
np.uint64(0)
|
numpy.uint64
|
from tensorboard.backend.event_processing import event_accumulator
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
import matplotlib.animation as animation
from matplotlib.pyplot import MultipleLocator
import pandas
plt.style.use('ggplot')
def read_tensorboard_data(tensorboard_path, val_name):
ea = event_accumulator.EventAccumulator(tensorboard_path)
ea.Reload()
val = ea.scalars.Items(val_name)
return val
def file_name(file_dir):
L=[]
for root, dirs, files in os.walk(file_dir):
for file in files:
L.append(os.path.join(root, file))
return L
if __name__ == "__main__":
map_names = ['2s_vs_1sc','2s3z',\
'5m_vs_6m','3s5z','1c3s5z','27m_vs_30m','bane_vs_bane','3s_vs_5z','6h_vs_8z',\
'corridor','3s5z_vs_3s6z','10m_vs_11m','MMM2','2c_vs_64zg']
title_names = [name.replace("_vs_"," vs. ") for name in map_names]
for map_name, title_name in zip(map_names,title_names):
plt.figure()
############################################################
max_steps = []
exp_name = "final_mappo"
data_dir = './' + map_name + '/' + map_name + '_' + exp_name + '.csv'
df = pandas.read_csv(data_dir)
key_cols = [c for c in df.columns if 'MIN' not in c and 'MAX' not in c]
key_step = [n for n in key_cols if n == 'Step']
key_win_rate = [n for n in key_cols if n != 'Step']
all_step = np.array(df[key_step])
all_win_rate =
|
np.array(df[key_win_rate])
|
numpy.array
|
#!/usr/bin/env python3
# coding=utf-8
import numpy as np
import tensorflow as tf
from sklearn import datasets
import kaiLogistic.logistic_from_mock_data_utils as kai
random_state = np.random.RandomState(1)
data, target = datasets.make_moons(202, noise=0.18, random_state=random_state)
target = np.array(target, dtype=np.float32)
# print('data=%s' % data)
# print('target=%s' % target)
data *= 5
b = tf.Variable(0, dtype=tf.float32)
w1 = tf.Variable([[0]], dtype=tf.float32)
w2 = tf.Variable([[0]], dtype=tf.float32)
w3 = tf.Variable([[0]], dtype=tf.float32)
w4 = tf.Variable([[0]], dtype=tf.float32)
w5 = tf.Variable([[0]], dtype=tf.float32)
w6 = tf.Variable([[0]], dtype=tf.float32)
x_data1 = tf.placeholder(shape=[None, 1], dtype=tf.float32)
x_data2 = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
use_method = 3
if use_method == 1:
result_matmul1 = tf.matmul(x_data1, w1)
result_matmul2 = tf.matmul(x_data2, w2)
result_add = result_matmul1 + result_matmul2 + b
elif use_method == 2:
result_matmul1 = tf.matmul(x_data1 ** 2, w1)
result_matmul2 = tf.matmul(x_data2 ** 2, w2)
result_add = result_matmul1 + result_matmul2 + b
elif use_method == 3:
result_1 = tf.matmul(x_data1, w1)
result_2 = tf.matmul(x_data2, w2)
result_3 = tf.matmul(x_data1 ** 2, w3)
result_4 = tf.matmul(x_data2 ** 2, w4)
result_5 = x_data1 ** 3 * w5
result_6 = x_data2 ** 3 * w6
result_add = b + result_1 + result_2 + result_3 + result_4 + result_5 + result_6
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=result_add, labels=y_target)
loss = tf.reduce_mean(loss)
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
var_x1 = np.array([x[0] for i, x in enumerate(data)])
var_x2 = np.array([x[1] for i, x in enumerate(data)])
data_amount = len(var_x1)
batch_size = 20
loss_vec = []
for step in range(10001):
rand_index = np.random.choice(data_amount, size=batch_size)
tmp1 = var_x1[rand_index]
tmp2 = [tmp1]
x1 = np.transpose(tmp2)
x2 = np.transpose([var_x2[rand_index]])
y = np.transpose([target[rand_index]])
sess.run(train, feed_dict={x_data1: x1, x_data2: x2, y_target: y})
if step % 200 == 0:
loss_value = sess.run(loss, feed_dict={x_data1: x1, x_data2: x2, y_target: y})
loss_vec.append(loss_value)
print('step=%d w1=%s w2=%s b=%s loss=%s' % (
step, sess.run(w1)[0, 0], sess.run(w2)[0, 0], sess.run(b), loss_value))
[[_w1]] = sess.run(w1)
[[_w2]] = sess.run(w2)
[[_w3]] = sess.run(w3)
[[_w4]] = sess.run(w4)
[[_w5]] = sess.run(w5)
[[_w6]] = sess.run(w6)
_b = sess.run(b)
print('last B=%f W1=%f W2=%f W3=%f W4=%f W5=%f W6=%f' % (_b, _w1, _w2, _w3, _w4, _w5, _w6))
result_sigmoid = tf.sigmoid(result_add)
x1 = np.transpose([var_x1])
x2 =
|
np.transpose([var_x2])
|
numpy.transpose
|
#!/usr/bin/env python
"""
unit test for filters module
author: <NAME>
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import unittest
import numpy as np
import context
from evo.core import filters
from evo.core import lie_algebra as lie
# some synthetic poses for testing
# [0] [1]
poses_1 = [lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 1]))]
# [2] [3]
# [0] [1]
poses_2 = [lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0.99])), lie.se3(np.eye(3), np.array([0, 0, 1.0]))]
# [2] [3]
# [0] [1]
poses_3 = [lie.se3(np.eye(3), np.array([0, 0, 0.0])), lie.se3(np.eye(3), np.array([0, 0, 0.9])),
lie.se3(np.eye(3), np.array([0, 0, 0.99])), lie.se3(np.eye(3), np.array([0, 0, 0.999])),
lie.se3(np.eye(3), np.array([0, 0, 0.9999])), lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
lie.se3(np.eye(3), np.array([0, 0, 0.999999])), lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))]
# [6] [7]
# [0] [1]
poses_4 = [lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 1])),
lie.se3(np.eye(3), np.array([0, 0, 1])), lie.se3(np.eye(3), np.array([0, 0, 1]))]
# [2] [3]
class TestFilterPairsByPath(unittest.TestCase):
def test_poses1_all_pairs(self):
target_path = 1.0
tol = 0.0
id_pairs = filters.filter_pairs_by_path(
poses_1, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 2), (2, 3)])
def test_poses1_wrong_target(self):
target_path = 2.5
tol = 0.0
id_pairs = filters.filter_pairs_by_path(
poses_1, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [])
def test_poses2_all_pairs_low_tolerance(self):
target_path = 1.0
tol = 0.001
id_pairs = filters.filter_pairs_by_path(
poses_2, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 3)])
def test_convergence_all_pairs(self):
target_path = 1.0
tol = 0.2
id_pairs = filters.filter_pairs_by_path(
poses_3, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 7)])
class TestFilterPairsByDistance(unittest.TestCase):
def test_poses1_all_pairs(self):
target_path = 1.0
tol = 0.0
id_pairs = filters.filter_pairs_by_distance(
poses_1, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 3), (2, 3)])
def test_poses1_wrong_target(self):
target_path = 2.5
tol = 0.0
id_pairs = filters.filter_pairs_by_distance(
poses_1, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [])
def test_poses2_all_pairs_low_tolerance(self):
target_path = 1.0
tol = 0.001
id_pairs = filters.filter_pairs_by_distance(
poses_2, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 3)])
def test_poses4_all_pairs(self):
target_path = 1.0
tol = 0.2
id_pairs = filters.filter_pairs_by_distance(
poses_4, target_path, tol, all_pairs=True)
self.assertEqual(id_pairs, [(0, 1), (0, 2), (0, 3)])
# some synthetic poses for testing
axis = np.array([1, 0, 0])
poses_5 = [lie.se3(lie.so3_exp(axis, 0.0), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis, math.pi), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis, 0.0),
|
np.array([0, 0, 0])
|
numpy.array
|
#!/usr/bin/env python
import threading
import time
import cv2
import numpy as np
import pytesseract
OCR_SIZE = (500, 500)
TESSERACT_CONFIG="-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ"
DEBUG = 0
class BoardExtractor(object):
def __init__(self, frame):
# Constants
self.shape = frame.shape
self.target = self.get_rect(frame.shape, 0.75)
self.hull_boundary = self.get_rect(frame.shape, 0.9)
self.masked = None
self.letters = []
self.modifiers = []
self.lock = threading.Lock()
self.processing = False
self.ocr_result = None
# Hackish initialization for OpenCV windows to
# show on top with focus
window = cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)
cv2.setWindowProperty('Camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty('Camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
def run(self, cap):
global DEBUG
prev_result = None
while True:
start_time = time.time()
ret, frame = cap.read()
if frame.shape != self.shape:
raise RuntimeError("Image capture changed sizes!")
# Clear mask so it disappears in the UI if we
# fail to detect one.
self.masked = None
self.process(frame)
camera = cv2.rectangle(frame, self.target[0], self.target[1], (0, 255, 0), 1)
if self.masked is not None:
camera = cv2.addWeighted(camera, 0.8, self.masked, 0.2, 0.0)
if DEBUG > 0:
fps = 1 / (time.time() - start_time)
cv2.putText(camera, "Debug: %d" % DEBUG, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
cv2.putText(camera, "%0.2f" % fps, (25, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
cv2.imshow('Camera', camera)
cv2.setWindowProperty('Camera', cv2.WND_PROP_TOPMOST, 1)
with self.lock:
if self.ocr_result is not None:
print(self.ocr_result)
if prev_result is None:
prev_result = self.ocr_result
elif prev_result == self.ocr_result:
cv2.destroyWindow('Camera')
cv2.waitKey(1)
return prev_result
else:
prev_result = self.ocr_result
c = cv2.waitKey(1)
if c < 0:
continue
if c == 27:
break
if c == 68 or c == 100:
DEBUG = (DEBUG + 1) % 4
def process(self, source):
gray = self.preprocess(source)
contours = self.find_contours(gray)
if not contours:
return
if DEBUG >= 1:
img = gray.copy()
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for c in contours:
cv2.drawContours(img, [c], 0, (0, 255, 0), 2)
cv2.imshow("Contours", img)
else:
cv2.destroyWindow("Contours")
hull = self.find_board_hull(contours)
if hull is None:
return
# UI Feedback step
self.draw_mask(hull)
with self.lock:
processing = self.processing
gray_board = self.extract_board(gray, hull, OCR_SIZE)
letters = self.extract_letters(gray_board)
if DEBUG >= 2:
cv2.imshow('Gray Board', gray_board)
cv2.imshow('Letters', letters)
else:
cv2.destroyWindow('Gray Board')
cv2.destroyWindow('Letters')
color_board = self.extract_board(source, hull, OCR_SIZE)
modifiers = self.extract_modifiers(color_board)
if processing and len(self.letters) >= 20:
return
self.letters.append(letters)
if modifiers is not None:
self.modifiers.append(modifiers)
if not processing and len(self.letters) == 20:
self.start_ocr()
def preprocess(self, source):
img = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (5, 5), 1)
kernel =
|
np.ones((5, 5), np.uint8)
|
numpy.ones
|
import numpy as np
from aocd import get_data
from aoc_helper import submit_correct
test_input = '3,4,3,1,2'
test_answer = 5934
test_answer_2 = 26984457539
real_data_file = '/Users/andrewemmett/Projects/adventofcode/2021/data/aoc_2021_06.data'
day=6
def read_data(input_file):
with open(input_file, 'r') as file:
#data = read_data(real_data_file)#[int(line.strip()) for line in read_data(real_data_file)).split(',')]
data = file.read()
return data
# make array with elements
def count_fish(data, days):
new_school = np.array(data.split(','), dtype=int)
unique_fish, fish_counts =
|
np.unique(new_school, return_counts=True)
|
numpy.unique
|
# -*- coding: UTF-8 -*-
"""
JPEG Implementation Forensics Based on Eigen-Algorithms
@author: <NAME> (<EMAIL>)
"""
import os
import numpy as np
from PIL import Image, ExifTags
from scipy.fftpack import dct, idct
from skimage.util import view_as_blocks
class RecompressError(Exception):
pass
def imread_orientation(img_in):
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(img_in._getexif().items())
if exif is not None:
if orientation in exif.keys():
if exif[orientation] == 3:
img_in = img_in.rotate(180, expand=True)
elif exif[orientation] == 6:
img_in = img_in.rotate(270, expand=True)
elif exif[orientation] == 8:
img_in = img_in.rotate(90, expand=True)
return img_in
def jpeg_recompress_pil(img_path_in: str, img_path_out: str, img_shape: tuple = None, qtables_in=None,
check=False) -> None:
"""Re-compress a JPEG image using the same quantization matrix and PIL implementation.
Args:
img_path_in (str): path to input JPEG image.
img_path_out (str): path to output JPEG image.
img_shape (tuple): optional output (width, height); if given, the image is resized (respecting EXIF orientation) before re-compression.
qtables_in (np.array): quantization table to apply.
check (bool): check input and output quantization tables.
"""
# Read Data
img_in = Image.open(img_path_in)
if not qtables_in:
qtables_in = img_in.quantization
# Resize image
if img_shape is not None:
img_in = imread_orientation(img_in)
if (img_in.size[0] >= img_in.size[1] and img_shape[0] < img_shape[1]) or (
img_in.size[0] < img_in.size[1] and img_shape[0] >= img_shape[1]):
img_shape = [img_shape[1], img_shape[0]]
pass
img_in = img_in.resize(img_shape, Image.LANCZOS)
# Re-compress image
os.makedirs(os.path.split(img_path_out)[0], exist_ok=True)
img_in.save(img_path_out, format='JPEG', subsample='keep', qtables=qtables_in)
# Check qtables
if check:
img_out = Image.open(img_path_out)
qtables_out = img_out.quantization
img_out.close()
if qtables_in != qtables_out:
raise RecompressError('Input and output quantization tables are different.')
# Close
img_in.close()
def compute_jpeg_dct_Y(img_Y: np.ndarray) -> np.ndarray:
"""Compute block-wise DCT in a JPEG-like fashion
Args:
img_Y (np.array): luminance component of input JPEG image.
Returns:
img_blocks_dct (np.array): block-wise DCT
"""
# Parameters
B = 8
# Check B division and pad
dH, dW = np.asarray(img_Y.shape) % B
if dH != 0:
dH = B - dH
if dW != 0:
dW = B - dW
img_Y = np.pad(img_Y, ((0, dH), (0, dW)), mode='reflect')
# Split Into Blocks
img_blocks = view_as_blocks(img_Y, block_shape=(B, B))
img_blocks = np.reshape(img_blocks, (-1, B, B))
# Compute DCT
img_blocks_dct = dct(dct(img_blocks, axis=1, norm='ortho'), axis=2, norm='ortho')
return img_blocks_dct
def jpeg_compress_Y(img_Y: np.ndarray, qtable: np.ndarray, quant_fun: callable = np.round):
"""Simulate luminance component JPEG compression.
Args:
img_Y (np.array): luminance component of input JPEG image.
qtable (np.array): JPEG quantization table.
quant_fun (function): quantization function
Returns:
img_Y_comp (np.array): luminance component of output JPEG image.
"""
# Parameters
B = 8
# Check B division and pad
H, W = img_Y.shape
dH, dW = np.asarray(img_Y.shape) % B
if dH != 0:
dH = B - dH
if dW != 0:
dW = B - dW
img_Y = np.pad(img_Y, ((0, dH), (0, dW)), mode='reflect')
# Compute DCT
img_blocks_dct = compute_jpeg_dct_Y(img_Y - 128.)
# Quantize and de-quantize
img_blocks_dct_q = qtable * quant_fun(img_blocks_dct / qtable)
# Compute IDCT
img_blocks_idct = idct(idct(img_blocks_dct_q, axis=2, norm='ortho'), axis=1, norm='ortho')
# Reshape
img_Y_comp = np.zeros(img_Y.shape)
i = 0
for h in np.arange(0, img_Y.shape[0], B):
for w in np.arange(0, img_Y.shape[1], B):
img_Y_comp[h:h + B, w:w + B] = img_blocks_idct[i]
i += 1
img_Y_comp = np.clip(np.round(128. + img_Y_comp), 0, 255)
img_Y_comp = img_Y_comp[:H, :W]
return img_Y_comp
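# --- Editor's usage sketch (added; not part of the original code) -------------
# With a flat quantization table and the default quant_fun=np.round,
# jpeg_compress_Y just rounds the 8x8 DCT coefficients and transforms back, so
# the output keeps the input shape and stays inside the valid [0, 255] range.
def _example_jpeg_compress_Y():
    rng = np.random.default_rng(0)
    img_Y = rng.integers(0, 256, size=(16, 16)).astype(np.float64)
    qtable = np.ones((8, 8))
    img_Y_c = jpeg_compress_Y(img_Y, qtable)
    assert img_Y_c.shape == img_Y.shape
    assert img_Y_c.min() >= 0 and img_Y_c.max() <= 255
# -------------------------------------------------------------------------------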
def jpeg_feature(img_path: str) -> np.ndarray:
"""Extract JPEG feature.
Args:
img_path (str): path to input JPEG image.
Returns:
feature (np.array): feature vector
"""
# Params
zig_zag_idx = [0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63]
# Init
img = Image.open(img_path)
img.draft('YCbCr', None)
qtable = np.asarray(img.quantization[0])[zig_zag_idx].reshape((8, 8))
# Original Image Data
img_Y_0 = np.asarray(img, dtype=np.float32)[:, :, 0]
img_blocks_dct_0 = compute_jpeg_dct_Y(img_Y_0 - 128.)
# Loop over quantization functions
quant_fun_list = [np.round,
lambda x: np.floor(x + 0.5),
lambda x: np.ceil(x - 0.5),
]
feature = np.zeros((len(quant_fun_list), 64))
for q_idx, quant_fun in enumerate(quant_fun_list):
# First JPEG
img_Y_1 = jpeg_compress_Y(img_Y_0, qtable, quant_fun)
img_blocks_dct_1 = compute_jpeg_dct_Y(img_Y_1 - 128.)
# Second JPEG
img_Y_2 = jpeg_compress_Y(img_Y_1, qtable, quant_fun)
img_blocks_dct_2 = compute_jpeg_dct_Y(img_Y_2 - 128.)
# Feature
mse_single = np.mean((img_blocks_dct_0 - img_blocks_dct_1) ** 2, axis=0).reshape(-1)
mse_double =
|
np.mean((img_blocks_dct_1 - img_blocks_dct_2) ** 2, axis=0)
|
numpy.mean
|
"""
`Learn the Basics <intro.html>`_ ||
`Quickstart <quickstart_tutorial.html>`_ ||
**Tensors** ||
`Datasets & DataLoaders <data_tutorial.html>`_ ||
`Transforms <transforms_tutorial.html>`_ ||
`Build Model <buildmodel_tutorial.html>`_ ||
`Autograd <autograd_tutorial.html>`_ ||
`Optimization <optimization_tutorial.html>`_ ||
`Save & Load Model <saveloadrun_tutorial.html>`_
Tensors
==========================
Tensors are specialized data structures that are very similar to arrays and matrices.
In PyTorch, we use tensors to encode the inputs and outputs of a model, as well as the model’s parameters.
Tensors are similar to `NumPy’s <https://numpy.org/>`_ ndarrays, except that tensors can run on GPUs or other hardware accelerators. In fact, tensors and
NumPy arrays can often share the same underlying memory, eliminating the need to copy data (see :ref:`bridge-to-np-label`). Tensors
are also optimized for automatic differentiation (we'll see more about that later in the `Autograd <autograd_tutorial.html>`__
section). If you’re familiar with ndarrays, you’ll be right at home with the Tensor API. If not, follow along!
"""
import torch
import numpy as np
######################################################################
# Initializing a Tensor
# ~~~~~~~~~~~~~~~~~~~~~
#
# Tensors can be initialized in various ways. Take a look at the following examples:
#
# **Directly from data**
#
# Tensors can be created directly from data. The data type is automatically inferred.
data = [[1, 2],[3, 4]]
x_data = torch.tensor(data)
######################################################################
# **From a NumPy array**
#
# Tensors can be created from NumPy arrays (and vice versa - see :ref:`bridge-to-np-label`).
np_array = np.array(data)
x_np = torch.from_numpy(np_array)
###############################################################
# **From another tensor:**
#
# The new tensor retains the properties (shape, datatype) of the argument tensor, unless explicitly overridden.
x_ones = torch.ones_like(x_data) # retains the properties of x_data
print(f"Ones Tensor: \n {x_ones} \n")
x_rand = torch.rand_like(x_data, dtype=torch.float) # overrides the datatype of x_data
print(f"Random Tensor: \n {x_rand} \n")
######################################################################
# **With random or constant values:**
#
# ``shape`` is a tuple of tensor dimensions. In the functions below, it determines the dimensionality of the output tensor.
shape = (2,3,)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)
print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor}")
######################################################################
# --------------
#
######################################################################
# Attributes of a Tensor
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# Tensor attributes describe their shape, datatype, and the device on which they are stored.
tensor = torch.rand(3,4)
print(f"Shape of tensor: {tensor.shape}")
print(f"Datatype of tensor: {tensor.dtype}")
print(f"Device tensor is stored on: {tensor.device}")
######################################################################
# --------------
#
######################################################################
# Operations on Tensors
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# Over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing,
# indexing, slicing), sampling and more are
# comprehensively described `here <https://pytorch.org/docs/stable/torch.html>`__.
#
# Each of these operations can be run on the GPU (at typically higher speeds than on a
# CPU). If you’re using Colab, allocate a GPU by going to Runtime > Change runtime type > GPU.
#
# By default, tensors are created on the CPU. We need to explicitly move tensors to the GPU using
# ``.to`` method (after checking for GPU availability). Keep in mind that copying large tensors
# across devices can be expensive in terms of time and memory!
# We move our tensor to the GPU if available
if torch.cuda.is_available():
tensor = tensor.to('cuda')
######################################################################
# Try out some of the operations from the list.
# If you're familiar with the NumPy API, you'll find the Tensor API a breeze to use.
#
###############################################################
# **Standard numpy-like indexing and slicing:**
tensor = torch.ones(4, 4)
print('First row: ',tensor[0])
print('First column: ', tensor[:, 0])
print('Last column:', tensor[..., -1])
tensor[:,1] = 0
print(tensor)
######################################################################
# **Joining tensors** You can use ``torch.cat`` to concatenate a sequence of tensors along a given dimension.
# See also `torch.stack <https://pytorch.org/docs/stable/generated/torch.stack.html>`__,
# another tensor joining op that is subtly different from ``torch.cat``.
t1 = torch.cat([tensor, tensor, tensor], dim=1)
print(t1)
######################################################################
# **Arithmetic operations**
# This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value
y1 = tensor @ tensor.T
y2 = tensor.matmul(tensor.T)
y3 = torch.rand_like(tensor)
torch.matmul(tensor, tensor.T, out=y3)
# This computes the element-wise product. z1, z2, z3 will have the same value
z1 = tensor * tensor
z2 = tensor.mul(tensor)
z3 = torch.rand_like(tensor)
torch.mul(tensor, tensor, out=z3)
######################################################################
# **Single-element tensors** If you have a one-element tensor, for example by aggregating all
# values of a tensor into one value, you can convert it to a Python
# numerical value using ``item()``:
agg = tensor.sum()
agg_item = agg.item()
print(agg_item, type(agg_item))
######################################################################
# **In-place operations**
# Operations that store the result into the operand are called in-place. They are denoted by a ``_`` suffix.
# For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``.
print(tensor, "\n")
tensor.add_(5)
print(tensor)
######################################################################
# .. note::
# In-place operations save some memory, but can be problematic when computing derivatives because of an immediate loss
# of history. Hence, their use is discouraged.
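# As a small added illustration (not from the original tutorial) of why
# autograd dislikes in-place updates: modifying a leaf tensor that requires
# gradients in place is rejected by current PyTorch versions.
x = torch.ones(3, requires_grad=True)
try:
    x.add_(1)  # in-place update of a leaf tensor that requires grad
except RuntimeError as err:
    print(f"In-place update rejected: {err}")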
######################################################################
# --------------
#
######################################################################
# .. _bridge-to-np-label:
#
# Bridge with NumPy
# ~~~~~~~~~~~~~~~~~
# Tensors on the CPU and NumPy arrays can share their underlying memory
# locations, and changing one will change the other.
######################################################################
# Tensor to NumPy array
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
t = torch.ones(5)
print(f"t: {t}")
n = t.numpy()
print(f"n: {n}")
######################################################################
# A change in the tensor reflects in the NumPy array.
t.add_(1)
print(f"t: {t}")
print(f"n: {n}")
######################################################################
# NumPy array to Tensor
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
n = np.ones(5)
t = torch.from_numpy(n)
######################################################################
# Changes in the NumPy array reflect in the tensor.
|
np.add(n, 1, out=n)
|
numpy.add
|
import numpy as np
import pandas as pd
import pytest
import dask.dataframe as dd
import cudf
import dask_cudf
@pytest.mark.parametrize("agg", ["sum", "mean", "count", "min", "max"])
def test_groupby_basic_aggs(agg):
pdf = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
ddf = dask_cudf.from_cudf(gdf, npartitions=5)
a = getattr(gdf.groupby("x"), agg)().to_pandas()
b = getattr(ddf.groupby("x"), agg)().compute().to_pandas()
a.index.name = None
a.name = None
b.index.name = None
b.name = None
if agg == "count":
a["y"] = a["y"].astype(np.int64)
dd.assert_eq(a, b)
@pytest.mark.parametrize(
"func",
[
lambda df: df.groupby("x").agg({"y": "max"}),
pytest.param(
lambda df: df.groupby("x").y.agg(["sum", "max"]),
marks=pytest.mark.skip,
),
],
)
def test_groupby_agg(func):
pdf = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
ddf = dask_cudf.from_cudf(gdf, npartitions=5)
a = func(gdf).to_pandas()
b = func(ddf).compute().to_pandas()
a.index.name = None
a.name = None
b.index.name = None
b.name = None
dd.assert_eq(a, b)
@pytest.mark.xfail(reason="cudf issues")
@pytest.mark.parametrize(
"func",
[lambda df: df.groupby("x").std(), lambda df: df.groupby("x").y.std()],
)
def test_groupby_std(func):
pdf = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
ddf = dask_cudf.from_cudf(gdf, npartitions=5)
a = func(gdf.to_pandas())
b = func(ddf).compute().to_pandas()
a.index.name = None
a.name = None
b.index.name = None
dd.assert_eq(a, b)
# reason: getattr in cudf
@pytest.mark.parametrize(
"func",
[
pytest.param(
lambda df: df.groupby(["a", "b"]).x.sum(), marks=pytest.mark.xfail
),
pytest.param(
lambda df: df.groupby(["a", "b"]).sum(), marks=pytest.mark.xfail
),
pytest.param(
lambda df: df.groupby(["a", "b"]).agg({"x", "sum"}),
marks=pytest.mark.xfail,
),
],
)
def test_groupby_multi_column(func):
pdf = pd.DataFrame(
{
"a": np.random.randint(0, 20, size=1000),
"b": np.random.randint(0, 5, size=1000),
"x": np.random.normal(size=1000),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
ddf = dask_cudf.from_cudf(gdf, npartitions=5)
a = func(gdf).to_pandas()
b = func(ddf).compute().to_pandas()
dd.assert_eq(a, b)
def test_reset_index_multiindex():
df = cudf.DataFrame()
df["id_1"] = ["a", "a", "b"]
df["id_2"] = [0, 0, 1]
df["val"] = [1, 2, 3]
df_lookup = cudf.DataFrame()
df_lookup["id_1"] = ["a", "b"]
df_lookup["metadata"] = [0, 1]
gddf = dask_cudf.from_cudf(df, npartitions=2)
gddf_lookup = dask_cudf.from_cudf(df_lookup, npartitions=2)
ddf = dd.from_pandas(df.to_pandas(), npartitions=2)
ddf_lookup = dd.from_pandas(df_lookup.to_pandas(), npartitions=2)
# Note: 'id_2' has wrong type (object) until after compute
dd.assert_eq(
gddf.groupby(by=["id_1", "id_2"])
.val.sum()
.reset_index()
.merge(gddf_lookup, on="id_1")
.compute(),
ddf.groupby(by=["id_1", "id_2"])
.val.sum()
.reset_index()
.merge(ddf_lookup, on="id_1"),
)
@pytest.mark.parametrize("split_out", [1, 2, 3])
@pytest.mark.parametrize(
"column", ["c", "d", "e", ["b", "c"], ["b", "d"], ["b", "e"]]
)
def test_groupby_split_out(split_out, column):
df = pd.DataFrame(
{
"a": np.arange(8),
"b": [1, 0, 0, 2, 1, 1, 2, 0],
"c": [0, 1] * 4,
"d": ["dog", "cat", "cat", "dog", "dog", "dog", "cat", "bird"],
}
).fillna(0)
df["e"] = df["d"].astype("category")
gdf = cudf.from_pandas(df)
ddf = dd.from_pandas(df, npartitions=3)
gddf = dask_cudf.from_cudf(gdf, npartitions=3)
ddf_result = (
ddf.groupby(column)
.a.mean(split_out=split_out)
.compute()
.sort_values()
.dropna()
)
gddf_result = (
gddf.groupby(column)
.a.mean(split_out=split_out)
.compute()
.sort_values()
)
dd.assert_eq(gddf_result, ddf_result, check_index=False)
@pytest.mark.parametrize("dropna", [False, True, None])
@pytest.mark.parametrize(
"by", ["a", "b", "c", "d", ["a", "b"], ["a", "c"], ["a", "d"]]
)
def test_groupby_dropna(dropna, by):
# NOTE: This test is borrowed from upstream dask
# (dask/dask/dataframe/tests/test_groupby.py)
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, None, None, 7, 8],
"b": [1, None, 1, 3, None, 3, 1, 3],
"c": ["a", "b", None, None, "e", "f", "g", "h"],
"e": [4, 5, 6, 3, 2, 1, 0, 0],
}
)
df["b"] = df["b"].astype("datetime64[ns]")
df["d"] = df["c"].astype("category")
ddf = dask_cudf.from_cudf(df, npartitions=3)
if dropna is None:
dask_result = ddf.groupby(by).e.sum()
cudf_result = df.groupby(by).e.sum()
else:
dask_result = ddf.groupby(by, dropna=dropna).e.sum()
cudf_result = df.groupby(by, dropna=dropna).e.sum()
if by in ["c", "d"]:
# Lose string/category index name in cudf...
dask_result = dask_result.compute()
dask_result.index.name = cudf_result.index.name
dd.assert_eq(dask_result, cudf_result)
@pytest.mark.parametrize("myindex", [[1, 2] * 4, ["s1", "s2"] * 4])
def test_groupby_string_index_name(myindex):
# GH-Issue #3420
data = {"index": myindex, "data": [0, 1] * 4}
df = cudf.DataFrame(data=data)
ddf = dask_cudf.from_cudf(df, npartitions=2)
gdf = ddf.groupby("index").agg({"data": "count"})
assert gdf.compute().index.name == gdf.index.name
@pytest.mark.parametrize(
"agg_func",
[
lambda gb: gb.agg({"c": ["count"]}, split_out=2),
lambda gb: gb.agg({"c": "count"}, split_out=2),
lambda gb: gb.agg({"c": ["count", "sum"]}, split_out=2),
lambda gb: gb.count(split_out=2),
lambda gb: gb.c.count(split_out=2),
],
)
def test_groupby_split_out_multiindex(agg_func):
df = cudf.DataFrame(
{
"a": np.random.randint(0, 10, 100),
"b": np.random.randint(0, 5, 100),
"c": np.random.random(100),
}
)
ddf = dask_cudf.from_cudf(df, 5)
pddf = dd.from_pandas(df.to_pandas(), 5)
gr = agg_func(ddf.groupby(["a", "b"]))
pr = agg_func(pddf.groupby(["a", "b"]))
dd.assert_eq(gr.compute(), pr.compute())
@pytest.mark.parametrize("npartitions", [1, 2])
def test_groupby_multiindex_reset_index(npartitions):
df = cudf.DataFrame(
{"a": [1, 1, 2, 3, 4], "b": [5, 2, 1, 2, 5], "c": [1, 2, 2, 3, 5]}
)
ddf = dask_cudf.from_cudf(df, npartitions=npartitions)
pddf = dd.from_pandas(df.to_pandas(), npartitions=npartitions)
gr = ddf.groupby(["a", "c"]).agg({"b": ["count"]}).reset_index()
pr = pddf.groupby(["a", "c"]).agg({"b": ["count"]}).reset_index()
dd.assert_eq(
gr.compute().sort_values(by=["a", "c"]).reset_index(drop=True),
pr.compute().sort_values(by=["a", "c"]).reset_index(drop=True),
)
@pytest.mark.parametrize(
"groupby_keys", [["a"], ["a", "b"], ["a", "b", "dd"], ["a", "dd", "b"]]
)
@pytest.mark.parametrize(
"agg_func",
[
lambda gb: gb.agg({"c": ["count"]}),
lambda gb: gb.agg({"c": "count"}),
lambda gb: gb.agg({"c": ["count", "sum"]}),
lambda gb: gb.count(),
lambda gb: gb.c.count(),
],
)
def test_groupby_reset_index_multiindex(groupby_keys, agg_func):
df = cudf.DataFrame(
{
"a": np.random.randint(0, 10, 10),
"b": np.random.randint(0, 5, 10),
"c": np.random.randint(0, 5, 10),
"dd": np.random.randint(0, 5, 10),
}
)
ddf = dask_cudf.from_cudf(df, 5)
pddf = dd.from_pandas(df.to_pandas(), 5)
gr = agg_func(ddf.groupby(groupby_keys)).reset_index()
pr = agg_func(pddf.groupby(groupby_keys)).reset_index()
gf = gr.compute().sort_values(groupby_keys).reset_index(drop=True)
pf = pr.compute().sort_values(groupby_keys).reset_index(drop=True)
dd.assert_eq(gf, pf)
def test_groupby_reset_index_drop_True():
df = cudf.DataFrame(
{"a": np.random.randint(0, 10, 10), "b":
|
np.random.randint(0, 5, 10)
|
numpy.random.randint
|
import math
import itertools
import numpy as np
import pytest
import arim
import arim.geometry as g
DATASET_1 = dict(
# set1:
points1=g.Points.from_xyz(
np.array([0, 1, 1], dtype=np.float),
np.array([0, 0, 0], dtype=np.float),
np.array([1, 0, 2], dtype=np.float),
"Points1",
),
# set 2:
points2=g.Points.from_xyz(
np.array([0, 1, 2], dtype=np.float),
np.array([0, -1, -2], dtype=np.float),
np.array([0, 0, 1], dtype=np.float),
"Points2",
),
)
def test_are_points_aligned():
n = 10
z =
|
np.arange(n, dtype=np.float64)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
aid_bin
some useful functions that I want to keep separate to
keep the backbone script shorter
---------
@author: maikherbig
"""
import os,shutil,json,re,urllib
import numpy as np
import dclab
import h5py,time,datetime
import six,tarfile, zipfile
import hashlib
import warnings
import pathlib
import aid_img
from scipy.interpolate import RectBivariateSpline
from scipy.stats import gaussian_kde, skew
import aid_start #import a module that sits in the AIDeveloper folder
dir_root = os.path.dirname(aid_start.__file__)#ask the module for its origin
def save_aid_settings(Default_dict):
dir_settings = os.path.join(dir_root,"aid_settings.json")#dir to settings
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def splitall(path):
"""
Credit goes to <NAME>
SOURCE:
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def hashfile(fname, blocksize=65536, count=0, constructor=hashlib.md5,
hasher_class=None):
"""Compute md5 hex-hash of a file
Parameters
----------
fname: str or pathlib.Path
path to the file
blocksize: int
block size in bytes read from the file
(set to `0` to hash the entire file)
count: int
number of blocks read from the file
hasher_class: callable
deprecated, use `constructor` instead
constructor: callable
hash algorithm constructor
"""
if hasher_class is not None:
warnings.warn("The `hasher_class` argument is deprecated, please use "
"`constructor` instead.")
constructor = hasher_class
hasher = constructor()
fname = pathlib.Path(fname)
with fname.open('rb') as fd:
buf = fd.read(blocksize)
ii = 0
while len(buf) > 0:
hasher.update(buf)
buf = fd.read(blocksize)
ii += 1
if count and ii == count:
break
return hasher.hexdigest()
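# --- Editor's usage sketch (added) --------------------------------------------
# With blocksize and count set, only the first blocksize*count bytes enter the
# hash; hashfunction() below relies on this to hash at most ~1MB of a file.
def _example_hashfile():
    import tempfile, os
    data = bytes(range(16))
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(data)
    try:
        partial = hashfile(tmp.name, blocksize=4, count=2)  # hashes data[:8] only
        assert partial == hashlib.md5(data[:8]).hexdigest()
    finally:
        os.remove(tmp.name)
# -------------------------------------------------------------------------------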
def obj2bytes(obj):
"""Bytes representation of an object for hashing"""
if isinstance(obj, str):
return obj.encode("utf-8")
elif isinstance(obj, pathlib.Path):
return obj2bytes(str(obj))
elif isinstance(obj, (bool, int, float)):
return str(obj).encode("utf-8")
elif obj is None:
return b"none"
elif isinstance(obj, np.ndarray):
return obj.tobytes()
elif isinstance(obj, tuple):
return obj2bytes(list(obj))
elif isinstance(obj, list):
return b"".join(obj2bytes(o) for o in obj)
elif isinstance(obj, dict):
return obj2bytes(sorted(obj.items()))
elif hasattr(obj, "identifier"):
return obj2bytes(obj.identifier)
elif isinstance(obj, h5py.Dataset):
return obj2bytes(obj[0])
else:
raise ValueError("No rule to convert object '{}' to string.".
format(obj.__class__))
def hashfunction(rtdc_path):
"""Hash value based on file name and content"""
tohash = [os.path.basename(rtdc_path),
# Hash a maximum of ~1MB of the hdf5 file
hashfile(rtdc_path, blocksize=65536, count=20)]
return hashlib.md5(obj2bytes(tohash)).hexdigest()
def load_rtdc(rtdc_path):
"""
This function loads .rtdc files via h5py and takes care of catching all
errors
"""
try:
try:
#sometimes an error occurs when opening hdf files,
#therefore try opening a second time in case of an error.
#This is very strange, and seems like a dirty solution,
#but I never saw it failing two times in a row
rtdc_ds = h5py.File(rtdc_path, 'r')
except:
rtdc_ds = h5py.File(rtdc_path, 'r')
return False,rtdc_ds #failed=False
except Exception as e:
#There is an issue loading the files!
return True,e
def print_ram_example(n):
#100k cropped images (64x64 pix unsigned integer8) need 400MB of RAM:
Imgs = []
sizex,sizey = 64,64
for i in range(100000):
Imgs.append(np.random.randint(low=0,high=255,size=(sizex,sizey)))
Imgs = np.array(Imgs)
Imgs = Imgs.astype(np.uint8)
print(str(Imgs.shape[0]) + " images (uint8) of size " + str(sizex) + "x" + str(sizey) + " pixels take " +str(Imgs.nbytes/1048576.0) +" MB of RAM")
def calc_ram_need(crop):
crop = int(crop)
n=1000
#100k cropped images (64x64 pix unsigned integer8) need 400MB of RAM:
Imgs = []
sizex,sizey = crop,crop
for i in range(n):
Imgs.append(np.random.randint(low=0,high=255,size=(sizex,sizey)))
Imgs = np.array(Imgs)
Imgs = Imgs.astype(np.uint8)
MB = Imgs.nbytes/1048576.0 #Amount of RAM for 1000 images
MB = MB/float(n)
return MB
def metrics_using_threshold(scores,y_valid,threshold,target_index,thresh_on=True):
nr_target_init = float(len(np.where(y_valid==target_index)[0])) #number of target cells in the initial sample
conc_init = 100*nr_target_init/float(len(y_valid)) #concentration of the target cells in the initial sample
scores_in_function = np.copy(scores)
if thresh_on==True:
#First: check the scores_in_function of the sorting index and adjust them using the threshold
pred_thresh = np.array([1 if p>threshold else 0 for p in scores_in_function[:,target_index]])
#replace the corresponding column in the scores_in_function
scores_in_function[:,target_index] = pred_thresh
#Finally use argmax for the rest of the predictions (threshold can only be applied to one index)
pred = np.argmax(scores_in_function,axis=1)
ind = np.where( pred==target_index )[0] #which cells are predicted to be target cells?
y_train_prime = y_valid[ind] #get the correct label of those cells
where_correct = np.where( y_train_prime==target_index )[0] #where is this label equal to the target label
nr_correct = float(len(where_correct)) #how often was the correct label in the target
if len(y_train_prime)==0:
conc_target_cell=0
else:
conc_target_cell = 100.0*(nr_correct/float(len(y_train_prime))) #divide nr of correct target cells by nr of total target cells
if conc_init==0:
enrichment=0
else:
enrichment = conc_target_cell/conc_init
if nr_target_init==0:
yield_ = 0
else:
yield_ = (nr_correct/nr_target_init)*100.0
dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
return dic
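# --- Editor's worked toy example (added for clarity) ---------------------------
# Two classes, target_index=1, threshold=0.5: cells 1 and 2 are predicted as
# target and both really are target, so the target concentration rises from
# 50% to 100% (enrichment = 2) and every target cell is recovered (yield = 100%).
def _example_metrics_using_threshold():
    scores = np.array([[0.9, 0.1],
                       [0.4, 0.6],
                       [0.2, 0.8],
                       [0.7, 0.3]])
    y_valid = np.array([0, 1, 1, 0])
    dic = metrics_using_threshold(scores, y_valid, threshold=0.5, target_index=1)
    assert dic["enrichment"] == 2.0 and dic["yield_"] == 100.0
# --------------------------------------------------------------------------------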
def find_files(user_selected_path,paths,hashes):
assert len(paths) == len(hashes)
#Create a list of files in that folder
Paths_available,Fnames_available = [],[]
for root, dirs, files in os.walk(user_selected_path):
for file in files:
if file.endswith(".rtdc"):
Paths_available.append(os.path.join(root, file))
Fnames_available.append(file)
#Iterate through the list of given paths and search each measurement in Files
Paths_new,Info = [],[]
for i in range(len(paths)):
path_ = paths[i]
hash_ = hashes[i]
p,fname = os.path.split(path_)
#where does a file of that name exist:
ind = [fname_new == fname for fname_new in Fnames_available] #there could be several since they might originate from the same measurement, but differently filtered
paths_new = list(np.array(Paths_available)[ind]) #get the corresponding paths to the files
#Therefore, open the files and get the hashes
#hash_new = [(dclab.rtdc_dataset.RTDC_HDF5(p)).hash for p in paths_new] #since reading hdf sometimes causes an error, better try and repeat if necessary
hash_new = []
for p in paths_new:
failed,rtdc_ds = load_rtdc(p)
if failed:
print("Error occurred during loading file\n"+str(p)+"\n"+str(rtdc_ds))
else:
hash_new.append(hashfunction(p))
ind = [ h==hash_ for h in hash_new ] #where do the hashes agree?
#get the corresponding Path_new
path_new = list(np.array(paths_new)[ind])
if len(path_new)==1:
Paths_new.append(str(path_new[0]))
Info.append("Found the required file!")
elif len(path_new)==0:
Paths_new.append([])
Info.append("File missing!")
if len(path_new)>1:
Paths_new.append(str(path_new[0]))
Info.append("Found the required file multiple times! Choose one!")
return(Paths_new,Info)
#: Chunk size for storing HDF5 data
CHUNK_SIZE = 100
def store_contour(h5group,name, data, compression):
if not isinstance(data, (list, tuple)):
# single event
data = [data]
grp = h5group.require_group(name)
curid = len(grp.keys())
for ii, cc in enumerate(data):
grp.create_dataset("{}".format(curid + ii),
data=cc,
fletcher32=True,
compression=compression)
def store_image(h5group, name, data, compression, background=False):
"""Store image data in an HDF5 group
Parameters
----------
h5group: h5py.Group
The group (usually "events") where to store the image data
data: 2d or 3d ndarray
The image data. If 3d, then the first axis enumerates
the images.
compression: str
Dataset compression method
background: bool
If set to False (default), then the regular "image" is stored;
If set to True, then the background image ("image_bg") is
stored.
"""
if len(data.shape) == 2:
# single event
data = data.reshape(1, data.shape[0], data.shape[1])
if name not in h5group:
maxshape = (None, data.shape[1], data.shape[2])
chunks = (CHUNK_SIZE, data.shape[1], data.shape[2])
dset = h5group.create_dataset(name,
data=data,
dtype=np.uint8,
maxshape=maxshape,
chunks=chunks,
fletcher32=True,
compression=compression)
# Create and Set image attributes:
# HDFView recognizes this as a series of images.
# Use np.string_ as per
# http://docs.h5py.org/en/stable/strings.html#compatibility
dset.attrs.create('CLASS',
|
np.string_('IMAGE')
|
numpy.string_
|
import numpy as np
from numpy.linalg import multi_dot as mm
from numpy import add as ma
from numpy import subtract as ms
from numpy.linalg import inv as miv
from numpy import identity as mid
class Kalman(object):
''' Kalman filter
State-transition equation: xp = F * x + w
Measurement equation: z = H * xp + v
x : State vector
z : Measurement vector
w : Process noise
v : Measurement noise
F : State space matrix
H : Measurement matrix
Q : Covariance of the process noise
R : Covariance of the measurement noise
P : Error covariance matrix
K : Kalman gain matrix
xp : Predicted state vector
Pp : Predicted error covariance matrix
x0 : Initial state vector
P0 : Initial error covariance matrix
'''
def __init__(self, x0, P0, F, H, Q, R):
self.x = x0
self.P = P0
self.F = F
self.H = H
self.Q = Q
self.R = R
self.P_list = []
self.K_list = []
def filter(self, z):
# Prediction
# xp = F * x
self.xp = mm([self.F, self.x])
# Pp = F * P * F' + Q
self.Pp = ma(mm([self.F, self.P, self.F.transpose()]), self.Q)
# Update prediction
# S = H * Pp * H' + R
self.S = ma(mm([self.H, self.Pp, self.H.transpose()]), self.R)
# K = Pp * H' * S^-1
self.K = mm([self.Pp, self.H.transpose(), miv(self.S)])
self.K_list.append(self.K)
# y = z - H * xp
self.y = ms(z,
|
mm([self.H, self.xp])
|
numpy.linalg.multi_dot
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from astropy.tests.helper import pytest
from ..filters import *
import numpy as np
import math
import astropy.table
import astropy.units as u
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
def test_ab_invalid_wlen():
with pytest.raises(ValueError):
ab_reference_flux(1 * u.s)
def test_validate_bad_wlen():
with pytest.raises(ValueError):
validate_wavelength_array(1. * u.Angstrom)
with pytest.raises(ValueError):
validate_wavelength_array([[1.]] * u.Angstrom)
with pytest.raises(ValueError):
validate_wavelength_array([1.] * u.Angstrom, min_length=2)
with pytest.raises(ValueError):
validate_wavelength_array([2., 1.] * u.Angstrom)
def test_validate_units():
validate_wavelength_array([1.])
validate_wavelength_array([1.] * u.m)
def test_validate_array():
wave = np.arange(1000., 6000., 100.)
wave2 = validate_wavelength_array(wave)
assert wave is wave2
validate_wavelength_array(wave + 500.)
validate_wavelength_array(wave - 500.)
def test_tabulate_wlen_units():
with pytest.raises(ValueError):
tabulate_function_of_wavelength(lambda wlen: 1, [1.])
with pytest.raises(ValueError):
tabulate_function_of_wavelength(lambda wlen: 1, [1.] * u.s)
def test_tabulate():
scalar_no_units = lambda wlen: math.sqrt(wlen)
scalar_with_units = lambda wlen: math.sqrt(wlen.value)
array_no_units = lambda wlen: 1. + np.sqrt(wlen)
array_with_units = lambda wlen: np.sqrt(wlen.value)
add_units = lambda fval: (lambda wlen: fval(wlen) * u.erg)
wlen = np.arange(1, 3) * u.Angstrom
for v in True, False:
# Test each mode without any function return units.
f1 = tabulate_function_of_wavelength(scalar_no_units, wlen, v)
assert f1[1] == None
f2 = tabulate_function_of_wavelength(scalar_with_units, wlen, v)
assert f2[1] == None
f3 = tabulate_function_of_wavelength(array_no_units, wlen, v)
assert f3[1] == None
f4 = tabulate_function_of_wavelength(array_with_units, wlen, v)
assert f4[1] == None
# Now test with return units.
g1 = tabulate_function_of_wavelength(
add_units(scalar_no_units), wlen, v)
assert np.array_equal(f1[0], g1[0]) and g1[1] == u.erg
g2 = tabulate_function_of_wavelength(
add_units(scalar_with_units), wlen, v)
assert np.array_equal(f2[0], g2[0]) and g2[1] == u.erg
g3 = tabulate_function_of_wavelength(
add_units(array_no_units), wlen, v)
assert np.array_equal(f3[0], g3[0]) and g3[1] == u.erg
g4 = tabulate_function_of_wavelength(
add_units(array_with_units), wlen, v)
assert np.array_equal(f4[0], g4[0]) and g4[1] == u.erg
def test_tabulate_not_func():
wlen = np.arange(1, 3) * u.Angstrom
for v in True, False:
with pytest.raises(ValueError):
tabulate_function_of_wavelength('not a function', wlen, v)
def test_tabulate_changing_units():
wlen = [1, 2] * u.Angstrom
f = lambda wlen: 1 * u.erg ** math.sqrt(wlen)
verbose = True
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
f = lambda wlen: 1 * u.erg ** math.sqrt(wlen.value)
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
f = lambda wlen: 1 if wlen < 1.5 else 1 * u.erg
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
f = lambda wlen: 1 if wlen.value < 1.5 else 1 * u.erg
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
f = lambda wlen: 1 if wlen > 1.5 else 1 * u.erg
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
f = lambda wlen: 1 if wlen.value > 1.5 else 1 * u.erg
with pytest.raises(RuntimeError):
tabulate_function_of_wavelength(f, wlen, verbose)
def test_response():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
FilterResponse(wlen, [0, 1, 0], meta)
FilterResponse(wlen, [0, 1, 0] * u.dimensionless_unscaled, meta)
def test_response_call():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
r = FilterResponse(wlen, [0, 1, 0], meta)
result = r(1.)
result = r(1. * u.Angstrom)
result = r(1. * u.micron)
result = r([1.])
result = r([1.] * u.Angstrom)
result = r([1.] * u.micron)
with pytest.raises(u.UnitConversionError):
result = r(1. * u.erg)
def test_response_bad():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
with pytest.raises(ValueError):
FilterResponse(wlen, [1, 2], meta)
with pytest.raises(ValueError):
FilterResponse(wlen, [1, 2, 3] * u.erg, meta)
with pytest.raises(ValueError):
FilterResponse(wlen, [0, -1, 0], meta)
with pytest.raises(ValueError):
FilterResponse(wlen, [0, 0, 0], meta)
with pytest.raises(ValueError):
FilterResponse(wlen, [1, 1, 0], meta)
with pytest.raises(ValueError):
FilterResponse(wlen, [0, 1, 1], meta)
def test_response_band_shift():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
r0 = FilterResponse(wlen, [0, 1, 0], meta)
r1 = FilterResponse(wlen, [0, 1, 0], meta, band_shift=1)
r2 = r0.create_shifted(band_shift=1)
assert np.array_equal(r1._wavelength, r0._wavelength / 2)
assert np.array_equal(r1._wavelength, r2._wavelength)
assert np.array_equal(r1.response, r0.response)
assert np.array_equal(r2.response, r0.response)
with pytest.raises(ValueError):
r = FilterResponse(wlen, [0, 1, 0], meta, band_shift=-1)
with pytest.raises(RuntimeError):
r1.save()
with pytest.raises(RuntimeError):
r2.create_shifted(1)
def test_response_trim():
wlen = [1, 2, 3, 4, 5]
meta = dict(group_name='g', band_name='b')
assert np.array_equal(
FilterResponse(wlen, [0, 0, 1, 1, 0], meta)._wavelength, [2, 3, 4, 5])
assert np.array_equal(
FilterResponse(wlen, [0, 1, 1, 0, 0], meta)._wavelength, [1, 2, 3, 4])
assert np.array_equal(
FilterResponse(wlen, [0, 0, 1, 0, 0], meta)._wavelength, [2, 3, 4])
def test_response_bad_meta():
wlen = [1, 2, 3]
resp = [0, 1, 0]
with pytest.raises(ValueError):
FilterResponse(wlen, resp, 123)
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict())
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name='g'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(band_name='b'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name=123, band_name='b'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name='0', band_name='b'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name='g-*', band_name='b'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name='g', band_name='b.ecsv'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name='g\n', band_name='b'))
with pytest.raises(ValueError):
FilterResponse(wlen, resp, dict(group_name=' g', band_name='b'))
def test_response_convolve():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
r = FilterResponse(wlen, [0, 1, 0], meta)
r.convolve_with_array([1, 3], [1, 1], interpolate=True)
def test_response_convolve_with_function():
wlen = [1, 2, 3]
resp = [0, 1, 0]
meta = dict(group_name='g', band_name='b')
filt = FilterResponse(wlen, resp, meta)
filt.convolve_with_function(lambda wlen: 1.)
filt.convolve_with_function(lambda wlen: 1. * u.erg)
filt.convolve_with_function(lambda wlen: 1. * u.erg, units=u.erg)
filt.convolve_with_function(lambda wlen: 1., units=u.erg)
with pytest.raises(ValueError):
filt.convolve_with_function(lambda wlen: 1., method='none')
with pytest.raises(ValueError):
filt.convolve_with_function(lambda wlen: 1. * u.m, units=u.erg)
def test_response_mag():
wlen = [1, 2, 3]
meta = dict(group_name='g', band_name='b')
r = FilterResponse(wlen, [0, 1, 0], meta)
r.get_ab_maggies(lambda wlen: 1.)
r.get_ab_maggies(lambda wlen: 1. * default_flux_unit)
r.get_ab_maggies([1., 1.], [1, 3])
r.get_ab_maggies([1, 1] * default_flux_unit, [1, 3])
r.get_ab_maggies([1, 1] * default_flux_unit,
[1, 3] * default_wavelength_unit)
r.get_ab_magnitude(lambda wlen: 1 * default_flux_unit)
r.get_ab_magnitude([1, 1] * default_flux_unit, [1, 3])
r.get_ab_magnitude([1, 1] * default_flux_unit,
[1, 3] * default_wavelength_unit)
def test_mag_wavelength_units():
# Check that non-default wavelength units are handled correctly.
wlen = [1, 2, 3] * u.Angstrom
meta = dict(group_name='g', band_name='b')
r = FilterResponse(wlen, [0, 1, 0], meta)
# Note that some margin is required to allow for roundoff error
# when converting wlen to the default units.
eps = 1e-6
wlen = [0.1 - eps, 0.3 + eps] * u.nm
flux = [1., 1.] * default_flux_unit
m1 = r.get_ab_maggies(flux, wlen)
m2 = r.get_ab_maggies(flux, wlen)
assert m1 == m2
def test_wavelength_property():
# Check that the wavelength property is working
wlen = [1, 2, 3] * u.Angstrom
meta = dict(group_name='g', band_name='b')
r = FilterResponse(wlen, [0,1,0], meta)
assert
|
np.allclose(r.wavelength, r._wavelength)
|
numpy.allclose
|
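# Hedged sketch, illustrative only: numpy.allclose, the API completing the
# truncated assert above, checks element-wise equality within relative and
# absolute tolerances rather than exact equality.
import numpy as np
wavelength = np.array([1.0, 2.0, 3.0])
assert np.allclose(wavelength, wavelength + 1e-12)    # within default rtol/atol
assert not np.allclose(wavelength, wavelength + 1.0)  # clearly outside tolerance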
"""Define LineModelCtc class and associated functions."""
from typing import Callable, Dict, Tuple
import editdistance
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model as KerasModel
from text_recognizer.datasets import EmnistLinesDataset # This downloads the synthetic handwriting lines dataset made from EMNIST characters.
from text_recognizer.datasets.dataset_sequence import DatasetSequence
from text_recognizer.models.base import Model  # Base Model class, to be subclassed by predictors for specific types of data.
from text_recognizer.networks.line_lstm_ctc import line_lstm_ctc # LSTM with CTC for handwritten text recognition within a line.
class LineModelCtc(Model):
"""Model for recognizing handwritten text in an image of a line, using CTC loss/decoding."""
def __init__(
self,
dataset_cls: type = EmnistLinesDataset,
network_fn: Callable = line_lstm_ctc,
dataset_args: Dict = None,
network_args: Dict = None,
):
"""Define the default dataset and network values for this model."""
default_dataset_args: dict = {}
if dataset_args is None:
dataset_args = {}
dataset_args = {**default_dataset_args, **dataset_args}
default_network_args = {"window_width": 12, "window_stride": 5}
if network_args is None:
network_args = {}
network_args = {**default_network_args, **network_args}
super().__init__(dataset_cls, network_fn, dataset_args, network_args)
self.batch_format_fn = format_batch_ctc
def loss(self):
"""Simply pass through the loss that we computed in the network."""
return {"ctc_loss": lambda y_true, y_pred: y_pred}
def metrics(self):
"""
Compute no metrics.
TODO: We could probably pass in a custom character accuracy metric for 'ctc_decoded' output here.
"""
return None
def evaluate(self, x, y, batch_size: int = 16, verbose: bool = True) -> float:
"""Evaluate model."""
test_sequence = DatasetSequence(x, y, batch_size, format_fn=self.batch_format_fn)
# We can use the `ctc_decoded` layer that is part of our model here.
decoding_model = KerasModel(inputs=self.network.input, outputs=self.network.get_layer("ctc_decoded").output)
preds = decoding_model.predict(test_sequence)
trues = np.argmax(y, -1)
pred_strings = ["".join(self.data.mapping.get(label, "") for label in pred).strip(" |_") for pred in preds]
true_strings = ["".join(self.data.mapping.get(label, "") for label in true).strip(" |_") for true in trues]
char_accuracies = [
1 - editdistance.eval(true_string, pred_string) / len(true_string)
for pred_string, true_string in zip(pred_strings, true_strings)
]
if verbose:
sorted_ind = np.argsort(char_accuracies)
print("\nLeast accurate predictions:")
for ind in sorted_ind[:5]:
print(f"True: {true_strings[ind]}")
print(f"Pred: {pred_strings[ind]}")
print("\nMost accurate predictions:")
for ind in sorted_ind[-5:]:
print(f"True: {true_strings[ind]}")
print(f"Pred: {pred_strings[ind]}")
print("\nRandom predictions:")
random_ind = np.random.randint(0, len(char_accuracies), 5)
for ind in random_ind: # pylint: disable=not-an-iterable
print(f"True: {true_strings[ind]}")
print(f"Pred: {pred_strings[ind]}")
mean_accuracy = np.mean(char_accuracies)
return mean_accuracy
def predict_on_image(self, image: np.ndarray) -> Tuple[str, float]:
"""Predict on a single input."""
softmax_output_fn = KerasModel(
inputs=[self.network.get_layer("image").input], outputs=[self.network.get_layer("softmax_output").output],
)
if image.dtype == np.uint8:
image = (image / 255).astype(np.float32)
# Get the prediction and confidence using softmax_output_fn, passing the right input into it.
input_image = np.expand_dims(image, 0)
softmax_output = softmax_output_fn.predict(input_image)
input_length = [softmax_output.shape[1]]
decoded, log_prob = K.ctc_decode(softmax_output, input_length, greedy=True)
pred_raw = K.eval(decoded[0])[0]
pred = "".join(self.data.mapping[label] for label in pred_raw).strip()
neg_sum_logit = K.eval(log_prob)[0][0]
conf =
|
np.exp(-neg_sum_logit)
|
numpy.exp
|
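# Hedged sketch with an illustrative value: the code above treats the score
# returned by K.ctc_decode as a negative summed logit, so exponentiating its
# negation (the numpy.exp completion) yields a confidence-like value in (0, 1].
import numpy as np
neg_sum_logit = 2.3                   # hypothetical -log(p) from the decoder
confidence = np.exp(-neg_sum_logit)   # ~0.10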
import os
import random
import numpy as np
from PIL import Image
import fnmatch
import sys
from subprocess import Popen, PIPE, STDOUT
if (sys.version_info >= (3,0)):
from queue import Queue
else:
from Queue import Queue
def recursive_glob(path, pattern):
for root, dirs, files in os.walk(path):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.abspath(os.path.join(root, basename))
if os.path.isfile(filename):
yield filename
class SpectrogramGenerator(object):
def __init__(self, source, config, shuffle=False, max_size=100, run_only_once=False):
self.source = source
self.config = config
self.queue = Queue(max_size)
self.shuffle = shuffle
self.run_only_once = run_only_once
if os.path.isdir(self.source):
files = []
files.extend(recursive_glob(self.source, "*.wav"))
files.extend(recursive_glob(self.source, "*.mp3"))
files.extend(recursive_glob(self.source, "*.m4a"))
else:
files = [self.source]
self.files = files
def audioToSpectrogram(self, file, pixel_per_sec, height):
'''
V0 - Verbosity level: ignore everything
c 1 - channel 1 / mono
n - apply filter/effect
        rate 10k - limit sampling rate to 10k --> max frequency 5kHz (Shannon-Nyquist theorem)
        y - small y: defines height
        X - capital X: defines pixels per second
        m - monochrome
r - no legend
o - output to stdout (-)
'''
file_name = "tmp_{}.png".format(random.randint(0, 100000))
command = "sox -V0 '{}' -n remix 1 rate 10k spectrogram -y {} -X {} -m -r -o {}".format(file, height, pixel_per_sec, file_name)
p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output, errors = p.communicate()
if errors:
print(errors)
# image = Image.open(StringIO(output))
image = Image.open(file_name)
os.remove(file_name)
return np.array(image)
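    # Hedged illustration of the command built above (file name and numbers
    # hypothetical): with height=129 and pixel_per_sec=50 it expands to roughly
    #   sox -V0 'clip.wav' -n remix 1 rate 10k spectrogram -y 129 -X 50 -m -r -o tmp_42.png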
def get_generator(self):
start = 0
while True:
file = self.files[start]
try:
target_height, target_width, target_channels = self.config["input_shape"]
image = self.audioToSpectrogram(file, self.config["pixel_per_second"], target_height)
image =
|
np.expand_dims(image, -1)
|
numpy.expand_dims
|
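# Hedged sketch: np.expand_dims(image, -1), as in the completion above, adds a
# trailing channel axis so a 2-D grayscale spectrogram matches an assumed
# (height, width, 1) input_shape. Shapes below are illustrative.
import numpy as np
image = np.zeros((129, 500), dtype=np.uint8)   # hypothetical spectrogram
image = np.expand_dims(image, -1)
assert image.shape == (129, 500, 1)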
import numpy as np
a = np.array([[1, 2], [3, 4]])
b =
|
np.array([[1, 1], [1, 1]])
|
numpy.array
|
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
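# Hedged usage note for the helper above (values illustrative, not from the
# test suite): _aligned_zeros((3, 4), dtype=np.float64, align=16) returns a
# zeroed array whose data pointer is 16-byte aligned but deliberately not
# 32-byte aligned, which is what alignment-sensitive tests need.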
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
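        # For reference, NaNs compare as the largest values here, e.g.
        #   np.sort(np.array([np.nan, 1, 0]))  ->  array([0., 1., nan])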
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            a.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
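        # Roughly speaking, each swap places the would-be median-of-3 pivot
        # near the end of the shrinking suffix, so a plain median-of-3
        # quicksort removes only O(1) elements per partition and degrades to
        # O(n^2); the sort is expected to detect this (e.g. via an
        # introsort-style depth limit) and stay fast.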
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
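        # (Illustration: since NaN sorts as the largest value, searchsorted
        #  must also treat NaN keys as larger than any finite value.)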
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
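            # (illustration: np.roll turns the sorted range into two
            #  ascending runs, the adversarial pattern referred to above;
            #  the median-of-medians fallback should keep these calls fast)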
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                # shuffle each row in place (a bare map() is lazy on Python 3)
                for row in d1:
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # shuffle each row in place (a bare map() is lazy on Python 3)
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
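        # (illustration: when both operands share the same data, e.g. x and
        #  x.T, BLAS can use a *syrk rank-k update instead of *gemm; the
        #  identity matrices below check that both paths give equal results)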
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
# check vector arg for contiguous before gemv
# gh-12156
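        # (illustration: matrix @ vector may be dispatched to BLAS *gemv,
        #  which assumes a contiguous/aligned vector; the broadcast and
        #  unaligned operands below must take a fallback path and still
        #  agree with results computed from plain copies)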
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A(object):
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
# zero in cblasfuncs (making it is specific to floating dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods(object):
    def test_array_contains(self):
        assert_(4.0 in np.arange(16.).reshape(4, 4))
        assert_(20.0 not in np.arange(16.).reshape(4, 4))
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
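    # For example, if type(other).__array_ufunc__ is None then
    # ndarray.__add__ returns NotImplemented, so Python falls back to
    # other.__radd__; this is what the "defer" cases below exercise.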
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
if sys.version_info >= (3, 5):
ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
                            assert_equal(arr_imethod(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
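# Summary of the dispatch rules exercised above (explanatory note): an
# operand that is not an ndarray subclass only wins the binary and in-place
# operators (ndarray returns NotImplemented) when it advertises a positive
# __array_priority__ and defines no __array_ufunc__; defining
# __array_ufunc__ instead routes every operator, including the in-place
# ones, through that hook; setting __array_ufunc__ = None makes ndarray
# defer the plain binops only; and ndarray subclasses ignore
# __array_priority__ altogether.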
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
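# Explanatory note: the legacy ``sig=`` keyword is normalized to
# ``signature=`` before __array_ufunc__ is invoked, so an override only ever
# has to handle one spelling, e.g. (illustrative, using the SomeClass above):
#     np.add(a, [1], sig='ii->i')        # kw contains 'signature': 'ii->i'
#     np.add(a, [1], signature='ii->i')  # identical kw, no 'sig' key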
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# not found among the inputs, so self must be one of the outputs.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
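# Explanatory note: when ``out=`` is supplied, NumPy only forwards it to
# __array_ufunc__ (always normalized to a tuple in kw['out']); the override
# itself is responsible for filling kw['out'][0], as OutClass does above.
# Nothing is written back automatically, which is why A holds the product
# even though the override returns None.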
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass(object):
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
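# Illustrative sketch, not part of the original suite: a minimal ndarray
# subclass whose __array_ufunc__ unwraps its inputs and forwards to the
# requested ufunc method, similar in spirit to class A above (which uses
# ``view(np.ndarray)`` instead of ``asarray``).  The helper name is
# hypothetical and the function is never called by the tests.
def _demo_forwarding_array_ufunc():
    import numpy as np

    class Forwarding(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Unwrap to plain ndarrays and let the default machinery run.
            unwrapped = [np.asarray(i) for i in inputs]
            return getattr(ufunc, method)(*unwrapped, **kwargs)

    a = np.arange(3.0).view(Forwarding)
    res = np.add(a, 1)
    # The result is a plain ndarray because the override forwarded to the
    # base implementation on unwrapped operands.
    assert type(res) is np.ndarray
    assert res.tolist() == [1.0, 2.0, 3.0]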
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
# the returned original must not have been modified by an elided in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the python stack, to
# check that we were called directly from python, is flawed as the object
# may still be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
# the returned original must not have been modified by an elided in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test that there is no elision on broadcast to a higher dimension;
# this only triggers the elision code path in debug mode, as triggering it in
# normal mode needs a matching dimension of at least 256kb, i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
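# Illustrative sketch, not part of the original suite: temporary elision is
# purely an internal optimization, so an expression that may reuse its
# intermediate buffer must match the fully materialized computation.  The
# helper name is hypothetical and the function is never called by the tests.
def _demo_temporary_elision_is_transparent():
    import numpy as np
    from numpy.testing import assert_array_equal

    d = np.ones(100000)
    # ``d + d + d`` is eligible to reuse the ``d + d`` temporary in place;
    # the explicit two-step form never is.  Results must agree either way.
    assert_array_equal(d + d + d, np.add(np.add(d, d), d))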
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
def test_highest_available_pickle_protocol(self):
try:
import pickle5
except ImportError:
pickle5 = None
if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
assert pickle.HIGHEST_PROTOCOL >= 5
else:
assert pickle.HIGHEST_PROTOCOL < 5
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to '
                                'use protocol 5 although it is not available'))
def test_correct_protocol5_error_message(self):
array = np.arange(10)
if sys.version_info[:2] in ((3, 6), (3, 7)):
# For the specific case of python3.6 and 3.7, raise a clear import
# error about the pickle5 backport when trying to use protocol=5
# without the pickle5 package
with pytest.raises(ImportError):
array.__reduce_ex__(5)
elif sys.version_info[:2] < (3, 6):
# when calling __reduce_ex__ explicitly with protocol=5 on a python
# version older than 3.6, raise a ValueError saying that protocol 5 is
# not available for this python version
with pytest.raises(ValueError):
array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
gc.collect()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
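# Illustrative sketch, not part of the original suite: with pickle protocol 5
# the array payload can travel out-of-band, and the receiver reassembles the
# array from the collected buffers rather than from one large bytes object.
# The helper name is hypothetical; it is a no-op where protocol 5 is missing.
def _demo_protocol5_out_of_band_roundtrip():
    import pickle
    import numpy as np
    from numpy.testing import assert_array_equal

    if pickle.HIGHEST_PROTOCOL < 5:
        return
    arr = np.arange(12.0).reshape(3, 4)
    buffers = []
    payload = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
    assert_array_equal(pickle.loads(payload, buffers=buffers), arr)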
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
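# Explanatory note on the table above: for floating point and complex input,
# argmax treats NaN as the maximum, so the index of the first element
# containing a NaN wins whenever one is present.  In the datetime/timedelta
# cases listed here, NaT entries are skipped and the largest regular value
# wins, falling back to index 0 when every entry is NaT.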
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
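# Illustrative sketch, not part of the original suite: the NaN rule checked
# in table form above, spelled out on a tiny array.  The helper name is
# hypothetical and the function is never called by the tests.
def _demo_argmax_prefers_nan():
    import numpy as np

    # NaN compares as the maximum, so its position is returned even though a
    # larger finite value follows it.
    assert np.argmax(np.array([0.0, np.nan, 2.0])) == 1
    # Without NaN the ordinary maximum wins.
    assert np.argmax(np.array([0.0, 1.0, 2.0])) == 2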
class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
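# Illustrative sketch, not part of the original suite: clip only bounds
# values it can compare, so NaN entries are neither raised to the minimum
# nor lowered to the maximum and pass through unchanged, exactly as
# test_nan above asserts.  The helper name is hypothetical.
def _demo_clip_passes_nan_through():
    import numpy as np
    from numpy.testing import assert_array_equal

    data = np.array([-2.0, np.nan, 3.0])
    assert_array_equal(data.clip(-1, 1), np.array([-1.0, np.nan, 1.0]))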
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
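# Illustrative sketch, not part of the original suite: np.lexsort sorts by
# its *last* key first, so in ``np.lexsort((b, a))`` the array ``a`` is the
# primary key and ``b`` only breaks ties, which is the convention the
# TestLexsort cases above rely on.  The helper name is hypothetical.
def _demo_lexsort_last_key_is_primary():
    import numpy as np
    from numpy.testing import assert_array_equal

    primary = np.array([2, 1, 1])
    secondary = np.array([0, 5, 3])
    # Sort by ``primary``; the tie between indices 1 and 2 is broken by
    # ``secondary`` (3 < 5), so index 2 comes first.
    assert_array_equal(np.lexsort((secondary, primary)), [2, 1, 0])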
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. going through str loses precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
# gh-12300
with open(self.filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(self.filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, self.filename, dtype=object)
def _check_from(self, s, value, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
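# Explanatory note: _check_from feeds the same payload through two paths,
# the in-memory parsers (np.frombuffer for raw binary, np.fromstring when a
# separator is given) and np.fromfile after writing the bytes to a temporary
# file, so each of the tests below exercises both code paths with one call.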
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
with CommaDecimalPointLocale():
self.test_numbers()
self.test_nan()
self.test_inf()
self.test_counted_string()
self.test_ascii()
self.test_malformed()
self.test_tofile_sep()
self.test_tofile_format()
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat(object):
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
# for 1.14 all are set to non-writeable on the way to replacing the
# UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
with assert_warns(DeprecationWarning):
assert_(c.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(d.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(e.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
# UPDATEIFCOPY is removed.
assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
# do it multiple times to test it does not break the alloc cache, gh-9216
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_names(self):
# Unicode field names are converted to ascii on Python 2:
encodable_name = u'b'
assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
# But raises UnicodeEncodeError if it can't be encoded:
nonencodable_name = u'\uc3bc'
assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
def test_fromarrays_unicode(self):
# A single name string provided to fromarrays() is allowed to be unicode
# on both Python 2 and 3:
x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
# Test that we can sort with order as a unicode field name in both Python 2 and
# 3:
name = u'b'
x = np.array([1, 3, 2], dtype=[(name, int)])
x.sort(order=name)
assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
pytest.skip('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(object):
funcs = [_mean, _var, _std]
def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
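# Explanatory note: with ``n`` samples along the reduced axis,
# ``var(x, ddof=d)`` divides the summed squared deviations by ``n - d``, so
# ``var(x, ddof=d) * (n - d) == var(x, ddof=0) * n`` and, equivalently,
# ``std(x, ddof=d) * sqrt(n - d) == std(x, ddof=0) * sqrt(n)``; these are
# the identities compared above.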
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
# This fails if the sum inside mean is done in float16 instead
# of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
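# Added note (hedged, illustration only, not part of the original test suite):
# float16 carries a 10-bit significand, so a running float16 sum of ones stalls
# once it reaches 2048 (2048.0 + 1.0 rounds back to 2048.0). If the sum inside
# mean were done in float16, the result here would be roughly 2048/100000 ~ 0.02
# instead of 1, e.g.:
#   np.ones(100000, dtype='float16').sum(dtype='float16')  # stalls at 2048.0
#   np.ones(100000, dtype='float16').sum(dtype='float32')  # 100000.0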
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
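# Added note (hedged): unlike np.dot, np.vdot flattens both arguments and
# conjugates the first one, which is why the complex case above still equals 3:
# np.vdot(1j*np.eye(3), 1j*np.eye(3)) sums conj(1j)*1j over the diagonal,
# whereas np.dot on the same inputs would return the 3x3 matrix product -I.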
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(object):
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
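# Added note (hedged): aligned_array over-allocates a uint8 buffer, scans for
# the first offset whose data pointer is divisible by `align`, and returns a
# view starting there, so the result has the requested alignment without copying.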
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
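# Added note (hedged): the typecode string above covers bool ('?'), the signed
# integer codes b/h/i/l/q, their unsigned counterparts B/H/I/L/Q, the float
# codes e/f/d/g (half through long double) and the complex codes F/D/G.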
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast ufunc matmul output"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, '')
c = c.astype(tgt.dtype)
assert_array_equal(c, tgt)
def test_out_contiguous(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
v = np.array([1, 3], dtype=float)
tgt = np.dot(a, b)
tgt_mv = np.dot(a, v)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.base is out
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
c = self.matmul(v, a.T, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
# test out contiguous in only last dim
out = np.ones((10, 2), dtype=float)
c = self.matmul(a, b, out=out[::2, :])
assert_array_equal(c, tgt)
# test transposes of out, args
out = np.ones((5, 2), dtype=float)
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
m1 = np.arange(15.).reshape(5, 3)
m2 = np.arange(21.).reshape(3, 7)
m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
vc = np.arange(10.)
vr = np.arange(6.)
m0 = np.zeros((3, 0))
@pytest.mark.parametrize('args', (
# matrix-matrix
(m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
# matrix-matrix-transpose, contiguous and non
(m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
(m3, m3.T), (m3.T, m3),
# matrix-matrix non-contiguous
(m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
# vector-matrix, matrix-vector, contiguous
(m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
# vector-matrix, matrix-vector, vector non-contiguous
(m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
# vector-matrix, matrix-vector, matrix non-contiguous
(m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
# vector-matrix, matrix-vector, both non-contiguous
(m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
# size == 0
(m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
))
def test_dot_equivalent(self, args):
r1 = np.matmul(*args)
r2 = np.dot(*args)
assert_equal(r1, r2)
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_raises(self):
assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
def test_matmul_axes():
a = np.arange(3*4*5).reshape(3, 4, 5)
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
assert c.shape == (3, 4, 4)
d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
assert d.shape == (4, 4, 3)
e = np.swapaxes(d, 0, 2)
assert_array_equal(e, c)
f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
assert f.shape == (4, 5)
class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
assert_equal(np.alen(m), 2)
m = [1, 2, 3]
assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
assert_equal(np.alen(m), 2)
def test_singleton(self):
assert_equal(np.alen(5), 1)
class TestChoose(object):
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(object):
def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter(object):
# Simple, 2d tests
def test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Simple, 1d tests
def test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test mirror modes
def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[1], NEIGH_MODE['mirror'])
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
# Circular mode
def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
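# Added illustration (hedged, not part of the original tests): np.min_scalar_type
# returns the smallest dtype able to hold the given scalar, falling back to
# object for integers beyond 64 bits, e.g.:
#   np.min_scalar_type(255)      # dtype('uint8')
#   np.min_scalar_type(-1)       # dtype('int8')
#   np.min_scalar_type(2**16-1)  # dtype('uint16'), matching test_usigned_short above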
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
class TestNewBufferProtocol(object):
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_invalid_buffer_format(self):
# datetime64 cannot be used fully in a buffer yet
# Should be fixed in the next Numpy major release
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(3, dt)
assert_raises((ValueError, BufferError), memoryview, a)
assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError,
_multiarray_tests.get_buffer_info,
np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
def test_out_of_order_fields(self):
dt = np.dtype(dict(
formats=['<i4', '<i4'],
names=['one', 'two'],
offsets=[4, 0],
itemsize=8
))
# out-of-order fields cannot be represented by PEP3118
arr = np.empty(1, dt)
with assert_raises(ValueError):
memoryview(arr)
def test_max_dims(self):
a = np.empty((1,) * 32)
self._check_roundtrip(a)
@pytest.mark.skipif(sys.version_info < (2, 7, 7), reason="See gh-11115")
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
for dim in shape[::-1]:
t = dim * t
return t
# construct a memoryview with 33 dimensions
c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
m = memoryview(c_u8_33d())
assert_equal(m.ndim, 33)
assert_raises_regex(
RuntimeError, "ndim",
np.array, m)
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
assert_('&' in m.format)
assert_raises_regex(
ValueError, "format string",
np.array, m)
def test_error_message_unsupported(self):
# wchar has no corresponding numpy type - if this changes in future, we
# need a better way to construct an invalid memoryview format.
t = ctypes.c_wchar * 4
with assert_raises(ValueError) as cm:
np.array(t())
exc = cm.exception
if sys.version_info.major > 2:
with assert_raises_regex(
NotImplementedError,
r"Unrepresentable .* 'u' \(UCS-2 strings\)"
):
raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
def test_ctypes_struct_via_memoryview(self):
# gh-10528
class foo(ctypes.Structure):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
assert_equal(arr['a'], 1)
assert_equal(arr['b'], 2)
f.a = 3
assert_equal(arr['a'], 3)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
# ticket #2046, should not segfault, raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
class TestArrayInterface():
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@pytest.mark.parametrize('val, iface, expected', [
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {'shape': ()}, 0.5),
(f, {'shape': None}, TypeError),
(f, {'shape': (1, 1)}, [[0.5]]),
(f, {'shape': (2,)}, ValueError),
(f, {'strides': ()}, 0.5),
(f, {'strides': (2,)}, ValueError),
(f, {'strides': 16}, TypeError),
])
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {'typestr': 'f8'}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype('f8'))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == 'f8'
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype('f8'))
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_array_interface_empty_shape():
# See gh-7994
arr = np.array([1, 2, 3])
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
class DummyArray1(object):
__array_interface__ = interface1
# NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
# the interface data to bytes would invoke the bug this tests for, that
# __array_interface__ with shape=() is not allowed if the data is an object
# exposing the buffer interface
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
class DummyArray2(object):
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
arr2 = np.asarray(DummyArray2())
arr3 = arr[:1].reshape(())
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
_multiarray_tests.test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small memory cache in ctors.c
a =
|
np.zeros(1000)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
# Last updated : 2019/09/12
import time, sys, argparse, math
import numpy as np
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative, Command
from pymavlink import mavutil
import scipy
from scipy import special
import os
def get_distance_meters(location1,location2):
dLat = location2.lat - location1.lat
dLon = location2.lon - location1.lon
return math.sqrt((dLat**2 + dLon**2))*1.113195e5
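# Added usage sketch (hedged; the offsets below are made-up values): the helper
# above uses a flat-earth approximation in which one degree is taken as roughly
# 1.113195e5 meters, so it is only meant for waypoints that are close together.
def _distance_example(vehicle):
    home = vehicle.location.global_frame
    target = LocationGlobal(home.lat + 0.0001, home.lon + 0.0001, home.alt)
    print("approx. distance to target: %.2f m" % get_distance_meters(home, target))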
def servo_pwm(vehicle, servo_num, pwm):
"""
Set the PWM value of the given servo output via MAV_CMD_DO_SET_SERVO.
"""
msg = vehicle.message_factory.command_long_encode(
0, 0,
mavutil.mavlink.MAV_CMD_DO_SET_SERVO,
0,
servo_num,
pwm,
0,
0,
0, 0, 0)
vehicle.send_mavlink(msg)
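# Added usage note (hedged; channel and PWM values are illustrative only):
# typical RC servo pulses run roughly 1000-2000 us, and the servo number must
# match the autopilot's servo output configuration (e.g. SERVOx_FUNCTION on
# ArduPilot), e.g.:
#   servo_pwm(vehicle, 9, 1900)   # drive AUX servo 9 towards one end stop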
def send_ned_velocity(vehicle, n, e, d, duration):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
n, e, d, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
for i in range(duration):
vehicle.send_mavlink(msg)
time.sleep(1)
vehicle.flush()
def send_velocity(vehicle, velocity_x, velocity_y, velocity_z):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
vehicle.send_mavlink(msg)
""" for i in range(duration):
vehicle.send_mavlink(msg)
time.sleep(1) """
vehicle.flush()
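# Added note (hedged): the two velocity helpers above differ only in the MAVLink
# frame. MAV_FRAME_LOCAL_NED velocities are expressed along the local
# North/East/Down axes, while MAV_FRAME_BODY_OFFSET_NED velocities follow the
# vehicle's current heading (forward/right/down). Illustrative calls:
#   send_ned_velocity(vehicle, 1, 0, 0, 5)   # north at 1 m/s, resent for ~5 s
#   send_velocity(vehicle, 1, 0, 0)          # forward at 1 m/s, single message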
def get_location_offset_meters(original_location, dNorth, dEast, alt):
earth_radius=6378137.0 #Radius of "spherical" earth
#Coordinate offsets in radians
dLat = dNorth/earth_radius
dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
#New position in decimal degrees
newlat = original_location.lat + (dLat * 180/math.pi)
newlon = original_location.lon + (dLon * 180/math.pi)
return LocationGlobal(newlat, newlon,original_location.alt+alt)
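# Added usage sketch (hedged; offsets are illustrative): the helper above can be
# combined with simple_goto to build a guided waypoint a few meters away, e.g.
def _offset_waypoint_example(vehicle):
    here = vehicle.location.global_frame
    wp = get_location_offset_meters(here, 10, 5, 0)  # 10 m north, 5 m east
    vehicle.simple_goto(wp)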
def arm_and_takeoff(vehicle, alt):
print("Pre-arm Checking ...")
while not vehicle.is_armable:
print("Waiting for vehicle to initialise ...")
time.sleep(5)
print("Init Finished")
vehicle.mode = VehicleMode("GUIDED")
time.sleep(1)
vehicle.armed = True
while not vehicle.armed:
print("Waiting for arming ...")
time.sleep(1)
vehicle.armed = True
print("Prepare to Take off !!")
vehicle.simple_takeoff(alt)
while True:
print("Alt : %s" %vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt >=alt*0.95:
print("Reach the alt.")
break
time.sleep(1)
def security_lock(vehicle,channel):
'''
Use the given RC channel to start/stop the mission on the Raspberry Pi. Low : Stop, High : Start
'''
while vehicle.channels[channel] > 1500:
print ("RC_%s :%s" %channel,vehicle.channels[channel])
print ("switch down to continue the mission")
time.sleep(1)
print ("Start Mission")
def stop_monitor(vehicle):
# Not working reliably yet
while True:
if vehicle.channels['8'] > 1500:
print("Stop mission ,RTL")
vehicle.mode = VehicleMode("RTL")
sys.exit(0)
def rbga(place):
global wp, sol
population = 10
generation = 1000
numofpoint = len(place)
#check for the start point
findorigin = np.where(place == 0)
for m in range(0,len(place)):
originlen = np.where( m == findorigin[0])
if len(originlen[0]) > 1:
origin = m
break
else:
origin = -1
if origin == -1:
place = np.insert(place,numofpoint,0,axis = 0 )
origin = numofpoint
numofpoint = len(place)
wp = np.zeros((numofpoint,2))
cost = np.zeros((numofpoint,numofpoint))
initial = np.zeros((population,numofpoint))
pathorder =
|
np.zeros((population,numofpoint))
|
numpy.zeros
|
import os
import datetime
import random  # needed for random.randint in make_sine_waves below
import tensorflow as tf
import numpy as np
from deepeeg import WaveNet, DilatedBlock, EEGWaveNetv4
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import utils as np_utils
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.wrappers import scikit_learn
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)
SAMPLE_RATE_HZ = 2000.0 # Hz
TRAIN_ITERATIONS = 400
SAMPLE_DURATION = 0.5 # Seconds
SAMPLE_PERIOD_SECS = 1.0 / SAMPLE_RATE_HZ
MOMENTUM = 0.95
GENERATE_SAMPLES = 1000
QUANTIZATION_CHANNELS = 256
NUM_SPEAKERS = 3
F1 = 155.56 # E-flat frequency in hz
F2 = 196.00 # G frequency in hz
F3 = 233.08 # B-flat frequency in hz
def make_sine_waves(global_conditioning):
"""Creates a time-series of sinusoidal audio amplitudes."""
sample_period = 1.0 / SAMPLE_RATE_HZ
times = np.arange(0.0, SAMPLE_DURATION, sample_period)
if global_conditioning:
LEADING_SILENCE = random.randint(10, 128)
amplitudes = np.zeros(shape=(NUM_SPEAKERS, len(times)))
amplitudes[0, 0:LEADING_SILENCE] = 0.0
amplitudes[1, 0:LEADING_SILENCE] = 0.0
amplitudes[2, 0:LEADING_SILENCE] = 0.0
start_time = LEADING_SILENCE / SAMPLE_RATE_HZ
times = times[LEADING_SILENCE:] - start_time
amplitudes[0, LEADING_SILENCE:] = 0.6 * np.sin(
times * 2.0 * np.pi * F1)
amplitudes[1, LEADING_SILENCE:] = 0.5 * np.sin(
times * 2.0 * np.pi * F2)
amplitudes[2, LEADING_SILENCE:] = 0.4 * np.sin(
times * 2.0 * np.pi * F3)
speaker_ids = np.zeros((NUM_SPEAKERS, 1), dtype=np.int)
speaker_ids[0, 0] = 0
speaker_ids[1, 0] = 1
speaker_ids[2, 0] = 2
else:
amplitudes = (np.sin(times * 2.0 * np.pi * F1) / 3.0 +
|
np.sin(times * 2.0 * np.pi * F2)
|
numpy.sin
|
#!/usr/bin/env python
import os,time,datetime
import glob
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
import matplotlib.animation as animate
from matplotlib_scalebar.scalebar import ScaleBar
import numpy as np
import nd2reader as nd2
import bioformats,javabridge
import warnings
warnings.filterwarnings("ignore")
from skimage import feature,morphology,restoration #Edge detection
from skimage.transform import warp,SimilarityTransform
from skimage.feature import ORB, match_descriptors,register_translation
from skimage import measure
from skimage.measure import ransac
from skimage.filters import sobel
from skimage.color import label2rgb
import scipy,cv2
from scipy import ndimage as ndi
from scipy.signal import savgol_filter
from sklearn.cluster import DBSCAN
from sklearn import metrics
class RecruitmentMovieAnalyzer(object):
def __init__(self):
self.nuclear_channel= 'TRITC'
self.irrad_frame = 'auto'
self.roi_buffer = [-10,0]
self.track_nucleus = True
self.autosave = True
self.save_direct = './MovieAnalysis_output/'
self.save_movie = True
self.save_roi_data = True
self.additional_rois= 0
#self.correct_bleach = True
self.bleach_frames = 0
self.threshold = -1
self.bg_correct = True
self.bleach_correct = True
def SetParameters(self,nuclear_channel='TRITC',protein_channel='EGFP',irrad_frame='auto',roi_buffer=[-10,0],track_nucleus=True,autosave=True,save_direct='./MovieAnalysis_output/',save_movie=True,save_roi_data=True,additional_rois=0,bleach_correct=True,bleach_frames=0,threshold=-1,bg_correct=True,verbose=True):
self.nuclear_channel= nuclear_channel
self.protein_channel= protein_channel
self.irrad_frame = irrad_frame
self.roi_buffer = roi_buffer
self.track_nucleus = track_nucleus
self.autosave = autosave
self.save_direct = save_direct
self.save_movie = save_movie
self.save_roi_data = save_roi_data
self.additional_rois= additional_rois
#self.correct_bleach = correct_bleach
self.bleach_frames = bleach_frames
self.threshold = threshold
if str(self.threshold).lower() == 'auto':
self.threshold = 3
self.bg_correct = bg_correct
self.bleach_correct = bleach_correct
self.verbose = verbose
if not os.path.isdir(self.save_direct):
os.mkdir(self.save_direct)
else:
print("WARNING: Directory "+self.save_direct+" already exists! Be aware that you may be overwriting files!!!")
# if self.save_movie:
# self.ffmpeg_writer = animate.writers['ffmpeg']
def LoadFile(self,video_file='',roi_file=''):
if not os.path.isfile(video_file):
print("ERROR: Cannot load file - "+video_file+" - File not found!")
vid_exists = False
else:
vid_exists = True
if not os.path.isfile(roi_file):
print("ERROR: Cannot load file - "+roi_file+" - File not found!")
roi_exists = False
else:
roi_exists = True
if roi_exists and vid_exists:
try:
self.video_list.append(video_file)
except:
self.video_list = [video_file]
try:
self.roif_list.append(roi_file)
except:
self.roif_list = [roi_file]
else:
print("File(s) missing. Cannot load desired experiment for analysis!!!")
def LoadDirectory(self,video_direct='',extension='.nd2',roi_extension='_ROI.tif'):
if not os.path.isdir(video_direct):
print("ERROR: Cannot load directory - "+video_direct+" - Directory not found!")
else:
self.video_direct = video_direct
filelist = glob.glob(os.path.join(video_direct,"*"+extension))
for vidFile in filelist:
roiFile = vidFile.replace(extension,roi_extension)
if not os.path.isfile(roiFile):
print("WARNING: Could not find ROI file ("+roiFile+") for video file ("+vidFile+")! Not adding files to processing list!!!")
else:
try:
self.video_list.append(vidFile)
except:
self.video_list = [vidFile]
try:
self.roif_list.append(roiFile)
except:
self.roif_list = [roiFile]
def ClearFileList(self):
self.video_list = []
self.Nfiles = 0
def ProcessOther(self,input_video):
#Process Metadata
this_omexmlstr = bioformats.get_omexml_metadata(input_video)
this_omexml = bioformats.OMEXML(this_omexmlstr)
these_pixels= this_omexml.image().Pixels
this_numt = these_pixels.get_SizeT()
this_numc = these_pixels.get_SizeC()
self.pix_res= these_pixels.get_PhysicalSizeX()
try:
final_time = these_pixels.Plane(index=this_numt*this_numc-1).DeltaT
self.has_time = True
if os.path.splitext(input_video)[1]==".czi":
#Zeiss microscopes don't count from zero, so need to correct for first time point
self.init_time = these_pixels.Plane(index=0).DeltaT
self.is_zeiss = True
final_time = final_time - self.init_time
else:
self.is_zeiss = False
except:
self.has_time = False
print("Warning: Unable to extract time points from movie! Please extract them by hand!")
print("Loading \""+input_video+"\" :")
if self.has_time:
print("\t\tMovie Length = "+str(np.around(final_time,decimals=2))+" seconds.")
else:
print("\t\tMovie Length = "+str(this_numt)+" frames.")
print("\t\tPixel Resolution = "+str(self.pix_res)+" um")
this_tsteps = np.zeros(this_numt,dtype=float) # We have to fill this up as we open images in bioformats...
self.roi_intensity_array = np.zeros((len(this_tsteps),1+(1+2*self.additional_rois)),dtype=int)
self.roi0_intensity_array = np.zeros((len(this_tsteps),2),dtype=int)
self.total_intensity_array = np.zeros(len(this_tsteps),dtype=float)
for self.ts in range(this_numt):
if self.has_time:
this_tsteps[self.ts] = these_pixels.Plane(index=this_numc*self.ts).DeltaT
if self.is_zeiss:
this_tsteps[self.ts] -= self.init_time
else:
this_tsteps[self.ts] = self.ts
this_nuc_frame = np.array(bioformats.load_image(input_video,c=self.nuclear_channel,t=self.ts,rescale=False))
this_nuc_path,this_nuc_points,this_nuc_fill = self.getNuclearMask(this_nuc_frame,sigma=self.threshold)
if self.ts==0:
self.first_nuc_points= np.copy(this_nuc_points)
self.first_nuc_frame = np.copy(this_nuc_frame)
self.first_nuc_fill = np.copy(this_nuc_fill)
shifted_nuc_fill = np.copy(this_nuc_fill)
else:
if self.track_nucleus:
shift,error,diffphase= register_translation(self.first_nuc_fill,this_nuc_fill)
else:
shift = np.array([0,0],dtype=int)
if (shift[0]!=0) or (shift[1]!=0):
shifted_nuc_fill = np.zeros_like(this_nuc_fill)
shifted_nuc_frame= np.zeros_like(this_nuc_frame)
N1 = len(shifted_nuc_fill)
N2 = len(shifted_nuc_fill[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx - shift[0] >= 0) and (idx2 - shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_nuc_fill[idx,idx2] = this_nuc_fill[idx-int(shift[0]),idx2-int(shift[1])]
this_nuc_points[:,0] -= int(shift[0])
this_nuc_points[:,1] -= int(shift[1])
else:
shifted_nuc_fill = np.copy(this_nuc_fill)
this_prot_frame = np.array(bioformats.load_image(input_video,c=self.protein_channel,t=self.ts,rescale=False))
if self.bg_correct:
this_chan_frame = self.correctBackground(this_prot_frame,input_video,self.protein_channel)
else:
this_chan_frame = np.copy(this_prot_frame)
this_vmin = np.min(this_prot_frame)
if self.ts == 0:
shifted_frame = np.copy(this_chan_frame)
else:
if (shift[0]!=0) or (shift[1]!=0):
shifted_frame = np.zeros_like(this_prot_frame)
N1 = len(shifted_frame)
N2 = len(shifted_frame[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx-shift[0] >= 0) and (idx2-shift[1] >= 0) and (idx-shift[0] < N1) and (idx2-shift[1] < N2):
shifted_frame[idx,idx2] = this_chan_frame[idx-int(shift[0]),idx2-int(shift[1])]
else:
shifted_frame = np.copy(this_chan_frame)
if self.save_movie:
self.SaveMovieFrame(this_chan_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='raw')
self.SaveMovieFrame(shifted_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='shifted')
for idx in range(-self.additional_rois,self.additional_rois+1):
this_roi_buffed = self.roi_buff_dict[idx]
this_roi_path = self.roi_path_dict[idx]
this_roi_pix = np.where(np.logical_and(this_roi_buffed>0,shifted_nuc_fill>0))
roi_intensity = np.sum(shifted_frame[this_roi_pix])
this_col_id = idx + self.additional_rois + 1
self.roi_intensity_array[self.ts,this_col_id] = roi_intensity
if idx == 0:
self.roi0_intensity_array[self.ts,1] = roi_intensity
all_nuc_prot_pix = np.where(shifted_nuc_fill > 0)
total_intensity = np.sum(shifted_frame[all_nuc_prot_pix])
self.total_intensity_array[self.ts] = total_intensity
if self.bleach_frames > 0 and self.bg_correct:
pre_bleached = np.average(self.total_intensity_array[:self.bleach_frames])
self.bleach_corrections = np.divide(pre_bleached,self.total_intensity_array)
self.roi_intensity_array[:,1:] = self.roi_intensity_array[:,1:]\
* self.bleach_corrections[:,np.newaxis]
self.bleach_correct_tot = np.multiply(self.total_intensity_array,self.bleach_corrections)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/np.average(this_col[:self.bleach_frames])
else:
self.bleach_correct_tot = np.copy(self.total_intensity_array)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
            for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/this_col[0]
#Print the intensity timeseries
ofile = open(self.this_ofile,'w')
nofile = open(self.this_nofile,'w')
ofile.write('Input Filename: '+input_video+'\n')
nofile.write('Input Filename: '+input_video+'\n')
now = datetime.datetime.now()
ofile.write('Analysis Date: '\
+now.strftime('%d-%m-%Y %H:%M:%S')\
+'\n')
nofile.write('Analysis Date: '\
+now.strftime('%d-%m-%Y %H:%M:%S')\
+'\n')
N_columns = 2 * self.additional_rois + 1 #All the ROIs, including 0
N_columns += 1 #Account for the time column
chan_center= 1+self.additional_rois
for idx in range(N_columns):
if idx==chan_center:
ofile.write(self.protein_channel)
nofile.write(self.protein_channel)
ofile.write(',')
nofile.write(',')
ofile.write("\nTime (s)")
nofile.write("\nTime (s)")
roi_tracker = np.arange(-self.additional_rois,self.additional_rois+1)
for idx in range(N_columns-1):
ofile.write(",ROI "+str(roi_tracker[idx]))
nofile.write(",ROI "+str(roi_tracker[idx]))
ofile.write('\n')
nofile.write('\n')
for tidx in range(this_numt):
ofile.write(str(this_tsteps[tidx]/1000.))
nofile.write(str(this_tsteps[tidx]/1000.))
for cidx in range(1,N_columns):
ofile.write(","+str(self.roi_intensity_array[tidx,cidx]))
nofile.write(","+str(self.normalized_intensity_array[tidx,cidx]))
ofile.write("\n")
nofile.write("\n")
ofile.close()
nofile.close()
#Make the intensity plot
plt.figure(figsize=(6,4))
plot_array = np.genfromtxt(self.this_nofile,skip_header=4,delimiter=',')
for idx in range(self.additional_rois+1):
plt.plot(plot_array[:,0],plot_array[:,idx+1],linestyle='',
marker='.',markersize=5,label='ROI '+str(roi_tracker[idx]))
plt.xlabel("Time (s)")
plt.ylabel("Normalized Intensity (A.U.)")
plt.legend(loc=0)
plt.tight_layout()
plt.savefig(self.this_noplot,format='pdf')
if self.save_movie:
movie_basename = os.path.basename(input_video)
extension = "."+movie_basename.split(".")[1]
raw_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,'_raw.mp4'))
shifted_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,"_drift_corrected.mp4"))
if os.path.isfile(raw_movie_file):
os.remove(raw_movie_file)
if os.path.isfile(shifted_movie_file):
os.remove(shifted_movie_file)
print(shifted_movie_file)
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_raw.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+raw_movie_file+" &> raw_movie_ffmpeg.log")
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_shifted.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+shifted_movie_file+" &> drift_corrected_movie_ffmpeg.log")
movie_frames = glob.glob(os.path.join(self.save_direct,"*.png"))
for FRAME in movie_frames:
os.remove(FRAME)
def ProcessND2(self,input_video):
this_video = nd2.reader.ND2Reader(input_video)
this_tsteps = this_video.get_timesteps()
this_chans = np.array(this_video.metadata['channels'],dtype=str)
this_pix = this_video.metadata['pixel_microns']
self.pix_res= float(this_pix)
print("Loading \""+input_video+"\" :")
print("\t\tMovie length = "+str(np.around(this_tsteps[-1]/1000.,decimals=2))+" seconds.")
print("\t\tChannel Names = "+str(this_chans))
print("\t\tPixel Resolution = "+str(this_pix)+" um")
nuc_chan_check = np.where(this_chans==self.nuclear_channel)[0]
if len(nuc_chan_check)==0:
print("ERROR: Nuclear channel( \""+self.nuclear_channel+"\") not found!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
elif len(nuc_chan_check)>1:
print("ERROR: Nuclear channel (\""+self.nuclear_channel+"\") is not unique!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
else:
nuc_chan = nuc_chan_check[0]
prot_chan_check = np.where(this_chans==self.protein_channel)[0]
if len(prot_chan_check) == 0:
print("ERROR: Protein channel (\""+self.protein_channel+"\") not found!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
elif len(prot_chan_check) > 1:
print("ERROR: Protein channel (\""+self.protein_channel+"\") is not unique!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
else:
prot_chan = prot_chan_check[0]
#Build the intensity timeseries array
self.roi_intensity_array = np.zeros((len(this_tsteps),1+(1+2*self.additional_rois)),dtype=int)
self.roi0_intensity_array = np.zeros((len(this_tsteps),2),dtype=int)
self.total_intensity_array = np.zeros(len(this_tsteps),dtype=float)
for self.ts in range(len(this_video)):
this_nuc_frame = this_video.get_frame_2D(c=nuc_chan,t=self.ts)
this_nuc_path,this_nuc_points,this_nuc_fill = self.getNuclearMask(this_nuc_frame,sigma=self.threshold)
if self.ts==0:
self.first_nuc_points= np.copy(this_nuc_points)
self.first_nuc_frame = np.copy(this_nuc_frame)
self.first_nuc_fill = np.copy(this_nuc_fill)
shifted_nuc_fill= np.copy(this_nuc_fill)
else:
if self.track_nucleus:
shift,error,diffphase= register_translation(self.first_nuc_fill,this_nuc_fill)
else:
shift = np.array([0,0],dtype=int)
if (shift[0]!=0) or (shift[1]!=0):
shifted_nuc_fill = np.zeros_like(this_nuc_fill)
shifted_nuc_frame= np.zeros_like(this_nuc_frame)
N1 = len(shifted_nuc_fill)
N2 = len(shifted_nuc_fill[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx - shift[0] >= 0) and (idx2 - shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_nuc_fill[idx,idx2] = this_nuc_fill[idx-int(shift[0]),idx2-int(shift[1])]
this_nuc_points[:,0] -= int(shift[0])
this_nuc_points[:,1] -= int(shift[1])
else:
shifted_nuc_fill = np.copy(this_nuc_fill)
this_prot_frame = this_video.get_frame_2D(c=prot_chan,t=self.ts)
if self.bg_correct:
this_chan_frame = self.correctBackground(this_prot_frame,this_video,prot_chan,is_nd2=True)
else:
this_chan_frame = np.copy(this_prot_frame)
            #Need to know minimum pixel count to adjust movie brightness
this_vmin = np.min(this_prot_frame)
if self.ts == 0:
shifted_frame = np.copy(this_prot_frame)
else:
if (shift[0]!=0) or (shift[1]!=0):
shifted_frame = np.zeros_like(this_prot_frame)
N1 = len(shifted_frame)
N2 = len(shifted_frame[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx-shift[0] >= 0) and (idx2-shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_frame[idx,idx2] = this_chan_frame[idx-int(shift[0]),idx2-int(shift[1])]
else:
shifted_frame = np.copy(this_chan_frame)
if self.save_movie:
self.SaveMovieFrame(this_chan_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='raw')
self.SaveMovieFrame(shifted_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='shifted')
for idx in range(-self.additional_rois,self.additional_rois+1):
this_roi_buffed = self.roi_buff_dict[idx]
this_roi_path = self.roi_path_dict[idx]
this_roi_pix = np.where(np.logical_and(this_roi_buffed>0,shifted_nuc_fill>0))
roi_intensity = np.sum(shifted_frame[this_roi_pix])
this_col_id = idx + self.additional_rois + 1
self.roi_intensity_array[self.ts,this_col_id] = roi_intensity
if idx==0:
self.roi0_intensity_array[self.ts,1] = roi_intensity
#Determine total intensity for bleach correction
all_nuc_prot_pix = np.where(shifted_nuc_fill > 0)
total_intensity = np.sum(shifted_frame[all_nuc_prot_pix])
self.total_intensity_array[self.ts] = total_intensity
if self.bleach_frames > 0 and self.bg_correct:
pre_bleached = np.average(self.total_intensity_array[:self.bleach_frames])
self.bleach_corrections = np.divide(pre_bleached,self.total_intensity_array)
self.roi_intensity_array[:,1:] = self.roi_intensity_array[:,1:]\
* self.bleach_corrections[:,np.newaxis]
self.bleach_correct_tot = np.multiply(self.total_intensity_array,self.bleach_corrections)
self.normalized_intensity_array= np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/np.average(this_col[:self.bleach_frames])
else:
self.bleach_correct_tot = np.copy(self.total_intensity_array)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
            for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/this_col[0]
        #Print the intensity timeseries
ofile = open(self.this_ofile,'w')
nofile= open(self.this_nofile,'w')
ofile.write("Input Filename: "+input_video+"\n")
nofile.write("Input Filename: "+input_video+"\n")
now = datetime.datetime.now()
ofile.write("Analysis Date: "\
+now.strftime("%d-%m-%Y %H:%M:%S")\
+"\n")
nofile.write("Analysis Date: "\
+now.strftime("%d-%m-%Y %H:%M:%S")\
+"\n")
N_columns = 2*self.additional_rois + 1 #All the ROIs, including 0
N_columns += 1 #Account for the time idx
chan_center = 1+self.additional_rois
for idx in range(N_columns):
if idx == chan_center:
ofile.write(self.protein_channel)
nofile.write(self.protein_channel)
ofile.write(",")
nofile.write(",")
ofile.write("\nTime (s)")
nofile.write("\nTime (s)")
roi_tracker = np.arange(-self.additional_rois,self.additional_rois+1)
for idx in range(N_columns-1):
ofile.write(",ROI "+str(roi_tracker[idx]))
nofile.write(",ROI "+str(roi_tracker[idx]))
ofile.write("\n")
nofile.write("\n")
for tidx in range(len(this_tsteps)):
ofile.write(str(this_tsteps[tidx]/1000.))
nofile.write(str(this_tsteps[tidx]/1000.))
for cidx in range(1,N_columns):
ofile.write(","+str(self.roi_intensity_array[tidx,cidx]))
nofile.write(","+str(self.normalized_intensity_array[tidx,cidx]))
ofile.write("\n")
nofile.write("\n")
ofile.close()
nofile.close()
#Plot the intensities
plt.figure(figsize=(6,4))
plot_array = np.genfromtxt(self.this_nofile,skip_header=4,
delimiter=',')
roi_idx = range(-self.additional_rois,self.additional_rois+1)
for idx in range(self.additional_rois+1):
plt.plot(plot_array[:,0],plot_array[:,idx+1],linestyle='',
marker='.',markersize=5,label='ROI '+str(roi_idx[idx]))
plt.xlabel("Time (s)")
plt.ylabel("Normalized Intensity (A.U.)")
plt.legend(loc=0)
plt.tight_layout()
plt.savefig(self.this_noplot,format='pdf')
if self.save_movie:
movie_basename = os.path.basename(input_video)
extension = "."+movie_basename.split(".")[-1]
raw_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,'_raw.mp4'))
shifted_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,"_drift_corrected.mp4"))
if os.path.isfile(raw_movie_file):
os.remove(raw_movie_file)
if os.path.isfile(shifted_movie_file):
os.remove(shifted_movie_file)
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_raw.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+raw_movie_file+" &> raw_movie_ffmpeg.log")
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_shifted.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+shifted_movie_file+" &> drift_corrected_movie_ffmpeg.log")
movie_frames = glob.glob(os.path.join(self.save_direct,"*.png"))
for FRAME in movie_frames:
os.remove(FRAME)
def SaveMovieFrame(self,frame,roi_path_dict={},vmin=0,suffix='raw'):
if suffix=='raw':
self.raw_frame_string = os.path.join(self.save_direct,
"frame%04d"%(self.ts)+"_raw.png")
save_string = self.raw_frame_string
elif suffix=='shifted':
self.shifted_frame_string = os.path.join(self.save_direct,
"frame%04d"%(self.ts)+"_shifted.png")
save_string = self.shifted_frame_string
plt.figure(figsize=(6.,6.))
ax = plt.subplot(111)
ax.imshow(frame,vmin=vmin)
ax.plot(self.first_nuc_points[:,1],self.first_nuc_points[:,0],color='red',linewidth=1.25)
i=0
for key in roi_path_dict:
this_roi_path = roi_path_dict[key]
this_patch = patches.PathPatch(this_roi_path,facecolor='none',linewidth=1.0,edgecolor='white')
i+=1
ax.add_patch(this_patch)
scalebar = ScaleBar(self.pix_res,'um',location=4)
plt.gca().add_artist(scalebar)
plt.tight_layout()
plt.savefig(save_string,format='png',dpi=300)
plt.close('all')
def ProcessFileList(self):
self.Nfiles = len(self.video_list)
for idx in range(self.Nfiles):
input_video = self.video_list[idx]
input_roif = self.roif_list[idx]
input_ext = os.path.splitext(input_video)[1]
self.output_prefix = os.path.join(self.save_direct,
os.path.splitext(
os.path.basename(input_video))[0])
#File that contains the ROI coordinates
this_roif = np.array(bioformats.load_image(input_roif,rescale=False))
self.roi_buff_dict,self.roi_path_dict = self.growROI(this_roif,self.roi_buffer)
#Output file for intensity timeseries
self.this_ofile = os.path.join(
self.save_direct,
os.path.basename(
input_video
).replace(input_ext,'.csv')
)
self.this_nofile = self.this_ofile.replace('.csv','_normalized.csv')
self.this_noplot = self.this_nofile.replace('.csv','.pdf')
#Currently tested importing nd2 files or TIFF files...theoretically can load any bioformats file
if input_ext=='.nd2':
self.ProcessND2(input_video)
else:
self.ProcessOther(input_video)
def getNuclearMask2(self,nuclear_frame,show_plots=False,radius=20):
temp_nuc= nuclear_frame.astype(float)
if str(self.threshold).lower() =='auto':
centerMask = np.zeros(np.shape(nuclear_frame),dtype=int)
xval= np.arange(len(centerMask[0]))
yval= np.arange(len(centerMask))
#center the values around a "zero"
xval= xval - np.median(xval)
yval= yval - np.median(yval)
            xval,yval = np.meshgrid(xval,yval)
#Determine threshold in a circle at the center of the frame
#(assumes that you've centered your microscope on the cell)
centerMask[np.where(xval**2+yval**2 < radius**2)] = 1
#Calculate the mean and std intensity in the region
            mean_int= np.average(temp_nuc[centerMask > 0])
            std_int = np.std(temp_nuc[centerMask > 0])
#Determine thresholding level
self.thresh_level = mean_int - 0.5*mean_int
#Check that the threshold level isn't too low
if self.thresh_level <= 0:
thresh_fact = 0.5
while self.thresh_level < mean_int:
thresh_fact = thresh_fact - 0.1
self.thresh_level = (mean_int - thresh_fact * std_int)
else:
try:
self.thresh_level = float(self.threshold)
if np.isnan(self.thresh_level):
print("Could not understand setting for threshold ("\
                        +str(self.threshold)+"). Assuming \"auto\".")
self.threshold = 'auto'
self.getNuclearMask2(nuclear_frame,
show_plots=show_plots,
radius=radius)
except:
print("Could not understand setting for threshold ("\
                    +str(self.threshold)+"). Assuming \"auto\".")
self.threshold = 'auto'
self.getNuclearMask2(nuclear_frame,
show_plots=show_plots,
radius=radius)
        #Find all points in image above threshold level
thresh_masked = np.zeros(temp_nuc.shape,dtype=int)
thresh_masked[np.where(temp_nuc>self.thresh_level)] = 1
thresh_masked = self.imclearborderAnalogue(thresh_masked,8)
thresh_masked = self.bwareaopenAnalogue(thresh_masked,500)
thresh_masked = scipy.ndimage.binary_fill_holes(thresh_masked)
labels = measure.label(thresh_masked,background=1)
props = measure.regionprops(labels)
if len(np.unique(labels))>1:
#We want the central object
best_r = 99999.9
xcent = int(len(thresh_masked)/2)
ycent = int(len(thresh_masked[0])/2)
for nuc_obj in props:
this_center = nuc_obj.centroid
this_r = np.sqrt((this_center[0] - xcent)**2\
+(this_center[1] - ycent)**2)
if this_r < best_r:
best_r = this_r
center_nuc = nuc_obj
these_pix = np.where(labels==nuc_obj.label)
elif len(np.unique(labels))==1:
these_pix = np.where(thresh_masked)
else:
print("ERROR: getNuclearMask2() could not find any nuclei! "\
+"Please specify a lower threshold.")
quit()
nuc_fill = np.zeros(np.shape(nuclear_frame),dtype=int)
nuc_fill[these_pix] = 1.0
nuc_fill = scipy.ndimage.binary_fill_holes(nuc_fill)
for idx in range(len(nuclear_frame)):
this_slice = np.where(nuc_fill[idx]>0)
if len(this_slice[0]) > 0:
this_min = this_slice[0][0]
this_max = this_slice[0][-1]
try:
nuc_points = np.vstack((nuc_points,[idx,this_min]))
except:
nuc_points = np.array([idx,this_min])
if this_max != this_min:
try:
nuc_points = np.vstack((nuc_points,[idx,this_max]))
except:
nuc_points = np.array([idx,this_max])
nuc_points = np.vstack((nuc_points,nuc_points[0]))
#Filter out the sharp edges
nuc_points[:,1] = savgol_filter(nuc_points[:,1],51,3)
nuc_path = Path(nuc_points,closed=True)
if self.ts==0:
self.saveNuclearMask(nuc_points)
return nuc_path, nuc_points, nuc_fill
def imclearborderAnalogue(self,image_frame,radius):
#Contour the image
Nx = len(image_frame)
Ny = len(image_frame[0])
img = cv2.resize(image_frame,(Nx,Ny))
contours, hierarchy = cv2.findContours(img, cv2.RETR_FLOODFILL,
cv2.CHAIN_APPROX_SIMPLE)
#Get dimensions
nrows = image_frame.shape[0]
ncols = image_frame.shape[1]
#Track contours touching the border
contourList = []
for idx in range(len(contours)):
this_contour = contours[idx]
for point in this_contour:
contour_row = point[0][1]
contour_col = point[0][0]
#Check if within radius of border, else remove
rowcheck = (contour_row >= 0 and contour_row < radius)\
or (contour_row >= nrows-1-radius and contour_row\
< nrows)
colcheck = (contour_col >= 0 and contour_col < radius)\
or (contour_col >= ncols-1-radius and contour_col\
< ncols)
if rowcheck or colcheck:
contourList.append(idx)
output_frame = image_frame.copy()
for idx in contourList:
cv2.drawContours(output_frame, contours, idx, (0,0,0), -1)
return output_frame
def bwareaopenAnalogue(self,image_frame,areaPix):
output_frame = image_frame.copy()
#First, identify all the contours
contours,hierarchy = cv2.findContours(output_frame.copy(),
cv2.RETR_FLOODFILL,
cv2.CHAIN_APPROX_SIMPLE)
#then determine occupying area of each contour
for idx in range(len(contours)):
area = cv2.contourArea(contours[idx])
if (area >= 0 and area <= areaPix):
cv2.drawContours(output_frame,contours,idx,(0,0,0),-1)
return output_frame
def getNuclearMask(self,nuclear_frame,sigma=-1):
if sigma < 0:
sigma_est = restoration.estimate_sigma(nuclear_frame)
else:
sigma_est = sigma
filtered= ndi.gaussian_filter(nuclear_frame,0.5*sigma_est)
        seed = np.copy(filtered)
seed[1:-1,1:-1] = filtered.min()
mask = np.copy(filtered)
dilated = morphology.reconstruction(seed,mask,method='dilation')
bgsub = filtered - dilated
self.lit_pxl = np.where(bgsub > np.average(bgsub))
self.lit_crd = np.vstack((self.lit_pxl[0],self.lit_pxl[1])).T
self.clusters= DBSCAN(eps=np.sqrt(2),min_samples=2).fit(self.lit_crd)
nuc_path, nuc_points, nuc_fill = self.findNucEdgeFromClusters(nuclear_frame)
if self.ts==0:
self.saveNuclearMask(nuc_points)
return nuc_path, nuc_points, nuc_fill
def saveNuclearMask(self,nuc_points):
ofile = open(self.output_prefix+"NuclMask.txt",'w')
for idx in range(len(nuc_points)):
ofile.write(str(nuc_points[idx][0])+","\
+str(nuc_points[idx][1])+"\n")
ofile.close()
def findNucEdgeFromClusters(self,nuclear_frame):
#Assume that the cell is in the center of the frame
Nx = len(nuclear_frame)
Ny = len(nuclear_frame[0])
xMid = int(Nx/2)
yMid = int(Ny/2)
#Find the edges of each cluster
for idx in range(np.max(self.clusters.labels_)+1):
blank = np.zeros_like(nuclear_frame)
members = np.where(self.clusters.labels_ == idx)[0]
x = self.lit_crd[members,0]
y = self.lit_crd[members,1]
xbounds = np.array([np.min(x),np.max(x)])
ybounds = np.array([np.min(y),np.max(y)])
for x_idx in range(xbounds[0],xbounds[1]+1):
these_x = np.where(x == x_idx)[0]
if len(these_x) > 0:
min_y = np.min(y[these_x])
max_y = np.max(y[these_x])
try:
lower_bound= np.vstack((lower_bound,[x_idx,min_y]))
except:
lower_bound= np.array([x_idx,min_y])
try:
upper_bound= np.vstack(([x_idx,max_y],upper_bound))
except:
upper_bound= np.array([x_idx,max_y])
else:
print("Warning: No X values in this lane: "+str(x_idx))
nuc_points = np.vstack((lower_bound,upper_bound))
nuc_points = np.vstack((nuc_points,nuc_points[0]))
for idx2 in range(len(x)):
blank[x[idx2],y[idx2]] = 1
nuc_path = Path(nuc_points,closed=True)
if nuc_path.contains_point([xMid,yMid]):
break
else:
del nuc_points
del upper_bound
del lower_bound
return nuc_path,nuc_points,blank
def growROI(self,roi_file,roi_buffer):
#Store Path objects for all the requested ROIs for later callback
roi_path_dict = {}
roi_frame_dict= {}
roi_pix = np.where(roi_file>0)
row_start= np.min(roi_pix[0]) - roi_buffer[1]
row_end = np.max(roi_pix[0]) + roi_buffer[1]
col_start= np.min(roi_pix[1]) - roi_buffer[0]
col_end = np.max(roi_pix[1]) + roi_buffer[0]
roi_height = row_end - row_start
roi_width = col_end - col_start - 1
for idx in range(-self.additional_rois,self.additional_rois+1):
rowStart= row_start+idx*(roi_height)
rowEnd = row_end+idx*(roi_height)
colStart= col_start
colEnd = col_end
out_roi_file= np.zeros(np.shape(roi_file))
out_roi_file[rowStart:rowEnd+1,colStart:colEnd+1] = 1
roi_frame_dict[idx] = np.copy(out_roi_file)
roi_path = Path(np.array([[colStart,rowStart],
[colStart,rowEnd],
[colEnd,rowEnd],
[colEnd,rowStart],
[colStart,rowStart]],dtype=int))
roi_path_dict[idx] = roi_path
if idx==0:
ofile = open(self.output_prefix+"ROI.txt",'w')
ofile.write(str(colStart+1)+","+str(rowStart+1)+",")
ofile.write(str(roi_width)+",")
ofile.write(str(roi_height)+"\n")
ofile.close()
return roi_frame_dict,roi_path_dict
def correctBackground(self,input_frame,input_video,channel,is_nd2=False):
if self.ts == 0:
for idx in range(self.irrad_frame):
if is_nd2:
this_frame = input_video.get_frame_2D(c=channel,t=idx)
else:
this_frame = np.array(bioformats.load_image(input_video,c=self.protein_channel,t=self.ts,rescale=False))
self.histogram,bins = np.histogram(this_frame,
bins=np.arange(1,np.max(this_frame)+1))
#Smooth the histogram out
temp = np.convolve(self.histogram,
np.ones(3,dtype=int),'valid')
ranges = np.arange(1,2,2)
start = np.cumsum(self.histogram[:2])[::2]/ranges
stop = (np.cumsum(self.histogram[:-3:-1])[::2]/ranges)[::-1]
self.smoothed_hist = np.concatenate((start,temp,stop))
self.peaks = scipy.signal.find_peaks(self.smoothed_hist)[0]
self.min_peak_int = np.max(self.smoothed_hist[self.peaks])
self.min_peak = np.where(self.smoothed_hist==self.min_peak_int)[0][0]
self.bg_value = bins[np.where(
self.smoothed_hist[self.min_peak:]<=\
(0.67*self.min_peak_int))[0][0]]\
+self.min_peak
self.avg_bg = np.average(this_frame[np.where(
input_frame<=self.bg_value)])
try:
self.arr_of_avgs = np.append(self.arr_of_avgs,
self.avg_bg)
except:
self.arr_of_avgs = np.array([self.avg_bg])
self.bg_correction =
|
np.average(self.arr_of_avgs)
|
numpy.average
|
from PyQt5.QtCore import QCoreApplication, Qt
import numpy as np
from matplotlib.patches import Rectangle, Circle
from GUI_classes.utils_gui import choose_dataset
from GUI_classes.generic_gui import StartingGui, FinalStepWindow
from clustviz.denclue import (
pop_cubes,
check_border_points_rectangles,
highly_pop_cubes,
connect_cubes,
density_attractor,
assign_cluster,
extract_cluster_labels,
center_of_mass,
gauss_dens,
)
from base import appctxt
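# Hedged sketch (not part of the GUI class below): a minimal, non-GUI walk through the
# same clustviz.denclue pipeline that DENCLUE_gui uses, with call signatures taken from
# that method; the default parameter values here are illustrative assumptions.
def denclue_pipeline_sketch(data, s=0.3, xi=1.0, xi_c=3.0, tol=2.0, dist="euclidean"):
    clust_dict, processed = {}, []
    z, d = pop_cubes(data=data, s=s)  # grid of populated cubes
    hpc = highly_pop_cubes(z, xi_c=xi_c)  # keep only highly populated cubes
    new_cubes = connect_cubes(hpc, z, s=s)  # connect them to their neighbours
    points = ([p for sub in np.array(list(new_cubes.values()))[:, 2] for p in sub]
              if len(new_cubes) != 0 else [])
    for point in points:
        r, o, delta = None, None, 0.02
        while r is None:  # hill-climb to a density attractor, relaxing delta on failure
            r, o = density_attractor(data=data, x=point, coord_dict=d,
                                     tot_cubes=new_cubes, s=s, xi=xi,
                                     delta=delta, max_iter=600, dist=dist)
            delta = delta * 2
        clust_dict, proc = assign_cluster(data=data, others=o, attractor=r,
                                          clust_dict=clust_dict, processed=processed)
    labels, _ = extract_cluster_labels(data, clust_dict, tol)
    return labels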
class DENCLUE_class(StartingGui):
def __init__(self):
super(DENCLUE_class, self).__init__(
name="DENCLUE",
twinx=False,
first_plot=False,
second_plot=False,
function=self.start_DENCLUE,
extract=False,
stretch_plot=False,
)
self.label_slider.hide()
self.first_run_occurred_mod = False
self.dict_checkbox_names = {
0: "highly_pop",
1: "contour",
2: "3dplot",
3: "clusters",
}
self.plot_list = [False, False, False, False]
# self.checkbox_pop_cubes.stateChanged.connect(lambda: self.number_of_plots_gui(0))
self.checkbox_highly_pop_cubes.stateChanged.connect(
lambda: self.number_of_plots_gui(0)
)
self.checkbox_contour.stateChanged.connect(lambda: self.number_of_plots_gui(1))
self.checkbox_3dplot.stateChanged.connect(lambda: self.number_of_plots_gui(2))
self.checkbox_clusters.stateChanged.connect(lambda: self.number_of_plots_gui(3))
# influence plot
self.button_infl_denclue.clicked.connect(
lambda: self.plot_infl_gui(ax=self.ax_infl, canvas=self.canvas_infl)
)
def number_of_plots_gui(self, number):
# if number == 0:
# if self.checkbox_pop_cubes.isChecked():
# self.plot_list[number] = True
# else:
# self.plot_list[number] = False
if number == 0:
if self.checkbox_highly_pop_cubes.isChecked():
self.plot_list[number] = True
else:
self.plot_list[number] = False
if number == 1:
if self.checkbox_contour.isChecked():
self.plot_list[number] = True
else:
self.plot_list[number] = False
if number == 2:
if self.checkbox_3dplot.isChecked():
self.plot_list[number] = True
else:
self.plot_list[number] = False
if number == 3:
if self.checkbox_clusters.isChecked():
self.plot_list[number] = True
else:
self.plot_list[number] = False
def start_DENCLUE(self):
self.log.clear()
self.log.appendPlainText("{} LOG".format(self.name))
self.log.appendPlainText("")
QCoreApplication.processEvents()
self.verify_input_parameters()
if self.param_check is False:
return
self.sigma_denclue = float(self.line_edit_sigma_denclue.text())
self.xi_denclue = float(self.line_edit_xi_denclue.text())
self.xi_c_denclue = float(self.line_edit_xi_c_denclue.text())
self.tol_denclue = float(self.line_edit_tol_denclue.text())
self.prec_denclue = int(self.line_edit_prec_denclue.text())
self.n_points = int(self.line_edit_np.text())
self.X = choose_dataset(self.combobox.currentText(), self.n_points)
self.SetWindowsDENCLUE(
pic_list=self.plot_list, first_run_boolean=self.first_run_occurred_mod
)
self.button_run.setEnabled(False)
self.checkbox_saveimg.setEnabled(False)
self.button_delete_pics.setEnabled(False)
QCoreApplication.processEvents()
if self.first_run_occurred is True:
self.ind_run += 1
self.ind_extr_fig = 0
if self.save_plots is True:
self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
else:
if Qt.Checked == self.checkbox_saveimg.checkState():
self.first_run_occurred = True
self.checkBoxChangedAction(self.checkbox_saveimg.checkState())
if np.array(self.plot_list).sum() != 0:
self.DENCLUE_gui(
data=self.X,
s=self.sigma_denclue,
xi=self.xi_denclue,
xi_c=self.xi_c_denclue,
tol=self.tol_denclue,
prec=self.prec_denclue,
save_plots=self.save_plots,
)
else:
self.display_empty_message()
if (self.make_gif is True) and (self.save_plots is True):
self.generate_GIF()
self.button_run.setEnabled(True)
self.checkbox_saveimg.setEnabled(True)
self.button_delete_pics.setEnabled(True)
self.first_run_occurred_mod = True
def display_empty_message(self):
self.log.appendPlainText("You did not select anything to plot")
QCoreApplication.processEvents()
def DENCLUE_gui(self, data, s, xi, xi_c, tol, prec, save_plots, dist="euclidean"):
clust_dict = {}
processed = []
z, d = pop_cubes(data=data, s=s)
self.log.appendPlainText("Number of populated cubes: {}".format(len(z)))
check_border_points_rectangles(data, z)
hpc = highly_pop_cubes(z, xi_c=xi_c)
self.log.appendPlainText(
"Number of highly populated cubes: {}".format(len(hpc))
)
self.log.appendPlainText("")
new_cubes = connect_cubes(hpc, z, s=s)
if self.plot_list[0] == True:
self.plot_grid_rect_gui(
data,
s=s,
cube_kind="highly_populated",
ax=self.axes_list[0],
canvas=self.canvas_list[0],
save_plots=save_plots,
ind_fig=0,
)
if self.plot_list[1] == True:
self.plot_3d_or_contour_gui(
data,
s=s,
three=False,
scatter=True,
prec=prec,
ax=self.axes_list[1],
canvas=self.canvas_list[1],
save_plots=save_plots,
ind_fig=1,
)
if self.plot_list[2] == True:
self.plot_3d_both_gui(
data,
s=s,
xi=xi,
prec=prec,
ax=self.axes_list[2],
canvas=self.canvas_list[2],
save_plots=save_plots,
ind_fig=2,
)
if self.plot_list[3] == False:
return
if len(new_cubes) != 0:
points_to_process = [
item
for sublist in np.array(list(new_cubes.values()))[:, 2]
for item in sublist
]
else:
points_to_process = []
initial_noise = []
for elem in data:
if len((np.nonzero(points_to_process == elem))[0]) == 0:
initial_noise.append(elem)
for num, point in enumerate(points_to_process):
if num == int(len(points_to_process) / 4):
self.log.appendPlainText("hill-climb progress: 25%")
QCoreApplication.processEvents()
if num == int(len(points_to_process) / 2):
self.log.appendPlainText("hill-climb progress: 50%")
QCoreApplication.processEvents()
if num == int((len(points_to_process) / 4) * 3):
self.log.appendPlainText("hill-climb progress: 75%")
QCoreApplication.processEvents()
delta = 0.02
r, o = None, None
while r is None:
r, o = density_attractor(
data=data,
x=point,
coord_dict=d,
tot_cubes=new_cubes,
s=s,
xi=xi,
delta=delta,
max_iter=600,
dist=dist,
)
delta = delta * 2
clust_dict, proc = assign_cluster(
data=data,
others=o,
attractor=r,
clust_dict=clust_dict,
processed=processed,
)
self.log.appendPlainText("hill-climb progress: 100%")
QCoreApplication.processEvents()
for point in initial_noise:
point_index = np.nonzero(data == point)[0][0]
clust_dict[point_index] = [-1]
try:
lab, coord_df = extract_cluster_labels(data, clust_dict, tol)
except:
self.log.appendPlainText("")
self.log.appendPlainText(
"There was an error when extracting clusters. Increase number "
"of points or try with a less "
"pathological case: see the other plots to have an idea of why it failed."
)
return
if self.plot_list[3] == True:
self.plot_clust_dict_gui(
data,
coord_df,
ax=self.axes_list[3],
canvas=self.canvas_list[3],
save_plots=save_plots,
ind_fig=3,
)
return lab
def plot_grid_rect_gui(
self,
data,
s,
ax,
canvas,
save_plots,
ind_fig,
cube_kind="populated",
color_grids=True,
):
ax.clear()
ax.set_title("Highly populated cubes")
cl, ckc = pop_cubes(data, s)
cl_copy = cl.copy()
coms = [center_of_mass(list(cl.values())[i]) for i in range(len(cl))]
coms_hpc = []
if cube_kind == "highly_populated":
cl = highly_pop_cubes(cl, xi_c=3)
coms_hpc = [center_of_mass(list(cl.values())[i]) for i in range(len(cl))]
ax.scatter(data[:, 0], data[:, 1], s=100, edgecolor="black")
rect_min = data.min(axis=0)
rect_diff = data.max(axis=0) - rect_min
x0 = rect_min[0] - 0.05
y0 = rect_min[1] - 0.05
# minimal bounding rectangle
ax.add_patch(
Rectangle(
(x0, y0),
rect_diff[0] + 0.1,
rect_diff[1] + 0.1,
fill=None,
color="r",
alpha=1,
linewidth=3,
)
)
ax.scatter(
np.array(coms)[:, 0],
np.array(coms)[:, 1],
s=100,
marker="X",
color="red",
edgecolor="black",
label="centers of mass",
)
if cube_kind == "highly_populated":
for i in range(len(coms_hpc)):
ax.add_artist(
Circle(
(np.array(coms_hpc)[i, 0], np.array(coms_hpc)[i, 1]),
4 * s,
color="red",
fill=False,
linewidth=2,
alpha=0.6,
)
)
tot_cubes = connect_cubes(cl, cl_copy, s)
new_clusts = {
i: tot_cubes[i] for i in list(tot_cubes.keys()) if i not in list(cl.keys())
}
for key in list(new_clusts.keys()):
(a, b, c, d) = ckc[key]
ax.add_patch(
Rectangle(
(a, b),
2 * s,
2 * s,
fill=True,
color="yellow",
alpha=0.3,
linewidth=3,
)
)
for key in list(ckc.keys()):
(a, b, c, d) = ckc[key]
if color_grids is True:
if key in list(cl.keys()):
color_or_not = True if cl[key][0] > 0 else False
else:
color_or_not = False
else:
color_or_not = False
ax.add_patch(
Rectangle(
(a, b),
2 * s,
2 * s,
fill=color_or_not,
color="g",
alpha=0.3,
linewidth=3,
)
)
ax.legend(fontsize=8)
canvas.draw()
if save_plots is True:
canvas.figure.savefig(
appctxt.get_resource("Images/")
+ "/"
+ "{}_{:02}/fig_{:02}.png".format(self.name, self.ind_run, ind_fig)
)
QCoreApplication.processEvents()
def plot_3d_both_gui(
self, data, s, ax, canvas, save_plots, ind_fig, xi=None, prec=3
):
ax.clear()
from matplotlib import cm
x_data = [np.array(data)[:, 0].min(),
|
np.array(data)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
from scipy import stats, special, optimize
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
#global
store_params = []
class MyT(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Linear Model with t-distributed errors
    This is an example of a generic MLE that follows the same setup as the
    discretemod.Poisson example.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
        Negative loglikelihood of the linear model with t-distributed errors,
        evaluated for each observation.
        Parameters
        ----------
        params : array-like
            The parameters of the model: the regression coefficients followed
            by the degrees of freedom and the scale.
        Returns
        -------
        The negative log likelihood for each observation evaluated at `params`
"""
#print len(params),
store_params.append(params)
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
#Example:
np.random.seed(98765678)
nobs = 1000
nvars = 6
df = 5
rvs = np.random.randn(nobs, nvars-1)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(df, size=nobs)
print(data_endog.var())
res_ols = sm.OLS(data_endog, data_exog).fit()
print(res_ols.scale)
print(np.sqrt(res_ols.scale))
print(res_ols.params)
kurt = stats.kurtosis(res_ols.resid)
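#excess kurtosis of a t distribution is 6/(df-4), so inverting it gives a rough df estimate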
df_fromkurt = 6./kurt + 4
print(stats.t.stats(df_fromkurt, moments='mvsk'))
print(stats.t.stats(df, moments='mvsk'))
modp = MyT(data_endog, data_exog)
start_value = 0.1*np.ones(data_exog.shape[1]+2)
#start_value = np.zeros(data_exog.shape[1]+2)
#start_value[:nvars] = sm.OLS(data_endog, data_exog).fit().params
start_value[:nvars] = res_ols.params
start_value[-2] = df_fromkurt #10
start_value[-1] = np.sqrt(res_ols.scale) #0.5
modp.start_params = start_value
#adding fixed parameters
fixdf = np.nan * np.zeros(modp.start_params.shape)
fixdf[-2] = 100
fixone = 0
if fixone:
modp.fixed_params = fixdf
modp.fixed_paramsmask = np.isnan(fixdf)
modp.start_params = modp.start_params[modp.fixed_paramsmask]
else:
modp.fixed_params = None
modp.fixed_paramsmask = None
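#Hedged sanity check (not in the original script): with fixed_params left as None, the
#negative per-observation loglikelihood defined above should match scipy's t log-density
#evaluated at the same location and scale.
check_params = modp.start_params
check_loc = np.dot(data_exog, check_params[:-2])
check_logpdf = stats.t.logpdf(data_endog, check_params[-2],
                              loc=check_loc, scale=check_params[-1])
print('t-logpdf check, max abs diff:',
      maxabs(-modp.nloglikeobs(check_params), check_logpdf))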
resp = modp.fit(start_params = modp.start_params, disp=1, method='nm')#'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')
print('\nestimation results t-dist')
print(resp.params)
print(resp.bse)
resp2 = modp.fit(start_params = resp.params, method='Newton')
print('using Newton')
print(resp2.params)
print(resp2.bse)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_params)
print(tmp.shape)
#np.linalg.eigh(np.linalg.inv(hb))[0]
pp=np.array(store_params)
print(pp.min(0))
print(pp.max(0))
##################### Example: Pareto
# estimating scale doesn't work yet, a bug somewhere ?
# fit_ks works well, but no bse or other result statistics yet
#import for kstest based estimation
#should be replaced
import statsmodels.sandbox.distributions.sppatch
class MyPareto(GenericLikelihoodModel):
'''Maximum Likelihood Estimation pareto distribution
first version: iid case, with constant parameters
'''
#copied from stats.distribution
def pdf(self, x, b):
return b * x**(-b-1)
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
def nloglikeobs(self, params):
#print params.shape
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
b = params[0]
loc = params[1]
scale = params[2]
#loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
logpdf = np_log(b) - (b+1.)*np_log(x) #use np_log(1 + x) for Pareto II
logpdf -= np.log(scale)
#lb = loc + scale
#logpdf[endog<lb] = -inf
#import pdb; pdb.set_trace()
logpdf[x<1] = -10000 #-np.inf
return -logpdf
def fit_ks(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
this doesn't trim lower values during ks optimization
'''
rvs = self.endog
rvsmin = rvs.min()
fixdf = np.nan * np.ones(3)
self.fixed_params = fixdf
self.fixed_paramsmask = np.isnan(fixdf)
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
#est = self.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
self.fixed_params[1] = loc
est = self.fit(start_params=self.start_params[self.fixed_paramsmask]).params
#est = self.fit(start_params=self.start_params, method='nm').params
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 0., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
def fit_ks1_trim(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
self.nobs = self.endog.shape[0]
rvs = np.sort(self.endog)
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
maxind = min(np.floor(self.nobs*0.95).astype(int), self.nobs-10)
res = []
for trimidx in range(self.nobs//2, maxind):
xmin = loc = rvs[trimidx]
res.append([trimidx, pareto_ks(loc-1e-10, rvs[trimidx:])])
res = np.array(res)
bestidx = res[np.argmin(res[:,1]),0].astype(int)
print(bestidx)
locest = rvs[bestidx]
est = stats.pareto.fit_fr(rvs[bestidx:], 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest, est[1])
return args
def fit_ks1(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
rvs = self.endog
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
#y = stats.pareto.rvs(1, loc=10, scale=2, size=nobs)
y = stats.pareto.rvs(1, loc=0, scale=2, size=nobs)
par_start_params = np.array([1., 9., 2.])
mod_par = MyPareto(y)
mod_par.start_params = np.array([1., 10., 2.])
mod_par.start_params =
|
np.array([1., -9., 2.])
|
numpy.array
|
import unittest
from .provider_test import ProviderTest
from gunpowder import ArrayKeys, ArraySpec, PointsSpec, Roi, Array, PointsKeys, Batch, BatchProvider, Points, Coordinate, ArrayKey, PointsKey, BatchRequest, build
from gunpowder.contrib import AddVectorMap
from gunpowder.contrib.points import PreSynPoint, PostSynPoint
from copy import deepcopy
import itertools
import numpy as np
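# Hedged illustration (not part of the test below): provide() encodes each RAW voxel's
# value as the sum of its z, y and x voxel indices, so a sampled value can be checked
# against the coordinates it supposedly came from.
def position_encoded_volume_sketch(shape=(2, 3, 4)):
    zz, yy, xx = np.meshgrid(range(shape[0]), range(shape[1]), range(shape[2]),
                             indexing='ij')
    return zz + yy + xx  # value at (z, y, x) equals z + y + x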
class AddVectorMapTestSource(BatchProvider):
def setup(self):
for identifier in [
ArrayKeys.RAW,
ArrayKeys.GT_LABELS]:
self.provides(
identifier,
ArraySpec(
roi=Roi((1000, 1000, 1000), (400, 400, 400)),
voxel_size=(20, 2, 2)))
for identifier in [
PointsKeys.PRESYN,
PointsKeys.POSTSYN]:
self.provides(
identifier,
PointsSpec(
roi=Roi((1000, 1000, 1000), (400, 400, 400))))
def provide(self, request):
batch = Batch()
# have the pixels encode their position
if ArrayKeys.RAW in request:
# the z,y,x coordinates of the ROI
roi = request[ArrayKeys.RAW].roi
roi_voxel = roi // self.spec[ArrayKeys.RAW].voxel_size
meshgrids = np.meshgrid(
range(roi_voxel.get_begin()[0], roi_voxel.get_end()[0]),
range(roi_voxel.get_begin()[1], roi_voxel.get_end()[1]),
range(roi_voxel.get_begin()[2], roi_voxel.get_end()[2]), indexing='ij')
data = meshgrids[0] + meshgrids[1] + meshgrids[2]
spec = self.spec[ArrayKeys.RAW].copy()
spec.roi = roi
batch.arrays[ArrayKeys.RAW] = Array(data, spec)
if ArrayKeys.GT_LABELS in request:
roi = request[ArrayKeys.GT_LABELS].roi
roi_voxel_shape = (roi // self.spec[ArrayKeys.GT_LABELS].voxel_size).get_shape()
data = np.ones(roi_voxel_shape)
data[roi_voxel_shape[0]//2:,roi_voxel_shape[1]//2:,:] = 2
data[roi_voxel_shape[0]//2:, -(roi_voxel_shape[1] // 2):, :] = 3
spec = self.spec[ArrayKeys.GT_LABELS].copy()
spec.roi = roi
batch.arrays[ArrayKeys.GT_LABELS] = Array(data, spec)
if PointsKeys.PRESYN in request:
data_presyn, data_postsyn = self.__get_pre_and_postsyn_locations(roi=request[PointsKeys.PRESYN].roi)
elif PointsKeys.POSTSYN in request:
data_presyn, data_postsyn = self.__get_pre_and_postsyn_locations(roi=request[PointsKeys.POSTSYN].roi)
voxel_size_points = self.spec[ArrayKeys.RAW].voxel_size
for (points_key, spec) in request.points_specs.items():
if points_key == PointsKeys.PRESYN:
data = data_presyn
if points_key == PointsKeys.POSTSYN:
data = data_postsyn
batch.points[points_key] = Points(data, PointsSpec(spec.roi))
return batch
def __get_pre_and_postsyn_locations(self, roi):
presyn_locs, postsyn_locs = {}, {}
min_dist_between_presyn_locs = 250
voxel_size_points = self.spec[ArrayKeys.RAW].voxel_size
min_dist_pre_to_postsyn_loc, max_dist_pre_to_postsyn_loc= 60, 120
num_presyn_locations = roi.size() // (np.prod(50*np.asarray(voxel_size_points))) # 1 synapse per 50vx^3 cube
num_postsyn_locations =
|
np.random.randint(low=1, high=3)
|
numpy.random.randint
|
import numpy as np
from medpy.filter.binary import largest_connected_component
from skimage.exposure import rescale_intensity
from skimage.transform import resize
def dsc(y_pred, y_true, lcc=True):
if lcc and np.any(y_pred):
y_pred = np.round(y_pred).astype(int)
y_true = np.round(y_true).astype(int)
y_pred = largest_connected_component(y_pred)
return np.sum(y_pred[y_true == 1]) * 2.0 / (np.sum(y_pred) + np.sum(y_true))
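# Hedged usage sketch (not part of the original module): dsc() rounds both inputs, keeps
# the largest connected component of the prediction and returns the Dice overlap, which
# is 1.0 for the perfectly matching toy pair below.
def _dsc_example():
    pred = np.zeros((8, 8), dtype=int)
    true = np.zeros((8, 8), dtype=int)
    pred[2:5, 2:5] = 1
    true[2:5, 2:5] = 1
    return dsc(pred, true)  # expected: 1.0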
def crop_sample(x):
volume, mask = x
volume[volume < np.max(volume) * 0.1] = 0
z_projection = np.max(np.max(np.max(volume, axis=-1), axis=-1), axis=-1)
z_nonzero = np.nonzero(z_projection)
z_min = np.min(z_nonzero)
z_max = np.max(z_nonzero) + 1
y_projection = np.max(np.max(np.max(volume, axis=0), axis=-1), axis=-1)
y_nonzero = np.nonzero(y_projection)
y_min = np.min(y_nonzero)
y_max = np.max(y_nonzero) + 1
x_projection = np.max(np.max(np.max(volume, axis=0), axis=0), axis=-1)
x_nonzero = np.nonzero(x_projection)
x_min = np.min(x_nonzero)
x_max = np.max(x_nonzero) + 1
return (
volume[z_min:z_max, y_min:y_max, x_min:x_max],
mask[z_min:z_max, y_min:y_max, x_min:x_max],
)
def pad_sample(x):
volume, mask = x
a = volume.shape[1]
b = volume.shape[2]
if a == b:
return volume, mask
diff = (max(a, b) - min(a, b)) / 2.0
if a > b:
padding = ((0, 0), (0, 0), (int(np.floor(diff)), int(np.ceil(diff))))
else:
padding = ((0, 0), (int(np.floor(diff)), int(np.ceil(diff))), (0, 0))
mask =
|
np.pad(mask, padding, mode="constant", constant_values=0)
|
numpy.pad
|
import numpy as np
from copy import copy, deepcopy
from contextlib import contextmanager
from ...util.event import Event
from ...util.misc import ensure_iterable
from .._base_layer import Layer
from .._register import add_to_viewer
from ..._vispy.scene.visuals import Mesh, Markers, Compound
from ..._vispy.scene.visuals import Line as VispyLine
from vispy.color import get_color_names
from .view import QtShapesLayer
from .view import QtShapesControls
from ._constants import (Mode, BOX_LINE_HANDLE, BOX_LINE, BOX_TOP_CENTER,
BOX_CENTER, BOX_LEN, BOX_HANDLE, BOX_WITH_HANDLE,
BOX_TOP_LEFT, BOX_BOTTOM_RIGHT, BOX_BOTTOM_LEFT,
BACKSPACE)
from .shape_list import ShapeList
from .shape_util import create_box, point_to_lines
from .shapes import Rectangle, Ellipse, Line, Path, Polygon
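# Hedged usage sketch (illustrative only; constructing the layer also builds Qt widgets,
# so a running Qt application is assumed): the Shapes layer defined below accepts an
# (N, 4, 2) array of rectangle corners plus per-shape styling options.
def _shapes_layer_sketch():
    corners = np.array([[[0, 0], [0, 10], [10, 10], [10, 0]]], dtype=float)
    return Shapes(corners, shape_type='rectangle', edge_width=2,
                  edge_color='black', face_color='white', opacity=0.7)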
@add_to_viewer
class Shapes(Layer):
"""Shapes layer.
Parameters
----------
data : np.array | list
List of np.array of data or np.array. Each element of the list
(or row of a 3D np.array) corresponds to one shape. If a 2D array is
passed it corresponds to just a single shape.
shape_type : string | list
String of shape shape_type, must be one of "{'line', 'rectangle',
'ellipse', 'path', 'polygon'}". If a list is supplied it must be the
same length as the length of `data` and each element will be applied to
each shape otherwise the same value will be used for all shapes.
edge_width : float | list
thickness of lines and edges. If a list is supplied it must be the same
length as the length of `data` and each element will be applied to each
shape otherwise the same value will be used for all shapes.
edge_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3 or
4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
face_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3 or
4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
opacity : float | list
Opacity of the shapes, must be between 0 and 1.
z_index : int | list
Specifier of z order priority. Shapes with higher z order are displayed
        on top of others. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
name : str, keyword-only
Name of the layer.
Attributes
----------
data : ShapeList
Object containing all the shape data.
edge_width : float
thickness of lines and edges.
edge_color : str
Color of the shape edge.
face_color : str
Color of the shape face.
opacity : float
Opacity value between 0.0 and 1.0.
selected_shapes : list
List of currently selected shapes.
mode : Mode
Interactive mode.
Extended Summary
----------
_mode_history : Mode
Interactive mode captured on press of <space>.
_selected_shapes_history : list
List of currently selected captured on press of <space>.
_selected_shapes_stored : list
List of selected previously displayed. Used to prevent rerendering the
same highlighted shapes when no data has changed.
_selected_box : None | np.ndarray
`None` if no shapes are selected, otherwise a 10x2 array of vertices of
the interaction box. The first 8 points are the corners and midpoints
of the box. The 9th point is the center of the box, and the last point
is the location of the rotation handle that can be used to rotate the
box.
_hover_shape : None | int
Index of any shape currently hovered over if any. `None` otherwise.
_hover_shape_stored : None | int
Index of any shape previously displayed as hovered over if any. `None`
otherwise. Used to prevent rerendering the same highlighted shapes when
no data has changed.
_hover_vertex : None | int
Index of any vertex currently hovered over if any. `None` otherwise.
_hover_vertex_stored : None | int
Index of any vertex previously displayed as hovered over if any. `None`
otherwise. Used to prevent rerendering the same highlighted shapes when
no data has changed.
_moving_shape : None | int
Index of any shape currently being moved if any. `None` otherwise.
_moving_vertex : None | int
Index of any vertex currently being moved if any. `None` otherwise.
_drag_start : None | np.ndarray
If a drag has been started and is in progress then a length 2 array of
the initial coordinates of the drag. `None` otherwise.
_drag_box : None | np.ndarray
If a drag box is being created to select shapes then this is a 2x2
array of the two extreme corners of the drag. `None` otherwise.
_drag_box_stored : None | np.ndarray
If a drag box is being created to select shapes then this is a 2x2
array of the two extreme corners of the drag that have previously been
rendered. `None` otherwise. Used to prevent rerendering the same
drag box when no data has changed.
_is_moving : bool
Bool indicating if any shapes are currently being moved.
_is_selecting : bool
Bool indicating if a drag box is currently being created in order to
select shapes.
_is_creating : bool
Bool indicating if any shapes are currently being created.
_fixed_aspect : bool
Bool indicating if aspect ratio of shapes should be preserved on
resizing.
_aspect_ratio : float
Value of aspect ratio to be preserved if `_fixed_aspect` is `True`.
_fixed_vertex : None | np.ndarray
If a scaling or rotation is in progress then a length 2 array of the
coordinates that are remaining fixed during the move. `None` otherwise.
_fixed_index : int
If a scaling or rotation is in progress then the index of the vertex of
        the bounding box that is remaining fixed during the move. `None`
otherwise.
_cursor_coord : np.ndarray
Length 2 array of the current cursor position in Image coordinates.
_update_properties : bool
Bool indicating if properties are to allowed to update the selected
shapes when they are changed. Blocking this prevents circular loops
when shapes are selected and the properties are changed based on that
selection
_clipboard : list
List of shape objects that are to be used during a copy and paste.
_colors : list
List of supported vispy color names.
_vertex_size : float
        Size of the vertices of the shapes and bounding box in Canvas
coordinates.
_rotation_handle_length : float
        Length of the rotation handle of the bounding box in Canvas
coordinates.
_highlight_color : list
Length 3 list of color used to highlight shapes and the interaction
box.
_highlight_width : float
Width of the edges used to highlight shapes.
"""
_colors = get_color_names()
_vertex_size = 10
_rotation_handle_length = 20
_highlight_color = (0, 0.6, 1)
_highlight_width = 1.5
def __init__(self, data, *, shape_type='rectangle', edge_width=1,
edge_color='black', face_color='white', opacity=0.7,
z_index=0, name=None):
# Create a compound visual with the following four subvisuals:
# Markers: corresponding to the vertices of the interaction box or the
# shapes that are used for highlights.
# Lines: The lines of the interaction box used for highlights.
# Mesh: The mesh of the outlines for each shape used for highlights.
# Mesh: The actual meshes of the shape faces and edges
visual = Compound([Markers(), VispyLine(), Mesh(), Mesh()])
super().__init__(visual, name)
# Freeze refreshes to prevent drawing before the viewer is constructed
with self.freeze_refresh():
# Add the shape data
self.data = ShapeList()
self.add_shapes(data, shape_type=shape_type, edge_width=edge_width,
edge_color=edge_color, face_color=face_color,
opacity=opacity, z_index=z_index)
# The following shape properties are for the new shapes that will
# be drawn. Each shape has a corresponding property with the
# value for itself
if np.isscalar(edge_width):
self._edge_width = edge_width
else:
self._edge_width = 1
if type(edge_color) is str:
self._edge_color = edge_color
else:
self._edge_color = 'black'
if type(face_color) is str:
self._face_color = face_color
else:
self._face_color = 'white'
self._opacity = opacity
# update flags
self._need_display_update = False
self._need_visual_update = False
self._selected_shapes = []
self._selected_shapes_stored = []
self._selected_shapes_history = []
self._selected_box = None
self._hover_shape = None
self._hover_shape_stored = None
self._hover_vertex = None
self._hover_vertex_stored = None
self._moving_shape = None
self._moving_vertex = None
self._drag_start = None
self._fixed_vertex = None
self._fixed_aspect = False
self._aspect_ratio = 1
self._is_moving = False
self._fixed_index = 0
self._is_selecting = False
self._drag_box = None
self._drag_box_stored = None
self._cursor_coord = np.array([0, 0])
self._is_creating = False
self._update_properties = True
self._clipboard = []
self._mode = Mode.PAN_ZOOM
self._mode_history = self._mode
self._status = str(self._mode)
self._help = 'enter a selection mode to edit shape properties'
self.events.add(mode=Event,
edge_width=Event,
edge_color=Event,
face_color=Event)
self._qt_properties = QtShapesLayer(self)
self._qt_controls = QtShapesControls(self)
self.events.deselect.connect(lambda x: self._finish_drawing())
@property
def data(self):
"""ShapeList: object containing all the shape data
"""
return self._data
@data.setter
def data(self, data):
self._data = data
self.refresh()
@property
def edge_width(self):
"""float: width of edges in px
"""
return self._edge_width
@edge_width.setter
def edge_width(self, edge_width):
self._edge_width = edge_width
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_edge_width(i, edge_width)
self.refresh()
self.events.edge_width()
@property
def edge_color(self):
"""str: color of edges and lines
"""
return self._edge_color
@edge_color.setter
def edge_color(self, edge_color):
self._edge_color = edge_color
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_edge_color(i, edge_color)
self.refresh()
self.events.edge_color()
@property
def face_color(self):
"""str: color of faces
"""
return self._face_color
@face_color.setter
def face_color(self, face_color):
self._face_color = face_color
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_face_color(i, face_color)
self.refresh()
self.events.face_color()
@property
def opacity(self):
"""float: Opacity value between 0.0 and 1.0.
"""
return self._opacity
@opacity.setter
def opacity(self, opacity):
if not 0.0 <= opacity <= 1.0:
raise ValueError('opacity must be between 0.0 and 1.0; '
f'got {opacity}')
self._opacity = opacity
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_opacity(i, opacity)
self.refresh()
self.events.opacity()
@property
def selected_shapes(self):
"""list: list of currently selected shapes
"""
return self._selected_shapes
@selected_shapes.setter
def selected_shapes(self, selected_shapes):
self._selected_shapes = selected_shapes
self._selected_box = self.interaction_box(selected_shapes)
# Update properties based on selected shapes
face_colors = list(set([self.data.shapes[i]._face_color_name for i in
selected_shapes]))
if len(face_colors) == 1:
face_color = face_colors[0]
with self.block_update_properties():
self.face_color = face_color
edge_colors = list(set([self.data.shapes[i]._edge_color_name for i in
selected_shapes]))
if len(edge_colors) == 1:
edge_color = edge_colors[0]
with self.block_update_properties():
self.edge_color = edge_color
edge_width = list(set([self.data.shapes[i].edge_width for i in
selected_shapes]))
if len(edge_width) == 1:
edge_width = edge_width[0]
with self.block_update_properties():
self.edge_width = edge_width
opacities = list(set([self.data.shapes[i].opacity for i in
selected_shapes]))
if len(opacities) == 1:
opacity = opacities[0]
with self.block_update_properties():
self.opacity = opacity
@property
def mode(self):
"""MODE: Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
The SELECT mode allows for entire shapes to be selected, moved and
resized.
The DIRECT mode allows for shapes to be selected and their individual
vertices to be moved.
The VERTEX_INSERT and VERTEX_REMOVE modes allow for individual
vertices either to be added to or removed from shapes that are already
selected. Note that shapes cannot be selected in this mode.
The ADD_RECTANGLE, ADD_ELLIPSE, ADD_LINE, ADD_PATH, and ADD_POLYGON
modes all allow for their corresponding shape type to be added.
"""
return self._mode
@mode.setter
def mode(self, mode):
if mode == self._mode:
return
old_mode = self._mode
if mode == Mode.PAN_ZOOM:
self.cursor = 'standard'
self.interactive = True
self.help = 'enter a selection mode to edit shape properties'
elif mode in [Mode.SELECT, Mode.DIRECT]:
self.cursor = 'pointing'
self.interactive = False
self.help = ('hold <space> to pan/zoom, '
f'press <{BACKSPACE}> to remove selected')
elif mode in [Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode in [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode in [Mode.ADD_PATH, Mode.ADD_POLYGON]:
self.cursor = 'cross'
self.interactive = False
self.help = ('hold <space> to pan/zoom, '
'press <esc> to finish drawing')
else:
raise ValueError("Mode not recongnized")
self.status = str(mode)
self._mode = mode
draw_modes = ([Mode.SELECT, Mode.DIRECT, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE])
self.events.mode(mode=mode)
if not (mode in draw_modes and old_mode in draw_modes):
self._finish_drawing()
self.refresh()
@contextmanager
def block_update_properties(self):
self._update_properties = False
yield
self._update_properties = True
def _get_shape(self):
"""Determines the shape of the vertex data.
"""
if len(self.data._vertices) == 0:
return [1, 1]
else:
return np.max(self.data._vertices, axis=0) + 1
def add_shapes(self, data, *, shape_type='rectangle', edge_width=1,
edge_color='black', face_color='white', opacity=0.7,
z_index=0):
"""Add shapes to the current layer.
Parameters
----------
data : np.array | list
List of np.array of data or np.array. Each element of the list
(or row of a 3D np.array) corresponds to one shape. If a 2D array
is passed it corresponds to just a single shape.
shape_type : string | list
String of shape shape_type, must be one of "{'line', 'rectangle',
'ellipse', 'path', 'polygon'}". If a list is supplied it must be
the same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
edge_width : float | list
thickness of lines and edges. If a list is supplied it must be the
same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
edge_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3
or 4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
face_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3
or 4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
opacity : float | list
Opacity of the shapes, must be between 0 and 1.
z_index : int | list
Specifier of z order priority. Shapes with higher z order are
displayed on top of others. If a list is supplied it must be the
same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
"""
if len(data) == 0:
return
if np.array(data[0]).ndim == 1:
# If a single array for a shape has been passed
if shape_type in self.data._types.keys():
shape_cls = self.data._types[shape_type]
shape = shape_cls(data, edge_width=edge_width,
edge_color=edge_color, face_color=face_color,
opacity=opacity, z_index=z_index)
else:
raise ValueError("""shape_type not recognized, must be one of
"{'line', 'rectangle', 'ellipse', 'path',
'polygon'}"
""")
self.data.add(shape)
else:
# Turn input arguments into iterables
shape_types = ensure_iterable(shape_type)
edge_widths = ensure_iterable(edge_width)
opacities = ensure_iterable(opacity)
z_indices = ensure_iterable(z_index)
edge_colors = ensure_iterable(edge_color, color=True)
face_colors = ensure_iterable(face_color, color=True)
for d, st, ew, ec, fc, o, z, in zip(data, shape_types, edge_widths,
edge_colors, face_colors,
opacities, z_indices):
shape_cls = self.data._types[st]
shape = shape_cls(d, edge_width=ew, edge_color=ec,
face_color=fc, opacity=o, z_index=z)
self.data.add(shape)
def _update(self):
"""Update the underlying visual.
"""
if self._need_display_update:
self._need_display_update = False
self._set_view_slice()
if self._need_visual_update:
self._need_visual_update = False
self._node.update()
def _refresh(self):
"""Fully refresh the underlying visual.
"""
self._need_display_update = True
self._update()
def _set_view_slice(self, indices=None):
"""Set the shape mesh data to the view.
Parameters
----------
indices : sequence of int or slice
Indices to slice with.
"""
z_order = self.data._mesh.triangles_z_order
faces = self.data._mesh.triangles[z_order]
colors = self.data._mesh.triangles_colors[z_order]
vertices = self.data._mesh.vertices
if len(faces) == 0:
self._node._subvisuals[3].set_data(vertices=None, faces=None)
else:
self._node._subvisuals[3].set_data(vertices=vertices, faces=faces,
face_colors=colors)
self._need_visual_update = True
self._set_highlight(force=True)
self._update()
def interaction_box(self, index):
"""Create the interaction box around a shape or list of shapes.
If a single index is passed then the bounding box will be inherited
from that shape's interaction box. If a list of indices is passed it will
be computed directly.
Parameters
----------
index : int | list
Index of a single shape, or a list of shapes around which to
construct the interaction box
Returns
----------
box : np.ndarray
10x2 array of vertices of the interaction box. The first 8 points
are the corners and midpoints of the box in clockwise order
starting in the upper-left corner. The 9th point is the center of
the box, and the last point is the location of the rotation handle
that can be used to rotate the box
"""
if isinstance(index, (list, np.ndarray)):
if len(index) == 0:
box = None
elif len(index) == 1:
box = copy(self.data.shapes[index[0]]._box)
else:
indices = np.isin(self.data._index, index)
box = create_box(self.data._vertices[indices])
else:
box = copy(self.data.shapes[index]._box)
if box is not None:
rot = box[BOX_TOP_CENTER]
length_box = np.linalg.norm(box[BOX_BOTTOM_LEFT] -
box[BOX_TOP_LEFT])
if length_box > 0:
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
rot = rot-r*(box[BOX_BOTTOM_LEFT] -
box[BOX_TOP_LEFT])/length_box
box = np.append(box, [rot], axis=0)
return box
def _outline_shapes(self):
"""Finds outlines of any selected shapes including any shape hovered
over
Returns
----------
vertices : None | np.ndarray
Nx2 array of any vertices of outline or None
triangles : None | np.ndarray
Mx3 array of any indices of vertices for triangles of outline or
None
"""
if self._hover_shape is not None or len(self.selected_shapes) > 0:
if len(self.selected_shapes) > 0:
index = copy(self.selected_shapes)
if self._hover_shape is not None:
if self._hover_shape in index:
pass
else:
index.append(self._hover_shape)
index.sort()
else:
index = self._hover_shape
centers, offsets, triangles = self.data.outline(index)
rescale = self._get_rescale()
vertices = centers + rescale*self._highlight_width*offsets
else:
vertices = None
triangles = None
return vertices, triangles
def _compute_vertices_and_box(self):
"""Compute the location and properties of the vertices and box that
need to get rendered
Returns
----------
vertices : np.ndarray
Nx2 array of any vertices to be rendered as Markers
face_color : str
String of the face color of the Markers
edge_color : str
String of the edge color of the Markers and Line for the box
pos : np.ndarray
Nx2 array of vertices of the box that will be rendered using a
Vispy Line
width : float
Width of the box edge
"""
if len(self.selected_shapes) > 0:
if self.mode == Mode.SELECT:
# If in select mode just show the interaction bounding box
# including its vertices and the rotation handle
box = self._selected_box[BOX_WITH_HANDLE]
if self._hover_shape is None:
face_color = 'white'
elif self._hover_vertex is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
vertices = box
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[BOX_LINE_HANDLE]
width = 1.5
elif self.mode in ([Mode.DIRECT, Mode.ADD_PATH, Mode.ADD_POLYGON,
Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE,
Mode.ADD_LINE, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE]):
# If in one of these modes show the vertices of the shape itself
inds = np.isin(self.data._index, self.selected_shapes)
vertices = self.data._vertices[inds]
# If currently adding path don't show box over last vertex
if self.mode == Mode.ADD_PATH:
vertices = vertices[:-1]
if self._hover_shape is None:
face_color = 'white'
elif self._hover_vertex is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
pos = None
width = 0
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
elif self._is_selecting:
# If currently dragging a selection box just show an outline of
# that box
vertices = np.empty((0, 2))
edge_color = self._highlight_color
face_color = 'white'
box = create_box(self._drag_box)
width = 1.5
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[BOX_LINE]
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
return vertices, face_color, edge_color, pos, width
def _set_highlight(self, force=False):
"""Render highlights of shapes including boundaries, vertices,
interaction boxes, and the drag selection box when appropriate
Parameters
----------
force : bool
Bool that forces a redraw to occur when `True`
"""
# Check if any shape or vertex ids have changed since last call
if (self.selected_shapes == self._selected_shapes_stored and
self._hover_shape == self._hover_shape_stored and
self._hover_vertex == self._hover_vertex_stored and
np.all(self._drag_box == self._drag_box_stored)) and not force:
return
self._selected_shapes_stored = copy(self.selected_shapes)
self._hover_shape_stored = copy(self._hover_shape)
self._hover_vertex_stored = copy(self._hover_vertex)
self._drag_box_stored = copy(self._drag_box)
# Compute the vertices and faces of any shape outlines
vertices, faces = self._outline_shapes()
self._node._subvisuals[2].set_data(vertices=vertices, faces=faces,
color=self._highlight_color)
# Compute the location and properties of the vertices and box that
# need to get rendered
(vertices, face_color, edge_color, pos,
width) = self._compute_vertices_and_box()
self._node._subvisuals[0].set_data(vertices, size=self._vertex_size,
face_color=face_color,
edge_color=edge_color,
edge_width=1.5, symbol='square',
scaling=False)
self._node._subvisuals[1].set_data(pos=pos, color=edge_color,
width=width)
def _finish_drawing(self):
"""Reset properties used in shape drawing so new shapes can be drawn.
"""
index = copy(self._moving_shape)
self._is_moving = False
self.selected_shapes = []
self._drag_start = None
self._drag_box = None
self._fixed_vertex = None
self._moving_shape = None
self._moving_vertex = None
self._hover_shape = None
self._hover_vertex = None
if self._is_creating is True and self.mode == Mode.ADD_PATH:
vertices = self.data._vertices[self.data._index == index]
if len(vertices) <= 2:
self.data.remove(index)
else:
self.data.edit(index, vertices[:-1])
if self._is_creating is True and self.mode == Mode.ADD_POLYGON:
vertices = self.data._vertices[self.data._index == index]
if len(vertices) <= 2:
self.data.remove(index)
self._is_creating = False
self.refresh()
def remove_selected(self):
"""Remove any selected shapes.
"""
to_remove = sorted(self.selected_shapes, reverse=True)
for index in to_remove:
self.data.remove(index)
self.selected_shapes = []
shape, vertex = self._shape_at(self._cursor_coord)
self._hover_shape = shape
self._hover_vertex = vertex
self.status = self.get_message(self._cursor_coord, shape, vertex)
self.refresh()
def _rotate_box(self, angle, center=[0, 0]):
"""Perfrom a rotation on the selected box.
Parameters
----------
angle : float
angle specifying rotation of shapes in degrees.
center : list
coordinates of center of rotation.
"""
theta = np.radians(angle)
transform = np.array([[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
box = self._selected_box - center
self._selected_box = box @ transform.T + center
def _scale_box(self, scale, center=[0, 0]):
"""Perfrom a scaling on the selected box.
Parameters
----------
scale : float, list
scalar or list specifying rescaling of shape.
center : list
coordinates of center of rotation.
"""
if not isinstance(scale, (list, np.ndarray)):
scale = [scale, scale]
box = self._selected_box - center
box = np.array(box*scale)
if not np.all(box[BOX_TOP_CENTER] == box[BOX_HANDLE]):
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
handle_vec = box[BOX_HANDLE]-box[BOX_TOP_CENTER]
cur_len = np.linalg.norm(handle_vec)
box[BOX_HANDLE] = box[BOX_TOP_CENTER] + r*handle_vec/cur_len
self._selected_box = box + center
def _transform_box(self, transform, center=[0, 0]):
"""Perfrom a linear transformation on the selected box.
Parameters
----------
transform : np.ndarray
2x2 array specifying linear transform.
center : list
coordinates of center of rotation.
"""
box = self._selected_box - center
box = box @ transform.T
if not np.all(box[BOX_TOP_CENTER] == box[BOX_HANDLE]):
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
handle_vec = box[BOX_HANDLE]-box[BOX_TOP_CENTER]
cur_len = np.linalg.norm(handle_vec)
box[BOX_HANDLE] = box[BOX_TOP_CENTER] + r*handle_vec/cur_len
self._selected_box = box + center
def _shape_at(self, coord):
"""Determines if any shape at given coord by looking inside triangle
meshes.
Parameters
----------
coord : sequence of float
Image coordinates to check if any shapes are at.
Returns
----------
shape : int | None
Index of shape if any that is at the coordinates. Returns `None`
if no shape is found.
vertex : int | None
Index of vertex if any that is at the coordinates. Returns `None`
if no vertex is found.
"""
# Check selected shapes
if len(self.selected_shapes) > 0:
if self.mode == Mode.SELECT:
# Check if inside vertex of interaction box or rotation handle
box = self._selected_box[BOX_WITH_HANDLE]
distances = abs(box - coord[:2])
# Get the vertex sizes
rescale = self._get_rescale()
sizes = self._vertex_size*rescale/2
# Check if any matching vertices
matches = np.all(distances <= sizes, axis=1).nonzero()
if len(matches[0]) > 0:
return self.selected_shapes[0], matches[0][-1]
elif self.mode in ([Mode.DIRECT, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE]):
# Check if inside vertex of shape
inds = np.isin(self.data._index, self.selected_shapes)
vertices = self.data._vertices[inds]
distances = abs(vertices - coord[:2])
# Get the vertex sizes
rescale = self._get_rescale()
sizes = self._vertex_size*rescale/2
# Check if any matching vertices
matches = np.all(distances <= sizes, axis=1).nonzero()[0]
if len(matches) > 0:
index = inds.nonzero()[0][matches[-1]]
shape = self.data._index[index]
_, idx = np.unique(self.data._index, return_index=True)
return shape, index - idx[shape]
# Check if mouse inside shape
shape = self.data.inside(coord)
return shape, None
def _get_rescale(self):
"""Get conversion factor from canvas coordinates to image coordinates.
Depends on the current zoom level.
Returns
----------
rescale : float
Conversion factor from canvas coordinates to image coordinates.
"""
transform = self.viewer._canvas.scene.node_transform(self._node)
rescale = transform.map([1, 1])[:2] - transform.map([0, 0])[:2]
return rescale.mean()
def _get_coord(self, position):
"""Convert a position in canvas coordinates to image coordinates.
Parameters
----------
position : sequence of int
Position of mouse cursor in canvas coordinates.
Returns
----------
coord : sequence of float
Position of mouse cursor in image coordinates.
"""
transform = self.viewer._canvas.scene.node_transform(self._node)
pos = transform.map(position)
coord = np.array([pos[0], pos[1]])
self._cursor_coord = coord
return coord
def get_message(self, coord, shape, vertex):
"""Generates a string based on the coordinates and information about
what shapes are hovered over
Parameters
----------
coord : sequence of int
Position of mouse cursor in image coordinates.
shape : int | None
Index of shape if any to be highlighted.
vertex : int | None
Index of vertex if any to be highlighted.
Returns
----------
msg : string
String containing a message that can be used as a status update.
"""
coord_shift = copy(coord)
coord_shift[0] = int(coord[1])
coord_shift[1] = int(coord[0])
msg = f'{coord_shift.astype(int)}, {self.name}'
if shape is not None:
msg = msg + ', shape ' + str(shape)
if vertex is not None:
msg = msg + ', vertex ' + str(vertex)
return msg
def move_to_front(self):
"""Moves selected objects to be displayed in front of all others.
"""
if len(self.selected_shapes) == 0:
return
new_z_index = max(self.data._z_index) + 1
for index in self.selected_shapes:
self.data.update_z_index(index, new_z_index)
self.refresh()
def move_to_back(self):
"""Moves selected objects to be displayed behind all others.
"""
if len(self.selected_shapes) == 0:
return
new_z_index = min(self.data._z_index) - 1
for index in self.selected_shapes:
self.data.update_z_index(index, new_z_index)
self.refresh()
def _copy_shapes(self):
"""Copy selected shapes to clipboard.
"""
self._clipboard = ([deepcopy(self.data.shapes[i]) for i in
self._selected_shapes])
def _paste_shapes(self):
"""Paste any shapes from clipboard and then selects them.
"""
cur_shapes = len(self.data.shapes)
for s in self._clipboard:
self.data.add(s)
self.selected_shapes = list(range(cur_shapes,
cur_shapes+len(self._clipboard)))
self.move_to_front()
self._copy_shapes()
def _move(self, coord):
"""Moves object at given mouse position and set of indices.
Parameters
----------
coord : sequence of two int
Position of mouse cursor in image coordinates.
"""
vertex = self._moving_vertex
if self.mode in ([Mode.SELECT, Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE,
Mode.ADD_LINE]):
if len(self.selected_shapes) > 0:
self._is_moving = True
if vertex is None:
# Check where dragging box from to move whole object
if self._drag_start is None:
center = self._selected_box[BOX_CENTER]
self._drag_start = coord - center
center = self._selected_box[BOX_CENTER]
shift = coord - center - self._drag_start
for index in self.selected_shapes:
self.data.shift(index, shift)
self._selected_box = self._selected_box + shift
self.refresh()
elif vertex < BOX_LEN:
# Corner / edge vertex is being dragged so resize object
box = self._selected_box
if self._fixed_vertex is None:
self._fixed_index = (vertex+4) % BOX_LEN
self._fixed_vertex = box[self._fixed_index]
size = (box[(self._fixed_index+4) % BOX_LEN] -
box[self._fixed_index])
offset = box[BOX_HANDLE] - box[BOX_CENTER]
offset = offset/
|
np.linalg.norm(offset)
|
numpy.linalg.norm
|
# %% --------------------------------------- Load Packages -------------------------------------------------------------
import os
import numpy as np
import cv2
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# %% --------------------------------------- Data Prep -----------------------------------------------------------------
# Download data
if "train" not in os.listdir():
os.system("cd ~/Capstone")
os.system("wget https://storage.googleapis.com/exam-deep-learning/train.zip")
os.system("unzip train.zip")
# Read data
DIR = 'train/'
train = [f for f in os.listdir(DIR)]
train_sorted = sorted(train, key=lambda x: int(x[5:-4]))
imgs = []
texts = []
resize_to = 64
for f in train_sorted:
if f[-3:] == 'png':
imgs.append(cv2.resize(cv2.imread(DIR + f), (resize_to, resize_to)))
else:
texts.append(open(DIR + f).read())
imgs = np.array(imgs)
texts = np.array(texts)
le = LabelEncoder()
le.fit(["red blood cell", "ring", "schizont", "trophozoite"])
labels = le.transform(texts)
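# Note (added for clarity): LabelEncoder orders classes alphabetically, so the
# resulting encoding is 'red blood cell' -> 0, 'ring' -> 1, 'schizont' -> 2,
# 'trophozoite' -> 3.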
# Splitting
SEED = 42
x_train, x_val, y_train, y_val = train_test_split(imgs, labels,
random_state=SEED,
test_size=0.2,
stratify=labels)
print(x_train.shape, x_val.shape)
# %% --------------------------------------- Save as .npy --------------------------------------------------------------
# Save
|
np.save("x_train.npy", x_train)
|
numpy.save
|
import pandas as pd
import numpy as np
scale = {}
scale["Constant Returns to Scale"] =
|
np.arange(101)
|
numpy.arange
|
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
sys.path.append('/usr/local/lib/python3.6/site-packages')
from sklearn import svm
from sklearn.externals import joblib
import numpy as np
import sys
board = []
clf = joblib.load('SVM.pkl')
input = sys.argv[1]
for i in input:
board.append(int(i)-1)
b =
|
np.array(board)
|
numpy.array
|
"""
This file contains stochastic generation code that I am releasing to the group. I will try to keep it updated,
however, if you would like the most up-to-date research code that I am using, you should email me at
<EMAIL> (or text me).
This file contains several different useful generation classes.
(1) StatisticsGenerator: This is the base generation class. It samples the Gaussian random field, does filtering (assuming
filtering is called), and returns it without any post-processing
(2) EigenGenerator_... - these are a series of child classes that implement generation of eigenmicrostructures. I would suggest using
EigenGenerator_SquareLocalNMaximumConstrained (this one is the generator that is described in my paper).
(3) PhaseFieldGenerator - This is a generator where I just slapped a soft-max function on the output. I have not tested it at all.
Use at your own risk.
By: <NAME>
"""
import numpy as np
from HelperFunctions_StochasticGeneration import disect, rescale, local_square_mean
try:
import torch
except:
print("Couldn't find pytorch. Don't use AutoEigen")
ctol = 1e-8
class StatisticsGenerator():
def __init__(self, statistics, statistics_type='complete'):
"""
Initializing the class to be able to generate new structures given a set of statistics.
The second term indicates to the code whether the statistics passed in are a complete row (N statistics), or
a reduced row (N-1).
Statistics are assumed to be provided as a numpy array, where the first array, [..., 0], is the autocorrelation
and the remaining arrays are cross-correlations
The statistics are also assumed to be provided [0,0,0] being the t=0 point (so fftshift has not been applied)
:param statistics:
:param statistics_type: an indicator which explains to the code whether the statistics are complete. Can be
"incomplete" or "complete".
:return:
"""
# Some useful parameters
self.N = np.array(statistics.shape)[:-1].prod()
self.shape = statistics.shape[:-1]
self.twoD = True if (self.shape.__len__()<3) else False
# First we will transform all the statistics into the frequency domain
self.two_point_fft = np.fft.fftn(statistics, axes=tuple(range(0, self.shape.__len__())))
# Compute the zero mean:
self.means = self.two_point_fft[tuple([0] * (self.shape.__len__()) + [slice(None)])].real.copy()
self.means[0] = (self.means[0]/self.N)**(0.5)
# Computing the Final Phase, if we are interested in a conserved N-Phase Structure
if statistics_type.lower() == 'incomplete':
final_phase = np.zeros(self.shape, dtype=np.complex128)
final_phase[tuple([0] * self.shape.__len__())] = self.N * self.means[0]
final_phase -= self.two_point_fft.sum(axis=-1)
self.two_point_fft = np.concatenate((self.two_point_fft, final_phase[..., np.newaxis]), axis=-1)
self.means = np.hstack((self.means, np.array([self.two_point_fft[tuple([0] * self.shape.__len__() + [-1])].real
])))
self.means[1:] = (self.means[1:] / self.N) / self.means[0]
# Using the computed crosscorrelations, compute the inter phase transformation kernels
self.interfilter = np.ones_like(self.two_point_fft)
self.calculate_interfilters()
# Defining the derivatives
self.deriv_matrix = []
# Initializing the filters
self.filters = np.ones_like(self.two_point_fft)
def reinitialize(self, statistics, statistics_type='complete'):
"""
A method to reinitialize
:param statistics: The N-point statistics
:param statistics_type: An indicator for whether the complete set of statistics has been provided.
:return:
"""
self.__init__(statistics, statistics_type)
def filter(self, filter='flood', alpha=0.3, beta=0.35, cutoff_radius=0.15, maximum_sigma=0.02):
"""
A method for filtering sample structures using the gaussian filter that has been defined
:param beta: the filter weight; this is the power that controls the filter radius based on the volume fractions
:param filter: This is a string that indicates to the code the type of filter that you would like to use.
The options are: 'none' for no filtering. 'Gaussian_volumes' for a Gaussian filter parameterized by the volume
fraction. 'chords' for a Gaussian filter parameterized by the mean chord length of each phase.
Finally, 'flood' (the default) is the flood filtering method defined in the paper.
:param maximum_sigma: This provides the standard deviation length of the largest gaussian as a percentage of the
shortest side of the image.
:return:
"""
# renaming variables to correspond to the variable names given in the paper
filter_weight = beta
cutoff = alpha
if filter.lower() == 'gaussian_volumes':
for i in range(self.two_point_fft.shape[-1]):
if self.means[i] > 0.0:
cov = np.diag(np.square(np.ones([len(self.two_point_fft.shape[:-1])]) \
* (self.means[0] / self.means[i]) ** filter_weight / (maximum_sigma \
* self.two_point_fft.shape[0])))
self.filters[..., i] = self.gaussian_kernel(cov)
elif filter.lower() == 'chords':
# I do not recommend that people use this.
#
# only works in 2D
assert self.twoD
self.two_forward_derivative()
self.tfd_sides()
self.tfd_ups()
self.derivative_estimate()
for i in range(self.two_point_fft.shape[-1]):
if self.means[i] > 0.0:
cov = np.diag(np.square(np.array(self.derivs[i][1:]) / self.means[i] / filter_weight))
self.filters[..., i] = self.gaussian_kernel(cov)
elif filter.lower() == 'flood':
# Filters are extracted from the autocorrelations using a Breadth for Search Flood
# filling algorithm
#
# I have made some changes here to allow this to work for 3D. They have yet to be
# debugged because I need to change some of the base code to do so.
self.two_forward_derivative()
autos = self.derivative_estimate(return_autos=True)
for n in range(self.two_point_fft.shape[-1]):
if self.means[n] > 0:
filters_temp = disect(autos[..., n], cutoff=cutoff, \
radius_cutoff_fraction=cutoff_radius, twoD=self.twoD)
# second, we check our desired length and make sure it isn't too big:
# (lets say, half the domain?)
desired_length = self.means[n] / self.derivs[n][0] * filter_weight
if desired_length > filters_temp[0].shape[0] / 2:
desired_length = filters_temp[0].shape[0] / 2
self.filters[..., n] = rescale(filters_temp[0], filters_temp[1], \
desired_length, twoD=self.twoD)
self.filters[..., n] /= self.filters[..., n].sum()
self.filters = np.fft.fftn(self.filters, axes=tuple(range(len(self.shape))))
else:
pass
def generate(self, number_of_structures=2):
"""
This is a function to generate just the highest phase
:param number_of_structures: a parameter which indicates the number of structures to
generate. The number must be either 1 or 2.
:return:
"""
self.generator()
self.images = []
if (number_of_structures > 2) or (number_of_structures < 1):
raise ValueError('number_of_structures parameter must be either 1 or 2.')
for gen_iterator in range(0, number_of_structures):
self.images.append(np.ones_like(self.two_point_fft))
self.images[gen_iterator] *= np.fft.fftn(self.new[gen_iterator])[..., np.newaxis]
self.images[gen_iterator] = self.postprocess(np.fft.ifftn(self.images[gen_iterator] *
self.interfilter * self.filters,
axes=tuple(range(0, self.shape.__len__()))).real)
if number_of_structures == 1:
return self.images[0]
else:
return self.images[0], self.images[1]
def generator(self):
"""
A method for generating new microstructures given a covariance matrix and a mean.
for 2D
ctol is a global parameter that defines how negative the smallest
eigenvalue can be. It is defined at the top of the file.
"""
eigs = self.two_point_fft[...,0].real
eigs[tuple([0] * self.shape.__len__())] = 0.0 # I changed this from 1e-10
eigs = eigs / np.product(self.shape)
if eigs.min() < -ctol:
raise ValueError('The autocovariance contains at least one negative eigenvalue (' + \
str(eigs.min()) + ').')
eigs[eigs < 0.0] = 0.0
eigs = np.sqrt(eigs)
eps = np.random.normal(loc=0.0, scale=1.0, size=self.shape) + \
1j * np.random.normal(loc=0.0, scale=1.0, size=self.shape)
new = np.fft.fftn(eigs * eps)
self.new = []
self.new.append(new.real + self.means[0])
self.new.append(new.imag + self.means[0])
def postprocess(self, arr):
return arr
def two_forward_derivative(self, short=1.0, long=np.sqrt(2)):
if self.twoD:
self.deriv_matrix.append((-1 / 16) * np.array([[-1 / long, 0, -1 / short, 0, -1 / long],
[0, 4 / long, 4 / short, 4 / long, 0],
[-1 / short, 4 / short, -12 / short - 12 / long, 4 / short, -1 / short],
[0, 4 / long, 4 / short, 4 / long, 0],
[-1 / long, 0, -1 / short, 0, -1 / long]]))
else:
# define the two forward 3D approximate matrix derivative
#
# Things left to do:
# (1) update coefficient (x?)
# (2) Update center value (x)
# (3) update all layers (x)
self.deriv_matrix.append((-1 / 36) * np.array([
[[0, 0, -1 / long, 0, 0],
[0, 0, 0, 0, 0],
[-1 / long, 0, -1 / short, 0, -1 / long],
[0, 0, 0, 0, 0],
[0, 0, -1 / long, 0, 0]],
# z = 1
[[0, 0, 0, 0, 0],
[0, 0, 4 / long, 0, 0],
[0, 4 / long, 4 / short, 4 / long, 0],
[0, 0, 4 / long, 0, 0],
[0, 0, 0, 0, 0]],
# z = 2
[[-1 / long, 0, -1 / short, 0, -1 / long],
[0, 4 / long, 4 / short, 4 / long, 0],
[-1 / short, 4 / short, -18 / short - 36 / long, 4 / short, -1 / short],
[0, 4 / long, 4 / short, 4 / long, 0],
[-1 / long, 0, -1 / short, 0, -1 / long]],
# z = 3
[[0, 0, 0, 0, 0],
[0, 0, 4 / long, 0, 0],
[0, 4 / long, 4 / short, 4 / long, 0],
[0, 0, 4 / long, 0, 0],
[0, 0, 0, 0, 0]],
# z = 4
[[0, 0, -1 / long, 0, 0],
[0, 0, 0, 0, 0],
[-1 / long, 0, -1 / short, 0, -1 / long],
[0, 0, 0, 0, 0],
[0, 0, -1 / long, 0, 0]],
]))
def tfd_ups(self, short=1.0, long=np.sqrt(2)):
self.deriv_matrix.append((-1 / 4) * np.array([[0, 0, -1 / short, 0, 0],
[0, 0, 4 / short, 0, 0],
[0, 0, -6 / short, 0, 0],
[0, 0, 4 / short, 0, 0],
[0, 0, -1 / short, 0, 0]]))
def tfd_sides(self, short=1.0, long=np.sqrt(2)):
self.deriv_matrix.append((-1 / 4) * np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[-1 / short, 4 / short, -6 / short, 4 / short, -1 / short],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
def derivative_estimate(self, return_autos=False):
autos = np.fft.ifftn(self.two_point_fft * self.two_point_fft.conj() / self.two_point_fft[..., 0, np.newaxis],
axes=tuple(range(0, self.shape.__len__()))).real
self.derivs = [self.derivative(autos[..., i]) for i in range(autos.shape[-1])]
if return_autos:
return autos
def derivative(self, arr):
if self.twoD:
cent = np.fft.fftshift(arr)[int(self.shape[0] / 2 - 2):int(self.shape[0] / 2 + 3), int(self.shape[1] / 2 - 2):int(self.shape[1] / 2 + 3)]
return [(cent * self.deriv_matrix[n]).sum() for n in range(len(self.deriv_matrix))]
else:
cent = np.fft.fftshift(arr)[int(self.shape[0] / 2 - 2):int(self.shape[0] / 2 + 3), \
int(self.shape[1] / 2 - 2):int(self.shape[1] / 2 + 3), \
int(self.shape[2] / 2 - 2):int(self.shape[2] / 2 + 3)]
return [(cent * self.deriv_matrix[n]).sum() for n in range(len(self.deriv_matrix))]
def calculate_interfilters(self):
"""
A method for computing the interphase filters from the auto and cross correlations
:return:
"""
self.interfilter[..., 1:] = self.two_point_fft[..., 1:] / self.two_point_fft[..., 0, np.newaxis]
def gaussian_kernel(self, inverse_covariance):
"""
Produces the frequency domain of a Gaussian filter with an integral of 1.
It returns a 'real' fft transformation.
:param inverse_covariance: the inverse covariance matrix of the Gaussian kernel
(2x2 in 2D, 3x3 in 3D)
:return:
"""
if self.twoD:
assert inverse_covariance.shape[0] == 2
xx, yy = np.meshgrid(np.linspace(-(self.shape[0] - 1) / 2., (self.shape[0] - 1) / 2., self.shape[0]),
np.linspace(-(self.shape[1] - 1) / 2., (self.shape[1] - 1) / 2., self.shape[1]))
arr = np.concatenate([xx[..., np.newaxis], yy[..., np.newaxis]], axis=-1)
else:
assert inverse_covariance.shape[0] == 3
xx, yy, zz = np.meshgrid(np.linspace(-(self.shape[0] - 1) / 2., (self.shape[0] - 1) / 2., self.shape[0]),
np.linspace(-(self.shape[1] - 1) / 2., (self.shape[1] - 1) / 2., self.shape[1]),
np.linspace(-(self.shape[2] - 1) / 2., (self.shape[2] - 1) / 2., self.shape[2]))
arr = np.concatenate([xx[..., np.newaxis], yy[..., np.newaxis], zz[..., np.newaxis]], axis=-1)
kernel = np.squeeze(np.exp(-0.5 * arr[..., np.newaxis, :] @ inverse_covariance @ arr[..., np.newaxis]))
return np.fft.fftn(np.fft.ifftshift(kernel / np.sum(kernel)))
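# --- Added usage sketch (not part of the original module) --------------------
# A minimal, illustrative example of driving StatisticsGenerator end to end.
# The `statistics` argument is an assumption: it must be an N-point statistics
# array laid out as described in the class docstring (last axis holds the auto-
# and cross-correlations, with the t=0 point at index [0, 0, ...]).
def _example_statistics_generation(statistics):
    """Sketch: build a generator, apply the flood filter, draw two samples."""
    gen = StatisticsGenerator(statistics, statistics_type='complete')
    gen.filter(filter='flood', alpha=0.3, beta=0.35)
    sample_a, sample_b = gen.generate(number_of_structures=2)
    return sample_a, sample_b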
class StatisticsGenerator_PhaseRetrieval(StatisticsGenerator):
def generate(self, iterations=50):
"""
This is a function to generate just the highest phase
:param number_of_structures: a parameter which indicates the number of structures to
generate. The number must be either 1 or 2.
:return:
"""
number_of_structures = 1
self.generator(iter=iterations)
self.images = []
if (number_of_structures > 2) or (number_of_structures < 1):
raise ValueError('number_of_structures parameter must be either 1 or 2.')
for gen_iterator in range(0, number_of_structures):
self.images.append(np.ones_like(self.two_point_fft))
self.images[gen_iterator] *=
|
np.fft.fftn(self.new[gen_iterator])
|
numpy.fft.fftn
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 11 21:33:12 2016
@author: mali
"""
from enum import Enum
#Region ExtraRayData Filters
class ExtraRayData(Enum):
Source=1 #1.0 if filter condition met, 0.0 otherwise.
Surface =2 #1.0 if filter condition met, 0.0 otherwise.
After_Surface=3 #1.0 if filter condition met, 0.0 otherwise.
Element=4 #1.0 if filter condition met, 0.0 otherwise.
After_Element=5 #1.0 if filter condition met, 0.0 otherwise.
Property_Zone=6 #1.0 if filter condition met, 0.0 otherwise.
After_Property_Zone=7 #1.0 if filter condition met, 0.0 otherwise.
Hit_Number=8 #Number of times ray hit the receiver, as a real.
Ray_Magnitude=9 #Ray magnitude of ray. -- RayDataMagnitude with LT.GetReceiverRayData is faster
Wavelength=10 #Wavelength of ray (in nm). -- RayDataWavelength with LT.GetReceiverRayData is faster
Incident_Angle=11 #Angle of incidence of ray on receiver (in degrees).
Exit_Angle=12 #Angle of exit of ray from receiver (in degrees).
Path_Transmittance=13 #Path transmitance of ray.
Volume_Interface=14 #1.0 if filter condition met, 0.0 otherwise.
After_Volume_Interface=15 #1.0 if filter condition met, 0.0 otherwise.
Optical_Path_Length=16 #Cumulative optical path length of ray at receiver.
Optical_Property=17 #1.0 if filter condition met, 0.0 otherwise.
After_Optical_Property=18 #1.0 if filter condition met, 0.0 otherwise.
Polarization=19 #Value of the PolType
RayPathIndex=20 #Base 0 Ray Path Index. -- RayPathIndex with LT.GetReceiverRayData is faster
Filter_Group=21 #1.0 if filter condition met, 0.0 otherwise.
#End extra ray data filters
import clr
import math
import numpy as np
import System
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#This is the local LT pointer in this module.
#This must be set to a valid instance of LTCOM64.LTAPIx, after importing this module into another module
lt0=None
ltu=None
def cart2sph(x,y,z):
"""Returns the azimuth, elevation, and magnitude from x, y, z"""
azimuth = np.arctan2(y,x)
elevation = np.arctan2(z,np.sqrt(x**2 + y**2))
r = np.sqrt(x**2 + y**2 + z**2)
return azimuth, elevation, r
pass
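# Illustrative example (added): cart2sph(1.0, 1.0, 1.0) returns approximately
# (0.7854, 0.6155, 1.7321), i.e. azimuth and elevation in radians plus the magnitude.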
def unit_vector(vector):
"""Returns the unit vector of the vector."""
return vector / np.linalg.norm(vector)
pass
def angle_between_vectors(v1, v2):
""" Returns the angle between vectors 'v1' and 'v2' in radians::
>>> angle_between_vectors((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between_vectors((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between_vectors((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
try:
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
ang=np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
return ang
except Exception as e:
print('angle_between_vectors: ' + str(e))
pass
def GetKeyAndDataNameFromString(DataAccessString=''):
"""This is a utility function used for functions in this module
"""
if DataAccessString=='':
return ''
else:
DataAccessString=DataAccessString.strip()
das=DataAccessString.split('.')
dataname=das[len(das)-1]
fulldataname=dataname
#Strings for grids can contain [][]
tstr=dataname.split('[')
dataname=tstr[0]
key=DataAccessString[:len(DataAccessString)-len(fulldataname)-1]
#print(key,dataname)
return key,dataname
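# Illustrative example (added): GetKeyAndDataNameFromString('Solid[1].Primitive[1].Length')
# returns ('Solid[1].Primitive[1]', 'Length'); grid names such as 'Power_At[3]' are
# reduced to 'Power_At'.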
def GetLTDbItem(DataAccessString,returnstatus=-1, dbname=''):
"""Get a database item value
Parameters
----------
DataAccessString: String
This is the string you get via Copy Data Access Name in LightTools
returnstatus: Integer
This is optional. Default is -1, no return status. Pass 0 to request the return status
dbname: String
This is an option to pass the datakey, dataname combination. When empty, this is not used
Returns
-------
Requested data item
Usually a floating point number or a string
Status of the command execution (optional)
An integer that matches LTReturncodeENUMs
Examples
--------
Call without a return status
L = GetLTDbItem('Solid[1].Primitive[1].Length')
Call with the return status
L,Stat = GetLTDbItem('Solid[1].Primitive[1].Length',0)
Call with data key, data name combo
L = GetLTDbItem('Solid[1].Primitive[1]', dbname='Length')
"""
try:
if dbname == '':
key,dataname=GetKeyAndDataNameFromString(DataAccessString)
else:
dataname=dbname
key=DataAccessString
pass
#print(key,dataname)
[ltdata,stat]=lt0.DbGet(key,dataname,0)
if returnstatus==0:
return ltdata,stat
else:
return ltdata
except Exception as e:
print('GetLTDbItem: ' + str(e))
pass
##Local test
#Call without a return status
#L = GetLTDbItem('Solid[1].Primitive[1].Length')
#Call with the return status
#L,Stat = GetLTDbItem('Solid[1].Primitive[1].Length',0)
pass
def SetLTDbItem(DataAccessString,datavalue,dbname=''):
"""Set a database item value
Parameters
----------
DataAccessString: String
This is the string you get via Copy Data Access Name in LightTools
datavalue: string or numeric
This is the new data value assigned to the database item
dbname: String
This is an option to pass the datakey, dataname combination. When empty, this is not used
Returns
-------
Status of the command execution (optional)
An integer that matches LTReturncodeENUMs
Examples
--------
stat=SetLTDbItem('solid[1].primitive[1].radius',5)
"""
try:
if dbname == '':
key,dataname=GetKeyAndDataNameFromString(DataAccessString)
else:
dataname=dbname
key=DataAccessString
pass
stat=lt0.DbSet(key,dataname,datavalue)
return stat
except Exception as e:
print('SetLTDbItem: ' + str(e))
pass
##Local test
#Call without a return status
#L = GetLTDbItem('Solid[1].Primitive[1].Length')
#Call with the return status
#L,Stat = GetLTDbItem('Solid[1].Primitive[1].Length',0)
pass
def GetLTGridItem(DataAccessString,RowIndex=1,ColIndex=-1,returnstatus=-1,dbname=''):
"""Get a data value from a 1D or 2D grid item value
Parameters
----------
DataAccessString: String
This is the string you get via Copy Data Access Name in LightTools
Note: Content in square brackets at the end (e.g. [4] or [4][6]) is ignored
RowIndex: Integer
Index of the row in the grid, starting 1
ColIndex: Integer
Index of the column in the grid, starting 1
returnstatus: Integer
This is optional. Default is -1, no return status. Pass 0 to request the return status
dbname: String
This is an option to pass the datakey, dataname combination. When empty, this is not used
Returns
-------
Requested data item
Usually a floating point number or a string
Status of the command execution (optional)
An integer that matches LTReturncodeENUMs
Examples
--------
#Get a value from a 1D grid, without a return status
wl = GetLTGridItem('receiver[2].spectral_distribution[1].Wavelength_At', RowIndex=1)
#Get a value from a 1D grid, with the return status
wl,Stat = GetLTGridItem('receiver[2].spectral_distribution[1].Wavelength_At', RowIndex=1, returnstatus=0)
"""
try:
if dbname == '':
key,dataname=GetKeyAndDataNameFromString(DataAccessString)
else:
dataname=dbname
key=DataAccessString
pass
if ColIndex != -1:
#2D grid
[ltdata,stat]=lt0.DbGet(key,dataname,0,ColIndex,RowIndex)
else:
#1D grid
[ltdata,stat]=lt0.DbGet(key,dataname,0,RowIndex)
if returnstatus==-1:
return ltdata
else:
return ltdata,stat
except Exception as e:
print('GetLTGridItem: ' + str(e))
pass
##Local test
#Call without a return status
#L = GetLTDbItem('Solid[1].Primitive[1].Length')
#Call with the return status
#L,Stat = GetLTDbItem('Solid[1].Primitive[1].Length',0)
pass
def SetLTGridItem(DataAccessString,datavalue,RowIndex=1,ColIndex=-1,dbname=''):
"""Get a data value from a 1D or 2D grid item value
Parameters
----------
DataAccessString: String
This is the string you get via Copy Data Access Name in LightTools
Note: Content in square brackets at the end (e.g. [4] or [4][6]) is ignored
datavalue: string or numeric
This is the new data value assigned to the database item
RowIndex: Integer
Index of the row in the grid, starting 1
ColIndex: Integer
Index of the column in the grid, starting 1
dbname: String
This is an option to pass the datakey, dataname combination. When empty, this is not used
Returns
-------
Status of the command execution (optional)
An integer that matches LTReturncodeENUMs
Examples
--------
#Set a value in a 1D grid (illustrative)
stat = SetLTGridItem('receiver[2].spectral_distribution[1].Power_At', 1.0, RowIndex=1)
"""
try:
if dbname == '':
key,dataname=GetKeyAndDataNameFromString(DataAccessString)
else:
dataname=dbname
key=DataAccessString
pass
if ColIndex != -1:
#2D grid
stat=lt0.DbSet(key,dataname,datavalue,ColIndex,RowIndex)
else:
#1D grid
stat=lt0.DbSet(key,dataname,datavalue,RowIndex)
return stat
except Exception as e:
print('SetLTGridItem: ' + str(e))
pass
##Local test
#Call without a return status
#L = GetLTDbItem('Solid[1].Primitive[1].Length')
#Call with the return status
#L,Stat = GetLTDbItem('Solid[1].Primitive[1].Length',0)
pass
def GetLTMeshParams(DataAccessKey,CellValueType,paramsonly=False):
"""Get the data from a receiver mesh.
Parameters
----------
DataAccessKey: String
Data access string for the receiver mesh, typically obtained by 'Copy Data Access Name'
CellValueType: String
Data type to retrieve - e.g. 'CellValue', 'Flux', etc.
paramsonly: Boolean
If this is true then the mesh data array is not returned/retrieved, but other parameters such as xdim, ydim will be returned
Returns
-------
X_Dimension
Number of bins in X dimension
Y_Dimension
Number of bins in Y dimension
Min_X_Bound
Minimum X bound for the mesh
Max_X_Bound
Maximum X bound for the mesh
Min_Y_Bound
Minimum Y bound for the mesh
Max_Y_Bound
Maximum Y bound for the mesh
Mesh_Data_Array
An array of data, based on the cell value type requested
Examples
--------
meshkey="receiver[1].Mesh[1]"
xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue")
xdim,ydim,minx,maxx,miny,maxy=GetLTMeshParams(meshkey,paramsonly=True)
"""
try:
if DataAccessKey=='':
print('Invalid Data Access String')
return None
key=DataAccessKey
#Check the mesh key
if str(lt0.DbGet(key,"Name"))=="":
return None
else:
XDim=int(lt0.DbGet(key,"X_Dimension"))
YDim=int(lt0.DbGet(key,"Y_Dimension"))
MinX=lt0.DbGet(key,"Min_X_Bound")
MaxX=lt0.DbGet(key,"Max_X_Bound")
MinY=lt0.DbGet(key,"Min_Y_Bound")
MaxY=lt0.DbGet(key,"Max_Y_Bound")
if paramsonly==True:
return XDim,YDim,MinX,MaxX,MinY,MaxY
else:
dblArray=System.Array.CreateInstance(System.Double,XDim,YDim)
[Stat,mData]=lt0.GetMeshData(key,dblArray,CellValueType)
MeshData=np.ones((XDim,YDim))
print(XDim,YDim)
for i in range(0,XDim):
for j in range(0,YDim):
MeshData[i,j]=mData[i,j]
#print(mData[i,j])
MeshData=np.rot90(MeshData)
return XDim,YDim,MinX,MaxX,MinY,MaxY,MeshData
pass
except Exception as e:
print('GetLTMeshParams: ' + str(e))
pass
def PlotRaster(DataAccessString,CellValueType,colormap='jet',xlabel='X',ylabel='Y',zlabel='Value',title='',plottype='2D',plotsize=(5,5),returndata=False):
"""Creates a 2D or a 3D plot for a given receiver mesh. Optionally, data can be returned
Parameters
----------
DataAccessKey: String
Data access string for the receiver mesh, typically obtained by 'Copy Data Access Name'
CellValueType: String
Data type to retrieve - e.g. 'CellValue', 'Flux', etc.
Chart_Parameters: Miscellaneous types
See the Python/matplotlib documentation for charting for more details
Returns
-------
X_Dimension
Number of bins in X dimension
Y_Dimension
Number of bins in Y dimension
Min_X_Bound
Minimum X bound for the mesh
Max_X_Bound
Maximum X bound for the mesh
Min_Y_Bound
Minimum Y bound for the mesh
Max_Y_Bound
Maximum Y bound for the mesh
Mesh_Data_Array
An array of data, based on the cell value type requested
Examples
--------
meshkey="receiver[1].Mesh[1]"
xdim,ydim,minx,maxx,miny,maxy,md=GetLTMeshParams(meshkey,"CellValue")
"""
try:
#This is the default width for the plot
figw=plotsize[0]
figh=plotsize[1]
ar=1
#for data with a single column, we need to add at least one more column
xdim,ydim,minx,maxx,miny,maxy,d=GetLTMeshParams(DataAccessString,CellValueType)
if d.shape[1]<2:
nrows=d.shape[0]
d2=np.zeros((nrows,2))
d2[:,0]=d[:,0]
d2[:,1]=d[:,0]
d=d2
#check the x and y scales to maintain the aspect
w=abs(maxx-minx)
h=abs(maxy-miny)
if w>h:
ar=h/w
figh=figh*ar
else:
ar=w/h
figw=figw*ar
if plottype[0]=='2':
cellx=np.linspace(minx,maxx,xdim+1)
celly=np.linspace(miny,maxy,ydim+1)
X,Y=np.meshgrid(cellx,celly)
#plt.figure(figsize=(int(figw),int(figh)))
plt.figure(figsize=plotsize)
plt.pcolormesh(X,Y,np.flipud(d),cmap=colormap)
plt.axis('equal')
#plt.axis('tight')
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.colorbar()
if title != '':
plt.title(title)
else:
cellx=np.linspace(minx,maxx,xdim)
celly=np.linspace(miny,maxy,ydim)
X,Y=np.meshgrid(cellx,celly)
fig=plt.figure(figsize=(int(figw),int(figh)))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, d,cmap=colormap)
#ax.plot_trisurf(X, Y, d,cmap=colormap,linewidth=0.2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
if title != '':
ax.set_title(title)
# for angle in range(0, 360):
# ax.view_init(30, angle)
# plt.draw()
if returndata==True:
return xdim,ydim,minx,maxx,miny,maxy,d
else:
return 0
except Exception as e:
print('PlotRaster: ' + str(e))
pass
def PlotSpectralDistribution(DataAccessKey,returndata=True):
"""Plots the spectral distribution on a receiver and returns the data
Parameters
----------
DataAccessKey: String
This is the string you get via Copy Data Access Name in LightTools for the spectral distribution
returndata: Boolean
This is optional. Default is True, meaning the data will be returned.
Returns
-------
Row Count
Number of wavelength/power pairs in the spectral distribution
Data array
Array containing the wavelength and the relative power
Examples
--------
numrows,spd=PlotSpectralDistribution('receiver[2].spectral_distribution[1]')
plt.plot(spd[:,0],spd[:,1])
"""
try:
rowcount=int(lt0.DbGet(DataAccessKey,'Count'))
sd=np.zeros((rowcount,2))
for i in range(1,rowcount+1):
sd[i-1,0],stat=GetLTGridItem(DataAccessKey + '.Wavelength_At',RowIndex=i,returnstatus=0)
sd[i-1,1],stat=GetLTGridItem(DataAccessKey + '.Power_At',RowIndex=i,returnstatus=0)
plt.plot(sd[:,0],sd[:,1])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Relative Power')
ax = plt.gca()
ax.grid(True)
if returndata==True:
return rowcount,sd
else:
return 0
except Exception as e:
print('PlotSpectralDistribution: ' + str(e))
def PlotTrueColorRster(DataAccessKey,xlabel='X',ylabel='Y',title='',plotsize=(5,5),returndata=False):
"""Creates an RGB image from R,G,B data on the CIE data mesh
Parameters
----------
DataAccessKey: String
This is the string you get via Copy Data Access Name in LightTools for the CIE mesh (spatial or angular)
returndata: Boolean
This is optional. Default is False (no data is returned)
Returns
-------
Data arrays, R, G, and B. Note, for 'imshow', data must be of type 'numpy.uint8'
Examples
--------
r,g,b=PlotTrueColorRster('receiver[1].mesh[2]',returndata=True)
"""
try:
xdim,ydim,minx,maxx,miny,maxy,d=GetLTMeshParams(DataAccessKey,'Red_Value_UI')
R=d
xdim,ydim,minx,maxx,miny,maxy,d=GetLTMeshParams(DataAccessKey,'Green_Value_UI')
G=d
xdim,ydim,minx,maxx,miny,maxy,d=GetLTMeshParams(DataAccessKey,'Blue_Value_UI')
B=d
im=np.zeros((xdim,ydim,3),dtype=np.uint8)
im[:,:,0]=R
im[:,:,1]=G
im[:,:,2]=B
plt.figure(figsize=plotsize)
plt.imshow(im,origin='lower', extent=[minx, maxx, miny, maxy],interpolation='nearest')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if returndata==True:
return R,G,B
else:
return 0
except Exception as e:
print('PlotTrueColorRster: ' + str(e))
pass
def GetViewImage(viewname):
"""Capture a screenshot of a given view (3D, 2D or Chart)
Parameters
----------
viewname: String
This is the name of the view to capture. Can use the short name '3D' for the 3D View. Chart views have specific names.
Returns
-------
File Name
Path and the file name of the image saved in the working directory
Image
Image (returned from misc.imread() function)
Examples
--------
viewname='3d'
im,imname=GetViewImage(viewname)
plt.imshow(im)
"""
import System.Windows.Forms
from System.Windows.Forms import Clipboard
import tempfile
from scipy import misc
#import LTUtilities as ltu
try:
workdirstr=ltu.checkpyWorkDir()
viewname=viewname.upper()
if viewname[:2]=='3D':
cmdstr=chr(92) + 'V3D'
else:
vname=lt0.ViewGet('View[' + viewname + ']','Name')
vname=vname.replace(' ','_')
cmdstr= chr(92) +'V' + 'Chart_' + vname
lt0.Cmd(cmdstr)
Clipboard.Clear()
lt0.Cmd('CopyToClipboard')
mybmap=Clipboard.GetImage()
tempfname = next(tempfile._get_candidate_names())
#print(tempfname)
fname=workdirstr + '/' + tempfname + '.png'
#print(fname)
mybmap.Save(fname,System.Drawing.Imaging.ImageFormat.Png)
del(mybmap)
im=misc.imread(fname)
return im,fname
except Exception as e:
print('GetViewImage: ' + str(e))
pass
def GetLTReceiverRays(DataAccessKey,descriptors=['raydatax','raydatay','raydataz'],simdata='Forward_Sim_Function[1]',usepassfilters=False):
"""Retrieves the ray data on the receiver
Parameters
----------
DataAccessKey : String
Data access key for the receiver, typically obtained by 'Copy Data Access Name' - e.g. 'receiver[1]'
descriptors: String
ray data items to retrieve. This should be a list containing all the desired data items.
These are the available descriptors (as of 10/2016):
"RAYDATAX", "RAYDATAY", "RAYDATAZ", "RAYDATAENTERINGL", "RAYDATAENTERINGM", "RAYDATAENTERINGN",
"RAYDATAMAGNITUDE", "RAYDATAWAVELENGTH", "RAYDATAABSORPTION",
"RAYDATAORDINALNUMBER" ----> If you trace N rays, the ordinal number is between 1 and N,
"RAYDATAPASSFILTERS" ----> 1, if the ray passes all filters; otherwise, 0,
"RAYPATHINDEX" ----> Base 1 index of the ray pathes (e.g., 1 would correspond to PathIndex=1 in the LightTools ray path),
"STOKES0", "STOKES1", "STOKES2", "STOKES3", "POLDIRX", "POLDIRY", "POLDIRZ", "RAYTRANSMITTANCE"
simdata: String
The type of simulation to use; the default is forward simulation (Forward_Sim_Function)
usepassfilters: Boolean
If False, all the ray data is returned. If True, only rays that pass receiver filter criteria will be returned
Returns
-------
Number of Rays (N)
Number of rays retrieved
Number of Data Descriptors (M)
Number of data descriptors received in the list
Ray Data
This is an array (N rows, M columns) with requested ray data
Examples
--------
reckey="receiver[1]" --- assume forward simulation (default)
descriptors=['RayDataX','RayDataY','RayDataZ','RayDataWavelength']
N,M,RayData = GetLTReceiverRays(reckey,descriptors)
"""
try:
if len(descriptors)<1:
return -1
key=DataAccessKey + '.' + simdata
numrays=int(lt0.DbGet(key,'NumberOfSamples'))
if usepassfilters==True:
descriptors.append('raydatapassfilters')
pass
numdes=len(descriptors)
dblArray=System.Array.CreateInstance(System.Double,numrays,numdes)
strArray=System.Array.CreateInstance(System.String,numdes,1)
for i in range(0,numdes):
strArray[i,0]=descriptors[i]
pass
stat,d1,rdata=lt0.GetReceiverRayData(key,strArray,dblArray)
#If usepassfilters=True, we don't know how many qualify
#Resizing arrays dynamically is inefficient, so first check the number of rays that qualify
numpassedrays=0
if usepassfilters==True:
for i in range(0,numrays):
passflag=int(rdata[i,numdes-1])
if passflag>0:
numpassedrays += 1
pass
pass
raydata=np.zeros((numpassedrays,numdes-1)) #We added passfilters flag at the end, so ignore it
print('Qualified number of rays: ' + str(numpassedrays))
numpassedrays=0
for i in range(0,numrays):
passflag=int(rdata[i,numdes-1])
if passflag>0:
for j in range(0,numdes-1):
raydata[numpassedrays,j]=rdata[i,j]
pass
numpassedrays += 1
pass
pass
#We need to remove the appended passfilters flag
descriptors.remove('raydatapassfilters')
return numpassedrays,numdes-1,raydata
else:
raydata=np.zeros((numrays,numdes))
for i in range(0,numrays):
for j in range(0,numdes):
raydata[i,j]=rdata[i,j]
pass
pass
return numrays,numdes,raydata
except Exception as e:
print('GetLTReceiverRays: ' + str(e))
pass
def GetLTReceiverRays_Extra(DataAccessKey,FilterIndex,simdata='Forward_Sim_Function[1]',StartRay=-1,EndRay=-1):
"""Retrieves the extra ray data items, such as Optical Path length, that is not available with LTAPIx.GetReceiverRayData() function.
This is significantly slower than GetLTReceiverRays() when there is a large number of rays on the receiver!
Parameters
----------
DataAccessString: String
Data access key for the receiver, typically obtained by 'Copy Data Access Name' - e.g. 'receiver[1]'
FilterIndex: Integer
This is the type of data to retrieve. Use the ENUM values at the top of this module to find the type of data. Value must be within [1, 21].
For some data types, a receiver filter must be present. Refer to the documentation for details
StartRay: Integer
Default (-1) will start from ray number 1
EndRay: Integer
Default (-1) will use all rays, from the start ray.
Returns
-------
Number of rays retrieved.
Requested data as an array of floats
Examples
--------
reckey="receiver[1]"
N,exdata=GetLTReceiverRays_Extra(reckey,ExtraRayData.Optical_Path_Length.value)
"""
try:
if FilterIndex>21:
return -1
key=DataAccessKey + '.' + simdata
numrays=int(lt0.DbGet(key,'NumberOfSamples'))
if numrays<1:
return -1
if StartRay>numrays:
print('Start ray must be < number of saved rays on receiver')
return -1
if StartRay>EndRay:
EndRay=StartRay+1
print('End ray must be > Start Ray')
if (StartRay==-1 and EndRay==-1):
ExData=np.zeros((numrays))
for i in range(1,numrays+1):
ExData[i-1]=GetLTGridItem(key + '.ExtraRayData',i,FilterIndex)
if i>numrays:
break
return numrays,ExData
else:
if StartRay<0:
StartRay=1
if EndRay<0:
EndRay=numrays
ExData=np.zeros((EndRay-StartRay+1)) #Initialize output array for the requested ray range
n=0
for i in range(StartRay,EndRay+1):
ExData[n]=GetLTGridItem(key + '.ExtraRayData',i,FilterIndex)
if i>numrays:
break
n += 1
return n,ExData
#plt.hist(ExData,bins=21)
except Exception as e:
print('GetLTReceiverRays_Extra: ' + str(e))
pass
def GetRayPathData(DataAccessKey,simdata='forward_sim_function[1]', usevisibleonly=False):
"""Get the ray path data from a given receiver
Parameters
----------
DataAccessString: String
Data access key for the ray paths, obtained by 'Copy Data Access Name' - e.g. 'receiver[1]'
usevisibleonly: Boolean
Pass True in order to retrieve data only for the visible paths
Returns
-------
The following data items from the grid are returned
RayPathVisibleAt, RayPathPowerAt, RayPathNumRaysAt, RayPathStringAt
Examples
--------
visibleat,powerat,raysat,stringat=GetRayPathData('receiver[1]')
"""
try:
key=DataAccessKey + '.' + simdata
numpaths=int(GetLTDbItem(key + '.NumberOfRayPaths'))
if numpaths<1:
return -1
visibleat=[]
powerat=[]
numraysat=[]
stringat=[]
vakey=key + '.RayPathVisibleAt'
pakey=key + '.RayPathPowerAt'
rakey=key + '.RayPathNumRaysAt'
sakey=key + '.RayPathStringAt'
for i in range(1,numpaths+1):
va=GetLTGridItem(vakey,i)
if usevisibleonly==True:
va=va.lower()
if va[0]=='y':
visibleat.append(va)
pa=GetLTGridItem(pakey,i)
ra=GetLTGridItem(rakey,i)
sa=GetLTGridItem(sakey,i)
visibleat.append(va)
powerat.append(pa)
numraysat.append(ra)
stringat.append(sa)
else:
visibleat.append(va)
pa=GetLTGridItem(pakey,i)
ra=GetLTGridItem(rakey,i)
sa=GetLTGridItem(sakey,i)
powerat.append(pa)
numraysat.append(ra)
stringat.append(sa)
return visibleat,powerat,numraysat,stringat
except Exception as e:
print('GetRayPathData: ' + str(e))
pass
def MakeRayFileUsingRayOrdinal(DataAccessKey,descriptors=['raydataX','raydataY','raydataZ','raydataenteringL','raydataenteringM','raydataenteringN','raydataMAGNITUDE'],simdata='Forward_Sim_Function[1]',DataAccessKey_Ordinal=''):
"""Creates a ray data file from receiver rays, based on ray ordinal numers from another receiver
Parameters
----------
DataAccessKey : String
Data access key for the receiver, typically obtained by 'Copy Data Access Name' - e.g. 'receiver[1]'
descriptors: String
ray data items to retrieve. This should be a list containing all the desired data items.
These are the available descriptors (as of 10/2016):
"RAYDATAX", "RAYDATAY", "RAYDATAZ", "RAYDATAENTERINGL", "RAYDATAENTERINGM", "RAYDATAENTERINGN",
"RAYDATAMAGNITUDE", "RAYDATAWAVELENGTH", "RAYDATAABSORPTION",
"RAYDATAORDINALNUMBER" ----> If you trace N rays, the ordinal number is between 1 and N,
"RAYDATAPASSFILTERS" ----> 1, if the ray passes all filters; otherwise, 0,
"RAYPATHINDEX" ----> Base 1 index of the ray pathes (e.g., 1 would correspond to PathIndex=1 in the LightTools ray path),
"STOKES0", "STOKES1", "STOKES2", "STOKES3", "POLDIRX", "POLDIRY", "POLDIRZ", "RAYTRANSMITTANCE"
simdata: String
The type of simulation to use; the default is forward simulation (Forward_Sim_Function)
DataAccessKey_Ordinal: String
This is the data access key for the receiver used to retrieve ray ordinal numbers
Returns
-------
Number of Rays (N)
Number of rays retrieved
File Name
This is the name of the new ray data file
Examples
--------
reckey1='receiver[1]' --- this is the receiver for ray data, assume forward simulation (default)
reckey2='receiver[2]'
descriptors=['RayDataX','RayDataY','RayDataZ','RayDataWavelength']
N,FName = MakeRayFileUsingRayOrdinal(reckey1,descriptors,DataAccessKey_Ordinal=reckey2)
"""
try:
import tempfile
descriptors.append('RayDataOrdinalNumber')
n1,m1,rd1=GetLTReceiverRays(DataAccessKey,descriptors,simdata)
des=['raydataordinalnumber'] #This is all we need from this receiver
n2,m2,rd2=GetLTReceiverRays(DataAccessKey_Ordinal,des,simdata,usepassfilters=True)
tempfname = next(tempfile._get_candidate_names())
#print(tempfname)
workdirstr=ltu.checkpyWorkDir()
fname=workdirstr + '/' + tempfname + '.txt'
print('Using temporary file name: ' + fname)
rf=open(fname,'w')
rf.write(ltu.GetRayFileHeader())
N=0
for i in range(0,n1):
tstr=''
if rd1[i,m1-1] in rd2:
N += 1
for j in range(0,m1-1): #we added the ordinal, so don't write that
tstr=tstr + ' ' + str(rd1[i,j])
pass
rf.write(tstr + '\n\r')
pass
pass
rf.write(ltu.GetRayFileFooter())
rf.close()
return N,fname
except Exception as e:
print('MakeRayFileUsingRayOrdinal: ' + str(e))
pass
def GetAOIDistributionFromReceiverRays(DataAccessKey,simdata='forward_sim_function[1]',usepassfilters=True,referencevector=(0,0,1)):
"""Returns the angle of incidence relative to a direction vector. Default is surface normal [0,0,1]
Parameters
----------
DataAccessKey : String
Data access key for the receiver, typically obtained by 'Copy Data Access Name' - e.g. 'receiver[1]'
simdata: String
The type of simulation to use; the default is forward simulation (Forward_Sim_Function)
usepassfilters: Boolean
This flag specifies whether to use any filters enabled on the receiver for data
referencevector: 3-element array like 0,0,1
The default is the surface normal - 0,0,1
Returns
-------
An array with computed incident angles
Examples
--------
aoi=ltd.GetAOIDistributionFromReceiverRays('receiver[1]',referencevector=(0,0,1))
plt.hist(aoi*180/math.pi,bins=30,range=(0,90))
"""
try:
des=['raydataEnteringL','raydataEnteringM','raydataEnteringN']
n1,m1,rd1=GetLTReceiverRays(DataAccessKey,descriptors=des,simdata=simdata,usepassfilters=usepassfilters)
ang=np.zeros(n1)
#v2=(0,0,1) #surface normal
v2=referencevector
for i in range(0,n1):
v1=(rd1[i,0],rd1[i,1],rd1[i,2])
ang[i]=angle_between_vectors(v1,v2)
return ang
except Exception as e:
print('GetAOIDistributionFromReceiverRays: ' + str(e))
pass
def ComputeLumDataForMesh(ReceiverDataAccessKey,carea,flux,datatype):
"""
"""
try:
print('Analysis type: ' + str(datatype))
datatype=int(datatype)
if carea.any() != 0:
E=flux/carea #For intensity, CellArea is the solid angle
if (datatype==0 or datatype==1): #Illum, Intensity
return E
elif datatype==2: #Spatial Lum
meterkey=ReceiverDataAccessKey + '.SPATIAL_LUM_METER[1].CentralPSA'
psa=GetLTDbItem(meterkey)
if psa != 0:
E=E/psa
else:
print('Central PSA must be > 0')
return None
pass
elif datatype==3: #Angular Lum
aperturekey=ReceiverDataAccessKey + '.ANGULAR_LUM_METER[1].HalfSize'
aperturesize=2*float(GetLTDbItem(aperturekey))
if aperturesize != 0:
E=E/(aperturesize*aperturesize)
else:
print('Aperture size must be > 0')
return None
pass
else:
print('Unexpected data type.')
return None
pass
return E
else:
print('Cell Area must be > 0')
return None
except Exception as e:
print('ComputeLumDataForMesh: ' + str(e))
pass
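#Illustrative usage (a minimal sketch; the key and input arrays are assumptions, not taken from the original examples):
#for an illuminance mesh (datatype=0) this simply divides per-cell flux by per-cell area, e.g.
#  E = ComputeLumDataForMesh('receiver[1]', carea, flux, 0)
#where carea comes from GetLTMeshParams(meshkey,'CellSurfaceArea') and flux is the binned ray power
#per cell divided by the number of simulation rays.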
def RebinReceiverRayData(ReceiverDataAccessKey,MeshDataAccessKey,simdata='forward_sim_function[1]',useraypathindex=False):
"""Only illuminance data is supported!
"""
try:
#Smoothing can produce different results in the UI
#These calculations do not take smoothing into account
smoothing=GetLTDbItem(MeshDataAccessKey + '.Do_Noise_Reduction')
if smoothing != '':
if smoothing.upper() == 'YES':
print('Warning: mesh smoothing is ON.')
else:
print('Invalid mesh key.')
return None
pass
#We need to determine the data type using the mesh key
analysistype=-1
datatype=['ILLUMINANCE_MESH', 'INTENSITY_MESH', 'SPATIAL_LUMINANCE_MESH', 'ANGULAR_LUMINANCE_MESH']
filtertype=GetLTDbItem(MeshDataAccessKey + '.FilterName')
filtertype=filtertype.upper()
for i in range(0,4):
if datatype[i] == filtertype:
analysistype=i
break
pass
pass
if analysistype != 0:
#Only illuminance is supported for now
print('Data types other than illuminance are currently unsupported.')
return None
#We need to calculate some extra data
#For luminance, filter rays inside the cone angle
conekey=ReceiverDataAccessKey + '.SPATIAL_LUM_METER[1].CentralConeAngle'
conehalfangle=GetLTDbItem(conekey)
rayaoi=GetAOIDistributionFromReceiverRays(ReceiverDataAccessKey,simdata)
raysincone=np.where(rayaoi<=conehalfangle) #These are the indices
#For intensity, calculate angles
pass
xdim,ydim,minx,maxx,miny,maxy,carea=GetLTMeshParams(MeshDataAccessKey,'CellSurfaceArea')
simrays=int(GetLTDbItem('SIMULATIONS[1].CurProgress'))
if useraypathindex==True:
des=['raydatax','raydatay','raydatamagnitude','raypathindex']
n,m,rd=GetLTReceiverRays(ReceiverDataAccessKey,simdata=simdata,descriptors=des)
x=rd[:,0]
y=rd[:,1]
mag=rd[:,2]
rpi=rd[:,3]
if analysistype == 2 or analysistype == 3:
#we need to clip rays
x=x[raysincone]
y=y[raysincone]
mag=mag[raysincone]
#A 3D array to save data for each ray path
numpaths=int(GetLTDbItem(ReceiverDataAccessKey + '.' + simdata + '.NumberOfRayPaths'))
pdata=np.zeros((ydim,xdim,numpaths)) #Rows/columns need to match, after flip
for i in range(1,numpaths+1):
xp=x[rpi==float(i)]
yp=y[rpi==float(i)]
magp=mag[rpi==float(i)]
#sumpowerp, xedges, yedges = np.histogram2d(xp,yp,bins=(xdim,ydim),range=([minx,maxx],[miny,maxy]),weights=magp)
#sumpowerp=np.rot90(sumpowerp) #Same orientation
sumpower, xedges, yedges = np.histogram2d(xp,yp,bins=(xdim,ydim),range=([minx,maxx],[miny,maxy]),weights=magp)
sumpower=np.rot90(sumpower) #Same orientation
fluxp=sumpower/simrays
Ep=ComputeLumDataForMesh(ReceiverDataAccessKey,carea,fluxp,analysistype)
pdata[:,:,i-1]=Ep
pass
return pdata
else:
des=['raydatax','raydatay','raydatamagnitude']
n,m,rd=GetLTReceiverRays(ReceiverDataAccessKey,simdata=simdata,descriptors=des)
x=rd[:,0]
y=rd[:,1]
mag=rd[:,2]
sumpower, xedges, yedges = np.histogram2d(x,y,bins=(xdim,ydim),range=([minx,maxx],[miny,maxy]),weights=mag)
sumpower=np.rot90(sumpower) #Same orientation
flux=sumpower/simrays
sumpower
flux
E=ComputeLumDataForMesh(ReceiverDataAccessKey,carea,flux,analysistype)
return E
pass
except Exception as e:
print('RebinReceiverRayData: ' + str(e))
pass
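#Illustrative usage (a minimal sketch; the data access keys are assumptions): rebin the saved
#receiver rays onto an existing illuminance mesh, optionally per ray path, e.g.
#  E = RebinReceiverRayData('receiver[1]', 'receiver[1].mesh[1]')
#  Epaths = RebinReceiverRayData('receiver[1]', 'receiver[1].mesh[1]', useraypathindex=True)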
def GetAzimuthElevationFromRayData(DataAccessKey,simdata='forward_sim_function[1]',datarange=[0,360,0,90]):
"""Calculates and returns the azimuth, elevation from raya data. Mostly suitable for surface receivers
Parameters
----------
DataAccessKey: String
This is the data access key for the receiver
simdata: String
Simulation data to use. The default is forward simulation
datarange: List
A list containing the min/max for azimuth and elevation. The default full range returns data for the whole hemisphere
Returns
-------
Three arrays, containing Azimuth, Elevation, and Ray Data Magnitude
Examples
--------
Get the ray data for azimuth 45,90 and elevation 45,60
azm,elv,rdm=GetAzimuthElevationFromRayData('receiver[1]',datarange=[45,90,45,60])
Get the ray data for the hemisphere
azm,elv,rdm=GetAzimuthElevationFromRayData('receiver[1]')
"""
try:
des=['raydataEnteringL','raydataEnteringM','raydataEnteringN','raydatamagnitude']
n1,m1,rd0=GetLTReceiverRays(DataAccessKey,descriptors=des,simdata=simdata)
azm=np.zeros(n1)
elv=np.zeros(n1)
for i in range(0,n1):
azm[i],elv[i],r=cart2sph(rd0[i,0],rd0[i,1],rd0[i,2])
azm=azm*180/math.pi
elv=elv*180/math.pi
azm[azm<0]=360-abs(azm[azm<0])
elv=90-elv #We want elevation defined wrt the surface normal
rdm=rd0[:,3]
#if the datarange covers a hemisphere, then we're set
if datarange==[0,360,0,90]:
return azm,elv,rdm
else:
#If not, get a 'patch' of angles
#First, isolate azimuth
k=
|
np.where((azm>=datarange[0]) & (azm<=datarange[1]))
|
numpy.where
|
""" Plot full light curves, one panel per band
Advice on aesthetic from <NAME>
this is Fig 3 in the paper """
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
from astropy.table import Table
from astropy.cosmology import Planck15
import glob
import extinction
from uv_lc import get_uv_lc
zp = 2458370.6473
def plot_inset():
# zoomed-in window showing the earliest non-detection and detection
axins = inset_axes(
ax, 2, 1, loc=1,
bbox_to_anchor=(0.87,0.98),
bbox_transform=ax.transAxes)
choose = np.logical_and(det, band)
axins.errorbar(
dt[choose]*24, mag[choose], emag[choose], fmt='s', ms=6,
mec=rcol, mfc=rcol, c=rcol, label='r', zorder=9)
choose = np.logical_and(nondet, band)
axins.arrow(
2458370.6408-zp, 19.97, 0, 0.5, length_includes_head=True,
head_width=0.2, head_length=0.3, fc='k', ec='k')
band = filt=='g'
choose = np.logical_and(np.logical_and(det, band), dt*24 < 3)
axins.errorbar(
dt[choose]*24, mag[choose], emag[choose],
fmt='o', ms=5, mec='#57106e', mfc='white', c='#57106e', label='g')
# fit a line to this early g-band data
out = np.polyfit(dt[choose]*24, mag[choose], deg=1, w=1/emag[choose])
m,b = out
dt_plt = np.linspace(-1,3)
y_plt = m*dt_plt + b
axins.plot(dt_plt, y_plt, ls='--', c='k', lw=0.5)
axins.text(0.5, 0.5, "31.2 mag/day", fontsize=12, transform=axins.transAxes,
verticalalignment='top')
axins.set_xlim(-0.1,3)
axins.set_ylim(18,21)
axins.tick_params(axis='both', labelsize=12)
axins.set_xlabel(r"Hours since $t_0$", fontsize=12)
axins.invert_yaxis()
ax.plot([-1, -1], [21, 18], c='k', lw=0.5)
ax.plot([1, 1], [21, 18], c='k', lw=0.5)
ax.plot([-1, 1], [18, 18], c='k', lw=0.5)
ax.plot([-1, 1], [21, 21], c='k', lw=0.5)
#mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
def get_lc():
# get optical light curves
DATA_DIR = "/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data/phot"
f = DATA_DIR + "/ZTF18abukavn_opt_phot.dat"
dat = np.loadtxt(f, dtype=str, delimiter=' ')
instr = dat[:,0]
jd = dat[:,1].astype(float)
filt = dat[:,2]
mag = dat[:,3].astype(float)
emag = dat[:,4].astype(float)
dt = jd-zp
# add the UV light curves
add_dt, add_filt, fnu_mjy, efnu_mjy = get_uv_lc()
# convert to AB mag
add_mag = -2.5 * np.log10(fnu_mjy*1E-3) + 8.90
add_emag = (efnu_mjy/fnu_mjy) # I think it's just the ratio
choose = add_emag < 50
dt = np.append(dt, add_dt[choose])
filt = np.append(filt, add_filt[choose])
mag = np.append(mag, add_mag[choose])
emag = np.append(emag, add_emag[choose])
return dt, filt, mag, emag
def plot_lc():
dt, filt, mag, emag = get_lc()
det = np.logical_and(mag<99, ~np.isnan(mag))
nondet = np.logical_or(mag==99, np.isnan(mag))
fig,axarr = plt.subplots(
4, 3, figsize=(8,8), sharex=True, sharey=True)
for ii,use_f in enumerate(bands):
ax = axarr.reshape(-1)[ii]
choose = np.logical_and(det, filt == use_f)
order = np.argsort(dt[choose])
ax.errorbar(
dt[choose][order], mag[choose][order]-ext[use_f],
emag[choose][order], c='black', fmt='o', ms=3,
alpha=1.0, zorder=5)
# for each panel, plot all of them as a grey background
for f in bands:
choose =
|
np.logical_and(det, filt == f)
|
numpy.logical_and
|
#!/usr/bin/env python
u"""
spatial.py
Written by <NAME> (03/2021)
Utilities for reading, writing and operating on spatial data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format
https://www.h5py.org/
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL
PyYAML: YAML parser and emitter for Python
https://github.com/yaml/pyyaml
UPDATE HISTORY:
Updated 03/2021: added polar stereographic area scale calculation
add routines for converting to and from cartesian coordinates
replaced numpy bool/int to prevent deprecation warnings
Updated 01/2021: add streaming from bytes for ascii, netCDF4, HDF5, geotiff
set default time for geotiff files to 0
Updated 12/2020: added module for converting ellipsoids
Updated 11/2020: output data as masked arrays if containing fill values
add functions to read from and write to geotiff image formats
Written 09/2020
"""
import os
import re
import io
import gzip
import uuid
import h5py
import yaml
import netCDF4
import datetime
import numpy as np
import osgeo.gdal, osgeo.osr
def case_insensitive_filename(filename):
"""
Searches a directory for a filename without case dependence
"""
#-- check if file presently exists with input case
if not os.access(os.path.expanduser(filename),os.F_OK):
#-- search for filename without case dependence
basename = os.path.basename(filename)
directory = os.path.dirname(os.path.expanduser(filename))
f = [f for f in os.listdir(directory) if re.match(basename,f,re.I)]
if not f:
raise IOError('{0} not found in file system'.format(filename))
filename = os.path.join(directory,f.pop())
return os.path.expanduser(filename)
def from_ascii(filename, compression=None, verbose=False,
columns=['time','y','x','data'], header=0):
"""
Read data from an ascii file
Inputs: full path of input ascii file
Options:
ascii file is compressed or streamed from memory
verbose output of file information
column names of ascii file
header lines to skip from start of file
"""
#-- set filename
print(filename) if verbose else None
#-- open the ascii file and extract contents
if (compression == 'gzip'):
#-- read input ascii data from gzip compressed file and split lines
with gzip.open(case_insensitive_filename(filename),'r') as f:
file_contents = f.read().decode('ISO-8859-1').splitlines()
elif (compression == 'bytes'):
#-- read input file object and split lines
file_contents = filename.read().splitlines()
else:
#-- read input ascii file (.txt, .asc) and split lines
with open(case_insensitive_filename(filename),'r') as f:
file_contents = f.read().splitlines()
#-- number of lines in the file
file_lines = len(file_contents)
#-- compile regular expression operator for extracting numerical values
#-- from input ascii files of spatial data
regex_pattern = r'[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[EeD][+-]?\d+)?'
rx = re.compile(regex_pattern, re.VERBOSE)
#-- check if header has a known format
if (str(header).upper() == 'YAML'):
#-- counts the number of lines in the header
YAML = False
count = 0
#-- Reading over header text
while (YAML is False) & (count < file_lines):
#-- file line at count
line = file_contents[count]
#-- if End of YAML Header is found: set YAML flag
YAML = bool(re.search(r"\# End of YAML header",line))
#-- add 1 to counter
count += 1
#-- parse the YAML header (specifying yaml loader)
YAML_HEADER = yaml.load('\n'.join(file_contents[:count]),
Loader=yaml.BaseLoader)
#-- output spatial data and attributes
dinput = {}
#-- copy global attributes
dinput['attributes'] = YAML_HEADER['header']['global_attributes']
#-- allocate for each variable and copy variable attributes
for c in columns:
dinput[c] = np.zeros((file_lines-count))
dinput['attributes'][c] = YAML_HEADER['header']['variables'][c]
#-- update number of file lines to skip for reading data
header = int(count)
else:
#-- output spatial data and attributes
dinput = {c:np.zeros((file_lines-header)) for c in columns}
dinput['attributes'] = {c:dict() for c in columns}
#-- extract spatial data array
#-- for each line in the file
for i,line in enumerate(file_contents[header:]):
#-- extract columns of interest and assign to dict
#-- convert fortran exponentials if applicable
column = {c:r.replace('D','E') for c,r in zip(columns,rx.findall(line))}
#-- copy variables from column dict to output dictionary
for c in columns:
dinput[c][i] = np.float64(column[c])
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- return the spatial variables
return dinput
def from_netCDF4(filename, compression=None, verbose=False,
timename='time', xname='lon', yname='lat', varname='data'):
"""
Read data from a netCDF4 file
Inputs: full path of input netCDF4 file
Options:
netCDF4 file is compressed or streamed from memory
verbose output of file information
netCDF4 variable names of time, longitude, latitude, and data
"""
#-- read data from netCDF4 file
#-- Open the NetCDF4 file for reading
if (compression == 'gzip'):
#-- read as in-memory (diskless) netCDF4 dataset
with gzip.open(case_insensitive_filename(filename),'r') as f:
fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=f.read())
elif (compression == 'bytes'):
#-- read as in-memory (diskless) netCDF4 dataset
fileID = netCDF4.Dataset(uuid.uuid4().hex,memory=filename.read())
else:
#-- read netCDF4 dataset
fileID = netCDF4.Dataset(case_insensitive_filename(filename), 'r')
#-- Output NetCDF file information
if verbose:
print(fileID.filepath())
print(list(fileID.variables.keys()))
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {}
#-- get attributes for the file
for attr in ['title','description','projection']:
#-- try getting the attribute
try:
ncattr, = [s for s in dir(fileID) if re.match(attr,s,re.I)]
dinput['attributes'][attr] = getattr(fileID,ncattr)
except (ValueError,AttributeError):
pass
#-- list of attributes to attempt to retrieve from included variables
attributes_list = ['description','units','long_name','calendar',
'standard_name','_FillValue']
#-- mapping between netCDF4 variable names and output names
variable_mapping = dict(x=xname,y=yname,data=varname,time=timename)
#-- for each variable
for key,nc in variable_mapping.items():
#-- Getting the data from each NetCDF variable
dinput[key] = fileID.variables[nc][:]
#-- get attributes for the included variables
dinput['attributes'][key] = {}
for attr in attributes_list:
#-- try getting the attribute
try:
ncattr, = [s for s in dir(fileID.variables[nc]) if re.match(attr,s,re.I)]
dinput['attributes'][key][attr] = \
getattr(fileID.variables[nc],ncattr)
except (ValueError,AttributeError):
pass
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- Closing the NetCDF file
fileID.close()
#-- return the spatial variables
return dinput
def from_HDF5(filename, compression=None, verbose=False,
timename='time', xname='lon', yname='lat', varname='data'):
"""
Read data from a HDF5 file
Inputs: full path of input HDF5 file
Options:
HDF5 file is compressed or streamed from memory
verbose output of file information
HDF5 variable names of time, longitude, latitude, and data
"""
#-- read data from HDF5 file
#-- Open the HDF5 file for reading
if (compression == 'gzip'):
#-- read gzip compressed file and extract into in-memory file object
with gzip.open(case_insensitive_filename(filename),'r') as f:
fid = io.BytesIO(f.read())
#-- set filename of BytesIO object
fid.filename = os.path.basename(filename)
#-- rewind to start of file
fid.seek(0)
#-- read as in-memory (diskless) HDF5 dataset from BytesIO object
fileID = h5py.File(fid, 'r')
elif (compression == 'bytes'):
#-- read as in-memory (diskless) HDF5 dataset
fileID = h5py.File(filename, 'r')
else:
#-- read HDF5 dataset
fileID = h5py.File(case_insensitive_filename(filename), 'r')
#-- Output HDF5 file information
if verbose:
print(fileID.filename)
print(list(fileID.keys()))
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {}
#-- get attributes for the file
for attr in ['title','description','projection']:
#-- try getting the attribute
try:
dinput['attributes'][attr] = fileID.attrs[attr]
except (KeyError,AttributeError):
pass
#-- list of attributes to attempt to retrieve from included variables
attributes_list = ['description','units','long_name','calendar',
'standard_name','_FillValue']
#-- mapping between HDF5 variable names and output names
variable_mapping = dict(x=xname,y=yname,data=varname,time=timename)
#-- for each variable
for key,h5 in variable_mapping.items():
#-- Getting the data from each HDF5 variable
dinput[key] = np.copy(fileID[h5][:])
#-- get attributes for the included variables
dinput['attributes'][key] = {}
for attr in attributes_list:
#-- try getting the attribute
try:
dinput['attributes'][key][attr] = fileID[h5].attrs[attr]
except (KeyError,AttributeError):
pass
#-- convert to masked array if fill values
if '_FillValue' in dinput['attributes']['data'].keys():
dinput['data'] = np.ma.asarray(dinput['data'])
dinput['data'].fill_value = dinput['attributes']['data']['_FillValue']
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- Closing the HDF5 file
fileID.close()
#-- return the spatial variables
return dinput
def from_geotiff(filename, compression=None, verbose=False):
"""
Read data from a geotiff file
Inputs: full path of input geotiff file
Options:
geotiff file is compressed or streamed from memory
verbose output of file information
"""
#-- Open the geotiff file for reading
if (compression == 'gzip'):
#-- read gzip compressed file and extract into memory-mapped object
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
with gzip.open(case_insensitive_filename(filename),'r') as f:
osgeo.gdal.FileFromMemBuffer(mmap_name, f.read())
#-- read as GDAL memory-mapped (diskless) geotiff dataset
ds = osgeo.gdal.Open(mmap_name)
elif (compression == 'bytes'):
#-- read as GDAL memory-mapped (diskless) geotiff dataset
mmap_name = "/vsimem/{0}".format(uuid.uuid4().hex)
osgeo.gdal.FileFromMemBuffer(mmap_name, filename.read())
ds = osgeo.gdal.Open(mmap_name)
else:
#-- read geotiff dataset
ds = osgeo.gdal.Open(case_insensitive_filename(filename))
#-- print geotiff file if verbose
print(filename) if verbose else None
#-- create python dictionary for output variables and attributes
dinput = {}
dinput['attributes'] = {c:dict() for c in ['x','y','data']}
#-- get the spatial projection reference information
srs = ds.GetSpatialRef()
dinput['attributes']['projection'] = srs.ExportToProj4()
dinput['attributes']['wkt'] = srs.ExportToWkt()
#-- get dimensions
xsize = ds.RasterXSize
ysize = ds.RasterYSize
bsize = ds.RasterCount
#-- get geotiff info
info_geotiff = ds.GetGeoTransform()
dinput['attributes']['spacing'] = (info_geotiff[1],info_geotiff[5])
#-- calculate image extents
xmin = info_geotiff[0]
ymax = info_geotiff[3]
xmax = xmin + (xsize-1)*info_geotiff[1]
ymin = ymax + (ysize-1)*info_geotiff[5]
dinput['attributes']['extent'] = (xmin,xmax,ymin,ymax)
#-- x and y pixel center coordinates (converted from upper left)
dinput['x'] = xmin + info_geotiff[1]/2.0 + np.arange(xsize)*info_geotiff[1]
dinput['y'] = ymax + info_geotiff[5]/2.0 + np.arange(ysize)*info_geotiff[5]
#-- read full image with GDAL
dinput['data'] = ds.ReadAsArray()
#-- set default time to zero for each band
dinput.setdefault('time', np.zeros((bsize)))
#-- check if image has fill values
if ds.GetRasterBand(1).GetNoDataValue():
#-- convert to masked array if fill values
dinput['data'] = np.ma.asarray(dinput['data'])
#-- mask invalid values
dinput['data'].fill_value = ds.GetRasterBand(1).GetNoDataValue()
#-- create mask array for bad values
dinput['data'].mask = (dinput['data'].data == dinput['data'].fill_value)
#-- set attribute for fill value
dinput['attributes']['data']['_FillValue'] = dinput['data'].fill_value
#-- close the dataset
ds = None
#-- return the spatial variables
return dinput
def to_ascii(output, attributes, filename, delimiter=',',
columns=['time','lat','lon','tide'], header=False, verbose=False):
"""
Write data to an ascii file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output ascii file
Options:
delimiter for output spatial file
order of columns for output spatial file
create a YAML header with data attributes
verbose output
"""
filename = os.path.expanduser(filename)
print(filename) if verbose else None
#-- open the output file
fid = open(filename, 'w')
#-- create a column stack arranging data in column order
data_stack = np.c_[[output[col] for col in columns]]
ncol,nrow = np.shape(data_stack)
#-- print YAML header to top of file
if header:
fid.write('{0}:\n'.format('header'))
#-- data dimensions
fid.write('\n {0}:\n'.format('dimensions'))
fid.write(' {0:22}: {1:d}\n'.format('time',nrow))
#-- non-standard attributes
fid.write(' {0}:\n'.format('non-standard_attributes'))
#-- data format
fid.write(' {0:22}: ({1:d}f0.8)\n'.format('formatting_string',ncol))
fid.write('\n')
#-- global attributes
fid.write('\n {0}:\n'.format('global_attributes'))
today = datetime.datetime.now().isoformat()
fid.write(' {0:22}: {1}\n'.format('date_created', today))
# print variable descriptions to YAML header
fid.write('\n {0}:\n'.format('variables'))
#-- print YAML header with variable attributes
for i,v in enumerate(columns):
fid.write(' {0:22}:\n'.format(v))
for atn,atv in attributes[v].items():
fid.write(' {0:20}: {1}\n'.format(atn,atv))
#-- add precision and column attributes for ascii yaml header
fid.write(' {0:20}: double_precision\n'.format('precision'))
fid.write(' {0:20}: column {1:d}\n'.format('comments',i+1))
#-- end of header
fid.write('\n\n# End of YAML header\n')
#-- write to file for each data point
for line in range(nrow):
line_contents = ['{0:0.8f}'.format(d) for d in data_stack[:,line]]
print(delimiter.join(line_contents), file=fid)
#-- close the output file
fid.close()
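#-- Minimal usage sketch (illustrative only; the values and file name below are assumptions):
#--   out = {'time': np.array([0.0]), 'lat': np.array([45.0]),
#--          'lon': np.array([-105.0]), 'tide': np.array([0.12])}
#--   attr = {k: {'units': 'unknown'} for k in out}
#--   to_ascii(out, attr, 'example.csv', header=True)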
def to_netCDF4(output, attributes, filename, verbose=False):
"""
Write data to a netCDF4 file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output netCDF4 file
Options: verbose output
"""
#-- opening NetCDF file for writing
fileID = netCDF4.Dataset(os.path.expanduser(filename),'w',format="NETCDF4")
#-- Defining the NetCDF dimensions
fileID.createDimension('time', len(np.atleast_1d(output['time'])))
#-- defining the NetCDF variables
nc = {}
for key,val in output.items():
if '_FillValue' in attributes[key].keys():
nc[key] = fileID.createVariable(key, val.dtype, ('time',),
fill_value=attributes[key]['_FillValue'], zlib=True)
else:
nc[key] = fileID.createVariable(key, val.dtype, ('time',))
#-- filling NetCDF variables
nc[key][:] = val
#-- Defining attributes for variable
for att_name,att_val in attributes[key].items():
setattr(nc[key],att_name,att_val)
#-- add attribute for date created
fileID.date_created = datetime.datetime.now().isoformat()
#-- Output NetCDF structure information
if verbose:
print(filename)
print(list(fileID.variables.keys()))
#-- Closing the NetCDF file
fileID.close()
def to_HDF5(output, attributes, filename, verbose=False):
"""
Write data to a HDF5 file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output HDF5 file
Options: verbose output
"""
#-- opening HDF5 file for writing
fileID = h5py.File(filename, 'w')
#-- Defining the HDF5 dataset variables
h5 = {}
for key,val in output.items():
if '_FillValue' in attributes[key].keys():
h5[key] = fileID.create_dataset(key, val.shape, data=val,
dtype=val.dtype, fillvalue=attributes[key]['_FillValue'],
compression='gzip')
else:
h5[key] = fileID.create_dataset(key, val.shape, data=val,
dtype=val.dtype, compression='gzip')
#-- Defining attributes for variable
for att_name,att_val in attributes[key].items():
h5[key].attrs[att_name] = att_val
#-- add attribute for date created
fileID.attrs['date_created'] = datetime.datetime.now().isoformat()
#-- Output HDF5 structure information
if verbose:
print(filename)
print(list(fileID.keys()))
#-- Closing the HDF5 file
fileID.close()
def to_geotiff(output, attributes, filename, verbose=False,
varname='data', dtype=osgeo.gdal.GDT_Float64):
"""
Write data to a geotiff file
Inputs:
python dictionary of output data
python dictionary of output attributes
full path of output geotiff file
Options:
verbose output
output variable name
GDAL data type
"""
#-- verify grid dimensions to be iterable
output = expand_dims(output, varname=varname)
#-- grid shape
ny,nx,nband = np.shape(output[varname])
#-- output as geotiff
driver = osgeo.gdal.GetDriverByName("GTiff")
#-- set up the dataset with compression options
ds = driver.Create(filename,nx,ny,nband,dtype,['COMPRESS=LZW'])
#-- top left x, w-e pixel resolution, rotation
#-- top left y, rotation, n-s pixel resolution
xmin,xmax,ymin,ymax = attributes['extent']
dx,dy = attributes['spacing']
ds.SetGeoTransform([xmin,dx,0,ymax,0,dy])
#-- set the spatial projection reference information
srs = osgeo.osr.SpatialReference()
srs.ImportFromWkt(attributes['wkt'])
#-- export
ds.SetProjection( srs.ExportToWkt() )
#-- for each band
for band in range(nband):
#-- set fill value for band
if '_FillValue' in attributes[varname].keys():
fill_value = attributes[varname]['_FillValue']
ds.GetRasterBand(band+1).SetNoDataValue(fill_value)
#-- write band to geotiff array
ds.GetRasterBand(band+1).WriteArray(output[varname][:,:,band])
#-- print filename if verbose
print(filename) if verbose else None
#-- close dataset
ds.FlushCache()
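#-- Minimal usage sketch (illustrative only; file names are assumptions): write a grid read with
#-- from_geotiff() back out, reusing its projection, extent and spacing attributes:
#--   grid = from_geotiff('input.tif')
#--   attr = {'extent': grid['attributes']['extent'],
#--           'spacing': grid['attributes']['spacing'],
#--           'wkt': grid['attributes']['wkt'],
#--           'data': grid['attributes']['data']}
#--   to_geotiff({'data': grid['data']}, attr, 'output.tif')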
def expand_dims(obj, varname='data'):
"""
Add a singleton dimension to a spatial dictionary if non-existent
Options:
variable name to modify
"""
#-- change time dimensions to be iterable
try:
obj['time'] = np.atleast_1d(obj['time'])
except:
pass
#-- output spatial with a third dimension
if isinstance(varname,list):
for v in varname:
obj[v] = np.atleast_3d(obj[v])
elif isinstance(varname,str):
obj[varname] = np.atleast_3d(obj[varname])
#-- return reformed spatial dictionary
return obj
def convert_ellipsoid(phi1, h1, a1, f1, a2, f2, eps=1e-12, itmax=10):
"""
Convert latitudes and heights to a different ellipsoid using Newton-Raphson
Inputs:
phi1: latitude of input ellipsoid in degrees
h1: height above input ellipsoid in meters
a1: semi-major axis of input ellipsoid
f1: flattening of input ellipsoid
a2: semi-major axis of output ellipsoid
f2: flattening of output ellipsoid
Options:
eps: tolerance to prevent division by small numbers
and to determine convergence
itmax: maximum number of iterations to use in Newton-Raphson
Returns:
phi2: latitude of output ellipsoid in degrees
h2: height above output ellipsoid in meters
References:
Astronomical Algorithms, <NAME>, 1991, Willmann-Bell, Inc.
pp. 77-82
"""
if (len(phi1) != len(h1)):
raise ValueError('phi and h have incompatible dimensions')
#-- semiminor axis of input and output ellipsoid
b1 = (1.0 - f1)*a1
b2 = (1.0 - f2)*a2
#-- initialize output arrays
npts = len(phi1)
phi2 = np.zeros((npts))
h2 = np.zeros((npts))
#-- for each point
for N in range(npts):
#-- force phi1 into range -90 <= phi1 <= 90
if (np.abs(phi1[N]) > 90.0):
phi1[N] = np.sign(phi1[N])*90.0
#-- handle special case near the equator
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + a1 - a2
if (np.abs(phi1[N]) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + a1 - a2
#-- handle special case near the poles
#-- phi2 = phi1 (latitudes congruent)
#-- h2 = h1 + b1 - b2
elif ((90.0 - np.abs(phi1[N])) < eps):
phi2[N] = np.copy(phi1[N])
h2[N] = h1[N] + b1 - b2
#-- handle case if latitude is within 45 degrees of equator
elif (np.abs(phi1[N]) <= 45):
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = b2 * b2 - a2 * a2
k1 = a2 * hpr1cos
k2 = b2 * hpr1sin
#-- perform newton-raphson iteration to solve for u2
#-- cos(u2) will not be close to zero since abs(phi1) <= 45
for i in range(0, itmax+1):
cosu2 = np.cos(u2)
fu2 = k0 * np.sin(u2) + k1 * np.tan(u2) - k2
fu2p = k0 * cosu2 + k1 / (cosu2 * cosu2)
if (np.abs(fu2p) < eps):
i = np.copy(itmax)
else:
delta = fu2 / fu2p
u2 -= delta
if (np.abs(delta) < eps):
i = np.copy(itmax)
#-- convert latitude to degrees and verify values between +/- 90
phi2r = np.arctan(a2 / b2 * np.tan(u2))
phi2[N] = phi2r*180.0/np.pi
if (np.abs(phi2[N]) > 90.0):
phi2[N] = np.sign(phi2[N])*90.0
#-- calculate height
h2[N] = (hpr1cos - a2 * np.cos(u2)) / np.cos(phi2r)
#-- handle final case where latitudes are between 45 degrees and pole
else:
#-- convert phi1 to radians
phi1r = phi1[N] * np.pi/180.0
sinphi1 = np.sin(phi1r)
cosphi1 = np.cos(phi1r)
#-- prevent division by very small numbers
cosphi1 = np.copy(eps) if (cosphi1 < eps) else cosphi1
#-- calculate tangent
tanphi1 = sinphi1 / cosphi1
u1 = np.arctan(b1 / a1 * tanphi1)
hpr1sin = b1 * np.sin(u1) + h1[N] * sinphi1
hpr1cos = a1 * np.cos(u1) + h1[N] * cosphi1
#-- set initial value for u2
u2 = np.copy(u1)
#-- setup constants
k0 = a2 * a2 - b2 * b2
k1 = b2 * hpr1sin
k2 = a2 * hpr1cos
#-- perform newton-raphson iteration to solve for u2
#-- sin(u2) will not be close to zero since abs(phi1) > 45
for i in range(0, itmax+1):
sinu2 = np.sin(u2)
fu2 = k0 * np.cos(u2) + k1 / np.tan(u2) - k2
fu2p = -1 * (k0 * sinu2 + k1 / (sinu2 * sinu2))
if (np.abs(fu2p) < eps):
i = np.copy(itmax)
else:
delta = fu2 / fu2p
u2 -= delta
if (np.abs(delta) < eps):
i = np.copy(itmax)
#-- convert latitude to degrees and verify values between +/- 90
phi2r = np.arctan(a2 / b2 * np.tan(u2))
phi2[N] = phi2r*180.0/np.pi
if (np.abs(phi2[N]) > 90.0):
phi2[N] =
|
np.sign(phi2[N])
|
numpy.sign
|
"""
Plots normalized histograms of R^2 of observations using randomized data ANN
Reference : Barnes et al. [2020, JAMES]
Author : <NAME>
Date : 21 October 2020
"""
### Import packages
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import scipy.stats as stats
### Set parameters
variables = [r'T2M']
seasons = [r'annual']
SAMPLEQ = 100
### Set directories
directorydata = '/Users/zlabe/Documents/Research/InternalSignal/Data/'
directoryfigure = '/Users/zlabe/Desktop/SINGLE_v2.0/Histograms/%s/' % variables[0]
### Read in slope data
filename_slope = 'Slopes_20CRv3-RANDOM_%s_RANDOMSEED_20ens.txt' % SAMPLEQ
slopes = np.genfromtxt(directorydata + filename_slope,unpack=True)
### Read in R2 data
filename_R2 = 'R2_20CRv3-RANDOM_%s_RANDOMSEED_20ens.txt' % SAMPLEQ
r2 = np.genfromtxt(directorydata + filename_R2,unpack=True)
### Read in other R2 data
filename_R2all = 'R2_20CRv3-Obs_XGHG-XAER-LENS_%s_RANDOMSEED_Medians.txt' % SAMPLEQ
r2_allmodel = np.genfromtxt(directorydata + filename_R2all,unpack=True)
ghg_r2 = r2_allmodel[0]
aer_r2 = r2_allmodel[1]
lens_r2 = r2_allmodel[2]
###############################################################################
###############################################################################
###############################################################################
### Create plot for histograms of slopes
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
fig = plt.figure()
ax = plt.subplot(111)
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params('both',length=5.5,width=2,which='major',color='dimgrey')
ax.yaxis.grid(zorder=1,color='dimgrey',alpha=0.35)
### Plot histograms
plt.axvline(x=ghg_r2,color='steelblue',linewidth=2,linestyle='--',dashes=(1,0.3),
zorder=10,label=r'\textbf{AER+ALL}')
plt.axvline(x=aer_r2,color='goldenrod',linewidth=2,linestyle='--',dashes=(1,0.3),
zorder=10,label=r'\textbf{GHG+ALL}')
plt.axvline(x=lens_r2,color='forestgreen',linewidth=2,linestyle='--',dashes=(1,0.3),
zorder=10,label=r'\textbf{TOTAL}')
leg = plt.legend(shadow=False,fontsize=7,loc='upper center',
bbox_to_anchor=(0.5,1.1),fancybox=True,ncol=3,frameon=False,
handlelength=3,handletextpad=1)
weights = np.ones_like(r2)/len(r2)
n, bins, patches = plt.hist(r2,bins=np.arange(0,1.01,0.025),
density=False,alpha=0.5,
label=r'\textbf{XGHG}',
weights=weights,zorder=3)
for i in range(len(patches)):
patches[i].set_facecolor('crimson')
patches[i].set_edgecolor('white')
patches[i].set_linewidth(1)
plt.ylabel(r'\textbf{PROPORTION[%s]}' % SAMPLEQ,fontsize=10,color='k')
plt.xlabel(r'\textbf{R$^{2}$} [ANNUAL -- T2M -- 20CRv3 -- (1920-2015)]',fontsize=10,color='k')
plt.yticks(np.arange(0,1.1,0.1),map(str,np.round(np.arange(0,1.1,0.1),2)),size=6)
plt.xticks(
|
np.arange(0,1.1,0.1)
|
numpy.arange
|
"""
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest.mock import Mock
from functools import partial
import pytest
import numpy
from kur.supplier.speechrec import SpeechRecognitionSupplier
@pytest.fixture
def seed():
return 0
@pytest.fixture
def num_entries():
return 50
@pytest.fixture
def fake_data(num_entries):
return list(range(num_entries))
@pytest.fixture
def fake_supplier(seed, fake_data):
result = Mock()
result.metadata = {'entries' : len(fake_data)}
result.data = {'data' : fake_data}
result.kurfile.get_seed.return_value = seed
result.downselect = partial(SpeechRecognitionSupplier.downselect, result)
return result
@pytest.fixture
def permuted_numbers(seed, num_entries):
return
|
numpy.random.RandomState(seed)
|
numpy.random.RandomState
|
#!/usr/bin/python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import copy
import sys
lis = [
"bst_double_double_b.txt",
"umap_double_double.txt",
"bst_double_double_u.txt",
"map_double_double.txt",
"bst_int_int_b.txt",
"map_int_int.txt",
"bst_int_int_u.txt",
"umap_int_int.txt",
"bst_char_char_b.txt",
"map_char_char.txt",
"bst_char_char_u.txt",
"umap_char_char.txt",
]
data = np.zeros((50, 12))
for i, item in enumerate(lis):
data[:, i] = np.loadtxt(item)
# print(item+".txt")
data = np.array(data)
x =
|
np.arange(1, 51)
|
numpy.arange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 21:25:57 2018
@author: miller
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 20:43:18 2018
@author: miller
"""
import pandas as pd
import numpy as np
pred_set_indicator = 9999999 # Will serve as indicator for rows to be in pred set
def feat_eng(all_games, expectation_stats, cumulative_stats):
'''Calculating exponential moving averages for some stats as well as summing other stats.
Using special indexing scheme to compute ith row for each player at the same time.'''
### Building index containing rows in which players played first game
### Calculating how many games each player played in their career
### Based on first row for each player and how many games played, can compute same row for each player at the same time.
# Calculating boolean array for rows where players change
player_shifted_up_one = all_games.household_key.shift(-1)
new_player_bool = np.where(all_games.household_key!= player_shifted_up_one, 1, 0)
# Calculating row idx for first row for each player, missing zero for first player
new_player_row_nums_missing_zero = np.flatnonzero(new_player_bool > 0)
new_player_row_nums = np.zeros(shape=(new_player_row_nums_missing_zero.shape[0]+1)) # Initializing new array
# Contains row idx for first row for each player
## Last value = len(df) + 1 --> used to calculate career length of last player (below)
new_player_row_nums[1:] = new_player_row_nums_missing_zero+1
# Calculating the max number of games played by one player
player_career_lengths = [(row_num - new_player_row_nums[index-1]) for index, row_num in enumerate(new_player_row_nums)][1:]
max_games_played = int(max(player_career_lengths))
all_games["DAY"] = all_games["DAY"].astype(float) # Casting to float
all_games.sort_values(['household_key','DAY'], inplace=True)
all_games.reset_index(inplace=True, drop=True)
###############################################################
### Initializing arrays, lists to hold intermediate computation
expectation_stats.append('days_since_last_trip')
# Initializing static retain weights in exponential decaying average --> lower wt = more dependent on recent values
med_retain_wt = 0.7
med_update_wt = 1 - med_retain_wt
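# Note: the exponentially weighted averages are assumed to update as
# new_avg = retain_wt*old_avg + (1 - retain_wt)*new_value, so a lower retain_wt
# makes the average track the most recent trips more closely.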
# Creating lists of exponential weighted average column names based on how much of average is retained at each time point
med_retain_stat_list = ['exp_{0}_{1}_retain'.format(stat, med_retain_wt) for stat in expectation_stats]
slow_anneal_retain_stat_list = ['exp_{0}_slow_anneal_retain'.format(stat) for stat in expectation_stats]
fast_anneal_retain_stat_list = ['exp_{0}_fast_anneal_retain'.format(stat) for stat in expectation_stats]
exp_stat_list = med_retain_stat_list + slow_anneal_retain_stat_list + fast_anneal_retain_stat_list
cumulative_stat_list = ['cumulative_{0}'.format(stat) for stat in cumulative_stats]
### Initializing columns ###
for stat in exp_stat_list + cumulative_stat_list:
all_games[stat] = 0
# Indicator for first trip ever
all_games['first_trip'] = 0
all_games.loc[new_player_row_nums[:-1], 'first_trip'] = 1
all_games['days_since_last_trip'] = 100 # Will only remain for first row for each player
all_games['cumulative_trips'] = 1 # Computing separate of other cumulative stats to avoid creating useless "trip" column
cumulative_array = np.zeros(shape=(len(all_games), len(cumulative_stats)))
med_retain_array = np.zeros(shape=(len(all_games), len(expectation_stats)))
slow_anneal_retain_array = np.zeros(shape=(len(all_games), len(expectation_stats)))
fast_anneal_retain_array = np.zeros(shape=(len(all_games), len(expectation_stats)))
print("Max Games: " + str(max_games_played))
##############################################################
### Calculating stats ###
for game_num in range(1, int(max_games_played)):
if game_num % 100 == 0:
print(game_num)
# Indices of players who have played >= game_num games
played_num_games_bool = np.zeros(shape=(len(player_career_lengths)))
played_num_games_bool = np.where(np.array(player_career_lengths) > game_num, True, False)
# List of row indices of first game for players who have played more games than game_num
first_game_players_played_num_games = new_player_row_nums[:-1][played_num_games_bool]
rows_to_increment = (first_game_players_played_num_games+game_num).astype(int)
# Updating anneal retain weights --> do this to allow for rapid updating in early games, eventually tailing off when more confident about player
fast_anneal_retain_wt = min(0.1 + ( (game_num-1)**(1/3) * 0.35 ), 0.925)
fast_anneal_update_wt = 1 - fast_anneal_retain_wt
slow_anneal_retain_wt = min(0.1 + ( (game_num-1)**(1/6) * 0.4 ), 0.8)
slow_anneal_update_wt = 1 - slow_anneal_retain_wt
# If a player just played their first game...
if game_num == 1:
all_games.loc[rows_to_increment, 'days_since_last_trip'] = np.array(all_games.loc[rows_to_increment, 'DAY']) - np.array(all_games.loc[rows_to_increment-1, 'DAY'])
all_games.loc[ rows_to_increment , 'cumulative_trips'] = np.array(all_games.loc[ rows_to_increment -1 , 'cumulative_trips']) + 1
cumulative_array[rows_to_increment,:] = np.array(all_games.loc[rows_to_increment - 1, cumulative_stats])
# Setting retain_stats = expectation_stats to initialize value for exp moving avg
fast_anneal_retain_array[rows_to_increment,:] =
|
np.array(all_games.loc[rows_to_increment - 1, expectation_stats])
|
numpy.array
|
"""Main module."""
from __future__ import division, print_function
__metaclass__ = type
import numpy as np
import tqdm
import sys
import h5py
from scipy.sparse import coo_matrix
import scipy.sparse as sparse
from sklearn.decomposition import IncrementalPCA as iPCA
from sklearn.cluster import KMeans as kmeans
from sklearn.cluster import MiniBatchKMeans as mini_kmeans
import logging
from rich.logging import RichHandler
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(RichHandler())
log.propagate = False
# Using the tkinter backend makes matplotlib run better on a cluster, maybe?
# import matplotlib
# matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import mdtraj as md
import pyemma.coordinates as coor
import pyemma.coordinates.clustering as clustering
import pyemma
def inverse_iteration(guess, matrix):
"""
Do one iteration of inverse iteration.
Parameters
----------
guess: array-like (N elements)
Vector of weights to be used as the initial guess.
matrix: array-like (NxN elements)
Transition matrix to use for inverse iteration.
Returns
-------
The new vector of weights after one iteration of inverse iteration.
"""
# Looking for eigenvector corresponding to eigenvalue 1
mu = 1
identity = sparse.eye(guess.shape[0])
# Inverse
inverse = sparse.linalg.inv(matrix.T - mu * identity)
result = inverse @ guess
result = result.squeeze()
# Normalize
result /= sum(result)
return result
class modelWE:
"""
Implementation of haMSM model building, particularly for steady-state estimation (but there are lots of extras),
from WE sampling with basis (source) and target (sink) states with recycling.
Set up for typical west.h5 file structure, with coordinates to be stored in west.h5 /iterations/auxdata/coord and
basis and target definitions from progress coordinates.
Check out run_msmWE.slurm and run_msmWE_flux.py in scripts folder for an implementation example.
Danger
-------
This code currently, in general, appears to assume a 1-D progress coordinate.
Todo
----
Refactor
In general, this class's methods generally handle data by holding state in the object.
The functions that update state with the result of a calculation, though, tend to update a lot of state on the way.
The state being updated along the way is usually "helper" quantities (an example would be the number of bins
or number of walkers, which is computed "along the way" in a number of functions, and the object state updated.)
I think it would be prudent to refactor these in such a way that these are updated in as few places as possible --
one example of this might be setting them as properties, and then updating the value in state as part of that
accessor if necessary.
References
--------
Copperman and Zuckerman,
*Accelerated estimation of long-timescale kinetics by combining weighted ensemble simulation with Markov model
microstates using non-Markovian theory*, **arXiv** (2020).
"""
def __init__(self):
"""
Work-in-progress init function. For now, just start adding attribute definitions in here.
Todo
----
- Most logic from initialize() should be moved in here.
- Also, comment all of these here. Right now most of them have comments throughout the code.
- Reorganize these attributes into some meaningful structure
"""
self.modelName = None
"""str: Name used for storing files"""
self.fileList = None
"""list of str: List of all filenames with data"""
self.n_data_files = None
"""int: Number of files in :code:`fileList`
**TODO**: Deprecate this, this could just be a property"""
self.n_lag = 0
self.pcoord_ndim = None
"""int: Number of dimensions in the progress coordinate"""
self.pcoord_len = None
"""int: Number of stored progress coordinates for each iteration, per-segment."""
self.tau = None
"""float: Resampling time for weighted ensemble. (Maybe should be int? Units?)"""
self.WEtargetp1_min = None
self.WEtargetp1_max = None
"""float: Progress coordinate value at target state.
Used to check if a progress coord is in the target, and to set the RMSD of the target cluster when cleaning the
fluxmatrix."""
self.target_bin_center = None
self._WEtargetp1_bounds = None
self.WEbasisp1_min = None
"""float: Minimum progress coordinate value within basis state.
Used to check if a progress coord is in the basis, and to set the RMSD of the basis cluster when cleaning the
fluxmatrix."""
self.WEbasisp1_max = None
"""float: Maximum progress coordinate value within basis state.
Used to check if a progress coord is in the basis, and to set the RMSD of the basis cluster when cleaning the
fluxmatrix."""
self.basis_bin_center = None
self._WEbasisp1_bounds = None
self.dimReduceMethod = None
"""str: Dimensionality reduction method. Must be one of "pca", "vamp", or "none" (**NOT** NoneType)"""
self.vamp_lag = None
self.vamp_dim = None
# For optimized binning
self.nB = None
self.nW = None
self.min_walkers = None
"""str: Test description for minwalkers"""
self.binMethod = None
self.allocationMethod = None
self.coordsExist = None
self.westList = None
self.reference_structure = None
self.reference_coord = None
self.basis_structure = None
# TODO: This is plural, reference_coord is singular. Intentional? Can you have multiple bases but 1 reference?
self.basis_coords = None
self.nAtoms = None
self.numSegments = None
self.maxIter = None
# TODO: Describe segindList better.
self.segindList = None
"""list: List of segment indices(?)"""
self.weightList = None
"""array-like: List of segment weights in an iteration"""
self.nSeg = None
self.pcoord0List = None
self.pcoord1List = None
self.seg_weights = {}
self.coordPairList = None
self.transitionWeights = None
self.departureWeights = None
self.n_iter = None
self.coordinates = None
self.ndim = None
self.n_hist = None
"""int: Number of steps of history information to use when building transitions."""
self.n_clusters = None
self.clusters = None
self.clusterFile = None
self.errorWeight = None
self.errorCount = None
self.fluxMatrixRaw = None
self.targetRMSD_centers = None
"""array-like: List of RMSDs corresponding to each cluster."""
self.fluxMatrix = None
self.indBasis = None
self.Tmatrix = None
self.pSS = None
self.lagtime = None
self.JtargetSS = None
self.removed_clusters = []
self.cluster_structures = None
self.cluster_structure_weights = None
"""dict: Mapping of cluster indices to structures in that cluster"""
def initialize(
# self, fileSpecifier: str, refPDBfile: str, initPDBfile: str, modelName: str
self,
fileSpecifier: str,
refPDBfile: str,
modelName: str,
basis_pcoord_bounds: list = None,
target_pcoord_bounds: list = None,
dim_reduce_method: str = "pca",
tau: float = None,
):
"""
Initialize the model-builder.
Parameters
----------
fileSpecifier : list
List of paths to H5 files to analyze.
refPDBfile : string
Path to PDB file that defines topology.
modelName : string
Name to use in output filenames.
basis_pcoord_bounds: list
List of [lower bound, upper bound] in pcoord-space for the basis state
target_pcoord_bounds: list
List of [lower bound, upper bound] in pcoord-space for the target state
dim_reduce_method: str
Dimensionality reduction method. "pca", "vamp", or "none".
tau: float
Resampling time (i.e. time of 1 WE iteration). Used to map fluxes to physical times.
Returns
-------
None
Todo
----
Some of this logic should be broken into a constructor, and default arguments handled in the constructor's
function signature.
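Examples
--------
A minimal usage sketch (paths and bound values are hypothetical)::

    model.initialize(
        fileSpecifier=["west.h5"],
        refPDBfile="reference.pdb",
        modelName="my_system",
        basis_pcoord_bounds=[9.6, 12.5],
        target_pcoord_bounds=[0.0, 1.0],
        dim_reduce_method="pca",
        tau=1.0,
    )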
"""
log.debug("Initializing msm_we model")
self.modelName = modelName
if type(fileSpecifier) is list:
fileList = fileSpecifier
elif type(fileSpecifier) is str:
fileList = fileSpecifier.split(" ")
log.warning(
"HDF5 file paths were provided in a string -- this is now deprecated, please pass as a list "
"of paths."
)
if basis_pcoord_bounds is None:
log.warning(
"No basis coord bounds provided to initialize(). "
"You can manually set this for now, but that will be deprecated eventually."
)
else:
self.WEbasisp1_bounds = basis_pcoord_bounds
if target_pcoord_bounds is None:
log.warning(
"No target coord bounds provided to initialize(). "
"You can manually set this for now, but that will be deprecated eventually."
)
else:
self.WEtargetp1_bounds = target_pcoord_bounds
self.fileList = fileList
self.n_data_files = len(fileList)
#####
self.pcoord_ndim = 1
self.pcoord_len = 2
# self.pcoord_len = 50
if tau is None:
log.warning("No tau provided, defaulting to 1.")
tau = 1.0
self.tau = tau
# This is really only used for nAtoms
self.set_topology(refPDBfile)
# self.set_basis(initPDBfile)
if dim_reduce_method is None:
log.warning(
"No dimensionality reduction method provided to initialize(). Defaulting to pca."
"You can manually set this for now, but that will be deprecated eventually."
)
self.dimReduceMethod = "pca"
else:
self.dimReduceMethod = dim_reduce_method
self.vamp_lag = 10
self.vamp_dim = 10
self.nB = 48 # number of bins for optimized WE a la Aristoff
self.nW = 40 # number of walkers for optimized WE a la Aristoff
self.min_walkers = 1 # minimum number of walkers per bin
self.binMethod = "adaptive" # adaptive for dynamic k-means bin edges, uniform for equal spacing on kh
self.allocationMethod = (
"adaptive" # adaptive for dynamic allocation, uniform for equal allocation
)
try:
self.load_iter_data(1)
self.load_iter_coordinates0()
self.coordsExist = True
# Nothing is raised here because this might be fine, depending on what you're doing.
except KeyError:
log.error("Problem getting coordinates, they don't exist yet.\n")
self.coordsExist = False
log.debug("msm_we model successfully initialized")
@property
def WEbasisp1_bounds(self):
return self._WEbasisp1_bounds
@WEbasisp1_bounds.setter
def WEbasisp1_bounds(self, bounds):
"""
Set the boundaries for the basis state in pcoord1, and also set the bin center based on those.
Parameters
----------
bounds
"""
if None in bounds:
raise Exception("A basis boundary has not been correctly provided")
self.WEbasisp1_min, self.WEbasisp1_max = bounds
self._WEbasisp1_bounds = bounds
# Same as in WEtargetp1_bounds
if (
not abs(self.WEbasisp1_min) == np.inf
and not abs(self.WEbasisp1_max) == np.inf
):
self.basis_bin_center = np.mean([self.WEbasisp1_min, self.WEbasisp1_max])
else:
self.basis_bin_center = [self.WEbasisp1_min, self.WEbasisp1_max][
abs(self.WEbasisp1_min) == np.inf
]
@property
def n_lag(self):
return self._n_lag
@n_lag.setter
def n_lag(self, lag):
if not lag == 0:
raise NotImplementedError(
"Only a lag of 1 tau (n_lag = 0) is currently supported"
)
else:
self._n_lag = lag
@property
def WEtargetp1_bounds(self):
return self._WEtargetp1_bounds
@WEtargetp1_bounds.setter
def WEtargetp1_bounds(self, bounds):
"""
Set the boundaries for the target state in pcoord1, and also set the bin center based on those.
Parameters
----------
bounds
"""
if None in bounds:
raise Exception("A target boundary has not been correctly provided")
self.WEtargetp1_min, self.WEtargetp1_max = bounds
self._WEtargetp1_bounds = bounds
# If neither of the target bin boundaries are infinity, then the bin center is their mean.
if (
not abs(self.WEtargetp1_min) == np.inf
and not abs(self.WEtargetp1_max) == np.inf
):
self.target_bin_center = np.mean([self.WEtargetp1_min, self.WEtargetp1_max])
# If one of them IS infinity, their "bin center" is the non-infinity one.
else:
# Janky indexing, if p1_max == inf then that's True, and True == 1 so it picks the second element
self.target_bin_center = [self.WEtargetp1_min, self.WEtargetp1_max][
abs(self.WEtargetp1_min) == np.inf
]
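# A quick worked check of the indexing trick above (illustrative only):
#   bounds = [-np.inf, 1.0] -> abs(-inf) == inf is True, i.e. index 1, so the center is 1.0
#   bounds = [1.0, np.inf]  -> abs(1.0) == inf is False, i.e. index 0, so the center is 1.0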
def initialize_from_h5(self, refPDBfile, initPDBfile, modelName):
"""
Like initialize, but sets state without
Parameters
----------
refPDBfile
initPDBfile
modelName
Returns
-------
"""
def is_WE_basis(self, pcoords):
"""
Checks if the input progress coordinates are in the basis state.
Parameters
----------
pcoords : numpy.ndarray(num_segments, num_pcoords)
Array of progress coordinates for each segment.
Returns
-------
True or False : bool
Todo
----
This only checks the 0th progress coordinate
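Example (bound values hypothetical)::

    # with WEbasisp1_min=9.6 and WEbasisp1_max=12.5,
    # pcoords = np.array([[10.0], [3.0]]) returns array([ True, False])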
"""
isBasis = np.logical_and(
pcoords[:, 0] > self.WEbasisp1_min, pcoords[:, 0] < self.WEbasisp1_max
)
return isBasis
def is_WE_target(self, pcoords):
"""
Checks if the input progress coordinates are in the target state.
Parameters
----------
pcoords : numpy.ndarray(num_segments, num_pcoords)
Array of progress coordinates for each segment.
Returns
-------
True or False : bool
Todo
----
This only checks the 0th progress coordinate
This also assumes you need a small pcoord!
"""
isTarget = np.logical_and(
pcoords[:, 0] > self.WEtargetp1_min, pcoords[:, 0] < self.WEtargetp1_max
)
return isTarget
def load_iter_data(self, n_iter: int):
"""
Update state with the data (including pcoord but not including coords) corresponding to an iteration.
Object fields updated with the information from the selected iteration:
- `self.westList`
- `self.segindList`
- `self.weightList`
- `self.nSeg`
- `self.pcoord0List`
- `self.pcoord1List`
Parameters
----------
n_iter : int
Iteration to get data for.
Returns
-------
None
Todo
----
May want to rework the logic here, depending on how this is used.
Seems like some of this iteration can be removed/optimized.
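Examples
--------
A hedged usage sketch::

    model.load_iter_data(10)
    print(model.nSeg, model.pcoord1List.shape)  # number of segments, (nSeg, pcoord_ndim)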
"""
# log.debug("Getting iteration data")
self.n_iter = n_iter
westList = np.array([])
segindList = np.array([])
weightList = np.array([])
pcoord0List = np.empty((0, self.pcoord_ndim))
pcoord1List = np.empty((0, self.pcoord_ndim))
seg_weights = np.array([])
n_segs = 0
# Iterate through each file index, trying to find files that contain the iteration of interest
# TODO: Can replace this with `for file_idx, fileName in enumerate(self.fileList)`
for file_idx in range(self.n_data_files):
fileName = self.fileList[file_idx]
try:
# Try to find the h5 data file associated with this iteration
dataIn = h5py.File(fileName, "r")
dsetName = "/iterations/iter_%08d/seg_index" % int(n_iter)
# Check if the dataset for this iteration exists in this file
dataset_exists = dsetName in dataIn
# Check to make sure this isn't the last iteration -- last iterations have incomplete data
is_not_last_iteration = (
"/iterations/iter_%08d/seg_index" % int(n_iter + 1) in dataIn
)
log.debug(f"From file {fileName}, loading iteration {n_iter}")
if dataset_exists and is_not_last_iteration:
dset = dataIn[dsetName]
newSet = dset[:]
n_segs_in_file = np.shape(newSet)
n_segs_in_file = n_segs_in_file[0]
dsetNameP = "/iterations/iter_%08d/pcoord" % int(n_iter)
dsetP = dataIn[dsetNameP]
pcoord = dsetP[:]
weights = dset["weight"]
seg_weights = np.append(seg_weights, weights)
# Iterate over segments in this dataset
for seg_idx in range(n_segs_in_file):
# if np.sum(pcoord[seg_idx,self.pcoord_len-1,:])==0.0:
# # intentionally using this to write in dummy pcoords,
# # this is a good thing to have for post-analysis though!
# raise ValueError('Sum pcoord is 0, probably middle of WE iteration, not using iteration')
westList = np.append(westList, file_idx)
segindList = np.append(segindList, seg_idx)
weightList = np.append(weightList, newSet[seg_idx][0])
pcoord0List = np.append(
pcoord0List,
np.expand_dims(pcoord[seg_idx, 0, :], 0),
axis=0,
)
pcoord1List = np.append(
pcoord1List,
np.expand_dims(pcoord[seg_idx, self.pcoord_len - 1, :], 0),
axis=0,
)
n_segs = n_segs + 1
dataIn.close()
except Exception as e:
sys.stdout.write("error in " + fileName + ": " + str(sys.exc_info()[0]) + "\n")
raise e
# log.debug(f"Found {n_segs} segments in iteration {n_iter}")
self.westList = westList.astype(int)
# This is a list of the segment indices
self.segindList = segindList.astype(int)
self.seg_weights[n_iter] = seg_weights
self.weightList = weightList
self.nSeg = n_segs
self.pcoord0List = pcoord0List
self.pcoord1List = pcoord1List
def get_iterations(self):
"""
Updates internal state with the maximum number of iterations, and the number of segments in each iteration.
Note
----
This updates :code:`numSegments` -- :code:`numSegments` is actually a *list* of the number of segments in each iteration.
Returns
-------
None
"""
log.debug("Getting number of iterations and segments")
numSegments = np.array([])
nSeg = 1
n_iter = 1
# Loop over nSegs
# TODO: Not sure I understand the logic in this loop
while nSeg > 0:
nSeg = 0
# Iterate through each filename in fileList, and see if it contains the iteration we're looking for
# TODO: This loop is pretty common, this should be refactored into a find_iteration() or something
for file_index in range(self.n_data_files):
fileName = self.fileList[file_index]
try:
dataIn = h5py.File(fileName, "r")
dsetName = "/iterations/iter_%08d/seg_index" % int(n_iter)
dataset_exists = dsetName in dataIn
is_not_last_iteration = (
"/iterations/iter_%08d/seg_index" % int(n_iter + 1) in dataIn
)
if dataset_exists and is_not_last_iteration:
# If this file does contain the iteration of interest
# if dataset_exists:
dset = dataIn[dsetName]
newSet = dset[:]
nS = np.shape(newSet)
nSeg = nS[0] + nSeg
dataIn.close()
except Exception as e:
log.error(e)
log.error(f"No segments in {fileName} {str(sys.exc_info()[0])}")
if nSeg > 0:
numSegments = np.append(numSegments, nSeg)
log.debug(
"Iteration " + str(n_iter) + " has " + str(nSeg) + " segments...\n"
)
n_iter = n_iter + 1
# Warning: These are not defined until this is run for the first time
self.numSegments = numSegments
self.maxIter = numSegments.size
def get_iterations_iters(self, first_iter: int, last_iter: int):
"""
Updates internal state with the maximum number of iterations, and the number of segments in each iteration.
Parameters
----------
first_iter : int
last_iter : int
Returns
-------
None
Warning
-------
This is potentially deprecated or unnecessary. Currently unused.
"""
numSegments = np.array([])
for n_iter in range(first_iter, last_iter + 1):
nSeg = 0
for iF in range(self.n_data_files):
fileName = self.fileList[iF]
try:
dataIn = h5py.File(fileName, "r")
dsetName = "/iterations/iter_%08d/seg_index" % int(n_iter)
dataset_exists = dsetName in dataIn
if dataset_exists:
dset = dataIn[dsetName]
newSet = dset[:]
nS = np.shape(newSet)
nSeg = nS[0] + nSeg
dataIn.close()
except Exception as e:
log.error(e)
log.error(f"No segments in {fileName} {str(sys.exc_info()[0])}")
if nSeg > 0:
numSegments = np.append(numSegments, nSeg)
sys.stdout.write(
"Iteration " + str(n_iter) + " has " + str(nSeg) + " segments...\n"
)
self.numSegments = numSegments
self.maxIter = last_iter
def set_topology(self, topology):
"""
Updates internal state with a new topology.
Parameters
----------
topology : str
Path to a file containing the PDB with the topology, OR, an mdtraj Trajectory object describing
the reference structure.
Returns
-------
None
"""
if type(topology) is str:
log.debug(
"Input reference topology was provided as a path, trying to load with mdtraj"
)
if topology[-3:] == "dat":
self.reference_coord = np.loadtxt(topology)
self.nAtoms = 1
return
elif topology[-6:] == "prmtop":
struct = md.load_prmtop(topology)
self.reference_structure = struct
self.nAtoms = struct.n_atoms
return
elif not topology[-3:] == "pdb":
log.critical(
"Topology is not a recognized type (PDB)! Proceeding, but no guarantees."
)
struct = md.load(topology)
self.reference_structure = struct
self.reference_coord = np.squeeze(struct._xyz)
self.nAtoms = struct.topology.n_atoms
return
else:
log.debug(
"Input reference topology was provided as an mdtraj structure, loading that"
)
struct = topology
self.reference_structure = struct
self.reference_coord = np.squeeze(struct._xyz)
self.nAtoms = struct.topology.n_atoms
def set_basis(self, basis):
"""
Updates internal state with a new basis.
Parameters
----------
basis : str or mdtraj.Trajectory
Path to a file containing the PDB with the new basis state, OR, an mdtraj Trajectory object describing
the new basis structure.
Returns
-------
None
"""
if type(basis) is str:
log.debug(
"Input basis state topology was provided as a path, trying to load with mdtraj"
)
if basis[-3:] == "dat":
self.basis_coords = np.loadtxt(basis)
elif basis[-3:] == "pdb":
struct = md.load(basis)
self.basis_structure = struct
self.basis_coords = np.squeeze(struct._xyz)
else:
log.critical(
"Basis is not a recognized type! Proceeding, but no guarantees."
)
# raise NotImplementedError("Basis coordinates are not a recognized filetype")
else:
log.debug(
"Input reference topology was provided as an mdtraj structure, loading that"
)
struct = basis
self.basis_structure = struct
self.basis_coords = np.squeeze(struct._xyz)
def get_transition_data(self, n_lag):
"""
This function analyzes pairs of coordinates at the current iteration, set by :code:`self.n_iter`, and at some
lag in the past, :code:`self.n_iter - n_lag`.
Segments where a walker was warped (recycled) use the basis coords as the lagged coords.
Parameters
----------
n_lag : int
Number of lags to use for transitions.
Returns
-------
None
"""
log.warning(
"Getting transition data at arbitrary lags > 0 is not yet supported! Use at your own risk."
)
# get segment history data at lag time n_lag from current iter
if n_lag > self.n_iter:
sys.stdout.write(
"too much lag for iter... n_lag reduced to: " + str(self.n_iter) + "\n"
)
n_lag = self.n_iter
if n_lag >= self.n_hist:
sys.stdout.write("too much lag for stored history... recalculating...\n")
self.get_seg_histories(n_lag)
self.n_lag = n_lag
segindList_lagged = self.seg_histories[:, n_lag]
# TODO: What exactly is this a list of?
# seg_histories is a list of indices of the segments
warpList = self.seg_histories[:, 0:n_lag] # check for warps
warpList = np.sum(warpList < 0, 1)
# Get the weights for the lagged and current iterations
# If something was split/merged between n_iter and n_iter-n_lag , then the weights may have changed, so
# check a particular segment's weight.
# TODO: Does this effectively get the parent weight if it was split from something else? Then weight_histories
# would need to be tracking parents/children
weightList_lagged = self.weight_histories[:, n_lag]
# TODO: Does this copy need to be made?
weightList = self.weightList
# This will become a list of (lagged iter coord, current iter coord)
coordPairList = np.zeros((self.nSeg, self.nAtoms, 3, 2))
prewarpedStructures = np.zeros((self.nSeg, self.nAtoms, 3))
nWarped = 0
# Go through each segment, and get pairs of coordinates at current iter (n_iter) and
# lagged iter (n_iter-n_lag)
for seg_idx in range(self.nSeg):
# FIXME: Try statements should encompass the smallest amount of code
# possible - anything could be tripping this
# try:
if seg_idx == 0:
westFile = self.fileList[self.westList[seg_idx]]
dataIn = h5py.File(westFile, "r")
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(self.n_iter)
dset = dataIn[dsetName]
coords_current = dset[:]
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(
self.n_iter - n_lag
)
dset = dataIn[dsetName]
coords_lagged = dset[:]
elif self.westList[seg_idx] != self.westList[seg_idx - 1]:
# FIXME: I think you can just move this close to an if statement in the beginning, and then remove
# this whole if/elif. Everything after that close() seems to be duplicated.
dataIn.close()
westFile = self.fileList[self.westList[seg_idx]]
dataIn = h5py.File(westFile, "r")
# Load the data for the current iteration
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(self.n_iter)
dset = dataIn[dsetName]
coords_current = dset[:]
# Load the lagged data for (iteration - n_lag)
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(
self.n_iter - n_lag
)
dset = dataIn[dsetName]
coords_lagged = dset[:]
coordPairList[seg_idx, :, :, 1] = coords_current[
self.segindList[seg_idx], 1, :, :
]
# If this segment has no warps, then add the lagged coordinates
if warpList[seg_idx] == 0:
# Try to set the previous coord in the transition pair to the segment's lagged coordinates
try:
lagged_seg_index = segindList_lagged[seg_idx]
coordPairList[seg_idx, :, :, 0] = coords_lagged[
lagged_seg_index, 0, :, :
]
# If this fails, then there were no lagged coordinates for this structure.
except IndexError as e:
log.critical(
f"Lagged coordinates do not exist for the structure in segment {seg_idx}"
)
raise e
weightList_lagged[seg_idx] = 0.0
weightList[
seg_idx
] = 0.0 # set transitions without structures to zero weight
# If something was recycled during this segment, then instead of using the lagged coordinates,
# just use the basis coords.
# But, also save the original structure before the warp!
elif warpList[seg_idx] > 0:
# Store the structure as it was before the warp (pre-warped structure)
prewarpedStructures[nWarped, :, :] = coords_lagged[
segindList_lagged[seg_idx], 0, :, :
]
assert self.basis_coords is not None
coordPairList[seg_idx, :, :, 0] = self.basis_coords
nWarped = nWarped + 1
# # TODO: What triggers this? When that critical log hits, come update this comment and the main docstring.
# # This triggers on index out of bounds... But that's a huge try statement!
# except Exception as e:
# log.critical("Document whatever's causing this exception!")
# log.warning(e)
# raise e
# weightList_lagged[seg_idx] = 0.0
# weightList[
# seg_idx
# ] = 0.0 # set transitions without structures to zero weight
# Squeeze removes axes of length 1 -- this helps with np.where returning nested lists of indices
# FIXME: is any of this necessary? This kinda seems like it could be replaced with
# something like indWarped = np.squeeze(np.where(warpList > 0)).astype(int)
indWarped = np.squeeze(np.where(warpList > 0))
indWarpedArray = np.empty(nWarped)
indWarpedArray[0:nWarped] = indWarped
indWarped = indWarpedArray.astype(int)
# Get the current and lagged weights
# This returns (the wrong? none at all?) weights for segments with warps, which is corrected below
transitionWeights = weightList.copy()
departureWeights = weightList_lagged.copy()
# Get correct weights for segments that warped
for iW in range(nWarped):
# The coord pair here is (pre-warped structure, reference topology) instead of
# (lagged structure, current structure)
coordPair = np.zeros((1, self.nAtoms, 3, 2))
coordPair[0, :, :, 0] = prewarpedStructures[iW, :, :]
coordPair[0, :, :, 1] = self.reference_coord
coordPairList = np.append(coordPairList, coordPair, axis=0)
# TODO: iterWarped appears to be the iteration the warp happened at
iterWarped = np.squeeze(
|
np.where(self.seg_histories[indWarped[iW], :] < 0)
|
numpy.where
|
import numpy as np
import tensorflow as tf
import tree
from scipy.special import expit
def create_grasping_env_discrete_sampler(
env=None,
discretizer=None,
deterministic_model=None,
min_samples_before_train=None,
epsilon=None,
):
total_dimensions = np.prod(discretizer.dimensions)
def sample_random():
obs = env.get_observation()
action_discrete = np.random.randint(0, total_dimensions)
action_undiscretized = discretizer.undiscretize(discretizer.unflatten(action_discrete))
reward = env.do_grasp(action_undiscretized)
return obs, action_discrete, reward, {'sample_random': 1, 'action_undiscretized': action_undiscretized}
def sample_deterministic():
obs = env.get_observation()
action_discrete = deterministic_model(np.array([obs])).numpy()
action_undiscretized = discretizer.undiscretize(discretizer.unflatten(action_discrete))
reward = env.do_grasp(action_undiscretized)
return obs, action_discrete, reward, {'sample_deterministic': 1, 'action_undiscretized': action_undiscretized}
def sampler(num_samples, force_deterministic=False):
if force_deterministic:
return sample_deterministic()
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
#print("sampling random rand", rand, "epsilon", epsilon, "num_samples", num_samples, "minsamples", min_samples_before_train)
return sample_random()
else:
#print("deterministic")
return sample_deterministic()
return sampler
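# Illustrative usage of the sampler factory above (the env, discretizer and model objects here are
# assumptions, not defined in this file):
#   sampler = create_grasping_env_discrete_sampler(
#       env=env, discretizer=discretizer, deterministic_model=model,
#       min_samples_before_train=500, epsilon=0.1)
#   obs, action, reward, infos = sampler(num_samples=0)  # early calls always sample randomly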
def create_fc_grasping_env_discrete_sampler(
env=None,
discretizer=None,
deterministic_model=None,
min_samples_before_train=None,
epsilon=None,
):
total_dimensions = np.prod(discretizer.dimensions)
def sample_random():
obs = env.get_observation()
action_discrete = np.random.randint(0, total_dimensions)
#action_undiscretized = discretizer.undiscretize(discretizer.unflatten(action_discrete))
reward = env.do_grasp(action_discrete)
return obs, action_discrete, reward, {'sample_random': 1}
def sample_deterministic():
obs = env.get_observation()
action_discrete = deterministic_model(np.array([obs])).numpy()
#action_undiscretized = discretizer.undiscretize(discretizer.unflatten(action_discrete))
reward = env.do_grasp(action_discrete)
return obs, action_discrete, reward, {'sample_deterministic': 1}
def sampler(num_samples, force_deterministic=False):
if force_deterministic:
return sample_deterministic()
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
#print("sampling random rand", rand, "epsilon", epsilon, "num_samples", num_samples, "minsamples", min_samples_before_train)
return sample_random()
else:
#print("deterministic")
return sample_deterministic()
return sampler
def create_fake_grasping_discrete_sampler(
env=None,
discrete_dimension=None,
deterministic_model=None,
min_samples_before_train=None,
epsilon=None,
):
def sample_random():
obs = env.get_observation()
action = np.random.randint(0, discrete_dimension)
reward = env.do_grasp(action)
return obs, action, reward, {'sample_random': 1}
def sample_deterministic():
obs = env.get_observation()
action = deterministic_model(np.array([obs])).numpy()
reward = env.do_grasp(action)
return obs, action, reward, {'sample_deterministic': 1}
def sampler(num_samples):
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
return sample_random()
else:
return sample_deterministic()
return sampler
def create_grasping_env_autoregressive_discrete_sampler(
env=None,
discretizer=None,
deterministic_model=None,
min_samples_before_train=None,
epsilon=None,
):
def sample_random():
obs = env.get_observation()
action_discrete = np.random.randint(0, discretizer.dimensions)
action_undiscretized = discretizer.undiscretize(action_discrete)
reward = env.do_grasp(action_undiscretized)
return obs, action_discrete, reward, {'sample_random': 1}
def sample_deterministic():
obs = env.get_observation()
action_onehot = deterministic_model(np.array([obs]))
action_discrete = np.array([tf.argmax(a, axis=-1).numpy().squeeze() for a in action_onehot])
action_undiscretized = discretizer.undiscretize(action_discrete)
reward = env.do_grasp(action_undiscretized)
return obs, action_discrete, reward, {'sample_deterministic': 1}
def sampler(num_samples):
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
return sample_random()
else:
return sample_deterministic()
return sampler
def create_grasping_env_ddpg_sampler(
env=None,
policy_model=None,
unsquashed_model=None,
action_dim=None,
min_samples_before_train=1000,
num_samples_at_end=50000,
noise_std_start=0.5,
noise_std_end=0.01,
):
""" Linear annealing from start noise to end noise. """
def sample_random():
obs = env.get_observation()
action = np.random.uniform(-1.0, 1.0, size=(action_dim,))
reward = env.do_grasp(env.from_normalized_action(action))
return obs, action, reward, {'action': action}
def sample_with_noise(noise_std):
obs = env.get_observation()
noise = np.random.normal(size=(action_dim,)) * noise_std
action_deterministic = policy_model(np.array([obs])).numpy()[0]
action = np.clip(action_deterministic + noise, -1.0, 1.0)
reward = env.do_grasp(env.from_normalized_action(action))
infos = {
'action': action,
'action_deterministic': action_deterministic,
'noise': noise,
'noise_std': noise_std,
}
if unsquashed_model:
action_unsquashed = unsquashed_model(np.array([obs])).numpy()[0]
infos['unsquashed_action'] = action_unsquashed
return obs, action, reward, infos
def sampler(num_samples):
if num_samples < min_samples_before_train: # epsilon greedy or initial samples
return sample_random()
noise_std = np.interp(num_samples,
np.array([min_samples_before_train, num_samples_at_end]),
np.array([noise_std_start, noise_std_end]))
return sample_with_noise(noise_std)
return sampler
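# Worked example of the annealing above: with the defaults (min_samples_before_train=1000,
# num_samples_at_end=50000, noise_std_start=0.5, noise_std_end=0.01), np.interp gives a noise_std
# of 0.5 at 1000 samples, ~0.255 halfway through at 25500 samples, and 0.01 at >= 50000 samples.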
def create_grasping_env_ddpg_epsilon_greedy_sampler(
env=None,
policy_model=None,
unsquashed_model=None,
Q_model=None,
action_dim=None,
min_samples_before_train=1000,
epsilon=0.1,
):
def sample_random():
obs = env.get_observation()
action = np.random.uniform(-1.0, 1.0, size=(action_dim,))
reward = env.do_grasp(env.from_normalized_action(action))
return obs, action, reward, {'action': action}
def sample_deterministic():
obs = env.get_observation()
action = policy_model(np.array([obs])).numpy()[0]
action = np.clip(action, -1.0, 1.0)
reward = env.do_grasp(env.from_normalized_action(action))
infos = {'action': action}
if unsquashed_model:
action_unsquashed = unsquashed_model(np.array([obs])).numpy()[0]
infos['unsquashed_action'] = action_unsquashed
if Q_model:
logits = Q_model((np.array([obs]), np.array([action]))).numpy()[0]
infos['Q_logits_for_action'] = logits
return obs, action, reward, infos
def sampler(num_samples):
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
return sample_random()
else:
return sample_deterministic()
return sampler
def create_grasping_env_Q_greedy_sampler(
env=None,
Q_model=None,
num_samples=256,
num_samples_elites=16,
num_samples_repeat=4,
action_dim=None,
min_samples_before_train=1000,
epsilon=0.1,
):
if isinstance(num_samples, int):
num_samples = [num_samples] * num_samples_repeat
elif isinstance(num_samples, (list, tuple)):
if len(num_samples) == 2:
num_samples = [num_samples[0]] + [num_samples[1]] * (num_samples_repeat - 1)
elif len(num_samples) != num_samples_repeat:
raise ValueError
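# With the defaults (num_samples_repeat=4): num_samples=256 -> [256, 256, 256, 256], while
# num_samples=(512, 128) -> [512, 128, 128, 128], i.e. a larger initial pass then smaller refinement rounds.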
def sample_random():
obs = env.get_observation()
action = np.random.uniform(-1.0, 1.0, size=(action_dim,))
reward = env.do_grasp(env.from_normalized_action(action))
return obs, action, reward, {'action': action}
def sample_deterministic():
obs = env.get_observation()
max_action_samples = None
max_action_Q_values = None
for i, n in enumerate(num_samples):
if i == 0:
action_samples = np.random.uniform(-1.0, 1.0, size=(n, action_dim))
else:
action_means = np.mean(max_action_samples, axis=0)
action_stds = np.std(max_action_samples, axis=0)
# print(action_means, action_stds)
action_samples = np.random.normal(loc=action_means, scale=action_stds, size=(n, action_dim))
action_samples = np.clip(action_samples, -1.0, 1.0)
stacked_obs = np.array([obs] * n)
Q_values = Q_model((stacked_obs, action_samples)).numpy()
max_Q_inds = np.argpartition(Q_values.squeeze(), -num_samples_elites)[-num_samples_elites:]
max_action_samples = action_samples[max_Q_inds, ...]
max_action_Q_values = Q_values[max_Q_inds, ...]
max_ind = np.argmax(max_action_Q_values)
action = max_action_samples[max_ind, ...]
Q_value = expit(max_action_Q_values[max_ind, ...][0])
reward = env.do_grasp(env.from_normalized_action(action))
infos = {
'action': action,
'max_Q_value': Q_value
}
# print(infos)
return obs, action, reward, infos
def sampler(num_samples):
rand = np.random.uniform()
if rand < epsilon or num_samples < min_samples_before_train: # epsilon greedy or initial samples
return sample_random()
else:
return sample_deterministic()
return sampler
def create_grasping_env_soft_q_sampler(
env=None,
discretizer=None,
logits_models=None,
min_samples_before_train=None,
deterministic=False,
alpha=10.0,
beta=0.0,
aggregate_func="min",
uncertainty_func=None
):
total_dimensions = np.prod(discretizer.dimensions)
def sample_random():
obs = env.get_observation()
action_discrete = np.random.randint(0, total_dimensions)
action_undiscretized = discretizer.undiscretize(discretizer.unflatten(action_discrete))
reward = env.do_grasp(action_undiscretized)
return obs, action_discrete, reward, {"sample_random": 1, "action": action_undiscretized}
@tf.function(experimental_relax_shapes=True)
def calc_probs(obs):
obs = obs[tf.newaxis, ...]
all_logits = [logits_model(obs) for logits_model in logits_models]
all_Q_values = tf.nn.sigmoid(all_logits)
min_Q_values = tf.reduce_min(all_Q_values, axis=0)
mean_Q_values = tf.reduce_mean(all_Q_values, axis=0)
max_Q_values = tf.reduce_max(all_Q_values, axis=0)
if aggregate_func == "min":
agg_Q_values = min_Q_values
elif aggregate_func == "mean":
agg_Q_values = mean_Q_values
elif aggregate_func == "max":
agg_Q_values = max_Q_values
else:
raise NotImplementedError()
if uncertainty_func is None:
uncertainty = tf.constant(0.0)
elif uncertainty_func == "std":
uncertainty = tf.math.reduce_std(all_Q_values, axis=0)
elif uncertainty_func == "diff":
uncertainty = max_Q_values - min_Q_values
else:
raise NotImplementedError()
probs = tf.nn.softmax(alpha * agg_Q_values + beta * uncertainty, axis=-1)
diagnostics = {
"min_Q_values": tf.squeeze(min_Q_values),
"mean_Q_values": tf.squeeze(mean_Q_values),
"max_Q_values": tf.squeeze(max_Q_values),
}
return tf.squeeze(probs), tf.squeeze(agg_Q_values), diagnostics
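# Note on alpha: it acts as an inverse temperature on the aggregated Q-values. For example, with
# alpha=10 and two aggregated Q-values of 0.2 and 0.8, the softmax logits differ by 6, so the
# higher-Q action is picked with probability ~exp(6)/(1+exp(6)) ~ 0.998 (illustrative arithmetic).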
def sample_policy():
obs = env.get_observation()
probs, agg_Q_values, diagnostics = calc_probs(obs)
probs = probs.numpy()
agg_Q_values = agg_Q_values.numpy()
diagnostics = tree.map_structure(lambda x: x.numpy(), diagnostics)
if deterministic:
action_discrete =
|
np.argmax(agg_Q_values)
|
numpy.argmax
|
#!/usr/bin/env python
# import stuff
import os, sys, scipy.io, numpy as np
from tvb.simulator.lab import *
######################### THINGS TO CHANGE ##############################
# define model:
mymodel = models.Generic2dOscillator()
# define model parameters:
subj = sys.argv[1]
global_coupling = float(sys.argv[2])
noise_val = float(sys.argv[3])
# define directories and filenames
pse_name = 'simtest1'
# define timing
TR=2000.0 # TR of fMRI
simmins=0.5 # length of simulation in minutes
dtval=0.5 # integration step size; results in NaNs if too large
##########################################################################
indir = os.path.join(os.getcwd(),'input/')
outdir = os.path.join(os.getcwd(),'output/')
results_fn = outdir + '/' + pse_name + '_' + subj
for a in range(len(sys.argv)-2):
results_fn = results_fn + '_' + sys.argv[a+2]
results_fn = results_fn + '.mat'
datamat = scipy.io.loadmat(indir + '/' + subj + '_connectivity.mat')
sc_weights = datamat['sc_weights']
sc_weights = sc_weights / sc_weights.max()
tract_lengths = datamat['tract_lengths']
emp_fc = datamat['fc']
wm = connectivity.Connectivity(weights=sc_weights, tract_lengths=tract_lengths)
wm_coupling = coupling.Linear(a = global_coupling)
##### run simulation
sim = simulator.Simulator(model=mymodel, connectivity=wm, coupling=wm_coupling, conduction_speed=3.0,
integrator=integrators.HeunStochastic(dt=dtval, noise=noise.Additive(nsig=np.array([noise_val]))),
monitors=(monitors.Bold(period=TR), monitors.TemporalAverage(period=10.0), monitors.ProgressLogger(period=10000.0)),
simulation_length=(simmins+1.0)*60.0*1000.0)
sim.configure()
(time, data), (tavg_time, tavg_data), _ = sim.run() # data = time x state_variables x nodes x modes
##### remove transient
data_all = data
data = data[-int(simmins*60*1000/TR):,0,:,0]
tavg_all = tavg_data
tavg_data = tavg_data[-int(simmins*60*1000/10.0):,0,:,0] #
data = np.squeeze(data)
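# Worked example of the transient removal above: with simmins=0.5 and TR=2000.0,
# int(0.5*60*1000/2000) = 15, so only the last 15 BOLD samples are kept; the temporal-average
# monitor (period 10 ms) keeps the last int(0.5*60*1000/10) = 3000 samples.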
##### calculate sim-emp correlations
sim_fc = np.corrcoef(data.T)
for i in range(sim_fc.shape[0]):
sim_fc[i:,i]=np.inf
for i in range(emp_fc.shape[0]):
emp_fc[i:,i]=np.inf
sim_fc = sim_fc[~np.isinf(sim_fc)]
emp_fc = emp_fc[~np.isinf(emp_fc)]
def fisherz(rmat):
z = 0.5*
|
np.log((1+rmat)/(1-rmat))
|
numpy.log
|
import datetime as dt
import numpy as np
import pytest
import OMMBV
import OMMBV.heritage
from OMMBV.tests.test_core import gen_plot_grid_fixed_alt
import OMMBV.vector
class TestIntegratedMethods(object):
def setup(self):
"""Setup test environment before each function."""
self.lats, self.longs, self.alts = gen_plot_grid_fixed_alt(550.)
self.date = dt.datetime(2000, 1, 1)
return
def teardown(self):
"""Clean up test environment after each function."""
del self.lats, self.longs, self.alts
def test_integrated_unit_vector_components(self):
"""Test Field-Line Integrated Unit Vectors"""
p_lats, p_longs, p_alts = gen_plot_grid_fixed_alt(550.)
# data returned are the locations along each direction
# the full range of points obtained by iterating over all
# recasting alts into a more convenient form for later calculation
p_alts = [p_alts[0]]*len(p_longs)
zvx = np.zeros((len(p_lats), len(p_longs)))
zvy = zvx.copy()
zvz = zvx.copy()
mx = zvx.copy()
my = zvx.copy()
mz = zvx.copy()
bx = zvx.copy()
by = zvx.copy()
bz = zvx.copy()
date = dt.datetime(2000, 1, 1)
fcn = OMMBV.heritage.calculate_integrated_mag_drift_unit_vectors_ecef
for i, p_lat in enumerate(p_lats):
(tzx, tzy, tzz,
tbx, tby, tbz,
tmx, tmy, tmz
) = fcn([p_lat]*len(p_longs), p_longs, p_alts, [date]*len(p_longs),
steps=None, max_steps=10000, step_size=10.,
ref_height=120.)
(zvx[i, :], zvy[i, :],
zvz[i, :]) = OMMBV.vector.ecef_to_enu(tzx, tzy, tzz,
[p_lat] * len(p_longs),
p_longs)
(bx[i, :], by[i, :],
bz[i, :]) = OMMBV.vector.ecef_to_enu(tbx, tby, tbz,
[p_lat] * len(p_longs),
p_longs)
(mx[i, :], my[i, :],
mz[i, :]) = OMMBV.vector.ecef_to_enu(tmx, tmy, tmz,
[p_lat] * len(p_longs),
p_longs)
# Zonal generally eastward
assert
|
np.all(zvx > 0.7)
|
numpy.all
|
'''Invert CNN feature to reconstruct image: Reconstruct image from CNN features using gradient descent with momentum.
Author: <NAME> <<EMAIL>.>
'''
import os
from datetime import datetime
import numpy as np
import PIL.Image
import torch.optim as optim
import torch
from loss import MSE_with_regulariztion
from utils import img_deprocess, img_preprocess, normalise_img, \
vid_deprocess, normalise_vid, vid_preprocess, clip_extreme_value, get_cnn_features, create_feature_masks,\
save_video, save_gif, gaussian_blur
from utils_hts import vid2flow, flow2vid, flow_preprocess, flow_deprocess, flow_norm
def reconstruct_stim(features, net,
img_mean=np.array([104,117,123] * 11).astype(np.float32),
img_std=np.array([1,1,1] * 11).astype(np.float32),
norm=1,
bgr = False,
initial_input=None,
input_size=(11, 224, 224, 3),
layer_weight=None, channel=None, mask=None,
opt_name='SGD',
prehook_dict = {},
lr_start= 2., lr_end=1e-10,
momentum_start=0.9, momentum_end=0.9,
pix_decay = True,decay_start=0.2, decay_end=1e-10,
image_jitter=False, jitter_size=4,
image_blur=True, sigma_start=0.2, sigma_end=0.5,
grad_normalize = True,
p=3, lamda=0.5,
TVlambda = [0,0],
clip_extreme=False, clip_extreme_every=4, e_pct_start=1, e_pct_end=1,
clip_small_norm=False, clip_small_norm_every=4, n_pct_start=5., n_pct_end=5.,
loss_type='l2', iter_n=200, save_intermediate=False,
save_intermediate_every=1, save_intermediate_path=None,
disp_every=1,
):
if loss_type == "l2":
loss_fun = torch.nn.MSELoss(reduction='sum')
elif loss_type == "L2_with_reg":
loss_fun = MSE_with_regulariztion(L_lambda=lamda, alpha=p, TV_lambda=TVlambda)
else:
raise ValueError(loss_type + ' is not correct')
# make save dir
if save_intermediate:
if save_intermediate_path is None:
save_intermediate_path = os.path.join('..', 'recon_img_by_icnn' + datetime.now().strftime('%Y%m%dT%H%M%S'))
if not os.path.exists(save_intermediate_path):
os.makedirs(save_intermediate_path)
# image size
input_size = input_size
# image mean
img_mean = img_mean
img_std = img_std
norm = norm
# image norm
noise_img = np.random.randint(0, 256, (input_size))
img_norm0 = np.linalg.norm(noise_img)
img_norm0 = img_norm0/2.
# initial input
if initial_input is None:
initial_input = np.random.randint(0, 256, (input_size))
else:
input_size = initial_input.shape
if save_intermediate:
if len(input_size) == 3:
#image
save_name = 'initial_image.jpg'
if bgr:
PIL.Image.fromarray(np.uint8(initial_input[...,[2,1,0]])).save(os.path.join(save_intermediate_path, save_name))
else:
PIL.Image.fromarray(np.uint8(initial_input)).save(os.path.join(save_intermediate_path, save_name))
elif len(input_size) == 4:
# video
# if you install cv2 and ffmpeg, you can use save_video function which save preferred video as video format
save_name = 'initial_video.avi'
save_video(initial_input, save_name, save_intermediate_path, bgr)
save_name = 'initial_video.gif'
save_gif(initial_input, save_name, save_intermediate_path, bgr,
fr_rate=150)
else:
print('Input size is not appropriate for save')
assert len(input_size) not in [3,4]
# layer_list
layer_dict = features
layer_list = list(features.keys())
# number of layers
num_of_layer = len(layer_list)
# layer weight
if layer_weight is None:
weights = np.ones(num_of_layer)
weights = np.float32(weights)
weights = weights / weights.sum()
layer_weight = {}
for j, layer in enumerate(layer_list):
layer_weight[layer] = weights[j]
# feature mask
feature_masks = create_feature_masks(layer_dict, masks=mask, channels=channel)
# iteration for gradient descent
input_stim = initial_input.copy().astype(np.float32)
input_stim = vid2flow(input_stim)
input_stim = flow_preprocess(input_stim, img_mean, img_std, norm)
loss_list = np.zeros(iter_n, dtype='float32')
for t in range(iter_n):
# parameters
lr = lr_start + t * (lr_end - lr_start) / iter_n
momentum = momentum_start + t * (momentum_end - momentum_start) / iter_n
decay = decay_start + t * (decay_end - decay_start) / iter_n
sigma = sigma_start + t * (sigma_end - sigma_start) / iter_n
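# Worked example of the linear schedules above: with lr_start=2.0, lr_end~0 and iter_n=200,
# lr is 2.0 at t=0, ~1.0 at t=100 and ~0.01 at t=199; sigma ramps the other way, from 0.2 towards 0.5.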
# shift
if image_jitter:
ox, oy = np.random.randint(-jitter_size, jitter_size+1, 2)
input_stim = np.roll(np.roll(input_stim, ox, -1), oy, -2)
# forward
input_stim = torch.tensor(input_stim[np.newaxis], requires_grad=True)
if opt_name == 'Adam':
op = optim.Adam([input_stim], lr = lr)
elif opt_name == 'SGD':
op = optim.SGD([input_stim], lr=lr, momentum=momentum)
elif opt_name == 'Adadelta':
op = optim.Adadelta([input_stim])
elif opt_name == 'Adagrad':
op = optim.Adagrad([input_stim])
elif opt_name == 'AdamW':
op = optim.AdamW([input_stim])
elif opt_name == 'SparseAdam':
op = optim.SparseAdam([input_stim])
elif opt_name == 'Adamax':
op = optim.Adamax([input_stim])
elif opt_name == 'ASGD':
op = optim.ASGD([input_stim])
elif opt_name == 'RMSprop':
op = optim.RMSprop([input_stim])
elif opt_name == 'Rprop':
op = optim.Rprop([input_stim])
fw = get_cnn_features(net, input_stim, features.keys(), prehook_dict)
# backward for net
err = 0.
loss = 0.
# set the grad of network to 0
net.zero_grad()
op.zero_grad()
for j in range(num_of_layer):
target_layer_id = num_of_layer -1 -j
target_layer = layer_list[target_layer_id]
# extract activation or mask at input true video, and mask
act_j = fw[target_layer_id].clone()
feat_j = features[target_layer].clone()
mask_j = feature_masks[target_layer]
layer_weight_j = layer_weight[target_layer]
masked_act_j = torch.masked_select(act_j, torch.FloatTensor(mask_j).bool())
masked_feat_j = torch.masked_select(feat_j, torch.FloatTensor(mask_j).bool())
# calculate loss using pytorch loss function
loss_j = loss_fun(masked_act_j, masked_feat_j) * layer_weight_j
# backward the gradient to the video
loss_j.backward(retain_graph=True)
loss += loss_j.detach().numpy()
if grad_normalize:
grad_mean = torch.abs(input_stim.grad).mean()
if grad_mean > 0:
input_stim.grad /= grad_mean
op.step()
input_stim = input_stim.detach().numpy()[0]
err = err + loss
loss_list[t] = loss
# clip pixels with extreme value
if clip_extreme and (t+1) % clip_extreme_every == 0:
e_pct = e_pct_start + t * (e_pct_end - e_pct_start) / iter_n
input_stim = clip_extreme_pixel(input_stim, e_pct)
# clip pixels with small norm
if clip_small_norm and (t+1) % clip_small_norm_every == 0:
n_pct = n_pct_start + t * (n_pct_end - n_pct_start) / iter_n
input_stim = clip_small_norm_pixel(input_stim, n_pct)
# unshift
if image_jitter:
input_stim = np.roll(
|
np.roll(input_stim, -ox, -1)
|
numpy.roll
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
ak_to_buffers = ak._v2.operations.to_buffers
ak_from_buffers = ak._v2.operations.from_buffers
ak_from_iter = ak._v2.operations.from_iter
to_list = ak._v2.operations.to_list
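# Every test below is a round-trip check: serialize a layout with ak_to_buffers and rebuild it
# with ak_from_buffers, asserting the contents are unchanged. A minimal sketch of the pattern:
#   layout = ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3]))
#   assert to_list(ak_from_buffers(*ak_to_buffers(layout))) == [1.1, 2.2, 3.3]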
def test_EmptyArray():
v2a = ak._v2.contents.emptyarray.EmptyArray()
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_NumpyArray():
v2a = ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.numpyarray.NumpyArray(
np.arange(2 * 3 * 5, dtype=np.int64).reshape(2, 3, 5)
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
def test_RegularArray_NumpyArray():
v2a = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])),
3,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.emptyarray.EmptyArray(), 0, zeros_length=10
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
def test_ListArray_NumpyArray():
v2a = ak._v2.contents.listarray.ListArray(
ak._v2.index.Index(np.array([4, 100, 1], np.int64)),
ak._v2.index.Index(np.array([7, 100, 3, 200], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])
),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_ListOffsetArray_NumpyArray():
v2a = ak._v2.contents.listoffsetarray.ListOffsetArray(
ak._v2.index.Index(np.array([1, 4, 4, 6, 7], np.int64)),
ak._v2.contents.numpyarray.NumpyArray([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_RecordArray_NumpyArray():
v2a = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
["x", "y"],
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
),
],
None,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
v2c = ak._v2.contents.recordarray.RecordArray([], [], 10)
assert to_list(ak_from_buffers(*ak_to_buffers(v2c))) == to_list(v2c)
v2d = ak._v2.contents.recordarray.RecordArray([], None, 10)
assert to_list(ak_from_buffers(*ak_to_buffers(v2d))) == to_list(v2d)
def test_IndexedArray_NumpyArray():
v2a = ak._v2.contents.indexedarray.IndexedArray(
ak._v2.index.Index(np.array([2, 2, 0, 1, 4, 5, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_IndexedOptionArray_NumpyArray():
v2a = ak._v2.contents.indexedoptionarray.IndexedOptionArray(
ak._v2.index.Index(np.array([2, 2, -1, 1, -1, 5, 4], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_ByteMaskedArray_NumpyArray():
v2a = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([1, 0, 1, 0, 1], np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([0, 1, 0, 1, 0], np.int8)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
valid_when=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
def test_BitMaskedArray_NumpyArray():
v2a = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=True,
length=13,
lsb_order=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=False,
length=13,
lsb_order=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
v2c = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=True,
length=13,
lsb_order=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2c))) == to_list(v2c)
v2d = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
np.uint8,
)
)
),
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
)
),
valid_when=False,
length=13,
lsb_order=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2d))) == to_list(v2d)
def test_UnmaskedArray_NumpyArray():
v2a = ak._v2.contents.unmaskedarray.UnmaskedArray(
ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_UnionArray_NumpyArray():
v2a = ak._v2.contents.unionarray.UnionArray(
ak._v2.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], np.int8)),
ak._v2.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100], np.int64)),
[
ak._v2.contents.numpyarray.NumpyArray(np.array([1, 2, 3], np.int64)),
ak._v2.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5])),
],
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_RegularArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
3,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.regulararray.RegularArray(
ak._v2.contents.recordarray.RecordArray(
[ak._v2.contents.emptyarray.EmptyArray()], ["nest"]
),
0,
zeros_length=10,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
def test_ListArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.listarray.ListArray(
ak._v2.index.Index(np.array([4, 100, 1], np.int64)),
ak._v2.index.Index(np.array([7, 100, 3, 200], np.int64)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])
)
],
["nest"],
),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_ListOffsetArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.listoffsetarray.ListOffsetArray(
ak._v2.index.Index(np.array([1, 4, 4, 6], np.int64)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
[6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]
)
],
["nest"],
),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_IndexedArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.indexedarray.IndexedArray(
ak._v2.index.Index(np.array([2, 2, 0, 1, 4, 5, 4], np.int64)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_IndexedOptionArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.indexedoptionarray.IndexedOptionArray(
ak._v2.index.Index(np.array([2, 2, -1, 1, -1, 5, 4], np.int64)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_ByteMaskedArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([1, 0, 1, 0, 1], np.int8)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
valid_when=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.bytemaskedarray.ByteMaskedArray(
ak._v2.index.Index(np.array([0, 1, 0, 1, 0], np.int8)),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
)
],
["nest"],
),
valid_when=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
def test_BitMaskedArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
False,
True,
False,
True,
]
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=True,
length=13,
lsb_order=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
v2b = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
1,
0,
],
np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=False,
length=13,
lsb_order=False,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2b))) == to_list(v2b)
v2c = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
1,
],
np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=True,
length=13,
lsb_order=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2c))) == to_list(v2c)
v2d = ak._v2.contents.bitmaskedarray.BitMaskedArray(
ak._v2.index.Index(
np.packbits(
np.array(
[
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
],
np.uint8,
)
)
),
ak._v2.contents.recordarray.RecordArray(
[
ak._v2.contents.numpyarray.NumpyArray(
np.array(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
]
)
)
],
["nest"],
),
valid_when=False,
length=13,
lsb_order=True,
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2d))) == to_list(v2d)
def test_UnmaskedArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.unmaskedarray.UnmaskedArray(
ak._v2.contents.recordarray.RecordArray(
[ak._v2.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))],
["nest"],
)
)
assert to_list(ak_from_buffers(*ak_to_buffers(v2a))) == to_list(v2a)
def test_UnionArray_RecordArray_NumpyArray():
v2a = ak._v2.contents.unionarray.UnionArray(
ak._v2.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], np.int8)),
ak._v2.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100], np.int64)),
[
ak._v2.contents.recordarray.RecordArray(
[ak._v2.contents.numpyarray.NumpyArray(
|
np.array([1, 2, 3], np.int64)
|
numpy.array
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models as tv_models
from torch.utils.data import DataLoader
from torchsummary import summary
import numpy as np
from scipy import io
import threading
import pickle
from pathlib import Path
import math
import os
import sys
from glob import glob
import re
import gc
import importlib
import time
import sklearn.preprocessing
import utils
from sklearn.utils import class_weight
import psutil
import models
# add configuration file
# Dictionary for model configuration
mdlParams = {}
# Import machine config
pc_cfg = importlib.import_module('pc_cfgs.'+sys.argv[1])
mdlParams.update(pc_cfg.mdlParams)
# Import model config
model_cfg = importlib.import_module('cfgs.'+sys.argv[2])
mdlParams_model = model_cfg.init(mdlParams)
mdlParams.update(mdlParams_model)
# Indicate training
mdlParams['trainSetState'] = 'train'
# Path name from filename
mdlParams['saveDirBase'] = mdlParams['saveDir'] + sys.argv[2]
# Set visible devices
if 'gpu' in sys.argv[3]:
mdlParams['numGPUs']= [[int(s) for s in re.findall(r'\d+',sys.argv[3])][-1]]
cuda_str = ""
for i in range(len(mdlParams['numGPUs'])):
cuda_str = cuda_str + str(mdlParams['numGPUs'][i])
if i != len(mdlParams['numGPUs'])-1:
cuda_str = cuda_str + ","
print("Devices to use:",cuda_str)
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_str
# Specify val set to train for
if len(sys.argv) > 4:
mdlParams['cv_subset'] = [int(s) for s in re.findall(r'\d+',sys.argv[4])]
print("Training validation sets",mdlParams['cv_subset'])
# Check if there is a validation set, if not, evaluate train error instead
if 'valIndCV' in mdlParams or 'valInd' in mdlParams:
eval_set = 'valInd'
print("Evaluating on validation set during training.")
else:
eval_set = 'trainInd'
print("No validation set, evaluating on training set during training.")
# Check if there were previous ones that have already been learned
prevFile = Path(mdlParams['saveDirBase'] + '/CV.pkl')
#print(prevFile)
if prevFile.exists():
print("Part of CV already done")
with open(mdlParams['saveDirBase'] + '/CV.pkl', 'rb') as f:
allData = pickle.load(f)
else:
allData = {}
allData['f1Best'] = {}
allData['sensBest'] = {}
allData['specBest'] = {}
allData['accBest'] = {}
allData['waccBest'] = {}
allData['aucBest'] = {}
allData['convergeTime'] = {}
allData['bestPred'] = {}
allData['targets'] = {}
# Take care of CV
if mdlParams.get('cv_subset',None) is not None:
cv_set = mdlParams['cv_subset']
else:
cv_set = range(mdlParams['numCV'])
for cv in cv_set:
# Check if this fold was already trained
already_trained = False
if 'valIndCV' in mdlParams:
mdlParams['saveDir'] = mdlParams['saveDirBase'] + '/CVSet' + str(cv)
if os.path.isdir(mdlParams['saveDirBase']):
if os.path.isdir(mdlParams['saveDir']):
all_max_iter = []
for name in os.listdir(mdlParams['saveDir']):
int_list = [int(s) for s in re.findall(r'\d+',name)]
if len(int_list) > 0:
all_max_iter.append(int_list[-1])
#if '-' + str(mdlParams['training_steps'])+ '.pt' in name:
# print("Fold %d already fully trained"%(cv))
# already_trained = True
all_max_iter = np.array(all_max_iter)
if len(all_max_iter) > 0 and np.max(all_max_iter) >= mdlParams['training_steps']:
print("Fold %d already fully trained with %d iterations"%(cv,np.max(all_max_iter)))
already_trained = True
if already_trained:
continue
print("CV set",cv)
# Reset model graph
importlib.reload(models)
#importlib.reload(torchvision)
# Collect model variables
modelVars = {}
#print("here")
modelVars['device'] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(modelVars['device'])
# Def current CV set
mdlParams['trainInd'] = mdlParams['trainIndCV'][cv]
if 'valIndCV' in mdlParams:
mdlParams['valInd'] = mdlParams['valIndCV'][cv]
# Def current path for saving stuff
if 'valIndCV' in mdlParams:
mdlParams['saveDir'] = mdlParams['saveDirBase'] + '/CVSet' + str(cv)
else:
mdlParams['saveDir'] = mdlParams['saveDirBase']
    # Create basepath if it doesn't exist yet
if not os.path.isdir(mdlParams['saveDirBase']):
os.mkdir(mdlParams['saveDirBase'])
# Check if there is something to load
load_old = 0
if os.path.isdir(mdlParams['saveDir']):
# Check if a checkpoint is in there
if len([name for name in os.listdir(mdlParams['saveDir'])]) > 0:
load_old = 1
print("Loading old model")
else:
            # Directory exists but contains no checkpoints; the cleanup below is effectively a no-op
filelist = [os.remove(mdlParams['saveDir'] +'/'+f) for f in os.listdir(mdlParams['saveDir'])]
else:
os.mkdir(mdlParams['saveDir'])
# Save training progress in here
save_dict = {}
save_dict['acc'] = []
save_dict['loss'] = []
save_dict['wacc'] = []
save_dict['auc'] = []
save_dict['sens'] = []
save_dict['spec'] = []
save_dict['f1'] = []
save_dict['step_num'] = []
if mdlParams['print_trainerr']:
save_dict_train = {}
save_dict_train['acc'] = []
save_dict_train['loss'] = []
save_dict_train['wacc'] = []
save_dict_train['auc'] = []
save_dict_train['sens'] = []
save_dict_train['spec'] = []
save_dict_train['f1'] = []
save_dict_train['step_num'] = []
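    # Metrics are accumulated in these dictionaries per evaluation step; the eval dict is written to
    # progression_<eval_set>.mat further below (the train dict presumably mirrors this when print_trainerr is set).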
# Potentially calculate setMean to subtract
if mdlParams['subtract_set_mean'] == 1:
mdlParams['setMean'] = np.mean(mdlParams['images_means'][mdlParams['trainInd'],:],(0))
print("Set Mean",mdlParams['setMean'])
# balance classes
if mdlParams['balance_classes'] < 3 or mdlParams['balance_classes'] == 7 or mdlParams['balance_classes'] == 11:
        class_weights = class_weight.compute_class_weight(class_weight='balanced',classes=np.unique(np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1)),y=np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1))
print("Current class weights",class_weights)
class_weights = class_weights*mdlParams['extra_fac']
print("Current class weights with extra",class_weights)
elif mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 4:
# Split training set by classes
not_one_hot = np.argmax(mdlParams['labels_array'],1)
mdlParams['class_indices'] = []
for i in range(mdlParams['numClasses']):
mdlParams['class_indices'].append(np.where(not_one_hot==i)[0])
            # Kick out non-training (validation) indices
mdlParams['class_indices'][i] = np.setdiff1d(mdlParams['class_indices'][i],mdlParams['valInd'])
#print("Class",i,mdlParams['class_indices'][i].shape,np.min(mdlParams['class_indices'][i]),np.max(mdlParams['class_indices'][i]),np.sum(mdlParams['labels_array'][np.int64(mdlParams['class_indices'][i]),:],0))
elif mdlParams['balance_classes'] == 5 or mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 13:
# Other class balancing loss
class_weights = 1.0/np.mean(mdlParams['labels_array'][mdlParams['trainInd'],:],axis=0)
print("Current class weights",class_weights)
if isinstance(mdlParams['extra_fac'], float):
class_weights = np.power(class_weights,mdlParams['extra_fac'])
else:
class_weights = class_weights*mdlParams['extra_fac']
print("Current class weights with extra",class_weights)
elif mdlParams['balance_classes'] == 9:
        # Only use official indices for the calculation
print("Balance 9")
indices_ham = mdlParams['trainInd'][mdlParams['trainInd'] < 25331]
if mdlParams['numClasses'] == 9:
class_weights_ = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:8],axis=0)
#print("class before",class_weights_)
class_weights = np.zeros([mdlParams['numClasses']])
class_weights[:8] = class_weights_
class_weights[-1] = np.max(class_weights_)
else:
class_weights = 1.0/np.mean(mdlParams['labels_array'][indices_ham,:],axis=0)
print("Current class weights",class_weights)
if isinstance(mdlParams['extra_fac'], float):
class_weights = np.power(class_weights,mdlParams['extra_fac'])
else:
class_weights = class_weights*mdlParams['extra_fac']
print("Current class weights with extra",class_weights)
# Meta scaler
if mdlParams.get('meta_features',None) is not None and mdlParams['scale_features']:
mdlParams['feature_scaler_meta'] = sklearn.preprocessing.StandardScaler().fit(mdlParams['meta_array'][mdlParams['trainInd'],:])
print("scaler mean",mdlParams['feature_scaler_meta'].mean_,"var",mdlParams['feature_scaler_meta'].var_)
# Set up dataloaders
num_workers = psutil.cpu_count(logical=False)
# For train
dataset_train = utils.ISICDataset(mdlParams, 'trainInd')
# For val
dataset_val = utils.ISICDataset(mdlParams, 'valInd')
if mdlParams['multiCropEval'] > 0:
modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['multiCropEval'], shuffle=False, num_workers=num_workers, pin_memory=True)
else:
modelVars['dataloader_valInd'] = DataLoader(dataset_val, batch_size=mdlParams['batchSize'], shuffle=False, num_workers=num_workers, pin_memory=True)
if mdlParams['balance_classes'] == 12 or mdlParams['balance_classes'] == 13:
#print(np.argmax(mdlParams['labels_array'][mdlParams['trainInd'],:],1).size(0))
strat_sampler = utils.StratifiedSampler(mdlParams)
modelVars['dataloader_trainInd'] = DataLoader(dataset_train, batch_size=mdlParams['batchSize'], sampler=strat_sampler, num_workers=num_workers, pin_memory=True)
else:
modelVars['dataloader_trainInd'] = DataLoader(dataset_train, batch_size=mdlParams['batchSize'], shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
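    # Validation batches are sized to multiCropEval when multi-crop evaluation is active (presumably all
    # crops of one image per batch); training uses a stratified sampler for modes 12/13, plain shuffling otherwise.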
#print("Setdiff",np.setdiff1d(mdlParams['trainInd'],mdlParams['trainInd']))
# Define model
modelVars['model'] = models.getModel(mdlParams)()
# Load trained model
if mdlParams.get('meta_features',None) is not None:
# Find best checkpoint
files = glob(mdlParams['model_load_path'] + '/CVSet' + str(cv) + '/*')
global_steps = np.zeros([len(files)])
#print("files",files)
for i in range(len(files)):
            # Parse checkpoint filenames to find the highest global step
if 'best' not in files[i]:
continue
if 'checkpoint' not in files[i]:
continue
# Extract global step
nums = [int(s) for s in re.findall(r'\d+',files[i])]
global_steps[i] = nums[-1]
# Create path with maximum global step found
chkPath = mdlParams['model_load_path'] + '/CVSet' + str(cv) + '/checkpoint_best-' + str(int(np.max(global_steps))) + '.pt'
print("Restoring lesion-trained CNN for meta data training: ",chkPath)
# Load
state = torch.load(chkPath)
# Initialize model
curr_model_dict = modelVars['model'].state_dict()
for name, param in state['state_dict'].items():
#print(name,param.shape)
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
if curr_model_dict[name].shape == param.shape:
curr_model_dict[name].copy_(param)
else:
print("not restored",name,param.shape)
#modelVars['model'].load_state_dict(state['state_dict'])
# Original input size
#if 'Dense' not in mdlParams['model_type']:
# print("Original input size",modelVars['model'].input_size)
#print(modelVars['model'])
if 'Dense' in mdlParams['model_type']:
if mdlParams['input_size'][0] != 224:
modelVars['model'] = utils.modify_densenet_avg_pool(modelVars['model'])
#print(modelVars['model'])
num_ftrs = modelVars['model'].classifier.in_features
modelVars['model'].classifier = nn.Linear(num_ftrs, mdlParams['numClasses'])
#print(modelVars['model'])
elif 'dpn' in mdlParams['model_type']:
num_ftrs = modelVars['model'].classifier.in_channels
modelVars['model'].classifier = nn.Conv2d(num_ftrs,mdlParams['numClasses'],[1,1])
#modelVars['model'].add_module('real_classifier',nn.Linear(num_ftrs, mdlParams['numClasses']))
#print(modelVars['model'])
elif 'efficient' in mdlParams['model_type']:
        # Replace the final fully connected layer to match numClasses
num_ftrs = modelVars['model']._fc.in_features
modelVars['model']._fc = nn.Linear(num_ftrs, mdlParams['numClasses'])
elif 'wsl' in mdlParams['model_type']:
num_ftrs = modelVars['model'].fc.in_features
modelVars['model'].fc = nn.Linear(num_ftrs, mdlParams['numClasses'])
else:
num_ftrs = modelVars['model'].last_linear.in_features
modelVars['model'].last_linear = nn.Linear(num_ftrs, mdlParams['numClasses'])
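    # Each branch above replaces the pretrained classifier head with a fresh layer producing numClasses
    # outputs; only the attribute name (classifier, _fc, fc, last_linear) differs between architectures.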
# Take care of meta case
if mdlParams.get('meta_features',None) is not None:
# freeze cnn first
if mdlParams['freeze_cnn']:
# deactivate all
for param in modelVars['model'].parameters():
param.requires_grad = False
if 'efficient' in mdlParams['model_type']:
# Activate fc
for param in modelVars['model']._fc.parameters():
param.requires_grad = True
elif 'wsl' in mdlParams['model_type']:
# Activate fc
for param in modelVars['model'].fc.parameters():
param.requires_grad = True
else:
# Activate fc
for param in modelVars['model'].last_linear.parameters():
param.requires_grad = True
else:
# mark cnn parameters
for param in modelVars['model'].parameters():
param.is_cnn_param = True
# unmark fc
for param in modelVars['model']._fc.parameters():
param.is_cnn_param = False
# modify model
modelVars['model'] = models.modify_meta(mdlParams,modelVars['model'])
# Mark new parameters
for param in modelVars['model'].parameters():
if not hasattr(param, 'is_cnn_param'):
param.is_cnn_param = False
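        # The is_cnn_param flags are consumed by the optimizer setup below: pretrained CNN weights are
        # trained with 'learning_rate', the newly added meta layers with the separate 'learning_rate_meta'.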
# multi gpu support
if len(mdlParams['numGPUs']) > 1:
modelVars['model'] = nn.DataParallel(modelVars['model'])
modelVars['model'] = modelVars['model'].cuda()
#summary(modelVars['model'], modelVars['model'].input_size)# (mdlParams['input_size'][2], mdlParams['input_size'][0], mdlParams['input_size'][1]))
# Loss, with class weighting
if mdlParams.get('focal_loss',False):
modelVars['criterion'] = utils.FocalLoss(alpha=class_weights.tolist())
elif mdlParams['balance_classes'] == 3 or mdlParams['balance_classes'] == 0 or mdlParams['balance_classes'] == 12:
modelVars['criterion'] = nn.CrossEntropyLoss()
elif mdlParams['balance_classes'] == 8:
        modelVars['criterion'] = nn.CrossEntropyLoss(reduction='none')
elif mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 7:
        modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)),reduction='none')
elif mdlParams['balance_classes'] == 10:
modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'])
elif mdlParams['balance_classes'] == 11:
modelVars['criterion'] = utils.FocalLoss(mdlParams['numClasses'],alpha=torch.cuda.FloatTensor(class_weights.astype(np.float32)))
else:
modelVars['criterion'] = nn.CrossEntropyLoss(weight=torch.cuda.FloatTensor(class_weights.astype(np.float32)))
if mdlParams.get('meta_features',None) is not None:
if mdlParams['freeze_cnn']:
modelVars['optimizer'] = optim.Adam(filter(lambda p: p.requires_grad, modelVars['model'].parameters()), lr=mdlParams['learning_rate_meta'])
# sanity check
            for name, param in modelVars['model'].named_parameters():
                if param.requires_grad:
                    print(name, param.shape)
else:
modelVars['optimizer'] = optim.Adam([
{'params': filter(lambda p: not p.is_cnn_param, modelVars['model'].parameters()), 'lr': mdlParams['learning_rate_meta']},
{'params': filter(lambda p: p.is_cnn_param, modelVars['model'].parameters()), 'lr': mdlParams['learning_rate']}
], lr=mdlParams['learning_rate'])
else:
modelVars['optimizer'] = optim.Adam(modelVars['model'].parameters(), lr=mdlParams['learning_rate'])
    # Decay LR by a factor of 1/LRstep every lowerLRAfter epochs
modelVars['scheduler'] = lr_scheduler.StepLR(modelVars['optimizer'], step_size=mdlParams['lowerLRAfter'], gamma=1/np.float32(mdlParams['LRstep']))
# Define softmax
modelVars['softmax'] = nn.Softmax(dim=1)
# Set up training
# loading from checkpoint
if load_old:
        # Find the latest regular checkpoint (ignore the 'best' checkpoints)
files = glob(mdlParams['saveDir']+'/*')
global_steps = np.zeros([len(files)])
for i in range(len(files)):
            # Parse checkpoint filenames to find the highest global step
if 'best' in files[i]:
continue
if 'checkpoint-' not in files[i]:
continue
# Extract global step
nums = [int(s) for s in re.findall(r'\d+',files[i])]
global_steps[i] = nums[-1]
# Create path with maximum global step found
chkPath = mdlParams['saveDir'] + '/checkpoint-' + str(int(np.max(global_steps))) + '.pt'
print("Restoring: ",chkPath)
# Load
state = torch.load(chkPath)
# Initialize model and optimizer
modelVars['model'].load_state_dict(state['state_dict'])
modelVars['optimizer'].load_state_dict(state['optimizer'])
start_epoch = state['epoch']+1
mdlParams['valBest'] = state.get('valBest',1000)
mdlParams['lastBestInd'] = state.get('lastBestInd',int(np.max(global_steps)))
else:
start_epoch = 1
mdlParams['lastBestInd'] = -1
# Track metrics for saving best model
mdlParams['valBest'] = 1000
# Num batches
numBatchesTrain = int(math.floor(len(mdlParams['trainInd'])/mdlParams['batchSize']))
print("Train batches",numBatchesTrain)
# Run training
start_time = time.time()
print("Start training...")
for step in range(start_epoch, mdlParams['training_steps']+1):
# One Epoch of training
if step >= mdlParams['lowerLRat']-mdlParams['lowerLRAfter']:
modelVars['scheduler'].step()
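        # The StepLR scheduler only starts stepping once the epoch reaches lowerLRat - lowerLRAfter, so
        # with step_size == lowerLRAfter the first decay takes effect around epoch lowerLRat.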
modelVars['model'].train()
for j, (inputs, labels, indices) in enumerate(modelVars['dataloader_trainInd']):
#print(indices)
#t_load = time.time()
# Run optimization
if mdlParams.get('meta_features',None) is not None:
inputs[0] = inputs[0].cuda()
inputs[1] = inputs[1].cuda()
else:
inputs = inputs.cuda()
#print(inputs.shape)
labels = labels.cuda()
# zero the parameter gradients
modelVars['optimizer'].zero_grad()
# forward
            # track history only if in train
with torch.set_grad_enabled(True):
if mdlParams.get('aux_classifier',False):
outputs, outputs_aux = modelVars['model'](inputs)
loss1 = modelVars['criterion'](outputs, labels)
labels_aux = labels.repeat(mdlParams['multiCropTrain'])
loss2 = modelVars['criterion'](outputs_aux, labels_aux)
loss = loss1 + mdlParams['aux_classifier_loss_fac']*loss2
else:
#print("load",time.time()-t_load)
#t_fwd = time.time()
outputs = modelVars['model'](inputs)
#print("forward",time.time()-t_fwd)
#t_bwd = time.time()
loss = modelVars['criterion'](outputs, labels)
                    # Optionally re-weight the loss per example based on its dataset index
if mdlParams['balance_classes'] == 6 or mdlParams['balance_classes'] == 7 or mdlParams['balance_classes'] == 8:
#loss = loss.cpu()
indices = indices.numpy()
loss = loss*torch.cuda.FloatTensor(mdlParams['loss_fac_per_example'][indices].astype(np.float32))
loss = torch.mean(loss)
#loss = loss.cuda()
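                    # loss_fac_per_example holds an extra per-sample weight looked up by dataset index;
                    # averaging afterwards restores a scalar loss for backpropagation.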
# backward + optimize only if in training phase
loss.backward()
modelVars['optimizer'].step()
#print("backward",time.time()-t_bwd)
if step % mdlParams['display_step'] == 0 or step == 1:
# Calculate evaluation metrics
if mdlParams['classification']:
# Adjust model state
modelVars['model'].eval()
# Get metrics
loss, accuracy, sensitivity, specificity, conf_matrix, f1, auc, waccuracy, predictions, targets, _ = utils.getErrClassification_mgpu(mdlParams, eval_set, modelVars)
# Save in mat
save_dict['loss'].append(loss)
save_dict['acc'].append(accuracy)
save_dict['wacc'].append(waccuracy)
save_dict['auc'].append(auc)
save_dict['sens'].append(sensitivity)
save_dict['spec'].append(specificity)
save_dict['f1'].append(f1)
save_dict['step_num'].append(step)
if os.path.isfile(mdlParams['saveDir'] + '/progression_'+eval_set+'.mat'):
os.remove(mdlParams['saveDir'] + '/progression_'+eval_set+'.mat')
io.savemat(mdlParams['saveDir'] + '/progression_'+eval_set+'.mat',save_dict)
eval_metric = -np.mean(waccuracy)
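                # Lower is better: eval_metric is the negative mean per-class (weighted) accuracy,
                # which is why valBest is initialized to 1000.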
# Check if we have a new best value
if eval_metric < mdlParams['valBest']:
mdlParams['valBest'] = eval_metric
if mdlParams['classification']:
allData['f1Best'][cv] = f1
allData['sensBest'][cv] = sensitivity
allData['specBest'][cv] = specificity
allData['accBest'][cv] = accuracy
allData['waccBest'][cv] = waccuracy
allData['aucBest'][cv] = auc
oldBestInd = mdlParams['lastBestInd']
mdlParams['lastBestInd'] = step
allData['convergeTime'][cv] = step
# Save best predictions
allData['bestPred'][cv] = predictions
allData['targets'][cv] = targets
# Write to File
with open(mdlParams['saveDirBase'] + '/CV.pkl', 'wb') as f:
pickle.dump(allData, f, pickle.HIGHEST_PROTOCOL)
                # Delete the previous best model
if os.path.isfile(mdlParams['saveDir'] + '/checkpoint_best-' + str(oldBestInd) + '.pt'):
os.remove(mdlParams['saveDir'] + '/checkpoint_best-' + str(oldBestInd) + '.pt')
# Save currently best model
state = {'epoch': step, 'valBest': mdlParams['valBest'], 'lastBestInd': mdlParams['lastBestInd'], 'state_dict': modelVars['model'].state_dict(),'optimizer': modelVars['optimizer'].state_dict()}
torch.save(state, mdlParams['saveDir'] + '/checkpoint_best-' + str(step) + '.pt')
            # If it is not a new best, the current model is still saved below and the previous (non-best) checkpoint is deleted
# Save current model
state = {'epoch': step, 'valBest': mdlParams['valBest'], 'lastBestInd': mdlParams['lastBestInd'], 'state_dict': modelVars['model'].state_dict(),'optimizer': modelVars['optimizer'].state_dict()}
torch.save(state, mdlParams['saveDir'] + '/checkpoint-' + str(step) + '.pt')
# Delete last one
if step == mdlParams['display_step']:
lastInd = 1
else:
lastInd = step-mdlParams['display_step']
if os.path.isfile(mdlParams['saveDir'] + '/checkpoint-' + str(lastInd) + '.pt'):
os.remove(mdlParams['saveDir'] + '/checkpoint-' + str(lastInd) + '.pt')
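            # Checkpoint rotation: only the most recent periodic checkpoint stays on disk, while the
            # best checkpoint is saved and replaced separately above.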
# Duration so far
duration = time.time() - start_time
# Print
if mdlParams['classification']:
print("\n")
print("Config:",sys.argv[2])
print('Fold: %d Epoch: %d/%d (%d h %d m %d s)' % (cv,step,mdlParams['training_steps'], int(duration/3600), int(np.mod(duration,3600)/60), int(np.mod(
|
np.mod(duration,3600)
|
numpy.mod
|