# -*- coding: utf-8 -*-
"""Minimum_External_Constraints.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1soDz7aUjticLOAa_JMQxQ3GNZ22AWNjI
# Definition of the network properties.
"""
#Import modules.
import numpy as np
import sympy as sp
import sys
from sympy import symbols, diff
from numpy.linalg import multi_dot
import math
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
n = 16 #number of measurements.
v = 5 #number of network nodes.
d = 3 #datum defect: position and orientation = 2+1 = 3
m = 2*v-d #number of unknown parameters.
r = n - m #degrees of freedom.
#Set numpy's print options.
np.set_printoptions(suppress=True,threshold=np.inf,linewidth=300,precision=15)
print("The design matrix A has dimensions of {}x{} elements".format(n,m))
print("The weight matrix P has dimensions of {}x{} elements".format(n,n))
#We assume that:
# 0 = Α
# 1 = B = constant and a_12 = a_BC = constant
# 2 = C
# 3 = D
# 4 = E
#Matrices of measurements and standard errors.
l = np.array([64.1902,50.8882,32.4675,60.7616,26.2679,43.1958,92.6467,29.5762,106.2276,116.6508,112.9705,64.1595,490.249,220.725,791.552,659.535]) #array which includes all measurements. (angles+lengths)
noa = 12 #number of angles
sigma_1 = np.array([25]*noa) #the standard error is in cc!
sigma_2 = np.array([0.012]*(l.shape[0]-noa)) #the standard error is in meters!
sigma = np.concatenate((sigma_1,sigma_2))
#Temporary coordinates of network nodes.
x = np.array([1586.537,2075.094,2222.679,1449.130,1688.320])
y = np.array([937.235,896.541,354.801,522.664,741.395])
#Temporary distance S12.
S12 = np.sqrt((x[2]-x[1])**2+(y[2]-y[1])**2)
#Matrix of unknown parameters X : xA,yA,xD,yD,xE,yE,SBC
X=np.array([1586.537, 937.235, 1449.13 , 522.664, 1688.320, 741.395, S12])
X
#Create the necessary variables.
b_jik, a_ik, a_ij = symbols("b_jik, a_ik, a_ij ")
y_i,y_j,y_k = symbols("y_i,y_j,y_k")
x_i,x_j,x_k = symbols("x_i,x_j,x_k")
S_ij,S_ik = symbols("S_ij,S_ik")
dx_i,dx_j,dx_k = symbols("dx_i,dx_j,dx_k")
dy_i,dy_j,dy_k = symbols("dy_i,dy_j,dy_k")
#Auxiliary indices.
jj = np.array([2,5,1,5,4,5,3,5,1,4,3,2,2,5,3,3])-1
ii = np.array([1,1,4,4,3,3,2,2,5,5,5,5,1,1,4,5])-1
kk = np.array([5,4,5,3,5,2,5,1,2,1,4,3])-1
#Linearized angle equations.
angle_eq = (((y_j-y_i)/S_ij**2 - (y_k-y_i)/S_ik**2)*dx_i + ((x_k-x_i)/S_ik**2 - (x_j-x_i)/S_ij**2)*dy_i
- dx_j*(y_j-y_i)/S_ij**2 + dy_j*(x_j-x_i)/S_ij**2 + dx_k*(y_k-y_i)/S_ik**2 - dy_k*(x_k-x_i)/S_ik**2
)
angle_eq
#Linearized distance equations.
dist_eq = -(x_j-x_i)/S_ij*dx_i-dy_i*(y_j-y_i)/S_ij+(x_j-x_i)/S_ij*dx_j+dy_j*(y_j-y_i)/S_ij
dist_eq
def finda(dx,dy,a):
'''Convert the principal arctan value a (in radians) to the full-circle azimuth in grads, based on the signs of dx and dy.'''
if dx>0:
if dy>0:return 200*a/math.pi
if dy==0:return 100
if dy<0:return 200+200*a/math.pi
if dx<0:
if dy>0:return 400+200*a/math.pi
if dy==0:return 300
if dy<0:return 200+200*a/math.pi
if dx==0:
if dy>0:return 0
if dy==0:return print("Division by 0!")
if dy<0:return 200
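#Illustrative check (values assumed, not taken from the survey data):
#for dx=dy=1, finda(1.0, 1.0, math.atan(1.0/1.0)) returns 50.0 grads (NE direction).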
"""# Creation of design matrix Α."""
#Create auxiliary vector.
dx_0,dy_0,dx_1,dy_1,dx_2,dy_2,dx_3,dy_3,dx_4,dy_4,dS_12 = symbols('dx_0,dy_0,dx_1,dy_1,dx_2,dy_2,dx_3,dy_3,dx_4,dy_4,dS_12')
DX = [dx_0,dy_0,
dx_3,dy_3,
dx_4,dy_4,
dS_12]
DX
#Construction of the 16 observation equations.
eqs = np.empty(shape=(n,), dtype=object)
for i in range(eqs.shape[0]):
Sij = np.sqrt((x[jj[i]]-x[ii[i]])**2+(y[jj[i]]-y[ii[i]])**2)
if i<=noa-1:
Sik = np.sqrt((x[kk[i]]-x[ii[i]])**2+(y[kk[i]]-y[ii[i]])**2)
eqs[i]=angle_eq.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]]),(S_ij,Sij),(S_ik,Sik),
(dx_i,symbols('dx_{}'.format(ii[i]))),
(dx_j,symbols('dx_{}'.format(jj[i]))),
(dx_k,symbols('dx_{}'.format(kk[i]))),
(dy_i,symbols('dy_{}'.format(ii[i]))),
(dy_j,symbols('dy_{}'.format(jj[i]))),
(dy_k,symbols('dy_{}'.format(kk[i])))
])*636620
else:
eqs[i] = dist_eq.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(S_ij,Sij),
(dx_i,symbols('dx_{}'.format(ii[i]))),
(dx_j,symbols('dx_{}'.format(jj[i]))),
(dy_i,symbols('dy_{}'.format(ii[i]))),
(dy_j,symbols('dy_{}'.format(jj[i]))),
])
#Calculate the true value of the azimuth angle a_BC = a_12.
a = math.atan((x[2]-x[1])/(y[2]-y[1]))
a_12 = finda(x[2]-x[1],y[2]-y[1],a)
#Replace the variables with their true values.
for i,eq in enumerate(eqs):
eqs[i]=eq.subs([(dx_1,0),(dy_1,0)]) #Node B is held fixed, so its corrections vanish.
eqs[i]=eqs[i].subs([(dx_2,dS_12*math.sin(a_12*math.pi/200)),(dy_2,dS_12*math.cos(a_12*math.pi/200))])
#Differentiation of each equation with each one of the unknown parameters and creation of design matrix Α.
A = np.zeros(shape=(n,m))
for i in range(0,n):
for j in range(0,m):
A[i,j] = diff(eqs[i],DX[j])
print('Array A is:\n')
print(A)
"""# Creation of matrix of the calculated values. """
#Creation of matrix of the calculated values.
delta_l = np.zeros(shape=(n,))
for i in range(n):
if i<=noa-1: #If it's an angle equation.
div1 = (x_k-x_i)/(y_k-y_i)
div2 = (x_j-x_i)/(y_j-y_i)
d1=float(div1.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
d2=float(div2.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aik_ = np.arctan(d1)
aij_ = np.arctan(d2)
#Azimuth angle calculation.
deltaX = x_k-x_i
deltaY = y_k-y_i
deltaX = float(deltaX.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
deltaY = float(deltaY.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aik=finda(deltaX,deltaY,aik_)
deltaX = x_j-x_i
deltaY = y_j-y_i
deltaX = float(deltaX.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
deltaY = float(deltaY.subs([(x_i,x[ii[i]]),(x_j,x[jj[i]]),(x_k,x[kk[i]]),(y_i,y[ii[i]]),(y_j,y[jj[i]]),(y_k,y[kk[i]])]))
aij=finda(deltaX,deltaY,aij_)
delta_l[i]=(l[i]-aik+aij) #in grads
while delta_l[i]>399: delta_l[i]-=400
delta_l[i]=delta_l[i]*10000 #conversion from grads to cc.
else: #If it's a distance equation.
Sij = np.sqrt((x[jj[i]]-x[ii[i]])**2+(y[jj[i]]-y[ii[i]])**2) #distance calculation.
delta_l[i]=l[i]-Sij #in meters
delta_l
"""
# Creation of the weight matrix P."""
#Define the a-priori standard error.
sigma_0 = 1
I = np.identity(n)
P = I*(sigma_0/sigma)**2
print('Array P is:\n')
print(P)
"""#System solution."""
#Calculate the new array with the adjusted coordinate values of network nodes A,D,E and distance SBC.
delta_x=np.dot(np.linalg.inv(multi_dot([A.T,P,A])),multi_dot([A.T,P,delta_l]))
X_hat = X+delta_x
X_hat
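#Note: the solution above is the standard weighted least-squares estimate,
#delta_x = (A^T P A)^(-1) A^T P delta_l, and X_hat = X + delta_x.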
#Points B=1 and C=2.
x_1 = x[1]
y_1 = y[1]
#Calculate the final coordinates of the node C=2.
#With:
# x_2 = X_hat[-1]*math.sin(a_12*math.pi/200) + x[1]
# y_2 = X_hat[-1]*math.cos(a_12*math.pi/200) + y[1]
#Or:
x_2 = delta_x[-1]*math.sin(a_12*math.pi/200) + x[2]
y_2 = delta_x[-1]*math.cos(a_12*math.pi/200) + y[2]
#Create a new array with the adjusted coordinate values of all network nodes.
X_hat_extended = np.array([X_hat[0], X_hat[1], x_1,y_1,x_2,y_2, X_hat[2], X_hat[3], X_hat[4], X_hat[5]])
X_hat_extended
"""#Calculation of the a-priori variance-covariance matrix.
"""
#Define the a-priori standard error.
sigma_0 = 1
#Calculate the a-priori variance-covariance matrix.
V_x_hat = (sigma_0**2)*np.linalg.inv(multi_dot([A.T,P,A]))
V_x_hat
"""#Computation of the a posteriori standard error."""
#Calculation of the a-posteriori standard error.
u = np.dot(A,delta_x)-delta_l
sigma_0_hat = np.sqrt(multi_dot([u.T,P,u])/(n-m))
sigma_0_hat
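#Note: sigma_0_hat is the a-posteriori reference standard error,
#sigma_0_hat^2 = (u^T P u) / (n - m), with u the residuals computed above.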
#Create the necessary auxiliary variables and vectors.
x_0,y_0,x_1,y_1,x_2,y_2,x_3,y_3,x_4,y_4,S_12 = symbols('x_0,y_0,x_1,y_1,x_2,y_2,x_3,y_3,x_4,y_4,S_12')
DX_1 = np.array([x_0,y_0,x_3,y_3,x_4,y_4,S_12*math.sin(a_12*math.pi/200),S_12*math.cos(a_12*math.pi/200),x_1,y_1])
DX_2 = np.array([x_0,y_0,x_3,y_3,x_4,y_4,S_12])
#Calculation of the necessary Jacobian matrix for the propagation of uncertainty between the two vectors DX_1, DX_2.
J = np.zeros(shape=(2*v,V_x_hat.shape[0]))
for i in range(0,2*v):
for j in range(V_x_hat.shape[0]):
J[i,j] = diff(DX_1[i],DX_2[j])
print('Array J is:\n')
print(J)
#Calculation of the a-priori variance-covariance matrix of the coordinates of all network nodes.
V_x_hat_extended = multi_dot([J,V_x_hat,J.T])
V_x_hat_extended_df = pd.DataFrame(V_x_hat_extended,index=["0","00","3","33","4","44","2","22","1","11"], columns=["0","00","3","33","4","44","2","22","1","11"])
V_x_hat_extended_df=V_x_hat_extended_df.sort_index()
V_x_hat_extended_df=V_x_hat_extended_df.reindex(sorted(V_x_hat_extended_df.columns), axis=1)
V_x_hat_extended = np.array(V_x_hat_extended_df)
V_x_hat_extended
"""#Graphical representation of the error ellipses."""
#Final coordinates of network nodes.
x_hat = np.zeros(shape=(v,))
y_hat = np.zeros(shape=(v,))
j=0
for i in range(0,v):
x_hat[i] = X_hat_extended[j]
y_hat[i] = X_hat_extended[j+1]
if j%2==0:j+=2
#Network edges.
lines_x = np.concatenate((x_hat,np.array([x_hat[0],x_hat[4],x_hat[1],x_hat[4],x_hat[2],x_hat[4],x_hat[3],x_hat[4],x_hat[0],x_hat[3]])))
lines_y = np.concatenate((y_hat,np.array([y_hat[0],y_hat[4],y_hat[1],y_hat[4],y_hat[2],y_hat[4],y_hat[3],y_hat[4],y_hat[0],y_hat[3]])))
X_hat_extended
def auxfunc(V_xy):
'''This function takes as argument the variance-covariance submatrix of the corresponding
network node or edge, and as output it returns the absolute or relative ellipse properties.'''
width, height, angle =0,0,0
if (V_xy[0,1]+V_xy[0,0]-V_xy[1,1])!=0:
#Define equations for the calculation of the semi-major axis, the semi-minor axis and the orientation of the error ellipse.
sigma_x_sq,sigma_y_sq,sigma_xy = symbols('sigma_x_sq,sigma_y_sq,sigma_xy')
sigma_max_squared = ((sigma_x_sq+sigma_y_sq)+((sigma_x_sq-sigma_y_sq)**2+4*sigma_xy**2)**0.5)/2
sigma_min_squared = ((sigma_x_sq+sigma_y_sq)-((sigma_x_sq-sigma_y_sq)**2+4*sigma_xy**2)**0.5)/2
tan_2theta = 2*sigma_xy/(sigma_x_sq-sigma_y_sq)
#Calculate the length of the semi-major and the semi-minor ellipse axes.
sigma_max_squared = sigma_max_squared.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
sigma_min_squared = sigma_min_squared.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
sigma_u=sigma_max_squared**0.5
sigma_v=sigma_min_squared**0.5
width = 2*sigma_u
height = 2*sigma_v
#Calculate the orientation of the error ellipse.
tan_2theta = tan_2theta.subs([(sigma_x_sq,V_xy[0,0]),(sigma_y_sq,V_xy[1,1]),(sigma_xy,V_xy[0,1])])
theta = math.atan(tan_2theta)*180/(2*math.pi)
#Extract the variances and the covariances from the input matrix.
sigma_x=V_xy[0,0]**0.5
sigma_y=V_xy[1,1]**0.5
sigma_xy=V_xy[0,1]
#Angle investigation.
if sigma_x>sigma_y:
if sigma_xy>0:angle=theta
if sigma_xy<0:angle=theta+180
if sigma_x<sigma_y:
if sigma_xy>0:angle=theta+90
if sigma_xy<0:angle=theta+90
if sigma_x==sigma_y:
if sigma_xy>0:angle=45
if sigma_xy<0:angle=135
return width, height, angle
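#The semi-axes and orientation above follow the usual error-ellipse relations,
#sigma_max/min^2 = ((s_x^2 + s_y^2) +/- sqrt((s_x^2 - s_y^2)^2 + 4*s_xy^2)) / 2
#and tan(2*theta) = 2*s_xy / (s_x^2 - s_y^2), as encoded symbolically in auxfunc.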
def ellipse_args(netnodecode1, netnodecode2=9999, V_x_hat=V_x_hat_extended):
'''This function takes as arguments the specific network node for the calculation of the absolute error ellipse properties, or
the two network nodes of the corresponding network edge for which we want to calculate the relative error ellipse arguments.
Additionally, this function takes as argument the variance-covariance matrix of the coordinates of all network nodes. As output,
it extracts the absolute or relative ellipse properties.'''
netnodecode=netnodecode1
if netnodecode < 5:
if netnodecode2 == 9999: #If we want to calculate the absolute error ellipse.
#Extract the variance-covariance submatrix of the given network node.
V_xy = V_x_hat[2*netnodecode:2*netnodecode+2,2*netnodecode:2*netnodecode+2]
width, height, angle = auxfunc(V_xy)
return width, height, angle
elif netnodecode2 < 5: #If we want to calculate the relative error ellipse.
Jrij = np.array([[-1,0,1,0],[0,-1,0,1]])
Vrig = np.ones(shape=(4,4))
#Extract the variance-covariance submatrix of the given network edge.
V_xy1 = V_x_hat[2*netnodecode1:2*netnodecode1+2,2*netnodecode1:2*netnodecode1+2]
V_xy2 = V_x_hat[2*netnodecode2:2*netnodecode2+2,2*netnodecode2:2*netnodecode2+2]
V1 = V_x_hat[2*netnodecode1:2*netnodecode1+2,2*netnodecode2:2*netnodecode2+2]
V2 = V_x_hat[2*netnodecode2:2*netnodecode2+2,2*netnodecode1:2*netnodecode1+2]
Vrig=np.asarray(np.bmat([[V_xy1, V1], [V2, V_xy2]]))
VDrij = multi_dot([Jrij,Vrig,Jrij.T])
width, height, angle = auxfunc(VDrij)
return width, height, angle
else: return print("There is no network node with the given code name!")
#Graphical representation of the error ellipses.
fig=plt.figure(figsize=(15,10))
ax = fig.add_subplot(111, aspect='equal')
#Define plot options.
scalefactor = 1000
ld=1.5
ellcolor = 'red'
ellzorder = 4
pointzorder = 5
#Plot network edges.
plt.plot(lines_x, lines_y, linewidth=0.6, markersize=8, alpha=1)
#Plot network nodes.
plt.scatter(x_hat, y_hat, marker="+", zorder=pointzorder, s=70, c='black')
#Plot nodenames.
nodenames=["A","B","C","D","E"]
for i, txt in enumerate(nodenames):
ax.annotate(txt, (x_hat[i], y_hat[i]), xytext=(x_hat[i]+5,y_hat[i]+5), zorder=pointzorder)
#Absolute error ellipses of all network nodes.
width, height, angle = ellipse_args(0)
ax.add_artist(Ellipse(xy=X_hat_extended[:2], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(1)
ax.add_artist(Ellipse(xy=X_hat_extended[2:4], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(2)
ax.add_artist(Ellipse(xy=X_hat_extended[4:6], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(3)
ax.add_artist(Ellipse(xy=X_hat_extended[6:8], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
width, height, angle = ellipse_args(4)
ax.add_artist(Ellipse(xy=X_hat_extended[8:], width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Relative error ellipses between each station pair.
#Edge AC.
width, height, angle = ellipse_args(0,2)
xy=(X_hat_extended[:2]+X_hat_extended[4:6])/2
xx = np.array([x_hat[0],x_hat[2]])
yy = np.array([y_hat[0],y_hat[2]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[0]+x_hat[2])/2,(y_hat[0]+y_hat[2])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Edge AB.
width, height, angle = ellipse_args(0,1)
xy=(X_hat_extended[:2]+X_hat_extended[2:4])/2
xx = np.array([x_hat[0],x_hat[1]])
yy = np.array([y_hat[0],y_hat[1]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[1]+x_hat[0])/2,(y_hat[1]+y_hat[0])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Edge DB.
width, height, angle = ellipse_args(3,1)
xy=(X_hat_extended[6:8]+X_hat_extended[2:4])/2
xx = np.array([x_hat[3],x_hat[1]])
yy = np.array([y_hat[3],y_hat[1]])
plt.plot(xx, yy, "k--", linewidth=2, markersize=4)
plt.plot((x_hat[3]+x_hat[1])/2,(y_hat[3]+y_hat[1])/2, marker=".", color='red', mec="black", markersize=12, zorder=pointzorder)
ax.add_artist(Ellipse(xy=xy, width=width*scalefactor, height=height*scalefactor, angle=angle, fill=None, alpha=1, linewidth=ld, color=ellcolor, zorder=ellzorder))
#Edge EC.
width, height, angle = ellipse_args(4,2)
xy=(X_hat_extended[8:]+X_hat_extended[4:6])/2
xx = np.array([x_hat[4],x_hat[2]])
#! /usr/bin/env python
"""
Forward model matched filter relying on either KLIP (Soummer et al. 2012;
Pueyo 2016) or LOCI (Lafreniere et al. 2007b) for the PSF reference
approximation. The original concept of matched filter applied to KLIP has been
first proposed in Ruffio et al. (2019) and then adapted in Dahlqvist et al.
(2021) to use the LOCI framework. For both PSF-subtraction techniques, a
forward model of the PSF is computed for each pixel contained in the field of
view and each frame to account for the over-subtraction and self-subtraction
of potential planetary signal due to the reference PSF subtraction. The
obtained model is then compared to the pixels intensities within each frame of
the residual cube. The SNR associated with each pixel contained in the field of
view, as well as its estimated contrast, is then obtained via a
Gaussian maximum likelihood approach.
"""
__author__ = '<NAME>'
__all__ = ['fmmf']
import numpy as np
import numpy.linalg as la
from skimage.draw import disk
from ..var import get_annulus_segments, frame_center
from ..preproc import frame_crop,cube_crop_frames,cube_derotate
from ..config.utils_conf import pool_map, iterable
from ..fm import cube_inject_companions
from ..preproc.derotation import _find_indices_adi
def fmmf(cube, pa, psf, fwhm, min_r=None,max_r=None, model='KLIP',var='FR',
param={'ncomp': 20, 'tolerance': 5e-3, 'delta_rot':0.5}, crop=5,
imlib='opencv', interpolation='lanczos4',ncore=1,verbose=True):
"""
Forward model matched filter generating SNR map and contrast map, using
either KLIP or LOCI as PSF subtraction techniques.
Parameters
----------
cube : numpy ndarray, 3d
Input cube (ADI sequences), Dim 1 = temporal axis, Dim 2-3 =
spatial axis
pa : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
psf : numpy ndarray 2d
2d array with the normalized PSF template, with an odd shape.
The PSF image must be centered wrt to the array! Therefore, it is
recommended to run the function ``normalize_psf`` to generate a
centered and flux-normalized PSF template.
fwhm: int
Full width at half maximum for the instrument PSF
min_r : int,optional
Center radius of the first annulus considered in the FMMF detection
map estimation. The radius should be larger than half
the value of the 'crop' parameter. Default is None, which
corresponds to one FWHM.
max_r : int
Center radius of the last annulus considered in the FMMF detection
map estimation. The radius should be smaller or equal to half the
size of the image minus half the value of the 'crop' parameter.
Default is None which corresponds to half the size of the image
minus half the value of the 'crop' parameter.
model: string, optional
Selected PSF-subtraction technique for the computation of the FMMF
detection map. FMMF work either with KLIP or LOCI. Default is 'KLIP'.
var: str, optional
Model used for the residual noise variance estimation used in the
matched filtering (maximum likelihood estimation of the flux and SNR).
Three different approaches are proposed: 'FR', 'FM', and 'TE'.
'FR': consider the pixels in the selected annulus with a width equal
to asize but separately for every frame.
'FM': consider the pixels in the selected annulus with a width
equal to asize but separately for every frame. Apply a mask one FWHM
on the selected pixel and its surrounding.
'TE': rely on the method developed in PACO to estimate the
residual noise variance (take the pixels in a region of one FWHM
around the selected pixel, considering every frame in the
derotated cube of residuals except for the selected frame)
param: dict, optional
Dictionary regrouping the parameters used by the KLIP (ncomp and
delta_rot) or LOCI (tolerance and delta_rot) PSF-subtraction
technique.
ncomp : int, optional
Number of components used for the low-rank approximation of the
speckle field. Default is 20.
tolerance: float, optional
Tolerance level for the approximation of the speckle field via
a linear combination of the reference images in the LOCI
algorithm. Default is 5e-3.
delta_rot : float, optional
Factor for tuning the parallactic angle threshold, expressed
in FWHM. Default is 0.5 (excludes 0.5xFWHM on each side of the
considered frame).
crop: int, optional
Part of the PSF template considered in the estimation of the FMMF
detection map. Default is 5.
imlib : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
ncore : int, optional
Number of processes for parallel computing. By default ('ncore=1')
the algorithm works in single-process mode.
verbose: bool, optional
If True provide a message each time an annulus has been treated.
Default True.
Returns
-------
flux_matrix : 2d ndarray
Maximum likelihood estimate of the contrast for each pixel in the field
of view
snr_matrix : 2d ndarray
Signal to noise ratio map (defined as the estimated contrast divided by
the estimated standard deviation of the contrast).
"""
if crop>=2*round(fwhm)+1:
raise ValueError("Maximum crop size should be lower than or equal to two"+
" FWHM; please change the value of 'crop' accordingly")
if min_r is None:
min_r=int(round(fwhm))
if max_r is None:
max_r= cube.shape[-1]//2-(crop//2+1)
res_full = pool_map(ncore, snr_contrast_esti, iterable(range(min_r,max_r)),
cube, pa, psf, fwhm, model,var,param, crop, imlib,
interpolation,verbose)
flux_matrix=np.zeros((cube.shape[1],cube.shape[2]))
snr_matrix=np.zeros((cube.shape[1],cube.shape[2]))
for res_temp in res_full:
indices=get_annulus_segments(cube[0], res_temp[2],1)
flux_matrix[indices[0][0],indices[0][1]]=res_temp[0]
snr_matrix[indices[0][0],indices[0][1]]=res_temp[1]
return flux_matrix,snr_matrix
def var_esti(mcube,pa,var,crop,ann_center):
"""
Computation of the residual noise variance
"""
n,y,x=mcube.shape
if var=='FR':
var_f=np.zeros(n)
indices = get_annulus_segments(mcube[0], ann_center-int(crop/2),crop,1)
poscentx=indices[0][1]
poscenty=indices[0][0]
for a in range(n):
var_f[a]=np.var(mcube[a,poscenty,poscentx])
elif var=='FM' :
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
var_f=np.zeros((len(indicesy),n))
indices = get_annulus_segments(mcube[0], ann_center-int(crop/2),crop,1)
for a in range(len(indicesy)):
indc=disk((indicesy[a], indicesx[a]),3)
positionx=[]
positiony=[]
for k in range(0,len(indices[0][1])):
cond1=set(np.where(indices[0][1][k]==indc[1])[0])
cond2=set(np.where(indices[0][0][k]==indc[0])[0])
if len(cond1 & cond2)==0:
positionx.append(indices[0][1][k])
positiony.append(indices[0][0][k])
for b in range((n)):
var_f[a,b]=np.var(mcube[b,positiony,positionx])
elif var=='TE' :
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
var_f=np.zeros((len(indicesy),n))
mcube_derot=cube_derotate(mcube,-pa)
for a in range(0,len(indicesy)):
radist=np.sqrt((indicesx[a]-int(x/2))**2+(indicesy[a]-int(y/2))**2)
if (indicesy[a]-int(y/2))>=0:
ang_s= np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
else:
ang_s= 360-np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
for b in range(n):
twopi=2*np.pi
sigposy=int(y/2 + np.sin((ang_s-pa[b])/360*twopi)*radist)
sigposx=int(x/2+ np.cos((ang_s-pa[b])/360*twopi)*radist)
y0 = int(sigposy - int(crop/2))
y1 = int(sigposy + int(crop/2)+1) # +1 cause endpoint is
#excluded when slicing
x0 = int(sigposx - int(crop/2))
x1 = int(sigposx + int(crop/2)+1)
mask = np.ones(mcube_derot.shape[0],dtype=bool)
mask[b]=False
mcube_sel=mcube_derot[mask,y0:y1,x0:x1]
var_f[a,b]=np.var(np.asarray(mcube_sel))
return var_f
def snr_contrast_esti(ann_center,cube, pa, psf, fwhm, model,var,param, crop
, imlib, interpolation,verbose):
"""
Computation of the SNR and contrast associated with the pixels contained
in a given annulus via the forward model matched filter.
"""
n,y,x=cube.shape
evals_matrix=[]
evecs_matrix=[]
KL_basis_matrix=[]
refs_mean_sub_matrix=[]
sci_mean_sub_matrix=[]
resicube_klip=None
ind_ref_list=None
coef_list=None
ncomp=param['ncomp']
tolerance=param['tolerance']
delta_rot=param['delta_rot']
# Computation of the reference PSF, and the matrices
# required for the computation of the PSF forward models
pa_threshold = np.rad2deg(2 * np.arctan(delta_rot * fwhm / (2 * (ann_center))))
mid_range = np.abs(np.amax(pa) - np.amin(pa)) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
if model=='KLIP':
resicube_klip=np.zeros_like(cube)
indices = get_annulus_segments(cube[0],
ann_center-int(round(fwhm)/2),int(round(fwhm)),1)
for k in range(0,cube.shape[0]):
res_temp=KLIP_patch(k,cube[:, indices[0][0], indices[0][1]],
ncomp,pa, int(round(fwhm)), pa_threshold, ann_center)
evals_temp=res_temp[0]
evecs_temp=res_temp[1]
KL_basis_temp=res_temp[2]
sub_img_rows_temp=res_temp[3]
refs_mean_sub_temp=res_temp[4]
sci_mean_sub_temp=res_temp[5]
resicube_klip[k,indices[0][0], indices[0][1]] = sub_img_rows_temp
evals_matrix.append(evals_temp)
evecs_matrix.append(evecs_temp)
KL_basis_matrix.append(KL_basis_temp)
refs_mean_sub_matrix.append(refs_mean_sub_temp)
sci_mean_sub_matrix.append(sci_mean_sub_temp)
mcube=cube_derotate(resicube_klip,pa,imlib=imlib,
interpolation=interpolation)
elif model=='LOCI':
resicube, ind_ref_list,coef_list=LOCI_FM(cube, psf, ann_center, pa,
int(round(fwhm)), fwhm, tolerance,delta_rot,pa_threshold)
mcube=cube_derotate(resicube,pa,imlib=imlib,
interpolation=interpolation)
ceny, cenx = frame_center(cube[0])
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
flux_esti=np.zeros_like(indicesy)
prob_esti=np.zeros_like(indicesy)
var_f=var_esti(mcube,pa,var,crop,ann_center)
for i in range(0,len(indicesy)):
psfm_temp=None
poscenty=indicesy[i]
poscentx=indicesx[i]
indices = get_annulus_segments(cube[0],
ann_center-int(round(fwhm)/2),int(round(fwhm)),1)
an_dist = np.sqrt((poscenty-ceny)**2 + (poscentx-cenx)**2)
theta = np.degrees(np.arctan2(poscenty-ceny, poscentx-cenx))
model_matrix=cube_inject_companions(np.zeros_like(cube), psf, pa,
flevel=1, plsc=0.1,
rad_dists=an_dist, theta=theta, n_branches=1,verbose=False)
#PSF forward model computation for KLIP
if model=='KLIP':
psf_map=np.zeros_like(model_matrix)
for b in range(0,n):
psf_map_temp = perturb(b,
model_matrix[:, indices[0][0], indices[0][1]],
ncomp,evals_matrix, evecs_matrix,KL_basis_matrix,
sci_mean_sub_matrix,refs_mean_sub_matrix, pa, fwhm,
pa_threshold, ann_center)
psf_map[b,indices[0][0], indices[0][1]]=psf_map_temp
psf_map[b,indices[0][0], indices[0][1]]-=np.mean(psf_map_temp)
psf_map_der = cube_derotate(psf_map, pa, imlib=imlib,
interpolation=interpolation)
psfm_temp=cube_crop_frames(psf_map_der,int(2*round(fwhm)+1),
xy=(poscentx,poscenty),verbose=False)
#PSF forward model computation for LOCI
if model=='LOCI':
values_fc = model_matrix[:, indices[0][0], indices[0][1]]
cube_res_fc=np.zeros_like(model_matrix)
matrix_res_fc = np.zeros((values_fc.shape[0],
indices[0][0].shape[0]))
for e in range(values_fc.shape[0]):
recon_fc = np.dot(coef_list[e], values_fc[ind_ref_list[e]])
matrix_res_fc[e] = values_fc[e] - recon_fc
cube_res_fc[:, indices[0][0], indices[0][1]] = matrix_res_fc
cube_der_fc = cube_derotate(cube_res_fc-np.mean(cube_res_fc),
pa, imlib=imlib, interpolation=interpolation)
psfm_temp=cube_crop_frames(cube_der_fc,int(2*round(fwhm)+1),
xy=(poscentx,poscenty),verbose=False)
num=[]
denom=[]
# Matched Filter
for j in range(n):
if var=='FR':
svar=var_f[j]
elif var=='FM' :
svar=var_f[i,j]
elif var=='TE':
svar=var_f[i,j]
if psfm_temp.shape[1]==crop:
psfm=psfm_temp[j]
else:
psfm=frame_crop(psfm_temp[j],
crop,cenxy=[int(psfm_temp.shape[-1]/2),
int(psfm_temp.shape[-1]/2)],verbose=False)
num.append(np.multiply(frame_crop(mcube[j],crop,
cenxy=[poscentx,poscenty],verbose=False),psfm).sum()/svar)
denom.append(np.multiply(psfm,psfm).sum()/svar)
flux_esti[i]=sum(num)/np.sqrt(sum(denom))
prob_esti[i]=sum(num)/sum(denom)
if verbose==True:
print("Radial distance "+"{}".format(ann_center)+" done!")
return prob_esti,flux_esti,ann_center
def perturb(frame,model_matrix,numbasis,evals_matrix, evecs_matrix,
KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix,
angle_list, fwhm, pa_threshold, ann_center):
"""
Function allowing the estimation of the PSF forward model when relying on
KLIP for the computation of the speckle field. The code is based on the
PyKLIP library, considering only the ADI case with a single number of
principal components. For more details about the code, consider
the PyKLIP library or the original articles (Pueyo, L. 2016, ApJ, 824, 117
or <NAME>., <NAME>., <NAME>., & Pueyo, L. 2017, ApJ, 842)
"""
#Selection of the reference library based on the given parallactic angle threshold
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
models_ref = model_matrix[indices_left]
else:
models_ref = model_matrix
#Computation of the self-subtraction and over-subtraction for the current frame
model_sci = model_matrix[frame]
KL_basis=KL_basis_matrix[frame]
sci_mean_sub=sci_mean_sub_matrix[frame]
refs_mean_sub=refs_mean_sub_matrix[frame]
evals=evals_matrix[frame]
evecs=evecs_matrix[frame]
max_basis = KL_basis.shape[0]
N_pix = KL_basis.shape[1]
models_mean_sub = models_ref - np.nanmean(models_ref, axis=1)[:,None]
models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0
model_sci_mean_sub = model_sci- np.nanmean(model_sci)
model_sci_mean_sub[np.where(np.isnan(model_sci_mean_sub))] = 0
model_sci_mean_sub_rows = np.reshape(model_sci_mean_sub,(1,N_pix))
sci_mean_sub_rows = np.reshape(sci_mean_sub,(1,N_pix))
delta_KL = np.zeros([max_basis, N_pix])
models_mean_sub_X_refs_mean_sub_T = models_mean_sub.dot(refs_mean_sub.transpose())
for k in range(max_basis):
Zk = np.reshape(KL_basis[k,:],(1,KL_basis[k,:].size))
Vk = (evecs[:,k])[:,None]
diagVk_X_models_mean_sub_X_refs_mean_sub_T = (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T)
models_mean_sub_X_refs_mean_sub_T_X_Vk = models_mean_sub_X_refs_mean_sub_T.dot(Vk)
DeltaZk = -(1/(2*np.sqrt(evals[k])))*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vk) + ((Vk.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zk)+(Vk.T).dot(models_mean_sub)
for j in range(k):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
for j in range(k+1, max_basis):
Zj = KL_basis[j, :][None,:]
Vj = evecs[:, j][:,None]
DeltaZk += np.sqrt(evals[j])/(evals[k]-evals[j])*(diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vj) + ((Vj.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk))).dot(Zj)
delta_KL[k] = DeltaZk/np.sqrt(evals[k])
oversubtraction_inner_products = np.dot(model_sci_mean_sub_rows, KL_basis.T)
selfsubtraction_1_inner_products = np.dot(sci_mean_sub_rows, delta_KL.T)
selfsubtraction_2_inner_products = np.dot(sci_mean_sub_rows, KL_basis.T)
oversubtraction_inner_products[max_basis::] = 0
klipped_oversub = np.dot(oversubtraction_inner_products, KL_basis)
selfsubtraction_1_inner_products[0,max_basis::] = 0
selfsubtraction_2_inner_products[0,max_basis::] = 0
klipped_selfsub = np.dot(selfsubtraction_1_inner_products, KL_basis) + \
np.dot(selfsubtraction_2_inner_products, delta_KL)
return model_sci[None,:] - klipped_oversub - klipped_selfsub
def KLIP_patch(frame, matrix, numbasis, angle_list, fwhm, pa_threshold,
ann_center,nframes=None):
"""
Function allowing the computation of the reference PSF via KLIP for a
given sub-region of the original ADI sequence. Code inspired by the
PyKLIP library.
"""
max_frames_lib=200
if pa_threshold != 0:
if ann_center > fwhm*20:
indices_left = _find_indices_adi(angle_list,frame,pa_threshold,
truncate=True,
max_frames=max_frames_lib)
else:
indices_left = _find_indices_adi(angle_list, frame,pa_threshold,
truncate=False,nframes=nframes)
refs = matrix[indices_left]
else:
refs = matrix
sci = matrix[frame]
sci_mean_sub = sci - np.nanmean(sci)
#sci_mean_sub[np.where(np.isnan(sci_mean_sub))] = 0
refs_mean_sub = refs- np.nanmean(refs, axis=1)[:, None]
#refs_mean_sub[np.where(np.isnan(refs_mean_sub))] = 0
# Covariance matrix definition
covar_psfs = np.cov(refs_mean_sub)
covar_psfs *= (np.size(sci)-1)
tot_basis = covar_psfs.shape[0]
numbasis = np.clip(numbasis - 1, 0, tot_basis-1)
max_basis = np.max(numbasis) + 1
#Computation of the eigenvectors/values of the covariance matrix
evals, evecs = la.eigh(covar_psfs)
evals = np.copy(evals[int(tot_basis-max_basis):int(tot_basis)])
evecs = np.copy(evecs[:,int(tot_basis-max_basis):int(tot_basis)])
evals = np.copy(evals[::-1])
evecs = np.copy(evecs[:,::-1])
# Computation of the principal components
KL_basis = np.dot(refs_mean_sub.T,evecs)
KL_basis = KL_basis * (1. / np.sqrt(evals))[None,:]
KL_basis = KL_basis.T
N_pix = np.size(sci_mean_sub)
sci_rows = np.reshape(sci_mean_sub, (1,N_pix))
inner_products = np.dot(sci_rows, KL_basis.T)
inner_products[0,int(max_basis)::]=0
#Projection of the science image on the selected principal components
#to generate the speckle field model
klip_reconstruction = np.dot(inner_products, KL_basis)
# Subtraction of the speckle field model from the original science image
#to obtain the residual frame
sub_img_rows = sci_rows - klip_reconstruction
return evals,evecs,KL_basis,np.reshape(sub_img_rows, (N_pix)),refs_mean_sub,sci_mean_sub
def LOCI_FM(cube, psf, ann_center, angle_list, asize,fwhm, Tol,delta_rot,
pa_threshold):
"""
Computation of the optimal factors weighting the linear combination of
reference frames used to obtain the modeled speckle field for each frame
and allowing the determination of the forward modeled PSF. Estimation of
the cube of residuals based on the modeled speckle field.
"""
cube_res = np.zeros_like(cube)
ceny, cenx = frame_center(cube[0])
radius_int=ann_center-int(1.5*asize)
if radius_int<=0:
radius_int=1
for ann in range(3):
n_segments_ann = 1
inner_radius_ann = radius_int + ann*asize
indices = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann)
ind_opt = get_annulus_segments(cube[0], inner_radius=inner_radius_ann,
width=asize, nsegm=n_segments_ann,
optim_scale_fact=2)
ayxyx = [inner_radius_ann,pa_threshold, indices[0][0], indices[0][1],
ind_opt[0][0], ind_opt[0][1]]
matrix_res, ind_ref, coef, yy, xx = _leastsq_patch_fm(ayxyx,
angle_list,fwhm,cube, 100,
Tol,psf=psf)
if ann==1:
ind_ref_list=ind_ref
coef_list=coef
cube_res[:, yy, xx] = matrix_res
return cube_res, ind_ref_list,coef_list
def _leastsq_patch_fm(ayxyx, angle_list,fwhm,cube, dist_threshold,
tol,psf=None):
"""
Function allowing the estimation of the optimal factors for the modeled
speckle field estimation via the LOCI framework. The code has been
developed based on the VIP python function _leastsq_patch, but returns
additionally the set of coefficients used for the speckle field computation.
"""
ann_center,pa_threshold, yy, xx, yy_opti, xx_opti = ayxyx
ind_ref_list=[]
coef_list=[]
yy_opt=[]
xx_opt=[]
for j in range(0,len(yy_opti)):
if not any(x in np.where(yy==yy_opti[j])[0] for x in np.where(xx==xx_opti[j])[0]):
xx_opt.append(xx_opti[j])
yy_opt.append(yy_opti[j])
values = cube[:, yy, xx]
matrix_res = np.zeros((values.shape[0], yy.shape[0]))
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from pandas import DataFrame
from scipy.interpolate import griddata
import glob
import os
#Package Imports
from .read import load_mol_abund,\
load_rates, get_reac_str, total_rates,\
load_radfield,\
read_levels,read_trans
from .misc import contour_points, get_contour_arr, remove_nan, sigfig, iterable, nint
from . import __path__ as pkg_path
#Functions for setting and getting global directory path
# where chemical code is located, base_dir
def set_base_dir(direc):
if direc[-1]!='/':
direc = direc+'/'
fpath = pkg_path[0]+'/pkg_files/base_dir.txt'
f = open(fpath,'w')
f.write(direc)
f.close()
def get_base_dir():
fpath = pkg_path[0]+'/pkg_files/base_dir.txt'
try:
f = open(fpath)
direc = f.read()
f.close()
assert os.path.exists(direc)
except (OSError, AssertionError) as e:
direc = pkg_path[0]+'/test_files/'
return direc
#Some constants that get used throughout.
mp = 1.67e-24 #Mass of proton in g
mau = 1.496e11 #Conversion from AU to meters.
class chem_mod:
'''
A class to handle loading, viewing, and manipulating output from
the disk chemical modeling code presented in Fogel et al. 2011.
For more in-depth documentation, visit
https://github.com/richardseifert/Chemvene
To create an instance, the following three paths must be provided.
outdir - string path to the runs/ directory where model output is
stored.
environ - string path to the environ/ directory used to run your
chemical model. (Must be given if outdir/environ doesn't exist.)
inp - string filename of the input file used to run your model.
(Must be given if outdir/0io* doesn't exist.)
'''
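# Minimal usage sketch (illustrative; the paths below are placeholders, not part
# of this package):
#   mod = chem_mod('runs/my_model/', environ='environ/', inp='0io.inp')
#   co_abund = mod.get_quant('CO', time=1e6)  # CO abundance at the step nearest 1 Myr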
################################################################################
################################ Initialization ################################
################################################################################
def __init__(self,outdir,environ=None,inp=None,base_dir=None):
self.outdir = outdir
if self.outdir[-1] != '/':
self.outdir += '/'
if base_dir is None:
self.bsd = get_base_dir()
else:
self.bsd = base_dir
if not environ is None:
self.set_environ(environ)
elif os.path.exists(self.outdir+'environ/'):
self.set_environ(self.outdir+'environ/')
else:
raise FileNotFoundError("Could not determine environ/ directory to use for this model.")
if not inp is None:
self.set_inp(inp)
else:
outdir_0io_paths = glob.glob(self.outdir+'0io*')
if len(outdir_0io_paths) > 0:
self.set_inp(outdir_0io_paths[0].split('/')[-1])
else:
raise FileNotFoundError("Could not determine 0io file to use for this model.")
self.phys = DataFrame()
self.radfields = {}
self.abunds = {}
self.rates = {}
self.load_physical()
self.load_times()
def set_environ(self,environ):
self.environ = environ
if self.environ[-1] != '/':
self.environ += '/'
def set_inp(self,inp):
self.inp = self.environ+inp
inp_types = ['spec','reac','uv','xray','isrf','rn']
self.inp_paths = {k:None for k in inp_types}
d = np.genfromtxt(self.inp,dtype=str)
for i,k in enumerate(inp_types):
if os.path.exists(self.bsd+d[i]):
self.inp_paths[k] = self.bsd+d[i]
def copy(self):
'''
Make a hard copy of a chem_mod instance.
'''
#Initialize
new_inst = chem_mod(outdir=self.outdir,environ=self.environ,inp=self.inp)
#Hard copy physical quantities
new_inst.phys = self.phys.copy()
#for q in self.phys.columns:
# new_inst.set_quant(q,self.phys[q])
#Hard copy abundances
for mol in self.abunds.keys():
new_inst.abunds[mol] = self.abunds[mol].copy()
#Hard copy rates
for rid in self.rates.keys():
new_inst.rates[rid] = self.rates[rid].copy()
return new_inst
################################################################################
############################### General Loading ################################
################################################################################
def merge(self,tbl):
'''
Prepare a given table to be merged according to position, R and zAU.
ARGUMENTS:
tbl - A pandas table containing the two columns 'R' and either 'shell' or 'zAU'.
RETURNS:
merged - A tbl with the same number of rows as phys. The returned table
has values ordered according to phys['R'] and phys['shell']
'''
#Match R values to their nearest R values in phys['R'].
#This is necessary for the relational merge to work.
phys_R = np.array(list(set(self.phys['R'])))
diffs = np.vstack([(pr-tbl['R'])**2 for pr in phys_R])
inds = np.argmin(diffs,axis=0)
tbl['R'] = phys_R[inds]
#Merge according to columns of phys.
if 'shell' in tbl.columns:
merged = self.phys.merge(tbl,'left',on=['R','shell'])
elif 'zAU' in tbl.columns:
#Match by nearest R and zAU.
# pandas.DataFrame.merge has failed me in this regard... :(
# So I just had to do it myself, huney, using griddata.
merge_cols = [col for col in tbl.columns if not col in self.phys.columns]
points = np.vstack([tbl['R'],tbl['zAU']]).T
values = np.array(tbl[merge_cols])
phys_points = np.array([self.phys['R'],self.phys['zAU']]).T
matched = griddata(points,values,phys_points,method='nearest')
merged = self.phys.copy()
for i,col in enumerate(merge_cols):
merged[col] = matched[:,i]
return merged
def set_times(self,tbl):
'''
Method that takes a table with times as column headers and changes the headers
to match the nearest model timesteps.
ARGUMENTS:
tbl - A pandas.DataFrame object with times (in years) as columns header.
RETURNS:
The same table, but times have been corrected to the nearest model times.
'''
ctimes = tbl.columns
mtimes = self.nearest_times(ctimes,itr=True)
return tbl.rename(columns=dict(zip(ctimes,mtimes)))
################################################################################
############################# Handling Timesteps ###############################
################################################################################
def load_times(self):
'''
Method that reads the 2times.inp file for the model and produces an array of
the time at each model timestep.
No arguments or returns; times are stored in self.times variable.
'''
f = open(self.outdir+'2times.inp')
f.readline()
t_end = float(f.readline().split()[0].replace('D','E'))
t_start = float(f.readline().split()[0].replace('D','E'))
nsteps = int(f.readline().split()[0])
self.times = sigfig(np.logspace(np.log10(t_start),np.log10(t_end),nsteps), 4)
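# For example, a 2times.inp with t_start=1, t_end=1e7 and nsteps=50 (illustrative
# values) yields 50 log-spaced times, rounded here to 4 significant figures.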
def nearest_times(self,times,itr=False):
'''
Function for finding nearest timesteps to a given time or list of times.
ARGUMENTS:
times - Time or list of times. Must be values that can be cast to floats.
itr - Boolean whether or not to return a scalar if possible. Default False.
If a single time is given, itr=False will return a scalar value.
itr=True will return a list of length one.
'''
#If None was given, do nothing. Return None.
if times is None:
return times
#Otherwise, check if time is iterable. If it's not, make it a single-valued array.
try:
iter(times)
times = np.array(times).astype(float)
except TypeError:
times = np.asarray([times]).astype(float)
#Find the nearest times in self.times.
nearest = self.times[ np.argmin([ (self.times - t)**2 for t in times ], axis=1) ]
#Depending on the number of times given, return an array or a scalar.
if len(nearest) == 1 and not itr:
return nearest[0]
else:
return nearest
def nearest_time_i(self,time):
return np.argmin( (self.times - time)**2 )
################################################################################
######################## Read/Write Model Quantities ###########################
################################################################################
def load_physical(self):
'''
Method that loads the disk physical model from 1environ files.
'''
env_paths = glob.glob(self.environ+'1environ*')
#Determine number of shells in model.
f1 = open(env_paths[0])
for i,line in enumerate(f1):
if i==2:
nshells = int(line.strip())
f1.close()
break
dat = np.array([])
shells = np.array([np.arange(nshells)+1]).T
for path in env_paths:
d = np.loadtxt(path,skiprows=3)
d = np.hstack([d,shells])
if len(dat) != 0:
dat = np.vstack([dat,d])
else:
dat = d
#Get header from test file.
f = open(env_paths[0])
header = f.readline()
f.close()
for i,k in enumerate(header.split()+['shell']):
self.phys[k] = dat[:,i]
def load_field(self,field,path=None):
if path is None:
path = self.inp_paths[field]
print("Loading %s field from: %s"%(field,path))
dat = load_radfield(path)
R = dat[:,0]
zAU = dat[:,1]
spec = dat[:,2]
flux = dat[:,3]
self.radfields[field] = DataFrame()
spec_vals = np.unique(spec)
for sv in spec_vals:
mask = spec==sv
tbl = DataFrame()
tbl['R'] = R[mask]
tbl['zAU'] = zAU[mask]
tbl['flux'] = flux[mask]
self.radfields[field][sv] = self.merge(tbl)['flux']
def limedir(self,strmol):
'''
Function that produces string limefg path for a given species.
It's a pretty pointless method, because I only need the limefg path
twice, when loading and writing species abundances. But, I figured
if I ever want to change where I save limefg or what I want to rename
the directory, I can just change it once in this method.
ARGUMENTS:
strmol - String name of the species.
RETURNS:
string path of a directory where limefg should go.
'''
return self.outdir+'e1/limefg_'+strmol+'/'
def grab_mol(self,strmol,*args,**kwargs):
if not strmol in self.abunds:
self.load_mol(strmol,*args,**kwargs)
def load_mol(self,strmol,times=None,write=True):
'''
Method that loads abundances of a given species,
potentially at a given time or times.
If limefg exists for this species (it has previously been loaded and saved),
then species is loaded from this (quicker). Otherwise, species is loaded
directly from r*.out files.
ARGUMENTS:
strmol - string name of the species to load.
times - Time steps to load species at. Only works if species is saved
in limefg format. Optional; default times=None -> load all times.
RETURNS:
Nothing, abundances are stored in self.abunds[strmol]. Column headers are
model times. Use self.get_quant to get strmol at a specific time (See below).
'''
#Look for strmol in limefg format.
limedir = self.limedir(strmol)
if not os.path.exists(limedir):
#If not in limefg, load from scratch (and write to limefg).
self.read_mol(strmol,write=write)
return
#Load from limefg
print("Loading from limefg.")
self.abunds[strmol] = DataFrame()
outpaths = glob.glob(self.outdir+'e1/r*.out')
limepaths = glob.glob(limedir+'*time*.dat')
tnum = [int(lp.split('time')[-1].split('.')[0]) for lp in limepaths]
limepaths = np.array(limepaths)[np.argsort(tnum)]
#Only load files for times requested.
all_times = self.times
if times is None:
times = all_times
else:
times = self.nearest_times(times,itr=True)
limepaths = [ lp for t,lp in zip(all_times,limepaths) if t in times ]
abunds = np.array([])
columns = ['R','zAU','rho','Tgas','Tdust','abund','fg']
for time,path in zip(times,limepaths):
dat = np.loadtxt(path)
tbl = DataFrame(dat,columns=columns)
tbl['R'] /= mau
tbl['zAU'] /= mau
merged = self.merge(tbl)
self.abunds[strmol][time] = merged['abund']/2
# ^ factor of 2 because LIME wants abundance per H2 instead of per H
#Tweak times to be exact values from self.times.
self.abunds[strmol] = self.set_times(self.abunds[strmol])
def read_mol(self,strmol,write=False):
'''
Method that reads abundances of a given species from r*.out files.
ARGUMENTS:
strmol - string name of the species to load.
RETURNS:
Nothing, abundances are stored in self.abunds[strmol]. Column headers are
model times. Use self.get_quant to get strmol at a specific time (See below).
'''
#Load from e1 files.
dat = load_mol_abund(self.outdir+'e1/',strmol)
times = list(set(dat[:,0]))
t = dat[:,0]
R = dat[:,1]
shell = dat[:,2]
abunds = dat[:,3]
#Construct table with abundances at each timestep.
mol_abund = DataFrame({time:abunds[t==time] for time in sorted(times)})
mol_abund['shell'] = shell[t==times[0]]
mol_abund['R'] = R[t==times[0]]
#Merge table with existing self.phys physical table.
self.abunds[strmol] = self.merge(mol_abund)[times]
#Tweak times to be exact values from self.times.
self.abunds[strmol] = self.set_times(self.abunds[strmol])
if write:
#Write abundances in limefg format.
self.write_mol(strmol)
def write_mol(self,strmol,times=None,label=None,savedir=None,tag=''):
'''
Method that writes abundances for a species in the limefg format
used by LIME radiative transfer.
ARGUMENTS:
strmol - string name of the species to load.
'''
if not strmol in self.abunds.keys():
self.read_mol(strmol)
if label is None:
label = strmol
else:
label = strmol+'_'+label
savetbl = self.phys[['R','zAU','rho','Tgas','Tdust']]
savetbl.loc[:,'rho'] *= 0.8/(2.0*mp) * 1e6
savetbl.loc[:,'abund'] = np.zeros_like(savetbl['R']) #Place holder.
# Match tmp table and physical table by positions.
tmp = np.genfromtxt(pkg_path[0]+'/pkg_files/imlup_gaia_v2_abrig_model_Tgas_SB_G04.txt')
inds = [np.argmin(( tmp[:,0]-R)**2 + (tmp[:,1]-z)**2 ) for R,z in zip(self.phys['R'],self.phys['zAU'])]
tmp_sort = tmp[inds]
fghere = np.array(tmp_sort[:,2]/(tmp_sort[:,3]*tmp_sort[:,7]))
fghere[(tmp_sort[:,3] <= 1e-30) | (tmp_sort[:,7] <= 1e-30)] = 1e20
fghere[savetbl['R'] > 313.] = 1e20 # this is for IM LUP SPECIFICALLY!! no large grains beyond this radius
savetbl.loc[:,'fg'] = fghere
savetbl.loc[:,'R'] *= mau
savetbl.loc[:,'zAU'] *= mau
if savedir is None:
limedir = self.limedir(label)
else:
limedir = savedir
if limedir[-1]!='/': limedir=limedir+'/'
if not os.path.exists(limedir):
os.makedirs(limedir)
if not times is None:
times = self.nearest_times(times,itr=True)
else:
times = np.sort(np.unique(self.abunds[strmol].columns))
for time in times:
i = self.nearest_time_i(time)
fname=limedir+tag+strmol+'_time'+str(i)+'.dat'
abu = 2*np.array(self.abunds[strmol][time])
# ^ factor of 2 because LIME wants abundance per H2, not per H.
abu[(savetbl['rho'] <= 1e2) | (abu < 1e-32)] = 0.0
savetbl.loc[:,'abund'] = abu
no_nan = remove_nan(self.phys['R'],abu)
savearr = np.array(savetbl)[no_nan]
np.savetxt(fname,savearr,fmt='%15.7E')
################################################################################
######################### Handling Chemical Reactions ##########################
################################################################################
def get_reac_str(self,reac_id,fmt='ascii'):
'''
Method that obtains a string representation of a given reaction in the
chemical network.
ARGUMENTS:
reac_id - Integer ID for the reaction.
fmt - Desired format of the reaction string.
Options:
ascii - Plain text, no subscript or superscript.
latex - Formatted to include subscripts and superscripts
when interpreted by LaTeX.
RETURNS:
Reaction string.
'''
return get_reac_str(self.inp_paths['reac'], reac_id, fmt)
def load_reac(self,strmol,reacs,times=None,radii=None,zones=None):
'''
Method for loading reaction rates for a specific reaction or reactions
involving a specific species, optionally at specific times or radii.
ARGUMENTS:
strmol - Species involved in the reaction(s). This is used as the prefix
for the *.rout files that contain reaction rates.
reacs - Scalar or array of integer reaction IDs.
times - Model timesteps at which to load reaction rates. Default is all times.
radii - Model radii at which to load reaction rates. Default is all radii.
RETURNS:
Nothing, rates are stored in self.rates[reac_id]. Column headers are
model times. Use self.get_quant to get rates at a specific time (See below).
'''
#Check that this molecule has reaction rates collated.
reac_files = glob.glob(self.outdir+'e1/rates/'+strmol+'_*.rout')
if len(reac_files) == 0:
print("Warning: This molecule has no reaction rates stored for %s. \
Doing nothing and continuing.")
#Find nearest times to those given.
if not times is None:
times = self.nearest_times(times)
#Load from e1 files.
dat = load_rates(self.outdir+'e1/rates/',strmol,reacs,times,radii,zones)
times = list(set(dat[:,0]))
t = dat[:,0]
R = dat[:,1]
shell = dat[:,2]
reac_ids = dat[:,3]
reac_rates = dat[:,4]
try:
iter(reacs)
except TypeError:
reacs = [ reacs ]
for reac in reacs:
self.rates[reac] = DataFrame()
for time in times:
#Construct table with abundances at each timestep.
mask = (reac_ids==reac) & (t==time)
tbl = DataFrame()
tbl[time] = reac_rates[mask]
tbl['shell'] = shell[mask]
tbl['R'] = R[mask]
self.rates[reac][time] = self.merge(tbl)[time]
def rank_reacs(self,strmol,time=None,R=None,zone=None):
'''
Method for ranking reactions involving a particular species
according to the reaction rates, optionally at a specific time
and/or radius in the model.
ARGUMENTS:
strmol - The species whose reactions will be ranked.
time - Timestep at which to rank reactions.
Default, sum over all timesteps.
R - Radius at which to rank reactions.
Default, sum over all radii.
'''
if not time is None:
time = self.nearest_times(time)
rates = total_rates(self.outdir+'e1/rates/',strmol,times=time,radii=R,zones=zone)
return rates
################################################################################
############################# Requesting Model Data ############################
################################################################################
def get_quant(self,quant,time=0,mask=None,fmt='numpy'):
'''
Method for obtaining model quantity at all locations of the disk,
at a specific time.
ARGUMENTS:
quant - Name of quantity. String for physical quantities and species
abundances. Integer for reaction IDs.
For convenience of other methods that use get_quant, if an array
of values is passed, get_quant will do nothing and return the
array passed to it.
time - Float value of the time at which to get the quantity.
RETURNS:
1D array of quant values corresponding to R and shell/zAU columns of self.phys
'''
### Retrieve 2-D quant values ###
quant = self._retrieve_quant(quant,time=time)
if mask is None:
mask = np.ones_like(quant).astype(bool)
elif fmt == 'contour':
raise ValueError("Cannot return contour-formatted arrays with mask")
if fmt == 'numpy':
return np.array(quant[mask])
elif fmt == 'pandas':
return quant[mask]
elif fmt == 'contour':
nx = len(list(set(self.phys['R'])))
ny = len(list(set(self.phys['shell'])))
return get_contour_arr(quant,nx,ny,sortx=self.phys['R'])
else:
raise ValueError("Unrecognized format: %s"%(fmt))
def _retrieve_quant(self,quant,time=0):
if iterable(quant):
return quant #quant passed is already 2-D values.
elif self._validate_phys(quant):
return self.phys[quant]
elif self._validate_abun(quant,time=time):
return self._get_abun(quant,time=time)
elif quant in self.rates.keys():
times = np.array(self.rates[quant].columns)
nearest = times[np.argmin((times-time)**2)]
quant = self.rates[quant][nearest]
if np.nanmean(quant) < 0:
quant = -quant
return quant
elif self._validate_radf(quant):
return self._get_radf(quant)
else:
raise ValueError("The quantity %s was not found for this model."%(quant))
## _validate functions are used to determine if the quantity name
## provided is a valid (i.e. loadable) quantity of the given type.
## Quantities can be Physical Model Quantities (phys), Abundances (abun),
## Radiation Fields (radf), or Reactions (reac).
## Each function returns True if the given quant is loadable for the given
## quantity type, and they're used to determine how to load different
## quantities in get_quant.
def _validate_phys(self,quant):
return quant in self.phys.columns
def _validate_abun(self,quant,time=0):
if quant[0] == 'n':
quant = quant[1:]
try:
self.grab_mol(quant,times=time)
return True
except IndexError:
return False
def _validate_radf(self,quant):
#Try extracting field name from quant string
try:
field = quant.split('_')[0]
except AttributeError:
return False #Not a valid radiation field name.
#If field is already loaded, return True
if field in self.radfields.keys():
return True
try:
self.load_field(field)
return True
except (TypeError, KeyError) as e:
return False
def _validate_reac(self,quant):
pass
## _get functions are used to load quantites of each type delineated above.
## They are used by get_quant to load/retrieve different types of model quantities.
def _get_abun(self,quant,time=0):
if quant[0] == 'n': #Return species density
times = np.array(self.abunds[quant[1:]].columns)
nearest = times[np.argmin((times-time)**2)]
from __future__ import print_function
import time, sys
import socket, struct
import threading
if sys.version_info.major <= 2:
import Queue as queue
else:
import queue
import tempfile, os
import pprint
import pkg_resources
import shutil
import numpy as np
from pymvg.camera_model import CameraModel
from pymvg.multi_camera_system import MultiCameraSystem
import flydra_core.kalman.dynamic_models
import flydra_analysis.offline_data_save
from flydra_analysis.kalmanize import kalmanize
import flydra_core.water as water
import flydra_analysis.a2.core_analysis as core_analysis
import flydra_core.flydra_socket as flydra_socket
from flydra_core.reconstruct import Reconstructor, DEFAULT_WATER_REFRACTIVE_INDEX
from flydra_analysis.a2.retrack_reuse_data_association import (
retrack_reuse_data_association,
)
SPINUP_DURATION = 0.2
MAX_MEAN_ERROR = 0.002
def _get_cams(with_distortion):
base = CameraModel.load_camera_default()
lookat = np.array((0.0, 0.0, 0.0))
up = np.array((0.0, 0.0, 1.0))
cams = []
cams.append(
base.get_view_camera(eye=np.array((1.0, 0.0, 1.0)), lookat=lookat, up=up)
)
cams.append(
base.get_view_camera(eye=np.array((1.2, 3.4, 5.6)), lookat=lookat, up=up)
)
cams.append(base.get_view_camera(eye=np.array((0, 0.3, 1.0)), lookat=lookat, up=up))
if with_distortion:
distortion1 = np.array([0.2, 0.3, 0.1, 0.1, 0.1])
else:
distortion1 = np.zeros((5,))
cam_wide = CameraModel.load_camera_simple(
name="cam_wide",
fov_x_degrees=90,
eye=np.array((-1.0, -1.0, 0.7)),
lookat=lookat,
distortion_coefficients=distortion1,
)
cams.append(cam_wide)
for i in range(len(cams)):
cams[i].name = "cam%02d" % i
cam_system = MultiCameraSystem(cams)
reconstructor = Reconstructor.from_pymvg(cam_system)
result = dict(cams=cams, cam_system=cam_system, reconstructor=reconstructor,)
return result
def setup_data(
with_water=False, fps=120.0, with_orientation=False, with_distortion=True
):
tmp = _get_cams(with_distortion=with_distortion)
cams = tmp["cams"]
cam_system = tmp["cam_system"]
reconstructor = tmp["reconstructor"]
# generate fake trajectory
dt = 1 / fps
t = np.arange(0.0, 1.0, dt)
x = 0.2 * np.cos(t * 0.9)
y = 0.3 * np.sin(t * 0.7)
z = 0.1 * np.sin(t * 0.13) - 0.12
pts = np.hstack((x[:, np.newaxis], y[:, np.newaxis], z[:, np.newaxis]))
# ------------
# calculate 2d points for each camera
if with_water:
wateri = water.WaterInterface(
refractive_index=DEFAULT_WATER_REFRACTIVE_INDEX, water_roots_eps=1e-7
)
reconstructor.add_water(wateri)
data2d = {
"2d_pos_by_cam_ids": {},
"2d_slope_by_cam_ids": {},
}
for camn, cam in enumerate(cams):
cam_id = cam.name
assert cam_id != "t"
if with_water:
center_2d = water.view_points_in_water(reconstructor, cam_id, pts, wateri).T
else:
center_2d = cam.project_3d_to_pixel(pts)
data2d["2d_pos_by_cam_ids"][cam_id] = center_2d
if with_orientation:
dx = np.gradient(center_2d[:, 0])
            dy = np.gradient(center_2d[:, 1])
from tkinter import filedialog, Tk
from typing import List
import matplotlib.pyplot as plt
import numpy as np
from lmfit import Model
from lmfit.models import GaussianModel, LinearModel
from pandas import read_csv, read_hdf, DataFrame, set_option
from scipy import fftpack, interpolate
from scipy.optimize import curve_fit
from seaborn import set_style
from range_selector import RangeTool
set_option('column_space', 80)
set_style("whitegrid")
# set_palette(["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"])
e = 2.7182818
def find_nearest(array, value):
idx = (np.abs(array - value)).argmin()
return array[idx]
def pick_dat(cols, initdir='RDAT', title="Select file"):
"""
Data reader that is called within many other functions.
:param initdir: This is the directory that the function will open by default to look for the data (.csv or .h5).
:param title: The message to display at the top of the dialogue box.
:param cols: Headers to give to the data.
:return: Pandas DataFrame with headers that contains the selected data.
"""
root = Tk()
    root.filename = filedialog.askopenfilename(initialdir="C:\\Users\\Josh\\IdeaProjects\\PulsedNMR\\{}".format(initdir),
title=title)
filename_parts = root.filename.split('/')[-1]
if 'csv' in root.filename:
data = read_csv(root.filename, names=cols, engine='c')
return data, filename_parts
elif 'h5' in root.filename:
data = read_hdf(root.filename, 'table', names=cols, engine='c')
return data, filename_parts
else:
print('Unexpected file type. Choose either a .csv or .h5 file.')
'''
Free induction decay (FID). This is the signal of M_x or M_y decaying after a pi/2 pulse.
'''
def T2_from_echo(M_xy, M0, tau):
"""
    This function extracts the spin-spin relaxation time from the height difference between the initial magnetization
    and the magnetization in the xy-plane after a time of two tau has passed.
:param M_xy: Magnetization in the xy-plane.
:param M0: Initial magnetization in z direction.
:param tau: Time between the pi/2 and the pi pulse.
:return: Spin-spin relaxation time.
"""
return -2 * tau / (np.log(M_xy / M0))
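# A minimal sanity check of the relation above (illustrative numbers, not
# measured data): with M0 = 1.0, tau = 5 ms and a true T2 of 20 ms, the echo
# height is M_xy = M0*exp(-2*tau/T2), and T2_from_echo recovers T2 from it.
def _check_T2_from_echo():
    M0, tau, T2_true = 1.0, 5e-3, 20e-3
    M_xy = M0 * np.exp(-2 * tau / T2_true)
    assert abs(T2_from_echo(M_xy, M0, tau) - T2_true) < 1e-9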
def echo_as_T2(t, M0, T2, c, ph):
"""
:param t:
:param M0: Initial magnetization in z direction.
:param T2: Spin-spin relaxation time.
:param c: Intercept to compensate for DC-offset.
:param ph: Phase difference.
:return: Magnetization in the xy-plane.
"""
# Old form:
return M0 * (np.exp(-((t - ph) / T2))) + c
# return M0 * (np.exp(-(t / T2) + ph)) + c
def FID_Exponential_fit():
"""
    A mixture of smoothing and differentiating is used to determine the point at which the FID shape is dominated
    by exponential decay, and the echo_as_T2 function is then fitted to the data in this region.
"""
dat, filename = pick_dat(['t', 'm'])
dat.loc[:, 't'] += abs(np.min(dat['t']))
maxi = np.max(dat['m'])
try:
smoothdat = interpolate.UnivariateSpline(dat['t'], dat['m'], k=5, s=200)
grad1 = np.gradient(smoothdat(dat['t']))
grad1_2 = np.gradient(grad1)
grad2 = interpolate.UnivariateSpline(dat['t'], grad1_2, k=3, s=0)
max_pos = dat['t'][int(np.median(np.where(dat['m'] == find_nearest(dat['m'], maxi))[0]))]
roots_range = range(0, len(grad2.roots()))
f = [find_nearest(dat['t'], grad2.roots()[p]) for p in roots_range]
s = [f[i] for i in roots_range if f[i] > max_pos]
b = np.where(dat['t'] == s[0])[0][0]
except ValueError:
b = int(np.median(np.where(dat['m'] == maxi)[0]))
mini = np.min(dat['m'][b:])
mx = np.max(dat['m'][b:]) - mini
max_loc = int(np.median(np.where(dat['m'] == find_nearest(dat['m'], mx + mini))))
max_loc_time = dat['t'][max_loc]
decay_con_amp = mx / e
decay_con_amp_pos = int(
np.median(np.where(dat['m'] == find_nearest(dat['m'], decay_con_amp + mini))))
decay_con_amp_time = dat['t'][decay_con_amp_pos]
decay_time = decay_con_amp_time - max_loc_time
initial = np.array([mx, decay_time, mini, max_loc_time])
boundss = (
[mx * 0.85, decay_time * 0.7, mini * 0.9, max_loc_time * 0.9], [mx * 1.15, decay_time * 1.3, (mini + 0.5) * 1.2,
max_loc_time * 1.1])
popt, pcov = curve_fit(echo_as_T2, xdata=dat['t'][b:], ydata=dat['m'][b:], p0=initial, maxfev=30000,
method='trf', bounds=boundss)
errs = np.diag(pcov)
datas1 = np.array([popt, errs, initial])
datas2 = np.transpose(datas1)
vals = DataFrame(datas2, columns=['Parameter', 'Uncertainty', 'Initial'], index=['M0', 'T2', 'Intercept', 'Phase'])
print('\n', vals)
plt.title('{}'.format(filename))
plt.plot(dat['t'], dat['m'], '+', ms=1.4, color='r')
plt.plot(dat['t'][b:], echo_as_T2(dat['t'][b:], *popt), ls='--', lw=2, color='k')
plt.xlabel("Time (s)")
plt.ylabel("Magnetization (A/m)")
plt.axhline(mx + mini)
plt.axhline(decay_con_amp + mini)
plt.axvline(max_loc_time)
plt.axvline(decay_con_amp_time)
fig_manager = plt.get_current_fig_manager()
fig_manager.window.showMaximized()
plt.show()
def range_to_list():
"""
    This function is used to create an array of values from a dataset whose limits are given by a list of lower and
    upper limits. THIS IS CONFIGURED FOR MY COMPUTER, CHANGE THE DIRECTORY TO USE.
"""
dat1, filename1 = pick_dat(['t', 'm'], "RDAT_Test", "Select dataset to draw from")
dat2 = read_csv("C:\\Users\\Josh\\IdeaProjects\\PulsedNMR\\Ranges\\{}".format(filename1),
names=['Lower Bound', 'LowerIndex', 'Upper Bound', 'UpperIndex'])
xrange = []
yrange = []
xranges = {}
yranges = {}
x_append = xrange.append
y_append = yrange.append
for o in range(0, len(dat2)):
x_append((dat1['t'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
y_append((dat1['m'][dat2['LowerIndex'][o]:dat2['UpperIndex'][o] + 1]).values)
for o in range(0, len(xrange)):
xranges[o] = xrange[o]
yranges[o] = yrange[o]
return xranges, yranges, xrange, yrange, filename1, dat1
def echo_fits():
"""
Fits a Gaussian with a linear background to each of the echo peaks, finds the centroid and top of
the Gaussian, then fits the echo_as_T2 function to the points given by x=centroid, y=top.
"""
xrs, yrs, xr, yr, filename, dat1 = range_to_list()
cents: List[float] = []
cents_uncert: List[float] = []
heights: List[float] = []
heights_uncert: List[float] = []
for i in range(0, len(xrs)):
mdl = GaussianModel(prefix='G_')
lne = LinearModel(prefix='L_')
params = mdl.guess(yrs[i], x=xrs[i])
params += lne.guess(yrs[i], x=xrs[i])
max_y = np.max(yrs[i])
min_y = np.min(yrs[i])
        max_x = np.max(xrs[i])
        min_x = np.min(xrs[i])
predicted_slope = (max_y - min_y) / (max_x - min_x)
params.add('L_slope', value=predicted_slope, min=predicted_slope * 1.1, max=predicted_slope * 0.9)
params.add('L_intercept', value=min_y, min=min_y * 0.9, max=min_y * 1.1)
params.add('G_height', value=max_y - min_y, min=(max_y - min_y) * 0.99, max=(max_y - min_y) * 1.05)
model = mdl + lne
result = model.fit(yrs[i], params, x=xrs[i], method='leastsq')
cent: float = result.params['G_center'].value
amp: float = result.params['G_height'].value
inter: float = result.params['L_intercept'].value
grad: float = result.params['L_slope'].value
height: float = amp + ((cent * grad) + inter)
heights.append(height)
cents.append(cent)
cents_uncert.append(result.params['G_center'].stderr)
partial_amp = 1
partial_grad = cent
partial_x = grad
partial_inter = 1
amp_term = partial_amp * result.params['G_height'].stderr
grad_term = partial_grad * result.params['L_slope'].stderr
x_term = partial_x * np.mean(np.diff(xrs[i]))
inter_term = partial_inter * result.params['L_intercept'].stderr
height_uncert = np.sqrt(amp_term ** 2 + grad_term ** 2 + x_term ** 2 + inter_term ** 2)
heights_uncert.append(height_uncert)
heights = np.array(heights)
    cents = np.array(cents)
"""
In this example we use the pysid library to estimate a MIMO armax model
"""
#Import Libraries
from numpy import array, convolve, concatenate, zeros
from numpy.random import rand, randn #To generate the experiment
from scipy.signal import lfilter #To generate the data
from pysid import armax #To estimate an arx model
#True System
#Number of inputs
nu = 2
#Number of outputs
ny = 2
#Orders
na = [[2, 2], [2, 2]] #This variable must be (ny x ny)
nb = [[1, 1], [1, 1]] #This variable must be (ny x nu)
nk = [[1, 1], [1, 1]] #This variable must be (ny x nu)
nc = [[2], [2]] #This variable must be (ny x 1)
#with the following true parameters
A1o = array([1, -1.2, 0.36])
A12o = array([0, 0.09, -0.1])
A2o = array([1, -1.6, 0.64])
A21o = array([0, 0.2, -0.01])
B11o = array([0, 0.5, 0.4])
B12o = array([0, 0.9, 0.8])
B21o = array([0, 0.2,-0.3])
B22o = array([0, 0.1,-0.8])
C1o = array([1, 0.8,-0.1])
C2o = array([1, 0.9,-0.2])
#True parameter vector
thetao = [-1.2, 0.36, 0.5, 0.4, 0.2, -0.3, 0.8, -0.1]
# Generate the experiment
# The true system is generated by the following relation:
# S: y(t) = Go(q)*u(t) + Ho(q)*e(t),
# with u(t) the input and e white noise.
# Number of Samples
N = 1000
# Take u as uniform
u = -1 + 2*rand(N, nu)
# Generate gaussian white noise with standat deviation 0.01
e = 0.01*randn(N, ny)
# ARMAX: y = A**-1*B*u + A**-1*C*e
# Calculate the y through S (ARMAX: G(q) = B(q)/A(q) and H(q) = C(q)/A(q))
det = convolve(A1o, A2o) - convolve(A12o, A21o)
y1 = lfilter(convolve(A2o, B11o), det, u[:, 0:1], axis=0) + \
lfilter(convolve(-A12o, B21o), det, u[:, 0:1], axis=0) + \
lfilter(convolve(A2o, B12o), det, u[:, 1:2], axis=0) + \
     lfilter(convolve(-A12o, B22o), det, u[:, 1:2], axis=0)
from datetime import timedelta
from astropy import units as u
import numpy as np
from sunpy.time import parse_time
def get_sky_position(time, offset):
"""Code for converting solar offsets to pointing position.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.,
time='2016-07-26T19:53:15.00'
offset: Offset from the center of the Sun. Must have units from astropy:
i.e.: offset = np.array([1000, 150]) * u.arcsec
Returns
----------
    sky_position: Two-element array giving the [RA, Dec] coordinates of the target location.
Notes
----------
Syntax:
sky_position = get_sky_position(time, offset)
"""
from astropy.coordinates import get_sun
from astropy.time import Time
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
# Convert the date into something that's usable by astropy.
start_date = parse_time(time)
astro_time = Time(start_date)
# Use astropy get_sun for Sun sky position.
# sunpy has a similar function, but it may be giving a different
# epoch for the RA and dec. We need them in J2000 RA and dec.
astro_sun_pos = get_sun(astro_time)
# Get the solar north pole angle. cgs --> radians
# Update for sunpy v1.0+
# sun_np=sun.solar_north(t=time).cgs
sun_np=sun.P(time).cgs
# Get the center of the Sun, and assign it degrees.
# Doing it this was is necessary to do the vector math below.
sun_pos = np.array([astro_sun_pos.ra.deg, astro_sun_pos.dec.deg])* u.deg
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Project the offset onto the Sun
delta_offset = np.dot(offset, rotMatrix)
# Scale to RA based on the declination.
delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])
# Account for the fact that +Ra == East and we have defined +X = West
delta_offset = delta_offset * [-1.0, 1.0]
# Apply the offset and return the sky position.
sky_position = sun_pos + delta_offset
return sky_position
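# Illustrative call (values are only an example, matching the docstring above):
# offset = np.array([1000, 150]) * u.arcsec
# sky_pos = get_sky_position('2016-07-26T19:53:15.00', offset)
# sky_pos is then the [RA, Dec] of that offset as an astropy Quantity in degrees.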
def get_skyfield_position(time, offset, load_path=None, parallax_correction=False):
"""Code for converting solar coordinates to astrometric (J200) RA/Dec coordinates.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.,
time='2016-07-26T19:53:15.00'
offset: Offset from the center of the Sun. Must have units from astropy:
i.e.: offset = np.array([1000, 150]) * u.arcsec
load_path (optional): Relative path from currently location to store bsp files
parallax_correction: Use the NuSTAR TLE to correct for orbital parallax
Returns
----------
sky_position: Two-element array giving the [RA, Dec] coordinates of the
target location. Note this is given in astrometric (J2000) RA/Dec, which is what
we need for the NuSTAR planning system.
Notes
----------
Syntax:
skyfield_position = get_skyfield_position(time, offset)
"""
from astropy.time import Time
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
from nustar_pysolar.utils import skyfield_ephem
start_date = parse_time(time)
utc = Time(start_date)
observer, sunephem, ts = skyfield_ephem(load_path=load_path,
parallax_correction=parallax_correction,
utc=utc)
tcheck = ts.from_astropy(utc)
geocentric = observer.at(tcheck).observe(sunephem)
this_ra_geo, this_dec_geo, dist = geocentric.radec()
# Get the solar north pole angle. cgs --> radians
# sun_np = sunpy.sun.solar_north(t=time).cgs
# Update for sunpy v1.0+
sun_np=sun.P(time).cgs
# Get the center of the Sun, and assign it degrees.
# Doing it this was is necessary to do the vector math below.
sun_pos = np.array([this_ra_geo.to(u.deg).value, this_dec_geo.to(u.deg).value])*u.deg
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Project the offset onto the Sun
delta_offset = np.dot(offset, rotMatrix)
# Scale to RA based on the declination.
delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])
# Account for the fact that +Ra == East and we have defined +X = West
delta_offset = delta_offset * [-1.0, 1.0]
# Apply the offset and return the sky position.
sky_position = sun_pos + delta_offset
return sky_position
def get_nustar_roll(time, angle):
"""Code to determine the NuSTAR roll angle for a given field-of-view on the
Sun for a given time.
Parameters
----------
time: Date that is parsable by sunpy.time.parse_time()
i.e.
time='2016-07-26T19:53:15.00'
angle: Desired roll offset from solar north in degrees.
For a "square" field of view, use angle=0 / 90 / 180 / 270 to have DET0
at the NE / SE / SW / NW corners of a square field of view.
For a "diamond" with DET0 to the south, use angle = 45.
Returns
----------
nustar_roll: NuSTAR PA angle with respect to celestial north.
"""
# Replaced with newer sunpy v1 function
# from sunpy import sun
from sunpy.coordinates import sun
# Get the solar north pole angle. cgs --> radians
# sun_np=sun.solar_north(t=time).deg * u.deg
# Update for sunpy v1.0+
sun_np=sun.P(time).deg*u.deg
nustar_roll = np.mod(sun_np + angle, 360*u.deg)
return nustar_roll;
def _parse_timestamp(tstamp):
"""Convenience function for turning the SOC timestamp into a datetime object.
"""
date1 = tstamp.split('/')
year=date1[0].strip()
day, time=(date1[1].split())
stub = (year.strip()+'-01-01T00:00:00')
year = parse_time(stub)
hr, min, sec = time.split(':')
dt = timedelta(int(day)-1, int(sec), 0, 0, int(min), int(hr))
return year+dt;
def _parse_SOC_timestamp(tstamp):
"""Convenience function for turning the timestamp into a datetime object.
"""
date1 = tstamp.split(':')
year = date1[0]
day = date1[1]
hr = date1[2]
min = date1[3]
sec = date1[4]
stub = (year.strip()+'-01-01T00:00:00')
year = parse_time(stub)
# hr, min, sec = date1[2:4]
dt = timedelta(int(day)-1, int(sec), 0, 0, int(min), int(hr))
return year+dt;
def parse_occultations(infile):
"""Parse the shadow analysis file to determine the 'in Sun' times.
Parameters
----------
infile: Input file to be parsed.
Returns
----------
Returns a list of [ [start, stop], [start stop] ] times where start means
you egress from Earth shadow into the sunlight, while stop means you
re-enter Earth shadow.
Notes
---------
"""
f = open(infile)
all_pairs = []
start = 0
for ind,line in enumerate(f):
# Little parser here to find the right place to start reading in...
if (line.find("Shadow Begin") != -1):
start=start+1
# Skips over additional lines of whitespace.
if(start == 0):
continue
if(start <3):
start+=1
continue
# Get the first date string:
fields = line.split('-')
first = fields[0]
dtfirst = _parse_timestamp(first)
second = (fields[1].split('UTC'))[0].strip()
dtsecond=_parse_timestamp(second)
# Since the file actually gives the start/stop times of going into
# earthshadow, we actually want the "In Sun" times, which is the egress
# from earthshadow and the entry into the next earthshadow.
# Note that this skips the first row.
if(start == 3):
start+=1
else:
all_pairs.append([last, dtfirst])
# Store the last entry to add in the next time around...
last=dtsecond
f.close()
return all_pairs
def sunlight_periods(infile, tstart, tend):
"""Return the periods when NuSTAR is in Sunlight in the given timerange.
Parameters
----------
tstart, tend: ISO formatted times or something else that
sunpy.time.parse_time() can read.
i.e.
tstart='2017-03-11T23:09:10'
infile: Input file to be parsed. This should the value returned by
nustar_pysolar.download_occultation_times()
Returns
----------
Returns a list of [ [start, stop], [start stop] ] times where start means
you egress from Earth shadow into the sunlight, while stop means you
re-enter Earth shadow.
The list has been filtered to only include those epochs that span the given
time range.
Notes
---------
"""
import os.path
if not(os.path.isfile(infile)):
print('Error in nustar_pysolar.sunlight_periods.')
print('Input file: '+infile+' does not exist.')
return -1;
all_pairs = parse_occultations(infile)
checkstart = parse_time(tstart)
checkend = parse_time(tend)
in_range = []
set=0
for pair in all_pairs:
dtmin = (pair[0] - checkstart)
dtmax = (pair[1] - checkstart)
if ( (pair[1] > checkstart) ):
set=1
if (set == 0):
continue
if ( pair[1] > checkend ):
break
in_range.append(pair)
if len(in_range) == 0:
print('Error in function: '+sunlight_periods.__name__)
print('No dates found in range. Pick a different occultation file.')
return -1
else:
return in_range
def make_mosaic(orbit, outfile='mosaic.txt', write_output=False, make_regions=False,
reg_pref='testbox', extra_roll=0.*u.deg, write_sun=False):
'''
Code to make a mosaic for a 5x5 tiled array on the Sun.
Input:
tstart = '2018-05-28T15:37:00'
tend = '2018-05-28T23:10:00'
    orbit = [tstart, tend]   # e.g. one entry returned by sunlight_periods()
    positions = make_mosaic(orbit, write_output=True)
Optional flags:
write_output = [False] / True
Write the output pointing positions in NuSTAR SOC readable formats in 'outfile' for all of the pointings.
outfile = ['mosaic.txt']
Output file if write_output is used.
make_regions: [False] / True
Make ds9 region files for each tile so that you can see how the FoV moves with each mosaic location.
reg_pref: 'testbox'
The prefix for the region files. Useful if you want to make this meaningful.
Output mosaic file has columns of:
"Arrive By Time" RA DEC RA_SUN DEC_SUN
'''
import numpy as np
box_pa = get_nustar_roll(orbit[0], extra_roll)
sun_pa = get_nustar_roll(orbit[0], 0.)
pa = box_pa + 90*u.deg
base = np.array([-1.45, -0.725, 0, 0.725, 1.45])
xsteps = np.append(base, np.flip(base, 0))
xsteps = np.append(xsteps, base)
xsteps = np.append(xsteps, np.flip(base, 0))
xsteps = np.append(xsteps, base)
ysteps = np.array(np.zeros(5) + 1.45)
ysteps = np.append(ysteps, np.zeros(5) + 0.725)
ysteps = np.append(ysteps, np.zeros(5))
ysteps = np.append(ysteps, np.zeros(5)-0.725)
ysteps = np.append(ysteps, np.zeros(5)-1.45)
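    # The 25 (x, y) pairs above trace a 5x5 serpentine raster: five rows of
    # constant y from +1.45 down to -1.45, with the x sweep direction
    # alternating row to row so consecutive tiles stay adjacent on the disk.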
# Rotation matrix for a clockwise rotation on the solar disk:
rotMatrix = np.array([[np.cos(extra_roll), np.sin(extra_roll)],
[-np.sin(extra_roll), np.cos(extra_roll)]])
dt = (orbit[1] - orbit[0]) / 25.
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print("Dwell per position:", dt.to_value(u.s))
print("")
print("NuSTAR Roll Angle to get roll relative to Sun of {:.02f} is {:.02f} deg".format(extra_roll.value, box_pa.value))
print("Step of FOV PA direction is {:.02f} deg".format(pa.value))
print("")
if write_output is True:
f = open(outfile, 'w')
aim_time = orbit[0]
for ind, pair in enumerate(zip(xsteps, ysteps)):
arrive_time = aim_time
aim_time = aim_time + dt
# Make this 10-arcmin steps.
step_size = 10 * u.arcmin
# Sun-center location:
offset = [0., 0.]*u.deg
sun_pos = get_skyfield_position(aim_time, offset, load_path='../data', parallax_correction=True)
# print('Sun time: {} RA (deg): {} Dec (deg): {}'.format(aim_time.isoformat(), sun_pos[0], sun_pos[1]))
# Pointing location
# Rotate to the correct orientation on the solar disk:
        offset = (np.dot(pair, rotMatrix)) * step_size
# Copyright (c) 2020, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import warnings
from gzip import GzipFile
from tempfile import NamedTemporaryFile
from datetime import datetime, timedelta
import numpy as np
try:
import pygrib
except ImportError:
warnings.warn(
        'pygrib can not be imported. GRIB files (H14) can not be read.')
from pygeobase.io_base import ImageBase
from pygeobase.io_base import MultiTemporalImageBase
from pygeobase.object_base import Image
import ascat.read_native.bufr as bufr_reader
from ascat.read_native.bufr import AscatL2SsmBufr
from ascat.read_native.cdr import AscatNc
if sys.version_info < (3, 0):
range = xrange
class H08Single(ImageBase):
def read(self, timestamp=None, lat_lon_bbox=None):
"""
Read specific image for given datetime timestamp.
Parameters
----------
filename : string
filename
timestamp : datetime.datetime
exact observation timestamp of the image that should be read
lat_lon_bbox : list, optional
list of lat,lon cooridnates of bounding box
[lat_min, lat_max, lon_min, lon_max]
Returns
-------
data : dict or None
dictionary of numpy arrays that hold the image data for each
variable of the dataset, if no data was found None is returned
metadata : dict
dictionary of numpy arrays that hold the metadata
timestamp : datetime.datetime
exact timestamp of the image
lon : numpy.array or None
array of longitudes, if None self.grid will be assumed
lat : numpy.array or None
array of latitudes, if None self.grid will be assumed
time_var : string or None
variable name of observation times in the data dict, if None all
observations have the same timestamp
"""
zipped = False
if os.path.splitext(self.filename)[1] == '.gz':
zipped = True
# for zipped files use an unzipped temporary copy
if zipped:
with NamedTemporaryFile(delete=False) as tmp_fid:
with GzipFile(self.filename) as gz_fid:
tmp_fid.write(gz_fid.read())
filename = tmp_fid.name
else:
filename = self.filename
with bufr_reader.BUFRReader(filename) as bufr:
lons = []
ssm = []
ssm_noise = []
ssm_corr_flag = []
ssm_proc_flag = []
data_in_bbox = True
for i, message in enumerate(bufr.messages()):
if i == 0:
# first message is just lat, lon extent
# check if any data in bbox
if lat_lon_bbox is not None:
lon_min, lon_max = message[0, 2], message[0, 3]
lat_min, lat_max = message[0, 4], message[0, 5]
if (lat_lon_bbox[0] > lat_max or
lat_lon_bbox[1] < lat_min or
lat_lon_bbox[2] > lon_max or
lat_lon_bbox[3] < lon_min):
data_in_bbox = False
break
elif data_in_bbox:
                    # first 5 elements are there only once; after that,
                    # 4 elements are repeated till the end of the array.
                    # These 4 are ssm, ssm_noise, ssm_corr_flag and
                    # ssm_proc_flag. Each message contains the values for
                    # 120 lons between lat_min and lat_max; the grid spacing
                    # is 0.00416667 degrees.
lons.append(message[:, 0])
lat_min = message[0, 1]
lat_max = message[0, 2]
ssm.append(message[:, 4::4])
ssm_noise.append(message[:, 5::4])
ssm_corr_flag.append(message[:, 6::4])
ssm_proc_flag.append(message[:, 7::4])
if zipped:
os.remove(filename)
if data_in_bbox:
ssm = np.rot90(np.vstack(ssm)).astype(np.float32)
ssm_noise = np.rot90(np.vstack(ssm_noise)).astype(np.float32)
ssm_corr_flag = np.rot90(
np.vstack(ssm_corr_flag)).astype(np.float32)
ssm_proc_flag = np.rot90(
np.vstack(ssm_proc_flag)).astype(np.float32)
lats_dim = np.linspace(lat_max, lat_min, ssm.shape[0])
lons_dim = np.concatenate(lons)
data = {'ssm': ssm,
'ssm_noise': ssm_noise,
'proc_flag': ssm_proc_flag,
'corr_flag': ssm_corr_flag
}
# if there are is a gap in the image it is not a 2D array in
# lon, lat space but has a jump in latitude or longitude
# detect a jump in lon or lat spacing
lon_jump_ind = np.where(np.diff(lons_dim) > 0.00418)[0]
if lon_jump_ind.size > 1:
print("More than one jump in longitude")
if lon_jump_ind.size == 1:
lon_jump_ind = lon_jump_ind[0]
diff_lon_jump = np.abs(
lons_dim[lon_jump_ind] - lons_dim[lon_jump_ind + 1])
missing_elements = int(np.round(diff_lon_jump / 0.00416666))
missing_lons = np.linspace(lons_dim[lon_jump_ind],
lons_dim[lon_jump_ind + 1],
missing_elements,
endpoint=False)
# fill up longitude dimension to full grid
lons_dim = np.concatenate([lons_dim[:lon_jump_ind],
missing_lons,
lons_dim[lon_jump_ind + 1:]])
# fill data with NaN values
empty = np.empty((lats_dim.shape[0], missing_elements))
empty.fill(1e38)
for key in data:
data[key] = np.concatenate(
[data[key][:, :lon_jump_ind],
empty, data[key][:, lon_jump_ind + 1:]], axis=1)
            lat_jump_ind = np.where(np.diff(lats_dim) > 0.00418)[0]
import json
import math
import os
import cv2
from PIL import Image
import numpy as np
from keras import layers
from keras.applications import DenseNet121
from keras.callbacks import Callback, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
import os
from PIL import *
import matplotlib.image as mpimg
import numpy as np
from keras.preprocessing import image
import json
import random
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.layers import Dense, Dropout, Flatten
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
import keras
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D,GlobalAveragePooling2D, ReLU, MaxPool2D,InputLayer
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras import optimizers, regularizers
from sklearn.metrics import classification_report
from keras.callbacks import TensorBoard
import datetime
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input
from keras.applications import DenseNet121
from keras import layers
import sys
np.random.seed(2019)
tf.set_random_seed(2019)
def preprocess_image(image_path, desired_size=224):
im = Image.open(image_path)
im = im.resize((desired_size, )*2, resample=Image.LANCZOS)
return im
def create_datagen():
return ImageDataGenerator(
zoom_range=0.15, # set range for random zoom
# set mode for filling points outside the input boundaries
fill_mode='constant',
cval=0., # value used for fill_mode = "constant"
horizontal_flip=True, # randomly flip images
vertical_flip=True, # randomly flip images
)
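# Typical usage (sketch, names assumed from this script): feed the augmenter
# the preprocessed arrays built in main(), e.g.
# data_generator = create_datagen().flow(x_train, y_train_multi, batch_size=32)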
'''
resnet50
'''
def build_model(input_shape):
base_model =ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
#for layer in base_model.layers[:10]:
#layer.trainable = False
#layer.padding='same'
#for layer in base_model.layers[10:]:
#layer.trainable = True
#layer.padding='same'
# x = base_model.get_layer('avg_pool').output
x = base_model.output
x = GlobalAveragePooling2D()(x)
# x = BatchNormalization()(x)
x = Dropout(0.5)(x)
# x = Flatten() (x)
# x = Dropout(0.5)(x)
# x = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = BatchNormalization()(x)
# x = Dropout(0.5)(x)
# x = Dense(32, activation='relu')(x)
# x = Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = Dropout(0.5)(x)
# x = BatchNormalization()(x)
# x = Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
# x = Dropout(0.5)(x)
# x = BatchNormalization()(x)
# x = Dense(512, activation='relu')(x)
# x = LeakyReLU(alpha=0.1)(x)
# x = Dropout(0.3)(x)
#x = Dense(5, activation='softmax')(x)
#model = Model(base_model.input, x)
predictions = Dense(5, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
# for layer in model.layers[:-2]:
# layer.trainable = False
model.compile(
loss='binary_crossentropy',
optimizer=Adam(lr=0.00005),
metrics=['accuracy']
)
return model
# def build_model(input_shape):
# densenet = DenseNet121(
# weights='/home/z5163479/code/adapted_deep_embeddings/DenseNet-BC-121-32-no-top.h5',
# include_top=False,
# input_shape=input_shape
# )
# model = Sequential()
# model.add(densenet)
# model.add(layers.GlobalAveragePooling2D())
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(5, activation='sigmoid'))
# model.compile(
# loss='binary_crossentropy',
# optimizer=Adam(lr=0.00005),
# metrics=['accuracy']
# )
# return model
def get_preds(arr):
mask = arr == 0
return np.clip(np.where(mask.any(1), mask.argmax(1), 5) - 1, 0, 4)
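# Quick illustration of get_preds with hypothetical rows: they use the
# cumulative encoding built in main() below, where grade k sets columns 0..k
# to 1. get_preds finds the first zero and maps each row back to a grade 0-4.
def _check_get_preds():
    arr = np.array([[1, 0, 0, 0, 0],   # grade 0
                    [1, 1, 1, 0, 0],   # grade 2
                    [1, 1, 1, 1, 1]])  # grade 4
    assert (get_preds(arr) == np.array([0, 2, 4])).all()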
def main():
train_df = pd.read_csv('/srv/scratch/z5163479/aptos/labels/trainLabels19.csv')
print(train_df.shape)
# train_df.head()
N = train_df.shape[0]
x_train = np.empty((N, 224, 224, 3), dtype=np.uint8)
for i, image_id in enumerate(tqdm(train_df['id_code'])):
x_train[i, :, :, :] = preprocess_image(
f'/srv/scratch/z5163479/aptos/resized_train_19/{image_id}.jpg'
)
y_train = pd.get_dummies(train_df['diagnosis']).values
print(x_train.shape)
print(y_train.shape)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train,
test_size=0.15,
random_state=2019
)
y_train_multi = np.empty(y_train.shape, dtype=y_train.dtype)
y_train_multi[:, 4] = y_train[:, 4]
for i in range(3, -1, -1):
        y_train_multi[:, i] = np.logical_or(y_train[:, i], y_train_multi[:, i+1])
# Functions relating to forces
import numpy as np
import scipy.optimize as opt
def set_myofilament_stresses(self):
""" Sets initial values """
d = self.check_myofilament_stresses(0.0)
self.cpt_myofil_stress = d['cpt_myofil_stress']
self.hs_stress = d['hs_stress']
def check_myofilament_stresses(self, delta_hsl):
""" cpt_ values are stresses (that is normalized to area) within the
individual components. Other stresses correct for relative areas
of components and are normalized to the relative areas of the
components in the wall """
d = dict()
d['cpt_cb_stress'] = return_cb_stress(self, delta_hsl)
d['cpt_int_pas_stress'] = return_intracellular_passive_stress(self, delta_hsl)
d['cpt_ext_pas_stress'] = return_extracellular_passive_stress(self, delta_hsl)
d['cpt_myofil_stress'] = d['cpt_cb_stress'] + d['cpt_int_pas_stress']
d['cb_stress'] = (1.0 - self.data['prop_fibrosis']) * \
self.data['prop_myofilaments'] * d['cpt_cb_stress']
d['int_pas_stress'] = (1.0 - self.data['prop_fibrosis']) * \
self.data['prop_myofilaments'] * d['cpt_int_pas_stress']
d['ext_pas_stress'] = self.data['prop_fibrosis'] * d['cpt_ext_pas_stress']
d['hs_stress'] = d['cb_stress'] + d['int_pas_stress'] + d['ext_pas_stress']
return d
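# Worked example of the area weighting above (illustrative numbers only): with
# prop_fibrosis = 0.1 and prop_myofilaments = 0.6, the myofilament fraction of
# the wall is (1 - 0.1) * 0.6 = 0.54, so
# hs_stress = 0.54 * (cpt_cb_stress + cpt_int_pas_stress) + 0.1 * cpt_ext_pas_stress.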
def return_cb_stress(self, delta_hsl):
if (self.implementation['kinetic_scheme'] == '3_state_with_SRX'):
bin_pops = self.y[2 + np.arange(0, self.no_of_x_bins)]
cb_stress = \
self.data['cb_number_density'] * \
self.data['k_cb'] * 1e-9 * \
np.sum(bin_pops *
(self.x + self.data['x_ps'] +
(self.implementation['filament_compliance_factor'] *
delta_hsl)))
return cb_stress
if (self.implementation['kinetic_scheme'] == '4_state_with_SRX'):
pre_ind = 2 + np.arange(0, self.no_of_x_bins)
post_ind = 2 + self.no_of_x_bins + np.arange(0, self.no_of_x_bins)
cb_stress = \
self.data['cb_number_density'] * self.data['k_cb'] * 1e-9 * \
(np.sum(self.y[pre_ind] *
(self.x +
(self.implementation['filament_compliance_factor']
* delta_hsl))) +
np.sum(self.y[post_ind] * \
(self.x + self.data['x_ps'] +
(self.implementation['filament_compliance_factor'] *
delta_hsl))))
return cb_stress
def return_intracellular_passive_stress(self, delta_hsl):
if (self.implementation['int_passive_mode'] == 'linear'):
pas_force = self.data['int_passive_linear_k_p'] * \
(self.parent_hs.data['hs_length'] + delta_hsl -
self.data['int_passive_l_slack'])
if (self.implementation['int_passive_mode'] == 'exponential'):
x = self.parent_hs.data['hs_length'] + delta_hsl - \
self.data['int_passive_l_slack']
if (x > 0):
pas_force = self.data['int_passive_exp_sigma'] * \
                (np.exp(x / self.data['int_passive_exp_L']) - 1.0)
# <NAME> 2017
# GMM implementation I made for a computer vision course during my honours degree at Wits
import numpy as np
from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
# These are functions which can be run on GMMs
class fn():
def zero_init(data, K):
lambda_vect = np.full((K), 1.0/K)
# init randomly between (0,1]
# positive semi-def but already is
# sigma_vect = np.full((K), np.var(data)) # diagonal
sigma_list = []
mean_list = []
for k in range(K):
mean = (1.-0.)*np.random.random_sample((data.shape[1])) + 0.
mean_list.append(mean)
sig = (1.0-0.001)*np.random.random_sample((data.shape[1],data.shape[1])) + 0.001
sig = np.dot(sig, sig.T)
sig = np.diag(np.diag(sig))
sigma_list.append(sig)
sigma = np.array(sigma_list)
mean_vect = np.array(mean_list)
# print(mean_vect)
# print(lambda_vect)
return lambda_vect, mean_vect, sigma
def naive_bayes_classifier(data, GMM_fg, GMM_bg, prior, confidence=0.65):
# test_label[i] = np.argmax(p)#(p>confidence)
p1 = GMM_fg.probability(data)
p2 = GMM_bg.probability(data)
l1 = prior
l2 = 1 - prior
prob = np.divide(p1*l1, p1*l1 + p2*l2)
# true if GMM_fg is greater
if (prob > confidence):
return True;
return False;
def classifier(data, GMM_fg, GMM_bg):
# print("test")
p1 = GMM_fg.probability(data)
# print("test: ", p1)
p2 = GMM_bg.probability(data)
# print("test: ", p2)
# true if GMM_fg is greater
if (p1 > p2):
return True;
return False;
def error(test_vector, label_vector, GMM_fg, GMM_bg):
test_label = np.zeros(test_vector.shape[0])
sum = 0
for i in range(test_vector.shape[0]):
test_label[i] = fn.classifier(test_vector.values[i], GMM_fg, GMM_bg)
if test_label[i] != label_vector[i]:
sum = sum + 1
# return np.sum(np.absolute(test_label-label_vector))/(label_vector.shape[0]*label_vector.shape[1])
return sum/label_vector.shape[0]
def bayes_error(test_vector, label_vector, GMM_fg, GMM_bg, prior, confidence=0.65):
test_label = np.zeros(test_vector.shape[0])
sum = 0
for i in range(test_vector.shape[0]):
test_label[i] = fn.naive_bayes_classifier(test_vector.values[i], GMM_fg, GMM_bg, prior, confidence)
if test_label[i] != label_vector[i]:
sum = sum + 1
# return np.sum(np.absolute(test_label-label_vector))/(label_vector.shape[0]*label_vector.shape[1])
return sum/label_vector.shape[0]
class GMM():
def __init__(self, data, K):
self.data = data
# Dimensionality
# self.D = len(data[0])
# Data Size
self.I = data.shape[0]
# Num Gaussians
self.K = K
self.theta = fn.zero_init(self.data, self.K)
# Init Responsibilities [n x K]
self.r = np.zeros((self.I,self.K))
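    # The E-step below fills r[i, k] with the responsibility
    # lambda_k * N(x_i | mu_k, Sigma_k), normalised over k; the M-step then
    # re-estimates lambda, the means and the (diagonal) covariances from r.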
def expectation_step(self):
# print("Expectation Step")
I = self.I #vector length
K = self.K
l = np.zeros((I, K))
r = np.zeros((I, K))
lambda_vect = self.theta[0]
mean_vect = self.theta[1]
sigma_vect = self.theta[2]
# print("Lambdas ", lambda_vect)
# print("Mean ", mean_vect)
# print("Sigmas ", sigma_vect)
# Numerator of Bayes' rule
for k in range(K):
dist = multivariate_normal(mean=mean_vect[k], cov=sigma_vect[k])
sample = dist.pdf(self.data)
# print('sample: ', sample)
l[:,k] = lambda_vect[k]*sample
        # Compute posterior by normalizing ...
        l_k_sum = np.sum(l, axis=1)
        # another hack to deal with singularities
        if np.any(l_k_sum == 0.):
            print("l_k_sum is 0")
for i in range(I):
# r[:][k] = 1.00*l[:][k] / 1.00*l_i_sum
# print "numerator: ",l[:,k]
# print "lisum[k]: ", l_i_sum[k]
# print "r: ", l[:,k]/l_i_sum[k]
r[i,:] = l[i,:]/l_k_sum[i]
# print("r: ", r)
# print("r shape: ", r.shape)
# print("r_sum: ", np.sum(r,axis=0))
self.r = r
def maximization_step(self):
# print("Maximization Step")
I = self.I #vector length
K = self.K
lambda_vect = self.theta[0]
mean_vect = self.theta[1]
sigma_vect = self.theta[2]
sumri = np.sum(self.r, axis=0)
# print("sumri", self.r)
# print "sumri sum", sumri.sum()
lambda_vect = sumri/sumri.sum()
for k in range(K):
# optimize
# r_sum = np.sum(r, axis=0)
# r_k_sum = np.sum(r[:,k], axis=0)
mean_vect[k] = self.r[:,k].dot(self.data) / sumri[k]
for k in range(K):
mean_shift = np.zeros(self.data.shape)
mean_shift = np.subtract(self.data, mean_vect[k])
sig = np.dot(mean_shift.T, np.multiply(self.r[:,k][:,np.newaxis], mean_shift))
sigma_vect[k] = ((sig)) / (sumri[k])
sigma_vect[k] = np.diag(np.diag(sigma_vect[k]))
# print("Lambdas ", lambda_vect)
# print("Mean ", mean_vect)
# print("Sigmas ", sigma_vect)
self.theta = lambda_vect, mean_vect, sigma_vect
def probability(self, data):
lambda_vect = np.copy(self.theta[0])
mean_vect = np.copy(self.theta[1])
sigma_vect = np.copy(self.theta[2])
# p = np.zeros(K)
p = 0.0
for k in range(self.K):
sample = multivariate_normal.pdf(data, mean=mean_vect[k], cov=sigma_vect[k])
# print(lambda_vect)
p = p + (lambda_vect[k]*sample)
return p
def calc_log_likelihood(self):
I = self.I #vector length
lambda_vect = self.theta[0]
mean_vect = self.theta[1]
sigma_vect = self.theta[2]
tol = 5000 # todo?
loglikelihood = 0.0
# for i in xrange(I):
# inner = 0.0
# for k in xrange(K):
# dist = multivariate_normal(mean=mean_vect[k], cov=sigma_vect[k]).pdf(data[i])
# inner = inner + (lambda_vect[k] * dist)
# if inner != 0:
# loglikelihood = loglikelihood + np.log(inner)
inner_sum = 0.0
for k in range(self.K):
dist = multivariate_normal(mean=mean_vect[k], cov=sigma_vect[k])
samples = dist.pdf(self.data)
inner_sum = inner_sum + (lambda_vect[k]*samples)
loglikelihood = np.sum(np.log(inner_sum), axis=0)
print("loglikelihood: %f"%(loglikelihood))
return loglikelihood
def train(self, max_count = 255, epsilon = 0.001):
count = 0
        prev_mean = np.zeros((self.K, self.data.shape[1]))
#!/usr/bin/env python
import os, sys
from tensorflow import keras
os.environ['KERAS_BACKEND'] = 'tensorflow'
from sklearn.metrics import auc
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from sklearn.utils import shuffle
from tensorflow.keras.models import Model, load_model
#from subtlenet.backend.keras_objects import *
#from subtlenet.backend.losses import *
from tensorflow.keras.layers import Dense, BatchNormalization, Input, Dropout, Activation, concatenate, GRU,LSTM, Add, Conv1D, Dot, Lambda
from tensorflow.keras import utils #import np_utils
from tensorflow.keras import optimizers
from tensorflow.keras.optimizers import Adam, Nadam, SGD
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.python.framework import graph_util, graph_io
import numpy as np
#import pandas as pd
from collections import namedtuple
#from keras import regularizers
os.environ['KERAS_BACKEND'] = 'tensorflow'
import subtlenet.utils as subtlenetutils
#utils.set_processor('gpu')
np.set_printoptions(threshold=10000)
VALSPLIT = 0.1 #0.7
MULTICLASS = True
REGRESSION = False
np.random.seed(10)
basedir = '/home/jeffkrupa/13Aug21_v1_tol1p5_dropSig' #14Apr21_preUL_2017_v2/'
Nqcd = 1120000
Nsig = 1120000
Nparts = 100
NSVs = 5
params = 5
Wlo = 0.4
Whi = 3.
def newShape(X,name='parts'):
Ndims = Nparts if 'parts' in name else NSVs
Xnew = np.zeros((X.shape[0], Ndims, int(X.shape[1]/Ndims)),dtype=np.float16)
for i in range(0,X.shape[0]):
Xnew[i] = np.reshape(X[i],(int(X.shape[1]/Ndims),Ndims)).T
if 'parts' in name:
return Xnew[:,:60,:]
else:
return Xnew
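# As written, newShape treats each flat row as storing every feature
# contiguously over all Nparts (or NSVs) slots, i.e.
# [feat0_1..feat0_N, feat1_1..feat1_N, ...], so the reshape+transpose yields a
# (N_slots, n_features) block; particle arrays are then truncated to the first
# 60 particles.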
def select(X,N):
np.random.shuffle(X)
return X[:N]
def _make_parent(path):
os.system('mkdir -p %s'%('/'.join(path.split('/')[:-1])))
class Sample(object):
def __init__(self, name, base, max_Y):
self.name = name
name = name
self.Yhat = {}
N = Nqcd if 'QCD' in name else Nsig
self.X = np.load('%s/%s_%s_%s.npy'%(base, name, 'x', args.inputtag)).astype(np.float16)[:N][:,:]
#self.SS = np.load('%s/%s_%s%s.npy'%(base, name, 'ss', args.inputtag))[:]
self.K = np.load('%s/%s_%s_%s.npy'%(base, name, 'w', args.inputtag))[:N]
#self.Y = np_utils.to_categorical((np.load('%s/%s_%s_%s.npy'%(base, name, 'y', args.inputtag))[:N] > 0).astype(np.int), 2)
#try:
# self.Y = self.Y[:,0]
#except:
# pass
'''
if args.SV:
self.SV_Y = np.load('%s/%s_%s_%s.npy'%(base,name,'Y_flavor_tags',args.inputtag))[:N]
self.SV_Y = np.sum(self.SV_Y,axis=1)
self.SV_Y[self.SV_Y<0] = 0
self.SV_Y[self.SV_Y>2] = 2
##print(self.Y[:10],self.SV_Y[:10])
#print('nprongs', self.Y.T.shape, self.Y.T)
#print('nbtags', self.SV_Y.T.shape,self.SV_Y.T)
#print(np.vstack((self.Y,self.SV_Y)).T)
#self.Y = np.vstack((self.Y,self.SV_Y)).T
'''
#self.Y = np.load('%s/%s_%s.npy'%(base, name, 'y'))[:]
#try:
#self.Y = np.matrix(self.Y[:,0]).T
#except:
# pass
#print(self.Y[:10,])
if 'QCD' in name:
ptbins = np.linspace(200,1200,50)
msdbins = np.linspace(40,400,50)
def getbin(a,b):
return (ptbins.searchsorted(a), msdbins.searchsorted(b))
getbinfunc = np.vectorize(getbin)
z = getbinfunc(self.K[:,0],self.K[:,1])
mapping = np.load('%s/weights_%s.npy'%(base,args.inputtag))
mapping = np.nan_to_num(mapping, nan=1,posinf=1,neginf=1)
#print(mapping)
def run_mapping(a,b):
return mapping[a,b]
return_mapping = np.vectorize(run_mapping)
self.W = return_mapping(z[0],z[1])
#self.W = np.nan_to_num(return_mapping(z[0],z[1]),nan=1, posinf=1,neginf=1)
self.W[self.W < Wlo] = Wlo
self.W[self.W > Whi] = Whi
#self.Y = np.load('%s/%s_%s_%s.npy'%(base,name, 'y', args.inputtag))[:N]
else:
self.W = np.ones(self.K.shape[0])
#Y = np.load('%s/%s_%s_%s.npy'%(base,name,'Y_flavor_tags',args.inputtag))[:N]
#self.Y = np.zeros(shape=(Y.shape[0],1))
#if args.blvqcd:
# self.Y[Y[:,0]==2] = 2
# self.Y[Y[:,1]==2] = 1
# self.Y[Y[:,2]==2] = 1
print('sample',self.name)
print('self.W', self.W[:100])
print('msd', self.K[:100,1])
print('pt', self.K[:100,0])
#self.Y = np.load('%s/%s_%s_%s.npy'%(base,name, 'y', args.inputtag))[:N]
#self.Y[self.Y==5]=1
#self.Y = utils.to_categorical(self.Y,num_classes=2)
#print(self.name,'self.W/self.Y', self.W, self.Y)
if 1: #args.SV:
self.SV_X = np.load('%s/%s_%s_%s.npy'%(base, name, 'SV_x',args.inputtag))[:N]
self.SV_Y = np.load('%s/%s_%s_%s.npy'%(base,name,'Y_flavor_tags',args.inputtag))[:N]
#self.SV_X = select(self.SV_X,N)
print(self.SV_Y,np.zeros(shape=(len(self.SV_Y),1)))
if 'QCD' in self.name:
self.SV_Y = np.hstack((self.SV_Y, np.ones(shape=(len(self.SV_Y),1))))
else:
self.SV_Y = np.hstack((self.SV_Y, np.zeros(shape=(len(self.SV_Y),1))))
self.SV_Y[self.SV_Y==2] = 1
print('self.SV_Y',self.SV_Y)
'''sel = (1==1)# (self.K[:,0] < 1000)
#self.Y = self.Y[sel]
self.K = self.K[sel]
self.W = self.W[sel]
self.SV_X = self.SV_X[sel]
self.SV_Y = self.SV_Y[sel]
self.X = self.X[sel]
'''
#if not args.SV: self.Y = np.transpose(self.Y)
#self.W = np.ones(self.Y.shape[0])
#self.X = select(self.X,N)
#self.SS = select(self.SS,N)
#self.K = select(self.K,N)
#self.Y = select(self.Y,N)
#self.W = select(self.W,N)
self.idx = np.random.permutation(len(self.W))#.shape[0])
#print(name, self.X, self.Y, self.W)
@property
def tidx(self):
if VALSPLIT == 1 or VALSPLIT == 0:
return self.idx
else:
return self.idx[int(VALSPLIT*len(self.idx)):]
@property
def vidx(self):
if VALSPLIT == 1 or VALSPLIT == 0:
return self.idx
else:
return self.idx[:int(VALSPLIT*len(self.idx))]
def infer(self, model):
if 'IN' in model.name:
self.X = newShape(self.X)#np.reshape(self.X, (self.X.shape[0], self.X.shape[1]/Nparts, Nparts))
if args.SV: self.SV_X = newShape(self.SV_X, 'SV')
print(self.X.shape, self.SV_X.shape)
if args.toCategorical:
idconv = {211.:1, 13.:2, 22.:3, 11.:4, 130.:5, 1.:6, 2.:7, 3.:8, 4.:9,
5.:10, -211.:1, -13.:2,
-11.:4, -1.:-6, -2.:7, -3.:8, -4.:9, -5.:10, 0.:0}
self.X[:,:,-1] = np.vectorize(idconv.__getitem__)(self.X[:,:,-1])
Xidlist = np.abs(self.X[:,:,-1]).astype(int)
self.X = np.concatenate([self.X[:,:,:-1],np.eye(11)[Xidlist]],axis=-1,dtype=np.float16)#
#print(self.X.shape[0],self.X.shape[1])
if 'Dense' in model.name: self.X = np.reshape(self.X, (self.X.shape[0],self.X.shape[1]))
if not args.SV: self.Yhat[model.name] = model.predict(self.X)
else:
#print(self.X, self.SV_X)
self.Yhat[model.name] = model.predict([self.X,self.SV_X])
def calc2DWeights(sig, bkg):
NBINS=50#-1
MMIN=40
MMAX=400
PTMIN=200
PTMAX=1200
sig2d_msd_pt, msdedges, ptedges, imsig = plt.hist2d(sig[:,1], sig[:,0], bins=[NBINS,NBINS], range=[[MMIN,MMAX],[PTMIN,PTMAX]])
bkg2d_msd_pt, _,_, imbkg = plt.hist2d(bkg[:,1], bkg[:,0], bins=[NBINS,NBINS], range=[[MMIN,MMAX],[PTMIN,PTMAX]])
weights2d = np.divide(sig2d_msd_pt,bkg2d_msd_pt)
print(msdedges,ptedges)
def getbin(a,b):
return (msdedges.searchsorted(a), ptedges.searchsorted(b))
getbinfunc = np.vectorize(getbin)
z = getbinfunc(bkg[:,1],bkg[:,0])
print(bkg[:10,1],bkg[:10,0])
print('z',z[:10])#[z<2])
print('# sig, # bkg=', len(bkg[:,1]))
print('# sig: ',len(sig), '# bkg: ',len(bkg))
#mapping = np.load('%s/weights_%s.npy'%(basedir,args.inputtag))
#mapping = np.nan_to_num(mapping, nan=1,posinf=1,neginf=1)
#print(mapping)
def run_mapping(a,b):
return weights2d[a-1,b-1]
return_mapping = np.vectorize(run_mapping)
bkgWeights = return_mapping(z[0],z[1])
return bkgWeights
#self.W = np.nan_to_num(return_mapping(z[0],z[1]),nan=1, posinf=1,neginf=1)
class ClassModel(object):
def __init__(self, n_inputs, h_hidden, n_targets, samples, model, modeldir="."):
self._hidden = 0
self.name = model
self.n_inputs = n_inputs
self.n_targets = n_targets if MULTICLASS else 2
self.n_hidden = n_hidden
print([s.X[:][s.tidx] for s in samples])
print([s.X[:][s.tidx].shape for s in samples])
self.tX = np.vstack([s.X[:][s.tidx] for s in samples])
self.vX = np.vstack([s.X[:][s.vidx] for s in samples])
for s in samples:
if 'Vector' in s.name:
self.tW = np.ones(shape=(s.X[s.tidx].shape[0]))
self.vW = np.ones(shape=(s.X[s.vidx].shape[0]))
print('Vector self.tW', self.tW)
if 'QCD' in s.name:
bkgWeights = calc2DWeights(samples[0].K, samples[1].K)
self.tW = np.concatenate([self.tW,bkgWeights[s.tidx]])
self.vW = np.concatenate([self.vW,bkgWeights[s.vidx]])
s.W = bkgWeights
print('QCD self.tW/s.W', self.tW[-10:],s.W[-10:])
self.tW = np.nan_to_num(self.tW, nan=1,posinf=1,neginf=1)
self.vW = np.nan_to_num(self.vW, nan=1,posinf=1,neginf=1)
self.tK = np.vstack([s.K[:][s.tidx] for s in samples])
self.vK = np.vstack([s.K[:][s.vidx] for s in samples])
print(self.tW[-10:], self.tK[-10:,0], self.tK[-10:,1])
#self.tY = np.vstack([s.Y[:][s.tidx] for s in samples])
#self.vY = np.vstack([s.Y[:][s.vidx] for s in samples])
#self.tY = utils.to_categorical(self.tY)
#self.vY = utils.to_categorical(self.vY)
self.tSV_Y = np.vstack([s.SV_Y[:][s.tidx] for s in samples])
self.vSV_Y = np.vstack([s.SV_Y[:][s.vidx] for s in samples])
#print('self.tY/self.tSV_Y', self.tY, self.tSV_Y)
self.tX = newShape(self.tX)
self.vX = newShape(self.vX)
if args.toCategorical:
idconv = {211.:1, 13.:2, 22.:3, 11.:4, 130.:5, 1.:6, 2.:7, 3.:8, 4.:9,
5.:10, -211.:1, -13.:2,
-11.:4, -1.:-6, -2.:7, -3.:8, -4.:9, -5.:10, 0.:0}
self.tX[:,:,-1] = np.vectorize(idconv.__getitem__)(self.tX[:,:,-1])
self.vX[:,:,-1] = np.vectorize(idconv.__getitem__)(self.vX[:,:,-1])
tXidlist = np.abs(self.tX[:,:,-1]).astype(int)
vXidlist = np.abs(self.vX[:,:,-1]).astype(int)
self.tX = np.concatenate([self.tX[:,:,:-1],np.eye(11)[tXidlist]],axis=-1,dtype=np.float16)#
self.vX = np.concatenate([self.vX[:,:,:-1],np.eye(11)[vXidlist]],axis=-1,dtype=np.float16)#
#print(self.tX[0])
if args.SV:
self.tSV_X = np.vstack([s.SV_X[:][s.tidx] for s in samples])
self.vSV_X = np.vstack([s.SV_X[:][s.vidx] for s in samples])
self.tSV_X = newShape(self.tSV_X,'SV')
self.vSV_X = newShape(self.vSV_X,'SV')
print('self.vX', self.vX)
if 'IN' in self.name:
particlesConsidered = self.tX.shape[1]
if args.SV:
svConsidered = self.tSV_X.shape[1]
            #Defines the receiving matrix for particles
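            # As constructed here, RR has shape (N, N*(N-1)) for N considered
            # particles: column e is 1 in the row of the particle that receives
            # directed edge e (edges are grouped in blocks of N-1 per receiver),
            # and RS below marks the sending particle of each edge.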
RR=[]
for i in range(particlesConsidered):
row=[]
for j in range(particlesConsidered*(particlesConsidered-1)):
if j in range(i*(particlesConsidered-1),(i+1)*(particlesConsidered-1)):
row.append(1.0)
else:
row.append(0.0)
RR.append(row)
RR=np.array(RR)
RR=np.float32(RR)
RRT=np.transpose(RR)
RST=[]
for i in range(particlesConsidered):
for j in range(particlesConsidered):
row=[]
for k in range(particlesConsidered):
if k == j:
row.append(1.0)
else:
row.append(0.0)
RST.append(row)
rowsToRemove=[]
for i in range(particlesConsidered):
rowsToRemove.append(i*(particlesConsidered+1))
RST=np.array(RST)
RST=np.float32(RST)
RST=np.delete(RST,rowsToRemove,0)
RS=np.transpose(RST)
            #Defines the receiving matrix for the bipartite particle and secondary vertex graph
if args.SV:
RK=[]
for i in range(particlesConsidered):
row=[]
for j in range(particlesConsidered*svConsidered):
if j in range(i*svConsidered,(i+1)*svConsidered):
row.append(1.0)
else:
row.append(0.0)
RK.append(row)
RK=np.array(RK)
RK=np.float32(RK)
RKT=np.transpose(RK)
#Defines the sending matrix for the bipartite particle and secondary vertex graph
if args.SV:
RV=[]
for i in range(svConsidered):
row=[]
for j in range(particlesConsidered*svConsidered):
if j % svConsidered == i:
row.append(1.0)
else:
row.append(0.0)
RV.append(row)
RV=np.array(RV)
                RV=np.float32(RV)
#!/usr/bin/env python3
#####################################################################
# This script is used to validate weather reading and postprocessing
# for tilted and oriented surfaces according to the BESTEST standard.
# It will create a folder in
# Resources/Data/BoundaryConditions/Validation/BESTEST/
# called 'results', which will contain the .mat files and the .json files,
# or just the .json files.
#
# This script creates folders in the temporary directory.
# It copies the library from a local GitHub repository,
# executes simulations and prints results in current working directory
#
# <EMAIL>
#####################################################################
import json
import os
import shutil
import numpy as np
import copy
import sys
from pathlib import Path
from datetime import date
import stat
import git
# Check if it just implements post-process (from .mat files to Json files)
POST_PROCESS_ONLY = False
# Erase anything but the Json file results in the ResultJson folder and .mat
# files
DEL_EVR = False
# Path to local library copy (This assumes the script is run inside
# the library folder)
script_path = os.path.dirname(os.path.realpath(__file__))
path = Path(script_path)
levels_up = 5 # Goes up five levels to get the IBPSA folder
LIBPATH = str(path.parents[levels_up - 1])
# Simulator, Dymola
TOOL = 'dymola'
# Modelica Library working branch
# BRANCH = 'master'
try:
BRANCH = git.Repo(search_parent_directories=True).active_branch.name
except TypeError as e:
# Branch is detached from head. This is if one run "git checkout
# commit_hash"
BRANCH = None
# Software specifications
# Set library_name to IBPSA, or Buildings, etc.
library_name = LIBPATH.split(os.path.sep)[-1]
library_version = 'v4.0.0dev'
modeler_organization = 'LBNL'
modeler_organization_for_tables_and_charts = 'LBNL'
program_name_for_tables_and_charts = library_name
results_submission_date = str(date.today().strftime('%m/%d/%Y'))
# Make sure script is run from correct directory
if os.path.abspath(".").split(os.path.sep)[-1] != library_name:
raise ValueError(f"Script must be run from directory \
{library_name}")
# List of cases and result cases
PACKAGES = f'{library_name}.BoundaryConditions.Validation.BESTEST'
CASES = ['WD100', 'WD200', 'WD300', 'WD400', 'WD500', 'WD600']
result_vars = [
'azi000til00.H',
'azi000til00.HPer',
'azi000til00.HDir.H',
'azi000til00.HDiffIso.H',
'azi000til00.HDiffPer.H',
'azi000til90.H',
'azi000til90.HPer',
'azi000til90.HDir.H',
'azi000til90.HDiffIso.H',
'azi000til90.HDiffPer.H',
'azi270til90.H',
'azi270til90.HPer',
'azi270til90.HDir.H',
'azi270til90.HDiffIso.H',
'azi270til90.HDiffPer.H',
'azi180til90.H',
'azi180til90.HPer',
'azi180til90.HDir.H',
'azi180til90.HDiffIso.H',
'azi180til90.HDiffPer.H',
'azi090til90.H',
'azi090til90.HPer',
'azi090til90.HDir.H',
'azi090til90.HDiffIso.H',
'azi090til90.HDiffPer.H',
'azi315til90.H',
'azi315til90.HPer',
'azi315til90.HDir.H',
'azi315til90.HDiffIso.H',
'azi315til90.HDiffPer.H',
'azi045til90.H',
'azi045til90.HDir.H',
'azi045til90.HDiffIso.H',
'azi045til90.HDiffPer.H',
'azi270til30.H',
'azi270til30.HDir.H',
'azi270til30.HDiffIso.H',
'azi270til30.HDiffPer.H',
'azi000til30.H',
'azi000til30.HDir.H',
'azi000til30.HDiffIso.H',
'azi000til30.HDiffPer.H',
'azi000til30.HPer',
'azi090til30.H',
'azi090til30.HPer',
'azi090til30.HDir.H',
'azi090til30.HDiffPer.H',
'toDryAir.XiDry',
'weaBusHHorIR.pAtm',
'weaBusHHorIR.TDryBul',
'weaBusHHorIR.relHum',
'weaBusHHorIR.TBlaSky',
'weaBusHHorIR.TDewPoi',
'weaBusHHorIR.TWetBul',
'weaBusHHorIR.nOpa',
'weaBusHHorIR.nTot',
'weaBusHHorIR.winDir',
'weaBusHHorIR.winSpe',
'weaBusTDryBulTDewPoiOpa.TBlaSky',
'azi270til30.HPer',
'azi045til90.HPer',
'azi090til30.HDiffIso.H']
def create_working_directory():
''' Create working directory in temp folder
'''
import tempfile
import getpass
wor_dir = tempfile.mkdtemp(prefix='tmp_Weather_Bestest' +
getpass.getuser())
if CODE_VERBOSE:
print("Created directory {}".format(wor_dir))
return wor_dir
def checkout_repository(working_directory, case_dict):
''' The function will download the repository from GitHub or a copy from a
local library to the temporary working directory
:param working_directory: Current working directory
    :param case_dict: options dictionary; 'from_git_hub' selects whether the
        library repository is taken from GitHub or from a local copy, 'BRANCH'
        specifies the GitHub branch and 'LIBPATH' gives the local library path
'''
import os
from git import Repo
import git
import time
d = {}
d['lib_name'] = case_dict['lib_name']
if case_dict['from_git_hub']:
git_url = git.Repo(search_parent_directories=True).remotes.origin.url
r = Repo.clone_from(git_url, working_directory)
g = git.Git(working_directory)
g.checkout(BRANCH)
if case_dict['CODE_VERBOSE']:
print("Checking out repository IBPSA repository branch \
{}".format(BRANCH))
# Print commit
d['branch'] = case_dict['BRANCH']
d['commit'] = str(r.active_branch.commit)
headcommit = r.head.commit
time.asctime(time.gmtime(headcommit.committed_date))
        d['commit_time'] = time.strftime(
            "%m/%d/%Y", time.gmtime(headcommit.committed_date))
else:
# This uses the local copy of the repository
des = os.path.join(working_directory, d['lib_name'])
shutil.copytree(case_dict['LIBPATH'], des)
if case_dict['CODE_VERBOSE']:
print("Since a local copy of the library is used, remember to manually add software version and commit.")
d['branch'] = 'AddManually'
d['commit'] = 'AddManually'
d['commit_time'] = 'AddManually'
return d
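# Illustration added for clarity (not part of the original script): the keys
# that checkout_repository() reads from case_dict. The literal values below are
# placeholders, not the settings used for the actual BESTEST runs.
_EXAMPLE_CHECKOUT_CASE_DICT = {
    'lib_name': 'IBPSA',           # destination folder name of the library
    'from_git_hub': False,         # False -> copy the local library at LIBPATH
    'BRANCH': None,                # branch to check out when cloning
    'LIBPATH': '/path/to/IBPSA',   # placeholder local library path
    'CODE_VERBOSE': True,          # print progress information
}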
def get_result_directory():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "results")
def get_cases(case_dict):
''' Return the simulation cases that are used for the case study.
The cases are stored in this function as they are used
for the simulation and for the post processing.
    :param case_dict: Dictionary with the options for the Dymola simulations.
    '''
cases = list()
for case in case_dict["CASES"]:
wor_dir = create_working_directory()
cases.append(
{'model': case_dict["PACKAGES"] + '.' + case,
"name": case,
'wor_dir': wor_dir,
"tex_label": "p",
"start_time": case_dict["start_time"],
"stop_time": case_dict["StopTime"],
"solver": case_dict["Solver"],
"set_tolerance": case_dict["set_tolerance"],
"show_GUI": case_dict["show_GUI"],
"n_intervals": case_dict["n_intervals"],
"DEL_EVR": case_dict["DEL_EVR"],
"CODE_VERBOSE": case_dict["CODE_VERBOSE"]})
return cases
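# Illustration added for clarity (not part of the original script): besides
# "CASES" and "PACKAGES", get_cases() expects the following simulation settings
# in case_dict (key names exactly as used above):
#   "start_time", "StopTime", "Solver", "set_tolerance", "show_GUI",
#   "n_intervals", "DEL_EVR", "CODE_VERBOSE"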
def _simulate(spec):
'''
    This function executes the simulation of a specific case model, stores
    the result in the simulation working directory and then copies the result
    files to the results directory.
:param spec: dictionary with the simulation specifications
'''
import glob
from buildingspy.simulate.Dymola import Simulator
# Write git information if the simulation is based on a github checkout
if 'git' in spec:
with open("version.txt", "w+") as text_file:
text_file.write("branch={}\n".format(spec['git']['branch']))
text_file.write("commit={}\n".format(spec['git']['commit']))
# Change to working directory
cur_dir = path.cwd()
wor_dir = spec['wor_dir']
os.chdir(wor_dir)
# Set MODELICAPATH
#os.environ['MODELICAPATH'] = LIBPATH
# Set Model to simulate, the output dir and the package directory
s = Simulator(spec["model"])
# Add all necessary parameters from Case Dict
s.addPreProcessingStatement("OutputCPUtime:= true;")
s.addPreProcessingStatement("// For Dymola 2022 (or higher) unload MSL so that MSL from uses statement is loaded")
s.addPreProcessingStatement("if DymolaVersionNumber() <> 2021.0 then eraseClasses({\"Modelica\"}); end if;")
s.setSolver(spec["solver"])
if 'parameters' in spec:
s.addParameters(spec['parameters'])
s.setStartTime(spec["start_time"])
s.setStopTime(spec["stop_time"])
s.setNumberOfIntervals(spec["n_intervals"])
s.setTolerance(spec["set_tolerance"])
s.showGUI(spec["show_GUI"])
if spec['CODE_VERBOSE']:
print("Starting simulation in {}".format(path.cwd()))
s.simulate()
# Change back to current directory
os.chdir(cur_dir)
# Copy results back
res_des = os.path.join(get_result_directory(), spec["name"])
def _copy_results(wor_dir, des_dir):
os.mkdir(des_dir)
if spec['CODE_VERBOSE']:
print(f"Running glob for .mat in '{wor_dir}'")
files = glob.glob(os.path.join(wor_dir, '*.mat'))
files.extend(glob.glob(os.path.join(wor_dir, '*.log')))
for file in files:
if spec['CODE_VERBOSE']:
                print(f"Copying result file '{file}' to '{res_des}'")
shutil.copy(file, res_des)
# Removing old results directory
if os.path.isdir(res_des):
shutil.rmtree(res_des)
_copy_results(wor_dir, res_des)
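# Hedged sketch (not part of the original script): _simulate() is self-contained,
# so the case list returned by get_cases() can simply be mapped over it. Whether
# the original runner executes the cases sequentially or in parallel is an
# assumption left open here.
def _example_run_all_cases(cases):
    for spec in cases:
        _simulate(spec)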
def _organize_cases(mat_dir,case_dict):
    ''' Create a list of dictionaries. Each dictionary includes the case name
    and the .mat file path.
    :param mat_dir: Path to the directory that contains the .mat files.
    :param case_dict: Dictionary with the general simulation options and
        other parameters.
    '''
mat_files = list()
if case_dict['CODE_VERBOSE']:
print(f"Searching for .mat files in {mat_dir}.")
for r, _, f in os.walk(mat_dir):
for file in f:
if '.mat' in file:
mat_files.append(os.path.join(r, file))
if case_dict['CODE_VERBOSE']:
print(f"Appending {os.path.join(r, file)} to mat_files.")
case_list = list()
if len(CASES) == len(mat_files):
for case in CASES:
temp = {'case': case}
for mat_file in mat_files:
# mat_filen = os.path.basename(mat_file)
tester = case + '.mat'
if tester in mat_file:
temp['mat_file'] = os.path.join(mat_dir, mat_file)
case_list.append(temp)
else:
raise ValueError(
f"*** No result file was found. Check the simulations. len(CASES) = {len(CASES)}, len(mat_files) = {len(mat_files)}")
return case_list
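# Illustration added for clarity (not part of the original script):
# _organize_cases() returns a list shaped like
#   [{'case': 'WD100', 'mat_file': '<mat_dir>/.../WD100.mat'},
#    {'case': 'WD200', 'mat_file': '<mat_dir>/.../WD200.mat'}, ...]
# which is the format consumed by weather_json() further below.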
def _extract_data(mat_file, re_val):
"""
Extract time series data from mat file.
:param mat_file: Path of .mat output file
    :param re_val: List of variables whose data should be extracted.
"""
from buildingspy.io.outputfile import Reader
nPoi = case_dict["n_intervals"]
try:
if case_dict['CODE_VERBOSE']:
print(f"**** Extracting {mat_file}")
r = Reader(mat_file, TOOL)
except IOError:
raise ValueError("Failed to read {}.".format(mat_file))
result = list()
for var in re_val:
time = []
val = []
try:
var_mat = var
(time, val) = r.values(var_mat)
timen, valn = clean_time_series(time, val, nPoi)
except KeyError:
raise ValueError("Result {} does not have variable {}."
.format(mat_file, var))
# Convert variable to compact format to save disk space.
temp = {'variable': var,
'time': timen,
'value': valn}
result.append(temp)
return result
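# Illustration added for clarity (not part of the original script):
# _extract_data() returns one entry per requested variable, e.g.
#   [{'variable': 'weaBusHHorIR.TDryBul', 'time': <ndarray>, 'value': <ndarray>},
#    ...]
# with the time series already cleaned by clean_time_series().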
def clean_time_series(time, val, nPoi):
"""
    Clean duplicated time values and check against the wanted number of points nPoi.
:param time: Time.
:param val: Variable values.
:param nPoi: Number of result points.
"""
import numpy as np
# Create shift array
Shift = np.array([0.0], dtype='f')
# Shift time to right and left and subtract
time_sr = np.concatenate((Shift, time))
time_sl = np.concatenate((time, Shift))
time_d = time_sl - time_sr
time_dn = time_d[0:-1]
# Get new values for time and val
tol = 1E-5
timen = time[time_dn > tol]
valn = val[time_dn > tol]
if len(timen) != nPoi:
raise ValueError(
"Error: In clean_time_series, length and number of results \
points do not match.")
return timen, valn
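# Hedged worked example (not part of the original script): a sample survives
# clean_time_series() only if its time stamp exceeds the previous one (the first
# sample is compared against 0), so repeated event time stamps written by Dymola
# are dropped, keeping the first occurrence, and so is a leading t = 0 sample.
#   time = np.array([0.0, 3600.0, 3600.0, 7200.0])
#   val  = np.array([1.0,    2.0,    2.5,    3.0])
#   clean_time_series(time, val, nPoi=2)
#   -> (array([3600., 7200.]), array([2., 3.]))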
def weather_json(res_form, Matfd, case_dict):
"""
    This function takes the results and writes them in the required BESTEST
    json format.
    :param res_form: Template dictionary in the target json format.
    :param Matfd: List of the result .mat files and their paths.
    :param case_dict: Dictionary that stores, among others, the list of result
        variables "result_vars".
"""
# List of type of results
# Taking hourly variables
res_fin = copy.deepcopy(res_form)
for dic in Matfd:
mat_file = dic["mat_file"]
results = _extract_data(mat_file, case_dict["result_vars"])
k = 0
for result in results:
resSplit = result['variable'].split('.')
if resSplit[-1] in 'TDryBul_TBlaSky_TWetBul_TDewPoi':
# Pass from K to °C
results[k]['value'] = results[k]['value'] - 273.15
elif 'relHum' in resSplit[-1]:
# Pass from [0,1] to %
results[k]['value'] = results[k]['value'] * 100
elif 'pAtm' in resSplit[-1]:
# Pass from Pa to mbar
results[k]['value'] = results[k]['value'] / 100
elif 'winDir' in resSplit[-1]:
# Pass from rad to °
Pi = 3.141592653589793
results[k]['value'] = results[k]['value'] * 180 / Pi
elif ('nOpa' in resSplit[-1]) or ('nTot' in resSplit[-1]):
# Sky coverage from [0-1] to tenth of sky
results[k]['value'] = results[k]['value'] * 10
k += 1
map_dymola_and_json(results, dic['case'], res_fin, case_dict)
return res_fin
def map_dymola_and_json(results, case, res_fin, case_dict):
"""
This function couples the .mat file variable with the final .json variable
:param results: Result obtained from the _extract_data function
    :param case: Name of the BESTEST case (e.g. 'WD100')
    :param res_fin: Dictionary with the same format as the desired json file
    :param case_dict: Dictionary that stores TestN, which selects the .json
        file format to be used
"""
dict_hourly = [{'json': 'dry_bulb_temperature',
'mat': 'weaBusHHorIR.TDryBul'},
{'json': 'relative_humidity',
'mat': 'weaBusHHorIR.relHum'},
{'json': 'humidity_ratio',
'mat': 'toDryAir.XiDry'},
{'json': 'dewpoint_temperature',
'mat': 'weaBusHHorIR.TDewPoi'},
{'json': 'wet_bulb_temperature',
'mat': 'weaBusHHorIR.TWetBul'},
{'json': 'wind_speed',
'mat': 'weaBusHHorIR.winSpe'},
{'json': 'wind_direction',
'mat': 'weaBusHHorIR.winDir'},
{'json': 'station_pressure',
'mat': 'weaBusHHorIR.pAtm'},
{'json': 'total_cloud_cover',
'mat': 'weaBusHHorIR.nTot'},
{'json': 'opaque_cloud_cover',
'mat': 'weaBusHHorIR.nOpa'},
{'json': 'sky_temperature',
'matHor': 'weaBusHHorIR.TBlaSky',
'matDew': 'weaBusTDryBulTDewPoiOpa.TBlaSky'},
{'json': 'total_horizontal_radiation',
'matIso': 'azi000til00.H',
'matPer': 'azi000til00.HPer'},
{'json': 'beam_horizontal_radiation',
'mat': 'azi000til00.HDir.H'},
{'json': 'diffuse_horizontal_radiation',
'matIso': 'azi000til00.HDiffIso.H',
'matPer': 'azi000til00.HDiffPer.H'},
{'json': 'total_radiation_s_90',
'matIso': 'azi000til90.H',
'matPer': 'azi000til90.HPer'},
{'json': 'beam_radiation_s_90',
'mat': 'azi000til90.HDir.H'},
{'json': 'diffuse_radiation_s_90',
'matIso': 'azi000til90.HDiffIso.H',
'matPer': 'azi000til90.HDiffPer.H'},
{'json': 'total_radiation_e_90',
'matIso': 'azi270til90.H',
'matPer': 'azi270til90.HPer'},
{'json': 'beam_radiation_e_90',
'mat': 'azi270til90.HDir.H'},
{'json': 'diffuse_radiation_e_90',
'matIso': 'azi270til90.HDiffIso.H',
'matPer': 'azi270til90.HDiffPer.H'},
{'json': 'total_radiation_n_90',
'matIso': 'azi180til90.H',
'matPer': 'azi180til90.HPer'},
{'json': 'beam_radiation_n_90',
'mat': 'azi180til90.HDir.H'},
{'json': 'diffuse_radiation_n_90',
'matIso': 'azi180til90.HDiffIso.H',
'matPer': 'azi180til90.HDiffPer.H'},
{'json': 'total_radiation_w_90',
'matIso': 'azi090til90.H',
'matPer': 'azi090til90.HPer'},
{'json': 'beam_radiation_w_90',
'mat': 'azi090til90.HDir.H'},
{'json': 'diffuse_radiation_w_90',
'matIso': 'azi090til90.HDiffIso.H',
'matPer': 'azi090til90.HDiffPer.H'},
{'json': 'total_radiation_45_e_90',
'matIso': 'azi315til90.H',
'matPer': 'azi315til90.HPer'},
{'json': 'beam_radiation_45_e_90',
'mat': 'azi315til90.HDir.H'},
{'json': 'diffuse_radiation_45_e_90',
'matIso': 'azi315til90.HDiffIso.H',
'matPer': 'azi315til90.HDiffPer.H'},
{'json': 'total_radiation_45_w_90',
'matIso': 'azi045til90.H',
'matPer': 'azi045til90.HPer'},
{'json': 'beam_radiation_45_w_90',
'mat': 'azi045til90.HDir.H'},
{'json': 'diffuse_radiation_45_w_90',
'matIso': 'azi045til90.HDiffIso.H',
'matPer': 'azi045til90.HDiffPer.H'},
{'json': 'total_radiation_e_30',
'matIso': 'azi270til30.H',
'matPer': 'azi270til30.HPer'},
{'json': 'beam_radiation_e_30',
'mat': 'azi270til30.HDir.H'},
{'json': 'diffuse_radiation_e_30',
'matIso': 'azi270til30.HDiffIso.H',
'matPer': 'azi270til30.HDiffPer.H'},
{'json': 'total_radiation_s_30',
'matIso': 'azi000til30.H',
'matPer': 'azi000til30.HPer'},
{'json': 'beam_radiation_s_30',
'mat': 'azi000til30.HDir.H'},
{'json': 'diffuse_radiation_s_30',
'matIso': 'azi000til30.HDiffIso.H',
'matPer': 'azi000til30.HDiffPer.H'},
{'json': 'total_radiation_w_30',
'matIso': 'azi090til30.H',
'matPer': 'azi090til30.HPer'},
{'json': 'beam_radiation_w_30',
'mat': 'azi090til30.HDir.H'},
{'json': 'diffuse_radiation_w_30',
'matIso': 'azi090til30.HDiffIso.H',
'matPer': 'azi090til30.HDiffPer.H'}]
dict_sub_hourly = [{'json': 'dry_bulb_temperature',
'mat': 'weaBusHHorIR.TDryBul'},
{'json': 'relative_humidity',
'mat': 'weaBusHHorIR.relHum'},
{'json': 'total_horizontal_radiation',
'matIso': 'azi000til00.H',
'matPer': 'azi000til00.HPer'},
{'json': 'beam_horizontal_radiation',
'mat': 'azi000til00.HDir.H'},
{'json': 'diffuse_horizontal_radiation',
'matIso': 'azi000til00.HDiffIso.H',
'matPer': 'azi000til00.HDiffPer.H'},
{'json': 'total_radiation_s_90',
'matIso': 'azi000til90.H',
'matPer': 'azi000til90.HPer'},
{'json': 'beam_radiation_s_90',
'mat': 'azi000til90.HDir.H'},
{'json': 'diffuse_radiation_s_90',
'matIso': 'azi000til90.HDiffIso.H',
'matPer': 'azi000til90.HDiffPer.H'},
{'json': 'total_radiation_e_90',
'matIso': 'azi270til90.H',
'matPer': 'azi270til90.HPer'},
{'json': 'beam_radiation_e_90',
'mat': 'azi270til90.HDir.H'},
{'json': 'diffuse_radiation_e_90',
'matIso': 'azi270til90.HDiffIso.H',
'matPer': 'azi270til90.HDiffPer.H'},
{'json': 'total_radiation_n_90',
'matIso': 'azi180til90.H',
'matPer': 'azi180til90.HPer'},
{'json': 'beam_radiation_n_90',
'mat': 'azi180til90.HDir.H'},
{'json': 'diffuse_radiation_n_90',
'matIso': 'azi180til90.HDiffIso.H',
'matPer': 'azi180til90.HDiffPer.H'},
{'json': 'total_radiation_w_90',
'matIso': 'azi090til90.H',
'matPer': 'azi090til90.HPer'},
{'json': 'beam_radiation_w_90',
'mat': 'azi090til90.HDir.H'},
{'json': 'diffuse_radiation_w_90',
'matIso': 'azi090til90.HDiffIso.H',
'matPer': 'azi090til90.HDiffPer.H'},
{'json': 'total_radiation_45_e_90',
'matIso': 'azi315til90.H',
'matPer': 'azi315til90.HPer'},
{'json': 'beam_radiation_45_e_90',
'mat': 'azi315til90.HDir.H'},
{'json': 'diffuse_radiation_45_e_90',
'matIso': 'azi315til90.HDiffIso.H',
'matPer': 'azi315til90.HDiffPer.H'},
{'json': 'total_radiation_45_w_90',
'matIso': 'azi045til90.H',
'matPer': 'azi045til90.HPer'},
{'json': 'beam_radiation_45_w_90',
'mat': 'azi045til90.HDir.H'},
{'json': 'diffuse_radiation_45_w_90',
'matIso': 'azi045til90.HDiffIso.H',
'matPer': 'azi045til90.HDiffPer.H'},
{'json': 'total_radiation_e_30',
'matIso': 'azi270til30.H',
'matPer': 'azi270til30.HPer'},
{'json': 'beam_radiation_e_30',
'mat': 'azi270til30.HDir.H'},
{'json': 'diffuse_radiation_e_30',
'matIso': 'azi270til30.HDiffIso.H',
'matPer': 'azi270til30.HDiffPer.H'},
{'json': 'total_radiation_s_30',
'matIso': 'azi000til30.H',
'matPer': 'azi000til30.HPer'},
{'json': 'beam_radiation_s_30',
'mat': 'azi000til30.HDir.H'},
{'json': 'diffuse_radiation_s_30',
'matIso': 'azi000til30.HDiffIso.H',
'matPer': 'azi000til30.HDiffPer.H'},
{'json': 'total_radiation_w_30',
'matIso': 'azi090til30.H',
'matPer': 'azi090til30.HPer'},
{'json': 'beam_radiation_w_30',
'mat': 'azi090til30.HDir.H'},
{'json': 'diffuse_radiation_w_30',
'matIso': 'azi090til30.HDiffIso.H',
'matPer': 'azi090til30.HDiffPer.H'},
{'json': 'integrated_total_horizontal_radiation',
'matIso': 'azi000til00.H',
'matPer': 'azi000til00.HPer'},
{'json': 'integrated_beam_horizontal_radiation',
'mat': 'azi000til00.HDir.H'},
{'json': 'integrated_diffuse_horizontal_radiation',
'matIso': 'azi000til00.HDiffIso.H',
'matPer': 'azi000til00.HDiffPer.H'}]
dict_yearly = [{'json': 'average_dry_bulb_temperature',
'mat': 'weaBusHHorIR.TDryBul'},
{'json': 'average_relative_humidity',
'mat': 'weaBusHHorIR.relHum'},
{'json': 'average_humidity_ratio',
'mat': 'toDryAir.XiDry'},
{'json': 'average_wet_bulb_temperature',
'mat': 'weaBusHHorIR.TWetBul'},
{'json': 'average_dew_point_temperature',
'mat': 'weaBusHHorIR.TDewPoi'},
{'json': 'total_horizontal_solar_radiation',
'matIso': 'azi000til00.H',
'matPer': 'azi000til00.HPer'},
{'json': 'total_horizontal_beam_solar_radiation',
'mat': 'azi000til00.HDir.H'},
{'json': 'total_horizontal_diffuse_solar_radiation',
'matIso': 'azi000til00.HDiffIso.H',
'matPer': 'azi000til00.HDiffPer.H'},
{'json': 'total_radiation_s_90',
'matIso': 'azi000til90.H',
'matPer': 'azi000til90.HPer'},
{'json': 'total_beam_radiation_s_90',
'mat': 'azi000til90.HDir.H'},
{'json': 'total_diffuse_radiation_s_90',
'matIso': 'azi000til90.HDiffIso.H',
'matPer': 'azi000til90.HDiffPer.H'},
{'json': 'total_radiation_e_90',
'matIso': 'azi270til90.H',
'matPer': 'azi270til90.HPer'},
{'json': 'total_beam_radiation_e_90',
'mat': 'azi270til90.HDir.H'},
{'json': 'total_diffuse_radiation_e_90',
'matIso': 'azi270til90.HDiffIso.H',
'matPer': 'azi270til90.HDiffPer.H'},
{'json': 'total_radiation_n_90',
'matIso': 'azi180til90.H',
'matPer': 'azi180til90.HPer'},
{'json': 'total_beam_radiation_n_90',
'mat': 'azi180til90.HDir.H'},
{'json': 'total_diffuse_radiation_n_90',
'matIso': 'azi180til90.HDiffIso.H',
'matPer': 'azi180til90.HDiffPer.H'},
{'json': 'total_radiation_w_90',
'matIso': 'azi090til90.H',
'matPer': 'azi090til90.HPer'},
{'json': 'total_beam_radiation_w_90',
'mat': 'azi090til90.HDir.H'},
{'json': 'total_diffuse_radiation_w_90',
'matIso': 'azi090til90.HDiffIso.H',
'matPer': 'azi090til90.HDiffPer.H'},
{'json': 'total_radiation_45_e_90',
'matIso': 'azi315til90.H',
'matPer': 'azi315til90.HPer'},
{'json': 'total_beam_radiation_45_e_90',
'mat': 'azi315til90.HDir.H'},
{'json': 'total_diffuse_radiation_45_e_90',
'matIso': 'azi315til90.HDiffIso.H',
'matPer': 'azi315til90.HDiffPer.H'},
{'json': 'total_radiation_45_w_90',
'matIso': 'azi045til90.H',
'matPer': 'azi045til90.HPer'},
{'json': 'total_beam_radiation_45_w_90',
'mat': 'azi045til90.HDir.H'},
{'json': 'total_diffuse_radiation_45_w_90',
'matIso': 'azi045til90.HDiffIso.H',
'matPer': 'azi045til90.HDiffPer.H'},
{'json': 'total_radiation_e_30',
'matIso': 'azi270til30.H',
'matPer': 'azi270til30.HPer'},
{'json': 'total_beam_radiation_e_30',
'mat': 'azi270til30.HDir.H'},
{'json': 'total_diffuse_radiation_e_30',
'matIso': 'azi270til30.HDiffIso.H',
'matPer': 'azi270til30.HDiffPer.H'},
{'json': 'total_radiation_s_30',
'matIso': 'azi000til30.H',
'matPer': 'azi000til30.HPer'},
{'json': 'total_beam_radiation_s_30',
'mat': 'azi000til30.HDir.H'},
{'json': 'total_diffuse_radiation_s_30',
'matIso': 'azi000til30.HDiffIso.H',
'matPer': 'azi000til30.HDiffPer.H'},
{'json': 'total_radiation_w_30',
'matIso': 'azi090til30.H',
'matPer': 'azi090til30.HPer'},
{'json': 'total_beam_radiation_w_30',
'mat': 'azi090til30.HDir.H'},
{'json': 'total_diffuse_radiation_w_30',
'matIso': 'azi090til30.HDiffIso.H',
'matPer': 'azi090til30.HDiffPer.H'}]
Days = {'WD100': {'days': ['yearly', 'may4', 'jul14', 'sep6'],
'tstart': [0, 10627200, 16761600, 21427200],
'tstop': [0, 10713600, 16848000, 21513600]},
'WD200': {'days': ['yearly', 'may24', 'aug26'],
'tstart': [0, 12355200, 20476800, 0],
'tstop': [0, 12441600, 20563200, 31536000]},
'WD300': {'days': ['yearly', 'feb7', 'aug13'],
'tstart': [0, 3196800, 19353600],
'tstop': [0, 3283200, 19440000]},
'WD400': {'days': ['yearly', 'jan24', 'jul1'],
'tstart': [0, 1987200, 15638400],
'tstop': [0, 2073600, 15724800]},
'WD500': {'days': ['yearly', 'mar1', 'sep14'],
'tstart': [0, 5097600, 22118400],
'tstop': [0, 5184000, 22204800]},
'WD600': {'days': ['yearly', 'may4', 'jul14', 'sep6'],
'tstart': [0, 10627200, 16761600, 21427200],
'tstop': [0, 10713600, 16848000, 21513600]}}
Days2 = {'WD100': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]},
'WD200': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]},
'WD300': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]},
'WD400': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]},
'WD500': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]},
'WD600': {'days': ['test2'],
'tstart': [0],
'tstop': [31536000+3600]}}
dictTest2 = [{'json': 'dry_bulb_temperature',
'mat': 'weaBusHHorIR.TDryBul'},
{'json': 'relative_humidity',
'mat': 'weaBusHHorIR.relHum'},
{'json': 'humidity_ratio',
'mat': 'toDryAir.XiDry'},
{'json': 'dewpoint_temperature',
'mat': 'weaBusHHorIR.TDewPoi'},
{'json': 'wet_bulb_temperature',
'mat': 'weaBusHHorIR.TWetBul'},
{'json': 'wind_speed',
'mat': 'weaBusHHorIR.winSpe'},
{'json': 'wind_direction',
'mat': 'weaBusHHorIR.winDir'},
{'json': 'station_pressure',
'mat': 'weaBusHHorIR.pAtm'},
{'json': 'total_cloud_cover',
'mat': 'weaBusHHorIR.nTot'},
{'json': 'opaque_cloud_cover',
'mat': 'weaBusHHorIR.nOpa'},
{'json': 'sky_temperature',
'matHor': 'weaBusHHorIR.TBlaSky',
'matDew': 'weaBusTDryBulTDewPoiOpa.TBlaSky'},
{'json': 'total_horizontal_radiation',
'matIso': 'azi000til00.H',
'matPer': 'azi000til00.HPer'},
{'json': 'beam_horizontal_radiation',
'mat': 'azi000til00.HDir.H'},
{'json': 'diffuse_horizontal_radiation',
'matIso': 'azi000til00.HDiffIso.H',
'matPer': 'azi000til00.HDiffPer.H'},
{'json': 'total_radiation_s_90',
'matIso': 'azi000til90.H',
'matPer': 'azi000til90.HPer'},
{'json': 'beam_radiation_s_90',
'mat': 'azi000til90.HDir.H'},
{'json': 'diffuse_radiation_s_90',
'matIso': 'azi000til90.HDiffIso.H',
'matPer': 'azi000til90.HDiffPer.H'},
{'json': 'total_radiation_e_90',
'matIso': 'azi270til90.H',
'matPer': 'azi270til90.HPer'},
{'json': 'beam_radiation_e_90',
'mat': 'azi270til90.HDir.H'},
{'json': 'diffuse_radiation_e_90',
'matIso': 'azi270til90.HDiffIso.H',
'matPer': 'azi270til90.HDiffPer.H'},
{'json': 'total_radiation_n_90',
'matIso': 'azi180til90.H',
'matPer': 'azi180til90.HPer'},
{'json': 'beam_radiation_n_90',
'mat': 'azi180til90.HDir.H'},
{'json': 'diffuse_radiation_n_90',
'matIso': 'azi180til90.HDiffIso.H',
'matPer': 'azi180til90.HDiffPer.H'},
{'json': 'total_radiation_w_90',
'matIso': 'azi090til90.H',
'matPer': 'azi090til90.HPer'},
{'json': 'beam_radiation_w_90',
'mat': 'azi090til90.HDir.H'},
{'json': 'diffuse_radiation_w_90',
'matIso': 'azi090til90.HDiffIso.H',
'matPer': 'azi090til90.HDiffPer.H'},
{'json': 'total_radiation_45_e_90',
'matIso': 'azi315til90.H',
'matPer': 'azi315til90.HPer'},
{'json': 'beam_radiation_45_e_90',
'mat': 'azi315til90.HDir.H'},
{'json': 'diffuse_radiation_45_e_90',
'matIso': 'azi315til90.HDiffIso.H',
'matPer': 'azi315til90.HDiffPer.H'},
{'json': 'total_radiation_45_w_90',
'matIso': 'azi045til90.H',
'matPer': 'azi045til90.HPer'},
{'json': 'beam_radiation_45_w_90',
'mat': 'azi045til90.HDir.H'},
{'json': 'diffuse_radiation_45_w_90',
'matIso': 'azi045til90.HDiffIso.H',
'matPer': 'azi045til90.HDiffPer.H'},
{'json': 'total_radiation_e_30',
'matIso': 'azi270til30.H',
'matPer': 'azi270til30.HPer'},
{'json': 'beam_radiation_e_30',
'mat': 'azi270til30.HDir.H'},
{'json': 'diffuse_radiation_e_30',
'matIso': 'azi270til30.HDiffIso.H',
'matPer': 'azi270til30.HDiffPer.H'},
{'json': 'total_radiation_s_30',
'matIso': 'azi000til30.H',
'matPer': 'azi000til30.HPer'},
{'json': 'beam_radiation_s_30',
'mat': 'azi000til30.HDir.H'},
{'json': 'diffuse_radiation_s_30',
'matIso': 'azi000til30.HDiffIso.H',
'matPer': 'azi000til30.HDiffPer.H'},
{'json': 'total_radiation_w_30',
'matIso': 'azi090til30.H',
'matPer': 'azi090til30.HPer'},
{'json': 'beam_radiation_w_30',
'mat': 'azi090til30.HDir.H'},
{'json': 'diffuse_radiation_w_30',
'matIso': 'azi090til30.HDiffIso.H',
'matPer': 'azi090til30.HDiffPer.H'}]
if case_dict['TestN']:
caseDays = [{key: value[i] for key, value in Days2[case].items()}
for i in range(len(Days2[case]['days']))]
else:
caseDays = [{key: value[i] for key, value in Days[case].items()}
for i in range(len(Days[case]['days']))]
out_dir = res_fin
missing = list()
for dR in results:
for day in caseDays:
if day['days'] in 'yearly':
res = extrapolate_results(dict_yearly, dR, day)
if not res:
missing.append(day['days'] + '_' + dR['variable'])
else:
# float(res['res'])
out_dir[case]['annual_results'][res['json']] =\
float(res['res'])
elif day['days'] in 'test2':
ressH = extrapolate_results(dictTest2, dR, day)
if 'dry_bulb_temperature' in ressH['json']:
out_dir[case]['hour_of_year'] = (ressH['time']/
3600).tolist()
out_dir[case][ressH['json']] = ressH['res'].tolist()
else:
resH = extrapolate_results(dict_hourly, dR, day)
ressH = extrapolate_results(dict_sub_hourly, dR, day)
if not resH:
missing.append(day['days'] + '_hourly_' + dR['variable'])
else:
resH['res'] = resH['res'][0::4]
resH['time'] = resH['time'][0::4]
HRlist = list()
k = 0
for HR in resH['res']:
HRdict = {}
HRdict['time'] = float((resH['time'][k] -
resH['time'][0]) / 3600)
HRdict['value'] = float(HR)
HRlist.append(HRdict)
k += 1
out_dir[case]['hourly_results'][day['days']]\
[resH['json']] = HRlist
if not ressH:
missing.append(day['days'] + '_subhourly_' +
dR['variable'])
else:
sHRlist = list()
k = 0
for sHR in ressH['res']:
sHRdict = {}
sHRdict['time'] = float((ressH['time'][k] -
ressH['time'][0]) / 3600)
                        sHRdict['value'] = float(sHR)
sHRlist.append(sHRdict)
k += 1
out_dir[case]['subhourly_results'][day['days']]\
[ressH['json']] = sHRlist
# Manually update integrated values for 'integrated'
# variables for subhourly results
if 'horizontal_radiation' in ressH['json']:
time_int = ressH['time'][0::4]
H_int = np.interp(time_int, ressH['time'],
ressH['res'])
sHRlist = list()
k = 0
for sHR in H_int:
sHRdict = {}
sHRdict['time'] = float((time_int[k] -
time_int[0]) / 3600)
sHRdict['value'] = float(sHR)
sHRlist.append(sHRdict)
k += 1
out_dir[case]['subhourly_results']\
[day['days']]['integrated_' +
ressH['json']] = sHRlist
return out_dir
def extrapolate_results(dicT, dR, day):
"""
This function takes a result time series, matches it with the corresponding
json, and extrapolates the data
    :param dicT: This is the dictionary with the mapping between .mat and \
.Json variables
:param dR: Dictionary with the name, time and value of certain variables\
in the .mat file
:param day: Subdictionary with all the days required for the bestest. \
See table 3 in BESTEST package
"""
OutDict = {}
for dT in dicT:
if dR['variable'] in list(dT.values()) and 'integrated' not in\
dT['json']:
if day['days'] in 'yearly':
if 'azi' in dR['variable']:
                    res = np.trapz(dR['value'], x=dR['time'])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is for automatic discoloration detection.
fig_path: str, path of the target image;
save_flag: bool, whether to save the masked image and mask or not;
mask_expand_flag: bool, whether to perform expansion on the initial mask or not;
fig_out_path: str, output path for masked images;
mask_out_path: str, output path for masks.
Usage:
    python auto_detect.py --fig_path="results/4_recon.png"
"""
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import copy
import os
import argparse
# num_detect_01, num_detect_02 and num_detect_12 define the number of histogram bins that we assume are caused by
# errors. Changing these three values affects performance on different images: the larger the value, the more
# pixels are masked.
# block_offset and per_thres are used to expand the mask after the initial computation to cover other potential defects.
# The following setting works well for 4_recon.png
num_detect_01 = 15
num_detect_02 = 15
num_detect_12 = 15
block_offset = 5
per_thres = 0.6
def hist_detect(a, b, num_detect):
diff = a - b
n1, inter1, _ = plt.hist(diff.flatten(), bins=30)
inter_bound = np.zeros((num_detect, 2))
o1 = np.argsort(n1)
for i in range(num_detect):
inter_bound[i, 0] = inter1[o1[i]]
inter_bound[i, 1] = inter1[o1[i] + 1]
row_idx = []
col_idx = []
for i in range(num_detect):
        tmp = np.where((diff >= inter_bound[i, 0]) & (diff < inter_bound[i, 1]))
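        # Hedged completion: the rest of this function is missing in this copy.
        # A plausible continuation, collecting the pixel coordinates that fall
        # into the selected low-count bins and returning them, is sketched here.
        row_idx.extend(tmp[0].tolist())
        col_idx.extend(tmp[1].tolist())
    return np.array(row_idx), np.array(col_idx)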
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import tempfile
from collections import namedtuple
from contextlib import closing
from pathlib import Path
from unittest.mock import Mock
import grpc
import numpy as np
import recordio
import tensorflow as tf
from odps import ODPS
from elasticai_api.proto import elasticai_api_pb2
from elasticai_api.util.grpc_utils import build_channel
from elasticdl.python.common.args import parse_worker_args
from elasticdl.python.common.constants import JobType, MaxComputeConfig
from elasticdl.python.common.model_utils import (
get_module_file_path,
load_module,
)
from elasticdl.python.data.recordio_gen.frappe_recordio_gen import (
load_raw_data,
)
from elasticdl.python.master.evaluation_service import EvaluationService
from elasticdl.python.master.servicer import MasterServicer
from elasticdl.python.master.task_manager import TaskManager
from elasticdl.python.ps.parameter_server import ParameterServer
from elasticdl.python.tests.mock_service import _server
from elasticdl.python.worker.master_client import MasterClient
from elasticdl.python.worker.ps_client import PSClient
from elasticdl.python.worker.worker import Worker
from elasticdl_client.common.constants import DistributionStrategy
class PserverArgs(object):
def __init__(
self,
grads_to_wait=8,
lr_staleness_modulation=0,
sync_version_tolerance=0,
use_async=False,
model_zoo=None,
model_def=None,
optimizer="optimizer",
port=9999,
log_level="INFO",
job_name="test_pserver",
namespace="default",
master_addr="test:1111",
evaluation_steps=0,
checkpoint_dir=None,
checkpoint_steps=None,
keep_checkpoint_max=0,
ps_id=0,
num_ps_pods=1,
num_workers=2,
checkpoint_dir_for_init=None,
):
self.grads_to_wait = grads_to_wait
self.lr_staleness_modulation = lr_staleness_modulation
self.sync_version_tolerance = sync_version_tolerance
self.use_async = use_async
self.model_zoo = model_zoo
self.model_def = model_def
self.optimizer = optimizer
self.port = port
self.log_level = log_level
self.job_name = job_name
self.namespace = namespace
self.master_addr = master_addr
self.evaluation_steps = evaluation_steps
self.checkpoint_dir = checkpoint_dir
self.checkpoint_steps = checkpoint_steps
self.keep_checkpoint_max = keep_checkpoint_max
self.ps_id = ps_id
self.num_ps_pods = num_ps_pods
self.num_workers = num_workers
self.checkpoint_dir_for_init = checkpoint_dir_for_init
class TaskManagerArgs(object):
def __init__(
self,
training_data="",
validation_data="",
minibatch_size=1,
num_minibatches_per_task=2,
num_epochs=1,
max_step=0,
data_reader_params="",
model_zoo="",
model_def="",
custom_data_reader="custom_data_reader",
checkpoint_dir_for_init="",
custom_training_loop=False,
task_fault_tolerance=True,
relaunch_timeout_worker=True,
):
self.training_data = training_data
self.validation_data = validation_data
self.minibatch_size = minibatch_size
self.num_minibatches_per_task = num_minibatches_per_task
self.num_epochs = num_epochs
self.max_step = max_step
self.data_reader_params = data_reader_params
self.model_zoo = model_zoo
self.model_def = model_def
self.custom_data_reader = custom_data_reader
self.checkpoint_dir_for_init = checkpoint_dir_for_init
self.custom_training_loop = custom_training_loop
self.task_fault_tolerance = task_fault_tolerance
self.relaunch_timeout_worker = relaunch_timeout_worker
class DatasetName(object):
IMAGENET = "imagenet1"
FRAPPE = "frappe1"
TEST_MODULE = "test_module1"
IMAGE_DEFAULT = "image_default1"
CENSUS = "census1"
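# Hedged usage sketch (not part of the original tests): create_recordio_file()
# below is typically called with one of the DatasetName constants, e.g.
#   fname = create_recordio_file(128, DatasetName.FRAPPE, shape=10)
#   # ... read the RecordIO file in a test, then os.remove(fname)
# The concrete size and shape values here are placeholders.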
def create_task_manager(training_shards, evaluation_shards, num_epochs=1):
args = TaskManagerArgs(num_minibatches_per_task=3, num_epochs=num_epochs)
task_manager = TaskManager(args)
task_manager._training_shards = training_shards
task_manager._evaluation_shards = evaluation_shards
if task_manager._training_shards:
task_manager.create_tasks(elasticai_api_pb2.TRAINING)
return task_manager
def create_recordio_file(size, dataset_name, shape, temp_dir=None):
"""Creates a temporary file containing data of `recordio` format.
Args:
size: The number of records in the temporary file.
dataset_name: A dataset name from `DatasetName`.
shape: The shape of records to be created.
temp_dir: The storage path of the temporary file.
Returns:
A python string indicating the temporary file name.
"""
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
with closing(recordio.Writer(temp_file.name)) as f:
for _ in range(size):
if dataset_name == DatasetName.IMAGENET:
image = np.random.randint(255, size=shape, dtype=np.uint8)
image = tf.image.encode_jpeg(tf.convert_to_tensor(value=image))
image = image.numpy()
label = np.ndarray([1], dtype=np.int64)
label[0] = np.random.randint(1, 11)
example_dict = {
"image": tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image])
),
"label": tf.train.Feature(
int64_list=tf.train.Int64List(value=[label])
),
}
elif dataset_name == DatasetName.FRAPPE:
feature = np.random.randint(5383, size=(shape,))
label = np.random.randint(2, size=(1,))
example_dict = {
"feature": tf.train.Feature(
int64_list=tf.train.Int64List(value=feature)
),
"label": tf.train.Feature(
int64_list=tf.train.Int64List(value=[label])
),
}
elif dataset_name == DatasetName.TEST_MODULE:
x = np.random.rand(shape).astype(np.float32)
y = 2 * x + 1
example_dict = {
"x": tf.train.Feature(
float_list=tf.train.FloatList(value=x)
),
"y": tf.train.Feature(
float_list=tf.train.FloatList(value=y)
),
}
elif dataset_name == DatasetName.IMAGE_DEFAULT:
                image = np.random.rand(np.prod(shape))
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Tests for asic_la.asic_simulator.
"""
import linear_algebra
import pytest
import math
import jax
import asic_la.utils as utils
from jax.config import config
config.update("jax_enable_x64", True)
import numpy as np
from asic_la import asic_simulator
import asic_la.asic_simulator_helpers as helpers
import asic_la.piecewise_pmapped_functions as ppf
import asic_la.asic_simulator_helpers_experimental as helpers_experimental
from asic_la.asic_simulator_helpers import AXIS_NAME
from asic_la.parser import parse_pbaxisums, parse
from asic_la.preprocessor.preprocessor import (
preprocess,
preprocess_pbaxisums,
canonicalize_gradients,
canonicalize_building_blocks,
)
from asic_la.sharded_probability_function import ShardedDiscretedProbabilityFunction
from asic_la.testutils import (
build_random_acyclic_graph,
generate_raw_pbaxistring,
generate_pbaxisum,
to_array,
)
from asic_la.sharded_probability_function import invert_permutation
@pytest.mark.parametrize("depth", [30])
@pytest.mark.parametrize("Nparams", [10])
@pytest.mark.parametrize("Nexponents", [10])
def test_get_final_state_in_steps(depth, Nparams, Nexponents):
N = 21
tar = 7
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=Nexponents, depth=depth, N=N
)
resolved_acyclic_graph = linear_algebra.resolve_parameters(acyclic_graph, resolver)
building_blocks, gradients, op_axes = parse(resolved_acyclic_graph, discretes, dtype=np.complex128)
supermatrices, _, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=tar
)
canonical_superaxes = utils.canonicalize_ints(superaxes)
canonical_supermatrices = canonicalize_building_blocks(
supermatrices, broadcasted_shape=jax.device_count()
)
assert len(supermatrices) > 1
state = np.zeros(2 ** N).astype(np.complex128)
state[0] = 1.0
state = state.reshape((2,) * N)
simulator = linear_algebra.Simulator(dtype=np.complex128)
expected = simulator.simulate(
resolved_acyclic_graph, discrete_order=discretes, initial_state=state.ravel()
)
asic_result = ppf.get_final_state_in_steps(
canonical_supermatrices, canonical_superaxes, N, len(supermatrices)
)
assert asic_result.perm == tuple(range(N))
actual = to_array(asic_result.concrete_tensor)
np.testing.assert_allclose(np.ravel(actual), expected.final_state_vector)
@pytest.mark.parametrize("depth", [30])
@pytest.mark.parametrize("Nparams", [10])
@pytest.mark.parametrize("Nexponents", [10])
def test_get_final_state(depth, Nparams, Nexponents):
N = 21
tar = 7
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=Nexponents, depth=depth, N=N
)
resolved_acyclic_graph = linear_algebra.resolve_parameters(acyclic_graph, resolver)
building_blocks, gradients, op_axes = parse(resolved_acyclic_graph, discretes, dtype=np.complex128)
supermatrices, _, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=tar
)
state = np.zeros(2 ** N).astype(np.complex128)
state[0] = 1.0
state = state.reshape((2,) * N)
simulator = linear_algebra.Simulator(dtype=np.complex128)
expected = simulator.simulate(
resolved_acyclic_graph, discrete_order=discretes, initial_state=state.ravel()
)
asic_result = jax.pmap(
lambda x: helpers.get_final_state(supermatrices, superaxes, N),
axis_name=AXIS_NAME,
)(np.arange(jax.device_count()))
assert asic_result.perm == tuple(range(N))
actual = to_array(asic_result.concrete_tensor)
np.testing.assert_allclose(np.ravel(actual), expected.final_state_vector)
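# Note added for clarity (not part of the original tests): the jax.pmap call
# above only broadcasts the sharded computation across all local devices; the
# lambda ignores its argument, and np.arange(jax.device_count()) merely supplies
# one dummy input per device so that get_final_state() runs once per device
# along AXIS_NAME.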
@pytest.mark.parametrize("depth", [30])
@pytest.mark.parametrize("Nparams", [20])
@pytest.mark.parametrize("Nexponents", [10])
def test_apply_building_blocks(depth, Nparams, Nexponents):
N = 21
target = 7
discretes = linear_algebra.LinearSpace.range(N)
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=Nexponents, depth=depth, N=N
)
building_blocks, gradients, op_axes = parse(
linear_algebra.resolve_parameters(acyclic_graph, resolver), discretes, dtype=np.complex128
)
supermatrices, _, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=target
)
intermediate_state = jax.pmap(
lambda x: helpers.get_final_state(supermatrices, superaxes, N),
axis_name=AXIS_NAME,
)(np.arange(jax.device_count()))
actual_final_state = jax.pmap(
lambda x: helpers.apply_building_blocks(x, supermatrices, superaxes).align_axes(),
axis_name=AXIS_NAME,
)(intermediate_state)
state = np.zeros(2 ** N)
state[0] = 1.0
state /= np.linalg.norm(state)
simulator = linear_algebra.Simulator(dtype=np.complex128)
linear_algebra_result = simulator.simulate(
linear_algebra.resolve_parameters(acyclic_graph + acyclic_graph, resolver),
discrete_order=discretes,
initial_state=state.ravel(),
)
expected_final_state = linear_algebra_result.final_state_vector
np.testing.assert_allclose(
expected_final_state, to_array(actual_final_state.concrete_tensor).ravel()
)
@pytest.mark.parametrize("depth", [30])
@pytest.mark.parametrize("Nparams", [20])
@pytest.mark.parametrize("Nexponents", [10])
def test_apply_pbaxistring(depth, Nparams, Nexponents):
N = 21
target = 7
discretes = linear_algebra.LinearSpace.range(N)
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=Nexponents, depth=depth, N=N
)
building_blocks, gradients, op_axes = parse(
linear_algebra.resolve_parameters(acyclic_graph, resolver), discretes, dtype=np.complex128
)
supermatrices, _, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=target
)
intermediate_state = jax.pmap(
lambda x: helpers.get_final_state(supermatrices, superaxes, N),
axis_name=AXIS_NAME,
)(np.arange(jax.device_count()))
coeff, rawpbaxistring, prob_basis_axis_discretes = generate_raw_pbaxistring(discretes, N)
pbaxistring = linear_algebra.ProbBasisAxisString(
coeff, [p(q) for p, q in zip(rawpbaxistring, prob_basis_axis_discretes)]
)
pbaxisums = [sum([pbaxistring])]
prob_basis_axis_building_blocks, _, prob_basis_axis_opaxes = parse_pbaxisums(pbaxisums, discretes)
superpaulimats, superpauliaxes = preprocess_pbaxisums(
prob_basis_axis_building_blocks, prob_basis_axis_opaxes, num_discretes=N, max_discrete_support=target
)
actual_final_state = jax.pmap(
lambda x: helpers.apply_building_blocks(
x, superpaulimats[0][0], superpauliaxes[0][0]
).align_axes(),
axis_name=AXIS_NAME,
)(intermediate_state)
state = np.zeros(2 ** N)
state[0] = 1.0
state /= np.linalg.norm(state)
simulator = linear_algebra.Simulator(dtype=np.complex128)
# NOTE : linear_algebra.ProbBasisAxisString and + operator for linear_algebra.Graphs
# use different logic for ordering building_blocks.
acyclic_graph_2 = acyclic_graph + [p(q) for q, p in pbaxistring.items()]
linear_algebra_result = simulator.simulate(
linear_algebra.resolve_parameters(acyclic_graph_2, resolver),
discrete_order=discretes,
initial_state=state.ravel(),
)
expected_final_state = linear_algebra_result.final_state_vector
np.testing.assert_allclose(
expected_final_state, to_array(actual_final_state.concrete_tensor).ravel()
)
@pytest.mark.parametrize("depth", [30])
@pytest.mark.parametrize("Nparams", [10])
@pytest.mark.parametrize("Nexponents", [10])
def test_inverse_unfolding(depth, Nparams, Nexponents):
N = 21
target = 7
discretes = linear_algebra.LinearSpace.range(N)
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=Nexponents, depth=depth, N=N
)
building_blocks, gradients, op_axes = parse(
linear_algebra.resolve_parameters(acyclic_graph, resolver), discretes, dtype=np.complex128
)
supermatrices, _, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=target
)
def forward_backward(building_blocks, axes, num_discretes):
state = helpers.get_final_state(building_blocks, axes, num_discretes)
assert state.perm == tuple(range(N))
reversed_axes = reversed(axes)
reversed_building_blocks = [g.T.conj() for g in reversed(building_blocks)]
return helpers.apply_building_blocks(state, reversed_building_blocks, reversed_axes).align_axes()
actual = jax.pmap(
lambda x: forward_backward(supermatrices, superaxes, N), axis_name=AXIS_NAME
)(np.arange(jax.device_count()))
assert actual.perm == tuple(range(N))
state = np.zeros(2 ** N)
state[0] = 1.0
state /= np.linalg.norm(state)
eps = np.finfo(np.float64).eps * 100
np.testing.assert_allclose(
to_array(actual.concrete_tensor).ravel(), state.ravel(), atol=eps, rtol=eps
)
@pytest.mark.parametrize("Nparams", [10])
@pytest.mark.parametrize("depth", [20])
@pytest.mark.parametrize("subdomain_length", [21])
@pytest.mark.parametrize("num_pbaxistrings", [4])
@pytest.mark.parametrize("num_pbaxisums", [1])
@pytest.mark.parametrize("string_length", [4])
@pytest.mark.parametrize("seed", [0])
def test_distributed_compute_gradients(
Nparams,
depth,
subdomain_length,
num_pbaxisums,
num_pbaxistrings,
string_length,
seed,
):
np.random.seed(seed)
N = 21
target = 7
discretes = linear_algebra.LinearSpace.range(N)
subdomain = np.sort(np.random.choice(np.arange(N), subdomain_length, replace=False))
acyclic_graph, discretes, resolver = build_random_acyclic_graph(
Nparams=Nparams, Nexponents=0, depth=depth, N=N, subdomain=subdomain
)
building_blocks, gradients, op_axes = parse(acyclic_graph, discretes, resolver, dtype=np.complex128)
supermatrices, supergradients, superaxes = preprocess(
building_blocks, gradients, op_axes, N, max_discrete_support=target
)
# canonicalize data
canonical_superaxes = utils.canonicalize_ints(superaxes)
canon_grads, smap = canonicalize_gradients(
supergradients, broadcasted_shape=jax.device_count()
)
canon_supermats = canonicalize_building_blocks(
supermatrices, broadcasted_shape=jax.device_count()
)
op_discretes = []
for op in acyclic_graph.all_operations():
op_discretes.extend(list(op.discretes))
op_discretes = sorted(list(set(op_discretes)))
pbaxisums = []
for _ in range(num_pbaxisums):
pbaxisums.append(generate_pbaxisum(num_pbaxistrings, op_discretes, string_length))
prob_basis_axis_building_blocks, prob_basis_axis_coeffs, prob_basis_axis_opaxes = parse_pbaxisums(pbaxisums, discretes)
superpaulimats, superpauliaxes = preprocess_pbaxisums(
prob_basis_axis_building_blocks, prob_basis_axis_opaxes, num_discretes=N, max_discrete_support=target
)
canonical_superpauliaxes = utils.canonicalize_ints(superpauliaxes)
canon_superpaulimats = canonicalize_building_blocks(
superpaulimats, broadcasted_shape=jax.device_count()
)
canonical_prob_basis_axis_coeffs = canonicalize_building_blocks(
prob_basis_axis_coeffs, broadcasted_shape=jax.device_count()
)
(
actual_gradients,
actual_expectations,
) = helpers_experimental.distributed_compute_gradients(
canon_supermats,
canon_grads,
canonical_superaxes,
canon_superpaulimats,
canonical_superpauliaxes,
canonical_prob_basis_axis_coeffs,
N,
len(smap),
)
simulator = linear_algebra.Simulator(dtype=np.complex128)
linear_algebra_result = simulator.simulate(acyclic_graph, resolver)
params = linear_algebra.parameter_symbols(acyclic_graph)
exp_acyclic_graphs = [None] * num_pbaxisums
g1 = []
for m, pbaxisum in enumerate(pbaxisums):
exp_acyclic_graphs[m] = [linear_algebra.Graph() for _ in range(num_pbaxistrings)]
accumulator = np.zeros_like(linear_algebra_result.final_state_vector)
for n, pbaxistring in enumerate(pbaxisum):
exp_acyclic_graphs[m][n] += [p(q) for q, p in pbaxistring.items()]
obs_result = simulator.simulate(
exp_acyclic_graphs[m][n],
discrete_order=op_discretes,
initial_state=linear_algebra_result.final_state_vector.ravel(),
)
accumulator += obs_result.final_state_vector * prob_basis_axis_coeffs[m][n]
expected_expectation = np.dot(
linear_algebra_result.final_state_vector.conj(), accumulator
)
g1.append(expected_expectation)
eps = jax.numpy.finfo(actual_expectations.dtype).eps * 100
np.testing.assert_allclose(np.array(g1), actual_expectations[0], atol=eps, rtol=eps)
delta = 1e-8
g2 = {}
for param in params:
g2[param] = []
shifted_dict = {k: v for k, v in resolver.param_dict.items()}
shifted_dict[param.name] = resolver.param_dict[param.name] + delta
shifted_resolver = linear_algebra.ParamResolver(shifted_dict)
linear_algebra_result_shifted = simulator.simulate(acyclic_graph, shifted_resolver)
for m, pbaxisum in enumerate(pbaxisums):
accumulator = np.zeros_like(linear_algebra_result_shifted.final_state_vector)
for n, pbaxistring in enumerate(pbaxisum):
obs_result = simulator.simulate(
exp_acyclic_graphs[m][n],
discrete_order=op_discretes,
initial_state=linear_algebra_result_shifted.final_state_vector.ravel(),
)
accumulator += obs_result.final_state_vector * prob_basis_axis_coeffs[m][n]
g2[param].append(
np.dot(linear_algebra_result_shifted.final_state_vector.conj(), accumulator)
)
for s, idx in smap.items():
for m, val in enumerate(g2[s]):
expected = np.real((val - g1[m]) / delta)
np.testing.assert_allclose(
actual_gradients[idx, m], expected, atol=1e-5, rtol=1e-5
)
def test_distributed_scalar_product():
N = 21
shape = (
(jax.device_count(),)
+ (2,) * (N - int(math.log2(jax.device_count())) - 10)
+ (8, 128)
)
    a = np.random.rand(*shape)
'''
Design a BP (back-propagation) neural network for the third part of the course project:
training an evaluation function for board positions.
BP network structure:
    input layer: input_dim = 361 (19*19 board)
    output layer: one neuron
'''
import keras
from keras.models import model_from_json
from keras.layers import Dense,Dropout
from keras.optimizers import SGD
import re
import numpy as np
import matplotlib.pyplot as plt
# A LossHistory callback class that records loss and accuracy during training.
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = {'batch': [], 'epoch': []}
self.accuracy = {'batch': [], 'epoch': []}
self.val_loss = {'batch': [], 'epoch': []}
self.val_acc = {'batch': [], 'epoch': []}
def on_batch_end(self, batch, logs={}):
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
def on_epoch_end(self, batch, logs={}):
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
def loss_plot(self, loss_type):
iters = range(len(self.losses[loss_type]))
plt.figure()
# acc
plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
# loss
plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
if loss_type == 'epoch':
# val_acc
plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
# val_loss
plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
plt.grid(True)
plt.xlabel(loss_type)
plt.ylabel('acc-loss')
plt.legend(loc="upper right")
plt.show()
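# Hedged usage sketch (not part of the original script): the callback is passed
# to Keras through the callbacks argument of fit() and plotted afterwards. It
# assumes the model is compiled with metrics=['accuracy'] so that 'acc' and
# 'val_acc' appear in the training logs; `model`, `x_train` and `y_train` are
# assumed to be defined elsewhere.
#   history = LossHistory()
#   model.fit(x_train, y_train, epochs=10, validation_split=0.1,
#             callbacks=[history])
#   history.loss_plot('epoch')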
dataset_size = 7541
# X_TRAIN = np.zeros((dataset_size,361),dtype=int)
# Y_TRAIN = np.zeros((dataset_size,1),dtype=int)
def load_data():
i = 0
X_TRAIN = np.zeros((dataset_size, 361), dtype=int)
x_line = [[0 for col in range(361)] for row in range(dataset_size)]
for line in open("x_train.txt"):
x_line[i] = [int(s) for s in re.findall(r'\d+', line)]
i += 1
print(len(x_line))
for i in range(dataset_size):
        x_train2 = np.array(x_line[i], dtype=int)
# Copyright 2018 <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
class Converter(object):
@staticmethod
def convert_sparse_tensor_to_csr(tensor):
indices = np.array(tensor.indices)
if len(tensor.dense_shape) == 1:
return sp.csr_matrix((tensor.values, (np.zeros(tensor.dense_shape[0]), indices[:, 0])), shape=(1, tensor.dense_shape[0]))
else:
return sp.csr_matrix((tensor.values, (indices[:,0], indices[:, 1])), shape=tensor.dense_shape)
@staticmethod
def convert_sparse_tensor_to_coo(tensor):
indices = np.array(tensor.indices)
return sp.coo_matrix((tensor.values, (indices[:,0], indices[:, 1])), shape=tensor.dense_shape)
@staticmethod
def convert_sparse_tensor_to_csc(tensor):
indices = np.array(tensor.indices)
return sp.csc_matrix((tensor.values, (indices[:,0], indices[:, 1])), shape=tensor.dense_shape)
@staticmethod
def convert_sparse_matrix_to_sparse_tensor(sparse):
s_format = sparse.getformat()
if s_format == "csr":
return Converter.convert_csr_to_sparse_tensor(sparse)
elif s_format == "coo":
return Converter.convert_coo_to_sparse_tensor(sparse)
elif s_format == "csc":
return Converter.convert_coo_to_sparse_tensor(sparse.to_coo())
else:
raise Exception("Failed to convert sparse matrix to sparse tensor.")
@staticmethod
def convert_coo_to_sparse_tensor(coo):
        indices = np.mat([coo.row, coo.col])
import threading
import numpy as np
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtGui import QOpenGLShaderProgram, QOpenGLShader, QMatrix4x4, \
QVector3D, QVector4D
from OpenGL import GL
from ui.shaders import Ui_ShadersWindow
from main import pol2cart
def star_coords(point, rad, angle=0):
pts1 = []
pts2 = []
angle -= np.pi / 2
coef = 0.382
for a in range(5):
delta = np.array(pol2cart(rad, a * np.pi * 2 / 5 + angle))
delta2 = np.array(
pol2cart(rad * coef, a * np.pi * 2 / 5 + angle - np.pi)
)
pts1.append(point - delta)
pts2.append(point - delta2)
res = [point]
for i1, i2 in zip(range(5), [3, 4, 0, 1, 2]):
res.extend([pts1[i1], pts2[i2]])
res.append(pts1[0])
return [list(p) for p in res]
# return [list(p) for p in [pts[0], pts[2], pts[4], pts[1], pts[3]]]
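# Worked note added for clarity (not part of the original file): star_coords()
# returns 12 points - the centre, the 5 outer and 5 inner vertices interleaved,
# and a closing copy of the first outer vertex - which matches the vertex count
# used per star in drawStars() below (GL_POLYGON batches of 12).
#   pts = star_coords(np.array([0.5, 0.5]), 0.2)
#   len(pts)  # -> 12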
class MainWindow(QMainWindow, Ui_ShadersWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.star_color = (255 / 255, 223 / 255, 0 / 255, 0.8)
self.flag_color = (223 / 255, 37 / 255, 0 / 255, 0.8)
self.start_flag_coords = np.array([(0.05, 0.2),
(0.95, 0.2),
(0.95, 0.8),
(0.05, 0.8)])
self.time = 0.0
self.star_coords = []
self.flag_coords = self.getFlagCoords()
self.star_params = self.getStarsParams()
self.setupUi(self)
self.getLightPos()
self.openGLWidget.initializeGL = self.initializeGL
self.openGLWidget.paintGL = self.paintGL
self.keyPressEvent = self.onKeyPressed
self.initTimer()
self.angleX, self.angleY = 0, 0
self.mutex = threading.Lock()
self.shaders = QOpenGLShaderProgram()
def getLightPos(self):
self.light_pos = QVector3D(
self.xSpinBox.value(),
self.ySpinBox.value(),
self.zSpinBox.value()
)
def initTimer(self):
self.timer = QTimer()
self.timer.setInterval(50)
self.timer.timeout.connect(self.onTimeOut)
self.timer.start()
def onTimeOut(self):
        self.time = (self.time + 0.01) % 1
self.openGLWidget.update()
def loadScene(self):
width, height = self.openGLWidget.width(), self.openGLWidget.height()
GL.glViewport(0, 0, width, height)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
self.getLightPos()
# Why the heck this does not work...
# GL.glMatrixMode(GL.GL_PROJECTION)
# GL.glLoadIdentity()
# GL.glFrustum(-1, 1, -1, 1, 1, 20)
# GL.glMatrixMode(GL.GL_MODELVIEW)
# GL.glLoadIdentity()
def initializeGL(self):
GL.glClearColor(0.1, 0.1, 0.1, 1.0)
self.setUpShaders()
self.initTimer()
def paintGL(self):
self.loadScene()
with self.mutex:
self.updateMatrices()
self.drawStuff()
def setUpShaders(self):
self.shaders.addShaderFromSourceFile(QOpenGLShader.Vertex,
'shader.vert')
self.shaders.addShaderFromSourceFile(QOpenGLShader.Fragment,
'shader.frag')
self.shaders.link()
self.shaders.bind()
self.updateMatrices()
def updateMatrices(self):
proj = QMatrix4x4()
proj.frustum(-0.3, 1, -0.3, 1, 2, 20)
modelview = QMatrix4x4()
modelview.lookAt(
QVector3D(0, 0, 3),
QVector3D(0, 0, 0),
QVector3D(0, 1, 0)
)
modelview.rotate(self.angleX, 1, 0, 0)
modelview.rotate(self.angleY, 0, 1, 0)
self.shaders.setUniformValue("ModelViewMatrix", modelview)
self.shaders.setUniformValue("MVP", proj * modelview)
self.shaders.setUniformValue("Time", self.time)
self.shaders.setUniformValue("LightPos", self.light_pos)
def drawStuff(self):
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
self.star_coords.clear()
[self.putStar(self.star_color, *param) for param in self.star_params]
self.drawFlag(self.flag_coords)
self.drawStars()
def putStar(self, color, point, radius, angle):
self.star_coords.extend(star_coords(np.array(point), radius, angle))
def drawStars(self):
self.shaders.setUniformValue("FlagColor", QVector4D(*self.star_color))
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glVertexPointer(2, GL.GL_FLOAT, 0, self.star_coords)
GL.glColorPointer(3, GL.GL_FLOAT, 0, [
self.star_color] * len(self.star_coords))
[GL.glDrawArrays(GL.GL_POLYGON, i, 12) for i in range(0, 12 * 5, 12)]
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
def drawFlag(self, flag_coords):
self.shaders.setUniformValue("FlagColor", QVector4D(*self.flag_color))
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glVertexPointer(2, GL.GL_FLOAT, 0, flag_coords)
color = []
[color.extend(self.flag_color) for _ in range(len(flag_coords))]
GL.glColorPointer(3, GL.GL_FLOAT, 0, color)
for i in range(0, len(flag_coords), 4):
GL.glDrawArrays(GL.GL_POLYGON, i, 4)
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
def getFlagCoords(self):
points = self.start_flag_coords
precision = 0.01
i_range = np.arange(0, 1, precision)
res = []
for i, i1 in zip(i_range, i_range[1:]):
p1 = points[0] * (1 - i) + points[1] * i
p2 = points[0] * (1 - i1) + points[1] * i1
p3 = points[3] * (1 - i1) + points[2] * i1
p4 = points[3] * (1 - i) + points[2] * i
res.extend([p1, p2, p3, p4])
return np.array(res)
def getStarsParams(self):
flag_coords = self.start_flag_coords
star_params = (
((1 / 3, 1 / 2), 0.2, 0),
            ((2 / 3, 0.8), 1 / 15, np.arcsin(3 / 5)),
# -*- coding: utf8 -*-
import chardet
import re
import os
from parse import Parser
import numpy as np
import string
import nltk
import pickle
from nltk.tokenize import WordPunctTokenizer
from nltk.tokenize import RegexpTokenizer
def parseRawData(domain):
# Generate the working directory
work_dir = os.path.abspath(os.path.join(os.path.curdir,"work2"))
word_dir = os.path.abspath(os.path.join(work_dir,'word'))
sentence_dir = os.path.abspath(os.path.join(work_dir,'sentence'))
domain_dir1 = os.path.abspath(os.path.join(word_dir,domain))
domain_dir2 = os.path.abspath(os.path.join(sentence_dir,domain))
if not os.path.exists(work_dir):
os.makedirs(work_dir)
if not os.path.exists(word_dir):
os.makedirs(word_dir)
if not os.path.exists(sentence_dir):
os.makedirs(sentence_dir)
if not os.path.exists(domain_dir1):
os.makedirs(domain_dir1)
if not os.path.exists(domain_dir2):
os.makedirs(domain_dir2)
fname = "./work/%s/review_%s" % (domain, "positive")
h_pos_data, pos_data = get_review(fname)
fname = "./work/%s/review_%s" % (domain, "negative")
h_neg_data, neg_data = get_review(fname)
pos_num, neg_num = len(h_pos_data), len(h_neg_data)
np.random.seed(7)
shuffle_pos_idx = np.random.permutation(np.arange(pos_num))
h_pos_shuffle = h_pos_data[shuffle_pos_idx]
h_pos_train = h_pos_shuffle[:2800]
h_pos_test = h_pos_shuffle[2800:]
write_h_tokensToFile(h_pos_train, domain, "train", "positive")
write_h_tokensToFile(h_pos_test, domain, "test", "positive")
pos_shuffle = pos_data[shuffle_pos_idx]
pos_train = pos_shuffle[:2800]
pos_test = pos_shuffle[2800:]
write_tokensToFile(pos_train, domain, "train", "positive")
write_tokensToFile(pos_test, domain, "test", "positive")
shuffle_neg_idx = np.random.permutation(np.arange(neg_num))
h_neg_shuffle = h_neg_data[shuffle_neg_idx]
h_neg_train = h_neg_shuffle[:2800]
h_neg_test = h_neg_shuffle[2800:]
write_h_tokensToFile(h_neg_train, domain, "train", "negative")
write_h_tokensToFile(h_neg_test, domain, "test", "negative")
neg_shuffle = neg_data[shuffle_neg_idx]
neg_train = neg_shuffle[:2800]
neg_test = neg_shuffle[2800:]
write_tokensToFile(neg_train, domain, "train", "negative")
write_tokensToFile(neg_test, domain, "test", "negative")
fname = "./work/%s/review_%s" % (domain, "unlabeled")
h_unlab_data, unlab_data = get_review(fname)
write_h_tokensToFile(h_unlab_data, domain, "train", "unlabeled")
write_tokensToFile(unlab_data, domain, "train", "unlabeled")
def get_review(fname):
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
tokenizer = RegexpTokenizer('\w+|\$[\d\.]+|\S+')
with open("./data/stopwords") as F:
stopWords = set(map(string.strip, F.readlines()))
h_tokens_list = []
tokens_list = []
with open(fname) as f:
lines = f.readlines()
for line in lines:
review = line.strip().lower()
sentences = sent_tokenizer.tokenize(review)
h_tokens = []
tokens = []
for sentence in sentences:
table = string.maketrans("", "")
delEStr = string.punctuation + string.digits
words = tokenizer.tokenize(str(sentence))
symbols = list(string.punctuation + string.digits)
symbols.remove('!')
elements = words
words = []
for word in elements:
if word not in symbols:
if word != '!':
word = word.translate(table, delEStr)
if len(word) != 0:
words.append(word)
if len(words) > 0:
if len(words) == 1 and (words[0] == '!' or words[0] in stopWords):
pass
else:
h_tokens.append(words)
tokens.extend(words)
h_tokens_list.append(h_tokens)
tokens_list.append(tokens)
return np.array(h_tokens_list), | np.array(tokens_list) | numpy.array |
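# Illustrative sketch (standalone, with a made-up sample sentence; the module above is Python 2):
# the same punctuation/digit stripping that get_review performs with string.maketrans,
# rewritten with Python 3's str.maketrans. As in the original, '!' is kept as its own token.
import string
from nltk.tokenize import RegexpTokenizer
demo_tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')
demo_table = str.maketrans('', '', string.punctuation.replace('!', '') + string.digits)
demo_sentence = "great camera, 10/10 -- would buy again!"
demo_words = [w.translate(demo_table) for w in demo_tokenizer.tokenize(demo_sentence.lower())]
demo_words = [w for w in demo_words if w]  # drop tokens that became empty strings
print(demo_words)  # ['great', 'camera', 'would', 'buy', 'again', '!']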
from typing import List, Any, Callable
import torch
from torch import Tensor
from sklearn.linear_model import LinearRegression
import numpy as np
from evaluation.IOU import calc_IOU
from evaluation.mpc import evaluate_model_planning
from evaluation.nearest_neighbours import calc_nearest_neighbours
class Statistics:
""" object to save statistics """
def __init__(self):
self.statistics = dict()
def add(self, key: str, value: Any):
if key not in self.statistics:
self.statistics[key] = []
self.statistics[key].append(value)
def keys(self):
return list(self.statistics.keys())
def __getitem__(self, key: str):
return self.statistics[key]
def items(self):
return self.statistics.items()
def print(self):
for k, v in self.statistics.items():
print(f"{k}: {np.mean(v)}")
def print_mean_std(self):
for k, v in self.statistics.items():
if k.endswith("mean") and k[:-4]+"std" in self.statistics.keys():
print(f"{k[:-4]}: {v[0]:.2f} +- {self.statistics[k[:-4]+'std'][0]:.2f}")
def update(self, key: str, values: List[Any]):
""" add values to the Statistics objects """
if key not in self.statistics:
self.statistics[key] = []
self.statistics[key].extend(values)
def update_from_stats(self, stats):
""" add values from Statistics objects """
for key, values in stats.items():
self.update(key, values)
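# Illustrative usage sketch (not part of the original module): the Statistics container
# collects a list of values per key; print() then reports the mean of each key.
# Reuses the numpy import at the top of this file.
_demo_stats = Statistics()
_demo_stats.add("nn_geom_error", 0.5)
_demo_stats.add("nn_geom_error", 0.7)
_demo_stats.update("fm_MSE_prediction_error", [0.1, 0.2, 0.3])
_demo_stats.print()  # nn_geom_error: ~0.6, fm_MSE_prediction_error: ~0.2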
class Metric():
def __init__(self, prefix: str = ""):
self._prefix = prefix
def calculate(self, input: Any) -> Statistics:
raise NotImplementedError
class GeomLinearRegressionMetric(Metric):
def calculate(self, features_mat, states_mat, test_features=None, test_states=None) -> Statistics:
"""
train a linear regression model Wz = state, calculate the euclidean error in cm
and the 95% and 99% percentiles.
If test_features and test_states are not provided, the error is reported on features_mat and states_mat.
:param features_mat: n_samples X n_features matrix
:param states_mat: n_samples X 2 euclidean position matrix
:param test_features: n_batch_samples X n_features matrix
:param test_states: n_batch_samples X 2 euclidean position matrix
:return:
"""
# fit the data to the linear model
model = LinearRegression(fit_intercept=True, normalize=False, copy_X=True)
x = features_mat.cpu().numpy() if isinstance(features_mat, torch.Tensor) else features_mat
y = states_mat.cpu().numpy() if isinstance(states_mat, torch.Tensor) else states_mat
model.fit(x, y)
w = model.coef_
bias = model.intercept_
# if test_features and test_states are not provided, error will be reported on features_mat and states_mat.
if test_features is not None and test_states is not None:
x_test = test_features.cpu().numpy() if isinstance(test_features, torch.Tensor) else test_features
y_test = test_states.cpu().numpy() if isinstance(test_states, torch.Tensor) else test_states
geom_error = np.linalg.norm((w.dot(x_test.T) + np.expand_dims(bias, axis=1)).T - y_test, axis=1)
else:
geom_error = np.linalg.norm((w.dot(x.T) + np.expand_dims(bias, axis=1)).T - y, axis=1)
errors = Statistics()
errors.add(f"{self._prefix}predict_geom_state_from_features_error", geom_error.mean())
errors.add(f"{self._prefix}predict_geom_state_from_features_99_percentile_error", np.percentile(geom_error, 99))
errors.add(f"{self._prefix}predict_geom_state_from_features_95_percentile_error", np.percentile(geom_error, 95))
return errors
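# Illustrative sketch (synthetic data, not from the original project): the linear-probe
# idea behind GeomLinearRegressionMetric, i.e. fit W, b so that W @ z + b approximates the
# 2-D position, then report Euclidean errors and their percentiles.
# Reuses the numpy and sklearn imports at the top of this file.
_demo_rng = np.random.RandomState(0)
_demo_z = _demo_rng.randn(100, 8)                                             # fake state representations
_demo_pos = _demo_z @ _demo_rng.randn(8, 2) + 0.01 * _demo_rng.randn(100, 2)  # near-linear positions
_demo_probe = LinearRegression().fit(_demo_z, _demo_pos)
_demo_err = np.linalg.norm(_demo_probe.predict(_demo_z) - _demo_pos, axis=1)
print(_demo_err.mean(), np.percentile(_demo_err, 95), np.percentile(_demo_err, 99))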
class ForwardModelMSEPredictionMetric(Metric):
def calculate(self, z_next: Tensor, z_next_hat: Tensor) -> Statistics:
"""
compute the MSE of error of the forward model ||z_next - z_next_hat||
:param z_next: the true next state representations, n_batch_samples X n_features matrix
:param z_next_hat: the predicted next state representations, n_batch_samples X n_features matrix
:return:
"""
errors = Statistics()
fm_mse_error = torch.norm(z_next - z_next_hat, dim=-1).detach().cpu().numpy()
errors.add(f"{self._prefix}fm_MSE_prediction_error", fm_mse_error.mean())
errors.add(f"{self._prefix}fm_MSE_prediction_99_percentile_error", np.percentile(fm_mse_error, 99))
errors.add(f"{self._prefix}fm_MSE_prediction_95_percentile_error", np.percentile(fm_mse_error, 95))
return errors
class NearestNeighboursGeometricMetric(Metric):
def __init__(self, similarity_func: Callable[[Tensor, Tensor], Tensor], prefix: str = ""): #fm_prediction
self._similarity_func = similarity_func
self._prefix = prefix
def calculate(self, features_mat: Tensor, states_mat: Tensor, true_next_state: Tensor, z: Tensor) -> Statistics:
"""
given the state representations, find their nearest neighbour and compute euclidean error in cm
and the 95% and 99% percentiles
:param features_mat: n_samples X n_features matrix
:param states_mat: n_samples X 2 euclidean position matrix
:param true_next_state: the positive next state
:param z: the state representations, n_batch_samples X n_features matrix
:return:
"""
assert true_next_state is not None, "Error, NearestNeighboursGeometricMetric, must provide state"
stats = Statistics()
# calculate the next Z prediction nearest neighbour and compare it to the real next state
top_scores, top_scores_ind = calc_nearest_neighbours(z, features_mat, k_neighbours=3, similarity_func=self._similarity_func)
next_state_nn = states_mat[top_scores_ind[:, 0]]
geom_error = torch.norm(true_next_state - next_state_nn, dim=-1).detach().cpu().numpy()
stats.add(f"{self._prefix}nn_geom_error", geom_error.mean())
stats.add(f"{self._prefix}nn_geom_99_percentile_error", np.percentile(geom_error, 99))
stats.add(f"{self._prefix}nn_geom_95_percentile_error", np.percentile(geom_error, 95))
return stats
class NearestNeighboursIOUMetric(Metric):
def __init__(self, similarity_func: Callable[[Tensor, Tensor], Tensor], prefix: str = ""):
self._similarity_func = similarity_func
self._prefix = prefix
def calculate(self, features_mat: Tensor, seg_masks: Tensor, true_next_seg_mask: Tensor, z: Tensor) -> Statistics:
"""
given the state representations, find their nearest neighbour and compute IOU
:param features_mat: n_samples X n_features matrix
:param seg_masks: n_samples X image_shape segmentation masks
:param true_next_seg_mask: the positive next state segmentation mask
:param z: the state representations, n_batch_samples X n_features matrix
:return:
"""
stats = Statistics()
top_scores, top_scores_ind = calc_nearest_neighbours(z, features_mat, k_neighbours=3, similarity_func=self._similarity_func)
stats.add(f"{self._prefix}nn_IOU", calc_IOU(true_next_seg_mask, seg_masks[top_scores_ind[:, 0]]))
return stats
class NearestNeighboursAccuracyMetric(Metric):
def __init__(self, similarity_func: Callable[[Tensor, Tensor], Tensor], prefix: str = "",
top_k: List[int] = [1, 3], ignore_first: bool = False):
self._similarity_func = similarity_func
self._prefix = prefix
self._top_k = [k+1 for k in top_k] if ignore_first else top_k
self._max_k = int(np.max(top_k)) + 1 if ignore_first else int(np.max(top_k))
self._init_index = 1 if ignore_first else 0
def calculate(self, features_mat: Tensor, info, batch_path, batch_index, z: Tensor) -> Statistics:
"""
given the state representations, find their nearest neighbour and compute recover accuracy
:param features_mat: n_samples X n_features matrix
:param info: n_samples X batch information (file path and names)
:param batch_path: n_batch_samples X batch path information (file path)
:param batch_index: n_batch_samples X batch index information (index within file)
:param z: the state representations, n_batch_samples X n_features matrix
:return: Statistics
"""
stats = Statistics()
(video_path, frame_ind, _, _) = info
top_scores, top_scores_ind = calc_nearest_neighbours(z, features_mat, k_neighbours=self._max_k,
similarity_func=self._similarity_func)
# test if the nearest neighbours of the z_next_hat is equal to z_next.
same_video = np.array(video_path)[top_scores_ind] == np.tile(batch_path, (self._max_k, 1)).transpose()
same_index = np.array(frame_ind)[top_scores_ind] == np.tile(batch_index, (self._max_k, 1)).transpose()
for k in self._top_k:
stats.add(f"{self._prefix}nn_top_{k-self._init_index}_accuracy", (same_video[:, self._init_index:k] & same_index[:, self._init_index:k]).mean() * (k-self._init_index))
return stats
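# Illustrative sketch (synthetic tensors; calc_nearest_neighbours lives in
# evaluation.nearest_neighbours and is not reproduced here): the kind of top-k retrieval
# accuracy the metric above reports, computed with a plain cosine-similarity search.
_demo_feats = torch.nn.functional.normalize(torch.randn(500, 16), dim=1)   # "database"
_demo_queries = _demo_feats[:32] + 0.01 * torch.randn(32, 16)               # noisy copies of rows 0..31
_demo_sims = _demo_queries @ _demo_feats.t()
_demo_topk = _demo_sims.topk(k=3, dim=1).indices
_demo_hits = (_demo_topk == torch.arange(32).unsqueeze(1)).any(dim=1).float()
print("top-3 accuracy:", _demo_hits.mean().item())  # typically 1.0 on this easy synthetic case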
class PlanningMetric(Metric):
def __init__(self, device: str,
config_path: str,
prefix: str = "Planning/",
n_experiments: int = 20,
stop_when_not_improving: bool = False,
tolerance_to_goal_in_cm: float = 3.,
random_actions_p: float = 0.,
use_goal_from_different_domain: bool = False):
self._prefix = prefix
self._device = device
self._n_experiments = n_experiments
self.config_path = config_path
self._tolerance_to_goal_in_cm = tolerance_to_goal_in_cm
self._stop_when_not_improving = stop_when_not_improving
self._random_actions_p = random_actions_p
self._use_goal_from_different_domain=use_goal_from_different_domain
def calculate(self, model) -> Statistics:
"""
plan and report stats
:param model: model
:return:
"""
stats = Statistics()
final_dists, init_dists, trials, gains, min_dists, trails_to_min_dist = evaluate_model_planning(model=model,
device=self._device,
use_oracle=False,
n_experiments=self._n_experiments,
verbose=False,
save_dir=None,
random_actions_p=self._random_actions_p,
n_process=1,
n_steps=10,
tolerance_to_goal_in_cm=self._tolerance_to_goal_in_cm,
stop_when_not_improving=self._stop_when_not_improving,
config_path=self.config_path,
use_goal_from_different_domain=self._use_goal_from_different_domain)
stats.add(f"{self._prefix}final_dist_mean", np.mean(final_dists))
stats.add(f"{self._prefix}final_dist_std", | np.std(final_dists) | numpy.std |
from __future__ import print_function, division
from netCDF4 import Dataset
import numpy as np
''' from doc in croco_tools Tides doc (<NAME>, Nov. 2000)
read tidal data and rebuild the corresponding elevation, x- and y- velocity fields
at a given time. Each tidal component is kept separated along an extra dimension.
time must be in hours. N.B.: ubar, vbar are pure azimuthal and meridional components
<NAME> April 2017 for LUCKYTO '''
def get_alltides(frcname,time=0.,doverb=False):
### zeta, ubar, vbar = get_alltides(frcname,time=0.,doverb=False)
### zeta, ubar, vbar have same shape as tidal components stored in "frcname"
ncfrc = Dataset(frcname,'r')
if doverb:
print('getting tidal signal from file',frcname)
Ntides = len(ncfrc.dimensions['tide_period'])
tide_period = ncfrc.variables['tide_period'][:]
if doverb:
print(Ntides,'components with period (h):',tide_period)
omega = 2*np.pi/tide_period[:,None,None]
### Sea-surface elevation
zeta_amp = ncfrc.variables['tide_Eamp'][:]
zeta_phi = ncfrc.variables['tide_Ephase'][:]*np.pi/180.
zeta = zeta_amp*np.cos(omega*time - zeta_phi)
if doverb:
print('computed surface elevation, shapes:',zeta_amp.shape,zeta_phi.shape,zeta.shape)
del zeta_amp, zeta_phi, tide_period
### Current
Cmin = ncfrc.variables['tide_Cmin'][:]
Cmax = ncfrc.variables['tide_Cmax'][:]
Cphi = ncfrc.variables['tide_Cphase'][:]*np.pi/180.
Cang = ncfrc.variables['tide_Cangle'][:]*np.pi/180.
ncfrc.close()
Wp = (Cmax + Cmin)/2.
Wm = (Cmax - Cmin)/2.
Thetap = Cang - Cphi
Thetam = Cang + Cphi
del Cmin, Cmax, Cphi, Cang
#ww = Wp*np.exp(1j*(omega*time - Thetap)) + Wm*np.exp(1j*(Thetam - omega*time))
ww = Wp*np.exp(1j*(omega*time + Thetap)) + Wm*np.exp(1j*(Thetam - omega*time))
ubar = ww.real
vbar = ww.imag
return zeta, ubar, vbar
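# Illustrative sketch (one synthetic constituent, not read from a forcing file): the rotary
# reconstruction above traces a tidal ellipse whose semi-major and semi-minor axes are Cmax
# and Cmin; checking that numerically for a single point and an M2-like period.
Cmax_d, Cmin_d = 0.5, 0.2
Cphi_d, Cang_d = np.deg2rad(30.), np.deg2rad(60.)
omega_d = 2*np.pi/12.42                      # period in hours
t_d = np.linspace(0., 12.42, 1000)
Wp_d, Wm_d = (Cmax_d + Cmin_d)/2., (Cmax_d - Cmin_d)/2.
ww_d = Wp_d*np.exp(1j*(omega_d*t_d + (Cang_d - Cphi_d))) + Wm_d*np.exp(1j*((Cang_d + Cphi_d) - omega_d*t_d))
print(np.abs(ww_d).max(), np.abs(ww_d).min())   # ~0.5 (=Cmax) and ~0.2 (=Cmin)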
def get_sumtides(frcname,time=0.,itides=None,doverb=False):
### zeta, ubar, vbar = get_sumtides(frcname,time=0.,itides=None,doverb=False)
### zeta, ubar, vbar have same shape as tidal components stored in "frcname"
ncfrc = Dataset(frcname,'r')
ncvar = ncfrc.variables
if doverb:
print('getting tidal signal from file',frcname)
Ntides = len(ncfrc.dimensions['tide_period'])
if itides is None:
itides = range(Ntides)
tide_period = ncfrc.variables['tide_period'][itides]
zeta = np.zeros(ncfrc.variables['tide_Eamp'].shape[1:])
ubar = np.copy(zeta); vbar = np.copy(zeta)
if doverb:
print(Ntides,'components with period (h):',tide_period)
print('using components',itides)
omega = 2*np.pi/tide_period[:,None,None]
for ip in itides:
zeta += ncvar['tide_Eamp'][ip,...]*np.cos(omega[ip,...]*time \
- ncvar['tide_Ephase'][ip,...]*np.pi/180.)
Cmin = ncfrc.variables['tide_Cmin'][ip,...]
Cmax = ncfrc.variables['tide_Cmax'][ip,...]
Cphi = ncfrc.variables['tide_Cphase'][ip,...]*np.pi/180.
Cang = ncfrc.variables['tide_Cangle'][ip,...]*np.pi/180.
Wp = (Cmax + Cmin)/2.
Wm = (Cmax - Cmin)/2.
Thetap = Cang - Cphi
Thetam = Cang + Cphi
ww = Wp*np.exp(1j*(omega[ip,...]*time + Thetap)) + Wm*np.exp(1j*(Thetam - omega[ip,...]*time))
ubar += ww.real
vbar += ww.imag
return zeta, ubar, vbar
def get_sumtides_onepoint(frcname,times=np.array(0.),ind=[0,0],doverb=False):
### zeta, ubar, vbar = get_sumtides_onepoint(frcname,times,ind,doverb=False)
### zeta, ubar, vbar are time series at single point
ix, iy = ind
ncfrc = Dataset(frcname,'r')
ncvar = ncfrc.variables
if doverb:
print('getting tidal signal from file',frcname)
Ntides = len(ncfrc.dimensions['tide_period'])
tide_period = ncfrc.variables['tide_period'][:]
zeta = np.zeros(times.shape)
ubar = np.copy(zeta); vbar = np.copy(zeta)
if doverb:
print(Ntides,'components with period (h):',tide_period)
omega = 2*np.pi/tide_period
zamp = ncvar['tide_Eamp'][:,iy,ix]
zphi = ncvar['tide_Ephase'][:,iy,ix]*np.pi/180.
Cmin = ncfrc.variables['tide_Cmin'][:,iy,ix]
Cmax = ncfrc.variables['tide_Cmax'][:,iy,ix]
Cphi = ncfrc.variables['tide_Cphase'][:,iy,ix]*np.pi/180.
Cang = ncfrc.variables['tide_Cangle'][:,iy,ix]*np.pi/180.
Wp = (Cmax + Cmin)/2.
Wm = (Cmax - Cmin)/2.
Thetap = Cang - Cphi
Thetam = Cang + Cphi
for ip in range(Ntides):
zeta += zamp[ip]*np.cos(omega[ip]*times - zphi[ip])
ww = Wp[ip]*np.exp(1j*(omega[ip]*times + Thetap[ip])) \
+ Wm[ip]*np.exp(1j*(Thetam[ip] - omega[ip]*times))
ubar += ww.real
vbar += ww.imag
return zeta, ubar, vbar
def get_sumtides_zeta(frcname,time=0.,tramp=None,itides=None,doverb=False):
### zeta, zetat = get_sumtides_zeta(frcname,time,tramp=None,itides=None,doverb=False)
### zeta, zetat have same shape as tidal components stored in "frcname"
if tramp is None:
ramp = 1.
dramp = 0.
else:
ramp = np.tanh(time/tramp) # tramp in hours
dramp = 1./tramp/np.cosh(time/tramp)**2
ncfrc = Dataset(frcname,'r')
ncvar = ncfrc.variables
if doverb:
print('getting tidal signal from file',frcname)
Ntides = len(ncfrc.dimensions['tide_period'])
if itides is None:
itides = range(Ntides)
tide_period = ncfrc.variables['tide_period'][itides]
zeta = np.zeros(ncfrc.variables['tide_Eamp'].shape[1:]);
zetat = np.copy(zeta)
if doverb:
print(Ntides,'components with period (h):',tide_period)
print('using components',itides)
omega = 2*np.pi/tide_period[:,None,None]
for ip in itides:
amp = ncvar['tide_Eamp'][ip,...]
phi = ncvar['tide_Ephase'][ip,...]*np.pi/180.
zeta += ramp*amp* | np.cos(omega[ip,...]*time - phi) | numpy.cos |
"""
Testcases for encoders:
- Exclude
- Log10
- Normalization
Separate linear-reduction tests:
- PCA
- Karuhnen-Loeve
"""
import numpy as np
from profit.sur.encoders import Encoder
def test_exclude():
CONFIG = ['Exclude', [2], False, {}]
COLUMNS= [2]
SIZE = (10, 4)
n = SIZE[0] * SIZE[1]
X = np.linspace(0, n-1, n).reshape(SIZE)
enc = Encoder['Exclude'](COLUMNS)
assert enc.repr == CONFIG
X_enc = enc.encode(X)
assert np.all(X_enc == X[:, [0, 1, 3]])
X_dec = enc.decode(X_enc)
assert np.all(X_dec == X)
def test_log10():
from profit.sur.encoders import Log10Encoder
CONFIG = ['Log10', [2, 3], False, {}]
COLUMNS= [2, 3]
SIZE = (10, 4)
n = SIZE[0] * SIZE[1]
X = np.linspace(0, n-1, n).reshape(SIZE)
X_log = X.copy()
X_log[:, COLUMNS] = np.log10(X_log[:, COLUMNS])
enc = Log10Encoder(COLUMNS)
assert enc.repr == CONFIG
X_enc = enc.encode(X)
assert np.all(X_enc == X_log)
X_dec = enc.decode(X_enc)
assert np.allclose(X_dec, X, atol=1e-7)
def test_normalization():
CONFIG = ['Normalization', [0, 1, 2, 3], False, {}]
COLUMNS= [0, 1, 2, 3]
SIZE = (10, 4)
n = SIZE[0] * SIZE[1]
X = | np.linspace(0, n-1, n) | numpy.linspace |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
'''
Intrinsic Atomic Orbitals
ref. JCTC, 9, 4834
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf.lib import logger, c_null_ptr
from pyscf import gto
from pyscf import scf
from pyscf import __config__
from pyscf.lo.orth import vec_lowdin
from pyscf.data.elements import is_ghost_atom
# Alternately, use ANO for minao
# orthogonalize iao with coefficients obtained by
# vec_lowdin(iao_coeff, mol.intor('int1e_ovlp'))
MINAO = getattr(__config__, 'lo_iao_minao', 'minao')
def iao(mol, orbocc, minao=MINAO, kpts=None, lindep_threshold=1e-10):
'''Intrinsic Atomic Orbitals (IAO). [Ref. JCTC, 9, 4834]
Args:
mol : pyscf.gto.Mole or pyscf.pbc.gto.Cell
The molecule or cell object.
orbocc : (N, M) or (K, N, M) array
Occupied molecular orbitals. N= number of AOs, M= number of occupied MOs,
K= number of k-points (only if mol is Cell object).
minao : str, optional
Minimal basis set used for IAOs.
kpts : ndarray(K), optional
k-points, only used for Cell objects.
lindep_threshold : float, optional
For almost linearly dependent basis sets, the Cholesky decomposition can fail.
In this case the eigendecomposition of the overlap matrix is used
with eigenvectors corresponding to eigenvalues below `lindep_threshold`
removed.
Returns:
a : (N, L) or (K, N, L) array
Non-orthogonal IAOs. Orthogonalize them as C (C^T S C)^{-1/2}, eg. using :func:`orth.lowdin`
>>> orbocc = mf.mo_coeff[:,mf.mo_occ>0]
>>> c = iao(mol, orbocc)
>>> numpy.dot(c, orth.lowdin(reduce(numpy.dot, (c.T,s,c))))
'''
if mol.has_ecp() and minao == "minao":
logger.warn(mol, 'ECP/PP is used. MINAO is not a good reference AO basis for IAOs.')
def kernel(s1, s2, s12, orbocc):
"""Make IAOs for a molecule or single k-point."""
s21 = s12.conj().T
# Minimal basis CD is not expected to fail
s2cd = scipy.linalg.cho_factor(s2)
ctild = scipy.linalg.cho_solve(s2cd, numpy.dot(s21, orbocc))
# Try Cholesky of computational basis first
try:
s1cd = scipy.linalg.cho_factor(s1)
p12 = scipy.linalg.cho_solve(s1cd, s12)
ctild = scipy.linalg.cho_solve(s1cd, numpy.dot(s12, ctild))
# For overcomplete basis sets, use eigendecomposition + canonical orthogonalization instead
except numpy.linalg.LinAlgError:
se, sv = numpy.linalg.eigh(s1)
logger.debug(mol, "Cholesky decomp. of overlap S failed; removing %d eigenvectors of S with eigenvalues below %.2e",
numpy.count_nonzero(se < lindep_threshold), lindep_threshold)
keep = (se >= lindep_threshold)
invS = numpy.einsum("ai,i,bi->ab", sv[:,keep], 1/se[keep], sv[:,keep])
p12 = numpy.dot(invS, s12)
ctild = | numpy.dot(p12, ctild) | numpy.dot |
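# Illustrative sketch (random matrices, not a molecular calculation): the symmetric (Loewdin)
# orthogonalization C (C^T S C)^{-1/2} mentioned in the iao() docstring above, written out
# with plain numpy.
import numpy
numpy.random.seed(1)
S_demo = numpy.eye(6) + 0.1 * numpy.random.rand(6, 6)
S_demo = (S_demo + S_demo.T) / 2                      # symmetric, diagonally dominant "overlap"
C_demo = numpy.random.rand(6, 3)                      # non-orthogonal orbital coefficients
M_demo = C_demo.T @ S_demo @ C_demo
e_demo, v_demo = numpy.linalg.eigh(M_demo)
C_orth = C_demo @ v_demo @ numpy.diag(e_demo ** -0.5) @ v_demo.T
print(numpy.allclose(C_orth.T @ S_demo @ C_orth, numpy.eye(3)))   # True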
"""Module for univariate densities (see also :mod:`ddl.independent`)."""
from __future__ import division, print_function
import logging
import warnings
import numpy as np
import scipy.stats
from sklearn.base import BaseEstimator
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state, column_or_1d
from .base import BoundaryWarning, ScoreMixin
# noinspection PyProtectedMember
from .utils import (_DEFAULT_SUPPORT, check_X_in_interval, make_finite, make_interior,
make_interior_probability, make_positive)
logger = logging.getLogger(__name__)
SCIPY_RV_NON_NEGATIVE = ['expon', 'chi']
SCIPY_RV_STRICTLY_POSITIVE = ['gamma', 'invgamma', 'chi2', 'lognorm']
SCIPY_RV_UNIT_SUPPORT = ['rv_histogram', 'uniform', 'beta']
def _check_univariate_X(X, support, inverse=False):
X = check_array(X, ensure_2d=True) # ensure_2d=True is default but just making explicit
# Check that X is a column vector but first ravel because check_estimator passes
# a matrix to fit
if X.shape[1] > 1:
warnings.warn(DataConversionWarning(
'Input should be column vector with shape (n, 1) but found matrix. Converting to '
'column vector via `np.mean(X, axis=1).reshape((-1, 1))`. '
'Ideally, this would raise an error but in order to pass the checks in '
'`sklearn.utils.check_estimator`, we convert the data rather than raise an error. '
))
X = np.mean(X, axis=1).reshape((-1, 1))
# Check that values are within support or range(inverse)
if inverse:
X = check_X_in_interval(X, np.array([0, 1]))
else:
X = check_X_in_interval(X, support)
return np.array(X)
class ScipyUnivariateDensity(BaseEstimator, ScoreMixin):
"""Density estimator via random variables defined in :mod:`scipy.stats`.
A univariate density estimator that can fit any distribution defined in
:mod:`scipy.stats`. This includes common distributions such as Gaussian,
laplace, beta, gamma and log-normal distributions but also many other
distributions as well.
Note that this density estimator is strictly univariate and therefore
expects the input data to be a single array with shape (n_samples, 1).
Parameters
----------
scipy_rv : object or None, default=None
Default random variable is a Gaussian (i.e.
:class:`scipy.stats.norm`) if `scipy_rv=None`. Other examples include
:class:`scipy.stats.gamma` or :class:`scipy.stats.beta`.
scipy_fit_kwargs : dict or None, optional
Keyword arguments as a dictionary for the fit function of the scipy
random variable (e.g. ``dict(floc=0, fscale=1)`` to fix the location
and scale parameters to 0 and 1 respectively). Defaults are
different depending on `scipy_rv` parameter. For example for the
`scipy.stats.beta` we set `floc=0` and `fscale=1`, i.e. fix the
location and scale of the beta distribution.
Attributes
----------
rv_ : object
Frozen :mod:`scipy.stats` random variable object. Fitted parameters
of distribution can be accessed via `args` property.
See Also
--------
scipy.stats
"""
def __init__(self, scipy_rv=None, scipy_fit_kwargs=None):
self.scipy_rv = scipy_rv
self.scipy_fit_kwargs = scipy_fit_kwargs
def fit(self, X, y=None, **fit_params):
"""Fit estimator to X.
Parameters
----------
X : array-like, shape (n_samples, 1)
Training data, where `n_samples` is the number of samples. Note
that the shape must have a second dimension of 1 since this is a
univariate density estimator.
y : None, default=None
Not used in the fitting process but kept for compatibility.
fit_params : dict, optional
Optional extra fit parameters.
Returns
-------
self : estimator
Returns the instance itself.
"""
def _check_scipy_kwargs(kwargs, _scipy_rv):
if kwargs is None:
if self._is_special(_scipy_rv, SCIPY_RV_UNIT_SUPPORT):
return dict(floc=0, fscale=1)
elif self._is_special(_scipy_rv,
SCIPY_RV_NON_NEGATIVE + SCIPY_RV_STRICTLY_POSITIVE):
return dict(floc=0)
else:
return {}
elif isinstance(kwargs, dict):
return kwargs
else:
raise ValueError('`scipy_fit_kwargs` should be either None or a `dict` object.')
# Input validation
scipy_rv = self._get_scipy_rv_or_default()
scipy_fit_kwargs = _check_scipy_kwargs(self.scipy_fit_kwargs, scipy_rv)
X = self._check_X(X)
# MLE fit based on scipy implementation
if scipy_rv.numargs == 0 and 'floc' in scipy_fit_kwargs and 'fscale' in scipy_fit_kwargs:
params = (scipy_fit_kwargs['floc'], scipy_fit_kwargs['fscale'])
else:
try:
params = scipy_rv.fit(X.ravel(), **scipy_fit_kwargs)
except RuntimeError as e:
warnings.warn('Unable to fit to data using scipy_rv so attempting to use default '
'parameters for the distribution. Original error:\n%s' % str(e))
params = self._get_default_params(scipy_rv)
except ValueError:
# warnings.warn(
# 'Trying to use fixed parameters instead. Original error:\n%s' % str(e))
# try to extract fixed parameters in a certain order
params = []
for k in ['fa', 'f0', 'fb', 'f1', 'floc', 'fscale']:
try:
params.append(scipy_fit_kwargs.pop(k))
except KeyError:
pass
# Avoid degenerate case when scale = 0
if len(params) >= 2 and params[-1] == 0:
params = list(params)
if np.issubdtype(X.dtype, np.floating):
params[-1] = np.finfo(X.dtype).eps
else:
params[-1] = 1 # Integer types
params = tuple(params)
# Create "frozen" version of random variable so that parameters do not need to be
# specified
self.rv_ = scipy_rv(*params)
# Check for a fit error in the domain of the parameters
try:
self.rv_.rvs(1)
except ValueError:
warnings.warn('Parameters discovered by fit are not in the domain of the '
'parameters so attempting to use default parameters for the '
'distribution.')
self.rv_ = scipy_rv(*self._get_default_params(scipy_rv))
return self
@classmethod
def create_fitted(cls, scipy_rv_params=None, **kwargs):
"""Create fitted density.
Parameters
----------
scipy_rv : object or None, default=None
Default random variable is a Gaussian (i.e.
:class:`scipy.stats.norm`) if `scipy_rv=None`. Other examples include
:class:`scipy.stats.gamma` or :class:`scipy.stats.beta`.
scipy_rv_params : dict, optional
Parameters to pass to scipy_rv when creating frozen random variable.
Default parameters have been set for various distributions.
**kwargs
Other parameters to pass to object constructor.
Returns
-------
fitted_density : Density
Fitted density.
"""
density = cls(**kwargs)
# Get default if scipy_rv=None
scipy_rv = density._get_scipy_rv_or_default()
# Fit scipy random variable
if scipy_rv_params is None:
try:
params = cls._get_default_params(scipy_rv)
except NotImplementedError:
params = []
rv = scipy_rv(*params)
else:
rv = scipy_rv(**scipy_rv_params)
density.rv_ = rv
return density
@classmethod
def _get_default_params(cls, scipy_rv):
if cls._is_special(scipy_rv, ['beta']):
return [1, 1]
elif cls._is_special(scipy_rv, ['uniform', 'norm', 'expon', 'lognorm']):
return [] # Empty since no parameters needed
else:
raise NotImplementedError('The distribution given by the `scipy_rv = %s` does not '
'have any associated default parameters.'
% str(scipy_rv))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from this density/destructor.
Parameters
----------
n_samples : int, default=1
Number of samples to generate. Defaults to 1.
random_state : int, RandomState instance or None, optional (default=None)
If int, `random_state` is the seed used by the random number
generator; If :class:`~numpy.random.RandomState` instance,
`random_state` is the random number generator; If None, the random
number generator is the :class:`~numpy.random.RandomState` instance
used by :mod:`numpy.random`.
Returns
-------
X : array, shape (n_samples, n_features)
Randomly generated sample.
"""
self._check_is_fitted()
rng = check_random_state(random_state)
return np.array(self.rv_.rvs(size=n_samples, random_state=rng)).reshape((n_samples, 1))
def score_samples(self, X, y=None):
"""Compute log-likelihood (or log(det(Jacobian))) for each sample.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples and n_features
is the number of features.
y : None, default=None
Not used but kept for compatibility.
Returns
-------
log_likelihood : array, shape (n_samples,)
Log likelihood of each data point in X.
"""
self._check_is_fitted()
X = self._check_X(X)
return self.rv_.logpdf(X.ravel()).reshape((-1, 1))
def cdf(self, X, y=None):
"""[Placeholder].
Parameters
----------
X :
y :
Returns
-------
obj : object
"""
self._check_is_fitted()
X = self._check_X(X)
return self.rv_.cdf(X.ravel()).reshape((-1, 1))
def inverse_cdf(self, X, y=None):
"""[Placeholder].
Parameters
----------
X :
y :
Returns
-------
obj : object
"""
self._check_is_fitted()
X = self._check_X(X, inverse=True)
return self.rv_.ppf(X.ravel()).reshape((-1, 1))
def get_support(self):
"""Get the support of this density (i.e. the positive density region).
Returns
-------
support : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``support[0]`` is the minimum and
``support[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's support (which could
be different for each feature) is given similar to the first
case.
"""
# Assumes density is univariate
try:
self._check_is_fitted()
except NotFittedError:
# Get upper and lower bounds of support from scipy random variable properties
if self.scipy_rv is None:
default_rv = ScipyUnivariateDensity._get_default_scipy_rv()
return np.array([[default_rv.a, default_rv.b]])
else:
return np.array([[self.scipy_rv.a, self.scipy_rv.b]])
else:
# Scale and shift if fitted
try:
loc = self.rv_.args[-2]
except IndexError:
try:
loc = self.rv_.args[-1]
except IndexError:
loc = 0
scale = 1
else:
scale = self.rv_.args[-1]
if scale == 0: # Handle special degenerate case to avoid nans in domain
scale += np.finfo(float).eps
return loc + scale * np.array([[self.rv_.a, self.rv_.b]])
def _check_X(self, X, inverse=False):
# Check that X is univariate or warn otherwise
X = _check_univariate_X(X, self.get_support(), inverse=inverse)
scipy_rv = self._get_scipy_rv_or_default()
# Move away from support/domain boundaries if necessary
if inverse and (np.any(X <= 0) or np.any(X >= 1)):
warnings.warn(BoundaryWarning(
'Some probability values (input to inverse functions) are either 0 or 1. Bounding '
'values away from 0 or 1 to avoid infinities in output. For example, the inverse '
'cdf of a Gaussian at 0 will yield `-np.inf`.'))
X = make_interior_probability(X)
if self._is_special(scipy_rv, SCIPY_RV_UNIT_SUPPORT) and (np.any(X <= 0) or np.any(X >= 1)):
warnings.warn(BoundaryWarning(
'Input to random variable function has at least one value either 0 or 1 '
'but all input should be in (0,1) exclusive. Bounding values away from 0 or 1 by '
'a small epsilon.'))
X = make_interior_probability(X)
if self._is_special(scipy_rv, SCIPY_RV_STRICTLY_POSITIVE) and np.any(X <= 0):
warnings.warn(BoundaryWarning(
'Input to random variable function has at least one value less than or equal to '
'zero but all input should be strictly positive. Making all input greater than or '
'equal to some small positive constant.'))
X = make_positive(X)
if np.any(np.isinf(X)):
warnings.warn(BoundaryWarning(
'Input to random variable function has at least one value that is `np.inf` or '
'`-np.inf`. Making all input finite via a very large constant.'))
X = make_finite(X)
return X
def _get_scipy_rv_or_default(self):
if self.scipy_rv is None:
return ScipyUnivariateDensity._get_default_scipy_rv()
else:
return self.scipy_rv
@staticmethod
def _get_default_scipy_rv():
return scipy.stats.norm
@staticmethod
def _is_special(scipy_rv, scipy_str_set):
# Modify string set for special case of rv_histogram
scipy_str_set = [
'.' + dstr + '_gen' if dstr != 'rv_histogram' else '.' + dstr
for dstr in scipy_str_set
]
return np.any([
dstr in str(scipy_rv)
for dstr in scipy_str_set
])
def _check_is_fitted(self):
check_is_fitted(self, ['rv_'])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
STANDARD_NORMAL_DENSITY = ScipyUnivariateDensity(
scipy_rv=scipy.stats.norm,
scipy_fit_kwargs=dict(floc=0, fscale=1)
).fit(np.array([[0]]))
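# Illustrative usage sketch (synthetic data, not part of the original module): fit a
# scipy-backed univariate density, then sample from it and score new points.
_demo_X = scipy.stats.beta(a=2, b=5).rvs(size=500, random_state=0).reshape(-1, 1)
_demo_density = ScipyUnivariateDensity(scipy_rv=scipy.stats.beta).fit(_demo_X)
print(_demo_density.rv_.args)                    # fitted (a, b, loc, scale)
print(_demo_density.sample(3, random_state=0))   # three new samples, shape (3, 1)
print(_demo_density.score_samples(_demo_X[:2]))  # per-sample log-likelihood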
class HistogramUnivariateDensity(ScipyUnivariateDensity):
"""Histogram univariate density estimator.
Parameters
----------
bins : int or sequence of scalars or str, optional
Same as the parameter of :func:`numpy.histogram`. Copied from numpy
documentation:
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
bounds : float or array-like of shape (2,)
Specification for the finite bounds of the histogram. Bounds can be
percentage extension or a specified interval [a,b].
alpha : float
Regularization parameter corresponding to the number of
pseudo-counts to add to each bin of the histogram. This can be seen
as putting a Dirichlet prior on the empirical bin counts with
Dirichlet parameter alpha.
Attributes
----------
bin_edges_ : array of shape (n_bins + 1,)
Edges of bins.
pdf_bin_ : array of shape (n_bins,)
pdf values of bins. Note that histograms have a constant pdf
value within each bin.
cdf_bin_ : array of shape (n_bins + 1,)
cdf values at bin edges. Used with linear interpolation to
compute pdf, cdf and inverse cdf.
"""
def __init__(self, bins=None, bounds=0.1, alpha=1e-6):
self.bins = bins
self.bounds = bounds
self.alpha = alpha
def fit(self, X, y=None, histogram_params=None):
"""Fit estimator to X.
Parameters
----------
X : array-like, shape (n_samples, 1)
Training data, where `n_samples` is the number of samples. Note
that the shape must have a second dimension of 1 since this is a
univariate density estimator.
y : None, default=None
Not used in the fitting process but kept for compatibility.
histogram_params : list or tuple of size 2
Tuple or list of values of bins and bin edges. For example,
from :func:`numpy.histogram`.
Returns
-------
self : estimator
Returns the instance itself.
"""
if X is not None and histogram_params is not None:
raise ValueError('Either X or histogram_params can be provided (i.e. not None) '
'but not both.')
if histogram_params is not None:
hist, bin_edges = histogram_params
warnings.warn(DeprecationWarning('Class factory method `create_fitted` should '
'be used instead of passing histogram_params '
'to fit.'))
else:
X = self._check_X(X)
# Get percent extension but do not modify bounds
bounds = self._check_bounds(X)
bins = self.bins if self.bins is not None else 'auto'
# Fit numpy histogram
hist, bin_edges = np.histogram(X, bins=bins, range=bounds)
hist = np.array(hist, dtype=float) # Make float so we can add non-integer alpha
hist += self.alpha # Smooth histogram by alpha so no areas have 0 probability
# Normalize bins by bin_edges
rv = self._hist_params_to_rv(hist, bin_edges)
self.rv_ = rv
return self
@staticmethod
def _hist_params_to_rv(hist, bin_edges):
hist = np.array(hist)
bin_edges = np.array(bin_edges)
hist = hist / (bin_edges[1:] - bin_edges[:-1])
rv = scipy.stats.rv_histogram((hist, bin_edges))
return rv
@classmethod
def create_fitted(cls, hist, bin_edges, **kwargs):
"""Create fitted density.
Parameters
----------
hist : array-like, shape (n_bins,)
Bin counts of the histogram (e.g. the counts returned by :func:`numpy.histogram`).
bin_edges : array-like, shape (n_bins + 1,)
Edges of the histogram bins.
**kwargs
Other parameters to pass to object constructor.
Returns
-------
fitted_density : Density
Fitted density.
"""
density = cls(**kwargs)
rv = cls._hist_params_to_rv(hist, bin_edges)
density.rv_ = rv
return density
def get_support(self):
"""Get the support of this density (i.e. the positive density region).
Returns
-------
support : array-like, shape (2,) or shape (n_features, 2)
If shape is (2, ), then ``support[0]`` is the minimum and
``support[1]`` is the maximum for all features. If shape is
(`n_features`, 2), then each feature's support (which could
be different for each feature) is given similar to the first
case.
"""
# Make [[a,b]] so that it is explicitly a univariate density
return np.array([self._check_bounds()])
def _check_bounds(self, X=None, extend=True):
# If bounds is extension
if np.isscalar(self.bounds):
if X is None:
# If no X than just return -inf, inf
return _DEFAULT_SUPPORT
else:
# If X is not None than extract bounds and extend as necessary
perc_extension = self.bounds
_domain = np.array([np.min(X), | np.max(X) | numpy.max |
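# Illustrative sketch (standalone, synthetic data): the scipy.stats.rv_histogram mechanism
# that HistogramUnivariateDensity builds on; bin counts become a piecewise-constant pdf
# with a matching cdf and inverse cdf.
import numpy as np
import scipy.stats
demo_x = np.random.RandomState(0).randn(1000)
demo_hist, demo_edges = np.histogram(demo_x, bins=20)
demo_hist = demo_hist / (demo_edges[1:] - demo_edges[:-1])   # same normalization as _hist_params_to_rv
demo_rv = scipy.stats.rv_histogram((demo_hist, demo_edges))
print(demo_rv.pdf(0.0), demo_rv.cdf(0.0), demo_rv.ppf(0.5))  # density, cdf and median of the fit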
#!/usr/bin/env python3
import json
from collections import defaultdict
from copy import deepcopy
from random import shuffle
import numpy as np
import sys
import ontology
def load_json_data(file_name):
"""Load a json file - file_name.
:param file_name: a name of the json file
:return: the Python representation of the json file
"""
with open(file_name) as f:
text_data = json.load(f)
return text_data
def gen_examples(text_data, input):
"""Generates training examples for the conversation to sequence model. Here, we experiment with conversational models
that is converting a conversational history into dialogue state representation (dialogue state tracking) and generation
a textual response given the conversation history (dialogue policy).
:param text_data: a list of conversation each composed of (system_output, user_input, dialogue state) tuples
:return: a transformed text_data
"""
examples = []
for conversation in text_data:
asr_history = []
trs_history = []
prev_turn = None
for turn in conversation:
if prev_turn:
trs_history.append(prev_turn[0])
trs_history.append(prev_turn[1])
asr_history.append(prev_turn[0])
asr_history.append(prev_turn[2])
state = prev_turn[4] # the dialogue state
action = turn[0] # the system action / response
if input == 'trs':
examples.append(deepcopy([trs_history, state, action]))
elif input == 'asr':
examples.append(deepcopy([asr_history, state, action]))
elif input == 'trs+asr':
examples.append(deepcopy([trs_history, state, action]))
examples.append(deepcopy([asr_history, state, action]))
else:
raise Exception('Unsupported type of input: {s}'.format(s=input))
prev_turn = turn
trs_history.append(prev_turn[0])
trs_history.append(prev_turn[1])
asr_history.append(prev_turn[0])
asr_history.append(prev_turn[2])
state = prev_turn[4] # the dialogue state
action = 'hangup' # the system action / response
if input == 'trs':
examples.append(deepcopy([trs_history, state, action]))
elif input == 'asr':
examples.append(deepcopy([asr_history, state, action]))
elif input == 'trs+asr':
examples.append(deepcopy([trs_history, state, action]))
examples.append(deepcopy([asr_history, state, action]))
else:
raise Exception('Unsupported type of input: {s}'.format(s=input))
return examples
def get_words(utterance):
"""Splits an utterance into words, removes some characters not available in spoken dialogue systems,
lowercases the text.
:param utterance: a string
:return: a list of string (words)
"""
for c in '?!.,':
utterance = utterance.replace(c, ' ').replace('  ', ' ')
return utterance.lower().split()
def normalize(examples):
norm_examples = []
for history, state, action in examples:
norm_history = []
for utterance in history:
utterance_words = get_words(utterance)
norm_history.append(utterance_words)
norm_state = get_words(state)
norm_action = get_words(action)
norm_examples.append([norm_history, norm_state, norm_action])
return norm_examples
def sort_by_conversation_length(examples):
examples.sort(key=lambda example: len(example[0]))
return examples
def get_word2idx(idx2word):
return dict([(w, i) for i, w in enumerate(idx2word)])
def count_dict(lst, dct):
for word in lst:
dct[word] += 1
def get_idx2word(examples):
words_history = defaultdict(int)
words_history_arguments = defaultdict(int)
words_state = defaultdict(int)
words_action = defaultdict(int)
words_action_arguments = defaultdict(int)
words_action_templates = defaultdict(int)
for abs_history, history_arguments, abs_state, abs_action, action_arguments, action_templates in examples:
for utterance in abs_history:
count_dict(utterance, words_history)
count_dict(history_arguments, words_history_arguments)
count_dict(abs_state, words_state)
count_dict(abs_action, words_action)
count_dict(action_arguments, words_action_arguments)
words_action_templates[action_templates] += 1
idx2word_history = get_indexes(words_history)
idx2word_history_arguments = get_indexes(words_history_arguments)
idx2word_state = get_indexes(words_state)
idx2word_action = get_indexes(words_action)
idx2word_action_arguments = get_indexes(words_action_arguments)
idx2word_action_templates = get_indexes(words_action_templates, add_sos=False, add_eos=False)
return (idx2word_history, idx2word_history_arguments,
idx2word_state,
idx2word_action, idx2word_action_arguments,
idx2word_action_templates)
def get_indexes(dct, add_sos=True, add_eos=True, add_oov=True):
idx2word = []
if add_sos:
idx2word.append('_SOS_')
if add_eos:
idx2word.append('_EOS_')
if add_oov:
idx2word.append('_OOV_')
dct = [word for word in dct if dct[word] >= 2]
idx2word.extend(sorted(dct))
return idx2word
def get_padding(abstract_train_examples):
max_length_history = 0
max_length_utterance = 0
max_length_state = 0
max_length_action = 0
for abs_history, history_arguments, abs_state, abs_action, action_arguments, action_template in abstract_train_examples:
for utterance in abs_history:
max_length_utterance = max(max_length_utterance, len(utterance))
max_length_history = max(max_length_history, len(abs_history))
max_length_state = max(max_length_state, len(abs_state))
max_length_action = max(max_length_action, len(abs_action))
return max_length_action, max_length_history, max_length_state, max_length_utterance
def index_and_pad_utterance(utterance, word2idx, max_length, add_sos=True):
if add_sos:
s = [word2idx['_SOS_']]
else:
s = []
for w in utterance:
# if w == '20 milton road chesterton':
# print(len(utterance), utterance, w, max_length)
# sys.exit(0)
# if w not in word2idx:
# print('U', utterance)
# print('OOV: {oov}'.format(oov=w))
s.append(word2idx.get(w, word2idx['_OOV_']))
for w in range(max_length - len(s)):
s.append(word2idx['_EOS_'])
return s[:max_length]
def index_and_pad_history(history, word2idx, max_length_history, max_length_utterance):
index_pad_history = []
# padding
for i in range(max_length_history - len(history)):
ip_utterance = index_and_pad_utterance('', word2idx, max_length_utterance + 2)
index_pad_history.append(ip_utterance)
# the real data
for utterance in history:
ip_utterance = index_and_pad_utterance(utterance, word2idx, max_length_utterance + 2)
index_pad_history.append(ip_utterance)
return index_pad_history[len(index_pad_history) - max_length_history:]
def index_action_template(action_template, word2idx_action_template):
return word2idx_action_template.get(action_template, word2idx_action_template['_OOV_'])
def index_and_pad_examples(examples,
word2idx_history, max_length_history, max_length_utterance,
word2idx_history_arguments,
word2idx_state, max_length_state,
word2idx_action, max_length_action,
word2idx_action_arguments,
word2idx_action_template):
index_pad_examples = []
for abs_history, history_arguments, abs_state, abs_action, action_arguments, action_template in examples:
ip_history = index_and_pad_history(abs_history, word2idx_history, max_length_history, max_length_utterance)
# print(len(history_arguments), history_arguments)
ip_history_arguments = index_and_pad_utterance(history_arguments, word2idx_history_arguments,
len(history_arguments), add_sos=False)
# print(len(ip_history_arguments), ip_history_arguments)
ip_state = index_and_pad_utterance(abs_state, word2idx_state, max_length_state, add_sos=False)
ip_action = index_and_pad_utterance(abs_action, word2idx_action, max_length_action, add_sos=False)
ip_action_arguments = index_and_pad_utterance(action_arguments, word2idx_action_arguments,
len(action_arguments), add_sos=False)
ip_action_template = index_action_template(action_template, word2idx_action_template)
index_pad_examples.append(
[ip_history, ip_history_arguments, ip_state, ip_action, ip_action_arguments, ip_action_template])
return index_pad_examples
def add_action_templates(abstract_test_examples):
examples = []
for e in abstract_test_examples:
examples.append(list(e) + [' '.join(e[3]), ])
return examples
def gen_database(database_data):
"""Convert a discrete database
:param database_data:
:return:
"""
idx2word = defaultdict(set)
for row in database_data:
for column in row:
idx2word[column].add(row[column])
for column in idx2word:
idx2word[column].add('_OOV_')
for column in idx2word:
idx2word[column] = sorted(idx2word[column])
word2idx = defaultdict(dict)
for column in idx2word:
word2idx[column] = get_word2idx(idx2word[column])
columns = sorted(idx2word.keys())
database = []
for row in database_data:
r = []
for column in columns:
idx = word2idx[column][row.get(column, '_OOV_')]
r.append(idx)
database.append(r)
return database, columns, idx2word, word2idx
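# Illustrative sketch (toy rows, not the real database file): gen_database maps every value of
# every column to an integer index (plus an '_OOV_' entry) and re-encodes each row with them.
_toy_db = [
    {"name": "golden house", "area": "north", "pricerange": "cheap"},
    {"name": "hk fusion", "area": "centre", "pricerange": "expensive"},
]
_db, _cols, _i2w, _w2i = gen_database(_toy_db)
print(_cols)   # ['area', 'name', 'pricerange']
print(_db)     # [[2, 1, 1], [1, 2, 2]]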
class DSTC2:
def __init__(self, input, data_fraction, train_data_fn, dev_data_fn, test_data_fn, ontology_fn, database_fn,
batch_size):
self.ontology = ontology.Ontology(ontology_fn, database_fn)
database_data = load_json_data(database_fn)
database, \
self.database_columns, \
self.database_idx2word, \
self.database_word2idx = gen_database(database_data)
self.database = np.asarray(database, dtype=np.int32)
train_histories, \
train_histories_arguments, \
train_states, \
train_actions, \
train_actions_arguments, \
train_actions_template = self.train_data_processing(input, train_data_fn, data_fraction)
self.train_set = {
'histories': train_histories,
'histories_arguments': train_histories_arguments,
'states': train_states,
'actions': train_actions,
'actions_arguments': train_actions_arguments,
'actions_template': train_actions_template
}
self.train_set_size = len(self.train_set['histories'])
dev_histories, \
dev_histories_arguments, \
dev_states, \
dev_actions, \
dev_actions_arguments, \
dev_actions_template = self.data_processing(input, dev_data_fn, data_fraction)
self.dev_set = {
'histories': dev_histories,
'histories_arguments': dev_histories_arguments,
'states': dev_states,
'actions': dev_actions,
'actions_arguments': dev_actions_arguments,
'actions_template': dev_actions_template
}
self.dev_set_size = len(self.dev_set['histories'])
test_histories, \
test_histories_arguments, \
test_states, \
test_actions, \
test_actions_arguments, \
test_actions_template = self.data_processing(input, test_data_fn, data_fraction)
self.test_set = {
'histories': test_histories,
'histories_arguments': test_histories_arguments,
'states': test_states,
'actions': test_actions,
'actions_arguments': test_actions_arguments,
'actions_template': test_actions_template
}
self.test_set_size = len(self.test_set['histories'])
self.batch_size = batch_size
self.train_batch_indexes = []
batch_histories = []
batch_histories_arguments = []
batch_states = []
batch_actions = []
batch_actions_arguments = []
batch_actions_template = []
for i in range(0, self.train_set_size // batch_size):
start = i * batch_size
end = (i + 1) * batch_size
self.train_batch_indexes.append(i)
batch_histories.append(np.expand_dims(train_histories[start:end], axis=0))
batch_histories_arguments.append(np.expand_dims(train_histories_arguments[start:end], axis=0))
batch_states.append(np.expand_dims(train_states[start:end], axis=0))
batch_actions.append(np.expand_dims(train_actions[start:end], axis=0))
batch_actions_arguments.append( | np.expand_dims(train_actions_arguments[start:end], axis=0) | numpy.expand_dims |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import utility_functions as utilfunc
import sys
import config
# Import from support function repo
import dispatch_functions as dFuncs
import tariff_functions as tFuncs
import decorators
np.seterr(divide='ignore', invalid='ignore')
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_size_and_financial_performance(agent):
"""
This function accepts the characteristics of a single agent and
evaluates the financial performance of a set of solar+storage
system sizes. The system size with the highest NPV is selected.
Parameters
----------
agent : pandas.Series
Single agent (row) from an agent dataframe.
Returns
-------
pandas.Series
Agent with system size, business model and corresponding financial performance.
"""
#=========================================================================#
# Setup
#=========================================================================#
try:
in_cols = list(agent.index)
if config.VERBOSE:
logger.info(' ')
logger.info("\tRunning system size calculations for: {}, {}, {}".format(agent['state'], agent['tariff_class'], agent['sector_abbr']))
logger.info('real_discount: {}'.format(agent['discount_rate']))
logger.info('loan_rate: {}'.format(agent['loan_rate']))
logger.info('down_payment: {}'.format(agent['down_payment']))
# Set resolution of dispatcher
d_inc_n_est = 10
DP_inc_est = 12
d_inc_n_acc = 20
DP_inc_acc = 12
# Extract load profile
load_profile = np.array(agent['consumption_hourly'])
agent.loc['timesteps_per_year'] = 1
# Extract PV capacity factor profile
pv_cf_profile = np.array(agent['solar_cf_profile']) / 1e3
agent['naep'] = float(np.sum(pv_cf_profile))
# Create battery object
batt = dFuncs.Battery()
batt_ratio = 3.0
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# Create export tariff object
if agent['nem_system_size_limit_kw'] != 0:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
original_bill, original_results = tFuncs.bill_calculator(load_profile, tariff, export_tariff)
if config.VERBOSE:
logger.info('original_bill: {}'.format(original_bill))
agent['first_year_elec_bill_without_system'] = original_bill * agent['elec_price_multiplier']
if config.VERBOSE:
logger.info('multiplied original bill: {}'.format(agent['first_year_elec_bill_without_system']))
if agent['first_year_elec_bill_without_system'] == 0:
agent['first_year_elec_bill_without_system']=1.0
agent['first_year_elec_cents_per_kwh_without_system'] = agent['first_year_elec_bill_without_system'] / agent['load_per_customer_in_bin_kwh']
#=========================================================================#
# Estimate bill savings revenue from a set of solar+storage system sizes
#=========================================================================#
max_size_load = agent.loc['load_per_customer_in_bin_kwh']/agent.loc['naep']
max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['developable_buildings_pct'] * agent.loc['pv_power_density_w_per_sqft']/1000.0
agent.loc['max_pv_size'] = min([max_size_load, max_size_roof, agent.loc['nem_system_size_limit_kw']])
if config.VERBOSE:
logger.info('max_size_load: {}'.format(max_size_load))
logger.info('max_size_roof: {}'.format(max_size_roof))
dynamic_sizing = True #False
if dynamic_sizing:
pv_sizes = np.arange(0, 1.1, 0.1) * agent.loc['max_pv_size']
else:
# Size the PV system depending on NEM availability, either to 95% of load w/NEM, or 50% w/o NEM. In both cases, roof size is a constraint.
if export_tariff.full_retail_nem==True:
pv_sizes = np.array([min(max_size_load * 0.95, max_size_roof)])
else:
pv_sizes = np.array([min(max_size_load * 0.5, max_size_roof)])
batt_powers = np.zeros(1)
# Calculate the estimation parameters for each PV size
est_params_df = pd.DataFrame(index=pv_sizes)
est_params_df['estimator_params'] = 'temp'
for pv_size in pv_sizes:
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
est_params_df.at[pv_size, 'estimator_params'] = dFuncs.calc_estimator_params(load_and_pv_profile, tariff, export_tariff, batt.eta_charge, batt.eta_discharge)
# Create df with all combinations of solar+storage sizes
system_df = pd.DataFrame(dFuncs.cartesian([pv_sizes, batt_powers]), columns=['pv', 'batt_kw'])
system_df['est_bills'] = None
pv_kwh_by_year = np.array([sum(x) for x in np.split(np.array(pv_cf_profile), agent.loc['timesteps_per_year'])])
pv_kwh_by_year = np.concatenate([(pv_kwh_by_year - ( pv_kwh_by_year * agent.loc['pv_deg'] * i)) for i in range(1, agent.loc['economic_lifetime']+1)])
system_df['kwh_by_timestep'] = system_df['pv'].apply(lambda x: x * pv_kwh_by_year)
n_sys = len(system_df)
for i in system_df.index:
pv_size = system_df['pv'][i].copy()
load_and_pv_profile = load_profile - pv_size*pv_cf_profile
# for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_all = np.sum(pv_size * pv_cf_profile * agent.loc['wholesale_elec_use_per_kwh'])
system_df.loc[i, 'est_bills'] = original_bill - sell_all
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
if pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
if pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# for agents with no compensation mechanism: set sell rate to 0 and calculate bill with net load profile
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
batt_power = system_df['batt_kw'][i].copy()
batt.set_cap_and_power(batt_power*batt_ratio, batt_power)
if batt_power > 0:
estimator_params = est_params_df.loc[system_df['pv'][i].copy(), 'estimator_params']
estimated_results = dFuncs.determine_optimal_dispatch(load_profile, pv_size*pv_cf_profile, batt, tariff, export_tariff, estimator_params=estimator_params, estimated=True, DP_inc=DP_inc_est, d_inc_n=d_inc_n_est, estimate_demand_levels=True)
system_df.loc[i, 'est_bills'] = estimated_results['bill_under_dispatch']
else:
bill_with_PV, _ = tFuncs.bill_calculator(load_and_pv_profile, tariff, export_tariff)
system_df.loc[i, 'est_bills'] = bill_with_PV #+ one_time_charge
# Calculate bill savings cash flow
# elec_price_multiplier is the scalar increase in the cost of electricity since 2016, when the tariffs were curated
# elec_price_escalator is this agent's assumption about how the price of electricity will change in the future.
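# For example (illustrative values only): with economic_lifetime=3, elec_price_escalator=0.01 and pv_deg=0.005,
#   escalator   = [1.0, 1.01, 1.0201, 1.030301]
#   degradation = [1.0, 0.995, 0.990025, 0.985075]  (rounded)
# so the savings credited in year y are avg_est_bill_savings * escalator[y] * degradation[y].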
avg_est_bill_savings = (original_bill - np.array(system_df['est_bills'])).reshape([n_sys, 1]) * agent['elec_price_multiplier']
est_bill_savings = np.zeros([n_sys, agent['economic_lifetime']+1])
est_bill_savings[:,1:] = avg_est_bill_savings
escalator = (np.zeros(agent['economic_lifetime']+1) + agent['elec_price_escalator'] + 1)**list(range(agent['economic_lifetime']+1))
degradation = (np.zeros(agent['economic_lifetime']+1) + 1 - agent['pv_deg'])**list(range(agent['economic_lifetime']+1))
est_bill_savings = est_bill_savings * escalator * degradation
system_df['est_bill_savings'] = est_bill_savings[:, 1]
# simple representation of 70% minimum of batt charging from PV in order to
# qualify for the ITC. Here, if batt kW is greater than 25% of PV kW, no ITC.
batt_chg_frac = np.where(system_df['pv'] >= system_df['batt_kw']*4.0, 1.0, 0)
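# For example (illustrative values only): pv=10 kW with batt_kw=2 kW satisfies pv >= 4*batt_kw,
# so batt_chg_frac=1.0 (ITC assumed); with batt_kw=3 kW it does not, so batt_chg_frac=0 (no ITC).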
#=========================================================================#
# Determine financial performance of each system size
#=========================================================================#
if 'investment_incentive_pct' in agent.index:
if agent['investment_incentive_year_cutoff'] >= agent['year']:
investment_incentives = np.full(system_df.shape[0], agent['investment_incentive_pct'])
else:
investment_incentives = np.zeros(system_df.shape[0])
else:
investment_incentives = np.zeros(system_df.shape[0])
if 'capacity_incentive' in agent.index:
raise NotImplementedError
else:
capacity_based_incentives = np.zeros(system_df.shape[0])
if 'production_incentive' in agent.index:
raise NotImplementedError
else:
production_based_incentives = np.tile(np.array([0]*agent.loc['economic_lifetime']), (system_df.shape[0],1))
if 'cash_incentives' in agent.index:
raise NotImplementedError
else:
cash_incentives = np.array([0]*system_df.shape[0])
cf_results_est = cashflow_constructor(bill_savings=est_bill_savings,
pv_size=np.array(system_df['pv']), pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=np.array(system_df['batt_kw'])*batt_ratio, batt_power=np.array(system_df['batt_kw']),
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent.loc['batt_om_per_kw'], batt_om_per_kwh=agent.loc['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent['tax_rate'], state_tax_rate=0, real_d=agent['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
system_df['npv'] = cf_results_est['npv']
#=========================================================================#
# Select system size and business model for this agent
#=========================================================================#
index_of_best_fin_perform_ho = system_df['npv'].idxmax()
opt_pv_size = system_df['pv'][index_of_best_fin_perform_ho].copy()
opt_batt_power = system_df['batt_kw'][index_of_best_fin_perform_ho].copy()
opt_batt_cap = opt_batt_power*batt_ratio
batt.set_cap_and_power(opt_batt_cap, opt_batt_power)
tariff = tFuncs.Tariff(dict_obj=agent.loc['tariff_dict'])
# for buy all sell all agents: calculate value of generation based on wholesale prices and subtract from original bill
if agent.loc['compensation_style'] == 'Buy All Sell All':
sell_all = np.sum(opt_pv_size * pv_cf_profile * agent.loc['wholesale_elec_usd_per_kwh'])
opt_bill = original_bill - sell_all
# package into "dummy" dispatch results dictionary
accurate_results = {'bill_under_dispatch' : opt_bill, 'batt_dispatch_profile' : np.zeros(len(load_profile))}
# for net billing agents: if system size within policy limits, set sell rate to wholesale price -- otherwise, set sell rate to 0
elif (agent.loc['compensation_style'] == 'Net Billing (Wholesale)') or (agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)'):
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
if agent.loc['compensation_style'] == 'Net Billing (Wholesale)':
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
elif agent.loc['compensation_style'] == 'Net Billing (Avoided Cost)':
export_tariff.set_constant_sell_price(agent.loc['hourly_excess_sell_rate_usd_per_kwh'])
else:
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# for net metering agents: if system size within policy limits, set full_retail_nem=True -- otherwise set export value to wholesale price
elif agent.loc['compensation_style'] == 'Net Metering':
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
if opt_pv_size<=agent.loc['nem_system_size_limit_kw']:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=True)
export_tariff.periods_8760 = tariff.e_tou_8760
export_tariff.prices = tariff.e_prices_no_tier
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(agent.loc['wholesale_elec_usd_per_kwh'])
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
else:
export_tariff = tFuncs.Export_Tariff(full_retail_nem=False)
export_tariff.set_constant_sell_price(0.)
accurate_results = dFuncs.determine_optimal_dispatch(load_profile, opt_pv_size*pv_cf_profile, batt, tariff, export_tariff, estimated=False, d_inc_n=d_inc_n_acc, DP_inc=DP_inc_acc)
# add system size class
system_size_breaks = [0.0, 2.5, 5.0, 10.0, 20.0, 50.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 1500.0, 3000.0]
#=========================================================================#
# Determine dispatch trajectory for chosen system size
#=========================================================================#
opt_bill = accurate_results['bill_under_dispatch'] #+ one_time_charge
agent.loc['first_year_elec_bill_with_system'] = opt_bill * agent.loc['elec_price_multiplier']
agent.loc['first_year_elec_bill_savings'] = agent.loc['first_year_elec_bill_without_system'] - agent.loc['first_year_elec_bill_with_system']
agent.loc['first_year_elec_bill_savings_frac'] = agent.loc['first_year_elec_bill_savings'] / agent.loc['first_year_elec_bill_without_system']
opt_bill_savings = np.zeros([1, agent.loc['economic_lifetime'] + 1])
opt_bill_savings[:, 1:] = (original_bill - opt_bill)
opt_bill_savings = opt_bill_savings * agent.loc['elec_price_multiplier'] * escalator * degradation
# If the batt kW is less than 25% of the PV kW, apply the ITC
if opt_pv_size >= opt_batt_power*4:
batt_chg_frac = 1.0
else:
batt_chg_frac = 0.0
cash_incentives = np.array([cash_incentives[index_of_best_fin_perform_ho]])
investment_incentives = np.array([investment_incentives[index_of_best_fin_perform_ho]])
capacity_based_incentives = np.array([capacity_based_incentives[index_of_best_fin_perform_ho]])
production_based_incentives = np.array(production_based_incentives[index_of_best_fin_perform_ho])
cf_results_opt = cashflow_constructor(bill_savings=opt_bill_savings,
pv_size=opt_pv_size, pv_price=agent.loc['pv_price_per_kw'], pv_om=agent.loc['pv_om_per_kw'],
batt_cap=opt_batt_cap, batt_power=opt_batt_power,
batt_cost_per_kw=agent.loc['batt_price_per_kw'], batt_cost_per_kwh=agent.loc['batt_price_per_kwh'],
batt_om_per_kw=agent['batt_om_per_kw'], batt_om_per_kwh=agent['batt_om_per_kwh'],
batt_chg_frac=batt_chg_frac,
sector=agent.loc['sector_abbr'], itc=agent.loc['itc_fraction'], deprec_sched=agent.loc['deprec_sch'],
fed_tax_rate=agent.loc['tax_rate'], state_tax_rate=0, real_d=agent.loc['discount_rate'],
analysis_years=agent.loc['economic_lifetime'], inflation=agent.loc['inflation'],
down_payment_fraction=agent.loc['down_payment'], loan_rate=agent.loc['loan_rate'], loan_term=agent.loc['loan_term'],
cash_incentives=cash_incentives, ibi=investment_incentives, cbi=capacity_based_incentives, pbi=production_based_incentives)
#=========================================================================#
# Package results
#=========================================================================#
agent['pv_kw'] = opt_pv_size
agent['batt_kw'] = opt_batt_power
agent['batt_kwh'] = opt_batt_cap
agent['npv'] = cf_results_opt['npv'][0]
agent['cash_flow'] = cf_results_opt['cf'][0]
agent['batt_dispatch_profile'] = accurate_results['batt_dispatch_profile']
agent['bill_savings'] = opt_bill_savings
agent['aep'] = agent['pv_kw'] * agent['naep']
agent['cf'] = agent['naep']/8760
agent['system_size_factors'] = np.where(agent['pv_kw'] == 0, 0, pd.cut([agent['pv_kw']], system_size_breaks))[0]
agent['export_tariff_results'] = original_results
out_cols = list(agent.index)
new_cols = [i for i in out_cols if i not in in_cols] + ['agent_id']
agent = agent.loc[agent.index.isin(new_cols)]
except Exception as e:
logger.info(' ')
logger.info('--------------------------------------------')
logger.info("failed in calc_system_size_and_financial_performance")
logger.info(('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e), e))
logger.info('agent that failed')
logger.info(agent)
logger.info('--------------------------------------------')
agent.to_pickle('agent_that_failed.pkl')
return agent
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_financial_performance(dataframe):
"""
Function to calculate the payback period and join it on the agent dataframe.
Parameters
----------
dataframe : pandas.DataFrame
Agent dataframe
Returns
-------
pandas.DataFrame
Agent dataframe with `payback_period` joined on dataframe
"""
# dataframe = dataframe.reset_index()
cfs = np.vstack(dataframe['cash_flow']).astype(float)
# calculate payback period
tech_lifetime = np.shape(cfs)[1] - 1
payback = calc_payback_vectorized(cfs, tech_lifetime)
# calculate time to double
ttd = calc_ttd(cfs)
metric_value = np.where(dataframe['sector_abbr']=='res', payback, ttd)
dataframe['metric_value'] = metric_value
dataframe = dataframe.set_index('agent_id')
return dataframe
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_max_market_share(dataframe, max_market_share_df):
"""
Calculates the maximum marketshare available for each agent.
Parameters
----------
dataframe : pandas.DataFrame
Attributes
----------
metric_value : float
max_market_share_df : pandas.DataFrame
Set by :meth:`settings.ScenarioSettings.get_max_marketshare`.
Returns
-------
pandas.DataFrame
Input DataFrame with `max_market_share` and `metric` columns joined on.
"""
in_cols = list(dataframe.columns)
dataframe = dataframe.reset_index()
dataframe['business_model'] = 'host_owned'
dataframe['metric'] = 'payback_period'
# Convert metric value to integer as a primary key, then bound within max market share ranges
max_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.max()
min_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].metric_value.min()
max_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.max()
min_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].metric_value.min()
# copy the metric values to a new column to store an edited version
metric_value_bounded = dataframe['metric_value'].values.copy()
# where the metric value exceeds the corresponding max market curve bounds, set the value to the corresponding bound
metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] < min_payback))] = min_payback
metric_value_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['metric_value'] > max_payback))] = max_payback
metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] < min_mbs))] = min_mbs
metric_value_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['metric_value'] > max_mbs))] = max_mbs
import numpy as np
import pandas
import math
from sklearn.utils import resample
import time
import sys
import cdrds_analysis as cdrds
import random
# I might want to get rid of this to make it a more tunable script...
import seq_loader
ntasks = int(sys.argv[1])
which_num = int(sys.argv[2])
num_RE = 1000
# How many times will this replicant run
num_iter = int(np.ceil(num_RE/ntasks))
print('We will run '+str(num_iter)+' iterations in process '+str(which_num))
# Load in the data
# Need to load in all of the therapeutic ABs
# BE LAZY AND COPY IT EXACTLY FROM MY COMPUTER
adimab_data=seq_loader.getAdimab()
adimab_supp,sabDab_data=seq_loader.getSabDab()
new_adimab=pandas.concat([adimab_data,adimab_supp['Highest_Clin_Trial (Jan \'20)'],adimab_supp['Est. Status']],axis=1)
# Now sort it into two separate groups for classification
y=new_adimab[new_adimab["Highest_Clin_Trial (Jan '20)"] == 'Approved']
adi_approve=y[y["Est. Status"] != 'Discontinued']
adi_discont=new_adimab[new_adimab["Est. Status"] == 'Discontinued']
x=new_adimab[new_adimab["Highest_Clin_Trial (Jan '20)"] != 'Approved']
adi_phases=x[x["Est. Status"] != 'Discontinued']
sab_heavy=[sabDab_data['cdrH1_aa'].values,sabDab_data['cdrH2_aa'].values,sabDab_data['cdrH3_aa'].values]
sab_light=[sabDab_data['cdrL1_aa'].values,sabDab_data['cdrL2_aa'].values,sabDab_data['cdrL3_aa'].values]
sabDab_seqs=np.vstack((sab_light,sab_heavy))
# Now sort it into two separate groups for classification
y=sabDab_data[sabDab_data["Highest_Clin_Trial (Jan '20)"] == 'Approved']
sab_approve=y[y["Est. Status"] != 'Discontinued']
sab_discont=sabDab_data[sabDab_data["Est. Status"] == 'Discontinued']
x=sabDab_data[sabDab_data["Highest_Clin_Trial (Jan '20)"] != 'Approved']
phases=x[x["Est. Status"] != 'Discontinued']
approve_h=np.vstack((adi_approve.values[:,1:4],sab_approve.values[:,1:4]))
discont_h=np.vstack((adi_discont.values[:,1:4],sab_discont.values[:,1:4]))
approve_l=np.vstack((adi_approve.values[:,5:8],sab_approve.values[:,4:7]))
discont_l=np.vstack((adi_discont.values[:,5:8],sab_discont.values[:,4:7]))
approve_all=np.transpose(np.hstack((approve_l,approve_h)))
discont_all=np.transpose(np.hstack((discont_l,discont_h)))
approve_adi_F=np.transpose(np.hstack((adi_approve.values[:,5:8],adi_approve.values[:,1:4])))
discont_adi_F=np.transpose(np.hstack((adi_discont.values[:,5:8],adi_discont.values[:,1:4])))
ALL_mono=approve_all
ALL_poly=discont_all
newnew=pandas.read_csv('new_props')
oldold=pandas.read_csv('old_props')
# Again, ugly to hard code in the number of properties (62) but
# For now no harm no foul
AA_key=['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']
properties=np.zeros((62,20))
for i in np.arange(len(AA_key)):
properties[0:16,i]=oldold[AA_key[i]]
properties[16:,i]=newnew[AA_key[i]]
AA_num_key_new=properties[1]
AA_num_key=np.arange(20)+1
# Might want to change this to like half of the min,
# in order to actually subsample the data.
#NUMsamples = min(len(ALL_mono[0]),len(ALL_poly[0]))
mono_size = len(ALL_mono[0])
poly_size = len(ALL_poly[0])
test_MATRIX1 = ALL_mono
test_MATRIX2 = ALL_poly
# Really need to pre-define the matrix size based on the largest matrix
mono_matrix_MI,poly_matrix_MI,matSize=cdrds.gen_tcr_matrix(ALL_poly,pre_mono=ALL_mono,key = AA_num_key,binary=True,return_Size=True)
for i in np.arange(num_iter):
# REPLACE NUMsamples with the actual size of each dataset... (They're close anyway)
remono=np.transpose(resample(np.transpose(test_MATRIX1),random_state=int(1000*random.random()),n_samples = mono_size))
repoly=np.transpose(resample(np.transpose(test_MATRIX2),random_state=int(1000*random.random()),n_samples = poly_size))
mono_matrix_MI,poly_matrix_MI=cdrds.gen_tcr_matrix(remono,pre_mono=repoly,key = AA_num_key,binary = True,giveSize = matSize)
len_distM=np.zeros((6,len(remono[0])))
len_distP=np.zeros((6,len(repoly[0])))
for j in [0,1,2,3,4,5]: # This one for light
for k in np.arange(len(remono[0])):
len_distM[j,k]=len(remono[j][k])
for k in np.arange(len(repoly[0])):
len_distP[j,k]=len(repoly[j][k])
len_meanM=np.average(len_distM,axis=1)
len_meanP=np.average(len_distP,axis=1)
len_stdM=np.std(len_distM,axis=1)
len_stdP=np.std(len_distP,axis=1)
clone_meansM=cdrds.gen_clone_props(mono_matrix_MI)
clone_meansP=cdrds.gen_clone_props(poly_matrix_MI)
mean_varsM=np.average(clone_meansM,axis=1)
mean_varsP=np.average(clone_meansP,axis=1)
std_varsM=np.std(clone_meansM,axis=1)
#!/usr/bin/env python
"""
Interpolation of scattered data using ordinary kriging/collocation
The program uses nearest neighbors interpolation and selects data from eight
quadrants around the prediction point and uses a third-order Gauss-Markov
covariance model, with a correlation length defined by the user.
Provides the possibility of pre-cleaning of the data using a spatial n-sigma
filter before interpolation.
Observations with provided noise/error estimates (for each observation) are
added to the diagonal of the covariance matrix if provided. User can also
provide a constant rms-noise added to the diagonal.
Takes as input a h5df file with needed data in geographical coordinates
and a-priori error if needed. The user provides the wanted projection
using the EPSG projection format.
Output consists of an hdf5 file containing the predictions, rmse and the
number of points used in the prediction, and the epsg number for the
projection.
Notes:
If both the a-priori errors are provided and constant rms all values
smaller then provided rms is set to this value providing a minimum
error for the observations.
To reduce the impact of highly correlated along-track measurements
(seen as streaks in the interpolated raster) the 'rand' option
can be used. This randomly samples N-observations in each quadrant
instead of using the closest data points.
Example:
python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
-c 50 10 -v lon lat dhdt dummy -e 0.1 -m dist
python interpkrig.py ifile.h5 ofile.h5 -d 10 10 -n 25 -r 50 -a 25 -p 3031 \
-c 50 10 -v lon lat dhdt rmse -e 0.1 -m rand
Credits:
captoolkit - JPL Cryosphere Altimetry Processing Toolkit
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
Jet Propulsion Laboratory, California Institute of Technology
"""
import h5py
import pyproj
import argparse
import numpy as np
from scipy import stats
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
def rand(x, n):
"""Draws random samples from array"""
# Determine data density
if len(x) > n:
# Draw random samples from array
I = np.random.choice(np.arange(len(x)), n, replace=False)
else:
# Output boolean vector - true
I = np.ones(len(x), dtype=bool)
return I
def sort_dist(d, n):
""" Sort array by distance"""
# Determine if sorting needed
if len(d) >= n:
# Sort according to distance
I = np.argsort(d)
else:
# Output boolean vector - true
I = np.ones(len(d), dtype=bool)
return I
def transform_coord(proj1, proj2, x, y):
"""Transform coordinates from proj1 to proj2 (EPSG num)."""
# Set full EPSG projection strings
proj1 = pyproj.Proj("+init=EPSG:" + proj1)
proj2 = pyproj.Proj("+init=EPSG:" + proj2)
# Convert coordinates
return pyproj.transform(proj1, proj2, x, y)
def make_grid(xmin, xmax, ymin, ymax, dx, dy):
""" Construct output grid-coordinates. """
Nn = int((np.abs(ymax - ymin)) / dy) + 1 # ny
Ne = int((np.abs(xmax - xmin)) / dx) + 1 # nx
xi = np.linspace(xmin, xmax, num=Ne)
yi = np.linspace(ymin, ymax, num=Nn)
return np.meshgrid(xi, yi)
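# Illustrative usage sketch (added for clarity, not part of the original
# script): a 0-100 km domain at 10 km postings yields an 11 x 11 grid.
def _example_make_grid():
    Xi, Yi = make_grid(0, 100e3, 0, 100e3, 10e3, 10e3)
    assert Xi.shape == (11, 11) and Yi.shape == (11, 11)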
def spatial_filter(x, y, z, dx, dy, sigma=5.0):
""" Cleaning of spatial data """
# Grid dimensions
Nn = int((np.abs(y.max() - y.min())) / dy) + 1
Ne = int((np.abs(x.max() - x.min())) / dx) + 1
# Bin data
f_bin = stats.binned_statistic_2d(x, y, z, bins=(Ne, Nn))
# Get bin numbers for the data
index = f_bin.binnumber
# Unique indexes
ind = np.unique(index)
# Create output
zo = z.copy()
# Number of unique index
for i in range(len(ind)):
# index for each bin
idx, = np.where(index == ind[i])
# Get data
zb = z[idx]
# Make sure we have enough
if len(zb[~np.isnan(zb)]) == 0:
continue
# Set to median of values
dh = zb - np.nanmedian(zb)
# Identify outliers
foo = np.abs(dh) > sigma * np.nanstd(dh)
import logging
import numpy as np
from strax import exporter
from .pulse import Pulse
from .s2 import S2
export, __all__ = exporter()
logging.basicConfig(handlers=[logging.StreamHandler()])
log = logging.getLogger('wfsim.core')
log.setLevel('WARNING')
@export
class PhotoIonization_Electron(S2):
"""
Produce electron after pulse simulation, using already built cdfs
The cdfs follow distribution parameters extracted from data.
"""
def __init__(self, config):
super().__init__(config)
self._photon_timings = []
def generate_instruction(self, signal_pulse, signal_pulse_instruction):
if len(signal_pulse._photon_timings) == 0:
return []
return self.electron_afterpulse(signal_pulse, signal_pulse_instruction)
def electron_afterpulse(self, signal_pulse, signal_pulse_instruction):
"""
For electron afterpulses we assume a uniform x, y
"""
delaytime_pmf_hist = self.resource.uniform_to_ele_ap
# To save calculation we first find out how many photons will give rise to afterpulses
n_electron = np.random.poisson(delaytime_pmf_hist.n
* len(signal_pulse._photon_timings)
* self.config['photoionization_modifier'])
ap_delay = delaytime_pmf_hist.get_random(n_electron)
# Reasonably bin delay time that would be diffuse out together
ap_delay_i, n_electron_i = self._reduce_instruction_timing(
ap_delay,
delaytime_pmf_hist)
n_instruction = len(ap_delay_i)
# Randomly select original photon as time zeros
t_zeros = signal_pulse._photon_timings[np.random.randint(
low=0, high=len(signal_pulse._photon_timings),
size=n_instruction)]
instruction = np.repeat(signal_pulse_instruction[0], n_instruction)
instruction['type'] = 4 # pi_el
instruction['time'] = t_zeros - self.config['drift_time_gate']
instruction['x'], instruction['y'] = self._rand_position(n_instruction)
instruction['z'] = - ap_delay_i * self.config['drift_velocity_liquid']
instruction['amp'] = n_electron_i
return instruction
def _reduce_instruction_timing(self, ap_delay, delaytime_pmf_hist):
# Binning the delay time, so electron timing within each
# will be diffused to fill the whole bin
delaytime_spread = np.sqrt(2 * self.config['diffusion_constant_longitudinal']\
* delaytime_pmf_hist.bin_centers)
delaytime_spread /= self.config['drift_velocity_liquid']
coarse_time, coarse_time_i = [], 100 # Start at 100ns, as it's smaller than the single-electron width
while coarse_time_i < delaytime_pmf_hist.bin_centers[-1]:
coarse_time.append(coarse_time_i)
coarse_time_i += delaytime_spread[np.argmin(np.abs(coarse_time_i - delaytime_pmf_hist.bin_centers))]
coarse_time = np.array(coarse_time)
idx = np.digitize(ap_delay[ap_delay < coarse_time[-1]], coarse_time)
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utility functions for M-Theory investigations."""
import base64
import collections
import dataclasses
import hashlib
import itertools
import math
import os
import pprint
import sys
import time
from typing import Optional
import warnings
import numpy
import scipy.linalg
import scipy.optimize
import tensorflow as tf
# For this maths-heavy project that needs to be open-sourceable,
# the style guide sometimes pulls us away from the most concise notation.
# In these cases: ignore pylint.
# pylint: disable=invalid-name
# pylint: disable=g-complex-comprehension
# pylint: disable=redefined-outer-name
_str_ignore_einsum_annotations = str.maketrans(
{'^': None, '_': None, '|': None, ';': None})
def nsum(spec, *arrays, optimize='greedy'):
"""Numpy-Einsum Convenience wrapper.
This uses "greedy" contraction as the default contraction-strategy.
Also, it will strip the characters '^', '_', '|', and ';' from the
contraction-spec, which hence can be used to document index-placement
in the underlying physics formulas.
Args:
spec: string, the contraction-specification.
*arrays: The arrays to contract over.
optimize: The `optimize` parameter for numpy.einsum().
Returns:
The generalized-einstein-summation contraction result.
"""
translated_spec = spec.translate(_str_ignore_einsum_annotations)
try:
return numpy.einsum(translated_spec, *arrays, optimize=optimize)
# If something goes wrong, re-write the exception to a more telling one.
except Exception as exn: # pylint:disable=broad-except
shapes_dtypes = [(x.shape, x.dtype) for x in arrays]
raise ValueError(
f'nsum failure, spec: {spec!r}, pieces: {shapes_dtypes!r}, '
f'exception: {exn!r}')
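# Illustrative usage sketch (added, not part of the original module): the
# annotation characters '^', '_', '|', ';' are stripped from the spec, so the
# annotated spelling below is equivalent to the plain einsum spec 'am,mb->ab'.
def _example_nsum():
  a = numpy.arange(6.0).reshape(2, 3)
  b = numpy.arange(12.0).reshape(3, 4)
  assert numpy.allclose(nsum('_a^m,_m^b->_a^b', a, b),
                        numpy.einsum('am,mb->ab', a, b))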
def asymm2(a, *einsum_specs):
"""Antisymmetrizes an array."""
for einsum_spec in einsum_specs:
a = 0.5 * (a - nsum(einsum_spec, a))
return a
# Convenience short-hands.
rng = numpy.random.RandomState
expm = scipy.linalg.expm
tf_expm = tf.linalg.expm
def rm(filename):
"""Removes a file, returning error or `None` if successful."""
try:
os.unlink(filename)
return None
except OSError as exn:
return exn
def home_relative(path):
"""Returns a path relative to $HOME."""
return os.path.join(os.getenv('HOME'), path)
def arg_enabled(name, *enabling_tags):
"""Helper for selectively enabling parts of a semi-interactive script."""
return name == '__main__' and (
set(sys.argv[1:]) & (set(enabling_tags) | {'all'}))
def final(iterator):
"""Returns the final element of an iterator."""
ret = None
for x in iterator:
ret = x
return ret
def evstats(m, d=6):
"""Returns eigenvalue statistics for a matrix."""
if numpy.allclose(m, m.T):
eigvals = numpy.linalg.eigh(m)[0]
return sorted(collections.Counter(eigvals.round(d)).items())
eigvals = numpy.linalg.eigvals(m)
return sorted(collections.Counter(eigvals.round(d)).items(),
key=lambda zn: (zn[0].real, zn[0].imag))
def get_gramian_onb(gramian, eigenvalue_threshold=1e-7):
"""Computes orthogonalizing transform for a gramian.
Args:
gramian: [N, N]-array G.
eigenvalue_threshold: Eigenvalues smaller than this
are considered to be equal to zero.
Returns:
A pair of matrices (R, R_inv) such that R @ R_inv = numpy.eye(N) and
einsum('Aa,Bb,ab->AB', R, R, gramian) is diagonal with entries
in (0, 1, -1).
Example: If gramian=numpy.array([[100.0, 0.1], [0.1, 1.0]])
then R.round(6) == numpy.array([[0.00101, -1.00005], [-0.1, -0.000101]])
and R[0, :] as well as R[1, :] are orthonormal unit vectors w.r.t.
the scalar product given by the gramian.
"""
gramian = numpy.asarray(gramian)
sprods_eigvals, sprods_eigvecsT = numpy.linalg.eigh(gramian)
abs_evs = abs(sprods_eigvals)
onbi_scaling = numpy.where(abs_evs <= eigenvalue_threshold,
1.0,
numpy.sqrt(abs_evs))
onbi = numpy.einsum('WZ,Z->WZ',
sprods_eigvecsT, onbi_scaling)
onb = numpy.einsum('WZ,Z->WZ',
sprods_eigvecsT, 1.0 / onbi_scaling).T
assert numpy.allclose(onb @ onbi, numpy.eye(onb.shape[0]))
return onb, onbi
def dstack(*pieces):
"""Assembles a matrix from blocks-on-the-diagonal."""
a_pieces = [numpy.asarray(piece) for piece in pieces]
dtype = numpy.find_common_type([a.dtype for a in a_pieces],
[numpy.float64])
piece_shapes = [x.shape for x in a_pieces]
if not all(len(s) == 2 and s[0] == s[1] for s in piece_shapes):
raise ValueError(f'Invalid diagonal-piece shapes: {piece_shapes!r}')
block_sizes = [s[0] for s in piece_shapes]
block_ranges = [0] + numpy.cumsum(block_sizes).tolist()
result = numpy.zeros([block_ranges[-1]] * 2, dtype=dtype)
for num_block, block in enumerate(a_pieces):
start_idx, end_idx = block_ranges[num_block:num_block + 2]
result[start_idx:end_idx, start_idx:end_idx] = block
return result
def numpy_fingerprint(a, digits=3):
"""Fingerprints a numpy-array."""
# Hack to ensure that -0.0 gets consistently shown as 0.0.
minus_zero_hack = 1e-100+1e-100j
return base64.b64encode(
hashlib.sha256(
str(
(a.shape, ','.join(map(repr, numpy.round(a + minus_zero_hack,
digits).flat))))
.encode('utf-8'))
.digest()).decode('utf-8').strip('\n=')
def nonzero_entries(array, eps=1e-7):
"""Extracts sparse [(value, *indices), ...] array representation.
Args:
array: The numpy array to obtain a sparse representation for,
eps: Threshold magnitude. Entries <= `eps` are skipped.
Returns:
List of (coefficient, index0, ..., indexN) tuples representing
the non-zero entries of the array.
"""
entries = []
for indices in itertools.product(*[range(n) for n in array.shape]):
v = array[indices]
if abs(v) <= eps:
continue
entries.append((v,) + indices)
return entries
def numpy_from_nonzero_entries(shape, entries, dtype=None):
"""Produces a numpy array from a sparse representation.
Args:
shape: The shape of the array.
entries: The entries as an iterable of (value, index0, ..., indexN)
tuples, e.g. as produced by nonzero_entries().
dtype: The array-dtype. Defaults to the dtype of the sum of all values.
Returns:
The numpy array.
"""
if dtype is None:
dtype = type(sum(z[0] for z in entries))
ret = numpy.zeros(shape, dtype=dtype)
for v, *indices in entries:
ret[tuple(indices)] += v
return ret
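# Illustrative usage sketch (added): nonzero_entries() and
# numpy_from_nonzero_entries() round-trip an array whose nonzero entries all
# exceed `eps` in magnitude.
def _example_sparse_roundtrip():
  a = numpy.array([[0.0, 2.5], [-1.0, 0.0]])
  entries = nonzero_entries(a)  # [(2.5, 0, 1), (-1.0, 1, 0)]
  assert numpy.allclose(numpy_from_nonzero_entries(a.shape, entries), a)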
def as_code(array, func_name, eps=1e-7):
"""Prints Python code that synthesizes a given numpy array."""
# This is mostly useful in intermediate stages of research,
# when we temporarily want to make some definition that was found
# to work directly part of the code, for reproducibility.
entries = nonzero_entries(array, eps=eps)
print(f'\ndef {func_name}():\n'
f' data = [\n {pprint.pformat(entries, compact=True, indent=4)[1:]}\n'
f' return numpy_from_nonzero_entries({array.shape}, data)\n\n')
def sparse_dict_from_array(array, magnitude_threshold=0):
"""Converts a array to a dict of nonzero-entries keyed by index-tuple."""
ret = {}
for index_tuple in itertools.product(*(map(range, array.shape))):
v = array[index_tuple]
if abs(v) > magnitude_threshold:
ret[index_tuple] = v
return ret
def permutation_sign(p):
"""Determines the sign of a permutation, given as a sequence of integers."""
q = list(p) # Copy to list.
sign = 1
for n in range(len(p)):
while n != q[n]:
qn = q[n]
q[n], q[qn] = q[qn], q[n] # Flip to make q[qn] = qn.
sign = -sign
return sign
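# Illustrative usage sketch (added): a single transposition is odd (sign -1),
# a 3-cycle is even (sign +1).
def _example_permutation_sign():
  assert permutation_sign([1, 0, 2]) == -1
  assert permutation_sign([1, 2, 0]) == +1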
def get_symmetric_traceless_basis(n):
"""Computes a basis for symmetric-traceless matrices."""
num_matrices = n * (n + 1) // 2 - 1
# Basis for symmetric-traceless n x n matrices.
b = numpy.zeros([num_matrices, n, n])
# First (n-1) matrices are diag(1, -1, 0, ...), diag(0, 1, -1, 0, ...).
# These are not orthogonal to one another.
for k in range(n - 1):
b[k, k, k] = 1
b[k, k + 1, k + 1] = -1
i = n - 1
for j in range(n):
for k in range(j + 1, n):
b[i, j, k] = b[i, k, j] = 1
i += 1
return b
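# Illustrative usage sketch (added): for n=3 this yields the
# 3 * 4 // 2 - 1 = 5 basis matrices, each symmetric and traceless.
def _example_symmetric_traceless_basis():
  b = get_symmetric_traceless_basis(3)
  assert b.shape == (5, 3, 3)
  assert numpy.allclose(numpy.trace(b, axis1=1, axis2=2), 0)
  assert numpy.allclose(b, numpy.transpose(b, (0, 2, 1)))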
def symmetric_svd(m):
"""Computes the 'symmetric SVD' of a symmetric (complex) matrix.
Args:
m: [n, n]-ndarray, the symmetric matrix to be decomposed.
Returns:
(q, s), where `q` is a [n, n]-complex-ndarray, and `s` is a [n]-real-ndarray,
and q.T @ m @ q == numpy.diag(s).
"""
# Conceptually, this algorithm is about 'regarding the 1j that
# multiplies the imaginary part of the matrix as a different
# imaginary unit than the one we get in eigenvalues',
# i.e. unraveling the complex-symmetric matrix into a real-symmetric
# one by splitting real and imaginary parts (where we have to be careful,
# as the imaginary piece does not become an antisymmetric contribution,
# as is usual).
dim = m.shape[0]
if m.shape[1] != dim or not numpy.allclose(m, m.T):
raise ValueError('Matrix is not symmetric!')
m_re = m.real
m_im = m.imag
mb = numpy.zeros([2 * dim, 2 * dim], dtype=m_re.dtype)
mb[:dim, :dim] = m_re
mb[dim:, dim:] = -m_re
mb[dim:, :dim] = mb[:dim, dim:] = m_im
mb_eigvals, mb_eigvecsT = numpy.linalg.eigh(mb)
# We need that half of the eigenvectors that is associated
# with 'large' eigenvalues.
# Let us call these the 'plus eigenvectors'.
eigvals_sorting_indices = numpy.argsort(mb_eigvals)
plus_eigvecs_re_im = mb_eigvecsT[:, eigvals_sorting_indices[dim:]]
plus_eigvecs = plus_eigvecs_re_im[:dim, :] - 1j * plus_eigvecs_re_im[dim:, :]
diagonalized = plus_eigvecs.T @ m @ plus_eigvecs
diag = numpy.diag(diagonalized)
diag_re = diag.real
assert numpy.allclose(diag_re, diag)
assert numpy.allclose(numpy.diag(diag), diagonalized)
return plus_eigvecs, diag_re
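# Illustrative usage sketch (added): for a generic (non-degenerate) random
# complex symmetric matrix, the returned q diagonalizes m as
# q.T @ m @ q == diag(s) with real s.
def _example_symmetric_svd():
  rng_example = numpy.random.RandomState(0)
  a = rng_example.normal(size=(4, 4)) + 1j * rng_example.normal(size=(4, 4))
  m = a + a.T  # Complex symmetric, not hermitean.
  q, s = symmetric_svd(m)
  assert numpy.allclose(q.T @ m @ q, numpy.diag(s))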
def undiskify(z):
"""Maps SL(2)/U(1) poincare disk coord to Lie algebra generator-factor."""
# Conventions match (2.13) in https://arxiv.org/abs/1909.10969
return 2* numpy.arctanh(abs(z)) * numpy.exp(1j * numpy.angle(z))
def diskify(z):
"""Maps Lie algebra generator-factor to SL(2)/U(1) poincare disk coord."""
# Conventions match (2.13) in https://arxiv.org/abs/1909.10969
return numpy.tanh(0.5 * abs(z)) * numpy.exp(1j * numpy.angle(z))
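# Illustrative usage sketch (added): diskify() and undiskify() are mutually
# inverse maps between the unit Poincare disk and the complex plane.
def _example_diskify_roundtrip():
  z_disk = 0.3 - 0.4j  # |z| < 1.
  z_plane = 1.5 + 0.5j
  assert abs(diskify(undiskify(z_disk)) - z_disk) < 1e-12
  assert abs(undiskify(diskify(z_plane)) - z_plane) < 1e-12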
def aligning_rotation(v_target, v_in):
"""Returns some SO(N) rotation that turns v_in into v_target."""
v_target = numpy.asarray(v_target)
v_in = numpy.asarray(v_in)
dim = v_target.shape[0]
vn_target = v_target / numpy.sqrt(v_target.dot(v_target))
vn_in = v_in / numpy.sqrt(1 / numpy.finfo(numpy.float64).max +
v_in.dot(v_in))
cos_angle = vn_target.dot(vn_in)
v_parallel = vn_target * cos_angle
v_perp = vn_in - v_parallel
v_perp_len = sin_angle = numpy.sqrt(v_perp.dot(v_perp))
if v_perp_len < 100 * numpy.finfo(numpy.float64).resolution:
# The rotation that we would need to apply is very close to the
# identity matrix. Just return that instead.
return numpy.eye(dim)
# Otherwise, we can normalize the perpendicular vector.
vn_perp = v_perp / v_perp_len
return (numpy.eye(dim) -
numpy.outer(vn_target, vn_target) -
numpy.outer(vn_perp, vn_perp) +
sin_angle * (numpy.outer(vn_target, vn_perp) -
numpy.outer(vn_perp, vn_target)) +
cos_angle * numpy.outer(vn_target, vn_target) +
cos_angle * numpy.outer(vn_perp, vn_perp))
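# Illustrative usage sketch (added): the returned rotation is orthogonal and
# maps the direction of v_in onto the direction of v_target.
def _example_aligning_rotation():
  rot = aligning_rotation([1.0, 0.0, 0.0], [0.0, 2.0, 0.0])
  assert numpy.allclose(rot @ numpy.array([0.0, 1.0, 0.0]), [1.0, 0.0, 0.0])
  assert numpy.allclose(rot @ rot.T, numpy.eye(3))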
def _aitken_accelerated_inner(iterable):
# See: https://en.wikipedia.org/wiki/Aitken%27s_delta-squared_process
# Also: "Structure and Interpretation of Computer Programs",
# 3.5.3 "Exploiting the Stream Paradigm".
z_prev2, z_prev = numpy.nan, numpy.nan
for z in iterable:
if numpy.isfinite(z_prev2):
yield z - (z - z_prev)**2 / (z + z_prev2 - 2 * z_prev)
z_prev2, z_prev = z_prev, z
def aitken_accelerated(iterable, order=1):
"""Convergence-accelerates an iterable."""
if order == 0:
return iterable
return aitken_accelerated(_aitken_accelerated_inner(iterable), order - 1)
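# Illustrative usage sketch (added): accelerating the slowly converging
# Leibniz partial sums of pi = 4 * (1 - 1/3 + 1/5 - ...).
def _example_aitken():
  def leibniz_partial_sums():
    total = 0.0
    for k in itertools.count():
      total += 4.0 * (-1) ** k / (2 * k + 1)
      yield total
  plain = list(itertools.islice(leibniz_partial_sums(), 10))
  accelerated = list(itertools.islice(
      aitken_accelerated(leibniz_partial_sums(), order=2), 10))
  assert abs(accelerated[-1] - math.pi) < abs(plain[-1] - math.pi)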
# [t_a, t_b] = f_abc t_c
def get_f_abc(t_abc, filename=None, sanitize=None, verbose=False):
"""Computes structure constants, [t_a, t_b] = f_abc t_c.
Args:
t_abc: [dim_ad, D, D]-ndarray, generator matrices.
When reading t_abc entries as matrices, the matrix that
corresponds to generator #0 is t_abc[0].T.
filename: Optional filename to save the computation's result to,
in NumPy's compressed .npz format. Must end with '.npz'.
If `filename` is provided and a corresponding file exists,
this file is de-serialized instead of re-doing the computation.
sanitize: Optional ndarray->ndarray function to remove numerical
noise on f_abC.
verbose: Whether to announce (via print()) re-computation of structure
constants (rather than re-use of serialized data).
Returns:
A pair (f_abc, f_abC), where `f_abc` are the structure constants,
computed as tr(t_a [t_b, t_c]), and `f_abC` have the last index
converted to a co-adjoint index via an extra 'inverse Cartan-Killing
metric' factor.
"""
if filename is not None and not filename.endswith('.npz'):
raise ValueError(f'Filename should end with ".npz": {filename!r}')
if sanitize is None:
sanitize = lambda x: x
k_ab = nsum('_aN^M,_bM^N->ab', t_abc, t_abc)
try:
if filename is None:
raise IOError('')
f_abc = numpy.load(filename)['f_abc']
except (IOError, OSError):
if verbose:
print('Computing f_abc.')
commutators_ab = 2 * asymm2(nsum('_aP^M,_bN^P->_abN^M', t_abc, t_abc),
'_abN^M->_baN^M')
f_abc = nsum('_abM^N,_cN^M->abc', commutators_ab, t_abc)
if filename is not None:
numpy.savez_compressed(filename, f_abc=f_abc)
f_abC = sanitize(nsum('_abc,^cC->_ab^C', f_abc, numpy.linalg.inv(k_ab)))
return f_abc, f_abC
def get_commutant(f_abC, g_an, space=None, ev_threshold=1e-7):
"""Returns the commutant of a set of generators inside a Lie algebra.
Args:
f_abC: [D, D, D]-array, structure constants that define the enclosing
Lie algebra, [G_a, G_b] = f_ab^c G_c.
g_an: [D, N]-array, generators for which we want to determine the commutant.
space: Optional [D, M]-array. If provided, determine commutant within this
subspace of the Lie algebra.
ev_threshold: Eigenvalue threshold for commutant.
Returns:
[D, P]-array, P generators which all commute wit the g_an.
"""
# For a Lie algebra given in terms of its structure constants f_abC,
# as well as a collection of generators g_na, find the generators
# h_mb that commute with the g_na.
dim = f_abC.shape[0]
subspace = numpy.eye(dim) if space is None else space
for gen in g_an.T:
gen_ad_on_subspace = nsum('abc,a,bm->cm', f_abC, gen, subspace)
svd_u, svd_s, svd_vh = numpy.linalg.svd(gen_ad_on_subspace,
full_matrices=False)
del svd_u # Unused here.
sub_subspace = svd_vh[svd_s <= ev_threshold, :len(svd_s)]
subspace = nsum('an,mn->am', subspace, sub_subspace)
return subspace
def lie_algebra_derivative(f0_mnp, fj_mnp):
"""Computes Lie algebra commutators [L, Ln]."""
# Idea: f_mnp, f0_mnp are structure constants embedded in the same irrep,
# so every f_m, f0_m corresponds to a generator-matrix.
# We want to decompose the commutators in terms of the original generators.
dim = f0_mnp.shape[0]
comms = 2 * asymm2(nsum('MPQ,NRP->MNRQ', f0_mnp, fj_mnp), 'MNRQ->NMRQ')
decomposed, residuals, *_ = numpy.linalg.lstsq(
f0_mnp.reshape(dim, dim * dim).T,
comms.reshape(dim * dim, dim * dim).T)
del residuals # Unused, named only for documentation purposes.
return decomposed.reshape(f0_mnp.shape)
def spot_check_t_aMN(t_aMN, num_checks=100, seed=1):
"""Spot-checks closure of a matrix Lie algebra."""
rng = numpy.random.RandomState(seed=seed)
for num_check in range(num_checks):
n1 = rng.randint(0, t_aMN.shape[0])
n2 = rng.randint(0, t_aMN.shape[0])
g1 = t_aMN[n1].T
g2 = t_aMN[n2].T
g12 = g1 @ g2 - g2 @ g1
# The claim is that `g12` is always expressible in terms of t_aMN.T
_, residue = numpy.linalg.lstsq(t_aMN.reshape(t_aMN.shape[0], -1).T,
g12.T.ravel())[:2]
if not numpy.allclose(0, residue):
raise ValueError(
f'Failed (n={num_check}): [T{n1}, T{n2}], '
f'max_residue={max(residue):.6g}')
return True
def spot_check_f_abC(t_aMN, f_abC, num_checks=1000, seed=1):
"""Spot-checks structure constants of a matrix Lie algebra."""
rng = numpy.random.RandomState(seed=seed)
for num_check in range(num_checks):
n1 = rng.randint(0, t_aMN.shape[0])
n2 = rng.randint(0, t_aMN.shape[0])
g1 = t_aMN[n1].T
g2 = t_aMN[n2].T
g12 = g1 @ g2 - g2 @ g1
f_g12 = nsum('_aM^N,_a->^N_M', t_aMN, f_abC[n1, n2])
if not numpy.allclose(g12, f_g12):
tprint(g12, name='g12')
tprint(f_g12, name='f_g12')
raise RuntimeError(f'Failed (n={num_check}): [T{n1}, T{n2}]')
return True
def tff64(x):
return tf.constant(x, dtype=tf.float64)
def tfc128(re, im=0.0):
return tf.complex(tff64(re), tff64(im))
def numpy_func_from_tf(tf_func,
dtype=tf.float64,
allow_extra_args=False,
debug_tag=None):
"""Wraps up a tf.Tensor->tf.Tensor function as ndarray->ndarray."""
def f_np(pos, *extra_args):
if not allow_extra_args and extra_args:
raise ValueError('Unexpected extra arguments to function.')
ret = tf_func(tf.constant(pos, dtype=dtype)).numpy()
if debug_tag is not None:
print('DEBUG %s(%r): %s' % (debug_tag, pos, ret))
return ret
return f_np
_DEFAULT_USE_TF_FUNCTION = False
def tf_grad(t_scalar_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF scalar-function to its TF gradient-function."""
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def f_grad(t_pos):
tape = tf.GradientTape()
with tape:
tape.watch(t_pos)
t_val = t_scalar_func(t_pos)
grad = tape.gradient(t_val, t_pos)
assert grad is not None, '`None` gradient.'
return grad
return f_grad
def tf_ext_grad(t_scalar_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF scalar-function to its gradient-extended variant."""
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def f_grad(t_pos):
tape = tf.GradientTape()
with tape:
tape.watch(t_pos)
t_val = t_scalar_func(t_pos)
grad = tape.gradient(t_val, t_pos)
assert grad is not None, '`None` gradient.'
return t_val, grad
return f_grad
def tf_batch_grad(tb_scalar_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF batched-scalar-function to its TF batched-gradient-function."""
# TODO(tfish): Properly document and test-cover this.
# This so far has only seen explorative use.
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def f_batch_grad(tb_pos):
tape = tf.GradientTape()
with tape:
tape.watch(tb_pos)
tb_val = tb_scalar_func(tb_pos)[:, tf.newaxis]
tb_grad_raw = tape.batch_jacobian(tb_val, tb_pos)
tb_grad_raw_shape = tf.shape(tb_grad_raw).as_list()
# We have to remove the extra batch-index on `tb_val` that was introduced
# above.
tb_grad = tf.reshape(tb_grad_raw,
[tb_grad_raw_shape[0]] + tb_grad_raw_shape[2:])
assert tb_grad is not None, '`None` gradient.'
return tb_grad
return f_batch_grad
def tf_stationarity(t_scalar_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF scalar-function to its TF gradient-length-squared function."""
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def f_stat(t_pos):
tape = tf.GradientTape()
with tape:
tape.watch(t_pos)
t_val = t_scalar_func(t_pos)
grad = tape.gradient(t_val, t_pos)
assert grad is not None, '`None` gradient (for stationarity).'
return tf.reduce_sum(tf.math.square(grad))
return f_stat
def _tf_jacobian_v1(t_vec_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF vector-function to its TF Jacobian-function."""
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def f_jac(t_pos, *further_unwatched_args):
tape = tf.GradientTape(persistent=True)
with tape:
tape.watch(t_pos)
v_components = tf.unstack(t_vec_func(t_pos, *further_unwatched_args))
gradients = [tape.gradient(v_component, t_pos)
for v_component in v_components]
assert all(g is not None for g in gradients), 'Bad Gradients for Jacobian.'
# The gradient's index must come last, so we have to stack along axis 0.
jacobian = tf.stack(gradients, axis=0)
return jacobian
return f_jac
def _tf_jacobian_v2(t_vec_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF vector-function to its TF Jacobian-function."""
maybe_tf_function = tf.function if use_tf_function else lambda x: x
@maybe_tf_function
def tf_j(t_xs, *further_unwatched_args):
tape = tf.GradientTape()
with tape:
tape.watch(t_xs)
v = t_vec_func(t_xs, *further_unwatched_args)
ret = tape.jacobian(v, t_xs)
return ret
return tf_j
# TODO(tfish): Change to using _v2 once this supports all the weird graph Ops
# that we are using.
tf_jacobian = _tf_jacobian_v1
# tf_jacobian = _tf_jacobian_v2
def tf_hessian(t_scalar_func, use_tf_function=_DEFAULT_USE_TF_FUNCTION):
"""Maps a TF scalar-function to its TF hessian-function."""
return tf_jacobian(
tf_grad(t_scalar_func,
use_tf_function=use_tf_function),
use_tf_function=use_tf_function)
def tf_mdnewton_step(tf_vec_func, t_pos, tf_jacobian_func=None):
"""Performs a MDNewton-iteration step.
Args:
tf_vec_func: A R^m -> R^m TF-tensor-to-TF-tensor function.
t_pos: A R^m position TF-tensor.
tf_jacobian_func: Optional Jacobian-function (if available).
Returns:
A pair (tf_tensor_new_pos, residual_magnitude)
"""
if tf_jacobian_func is None:
tf_jacobian_func = tf_jacobian(tf_vec_func)
residual = tf_vec_func(t_pos)
update = tf.linalg.lstsq(tf_jacobian_func(t_pos),
residual[:, tf.newaxis],
fast=False)
return t_pos - update[:, 0], tf.reduce_sum(tf.abs(residual))
def tf_mdnewton(tf_scalar_func,
t_pos,
tf_grad_func=None,
tf_jacobian_func=None,
maxsteps=None,
debug_func=print):
"""Finds a zero of a tf.Tensor R^N->R function via MDNewton({gradient})."""
if tf_grad_func is None:
tf_grad_func = tf_grad(tf_scalar_func)
if tf_jacobian_func is None:
tf_jacobian_func = tf_jacobian(tf_grad_func)
num_step = 0
last_residual = numpy.inf
while True:
num_step += 1
t_pos_next, t_residual = tf_mdnewton_step(tf_grad_func, t_pos,
tf_jacobian_func=tf_jacobian_func)
residual = t_residual.numpy()
if residual > last_residual or (maxsteps is not None
and num_step >= maxsteps):
yield t_pos # Make sure we do yield the position before stopping.
return
t_pos = t_pos_next
last_residual = residual
if debug_func is not None:
debug_func('[MDNewton step=%d] val=%s' % (
num_step,
tf_scalar_func(t_pos).numpy()))
yield t_pos
def grid_scan(f, index_ranges, maybe_prev_f=None):
"""Iterates over grid-positions and function-values.
The common situation is: We want to map out values of a function
that involves e.g. optimization, so whenever we compute that
function, it is useful to know the value at a neighboring point.
This function iterates over grid-points in such a way that each
function-evaluation gets to see the value at a neighboring point.
Args:
f: (Tuple[int, ...], Optional[T]) -> T: Function to be scanned.
index_ranges: Sequence of pairs (low_end, high_end) that specify
ranges for each index.
maybe_prev_f: Optional neighbor-value to previded to
f(starting_point).
Yields:
Pair of (indices, f_value), where f_value is obtained by calling
f(indices, {value of f at a neighboring point,
or maybe_prev_f for the starting point})
"""
# Each `index_ranges` entry is (end_neg, end_pos),
# so [-10..10] would get encoded as (-11, 11).
num_indices = len(index_ranges)
max_index = num_indices - 1
ipos0 = (0,) * num_indices
f_now = f(ipos0, maybe_prev_f)
yield ipos0, f_now
if num_indices == 0:
return
stack = [(+1, 0, ipos0, f_now), (-1, 0, ipos0, f_now)]
while stack:
direction, icursor, ipos_now, f_now = stack.pop()
ipos_c = ipos_now[icursor]
ipos_c_end = index_ranges[icursor][(1 + direction) // 2]
# Subtle: We must make sure that we do not recurse-down into
# scanning the next-index range from both the increasing and
# decreasing branch. The `and not` part of the condition below
# ensures this.
if icursor < max_index and not (ipos_c == 0 and direction == -1):
stack.extend(
[(+1, icursor + 1, ipos_now, f_now),
(-1, icursor + 1, ipos_now, f_now)])
if ipos_c == ipos_c_end - direction:
continue # Reached the end for this stride - done.
ipos_next = tuple(idx + direction * (k == icursor)
for k, idx in enumerate(ipos_now))
f_next = f(ipos_next, f_now)
yield ipos_next, f_next
stack.append((direction, icursor, ipos_next, f_next))
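# Illustrative usage sketch (added): scanning f(i, j) = i + 10 * j over the
# grid i in -1..1, j in -2..2 (index_ranges use exclusive ends); each grid
# point is visited exactly once, so dict() collects all 15 values.
def _example_grid_scan():
  seen = dict(grid_scan(lambda idxs, prev_f: idxs[0] + 10 * idxs[1],
                        [(-2, 2), (-3, 3)]))
  assert len(seen) == 15
  assert seen[(0, 0)] == 0 and seen[(1, -2)] == -19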
def _fixed_fmin(f_opt, x0, minimizer_func, **kwargs):
"""Internal - Fixes a wart in scipy.optimize.fmin_bfgs() behavior."""
# Always return the smallest value encountered during the entire
# minimization procedure, not the actual result from fmin_bfgs().
last_seen = [(numpy.inf, None)]
def f_opt_wrapped(xs):
val_opt = f_opt(xs)
if last_seen[0][0] > val_opt:
last_seen[0] = (val_opt, xs.copy())
return val_opt
#
ret = minimizer_func(f_opt_wrapped, x0, **kwargs)
if kwargs.get('full_output'):
return (last_seen[0][1],) + ret[1:]
return last_seen[0][1]
class OptimizationError(Exception):
"""Optimization failed."""
class LineSearchError(Exception):
"""Line search failed."""
def line_search_wolfe12(f, fprime, pos_start, direction, grad_at_start,
f_at_start, f_at_previous_point,
**kwargs):
"""Runs line_search_wolfe1, falling back to line_search_wolfe2 as needed.
Args:
f: numpy.ndarray -> float function, the function to line-search over.
fprime: The gradient function of `f`.
pos_start: numpy.ndarray, the starting position for line-search.
direction: numpy.ndarray, the direction of the line.
grad_at_start: numpy.ndarray, gradient at start.
f_at_start: Value of the function at the starting point.
f_at_previous_point: (Estimated) value of the function at the
previous point.
**kwargs: Other keyword arguments to pass on to
scipy.optimize.linesearch.line_search_wolfe1().
Only the key-value pairs with keys 'c1', 'c2', 'amax' will get passed on
to scipy.optimize.linesearch.line_search_wolfe2() in case we fall back
to that other function.
Returns:
The result of calling scipy.optimize.linesearch.line_search_wolfe1(),
respectively scipy.optimize.linesearch.line_search_wolfe2().
Raises:
LineSearchError: line search failed.
"""
ret = scipy.optimize.linesearch.line_search_wolfe1(
f, fprime, pos_start, direction, grad_at_start,
f_at_start, f_at_previous_point,
**kwargs)
if ret[0] is not None:
return ret
# Otherwise, line search failed, and we try _wolfe2.
with warnings.catch_warnings():
warnings.simplefilter('ignore', scipy.optimize.linesearch.LineSearchWarning)
wolfe2_kwargs = {key: val for key, val in kwargs.items()
if key in ('c1', 'c2', 'amax')}
ret = scipy.optimize.linesearch.line_search_wolfe2(
f, fprime, pos_start, direction, grad_at_start,
f_at_start, f_at_previous_point,
**wolfe2_kwargs)
if ret[0] is None:
raise LineSearchError()
return ret
@dataclasses.dataclass(frozen=True)
class PreliminaryOptimum:
"""A preliminary proposal for a minimization problem solution."""
num_step: int
val: numpy.ndarray
norm_grad: float
pos: numpy.ndarray
args: 'Any' # TODO(tfish): Be more specific here.
grad: numpy.ndarray
inv_hessian: Optional[numpy.ndarray]
def __repr__(self):
"""Returns a string representation of the instance."""
return (f'<PreliminaryOptimum, num_step={self.num_step}, '
f'norm_grad={self.norm_grad:.6g}, val={self.val:.6g}, '
f'pos={self.pos.round(3).tolist()}>')
def bfgs(f, xs_start, fprime, args=(),
rho_k_max=1000,
norm=numpy.inf,
gtol=-numpy.inf,
inv_hessian=None):
"""Yields PreliminaryOptimum instances for BFGS-optimization.
This algorithm is a generator re-implemenetation of
scipy.optimize.fmin_bfgs().
Args:
f: (x_pos, *args) -> float function, the objective function.
xs_start: numpy.typing.ArrayLike, the starting position for minimization.
fprime: (x_pos, *args) -> grad: numpy.ndarray, the gradient of `f`.
args: extra arguments to be provided to `f` and `fprime`.
rho_k_max: Maximum value of the rho_k parameter.
Normally does not need any adjustments.
norm: The vector norm to use when reporting gradient-norms.
gtol: Tolerable value for the gradient-norm.
Optimization will terminate once gradient-norm gets smaller than this
threshold.
inv_hessian: Optional[numpy.ndarray], the initial guess for the inverse
Hessian. If not provided, the identity matrix will be used.
Yields:
PreliminaryOptimum, an intermediate point during optimization.
"""
# This has been checked manually on a particular supergravity
# equilibrium to zoom down to the equilibrium just exactly
# like scipy.optimize.fmin_bfgs does.
args = tuple(args)
xs_start = numpy.asarray(xs_start).ravel()
dim = xs_start.size
if not args:
fx = f
fxprime = fprime
else:
fx = lambda xs: f(xs, *args)
fxprime = lambda xs: fprime(xs, *args)
def vnorm(xs):
return numpy.linalg.norm(xs, ord=norm)
f_current = float(fx(xs_start))
grad_current = fxprime(xs_start)
identity = numpy.eye(dim)
inv_hessian_current = (
numpy.asarray(inv_hessian) if inv_hessian is not None else identity)
# Sets the initial step guess to dx ~ 1
f_previous = f_current + 0.5 * numpy.linalg.norm(grad_current)
xk = xs_start
for k in itertools.count():
pk = -numpy.dot(inv_hessian_current, grad_current)
try:
alpha_k, _, _, f_current, f_previous, grad_next = (
line_search_wolfe12(
fx, fxprime, xk, pk, grad_current,
f_current, f_previous, amin=1e-100, amax=1e100))
except LineSearchError:
# Line search failed to find a better solution. We are done.
return
xk_next = xk + alpha_k * pk
sk = xk_next - xk
xk = xk_next
if grad_next is None:
grad_next = fxprime(xk_next)
yk = grad_next - grad_current
grad_current = grad_next
yk_sk = numpy.dot(yk, sk)
if yk_sk == 0: # Weird, but as in scipy.optimize.fmin_bfgs.
rho_k = rho_k_max
else:
rho_k = 1 / yk_sk
a1 = identity - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rho_k
a2 = identity - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rho_k
inv_hessian_current = numpy.dot(a1, numpy.dot(inv_hessian_current, a2)) + (
rho_k * sk[:, numpy.newaxis] * sk[numpy.newaxis, :])
norm_grad = vnorm(grad_current)
yield PreliminaryOptimum(num_step=k,
val=f_current,
norm_grad=norm_grad,
pos=xk_next,
args=args,
grad=grad_current,
inv_hessian=inv_hessian_current)
if norm_grad < gtol:
return
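# Illustrative usage sketch (added): minimizing a shifted quadratic with the
# generator interface; `final` (defined above) keeps only the last yielded
# PreliminaryOptimum, and iteration stops once norm_grad drops below gtol.
def _example_bfgs():
  x_opt = numpy.array([1.0, -2.0])
  f = lambda xs: float(((xs - x_opt) ** 2).sum())
  fprime = lambda xs: 2.0 * (xs - x_opt)
  result = final(bfgs(f, [0.0, 0.0], fprime, gtol=1e-8))
  assert numpy.allclose(result.pos, x_opt)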
def bfgs_scan(
f, fprime, xs_start,
param_ranges,
param_range_mid_indices=None,
extract_result_fn=(
lambda prelim_opt: prelim_opt.pos.tolist() + [prelim_opt.val]),
verbose=True,
forward_positions=True,
forward_hessians=False,
report=print,
gtol=1e-10):
"""BFGS-scans a function."""
# `forward_positions=True` generally has a dramatic positive effect.
# `forward_hessians=True` may well have a negative effect.
param_ranges = [numpy.asarray(prange) for prange in param_ranges]
if param_range_mid_indices is None:
param_range_mid_indices = [prange.size // 2 for prange in param_ranges]
grid_scan_index_ranges = [
(-mid_index - 1, prange.size - mid_index)
for mid_index, prange in zip(param_range_mid_indices, param_ranges)]
result = numpy.full([prange.size for prange in param_ranges], None)
def f_scan(indices, f_prev):
f_arg = numpy.array(
[prange[mid + k]
for prange, mid, k in zip(param_ranges,
param_range_mid_indices,
indices)])
prev_pos = (xs_start if f_prev or forward_positions is not None
else f_prev.pos)
inv_hessian = (None if f_prev is None or not forward_hessians
else f_prev.inv_hessian)
optimum = None
for prelim_opt in bfgs(f, prev_pos, fprime, args=(f_arg,), gtol=gtol,
inv_hessian=inv_hessian):
optimum = prelim_opt
if verbose and optimum.num_step % 50 == 0:
report(f'BFGS n={optimum.num_step}, val={optimum.val:.6g}, '
f'norm_grad={optimum.norm_grad:.6g}')
return optimum
t_start = time.time()
t_prev = t_start
for num_step, (indices, f_scan_val) in enumerate(
grid_scan(f_scan, grid_scan_index_ranges)):
result[indices] = val_here = extract_result_fn(f_scan_val)
if verbose:
t_now = time.time()
report(f'N={num_step:6d}, T={t_now - t_start:.3f}s '
f'(+{t_now - t_prev:.3f} s): {indices} -> {val_here[-1]}')
t_prev = t_now
return result.tolist()
def tf_minimize(tf_scalar_func, x0,
tf_grad_func=None,
# Sequence of letters from set('BGN'), B=BFGS, G=Gradient,
# N=Newton.
strategy='B',
# Default is to do 100 gradient-steps at learning rate 1e-3.
gradient_steps=((100, 1e-3),),
show_gradient_loss_every_n_steps=None,
dtype=tf.float64,
fail_on_nan=True,
# Only works if tf_grad_func is not given.
cache_gradients=True,
gtol=1e-5, maxiter=10**4, mdnewton_maxsteps=7):
"""Minimizes a TensorFlow function."""
tf_ext_grad_func = None
if tf_grad_func is None:
# pylint:disable=function-redefined
tf_ext_grad_func = tf_ext_grad(tf_scalar_func)
@tf.function
def tf_grad_func(t_params):
_, t_grad = tf_ext_grad_func(t_params)
return t_grad
if not cache_gradients:
def f_opt(params):
ret = tf_scalar_func(tf.constant(params, dtype=dtype)).numpy()
if fail_on_nan and numpy.isnan(ret):
raise OptimizationError('NaN in objective function.')
return ret
def fprime_opt(params):
ret = tf_grad_func(tf.constant(params, dtype=dtype)).numpy()
if fail_on_nan and numpy.any(numpy.isnan(ret)):
raise OptimizationError('NaN in gradient function.')
return ret
else:
# The way scipy's optimizers work, they will usually compute the function
# and its gradient once each per location. Let's speed this
# up by caching the last value, by input position.
last_cached = [numpy.zeros_like(x0) + numpy.nan,
numpy.nan,
numpy.zeros_like(x0) + numpy.nan]
def fprime_opt(params):
if numpy.all(params == last_cached[0]):
return last_cached[2].copy()
t_val, t_grad = tf_ext_grad_func(tf.constant(params, dtype=dtype))
last_cached[0][:] = params
last_cached[1] = t_val.numpy()
last_cached[2] = t_grad.numpy()
if fail_on_nan and numpy.any(numpy.isnan(last_cached[2])):
raise OptimizationError('NaN in gradient function.')
return last_cached[2].copy()
def f_opt(params):
if numpy.all(params == last_cached[0]):
return last_cached[1]
t_val, t_grad = tf_ext_grad_func(tf.constant(params, dtype=dtype))
last_cached[0][:] = params
last_cached[1] = t_val.numpy()
last_cached[2] = t_grad.numpy()
if fail_on_nan and numpy.isnan(last_cached[1]):
raise OptimizationError('NaN in objective function.')
return last_cached[1]
xs_now = x0
num_gradient_descent_stages_done = 0
for strategy_step in strategy:
if strategy_step == 'B': # BFGS
opt_info = _fixed_fmin(f_opt,
numpy.array(xs_now),
minimizer_func=scipy.optimize.fmin_bfgs,
fprime=fprime_opt,
gtol=gtol,
maxiter=maxiter,
disp=0,
full_output=True)
# TODO(tfish): Check full output for convergence.
# Not much of a problem currently, since we are always
# checking stationarity.
xs_now = opt_info[0]
elif strategy_step == 'N': # Multi-Dimensional (MD)Newton
*_, t_ret_xs = tf_mdnewton(
tf_scalar_func,
tf.constant(xs_now, dtype=tf.float64),
tf_grad_func=tf_grad_func,
maxsteps=mdnewton_maxsteps)
xs_now = t_ret_xs.numpy()
elif strategy_step == 'G': # Gradient Descent.
num_gradient_steps, learning_rate = gradient_steps[
min(len(gradient_steps)-1, num_gradient_descent_stages_done)]
num_gradient_descent_stages_done += 1
for num_gradient_step in range(num_gradient_steps):
xs_now -= learning_rate * fprime_opt(xs_now)
if (show_gradient_loss_every_n_steps is not None and
num_gradient_step % show_gradient_loss_every_n_steps == 0):
print('[gradient, lr=%.6g, step=%4d] L=%.6g' % (
learning_rate, num_gradient_step, f_opt(xs_now)))
else:
raise RuntimeError('Unknown strategy step: %r' % (strategy_step,))
return f_opt(xs_now), xs_now
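# Hedged usage sketch for `tf_minimize` (illustration only): minimize the
# quadratic L(x) = sum((x - [1, -2])**2), first with gradient descent, then
# BFGS (strategy 'GB'). It assumes the `tf_ext_grad` and `_fixed_fmin` helpers
# defined elsewhere in this module.
def _demo_tf_minimize():
  target = tf.constant([1.0, -2.0], dtype=tf.float64)
  def loss_fn(t_params):
    return tf.reduce_sum(tf.square(t_params - target))
  val, xs = tf_minimize(loss_fn, numpy.zeros(2), strategy='GB')
  return val, xs  # val should be ~0, xs ~[1, -2].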
def tf_minimize_v2(
tf_scalar_func, x0,
tf_grad_func=None,
# 'Strategy' is a sequence of pairs (strategy_name, *details, opt_kwargs),
# where opt_kwargs are different for each strategy.
# BFGS, CG: args are forwarded as kwargs to
# scipy.optimize.fmin_bfgs() / scipy.optimize.fmin_cg().
# Relevant args are: gtol, maxiter.
# CUSTOM: `details` has length-1 and provides the actual optimizer-function
# to call. Call signature must be compatible with how this
# minimizer-wrapper
# calls scipy.optimize.fmin_bfgs.
# So, ('CUSTOM', scipy.optimize.fmin_bfgs, kwargs) is equivalent to
# ('BFGS', kwargs).
# GD: gradient-descent. kwargs are:
# schedule=[(num_steps, learning_rate), ...]
# show_loss_every_n_steps.
# MDNewton: kwargs are: maxsteps.
strategy=(('BFGS', None),),
dtype=tf.float64,
fail_on_nan=True,
cache_gradients=True, # Only works if tf_grad_func is not given.
use_tf_function=True,
zoom=1.0, # We need the 'zoom' argument for aligning docstrings.
default_gtol=1e-7, default_maxiter=10**4, default_mdnewton_maxsteps=3,
default_gd_schedule=((100, 3e-4),),):
"""Minimizes a TensorFlow function."""
# TODO(tfish): Document properly. We currently have some code that already
# uses the improved _v2, but still some code using the original interface.
# Small details about how the optimizer should behave are still changing,
# and the docstring should be finalized once these are resolved.
#
# TODO(tfish): Add args-argument for parameters that get passed on to
# the scalar function and its gradient-function.
if zoom != 1.0:
# This was used in some code but ultimately deemed to not be a useful idea.
raise ValueError('Deprecated legacy argument `zoom` has non-default value.')
maybe_tf_function = tf.function if use_tf_function else lambda x: x
tf_ext_grad_func = None
if tf_grad_func is None:
# pylint:disable=function-redefined
tf_ext_grad_func = tf_ext_grad(tf_scalar_func)
@maybe_tf_function
def tf_grad_func(t_params):
_, t_grad = tf_ext_grad_func(t_params)
return t_grad
if not cache_gradients:
def f_opt(params):
ret = tf_scalar_func(tf.constant(params, dtype=dtype)).numpy()
if fail_on_nan and numpy.isnan(ret):
raise OptimizationError('NaN in objective function.')
return ret
def fprime_opt(params):
ret = tf_grad_func(tf.constant(params, dtype=dtype)).numpy()
if fail_on_nan and numpy.any(numpy.isnan(ret)):
raise OptimizationError('NaN in gradient function.')
return ret
else:
# The way scipy's optimizers work, they will usually compute the function
# and its gradient once each per location. Let's speed this
# up by caching the last value, by input position.
last_cached = [
numpy.zeros_like(x0) + numpy.nan, # Cached position.
numpy.nan, # Cached value.
numpy.zeros_like(x0) + numpy.nan] # Cached gradient.
# Note: SciPy has a weird bug: When we return a gradient,
# it assumes that it receives ownership of the gradient
    # vector-object and is free to modify the object from
# there on. This breaks if we just return the cached vector -
# SciPy would modify that object and break the invariant
# that for the given last-evaluated cached position x0,
# the cached-gradient vector holds the value of the gradient
# at x0. We hence have to .copy() the gradient that we return.
def fprime_opt(params):
if numpy.all(params == last_cached[0]):
return last_cached[2].copy()
t_val, t_grad = tf_ext_grad_func(
tf.constant(params, dtype=dtype))
last_cached[0][:] = params
last_cached[1] = t_val.numpy()
last_cached[2][:] = t_grad.numpy()
if fail_on_nan and not numpy.all(numpy.isfinite(last_cached[2])):
raise OptimizationError('NaN in gradient function.')
return last_cached[2].copy()
def f_opt(params):
if numpy.all(params == last_cached[0]):
return last_cached[1]
t_val, t_grad = tf_ext_grad_func(tf.constant(params, dtype=dtype))
last_cached[0][:] = params
last_cached[1] = t_val.numpy()
last_cached[2][:] = t_grad.numpy()
if fail_on_nan and not numpy.isfinite(last_cached[1]):
raise OptimizationError('NaN in gradient function.')
return last_cached[1]
xs_now = numpy.array(x0)
for strategy_step, *strategy_details, strategy_kwargs in strategy:
if strategy_step in ('BFGS', 'CG'):
kwargs = dict(gtol=default_gtol,
maxiter=default_maxiter,
disp=0)
if strategy_kwargs is not None:
kwargs.update(strategy_kwargs)
if strategy_step == 'BFGS':
minimizer_func = scipy.optimize.fmin_bfgs
elif strategy_step == 'CG':
minimizer_func = scipy.optimize.fmin_cg
# TODO(tfish): Check full output for convergence.
# Not much of a problem currently, since we are always
# checking stationarity.
xs_now = _fixed_fmin(f_opt,
xs_now,
fprime=fprime_opt,
minimizer_func=minimizer_func,
**kwargs)
elif strategy_step == 'CUSTOM':
minimizer_func = strategy_details[0]
opt_info = _fixed_fmin(f_opt,
xs_now,
fprime=fprime_opt,
minimizer_func=minimizer_func)
# TODO(tfish): Check full output for convergence.
# Not much of a problem currently, since we are always
# checking stationarity.
xs_now = opt_info[0]
elif strategy_step == 'MDNewton': # Multi-Dimensional Newton
kwargs = dict(maxsteps=default_mdnewton_maxsteps)
kwargs.update(strategy_kwargs)
*_, t_xs_opt = tf_mdnewton(
tf_scalar_func,
tf.constant(xs_now, dtype=dtype),
tf_grad_func=tf_grad_func,
**kwargs)
xs_now = t_xs_opt.numpy()
elif strategy_step == 'GD': # Gradient Descent.
kwargs = dict(schedule=default_gd_schedule,
show_gradient_loss_every_n_steps=None)
kwargs.update(strategy_kwargs)
show_gradient_loss_every_n_steps = kwargs.get(
'show_gradient_loss_every_n_steps')
for num_steps, learning_rate in kwargs['schedule']:
for num_gradient_step in range(num_steps):
xs_new = xs_now - learning_rate * fprime_opt(xs_now)
if f_opt(xs_new) > f_opt(xs_now):
# Gradient-step did not reduce the function-value.
break # Do not proceed with this learning-rate.
else:
xs_now = xs_new # Accept new position.
if (show_gradient_loss_every_n_steps is not None and
num_gradient_step % show_gradient_loss_every_n_steps == 0):
print('[gradient, lr=%.6g, step=%4d] L=%.6g' % (
learning_rate, num_gradient_step, f_opt(xs_now)))
else:
raise ValueError('Unknown strategy step: %r' % strategy_step)
return f_opt(xs_now), xs_now
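# Hedged sketch of the `tf_minimize_v2` strategy format (illustration only):
# each entry is (name, *details, kwargs_dict), with kwargs_dict possibly None
# for the defaults. Here: a gradient-descent warm-up followed by BFGS.
def _demo_tf_minimize_v2():
  def loss_fn(t_params):
    return tf.reduce_sum(
        tf.square(t_params - tf.constant([3.0, 0.5], dtype=tf.float64)))
  strategy = (('GD', dict(schedule=((200, 1e-2),))),
              ('BFGS', dict(gtol=1e-9, maxiter=1000)))
  return tf_minimize_v2(loss_fn, numpy.zeros(2), strategy=strategy)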
def tf_reshaped_to_1_batch_index(t_x, num_non_batch_indices):
"""Reshapes tf-tensor `t_x` to a single batch-index."""
return tf.reshape(
t_x,
tf.concat([tf.constant([-1], dtype=tf.int32),
tf.shape(t_x)[-num_non_batch_indices:]], axis=0))
def tf_restore_batch_indices(tb1_x, t_ref, num_non_batch_indices_ref):
"""Restores the batch indices on `t_ref` to `tb1_x`."""
return tf.reshape(
tb1_x,
tf.concat(
[tf.shape(t_ref)[:-num_non_batch_indices_ref],
tf.shape(tb1_x)[1:]], axis=0))
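# Hedged round-trip sketch for the two reshape helpers above (illustration
# only): flatten all leading batch indices into one, then restore them.
def _demo_batch_reshape_roundtrip():
  t_x = tf.zeros([2, 3, 5, 7])  # Two batch indices, two non-batch indices.
  tb1 = tf_reshaped_to_1_batch_index(t_x, num_non_batch_indices=2)  # [6, 5, 7]
  t_back = tf_restore_batch_indices(tb1, t_x, num_non_batch_indices_ref=2)
  return tb1.shape, t_back.shape  # ([6, 5, 7], [2, 3, 5, 7])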
def gramian_eigenspaces(gramian, eps=1e-5):
"""Returns eigenspaces of a Gramian matrix."""
# .eigh() will give us an orthonormal basis, while .eigvals() typically would
# not (even for a hermitean matrix).
eigvals, eigvecsT = numpy.linalg.eigh(gramian)
eigenspaces = [] # [(eigval, [eigvec1, eigvec2, ...]), ...]
for eigval, eigvec in zip(eigvals, eigvecsT.T):
matching_eigenspaces = (
espace for espace in eigenspaces if abs(espace[0] - eigval) <= eps)
try:
espace = next(matching_eigenspaces)
espace[1].append(eigvec)
except StopIteration: # Did not have a matching eigenvalue.
eigenspaces.append((eigval, [eigvec]))
return [(eigval, numpy.vstack(eigvecs))
for eigval, eigvecs in eigenspaces]
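# Hedged sketch for `gramian_eigenspaces` (illustration only): a diagonal
# Gramian with a doubly-degenerate eigenvalue yields one 2-dimensional
# eigenspace (eigenvalue 1.0) and one 1-dimensional eigenspace (eigenvalue 2.0).
def _demo_gramian_eigenspaces():
  gramian = numpy.diag([1.0, 1.0, 2.0])
  return [(eigval, eigvecs.shape[0])
          for eigval, eigvecs in gramian_eigenspaces(gramian)]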
def decompose_residual_symmetry(v_scalars, f_abC,
gg_generators,
threshold=1e-3):
"""Decomposes the unbroken gauge symmetry into {semisimple}+{u1s} pieces."""
dim_ad = f_abC.shape[-1]
comm_v_scalars = numpy.einsum('abc,a->cb',
f_abC[:len(v_scalars), :, :],
v_scalars)
gg_comms = numpy.einsum('na,ba->nb',
gg_generators, comm_v_scalars)
svd_u, svd_s, svd_vh = numpy.linalg.svd(gg_comms)
del svd_vh # Unused.
# TODO(tfish): Perhaps use .null_space()?
unbroken_gg_gens = [
numpy.einsum('ng,n->g', gg_generators, u)
for u in svd_u.T[abs(svd_s) <= threshold]]
assert all(abs(numpy.dot(comm_v_scalars, v)).max() < 1e-3
for v in unbroken_gg_gens)
ugg = (numpy.vstack(unbroken_gg_gens) if unbroken_gg_gens
else numpy.zeros([0, dim_ad]))
# We need the geometric object that codifies taking the commutator
# with an unbroken gauge-group generator.
comm_ugg = nsum('ma,abc->mcb', ugg, f_abC)
# We can use this to determine the derivative.
d_ugg = nsum('nb,mcb->mnc', ugg, comm_ugg)
svd_du, svd_ds, svd_dvh = numpy.linalg.svd(d_ugg.reshape(-1, dim_ad))
del svd_du # Unused.
d_ugg_gens = svd_dvh[:len(svd_ds)][svd_ds > threshold, :]
# Also, those unbroken-gauge-group generators that commute
# with all unbroken-gauge-group generators give us the U(1)
# generators (for groups that are "semisimple plus U1's").
if ugg.size == 0:
    u1_gens = numpy.zeros([0, dim_ad])
"""
Utilities to modify a given neural network and obtain a new one.
--<EMAIL>
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=star-args
# pylint: disable=too-many-branches
from argparse import Namespace
from copy import deepcopy
import numpy as np
# Local imports
from ..nn.neural_network import ConvNeuralNetwork, MultiLayerPerceptron, MLP_RECTIFIERS, \
MLP_SIGMOIDS, is_a_pooling_layer_label, is_a_conv_layer_label,\
CNNImageSizeMismatchException, CNNNoConvAfterIPException
from ..utils.general_utils import reorder_list_or_array, reorder_rows_and_cols_in_matrix
from ..utils.option_handler import get_option_specs, load_options
from ..utils.reporters import get_reporter
_DFLT_CHANGE_FRAC = 0.125
_DFLT_CHANGE_NUM_UNITS_SPAWN = 'all'
_DFLT_CHANGE_LAYERS_SPAWN = 20
_DFLT_NUM_SINGLE_STEP_MODIFICATIONS = 'all'
_DFLT_NUM_TWO_STEP_MODIFICATIONS = 0
_DFLT_NUM_THREE_STEP_MODIFICATIONS = 0
_DFLT_WEDGE_LAYER_CNN_CANDIDATES = ['conv3', 'conv5', 'conv7', 'res3', 'res5', 'res7']
_DFLT_WEDGE_LAYER_MLP_CANDIDATES = MLP_RECTIFIERS + MLP_SIGMOIDS
_DFLT_SIGMOID_SWAP = MLP_SIGMOIDS
_DFLT_RECTIFIER_SWAP = MLP_RECTIFIERS
_PRIMITIVE_PROB_MASSES = {'inc_single': 0.1,
'dec_single': 0.1,
'inc_en_masse': 0.1,
'dec_en_masse': 0.1,
'swap_layer': 0.2,
'wedge_layer': 0.1,
'remove_layer': 0.1,
'branch': 0.2,
'skip': 0.2,
}
nn_modifier_args = [
# Change fractions for increasing the number of units in layers.
get_option_specs('single_inc_change_frac', False, _DFLT_CHANGE_FRAC,
'Default change fraction when increasing a single layer.'),
get_option_specs('single_dec_change_frac', False, _DFLT_CHANGE_FRAC,
'Default change fraction when decreasing a single layer.'),
get_option_specs('en_masse_inc_change_frac', False, _DFLT_CHANGE_FRAC,
'Default change fraction when increasing layers en_masse.'),
get_option_specs('en_masse_dec_change_frac', False, _DFLT_CHANGE_FRAC,
'Default change fraction when decreasing layers en_masse.'),
# Number of networks to spawn by changing number of units in a single layer.
get_option_specs('spawn_single_inc_num_units', False, _DFLT_CHANGE_NUM_UNITS_SPAWN,
'Default number of networks to spawn by increasing # units in a single layer.'),
get_option_specs('spawn_single_dec_num_units', False, _DFLT_CHANGE_NUM_UNITS_SPAWN,
'Default number of networks to spawn by decreasing # units in a single layer.'),
# Number of networks to spawn by adding or deleting a single layer.
get_option_specs('spawn_add_layer', False, _DFLT_CHANGE_LAYERS_SPAWN,
'Default number of networks to spawn by adding a layer.'),
get_option_specs('spawn_del_layer', False, _DFLT_CHANGE_LAYERS_SPAWN,
'Default number of networks to spawn by deleting a layer.'),
# Number of double/triple step candidates - i.e. applications of basic primitives
# twice/thrice before executing candidates
get_option_specs('num_single_step_modifications', False,
_DFLT_NUM_SINGLE_STEP_MODIFICATIONS,
'Default number of networks to spawn via single step primitives.'),
get_option_specs('num_two_step_modifications', False,
_DFLT_NUM_TWO_STEP_MODIFICATIONS,
'Default number of networks to spawn via two step primitives.'),
get_option_specs('num_three_step_modifications', False,
_DFLT_NUM_THREE_STEP_MODIFICATIONS,
'Default number of networks to spawn via three step primitives.'),
]
# Generic utilities we will need in all functions below ==================================
def get_copies_from_old_nn(nn):
""" Gets copies of critical parameters of the old network. """
layer_labels = deepcopy(nn.layer_labels)
num_units_in_each_layer = deepcopy(nn.num_units_in_each_layer)
conn_mat = deepcopy(nn.conn_mat)
mandatory_child_attributes = Namespace()
for mca_str in nn.mandatory_child_attributes:
mca_val = deepcopy(getattr(nn, mca_str))
setattr(mandatory_child_attributes, mca_str, mca_val)
return layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes
def get_new_nn(old_nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes):
""" Returns a new neural network of the same type as old_nn. """
known_nn_class = True
try:
if old_nn.nn_class == 'cnn':
new_cnn = ConvNeuralNetwork(layer_labels, conn_mat, num_units_in_each_layer,
mandatory_child_attributes.strides,
old_nn.all_layer_label_classes,
old_nn.layer_label_similarities)
return new_cnn
elif old_nn.nn_class.startswith('mlp'):
return MultiLayerPerceptron(old_nn.nn_class[4:], layer_labels, conn_mat,
num_units_in_each_layer, old_nn.all_layer_label_classes,
old_nn.layer_label_similarities)
else:
known_nn_class = False
except (CNNImageSizeMismatchException, CNNNoConvAfterIPException, AssertionError):
return None
if not known_nn_class:
raise ValueError('Unidentified nn_class %s.'%(old_nn.nn_class))
def add_layers_to_end_of_conn_mat(conn_mat, num_add_layers):
""" Adds layers with no edges and returns. """
new_num_layers = conn_mat.shape[0] + num_add_layers
conn_mat.resize((new_num_layers, new_num_layers))
return conn_mat
# Change architecture of the network
# ========================================================================================
# Add a layer ----------------------------------------------------------------------------
def wedge_layer(nn, layer_type, units_in_layer, layer_before, layer_after,
new_layer_attributes=None):
""" Wedges a layer of type layer_type after the layer given in layer_before. The
output of the layer in layer_before goes to the new layer and the output of the
new layer goes to layer_after. If an edge existed between layer_before and
layer_after, it is removed. """
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
layer_labels.append(layer_type)
num_units_in_each_layer = np.append(num_units_in_each_layer, units_in_layer)
if nn.nn_class == 'cnn':
mandatory_child_attributes.strides.append(new_layer_attributes.stride)
conn_mat = add_layers_to_end_of_conn_mat(conn_mat, 1)
conn_mat[layer_before, -1] = 1
conn_mat[-1, layer_after] = 1
conn_mat[layer_before, layer_after] = 0
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes)
def _get_non_None_elements(iter_of_vals):
""" Returns non None values. """
return [x for x in iter_of_vals if x is not None]
def _determine_num_units_for_wedge_layer(nn, edge):
""" Determines the number of layers for wedging a layer. This is usually the average
of the parent (edge[0]) and child (edge[1]).
"""
edge_num_layers = _get_non_None_elements(
[nn.num_units_in_each_layer[idx] for idx in edge])
if len(edge_num_layers) > 0:
return round(sum(edge_num_layers) / len(edge_num_layers))
else:
parents = nn.get_parents(edge[0])
if len(parents) == 0:
# Means you have reached the input node
ip_children = nn.get_children(edge[0])
children_num_units = _get_non_None_elements(
[nn.num_units_in_each_layer[ch] for ch in ip_children])
if len(children_num_units) == 0:
# Create a layer with 16 units
children_num_units = [16]
return sum(children_num_units) / len(children_num_units)
else:
parent_num_units = _get_non_None_elements(
[nn.num_units_in_each_layer[par] for par in parents])
if len(parent_num_units) > 0:
return sum(parent_num_units) / len(parent_num_units)
else:
par_num_units = []
for par in parents:
par_num_units.append(_determine_num_units_for_wedge_layer(nn, (par, edge[0])))
par_num_units = _get_non_None_elements(par_num_units)
return sum(par_num_units) / len(par_num_units)
def get_list_of_wedge_layer_modifiers(nn, num_modifications='all',
internal_layer_type_candidates=None,
choose_pool_with_prob=0.05,
choose_stride_2_with_prob=0.05):
""" Returns a list of operations for adding a layer in between two layers. """
# A local function for creating a modifier
def _get_wedge_modifier(_layer_type, _num_units, _edge, _nl_attributes):
""" Returns a modifier which wedges an edge between the edge. """
return lambda arg_nn: wedge_layer(arg_nn, _layer_type, _num_units,
_edge[0], _edge[1], _nl_attributes)
# Pre-process arguments
nn_is_a_cnn = nn.nn_class == 'cnn'
if internal_layer_type_candidates is None:
if nn_is_a_cnn:
internal_layer_type_candidates = _DFLT_WEDGE_LAYER_CNN_CANDIDATES
else:
internal_layer_type_candidates = _DFLT_WEDGE_LAYER_MLP_CANDIDATES
if not nn_is_a_cnn:
choose_pool_with_prob = 0
all_edges = nn.get_edges()
num_modifications = len(all_edges) if num_modifications == 'all' else num_modifications
op_layer_idx = nn.get_op_layer_idx() # Output layer
ip_layer_idx = nn.get_ip_layer_idx() # Input layer
# We won't change this below so keep it as it is
nonconv_nl_attrs = Namespace(stride=None)
conv_nl_attrs_w_stride_1 = Namespace(stride=1)
conv_nl_attrs_w_stride_2 = Namespace(stride=2)
# Iterate through all edges
ret = []
for edge in all_edges:
curr_layer_type = None
    # First handle the edge cases
if edge[1] == op_layer_idx:
continue
elif nn_is_a_cnn and nn.layer_labels[edge[0]] == 'fc':
curr_layer_type = 'fc'
curr_num_units = nn.num_units_in_each_layer[edge[0]]
nl_attrs = nonconv_nl_attrs
elif not nn_is_a_cnn and edge[1] == op_layer_idx:
# Don't add new layers just before the output for MLPs
continue
elif edge[0] == ip_layer_idx and nn_is_a_cnn:
curr_pool_prob = 0 # No pooling layer right after the input for a CNN
else:
curr_pool_prob = choose_pool_with_prob
if curr_layer_type is None:
if np.random.random() < curr_pool_prob:
curr_layer_candidates = ['avg-pool', 'max-pool']
else:
curr_layer_candidates = internal_layer_type_candidates
curr_layer_type = np.random.choice(curr_layer_candidates, 1)[0]
if curr_layer_type in ['max-pool', 'avg-pool', 'linear', 'softmax']:
curr_num_units = None
else:
curr_num_units = _determine_num_units_for_wedge_layer(nn, edge)
# Determine stride
if is_a_conv_layer_label(curr_layer_type):
nl_attrs = conv_nl_attrs_w_stride_2 if \
np.random.random() < choose_stride_2_with_prob else conv_nl_attrs_w_stride_1
else:
nl_attrs = nonconv_nl_attrs
ret.append(_get_wedge_modifier(curr_layer_type, curr_num_units, edge, nl_attrs))
# Break if more than the number of modifications
if len(ret) >= num_modifications:
break
return ret
# Removing a layer -----------------------------------------------------------------------
def remove_layer(nn, del_idx, additional_edges, new_strides=None):
""" Deletes the layer indicated in del_idx and adds additional_edges specified
in additional_edges. """
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
# First add new edges to conn_mat and remove edges to and from del_idx
for add_edge in additional_edges:
conn_mat[add_edge[0], add_edge[1]] = 1
conn_mat[del_idx, :] = 0
conn_mat[:, del_idx] = 0
# Now reorder everything so that del_idx is at the end
all_idxs = list(range(len(layer_labels)))
new_order = all_idxs[:del_idx] + all_idxs[del_idx+1:] + [del_idx]
  # Now reorder everything so that the layer to be removed is at the end
layer_labels = reorder_list_or_array(layer_labels, new_order)
num_units_in_each_layer = reorder_list_or_array(num_units_in_each_layer, new_order)
conn_mat = reorder_rows_and_cols_in_matrix(conn_mat, new_order)
# remove layer
layer_labels = layer_labels[:-1]
num_units_in_each_layer = num_units_in_each_layer[:-1]
conn_mat = conn_mat[:-1, :-1]
# Strides for a convolutional network
if nn.nn_class == 'cnn':
new_strides = new_strides if new_strides is not None else \
mandatory_child_attributes.strides
mandatory_child_attributes.strides = reorder_list_or_array(
new_strides, new_order)
mandatory_child_attributes.strides = mandatory_child_attributes.strides[:-1]
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes)
def get_list_of_remove_layer_modifiers(old_nn):
""" Returns a list of primitives which remove a layer from a neural network. """
# pylint: disable=too-many-locals
# A local function to return the modifier
if old_nn.num_processing_layers == 0:
# Don't delete any layers if there are no processing layers.
return []
def _get_remove_modifier(_del_idx, _add_edges, *_args, **_kwargs):
""" Returns a modifier which deletes _del_idx and adds _add_edges. """
return lambda arg_nn: remove_layer(arg_nn, _del_idx, _add_edges, *_args, **_kwargs)
# Now check every layer
ret = []
for idx, ll in enumerate(old_nn.layer_labels):
if ll in ['ip', 'op', 'softmax', 'linear']: # Don't delete any of these layers
continue
curr_parents = old_nn.get_parents(idx)
parent_labels = [old_nn.layer_labels[par_idx] for par_idx in curr_parents]
if ll == 'fc' and (not parent_labels == ['fc'] * len(parent_labels)):
# If the parents of a fc layer are also not fc then do not delete
continue
curr_children = old_nn.get_children(idx)
if old_nn.nn_class == 'cnn' and \
old_nn.pre_img_inv_sizes[idx] != old_nn.post_img_inv_sizes[idx]:
change_stride_idxs = None
# First check if the children are modifiable
child_strides = [old_nn.strides[ch_idx] for ch_idx in curr_children]
if child_strides == [1] * len(curr_children):
change_stride_idxs = curr_children # we will change the strides of the children
if change_stride_idxs is None:
parent_strides = [old_nn.strides[par_idx] for par_idx in curr_parents]
if parent_strides == [1] * len(curr_parents):
change_stride_idxs = curr_parents
      # If we have successfully identified children/parents which we can modify, great!
      # Otherwise, let's not change anything and hope that it
# does not break anything. If it does, there is an exception to handle this.
if change_stride_idxs is not None:
new_strides = deepcopy(old_nn.strides)
for csi in change_stride_idxs:
new_strides[csi] = 2
else:
new_strides = None
else:
new_strides = None
    # Now delete the layer and add new edges
num_children_on_each_parent = [len(old_nn.get_children(par_idx)) for par_idx in
curr_parents]
num_parents_on_each_child = [len(old_nn.get_parents(ch_idx)) for ch_idx in
curr_children]
must_add_children = [curr_children[i] for i in range(len(curr_children)) if
num_parents_on_each_child[i] == 1]
must_add_parents = [curr_parents[i] for i in range(len(curr_parents)) if
num_children_on_each_parent[i] == 1]
num_must_add_children = len(must_add_children)
num_must_add_parents = len(must_add_parents)
np.random.shuffle(must_add_children)
np.random.shuffle(must_add_parents)
add_edges = []
for _ in range(min(num_must_add_children, num_must_add_parents)):
add_edges.append((must_add_parents.pop(), must_add_children.pop()))
# Add edges for left over children/parents
if num_must_add_children > num_must_add_parents:
diff = num_must_add_children - num_must_add_parents
cand_parents = list(np.random.choice(curr_parents, diff))
for _ in range(diff):
add_edges.append((cand_parents.pop(), must_add_children.pop()))
if num_must_add_parents > num_must_add_children:
diff = num_must_add_parents - num_must_add_children
cand_children = list(np.random.choice(curr_children, diff))
for _ in range(diff):
add_edges.append((must_add_parents.pop(), cand_children.pop()))
ret.append(_get_remove_modifier(idx, add_edges, new_strides))
return ret
# Branching modifications ----------------------------------------------------------------
def create_duplicate_branch(nn, path, keep_layer_with_prob=0.5):
""" Creates a new network which creates a new branch between path[0] and path[-1] and
copies all layers between. It keeps a layer in between with probability 0.5. If
in CNNs, the layer shrinks the size of the image, then we keep it with prob 1.
"""
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
# First decide which nodes in the path to keep
branched_path = [path[0]]
fc_encountered = False
for idx in path[1: -1]:
if idx == path[1] and nn.get_ip_layer_idx() == path[0]:
branched_path.append(idx) # Append if the branch starts at ip and this is a child.
elif idx == path[-2] and len(branched_path) == 1:
branched_path.append(idx) # If this is the last layer and we have not appended yet.
elif is_a_pooling_layer_label(nn.layer_labels[idx]) or \
nn.layer_labels[idx] in ['linear', 'softmax'] or \
(hasattr(nn, 'strides') and nn.strides[idx] == 2):
branched_path.append(idx)
elif nn.layer_labels[idx] == 'fc' and not fc_encountered:
branched_path.append(idx)
fc_encountered = True
elif np.random.random() < keep_layer_with_prob:
branched_path.append(idx)
branched_path.append(path[-1])
# Now create additional nodes
num_new_nodes = len(branched_path) - 2
layer_labels.extend([nn.layer_labels[idx] for idx in branched_path[1:-1]])
num_units_in_each_layer = np.concatenate((num_units_in_each_layer,
[nn.num_units_in_each_layer[idx] for idx in branched_path[1:-1]]))
# Add edges
new_idxs = list(range(nn.num_layers, nn.num_layers + num_new_nodes))
conn_mat = add_layers_to_end_of_conn_mat(conn_mat, num_new_nodes)
if num_new_nodes == 0:
conn_mat[branched_path[0], branched_path[1]] = 1
else:
conn_mat[branched_path[0], new_idxs[0]] = 1
conn_mat[new_idxs[-1], branched_path[-1]] = 1
for new_idx in new_idxs[:-1]:
conn_mat[new_idx, new_idx + 1] = 1
# Add strides
if nn.nn_class == 'cnn':
mandatory_child_attributes.strides.extend([nn.strides[idx] for idx in
branched_path[1:-1]])
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes)
def _get_path_for_branching_from_start_layer(nn, start_layer, min_path_length=4,
end_path_prob=0.20):
""" Returns a path which starts at start layer. """
path = [start_layer]
while True:
curr_layer = path[-1]
curr_children = nn.get_children(curr_layer)
next_layer = int(np.random.choice(curr_children, 1))
path.append(next_layer)
if nn.layer_labels[next_layer] == 'op':
break
elif len(path) < min_path_length:
pass
elif np.random.random() < end_path_prob:
break
return path
def _get_start_layer_probs_for_branching_and_skipping(nn):
""" Returns probabilities for the start layer to be used in branching and
skipping primitives.
"""
dists_from_ip = nn.get_distances_from_ip()
start_layer_prob = []
for layer_idx, layer_label in enumerate(nn.layer_labels):
    # We pick the start layer with probability inversely proportional to its distance from ip
curr_layer_prob = 0 if layer_label in ['op', 'softmax', 'fc', 'linear'] else \
1.0 / np.sqrt(1 + dists_from_ip[layer_idx])
start_layer_prob.append(curr_layer_prob)
start_layer_prob = np.array(start_layer_prob)
return start_layer_prob
def get_list_of_branching_modifiers(nn, num_modifiers=None, **kwargs):
""" Returns a list of operators for Neural networks that create branches in the
architecture.
"""
if nn.num_processing_layers == 0:
# Don't create any branches if there are no processing layers.
return []
# Define a local function to return the modifier
def _get_branching_modifier(_path, *_args, **_kwargs):
""" Returns a modifier which duplicates the path along _path. """
return lambda arg_nn: create_duplicate_branch(arg_nn, _path, *_args, **_kwargs)
# Some preprocessing
num_modifiers = num_modifiers if num_modifiers is not None else 2 * nn.num_layers
start_layer_prob = _get_start_layer_probs_for_branching_and_skipping(nn)
ret = []
if sum(start_layer_prob) <= 0.0:
return ret # return immediately with an empty list
while len(ret) < num_modifiers:
start_layer_prob = start_layer_prob / sum(start_layer_prob)
start_layer = int(np.random.choice(nn.num_layers, 1, p=start_layer_prob))
path = _get_path_for_branching_from_start_layer(nn, start_layer)
start_layer_prob[start_layer] *= 0.9 # shrink probability of picking this layer again.
ret.append(_get_branching_modifier(path, **kwargs))
return ret
# Skipping modifications -----------------------------------------------------------------
def create_skipped_network(nn, start_layer, end_layer, pool_layer_type='avg'):
""" Creates a new layer with a skip connection from start_layer to end_layer.
In a CNN, if the image sizes do not match, this creates additional pooling layers
(either avg-pool or max-pool) to make them match.
"""
layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \
get_copies_from_old_nn(nn)
if nn.nn_class != 'cnn' or \
nn.post_img_inv_sizes[start_layer] == nn.pre_img_inv_sizes[end_layer]:
conn_mat[start_layer, end_layer] = 1
else:
# Determine number of new layers, the number of units and strides in each layer.
num_new_pool_layers = int(np.log2(nn.pre_img_inv_sizes[end_layer] /
nn.post_img_inv_sizes[start_layer]))
new_layer_idxs = list(range(nn.num_layers, nn.num_layers + num_new_pool_layers))
num_units_in_each_layer = np.concatenate((num_units_in_each_layer,
[None] * num_new_pool_layers))
mandatory_child_attributes.strides.extend([None] * num_new_pool_layers)
# Determine layer labels
if pool_layer_type.lower().startswith('avg'):
new_layer_type = 'avg-pool'
elif pool_layer_type.lower().startswith('max'):
new_layer_type = 'max-pool'
else:
raise ValueError('pool_layer_type should be \'avg\' or \'max\'.')
new_layer_labels = [new_layer_type for _ in range(num_new_pool_layers)]
layer_labels.extend(new_layer_labels)
conn_mat = add_layers_to_end_of_conn_mat(conn_mat, num_new_pool_layers)
# Finally, the conn_mat
conn_mat[start_layer, new_layer_idxs[0]] = 1
conn_mat[new_layer_idxs[-1], end_layer] = 1
for new_idx in new_layer_idxs[:-1]:
conn_mat[new_idx, new_idx + 1] = 1
return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat,
mandatory_child_attributes)
def _get_end_layer_probs_for_skipping(nn, start_layer):
""" Returns the end layer probabilities to be used in skipping. """
dists_from_ip = nn.get_distances_from_ip()
dists_to_op = nn.get_distances_to_op()
is_a_cnn = nn.nn_class.startswith('cnn')
end_layer_prob = []
for layer_idx, layer_label in enumerate(nn.layer_labels):
curr_layer_prob = 'assign'
if dists_from_ip[layer_idx] - 1 <= dists_from_ip[start_layer] or \
dists_to_op[layer_idx] + 1 >= dists_to_op[start_layer] or \
layer_label in ['ip', 'op', 'softmax']:
curr_layer_prob = 'no-assign'
elif is_a_cnn and \
nn.post_img_inv_sizes[start_layer] > nn.pre_img_inv_sizes[layer_idx]:
# If the layer has an input image size *larger* than the output of the
# start layer, then do not assign.
curr_layer_prob = 'no-assign'
elif layer_label == 'fc':
      # If it's a fully connected layer, connect with this only if it is the first
# fc layer.
curr_layer_parent_labels = [nn.layer_labels[x] for x in nn.get_parents(layer_idx)]
if not all([(is_a_pooling_layer_label(clpl) or is_a_conv_layer_label(clpl)) for
clpl in curr_layer_parent_labels]):
curr_layer_prob = 'no-assign'
curr_layer_prob = 0.0 if curr_layer_prob == 'no-assign' else 1.0
end_layer_prob.append(curr_layer_prob)
if sum(end_layer_prob) == 0:
return None
else:
    end_layer_prob = np.array(end_layer_prob)
""" Test for topographic data source """
import numpy as np
import pandas as pd
import pytest
from nowcasting_dataset.data_sources import TopographicDataSource
@pytest.mark.parametrize(
"x, y, left, right, top, bottom",
[
(0, 0, -128_000, 126_000, 128_000, -126_000),
(10, 0, -126_000, 128_000, 128_000, -126_000),
(30, 0, -126_000, 128_000, 128_000, -126_000),
(1000, 0, -126_000, 128_000, 128_000, -126_000),
(0, 1000, -128_000, 126_000, 128_000, -126_000),
(1000, 1000, -126_000, 128_000, 128_000, -126_000),
(2000, 2000, -126_000, 128_000, 130_000, -124_000),
(2000, 1000, -126_000, 128_000, 128_000, -126_000),
(2001, 2001, -124_000, 130_000, 130_000, -124_000),
],
)
def test_get_example_2km(x, y, left, right, top, bottom):
"""Test get examples"""
size = 2000 # meters
topo_source = TopographicDataSource(
filename="tests/data/europe_dem_2km_osgb.tif",
image_size_pixels=128,
meters_per_pixel=size,
forecast_minutes=300,
history_minutes=10,
)
t0_dt = pd.Timestamp("2019-01-01T13:00")
topo_data = topo_source.get_example(t0_datetime_utc=t0_dt, x_center_osgb=x, y_center_osgb=y)
assert topo_data.data.shape == (128, 128)
assert len(topo_data.x_osgb) == 128
assert len(topo_data.y_osgb) == 128
assert not np.isnan(topo_data.data).any()
    # Topo x and y coords are not set exactly on the edges, but at the centers of the pixels
assert np.isclose(left, topo_data.x_osgb.values[0], atol=size)
assert np.isclose(right, topo_data.x_osgb.values[-1], atol=size)
assert np.isclose(top, topo_data.y_osgb.values[0], atol=size)
assert np.isclose(bottom, topo_data.y_osgb.values[-1], atol=size)
@pytest.mark.parametrize(
"x, y, left, right, top, bottom",
[
(0, 0, -128_000, 126_000, 128_000, -126_000),
(10, 0, -126_000, 128_000, 128_000, -126_000),
(30, 0, -126_000, 128_000, 128_000, -126_000),
(1000, 0, -126_000, 128_000, 128_000, -126_000),
(0, 1000, -128_000, 126_000, 128_000, -126_000),
(1000, 1000, -126_000, 128_000, 128_000, -126_000),
(2000, 2000, -126_000, 128_000, 130_000, -124_000),
(2000, 1000, -126_000, 128_000, 128_000, -126_000),
(2001, 2001, -124_000, 130_000, 130_000, -124_000),
],
)
def test_get_batch_2km(x, y, left, right, top, bottom):
"""Test get batches"""
size = 2000 # meters
topo_source = TopographicDataSource(
filename="tests/data/europe_dem_2km_osgb.tif",
image_size_pixels=128,
meters_per_pixel=size,
forecast_minutes=300,
history_minutes=10,
)
    x = np.array([x] * 32)
import click
import torch
import numpy as np
from model import BOS, EOS, CharTokenizer, CharLSTM
from constants import MAX_LEN
STOP_LIST = [EOS, " "] + list('!"#$%&()*+,-./:;<=>?@[\\]^_{|}~')
CUDA = "cuda"
CPU = "cpu"
@click.group()
def main():
pass
def to_matrix(names, tokenizer, max_len=None, dtype='int32', batch_first=True):
"""Casts a list of names into rnn-digestable matrix"""
max_len = max_len or max(map(len, names))
names_ix = np.zeros([len(names), max_len], dtype) + tokenizer.pad_token
for i in range(len(names)):
line_ix = [tokenizer.char_to_idx_get(c) for c in names[i]]
names_ix[i, :len(line_ix)] = line_ix
if not batch_first: # convert [batch, time] into [time, batch]
        names_ix = np.transpose(names_ix)
    return names_ix
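# Hedged usage sketch for `to_matrix` (illustration only): a minimal stand-in
# tokenizer is used here instead of the project's CharTokenizer, just to show
# the padded [batch, time] layout the function produces.
def _demo_to_matrix():
    class _StubTokenizer:
        pad_token = 0
        def char_to_idx_get(self, c):
            return ord(c)  # Toy mapping; the real tokenizer has its own vocabulary.
    # Shape (2, 3); the shorter name is right-padded with pad_token.
    return to_matrix(["ann", "bo"], _StubTokenizer())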
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
from Machine_Learning.hmm.generate_hmm_continuous import get_signals, big_init, simple_init
from Machine_Learning.hmm.utils import random_normalized
class HMM:
def __init__(self, M, K):
"""
        Continuous HMM class. There are now two hidden quantities, M and K.
        M is the number of hidden states, as in a regular HMM. K is the number
        of Gaussian mixture components used to model the emissions of each state.
        :param M: Number of hidden states
        :param K: Number of Gaussian mixture components per hidden state
"""
self.M = M
self.K = K
def fit(self, X, max_iter=30, eps=10e-1):
N = len(X)
D = X[0].shape[1]
self.pi = np.ones(self.M) / self.M # Uniform distribution
self.A = random_normalized(self.M, self.M)
# GMM parameters. mu is set similar to how it is set in kmeans -> randomly select points from dataset
# R, responsibilities --> Uniform distribution
self.R = np.ones((self.M, self.K)) / self.K
self.mu = np.zeros((self.M, self.K, D))
for i in range(self.M):
for k in range(self.K):
# For all states and all gaussians, get a random index, choose a random sequence,
# get a random time, and set mu[i,k] to be whatever point was at that time
random_idx = np.random.choice(N)
x = X[random_idx]
random_time_idx = np.random.choice(len(x))
self.mu[i,k] = x[random_time_idx]
self.sigma = np.zeros((self.M, self.K, D, D))
for j in range(self.M):
for k in range(self.K):
self.sigma[j,k] = np.eye(D)
costs = []
for it in range(max_iter):
if it % 1 == 0:
print("it: ", it)
alphas = []
betas = []
gammas = []
Bs = []
P = np.zeros(N) # Sequence probabilities
# ----------- Expectation Step -----------
# Iterate over every sequence
for n in range(N):
x = X[n]
T = len(x)
B = np.zeros((self.M, T))
component = np.zeros((self.M, self.K, T))
# Iterate over every state, every time, and every gaussian
for j in range(self.M):
for t in range(T):
for k in range(self.K):
p = self.R[j,k] * mvn.pdf(x[t], self.mu[j,k], self.sigma[j,k]) # Component probability
component[j,k,t] = p
B[j,t] += p
Bs.append(B)
# Just like discrete case
alpha = np.zeros((T, self.M))
alpha[0] = self.pi * B[:, 0]
for t in range(1, T):
alpha[t] = alpha[t-1].dot(self.A) * B[:,t]
P[n] = alpha[-1].sum()
assert(P[n] <= 1)
alphas.append(alpha)
beta = np.zeros((T, self.M))
beta[-1] = 1
for t in range(T-2, -1, -1):
beta[t] = self.A.dot(B[:, t+1] * beta[t+1])
betas.append(beta)
# This was not needed in the discrete case
gamma = np.zeros((T, self.M, self.K))
for t in range(T):
# Denominator only depends on t
alphabeta = (alphas[n][t,:] * betas[n][t,:]).sum()
for j in range(self.M):
# Now loop through every state and calculate alpha beta factor
factor = alphas[n][t,j] * betas[n][t,j] / alphabeta
for k in range(self.K):
# loop through all gaussians
gamma[t,j,k] = factor * component[j,k,t] / B[j,t]
gammas.append(gamma)
cost = np.log(P).sum()
costs.append(cost)
# ----------- Maximization Step -----------
            self.pi = sum((alphas[n][0] * betas[n][0]) / P[n] for n in range(N)) / N
            # Define numerators and denominators, since all update formulas involve division
a_den = np.zeros((self.M, 1))
a_num = 0
r_num = np.zeros((self.M, self.K))
r_den = np.zeros(self.M)
mu_num = np.zeros((self.M, self.K, D))
sigma_num = np.zeros((self.M, self.K, D, D))
# Note the denominator for mu and sigma is just r_num
for n in range(N):
# iterate over all sequences
x = X[n]
T = len(x)
B = Bs[n]
gamma = gammas[n]
a_den += (alphas[n][:-1] * betas[n][:-1]).sum(axis=0, keepdims=True).T / P[n]
# Update A -> This is the same update that was performed in the discrete case!
                a_num_n = np.zeros((self.M, self.M))
# Copyright (c) 2020. <NAME>. hk2699 at caa dot columbia dot edu.
import matplotlib as mpl
import numpy as np
import numpy_groupies as npg
import statsmodels.api as sm
from matplotlib import pyplot as plt
from data_2d import consts, load_data
from lib.pylabyk import plt2, np2
def get_coefs(
dim, dif_other,
dur, ch, cond, t_RDK_dur,
correct_only=True
):
"""
:param dim:
:param dif_other:
:param dur: [tr]
:param ch: [tr, dim]
:param cond: [tr, dim]
:param t_RDK_dur:
:param correct_only:
:return: glmres.params, glmres.bse, glmres, glmmodel
"""
id_dif = np.empty_like(cond)
for dim1 in range(consts.N_DIM):
out = np.unique(np.abs(cond[:,dim1]),
return_inverse=True)
_, id_dif[:, dim1] = out
odim = consts.N_DIM - 1 - dim
incl = (
(t_RDK_dur == dur)
& (np.isin(id_dif[:, odim], dif_other))
)
if correct_only:
incl = (
incl
& (np.sign(ch[:, odim] - 0.5)
== np.sign(cond[:, odim]))
)
ch1 = ch[incl, dim]
coh1 = cond[incl, dim]
cohs, id_cohs = np.unique(coh1, return_inverse=True)
if np.issubdtype(ch1.dtype, np.floating):
# p_ch=1 is given
ch11 = np.stack([
npg.aggregate(id_cohs, ch1),
npg.aggregate(id_cohs, 1 - ch1)
], -1)
else:
ch11 = npg.aggregate(np.vstack((id_cohs, 1 - ch1)), 1)
glmmodel = sm.GLM(
ch11, sm.add_constant(cohs), family=sm.families.Binomial())
glmres = glmmodel.fit()
return glmres.params, glmres.bse, glmres, glmmodel
def get_coefs_mesh(cond, ch, t_RDK_dur,
dif_irrs=(2, (0, 1)),
correct_only=False
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
:param cond:
:param ch:
:param t_RDK_dur:
:param dif_irrs:
:param correct_only:
:return: (coef, se_coef, glmres, glmmodel)
coef[(bias, slope), dim, dif, dur]
"""
dims = [0, 1]
t_RDK_durs, id_durs = np.unique(t_RDK_dur, return_inverse=True)
coef, se_coef, glmres, glmmodel = np2.meshfun(
lambda *args: get_coefs(
*args,
ch=ch, cond=cond, t_RDK_dur=t_RDK_dur,
correct_only=correct_only),
[dims, dif_irrs, t_RDK_durs],
n_out=4,
outshape_first=True
)
return coef, se_coef, glmres, glmmodel
def get_coefs_from_histogram(cond, p_cond_ch):
glmmodel = sm.GLM(p_cond_ch, sm.add_constant(cond),
family=sm.families.Binomial())
glmres = glmmodel.fit()
return glmres.params, glmres.bse, glmres, glmmodel
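# Hedged sketch for `get_coefs_from_histogram` (illustration only): fit a
# binomial GLM (logistic psychometric curve) to synthetic per-condition choice
# counts. Columns of p_cond_ch are [n_choice_1, n_choice_0].
def _demo_get_coefs_from_histogram():
    cond = np.linspace(-0.5, 0.5, 9)  # signed evidence strength per condition
    p_choice1 = 1.0 / (1.0 + np.exp(-(0.2 + 8.0 * cond)))
    n_trials = 100
    p_cond_ch = np.round(np.stack([n_trials * p_choice1,
                                   n_trials * (1.0 - p_choice1)], axis=-1))
    coef, se_coef, _, _ = get_coefs_from_histogram(cond, p_cond_ch)
    return coef  # approximately [0.2, 8.0], i.e. [bias, slope]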
def get_coefs_mesh_from_histogram(
p_cond_dur_ch: np.ndarray,
ev_cond_dim: np.ndarray,
dif_irrs=((2,), (0, 1))
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
:param p_cond_dur_ch:
:param ev_cond_dim: [cond, dim]
:param dif_irrs:
return: (coef, se_coef, glmres, glmmodel)
coef[(bias, slope), dim, dif, dur]
"""
n_dim = ev_cond_dim.shape[1]
n_dif = len(dif_irrs)
n_dur = p_cond_dur_ch.shape[1]
siz = [n_dim, n_dif, n_dur]
n_coef = 4
    coef = np.zeros([n_coef] + siz)
import argparse
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
from tensorflow.python.ops.check_ops import assert_greater_equal_v2
import load_data
from tqdm import tqdm
import numpy as np
import pandas as pd
from math import e as e_VALUE
import tensorflow.keras.backend as Keras_backend
from sklearn.ensemble import RandomForestClassifier
from scipy.special import bdtrc
def func_CallBacks(Dir_Save=''):
mode = 'min'
monitor = 'val_loss'
# checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath= Dir_Save + '/best_model_weights.h5', monitor=monitor , verbose=1, save_best_only=True, mode=mode)
# Reduce_LR = tf.keras.callbacks.ReduceLROnPlateau(monitor=monitor, factor=0.1, min_delta=0.005 , patience=10, verbose=1, save_best_only=True, mode=mode , min_lr=0.9e-5 , )
# CSVLogger = tf.keras.callbacks.CSVLogger(Dir_Save + '/results.csv', separator=',', append=False)
EarlyStopping = tf.keras.callbacks.EarlyStopping( monitor = monitor,
min_delta = 0,
patience = 4,
verbose = 1,
mode = mode,
baseline = 0,
restore_best_weights = True)
return [EarlyStopping] # [checkpointer , EarlyStopping , CSVLogger]
def reading_terminal_inputs():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch" , help="number of epochs")
parser.add_argument("--bsize" , help="batch size")
parser.add_argument("--max_sample" , help="maximum number of training samples")
parser.add_argument("--naug" , help="number of augmentations")
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
parser.add_argument("--architecture_name", help='architecture name')
args = parser.parse_args()
epoch = int(args.epoch) if args.epoch else 3
number_augmentation = int(args.naug) if args.naug else 3
bsize = int(args.bsize) if args.bsize else 100
max_sample = int(args.max_sample) if args.max_sample else 1000
architecture_name = str(args.architecture_name) if args.architecture_name else 'DenseNet121'
return epoch, bsize, max_sample, architecture_name, number_augmentation
def mlflow_settings():
"""
RUN UI with postgres and HPC:
REMOTE postgres server:
# connecting to remote server through ssh tunneling
ssh -L 5000:localhost:5432 <EMAIL>
# using the mapped port and localhost to view the data
mlflow ui --backend-store-uri postgresql://artinmajdi:1234@localhost:5000/chest_db --port 6789
RUN directly from GitHub or show experiments/runs list:
export MLFLOW_TRACKING_URI=http://127.0.0.1:5000
mlflow runs list --experiment-id <id>
mlflow run --no-conda --experiment-id 5 -P epoch=2 https://github.com/artinmajdi/mlflow_workflow.git -v main
mlflow run mlflow_workflow --no-conda --experiment-id 5 -P epoch=2
PostgreSQL server style
server = f'{dialect_driver}://{username}:{password}@{ip}/{database_name}' """
postgres_connection_type = { 'direct': ('5432', 'data7-db1.cyverse.org'),
'ssh-tunnel': ('5000', 'localhost')
}
port, host = postgres_connection_type['ssh-tunnel'] # 'direct' , 'ssh-tunnel'
username = "artinmajdi"
password = '<PASSWORD>'
database_name = "chest_db_v2"
dialect_driver = 'postgresql'
server = f'{dialect_driver}://{username}:{password}@{host}:{port}/{database_name}'
Artifacts = { 'hpc': 'sftp://mohammadsmajdi@file<EMAIL>iz<EMAIL>.<EMAIL>:/home/u29/mohammadsmajdi/projects/mlflow/artifact_store',
'data7_db1': 'sftp://[email protected]:/home/artinmajdi/mlflow_data/artifact_store'} # :temp2_data7_b
return server, Artifacts['data7_db1']
def architecture(architecture_name: str='DenseNet121', input_shape: list=[224,224,3], num_classes: int=14):
input_tensor=tf.keras.layers.Input(input_shape)
if architecture_name == 'custom':
model = tf.keras.layers.Conv2D(4, kernel_size=(3,3), activation='relu')(input_tensor)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(8, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(16, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(32, activation='relu')(model)
model = tf.keras.layers.Dense(num_classes , activation='softmax')(model)
        return tf.keras.models.Model(inputs=input_tensor, outputs=[model])
else:
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
pooling='avg'
weights='imagenet'
include_top=False
if architecture_name == 'xception': model_architecture = tf.keras.applications.Xception
elif architecture_name == 'VGG16': model_architecture = tf.keras.applications.VGG16
elif architecture_name == 'VGG19': model_architecture = tf.keras.applications.VGG19
elif architecture_name == 'ResNet50': model_architecture = tf.keras.applications.ResNet50
elif architecture_name == 'ResNet50V2': model_architecture = tf.keras.applications.ResNet50V2
elif architecture_name == 'ResNet101': model_architecture = tf.keras.applications.ResNet101
elif architecture_name == 'ResNet101V2': model_architecture = tf.keras.applications.ResNet101V2
elif architecture_name == 'ResNet152': model_architecture = tf.keras.applications.ResNet152
elif architecture_name == 'ResNet152V2': model_architecture = tf.keras.applications.ResNet152V2
elif architecture_name == 'InceptionV3': model_architecture = tf.keras.applications.InceptionV3
elif architecture_name == 'InceptionResNetV2': model_architecture = tf.keras.applications.InceptionResNetV2
elif architecture_name == 'MobileNet': model_architecture = tf.keras.applications.MobileNet
elif architecture_name == 'MobileNetV2': model_architecture = tf.keras.applications.MobileNetV2
elif architecture_name == 'DenseNet121': model_architecture = tf.keras.applications.DenseNet121
elif architecture_name == 'DenseNet169': model_architecture = tf.keras.applications.DenseNet169
elif architecture_name == 'DenseNet201': model_architecture = tf.keras.applications.DenseNet201
elif int(list(tf.keras.__version__)[2]) >= 4:
if architecture_name == 'EfficientNetB0': model_architecture = tf.keras.applications.EfficientNetB0
elif architecture_name == 'EfficientNetB1': model_architecture = tf.keras.applications.EfficientNetB1
elif architecture_name == 'EfficientNetB2': model_architecture = tf.keras.applications.EfficientNetB2
elif architecture_name == 'EfficientNetB3': model_architecture = tf.keras.applications.EfficientNetB3
elif architecture_name == 'EfficientNetB4': model_architecture = tf.keras.applications.EfficientNetB4
elif architecture_name == 'EfficientNetB5': model_architecture = tf.keras.applications.EfficientNetB5
elif architecture_name == 'EfficientNetB6': model_architecture = tf.keras.applications.EfficientNetB6
elif architecture_name == 'EfficientNetB7': model_architecture = tf.keras.applications.EfficientNetB7
model = model_architecture( weights = weights,
include_top = include_top,
input_tensor = input_tensor,
input_shape = input_shape,
pooling = pooling) # ,classes=num_classes
KK = tf.keras.layers.Dense( num_classes, activation='sigmoid', name='predictions' )(model.output)
return tf.keras.models.Model(inputs=model.input,outputs=KK)
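# Hedged usage sketch for `architecture` (illustration only): the 'custom'
# branch needs no pretrained weights, so it is the cheapest way to check the
# expected input/output shapes of the returned model.
def _demo_architecture():
    model = architecture(architecture_name='custom',
                         input_shape=[224, 224, 3],
                         num_classes=14)
    return model.input_shape, model.output_shape  # roughly (None,224,224,3) and (None,14)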
def weighted_bce_loss(W):
def func_loss(y_true,y_pred):
NUM_CLASSES = y_pred.shape[1]
loss = 0
for d in range(NUM_CLASSES):
y_true = tf.cast(y_true, tf.float32)
mask = tf.keras.backend.cast( tf.keras.backend.not_equal(y_true[:,d], -5),
tf.keras.backend.floatx() )
loss += W[d]*tf.keras.losses.binary_crossentropy( y_true[:,d] * mask,
y_pred[:,d] * mask )
return tf.divide( loss, tf.cast(NUM_CLASSES,tf.float32) )
return func_loss
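# Hedged sketch for `weighted_bce_loss` (illustration only): labels equal to -5
# are treated as missing and masked out of the per-class binary cross-entropy
# before the class weights W are applied.
def _demo_weighted_bce_loss():
    loss_fn = weighted_bce_loss(W=np.ones(3))
    y_true = tf.constant([[1.0, -5.0, 0.0]])  # second class is unknown -> masked
    y_pred = tf.constant([[0.9, 0.5, 0.1]])
    return loss_fn(y_true, y_pred)  # class-averaged masked loss tensor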
def optimize(dir, train_dataset, valid_dataset, epochs, Info, architecture_name):
# architecture
model = architecture( architecture_name = architecture_name,
input_shape = list(Info.target_size) + [3] ,
num_classes = len(Info.pathologies) )
model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
loss = weighted_bce_loss(Info.class_weights), # tf.keras.losses.binary_crossentropy
metrics = [tf.keras.metrics.binary_accuracy] )
# optimization
history = model.fit( train_dataset,
validation_data = valid_dataset,
epochs = epochs,
steps_per_epoch = Info.steps_per_epoch,
validation_steps = Info.validation_steps,
verbose = 1,
use_multiprocessing = True) # ,callbacks=func_CallBacks(dir + '/model')
# saving the optimized model
model.save( dir + '/model/model.h5',
overwrite = True,
include_optimizer = False )
return model
def evaluate(dir: str, dataset: str='chexpert', batch_size: int=1000, model=tf.keras.Model()):
# Loading the data
Data, Info = load_data.load_chest_xray( dir = dir,
dataset = dataset,
batch_size = batch_size,
mode = 'test' )
score = measure_loss_acc_on_test_data( generator = Data.generator['test'],
model = model,
pathologies = Info.pathologies )
return score
def measure_loss_acc_on_test_data(generator, model, pathologies):
# Looping over all test samples
score_values = {}
NUM_CLASSES = len(pathologies)
generator.reset()
for j in tqdm(range(len(generator.filenames))):
x_test, y_test = next(generator)
full_path, x,y = generator.filenames[j] , x_test[0,...] , y_test[0,...]
x,y = x[np.newaxis,:] , y[np.newaxis,:]
# Estimating the loss & accuracy for instance
eval = model.evaluate(x=x, y=y,verbose=0,return_dict=True)
# predicting the labels for instance
pred = model.predict(x=x,verbose=0)
# Measuring the loss for each class
loss_per_class = [ tf.keras.losses.binary_crossentropy(y[...,d],pred[...,d]) for d in range(NUM_CLASSES)]
# saving all the infos
score_values[full_path] = {'full_path':full_path,'loss_avg':eval['loss'], 'acc_avg':eval['binary_accuracy'], 'pred':pred[0], 'pred_binary':pred[0] > 0.5, 'truth':y[0]>0.5, 'loss':np.array(loss_per_class), 'pathologies':pathologies}
    # converting the outputs into a pandas dataframe
    df = pd.DataFrame.from_dict(score_values).T
    # resetting the index to integers
    df.reset_index(inplace=True)
    # dropping the old index column
    df = df.drop(['index'],axis=1)
return df
class Parent_Child():
def __init__(self, subj_info: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
"""
subject_info = {'pred':[], 'loss':[], 'pathologies':['Edema','Cardiomegaly',...]}
1. After creating a class:
SPC = Parent_Child(loss_dict, pred_dict, technique)
2. Update the parent child relationship:
SPC.set_parent_child_relationship(parent_name1, child_name_list1)
SPC.set_parent_child_relationship(parent_name2, child_name_list2)
3. Then update the loss and probabilities
SPC.update_loss_pred()
4. In order to see the updated loss and probabilities use below
loss_new_list = SPC.loss_dict_weighted or SPC.loss_list_weighted
pred_new_list = SPC.pred_dict_weighted or SPC.predlist_weighted
IMPORTANT NOTE:
        If there are more than 2 generations, it is absolutely important to enter the subjects in order of seniority:
gen1: grandparent (gen1)
gen1_subjx_children: parent (gen2)
gen2_subjx_children: child (gen3)
SPC = Parent_Child(loss_dict, pred_dict, technique)
SPC.set_parent_child_relationship(gen1_subj1, gen1_subj1_children)
SPC.set_parent_child_relationship(gen1_subj2, gen1_subj2_children)
. . .
SPC.set_parent_child_relationship(gen2_subj1, gen2_subj1_children)
SPC.set_parent_child_relationship(gen2_subj2, gen2_subj2_children)
. . .
SPC.update_loss_pred()
"""
self.subj_info = subj_info
self.technique = technique
self.all_parents: dict = {}
self.tuning_variables = tuning_variables
self.loss = subj_info.loss
self.pred = subj_info.pred
self.truth = subj_info.truth
self._convert_inputs_list_to_dict()
def _convert_inputs_list_to_dict(self):
self.loss_dict = {disease:self.subj_info.loss[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.pred_dict = {disease:self.subj_info.pred[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.truth_dict = {disease:self.subj_info.truth[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.loss_dict_weighted = self.loss_dict
self.pred_dict_weighted = self.pred_dict
def set_parent_child_relationship(self, parent_name: str='parent_name', child_name_list: list=[]):
self.all_parents[parent_name] = child_name_list
def update_loss_pred(self):
"""
techniques:
            1: coefficient = (1 + parent_loss)
            2: coefficient = (2 * parent_pred)
3: coefficient = (2 * parent_pred)
1: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
2: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
3. loss_new = loss_old * coefficient
"""
for parent_name in self.all_parents:
self._update_loss_for_children(parent_name)
self._convert_outputs_to_list()
def _convert_outputs_to_list(self):
self.loss_new = np.array([self.loss_dict_weighted[disease] for disease in self.subj_info.pathologies])
self.pred_new = np.array([self.pred_dict_weighted[disease] for disease in self.subj_info.pathologies])
def _update_loss_for_children(self, parent_name: str='parent_name'):
parent_loss = self.loss_dict_weighted[parent_name]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
TV = self.tuning_variables[ self.technique ]
if TV['mode'] == 'truth': parent_truth_pred = parent_truth
elif TV['mode'] == 'pred': parent_truth_pred = parent_pred
else: parent_truth_pred = 1.0
if self.technique == 1: coefficient = TV['weight'] * parent_loss + TV['bias']
elif self.technique == 2: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
elif self.technique == 3: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
for child_name in self.all_parents[parent_name]:
new_child_loss = self._measure_new_child_loss(coefficient, parent_name, child_name)
self.loss_dict_weighted[child_name] = new_child_loss
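# The weighted prediction is re-derived from the weighted loss as p = 1 - exp(-loss);
# this assumes the module-level constant e_VALUE (defined outside this excerpt) is Euler's number, np.e.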
self.pred_dict_weighted[child_name] = 1 - np.power(e_VALUE , -new_child_loss)
self.pred_dict[child_name] = 1 - np.power(e_VALUE , -self.loss_dict[child_name])
def _measure_new_child_loss(self, coefficient: float=0.0, parent_name: str='parent_name', child_name: str='child_name'):
TV = self.tuning_variables[ self.technique ]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
if TV['mode'] == 'truth': loss_activated = (parent_truth < 0.5 )
elif TV['mode'] == 'pred': loss_activated = (parent_pred < TV['parent_pred_threshold'] )
else: loss_activated = True
old_child_loss = self.loss_dict_weighted[child_name]
if self.technique == 1: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 2: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 3: new_child_loss = old_child_loss * coefficient
return new_child_loss
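# Illustrative sketch (not part of the original class): how technique 1 re-weights a child loss,
# using hypothetical tuning values.
#     TV = {'mode': 'pred', 'weight': 1.0, 'bias': 1.0, 'parent_pred_threshold': 0.5}
#     parent_loss, parent_pred, child_loss = 0.7, 0.3, 0.4
#     coefficient = TV['weight'] * parent_loss + TV['bias']        # -> 1.7
#     child_loss_new = child_loss * coefficient if parent_pred < TV['parent_pred_threshold'] else child_loss   # -> 0.68
#     child_pred_new = 1 - np.exp(-child_loss_new)                 # weighted prediction re-derived from the loss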
class Measure_InterDependent_Loss_Aim1_1(Parent_Child):
def __init__(self,score: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
score['loss_new'] = score['loss']
score['pred_new'] = score['pred']
self.score = score
self.technique = technique
for subject_ix in tqdm(self.score.index):
Parent_Child.__init__(self, subj_info=self.score.loc[subject_ix], technique=technique, tuning_variables=tuning_variables)
self.set_parent_child_relationship(parent_name='Lung Opacity' , child_name_list=['Pneumonia', 'Atelectasis','Consolidation','Lung Lesion', 'Edema'])
self.set_parent_child_relationship(parent_name='Enlarged Cardiomediastinum', child_name_list=['Cardiomegaly'])
self.update_loss_pred()
self.score.loss_new.loc[subject_ix] = self.loss_new
self.score.pred_new.loc[subject_ix] = self.pred_new
def apply_new_loss_techniques_aim1_1(pathologies: list=[], score: pd.DataFrame.dtypes={}, tuning_variables: dict={}):
L = len(pathologies)
accuracies = np.zeros((4,L))
measured_auc = np.zeros((4,L))
FR = list(np.zeros(4))
for technique in range(4):
# extracting the output predictions
if technique == 0:
FR[technique] = score
output = score.pred
else:
FR[technique] = Measure_InterDependent_Loss_Aim1_1(score=score, technique=technique, tuning_variables=tuning_variables)
output = FR[technique].score.pred_new
# Measuring accuracy
func = lambda x1, x2: [ (x1[j] > 0.5) == (x2[j] > 0.5) for j in range(len(x1))]
pred_acc = score.truth.combine(output,func=func).to_list()
pred_acc = np.array(pred_acc).mean(axis=0)
prediction_table = np.stack(output)
truth_table = np.stack(score.truth)
for d in range(prediction_table.shape[1]):
fpr, tpr, thresholds = roc_curve(truth_table[:,d], prediction_table[:,d], pos_label=1)
measured_auc[technique, d] = auc(fpr, tpr)
accuracies[technique,:] = np.floor( pred_acc*1000 ) / 10
class Outputs:
def __init__(self,accuracies, measured_auc, FR, pathologies):
self.accuracy = self._converting_to_dataframe(input_table=accuracies , columns=pathologies)
self.auc = self._converting_to_dataframe(input_table=measured_auc, columns=pathologies)
self.details = FR
self.pathologies = pathologies
def _converting_to_dataframe(self, input_table, columns):
df = pd.DataFrame(input_table, columns=columns)
df['technique'] = ['original','1','2','3']
df = df.set_index('technique').T
return df
return Outputs(accuracies=accuracies, measured_auc=measured_auc, FR=FR,pathologies=pathologies)
def apply_nan_back_to_truth(truth, how_to_treat_nans):
# changing the samples with an uncertain truth label to nan
truth[ truth == -10] = np.nan
# how to treat the nan labels in the original dataset before measuring the average accuracy
if how_to_treat_nans == 'ignore': truth[ truth == -5] = np.nan
elif how_to_treat_nans == 'pos': truth[ truth == -5] = 1
elif how_to_treat_nans == 'neg': truth[ truth == -5] = 0
return truth
def measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans):
""" prediction & truth: num_samples x num_classes """
pred_classes = prediction > 0.5
# truth_nan_applied = self._truth_with_nan_applied()
truth_nan_applied = apply_nan_back_to_truth(truth=truth, how_to_treat_nans=how_to_treat_nans)
# measuring the binary truth labels (the nan samples will be fixed below)
truth_binary = truth_nan_applied > 0.5
truth_pred_compare = (pred_classes == truth_binary).astype(float)
# replacing the nan samples back to their nan value
truth_pred_compare[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average accuracy over all samples after ignoring the nan samples
accuracy = np.nanmean(truth_pred_compare, axis=0)*100
# safety measure: if a class's overall accuracy is itself nan, zero it out; otherwise the integer formatting below turns into very long floats
accuracy[np.isnan(accuracy)] = 0
accuracy = (accuracy*10).astype(int)/10
return accuracy
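# Illustrative toy example (not from the original code): with how_to_treat_nans='ignore',
# a missing label (-5) is excluded from that class's mean accuracy.
#     truth      = np.array([[1.0, -5.0], [0.0, 1.0]])
#     prediction = np.array([[0.9,  0.2], [0.4, 0.8]])
#     measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans='ignore')  # -> array([100., 100.])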
def measure_mean_uncertainty_chexpert(truth=np.array([]), uncertainty=np.array([]), how_to_treat_nans='ignore'):
""" uncertainty & truth: num_samples x num_classes """
# adding the nan values back to arrays
truth_nan_applied = apply_nan_back_to_truth(truth, how_to_treat_nans)
# replacing the nan samples back to their nan value
uncertainty[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average uncertainty over all samples after ignoring the nan samples
uncertainty_mean = np.nanmean(uncertainty , axis=0)
# safety measure: if a class's overall uncertainty is itself nan, zero it out; otherwise the integer formatting below turns into very long floats
uncertainty_mean[np.isnan(uncertainty_mean)] = 0
uncertainty_mean = (uncertainty_mean*1000).astype(int)/1000
return uncertainty_mean
class Measure_Accuracy_Aim1_2():
def __init__(self, predict_accuracy_mode: bool=False , model: tf.keras.models.Model.dtype='' , generator=tf.keras.preprocessing.image.ImageDataGenerator() , how_to_treat_nans: str='ignore', uncertainty_type: str='std'):
"""
how_to_treat_nans:
ignore: ignore the nan samples when measuring the average accuracy
pos: treat the nan (missing-label) samples as positive
neg: treat the nan (missing-label) samples as negative """
self.predict_accuracy_mode = predict_accuracy_mode
self.how_to_treat_nans = how_to_treat_nans
self.generator = generator
self.model = model
self.uncertainty_type = uncertainty_type
self._setting_params()
def _setting_params(self):
self.full_data_length, self.num_classes = self.generator.labels.shape
self.batch_size = self.generator.batch_size
self.number_batches = int(np.ceil(self.full_data_length/self.batch_size))
self.truth = self.generator.labels.astype(float)
def loop_over_whole_dataset(self):
probs = np.zeros(self.generator.labels.shape)
# Looping over all batches
# Keras_backend.clear_session()
self.generator.reset()
np.random.seed(1)
for batch_index in tqdm(range(self.number_batches),disable=False):
# extracting the indexes for batch "batch_index"
self.generator.batch_index = batch_index
indexes = next(self.generator.index_generator)
# print(' extracting data -------')
self.generator.batch_index = batch_index
x, _ = next(self.generator)
# print(' predicting the labels -------')
probs[indexes,:] = self.model.predict(x,verbose=0)
# Measuring the accuracy over whole augmented dataset
if self.predict_accuracy_mode:
accuracy = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=probs.copy(), how_to_treat_nans=self.how_to_treat_nans)
return probs, accuracy
def loop_over_all_augmentations(self,number_augmentation: int=0):
self.number_augmentation = number_augmentation
self.probs_all_augs_3d = np.zeros((1 + number_augmentation , self.full_data_length , self.num_classes))
self.accuracy_all_augs_3d = np.zeros((1 + number_augmentation , self.num_classes))
# Looping over all augmentation scenarios
for ix_aug in range(number_augmentation):
print(f'augmentation {ix_aug}/{number_augmentation}')
probs, accuracy = self.loop_over_whole_dataset()
self.probs_all_augs_3d[ ix_aug,...] = probs
self.accuracy_all_augs_3d[ix_aug,...] = accuracy
# measuring the average probability over all augmented data
self.probs_avg_2d = np.mean( self.probs_all_augs_3d, axis=0)
if self.uncertainty_type == 'std':
self.probs_std_2d = np.std(self.probs_all_augs_3d, axis=0)
# Measuring the accuracy of the new estimated probability for each sample over all augmented data
# self.accuracy_final = self._measure_mean_accruacy(self.probs_avg_2d)
# self.uncertainty_final = self._measure_mean_std(self.probs_std_2d)
self.accuracy_final = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=self.probs_avg_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
self.uncertainty_final = measure_mean_uncertainty_chexpert(truth=self.truth.copy(), uncertainty=self.probs_std_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
def apply_technique_aim_1_2(how_to_treat_nans='ignore', data_generator='', data_generator_aug='', model='', number_augmentation=3, uncertainty_type='std'):
print('running the evaluation on original non-augmented data')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
probs_2d_orig, old_accuracy = MA.loop_over_whole_dataset()
print(' running the evaluation on augmented data including the uncertainty measurement')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator_aug,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
MA.loop_over_all_augmentations(number_augmentation=number_augmentation)
final_results = { 'old-accuracy': old_accuracy,
'new-accuracy': MA.accuracy_final,
'std' : MA.uncertainty_final}
return probs_2d_orig, final_results, MA
def estimate_maximum_and_change(all_accuracies=np.array([]), pathologies=[]):
columns = ['old-accuracy', 'new-accuracy', 'std']
# creating a dataframe from accuracies
df = pd.DataFrame(all_accuracies , index=pathologies)
# adding the 'maximum' & 'change' columns
df['maximum'] = df.columns[ df.values.argmax(axis=1) ]
df['change'] = df[columns[1:]].max(axis=1) - df[columns[0]]
# replacing "0" values to "--" for readability
df.maximum[df.change==0.0] = '--'
df.change[df.change==0.0] = '--'
return df
# def apply_technique_aim_1_2_with_dataframe(how_to_treat_nans='ignore', pathologies=[], data_generator='', data_generator_aug='', model='', uncertainty_type='std'):
# outputs, MA = apply_technique_aim_1_2(how_to_treat_nans=how_to_treat_nans, data_generator=data_generator, data_generator_aug=data_generator_aug, model=model, uncertainty_type=uncertainty_type)
# df = estimate_maximum_and_change(all_accuracies=outputs, pathologies=pathologies)
# return df, outputs, MA
""" crowdsourcing technique aim 1_3 """
def apply_technique_aim_1_3(data={}, num_simulations=20, feature_columns=[], ARLS={}):
def assigning_worker_true_labels(seed_num=1, true=[], labelers_strength=0.5):
# setting the random seed
# np.random.seed(seed_num)
# number of samples and labelers/workers
num_samples = true.shape[0]
# finding a random number for each instance
true_label_assignment_prob = np.random.random(num_samples)
# samples that will have an inaccurate true label
false_samples = true_label_assignment_prob < 1 - labelers_strength
# measuring the new labels for each labeler/worker
worker_true = true > 0.5
worker_true[ false_samples ] = ~ worker_true[ false_samples ]
return worker_true
def assigning_random_labelers_strengths(num_labelers=10, low_dis=0.3, high_dis=0.9):
labeler_names = [f'labeler_{j}' for j in range(num_labelers)]
# if num_labelers > 1:
# ls1 = np.random.uniform( low = 0.1,
# high = 0.3,
# size = int(num_labelers/2))
# ls2 = np.random.uniform( low = 0.7,
# high = 0.9,
# size = num_labelers - int(num_labelers/2))
# labelers_strength = np.concatenate((ls1 , ls2),axis=0)
# else:
labelers_strength = np.random.uniform( low = low_dis,
high = high_dis,
size = num_labelers)
return pd.DataFrame( {'labelers_strength': labelers_strength}, index = labeler_names)
# TODO: repeat this for multiple seeds and average the results
np.random.seed(11)
"""Extract single tone
Quadratic Interpolation of Spectral Peaks
https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
"""
import numpy as np
def gaussianwindow(x, sigma=0.2):
"""Gaussian FFT window
Parameters
----------
sigma : float
"""
i = np.arange(len(x))
m = (len(x) - 1) / 2
return x * np.exp(-((i - m) ** 2) / (2 * (sigma * len(x)) ** 2))
def extract_singletone(x, fs, approx_freq=None, search=0.05):
"""Extract single tone from a time domain signal
Finds the frequency and amplitude of the largest amplitude tone in the
time domain signal.
Parameters
----------
x : array-like
time domain signal
fs : float
sample frequency
approx_freq : float
approximate frequency to search for
if None, find the maximum amplitude
search : float
search ± percentage around approx_freq if given
Returns
-------
single_tone : 2-tuple of float
estimated parameters of single tone (frequency, amplitude)
"""
n_samples = len(x)
mid = n_samples // 2
xw = gaussianwindow(x)
coherent_gain = gaussianwindow(np.ones(n_samples)).sum() / n_samples
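# Coherent gain is the mean of the window samples; dividing by it later compensates the
# amplitude attenuation introduced by windowing.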
spectrum = np.abs(np.fft.fft(xw)[:mid])
fmin_bin = 0
fmax_bin = mid
if approx_freq is not None:
fmin = approx_freq * (1 - search)
fmax = approx_freq * (1 + search)
df = fs / n_samples
fmin_bin = np.floor(fmin / df).astype(int)
fmin_bin = 0 if fmin_bin < 0 else fmin_bin
fmax_bin = np.ceil(fmax / df).astype(int) + 1
fmax_bin = n_samples if fmax_bin > mid else fmax_bin
tone_bin = fmin_bin + spectrum[fmin_bin:fmax_bin].argmax()
a = np.log10(spectrum[tone_bin - 1])
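# --- Hedged sketch (not from the original file, which is truncated above): the quadratic
# (parabolic) interpolation step referenced by the CCRMA link. With alpha, beta, gamma the
# log-magnitudes of the bins around the peak, the fractional bin offset is
# p = 0.5*(alpha - gamma)/(alpha - 2*beta + gamma) and the interpolated peak height is
# beta - 0.25*(alpha - gamma)*p. A minimal standalone version, reusing the names defined above:
def _interpolate_peak_sketch(spectrum, tone_bin, fs, n_samples, coherent_gain):
    alpha = np.log10(spectrum[tone_bin - 1])
    beta = np.log10(spectrum[tone_bin])
    gamma = np.log10(spectrum[tone_bin + 1])
    p = 0.5 * (alpha - gamma) / (alpha - 2 * beta + gamma)
    freq = (tone_bin + p) * fs / n_samples
    # undo the log, the one-sided FFT scaling and the window's coherent gain
    amplitude = (10 ** (beta - 0.25 * (alpha - gamma) * p)) * 2 / (n_samples * coherent_gain)
    return freq, amplitude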
import numpy as np
def moment_2d(array, var=None, weights=None):
"""Compute zeroth and first order moments of an array.
Uncertainties are `None` if no variance is provided.
Arguments:
array (array-like):
.
var (array-like):
Array of variances with the same shape as `array` argument. If not provided, all uncertainties will be
`None`-type.
weights (array-like):
Weights to apply during the computation. If not provided, all entries of the `array` and `var` input are
weighted equally.
Returns:
moment0 (float):
Zeroth order Moment.
std_moment0 (float):
Uncertainty of `moment0`.
moment1_x (float):
First order moment in x-direction.
std_moment1_x (float):
Uncertainty of `moment1_x`. Will be None if `var` is not provided.
moment1_y (float):
First order moment in y-direction.
std_moment1_y (float):
Uncertainty of `moment1_y`. Will be None if `var` is not provided.
"""
# Apply weights to array and variance map
try:
weighted_array = np.multiply(weights, array)
except TypeError:
weighted_array = array
try:
weighted_var = np.multiply(weights, var)
except TypeError:
weighted_var = var
# Initialize arrays
y, x = np.mgrid[:array.shape[0], :array.shape[1]]
# Compute zeroth moment
moment0 = np.sum(weighted_array)
try:
var_moment0 = np.sum(np.square(weighted_var))
std_moment0 = np.sqrt(var_moment0)
except TypeError:
std_moment0 = None
# Compute first moments
try:
moment1_x = np.average(x, weights=weighted_array, axis=(0, 1))
moment1_y = np.average(y, weights=weighted_array, axis=(0, 1))
except ZeroDivisionError as e:
if (np.array(array.shape) == 0).any():
raise ValueError(f"Input array with shape {array.shape} has no extent and cannot be averaged!")
else:
raise e
# Compute uncertainties
try:
var_moment1_x = np.sum(np.square(np.multiply(weighted_var, np.subtract(x, moment1_x)))) / np.square(moment0)
std_moment1_x = np.sqrt(var_moment1_x)
var_moment1_y = np.sum(np.square(np.multiply(weighted_var, np.subtract(y, moment1_y)))) / np.square(moment0)
std_moment1_y = np.sqrt(var_moment1_y)
except TypeError:
std_moment1_x = None
std_moment1_y = None
return moment0, std_moment0, moment1_x, std_moment1_x, moment1_y, std_moment1_y
def gaussian_weights(shape, fwhm, center=None, normalization='unity'):
# Set center to the central pixel, if not provided
if center is None:
center = (shape[0]-1) / 2, (shape[1]-1) / 2
# Evaluate distances from center on a grid
y, x = np.mgrid[:shape[0], :shape[1]]
r_squared = (y - center[0])**2 + (x - center[1])**2
# Compute amplitude of Gaussian distribution on the grid
var = np.square(fwhm / 2.35)
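# 2.35 approximates the FWHM-to-sigma conversion factor 2*sqrt(2*ln(2)) ~= 2.3548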
weights = np.exp(-np.divide(r_squared, 2 * var)) / np.sqrt(2 * np.pi * var)
return normalize_weights(weights=weights, normalization=normalization)
def uniform_weights(shape, normalization='unity'):
return normalize_weights(np.ones(shape=shape), normalization=normalization)
def normalize_weights(weights, normalization=None):
"""Normalize an array of weights.
Args:
weights (array-like):
Array of weights.
normalization:
Any value to normalize the sum of the weights to. Can be set to `'unity'` to allow for flux conservation.
Returns:
weights (array-like):
Normalized weights
"""
# Compute number of pixels to normalize to this
if normalization == 'unity':
normalization = weights.shape[0] * weights.shape[1]
try:
return weights * np.divide(normalization, np.sum(weights))
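# --- Hedged usage sketch (not part of the original module; note that normalize_weights is
# truncated above, so this assumes it returns the weights rescaled to the requested sum):
# estimate the weighted centroid of a small image patch.
#     patch = np.random.rand(11, 11)
#     w = gaussian_weights(patch.shape, fwhm=4.0)
#     m0, _, m1x, _, m1y, _ = moment_2d(patch, weights=w)
#     # m1x, m1y are the Gaussian-weighted first moments (centroid) in pixel coordinates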
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 14:12:12 2018
Switchback square
@author: oddvi
"""
import matplotlib.pyplot as plt
import shapely.geometry
import shapely.affinity
import shapely.ops
import patternGenerators as gen
def make_square_switchback_gen_reg(cut_width, flexure_width, junction_length, edge_space, num_flex, side_cut='default'):
"""
"""
import numpy as np
a = cut_width; b = flexure_width; c = junction_length; d = edge_space
if side_cut == 'default': # x displacement along diagonal cut
ax = cut_width/(2**0.5)/2
else:
ax = side_cut
dx = a+b # displacement x direction
dy = dx # displacement y direction
h0 = a+b/2+c # height in triangle
l1 = b/2 # height baseline -> flexure bottom
l2 = a+b/2 # height baseline -> flexure top
x = np.array([])
y = np.array([])
x = np.append(x, 0) # 0
y = np.append(y, h0) # 0
x = np.append(x, -h0+l2+ax/2) # 1
y = np.append(y, l2+ax/2) # 1
x = np.append(x, -h0+l2+ax) # 2
y = np.append(y, l2) # 2
x = np.append(x, -h0+ax) # 3
# Library
import torch
import numpy as np
import time
import socket
import threading
from asteroid.models import BaseModel
from denoiser.demucs import DemucsStreamer
from denoiser.utils import deserialize_model
from npsocket import SocketNumpyArray
from real_time_omlsa.omlsa import *
import json
inport = 9999
outport = 9998
parameter_port = 9997
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
MODEL_PATH = "denoiser/denoiser.th"
# Load Model
pkg = torch.load(MODEL_PATH,map_location=torch.device(device))
if 'model' in pkg:
if 'best_state' in pkg:
pkg['model']['state'] = pkg['best_state']
model = deserialize_model(pkg['model'])
else:
model = deserialize_model(pkg)
model.eval()
pytorch_total_params = sum(p.numel() for p in model.parameters())
from prettytable import PrettyTable
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
table.add_row([name, param])
total_params+=param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
count_parameters(model)
server_parameter_receiver = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
server_parameter_receiver.bind(("127.0.0.1",parameter_port))
# Define Server Socket (receiver)
server_denoiser_receiver = SocketNumpyArray()
server_denoiser_receiver.initialize_receiver(inport)
server_denoiser_sender = SocketNumpyArray()
# server_parameter_sender = SocketNumpyArray()
# GLOBAL_VARIABLES
DRY = 0.04
frame_num = 1
sample_rate = 16000
CONNECTED = 0
DCCRN_model = BaseModel.from_pretrained("JorisCos/DCCRNet_Libri1Mix_enhsingle_16k")
DCCRN_model.eval()
# Threads
audio_buffer = []
threads = []
MIX = 40
Denoiser = "DSP EQ"
sos_list = ""
def receive_audio_parameter():
global MIX,Denoiser,sos_list,audio_buffer
while True:
received = server_parameter_receiver.recvfrom(1024)
json_obj = json.loads(received[0].decode('utf-8'))
print(json_obj)
MIX = json_obj.get("MIX")
Denoiser = json_obj.get("Denoiser")
sos_list = json_obj.get("sos")
def receive_audio():
global audio_buffer
while True:
frame = server_denoiser_receiver.receive_array()
audio_buffer.append(frame)
def denoiser_live():
global server_denoiser_sender
global audio_buffer
global CONNECTED
print("denoiser_start")
first = True
current_time = 0
last_log_time = 0
FRAME_LENGTH = 20
log_delta = 10
sr_ms = sample_rate / 1000
streamer = DemucsStreamer(model, dry=DRY, num_frames=frame_num)
stride_ms = streamer.stride / sr_ms
while True:
if len(audio_buffer) > 0:
start = time.time()
if "DL" or "Demucs" in Denoiser:
while len(audio_buffer) > FRAME_LENGTH*5:
del(audio_buffer[0:FRAME_LENGTH])
print("Processing speed is too slow. Switch to DSP denoiser or remove denoiser")
if len(audio_buffer)>=FRAME_LENGTH:
frame = audio_buffer[0:FRAME_LENGTH]
del(audio_buffer[0:FRAME_LENGTH])
frame = np.concatenate(frame)
if current_time > last_log_time + log_delta:
last_log_time = current_time
streamer.reset_time_per_frame()
last_log_time = current_time
length = streamer.total_length if first else streamer.stride
first = False
current_time += length / sample_rate
out = frame
frame = torch.from_numpy(frame).mean(dim=1).to(device)
with torch.no_grad():
out = streamer.feed(frame[None])[0]
if not out.numel():
continue
out.clamp_(-1, 1)
out = out.cpu().numpy()
if CONNECTED == 0:
print("initialized sender")
time.sleep(2)
server_denoiser_sender.initialize_sender('127.0.0.1', outport)
CONNECTED = 1
else:
server_denoiser_sender.send_numpy_array(out)
# print(time.time()-start)
elif "DCCRN" in Denoiser:
while len(audio_buffer) > FRAME_LENGTH*5:
del(audio_buffer[0:FRAME_LENGTH])
print("Processing speed is too slow. Switch to DSP denoiser or remove denoiser")
if len(audio_buffer)>=FRAME_LENGTH:
frame = audio_buffer[0:FRAME_LENGTH]
del(audio_buffer[0:FRAME_LENGTH])
frame = np.concatenate(frame)
frame = torch.from_numpy(frame).mean(dim=1).to(device)
out = DCCRN_model(frame)
out = out.cpu().detach().numpy()
out = np.transpose(out)
""" Tests for coordinate transforms """
import pytest
import numpy as np
import spharpy.samplings as samplings
from spharpy.samplings import spherical_voronoi
def test_sph2cart():
rad, theta, phi = 1, np.pi/2, 0
x, y, z = samplings.sph2cart(rad, theta, phi)
assert (1, 0, 0) == pytest.approx((x, y, z), abs=2e-16, rel=2e-16)
def test_sph2cart_array():
rad = np.ones(6)
theta = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2, 0, np.pi])
phi = np.array([0, np.pi, np.pi/2, np.pi*3/2, 0, 0])
x, y, z = samplings.sph2cart(rad, theta, phi)
xx = np.array([1, -1, 0, 0, 0, 0])
yy = np.array([0, 0, 1, -1, 0, 0])
zz = np.array([0, 0, 0, 0, 1, -1])
np.testing.assert_allclose(xx, x, atol=1e-15)
np.testing.assert_allclose(yy, y, atol=1e-15)
np.testing.assert_allclose(zz, z, atol=1e-15)
def test_cart2sph_array():
x = np.array([1, -1, 0, 0, 0, 0])
y = np.array([0, 0, 1, -1, 0, 0])
z = np.array([0, 0, 0, 0, 1, -1])
rr, tt, pp = samplings.cart2sph(x, y, z)
rad = np.ones(6)
theta = np.array([np.pi/2, np.pi/2, np.pi/2, np.pi/2, 0, np.pi])
phi = np.array([0, np.pi, np.pi/2, np.pi*3/2, 0, 0])
np.testing.assert_allclose(rad, rr, atol=1e-15)
np.testing.assert_allclose(phi, pp, atol=1e-15)
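# --- Hedged reference sketch (not part of the original test module): the convention implied by
# the assertions above is theta = colatitude (angle from +z) and phi = azimuth, i.e.
# x = r*sin(theta)*cos(phi), y = r*sin(theta)*sin(phi), z = r*cos(theta).
def _sph2cart_sketch(r, theta, phi):
    x = r * np.sin(theta) * np.cos(phi)
    y = r * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    return x, y, z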
from kiri.models import Vectorisation, QA, Summarisation, Emotion, Classification
import numpy as np
import torch
qa_example = {
"q1": "Where does Sally live?",
"q2": "How long has Sally lived in London?",
"q2_conv": "How long has she lived there?",
"q3": "Where did Sally live prior to London?",
"q3_conv": "Where did she live before?",
"c": "Sally has been living in London for 3 years. Previously, Sally lived in Liverpool.",
"a1": "London",
"a2": "3 years",
"a3": "Liverpool"
}
summary_context = """<NAME> FRS (/ˈiːlɒn/ EE-lon; born June 28, 1971) is a business magnate, industrial designer and engineer.[6] He is the founder, CEO, CTO and chief designer of SpaceX; early investor,[b] CEO and product architect of Tesla, Inc.; founder of The Boring Company; co-founder of Neuralink; and co-founder and initial co-chairman of OpenAI. He was elected a Fellow of the Royal Society (FRS) in 2018.[9][10] Also that year, he was ranked 25th on the Forbes list of The World's Most Powerful People,[11] and was ranked joint-first on the Forbes list of the Most Innovative Leaders of 2019.[12] As of December 19, 2020, Musk’s net worth was estimated by Forbes to US$153.5 billion,[1][13] making him the second-richest person in the world, behind <NAME>.[14]"""
classify_context = """I am mad because my product broke the first time I used it"""
classify_labels = ["product issue", "nature"]
classify_correct = "product issue"
device = "cuda" if torch.cuda.is_available() else "cpu"
vectorise = Vectorisation(local=True, device=device)
qa = QA(local=True, device=device)
emotion = Emotion(local=True, device=device)
summarise = Summarisation(local=True, device=device)
classify = Classification(local=True, device=device)
def test_vectorisation_single():
out = vectorise("This is a sample thing.")
assert isinstance(out, np.ndarray), "Not an array"
assert type(out[0]) == np.dtype("float32")
"""
The ProblemManager contains all of the
different classes of problems that windse can solve
"""
import __main__
import os
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
from dolfin import *
import numpy as np
import time
import scipy.interpolate as interp
import glob
### Import the cumulative parameters ###
from windse import windse_parameters
# from memory_profiler import memory_usage
### Check if we need dolfin_adjoint ###
if windse_parameters.dolfin_adjoint:
from dolfin_adjoint import *
class GenericProblem(object):
"""
A GenericProblem contains on the basic functions required by all problem objects.
Args:
domain (:meth:`windse.DomainManager.GenericDomain`): a windse domain object.
windfarm (:meth:`windse.WindFarmManager.GenericWindFarmm`): a windse windfarm object.
function_space (:meth:`windse.FunctionSpaceManager.GenericFunctionSpace`): a windse function space object.
boundary_conditions (:meth:`windse.BoundaryManager.GenericBoundary`): a windse boundary object.
"""
def __init__(self,domain,windfarm,function_space,boundary_data):
### save a reference of option and create local version specifically of domain options ###
self.params = windse_parameters
self.dom = domain
self.farm = windfarm
self.fs = function_space
self.bd = boundary_data
self.tf_first_save = True
self.fprint = self.params.fprint
self.tag_output = self.params.tag_output
self.debug_mode = self.params.debug_mode
### Update attributes based on params file ###
for key, value in self.params["problem"].items():
setattr(self,key,value)
self.record_time = self.params["optimization"].get("record_time",0.0)
if isinstance(self.record_time,str):
self.record_time = 0.0
def DebugOutput(self):
if self.debug_mode:
# integral of nu_t
int_nut = assemble(self.nu_T*dx)/self.dom.volume
self.tag_output("int_nu_T", int_nut)
# integral of tf
if self.dom.dim == 3:
e1 = Constant((1,0,0)); e2 = Constant((0,1,0)); e3 = Constant((0,0,1));
else:
e1 = Constant((1,0)); e2 = Constant((0,1));
int_tf_x = assemble(inner(self.tf,e1)*dx)/self.dom.volume
self.tag_output("int_tf_x", int_tf_x)
int_tf_y = assemble(inner(self.tf,e2)*dx)/self.dom.volume
self.tag_output("int_tf_y", int_tf_y)
if self.dom.dim == 3:
int_tf_z = assemble(inner(self.tf,e3)*dx)/self.dom.volume
self.tag_output("int_tf_z", int_tf_z)
if self.farm.turbine_method == 'alm':
self.tag_output("min_chord", np.min(self.chord))
self.tag_output("max_chord", np.max(self.chord))
self.tag_output("avg_chord", np.mean(self.chord))
self.tag_output("min_cl", np.min(self.cl))
self.tag_output("max_cl", | np.max(self.cl) | numpy.max |
from typing import Callable
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given
from numpy.testing import assert_allclose, assert_array_equal
import mygrad as mg
from mygrad import Tensor
from tests.custom_strategies import tensors
# Make sure we actually test the correctness of the
# in-place syntaxes, e.g. `x += y`, and not just
# `x.__iadd__(y)`
#
# Also, make sure that augmented updates on tensors
# match behavior of numpy
def test_iadd_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an += 2.0
bt = at
vt = at[...]
at += 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_isub_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an -= 2.0
bt = at
vt = at[...]
at -= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_imul_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an *= 2.0
bt = at
vt = at[...]
at *= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_idiv_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an /= 2.0
bt = at
vt = at[...]
at /= 2.0
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_ipow_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 2.1
bt = at
vt = at[...]
at **= 2.1
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_isqr_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 2
bt = at
vt = at[...]
at **= 2
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
def test_ipow_1_mirrors_numpy():
an = np.array([3.0, 4.0])
at = mg.Tensor(an)
bn = an
vn = an[...]
an **= 1
bt = at
vt = at[...]
at **= 1
assert_array_equal(an, at)
assert_array_equal(bn, bt)
assert_array_equal(vn, vt)
@pytest.mark.parametrize("inplace_on_view", [True, False])
def test_raising_during_in_place_op_doesnt_corrupt_graph(inplace_on_view: bool):
x = mg.arange(1.0, 5.0)
y_base = 2 * x
y = y_base[...] if inplace_on_view else y_base
w = y[...]
with pytest.raises(ValueError):
y[:2] = y # shape mismatch
(2 * w).backward()
assert (y.base is y_base) if inplace_on_view else (y.base is None)
assert w.base is y_base
assert np.shares_memory(w, y)
assert_allclose(w.grad, 2 * np.ones_like(y))
assert_allclose(y_base.grad, 2 * np.ones_like(y_base))
assert_allclose(y.grad, 2 * np.ones_like(y))
assert_allclose(x.grad, 4 * np.ones_like(y))
@pytest.mark.parametrize("x_constant", [False, True])
@pytest.mark.parametrize("y_constant", [False, True])
@pytest.mark.parametrize("z_constant", [False, True])
def test_inplace_update_constant_dictated_by_target(
x_constant: bool, y_constant: bool, z_constant: bool
):
x = mg.tensor([1.0], constant=x_constant)
y = mg.tensor([1.0], constant=y_constant)
z = mg.tensor([1.0], constant=z_constant)
assert np.multiply(x, y, out=z).constant is z_constant
@pytest.mark.parametrize("inplace_on_view", [False, True])
@pytest.mark.parametrize("x_constant", [False, True])
@pytest.mark.parametrize("y_constant", [False, True])
def test_inplace_update_constant_propagates_to_views(
inplace_on_view: bool, x_constant: bool, y_constant: bool
):
x = mg.arange(1.0, 5.0, constant=x_constant)
y = mg.zeros_like(x, constant=y_constant)
if inplace_on_view:
x = x[...]
dangling_view = x[:2]
assert x.constant is x_constant
assert dangling_view.constant is x_constant
x[...] = y
assert x.constant is x_constant
assert dangling_view.constant is x.constant
@pytest.mark.parametrize("inplace_on_view", [True, False])
@pytest.mark.parametrize("constant", [True, False])
def test_in_place_op_propagates_to_views(constant: bool, inplace_on_view: bool):
x = mg.arange(1.0, 5.0, constant=constant)
y_base = +x
y = y_base[...] if inplace_on_view else y_base
view1 = y[...]
view2 = view1[...] # view of view
y[:2] = -1 # should mutate all views
assert y_base.base is None
if inplace_on_view:
assert y.base is y_base
assert view1.base is y_base
assert view2.base is y_base
assert_array_equal(x, mg.arange(1.0, 5.0))
assert_array_equal(y_base, [-1.0, -1.0, 3.0, 4.0])
assert_array_equal(y_base, y)
assert_array_equal(y_base, view1)
assert_array_equal(y_base, view2)
@given(tensors(shape=(4,), constant=False))
@pytest.mark.parametrize("inplace_on_view", [True, False])
def test_simple_backprop_from_view_post_upstream_mutation(
inplace_on_view: bool, x: Tensor
):
y_base = +x
y = y_base[...] if inplace_on_view else y_base
z = y[...]
y[:2] = 0 # base is mutated
# downstream view should carry appropriate info
# for backprop post-mutation
w = mg.ones_like(z)
(w * z).backward()
assert_array_equal(y, y_base)
assert_array_equal(z, y_base)
assert_array_equal(w.grad, [0.0, 0.0, *y_base.data[-2:]])
assert_array_equal(z.grad, np.ones_like(y_base))
assert_array_equal(y_base.grad, np.ones_like(y_base))
assert_array_equal(y.grad, np.ones_like(y_base))
assert_array_equal(x.grad, [0.0, 0.0, 1.0, 1.0])
@given(tensors(shape=(4,), elements=st.floats(-10, 10), constant=False))
@pytest.mark.parametrize("inplace_on_view", [True, False])
def test_mutation_doesnt_corrupt_upstream_op(inplace_on_view: bool, x: Tensor):
y_base = +x
y = y_base[...] if inplace_on_view else y_base
view = y[...]
# z = x**4
z = mg.multiply_sequence(x, y, view, view[...])
y[:2] = 0 # shouldn't change backprop through z
z.backward() # dz/dx = 4 * x ** 3
assert_allclose(z, x.data ** 4)
assert_array_equal(view, y)
assert_allclose(z.grad, np.ones_like(y))
assert_allclose(x.grad, 4 * np.asarray(x) ** 3)
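# --- Hedged aside (not from the original test suite): the base/view semantics exercised here
# mirror NumPy's own, e.g.
#     base = np.arange(4.0); view = base[...]
#     assert view.base is base and np.shares_memory(view, base)
#     base[:2] = 0      # the mutation is visible through the view
#     assert view[0] == 0.0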
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_almost_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_almost_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_almost_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_almost_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_almost_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_almost_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80)
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import absolute_import
import numpy as np
from nipy.algorithms.graph.field import field_from_coo_matrix_and_data
from ..hierarchical_parcellation import hparcel
from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset
from ..parcellation import MultiSubjectParcellation
from ..discrete_domain import grid_domain_from_binary_array
def test_parcel_interface():
""" Simply test parcellation interface
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
tmp = np.array([np.sum(u == k) for k in range(nb_parcel)])
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, u)
assert msp.nb_parcel == nb_parcel
assert msp.nb_subj == 1
assert (msp.population().ravel() == tmp).all()
def test_parcel_interface_multi_subj():
""" test parcellation interface, with multiple subjects
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
nb_subj = 5
v = []
for s in range(nb_subj):
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
v.append(u)
v = np.array(v).T
tmp = np.array([np.sum(v == k, 0) for k in range(nb_parcel)])
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, v)
assert msp.nb_parcel == nb_parcel
assert msp.nb_subj == nb_subj
assert (msp.population() == tmp).all()
def test_parcel_feature():
""" Simply test parcellation feature interface
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
data = np.random.randn(np.prod(shape), 1)
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, u)
msp.make_feature('data', data)
assert msp.get_feature('data').shape == (nb_parcel, 1)
# test with a copy
msp2 = msp.copy()
assert (msp2.get_feature('data') == msp2.get_feature('data')).all()
# test a multi_dimensional feature
dim = 4
msp.make_feature('new', np.random.randn(np.prod(shape), 1, dim))
assert msp.get_feature('new').shape == (nb_parcel, 1, dim)
def test_parcel_feature_multi_subj():
""" Test parcellation feature interface with multiple subjects
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
nb_subj = 5
v = []
for s in range(nb_subj):
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
v.append(u)
v = np.array(v).T
msp = MultiSubjectParcellation(domain, u, v)
# test a multi_dimensional feature
# dimension 1
msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj))
assert msp.get_feature('data').shape == (nb_parcel, nb_subj)
#dimension>1
dim = 4
msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj, dim))
assert msp.get_feature('data').shape == (nb_parcel, nb_subj, dim)
# msp.features['data'] has been overridden
assert list(msp.features.keys()) == ['data']
def test_parcel_hierarchical():
"""Test the algorithm for hierrachical parcellation
"""
# step 1: generate some synthetic data
n_subj = 10
shape = (30, 30)
dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape)
# step 2 : prepare all the information for the parcellation
nb_parcel = 10
domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3))
ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1))
# step 3 : run the algorithm
Pa = hparcel(domain, ldata, nb_parcel)
# step 4: look at the results
Label = Pa.individual_labels
control = True
for s in range(n_subj):
control *= (np.unique(Label[:, s]) == np.arange(nb_parcel)).all()
assert(control)
def test_prfx():
"""Test the ability to construct parcel features and random effects models
"""
# step 1: generate some synthetic data
n_subj = 10
shape = (30, 30)
dataset = surrogate_2d_dataset(n_subj=n_subj, shape=shape)
# step 2 : prepare all the information for the parcellation
nb_parcel = 10
domain = grid_domain_from_binary_array(dataset[0] ** 2, np.eye(3))
ldata = np.reshape(dataset, (n_subj, np.prod(shape), 1))
# step 3 : run the algorithm
Pa = hparcel(domain, ldata, nb_parcel)
pdata = Pa.make_feature('functional',
np.rollaxis(np.array(ldata), 1, 0))
one_sample = np.squeeze(pdata.mean(0) / pdata.std(0))
assert np.shape(one_sample)
"""Main module."""
__authors__ = '<NAME>, <NAME>'
__version__ = '1.0'
__date__ = '9/10/2017'
import json
import os.path
import pickle
import random
import urllib
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn import svm
from sklearn.metrics import accuracy_score
from textblob import TextBlob
import matplotlib.pyplot as plt
import requests
import numpy as np
SETTINGS_PATH = 'settings.json'
RAW_PATH = 'data/raw.json'
STORIES_PATH = 'data/with_stories.json'
LABELS_PATH = 'data/with_labels.json'
SENTIMENTS_PATH = 'data/with_sentiments.json'
MNB_PATH = 'models/mnb.pkl'
SVM_PATH = 'models/svm.pkl'
COUNT_VECT_PATH = 'models/count_vect.pkl'
TFIDF_VECT_PATH = 'models/tfidf_vect.pkl'
BASE_URI = 'http://api.nytimes.com/svc/mostpopular/v2'
TYPE = 'mostviewed'
SECTION = 'all-sections'
TIME_PERIOD = '1'
RESPONSE_FORMAT = 'json'
def query(num_queries=1):
"""Request data from NYT and store it as a json file.
Args:
num_queries (int): The number of queries
"""
# Load API key
settings = json.load(open(SETTINGS_PATH))
API_KEY = settings['API_KEY']
# Send requests
URI = f'{BASE_URI}/{TYPE}/{SECTION}/{TIME_PERIOD}.{RESPONSE_FORMAT}'
articles = []
for k in range(num_queries):
print(f'Running query {k+1}...')
offset = k * 20
payload = {'api_key': API_KEY, 'offset': offset}
response = requests.get(URI, params=payload)
articles += response.json()['results']
# Save to file
with open(RAW_PATH, 'w') as output_file:
json.dump(articles, output_file)
def scrape_stories():
"""Get full document texts from urls."""
# Load articles
articles = json.load(open(RAW_PATH))
# Submit GET request and parse response content
for k, article in enumerate(articles):
print(f'Scraping article {k+1}...')
url = article['url']
f = urllib.request.urlopen(url)
soup = BeautifulSoup(f, 'html5lib')
story = ''
for par in soup.find_all('p', class_='story-body-text \
story-content'):
if par.string:
story += ' ' + par.string
article.update({'story': story})
# Save articles
with open(STORIES_PATH, 'w') as output_file:
json.dump(articles, output_file)
def label_articles(reset=False, relabel=False, start=0, rand_labels=False):
"""Run UI for sentiment labeling.
Loads all articles and presents those without a label.
Args:
reset (boolean): Delete all labels
relabel (boolean): Allow option to override existing labels
start (int): Article number to start from
rand_labels (boolean): Assign all random labels
"""
# Load articles
if reset or not os.path.isfile(LABELS_PATH):
articles = json.load(open(STORIES_PATH))
else:
articles = json.load(open(LABELS_PATH))
if start >= len(articles):
raise ValueError(f'Invalid starting point: {start}')
# Label articles
sentiments = [-1, 1]
print(f'Available sentiments: {sentiments}')
for k, article in enumerate(articles[start:]):
if not relabel and 'sentiment' in article:
continue
print(f'Article: {k+start+1}')
print(f"Title: {article['title']}")
print(f"Abstract: {article['abstract']}")
if rand_labels:
sent = random.choice(sentiments)
else:
try:
sent = int(input('Label: '))
except ValueError:
break
if sent not in sentiments:
break
article.update({'sentiment': sent})
print('----------------------------')
# Save articles
with open(LABELS_PATH, 'w') as output_file:
json.dump(articles, output_file)
def train_model(random_state=None):
"""Train a sentiment analyzer model.
Args:
random_state (int): Random seed for train_test_split used by numpy
"""
# Load articles
articles = json.load(open(LABELS_PATH))
# Extract data
articles = [article for article in articles if 'sentiment' in article]
stopset = set(stopwords.words('english'))
titles = [article['title'] for article in articles]
labels = [article['sentiment'] for article in articles]
# Vectorize data
count_vect = CountVectorizer(lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace')
tfidf_vect = TfidfVectorizer(use_idf=True,
lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace')
# Analyze and display relevant information
num_total = len(articles)
num_pos = sum(article['sentiment'] == 1 for article in articles)
num_neg = sum(article['sentiment'] == -1 for article in articles)
print(f'Found {num_total} labeled articles')
print(f'{num_pos} +, {num_neg} -')
# Train using count vectorizer
print('Vectorizing using bag of words...')
x = count_vect.fit_transform(titles)
y = labels
if random_state is not None:
x_train, x_test, y_train, y_test = train_test_split(
x, y, random_state=random_state)
else:
x_train, x_test, y_train, y_test = train_test_split(x, y)
mnb_clf = naive_bayes.MultinomialNB()
mnb_clf.fit(x_train, y_train)
y_pred = mnb_clf.predict(x_test)
mnb_acc = accuracy_score(y_test, y_pred) * 100
print('Naive Bayes: %.2f%% accuracy' % mnb_acc)
svm_clf = svm.SVC(probability=True)
svm_clf.fit(x_train, y_train)
y_pred = svm_clf.predict(x_test)
svm_acc = accuracy_score(y_test, y_pred) * 100
print('SVM: %.2f%% accuracy' % svm_acc)
# Train using tfidf vectorizer
print('Vectorizing using tfidf...')
x = tfidf_vect.fit_transform(titles)
y = labels
if random_state is not None:
x_train, x_test, y_train, y_test = train_test_split(
x, y, random_state=random_state)
else:
x_train, x_test, y_train, y_test = train_test_split(x, y)
mnb_clf = naive_bayes.MultinomialNB()
mnb_clf.fit(x_train, y_train)
y_pred = mnb_clf.predict(x_test)
mnb_acc = accuracy_score(y_test, y_pred) * 100
print('Naive Bayes: %.2f%% accuracy' % mnb_acc)
svm_clf = svm.SVC(probability=True)
svm_clf.fit(x_train, y_train)
y_pred = svm_clf.predict(x_test)
svm_acc = accuracy_score(y_test, y_pred) * 100
print('SVM: %.2f%% accuracy' % svm_acc)
# Store vectorizers and trained classifiers
with open(MNB_PATH, 'wb') as output_file:
pickle.dump(mnb_clf, output_file)
with open(SVM_PATH, 'wb') as output_file:
pickle.dump(svm_clf, output_file)
with open(COUNT_VECT_PATH, 'wb') as output_file:
pickle.dump(count_vect.vocabulary_, output_file)
with open(TFIDF_VECT_PATH, 'wb') as output_file:
pickle.dump(tfidf_vect.vocabulary_, output_file)
def analyze():
"""Analyze article data."""
# Calculate sentiment scores
articles = json.load(open(LABELS_PATH))
mnb_clf = pickle.load(open(MNB_PATH, 'rb'))
svm_clf = pickle.load(open(SVM_PATH, 'rb'))
count_vocabulary = pickle.load(open(COUNT_VECT_PATH, 'rb'))
tfidf_vocabulary = pickle.load(open(TFIDF_VECT_PATH, 'rb'))
stopset = set(stopwords.words('english'))
count_vect = CountVectorizer(lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace',
vocabulary=count_vocabulary)
tfidf_vect = TfidfVectorizer(use_idf=True,
lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace',
vocabulary=tfidf_vocabulary)
for k, article in enumerate(articles):
title = article['title']
abstract = article['abstract']
story = article['story']
print(f'{k+1}: {title}')
title_sent = TextBlob(title).sentiment
abstract_sent = TextBlob(abstract).sentiment
story_sent = TextBlob(story).sentiment
article.update({'title_sent': title_sent,
'abstract_sent': abstract_sent,
'story_sent': story_sent})
print(f'{title_sent} {abstract_sent} {story_sent}')
count = count_vect.fit_transform([title])
tfidf = tfidf_vect.fit_transform([title])
article.update({'count_mnb_sent': mnb_clf.predict(count).item(0),
'count_svm_sent': svm_clf.predict(count).item(0),
'tfidf_mnb_sent': mnb_clf.predict(tfidf).item(0),
'tfidf_svm_sent': svm_clf.predict(tfidf).item(0)})
# Test TextBlob performance
num_total = 0
num_correct = 0
for article in articles:
if 'sentiment' not in article:
continue
title_sent = article['title_sent'].polarity
true_sent = article['sentiment']
if title_sent == 0:
continue
if _sign(title_sent) == true_sent:
num_correct += 1
num_total += 1
acc = num_correct / num_total * 100
print('=========================')
print('TextBlob accuracy: %.2f' % acc)
print('=========================')
# Determine min, max, mean, and std
title_sents = np.array([a['title_sent'] for a in articles])
abstract_sents = np.array([a['abstract_sent'] for a in articles])
story_sents = np.array([a['story_sent'] for a in articles])
print('Title Sentiments')
print('----------------')
print(f'min: {np.min(title_sents)}')
print(f'max: {np.max(title_sents)}')
print(f'mean: {np.mean(title_sents)}')
from manipulate import Manipulator
import tensorflow as tf
import numpy as np
import torch
import clip
from MapTS import GetBoundary,GetDt
class StyleCLIP():
def __init__(self,dataset_name='ffhq'):
print('load clip')
device = "cuda" if torch.cuda.is_available() else "cpu"
self.model, preprocess = clip.load("ViT-B/32", device=device)
self.LoadData(dataset_name)
def LoadData(self, dataset_name):
tf.keras.backend.clear_session()
M=Manipulator(dataset_name=dataset_name)
np.set_printoptions(suppress=True)
fs3 = np.load('./npy/'+dataset_name+'/fs3.npy')
# -*- coding: utf-8 -*- {{{
#
# Your license here
# }}}
import os
import sys
from dateutil import parser
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os.path import dirname, abspath, join
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from fleet_request import FleetRequest
from utils import ensure_ddir
from services.reg_service.helpers.historical_signal_helper import HistoricalSignalHelper
from services.reg_service.helpers.clearing_price_helper import ClearingPriceHelper
from pdb import set_trace as bp
# Class for traditional regulation and dynamic regulation services.
class RegService():
"""
This class implements FleetInterface so that it can communicate with a fleet
"""
_fleet = None
def __init__(self, *args, **kwargs):
self._historial_signal_helper = HistoricalSignalHelper()
self._clearing_price_helper = ClearingPriceHelper()
# The "request_loop" function is the workhorse that manages hourly loops and sending requests & retrieving responses.
# It returns a 2-level dictionary; 1st level key is the starting time of each hour.
# TODO: [minor] currently, the start and end times are hardcoded. Ideally, they would be based on prompted user inputs.
def request_loop(self, service_type="Traditional",
start_time=parser.parse("2017-08-01 16:00:00"),
end_time=parser.parse("2017-08-01 21:00:00"),
sim_step=timedelta(seconds=2),
clearing_price_filename='historical-ancillary-service-data-2017.xls',
fleet_name="PVInverterFleet"):
# Check service type compatibility.
if service_type not in ['Traditional', 'Dynamic']:
raise ValueError("service_type has to be either 'Traditional' or 'Dynamic'!")
# Generate lists of 2s request and response class objects based on regulation service type (i.e. traditional vs. dynamic).
print(' Generating traditional signal lists')
request_list_2s_trad, response_list_2s_trad = self.get_signal_lists('Traditional', start_time, end_time, sim_step)
if service_type == 'Dynamic':
print(' Generating dynamic signal lists')
request_list_2s_dynm, response_list_2s_dynm = self.get_signal_lists(service_type, start_time, end_time, sim_step)
# Assign generic names to signal lists.
request_list_2s_tot = request_list_2s_dynm
response_list_2s_tot = response_list_2s_dynm
else:
request_list_2s_tot = request_list_2s_trad
response_list_2s_tot = response_list_2s_trad
# Returns a Dictionary containing a month-worth of hourly regulation price data indexed by datetime.
print(' Getting price data')
clearing_price_filename = join(dirname(abspath(__file__)), clearing_price_filename)
self._clearing_price_helper.read_and_store_clearing_prices(clearing_price_filename, start_time)
# Create a dictionary to store hourly results incl. performance score, clearing price credit, etc.
hourly_results = {}
# Set time duration.
cur_time = start_time
one_hour = timedelta(hours=1)
print(' Starting hourly loop')
# Loop through each hour between "start_time" and "end_time".
while cur_time < end_time - timedelta(minutes=65):
# Generate 1-hour worth (65 min) of request and response arrays for calculating scores.
cur_end_time = cur_time + timedelta(minutes=65)
# Traditional regulation request and response signals are needed regardless of service type.
request_list_2s_65min_trad = [r.P_req for r in request_list_2s_trad if cur_time <= r.ts_req <= cur_end_time]
response_list_2s_65min_trad = [r.P_service for r in response_list_2s_trad if
cur_time <= r.ts <= cur_end_time]
request_array_2s_65min_trad = np.asarray(request_list_2s_65min_trad)
response_array_2s_65min_trad = np.asarray(response_list_2s_65min_trad)
# For dynamic regulation, mileage ratio calculation is as below.
if service_type == 'Dynamic':
# Chop total signals to 1 hour.
request_list_2s_65min_dynm = [r.P_req for r in request_list_2s_dynm if
cur_time <= r.ts_req <= cur_end_time]
response_list_2s_65min_dynm = [r.P_service for r in response_list_2s_dynm if
cur_time <= r.ts <= cur_end_time]
request_array_2s_65min_dynm = np.asarray(request_list_2s_65min_dynm)
response_array_2s_65min_dynm = np.asarray(response_list_2s_65min_dynm)
# The "mileage ratio" equals "1" for traditional regulation and is > 1 for dynamic regulation.
try:
Hourly_mileage_trad = self.Hourly_reg_mileage(request_array_2s_65min_trad)
Hourly_mileage_dynm = self.Hourly_reg_mileage(request_array_2s_65min_dynm)
mileage_ratio = Hourly_mileage_dynm / Hourly_mileage_trad
except:
# This occurs for March 12 at 23:00 hours.
# The self.Hourly_reg_mileage() method requires an array of a specific
# length to work properly. Therefore, whenever the underlying data have missing
# values, this function breaks.
mileage_ratio = np.nan
# Assign generic names to signal lists.
request_list_2s_65min = request_list_2s_65min_dynm
response_list_2s_65min = response_list_2s_65min_dynm
else:
request_list_2s_65min = request_list_2s_65min_trad
response_list_2s_65min = response_list_2s_65min_trad
mileage_ratio = 1
# Convert lists into arrays. convert units from kW to MW.
request_array_2s = np.asarray(request_list_2s_65min) / 1000
response_array_2s = np.asarray(response_list_2s_65min) / 1000
# Slice arrays at 10s intervals - resulted arrays have 390 data points.
request_array_10s = request_array_2s[::5]
response_array_10s = response_array_2s[::5]
# Use if statement to ensure full array is present
# (Pandas skips over the NaN rows, so the array ends up being shorter than it should be)
if len(request_array_10s) == 391:
# Calculate performance scores for current hour and store in a dictionary keyed by starting time.
hourly_results[cur_time] = {}
hourly_results[cur_time]['performance_score'] = self.perf_score(request_array_10s, response_array_10s)
hourly_results[cur_time]['hourly_integrated_MW'] = self.Hr_int_reg_MW(request_array_2s)
hourly_results[cur_time]['mileage_ratio'] = mileage_ratio
hourly_results[cur_time]['Regulation_Market_Clearing_Price(RMCP)'] = \
self._clearing_price_helper.clearing_prices[cur_time]
hourly_results[cur_time]['Reg_Clearing_Price_Credit'] = self.Reg_clr_pr_credit(service_type,
hourly_results[cur_time][
'Regulation_Market_Clearing_Price(RMCP)'],
hourly_results[cur_time][
'performance_score'][
0],
hourly_results[cur_time][
'hourly_integrated_MW'],
mileage_ratio)
else: # request_array_10s is incomplete (NaN rows were dropped), so skip this hour
pass
# Move to the next hour.
cur_time += one_hour
# Store request and response parameters in lists for plotting and printing to text files.
P_request = [r.P_req for r in request_list_2s_tot]
ts_request = [r.ts_req for r in request_list_2s_tot]
P_response = [r.P_service for r in response_list_2s_tot]
P_togrid = [r.P_togrid for r in response_list_2s_tot]
# Save the responses to a csv
results_df = pd.DataFrame({
'DateTime': ts_request,
'P_request': P_request,
'P_response': P_response,
'P_togrid': P_togrid
})
# Calculate P_base
results_df['P_base'] = results_df['P_togrid'] - results_df['P_response']
# Add SoC if battery fleet
if 'battery' in fleet_name.lower():
SOC = [r.soc for r in response_list_2s_tot]
results_df['SOC'] = SOC
results_df_dir = join(dirname(abspath(__file__)), 'results', '')
ensure_ddir(results_df_dir)
results_df_filename = datetime.now().strftime('%Y%m%d') + '_' + ts_request[0].strftime(
'%B') + '_2sec_results_' + service_type + '_' + fleet_name + '.csv'
results_df.to_csv(results_df_dir + results_df_filename)
# Generate and save plot of the normalized request and response signals for the month
print(' Plotting monthly response signal')
plot_dir = join(dirname(abspath(__file__)), 'results', 'plots', '')
ensure_ddir(plot_dir)
plot_filename = datetime.now().strftime('%Y%m%d') + '_' + \
ts_request[0].strftime('%B') + \
'_2secnormsignals_' + \
service_type + \
'_' + \
fleet_name + '.png'
plt.figure(1)
plt.figure(figsize=(15, 8))
plt.subplot(211)
if (not (all(pd.isnull(results_df['P_request'])))):
plt.plot(ts_request, P_request, label='P_request')
if (not (all(pd.isnull(results_df['P_response'])))):
plt.plot(ts_request, P_response, label='P_response')
if (not (all(pd.isnull(results_df['P_togrid'])))):
plt.plot(ts_request, P_togrid, label='P_togrid')
if (not (all(pd.isnull(results_df['P_base'])))):
plt.plot(ts_request, results_df.P_base, label='P_base')
plt.legend(loc='best')
plt.ylabel('Power (MW)')
if 'battery' in fleet_name.lower():
if not (all(pd.isnull(results_df['SOC']))):
plt.subplot(212)
plt.plot(ts_request, SOC)
plt.ylabel('SoC (%)')
plt.xlabel('Date and Time')
plt.savefig(plot_dir + plot_filename, bbox_inches='tight')
plt.close()
return hourly_results
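# Hedged usage sketch (not part of the original class): with a fleet attached that implements
# process_request() and assigned_service_kW(), a run could look like
#   service = RegService()
#   service.fleet = my_fleet  # hypothetical fleet object
#   hourly_results = service.request_loop(service_type='Dynamic', fleet_name='PVInverterFleet')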
# Returns lists of requests and responses at 2s intervals.
def get_signal_lists(self, service_type, start_time, end_time, sim_step):
# Note: If you would like to infer input filename from start_time, use the following
# method. However, since the input files are not in the same directory as this code,
# file path still needs to be specified.
# Thus, revisit this after the codebase becomes production ready, at which time,
# you may have the default directory for input files whose path can be passed in
# in a different way.
# Get the name of the Excel file (e.g. "08 2017.xlsx") that contains historical regulation signal data.
historial_signal_filename = self._historial_signal_helper.get_input_filename(start_time, service_type)
historial_signal_filename = join(dirname(abspath(__file__)), historial_signal_filename)
# Returns a DataFrame that contains data in the entire specified sheet (i.e. tab).
self._historial_signal_helper.read_and_store_historical_signals(historial_signal_filename)
# Returns a Dictionary with datetime type keys.
signals = self._historial_signal_helper.signals_in_range(start_time, end_time)
#sim_step = timedelta(seconds=2)
reqrespitems = [self.request(x, sim_step, i * self._fleet.assigned_service_kW()) for x, i in signals.items()]
requests = [x[0] for x in reqrespitems]
responses = [x[1] for x in reqrespitems]
return requests, responses
# Method for retrieving device fleet's response to each individual request.
def request(self, ts, sim_step, p, q=0.0): # added input variables; what's the purpose of sim_step??
fleet_request = FleetRequest(ts=ts, sim_step=sim_step, p=p, q=0.0)
print("Processing request at timestep %s" % ts)
fleet_response = self.fleet.process_request(fleet_request)
# print(fleet_response.P_service)
return fleet_request, fleet_response
# Score the performance of device fleets for the hour (based on PJM Manual 12).
# Take 65 min worth of 10s data (should contain 391 data values).
def perf_score(self, request_array, response_array):
max_corr_array = []
max_index_array = []
prec_score_array = []
# In each 5-min of the hour, use max correlation to define "delay", "correlation" & "delay" scores.
# There are twelve (12) 5-min in each hour.
for i in range(12):
# Takes 5-min of the input signal data.
x = request_array[30 * i:30 * (i + 1)]
# print('x:', x)
y = response_array[30 * i:30 * (i + 1)]
# plot for 5min+5min(response signal lag window) for visually verifying correlation and delay.
x_plot = request_array[30 * i:30 * (i + 2)]
y_plot = response_array[30 * i:30 * (i + 2)]
# Refresh the array in each 5-min run.
corr_score = []
# If the regulation signal is nearly constant, then correlation score is calculated as:
# Calculates "1 - absoluate of difference btw slope of request and response signals" (determined by linear regression).
std_dev_x = np.std(x)
if std_dev_x < 0.01: # need to vet the threshold later, not specified in PJM manual.
axis = np.array(np.arange(30.))
coeff_x = np.polyfit(axis, x, 1) # linear regression when degree = 1.
coeff_y = np.polyfit(axis, y, 1)
slope_x = coeff_x[0]
slope_y = coeff_y[0]
corr_score_val = max(0, 1 - abs(slope_x - slope_y)) # from PJM manual 12.
max_index_array = np.append(max_index_array, 0)
max_corr_array = np.append(max_corr_array,
corr_score_val) # "r" cannot be calc'd for constant values in one or both arrays.
else:
# Calculate correlation btw the 5-min input signal and thirty different 5-min response signals,
# each is delayed at an additional 10s than the previous one; store results.
# There are thirty (30) 10s in each 5min.
for j in range(31):
y_ = response_array[30 * i + j:30 * (i + 1) + j]
# # for debug use
# print('y:', y)
# x_axis_x = np.arange(x.size)
# plt.plot(x_axis_x, x, "b")
# plt.plot(x_axis_x, y, "r")
# plt.show()
std_dev_y_ = np.std(y_)
#####################################################################################################
# Purpose: calculate artificial structure, i.e. fluctuations in galaxy counts, resulting from
# imperfect observing strategy (OS). Includes the functionality to account for dust extinction,
# photometric calibration errors (simple ansatz implemented here), individual redshift bins (see
# GalaxyCountsMetric_extended for details), as well as poisson noise in the galaxy counts.
#
# Basic workflow, assuming all the functionalities are used:
# 1. HEALpix slicers are set up for survey strategies.
# 2. Using GalaxyCountMetric_extended, which handles dust extinction and calculates galaxy counts
# based on redshift-bin-specific powerlaws, galaxy counts are found for each HEALpix pixel.
# 3. The shallow borders are masked (based on user-specified 'pixel radius').
# 4. Photometric calibration errors are calculated.
# 5. The galaxy counts in each pixel are recalculated using GalaxyCounts_withPixelCalibration
# since the calibration errors modify the upper limit on the integral used to calculate
# galaxy counts. GalaxyCounts_withPixelCalibration takes in each pixel's modified integration
# limit individually.
# 6. Poisson noise is added to the galaxy counts.
# 7. Fluctuations in the galaxy counts are calculated.
#
# For each pixel i, the photometric calibration errors are modeled as del_i= k*z_i/sqrt(nObs_i),
# where z_i is the average seeing the pixel minus avgSeeing across map, nObs is the number of observations,
# and k is a constant such that var(del_i)= (0.01)^2 -- 0.01 in accordance with LSST goal for relative
# photometric calibration.
#
# Most of the functionalities can be turned on/off, and plots and data can be saved at various points.
# Bordering masking adds significant run time as does the incorporation of photometric calibration
# errors. See the method description for further details.
#
# <NAME>: <EMAIL>
#####################################################################################################
import matplotlib.pyplot as plt
import numpy as np
import os
import healpy as hp
try:
from sympy.solvers import solve
from sympy import Symbol
except ImportError:
pass
import copy
import time
from matplotlib.ticker import FuncFormatter
import datetime
import rubin_sim.maf
import rubin_sim.maf.db as db
import rubin_sim.maf.metrics as metrics
import rubin_sim.maf.slicers as slicers
import rubin_sim.maf.stackers as mafStackers # stackers in sims_maf
import rubin_sim.maf.plots as plots
import rubin_sim.maf.metricBundles as metricBundles
import rubin_sim.maf.maps as maps
from rubin_sim.maf.mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended \
as GalaxyCountsMetric
from rubin_sim.maf.mafContrib.LSSObsStrategy.galaxyCounts_withPixelCalibration import GalaxyCounts_withPixelCalibration \
as GalaxyCounts_0ptErrors
from rubin_sim.maf.mafContrib.LSSObsStrategy.maskingAlgorithmGeneralized import maskingAlgorithmGeneralized
from rubin_sim.maf.mafContrib.LSSObsStrategy.numObsMetric import NumObsMetric
from rubin_sim.maf.mafContrib.LSSObsStrategy.saveBundleData_npzFormat import saveBundleData_npzFormat
from rubin_sim.maf.mafContrib.LSSObsStrategy.constantsForPipeline import powerLawConst_a, plotColor
__all__ = ['artificialStructureCalculation']
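# Hedged illustration (not part of the original pipeline): a minimal numpy sketch of the
# zero-point ansatz described in the module header, del_i = k*z_i/sqrt(nObs_i), with k chosen
# such that var(del_i) = (0.01)^2. Argument names are placeholders; the pipeline below solves
# for k with sympy for each dither strategy instead.
def _zero_point_error_sketch(avg_seeing_per_pixel, n_obs_per_pixel, goal=0.01):
    """Return per-pixel calibration errors del_i for pixels with at least one observation."""
    z_i = avg_seeing_per_pixel - np.mean(avg_seeing_per_pixel)  # seeing offset from the map average
    valid = n_obs_per_pixel > 0                                 # guard against division by zero
    ratio = z_i[valid] / np.sqrt(n_obs_per_pixel[valid])
    k = goal / np.std(ratio)                                    # k^2 * var(ratio) = goal^2
    delta = np.zeros_like(avg_seeing_per_pixel, dtype=float)
    delta[valid] = k * ratio
    return delta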
def artificialStructureCalculation(path, upperMagLimit, dbfile, runName,
noDithOnly=False,
bestDithOnly=False,
specifiedDith=None,
nside=128, filterBand='i',
cutOffYear=None, redshiftBin='all',
CFHTLSCounts=False, normalizedMockCatalogCounts=True,
includeDustExtinction=True, saveRawNumGalData=True,
pixelRadiusForMasking=5, saveNumGalDataAfterMasking=False,
include0ptErrors=True,
print0ptInformation=True,
plot0ptPlots=True, show0ptPlots=False, save0ptPlots=True,
saveNumGalDataAfter0pt=False,
addPoissonNoise=True, saveDeltaNByNData=True,
saveClsForDeltaNByN=True,
show_comp_plots=False, return_stuff=False):
"""
Calculate artificial structure, i.e. fluctuations in galaxy counts dN/N, resulting due
to imperfect observing strategy (OS).
- Creates an output directory for subdirectories containing the specified things to save.
- Prints out execution time at key steps (after border-masking, incorporating calibration errors, etc.)
- Returns the metricBundle object containing the calculated dN/N, the output directory name,
the resultsDb object, and (if include0ptErrors=True) calibration errors for each survey strategy.
Parameters
-------------
path: str
path to the main directory where output directory is to be saved.
upperMagLimit: float
upper limit on magnitude when calculating the galaxy counts.
dbfile: str
path to the OpSim output file, e.g. to a copy of enigma_1189
runName: str
run name tag to identify the output of specified OpSim output.
Since new OpSim outputs have different columns, the runName for enigma_1189 **must**
be 'enigma1189'; can be anything for other outputs, e.g. 'minion1016'
noDithOnly: `bool`
set to True if only want to consider the undithered survey. Default: False
bestDithOnly: `bool`
set to True if only want to consider RandomDitherFieldPerVisit.
Default: False
specifiedDith: str
specific dither strategy to run; could be a string or a list of strings.
Default: None
nside: int
HEALpix resolution parameter. Default: 128
filterBand: str
any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i'
cutOffYear: int
year cut to restrict analysis to only a subset of the survey.
Must range from 1 to 9, or None for the full survey analysis (10 yrs).
Default: None
redshiftBin: str
options include '0.<z<0.15', '0.15<z<0.37', '0.37<z<0.66, '0.66<z<1.0',
'1.0<z<1.5', '1.5<z<2.0', '2.0<z<2.5', '2.5<z<3.0','3.0<z<3.5', '3.5<z<4.0',
'all' for no redshift restriction (i.e. 0.<z<4.0)
Default: 'all'
CFHTLSCounts: `bool`
set to True if want to calculate the total galaxy counts from CFHTLS
powerlaw from LSST Science Book. Must be run with redshiftBin='all'
Default: False
normalizedMockCatalogCounts: `bool`
set to False if want the raw/un-normalized galaxy
counts from mock catalogs. Default: True
includeDustExtinction: `bool`
set to include dust extinction when calculating the coadded
depth. Default: True
saveRawNumGalData: `bool`
set to True to save numGal data right away, i.e. before
0pt error calibration, bordering masking, or poisson noise.
Default: True
pixelRadiusForMasking: int
number of pixels to mask along the shallow border. Default: 5
saveNumGalDataAfterMasking: `bool`
set to True to save numGal data after border masking.
Default: False
include0ptErrors: `bool`
set to True to include photometric calibration errors.
Default: True
print0ptInformation: `bool`
set to True to print out some statistics (variance, the k-value, etc.)
of the calibration errors of every dither strategy.
Default: True
plot0ptPlots : `bool`
generate 0pt plots. Default True.
saveNumGalDataAfter0pt: `bool`
set to True to save numGal data after border masking and 0pt calibration. Default: False
addPoissonNoise: `bool`
set to True to add poisson noise to the galaxy counts after border masking
and the incorporation of calibration errors. Default: True
saveNumGalDataAfterPoisson: `bool`
set to True to save numGal data right away, after border masking,
including the calibration errors, and the poisson noise.
Default: True
showDeltaNByNPlots: `bool`
set to True to show the plots related to the fluctuations in the galaxy
counts. Will work only when plotDeltaNByN=True. Default: False
saveDeltaNByNPlots: `bool`
set to True to save the plots related to the fluctuations in the galaxy
counts. Will work only when plotDeltaNByN=True. Default: True
saveDeltaNByNData: `bool`
set to True to save data for the the fluctuations in the galaxy counts.
Default: True
saveClsForDeltaNByN: `bool`
set to True to save the power spectrum data for the the fluctuations in
the galaxy counts. Default: True
show_comp_plots: `bool`
set to True if want to display the comparison plots (only valid if have more
than one dither strategy); otherwise, the plots will be saved and not displayed.
Default: False
return_stuff: `bool`
set to True to get the metricBundle object, the outDir, and resultsDb object.
Default: False
"""
startTime = time.time()
# ------------------------------------------------------------------------
# set up the metric
galCountMetric = GalaxyCountsMetric(upperMagLimit=upperMagLimit,
includeDustExtinction=includeDustExtinction,
redshiftBin=redshiftBin,
filterBand=filterBand,
nside=nside,
CFHTLSCounts=CFHTLSCounts,
normalizedMockCatalogCounts=normalizedMockCatalogCounts)
# OpSim database
opsdb = db.OpsimDatabase(dbfile)
# ------------------------------------------------------------------------
# set up the outDir name
zeropt_tag, dust_tag = '', ''
if include0ptErrors: zeropt_tag = 'with0ptErrors'
else: zeropt_tag = 'no0ptErrors'
if includeDustExtinction: dust_tag = 'withDustExtinction'
else: dust_tag = 'noDustExtinction'
if cutOffYear is not None: survey_tag = '%syearCut'%(cutOffYear)
else: survey_tag = 'fullSurveyPeriod'
# check to make sure redshift bin is ok.
allowedRedshiftBins = list(powerLawConst_a.keys()) + ['all']
if redshiftBin not in allowedRedshiftBins:
print('ERROR: Invalid redshift bin. Input bin can only be among %s.\n'%(allowedRedshiftBins))
return
zbin_tag = redshiftBin
if (redshiftBin=='all'): zbin_tag = 'allRedshiftData'
poisson_tag = ''
if addPoissonNoise: poisson_tag = 'withPoissonNoise'
else: poisson_tag = 'noPoissonNoise'
counts_tag = ''
if CFHTLSCounts:
counts_tag = 'CFHTLSpowerLaw'
elif normalizedMockCatalogCounts:
counts_tag = 'normalizedGalaxyCounts'
else:
counts_tag = 'unnormalizedGalaxyCounts'
outDir = f'artificialStructure_{poisson_tag}_nside{nside}'\
f'_pixelRadiusFormasking_{pixelRadiusForMasking}_{zeropt_tag}_{dust_tag}_{filterBand}'\
f'_{upperMagLimit}_{runName}_{survey_tag}_{zbin_tag}_{counts_tag}_directory'
print('# outDir: %s\n'%outDir)
if not os.path.exists('%s%s'%(path, outDir)):
os.makedirs('%s%s'%(path, outDir))
results_dbname = 'resultsDb_%s.db'%np.random.randint(100)
resultsDb = db.ResultsDb(database=results_dbname, outDir='%s%s'%(path, outDir))
# ------------------------------------------------------------------------
# set up the sql constraint
propIds, propTags = opsdb.fetchPropInfo()
wfdWhere = opsdb.createSQLWhere('WFD', propTags)
raDecInDeg = opsdb.raDecInDeg
if cutOffYear is not None:
nightCutOff = (cutOffYear)*365.25
sqlconstraint = '%s and night<=%s and filter=="%s"'%(wfdWhere, nightCutOff, filterBand)
else:
sqlconstraint = '%s and filter=="%s"'%(wfdWhere, filterBand)
print('# sqlconstraint: %s'%sqlconstraint)
# ------------------------------------------------------------------------
# create a ReadMe type file to put info in.
update = '%s\n'%(datetime.datetime.now())
update += '\nArtificial structure calculation with %s, %s, and %s '%(zeropt_tag, dust_tag, poisson_tag)
update += 'for %s for %s for %s<%s. '%(survey_tag, zbin_tag, filterBand, upperMagLimit)
update += '\nWith %s and PixelRadiusForMasking: %s.\n'%(counts_tag, pixelRadiusForMasking)
update += '\nsqlconstraint: %s'%sqlconstraint
update += '\nRunning with %s\n'%runName
update += '\noutDir: %s\n'%outDir
update += '\nMAF version: %s\n'%rubin_sim.maf.__version__
# figure out the readme name
readme_name = 'ReadMe'
readmes = [f for f in os.listdir('%s%s'%(path, outDir)) if any([f.endswith('.txt')])]
numFile = 0
for f in readmes:
if f.__contains__('%s_'%readme_name):
temp = f.split('.txt')[0]
numFile = max(numFile, int(temp.split('%s_'%readme_name)[1]))
else:
numFile = 1
readme_name = 'ReadMe_%s.txt'%(numFile+1)
readme = open('%s%s/%s'%(path, outDir, readme_name), 'w')
readme.write(update)
readme.close()
# ------------------------------------------------------------------------
# setup all the slicers. set up randomSeed for random/repRandom strategies through stackerList.
slicer = {}
stackerList = {}
if specifiedDith is not None:
# would like to add all the stackers first and then keep only the one that is specified
bestDithOnly, noDithOnly = False, False
if bestDithOnly:
stackerList['RandomDitherFieldPerVisit'] = [mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
randomSeed=1000)]
slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerVisitRa',
latCol='randomDitherFieldPerVisitDec',
latLonDeg=raDecInDeg,
nside=nside, useCache=False)
else:
slicer['NoDither'] = slicers.HealpixSlicer(lonCol='fieldRA', latCol='fieldDec', latLonDeg=raDecInDeg,
nside=nside, useCache=False)
if not noDithOnly:
# random dithers on different timescales
stackerList['RandomDitherPerNight'] = [mafStackers.RandomDitherPerNightStacker(degrees=raDecInDeg,
randomSeed=1000)]
stackerList['RandomDitherFieldPerNight'] = [mafStackers.RandomDitherFieldPerNightStacker(degrees=raDecInDeg, randomSeed=1000)]
stackerList['RandomDitherFieldPerVisit'] = [mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg, randomSeed=1000)]
# rep random dithers on different timescales
#stackerList['RepulsiveRandomDitherPerNight'] = [myStackers.RepulsiveRandomDitherPerNightStacker(degrees=raDecInDeg,
# randomSeed=1000)]
#stackerList['RepulsiveRandomDitherFieldPerNight'] = [myStackers.RepulsiveRandomDitherFieldPerNightStacker(degrees=raDecInDeg,
# randomSeed=1000)]
#stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
# randomSeed=1000)]
# set up slicers for different dithers
# random dithers on different timescales
slicer['RandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='randomDitherPerNightRa',
latCol='randomDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['RandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerNightRa',
latCol='randomDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='randomDitherFieldPerVisitRa',
latCol='randomDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# rep random dithers on different timescales
#slicer['RepulsiveRandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherPerNightRa',
# latCol='repulsiveRandomDitherPerNightDec',
# latLonDeg=raDecInDeg, nside=nside, useCache=False)
#slicer['RepulsiveRandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerNightRa',
# latCol='repulsiveRandomDitherFieldPerNightDec',
# latLonDeg=raDecInDeg, nside=nside,
# useCache=False)
#slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
# latCol='repulsiveRandomDitherFieldPerVisitDec',
# latLonDeg=raDecInDeg, nside=nside,
# useCache=False)
# spiral dithers on different timescales
slicer['FermatSpiralDitherPerNight'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherPerNightRa',
latCol='fermatSpiralDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['FermatSpiralDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherFieldPerNightRa',
latCol='fermatSpiralDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['FermatSpiralDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='fermatSpiralDitherFieldPerVisitRa',
latCol='fermatSpiralDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# hex dithers on different timescales
slicer['SequentialHexDitherPerNight'] = slicers.HealpixSlicer(lonCol='hexDitherPerNightRa',
latCol='hexDitherPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='hexDitherFieldPerNightRa',
latCol='hexDitherFieldPerNightDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SequentialHexDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='hexDitherFieldPerVisitRa',
latCol='hexDitherFieldPerVisitDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# per season dithers
slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(lonCol='pentagonDitherPerSeasonRa',
latCol='pentagonDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['PentagonDiamondDitherPerSeason'] = slicers.HealpixSlicer(lonCol='pentagonDiamondDitherPerSeasonRa',
latCol='pentagonDiamondDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
slicer['SpiralDitherPerSeason'] = slicers.HealpixSlicer(lonCol='spiralDitherPerSeasonRa',
latCol='spiralDitherPerSeasonDec',
latLonDeg=raDecInDeg, nside=nside,
useCache=False)
# ------------------------------------------------------------------------
if specifiedDith is not None:
stackerList_, slicer_ = {}, {}
if isinstance(specifiedDith, str):
if specifiedDith in slicer.keys():
if specifiedDith.__contains__('Random'):
# only Random dithers have a stacker object for rand seed specification
stackerList_[specifiedDith] = stackerList[specifiedDith]
slicer_[specifiedDith] = slicer[specifiedDith]
elif isinstance(specifiedDith, list):
for specific in specifiedDith:
if specific in slicer.keys():
if specific.__contains__('Random'):
# only Random dithers have a stacker object for rand seed specification
stackerList_[specific] = stackerList[specific]
slicer_[specific] = slicer[specific]
else:
err = 'Invalid value for specifiedDith: %s.'%specifiedDith
err += 'Allowed values include one of the following:\n%s'%(slicer.keys())
raise ValueError(err)
stackerList, slicer = stackerList_, slicer_
print('\nRunning the analysis for %s'%slicer.keys())
# ------------------------------------------------------------------------
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write('\nObserving strategies considered: %s\n'%(list(slicer.keys())))
readme.close()
# ------------------------------------------------------------------------
# set up bundle for numGal (and later deltaN/N)
myBundles = {}
dustMap = maps.DustMap(interp=False, nside=nside) # include dustMap; actual in/exclusion of dust is handled by the galaxyCountMetric
for dither in slicer:
if dither in stackerList:
myBundles[dither] = metricBundles.MetricBundle(galCountMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither, mapsList=[dustMap])
else:
myBundles[dither] = metricBundles.MetricBundle(galCountMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither, mapsList=[dustMap])
# ------------------------------------------------------------------------
# run the metric/slicer combination for galaxy counts (numGal)
print('\n# Running myBundles ...')
bGroup = metricBundles.MetricBundleGroup(myBundles, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
bGroup.runAll()
# ------------------------------------------------------------------------
# save the raw numGal data.
if saveRawNumGalData:
outDir_new = 'numGalData_beforeMasking_before0pt'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
saveBundleData_npzFormat('%s%s/%s'%(path, outDir, outDir_new), myBundles, 'numGalData_unmasked_no0pt', filterBand)
# ------------------------------------------------------------------------
# print out tot(numGal) associated with each strategy
# write to the readme as well
update = '\n# Before any border masking or photometric error calibration: '
print(update)
for dither in myBundles:
ind = np.where(myBundles[dither].metricValues.mask[:] == False)[0]
printOut = 'Total Galaxies for %s: %.9e' %(dither, sum(myBundles[dither].metricValues.data[ind]))
update += '\n %s'%printOut
print(printOut)
update += '\n'
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(update)
readme.close()
print('\n## Time since the start of the calculation: %.2f hrs'%((time.time()-startTime)/3600.))
# ------------------------------------------------------------------------
# mask the edges: the data in the masked pixels is not changed
plotHandler = plots.PlotHandler(outDir='%s%s'%(path, outDir), resultsDb=resultsDb, thumbnail=False, savefig=False)
print('\n# Masking the edges ...')
myBundles, borderPixelsMasked = maskingAlgorithmGeneralized(myBundles, plotHandler, 'Number of Galaxies',
nside=nside,
pixelRadius=pixelRadiusForMasking,
plotIntermediatePlots=False,
plotFinalPlots=False, printFinalInfo=True,
returnBorderIndices=True)
# ------------------------------------------------------------------------
# save the numGal data.
if saveNumGalDataAfterMasking:
outDir_new = 'numGalData_afterBorderMasking'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
saveBundleData_npzFormat('%s%s/%s'%(path, outDir, outDir_new), myBundles, 'numGalData_masked', filterBand)
# ------------------------------------------------------------------------
# print out tot(numGal) associated with each strategy
# write to the readme as well
if (pixelRadiusForMasking!=0):
update = '\n# After border masking: '
print(update)
for dither in myBundles:
ind = np.where(myBundles[dither].metricValues.mask[:] == False)[0]
printOut = 'Total Galaxies for %s: %.9e' %(dither, sum(myBundles[dither].metricValues.data[ind]))
print(printOut)
update += '\n %s'%printOut
update += '\n'
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(update)
readme.close()
print('\n## Time since the start of the calculation: %.2f hrs'%((time.time()-startTime)/3600.))
################################################################################################################
# If include 0pt errors
# Ansatz: for each pixel i, del_i= k*z_i/sqrt(nObs_i),
# where z_i is the average seeing in the pixel minus avgSeeing across the map, nObs is the number of observations,
# and k is a constant such that var(del_i)= (0.01)^2. 0.01 for the 1% LSST goal.
# k-constraint equation becomes: k^2*var(z_i/sqrt(nObs_i))= (0.01)^2 --- equation 1
if include0ptErrors:
tablename = 'SummaryAllProps'
if tablename in opsdb.tableNames:
colname = 'seeingFwhmEff'
if colname not in opsdb.columnNames[tablename]:
raise ValueError('Unclear which seeing column to use.')
elif 'Summary' in opsdb.tableNames:
tablename = 'Summary'
colname = 'finSeeing'
if colname not in opsdb.columnNames[tablename]:
colname = 'FWHMeff'
if colname not in opsdb.columnNames[tablename]:
raise ValueError('Unclear which seeing column to use.')
meanMetric = metrics.MeanMetric(col=colname) # for avgSeeing per HEALpix pixel
nObsMetric = NumObsMetric(nside=nside) # for numObs per HEALpix pixel
if includeDustExtinction: coaddMetric = metrics.ExgalM5(lsstFilter=filterBand)
else: coaddMetric = metrics.Coaddm5Metric()
avgSeeingBundle = {}
nObsBundle = {}
coaddBundle = {}
# can pass dustMap to metricBundle regardless of whether to include dust extinction or not.
# the metric choice (coadd vs. exGal) takes care of whether to use the dustMap or not.
dustMap = maps.DustMap(interp=False, nside=nside)
for dither in slicer:
if dither in stackerList:
avgSeeingBundle[dither] = metricBundles.MetricBundle(meanMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither)
nObsBundle[dither] = metricBundles.MetricBundle(nObsMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither)
coaddBundle[dither] = metricBundles.MetricBundle(coaddMetric, slicer[dither], sqlconstraint,
stackerList=stackerList[dither],
runName=runName, metadata=dither,
mapsList=[dustMap])
else:
avgSeeingBundle[dither] = metricBundles.MetricBundle(meanMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither)
nObsBundle[dither] = metricBundles.MetricBundle(nObsMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither)
coaddBundle[dither] = metricBundles.MetricBundle(coaddMetric, slicer[dither], sqlconstraint,
runName=runName, metadata=dither,
mapsList=[dustMap])
print('\n# Running avgSeeingBundle ...')
aGroup = metricBundles.MetricBundleGroup(avgSeeingBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
aGroup.runAll()
print('\n# Running nObsBundle ...')
nGroup = metricBundles.MetricBundleGroup(nObsBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
nGroup.runAll()
print('\n# Running coaddBundle ...')
cGroup = metricBundles.MetricBundleGroup(coaddBundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
cGroup.runAll()
# ------------------------------------------------------------------------
# mask the border pixels
for dither in slicer:
avgSeeingBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
nObsBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
coaddBundle[dither].metricValues.mask[borderPixelsMasked[dither]] = True
# ------------------------------------------------------------------------
# calculate averageSeeing over the entire map
bundle = {}
bundle['avgSeeingAcrossMap'] = metricBundles.MetricBundle(meanMetric, slicers.UniSlicer(),
sqlconstraint,runName=runName,
metadata='avgSeeingAcrossMap')
bundleGroup = metricBundles.MetricBundleGroup(bundle, opsdb, outDir='%s%s'%(path, outDir),
resultsDb=resultsDb, saveEarly=False)
bundleGroup.runAll()
avgSeeingAcrossMap = bundle['avgSeeingAcrossMap'].metricValues.data[0]
printOut = '\n# Average seeing across map: %s' %(avgSeeingAcrossMap)
print(printOut)
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write(printOut)
readme.close()
# find the zero point uncertainties: for each pixel i, del_i=k*z_i/sqrt(nObs_i),
# where z_i is the average seeing in the pixel minus avgSeeing across the map, nObs is the number of observations,
# and k is a constant such that var(del_i)=(0.01)^2.
# k-constraint equation becomes: k^2*var(z_i/sqrt(nObs_i))=(0.01)^2 --- equation 1
k = Symbol('k')
zeroPtError = {}
kValue = {}
print('\n# 0pt calculation ansatz: \delta_i=k*z_i/sqrt{nObs_i}, where k is s.t. var(\delta_i)=(0.01)^2')
if save0ptPlots:
outDir_new = '0pt_plots'
if not os.path.exists('%s%s/%s'%(path, outDir, outDir_new)):
os.makedirs('%s%s/%s'%(path, outDir, outDir_new))
# ------------------------------------------------------------------------
# add to the readme
readme = open('%s%s/%s'%(path, outDir, readme_name), 'a')
readme.write('\n\n0pt Information: ')
readme.close()
for dither in avgSeeingBundle:
z_i = avgSeeingBundle[dither].metricValues.data[:]-avgSeeingAcrossMap
nObs_i = nObsBundle[dither].metricValues.data[:]
ind = np.where((nObsBundle[dither].metricValues.mask == False) & \
(nObs_i != 0.0))[0] # make sure the uncertainty is valid; no division by 0
temp = np.var(z_i[ind]/np.sqrt(nObs_i[ind]))
import torch
from torchvision import transforms
import pandas as pd
from skimage.segmentation import expand_labels
from skimage.color import label2rgb
import os, tqdm
from skimage import segmentation
import numpy as np
from lcfcn import lcfcn_loss
from haven import haven_utils as hu
from . import networks
class LCFCN(torch.nn.Module):
def __init__(self, n_classes=1, lr=1e-5, opt='adam', network='vgg', device='cuda'):
super().__init__()
self.device = device
if network == 'vgg':
self.model_base = networks.FCN8_VGG16(n_classes=n_classes)
elif network == 'resnet':
self.model_base = networks.FCN8_ResNet(n_classes=n_classes)
if opt == "adam":
self.opt = torch.optim.Adam(
self.model_base.parameters(), lr=lr, betas=(0.99, 0.999), weight_decay=0.0005)
elif opt == "sgd":
self.opt = torch.optim.SGD(
self.model_base.parameters(), lr=lr)
else:
raise ValueError
def train_on_loader(model, train_loader):
model.train()
n_batches = len(train_loader)
pbar = tqdm.tqdm(total=n_batches)
for batch in train_loader:
score_dict = model.train_on_batch(batch)
pbar.set_description("Training. Loss: %.4f" % score_dict['train_loss'])
pbar.update(1)
pbar.close()
return {'train_loss':score_dict['train_loss']}
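# Hedged usage sketch (assumption, not in the original file): loaders are expected to yield
# dicts with "images" and "points" tensors, e.g.
#   model = LCFCN(n_classes=1, network='vgg', device='cuda')
#   train_scores = model.train_on_loader(train_loader)
#   val_scores = model.val_on_loader(val_loader, savedir_images='vis', n_images=2)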
@torch.no_grad()
def val_on_loader(self, val_loader, savedir_images=None, n_images=2):
self.eval()
n_batches = len(val_loader)
score_list = []
pbar = tqdm.tqdm(total=n_batches)
for i, batch in enumerate(tqdm.tqdm(val_loader)):
val_dict = self.val_on_batch(batch)
score_list += [val_dict]
pbar.update(1)
if savedir_images and i < n_images:
os.makedirs(savedir_images, exist_ok=True)
self.vis_on_batch(batch, savedir_image=os.path.join(
savedir_images, "%d.jpg" % i))
pbar.set_description("Validating. MAE: %.4f" % val_dict['mae'])
pbar.close()
val_dict =pd.DataFrame(score_list).mean().to_dict()
return val_dict
def train_on_batch(self, batch, **extras):
self.opt.zero_grad()
self.train()
images = batch["images"].to(self.device)
points = batch["points"].long().to(self.device)
logits = self.model_base.forward(images)
loss = lcfcn_loss.compute_loss(points=points, probs=logits.sigmoid())
loss.backward()
self.opt.step()
return {"train_loss":loss.item()}
def get_state_dict(self):
state_dict = {"model": self.model_base.state_dict(),
"opt":self.opt.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.model_base.load_state_dict(state_dict["model"])
self.opt.load_state_dict(state_dict["opt"])
@torch.no_grad()
def val_on_batch(self, batch):
self.eval()
images = batch["images"].to(self.device)
points = batch["points"].long().to(self.device)
logits = self.model_base.forward(images)
probs = logits.sigmoid().cpu().numpy()
blobs = lcfcn_loss.get_blobs(probs=probs)
pred_points = lcfcn_loss.blobs2points(blobs).squeeze()
mae = abs(float((np.unique(blobs)!=0).sum() - (points!=0).sum()))
game = lcfcn_loss.compute_game(pred_points=pred_points.squeeze(), gt_points=points.squeeze().cpu().numpy(), L=3)
return {'mae':mae, 'game':game }
@torch.no_grad()
def vis_on_batch(self, batch, savedir_image):
self.eval()
images = batch["images"].to(self.device)
points = batch["points"].long().to(self.device)
logits = self.model_base.forward(images)
probs = logits.sigmoid().cpu().numpy()
blobs = lcfcn_loss.get_blobs(probs=probs)
pred_counts = (np.unique(blobs)!=0).sum()
pred_blobs = blobs
pred_probs = probs.squeeze()
# loc
pred_count = pred_counts.ravel()[0]
pred_blobs = pred_blobs.squeeze()
pred_points = lcfcn_loss.blobs2points(pred_blobs).squeeze()
img_org = hu.get_image(batch["images"],denorm="rgb")
i1 = convert(np.array(img_org), batch['points'][0], enlarge=20)
i2 = convert(np.array(img_org), pred_blobs, enlarge=0)
i3 = convert(np.array(img_org), pred_points, enlarge=20)
hu.save_image(savedir_image, np.hstack([i1, i2, i3]))
def convert(img, mask, enlarge=0):
if enlarge != 0:
mask = expand_labels(mask, enlarge).astype('uint8')
m = label2rgb(mask, bg_label=0)
m = segmentation.mark_boundaries(m, mask.astype('uint8'))
i = 0.5 * np.array(img)
import numpy as np
from sklearn.datasets import make_blobs
from numpy.random import uniform, normal
def get_ece(predicted_posterior, predicted_label, true_label, num_bins=40):
poba_hist = []
accuracy_hist = []
bin_size = 1/num_bins
total_sample = len(true_label)
posteriors = predicted_posterior.max(axis=1)
score = 0
for bin in range(num_bins):
indx = np.where(
(posteriors>bin*bin_size) & (posteriors<=(bin+1)*bin_size)
)[0]
acc = np.nan_to_num(
np.mean(
predicted_label[indx] == true_label[indx]
)
) if indx.size!=0 else 0
conf = np.nan_to_num(
np.mean(
posteriors[indx]
)
) if indx.size!=0 else 0
score += len(indx)*np.abs(
acc - conf
)
score /= total_sample
return score
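# Hedged usage sketch (not part of the original module): predicted_posterior is an
# (n_samples, n_classes) array of class probabilities, e.g.
#   proba = clf.predict_proba(X_test)                      # hypothetical fitted classifier
#   ece = get_ece(proba, proba.argmax(axis=1), y_test, num_bins=40)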
def hellinger(p, q):
"""Hellinger distance between two discrete distributions.
Same as original version but without list comprehension
"""
return np.mean(np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2, axis = 1)) / np.sqrt(2))
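# Hedged usage note (not part of the original module): p and q are arrays of per-sample
# discrete distributions with matching shapes, e.g. (n_samples, n_classes); the function
# returns the mean Hellinger distance across samples, e.g.
#   hellinger(np.array([[0.9, 0.1]]), np.array([[0.6, 0.4]]))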
def _generate_2d_rotation(theta=0):
R = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])
return R
def generate_gaussian_parity(
n_samples,
centers=None,
class_label=None,
cluster_std=0.25,
center_box=(-1.0,1.0),
angle_params=None,
random_state=None,
):
"""
Generate 2-dimensional Gaussian XOR distribution.
(Classic XOR problem but each point is the
center of a Gaussian blob distribution)
Parameters
----------
n_samples : int
Total number of points divided among the four
clusters with equal probability.
centers : array of shape [n_centers,2], optional (default=None)
The coordinates of the ceneter of total n_centers blobs.
class_label : array of shape [n_centers], optional (default=None)
class label for each blob.
cluster_std : float, optional (default=1)
The standard deviation of the blobs.
center_box : tuple of float (min, max), default=(-1.0, 1.0)
The bounding box for each cluster center when centers are generated at random.
angle_params: float, optional (default=None)
Number of radians to rotate the distribution by.
random_state : int, RandomState instance, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
"""
if random_state is not None:
np.random.seed(random_state)
if centers is None:
centers = np.array([(-0.5, 0.5), (0.5, 0.5), (-0.5, -0.5), (0.5, -0.5)])
if class_label is None:
class_label = [0, 1, 1, 0]
blob_num = len(class_label)
# get the number of samples in each blob with equal probability
samples_per_blob = np.random.multinomial(
n_samples, 1 / blob_num * np.ones(blob_num)
)
X, y = make_blobs(
n_samples=samples_per_blob,
n_features=2,
centers=centers,
cluster_std=cluster_std,
center_box=center_box
)
for blob in range(blob_num):
y[np.where(y == blob)] = class_label[blob]
if angle_params is not None:
R = _generate_2d_rotation(angle_params)
X = X @ R
return X, y.astype(int)
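# Hedged usage sketch (not part of the original module): draw 1000 Gaussian-XOR samples,
# rotated by 45 degrees, with reproducible sampling.
#   X, y = generate_gaussian_parity(1000, angle_params=np.pi / 4, random_state=0)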
def pdf(x, cov_scale=0.25):
mu01 = np.array([-0.5,0.5])
mu02 = np.array([0.5,-0.5])
mu11 = np.array([0.5,0.5])
mu12 = np.array([-0.5,-0.5])
cov = cov_scale* np.eye(2)
inv_cov = np.linalg.inv(cov)
p01 = (
np.exp(-0.5*(x - mu01)@inv_cov@(x-mu01).T)
)/(2*np.pi*np.sqrt(np.linalg.det(cov)))
p02 = (
np.exp(-0.5*(x - mu02)@inv_cov@(x-mu02).T)
)/(2*np.pi*np.sqrt(np.linalg.det(cov)))
p11 = (
np.exp(-0.5*(x - mu11)@inv_cov@(x-mu11).T)
# import python libraries
import glob
import numpy as np
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from skimage.feature import hog
from scipy.ndimage.measurements import label
### grab images from a directory
### returns np array of images
def read_image_dir(path):
Images = np.array([plt.imread(file) for file in glob.glob(path + '*')])
return Images
### apply color thresholds to image
### returns binarized image
def binarize(img, sobelx_thresh, sobely_thresh, saturation_thresh, value_thresh):
# grab S channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
# grab v channel
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
v_channel = hsv[:, :, 2]
# grab grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# take the image gradient w.r.t. X (Sobel derivative)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
# take the image gradient w.r.t. Y (Sobel derivative)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# take absolute value of the derivatives
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# rescale back to 255
scaled_sobelx = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
scaled_sobely = np.uint8(255 * abs_sobely / np.max(abs_sobely))
# translate pixels in each filtered image to binary values
sobelx_binary = np.zeros_like(scaled_sobelx)
sobely_binary = np.zeros_like(scaled_sobely)
sobelx_binary[(scaled_sobelx >= sobelx_thresh[0]) & (scaled_sobelx <= sobelx_thresh[1])] = 1
sobely_binary[(scaled_sobely >= sobely_thresh[0]) & (scaled_sobely <= sobely_thresh[1])] = 1
saturation_binary = np.zeros_like(s_channel)
saturation_binary[(s_channel >= saturation_thresh[0]) & (s_channel <= saturation_thresh[1])] = 1
value_binary = np.zeros_like(v_channel)
value_binary[(v_channel >= value_thresh[0]) & (v_channel <= value_thresh[1])] = 1
# combine binarized images
combined = np.zeros_like(saturation_binary)
combined[((sobelx_binary == 1) | (sobely_binary == 1)) | ((saturation_binary == 1) & (value_binary == 1))] = 1
return combined
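# Hedged usage sketch (not part of the original pipeline; threshold values are illustrative only):
#   binary = binarize(warped, sobelx_thresh=(20, 100), sobely_thresh=(20, 100),
#                     saturation_thresh=(170, 255), value_thresh=(170, 255))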
### find center of each lane at bottom of frame
### no return
def find_center_points(binary_img):
# grab bottom half of image
bottom = binary_img[binary_img.shape[0] // 2:, :]
# take histogram of activated pixel amplitude
hist = np.sum(bottom, axis=0)
# find lane centers
mid = np.int(hist.shape[0] // 2)
left_center = np.argmax(hist[:mid])
right_center = np.argmax(hist[mid:]) + mid
return [left_center, right_center]
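# Hedged usage note (not part of the original pipeline): expects a binarized bird's-eye view,
# e.g. left_center, right_center = find_center_points(binary)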
### process frames of video one by one
### returns processed image
def video_pipeline(frame, undist, txf, thresholds, lanes, lane_params, svc, X_scaler, window_smoothing, vehicle_params):
## lane detection
lane_img = np.copy(frame)
undistorted = undist.undistort(lane_img)
warped = txf.warp(undistorted)
binarized = binarize(warped, thresholds[0], thresholds[1], thresholds[2], thresholds[3])
detected = lanes.find_lanes(binarized, lane_params[0], lane_params[1], lane_params[2], lane_params[3], lane_params[4], lane_params[5])
result = lanes.project_lines(detected, undistorted, txf)
# Overlay visualization of lane finding function
detected_resized = cv2.resize(detected, (0, 0), fx=0.35, fy=0.35)
y_pos = 20
x_pos = 20
result[y_pos: y_pos + detected_resized.shape[0], x_pos: x_pos + detected_resized.shape[1]] = detected_resized
# Overlay radius measurements
radius = lanes.get_radius()
text = 'Radius of curvature: ' + str(int(radius)) + 'm'
cv2.putText(result, text, (550, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 0), thickness=3)
# Overlay center distance
distance = lanes.get_center_distance(warped)
text2 = 'Distance from center: ' + str(round(distance, 1)) + 'm'
cv2.putText(result, text2, (550, 120), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 0), thickness=3)
## vehicle detection
color_space = vehicle_params[0]
y_start_stop = vehicle_params[1]
scale_factor = vehicle_params[2]
orient = vehicle_params[3]
pix_per_cell = vehicle_params[4]
cell_per_block = vehicle_params[5]
spatial_size = vehicle_params[6]
hist_bins = vehicle_params[7]
heatmap_threshold = vehicle_params[8]
vehicle_img = np.copy(undistorted)
# Define an list to hold window coordinates
windows = []
# Iterate scales to use search windows of varied size
for scale in scale_factor:
# Identify vehicles in new image/frame and compile list of detection windows
windows.extend(find_cars(vehicle_img, color_space, y_start_stop[0], y_start_stop[1], scale,
svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins))
recent_windows = sum(window_smoothing.next(windows), [])
# create heat map of all detections
heat = np.zeros_like(vehicle_img[:, :, 0]).astype(np.float)
heat = add_heat(heat, recent_windows)
heat = apply_threshold(heat, heatmap_threshold)
heatmap = np.clip(heat, 0, 255)
# draw final boxes based on heat map
labels = label(heatmap)
result = draw_labeled_bboxes(result, labels)
return result
# convert an image to new cv2 color space
# defined by parameter "conv"
def convert_color(img, conv):
conv_str = 'cv2.cvtColor(img, cv2.COLOR_RGB2' + conv + ')'
return eval(conv_str)
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
block_norm='L2-Hys',
transform_sqrt=False,
visualize=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
block_norm='L2-Hys',
transform_sqrt=False,
visualize=vis, feature_vector=feature_vec)
return features
# downsize a passed image and return a flat list of
# pixel values from each color channel
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:, :, 0], size).ravel()
color2 = cv2.resize(img[:, :, 1], size).ravel()
color3 = cv2.resize(img[:, :, 2], size).ravel()
return np.hstack((color1, color2, color3))
# take frequency histograms of the pixel values in each channel
# of the passed image and return a single vector
def color_hist(img, nbins=32): # bins_range=(0, 256)
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:, :, 0], bins=nbins)
channel2_hist = np.histogram(img[:, :, 1], bins=nbins)
channel3_hist = np.histogram(img[:, :, 2], bins=nbins)
import numpy as np
import matplotlib.pyplot as plt
import mir3.modules.tool.wav2spectrogram as wav2spec
eps = np.finfo(float).eps
def inDb(a):
return 20 * np.log10(a + eps)
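# Hedged usage note (not part of the original module): inDb converts linear amplitude to dB,
# e.g. inDb(np.array([1.0, 0.1, 0.01])) is approximately [0., -20., -40.].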
"""Module retrieving Fourier coefficients computation from lonlat grid.
Computation of the Fourier coefficients from lonlat grids
on pressure levels at every timestep.
The spectral truncation is determined by the number of longitudinal
gridsteps. The outputs are given as (time,level,wave,lat) where wave stands
for the zonal wavenumber. In the context of the thermodynamic diagnostic tool,
this is used for the computation of the Lorenz Energy Cycle.
@author: <EMAIL>, <NAME>, Hamburg University, 2018.
"""
import numpy as np
from netCDF4 import Dataset
GP_RES = np.array([16, 32, 48, 64, 96, 128, 256, 384, 512, 1024, 2048, 4096])
FC_RES = np.array([5, 10, 15, 21, 31, 43, 85, 127, 171, 341, 683, 1365])
G_0 = 9.81 # Gravity acceleration
GAM = 0.0065 # Standard atmosphere lapse rate
GAS_CON = 287.0 # Gas constant
P_0 = 10000 # Reference tropospheric pressure
def fourier_coeff(tadiagfile, outfile, ta_input, tas_input):
"""Compute Fourier coefficients in lon direction.
Receive as input:
- tadiagfile: the name of a file to store modified t fields;
- outfile: the name of a file to store the Fourier coefficients;
- ta_input: the name of a file containing t,u,v,w fields;
- tas_input: the name of a file containing t2m field.
"""
with Dataset(ta_input) as dataset:
lon = dataset.variables['lon'][:]
lat = dataset.variables['lat'][:]
lev = dataset.variables['plev'][:]
time = dataset.variables['time'][:]
t_a = dataset.variables['ta'][:, :, :, :]
u_a = dataset.variables['ua'][:, :, :, :]
v_a = dataset.variables['va'][:, :, :, :]
wap = dataset.variables['wap'][:, :, :, :]
nlon = len(lon)
nlat = len(lat)
nlev = len(lev)
ntime = len(time)
i = np.min(np.where(2 * nlat <= GP_RES))
trunc = FC_RES[i] + 1
wave2 = np.linspace(0, trunc - 1, trunc)
with Dataset(tas_input) as dataset:
tas = dataset.variables['tas'][:, :, :]
tas = tas[:, ::-1, :]
ta1_fx = np.array(t_a)
deltat = np.zeros([ntime, nlev, nlat, nlon])
p_s = np.full([ntime, nlat, nlon], P_0)
for i in np.arange(nlev - 1, 0, -1):
h_1 = np.ma.masked_where(ta1_fx[:, i, :, :] != 0, ta1_fx[:, i, :, :])
if np.any(h_1.mask > 0):
deltat[:, i - 1, :, :] = np.where(ta1_fx[:, i - 1, :, :] != 0,
deltat[:, i - 1, :, :],
(ta1_fx[:, i, :, :] - tas))
deltat[:, i - 1, :, :] = (
(1 * np.array(h_1.mask)) * np.array(deltat[:, i - 1, :, :]))
d_p = -(
(P_0 * G_0 / (GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i - 1, :, :] != 0, p_s, lev[i - 1] + d_p)
for k in np.arange(0, nlev - i - 1, 1):
h_3 = np.ma.masked_where(ta1_fx[:, i + k, :, :] != 0,
ta1_fx[:, i + k, :, :])
if np.any(h_3.mask > 0):
deltat[:, i - 1, :, :] = np.where(
ta1_fx[:, i + k, :, :] != 0, deltat[:, i - 1, :, :],
(ta1_fx[:, i + k + 1, :, :] - tas))
d_p = -((P_0 * G_0 /
(GAM * GAS_CON)) * deltat[:, i - 1, :, :] / tas)
p_s = np.where(ta1_fx[:, i + k, :, :] != 0, p_s,
lev[i + k] + d_p)
ta2_fx = np.array(t_a)
mask = np.zeros([nlev, ntime, nlat, nlon])
dat = np.zeros([nlev, ntime, nlat, nlon])
tafr_bar = np.zeros([nlev, ntime, nlat, nlon])
deltap = np.zeros([ntime, nlev, nlat, nlon])
for i in np.arange(nlev):
deltap[:, i, :, :] = p_s - lev[i]
h_2 = np.ma.masked_where(ta2_fx[:, i, :, :] == 0, ta2_fx[:, i, :, :])
mask[i, :, :, :] = np.array(h_2.mask)
tafr_bar[i, :, :, :] = (1 * np.array(mask[i, :, :, :]) * (
tas - GAM * GAS_CON / (G_0 * p_s) * deltap[:, i, :, :] * tas))
dat[i, :, :, :] = (
ta2_fx[:, i, :, :] * (1 - 1 * np.array(mask[i, :, :, :])))
t_a[:, i, :, :] = dat[i, :, :, :] + tafr_bar[i, :, :, :]
pr_output_diag(t_a, ta_input, tadiagfile, 'ta')
tafft_p = np.fft.fft(t_a, axis=3)[:, :, :, :int(trunc / 2)] / (nlon)
uafft_p = np.fft.fft(u_a, axis=3)
import sys
sys.path.insert(1, '..')
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from findiff.operators import FinDiff, Identity, Coef
from findiff.pde import *
#import matplotlib.pyplot as plt
#from mpl_toolkits import mplot3d
class TestPDE(unittest.TestCase):
def test_1d_dirichlet_hom(self):
shape = (11,)
x = np.linspace(0, 1, 11)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = 2
pde = PDE(L, np.zeros_like(x), bc)
u = pde.solve()
expected = x + 1
np.testing.assert_array_almost_equal(expected, u)
def test_1d_dirichlet_inhom(self):
nx = 21
shape = (nx,)
x = np.linspace(0, 1, nx)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = 2
pde = PDE(L, 6*x, bc)
u = pde.solve()
expected = x**3 + 1
np.testing.assert_array_almost_equal(expected, u)
def test_1d_neumann_hom(self):
nx = 11
shape = (nx,)
x = np.linspace(0, 1, nx)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = FinDiff(0, dx, 1), 2
pde = PDE(L, np.zeros_like(x), bc)
u = pde.solve()
expected = 2*x + 1
np.testing.assert_array_almost_equal(expected, u)
def test_2d_dirichlet_hom(self):
shape = (11, 11)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + FinDiff(1, dy, 2)
expected = X + 1
bc = BoundaryConditions(shape)
bc[0, :] = 1
bc[-1, :] = 2
bc[:, 0] = X + 1
bc[:, -1] = X + 1
pde = PDE(L, np.zeros_like(X), bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u)
def test_2d_dirichlet_inhom(self):
shape = (11, 11)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + FinDiff(1, dy, 2)
expected = X**3 + Y**3 + 1
f = 6*X + 6*Y
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u)
def test_2d_neumann_hom(self):
shape = (31, 31)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + FinDiff(1, dy, 2)
expected = X**2 + Y**2 + 1
f = 4 * np.ones_like(X)
bc = BoundaryConditions(shape)
d_dy = FinDiff(1, dy)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = d_dy, 2*Y
bc[:, -1] = d_dy, 2*Y
pde = PDE(L, f, bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u)
def test_1d_oscillator_free_dirichlet(self):
n = 300
shape = n,
t = np.linspace(0, 5, n)
dt = t[1] - t[0]
L = FinDiff(0, dt, 2) + Identity()
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = 2
eq = PDE(L, np.zeros_like(t), bc)
u = eq.solve()
expected = np.cos(t)-(np.cos(5)-2)*np.sin(t)/np.sin(5)
np.testing.assert_array_almost_equal(expected, u, decimal=4)
def test_1d_damped_osc_driv_dirichlet(self):
n = 100
shape = n,
t = np.linspace(0, 1, n)
dt = t[1] - t[0]
L = FinDiff(0, dt, 2) - FinDiff(0, dt) + Identity()
f = -3*np.exp(-t)*np.cos(t) + 2*np.exp(-t)*np.sin(t)
expected = np.exp(-t)*np.sin(t)
bc = BoundaryConditions(shape)
bc[0] = expected[0]
bc[-1] = expected[-1]
eq = PDE(L, f, bc)
u = eq.solve()
np.testing.assert_array_almost_equal(expected, u, decimal=4)
def test_1d_oscillator_driv_neumann(self):
n = 200
shape = n,
t = np.linspace(0, 1, n)
dt = t[1] - t[0]
L = FinDiff(0, dt, 2) - FinDiff(0, dt) + Identity()
f = -3 * np.exp(-t) * np.cos(t) + 2 * np.exp(-t) * np.sin(t)
expected = np.exp(-t) * np.sin(t)
bc = BoundaryConditions(shape)
bc[0] = FinDiff(0, dt), 1
bc[-1] = expected[-1]
eq = PDE(L, f, bc)
u = eq.solve()
np.testing.assert_array_almost_equal(expected, u, decimal=4)
def test_1d_with_coeffs(self):
n = 200
shape = n,
t = np.linspace(0, 1, n)
dt = t[1] - t[0]
L = Coef(t) * FinDiff(0, dt, 2)
f = 6*t**2
bc = BoundaryConditions(shape)
bc[0] = 0
bc[-1] = 1
eq = PDE(L, f, bc)
u = eq.solve()
expected = t**3
np.testing.assert_array_almost_equal(expected, u, decimal=4)
def test_mixed_equation__with_coeffs_2d(self):
shape = (41, 51)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + Coef(X*Y) * FinDiff((0, dx, 1), (1, dy, 1)) + FinDiff(1, dy, 2)
expected = X ** 3 + Y ** 3 + 1
f = 6*(X + Y)
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u, decimal=4)
def test_2d_inhom_const_coefs_dirichlet_all(self):
shape = (41, 50)
(x, y), (dx, dy), (X, Y) = make_grid(shape, edges=[(-1, 1), (-1, 1)])
expected = X**3 + Y**3 + X*Y + 1
L = Coef(3) * FinDiff(0, dx, 2) + Coef(2) * FinDiff((0, dx, 1), (1, dy, 1)) + FinDiff(1, dy, 2)
f = 2 + 18 * X + 6 * Y
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
actual = pde.solve()
np.testing.assert_array_almost_equal(expected, actual, decimal=4)
def test_2d_inhom_var_coefs_dirichlet_all(self):
shape = (41, 50)
(x, y), (dx, dy), (X, Y) = make_grid(shape, edges=[(-1, 1), (-1, 1)])
expected = X**3 + Y**3 + X*Y + 1
L = Coef(3*X) * FinDiff(0, dx, 2) + Coef(2*Y) * FinDiff((0, dx, 1), (1, dy, 1)) + FinDiff(1, dy, 2)
f = 18 * X**2 + 8*Y
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
actual = pde.solve()
np.testing.assert_array_almost_equal(expected, actual, decimal=4)
# This simple case is not running yet!
@unittest.skip
def test_2d_inhom_var_coefs_with_identity_all_dirichlet(self):
shape = (5, 5)
(x, y), (dx, dy), (X, Y) = make_grid(shape, edges=[(-1, 1), (-1, 1)])
expected = X**3 + Y**3 + X*Y + 1
#L = Coef(3*X) * FinDiff(0, dx, 2) + Coef(2*Y) * FinDiff((0, dx, 1), (1, dy, 1)) + FinDiff(1, dy, 2) + Coef(5*X*Y) * Identity()
L = Coef(5*X*Y) * FinDiff(0, dx, 2) #Identity()
#f = 18 * X**2 + 8*Y + 5*X*Y*expected
mat = L.matrix(shape)
print(mat)
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
actual = pde.solve()
np.testing.assert_array_almost_equal(expected, actual, decimal=4)
def make_grid(shape, edges):
axes = tuple([np.linspace(edges[k][0], edges[k][1], shape[k]) for k in range(len(shape))])
coords = np.meshgrid(*axes, indexing='ij')
"""Collection of functions to run simulation studies and plot results"""
from time import time
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # noqa:F401
import src.functions_to_approximate as functions
import src.interpolate as interpolators
from src.auxiliary import get_grid
from src.auxiliary import get_interpolation_points
from src.auxiliary import rmse as root_mean_squared_error
def execute_study(study_params, interpolation_params):
"""Run the simulation study with parameters *study_params*.
...
Parameters
----------
study_params: dict
...
interpolation_params: dict
...
Returns
-------
results: dict
...
"""
# load parameters
interpolation_method = study_params["controls"]["interpolation method"]
func_name = study_params["controls"]["function to approximate"]
func_name_short = func_name[: func_name.find("_")]
interpolator_name = study_params[interpolation_method]["interpolator"]
grid_density = study_params["controls"]["grid density"]
grid_method = study_params["controls"]["grid method"]
iterations = study_params["controls"]["iterations"]
n_interpolation_points = study_params["controls"][
"number of points for accuracy check"
]
accuracy_check_seed = study_params["controls"]["seed for accuracy check"]
# set grid parameters
grid_params = {}
grid_params["orders"] = study_params["grid"]["orders"][grid_density]
grid_params["lower bounds"] = study_params["grid"]["lower bounds"][func_name_short]
grid_params["upper bounds"] = study_params["grid"]["upper bounds"][func_name_short]
# set functionals for function to approximate and interpolator
func = getattr(functions, func_name)
interpolator = getattr(interpolators, interpolator_name)
# initiate dict to store results
results = {"rmse": {}, "runtime": {}, "gridpoints": {}}
for dims in study_params["controls"]["dims"]:
# generate grid
grid, index = get_grid(grid_params, dims)
# get interpolation points
interpolation_points = get_interpolation_points(
n_interpolation_points, grid, accuracy_check_seed,
)
# get results on interpolation points
results_calc = func(interpolation_points)
# initiate objects to store results
rmse_tmp = []
runtime_tmp = []
n_gridpoints_effective_tmp = []
# iterate over settings
for iteration in range(iterations):
# print(f"dimension: {dims}; iteration: {iteration + 1}")
# adjust interpolation parameters
interpolation_params[interpolation_method]["grid method"] = grid_method
interpolation_params[interpolation_method][
"evaluate off-grid"
] = study_params["controls"]["evaluate off-grid"]
if interpolation_method == "linear":
interpolation_params["linear"]["sparse grid level"] = study_params[
"linear"
]["sparse grid levels"][iteration]
interpolation_params["linear"]["interpolation points"] = study_params[
"linear"
]["interpolation points"][iteration]
elif interpolation_method == "spline":
interpolation_params["spline"]["interpolation points"] = study_params[
"spline"
]["interpolation points"][iteration]
elif interpolation_method == "smolyak":
interpolation_params["smolyak"]["sparse grid level"] = study_params[
"smolyak"
]["sparse grid levels"][iteration]
elif interpolation_method == "sparse":
interpolation_params["sparse"]["sparse grid level"] = study_params[
"sparse"
]["sparse grid levels"][iteration]
# interpolate and capture computation time
start = time()
results_interp, n_gridpoints_effective = interpolator(
interpolation_points, grid, func, interpolation_params
)
stop = time()
# assess interpolation accuracy
rmse_iter = root_mean_squared_error(results_interp, results_calc)
# store results
rmse_tmp.append(rmse_iter)
runtime_tmp.append(stop - start)
n_gridpoints_effective_tmp.append(n_gridpoints_effective)
results["rmse"][dims] = np.array(object=rmse_tmp)
results["runtime"][dims] = np.array(object=runtime_tmp)
#! /usr/bin/env python
import os
import numpy as np
import astropy.io.fits as fits
from . import noise_simulation as ng
def add_dark_current(ramp, seed, gain, darksignal):
"""
Adds dark current to the input signal
Parameters
----------
ramp: sequence
The array of ramp images
seed: int
The seed for the dark signal
gain: float
The detector gain
darksignal: sequence
A 2D map of the dark signal to project onto the ramp
Returns
-------
np.ndarray
The dark signal ramp
"""
# Get the random seed and array shape
np.random.seed(seed)
dims = ramp.shape
# Add the dark signal to the ramp
total = darksignal*0.
for n in range(dims[0]):
signal = np.random.poisson(darksignal)/gain
total = total+signal
ramp[n,:,:] = ramp[n,:,:]+total
return ramp
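# Illustrative usage sketch for add_dark_current (not part of the original
# module). The ramp shape, seed, gain and dark-signal level below are arbitrary
# assumptions chosen only to make the call self-contained.
def _example_add_dark_current():
    ngrps, nrows, ncols = 3, 8, 8
    ramp = np.zeros((ngrps, nrows, ncols))      # empty ramp images
    darksignal = np.full((nrows, ncols), 0.02)  # assumed flat dark-signal map
    ramp_with_dark = add_dark_current(ramp, seed=42, gain=1.6, darksignal=darksignal)
    # the dark signal accumulates group by group, so later groups are brighter
    assert ramp_with_dark[-1].mean() >= ramp_with_dark[0].mean()
    return ramp_with_dark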
def make_exposure(nints, ngrps, darksignal, gain, pca0_file, noise_seed=None,
dark_seed=None, offset=500):
"""
Make a simulated exposure with no source signal
Parameters
----------
nints: int
The number of integrations
ngrps: int
The number of groups per integration
darksignal: sequence
A dark frame
gain: float
The gain on the detector
pca0_file: str
The path to the PCA-zero file
noise_seed: int
The seed for the generated noise
dark_seed: int
The seed for the generated dark
offset: int
The pedestal offset
Returns
-------
np.ndarray
A simulated ramp of darks
"""
if nints < 1 or ngrps < 1:
return None
if not noise_seed:
noise_seed = 7+int(np.random.uniform()*4000000000.)
if not dark_seed:
dark_seed = 5+int(np.random.uniform()*4000000000.)
np.random.seed(dark_seed)
# Make empty data array
nrows, ncols = darksignal.shape
simulated_data = np.zeros([nints*ngrps,nrows,ncols], dtype=np.float32)
# Define some constants
pedestal = 18.30
c_pink = 9.6
u_pink = 3.2
acn = 2.0
bias_amp = 0.
#bias_amp = 5358.87
#bias_offset = 20944.06
pca0_amp = 0.
rd_noise = 12.95
dark_current = 0.0
dc_seed = dark_seed
bias_offset = offset*gain
# Define the HXRGNoise instance to make a SUBSTRIP256 array
#(in detector coordinates)
noisecube = ng.HXRGNoise(naxis1=nrows, naxis2=ncols, naxis3=ngrps,
pca0_file=pca0_file, x0=0, y0=0, det_size=2048,
verbose=False)
# iterate over integrations
for loop in range(nints):
seed1 = noise_seed+24*int(loop)
ramp = noisecube.mknoise(c_pink=c_pink, u_pink=u_pink,
bias_amp=bias_amp, bias_offset=bias_offset,
acn=acn, pca0_amp=pca0_amp, rd_noise=rd_noise,
pedestal=pedestal, dark_current=dark_current,
dc_seed=dc_seed, noise_seed=seed1, gain=gain)
if len(ramp.shape)==2:
ramp = ramp[np.newaxis,:,:]
ramp = np.transpose(ramp,(0,2,1))
ramp = ramp[::,::-1,::-1]
ramp = add_dark_current(ramp, dc_seed, gain, darksignal)
simulated_data[loop*ngrps:(loop+1)*ngrps,:,:] = np.copy(ramp)
ramp = 0
return simulated_data
def make_photon_yield(photon_yield, orders):
"""
Generates a map of the photon yield for each order.
The shape of both arrays should be [order, nrows, ncols]
Parameters
----------
photon_yield: str
The path to the file containing the calculated photon yield at each pixel
orders: sequence
An array of the median image of each order
Returns
-------
np.ndarray
The array containing the photon yield map for each order
"""
# Get the shape and create empty arrays
dims = orders.shape
sum1 = np.zeros((dims[1], dims[2]), dtype=np.float32)
sum2 = np.zeros((dims[1], dims[2]), dtype=np.float32)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 17:44:49 2020
@author: sergeykoldobskiy
"""
import numpy as np
import warnings
warnings.filterwarnings("ignore", message="divide by zero encountered in")
warnings.filterwarnings("ignore", message="invalid value encountered in")
warnings.filterwarnings("ignore", message='overflow encountered in exp')
m_p = 0.938272
###############################################################################
###############################################################################
def E_trans(energy):
"""Return str with formatted energy value."""
power = np.log10(energy)
power_SI = power // 3
SI = ['eV', 'keV', 'MeV', 'GeV', 'TeV', 'PeV', 'EeV']
try:
en = SI[int(power_SI)]
except IndexError:
return str(energy)+' eV'
# print(power, power_SI, en)
return str((np.round(energy/10**(power_SI*3), 1)))+' '+en
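# Illustrative usage sketch for E_trans (not part of the original module); the
# example energies are arbitrary eV values.
def _example_E_trans():
    assert E_trans(5e9) == '5.0 GeV'
    assert E_trans(2.5e3) == '2.5 keV'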
###############################################################################
###############################################################################
def interpolate_sigma(E_primary, data, le_flag, E_secondary=None):
"""Return interpolated data.
Parameters
----------
E_primary (float): Primary energy, GeV.
data (numpy ndarray): Tabulated cross-section data.
le_flag (int): Flag for low-energy data.
E_secondary (list), optional
Binning for secondaries, GeV. The default is 'data' binning.
Returns
-------
temp (numpy 2D ndarray):
Vector of secondary energy and the vector of the corresponding
differential cross-section.
"""
# if binning is not given as input, use default one
if E_secondary is None:
E_sec = np.unique(data[:, 1])
def_bin_flag = 1
else:
if type(E_secondary) is not np.ndarray:
E_secondary = np.array(E_secondary)
E_sec = E_secondary * 1e9
def_bin_flag = 0
log_E_i = np.log10(E_primary)
log_data = np.log10(data)
log_E_sec = np.log10(E_sec)
uniq_log_E_i = np.unique(log_data[:, 0])
uniq_E_i = np.unique(data[:, 0])
if le_flag:
u = (E_primary - uniq_E_i)
idxl = np.abs(u).argsort(axis=0)[:2]
else:
u = (log_E_i - uniq_log_E_i)
idxl = np.abs(u).argsort(axis=0)[:2]
# interpolation is not needed
if (abs(log_E_i-uniq_log_E_i[idxl[0]]) <= np.log10(1.01)
and def_bin_flag == 1):
# print('No interploation is needed, return tabulated data')
temp = data[data[:, 0] == uniq_E_i[idxl[0]]][:, [1, 2]].T
temp[0] = temp[0]/1e9
temp[1, 0] = 0
return temp
cl1 = abs((log_E_i - uniq_log_E_i[idxl[0]])/(uniq_log_E_i[idxl[1]] -
uniq_log_E_i[idxl[0]]))
cl2 = abs((log_E_i - uniq_log_E_i[idxl[1]])/(uniq_log_E_i[idxl[1]] -
uniq_log_E_i[idxl[0]]))
si1 = log_data[np.abs(log_data[:, 0] - uniq_log_E_i[idxl[0]]) < 1e-6]
si2 = log_data[np.abs(log_data[:, 0] - uniq_log_E_i[idxl[1]]) < 1e-6]
#get indices of the last inf in low energies
inf_si1 = np.where(si1[:,2][si1[:,1]<8]==-np.inf)[0][-1]
inf_si2 = np.where(si2[:,2][si2[:,1]<8]==-np.inf)[0][-1]
si1[:,2] = np.where(np.where(si1[:,2])[0]<inf_si1,-np.inf,si1[:,2])
si2[:,2] = np.where(np.where(si2[:,2])[0]<inf_si2,-np.inf,si2[:,2])
a1 = si1[si1[:, 2] != -np.inf][1:, 1:]
a2 = si2[si2[:, 2] != -np.inf][1:, 1:]
# exception for zero matrix interpolation
try:
min_a1_x, max_a1_x = min(a1[:, 0]), max(a1[:, 0])
min_a2_x, max_a2_x = min(a2[:, 0]), max(a2[:, 0])
except ValueError:
if def_bin_flag == 1:
temp = data[data[:, 0] == uniq_E_i[idxl[0]]][:, [1, 2]].T
temp[0] = temp[0]/1e9
return temp
if def_bin_flag == 0:
temp = np.vstack([E_sec, np.zeros(len(E_sec))])
return temp
sigma_final = np.zeros(log_E_sec.shape)
sigma_final[sigma_final == 0] = -np.inf
new_a1_x = np.linspace(min_a1_x, max_a1_x, 1000)
new_a2_x = np.linspace(min_a2_x, max_a2_x, 1000)
new_a1_y = np.interp(new_a1_x, a1[:, 0], a1[:, 1])
new_a2_y = np.interp(new_a2_x, a2[:, 0], a2[:, 1])
midx = cl2*new_a1_x+cl1*new_a2_x
midy = cl2*new_a1_y+cl1*new_a2_y
filter_energies = (log_E_sec > np.min([min_a1_x, min_a2_x])) *\
(log_E_sec < np.max([max_a1_x, max_a2_x])) * (log_E_sec <= log_E_i) *\
(log_E_sec <= max(midx)) * (log_E_sec >=min(midx))
fiE_energies = log_E_sec[filter_energies]
fiE_bins = np.where(filter_energies)
sigma_final[fiE_bins] = np.interp(fiE_energies, midx, midy)
temp = np.array((E_sec, np.power(10, sigma_final)))
temp[0] = temp[0]/1e9
return temp
###############################################################################
###############################################################################
def open_data_files(secondary, primary_target):
"""Open AAFrag data files."""
import os
import inspect
AAFrag_path = (os.path.dirname(inspect.getfile(open_data_files)))
if secondary == 'gam':
data_col = 2
elif secondary == 'el':
data_col = 2
elif secondary == 'posi':
secondary = 'el'
data_col = 3
elif secondary == 'nu_e':
secondary = 'nu'
data_col = 2
elif secondary == 'anu_e':
secondary = 'nu'
data_col = 3
elif secondary == 'nu_mu':
secondary = 'nu'
data_col = 4
elif secondary == 'anu_mu':
secondary = 'nu'
data_col = 5
elif secondary == 'p':
secondary = 'pap'
data_col = 2
elif secondary == 'ap':
secondary = 'pap'
data_col = 3
elif secondary == 'n':
secondary = 'nan'
data_col = 2
elif secondary == 'an':
secondary = 'nan'
data_col = 3
elif secondary == 'nu_all':
secondary = 'nu'
data_col = 100
else:
return print('Unknown product. Check your input, please!')
name = secondary+'_'+primary_target.split('-')[0]+'_' +\
primary_target.split('-')[1]
try:
data_HE = np.genfromtxt(AAFrag_path+'/Tables/'+name+'_04')
if data_col != 100:
data_HE = data_HE[:, [0, 1, data_col]]
else:
temp_nu = data_HE[:,[2,3,4,5]].sum(axis=1)
data_HE = np.vstack([data_HE[:, [0, 1]].T,temp_nu]).T
data_LE = 0
except OSError:
return print('There is no data for this combination of primary '+\
'and target. Check your input, please!')
E_th_b = float(data_HE[0, 0])
E_th_t = float(data_HE[-1:, 0])
E_th_c = 0
try:
data_LE = np.genfromtxt(AAFrag_path+'/Tables/'+name+'_04L')
if data_col != 100:
data_LE = data_LE[:, [0, 1, data_col]]
else:
temp_nu = data_LE[:,[2,3,4,5]].sum(axis=1)
data_LE = np.vstack([data_LE[:, [0, 1]].T,temp_nu]).T
E_th_b = float(data_LE[0, 0])
E_th_c = float(data_LE[-1:, 0])
except OSError:
pass
return data_HE, data_LE, E_th_b, E_th_c, E_th_t
###############################################################################
###############################################################################
def get_cs_value(secondary, primary_target, E_primaries,
E_secondaries=None):
"""
Return single differential cross-section value.
Parameters
----------
secondary (str): Secondary particle produced in the nucleon-nucleon
interaction.
Allowed inputs are: gam, el, posi, nu_e, anu_e, nu_mu, anu_mu, nu_all, p, ap, n, an
primary_target (str): Primary/target combination.
E_primaries (int or float): Total energy of a primary particle in GeV.
E_secondaries (int or float or list or tuple or numpy.ndarray): optional
Vector of the secondary particle energy (in GeV).
Default (tabulated) binning is used if the input is empty.
Returns
-------
2d numpy array (secondary differential cross-section, secondary energy)
"""
# primary = primary_target.split('-')[0]
# avaliable_primaries = ['p','He','C','Al','Fe']
# masses = [0.9385,3.7274,11.178,25.133,52.103]
# mass = masses[avaliable_primaries==primary]
# E_primary = mass + T_primaries
E_primaries = E_primaries * 1e9
try:
data_HE, data_LE, E_th_b, E_th_c, E_th_t = open_data_files(secondary,
primary_target)
except TypeError:
return
if E_th_b/E_primaries < 1.001 and E_primaries/E_th_t < 1.001:
le_flag = 1
if E_primaries - E_th_c >= 9e-3:
le_flag = 0
if (E_secondaries is None):
data = interpolate_sigma(E_primaries, data_HE, le_flag)
else:
if type(E_secondaries) is not np.ndarray:
if np.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = np.array(E_secondaries)
data = interpolate_sigma(E_primaries, data_HE, le_flag,
E_secondaries)
if le_flag == 1:
if (E_secondaries is None):
data = interpolate_sigma(E_primaries, data_LE, le_flag)
else:
if type(E_secondaries) is not np.ndarray:
if np.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = np.array(E_secondaries)
data = interpolate_sigma(E_primaries, data_LE, le_flag,
E_secondaries)
data[1] = data[1]/data[0]
else:
return print('Primary kinetic energy '+E_trans(E_primaries) +
' is not in range: '+E_trans(E_th_b)+' -- ' +
E_trans(E_th_t) +
' available for primary/target combination: ' +
primary_target)
return np.array([data[1],data[0]])
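###############################################################################
###############################################################################
# Illustrative usage sketch for get_cs_value (not part of the original module).
# It assumes the AAfrag data tables shipped with this package are present and
# that 'p-p' is a valid primary/target label; otherwise the function prints a
# message and returns None, which the guard below tolerates.
def _example_get_cs_value():
    # d(sigma)/dE for gamma production by a 100 GeV primary, at 1 and 10 GeV
    result = get_cs_value('gam', 'p-p', 100, E_secondaries=[1.0, 10.0])
    if result is not None:
        dsigma_dE, E_gamma = result
        print(dsigma_dE, E_gamma)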
###############################################################################
###############################################################################
def get_cross_section(secondary, primary_target, E_primaries=None,
E_secondaries=None):
"""
Reconstruct cross-section values for given values of the total energy for
primary and secondary particle combination.
Return the matrix of differential cross-section, vector of primary total
energy and secondary energy.
If primary and secondary energies are not set, the default binning will be used.
Parameters
----------
secondary (str): Secondary particle produced in the nucleon-nucleon
interaction.
Allowed inputs are: gam, el, posi, nu_e, anu_e, nu_mu, anu_mu, nu_all, p, ap, n, an
primary_target (str): Primary/target combination.
E_primaries (int or float or list or tuple or numpy.ndarray): optional
Vector of the primary particle energy (in GeV) of the size M.
The default values are taken from the tables.
E_secondaries (int or float or list or tuple or numpy.ndarray): optional
Vector of the secondary particle energy (in GeV) of the size N.
The default values are taken from the tables.
Returns
-------
(numpy ndarray 2D)
Matrix MxN of differential cross-section (in mb/GeV) for a given
combination of vectors.
(numpy ndarray 1D)
Vector of primary total energy in GeV.
(numpy ndarray 1D)
Vector of secondary energy in GeV.
"""
try:
data_HE, data_LE, E_th_b, E_th_c, E_th_t = open_data_files(secondary,
primary_target)
except TypeError:
return
# primary = primary_target.split('-')[0]
# avaliable_primaries = ['p','He','C','Al','Fe']
# masses = [0.9385,3.7274,11.178,25.133,52.103]
# mass = masses[avaliable_primaries==primary]
if (E_primaries is None) and (E_secondaries is None):
energy_primary = np.unique(data_HE[:, 0])/1e9
len_en_primary = len(energy_primary)
energy_secondary = np.unique(data_HE[:, 1])/1e9
len_en_secondary = len(energy_secondary)
cs_matrix = np.reshape(data_HE[:, 2],
[len_en_primary, len_en_secondary])
if not np.isscalar(data_LE):
energy_primary_LE = np.unique(data_LE[:, 0])/1e9
len_en_primary_LE = len(energy_primary_LE)
len_en_secondary_LE = len(np.unique(data_LE[:, 1]))
cs_matrix_LE = np.reshape(data_LE[:, 2], [len_en_primary_LE,
len_en_secondary_LE])
cs_matrix = np.vstack([cs_matrix_LE[:-1], cs_matrix])
energy_primary = np.hstack([energy_primary_LE[:-1],
energy_primary])
cs_matrix[:, 0] = 0
cs_matrix = cs_matrix/energy_secondary
else:
if (E_primaries is None):
E_primaries = np.unique(data_HE[:, 0])/1e9
if not np.isscalar(data_LE):
energy_primary_LE = np.unique(data_LE[:, 0])/1e9
E_primaries = np.hstack([energy_primary_LE[:-1], E_primaries])
else:
if type(E_primaries) is not np.ndarray:
if np.isscalar(E_primaries):
E_primaries = [E_primaries]
E_primaries = np.array(E_primaries)
E_primaries = E_primaries * 1e9
E_max = E_primaries.max()
E_min = E_primaries.min()
if E_th_b/E_min > 1.001 or E_max/E_th_t > 1.001:
return print('Primary kinetic energy is not in range: ' +
E_trans(E_th_b)+' -- '+E_trans(E_th_t) +
' available for primary/target combination: ' +
primary_target)
noE_in_range = 1
else:
noE_in_range = 0
c = 0
for E_primary in E_primaries:
if E_th_b/E_primary < 1.001 and E_primary/E_th_t < 1.001:
le_flag = 1
if E_primary - E_th_c >= 9e-3:
le_flag = 0
if (E_secondaries is None):
if le_flag == 1:
new_data = interpolate_sigma(E_primary,
data_LE, le_flag)
else:
new_data = interpolate_sigma(E_primary,
data_HE, le_flag)
else:
if type(E_secondaries) is not np.ndarray:
if np.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = np.array(E_secondaries)
if le_flag == 1:
new_data = interpolate_sigma(E_primary, data_LE,
le_flag, E_secondaries)
else:
new_data = interpolate_sigma(E_primary, data_HE,
le_flag, E_secondaries)
if c == 0:
cs_matrix = new_data[1]
energy_primary = E_primary/1e9
energy_secondary = new_data[0]
else:
cs_matrix = np.vstack([cs_matrix, new_data[1]])
energy_primary = np.vstack([energy_primary, E_primary/1e9])
c += 1
if noE_in_range == 0:
cs_matrix = cs_matrix / energy_secondary
if c == 1:
energy_primary = np.array([energy_primary])
cs_matrix = np.array([cs_matrix])
return cs_matrix, (energy_primary), energy_secondary
return cs_matrix, np.squeeze(energy_primary), energy_secondary
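###############################################################################
###############################################################################
# Illustrative usage sketch for get_cross_section (not part of the original
# module), using the default tabulated binning. It assumes the AAfrag tables
# for the assumed 'p-p' combination are available; otherwise the function
# prints a message and returns None.
def _example_get_cross_section():
    out = get_cross_section('gam', 'p-p')
    if out is not None:
        cs_matrix, E_primary, E_secondary = out
        # cs_matrix[i, j] is d(sigma)/dE in mb/GeV at primary energy
        # E_primary[i] and secondary energy E_secondary[j]
        print(cs_matrix.shape, E_primary.shape, E_secondary.shape)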
###############################################################################
###############################################################################
def get_cross_section_Kafexhiu2014(E_primaries, E_secondaries):
"""
Return cross-section values (Kafexhiu et al. 2014).
Return the matrix of the differential cross-section for a given
combination of energy vectors, primary energy vector, secondary energy
vector.
Based on Kafexhiu et al. 2014 (GEANT parameters)
Calculations are performed for p-p interactions
and for gamma production only.
Works well at low energies,
but should be replaced by newer codes at high energies.
----------
E_primaries (int or float or list or tuple or numpy.ndarray):
Vector of the primary proton energy (in GeV) of the size M.
E_secondaries (int or float or list or tuple or numpy.ndarray):
Vector of the gamma energy (in GeV) of the size N.
Returns
-------
(numpy ndarray 2D)
Matrix MxN of the differential cross-section (in mb/GeV)
for a given combination of vectors.
(numpy ndarray 1D)
Vector of primary energy in GeV.
(numpy ndarray 1D)
Vector of secondary energy in GeV.
"""
from Kafexhiu2014 import F_gamma_Kafexhiu2014
csf = np.vectorize(F_gamma_Kafexhiu2014)
if (E_primaries is None) or (E_secondaries is None):
return print('Error: please provide the energy binning for protons'+\
' and secondary particles.')
else:
if type(E_primaries) is not np.ndarray:
if np.isscalar(E_primaries):
E_primaries = [E_primaries]
E_primaries = np.array(E_primaries)
if type(E_secondaries) is not np.ndarray:
if np.isscalar(E_secondaries):
E_secondaries = [E_secondaries]
E_secondaries = np.array(E_secondaries)
cs_matrix = np.zeros([len(E_primaries), len(E_secondaries)])
for i, E_p in enumerate(E_primaries):
cs_matrix[i] = csf(E_p-m_p, E_secondaries, 'GEANT')
return cs_matrix, E_primaries, E_secondaries
###############################################################################
###############################################################################
def get_cross_section_Kamae2006(secondary, E_primaries,
E_secondaries, diffractive=True):
"""
Return cross-section values (Kamae et al. 2006).
Return the matrix of the differential cross-section for a given
combination of energy vectors, primary energy vector, secondary energy
vector.
Based on Kamae et al. 2006
Calculations are performed for p-p interactions
and for gamma and lepton production only.
Works well at low energies,
but should be replaced by newer codes at high energies.
----------
secondary (str): Secondary particle of proton-proton interaction.
E_primaries (int or float or list or tuple or numpy.ndarray):
Vector of the primary proton energy (in GeV) of the size M.
E_secondaries (int or float or list or tuple or numpy.ndarray):
Vector of the secondary particle energy (in GeV) of the size N.
diffractive (bool): Include or exclude diffractive processes
Returns
-------
(numpy ndarray 2D)
Matrix MxN of the differential cross-section (in mb/GeV)
for a given combination of vectors.
(numpy ndarray 1D)
Vector of primary energy in GeV.
(numpy ndarray 1D)
Vector of secondary energy in GeV.
"""
if secondary == 'gam':
from Kamae2006 import dXSdE_gamma_Kamae2006
csf = np.vectorize(dXSdE_gamma_Kamae2006)
elif secondary == 'el':
from Kamae2006 import dXSdE_elec_Kamae2006
csf = np.vectorize(dXSdE_elec_Kamae2006)
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class pushBox(gym.Env):
RIGHT = 0
UP = 1
LEFT = 2
DOWN = 3
def __init__(self, grid_size=10, mode='vector'):
assert mode in ['vector','image'], f'mode {mode} invalid'
assert grid_size >= 8
super(pushBox,self).__init__()
self.mode = mode
self.grid_size = grid_size
self.n_steps = 0
self.max_n_steps = grid_size * 4
self.state = np.zeros((2,), dtype=int)
self.goal = np.zeros((2,), dtype=int)
self.box = np.zeros((2,), dtype=int)
self.state_color = np.array([0, 255, 0])
self.goal_color = np.array([255, 0, 0])
self.box_color = np.array([0, 0, 255])
self.action_space = spaces.Discrete(4)
if mode == 'vector':
high = np.full(4, grid_size, dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
elif mode == 'image':
self.observation_space = spaces.Box(0, 255, (grid_size * 4, grid_size * 4, 3), dtype = np.uint8)
def _get_obs(self):
if self.mode == 'vector':
return np.concatenate((np.subtract(self.state, self.box), np.subtract(self.box, self.goal)))
import numpy as np
from sklearn.cluster import AffinityPropagation
#import pydotplus as pydot
from collections import Counter
from distance_utils import time_series_twed
import pandas as pd
from scipy.spatial.distance import squareform
import time
class BottomUpSubsequenceTree:
def __init__(self, max_level, prototype_subsequences_list,
affinities, db_time_series,
time_window, time_step, weighted=True, max_branching_factor=20):
self.time_window = time_window
self.time_step = time_step
self.max_level = max_level
self.max_branching_factor = max_branching_factor
#self.graph = pydot.Dot(graph_type='graph')
self.query_ts = None
self.query_score_chart = None
self.node_shortcuts = None
self.weights = None
self.d_data_frame = None
self._original_time_series_ids = None
self._query_vector = None
self.n_nodes = 0
self._weighted = weighted
prototype_subsequences = np.array(prototype_subsequences_list)
self._build_tree(affinities, prototype_subsequences)
self._populate_tree(db_time_series)
self._build_node_shorcuts()
self._build_weights_vector()
self._build_d_data_frame()
@property
def n_subsequences(self):
return len(self.db_subsequences_dict)
@property
def original_time_series_ids(self):
if self._original_time_series_ids is None:
self._original_time_series_ids = list(self.root.inverted_file)
return self._original_time_series_ids
@property
def n_original_time_series(self):
return len(self.original_time_series_ids)
@property
def query_vector(self):
if self._query_vector is None:
q_vector = np.array([node.q for node in self.node_shortcuts])
q_norm = np.linalg.norm(q_vector)
self._query_vector = q_vector / q_norm
return self._query_vector
@property
def _queried_time_series_ids(self):
return list(set().union(*self._queried_time_series_ids_iterator()))
def prune(self):
self.root.prune_branch(1, self.max_level)
def _queried_time_series_ids_iterator(self):
for node in self.node_shortcuts:
if node.is_leaf and node.n_query_subsequences > 0:
yield node.inverted_file.keys()
def make_query(self, time_series, timer=None):
if timer is not None:
timer.start()
subsequences = time_series.run_sliding_window(self.time_window, self.time_step)
if timer is not None:
timer.stop()
timer.start()
for node in self.node_shortcuts:
node.n_query_subsequences = 0
if timer is not None:
timer.stop()
timer.start()
self._query_vector = None
for subsequence in subsequences:
self.root.add_query_subsequence(subsequence)
if timer is not None:
timer.stop()
timer.start()
t = time.time()
not_zero_node_ids = np.where(self.query_vector != 0)[0]
print("{}".format(time.time() - t))
t = time.time()
not_zero_query_vector = self.query_vector[not_zero_node_ids]
print("{}".format(time.time() - t))
t = time.time()
not_zero_ts_ids = self._queried_time_series_ids
print("{}".format(time.time() - t))
t = time.time()
not_zero_d_dataframe = self.d_data_frame.loc[not_zero_ts_ids, not_zero_node_ids]
print("{}".format(time.time() - t))
print('')
if timer is not None:
timer.stop()
timer.start()
score = -np.sum(not_zero_query_vector*not_zero_d_dataframe.values, axis=1)
#score = 2-2*score
if timer is not None:
timer.stop()
timer.start()
order = np.argsort(score)
result = not_zero_d_dataframe.index.values[order]
if timer is not None:
timer.stop()
return result
def get_db_subsequences_dict(self):
def _get_db_subsequences_dict():
return self.db_subsequences_dict
return _get_db_subsequences_dict
def get_next_node_id(self):
def _get_next_node_id():
n_nodes = self.n_nodes
self.n_nodes += 1
return n_nodes
return _get_next_node_id
def get_original_time_series_ids(self):
def _get_original_time_series_ids():
return self.original_time_series_ids
return _get_original_time_series_ids
# def save_graph(self):
# self.generate_graph()
# self.graph.write_png('graph.png')
#
# def generate_graph(self):
# self.root.add_to_graph(None, self.graph)
def _build_tree(self, affinities, subsequences):
print('Building layer 0')
center_indices, labels = self.run_affinity_propagation(affinities, True)
centers = subsequences[center_indices]
affinities = affinities[center_indices][:, center_indices]
nodes = self._build_leaves(centers)
print("{} nodes".format(len(nodes)))
levels = 1
while len(nodes) > self.max_branching_factor:
print('Building layer {0}'.format(levels))
center_indices, labels = self.run_affinity_propagation(affinities, False)
centers = centers[center_indices]
affinities = affinities[center_indices][:, center_indices]
nodes = self._build_layer(nodes, centers, labels)
print("{} nodes".format(len(nodes)))
levels += 1
if len(nodes) == 1:
self.root = nodes[0]
else:
self.root = Node(None, nodes, self.get_next_node_id(), self.get_original_time_series_ids())
if levels > self.max_level:
self.prune()
def _build_leaves(self, leaf_centers):
return [Node(center, None, self.get_next_node_id(), self.get_original_time_series_ids())
for center in leaf_centers]
def _build_layer(self, lower_nodes, upper_centers, labels):
lower_nodes = np.array(lower_nodes)
nodes = []
for i, center in enumerate(upper_centers):
children = lower_nodes[np.where(labels == i)[0]]
if len(children) == 1:
nodes.append(children[0])
else:
nodes.append(Node(center, children, self.get_next_node_id(), self.get_original_time_series_ids()))
return nodes
def _populate_tree(self, db_time_series):
print("populating tree")
print('time window')
print(self.time_window)
print('time step')
print(self.time_step)
print(type(db_time_series))
print(db_time_series)
for i, ts in enumerate(db_time_series):
print(ts)
for subsequence in ts.run_sliding_window(self.time_window, self.time_step):
self._add_subsequence(subsequence)
print("{0} time series added".format(i))
def _build_node_shorcuts(self, just_leaves=False):
shortcut_dict = {}
self.root.add_shortcut_to_dict(shortcut_dict)
shortcut_list = [v for v in shortcut_dict.values()
if not just_leaves or v.is_leaf]
self.node_shortcuts = shortcut_list
def _build_weights_vector(self):
weights_list = [node.weight for node in self.node_shortcuts]
self.weights = np.array(weights_list)
def _build_d_data_frame(self, just_leaves=False):
d_list = [node.d_vector for node in self.node_shortcuts]
d_matrix = np.column_stack(d_list)
d_norm = np.linalg.norm(d_matrix, axis=1)
d_matrix = (d_matrix.T / d_norm).T
d_matrix[d_matrix == np.inf] = 0
self.d_data_frame = pd.DataFrame(np.nan_to_num(d_matrix),
index=self.original_time_series_ids)
def _add_subsequence(self, subsequence):
self.root.add_db_subsequence(subsequence)
def calculate_inverted_files(self):
return self.root.inverted_file
def run_affinity_propagation(self, affinities, leaves):
affinities_list = squareform(affinities)
preference = np.median(affinities_list)
branching_factor = np.inf
while branching_factor > self.max_branching_factor:
ap = AffinityPropagation(affinity='precomputed')
ap.preference = preference
ap.fit(affinities)
if leaves:
branching_factor = 0
else:
branching_factor = max(Counter(ap.labels_).values())
#branching_factor = affinities.shape[0]//len(ap.cluster_centers_indices_)
preference += np.max(affinities_list)
from pyapprox.models.wrappers import ActiveSetVariableModel
from pyapprox.cvar_regression import smooth_max_function_first_derivative,\
smooth_max_function_second_derivative
import numpy as np
from scipy.optimize import minimize, Bounds
from functools import partial
from scipy.stats import gaussian_kde as KDE
from pyapprox.configure_plots import *
import scipy.stats as ss
from pyapprox.utilities import get_all_sample_combinations
from inspect import signature
def approx_jacobian(func, x, *args, epsilon=np.sqrt(np.finfo(float).eps)):
x0 = np.asfarray(x)
assert x0.ndim == 1 or x0.shape[1] == 1
f0 = np.atleast_1d(func(*((x0,)+args)))
if f0.ndim == 2:
assert f0.shape[1] == 1
f0 = f0[:, 0]
jac = np.zeros([len(x0), len(f0)])
dx = np.zeros(x0.shape)
for i in range(len(x0)):
dx[i] = epsilon
f1 = func(*((x0+dx,)+args))
if f1.ndim == 2:
assert f1.shape[1] == 1
f1 = f1[:, 0]
jac[i] = (f1 - f0)/epsilon
dx[i] = 0.0
return jac.transpose()
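# Illustrative check of approx_jacobian (not part of the original module): the
# finite-difference Jacobian of a simple quadratic map is compared against its
# analytic Jacobian. The test point is an arbitrary choice.
def _example_approx_jacobian():
    def quad_map(x):
        # R^2 -> R^2 map: [x0**2 + x1, 3*x1]
        return np.array([x[0]**2 + x[1], 3.0*x[1]])

    x0 = np.array([1.0, 2.0])
    jac_fd = approx_jacobian(quad_map, x0)
    jac_exact = np.array([[2.0*x0[0], 1.0],
                          [0.0, 3.0]])
    assert np.allclose(jac_fd, jac_exact, atol=1e-5)
    return jac_fd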
def eval_function_at_multiple_design_and_random_samples(function, uq_samples, design_samples):
"""
for functions which only take 1d arrays for uq_samples and design_samples
loop over all combinations and evaluate function at each combination
design_samples vary slowest and uq_samples vary fastest
Let design_samples = [[1, 3], [2, 4]] (2 design variables, 2 samples with
columns [1, 2] and [3, 4]) and uq_samples = [[0, 0, 0], [0, 1, 2]]
(2 random variables, 3 samples). Then the function is evaluated at the
(design, uq) pairs
([1, 2], [0, 0]), ([1, 2], [0, 1]), ([1, 2], [0, 2]),
([3, 4], [0, 0]), ([3, 4], [0, 1]), ([3, 4], [0, 2])
where function is called as function(uq_sample, design_sample) for each pair.
"""
vals = []
# put design samples first so that samples iterates over uq_samples fastest
samples = get_all_sample_combinations(design_samples, uq_samples)
for xx, zz in zip(
samples[:design_samples.shape[0]].T,
samples[design_samples.shape[0]:].T):
# flip xx,zz because functions assumed to take uq_samples then
# design_samples
vals.append(function(zz, xx))
return np.asarray(vals)
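# Illustrative sketch (not part of the original module) of the evaluation
# ordering documented above: design samples vary slowest, uq samples fastest.
# It relies on get_all_sample_combinations imported at the top of this module.
def _example_eval_at_combinations():
    uq_samples = np.array([[0., 0., 0.], [0., 1., 2.]])  # 2 vars x 3 samples
    design_samples = np.array([[1., 3.], [2., 4.]])      # 2 vars x 2 samples
    # the passed function receives (uq_sample, design_sample) as 1D arrays
    fun = lambda zz, xx: zz.sum() + 10*xx.sum()
    vals = eval_function_at_multiple_design_and_random_samples(
        fun, uq_samples, design_samples)
    assert vals.shape[0] == 6  # 2 design samples x 3 uq samples
    return vals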
def eval_mc_based_jacobian_at_multiple_design_samples(grad, stat_func,
uq_samples, design_samples):
"""
Alternatively I could use
jacobian = [np.mean([constraint_grad_single(z,x) for z in zz.T],axis=0) for x in xx.T]
But I think this implementation will allow better use of concurrent evaluations in the
future. For example eval_function_at_multiple_design_and_random_samples could
utilize an asynchronous call over all the sample combinations
TODO combine uq_samples and design samples into one matrix and assume functions
always take a single matrix and not two matrices
"""
grads = eval_function_at_multiple_design_and_random_samples(
grad, uq_samples, design_samples)
ndesign_samples = design_samples.shape[1]
nuq_samples = uq_samples.shape[1]
jacobian = np.array(
[stat_func(grads[ii*nuq_samples:(ii+1)*nuq_samples])
for ii in range(ndesign_samples)])
return jacobian
def check_inputs(uq_samples, design_samples):
if design_samples.ndim == 1:
design_samples = design_samples[:, np.newaxis]
if uq_samples is not None and uq_samples.ndim == 1:
uq_samples = design_samples[:, np.newaxis]
if (uq_samples is not None and
(design_samples.shape[1] > 1 and uq_samples.shape[1] > 1)):
assert design_samples.shape[1] == uq_samples.shape[1]
return uq_samples, design_samples
def deterministic_lower_bound_constraint(constraint_function, lower_bound,
uq_samples, design_samples):
uq_samples, design_samples = check_inputs(uq_samples, design_samples)
assert design_samples.shape[1] == 1
val = lower_bound-constraint_function(uq_samples, design_samples)
# scipy minimize enforces constraints are non-negative so use negative here
# to enforce upper bound
return -val
def variance_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
design_samples):
uq_samples, design_samples = check_inputs(uq_samples, design_samples)
assert design_samples.shape[1] == 1
# scipy minimize enforces constraints are non-negative
vals = constraint_function(uq_samples, design_samples)
val = lower_bound-np.std(vals)**2
# scipy minimize enforces constraints are non-negative so use negative here
# to enforce upper bound
return -val
def mean_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
design_samples):
uq_samples, design_samples = check_inputs(uq_samples, design_samples)
assert design_samples.shape[1] == 1
# scipy minimize enforces constraints are non-negative
vals = constraint_function(uq_samples, design_samples)
val = lower_bound-np.mean(vals)**2
# scipy minimize enforces constraints are non-negative so use negative here
# to enforce upper bound
return -val
def mean_lower_bound_constraint_jacobian(constraint_function_jacobian, uq_samples,
design_samples):
uq_samples, design_samples = check_inputs(uq_samples, design_samples)
assert design_samples.shape[1] == 1
# scipy minimize enforces constraints are non-negative
vals = constraint_function_jacobian(uq_samples, design_samples)
val = -np.mean(vals)**2
# scipy minimize enforces constraints are non-negative so use negative here
# to enforce upper bound
return -val
def quantile_lower_bound_constraint(constraint_function, quantile, lower_bound,
uq_samples, design_samples):
uq_samples, design_samples = check_inputs(uq_samples, design_samples)
assert design_samples.shape[1] == 1
vals = constraint_function(uq_samples, design_samples)
val = (lower_bound-ss.mstats.mquantiles(vals, prob=[quantile]))
# scipy minimize enforces constraints are non-negative so use negative here
# to enforce lower bound
return -val
# from pyapprox.cvar_regression import smooth_conditional_value_at_risk, \
# conditional_value_at_risk
# def cvar_lower_bound_constraint(constraint_function,quantile,lower_bound,eps,
# uq_samples,design_samples):
# uq_samples,design_samples = check_inputs(uq_samples,design_samples)
# assert design_samples.shape[1]==1
# vals = constraint_function(uq_samples,design_samples)
# # -vals because we want to minimize lower tail
# val = (lower_bound-smooth_conditional_value_at_risk(0,eps,quantile,-vals))
# #val = (lower_bound-conditional_value_at_risk(-vals,quantile))
# return val
class MultipleConstraints(object):
def __init__(self, constraints):
self.constraints = constraints
def __call__(self, design_sample, constraint_idx=None):
if constraint_idx is None:
constraint_idx = np.arange(len(self.constraints))
nconstraints = len(constraint_idx)
vals = np.empty(nconstraints)
for ii, jj in enumerate(constraint_idx):
vals[ii] = self.constraints[jj](design_sample)
return vals
class MCStatisticConstraint(object):
def __init__(self, constraint_function, generate_samples, info):
self.constraint_function = constraint_function
self.generate_samples = generate_samples
self.info = info
def __call__(self, design_samples):
uq_samples = self.generate_samples()
constraint_type = self.info['type']
if constraint_type == 'quantile':
quantile = self.info['quantile']
lower_bound = self.info['lower_bound']
return quantile_lower_bound_constraint(
self.constraint_function, quantile, lower_bound,
uq_samples, design_samples)
elif constraint_type == 'cvar':
quantile = self.info['quantile']
lower_bound = self.info['lower_bound']
eps = self.info['smoothing_eps']
return cvar_lower_bound_constraint(
self.constraint_function, quantile, lower_bound, eps,
uq_samples, design_samples)
elif constraint_type == 'var':
var_lower_bound = self.info['lower_bound']
return variance_lower_bound_constraint(
self.constraint_function, var_lower_bound, uq_samples, design_samples)
else:
raise Exception(
'constraint type (%s) not implemented' % constraint_type)
class DeterministicConstraint(object):
def __init__(self, constraint_function, info):
self.constraint_function = constraint_function
self.info = info
def __call__(self, design_samples):
lower_bound = self.info['lower_bound']
uq_nominal_sample = self.info['uq_nominal_sample']
return deterministic_lower_bound_constraint(
self.constraint_function, lower_bound, uq_nominal_sample,
design_samples)
def setup_inequality_constraints(constraint_functions, constraints_info,
uq_samples):
constraints = []
for ii in range(len(constraint_functions)):
info = constraints_info[ii]
constraint_type = info['type']
if constraint_type == 'quantile':
quantile = info['quantile']
quantile_lower_bound = info['quantile_lower_bound']
ineq_cons_fun = partial(
quantile_lower_bound_constraint, constraint_functions[ii],
quantile, quantile_lower_bound, uq_samples)
elif constraint_type == 'cvar':
quantile = info['quantile']
quantile_lower_bound = info['cvar_lower_bound']
eps = info['smoothing_eps']
ineq_cons_fun = partial(
cvar_lower_bound_constraint, constraint_functions[ii],
quantile, quantile_lower_bound, eps, uq_samples)
elif constraint_type == 'var':
var_lower_bound = info['var_lower_bound']
ineq_cons_fun = partial(
variance_lower_bound_constraint, constraint_functions[ii],
var_lower_bound, uq_samples)
elif constraint_type == 'deterministic':
lower_bound = info['lower_bound']
ineq_cons_fun = partial(
deterministic_lower_bound_constraint, constraint_functions[ii],
lower_bound, uq_samples)
else:
raise Exception(
'constraint type (%s) not implemented' % constraint_type)
ineq_cons = {'type': 'ineq', 'fun': ineq_cons_fun}
constraints.append(ineq_cons)
return constraints
def run_design(objective, init_design_sample,
constraints, bounds, optim_options):
opt_history = [init_design_sample[:, 0]]
def callback(xk):
opt_history.append(xk)
# print(objective(xk))
#print([constraints[ii]['fun'](xk) for ii in [0,1]])
# opt_method = 'SLSQP'
# res = minimize(
# objective, init_design_sample[:,0], method=opt_method, jac=None,
# constraints=constraints,
# options=optim_options,bounds=bounds,callback=callback)
from scipy.optimize import fmin_slsqp
res = fmin_slsqp(objective, init_design_sample[:, 0], f_ieqcons=constraints,
bounds=bounds, callback=callback, full_output=True) # , **optim_options)
class result():
def __init__(self, x, fun):
self.x = np.atleast_1d(x)
self.fun = fun
res = result(res[0], res[1])
opt_history = (np.array(opt_history)).T
return res, opt_history
def plot_optimization_history(obj_function, constraints, uq_samples, opt_history,
plot_limits):
# fig,axs=plot_optimization_objective_and_constraints_2D(
# [constraints[ii]['fun'] for ii in range(len(constraints))],
# partial(obj_function,uq_samples[:,0]),plot_limits)
fig, axs = plot_optimization_objective_and_constraints_2D(
constraints, partial(obj_function, uq_samples[:, 0]), plot_limits)
# objective can only be evaluated at one uq_sample thus use of
# uq_samples[:,0]
for ii in range(len(axs)):
axs[ii].plot(opt_history[0, :], opt_history[1, :], 'ko')
for jj, txt in enumerate(range(opt_history.shape[1])):
axs[ii].annotate(
'%d' % txt, (opt_history[0, jj], opt_history[1, jj]))
return fig, axs
# def plot_optimization_objective_and_constraints_2D(
# constraint_functions,objective,plot_limits):
def plot_optimization_objective_and_constraints_2D(
constraints, objective, plot_limits):
from pyapprox.visualization import get_meshgrid_function_data
num_pts_1d = 100
num_contour_levels = 30
fig, axs = plt.subplots(1, 3, figsize=(3*8, 6))
# for ii in range(len(constraint_functions)+1):
for ii in range(len(constraints.constraints)+1):
# if ii==len(constraint_functions):
if ii == len(constraints.constraints):
function = objective
else:
# def function(design_samples):
# vals = np.empty((design_samples.shape[1]))
# for jj in range(design_samples.shape[1]):
# vals[jj]=constraint_functions[ii](design_samples[:,jj])
# return vals
def function(design_samples):
vals = np.empty((design_samples.shape[1]))
for jj in range(design_samples.shape[1]):
vals[jj] = constraints(design_samples[:, jj], [ii])
return vals
X, Y, Z = get_meshgrid_function_data(
function, plot_limits, num_pts_1d)
norm = None
cset = axs[ii].contourf(
X, Y, Z, levels=np.linspace(Z.min(), Z.max(), num_contour_levels),
cmap=mpl.cm.coolwarm,
norm=norm)
# for kk in range(len(constraint_functions)):
for kk in range(len(constraints.constraints)):
if ii == kk:
ls = '-'
else:
ls = '--'
axs[kk].contour(X, Y, Z, levels=[0], colors='k', linestyles=ls)
plt.colorbar(cset, ax=axs[ii])
return fig, axs
def plot_constraint_pdfs(constraint_functions, uq_samples, design_sample,
fig_pdf=None, axs_pdf=None, label=None, color=None):
colors = ['b', 'gray']
nconstraints = len(constraint_functions)
if axs_pdf is None:
fig_pdf, axs_pdf = plt.subplots(
1, nconstraints, figsize=(nconstraints*8, 6))
for ii in range(nconstraints):
# evaluate constraint function at each of the uq samples
constraint_function_vals = constraint_functions[ii](
uq_samples, design_sample)
constraint_kde = KDE(constraint_function_vals)
yy = np.linspace(constraint_function_vals.min(),
constraint_function_vals.max(), 101)
axs_pdf[ii].fill_between(yy, 0, constraint_kde(yy), alpha=0.5, label=label,
color=color)
axs_pdf[ii].axvline(0, color='k')
# axs_pdf[ii].axvline(constraints[ii]['fun'](design_sample),color='r')
return fig_pdf, axs_pdf
def plot_constraint_cdfs(constraints, constraint_functions, uq_samples,
design_sample, quantile, fig_cdf, axs_cdf=None, label=None,
color=None):
nconstraints = len(constraint_functions)
if axs_cdf is None:
fig_cdf, axs_cdf = plt.subplots(
1, nconstraints, figsize=(nconstraints*8, 6))
for ii in range(nconstraints):
constraint_function_vals = constraint_functions[ii](
uq_samples, design_sample)
cvar = (conditional_value_at_risk(-constraint_function_vals, 0.9))
cvars = (smooth_conditional_value_at_risk(
0, 1e-3, 0.9, -constraint_function_vals))
print('cvar', cvar)
print('cvars', cvars)
#constraint_val = constraints[ii]['fun'](design_sample)
constraint_val = constraints(design_sample, [ii])
constraint_function_vals.sort()
cdf_vals = np.linspace(0, 1, constraint_function_vals.shape[0]+1)[1:]
axs_cdf[ii].plot(constraint_function_vals, cdf_vals, label=label,
color=color)
#I = np.where(constraint_function_vals<=constraint_val)[0]
I = np.where(constraint_function_vals <= 0)[0]
axs_cdf[ii].fill_between(
constraint_function_vals[I], 0, cdf_vals[I], alpha=0.5, color=color)
axs_cdf[ii].axvline(0, color='k')
J = np.where(constraint_function_vals <= 0)[0]
#print (J.shape[0]/float(constraint_function_vals.shape[0]),'p failure',constraint_val,J.shape[0])
# Compute the constraint value. This combines constraint_function_vals
# into a scalar value
# axs_cdf[ii].axvline(constraint_val,color='r')
# axs_cdf[ii].plot(
# np.linspace(constraint_function_vals[0],constraint_val,101),
# quantile*np.ones(101),'-r')
#axs_cdf[ii].set_yticks(list(axs_cdf[ii].get_yticks()) + [quantile])
axs_cdf[ii].set_ylim(0, 1.05)
axs_cdf[ii].set_xlim(
constraint_function_vals[0], constraint_function_vals[-1])
return fig_cdf, axs_cdf
def check_gradients(fun, jac, zz, plot=False, disp=True, rel=True,
direction=None, jacp=None):
"""
Compare a user specified jacobian with the jacobian computed with finite
difference with multiple step sizes.
Parameters
---------
fun : callable
A function with signature
``fun(z) -> np.ndarray``
where ``z`` is a 2D np.ndarray with shape (nvars, 1) and the
output is a 2D np.ndarray with shape (nqoi, 1)
jac : callable
The jacobian of ``fun`` with signature
``jac(z) -> np.ndarray``
where ``z`` is a 2D np.ndarray with shape (nvars, 1) and the
output is a 2D np.ndarray with shape (nqoi, nvars)
zz : np.ndarray (nvars, 1)
A sample of ``z`` at which to compute the gradient
plot : boolean
Plot the errors as a function of the finite difference step size
disp : boolean
True - print the errors
False - do not print
rel : boolean
True - compute the relative error in the directional derivative,
i.e. the absolute error divided by the directional derivative using
``jac``.
False - compute the absolute error in the directional derivative
direction : np.ndarray (nvars, 1)
Direction to which Jacobian is applied. Default is None in which
case random direction is chosen.
Returns
-------
errors : np.ndarray (14, nqoi)
The errors in the directional derivative of ``fun`` at 14 different
values of finite difference tolerance for each quantity of interest
"""
assert zz.ndim == 2
assert zz.shape[1] == 1
if direction is None:
direction = np.random.normal(0, 1, (zz.shape[0], 1))
direction /= np.linalg.norm(direction)
assert direction.ndim == 2 and direction.shape[1] == 1
if (jacp is None and jac is None) or (jac is not None and jacp is not None):
raise Exception('Must specify jac or jacp')
if callable(jac):
function_val = fun(zz)
grad_val = jac(zz) # .squeeze()
directional_derivative = grad_val.dot(direction).squeeze()
elif callable(jacp):
directional_derivative = jacp(zz, direction)
elif jac is True:
function_val, grad_val = fun(zz)
directional_derivative = grad_val.dot(direction).squeeze()
else:
raise Exception
fd_eps = np.logspace(-13, 0, 14)[::-1]
errors = []
row_format = "{:<12} {:<25} {:<25} {:<25}"
if disp:
if rel:
print(
row_format.format(
"Eps", "norm(jv)", "norm(jv_fd)",
"Rel. Errors"))
else:
print(row_format.format(
"Eps", "norm(jv)", "norm(jv_fd)",
"Abs. Errors"))
for ii in range(fd_eps.shape[0]):
zz_perturbed = zz.copy()+fd_eps[ii]*direction
perturbed_function_val = fun(zz_perturbed)
if jac == True:
perturbed_function_val = perturbed_function_val[0].squeeze()
fd_directional_derivative = (
perturbed_function_val-function_val).squeeze()/fd_eps[ii]
# np.set_printoptions(precision=16)
# print(perturbed_function_val, function_val, perturbed_function_val - function_val, direction)
# print(fd_directional_derivative, '\n', directional_derivative)
errors.append(np.linalg.norm(
fd_directional_derivative.reshape(directional_derivative.shape) -
directional_derivative))
if rel:
errors[-1] /= np.linalg.norm(directional_derivative)
if disp:
print(row_format.format(
fd_eps[ii],
np.linalg.norm(directional_derivative),
np.linalg.norm(fd_directional_derivative),
errors[ii]))
#print(fd_directional_derivative, directional_derivative)
if plot:
plt.loglog(fd_eps, errors, 'o-')
plt.ylabel(r'$\lvert\nabla_\epsilon f\cdot p-\nabla f\cdot p\rvert$')
plt.xlabel(r'$\epsilon$')
plt.show()
return np.asarray(errors)
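# Illustrative check of check_gradients (not part of the original module):
# verify the analytic Jacobian of the quadratic fun(z) = 0.5*z.T.dot(z). The
# sample point and direction are arbitrary but fixed for reproducibility.
def _example_check_gradients():
    nvars = 3

    def fun(z):
        # z has shape (nvars, 1); the output has shape (1, 1)
        return 0.5*z.T.dot(z)

    def jac(z):
        # Jacobian has shape (nqoi, nvars) = (1, nvars)
        return z.T

    z0 = np.ones((nvars, 1))
    direction = np.full((nvars, 1), 1.0/np.sqrt(nvars))
    errors = check_gradients(fun, jac, z0, disp=False, direction=direction)
    # with a consistent fun/jac pair the error curve dips well below 1e-5
    assert errors.min() < 1e-5
    return errors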
def check_hessian(jac, hessian_matvec, zz, plot=False, disp=True, rel=True,
direction=None):
"""
Compare a user specified Hessian matrix-vector product with the
Hessian matrix vector produced computed with finite
difference with multiple step sizes using a user specified jacobian.
Parameters
---------
jac : callable
The jacobian with signature
``jac(z) -> np.ndarray``
where ``z`` is a 2D np.ndarray with shape (nvars,1) and the
output is a 2D np.ndarray with shape (nqoi,nvars)
hessian_matvec : callable
A function implementing the hessian matrix-vector product with signature
``hessian_matvec(z,p) -> np.ndarray``
where ``z`` is a 2D np.ndarray with shape (nvars,1), ``p`` is
an arbitrary vector with shape (nvars,1) and the
output is a 2D np.ndarray with shape (nqoi,nvars)
zz : np.ndarray (nvars,1)
A sample of ``z`` at which to compute the gradient
plot : boolean
Plot the errors as a function of the finite difference step size
disp : boolean
True - print the errors
False - do not print
rel : boolean
True - compute the relative error in the directional derivative,
i.e. the absolute error divided by the directional derivative using
``jac``.
False - compute the absolute error in the directional derivative
direction : np.ndarray (nvars, 1)
Direction to which Hessian is applied. Default is None in which
case random direction is chosen.
Returns
-------
errors : np.ndarray (14, nqoi)
The errors in the directional derivative of ``jac`` at 14 different
values of finite difference tolerance for each quantity of interest
"""
assert zz.ndim == 2
assert zz.shape[1] == 1
grad = jac(zz)
if direction is None:
direction = np.random.normal(0, 1, (zz.shape[0], 1))
direction /= np.linalg.norm(direction)
directional_derivative = hessian_matvec(zz, direction)
fd_eps = np.logspace(-13, 0, 14)[::-1]
errors = []
row_format = "{:<12} {:<25} {:<25} {:<25}"
if disp:
if rel:
print(
row_format.format(
"Eps", "norm(jv)", "norm(jv_fd)",
"Rel. Errors"))
else:
print(row_format.format(
"Eps", "norm(jv)", "norm(jv_fd)",
"Abs. Errors"))
for ii in range(fd_eps.shape[0]):
zz_perturbed = zz.copy()+fd_eps[ii]*direction
perturbed_grad = jac(zz_perturbed)
fd_directional_derivative = (perturbed_grad-grad)/fd_eps[ii]
# print(directional_derivative, fd_directional_derivative)
errors.append(np.linalg.norm(
fd_directional_derivative.reshape(directional_derivative.shape) -
directional_derivative))
if rel:
errors[-1] /= np.linalg.norm(directional_derivative)
if disp:
print(row_format.format(fd_eps[ii],
np.linalg.norm(directional_derivative),
np.linalg.norm(fd_directional_derivative),
errors[ii]))
# print(fd_directional_derivative,directional_derivative)
if plot:
plt.loglog(fd_eps, errors, 'o-')
plt.ylabel(r'$\lvert\nabla^2_\epsilon \cdot p f-\nabla^2 f\cdot p\rvert$')
plt.xlabel(r'$\epsilon$')
plt.show()
return np.asarray(errors)
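# Illustrative check of check_hessian (not part of the original module): for
# jac(z) = z.T the Hessian is the identity, so the Hessian-vector product is
# simply the direction (returned as a row to match the (nqoi, nvars) shape).
def _example_check_hessian():
    nvars = 3

    def jac(z):
        return z.T

    def hessian_matvec(z, p):
        return p.T

    z0 = np.ones((nvars, 1))
    errors = check_hessian(jac, hessian_matvec, z0, disp=False)
    assert errors.min() < 1e-5
    return errors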
def expectation_fun(values, weights):
assert values.shape[0] % weights.shape[0] == 0
nqoi = values.shape[0]//weights.shape[0]
nsamples = values.shape[0]//nqoi
assert nqoi == 1
fun_vals = (values.T.dot(weights)).T
return fun_vals
def expectation_jac(jac_values, weights):
assert jac_values.shape[0] % weights.shape[0] == 0
nqoi = jac_values.shape[0]//weights.shape[0]
nsamples = jac_values.shape[0]//nqoi
num_vars = jac_values.shape[1]
assert nqoi == 1
jac = (jac_values.T.dot(weights)).T
return jac
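# Illustrative usage sketch for expectation_fun / expectation_jac (not part of
# the original module): weighted means of per-sample values and gradients for a
# single QoI. The sample values and equal Monte Carlo weights are arbitrary.
def _example_expectation():
    values = np.array([[1.0], [2.0], [3.0], [4.0]])  # (nsamples, nqoi=1)
    jac_values = np.array([[1.0, 0.0],
                           [0.0, 1.0],
                           [1.0, 1.0],
                           [2.0, 2.0]])              # (nsamples, nvars=2)
    weights = np.full(4, 0.25)
    mean_val = expectation_fun(values, weights)      # -> array([2.5])
    mean_jac = expectation_jac(jac_values, weights)  # -> array([1., 1.])
    return mean_val, mean_jac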
def smooth_prob_failure_fun(smoother_type, eps, tol, values, weights):
assert values.shape[0] % weights.shape[0] == 0
nqoi = values.shape[0]//weights.shape[0]
assert nqoi == 1
nsamples = values.shape[0]//nqoi
heaviside_vals = smooth_max_function_first_derivative(
smoother_type, eps, values-tol)
fun_vals = (heaviside_vals.dot(weights)).T
# print(fun_vals.shape)
return fun_vals
def smooth_prob_failure_jac(smoother_type, eps, tol, jac_values, weights):
assert jac_values.shape[0] % weights.shape[0] == 0
nqoi = jac_values.shape[0]//weights.shape[0]
assert nqoi == 1
nsamples = jac_values.shape[0]//nqoi
num_vars = jac_values.shape[1]
grad_heaviside_vals = smooth_max_function_second_derivative(
smoother_type, eps, jac_values-tol)
jac = (grad_heaviside_vals*jac_values).T.dot(weights)[np.newaxis, :]
print(jac_values.max(axis=0), 'm', eps)
return jac
def generate_monte_carlo_quadrature_data(
generate_random_samples, num_vars, design_var_indices, fun, seed=None):
if seed is not None:
np.random.seed(seed)
samples = generate_random_samples()
weights = np.ones(samples.shape[1])/samples.shape[1]
values = fun(samples)
return samples, weights, values
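# --- Added usage sketch (hedged, not part of the original module) ---
# The sampler and model below are hypothetical stand-ins with the shapes this
# helper expects: samples of shape (num_vars, nsamples) and values of shape
# (nsamples, nqoi). The equal Monte Carlo weights then give a sample average.
def _demo_generate_monte_carlo_quadrature_data():
    sampler = lambda: np.random.uniform(0., 1., (2, 1000))
    fun = lambda samples: np.sum(samples**2, axis=0)[:, None]
    samples, weights, values = generate_monte_carlo_quadrature_data(
        sampler, num_vars=2, design_var_indices=np.array([], dtype=int),
        fun=fun, seed=0)
    # The weighted sum approximates E[x1^2 + x2^2] = 2/3 on U(0,1)^2.
    return values[:, 0].dot(weights)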
class StatisticalConstraint(object):
"""
Notes
-----
TODO ensure the following.
This class unifies the jac=True and callable(jac)=True interfaces.
The interface is used for passing to optimizers that need the fun and jac functions
to be separate. This is often good practice as it avoids computing
jac when only fun is required.
If jac=True the jacobian is stored and returned when self.jac is called
"""
def __init__(self, fun, jac, stats_fun, stats_jac, num_vars,
design_var_indices, generate_sample_data, bound=None,
upper_bound=True, isobjective=False):
self.fun, self.jac, self.stats_fun = fun, jac, stats_fun
self.stats_jac = stats_jac
self.num_vars = num_vars
self.design_var_indices = design_var_indices
self.random_var_indices = np.delete(
np.arange(self.num_vars), self.design_var_indices)
self.generate_sample_data = generate_sample_data
self.bound = bound
self.upper_bound = upper_bound
self.isobjective = isobjective
self.design_sample = None
self.jac_values = None
self.samples = None
if self.stats_jac is not None and self.jac is None:
            msg = 'stats_jac requires jac to be defined'
raise Exception(msg)
if self.jac is not None and self.stats_jac is None:
msg = 'jac will be ignored because stats_jac was not defined'
raise Exception(msg)
def generate_shared_data(self, design_sample):
self.design_sample = design_sample.copy()
fun = ActiveSetVariableModel(self.fun, self.num_vars, design_sample,
self.random_var_indices)
data = self.generate_sample_data(fun)
self.samples, self.weights, self.fun_values = data[:3]
assert self.samples.shape[0] ==\
self.num_vars-self.design_var_indices.shape[0]
assert self.samples.shape[1] == self.weights.shape[0]
#assert self.samples.shape[1]==self.fun_values.shape[0]
if not callable(self.jac) and self.jac:
# consider whether to support self.jac=True. It seems appealing
# if using gradients from adjoint PDE simulation which requires
# data used to compute function values and thus better to do at the
# time the function values are obtained. Challenge is defining the
# correct output interface and only computing gradients if self.jac
# has been called and not if self.__call__ is called.
raise Exception("Not yet implemented")
self.jac_values = data[3]
def __call__(self, design_sample):
if design_sample.ndim == 1:
design_sample = design_sample[:, np.newaxis]
self.generate_shared_data(design_sample)
nsamples = self.weights.shape[0]
nqoi = self.fun_values.shape[1]
# print(self.fun_values)
values = np.empty((nqoi))
for ii in range(nqoi):
values[ii] = self.stats_fun(
self.fun_values[:, ii:ii+1], self.weights)
# print('b',np.where(self.fun_values[:,ii:ii+1]>0)[0].shape[0]/nsamples)
# print('c',values[ii])
# print(self.fun_values.min(),self.fun_values.max())
if self.bound is not None:
values = values-self.bound
if self.upper_bound:
values *= -1
if self.isobjective:
values = values[0]
return values
def jacobian(self, design_sample):
if design_sample.ndim == 1:
design_sample = design_sample[:, np.newaxis]
if ( | np.array_equal(design_sample, self.design_sample) | numpy.array_equal |
# coding=utf-8
# Copyright 2022 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics that are defined only for the synthetic data."""
import collections
import os
from typing import Dict, Text
import numpy as np
import pandas as pd
from robustness_metrics.common import types
from robustness_metrics.metrics import base as metrics_base
import tensorflow as tf
def get_metadata(variant):
fname = ("https://s3.us-east-1.amazonaws.com/si-score-dataset/"
f"object_{variant}/metadata.csv")
return pd.read_csv(fname)
def parse_metadata(variant, fields=None):
"""Returns the filename to group mapping."""
fields = fields or []
file_to_group_mapping = {}
metadata = get_metadata(variant)[["image_id"] + fields]
# For example "/../area/pidgeon/1.jpg" gets mapped to `area(xxx)` where
# `xxx` is read from the metadata file.
for row in metadata.values:
instance_id = int(row[0])
if len(row[1:]) == 1:
group = "(%s)" % row[1]
elif len(row[1:]) == 2:
group = "(%.2f,%.2f)" % (float(row[1]), float(row[2]))
else:
      raise ValueError("Unexpected number of fields: %d" % len(row[1:]))
file_to_group_mapping[instance_id] = group
return file_to_group_mapping
@metrics_base.registry.register("synthetic")
class Synthetic(metrics_base.Metric):
"""Synthetic data experiments.
There are three datasets, and we return the average accuracy for each
dimension of each dataset:
(1) Size: we vary the object area in some range (e.g. 10%, 20%, ...), and
return the average accuracy for each area size.
(2) Rotation: we vary the object rotation (0 deg, 5 deg, ...), and return
the average accuracy for each rotation.
  (3) Location: we vary the object offset with respect to the top-left corner
      of the image (e.g. [5%, 5%]) and return the average accuracy for each
offset.
"""
def __init__(self, dataset_info=None):
"""Synthetic data metric.
Args:
dataset_info: DatasetInfo object containing useful information.
"""
super().__init__(dataset_info)
self._groups = collections.defaultdict(list)
self._dataset_info = dataset_info
self.location_map = parse_metadata("location",
fields=["x_coord", "y_coord"])
self.size_map = parse_metadata("size", fields=["area"])
self.rotation_map = parse_metadata("rotation", fields=["rotation"])
def map_path_to_group(self, image_id, dataset_variant):
if isinstance(dataset_variant, bytes):
dataset_variant = dataset_variant.decode("utf-8")
instance_id = int(image_id)
if dataset_variant == "location":
return "location" + self.location_map[instance_id]
elif dataset_variant == "size":
return "size" + self.size_map[instance_id]
elif dataset_variant == "rotation":
return "rotation" + self.rotation_map[instance_id]
else:
raise ValueError("Couldn't map id %s of variant %s" %
(image_id, dataset_variant))
def add_predictions(self,
model_predictions: types.ModelPredictions,
metadata) -> None:
for prediction in model_predictions.predictions:
image_id = metadata["image_id"]
dataset_variant = metadata["dataset_variant"]
# Example group IDs: `area(0.5)` or `rotation(121)`.
group_id = self.map_path_to_group(image_id, dataset_variant)
correct = metadata["label"]
predicted = np.argmax(prediction)
self._groups[group_id].append(int(predicted == correct))
def result(self) -> Dict[Text, float]:
scores = {}
size_scores = []
rotation_scores = []
location_scores = []
for group_id, element_scores in self._groups.items():
scores[group_id] = np.mean(element_scores)
if "size" in group_id:
size_scores.append(scores[group_id])
elif "rotation" in group_id:
rotation_scores.append(scores[group_id])
elif "location" in group_id:
location_scores.append(scores[group_id])
scores["size_average"] = np.mean(size_scores)
scores["rotation_average"] = | np.mean(rotation_scores) | numpy.mean |
import sys
sys.path.append('C:/python scripts/ciecam02 plot')
import Read_Meredith as rm
# from scipy.ndimage import binary_dilation
# from scipy.stats import circstd
# import scipy.fftpack as fftpack
# from scipy.linalg import solve_banded
import vispol
import numpy as np
from scipy.sparse.linalg import spsolve
# from scipy.sparse.linalg import spilu
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
# from scipy.ndimage import gaussian_filter
from scipy.ndimage import grey_erosion
from scipy.signal import medfilt2d
from scipy.signal import wiener
def ang_diff(D):
P = np.pi
return 1 - 2/P * np.abs((D + P/2) % (2 * P) - P)
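# Added sanity check (hedged, not in the original script): ang_diff is a
# triangle-wave approximation of sin(D), mapping angle differences to [-1, 1].
def _demo_ang_diff():
    D = np.array([0., np.pi / 2, np.pi, 3 * np.pi / 2])
    return ang_diff(D)  # approximately [0., 1., 0., -1.]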
vispol.register_cmaps()
data_folder = 'c:/users/z5052714/documents/unsw/unsw/data_sets/'
# filename = data_folder + 'forscottfrommeredith/162041.L1B2.v006.hdf5'
# filename = data_folder + 'forscottfrommeredith/162651.L1B2.v006.hdf5'
filename = data_folder + 'forscottfrommeredith/094821.L1B2.v006.hdf5'
# I, P, A = rm.getIPA(filename, start=[2, 32], end=[3900, 1403])
_, P, A = rm.getIPA(filename, start=[2, 32], end=[3000, 1403])
# I, P, A = rm.getIPA(filename, start=[1000, 800], end=[1600, 1300])
# I, _, A = rm.getIPA(filename, start=[2000, 150], end=[2200, 450]) # looks great
# _, _, A = rm.getIPA(filename, start=[1500, 150], end=[2200, 450]) # looks good with scaling
# _, _, A = rm.getIPA(filename, start=[2, 150], end=[2600, 1000])
# I = np.clip(I/np.percentile(I,99), 0, 1)
A *= np.pi/180.0
A[A > np.pi] -= np.pi
delta = vispol.delta_aop(A)
A45 = A + np.pi/8
A45[A45 > np.pi] -= np.pi
Aneg45 = A - np.pi/8
Aneg45[Aneg45 < 0 ] += np.pi
# plt.plot(np.linspace(-np.pi, np.pi, 256), ang_diff(np.linspace(-np.pi, np.pi, 256)))
# delta_patch = delta[2520:2620, 1200:1300].reshape((-1, 1))
# A_patch = A[2520:2620, 1200:1300].reshape((-1, 1))
# P_patch = P[2520:2620, 1200:1300].reshape((-1, 1))
# hist1, hist2, edges = np.histogram2d(A_patch, delta_patch, bins='fd')
# f, ax = plt.subplots(3)
# ax[0].scatter(A_patch, delta_patch)
# ax[1].scatter(A_patch, P_patch)
# ax[2].scatter(P_patch, delta_patch)
# print(np.mean(P_patch))
# print(np.std(P_patch))
# print(vispol.circular_mean(A_patch))
# print(np.sqrt(-2 * np.log(np.hypot(np.mean(np.sin(2 * A_patch)), np.mean(np.cos(2 * A_patch))))))
# print(np.mean(delta_patch))
# plt.show()
cap = 95
sigma = 2
# delta = grey_erosion(delta, size=(5, 5))
# delta = medfilt2d(delta, 7)
# delta = wiener(delta, 5)
# A, _ = vispol.histogram_eq(A,
# weighted=True,
# min_change=0.25,
# element=5,
# deltas = delta,
# suppress_noise=True,
# interval=[0.0,np.pi])#,
# # box=[[1100, A.shape[0]], [0, A.shape[1]]])
# plt.imsave("C:/users/z5052714/documents/weekly_meetings/28-06-2019/AoP_rot.png", A, cmap="AoP", vmin=0, vmax=np.pi)
f, ax = plt.subplots(1, 4)
# ax[0].imshow(delta, vmin=0, vmax=1)
# ax[1].imshow(A, vmin=0, vmax=np.pi, cmap="AoP")
# ax[2].imshow(P, vmin=0, vmax=1)
ax[0].imshow(np.cos(2 * A), cmap="gray")
ax[1].imshow(np.cos(2 * A45), cmap="gray")
ax[2].imshow(np.sin(2 * A), cmap="gray")
ax[3].imshow( | np.cos(2 * Aneg45) | numpy.cos |
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from exportToTeXFile import *
x=np.array([np.linspace(2,10,50),np.linspace(10,20,50)])
x=x.T
sqrt=np.sqrt(x)
log=np.log(x)
cos=5.*np.cos(x)
sin=5.*np.sin(x)
## Exporting one TeX file
legend=[r'$f=\sqrt{x}$',r'$f=\log{x}$',r'$f=\cos{x}$',r'$f=\sin{x}$']
filename='example_export2DTeXFile.tex'
export2DTeXFile(filename,np.array([x[:,0],x[:,0],x[:,0],x[:,0]]),'$x (m)$',r'Functions','', | np.array([sqrt[:,0],log[:,0],cos[:,0],sin[:,0]]) | numpy.array |
import warnings
import torch
import numpy as np
from meshreg.datasets.queries import TransQueries, BaseQueries
class Metric:
def __init__(self, name, unit, axis_label):
self._data = {}
pass
def get_name(self):
pass
def get_axis_label(self):
pass
def get_axis_limits(self):
pass
def get_unit(self):
pass
def eval(self, epoch, split, sample, results):
pass
def _recover_back(joints_trans, affinetrans):
"""
    Given 2d point coordinates and an affine transform, recovers the original
    pixel points (their locations before translation, rotation, crop, scaling,
    etc. were applied during data augmentation).
"""
batch_size = joints_trans.shape[0]
point_nb = joints_trans.shape[1]
hom2d = torch.cat([joints_trans, joints_trans.new_ones(batch_size, point_nb, 1)], -1)
rec2d = torch.inverse(affinetrans).bmm(hom2d.transpose(1, 2).float()).transpose(1, 2)[:, :, :2]
return rec2d
def _recover_3d_proj(joints3d, joints2d, camintr, est_scale, est_trans, center_idx=9):
# Estimate scale and trans between 3D and 2D
trans3d = joints3d[:, center_idx : center_idx + 1]
joints3d_c = joints3d - trans3d
focal = camintr[:, :1, :1]
est_Z0 = focal / est_scale
est_XY0 = (est_trans[:, 0] - camintr[:, :2, 2]) * est_Z0[:, 0] / focal[:, 0]
est_c3d = torch.cat([est_XY0, est_Z0[:, 0]], -1).unsqueeze(1)
recons3d = est_c3d + joints3d_c
return recons3d, est_c3d
def _transform(points3d, Rt):
# points3d: (B,N,3)
# Rt: (B,3,4)
hom_points3d = np.concatenate([points3d, np.ones([points3d.shape[0], points3d.shape[1], 1])], axis=2)
trans_points3d = hom_points3d @ Rt.transpose((0,2,1))
return trans_points3d
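# Added illustration (hedged, not part of the original module): with an identity
# rotation and zero translation, _transform leaves the points unchanged.
def _demo_transform():
    pts = np.random.rand(2, 5, 3)                                      # (B, N, 3)
    Rt = np.tile(np.hstack([np.eye(3), np.zeros((3, 1))]), (2, 1, 1))  # (B, 3, 4)
    return np.allclose(_transform(pts, Rt), pts)                       # True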
def _euclidean_dist(gt, pred, compute_mean_of_keypoints=True):
if isinstance(gt, torch.Tensor):
gt = gt.detach().cpu().numpy()
if isinstance(pred, torch.Tensor):
pred = pred.detach().cpu().numpy()
#gt = np.squeeze(gt)
#pred = np.squeeze(pred)
assert gt.ndim == 3, "gt not 3-dim, but has shape {}".format(gt.shape)
assert pred.ndim == 3, "pred not 3-dim, but has shape {}".format(pred.shape)
# shapes: (batch_size, nb_keypoints, point_dim), e.g. (64, 21, 3)
# calc euclidean distance
euclidean_dist = np.linalg.norm(gt - pred, ord=2, axis=-1)
if compute_mean_of_keypoints:
euclidean_dist = np.mean(euclidean_dist, axis=-1)
return euclidean_dist
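# Added example (hedged, not part of the original module): per-sample mean
# Euclidean keypoint error for hypothetical ground-truth/prediction arrays.
def _demo_euclidean_dist():
    gt = np.zeros((4, 21, 3))
    pred = np.ones((4, 21, 3))         # every keypoint off by sqrt(3)
    return _euclidean_dist(gt, pred)   # four values, each ~1.732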
def hand_joints_2d(batch, pred):
result = {}
if "joints2d" in pred and BaseQueries.JOINTS2D in batch:
gt_joints2d = batch[TransQueries.JOINTS2D]
affinetrans = batch[TransQueries.AFFINETRANS]
or_joints2d = batch[BaseQueries.JOINTS2D]
rec_pred = _recover_back(pred["joints2d"].detach().cpu(), affinetrans)
rec_gt = _recover_back(gt_joints2d, affinetrans)
# Sanity check, this should be ~1pixel
gt_err = | np.linalg.norm(rec_gt - or_joints2d, 2, -1) | numpy.linalg.norm |
import pandas as pd
from matplotlib import pyplot as plt
import os
import torch
import numpy as np
from tqdm.notebook import tqdm
import pickle
from dotmap import DotMap
from IPython.display import display
import sys
import argparse
if "./" not in sys.path:
sys.path.append('./')
from src.train import FrankModelTrainer
from src.utils.eval_values import eval_net, frank_m2_similarity
orig_df = None
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple settings.')
parser.add_argument('from_state', type=str, help='From state', choices=['random_frank', 'ps_inv_init', 'ps_inv_frank'])
parser.add_argument('to_state', type=str, help='To state', choices=['random_frank', 'ps_inv_init', 'ps_inv_frank'])
parser.add_argument('layer', type=str, help='Layer to check interpolation')
parser.add_argument('-c','--csv', help="Path to summary csv", default='results/official/tiny_bn/summary-extra-random.csv')
parser.add_argument('-o','--out-dir', help='Folder to save csv', default='results/interpolation')
parser.add_argument('-n','--n-variants', help='Number of variants to try out', type=int, default=10)
parser.add_argument('-s','--n-split', help='Number of splits', type=int, default=10)
return parser.parse_args(args)
def get_data_dict(layer, init, i=0, can_be_second_random=False):
df = orig_df.copy()
df['front_type'] = df.front_model.apply(lambda x:x.split('/')[-2])
df['end_type'] = df.end_model.apply(lambda x:x.split('/')[-2])
filters = [
df.init==init,
df.front_layer==layer,
df.front_model.str.contains(f"in{i}")
]
df = df[ | np.logical_and.reduce(filters) | numpy.logical_and.reduce |
# -*- coding: utf-8 -*-
"""
Romanization of Thai words based on machine-learnt engine ("thai2rom")
"""
import numpy as np
from keras.layers import Input
from keras.models import Model, load_model
from pythainlp.corpus import download, get_file
class ThaiTransliterator:
def __init__(self):
"""
Transliteration of Thai words
Now supports Thai to Latin (romanization)
"""
self.__batch_size = 64
self.__epochs = 100
self.__latent_dim = 256
self.__num_samples = 648241
self.__data_path = get_file("thai2rom-dataset")
if not self.__data_path:
download("thai2rom-dataset")
self.__data_path = get_file("thai2rom-dataset")
self.__input_texts = []
self.__target_texts = []
self.__input_characters = set()
self.__target_characters = set()
with open(self.__data_path, "r", encoding="utf-8-sig") as self.__fh:
self.__lines = self.__fh.read().split("\n")
for line in self.__lines[: min(self.__num_samples, len(self.__lines) - 1)]:
input_text, target_text = line.split("\t")
if len(input_text) < 30 and len(target_text) < 90:
target_text = "\t" + target_text + "\n"
                self.__input_texts.append(input_text)
                self.__target_texts.append(target_text)
for char in input_text:
if char not in self.__input_characters:
self.__input_characters.add(char)
for char in target_text:
if char not in self.__target_characters:
self.__target_characters.add(char)
self.__input_characters = sorted(list(self.__input_characters))
self.__target_characters = sorted(list(self.__target_characters))
self.__num_encoder_tokens = len(self.__input_characters)
self.__num_decoder_tokens = len(self.__target_characters)
self.__max_encoder_seq_length = max([len(text) for text in self.__input_texts])
self.__max_decoder_seq_length = max([len(text) for text in self.__target_texts])
"""print('Number of samples:', len(self.input_texts))
print('Number of unique input tokens:', self.num_encoder_tokens)
print('Number of unique output tokens:', self.num_decoder_tokens)
print('Max sequence length for inputs:', self.max_encoder_seq_length)
print('Max sequence length for outputs:', self.max_decoder_seq_length)"""
self.__input_token_index = dict(
[(char, i) for i, char in enumerate(self.__input_characters)]
)
self.__target_token_index = dict(
[(char, i) for i, char in enumerate(self.__target_characters)]
)
self.__encoder_input_data = np.zeros(
(
len(self.__input_texts),
self.__max_encoder_seq_length,
self.__num_encoder_tokens,
),
dtype="float32",
)
for i, input_text in enumerate(self.__input_texts):
for t, char in enumerate(input_text):
self.__encoder_input_data[i, t, self.__input_token_index[char]] = 1.
# Restore the model and construct the encoder and decoder.
self.__filemodel = get_file("thai2rom")
if not self.__filemodel:
download("thai2rom")
self.__filemodel = get_file("thai2rom")
self.__model = load_model(self.__filemodel)
self.__encoder_inputs = self.__model.input[0] # input_1
self.__encoder_outputs, self.__state_h_enc, self.__state_c_enc = self.__model.layers[
2
].output # lstm_1
self.__encoder_states = [self.__state_h_enc, self.__state_c_enc]
self.__encoder_model = Model(self.__encoder_inputs, self.__encoder_states)
self.__decoder_inputs = self.__model.input[1] # input_2
self.__decoder_state_input_h = Input(shape=(self.__latent_dim,), name="input_3")
self.__decoder_state_input_c = Input(shape=(self.__latent_dim,), name="input_4")
self.__decoder_states_inputs = [
self.__decoder_state_input_h,
self.__decoder_state_input_c,
]
self.__decoder_lstm = self.__model.layers[3]
self.__decoder_outputs, self.__state_h_dec, self.__state_c_dec = self.__decoder_lstm(
self.__decoder_inputs, initial_state=self.__decoder_states_inputs
)
self.__decoder_states = [self.__state_h_dec, self.__state_c_dec]
self.__decoder_dense = self.__model.layers[4]
self.__decoder_outputs = self.__decoder_dense(self.__decoder_outputs)
self.__decoder_model = Model(
[self.__decoder_inputs] + self.__decoder_states_inputs,
[self.__decoder_outputs] + self.__decoder_states,
)
self.__reverse_input_char_index = dict(
(i, char) for char, i in self.__input_token_index.items()
)
self.__reverse_target_char_index = dict(
(i, char) for char, i in self.__target_token_index.items()
)
def __decode_sequence(self, input_seq):
self.__states_value = self.__encoder_model.predict(input_seq)
self.__target_seq = | np.zeros((1, 1, self.__num_decoder_tokens)) | numpy.zeros |
"""
Test script.
"""
import pytest
import os
from psrqpy import QueryATNF
import numpy as np
from pandas import Series
import pytest_socket
from six import string_types
from astropy.table.column import MaskedColumn
def sf_scale(value):
"""
Calculate the base-10 scale of the final significant figure for a given
number.
E.g. for a value of 12000.0 you would get 1000.0 as the scale of the final
    significant figure. Or, for 1.2345e-8 you would get 1e-12.
"""
    # base-10 exponent to which the value is raised
valexp = np.floor(np.log10(np.abs(value)))
    # value that is raised to 10**valexp
val = (value/10**valexp).astype('float32') # type as float32 to avoid numerical noise
valstr = str(val)
# get the number of decimal places
numdp = len(valstr) - valstr.find('.') - 1
if valstr[-1] == '0':
numdp -= 1
# return the scale of the final significant figure
return 10**(valexp - numdp)
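# Added worked example (hedged, not part of the original tests): for 12000.0 the
# final significant figure is the "2", so its scale is 1000.0.
def _demo_sf_scale():
    return sf_scale(np.float64(12000.0))  # expected 1000.0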
def round_err(errvalue, atnferrvalue):
"""
    Round the derived error to the same number of significant figures as the
    error produced by `psrcat` and used for the ATNF pulsar catalogue, noting
    that `psrcat` rounds errors up. Return True if the derived error and the
    equivalent ATNF error are the same.
"""
# get ATNF derived error value
errval = (atnferrvalue/sf_scale(atnferrvalue)).astype('float32')
# ATNF derived errors are always rounded up
derval = np.ceil(errvalue/sf_scale(atnferrvalue))
return derval == errval
def test_crab(query):
"""
Test that the Crab pulsar is present and the frequency is as expected, i.e.
the frequency rounds down to 29 Hz (should be OK for another ~80 years!)
"""
f0 = query.get_pulsar('J0534+2200')['F0'][0]
assert np.floor(f0) == 29.0
# try Crab's B-name
f0B = query.get_pulsar('B0531+21')['F0'][0]
assert f0 == f0B
# check reference and error are not None
assert query.get_pulsar('B0531+21')['F0_ERR'][0] is not None
assert query.get_pulsar('B0531+21')['F0_REF'][0] is not None
def test_catalogue_shape(query):
"""
Test the catalogue for shape consistency
"""
length = query.catalogue_len
shape = query.catalogue_shape
rows = query.catalogue_nrows
cols = query.catalogue_ncols
colnames = query.columns
assert length == rows and length == shape[0]
assert cols == len(colnames) and cols == shape[1]
def test_get_pulsars(query):
"""
Test the 'Pulsars' class.
"""
psrs = query.get_pulsars()
assert len(psrs) == query.num_pulsars
# check Crab frequency
f01 = query.get_pulsar('J0534+2200')['F0'][0]
f02 = psrs['J0534+2200'].F0 # frequency attribute
assert f01 == f02
# test removing a pulsar
crab = psrs.pop('J0534+2200')
f03 = crab.F0
assert f03 == f01
assert len(psrs) == (query.num_pulsars - 1)
# get the ephemeris string for the Crab pulsar
crabeph = query.get_ephemeris('J0534+2200AB') # wrong name
assert crabeph is None
crabeph = query.get_ephemeris('J0534+2200')
assert isinstance(crabeph, string_types)
for line in crabeph.split('\n'):
if line.split()[0].strip() == 'F0':
f0str = line.split()[1].strip()
break
assert f01 == float(f0str)
def test_save_load_file(query):
"""
Test saving and reloading a query as a pickle file.
"""
# test exception handling
testfilebad = '/jkshfdjfd/jkgsdfjkj/kgskfd.jhfd'
with pytest.raises(IOError):
query.save(testfilebad)
# test exception handling
with pytest.raises(IOError):
querynew = QueryATNF(loadquery=testfilebad)
testfile = os.path.join(os.getcwd(), 'query.pkl')
query.save(testfile)
# re-load in as a new query
querynew = QueryATNF(loadquery=testfile)
assert query.num_pulsars == querynew.num_pulsars
def test_condition(query):
"""
Test the parsing of logical conditions.
"""
with pytest.raises(TypeError):
# test for error if condition is not a string
query.condition = 2.3
# test that we only return pulsars with F0 > 100 Hz
query.condition = 'F0 > 100'
psrs = query.table
f0s = psrs['F0']
assert not np.any(f0s < 100.)
# test that we only return pulsars with F0 > 100 Hz in binary systems
query.condition = 'F0 > 100 && type(binary)'
psrs = query.table
f0s = psrs['F0']
binary = psrs['BINARY']
if type(binary) == MaskedColumn:
assert not np.any(f0s < 100.) and not np.any(binary.mask)
else:
assert not np.any(f0s < 100.)
# test 'OR'
query.condition = 'F0 > 100 || type(binary)'
psrs = query.table
f0s = psrs['F0']
binary = psrs['BINARY']
if type(binary) == MaskedColumn:
assert np.all(~binary[f0s < 100.].mask)
# test 'NOT'
query.condition = 'F0 > 100 and not type(binary)'
psrs = query.table
f0s = psrs['F0']
binary = psrs['BINARY']
if type(binary) == MaskedColumn:
assert not np.any(f0s < 100) and np.all(binary.mask)
else:
assert not | np.any(f0s < 100) | numpy.any |
# -*- coding: utf-8 -*-
import os
import random
import pdb
from PIL import Image
import numpy as np
import skimage.transform as sk_transformer
from skimage import color
from skimage.util import random_noise
#
#----------------------------------------------------------------------------
def load_and_resize_image(img_path, model_image_size):
"""Reads the image and resizes it.
Parameters:
-----------
img_path (str): fullpath to the image where it is located.
    model_image_size (tuple): the (width, height) to which the image is resized
        before being passed to the model. Note that PIL images use width as the
        first dimension and height as the second.
Returns:
--------
true_img_size (tuple): the true (width, height) of original image.
image_data (numpy.ndarray): the resized image in shape (H x W x C)
"""
image = Image.open(img_path)
# image = Image.fromarray(img, mode='RGBA') # Covert to pillow image from np.uint8 image of HxWxC, mode='RGBA' when A is also available
resized_image = image.resize(model_image_size, Image.BICUBIC) # NOTE: (width, height).
image_data = | np.array(resized_image, dtype='float32') | numpy.array |
import os
import re
import sys
import math
import time
import string
import random
import warnings
from functools import partial
import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from astropy import wcs
from astropy import units as u
from astropy.io import fits, ascii
from astropy.nddata import Cutout2D
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column, setdiff, join
from astropy.stats import mad_std, biweight_location, gaussian_fwhm_to_sigma
from astropy.stats import sigma_clip, SigmaClip, sigma_clipped_stats
from photutils import detect_sources, deblend_sources
from photutils import CircularAperture, CircularAnnulus, EllipticalAperture
from .io import logger
from .io import save_pickle, load_pickle, check_save_path
from .plotting import LogNorm, AsinhNorm, colorbar
from . import DF_pixel_scale, DF_raw_pixel_scale
try:
from reproject import reproject_interp
reproject_install = True
except ImportError:
warnings.warn("Package reproject is not installed. No rescaling available.")
reproject_install = False
# default SE columns for cross_match
SE_COLUMNS = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD",
"MAG_AUTO", "FLUX_AUTO", "FWHM_IMAGE", "MU_MAX", "FLAGS"]
# Fiducial values of PSF parameters
DF_default_params = {"fwhm":6.,
"beta":6.6,
"frac":0.3,
"n_s":np.array([3.3, 2.5]),
"theta_s":np.array([5, 100])}
### Baisc Funcs ###
def coord_Im2Array(X_IMAGE, Y_IMAGE, origin=1):
    """ Convert image coordinate to numpy array coordinate """
x_arr, y_arr = int(max(round(Y_IMAGE)-origin, 0)), int(max(round(X_IMAGE)-origin, 0))
return x_arr, y_arr
def coord_Array2Im(x_arr, y_arr, origin=1):
    """ Convert numpy array coordinate to image coordinate """
X_IMAGE, Y_IMAGE = y_arr+origin, x_arr+origin
return X_IMAGE, Y_IMAGE
def fwhm_to_gamma(fwhm, beta):
""" in arcsec """
return fwhm / 2. / math.sqrt(2**(1./beta)-1)
def gamma_to_fwhm(gamma, beta):
""" in arcsec """
return gamma / fwhm_to_gamma(1, beta)
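# Added check (hedged, not part of the original module): fwhm_to_gamma and
# gamma_to_fwhm are inverses, e.g. for the fiducial Moffat fwhm=6", beta=6.6.
def _demo_fwhm_gamma_roundtrip(fwhm=6., beta=6.6):
    gamma = fwhm_to_gamma(fwhm, beta)
    return np.isclose(gamma_to_fwhm(gamma, beta), fwhm)  # True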
def Intensity2SB(Intensity, BKG, ZP, pixel_scale=DF_pixel_scale):
""" Convert intensity to surface brightness (mag/arcsec^2) given the background value, zero point and pixel scale """
I = np.atleast_1d(np.copy(Intensity))
I[np.isnan(I)] = BKG
if np.any(I<=BKG):
I[I<=BKG] = np.nan
I_SB = -2.5*np.log10(I - BKG) + ZP + 2.5 * math.log10(pixel_scale**2)
return I_SB
def SB2Intensity(SB, BKG, ZP, pixel_scale=DF_pixel_scale):
"""
Convert surface brightness (mag/arcsec^2)to intensity given the
background value, zero point and pixel scale.
"""
SB = np.atleast_1d(SB)
I = 10** ((SB - ZP - 2.5 * math.log10(pixel_scale**2))/ (-2.5)) + BKG
return I
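# Added check (hedged, not part of the original module): SB2Intensity inverts
# Intensity2SB for intensities above the background (hypothetical BKG and ZP).
def _demo_SB_roundtrip(BKG=100., ZP=27.1):
    I0 = np.array([150., 500.])
    SB = Intensity2SB(I0, BKG, ZP)
    return np.allclose(SB2Intensity(SB, BKG, ZP), I0)  # True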
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def counter(i, number):
if np.mod((i+1), number//4) == 0:
logger.info(" - completed: %d/%d"%(i+1, number))
def round_good_fft(x):
    # Round the PSF size up to 2^k or 3*2^(k-2)
a = 1 << int(x-1).bit_length()
b = 3 << int(x-1).bit_length()-2
if x>b:
return a
else:
return min(a,b)
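# Added worked example (hedged): sizes are rounded up to the nearest 2**k or
# 3*2**(k-2), whichever is the smaller value that is still >= x.
def _demo_round_good_fft():
    return round_good_fft(90), round_good_fft(100)  # expected (96, 128)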
def calculate_psf_size(n0, theta_0, contrast=1e5, psf_scale=DF_pixel_scale,
min_psf_range=60, max_psf_range=720):
A0 = theta_0**n0
opt_psf_range = int((contrast * A0) ** (1./n0))
psf_range = max(min_psf_range, min(opt_psf_range, max_psf_range))
# full (image) PSF size in pixel
psf_size = 2 * psf_range // psf_scale
return round_good_fft(psf_size)
def compute_poisson_noise(data, n_frame=1, header=None, Gain=0.37):
if header is not None:
try:
n_frame = np.int32(header['NFRAMES'])
except KeyError:
n_frame = 1
G_effective = Gain * n_frame # effecitve gain: e-/ADU
std_poi = np.nanmedian(np.sqrt(data/G_effective))
if np.isnan(std_poi):
std_poi = None
print("Sky Poisson Noise Unavailable.")
else:
print("Sky Poisson Noise: %.3f"%std_poi)
return std_poi
def extract_bool_bitflags(bitflags, ind):
from astropy.nddata.bitmask import interpret_bit_flags
return np.array(["{0:016b}".format(0xFFFFFFFF & interpret_bit_flags(flag))[-ind]
for flag in np.atleast_1d(bitflags)]).astype(bool)
### Photometry Funcs ###
def background_stats(data, header, mask, bkg_keyname="BACKVAL", **kwargs):
""" Check if background stored in header + short stats """
from astropy.stats import sigma_clipped_stats
from .io import find_keyword_header
# Short estimate summary
mean, med, std = sigma_clipped_stats(data, mask, **kwargs)
logger.info("Background stats: mean = %.5g med = %.5g std = %.5g"%(mean, med, std))
# check header key
bkg = find_keyword_header(header, bkg_keyname)
if bkg is None: bkg = med
return bkg, std
def background_annulus(cen, data, mask,
r_in=240., r_out=360, draw=True,
**plot_kw):
""" Extract local background value using annulus """
data_ = data.copy()
annulus_aperture = CircularAnnulus(cen, r_in=r_in, r_out=r_out)
annulus_masks = annulus_aperture.to_mask(method='center')
annulus_data = annulus_masks.multiply(data_)
mask_ring = annulus_masks.data
annulus_data_1d = annulus_data[mask_ring!=0]
mask_1d = annulus_masks.multiply(mask)[mask_ring!=0]
_, median_sigclip, _ = sigma_clipped_stats(annulus_data_1d, mask=mask_1d)
if draw:
plt.imshow(np.ma.array(annulus_data, mask=mask_ring==0), **plot_kw)
plt.show()
return median_sigclip
def background_extraction(field, mask=None, return_rms=True,
b_size=64, f_size=3, n_iter=5, **kwargs):
""" Extract background & rms image using SE estimator with mask """
from photutils import Background2D, SExtractorBackground
try:
Bkg = Background2D(field, mask=mask,
bkg_estimator=SExtractorBackground(),
box_size=b_size, filter_size=f_size,
sigma_clip=SigmaClip(sigma=3., maxiters=n_iter),
**kwargs)
back = Bkg.background
back_rms = Bkg.background_rms
except ValueError:
img = field.copy()
if mask is not None:
img[mask] = np.nan
back = np.nanmedian(field) * np.ones_like(field)
back_rms = np.nanstd(field) * np.ones_like(field)
if return_rms:
return back, back_rms
else:
return back
def source_detection(data, sn=2.5, b_size=120,
k_size=3, fwhm=3, smooth=True,
sub_background=True, mask=None):
from astropy.convolution import Gaussian2DKernel
from photutils import detect_sources, deblend_sources
if sub_background:
back, back_rms = background_extraction(data, b_size=b_size)
threshold = back + (sn * back_rms)
else:
back = np.zeros_like(data)
threshold = np.nanstd(data)
if smooth:
sigma = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=k_size, y_size=k_size)
kernel.normalize()
else:
kernel=None
segm_sm = detect_sources(data, threshold, npixels=5,
filter_kernel=kernel, mask=mask)
data_ma = data.copy() - back
data_ma[segm_sm.data!=0] = np.nan
return data_ma, segm_sm
def flattened_linear(x, k, x0, y0):
""" A linear function flattened at (x0,y0) of 1d array """
return np.array(list(map(lambda x:k*x + (y0-k*x0) if x>=x0 else y0, x)))
def piecewise_linear(x, k1, k2, x0, y0):
""" A piecewise linear function transitioned at (x0,y0) of 1d array """
return np.array(list(map(lambda x:k1*x + (y0-k1*x0) if x>=x0 else k2*x + (y0-k2*x0), x)))
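# Added illustration (hedged): flattened_linear stays at y0 below x0 and is
# linear with slope k above it; piecewise_linear switches slope from k2 to k1
# at (x0, y0).
def _demo_linear_helpers():
    x = np.array([0., 1., 2., 3.])
    return (flattened_linear(x, 1., 2., 5.),      # [5., 5., 5., 6.]
            piecewise_linear(x, 1., 2., 2., 5.))  # [1., 3., 5., 6.]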
def iter_curve_fit(x_data, y_data, func, p0=None,
color=None, x_min=None, x_max=None,
x_lab='', y_lab='',c_lab='',
n_iter=3, k_std=5, draw=True,
fig=None, ax=None, **kwargs):
""" Wrapper for iterative curve_fit """
# min-max cutoff
if x_min is None: x_min = x_data.min()
if x_max is None: x_max = x_data.max()
cut = (x_data>x_min) & (x_data<x_max)
x_data = x_data[cut]
y_data = y_data[cut]
if color is not None: color = color[cut]
# initialize
x_test = np.linspace(x_min, x_max)
clip = np.zeros_like(x_data, dtype='bool')
# first curve_fit
popt, pcov = curve_fit(func, x_data, y_data, p0=p0, **kwargs)
if draw:
with plt.rc_context({"text.usetex": False}):
if fig is None: fig = plt.figure()
if ax is None: ax = fig.add_subplot(1,1,1)
# Iterative sigma clip
for i in range(n_iter):
if draw: ax.plot(x_test, func(x_test, *popt),
color='r', lw=1, ls='--', alpha=0.2)
x_clip, y_clip = x_data[~clip], y_data[~clip]
popt, pcov = curve_fit(func, x_clip, y_clip, p0=p0, **kwargs)
# compute residual and stddev
res = y_data - func(x_data, *popt)
std = mad_std(res)
clip = res**2 > (k_std*std)**2
# clip function
clip_func = lambda x, y: (y - func(x, *popt))**2 > (k_std*std)**2
if draw:
s = ax.scatter(x_data, y_data, c=color,
s=10, cmap='viridis', alpha=0.4)
ax.scatter(x_data[clip], y_data[clip], lw=2, s=25,
facecolors='none', edgecolors='orange', alpha=0.7)
ax.plot(x_test, func(x_test, *popt), color='r')
if plt.rcParams['text.usetex']:
c_lab = c_lab.replace('_','$\_$')
x_lab = x_lab.replace('_','$\_$')
y_lab = y_lab.replace('_','$\_$')
if color is not None:
with plt.rc_context({"text.usetex": False}):
fig.colorbar(s, label=c_lab)
ax.set_xlim(x_min, x_max)
invert = lambda lab: ('MAG' in lab) | ('MU' in lab)
if invert(x_lab): ax.invert_xaxis()
if invert(y_lab): ax.invert_yaxis()
# ax.set_xlabel(x_lab)
# ax.set_ylabel(y_lab)
return popt, pcov, clip_func
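# Added usage sketch (hedged, not part of the original module): fit a flattened
# linear relation to noisy synthetic data with a few injected outliers.
def _demo_iter_curve_fit():
    rng = np.random.RandomState(0)
    x = np.linspace(8., 18., 200)
    y = flattened_linear(x, 1., 13.5, 16.) + rng.normal(0, 0.1, x.size)
    y[:5] += 3.                                   # outliers to be sigma-clipped
    popt, pcov, clip_func = iter_curve_fit(x, y, flattened_linear,
                                           p0=(1., 13.5, 16.), draw=False)
    return popt                                   # close to (1, 13.5, 16)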
def identify_extended_source(SE_catalog, mag_limit=15, mag_saturate=13.5, draw=True):
""" Empirically pick out (bright) extended sources in the SE_catalog.
    The catalog needs to contain the following columns:
'MAG_AUTO', 'MU_MAX', 'ELLIPTICITY', 'CLASS_STAR' """
bright = SE_catalog['MAG_AUTO'] < mag_limit
SE_bright = SE_catalog[bright]
if len(SE_bright)>0:
x_data, y_data = SE_bright['MAG_AUTO'], SE_bright['MU_MAX']
else:
        return SE_catalog, None, mag_saturate
MU_satur_0 = np.quantile(y_data, 0.001) # guess of saturated MU_MAX
MAG_satur_0 = mag_saturate # guess of saturated MAG_AUTO
# Fit a flattened linear
logger.info("Fit an empirical relation to exclude extended sources...")
popt, _, clip_func = iter_curve_fit(x_data, y_data, flattened_linear,
p0=(1, MAG_satur_0, MU_satur_0),
x_max=mag_limit, x_min=max(7,np.min(x_data)),
color=SE_bright['CLASS_STAR'],
x_lab='MAG_AUTO',y_lab='MU_MAX',
c_lab='CLASS_STAR', draw=draw)
if draw: plt.show()
mag_saturate = popt[1]
logger.info("Saturation occurs at mag = {:.2f}".format(mag_saturate))
# pick outliers in the catalog
outlier = clip_func(SE_catalog['MAG_AUTO'], SE_catalog['MU_MAX'])
# identify bright extended sources by:
# (1) elliptical object or CLASS_STAR<0.5
# (2) brighter than mag_limit
# (3) lie out of MU_MAX vs MAG_AUTO relation
is_extend = (SE_catalog['ELLIPTICITY']>0.7) | (SE_catalog['CLASS_STAR']<0.5)
is_extend = is_extend & bright & outlier
SE_catalog_extend = SE_catalog[is_extend]
if len(SE_catalog_extend)>0:
SE_catalog_point = setdiff(SE_catalog, SE_catalog_extend)
return SE_catalog_point, SE_catalog_extend, mag_saturate
else:
return SE_catalog, None, mag_saturate
def clean_isolated_stars(xx, yy, mask, star_pos, pad=0, dist_clean=60):
""" Remove items of stars far away from mask """
star_pos = star_pos + pad
clean = np.zeros(len(star_pos), dtype=bool)
for k, pos in enumerate(star_pos):
rr = np.sqrt((xx-pos[0])**2+(yy-pos[1])**2)
if np.min(rr[~mask]) > dist_clean:
clean[k] = True
return clean
def cal_profile_1d(img, cen=None, mask=None, back=None, bins=None,
color="steelblue", xunit="pix", yunit="Intensity",
seeing=2.5, pixel_scale=DF_pixel_scale, ZP=27.1,
sky_mean=0, sky_std=3, dr=1,
lw=2, alpha=0.7, markersize=5, I_shift=0,
core_undersample=False, figsize=None,
label=None, plot_line=False, mock=False,
plot=True, errorbar=False,
scatter=False, fill=False, use_annulus=False):
"""
    Calculate the 1d radial profile of a given star postage stamp.
"""
if mask is None:
mask = np.zeros_like(img, dtype=bool)
if back is None:
back = np.ones_like(img) * sky_mean
bkg_val = np.median(back)
if cen is None:
cen = (img.shape[1]-1)/2., (img.shape[0]-1)/2.
if use_annulus:
img[mask] = np.nan
yy, xx = np.indices(img.shape)
rr = np.sqrt((xx - cen[0])**2 + (yy - cen[1])**2)
r = rr[~mask].ravel() # radius in pix
z = img[~mask].ravel() # pixel intensity
r_core = np.int32(2 * seeing) # core radius in pix
# Decide the outermost radial bin r_max before going into the background
bkg_cumsum = np.arange(1, len(z)+1, 1) * bkg_val
z_diff = abs(z.cumsum() - bkg_cumsum)
n_pix_max = len(z) - np.argmin(abs(z_diff - 0.00005 * z_diff[-1]))
r_max = np.min([img.shape[0]//2, np.sqrt(n_pix_max/np.pi)])
if xunit == "arcsec":
r *= pixel_scale # radius in arcsec
r_core *= pixel_scale
r_max *= pixel_scale
d_r = dr * pixel_scale
else:
d_r = dr
with warnings.catch_warnings():
warnings.simplefilter('ignore')
clip = lambda z: sigma_clip((z), sigma=5, maxiters=5)
if bins is None:
# Radial bins: discrete/linear within r_core + log beyond it
if core_undersample:
# for undersampled core, bin at int pixels
bins_inner = np.unique(r[r<r_core]) - 1e-3
else:
n_bin_inner = int(min((r_core/d_r*2), 6))
bins_inner = np.linspace(0, r_core-d_r, n_bin_inner) - 1e-3
n_bin_outer = np.max([6, np.min([np.int32(r_max/d_r/10), 50])])
if r_max > (r_core+d_r):
bins_outer = np.logspace(np.log10(r_core+d_r),
np.log10(r_max+2*d_r), n_bin_outer)
else:
bins_outer = []
bins = np.concatenate([bins_inner, bins_outer])
_, bins = np.histogram(r, bins=bins)
# Calculate binned 1d profile
r_rbin = np.array([])
z_rbin = np.array([])
zerr_rbin = np.array([])
for k, b in enumerate(bins[:-1]):
r_in, r_out = bins[k], bins[k+1]
in_bin = (r>=r_in) & (r<=r_out)
if use_annulus:
# Fractional ovelap w/ annulus
annl = CircularAnnulus(cen, abs(r_in)/pixel_scale, r_out/pixel_scale)
annl_ma = annl.to_mask()
# Intensity by fractional mask
z_ = annl_ma.multiply(img)
zb = np.sum(z_[~np.isnan(z_)]) / annl.area
zerr_b = sky_std / annl.area
rb = np.mean(r[in_bin])
else:
z_clip = clip(z[~np.isnan(z) & in_bin])
if np.ma.is_masked(z_clip):
z_clip = z_clip.compressed()
if len(z_clip)==0:
continue
zb = np.mean(z_clip)
zstd_b = np.std(z_clip) if len(z_clip) > 10 else 0
zerr_b = np.sqrt((zstd_b**2 + sky_std**2) / len(z_clip))
rb = np.mean(r[in_bin])
z_rbin = np.append(z_rbin, zb)
zerr_rbin = np.append(zerr_rbin, zerr_b)
r_rbin = np.append(r_rbin, rb)
logzerr_rbin = 0.434 * abs( zerr_rbin / (z_rbin-sky_mean))
if yunit == "SB":
I_rbin = Intensity2SB(z_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) + I_shift
if plot:
if figsize is not None:
plt.figure(figsize=figsize)
if yunit == "Intensity":
# plot radius in Intensity
plt.plot(r_rbin, np.log10(z_rbin), "-o", color=color,
mec="k", lw=lw, ms=markersize, alpha=alpha, zorder=3, label=label)
if scatter:
I = np.log10(z)
if fill:
plt.fill_between(r_rbin, np.log10(z_rbin)-logzerr_rbin, np.log10(z_rbin)+logzerr_rbin,
color=color, alpha=0.2, zorder=1)
plt.ylabel("log Intensity")
elif yunit == "SB":
# plot radius in Surface Brightness
if mock is False:
I_sky = -2.5*np.log10(sky_std) + ZP + 2.5 * math.log10(pixel_scale**2)
p = plt.plot(r_rbin, I_rbin, "-o", mec="k",
lw=lw, ms=markersize, color=color,
alpha=alpha, zorder=3, label=label)
if scatter:
I = Intensity2SB(z, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) + I_shift
if errorbar:
Ierr_rbin_up = I_rbin - Intensity2SB(z_rbin+zerr_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) - I_shift
Ierr_rbin_lo = Intensity2SB(z_rbin-zerr_rbin, BKG=bkg_val,
ZP=ZP, pixel_scale=pixel_scale) - I_rbin + I_shift
lolims = np.isnan(Ierr_rbin_lo)
uplims = np.isnan(Ierr_rbin_up)
Ierr_rbin_lo[lolims] = 99
Ierr_rbin_up[uplims] = np.nan
plt.errorbar(r_rbin, I_rbin, yerr=[Ierr_rbin_up, Ierr_rbin_lo],
fmt='', ecolor=p[0].get_color(), capsize=2, alpha=0.5)
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.gca().invert_yaxis()
plt.ylim(30,17)
plt.xscale("log")
plt.xlim(max(r_rbin[np.isfinite(r_rbin)][0]*0.8, pixel_scale*0.5),
r_rbin[np.isfinite(r_rbin)][-1]*1.2)
if xunit == "arcsec":
plt.xlabel("Radius [arcsec]")
else:
plt.xlabel("radius [pix]")
if scatter:
plt.scatter(r[r<3*r_core], I[r<3*r_core], color=color,
s=markersize/2, alpha=alpha/2, zorder=1)
plt.scatter(r[r>=3*r_core], I[r>=3*r_core], color=color,
s=markersize/5, alpha=alpha/10, zorder=1)
        # Decide the radius within which the intensity is saturated for bright stars (intensity drops by half)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
dz_rbin = np.diff(np.log10(z_rbin))
dz_cum = np.cumsum(dz_rbin)
if plot_line:
r_satr = r_rbin[np.argmax(dz_cum<-0.3)] + 1e-3
plt.axvline(r_satr,color="k",ls="--",alpha=0.9)
plt.axvline(r_core,color="k",ls=":",alpha=0.9)
plt.axhline(I_sky,color="gray",ls="-.",alpha=0.7)
if yunit == "Intensity":
return r_rbin, z_rbin, logzerr_rbin
elif yunit == "SB":
return r_rbin, I_rbin, None
def make_psf_2D(n_s, theta_s,
frac=0.3, beta=6.6, fwhm=6.,
cutoff_param={"cutoff":False, "n_c":4, "theta_c":1200},
psf_range=1200, pixel_scale=DF_pixel_scale, plot=False):
"""
Make 2D PSF from parameters.
Parameters
----------
n_s : 1d list or array
Power index of PSF aureole.
theta_s : 1d list or array
Transition radii of PSF aureole in arcsec.
frac: float
Fraction of aureole [0 - 1]
beta : float
Moffat beta
fwhm : float
Moffat fwhm in arcsec
cutoff_param : dict, optional
        Parameters controlling the cutoff.
psf_range : int, optional, default 1200
Range of PSF. In arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
plot : bool, optional, default False
Whether to plot the PSF image.
Returns
-------
PSF : 2d array
Image of the PSF. Normalized to 1.
    psf : elderflower.modeling.PSF_Model
The PSF model object.
"""
from .modeling import PSF_Model
# Aureole Parameters
params_mpow = {"frac":frac, "fwhm":fwhm, "beta":beta,
"n_s":np.atleast_1d(n_s), "theta_s":np.atleast_1d(theta_s)}
params_mpow.update(cutoff_param)
if cutoff_param["cutoff"]:
psf_range = max(cutoff_param["theta_c"], psf_range)
# Build PSF Model
psf = PSF_Model(params=params_mpow, aureole_model='multi-power')
# Build grid of image for drawing
psf.pixelize(pixel_scale)
# Generate core and aureole PSF
psf_c = psf.generate_core()
psf_e, psf_size = psf.generate_aureole(contrast=1e6,
psf_range=psf_range,
psf_scale=pixel_scale)
    # Plot the GalSim 2D model extracted in 1D
if plot: psf.plot1D(xunit='arcsec')
# Center and grid
size = int(np.floor(psf_range/pixel_scale) * 2) + 1
cen = ((size-1)/2., (size-1)/2.)
x_ = y_ = np.linspace(0,size-1,size)
xx, yy = np.meshgrid(x_, y_)
# Draw image of PSF normalized to 1
PSF_aureole = psf.draw_aureole2D_in_real([cen], Flux=np.array([frac]))[0]
PSF_core = psf.draw_core2D_in_real([cen], Flux=np.array([1-frac]))[0]
PSF = PSF_core(xx,yy) + PSF_aureole(xx,yy)
PSF = PSF/PSF.sum()
return PSF, psf
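# Added usage sketch (hedged): build a PSF image from the fiducial Dragonfly
# parameters defined in DF_default_params; a reduced psf_range keeps it small.
def _demo_make_psf_2D():
    PSF, psf = make_psf_2D(n_s=DF_default_params["n_s"],
                           theta_s=DF_default_params["theta_s"],
                           frac=DF_default_params["frac"],
                           beta=DF_default_params["beta"],
                           fwhm=DF_default_params["fwhm"],
                           psf_range=300, plot=False)
    return PSF.shape, PSF.sum()   # odd-sized square image, normalized to ~1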
def make_psf_1D(n_s, theta_s,
frac=0.3, beta=6.6, fwhm=6.,
cutoff_param={"cutoff":False, "n_c":4, "theta_c":1200},
psf_range=1200, pixel_scale=DF_pixel_scale,
dr=1, mag=0, ZP=0, plot=False):
"""
Make 1D PSF profiles from parameters.
Parameters
----------
n_s : 1d list or array
Power index of PSF aureole.
theta_s : 1d list or array
Transition radii of PSF aureole.
frac: float
Fraction of aureole [0 - 1]
beta : float
Moffat beta
fwhm : float
Moffat fwhm in arcsec
cutoff_param : dict, optional
        Parameters controlling the cutoff.
psf_range : int, optional, default 1200
Range of PSF. In arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
    dr : float, optional, default 1
        Radial interval of the profile in arcsec.
    mag : float, optional, default 0
Magnitude of the PSF.
ZP : float, optional, default 0
Zero point.
plot : bool, optional, default False
Whether to plot the 1D PSF profile.
Returns
-------
r : 1d array
Radius of the profile in arcsec
I : 1d array
Surface brightness in mag/arcsec^2
D : 2d array
Image of the PSF.
"""
Amp = 10**((mag-ZP)/-2.5)
if plot:
print('Scaled 1D PSF to magnitude = ', mag)
size = int(np.floor(psf_range/pixel_scale) * 2) + 1
cen = ((size-1)/2., (size-1)/2.)
D, psf = make_psf_2D(n_s, theta_s, frac, beta, fwhm,
cutoff_param=cutoff_param,
pixel_scale=pixel_scale,
psf_range=psf_range)
D *= Amp
r, I, _ = cal_profile_1d(D, cen=cen, mock=True,
ZP=ZP, sky_mean=0, sky_std=1e-9,
dr=dr, seeing=fwhm,
pixel_scale=pixel_scale,
xunit="arcsec", yunit="SB",
color="lightgreen",
lw=4, alpha=0.9, plot=plot,
core_undersample=True)
if plot:
plt.xlim(2, max(1e3, np.max(2*theta_s)))
plt.ylim(24,4)
for pos in theta_s:
plt.axvline(pos, ls="--", color="k", alpha=0.3, zorder=0)
return r, I, D
def calculate_fit_SB(psf, r=np.logspace(0.03,2.5,100), mags=[15,12,9], ZP=27.1):
frac = psf.frac
I_s = [10**((mag-ZP)/-2.5) for mag in mags]
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
I_tot_s = [Intensity2SB(((1-frac) * comp1 + comp2 * frac) * I,
0, ZP, psf.pixel_scale) for I in I_s]
return I_tot_s
### Class & Funcs for measuring scaling ###
def compute_Rnorm(image, mask_field, cen,
R=12, wid_ring=1, wid_cross=4,
mask_cross=True, display=False):
"""
Compute the scaling factor using an annulus.
Note the output values include the background level.
    Parameters
    ----------
image : input image for measurement
mask_field : mask map with masked pixels = 1.
    cen : center of the target in image coordinate
R : radius of annulus in pix
wid_ring : half-width of annulus in pix
wid_cross : half-width of spike mask in pix
Returns
-------
I_mean: mean value in the annulus
I_med : median value in the annulus
I_std : std value in the annulus
    I_flag : 0 good / 1 bad (available pixels < 5)
"""
if image is None:
return [np.nan] * 3 + [1]
cen = (cen[0], cen[1])
anl = CircularAnnulus([cen], R-wid_ring, R+wid_ring)
anl_ma = anl.to_mask()[0].to_image(image.shape)
in_ring = anl_ma > 0.5 # sky ring (R-wid, R+wid)
mask = in_ring & (~mask_field) & (~np.isnan(image))
# sky ring with other sources masked
# Whether to mask the cross regions, important if R is small
if mask_cross:
yy, xx = np.indices(image.shape)
rr = np.sqrt((xx-cen[0])**2+(yy-cen[1])**2)
in_cross = ((abs(xx-cen[0])<wid_cross))|(abs(yy-cen[1])<wid_cross)
mask = mask * (~in_cross)
if len(image[mask]) < 5:
return [np.nan] * 3 + [1]
z_ = sigma_clip(image[mask], sigma=3, maxiters=5)
z = z_.compressed()
I_mean = np.average(z, weights=anl_ma[mask][~z_.mask])
I_med, I_std = np.median(z), np.std(z)
if display:
L = min(100, int(mask.shape[0]))
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9,4))
ax1.imshow(mask, cmap="gray", alpha=0.7)
ax1.imshow(mask_field, alpha=0.2)
ax1.imshow(image, cmap='viridis', alpha=0.7,
norm=AsinhNorm(0.05, vmin=image.min(), vmax=I_med+50*I_std))
ax1.plot(cen[0], cen[1], 'r+', ms=10)
ax2.hist(z,alpha=0.7)
# Label mean value
plt.axvline(I_mean, color='k')
plt.text(0.5, 0.9, "%.1f"%I_mean,
color='darkorange', ha='center', transform=ax2.transAxes)
# Label 20% / 80% quantiles
I_20 = np.quantile(z, 0.2)
I_80 = np.quantile(z, 0.8)
for I, x_txt in zip([I_20, I_80], [0.2, 0.8]):
plt.axvline(I, color='k', ls="--")
plt.text(x_txt, 0.9, "%.1f"%I, color='orange',
ha='center', transform=ax2.transAxes)
ax1.set_xlim(cen[0]-L//4, cen[0]+L//4)
ax1.set_ylim(cen[1]-L//4, cen[1]+L//4)
plt.show()
return I_mean, I_med, I_std, 0
def compute_Rnorm_batch(table_target,
image, seg_map, wcs,
r_scale=12, k_win=1,
wid_ring=0.5, wid_cross=4,
display=False, verbose=True):
"""
Compute scaling factors for objects in the table.
Return an array with measurement and a dictionary containing maps and centers.
    Parameters
    ----------
table_target : astropy.table.Table
SExtractor table containing measurements of sources.
image : 2d array
Full image.
seg_map : 2d array
Full segmentation map used to mask nearby sources during the measurement.
wcs_data : astropy.wcs.wcs
WCS of image.
r_scale : int, optional, default 12
Radius in pixel at which the flux scaling is measured.
k_win : int, optional, default 1
Enlargement factor for extracting thumbnails.
wid_ring : float, optional, default 0.5
Half-width in pixel of ring used to measure the scaling.
wid_cross : float, optional, default 4
Half-width in pixel of the spike mask when measuring the scaling.
Returns
-------
res_norm : nd array
A N x 5 array saving the measurements.
[I_mean, I_med, I_std, I_sky, I_flag]
res_thumb : dict
A dictionary storing thumbnails, mask, background and center of object.
"""
from .image import Thumb_Image
# Initialize
res_thumb = {}
res_norm = np.empty((len(table_target), 5))
# Iterate rows over the target table
for i, row in enumerate(table_target):
if verbose: counter(i, len(table_target))
num, mag_auto = row['NUMBER'], row['MAG_AUTO']
# For brighter sources, use a broader window
if mag_auto <= 10.5:
n_win = int(40 * k_win)
elif 10.5 < mag_auto < 13.5:
n_win = int(30 * k_win)
elif 13.5 < mag_auto < 15:
n_win = int(20 * k_win)
else:
n_win = int(10 * k_win)
# Make thumbnail of the star and mask sources
thumb = Thumb_Image(row, wcs)
thumb.extract_star(image, seg_map, n_win=n_win)
# Measure the mean, med and std of intensity at r_scale
thumb.compute_Rnorm(R=r_scale,
wid_ring=wid_ring,
wid_cross=wid_cross,
display=display)
I_flag = thumb.I_flag
        if (I_flag==1) & verbose: logger.debug("Erroneous measurement: # %d"%num)
# Store results as dict (might be bulky)
res_thumb[num] = {"image":thumb.img_thumb,
"mask":thumb.star_ma,
"bkg":thumb.bkg,
"center":thumb.cen_star}
# Store measurements to array
I_stats = ['I_mean', 'I_med', 'I_std', 'I_sky']
res_norm[i] = np.array([getattr(thumb, attr) for attr in I_stats] + [I_flag])
return res_norm, res_thumb
def measure_Rnorm_all(table,
bounds,
wcs_data,
image,
seg_map=None,
r_scale=12,
mag_limit=15,
mag_saturate=13.5,
mag_name='rmag_PS',
k_enlarge=1,
width_ring=0.5,
width_cross=4,
obj_name="", display=False,
save=True, dir_name='.',
read=False, verbose=True):
"""
Measure intensity at r_scale for bright stars in table.
Parameters
----------
table : astropy.table.Table
SExtractor table containing measurements of sources.
bounds : 1d array or list
Boundaries of the region in the image [Xmin, Ymin, Xmax, Ymax].
wcs_data : astropy.wcs.wcs
WCS of image.
image : 2d array
Full image.
seg_map : 2d array, optional, default None
Full segmentation map used to mask nearby sources during the measurement.
If not given, it will be done locally by photutils.
r_scale : int, optional, default 12
Radius in pixel at which the flux scaling is measured.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured.
mag_saturate : float, optional, default 13.5
Estimate of magnitude at which the image is saturated.
mag_name : str, optional, default 'rmag_PS'
Column name of magnitude used in the table.
k_enlarge : int, optional, default 1
Enlargement factor for extracting thumbnails.
width_ring : float, optional, default 0.5
Half-width in pixel of ring used to measure the scaling.
width_cross : float, optional, default 4
Half-width in pixel of the spike mask when measuring the scaling.
obj_name : str, optional
Object name used as prefix of saved output.
save : bool, optional, default True
Whether to save output table and thumbnails.
dir_name : str, optional
        Path of saving. Use the current one as default.
    read : bool, optional, default False
        Whether to read existing outputs if available.
Returns
-------
table_norm : astropy.table.Table
Table containing measurement results.
res_thumb : dict
A dictionary storing thumbnails, mask, background and center of object.
'image' : image of the object
'mask' : mask map from SExtractor with nearby sources masked (masked = 1)
'bkg' : estimated local 2d background
'center' : 0-based centroid of the object from SExtracror
"""
msg = "Measure intensity at R = {0} ".format(r_scale)
msg += "for catalog stars {0:s} < {1:.1f} in ".format(mag_name, mag_limit)
msg += "{0}.".format(bounds)
logger.info(msg)
band = mag_name[0]
range_str = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bounds)
fn_table_norm = os.path.join(dir_name, '%s-norm_%dpix_%smag%d_%s.txt'\
%(obj_name, r_scale, band, mag_limit, range_str))
fn_res_thumb = os.path.join(dir_name, '%s-thumbnail_%smag%d_%s.pkl'\
%(obj_name, band, mag_limit, range_str))
fn_psf_satck = os.path.join(dir_name, f'{obj_name}-{band}-psf_stack_{range_str}.fits')
if read:
table_norm = Table.read(fn_table_norm, format="ascii")
res_thumb = load_pickle(fn_res_thumb)
else:
tab = table[table[mag_name]<mag_limit]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
res_norm, res_thumb = compute_Rnorm_batch(tab, image,
seg_map, wcs_data,
r_scale=r_scale,
wid_ring=width_ring,
wid_cross=width_cross,
k_win=k_enlarge,
display=display,
verbose=verbose)
keep_columns = ['NUMBER', 'MAG_AUTO', 'MAG_AUTO_corr', 'MU_MAX', mag_name] \
+ [name for name in tab.colnames
if ('IMAGE' in name)|('CATALOG' in name)]
        # drop requested columns that are absent from the catalog
        # (filtering in one pass avoids removing items while iterating)
        keep_columns = [name for name in keep_columns if name in tab.colnames]
table_norm = tab[keep_columns].copy()
for j, colname in enumerate(['Imean','Imed','Istd','Isky','Iflag']):
if colname=='Iflag':
col = res_norm[:,j].astype(int)
else:
col = np.around(res_norm[:,j], 5)
table_norm[colname] = col
if save: # save star thumbnails
check_save_path(dir_name, overwrite=True, verbose=False)
save_pickle(res_thumb, fn_res_thumb, 'thumbnail result')
table_norm.write(fn_table_norm, overwrite=True, format='ascii')
# Stack non-saturated stars to obtain the inner PSF.
tab_stack = tab[(tab['FLAGS']<=3) & (tab['MAG_AUTO']>mag_saturate)]
psf_stack = stack_star_image(tab_stack, res_thumb, size=5*r_scale+1)
if save:
fits.writeto(fn_psf_satck, data=psf_stack, overwrite=True)
logger.info(f"Saved stacked PSF to {fn_psf_satck}")
return table_norm, res_thumb
### Stacking PSF functions ###
def resample_thumb(image, mask, center):
"""
Shift and resample the thumb image & mask to have odd dimensions
and center at the center pixel.
Parameters
----------
image : input image, 2d array
mask : input mask, 2d bool array (masked =1)
    center : center of the target, array or tuple
Returns
-------
image_ : output image, 2d array
mask_ : output mask, 2d bool array (masked =1)
center_ : center of the target after the shift
"""
from scipy.interpolate import RectBivariateSpline
X_c, Y_c = center
NY, NX = image.shape
# original grid points
Xp, Yp = np.linspace(0, NX-1, NX), np.linspace(0, NY-1, NY)
rbspl = RectBivariateSpline(Xp, Yp, image, kx=3, ky=3)
rbspl_ma = RectBivariateSpline(Xp, Yp, mask, kx=1, ky=1)
# new NAXIS
NY_ = NX_ = int(np.floor(image.shape[0]/2) * 2) - 3
# shift grid points
Xp_ = np.linspace(X_c - NX_//2, X_c + NX_//2, NX_)
Yp_ = np.linspace(Y_c - NY_//2, Y_c + NY_//2, NY_)
# resample image
image_ = rbspl(Xp_, Yp_)
mask_ = rbspl_ma(Xp_, Yp_) > 0.5
center_ = np.array([X_c - Xp_[0], Y_c - Yp_[0]])
return image_, mask_, center_
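# Added check (hedged, not part of the original module): resampling a square
# thumbnail puts the requested centroid on the central pixel of an odd-sized
# output (here a synthetic 41 x 41 stamp).
def _demo_resample_thumb():
    img = np.random.rand(41, 41)
    mask = np.zeros_like(img, dtype=bool)
    img_, mask_, cen_ = resample_thumb(img, mask, center=(20.3, 19.7))
    return img_.shape, cen_        # (37, 37) and centre at (18., 18.)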
def stack_star_image(table_stack, res_thumb, size=61):
"""
Stack images of stars in the table.
Parameters
----------
table_stack : astropy.table.Table
        SExtractor table of stars to stack
res_thumb : dict
the dict containing the thumbnails, masks and centers
size : int, optional, default 61
        Size of the stacked image in pixels; it will be rounded to an odd number.
Returns
-------
image_stack : stacked image
"""
from scipy.ndimage import binary_dilation
size = int(size/2) * 2 + 1
shape = (size, size)
canvas = np.zeros(shape)
footprint = np.zeros_like(canvas)
i = 0
    logger.info("Stacking non-saturated stars to obtain the PSF core...")
for num in table_stack['NUMBER']:
# Read image, mask and center
img_star = res_thumb[num]['image']
mask_star = res_thumb[num]['mask']
cen_star = res_thumb[num]['center']
# enlarge mask
for j in range(3):
mask_star = binary_dilation(mask_star)
shape_star = img_star.shape
if shape_star[0]!=shape_star[1]: continue
        # measure local background
r_out = min(img_star.shape) * 0.8 //2
r_in = r_out - 5
bkg = background_annulus(cen_star, img_star, mask_star, r_in=r_in, r_out=r_out, draw=False)
# resample thumbnail centroid to center
img_star_, mask_star_, cen_star_ = resample_thumb(img_star, mask_star, cen_star)
shape_star_ = img_star_.shape
# remove nearby sources
img_star_ = img_star_ - bkg
img_star_[mask_star_] = 0
# add cutout to canvas
dx = abs(shape_star_[0]-canvas.shape[0])//2
dy = abs(shape_star_[1]-canvas.shape[1])//2
if shape_star_[0] > size:
cutout = img_star_[dx:-dx,dy:-dy]
canvas += cutout
footprint += (cutout!=0)
elif shape_star_[0] < size:
cutout = img_star_
canvas[dx:-dx,dy:-dy] += cutout
footprint[dx:-dx,dy:-dy] += (cutout!=0)
else:
canvas += img_star_
footprint += 1
i += 1
image_stack = canvas/footprint
logger.info(" - {:d} Stars used for stacking.".format(i))
return image_stack
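# Usage sketch with two identical synthetic point sources (assumes `Table` and
# `background_annulus` are available from this module's imports/definitions;
# the thumbnail dict layout follows the code above).
def _demo_stack_star_image():
    yy, xx = np.mgrid[0:31, 0:31]
    img = np.exp(-((xx - 15.2)**2 + (yy - 14.8)**2) / 8.)
    entry = dict(image=img, mask=np.zeros_like(img, dtype=bool), center=(15.2, 14.8))
    res_thumb = {1: entry, 2: dict(entry)}
    table_stack = Table({'NUMBER': [1, 2]})
    return stack_star_image(table_stack, res_thumb, size=21)  # 21 x 21 stacked image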
def make_global_stack_PSF(dir_name, bounds_list, obj_name, band, overwrite=True):
"""
Combine the stacked PSFs of all regions into one; skip if the file already exists and overwrite is False.
Parameters
----------
dir_name : str
path containing the stacked PSF
bounds_list : 2D int list / tuple
List of boundaries of regions to be fit (Nx4)
[[X min, Y min, X max, Y max],[...],...]
obj_name : str
Object name
band : str, 'g' 'G' 'r' or 'R'
Filter name
"""
fn_stack = os.path.join(dir_name, f'{obj_name}-{band}-PSF_stack.fits')
if overwrite or (os.path.isfile(fn_stack)==False):
for i, bounds in enumerate(bounds_list):
range_str = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bounds)
fn = os.path.join(dir_name, f'{obj_name}-{band}-psf_stack_{range_str}.fits')
image_psf = fits.getdata(fn)
if i==0:
image_stack = image_psf
else:
image_stack += image_psf
image_stack = image_stack/np.nansum(image_stack)
if i>0:
logger.info("Read & stack {:} PSF.".format(i+1))
fits.writeto(fn_stack, data=image_stack, overwrite=True)
logger.info("Saved stacked PSF as {:}".format(fn_stack))
else:
logger.info("{:} existed. Skip Stack.".format(fn_stack))
def montage_psf_image(image_psf, image_wide_psf, r=10, dr=0.5):
"""
Montage the core of the stacked psf and the wing of the wide psf model.
Parameters
----------
image_psf : 2d array
The image of the inner PSF.
image_wide_psf : 2d array
The image of the wide-angle PSF.
r : int, optional, default 10
Radius in pixel at which the PSF is montaged.
dr : float, optional, default 0.5
Width of annulus for measuring the scaling.
Returns
-------
image_PSF : 2d array
The image of the output PSF.
"""
image_PSF = image_wide_psf.copy()
# Wide PSF
size = image_wide_psf.shape[0]
cen = ((size-1)/2., (size-1)/2.)
x_ = y_ = np.linspace(0,size-1,size)
xx, yy = np.meshgrid(x_, y_)
rr = np.sqrt((yy-cen[0])**2+(xx-cen[1])**2)
I_wide = np.median(image_wide_psf[(rr<r+dr)&(rr>r-dr)])
# Stacked PSF
size_psf = image_psf.shape[0]
cen_psf = ((size_psf-1)/2., (size_psf-1)/2.)
x_psf = y_psf = np.linspace(0,size_psf-1,size_psf)
xx_psf, yy_psf = np.meshgrid(x_psf, y_psf)
rr_psf = np.sqrt((yy_psf-cen_psf[0])**2+(xx_psf-cen_psf[1])**2)
I_psf = np.median(image_psf[(rr_psf<r+dr)&(rr_psf>r-dr)])
# Montage
image_PSF[rr<r] = image_psf[rr_psf<r]/ I_psf * I_wide
image_PSF = image_PSF/image_PSF.sum()
return image_PSF
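# Sketch: montage a synthetic Gaussian core into a synthetic power-law wing.
# Both images are illustrative stand-ins, not real PSF models.
def _demo_montage_psf_image():
    size_wide, size_core = 201, 61
    yy, xx = np.mgrid[0:size_wide, 0:size_wide]
    psf_wide = (np.hypot(xx - size_wide//2, yy - size_wide//2) + 1e-2)**-2.5
    yy_c, xx_c = np.mgrid[0:size_core, 0:size_core]
    psf_core = np.exp(-np.hypot(xx_c - size_core//2, yy_c - size_core//2)**2 / 18.)
    # inside r=10 pix the output follows the core, outside it follows the wing
    return montage_psf_image(psf_core, psf_wide, r=10, dr=0.5)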
def fit_psf_core_1D(image_psf,
params0=DF_default_params,
theta_out=30, d_theta=1.,
pixel_scale=2.5, beta_max=8.,
obj_name="",band="r",
draw=True, save=False, save_dir='./'):
"""
Fit the core parameters from 1D profiles of the input 2D PSF.
Parameters
----------
image_psf : 2d array
The image of the PSF.
params0 : dict
Initial guess of parameters of the PSF.
Dragonfly fiducial values are used if not given.
'frac' : float
Fraction of aureole [0 - 1]
'beta' : float
Moffat beta
'fwhm' : float
Moffat fwhm in arcsec
'n_s' : 1d list or array
Power index of PSF aureole.
Not required to be accurate. n0 is recommended to be close.
'theta_s' : 1d list or array
Transition radii of PSF aureole.
Not required to be accurate.
theta_out : float, optional, default 30
Max radius of the profile in arcsec.
d_theta : float, optional, default 1.
Radial interval of the profile in arcsec.
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
beta_max : float, optional, default 8.
Upper bound of the Moffat beta (the lower bound is fixed at 1.2)
obj_name : str
Object name
band : str, 'g' 'G' 'r' or 'R'
Filter name
draw : bool, optional, default True
Whether to plot the fit.
save : bool, optional, default False
Whether to save the plot.
save_dir : str, optional
Path of saving plot, default current.
"""
# Read initial guess of parameters
frac, beta, fwhm = [params0.get(prop) for prop in ["frac", "beta", "fwhm"]]
n_s = params0["n_s"]
theta_s = params0["theta_s"]
# Center of the input PSF image
cen = ((image_psf.shape[1]-1)/2., (image_psf.shape[0]-1)/2.)
# Set grid points
d_theta = min(d_theta, pixel_scale)
rp = np.arange(1, theta_out+d_theta, d_theta)
# Calculate 1D profile
r_psf, I_psf, _ = cal_profile_1d(image_psf, cen=cen, dr=0.5,
ZP=0, sky_mean=0, plot=False,
pixel_scale=pixel_scale, seeing=3,
xunit="arcsec", yunit="SB",
core_undersample=True)
# Interpolate at grid points
Ip = np.interp(rp, r_psf, I_psf)
# Guess and bounds for core params
p0 = [frac, beta]
bounds = ([1e-5, 1.2], [0.5, beta_max])  # lower/upper bounds for (frac, beta)
logger.info("Fitting core parameters from stacked PSF...")
# Define the target function for fitting the core
def make_psf_core_1D(r_intp, frac, beta):
r_1d, I_1d,_ = make_psf_1D(n_s=n_s, theta_s=theta_s,
frac=frac, beta=beta, fwhm=fwhm,
dr=0.5, pixel_scale=pixel_scale)
I_intp = np.interp(r_intp, r_1d, I_1d)
return I_intp
# Fit the curve
popt, pcov = curve_fit(make_psf_core_1D, rp, Ip, p0, bounds=bounds)
frac, beta = popt
frac_err, beta_err = np.sqrt(pcov[0,0]), np.sqrt(pcov[1,1])
logger.info(" - frac = {:.3f}+/-{:.3f}".format(frac, frac_err))
logger.info(" - beta = {:.3f}+/-{:.3f}".format(beta, beta_err))
logger.info(" - fwhm = {:.3f} from stacking".format(fwhm))
if draw:
I_fit = make_psf_core_1D(rp, frac, beta)
plt.plot(rp, I_fit, 'r-o', ms=5, label='Fit')
plt.plot(rp, Ip, 'y-o', mfc='None', mec='y', label='Data')
plt.ylim(I_fit.max()+0.5, I_fit.min()-0.5)
plt.xlim(-2, theta_out * 1.1)
plt.xlabel("Radius [arcsec]")
plt.ylabel("Surface Brightness")
plt.legend()
if save:
fn_plot = f'Fit_core_{obj_name}-{band}.png'
plt.savefig(os.path.join(save_dir, fn_plot))
plt.show()
return frac, beta
### Resampling functions ###
def transform_rescale(val, scale=0.5):
""" transform coordinates after resampling """
return (val-1) * scale + scale/2. + 0.5
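# Worked example: with scale=0.5 (2x2 binning, 1-based pixel convention),
#   transform_rescale(1, 0.5)   = (1 - 1)*0.5 + 0.25 + 0.5 = 0.75
#   transform_rescale(100, 0.5) = (100 - 1)*0.5 + 0.25 + 0.5 = 50.25
# i.e. the first original pixel maps to 0.75 and pixel 100 maps to 50.25.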
def transform_table_coordinates(table, filename, scale=0.5):
""" transform coordinates in a table and write to a new one """
table_ = table.copy()
# transform coordinates for X/Y_IMAGE and A/B_IMAGE
for coln in table_.colnames:
if 'IMAGE' in coln:
if ('X' in coln) | ('Y' in coln):
table_[coln] = transform_rescale(table[coln], scale)
else:
table_[coln] *= scale
table_.write(filename, format='ascii', overwrite=True)
def downsample_wcs(wcs_input, scale=0.5):
""" Downsample the input wcs along an axis using {CDELT, CRPIX} FITS convention """
header = wcs_input.to_header()
shape = wcs_input.pixel_shape
if 'PC1_1' in header.keys():
cdname = 'PC'
elif 'CD1_1' in header.keys():
cdname = 'CD'
elif 'CDELT1' in header.keys():
cdname = 'CDELT'
else:
msg = 'Fits header has no proper coordinate info (CD, PC, CDELT)!'
logger.error(msg)
raise KeyError(msg)
for axis in [1, 2]:
if cdname == 'PC':
cd = 'PC{0:d}_{0:d}'.format(axis)
elif cdname == 'CD':
cd = 'CD{0:d}_{0:d}'.format(axis)
elif cdname=='CDELT':
cd = 'CDELT{0:d}'.format(axis)
cp = 'CRPIX{0:d}'.format(axis)
na = 'NAXIS{0:d}'.format(axis)
header[cp] = transform_rescale(header[cp], scale)
header[cd] = header[cd]/scale
header[na] = int(round(shape[axis-1]*scale))
return wcs.WCS(header)
def write_downsample_fits(fn, fn_out,
scale=0.5, order=3,
keyword_preserved=['NFRAMES', 'BACKVAL',
'EXP_EFF', 'FILTNAM'],
wcs_out=None):
"""
Write fits data downsampled by factor.
Alternatively a target wcs can be provided.
Parameters
----------
fn: str
full path of fits file
fn_out: str
full path of output fits file
scale: int, optional, default 0.5
scaling factor
order: int, optional, default 3 ('bicubic')
order of interpolation (see docs of reproject)
keyword_preserved: str list
list of keyword to preserve in the output fits
wcs_out: wcs, optional, default None
output target wcs. must have shape info.
If given, scale will be overriden.
Notes
-----
If the output image contains all nan, it is likely the reprojection
fails, try use lower orders, or replacing the nan values.
"""
if not reproject_install:
logger.warning('Module reproject not installed.')
return None
# read fits
header = fits.getheader(fn)
data = fits.getdata(fn)
wcs_input = wcs.WCS(header)
if (wcs_out is not None) and hasattr(wcs_out, 'pixel_shape'):
# use input wcs and shape
shape_out = wcs_out.pixel_shape
logger.info('Rescaling with given shape: {}'.format(shape_out))
else:
# make new wcs and shape according to scale factor
wcs_out = downsample_wcs(wcs_input, scale)
shape_out = (int(data.shape[0]*scale), int(data.shape[1]*scale))
logger.info('Rescaling with factor: {}'.format(scale))
# reproject the image by new wcs
data_rp, _ = reproject_interp((data, wcs_input), wcs_out,
shape_out=shape_out, order=order)
# write new header
header_out = wcs_out.to_header()
for key in keyword_preserved:
if key in header.keys():
header_out[key] = header[key]
# write new fits
fits.writeto(fn_out, data_rp, header=header_out, overwrite=True)
logger.info('Resampled image saved to: {}'.format(fn_out))
return True
def downsample_segmentation(fn, fn_out, scale=0.5):
""" Downsample segmentation and write to fits """
from scipy.ndimage import zoom
if os.path.isfile(fn):
segm = fits.getdata(fn)
segm_out = zoom(segm, zoom=scale, order=1)
fits.writeto(fn_out, segm_out, overwrite=True)
else:
pass
def process_resampling(fn, bounds, obj_name, band,
pixel_scale=DF_pixel_scale, r_scale=12,
mag_limit=15, dir_measure='./', work_dir='./',
factor=1, verbose=True):
from .image import ImageList
# turn bounds_list into 2d array
bounds = np.atleast_2d(bounds).astype(int)
if factor!=1:
if verbose:
logger.info('Resampling by a factor of {0:.1g}...'.format(factor))
scale = 1/factor
fn_rp = "{0}_{2}.{1}".format(*os.path.basename(fn).rsplit('.', 1) + ['rp'])
fn_rp = os.path.join(work_dir, fn_rp)
bounds_rp = np.array([np.round(b_*scale) for b_ in bounds], dtype=int)
# resample image if it does not exist
if not os.path.exists(fn_rp):
write_downsample_fits(fn, fn_rp, scale, order=3)
# construct Image List for original image
DF_Images = ImageList(fn, bounds, obj_name, band,
pixel_scale=pixel_scale)
# read faint stars info and brightness measurement
DF_Images.read_measurement_tables(dir_measure,
r_scale=r_scale,
mag_limit=mag_limit)
# new quantities and names
r_scale *= scale
pixel_scale /= scale
obj_name_rp = obj_name + '_rp'
if verbose:
logger.info('Transforming coordinates for measurement tables...')
for Img, bound, bound_rp in zip(DF_Images, bounds, bounds_rp):
# transform coordinates and write as new tables
old_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound)
new_range = 'X[{0:d}-{2:d}]Y[{1:d}-{3:d}]'.format(*bound_rp)
table_faint, table_norm = Img.table_faint, Img.table_norm
fn_catalog = os.path.join(dir_measure,
"%s-catalog_PS_%s_all.txt"%(obj_name_rp, band.lower()))
fn_norm = os.path.join(dir_measure, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name_rp, r_scale, band.lower(), mag_limit, new_range))
transform_table_coordinates(table_faint, fn_catalog, scale)
transform_table_coordinates(table_norm, fn_norm, scale)
# reproject segmentation
if verbose:
logger.info('Resampling segmentation for bounds: {}'.format(bound))
fn_seg = os.path.join(dir_measure,
"%s-segm_%s_catalog_%s.fits"\
%(obj_name, band.lower(), old_range))
fn_seg_out = os.path.join(dir_measure, "%s-segm_%s_catalog_%s.fits"\
%(obj_name_rp, band.lower(), new_range))
downsample_segmentation(fn_seg, fn_seg_out, scale)
else:
fn_rp, bounds_rp = fn, bounds
return fn_rp, bounds_rp
### Catalog / Data Manipulation Helper ###
def id_generator(size=6, chars=None):
if chars is None:
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def crop_catalog(cat, bounds, keys=("X_IMAGE", "Y_IMAGE"), sortby=None):
Xmin, Ymin, Xmax, Ymax = bounds
A, B = keys
crop = (cat[A]>=Xmin) & (cat[A]<=Xmax) & (cat[B]>=Ymin) & (cat[B]<=Ymax)
if sortby is not None:
cat_crop = cat[crop]
cat_crop.sort(keys=sortby)
return cat_crop
else:
return cat[crop]
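# Usage sketch (assumes `Table` from astropy is imported at the top of this module):
def _demo_crop_catalog():
    cat = Table({'X_IMAGE': [10., 60., 30.],
                 'Y_IMAGE': [20., 10., 45.],
                 'MAG_AUTO': [15., 12., 14.]})
    # keep the two sources inside the 50x50 box, sorted by magnitude
    return crop_catalog(cat, bounds=(0, 0, 50, 50), sortby='MAG_AUTO')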
def crop_pad(image, pad):
""" Crop the padding of the image """
shape = image.shape
return image[pad:shape[0]-pad, pad:shape[1]-pad]
def crop_image(data, bounds, wcs=None, draw=False, **kwargs):
""" Crop the data (and segm map if given) with the given bouds.
Note boundaries are in 1-based pixel coordianates. """
Xmin, Ymin, Xmax, Ymax = bounds
# X, Y image size
nX, nY = (Xmax-Xmin, Ymax-Ymin)
# center in 1-based pixel coordinates
cen = (Xmin+(nX-1)/2., Ymin+(nY-1)/2.)
# make cutout
cutout = Cutout2D(data, cen, (nY, nX), wcs=wcs)
if draw:
from .plotting import draw_bounds
draw_bounds(data, bounds, **kwargs)
# also return cutout of wcs if given
if wcs is None:
return cutout.data
else:
return cutout.data, cutout.wcs
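# Usage sketch: crop a plain array with 1-based bounds (no wcs given).
def _demo_crop_image():
    data = np.arange(100.).reshape(10, 10)
    # 5x5 cutout spanning X = 3..8, Y = 2..7 (1-based)
    return crop_image(data, bounds=(3, 2, 8, 7))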
def transform_coords2pixel(table, wcs, name='',
RA_key="RAJ2000", DE_key="DEJ2000", origin=1):
""" Transform the RA/DEC columns in the table into pixel coordinates given wcs"""
coords = np.vstack([np.array(table[RA_key]),
np.array(table[DE_key])]).T
pos = wcs.wcs_world2pix(coords, origin)
table.add_column(Column(np.around(pos[:,0], 4)*u.pix), name="X_CATALOG")
table.add_column(Column(np.around(pos[:,1], 4)*u.pix), name="Y_CATALOG")
table.add_column(Column(np.arange(len(table))+1, dtype=int),
index=0, name="ID"+'_'+name)
return table
def merge_catalog(SE_catalog, table_merge, sep=5 * u.arcsec,
RA_key="RAJ2000", DE_key="DEJ2000", keep_columns=None):
""" Crossmatch and merge two catalogs by coordinates"""
c_SE = SkyCoord(ra=SE_catalog["X_WORLD"], dec=SE_catalog["Y_WORLD"])
c_tab = SkyCoord(ra=table_merge[RA_key], dec=table_merge[DE_key])
idx, d2d, d3d = c_SE.match_to_catalog_sky(c_tab)
match = d2d < sep
cat_SE_match = SE_catalog[match]
cat_tab_match = table_merge[idx[match]]
cat_tab_match.add_column(cat_SE_match["NUMBER"], index=0, name="NUMBER")
cat_match = join(cat_SE_match, cat_tab_match, keys='NUMBER')
if keep_columns is not None:
cat_match.keep_columns(keep_columns)
return cat_match
def read_measurement_table(dir_name, bounds0,
obj_name='', band='G',
pad=50, r_scale=12,
mag_limit=15):
""" Read measurement tables from the directory """
use_PS1_DR2 = 'PS2' in dir_name
# Magnitude name
b_name = band.lower()
mag_name = b_name+'MeanPSFMag' if use_PS1_DR2 else b_name+'mag'
# Clipped bounds
patch_Xmin0, patch_Ymin0, patch_Xmax0, patch_Ymax0 = bounds0
bounds = (patch_Xmin0+pad, patch_Ymin0+pad,
patch_Xmax0-pad, patch_Ymax0-pad)
## Read measurement for faint stars from catalog
# Faint star catalog name
fname_catalog = os.path.join(dir_name, "%s-catalog_PS_%s_all.txt"%(obj_name, b_name))
# Check that the file exists before reading
assert os.path.isfile(fname_catalog), f"Table {fname_catalog} does not exist!"
logger.debug(f"Reading catalog {fname_catalog}.")
table_catalog = Table.read(fname_catalog, format="ascii")
mag_catalog = table_catalog[mag_name]
# stars fainter than the magnitude limit are fixed as background; those fainter than 22 mag are ignored
table_faint = table_catalog[(mag_catalog>=mag_limit) & (mag_catalog<22)]
table_faint = crop_catalog(table_faint,
keys=("X_CATALOG", "Y_CATALOG"),
bounds=bounds)
## Read measurement for bright stars
# Catalog name
range_str = "X[{:d}-{:d}]Y[{:d}-{:d}]"
range_str = range_str.format(patch_Xmin0, patch_Xmax0, patch_Ymin0, patch_Ymax0)
fname_norm = os.path.join(dir_name, "%s-norm_%dpix_%smag%s_%s.txt"\
%(obj_name, r_scale, b_name, mag_limit, range_str))
# Check that the file exists before reading
assert os.path.isfile(fname_norm), f"Table {fname_norm} does not exist"
logger.debug(f"Reading catalog {fname_norm}.")
table_norm = Table.read(fname_norm, format="ascii")
# Crop the catalog
table_norm = crop_catalog(table_norm, bounds=bounds0)
# Do not use flagged measurement
Iflag = table_norm["Iflag"]
table_norm = table_norm[Iflag==0]
return table_faint, table_norm
def assign_star_props(ZP, sky_mean, image_shape, pos_ref,
table_norm, table_faint=None,
r_scale=12, mag_threshold=[13.5,12],
psf=None, keys='Imed', verbose=True,
draw=True, save=False, save_dir='./'):
""" Assign position and flux for faint and bright stars from tables. """
from .modeling import Stars
# Positions & Flux (estimate) of bright stars from measured norm
star_pos = np.vstack([table_norm["X_CATALOG"],
table_norm["Y_CATALOG"]]).T - pos_ref
mag = table_norm['MAG_AUTO_corr'] if 'MAG_AUTO_corr' in table_norm.colnames else table_norm['MAG_AUTO']
Flux = 10**((np.array(mag)-ZP)/(-2.5))
# Estimate of brightness I at r_scale (I = Intensity - BKG) and flux
z_norm = table_norm['Imed'].data - table_norm['Isky'].data
z_norm[z_norm<=0] = min(1, z_norm[z_norm>0].min())
# Convert and printout thresholds
Flux_threshold = 10**((np.array(mag_threshold) - ZP) / (-2.5))
if verbose:
msg = "Magnitude Thresholds: {0}, {1} mag"
msg = msg.format(*mag_threshold)
logger.info(msg)
msg = "Flux Thresholds: {0}, {1} ADU"
msg = msg.format(*np.around(Flux_threshold,2))
logger.info(msg)
try:
SB_threshold = psf.Flux2SB(Flux_threshold, BKG=sky_mean, ZP=ZP, r=r_scale)
msg = "Surface Brightness Thresholds: {0}, {1} mag/arcsec^2 "
msg = msg.format(*np.around(SB_threshold,1))
msg += "at {0} pix for sky = {1:.3f}".format(r_scale, sky_mean)
logger.info(msg)
except:
pass
# Bright stars in model
stars_bright = Stars(star_pos, Flux, Flux_threshold=Flux_threshold,
z_norm=z_norm, r_scale=r_scale, BKG=sky_mean)
stars_bright = stars_bright.remove_outsider(image_shape, gap=[3*r_scale, r_scale])
stars_bright._info()
if (table_faint is not None) and ('MAG_AUTO_corr' in table_faint.colnames):
table_faint['FLUX_AUTO_corr'] = 10**((table_faint['MAG_AUTO_corr']-ZP)/(-2.5))
try:
ma = table_faint['FLUX_AUTO_corr'].data.mask
except AttributeError:
ma = np.isnan(table_faint['FLUX_AUTO_corr'])
# Positions & Flux of faint stars from catalog
star_pos_faint = np.vstack([table_faint["X_CATALOG"].data[~ma],
table_faint["Y_CATALOG"].data[~ma]]).T - pos_ref
Flux_faint = np.array(table_faint['FLUX_AUTO_corr'].data[~ma])
# Combine two samples, make sure they do not overlap
star_pos = np.vstack([star_pos, star_pos_faint])
Flux = np.concatenate([Flux, Flux_faint])
stars_all = Stars(star_pos, Flux, Flux_threshold, BKG=sky_mean)
if draw:
stars_all.plot_flux_dist(label='All', color='plum')
stars_bright.plot_flux_dist(label='Model', color='orange', ZP=ZP,
save=save, save_dir=save_dir)
plt.show()
return stars_bright, stars_all
def interp_I0(r, I, r0, r1, r2):
""" Interpolate I0 at r0 with I(r) between r1 and r2 """
range_intp = (r>r1) & (r<r2)
logI0 = np.interp(r0, r[range_intp], np.log10(I[range_intp]))
return 10**logI0
def compute_mean_I(r, I, r1, r2):
""" Compute mean I under I(r) between r1 and r2 """
range_intg = (r>r1) & (r<r2)
r_range = r[range_intg]
return np.trapz(I[range_intg], r_range)/(r_range.max()-r_range.min())
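# Quick check (sketch): for a power law I ~ r^-2, interp_I0 recovers the value
# at r0 (log-interpolated) and compute_mean_I gives the average over [r1, r2].
def _demo_profile_helpers():
    r = np.linspace(1, 100, 1000)
    I = r**-2.
    I0 = interp_I0(r, I, r0=30, r1=20, r2=40)     # ~ 30**-2
    I_mean = compute_mean_I(r, I, r1=20, r2=40)   # mean of r^-2 over (20, 40)
    return I0, I_mean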
def fit_n0(dir_measure, bounds,
obj_name, band, BKG, ZP,
pixel_scale=DF_pixel_scale,
fit_range=[20,40], dr=0.1,
N_fit=15, mag_max=13, mag_limit=15,
I_norm=24, norm='intp',
r_scale=12, sky_std=3,
plot_brightest=True, draw=True,
save=False, save_dir="./"):
"""
Fit the first power-law index (n0) of the PSF aureole using bright stars.
Parameters
----------
dir_measure : str
Directory storing the measurement
bounds : 1d list, [Xmin, Ymin, Xmax, Ymax]
Fitting boundary
band : str, 'g' 'G' 'r' or 'R'
Filter name
obj_name : str
Object name
BKG : float
Background value for profile measurement
ZP : float
Zero-point
pixel_scale : float, optional, default 2.5
Pixel scale in arcsec/pix
fit_range : 2-list, optional, default [20, 40]
Range for fitting in arcsec
dr : float, optional, default 0.1
Profile step parameter
N_fit : int, optional, default 15
Number of stars used to fit n0
mag_max : float, optional, default 13
Max magnitude of stars used to fit n0
I_norm : float, optional, default 24
SB at which profiles are normed
norm : 'intp' or 'intg', optional, default 'intp'
Normalization method to scale profiles.
Use mean value by 'intg', use interpolated value by 'intp'
r_scale : int, optional, default 12
Radius (in pix) at which the brightness is measured
The default of 12 pix corresponds to 30 arcsec for Dragonfly.
mag_limit : float, optional, default 15
Magnitude upper limit below which are measured
sky_std : float, optional, default 3
Sky stddev (for display only)
plot_brightest : bool, optional, default True
Whether to draw profile of the brightest star
draw : bool, optional, default True
Whether to draw profiles and fit process
save : bool, optional, default False
Whether to save plot.
save_dir : str, optional
Full path of saving plot, default current.
Returns
-------
n0 : float
first power index
d_n0 : float
uncertainty of n0
"""
from .modeling import log_linear
Xmin, Ymin, Xmax, Ymax = bounds
r1, r2 = fit_range
r0 = r_scale*pixel_scale
if r1<r0<r2:
# read result thumbnail and norm table
b = band.lower()
range_str = f'X[{Xmin}-{Xmax}]Y[{Ymin}-{Ymax}]'
fn_res_thumb = os.path.join(dir_measure, f'{obj_name}-thumbnail_{b}mag{mag_limit}_{range_str}.pkl')
fn_tab_norm = os.path.join(dir_measure, f'{obj_name}-norm_{r_scale}pix_{b}mag{mag_limit}_{range_str}.txt')
res_thumb = load_pickle(fn_res_thumb)
tab_norm = Table.read(fn_tab_norm, format='ascii')
if draw:
fig, ax = plt.subplots(1,1,figsize=(8,6))
else:
fig, ax, ax_ins = None, None, None
# r_rbin: r in arcsec, I_rbin: SB in mag/arcsec^2
# I_r0: SB at r0, In_rbin: SB normalized at r0
r_rbin_all, I_rbin_all = np.array([]), np.array([])
I_r0_all, In_rbin_all = np.array([]), np.array([])
"""
PrimordialOoze is a genetic algorithm (GA) library for those who want something
very simple and don't want to spend time figuring out the more complicated libraries
that are out there.
See the README or the docstrings in this file for the documentation.
"""
import math
import multiprocessing
import numpy as np
class Simulation:
"""
A GA simulation. The general workflow for this is:
```python
import primordialooze as po
import pandas
import matplotlib.pyplot
sim = po.Simulation(nagents, shape, fitnessfunction)
bestagent, fitness = sim.run()
# Dump and plot
fname = "stats.csv"
sim.dump_history_csv(fname)
df = pandas.read_csv(fname)
df = df.drop(['GenerationIndex'], axis=1)
df.plot()
plt.show()
```
"""
def __init__(self, population, shape, fitnessfunc, *, seedfunc=None, selectionfunc=None,
crossoverfunc=None, mutationfunc=None, elitismfunc=None, nworkers=0,
max_agents_per_generation=None, min_agents_per_generation=None):
"""
## Args
The following list contains the arguments that are needed. These do not have default values
since the values for these will change dramatically depending on the problem.
- **population**: The number of agents in the first generation. We will generate this many agents
in the initial generation, each of which is a Numpy Array of shape=`shape`.
They will be mutated according to `mutationfunc`, and evaluated each generation
by `fitnessfunc`.
- **shape**: The shape of each agent in the population. Must be a list-like. The shape of the agents
must be a 1D array of whatever length like `(7,)`.
- **fitnessfunc**: The function to use to evaluate the fitness of each agent in the generation.
Must have signature: `fitnessfunc(agent) -> scalar float`. This function
will be evaluated on every single agent in the gene pool at each generation.
If this function is slow, it probably makes sense to use multiprocessing, unless the
gene pool is quite small. See `nworkers`.
## Keyword Args
These arguments contain (mostly) sensible defaults, but you should definitely make sure these
defaults work for you. You will almost certainly want to change some of these to fit your problem.
- **seedfunc**: The function to use to create the first generation of agents. The function must have
the signature `seedfunc() -> agent of shape 'shape'`. We call this function
`population` times. When `None`, defaults to uniform random
over the range [-1.0, 1.0) in each dimension.
- **selectionfunc**: The function to use to select the agents that are allowed to breed to create the
next generation. Signature must be `selectionfunc(population, evaluations) -> selected_agents`,
where `population` is an n-dimensional array of shape (nagents, agent_length),
`evaluations` is an array of shape (nagents,); `evaluations[i]` contains
the fitness value for `population[i, :]`; `selected_agents` is an n-dimensional array
of shape (nagents_selected, agent_length), which must contain the selected agents.
`population` and `evaluations` are pre-sorted so that `population[0, :]` corresponds
to `evaluations[0]` and has the highest evaluation value. Agents which are not selected
are simply discarded, i.e., they will not appear in the next generation (unless randomly
created again as part of crossover/mutation).
If `None`, defaults to selecting the top ten percent.
- **crossoverfunc**: Crossover function to use. Must have signature `crossoverfunc(agents) -> new_agents`,
where `agents` is an n-dimensional array of shape (nselected_agents, agent_length),
and where `new_agents` must be an n-dimensional array of shape (nagents, agent_length).
This function is applied after the selection function is used to determine which
agents will enter the new generation and this function is used exclusively on those
selected agents. Typically, `new_agents` will constitute the entirety of the new generation,
with one exception being if elitism is used (see below) and another exception being
if the mutation function adds new individuals to the gene pool, rather than just mutating
existing ones.
If `None`, defaults to 2-point crossover used on randomly selected pairs from the
breeding agents until `population` agents are produced (or, if `elitismfunc` is None, `0.9 * population`).
- **mutationfunc**: The function to use to apply mutations to the gene pool. The signature must be
`mutationfunc(agents) -> new_agents`, where `agents` is the value returned from
`crossoverfunc` and `new_agents` must be an n-dimensional array of shape (nagents, agent_length).
This function is applied to the result of `crossoverfunc`.
When `None`, defaults to setting each value in 0.05 of the agents to a random value,
where the random value is drawn from a Gaussian distribution of mean = the value being replaced
and stdev = 0.25.
- **elitismfunc**: A function of signature `elitismfunc(generation_index) -> float in range [0.0, 1.0]`.
This function takes the index of the generation (0 for the first generation, 1 for the second, etc.)
and returns the fraction of top-performers to hold over as-is to the next generation.
The elites are duplicated and then, after the new
generation is created via the selectionfunc -> crossoverfunc -> mutationfunc pipeline, they are
reintroduced into the gene pool. This means that if the above pipeline generates 100 agents
and the elitism is set to take 10, the new generation will be composed of 110 agents. If this
is confusing, see `max_agents_per_generation` and `min_agents_per_generation`.
When `None`, defaults to a function that simply returns 0.1 (or 10%) of the gene pool regardless of the
generation.
- **nworkers**: The number of processes to use to parallelize the fitness function. This will default to 0, which will
mean no parallelism at all. `None` will use the number of cores. Otherwise, should be a positive integer.
- **max_agents_per_generation**: The maximum agents to allow into a generation. If the selection, crossover, mutation,
and elitism functions are not handled properly, it is possible for the number of
agents to change per generation. While this may be desired in some circumstances, it
is often not. If this value is negative, we will allow the generations to grow to arbitrary
size. If it is nonzero, after selection, crossover, mutation, and elitism, we will
take all of the candidates as long as they do not number more than this value. If they do,
we take this many at random.
This value defaults to `None`, which means we use `population` as the max.
- **min_agents_per_generation**: The minimum agents to allow making a new generation. If the selection, crossover, mutation,
and elitism functions are not handled properly, it is possible for the number of
agents to change per generation. While this may be desired in some circumstances, it
is often not. If this value is negative or zero, we will allow the generations
to shrink to zero, after which the simulation will stop. If it is nonzero, after selection,
crossover, mutation, and elitism, we will cycle through the candidate agents in random
order, duplicating them until this value is met. Note that we attempt to spread out the
duplication evenly amongst all candidates.
This value defaults to `None`, which means we use `population` as the min.
"""
# Validate population
if population <= 0:
raise ValueError("Population must be > 0 but is {}".format(population))
population = int(population)
# Validate shape
for i, dim in enumerate(shape):
if dim <= 0:
raise ValueError("Shape must contain no negative values, but contains {} at index {}".format(dim, i))
try:
_testagent = np.ndarray(shape=shape)
except Exception:
raise ValueError("There is something wrong with your shape parameter. It must be a list-like of integers greater than zero but is: {}.".format(shape))
# Do not validate functions; may take too long
# Validate nworkers
if nworkers is None:
nworkers = multiprocessing.cpu_count()
if nworkers <= 0:
raise ValueError("Something is wrong with multiprocessing.cpu_count(). Try passing in a number for nworkers instead of None.")
elif nworkers < 0:
raise ValueError("Nworkers must be zero (for no multiprocessing), None, or a positive integer, but is: {}".format(nworkers))
nworkers = int(nworkers)
# If we have negative max_agents, we actually want infinity
if max_agents_per_generation is not None and max_agents_per_generation < 0:
max_agents_per_generation = math.inf
# We allow negative min_agents for compatibility with max_agents, but we just
# interpret it as zero
if min_agents_per_generation is not None and min_agents_per_generation < 0:
min_agents_per_generation = 0
self._initial_population_size = population
self._shape = shape
self._fitnessfunc = fitnessfunc
self._seedfunc = self._default_seedfunc if seedfunc is None else seedfunc
self._selectionfunc = self._default_selectionfunc if selectionfunc is None else selectionfunc
self._crossoverfunc = self._default_crossoverfunc if crossoverfunc is None else crossoverfunc
self._mutationfunc = self._default_mutationfunc if mutationfunc is None else mutationfunc
self._elitismfunc = self._default_elitismfunc if elitismfunc is None else elitismfunc
self._nworkers = nworkers
self._max_agents_per_generation = population if max_agents_per_generation is None else max_agents_per_generation
self._min_agents_per_generation = population if min_agents_per_generation is None else min_agents_per_generation
self.statistics = []
self.best_agents = []
if self._max_agents_per_generation < self._min_agents_per_generation:
raise ValueError("max_agents_per_generation {} is less than min_agents_per_generation {}".format(self._max_agents_per_generation, self._min_agents_per_generation))
def dump_history_csv(self, fpath):
"""
Saves this simulation's statistics as a CSV file at `fpath` in the form:
```
Generation Index, Maximum, Minimum, Average
```
"""
with open(fpath, 'w') as f:
f.write("GenerationIndex, Maximum, Minimum, Average\n")
for s in self.statistics:
f.write("{}, {}, {}, {}\n".format(s.generationidx, s.maxval, s.minval, s.avgval))
def run(self, niterations=100, fitness=None, printprogress=True):
"""
Runs the constructed simulation.
Either runs until `niterations` have passed, or runs until the best fitness is `fitness` or greater.
Returns the best agent along with its fitness.
## Keyword Args
- **niterations**: The number of iterations to run the simulation to. Defaults to 100. If `None`,
`fitness` will be used (and must not be None). If both this and `fitness` is
specified, we will stop as soon as one or the other condition is met.
- **fitness**: The fitness level to converge on. As soon as one or more agents have this fitness level
or higher, the simulation will stop. Defaults to `None`. If `None` (the default),
`niterations` will be used (and must not be None). If this and `niterations` is
specified, we will stop as soon as one or the other condition is met.
- **printprogress**: If `True` (the default), we will print a progress indication after each generation.
## Returns
- The agent with the highest fitness score after the simulation ends.
- The fitness of this agent.
"""
# Validate args
if niterations is None and fitness is None:
raise ValueError("`niterations` and `fitness` must not both be None.")
# First seed the gene pool
listagents = [self._seedfunc() for _ in range(self._initial_population_size)]
self._agents = np.array(listagents)
self._fitnesses = np.zeros((self._initial_population_size,))
iteridx = 0
while not self._check_if_done(niterations, fitness, iteridx, printprogress):
# Evaluate the gene pool
self._fitnesses = self._evaluate_fitnesses()
# Sort the fitnesses along with the agents and reverse
sorted_indexes = np.argsort(self._fitnesses)[::-1]
self._fitnesses = self._fitnesses[sorted_indexes]
self._agents = self._agents[sorted_indexes]
# Calculate statistics
self._save_stats(iteridx)
# Elitism to duplicate the elites
eliteratio = self._elitismfunc(iteridx)
assert eliteratio <= 1.0, "The elitism function must produce a value between 0.0 and 1.0"
assert eliteratio >= 0.0, "The elitism function must produce a value between 0.0 and 1.0"
nelites = int(eliteratio * self._agents.shape[0])
elites = np.copy(self._agents[0:nelites])
elites = np.reshape(elites, (-1, self._agents.shape[1]))
# Select breeding agents with selection function
self._agents = self._selectionfunc(self._agents, self._fitnesses)
assert len(self._agents.shape) == 2, "Selection function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Breed them using crossover
self._agents = self._crossoverfunc(self._agents)
assert len(self._agents.shape) == 2, "Crossover function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Mutate the results
self._agents = self._mutationfunc(self._agents)
assert len(self._agents.shape) == 2, "Mutation function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Construct the new gene pool from the mutation results and the elites
## Append any elites that were held over
self._agents = np.append(self._agents, elites, axis=0)
## Take as many as max_agents (but don't take more than we actually have), but randomized
np.random.shuffle(self._agents)
mx = min(self._max_agents_per_generation, self._agents.shape[0])
self._agents = self._agents[0:mx, :]
## Now cycle through the agents, duplicating one at a time until we have at least min_agents
i = 0
while self._agents.shape[0] < self._min_agents_per_generation:
self._agents = np.append(self._agents, np.expand_dims(self._agents[i], 0), axis=0)
i += 1
if i >= self._agents.shape[0]:
i = 0
# Increment the generation index
iteridx += 1
if printprogress:
print()
# Return the fittest agent and its fitness score
return self.best_agents[-1], self.statistics[-1].maxval
def _save_stats(self, iteridx):
"""
Saves the statistics from this generation.
"""
maxval = np.max(self._fitnesses)
minval = np.min(self._fitnesses)
avgval = np.mean(self._fitnesses)
stats = Statistics(maxval, minval, avgval, iteridx)
self.statistics.append(stats)
# Sort the fitnesses along with the agents and reverse
sorted_indexes = np.argsort(self._fitnesses)[::-1]
sorted_agents = self._agents[sorted_indexes]
self.best_agents.append(sorted_agents[0, :])
def _check_if_done(self, niterations, fitness, iteridx, prnt):
"""
Returns `True` if the simulation is complete, `False` if not.
"""
assert not (niterations is None and fitness is None), "niterations and fitness cannot both be None"
if niterations is None:
niterations = math.inf
if fitness is None:
fitness = math.inf
# Check if the max fitness value is >= fitness
finished_by_fitness = np.max(self._fitnesses) >= fitness
finished_by_iterations = iteridx >= niterations
if prnt:
# lightweight progress readout; run() prints the trailing newline
print("\rGeneration {}: best fitness {:.4f}".format(iteridx, float(np.max(self._fitnesses))), end="")
return finished_by_fitness or finished_by_iterations
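# Minimal end-to-end sketch: maximize f(x) = -(x - 3)^2 over a 1-D agent, with
# every pluggable piece supplied explicitly, so it only relies on the public
# signatures documented above (`_evaluate_fitnesses` and the default helper
# methods referenced in __init__ are assumed to be defined elsewhere in this module).
if __name__ == "__main__":
    def _fitness(agent):
        return float(-(agent[0] - 3.0) ** 2)
    def _seed():
        return np.random.uniform(-10.0, 10.0, size=(1,))
    def _select(population, evaluations):
        return population[:max(2, population.shape[0] // 10), :]
    def _crossover(agents):
        idx = np.random.randint(0, agents.shape[0], size=(50, 2))
        return 0.5 * (agents[idx[:, 0], :] + agents[idx[:, 1], :])
    def _mutate(agents):
        return agents + np.random.normal(0.0, 0.25, size=agents.shape)
    sim = Simulation(50, (1,), _fitness, seedfunc=_seed, selectionfunc=_select,
                     crossoverfunc=_crossover, mutationfunc=_mutate,
                     elitismfunc=lambda gen: 0.1)
    best, best_fitness = sim.run(niterations=25, printprogress=False)
    print("best agent:", best, "fitness:", best_fitness)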
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:21:46 2021
@author: jiayingweng
"""
import numpy as np
import scipy.linalg as la
__all__ = ['generateX', 'generateY']
def generateX(n, p, covstr):
"""
Generate X for simulation
Args:
n (int): sample size
p (int): number of dimension of X
covstr (0-3): covariance structure
Returns:
X: n times p array
"""
## generate X
if covstr == 0:
covx = np.eye(p)
elif covstr == 1:
v = 0.5 ** np.arange(p)
covx = la.toeplitz(v)
elif covstr == 2:
offdiag = 0.2
covx = np.ones((p,p)) * offdiag
covx = covx + np.eye(p) * (1-offdiag)
elif covstr == 3:
v = 0.8 ** np.arange(p)
covx = la.toeplitz(v)
L = np.linalg.cholesky(covx)
Z = np.random.randn(p,n)
X = (L @ Z).T
return(X)
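# Quick usage sketch: n=200 samples of p=10 covariates with Toeplitz
# (rho = 0.5) covariance, i.e. covstr=1.
def _demo_generateX():
    X = generateX(200, 10, covstr=1)
    return X.shape  # (200, 10)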
def generateY(X, M):
"""
Generate Y based on X
Args:
X: input covariate
M: model 1-7 uni; 10-15 multi
Returns:
Y: outcome
d: structural dimension
p: the dimension of Y
b: the true beta
"""
[n,p] = X.shape
## generate Y
if M == 1: # Qian M1
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = np.exp(X @ b[:,0]) + np.random.randn(n)
elif M == 2: # Qian M2
d = 2
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index1 = np.arange(4) #np.random.randint(p, size = 5)
index2 = np.arange(p-4,p)
b[index1,0] = 1
b[index2, 1] = 1
y[:,0] = np.sign(X @ b[:,0]) * np.log( np.abs( X @ b[:,1] + 5 ) ) + 0.2 * np.random.randn(n)
elif M == 3: # Tan AOS Model 1
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = np.sin(X @ b[:,0]) ** 2 + X @ b[:,0] + np.random.randn(n)
elif M == 4: # Tan AOS Model 2
d = 1
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(5)
b[index,:] = 1
y[:,0] = 2 * np.tanh(X @ b[:,0]) + np.random.randn(n)
elif M == 5: # <NAME>
d = 1
q = 1
b = np.zeros((p,d))
index = np.arange(1)
b[index,:] = 1
X = 1/4 * np.sqrt(0.1) * ( np.random.randn(p,n) + 1) + 1/2 * np.sqrt(0.1) * ( np.random.randn(p,n) + 2 ) + 1/4 * np.sqrt(10) * (np.random.randn(p,n) + 1)
X = X.T
y = np.abs( np.sin( X @ b[:,0] ) ) + 0.2 * np.random.randn(n)
elif M == 6:
d = 2
q = 1
b = np.zeros((p,d))
b[0,0] = 1
b[1,1] = 1
X[:,1] = X[:,0] + X[:,1]
X[:,3] = ( 1+X[:,1] ) * X[:,3]
y = X @ b[:,0] + 0.5 * (X @ b[:,1])** 2
elif M == 7:
d = 2
q = 1
b = np.zeros((p,d))
y = np.zeros((n,q))
index1 = np.arange(1)
index2 = np.arange(1,3)
b[index1,0] = 1
b[index2, 1] = 1
y = (X @ b[:,0]) * (X @ b[:,1] + 1) + np.random.randn(n)
elif M == 10:
## simple
d = 2
q = 3
b = np.zeros((p,d))
y = np.zeros((n,q))
#index = np.random.randint(p, size = 5)
index = np.arange(5)
b[index[0:2], 0] = 1
b[index[2:], 1] = 1
y[:,0] = np.exp( X @ b[:,0]) + 0.5 * np.random.randn(n)
y[:,1] = X @ b[:,1] + 0.1 * np.random.randn(n)
y[:,2] = 0.1 * np.random.randn(n)
elif M == 11: ## <NAME>en 2010 Example 3
## complex
d = 2
q = 5
covy = np.diag([1,1/2,1/2,1/3,1/4])
covy[0,1] = covy[1,0] = -1/2
L = np.linalg.cholesky(covy)
Z = np.random.randn(q,n)
eps = (L @ Z).T
b = np.zeros((p,d))
y = np.zeros((n,q))
index = np.arange(3) #np.random.randint(p, size = 5)
b[index[0], 0] = 1
b[index[1:], 1] = 1
y[:,0] = 1 + X @ b[:,0] + np.sin(X @ b[:,1]) + eps[:,0]
y[:,1] = X @ b[:,1] / (0.5 + (X @ b[:,0])**2) + eps[:,1]
y[:,2] = np.abs(X @ b[:,1]) * eps[:,2]
y[:,3] = eps[:,3]
y[:,4] = eps[:,4]
elif M == 12: ## <NAME> 2010 Example 2 and <NAME> 2008 Model 4.3
d = 1
q = 2
b = np.zeros((p,d))
b[0:2,0] = [0.8, 0.6]
top = np.ones((n,2))
top[:,1] = np.sin(X @ b[:,0])
y = np.zeros((n,q))
for i in range(n):
covy = la.toeplitz(top[i,:])
L = np.linalg.cholesky(covy)
Z = np.random.randn(q,1)
y[i,:] = (L @ Z).T
elif M == 13: # <NAME>, Weng, Li 2008 Model 4.1
d = 2
q = 4
covy = np.diag([1,1,1,1])
covy[0,1] = covy[1,0] = -1/2
L = np.linalg.cholesky(covy)
Z = np.random.randn(q,n)
eps = (L @ Z).T
b = np.zeros((p,d))
y = np.zeros((n,q))
index = range(3)
b[index[0:1], 0] = 1
b[index[1:], 1] = [2,1]
y[:,0] = X @ b[:,0] + eps[:,0]
y[:,1] = X @ b[:,1] + eps[:,1]
y[:,2] = eps[:,2]
y[:,3] = eps[:,3]
elif M == 14: # Bing li 2008 Model 4.2
d = 1
q = 4
b = np.zeros((p,d))
b[0:2,0] = [0.8, 0.6]
top = np.sin(X @ b[:,0])
y = np.zeros((n,q))
for i in range(n):
covy = np.eye(q)
covy[0,1] = covy[1,0] = top[i]
L = np.linalg.cholesky(covy)
Z = np.random.randn(q,1)
eps = (L @ Z).T
y[i,:] = eps
y[i,0] = np.exp(eps[0,0])
elif M == 15: # Bing Li 08 Model 4.4
d = 2
q = 5
covy = np.diag([1,1/2,1/2,1/3,1/4])
covy[0,1] = covy[1,0] = -1/2
L = np.linalg.cholesky(covy)
"""Drug Response Predictor
@author: <NAME>
This module centralizes the domain adaptation strategy towards biology-aware drug response
prediction on in-vivo dataset.
Example
-------
Examples are given in the vignettes.
Notes
-------
Examples are given in the vignette
References
-------
[1] <NAME>., <NAME>., <NAME>., <NAME>. (2019)
PRECISE: A domain adaptation approach to transfer predictors of drug response
from pre-clinical models to tumors
[2] <NAME>., <NAME>., <NAME>. (2012) Geodesic Flow Kernel for unsupervised
domain adaptation. IEEE CVPR
[3] <NAME>., <NAME>., <NAME>. (2011) Domain Adaptation for object
recognition, an unsupervised approach. IEEE ICCV
"""
import os
import numpy as np
import scipy
from pathlib import Path
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Ridge
from sklearn.neighbors import KNeighborsRegressor
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
import tempfile
from joblib import load, dump
from precise.intermediate_factors import IntermediateFactors
from precise.principal_vectors import PVComputation
from precise.pipeline_routine import FlowProjector, GeodesicMatrixComputer, ConsensusRepresentation
class DrugResponsePredictor:
"""
Main pipeline for training a tumor-aware drug response predictor. This class contains:
- principal vectors computation,
- consensus representation computation,
- regression model training based on these representations,
- computation of the predictive performance.
On top of containing the solution selected by [1], it offers an implementation of the
Geodesic Flow Sampling [2] and the Geodesic Flow Kernel [3] with the equivalent
definition derived in supplementary material of [1].
Attributes
-------
n_representations : int, default to 100
Number of representations between source and target principal vectors for interpolation.
method : str, default to 'consensus'
Scheme used for the domain adaptation step, i.e. 'consensus', 'elasticnet', or 'gfk'.
mean_center : bool, default to True
Whether the different datasets used in the implementation should be mean centered.
std_unit : bool, default to False
Whether the different datasets used in the implementation should be standardized
(feature-level variance to 1).
n_factors : int, default to 70
Number of domain-specific factors to compute, e.g. PCs.
n_pv : int, default to 40
Number of principal vectors to compute from the domain-specific factors.
dim_reduction : str, default to 'pca'
Dimensionality reduction method for the source data,
i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'.
dim_reduction_target : str, default to None
Dimensionality reduction method for the target data,
i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'. If None, set to dim_reduction.
l1_ratio : float, default to 0
l1 ratio for elasticnet model (0 is Ridge, 1 is Lasso).
source_data : np.ndarray, default to None
source data to use in domain adaptation phase.
target_data : np.ndarray, default to None
target data to use in domain adaptation phase.
n_jobs : int, default to 1
number of jobs used in parallelisation.
pv_computation : PVComputation
Instance computing the principal vectors.
intermediate_factors : IntermediateFactors
Instance computing the interpolated features between source and target.
predictor : BaseEstimator
Regression model based on feature representation chosen in "method".
alpha_values: np.ndarray
Regression coefficients for grid search in regression model.
cv_fold : int (set to 10)
Number of cross validation folds used for finding the optimal shrinkage
coefficient and computing the predictive performance.
verbose : int (set to 1)
Level of verbosity in joblib instances.
"""
def __init__(self, n_representations=100, method='consensus',
mean_center=True,
std_unit=False,
n_factors=70,
n_pv=40,
dim_reduction='pca',
dim_reduction_target=None,
l1_ratio=0,
source_data=None,
target_data=None,
n_jobs=1):
"""
Parameters
-------
n_representations : int, default to 100
Number of representations between source and target principal vectors for interpolation.
0 means source only, -1 means target only.
method : str, default to 'consensus'
Scheme used for the domain adaptation step, i.e. 'consensus', 'elasticnet', or 'gfk'.
mean_center : bool, default to True
Whether the different datasets used in the implementation should be mean centered.
std_unit : bool, default to False
Whether the different datasets used in the implementation should be standardized
(feature-level variance to 1).
n_factors : int, default to 70
Number of domain-specific factors to compute, e.g. PCs.
n_pv : int, default to 40
Number of principal vectors to compute from the domain-specific factors.
dim_reduction : str, default to 'pca'
Dimensionality reduction method for the source data,
i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'.
dim_reduction_target : str, default to None
Dimensionality reduction method for the target data,
i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'. If None, set to dim_reduction.
l1_ratio : float, default to 0
l1 ratio for elasticnet model (0 is Ridge, 1 is Lasso).
source_data : np.ndarray, default to None
source data to use in domain adaptation phase.
target_data : np.ndarray, default to None
target data to use in domain adaptation phase.
n_jobs : int, default to 1
number of jobs used in parallelisation.
"""
self.n_representations = n_representations
self.mean_center = mean_center
self.std_unit = std_unit
self.method = method
self.n_factors = n_factors
self.n_pv = n_pv
self.l1_ratio = l1_ratio
self.dim_reduction = dim_reduction
self.dim_reduction_target = dim_reduction_target
self.n_jobs = n_jobs
self.source_data = source_data
self.target_data = target_data
self.pv_computation = PVComputation(
self.n_factors,
self.n_pv,
self.dim_reduction,
self.dim_reduction_target
)
self.intermediate_factors = IntermediateFactors(
self.n_representations
)
self.predictor = None
# Default values for CV
self.alpha_values = np.logspace(-6, 10, 34)
# Author: <NAME> <<EMAIL>>
"""Plot topographic maps of sensor space data."""
from __future__ import annotations
import dataclasses
from itertools import repeat
from math import floor, sqrt
from typing import Any, Dict, Literal, Sequence, Tuple, Union
import matplotlib as mpl
import matplotlib.axes
import numpy as np
from scipy import interpolate, linalg
from scipy.spatial import ConvexHull
from .._colorspaces import UNAMBIGUOUS_COLORS
from .._data_obj import NDVarArg, CategorialArg, IndexArg, Dataset
from .._text import ms
from ._base import (
CMapArg, ColorArg,
PlotType, EelFigure, PlotData, AxisData, DataLayer,
Layout, ImLayout, VariableAspectLayout,
ColorMapMixin, TimeSlicerEF, TopoMapKey, XAxisMixin, YLimMixin,
)
from ._utsnd import _ax_butterfly, _ax_im_array, _plt_im
from ._sensors import SENSORMAP_FRAME, SensorMapMixin, _plt_map2d
InterpolationArg = Literal[None, 'nearest', 'linear', 'spline']
SensorLabelsArg = Literal['', 'none', 'index', 'name', 'fullname']
class Topomap(SensorMapMixin, ColorMapMixin, TopoMapKey, EelFigure):
"""Plot individual topogeraphies
Parameters
----------
y
Data to plot.
xax
Create a separate plot for each cell in this model.
ds
If a Dataset is provided, data can be specified as strings.
sub
Specify a subset of the data.
vmax
Upper limits for the colormap (default is determined from data).
vmin
Lower limit for the colormap (default ``-vmax``).
cmap
Colormap (default depends on the data).
contours
Contours to draw on topomaps. Can be an int (number of contours,
including ``vmin``/``vmax``), a sequence (values at which to draw
contours), or a ``**kwargs`` dict (must contain at least the "levels"
key). Default is no contours.
proj
The sensor projection to use for topomaps (or one projection per plot).
res
Resolution of the topomaps (width = height = ``res``).
interpolation
Method for interpolating topo-map between sensors (default is based on
mne-python).
clip : bool | 'even' | 'circle'
Outline for clipping topomaps: 'even' to clip at a constant distance
(default), 'circle' to clip using a circle.
clip_distance
How far from sensor locations to clip (1 is the axes height/width).
head_radius
Radius of the head outline drawn over sensors (on sensor plots with
normalized positions, 0.45 is the outline of the topomap); 0 to plot no
outline; tuple for separate (right, anterior) radius.
The default is determined automatically.
head_pos
Head outline position along the anterior axis (0 is the center, 0.5 is
the top end of the plot).
im_interpolation
Topomap image interpolation (see Matplotlib's
:meth:`~matplotlib.axes.Axes.imshow`). Matplotlib 1.5.3's SVG output
can't handle uneven aspect with ``interpolation='none'``, use
``interpolation='nearest'`` instead.
sensorlabels
Show sensor labels. For 'name', any prefix common to all names
is removed; with 'fullname', the full name is shown.
mark
Sensors which to mark.
mcolor : matplotlib color
Color for marked sensors.
axtitle
Title for the individual axes. The default is to show the names of the
epochs, but only if multiple axes are plotted.
xlabel
Label below the topomaps (default is no label; ``True`` to use ``y``
names).
margins
Layout parameter.
...
Also accepts :ref:`general-layout-parameters`.
Notes
-----
Keys:
- ``t``: open a ``Topomap`` plot for the region under the mouse pointer.
- ``T``: open a larger ``Topomap`` plot with visible sensor names for the
map under the mouse pointer.
"""
def __init__(
self,
y: Union[NDVarArg, Sequence[NDVarArg]],
xax: CategorialArg = None,
ds: Dataset = None,
sub: IndexArg = None,
vmax: float = None,
vmin: float = None,
cmap: CMapArg = None,
contours: Union[int, Sequence, Dict] = None,
# topomap args
proj: str = 'default',
res: int = None,
interpolation: InterpolationArg = None,
clip: Union[bool, str] = 'even',
clip_distance: float = 0.05,
head_radius: Union[float, Tuple[float, float]] = None,
head_pos: Union[float, Sequence[float]] = 0,
im_interpolation: str = None,
# sensor-map args
sensorlabels: SensorLabelsArg = None,
mark: IndexArg = None,
mcolor: ColorArg = None,
# layout
axtitle: Union[bool, Sequence[str]] = True,
xlabel: Union[bool, str] = None,
margins: Dict[str, float] = None,
**kwargs,
):
data = PlotData.from_args(y, ('sensor',), xax, ds, sub)
self.plots = []
ColorMapMixin.__init__(self, data.data, cmap, vmax, vmin, contours, self.plots)
if isinstance(proj, str):
proj = repeat(proj, data.n_plots)
elif not isinstance(proj, Sequence):
raise TypeError(f"proj={proj!r}")
elif len(proj) != data.n_plots:
raise ValueError(f"proj={proj!r}: need as many proj as axes ({data.n_plots})")
layout = ImLayout(data.plot_used, 1.1, 2, margins, axtitle=axtitle, **kwargs)
EelFigure.__init__(self, data.frame_title, layout)
self._set_axtitle(axtitle, data, verticalalignment='top', pad=-1)
# plots
axes_data = data.for_plot(PlotType.IMAGE)
for ax, layers, proj_ in zip(self.axes, axes_data, proj):
h = _ax_topomap(ax, layers, clip, clip_distance, sensorlabels, mark, mcolor, None, proj_, res, im_interpolation, xlabel, self._vlims, self._cmaps, self._contours, interpolation, head_radius, head_pos)
self.plots.append(h)
TopoMapKey.__init__(self, self._topo_data)
SensorMapMixin.__init__(self, [h.sensors for h in self.plots])
self._show()
def _fill_toolbar(self, tb):
ColorMapMixin._fill_toolbar(self, tb)
SensorMapMixin._fill_toolbar(self, tb)
def _topo_data(self, event):
if event.inaxes:
ax_i = self.axes.index(event.inaxes)
p = self.plots[ax_i]
return p.data, p.title, p.proj
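# Usage sketch (the example dataset, the 'utsnd' NDVar and the factor 'A' come
# from eelbrain.datasets.get_uts(utsnd=True); they are not part of this module).
def _demo_topomap():
    from eelbrain import datasets
    ds = datasets.get_uts(utsnd=True)
    ds['topo'] = ds['utsnd'].mean('time')
    return Topomap('topo', xax='A', ds=ds)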
class TopomapBins(SensorMapMixin, ColorMapMixin, TopoMapKey, EelFigure):
"""Topomaps in time-bins
Parameters
----------
y
Data to plot.
xax
Create a separate plot for each cell in this model.
ds
If a Dataset is provided, data can be specified as strings.
sub
Specify a subset of the data.
bin_length
Length ofthe time bins for topo-plots.
tstart
Beginning of the first time bin (default is the beginning of ``y``).
tstop
End of the last time bin (default is the end of ``y``).
vmax
Upper limits for the colormap (default is determined from data).
vmin
Lower limit for the colormap (default ``-vmax``).
cmap
Colormap (default depends on the data).
contours
Contours to draw on topomaps. Can be an int (number of contours,
including ``vmin``/``vmax``), a sequence (values at which to draw
contours), or a ``**kwargs`` dict (must contain at least the "levels"
key). Default is no contours.
proj
The sensor projection to use for topomaps.
res
Resolution of the topomaps (width = height = ``res``).
interpolation
Method for interpolating topo-map between sensors (default is based on
mne-python).
clip : bool | 'even' | 'circle'
Outline for clipping topomaps: 'even' to clip at a constant distance
(default), 'circle' to clip using a circle.
clip_distance
How far from sensor locations to clip (1 is the axes height/width).
head_radius
Radius of the head outline drawn over sensors (on sensor plots with
normalized positions, 0.45 is the outline of the topomap); 0 to plot no
outline; tuple for separate (right, anterior) radius.
The default is determined automatically.
head_pos
Head outline position along the anterior axis (0 is the center, 0.5 is
the top end of the plot).
im_interpolation
Topomap image interpolation (see Matplotlib's
:meth:`~matplotlib.axes.Axes.imshow`). Matplotlib 1.5.3's SVG output
can't handle uneven aspect with ``interpolation='none'``, use
``interpolation='nearest'`` instead.
sensorlabels
Show sensor labels. For 'name', any prefix common to all names
is removed; with 'fullname', the full name is shown.
mark : Sensor index
Sensors which to mark.
mcolor : matplotlib color
Color for marked sensors.
...
Also accepts :ref:`general-layout-parameters`.
Notes
-----
Keys:
- ``t``: open a ``Topomap`` plot for the map under the mouse pointer.
- ``T``: open a larger ``Topomap`` plot with visible sensor names for the
map under the mouse pointer.
"""
def __init__(
self,
y: Union[NDVarArg, Sequence[NDVarArg]],
xax: CategorialArg = None,
ds: Dataset = None,
sub: IndexArg = None,
bin_length: float = 0.050,
tstart: float = None,
tstop: float = None,
vmax: float = None,
vmin: float = None,
cmap: CMapArg = None,
contours: Union[int, Sequence, Dict] = None,
# topomap args
proj: str = 'default',
res: int = None,
interpolation: InterpolationArg = None,
clip: Union[bool, str] = 'even',
clip_distance: float = 0.05,
head_radius: Union[float, Tuple[float, float]] = None,
head_pos: Union[float, Sequence[float]] = 0,
im_interpolation: str = None,
# sensor-map args
sensorlabels: SensorLabelsArg = None,
mark: IndexArg = None,
mcolor: ColorArg = None,
**kwargs,
):
data = PlotData.from_args(y, ('sensor', 'time'), xax, ds, sub)
self._plots = []
data._cannot_skip_axes(self)
bin_data = data.for_plot(PlotType.IMAGE).bin(bin_length, tstart, tstop)
ColorMapMixin.__init__(self, data.data, cmap, vmax, vmin, contours, self._plots)
# create figure
time = bin_data.y0.get_dim('time')
n_bins = len(time)
n_rows = bin_data.n_plots
layout = Layout(n_bins * n_rows, 1, 1.5, tight=False, nrow=n_rows, ncol=n_bins, **kwargs)
EelFigure.__init__(self, data.frame_title, layout)
self._plots.extend(repeat(None, n_bins * n_rows))
for column, t in enumerate(time):
t_data = bin_data.sub_time(t)
for row, layers in enumerate(t_data):
i = row * n_bins + column
ax = self.axes[i]
self._plots[i] = _ax_topomap(ax, layers, clip, clip_distance, sensorlabels, mark, mcolor, None, proj, res, im_interpolation, None, self._vlims, self._cmaps, self._contours, interpolation, head_radius, head_pos)
self._set_axtitle((str(t) for t in time), axes=self.axes[:len(time)])
TopoMapKey.__init__(self, self._topo_data)
SensorMapMixin.__init__(self, [h.sensors for h in self._plots])
self._show()
def _fill_toolbar(self, tb):
ColorMapMixin._fill_toolbar(self, tb)
SensorMapMixin._fill_toolbar(self, tb)
def _topo_data(self, event):
if event.inaxes:
ax_i = self.axes.index(event.inaxes)
p = self._plots[ax_i]
return p.data, p.title, p.proj
class TopoButterfly(ColorMapMixin, TimeSlicerEF, TopoMapKey, YLimMixin, XAxisMixin, EelFigure):
"""Butterfly plot with corresponding topomaps
Parameters
----------
y : (list of) NDVar
Data to plot.
xax : None | categorial
Create a separate plot for each cell in this model.
ds : Dataset
If a Dataset is provided, data can be specified as strings.
sub : str | array
Specify a subset of the data.
vmax : scalar
Upper limits for the colormap (default is determined from data).
vmin : scalar
Lower limit for the colormap (default ``-vmax``).
cmap : str
Colormap (default depends on the data).
contours : int | sequence | dict
Contours to draw on topomaps. Can be an int (number of contours,
including ``vmin``/``vmax``), a sequence (values at which to draw
contours), or a ``**kwargs`` dict (must contain at least the "levels"
key). Default is no contours.
color : matplotlib color
Color of the butterfly plots.
linewidth : scalar
        Linewidth for plots (default is to use ``matplotlib.rcParams``).
t
Time to display in the topomap.
proj : str
The sensor projection to use for topomaps.
res : int
Resolution of the topomaps (width = height = ``res``).
interpolation
Method for interpolating topo-map between sensors (default is based on
mne-python).
clip : bool | 'even' | 'circle'
Outline for clipping topomaps: 'even' to clip at a constant distance
(default), 'circle' to clip using a circle.
clip_distance : scalar
How far from sensor locations to clip (1 is the axes height/width).
head_radius : scalar | tuple
Radius of the head outline drawn over sensors (on sensor plots with
normalized positions, 0.45 is the outline of the topomap); 0 to plot no
outline; tuple for separate (right, anterior) radius.
The default is determined automatically.
head_pos : scalar
Head outline position along the anterior axis (0 is the center, 0.5 is
the top end of the plot).
im_interpolation : str
Topomap image interpolation (see Matplotlib's
:meth:`~matplotlib.axes.Axes.imshow`). Matplotlib 1.5.3's SVG output
can't handle uneven aspect with ``interpolation='none'``, use
``interpolation='nearest'`` instead.
sensorlabels
Show sensor labels. For 'name', any prefix common to all names
is removed; with 'fullname', the full name is shown.
mark : Sensor index
Sensors to mark in the topo-map. To highlight sensors in the butterfly
plot, consider using :meth:`NDVar.mask` on ``y``.
mcolor : matplotlib color
Color for marked sensors.
xlabel
X-axis label. By default the label is inferred from the data.
ylabel
Y-axis label. By default the label is inferred from the data.
xticklabels
Specify which axes should be annotated with x-axis tick labels.
Use ``int`` for a single axis, a sequence of ``int`` for multiple
specific axes, or one of ``'left' | 'bottom' | 'all' | 'none'``.
yticklabels
Specify which axes should be annotated with y-axis tick labels.
Use ``int`` for a single axis, a sequence of ``int`` for multiple
specific axes, or one of ``'left' | 'bottom' | 'all' | 'none'``.
axtitle : bool | sequence of str
Title for the individual axes. The default is to show the names of the
epochs, but only if multiple axes are plotted.
xlim : scalar | (scalar, scalar)
Initial x-axis view limits as ``(left, right)`` tuple or as ``length``
scalar (default is the full x-axis in the data).
...
Also accepts :ref:`general-layout-parameters`.
Notes
-----
Topomap control:
- LMB click in a butterfly plot fixates the topomap time
- RMB click in a butterfly plot removes the time point, the topomaps
follow the mouse pointer
    - ``.``: Increment the current topomap time (go right)
- ``,``: Decrement the current topomap time (go left)
- ``t``: open a ``Topomap`` plot for the time point under the mouse
pointer
- ``T``: open a larger ``Topomap`` plot with visible sensor names for the
time point under the mouse pointer
Navigation:
- ``↑``: scroll up
- ``↓``: scroll down
- ``←``: scroll left
- ``→``: scroll right
- ``home``: scroll to beginning
- ``end``: scroll to end
- ``f``: x-axis zoom in (reduce x axis range)
- ``d``: x-axis zoom out (increase x axis range)
- ``r``: y-axis zoom in (reduce y-axis range)
- ``c``: y-axis zoom out (increase y-axis range)
"""
_default_xlabel_ax = -2
def __init__(
self,
y: Union[NDVarArg, Sequence[NDVarArg]],
xax: CategorialArg = None,
ds: Dataset = None,
sub: IndexArg = None,
vmax: float = None,
vmin: float = None,
cmap: CMapArg = None,
contours: Union[int, Sequence, Dict] = None,
color: Any = None,
linewidth: float = None,
# topomap args
t: float = None,
proj: str = 'default',
res: int = None,
interpolation: InterpolationArg = None,
clip: Union[bool, str] = 'even',
clip_distance: float = 0.05,
head_radius: Union[float, Tuple[float, float]] = None,
head_pos: Union[float, Sequence[float]] = 0,
im_interpolation: str = None,
# sensor-map args
sensorlabels: SensorLabelsArg = None,
mark: IndexArg = None,
mcolor: ColorArg = None,
# layout
xlabel: Union[bool, str] = True,
ylabel: Union[bool, str] = True,
xticklabels: Union[str, int, Sequence[int]] = 'bottom',
yticklabels: Union[str, int, Sequence[int]] = 'left',
axtitle: Union[bool, Sequence[str]] = True,
frame: bool = True,
xlim: Union[float, Tuple[float, float]] = None,
**kwargs,
):
data = PlotData.from_args(y, ('sensor', None), xax, ds, sub)
data._cannot_skip_axes(self)
xdim = data.dims[1]
self._topomap_data = data.for_plot(PlotType.IMAGE)
# create figure
row_titles = self._set_axtitle(axtitle, data, data.n_plots)
layout = VariableAspectLayout(data.n_plots, 3, 10, aspect=(None, 1), ax_frames=(frame, False), row_titles=row_titles, **kwargs)
EelFigure.__init__(self, data.frame_title, layout)
self.bfly_axes = self.axes[0::2]
self.topo_axes = self.axes[1::2]
self.bfly_plots = []
self.topo_plots = []
self.t_markers = [] # vertical lines on butterfly plots
ColorMapMixin.__init__(self, data.data, cmap, vmax, vmin, contours,
self.topo_plots)
self._topo_kwargs = {
'clip': clip,
'clip_distance': clip_distance,
'head_radius': head_radius,
'head_pos': head_pos,
'proj': proj,
'contours': self._contours,
'res': res,
'interpolation': interpolation,
'im_interpolation': im_interpolation,
'sensorlabels': sensorlabels,
'mark': mark,
'mcolor': mcolor,
}
# plot epochs (x/y are in figure coordinates)
for ax, layers in zip(self.bfly_axes, data.for_plot(PlotType.LINE)):
h = _ax_butterfly(ax, layers, 'time', 'sensor', mark, color, linewidth, self._vlims, clip)
self.bfly_plots.append(h)
# decorate axes
self._configure_axis_dim('x', data.time_dim, xlabel, xticklabels, self.bfly_axes)
self._configure_axis_data('y', data, ylabel, yticklabels, self.bfly_axes)
# setup callback
XAxisMixin._init_with_data(self, data.data, xdim, xlim, self.bfly_axes)
YLimMixin.__init__(self, self.bfly_plots + self.topo_plots)
TimeSlicerEF.__init__(self, xdim, data.time_dim, self.bfly_axes, False, initial_time=t)
TopoMapKey.__init__(self, self._topo_data)
self._t_label = None # time label under lowest topo-map
        self._frame.store_canvas()
self._show(crosshair_axes=self.bfly_axes)
self._init_controller()
def _fill_toolbar(self, tb):
ColorMapMixin._fill_toolbar(self, tb)
def _update_topo(self, t):
if not self.topo_plots:
data = self._topomap_data.sub_time(t)
for ax, layers in zip(self.topo_axes, data):
p = _ax_topomap(ax, layers, cmaps=self._cmaps, vlims=self._vlims, **self._topo_kwargs)
self.topo_plots.append(p)
else:
data = self._topomap_data.sub_time(t, data_only=True)
for p, layers in zip(self.topo_plots, data):
p.set_data(layers)
def _topo_data(self, event):
ax = event.inaxes
if ax is None:
return
p = self.bfly_plots[ax.id // 2]
if ax in self.bfly_axes:
t = event.xdata
elif ax in self.topo_axes:
t = self._current_time
else:
return
seg = [l.sub(time=t) for l in p.data]
return seg, f"{ms(t)} ms", self._topo_kwargs['proj']
def _on_leave_axes_status_text(self, event):
return "Topomap: t = %.3f" % self._current_time
def _update_time(self, t, fixate):
TimeSlicerEF._update_time(self, t, fixate)
self._update_topo(t)
if fixate:
# add time label
text = "t = %i ms" % round(t * 1e3)
if self._t_label:
self._t_label.set_text(text)
else:
ax = self.topo_axes[-1]
self._t_label = ax.text(.5, -0.1, text, ha='center', va='top')
self.canvas.draw() # otherwise time label does not get redrawn
elif self._time_fixed:
self._t_label.remove()
self._t_label = None
self.canvas.draw() # otherwise time label does not get redrawn
elif hasattr(self.canvas, 'redraw'):
self.canvas.redraw(self.topo_axes)
class _plt_topomap(_plt_im):
"""Topomap plot
Parameters
----------
...
im_frame : scalar
Empty space beyond outmost sensors in the im plot.
vmax : scalar
Override the colorspace vmax.
interpolation
Method for interpolating topo-map between sensors.
"""
_aspect = 'equal'
def __init__(
self,
ax: mpl.axes.Axes,
layer: DataLayer,
proj: str,
res: int,
im_interpolation: str,
vlims,
cmaps,
contours,
interpolation: InterpolationArg,
clip: str,
clip_distance: float,
):
# store attributes
self._proj = proj
self._visible_data = layer.y.sensor._visible_sensors(proj)
        self._grid = np.linspace(0, 1, res)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 4 09:59:18 2015
@author: <NAME>
"""
#%%
import os
import ismrmrd
import ismrmrd.xsd
import numpy as np
import scipy as sp
from ismrmrdtools import show, transform, coils, grappa, sense
#%%
#Convert data from siemens file with
# siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 1 -o data_reps_noise.h5
# siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 2 -o data_reps_data.h5
# Data can be found in Gadgetron integration test datasets
#filename_noise = 'data_reps_noise.h5'
#filename_data = 'data_reps_data.h5'
filename_noise = 'tpat3_noise.h5'
filename_data = 'tpat3_data.h5'
#%%
# Read the noise data
if not os.path.isfile(filename_noise):
print("%s is not a valid file" % filename_noise)
raise SystemExit
noise_dset = ismrmrd.Dataset(filename_noise, 'dataset', create_if_needed=False)
#%%
# Process the noise data
noise_reps = noise_dset.number_of_acquisitions()
a = noise_dset.read_acquisition(0)
noise_samples = a.number_of_samples
num_coils = a.active_channels
noise_dwell_time = a.sample_time_us
noise = np.zeros((num_coils,noise_reps*noise_samples),dtype=np.complex64)
for acqnum in range(noise_reps):
acq = noise_dset.read_acquisition(acqnum)
# TODO: Currently ignoring noise scans
if not acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
        raise Exception("Error: non-noise scan found in noise calibration")
noise[:,acqnum*noise_samples:acqnum*noise_samples+noise_samples] = acq.data
noise = noise.astype('complex64')
#%% Read the actual data
if not os.path.isfile(filename_data):
print("%s is not a valid file" % filename_data)
raise SystemExit
dset = ismrmrd.Dataset(filename_data, 'dataset', create_if_needed=False)
header = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())
enc = header.encoding[0]
# Matrix size
eNx = enc.encodedSpace.matrixSize.x
eNy = enc.encodedSpace.matrixSize.y
eNz = enc.encodedSpace.matrixSize.z
rNx = enc.reconSpace.matrixSize.x
rNy = enc.reconSpace.matrixSize.y
rNz = enc.reconSpace.matrixSize.z
# Field of View
eFOVx = enc.encodedSpace.fieldOfView_mm.x
eFOVy = enc.encodedSpace.fieldOfView_mm.y
eFOVz = enc.encodedSpace.fieldOfView_mm.z
rFOVx = enc.reconSpace.fieldOfView_mm.x
rFOVy = enc.reconSpace.fieldOfView_mm.y
rFOVz = enc.reconSpace.fieldOfView_mm.z
#Parallel imaging factor
acc_factor = enc.parallelImaging.accelerationFactor.kspace_encoding_step_1
# Number of Slices, Reps, Contrasts, etc.
ncoils = header.acquisitionSystemInformation.receiverChannels
if enc.encodingLimits.slice != None:
nslices = enc.encodingLimits.slice.maximum + 1
else:
nslices = 1
if enc.encodingLimits.repetition != None:
nreps = enc.encodingLimits.repetition.maximum + 1
else:
nreps = 1
if enc.encodingLimits.contrast != None:
ncontrasts = enc.encodingLimits.contrast.maximum + 1
else:
ncontrasts = 1
# In case there are noise scans in the actual dataset, we will skip them.
firstacq=0
for acqnum in range(dset.number_of_acquisitions()):
acq = dset.read_acquisition(acqnum)
if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):
print("Found noise scan at acq ", acqnum)
continue
else:
firstacq = acqnum
print("Imaging acquisition starts acq ", acqnum)
break
# Calculate the prewhitener, taking receiver bandwidths (BWs) into consideration
a = dset.read_acquisition(firstacq)
data_dwell_time = a.sample_time_us
noise_receiver_bw_ratio = 0.79
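# Noise prewhitening decorrelates the channel noise; the scale factor corrects for the
# different dwell times of the noise and imaging readouts, and 0.79 is the commonly
# quoted receiver noise-bandwidth ratio for this kind of acquisition.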
dmtx = coils.calculate_prewhitening(noise,scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio)
#%%
# Process the actual data
all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64)
# Loop through the rest of the acquisitions and stuff
for acqnum in range(firstacq,dset.number_of_acquisitions()):
acq = dset.read_acquisition(acqnum)
acq_data_prw = coils.apply_prewhitening(acq.data,dmtx)
# Remove oversampling if needed
if eNx != rNx:
xline = transform.transform_kspace_to_image(acq_data_prw, [1])
        x0 = (eNx - rNx) // 2  # integer division so the slice indices are ints (Python 3)
        x1 = (eNx - rNx) // 2 + rNx
        xline = xline[:,x0:x1]
        acq.resize(rNx,acq.active_channels,acq.trajectory_dimensions)
        acq.center_sample = rNx // 2
# need to use the [:] notation here to fill the data
acq.data[:] = transform.transform_image_to_kspace(xline, [1])
# Stuff into the buffer
rep = acq.idx.repetition
contrast = acq.idx.contrast
slice = acq.idx.slice
y = acq.idx.kspace_encode_step_1
z = acq.idx.kspace_encode_step_2
all_data[rep, contrast, slice, :, z, y, :] = acq.data
all_data = all_data.astype('complex64')
#%%
# Coil combination
coil_images = transform.transform_kspace_to_image(np.squeeze(np.mean(all_data,0)),(1,2))
(csm,rho) = coils.calculate_csm_walsh(coil_images)
csm_ss = np.sum(csm * np.conj(csm),0)
csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32')
if acc_factor > 1:
coil_data = np.squeeze(np.mean(all_data,0))
reload(grappa)
(unmix,gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor)
#(unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm)
show.imshow(abs(gmap),colorbar=True,scale=(1,2))
recon = np.zeros((nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.complex64)
from math import ceil
import numpy as np
from sklearn.cluster import KMeans
def compute_reps(extract_fn, X, chunk_size):
"""Compute representations for input in chunks."""
chunks = int(ceil(float(X.shape[0]) / chunk_size))
reps = []
for i in range(chunks):
start = i * chunk_size
stop = start + chunk_size
chunk_reps = extract_fn(X[start:stop])
reps.append(chunk_reps)
return np.vstack(reps)
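# Usage sketch (hypothetical extractor and shapes, not part of the original code): chunking
# lets the representations of a large input matrix be computed without exhausting memory.
#
#   X = np.random.rand(10_000, 64)                      # hypothetical inputs
#   W = np.random.rand(64, 32)                          # hypothetical linear "extract_fn"
#   reps = compute_reps(lambda batch: batch @ W, X, chunk_size=1024)
#   assert reps.shape == (10_000, 32)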
class ClusterBatchBuilder(object):
"""Sample minibatches for magnet loss."""
def __init__(self, labels, k, m, d):
self.num_classes = np.unique(labels).shape[0]
self.labels = labels
self.k = k
self.m = m
self.d = d
self.centroids = None
self.assignments = np.zeros_like(labels, int)
self.cluster_assignments = {}
self.cluster_classes = np.repeat(range(self.num_classes), k)
self.example_losses = None
self.cluster_losses = None
self.has_loss = None
def update_clusters(self, rep_data, max_iter=20):
"""
Given an array of representations for the entire training set,
recompute clusters and store example cluster assignments in a
quickly sampleable form.
"""
# Lazily allocate array for centroids
if self.centroids is None:
self.centroids = np.zeros([self.num_classes * self.k, rep_data.shape[1]])
for c in range(self.num_classes):
class_mask = self.labels == c
class_examples = rep_data[class_mask]
kmeans = KMeans(n_clusters=self.k, init='k-means++', n_init=1, max_iter=max_iter)
kmeans.fit(class_examples)
# Save cluster centroids for finding impostor clusters
start = self.get_cluster_ind(c, 0)
stop = self.get_cluster_ind(c, self.k)
self.centroids[start:stop] = kmeans.cluster_centers_
# Update assignments with new global cluster indexes
self.assignments[class_mask] = self.get_cluster_ind(c, kmeans.predict(class_examples))
# Construct a map from cluster to example indexes for fast batch creation
for cluster in range(self.k * self.num_classes):
cluster_mask = self.assignments == cluster
self.cluster_assignments[cluster] = np.flatnonzero(cluster_mask)
def update_losses(self, indexes, losses):
"""
Given a list of examples indexes and corresponding losses
store the new losses and update corresponding cluster losses.
"""
# Lazily allocate structures for losses
if self.example_losses is None:
            self.example_losses = np.zeros_like(self.labels, float)
# The following parts are originally part of scikit-bio, with copyright notice
# as reproduced below. The original COPYING.txt file can be found under
# licenses/scikit-bio.txt.
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from composition_stats import (closure, multiplicative_replacement, perturb,
perturb_inv, power, inner, clr, clr_inv, ilr,
ilr_inv, alr, alr_inv, sbp_basis,
_gram_schmidt_basis, center, centralize)
class CompositionTests(TestCase):
def setUp(self):
# Compositional data
self.cdata1 = np.array([[2, 2, 6],
[4, 4, 2]])
self.cdata2 = np.array([2, 2, 6])
self.cdata3 = np.array([[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]])
self.cdata4 = np.array([1, 2, 3, 0, 5])
self.cdata5 = [[2, 2, 6], [4, 4, 2]]
self.cdata6 = [[1, 2, 3, 0, 5],
[1, 0, 0, 4, 5],
[1, 2, 3, 4, 5]]
self.cdata7 = [np.exp(1), 1, 1]
self.cdata8 = [np.exp(1), 1, 1, 1]
# Simplicial orthonormal basis obtained from Gram-Schmidt
self.ortho1 = [[0.44858053, 0.10905743, 0.22118102, 0.22118102],
[0.3379924, 0.3379924, 0.0993132, 0.22470201],
[0.3016453, 0.3016453, 0.3016453, 0.09506409]]
# Real data
self.rdata1 = [[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829, -0.81649658, 0.],
[0.28867513, 0.28867513, 0.28867513, -0.8660254]]
# Bad datasets
# negative count
self.bad1 = np.array([1, 2, -1])
# zero count
self.bad2 = np.array([[[1, 2, 3, 0, 5]]])
def test_closure(self):
npt.assert_allclose(closure(self.cdata1),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
npt.assert_allclose(closure(self.cdata2),
np.array([.2, .2, .6]))
npt.assert_allclose(closure(self.cdata5),
np.array([[.2, .2, .6],
[.4, .4, .2]]))
with self.assertRaises(ValueError):
closure(self.bad1)
with self.assertRaises(ValueError):
closure(self.bad2)
# make sure that inplace modification is not occurring
closure(self.cdata2)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_closure_warning(self):
with self.assertRaises(ValueError):
closure([0., 0., 0.])
with self.assertRaises(ValueError):
closure([[0., 0., 0.],
[0., 5., 5.]])
def test_perturb(self):
pmat = perturb(closure(self.cdata1),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata1),
closure(np.array([10, 10, 20])))
npt.assert_allclose(pmat,
np.array([[.125, .125, .75],
[1./3, 1./3, 1./3]]))
pmat = perturb(closure(self.cdata2),
closure([1, 2, 1]))
npt.assert_allclose(pmat, np.array([1./6, 2./6, 3./6]))
pmat = perturb(closure(self.cdata5),
closure(np.array([1, 1, 1])))
npt.assert_allclose(pmat,
np.array([[.2, .2, .6],
[.4, .4, .2]]))
# make sure that inplace modification is not occurring
perturb(closure(self.cdata2), closure([1, 2, 3]))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_power(self):
pmat = power(closure(self.cdata1), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
pmat = power(closure(self.cdata2), 2)
npt.assert_allclose(pmat, np.array([.04, .04, .36])/.44)
pmat = power(closure(self.cdata5), 2)
npt.assert_allclose(pmat,
np.array([[.04/.44, .04/.44, .36/.44],
[.16/.36, .16/.36, .04/.36]]))
# make sure that inplace modification is not occurring
power(closure(self.cdata2), 4)
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_perturb_inv(self):
pmat = perturb_inv(closure(self.cdata1),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1),
closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
pmat = perturb_inv(closure(self.cdata1),
closure([1, 1, 1]))
npt.assert_allclose(pmat,
closure([[.2, .2, .6],
[.4, .4, .2]]))
pmat = perturb_inv(closure(self.cdata5),
closure([.1, .1, .1]))
imat = perturb(closure(self.cdata1), closure([10, 10, 10]))
npt.assert_allclose(pmat, imat)
# make sure that inplace modification is not occurring
perturb_inv(closure(self.cdata2), closure([1, 2, 3]))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_inner(self):
a = inner(closure(self.cdata5), closure(self.cdata5))
npt.assert_allclose(a, np.array([[0.80463264, -0.50766667],
[-0.50766667, 0.32030201]]))
b = inner(closure(self.cdata7), closure(self.cdata7))
npt.assert_allclose(b, 0.66666666666666663)
# Make sure that orthogonality holds
npt.assert_allclose(inner(closure(self.ortho1), closure(self.ortho1)),
np.identity(3), rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
inner(closure(self.cdata1), closure(self.cdata8))
# make sure that inplace modification is not occurring
inner(closure(self.cdata1), closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_multiplicative_replacement(self):
amat = multiplicative_replacement(closure(self.cdata3))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata4))
npt.assert_allclose(amat,
np.array([0.087273, 0.174545, 0.261818,
0.04, 0.436364]),
rtol=1e-5, atol=1e-5)
amat = multiplicative_replacement(closure(self.cdata6))
npt.assert_allclose(amat,
np.array([[0.087273, 0.174545, 0.261818,
0.04, 0.436364],
[0.092, 0.04, 0.04, 0.368, 0.46],
[0.066667, 0.133333, 0.2,
0.266667, 0.333333]]),
rtol=1e-5, atol=1e-5)
# make sure that inplace modification is not occurring
multiplicative_replacement(closure(self.cdata4))
npt.assert_allclose(self.cdata4, np.array([1, 2, 3, 0, 5]))
def multiplicative_replacement_warning(self):
with self.assertRaises(ValueError):
multiplicative_replacement(closure([0, 1, 2]), delta=1)
def test_clr(self):
cmat = clr(closure(self.cdata1))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
cmat = clr(closure(self.cdata2))
A = np.array([.2, .2, .6])
npt.assert_allclose(cmat,
np.log(A / np.exp(np.log(A).mean())))
cmat = clr(closure(self.cdata5))
A = np.array([.2, .2, .6])
B = np.array([.4, .4, .2])
npt.assert_allclose(cmat,
[np.log(A / np.exp(np.log(A).mean())),
np.log(B / np.exp(np.log(B).mean()))])
# make sure that inplace modification is not occurring
clr(closure(self.cdata2))
npt.assert_allclose(self.cdata2, np.array([2, 2, 6]))
def test_clr_inv(self):
npt.assert_allclose(clr_inv(self.rdata1), self.ortho1)
npt.assert_array_almost_equal(clr(clr_inv(self.rdata1)), self.rdata1)
# make sure that inplace modification is not occurring
clr_inv(self.rdata1)
npt.assert_allclose(self.rdata1,
np.array([[0.70710678, -0.70710678, 0., 0.],
[0.40824829, 0.40824829,
-0.81649658, 0.],
[0.28867513, 0.28867513,
0.28867513, -0.8660254]]))
def test_center(self):
cmat = center(closure(self.cdata1))
npt.assert_allclose(cmat,
np.array([0.31010205, 0.31010205, 0.37979590]))
cmat = center(closure(self.cdata5))
npt.assert_allclose(cmat,
np.array([0.31010205, 0.31010205, 0.37979590]))
# make sure that inplace modification is not occurring
center(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_centralize(self):
cmat = centralize(closure(self.cdata1))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
cmat = centralize(closure(self.cdata5))
npt.assert_allclose(cmat,
np.array([[0.22474487, 0.22474487, 0.55051026],
[0.41523958, 0.41523958, 0.16952085]]))
# make sure that inplace modification is not occurring
centralize(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr(mat),
np.array([0.70710678, 0.40824829]))
# Should give same result as inner
npt.assert_allclose(ilr(closure(self.ortho1)), np.identity(3),
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr(closure(self.cdata1), basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr(closure(self.cdata1))
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([[0.80442968, 0.19557032]])
res = ilr(closure(table), basis=basis)
exp = np.array([np.log(1/10)*np.sqrt(1/2),
np.log(1.14141414 / 9.90909091)*np.sqrt(1/2),
np.log(1.28282828 / 9.81818182)*np.sqrt(1/2),
np.log(1.42424242 / 9.72727273)*np.sqrt(1/2),
np.log(1.56565657 / 9.63636364)*np.sqrt(1/2)])
npt.assert_allclose(res, exp)
def test_ilr_basis_one_dimension_error(self):
table = np.array([[1., 10.],
[1.14141414, 9.90909091],
[1.28282828, 9.81818182],
[1.42424242, 9.72727273],
[1.56565657, 9.63636364]])
basis = np.array([0.80442968, 0.19557032])
with self.assertRaises(ValueError):
ilr(closure(table), basis=basis)
def test_ilr_inv(self):
mat = closure(self.cdata7)
npt.assert_array_almost_equal(ilr_inv(ilr(mat)), mat)
npt.assert_allclose(ilr_inv(np.identity(3)), self.ortho1,
rtol=1e-04, atol=1e-06)
with self.assertRaises(ValueError):
ilr_inv(self.cdata1, basis=self.cdata1)
# make sure that inplace modification is not occurring
ilr_inv(self.cdata1)
npt.assert_allclose(self.cdata1,
np.array([[2, 2, 6],
[4, 4, 2]]))
def test_ilr_basis_isomorphism(self):
# tests to make sure that the isomorphism holds
# with the introduction of the basis.
basis = np.array([[0.80442968, 0.19557032]])
table = np.array([[np.log(1/10)*np.sqrt(1/2),
                           np.log(1.14141414 / 9.90909091)
from abc import ABC, abstractmethod
import numpy as np
from UQpy.utilities.ValidationTypes import RandomStateType
class Refinement(ABC):
"""
Baseclass of all available strata refinement methods. Provides the methods that each existing and new refinement
algorithm must implement in order to be used in the :class:`.RefinedStratifiedSampling` class.
"""
@abstractmethod
def update_samples(
self,
nsamples: int,
samples_per_iteration: int,
random_state: RandomStateType,
index: int,
dimension: int,
samples_u01: np.ndarray,
training_points: np.ndarray,
):
"""
Method that need to be overridden in case of new :class:`.Refinement` techniques.
:param nsamples: Total number of samples to be drawn
:param samples_per_iteration: New samples to be drawn at each refinement iteration.
:param random_state: Random seed used to initialize the pseudo-random number generator. Default is
:class:`None`.
If an :any:`int` is provided, this sets the seed for an object of :class:`numpy.random.RandomState`. Otherwise,
the object itself can be passed directly.
:param index: Iteration index
:param dimension: Number of dimension of the sampling problem.
:param samples_u01: Existing samples drawn at the unit hypercube space.
:param training_points: Training points required in case of advanced refinement techniques.
"""
pass
def initialize(self, samples_number, training_points, samples):
pass
def finalize(self, samples, samples_per_iteration):
pass
@staticmethod
def identify_bins(strata_metrics, points_to_add, random_state):
bins2break = np.array([])
points_left = points_to_add
while (np.where(strata_metrics == strata_metrics.max())[0].shape[0] < points_left):
bin = np.where(strata_metrics == strata_metrics.max())[0]
            bins2break = np.hstack([bins2break, bin])
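# Illustrative sketch (not part of UQpy): a concrete strategy only has to override
# `update_samples`. The per-stratum score used here is a user-supplied assumption made
# for the example; the class's own `identify_bins` helper plays a similar role.
class ScoreRefinement(Refinement):
    def __init__(self, stratum_scores):
        self.stratum_scores = np.asarray(stratum_scores, dtype=float)

    def update_samples(self, nsamples, samples_per_iteration, random_state,
                       index, dimension, samples_u01, training_points):
        # Rank strata by score and select the ones to refine in this iteration; drawing the
        # new points inside the selected strata is omitted, as it depends on the strata type.
        ranked = np.argsort(self.stratum_scores)[::-1]
        return ranked[:samples_per_iteration]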
#!/usr/bin/env python
# coding: utf-8
# Here we import some libraries that will come in handy afterward.
# In[1]:
import numpy as np
from pathlib import Path
from datetime import datetime
from matplotlib import pyplot as plt
from scipy.stats import shapiro
from statsmodels.tsa.stattools import acf
from scipy.stats import ttest_1samp
from sklearn import linear_model
now = datetime.now()
time = now.strftime("%Y%m%d_%H%M%S")
import seaborn as sns
from prog_models.models import BatteryElectroChem as Battery
np.random.seed(0)
# Here we declare some parameters:
# In[2]:
# Number of controls
d = 2
# Size of reservoir
k = 1000
epsilon = 1
# Number of timesteps into which we split the time span [0, T]
N_T = 1000
# Number of Train Sample
N_S = 1000
# Number of Test Samples
N_C = 1000
N_Channels = 1
speed=2
mean = 2
vol=1
epsilon = 1
# For a given reconstruction error epsilon and N_T, this prints the minimum reservoir size k to use.
print((24*np.log(N_T))/(3*epsilon**2 - 2*epsilon**3))
print(k > (24*np.log(N_T))/(3*epsilon**2 - 2*epsilon**3))
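# Hedged helper (not in the original script): the same bound as printed above, wrapped so it
# can be reused for other (N_T, epsilon) pairs.
def min_reservoir_size(n_timesteps, eps):
    """Smallest integer k satisfying k > 24*log(n_timesteps)/(3*eps**2 - 2*eps**3)."""
    bound = (24 * np.log(n_timesteps)) / (3 * eps**2 - 2 * eps**3)
    return int(np.floor(bound)) + 1
# e.g. min_reservoir_size(N_T, epsilon) should not exceed the chosen k above.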
Z0 = np.random.normal(0.0,1.0,size=(k,1))
# Decide where to put the outputs. You will have to change this path...
# In[3]:
quality = 1000
# Target_Path_Folder = r"C:\Users\eneam\Dropbox\Research\Thesis\GBM_Signal_Extraction_GBM_GBM_Few_Shit_Student_" + str(mu) + "_" + str(sigma) + "_" + str(mu_2) + "_" + str(sigma_2) + "_" + str(N_T) + "_" + str(M) + "_" + str(today).replace("-", "_")
Target_Path_Folder = r"C:\Users\eneam\Dropbox\Research\Rough_Paper\Outputs\ICLR\Battery_Log_" + str(speed) + "_" + str(mean) + "_" + str(vol) + "_" + str(N_T) + "_" + str(k) + "_" + str(N_S) + "_" + str(N_Channels)
Path(Target_Path_Folder).mkdir(parents=True, exist_ok=True)
path = Path(Target_Path_Folder)
# Now we define some utilities
# In[22]:
def nilpotent(M):
B = np.zeros((M,M))
for i in range(2,M):
B[i,i-1]=1.0
return B
def canonical(i,M):
e = np.zeros((M,1))
e[i,0]=1.0
return e
def o_u(timesteps,speed,mean,vol,dB,dt):
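    # Euler-Maruyama discretisation of an Ornstein-Uhlenbeck process
    #   dX_t = speed * (mean - X_t) dt + vol * dB_t,   X_0 = 1,
    # where `dB` holds the Brownian increments and `dt` the step sizes.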
SDEpath = np.empty((1,timesteps+1))
SDEpath[:, 0] = 1
for tt in np.arange(1,timesteps+1):
SDEpath[:,tt] = SDEpath[:,tt-1] + speed*(mean-SDEpath[:,tt-1])*dt[tt-1] + vol*dB[tt-1,]
return SDEpath
def randomAbeta(d,M):
A = []
beta = []
for i in range(d):
# B = 0.0*nilpotent(M) + np.random.standard_t(2,size=(M,M))
B = 0.0*nilpotent(M) + np.random.normal(0.0,1.0,size=(M,M))
# B = np.random.permutation(B)
A = A + [B]
# beta = beta + [0.0*canonical(i,M)+np.random.standard_t(2,size=(M,1))]
beta = beta + [np.random.normal(0.0,1.0,size=(M,1))]
return [A,beta]
# speed=2
# mean = 2
# vol=0.5
# d = 2
# k = 1000
# N_T = 1000
# def sigmoid(x):
# return x/150
# speed=2
# mean = 2
# vol = 1
# d = 2
# k = 1000
# N_T = 1000
def sigmoid(x):
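    # Note: despite its name this is a plain linear rescaling (x/300), presumably chosen to
    # keep the random-feature reservoir activations in a numerically tame range.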
return x/300
def reservoirfield_Y(state,increment, C, deta):
value = np.zeros((k,1))
for i in range(d):
value = value + sigmoid(np.matmul(C[i],state) + deta[i])*increment[i]
return value
def reservoir_Y(N_T, Control_Path, C, deta):
reservoirpath = [Z0]
Increment_Storage = np.diff(Control_Path,axis=1)
for i in range(N_T):
increment = Increment_Storage[:,i]
reservoirpath = reservoirpath + [(reservoirpath[-1]+reservoirfield_Y(reservoirpath[-1],increment, C, deta))]
return reservoirpath
def Tonio_Measure(serie_1, serie_2):
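    # Relative squared error: ||serie_1 - serie_2||^2 / ||serie_1||^2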
numerator = np.sum(np.square(serie_1-serie_2))
denominator = np.sum(np.square(serie_1))
return numerator/denominator
def Average_Increment_Calculator(df):
Increments = np.diff(df,axis=1)
return np.mean(Increments, axis=1)
def Std_Increment_Calculator(df):
Increments = np.diff(df,axis=1)
return np.std(Increments, axis=1)
def Autocorr_Increment_Calculator(df):
Increments = np.diff(df,axis=1)
df_acf = np.empty((Increments.shape[0],0))
for i in range(Increments.shape[0]):
acf_temp = acf(Increments[i,:],nlags=1,fft=True)
df_acf = np.insert(df_acf,0,acf_temp[1])
return df_acf
def pvalue_normality_Increment_Calculator(df):
Increments = np.diff(df,axis=1)
df_p = np.empty((Increments.shape[0],0))
for i in range(Increments.shape[0]):
stat, p = shapiro(Increments[i,:])
df_p = np.insert(df_p,0,p)
return df_p
def Tonio_Measure_all(df1,df2):
df_tonio = np.empty((df1.shape[0],0))
for i in range(df1.shape[0]):
df_tonio = np.insert(df_tonio,0,Tonio_Measure(df1[i,:], df2[i,:]))
return df_tonio
def Quadratic_Variation_Calculator(df):
return np.sum(np.square(np.diff(df,axis=1)), axis=1)
def Path(options):
dt=time_to_simulate_to/N_T
dB = np.sqrt(dt) * np.random.randn(N_T)
tt=np.arange(0,time_to_simulate_to+dt,dt)
tt = np.array([round(i,3) for i in tt])
dt=np.repeat(dt,len(dB))
o_u_=list(o_u(N_T,speed, mean, vol,dB,dt)[0,:])
def brownian(t,x=None):
ii=np.where(tt==round(t,3))[0][0]
i=o_u_[ii]
return {'i': i}
(times, inputs, states, outputs, event_states) = batt.simulate_to(time_to_simulate_to, brownian, {'t': 18.95, 'v': 4.183}, **options)
t = np.array(times)
Current = np.array([ii['i'] for ii in inputs])
Y = np.array([outputs[i]['v'] for i in range(len(outputs))])
return [t,Current,Y]
# Declare the RDE object and plot the random signatures, just to see how they look.
# In[23]:
CDeta = randomAbeta(d,k)
C = CDeta[0]
deta = CDeta[1]
batt = Battery()
noise=0.0
batt.parameters['process_noise']=noise
time_to_simulate_to = 500
options = {
'save_freq': time_to_simulate_to/N_T, # Frequency at which results are saved
'dt': time_to_simulate_to/N_T,
}
[t,Current,Y] = Path(options)
plt.figure(figsize=(6,4))
plt.plot(t,Current)
plt.show()
plt.figure(figsize=(6,4))
plt.plot(t,Y)
plt.show()
Control_Path = [t/time_to_simulate_to,Current]
plt.plot(np.squeeze(reservoir_Y(N_T, Control_Path, C, deta)))
plt.savefig(path / "Random_Signature.pdf", dpi=quality)
plt.show()
# In[24]:
CDeta = randomAbeta(d*N_Channels,k)
C = []
deta = []
for l in range(0,d*N_Channels,2):
C = C + [[CDeta[0][l],CDeta[0][l+1]]]
deta = deta + [[CDeta[1][l],CDeta[1][l+1]]]
Y_Reservoir = np.zeros((1,))
Features_Reservoir=np.zeros([N_Channels,N_S,N_T+1,k])
for i in range(N_S):
if np.mod(i,10)==0:
print(i)
Joint_Path = Path(options)
Control_Path = [Joint_Path[0]/time_to_simulate_to,Joint_Path[1]]
for l in range(0,N_Channels):
Features_Reservoir[l,i,:,:] = np.squeeze(reservoir_Y(N_T,Control_Path, C[l], deta[l]))
# Here we save the target: SDEpath
Y_Reservoir = np.r_[Y_Reservoir, np.log(Joint_Path[2])]
Y_Reservoir = np.delete(Y_Reservoir, (0), axis=0)
Y_Reservoir = Y_Reservoir.reshape((Y_Reservoir.shape[0],1))
# In[25]:
model_list = []
MAX = N_S*(N_T+1)
Y_Pred = np.zeros((Y_Reservoir.shape[0],N_Channels))
for l in range(0,N_Channels):
print(l)
Features = Features_Reservoir[l,:,:,:]
Features =np.reshape(Features,(-1,k))
lm_Y = linear_model.Ridge(alpha=0.001)#
model_Y = lm_Y.fit(Features[:MAX,:],Y_Reservoir[:MAX,:] )
Y_Pred[:,l] = model_Y.predict(Features[:MAX,:]).reshape((Y_Reservoir[:MAX,:].shape[0],))
model_list = model_list + [model_Y]
# Remove Useless Rows. Why? Because every example brings along the starting point which is always the same for all examples and is just redundant.
# In[25]:
Control_Path = [Joint_Path[0]/time_to_simulate_to,Joint_Path[1]]
Y_Pred_Test = np.zeros((Joint_Path[2].shape[0],N_Channels))
for l in range(0,N_Channels):
Y_Pred_Test[:,l] = np.exp(model_list[l].predict(np.squeeze(reservoir_Y(N_T,Control_Path, C[l], deta[l]))).reshape((Joint_Path[2].shape[0],)))
Y_Extracted = np.mean(Y_Pred_Test,axis=1)
plt.figure()
# We plot
line_up, = plt.plot(Joint_Path[0],Y_Extracted, color = (0.138, 0.484, 0.782),linewidth=4, label='LTL')
line_down, = plt.plot(Joint_Path[0],Joint_Path[2], color = (0.93, 0.525, 0.219),linewidth=3, linestyle='dashed', label='True')
# line_err_up, = plt.plot(Joint_Path_Test[0],Y_Extracted+2*Y_Extracted_err, 'g',linewidth=1, linestyle='dashed', label='True')
# line_err_down, = plt.plot(Joint_Path_Test[0],Y_Extracted-2*Y_Extracted_err, 'g',linewidth=1, linestyle='dashed', label='True')
# plt.legend([line_up, line_down,line_err_up,line_err_down], ['Extracted ' + r'$\hat{Y}_{t}$', 'True ' + r'$Y_{t}$'],fontsize=15)
plt.legend([line_up, line_down], ['Extracted ' + r'$\hat{Y}_{t}$', 'True ' + r'$Y_{t}$'],fontsize=15)
plt.title("In Sample",fontsize=15)
plt.xlabel('Time',fontsize=15)
plt.ylabel('Value',fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig(path / "In_Sample_Comparison_of_True_vs_Extracted_Y_Levels.pdf", bbox_inches='tight', dpi=quality)
plt.show()
# Let us plot an OOS example
# In[26]:
Features_Test=np.zeros([N_Channels,1,N_T+1,k])
Joint_Path_Test = Path(options)
Control_Path = [Joint_Path_Test[0]/time_to_simulate_to,Joint_Path_Test[1]]
Y_Pred_Test = np.zeros((Joint_Path_Test[2].shape[0],N_Channels))
for l in range(0,N_Channels):
Y_Pred_Test[:,l] = np.exp(model_list[l].predict(np.squeeze(reservoir_Y(N_T,Control_Path, C[l], deta[l]))).reshape((Joint_Path_Test[2].shape[0],)))
Y_Extracted = np.mean(Y_Pred_Test,axis=1)
Y_Extracted_err = np.std(Y_Pred_Test,axis=1)/np.sqrt(N_Channels)
############# NOW WE EXTRACT the FEATURES of the Controls: Time and BM
# Map through linear layer
plt.figure()
# We plot
line_up, = plt.plot(Joint_Path_Test[0],Y_Extracted, color = (0.138, 0.484, 0.782),linewidth=4, label='LTL')
line_down, = plt.plot(Joint_Path_Test[0],Joint_Path_Test[2], color = (0.93, 0.525, 0.219),linewidth=3, linestyle='dashed', label='True')
# line_err_up, = plt.plot(Joint_Path_Test[0],Y_Extracted+2*Y_Extracted_err, 'g',linewidth=1, linestyle='dashed', label='True')
# line_err_down, = plt.plot(Joint_Path_Test[0],Y_Extracted-2*Y_Extracted_err, 'g',linewidth=1, linestyle='dashed', label='True')
# plt.legend([line_up, line_down,line_err_up,line_err_down], ['Extracted ' + r'$\hat{Y}_{t}$', 'True ' + r'$Y_{t}$'],fontsize=15)
plt.legend([line_up, line_down], ['Extracted ' + r'$\hat{Y}_{t}$', 'True ' + r'$Y_{t}$'],fontsize=15)
plt.title("Out Of Sample",fontsize=15)
plt.xlabel('Time',fontsize=15)
plt.ylabel('Value',fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig(path / "Out_of_Sample_Comparison_of_True_vs_Extracted_Y_Levels.pdf", bbox_inches='tight', dpi=quality)
plt.show()
# Now we extract some statistics from the extracted and true paths and compare them.
# For each new OOS path that we check, we know exactly which Y we should extract, and we also have the one we extract ourselves.
# The comparison is therefore fair: we compare statistics over N_C extracted Ys with the statistics we would have observed on the correct Ys.
# This gives an idea of how far our extracted Ys deviate from their true counterparts.
# In[9]:
Reservoir_Y_Extracted = np.zeros((1,N_T+1))
Reservoir_Y_True = np.zeros((1,N_T+1))
for i in range(N_C):
if np.mod(i,10)==0:
print(i)
############ TEST THE AUTOENCODER #############
Features_Test=np.zeros([N_Channels,1,N_T+1,k])
Joint_Path_Test = Path(options)
Control_Path = [Joint_Path_Test[0]/time_to_simulate_to,Joint_Path_Test[1]]
Y_Pred_Test = np.zeros((Joint_Path_Test[2].shape[0],N_Channels))
for l in range(0,N_Channels):
Y_Pred_Test[:,l] = np.exp(model_list[l].predict(np.squeeze(reservoir_Y(N_T,Control_Path, C[l], deta[l]))).reshape((Joint_Path_Test[2].shape[0],)))
Y_Test_Extracted = np.mean(Y_Pred_Test,axis=1)
############### SU PATH CORRETTO
############### SU Y Real
Reservoir_Y_Extracted = np.r_[Reservoir_Y_Extracted, Y_Test_Extracted.reshape((1,N_T+1))]
Reservoir_Y_True = np.r_[Reservoir_Y_True, Joint_Path_Test[2].reshape((1,N_T+1))]
print("Done")
Reservoir_Y_Extracted = np.delete(Reservoir_Y_Extracted, 0, axis=0)
Reservoir_Y_True = np.delete(Reservoir_Y_True, 0, axis=0)
# In[10]:
Starting_Values_True = Reservoir_Y_True[:,0]
Average_Increments_True = Average_Increment_Calculator(Reservoir_Y_True)
Std_Increments_True = Std_Increment_Calculator(Reservoir_Y_True)
p_value_normality_increments_True = pvalue_normality_Increment_Calculator(Reservoir_Y_True)
Autocorrelation_increments_True = Autocorr_Increment_Calculator(Reservoir_Y_True)
Quadratic_Variation_True = Quadratic_Variation_Calculator(Reservoir_Y_True)
Starting_Values_Extracted = Reservoir_Y_Extracted [:,0]
Average_Increments_Extracted = Average_Increment_Calculator(Reservoir_Y_Extracted )
Std_Increments_Extracted = Std_Increment_Calculator(Reservoir_Y_Extracted )
p_value_normality_increments_Extracted = pvalue_normality_Increment_Calculator(Reservoir_Y_Extracted )
Autocorrelation_increments_Extracted = Autocorr_Increment_Calculator(Reservoir_Y_Extracted )
Quadratic_Variation_Extracted = Quadratic_Variation_Calculator(Reservoir_Y_Extracted )
Tonio_Measure_Extracted = Tonio_Measure_all(Reservoir_Y_True,Reservoir_Y_Extracted)
# Compare starting value
# In[11]:
print("Mean of Tonio measure:")
print(np.mean(Tonio_Measure_Extracted))
print("Median of Tonio measure:")
print(np.median(Tonio_Measure_Extracted))
# Compare Distribution of Average of the Increments. Interesting is that the average is relevant but... The distribution of the averages is more informative.
# In[12]:
plt.figure()
bins = np.linspace(-0.0005, -0.0002, int(np.sqrt(N_C)))
plt.hist(Average_Increments_True, bins, alpha=0.5, label='True',density=True)
plt.hist(Average_Increments_Extracted, bins, alpha=0.5, label='Extracted',density=True)
plt.legend(loc='upper right')
plt.title("Distribution of Average of Increments")
plt.savefig(path / "Distribution of Average of Increments.pdf", dpi=quality)
plt.show()
tset, pval_avg = ttest_1samp(Average_Increments_Extracted, 0)
print("p-values",pval_avg)
if pval_avg > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
    print("Average of the averages of Increments is 0")
else:
    print("Average of the averages of Increments is NOT 0")
plt.figure()
labels = ('Extracted', 'True')
data = [Average_Increments_Extracted, Average_Increments_True]
fig7, ax7 = plt.subplots()
ax7.set_title('Average of Increments')
ax7.boxplot(data)
plt.xticks(np.arange(len(labels))+1,labels)
plt.savefig(path / "Boxplot of Average of Increments.pdf", dpi=quality)
plt.show()
# Compare Distribution of Stds of the Increments. Interesting is that the average is relevant but... The distribution of the Stds is more informative.
# In[13]:
plt.figure()
bins = np.linspace(0.8, 1.3, int(np.sqrt(N_C)))
plt.hist(Std_Increments_True**2*N_T, bins, alpha=0.5, label='True',density=True)
plt.hist(Std_Increments_Extracted**2*N_T, bins, alpha=0.5, label='Extracted',density=True)
plt.legend(loc='upper right')
plt.title("Distribution of Std of Increments")
plt.savefig(path / "Distribution of Std of Increments.pdf", dpi=quality)
plt.show()
print(np.mean(Std_Increments_True)**2*N_T)
print(np.mean(Std_Increments_Extracted)**2*N_T)
tset, pval_std = ttest_1samp(Std_Increments_Extracted, np.sqrt(1/N_T))
print("p-values",pval_std)
if pval_std > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
    print("Average of the Std of Increments is " + str(np.sqrt(1/N_T)))
else:
    print("Average of the Std of Increments is NOT " + str(np.sqrt(1/N_T)))
plt.figure()
labels = ('Extracted', 'True')
data = [Std_Increments_Extracted, Std_Increments_True]
fig7, ax7 = plt.subplots()
ax7.set_title('Std of Increments')
ax7.boxplot(data)
plt.xticks(np.arange(len(labels))+1,labels)
plt.savefig(path / "Boxplot of Std of Increments.pdf", dpi=quality)
plt.show()
# Compare Distribution of pvalues. Interesting is that the average is relevant but... The distribution of the pvalues is more informative.
# In[14]:
plt.figure()
bins = np.linspace(0, 1, int(np.sqrt(N_C)))
plt.hist(p_value_normality_increments_True, bins, alpha=0.5, label='True',density=True)
plt.hist(p_value_normality_increments_Extracted, bins, alpha=0.5, label='Extracted',density=True)
plt.legend(loc='upper right')
plt.title("Distribution of Pvalue of Normality of Increments")
plt.savefig(path / "Distribution of Pvalue of Normality of Increments.pdf", dpi=quality)
plt.show()
print(np.mean(p_value_normality_increments_True))
print(np.mean(p_value_normality_increments_Extracted))
plt.figure()
labels = ('Extracted', 'True')
data = [p_value_normality_increments_Extracted, p_value_normality_increments_True]
fig7, ax7 = plt.subplots()
ax7.set_title('Pvalue of Normality of Increments')
ax7.boxplot(data)
plt.xticks(np.arange(len(labels))+1,labels)
plt.savefig(path / "Boxplot of Pvalue of Normality of Increments.pdf", dpi=quality)
plt.show()
# Compare Distribution of Autocorrelation of the Increments. Interesting is that the average is relevant but... The distribution of the Autocorrelation is more informative.
# In[15]:
bins = np.linspace(-0.2, 0.2, int(np.sqrt(N_C)))
plt.figure()
plt.hist(Autocorrelation_increments_True, bins, alpha=0.5, label='True',density=True)
plt.hist(Autocorrelation_increments_Extracted, bins, alpha=0.5, label='Extracted',density=True)
plt.legend(loc='upper right')
plt.title("Distribution of Autocorrelation of Increments")
plt.savefig(path / "Distribution of Autocorrelations of Increments.pdf", dpi=quality)
plt.show()
print(np.mean(Autocorrelation_increments_True))
print(np.mean(Autocorrelation_increments_Extracted))
tset, pval_aut = ttest_1samp(Autocorrelation_increments_True,0)
print("p-values",pval_aut)
if pval_aut > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
    print("Average of the Autocorrelations of Increments is 0")
else:
    print("Average of the Autocorrelations of Increments is NOT 0")
plt.figure()
labels = ('Extracted', 'True')
data = [Autocorrelation_increments_Extracted, Autocorrelation_increments_True]
fig7, ax7 = plt.subplots()
ax7.set_title('Autocorrelations of Increments')
ax7.boxplot(data)
plt.xticks(np.arange(len(labels))+1,labels)
plt.savefig(path / "Boxplot of Autocorrelations of Increments.pdf", dpi=quality)
plt.show()
# Compare Distribution of Quadratic Variation of the Increments. Interesting is that the average is relevant but... The distribution of the Quadratic Variation is more informative.
# In[16]:
plt.figure()
bins = np.linspace(0.8, 1.3, int(np.sqrt(N_C)))
plt.figure()
plt.hist(Quadratic_Variation_True, bins, alpha=0.5, label='True',density=True)
plt.hist(Quadratic_Variation_Extracted, bins, alpha=0.5, label='Extracted',density=True)
plt.legend(loc='upper right')
plt.title("Distribution of Second Variation of BMs")
plt.savefig(path / "Distribution of Second Variation of BMs.pdf", dpi=quality)
plt.show()
print(np.mean(Quadratic_Variation_True))
print(np.mean(Quadratic_Variation_Extracted))
tset, pval_qv = ttest_1samp(Quadratic_Variation_Extracted, 1)
print("p-values",pval_qv)
if pval_qv > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
    print("Average of the Second Variation of BMs is 1")
else:
    print("Average of the Second Variation of BMs is NOT 1")
plt.figure()
labels = ('Extracted', 'True')
data = [Quadratic_Variation_Extracted, Quadratic_Variation_True]
fig7, ax7 = plt.subplots()
ax7.set_title('Second Variation of BMs')
ax7.boxplot(data)
plt.xticks(np.arange(len(labels))+1,labels)
plt.savefig(path / "Boxplot of Second Variation of BMs.pdf", dpi=quality)
plt.show()
# In[17]:
import sys
print('This message will be displayed on the screen.')
original_stdout = sys.stdout # Save a reference to the original standard output
with open(path /'filename.txt', 'w') as f:
sys.stdout = f # Change the standard output to the file we created.
print("Mean of Starting_Values_True:")
print(np.mean(Starting_Values_True))
print("Mean of Starting_Values_Extracted:")
print(np.mean(Starting_Values_Extracted))
print()
print("Mean of Tonio Measure:")
print(np.mean(Tonio_Measure_Extracted))
print()
print("Median of Tonio Measure:")
print(np.median(Tonio_Measure_Extracted))
print()
print("Mean of Average_Increments_True:")
print(np.mean(Average_Increments_True))
print("Mean of Average_Increments_Extracted:")
print(np.mean(Average_Increments_Extracted))
tset, pval_avg = ttest_1samp(Average_Increments_Extracted, 0)
print("p-values",pval_avg)
    if pval_avg > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
        print("Average of the averages of Increments is 0")
    else:
        print("Average of the averages of Increments is NOT 0")
print()
print("Mean of Std_Increments_True:")
print(np.mean(Std_Increments_True))
print("Mean of Std_Increments_Extracted:")
print(np.mean(Std_Increments_Extracted))
tset, pval_std = ttest_1samp(Std_Increments_Extracted, np.sqrt(1/N_T))
print("p-values",pval_std)
    if pval_std > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
        print("Average of the Std of Increments is " + str(np.sqrt(1/N_T)))
    else:
        print("Average of the Std of Increments is NOT " + str(np.sqrt(1/N_T)))
print()
print("Mean of p_value_normality_increments_True:")
print(np.mean(p_value_normality_increments_True))
print("Mean of p_value_normality_increments_Extracted:")
print(np.mean(p_value_normality_increments_Extracted))
print()
print("Mean of Autocorrelation_increments_True:")
print(np.mean(Autocorrelation_increments_True))
print("Mean of Autocorrelation_increments_Extracted:")
print(np.mean(Autocorrelation_increments_Extracted))
tset, pval_aut = ttest_1samp(Autocorrelation_increments_True,0)
print("p-values",pval_aut)
    if pval_aut > 0.05: # alpha value is 0.05 or 5%; cannot reject the null
        print("Average of the Autocorrelations of Increments is 0")
    else:
        print("Average of the Autocorrelations of Increments is NOT 0")
print()
print("Mean of Quadratic_Variation_True:")
    print(np.mean(Quadratic_Variation_True))
import numpy as np
from typing import Callable, List
import matplotlib.pyplot as plt
from dataclasses import dataclass
from .utils import rotation, averageB
@dataclass
class Result:
magFieldError: float
B0: float
B0Error: float
B0MinimizationArray: List[float]
b: float
bMinimizationArray: List[float]
R0: float
R0MinimizationArray: List[float]
theta: float
thetaMinimizationArray: List[float]
phi: float
phiMinimizationArray: List[float]
@dataclass
class Settings:
nIterations: int = 10
nPoints: int = 1000
def fitting_Hoyle(
Bx: np.ndarray,
By: np.ndarray,
Bz: np.ndarray,
Btotal: np.ndarray,
r: np.ndarray,
settings: Settings,
statusCallback: Callable[[str], None],
isCanceled: Callable[[], bool]
):
if Btotal is None:
Btotal = np.sqrt(Bx**2 + By**2 + Bz**2)
minB0: float = np.nanmax(Btotal)
minb: float = 0
minR0: float = 5
minTheta: float = 0.0
minPhi: float = 0.0
toRad = np.pi / 180
def createStatus(iteration: int):
# yapf: disable
return (
f"{iteration + 1}/{settings.nIterations}\n"
f"B0: {minB0}\n"
f"b: {minb}\n"
f"R0: {minR0}\n"
f"theta: {minTheta}\n"
f"phi: {minPhi}"
)
# yapf: enable
    arrayB0 = np.empty(settings.nPoints)
# Import packages
import os
import sys
import pickle
import dill
import time
import copy
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
import scarlet
import sep
from astropy import wcs
from astropy.convolution import Box2DKernel, Gaussian2DKernel, convolve
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.io import fits
from astropy.table import Column, Table
from astropy.utils.data import clear_download_cache, download_file
from IPython.display import clear_output
# Initialize `unagi`
# from unagi import config, hsc, plotting
# from unagi.task import hsc_cutout, hsc_psf
# Import kuaizi
import kuaizi as kz
from kuaizi import HSC_pixel_scale, HSC_zeropoint
from kuaizi.detection import Data
from kuaizi.display import SEG_CMAP, display_single
sys.setrecursionlimit(10000)
plt.rcParams['font.size'] = 15
plt.rc('image', cmap='inferno', interpolation='none', origin='lower')
def _fitting_single_comp(lsbg, hsc_dr, cutout_halfsize=1.0, prefix='LSBG', large_away_factor=3.0, compact_away_factor=0.4):
from kuaizi.utils import padding_PSF
kz.utils.set_env(project='HSC', name='HSC_LSBG')
# kz.utils.set_matplotlib(usetex=False, fontsize=15)
index = lsbg['Seq']
lsbg_coord = SkyCoord(ra=lsbg['RAJ2000'], dec=lsbg['DEJ2000'], unit='deg')
if not os.path.isdir('./Images'):
os.mkdir('./Images')
if not os.path.isdir('./PSFs'):
os.mkdir('./PSFs')
size_ang = cutout_halfsize * u.arcmin
channels = 'griz'
cutout = hsc_cutout(
lsbg_coord,
cutout_size=size_ang,
filters=channels,
mask=True,
variance=True,
archive=hsc_dr,
use_saved=True,
output_dir='./Images/',
prefix=f'{prefix}_{index:04d}_img',
save_output=True)
psf_list = hsc_psf(
lsbg_coord,
centered=True,
filters=channels,
img_type='coadd',
verbose=True,
archive=hsc_dr,
save_output=True,
use_saved=True,
prefix=f'{prefix}_{index:04d}_psf',
output_dir='./PSFs/')
channels_list = list(channels)
    # Restructure the data
images = np.array([hdu[1].data for hdu in cutout])
w = wcs.WCS(cutout[0][1].header) # note: all bands share the same WCS here
filters = channels_list
weights = 1 / np.array([hdu[3].data for hdu in cutout])
psf_pad = padding_PSF(psf_list) # Padding PSF cutouts from HSC
psfs = scarlet.ImagePSF(np.array(psf_pad))
data = Data(images=images, weights=weights,
wcs=w, psfs=psfs, channels=channels)
_, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
w,
pixel_scale=HSC_pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=1.4)
# This detection (after blurring the original images) finds out what is the central object and its (estimated) size
obj_cat_ori, segmap, bg_rms = kz.detection.makeCatalog(
[data],
lvl=8,
method='wavelet',
convolve=False,
# conv_radius=2,
wavelet_lvl=5,
low_freq_lvl=3,
high_freq_lvl=0,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.01,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx]
# print(f'# Central object is #{cen_indx}.')
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# This step masks out high freq sources after wavelet transformation
obj_cat, segmap, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2.5,
method='wavelet',
high_freq_lvl=1,
wavelet_lvl=3,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=32,
f=3,
pixel_scale=0.168,
minarea=5,
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
for ind in np.where(dist < (compact_away_factor * cen_obj['fwhm_custom'] * HSC_pixel_scale) * u.arcsec)[0]:
# we do not mask compact sources that are nearby to the center of target galaxy
segmap[segmap == ind + 1] = 0
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright and large contamination, which is not well-masked in previous step
obj_cat, segmap, bg_rms = kz.detection.makeCatalog(
[data],
        lvl=10,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=40,
f=3,
pixel_scale=0.168,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
arr = np.zeros_like(segmap).astype('uint8')
sep.mask_ellipse(
arr,
cen_obj['x'],
cen_obj['y'],
cen_obj['a'],
cen_obj['b'],
cen_obj['theta'],
r=large_away_factor) # don't mask the target galaxy too much
for ind, obj in enumerate(obj_cat):
if arr[int(obj['y']), int(obj['x'])] == 1:
segmap[segmap == ind + 1] = 0
smooth_radius = 4
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(filters))
model_frame = scarlet.Frame(
data.images.shape,
wcs=w,
psfs=model_psf,
channels=filters)
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psfs=data.psfs,
weights=data.weights,
channels=filters)
observation = observation.match(model_frame)
# Add sources
from scarlet.initialization import build_initialization_coadd
coadd, bg_cutoff = build_initialization_coadd(observation)
coadd[(seg_mask_large + seg_mask + msk_star.astype(bool))] = 0.0
sources = []
src = obj_cat_ori[cen_indx]
if HSC_zeropoint - 2.5 * np.log10(src['flux']) > 26.5:
# If too faint, single component
new_source = scarlet.source.SingleExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
thresh=0.0,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
else:
new_source = scarlet.source.MultiExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
K=2, # Two components
thresh=0.01,
shifting=False)
sources.append(new_source)
# Visualize our data and mask and source
if not os.path.isdir('./Figures'):
os.mkdir('./Figures/')
fig = kz.display.display_scarlet_sources(
data,
sources,
show_ind=None,
stretch=1,
Q=1,
minimum=-0.3,
show_mark=True,
scale_bar_length=10,
add_text=f'{prefix}-{index}')
plt.savefig(f'./Figures/{prefix}-{index:04d}-img.png', bbox_inches='tight')
# Star fitting!
start = time.time()
blend = scarlet.Blend(sources, observation)
try:
blend.fit(150, 1e-4)
with open(f'./Models/{prefix}-{index:04d}-trained-model.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': 1e-4}], fp)
fp.close()
last_loss = blend.loss[-1]
print(
f'Succeed for e_rel = 1e-4 with {len(blend.loss)} iterations! Try higher accuracy!')
for i, e_rel in enumerate([5e-4, 1e-5, 5e-5, 1e-6]):
blend.fit(150, e_rel)
if len(blend.loss) > 50: # must have more than 50 iterations
recent_loss = np.mean(blend.loss[-10:])
min_loss = np.min(blend.loss[:-10])
if recent_loss < min_loss:
print(
f'Succeed for e_rel = {e_rel} with {len(blend.loss)} iterations! Try higher accuracy!')
with open(f'./Models/{prefix}-{index:04d}-trained-model.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': e_rel}], fp)
fp.close()
elif abs((recent_loss - min_loss) / min_loss) < 0.02:
if recent_loss < last_loss: # better than the saved model
print(
f'I am okay with relative loss difference = {abs((recent_loss - min_loss) / min_loss)}. Fitting stopped.')
with open(f'./Models/{prefix}-{index:04d}-trained-model.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': e_rel}], fp)
fp.close()
break
else:
print(
f'Cannot achieve a global optimization with e_rel = {e_rel}.')
print("Scarlet ran for {1} iterations to logL = {2}".format(
e_rel, len(blend.loss), -blend.loss[-1]))
end = time.time()
print(f'Elapsed time for fitting: {end - start} s')
with open(f"./Models/{prefix}-{index:04d}-trained-model.pkl", "rb") as fp:
blend = pickle.load(fp)[0]
fp.close()
fig = kz.display.display_scarlet_model(
blend,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-fitting.png', bbox_inches='tight')
return blend
except Exception as e:
print(e)
return blend
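# `fitting_less_comp` below runs the full LSBG pipeline on an HSC cutout: it
# downloads image/PSF cutouts, builds star / compact-source / bright-source
# masks, sets up a `scarlet` blend with an extended model for the target plus
# point or extended sources for nearby compact objects, fits with
# progressively tighter `e_rel`, and saves the trained model and figures.
# Hedged usage sketch (assumes `lsbg_cat` is a table with 'Seq', 'RAJ2000' and
# 'DEJ2000' columns and `hsc_dr` is an HSC data-release archive object):
#     for row in lsbg_cat:
#         blend = fitting_less_comp(row, hsc_dr, cutout_halfsize=1.0, prefix='LSBG')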
def fitting_less_comp(lsbg, hsc_dr, cutout_halfsize=1.0, prefix='LSBG', large_away_factor=3.0, compact_away_factor=0.4):
clear_output()
from kuaizi.utils import padding_PSF
kz.utils.set_env(project='HSC', name='HSC_LSBG')
# kz.utils.set_matplotlib(usetex=False, fontsize=15)
index = lsbg['Seq']
lsbg_coord = SkyCoord(ra=lsbg['RAJ2000'], dec=lsbg['DEJ2000'], unit='deg')
if not os.path.isdir('./Images'):
os.mkdir('./Images')
if not os.path.isdir('./PSFs'):
os.mkdir('./PSFs')
size_ang = cutout_halfsize * u.arcmin
channels = 'griz'
cutout = hsc_cutout(
lsbg_coord,
cutout_size=size_ang,
filters=channels,
mask=True,
variance=True,
archive=hsc_dr,
use_saved=True,
output_dir='./Images/',
prefix=f'{prefix}_{index:04d}_img',
save_output=True)
psf_list = hsc_psf(
lsbg_coord,
centered=True,
filters=channels,
img_type='coadd',
verbose=True,
archive=hsc_dr,
save_output=True,
use_saved=True,
prefix=f'{prefix}_{index:04d}_psf',
output_dir='./PSFs/')
channels_list = list(channels)
# Reconstruct the data
images = np.array([hdu[1].data for hdu in cutout])
w = wcs.WCS(cutout[0][1].header) # note: all bands share the same WCS here
filters = channels_list
weights = 1 / np.array([hdu[3].data for hdu in cutout])
psf_pad = padding_PSF(psf_list) # Padding PSF cutouts from HSC
psfs = scarlet.ImagePSF(np.array(psf_pad))
data = Data(images=images, weights=weights,
wcs=w, psfs=psfs, channels=channels)
_, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
w,
pixel_scale=HSC_pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=0.6)
# This vanilla detection with a very low sigma finds the central object and its footprint
obj_cat_ori, segmap_ori, bg_rms = kz.detection.makeCatalog(
[data],
mask=msk_star,
lvl=1.2,
method='vanilla',
convolve=False,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.08,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx]
# print(f'# Central object is #{cen_indx}.')
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# This detection (after blurring the original images) finds the central object and its (estimated) size
obj_cat, segmap_conv, bg_rms = kz.detection.makeCatalog(
[data],
lvl=8,
method='wavelet',
convolve=False,
wavelet_lvl=5,
low_freq_lvl=3,
high_freq_lvl=0,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.01,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_conv = obj_cat[np.argsort(dist)[0]]['index']
# This step masks out HIGH FREQUENCY sources after wavelet transformation
obj_cat, segmap, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2.5,
method='wavelet',
high_freq_lvl=3,
wavelet_lvl=4,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=32,
f=3,
pixel_scale=HSC_pixel_scale,
minarea=5,
deblend_nthresh=30,
deblend_cont=0.05,
sky_subtract=True)
# The footprint of the central object: an ellipse with semi-axes 4 * a and 4 * b
footprint = np.zeros_like(segmap, dtype=bool)
sep.mask_ellipse(footprint, cen_obj['x'], cen_obj['y'],
cen_obj['a'], cen_obj['b'], cen_obj['theta'], r=4.0)
inside_flag = [footprint[item] for item in list(
zip(obj_cat['y'].astype(int), obj_cat['x'].astype(int)))]
for ind in np.where(inside_flag)[0]:
# do not mask compact sources that are near the center of the target galaxy
segmap[segmap == ind + 1] = 0
obj_cat_cpct = obj_cat[inside_flag] # catalog of compact sources
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright, large contaminants that are not well masked in the previous step
obj_cat, segmap, bg_rms = kz.detection.makeCatalog(
[data],
lvl=10,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=40,
f=3,
pixel_scale=0.168,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
arr = np.zeros_like(segmap).astype('uint8')
sep.mask_ellipse(
arr,
cen_obj['x'],
cen_obj['y'],
cen_obj['a'],
cen_obj['b'],
cen_obj['theta'],
r=large_away_factor) # don't mask the target galaxy too much
for ind, obj in enumerate(obj_cat):
if arr[int(obj['y']), int(obj['x'])] == 1:
segmap[segmap == ind + 1] = 0
smooth_radius = 5
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Remove masked objects from the compact-object catalog
catalog_c = SkyCoord(obj_cat_cpct['ra'], obj_cat_cpct['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_cpct.remove_rows(np.where(dist < 2 * u.arcsec)[0])
inside_flag = [
seg_mask_large[item] for item in list(
zip(obj_cat_cpct['y'].astype(int), obj_cat_cpct['x'].astype(int)))
]
obj_cat_cpct.remove_rows(np.where(inside_flag)[0])
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(filters))
model_frame = scarlet.Frame(
data.images.shape,
wcs=w,
psfs=model_psf,
channels=filters)
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psfs=data.psfs,
weights=data.weights,
channels=filters)
observation = observation.match(model_frame)
# Add sources
from scarlet.initialization import build_initialization_coadd
# Filtered coadd removes noise! Very useful for faint objects (but slow)
coadd, bg_cutoff = build_initialization_coadd(
observation, filtered_coadd=True)
coadd[(seg_mask_large + seg_mask + msk_star.astype(bool))] = 0.0
sources = []
src = obj_cat_ori[cen_indx]
if HSC_zeropoint - 2.5 * np.log10(src['flux']) > 26.:
# If too faint, single component
new_source = scarlet.source.SingleExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
thresh=0.001,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
else:
new_source = scarlet.source.MultiExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
K=2, # Two components
thresh=0.01,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
sources.append(new_source)
for k, src in enumerate(obj_cat_cpct): # compact sources
if src['fwhm_custom'] < 5: # src['b'] / src['a'] > 0.9 and
new_source = scarlet.source.PointSource(
model_frame, (src['ra'], src['dec']), observation)
else:
try:
new_source = scarlet.source.SingleExtendedSource(
model_frame, (src['ra'], src['dec']), observation, coadd=coadd, coadd_rms=bg_cutoff)
except Exception:
new_source = scarlet.source.SingleExtendedSource(
model_frame, (src['ra'], src['dec']), observation)
sources.append(new_source)
# Visualize the data, masks, and sources
if not os.path.isdir('./Figures'):
os.mkdir('./Figures/')
fig = kz.display.display_scarlet_sources(
data,
sources,
show_ind=None,
stretch=1,
Q=1,
minimum=-0.3,
show_mark=True,
scale_bar_length=10,
add_text=f'{prefix}-{index}')
plt.savefig(
f'./Figures/{prefix}-{index:04d}-img-less.png', bbox_inches='tight')
# Start fitting!
start = time.time()
blend = scarlet.Blend(sources, observation)
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=True,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-init-less.png', bbox_inches='tight')
try:
blend.fit(150, 1e-4)
with open(f'./Models/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': 1e-4, 'loss': blend.loss[-1]}], fp)
fp.close()
last_loss = blend.loss[-1]
print(
f'Succeed for e_rel = 1e-4 with {len(blend.loss)} iterations! Try higher accuracy!')
for i, e_rel in enumerate([5e-4, 1e-5, 5e-5, 1e-6]):
blend.fit(150, e_rel)
if len(blend.loss) > 50: # must have more than 50 iterations
recent_loss = np.mean(blend.loss[-10:])
min_loss = np.min(blend.loss[:-10])
if recent_loss < min_loss:
print(
f'Succeed for e_rel = {e_rel} with {len(blend.loss)} iterations! Try higher accuracy!')
with open(f'./Models/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
elif abs((recent_loss - min_loss) / min_loss) < 0.02:
if recent_loss < last_loss: # better than the saved model
print(
f'I am okay with relative loss difference = {abs((recent_loss - min_loss) / min_loss)}. Fitting stopped.')
with open(f'./Models/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
break
else:
print(
f'Cannot achieve a global optimization with e_rel = {e_rel}.')
print("Scarlet ran for {1} iterations to logL = {2}".format(
e_rel, len(blend.loss), -blend.loss[-1]))
end = time.time()
print(f'Elapsed time for fitting: {end - start} s')
# with open(f"./Models/{prefix}-{index:04d}-trained-model.pkl", "rb") as fp:
# blend = pickle.load(fp)[0]
# fp.close()
# Find out which components compose the target galaxy
if len(blend.sources) > 1:
mag_mat = np.array(
[-2.5 * np.log10(kz.measure.flux(src)) + 27 for src in sources])
# g - r, g - i, g - z
color_mat = (- mag_mat + mag_mat[:, 0][:, np.newaxis])[:, 1:]
color_dist = np.linalg.norm(
color_mat - color_mat[0], axis=1) / np.linalg.norm(color_mat[0])
# np.argsort(color_dist)[:] #
sed_ind = np.where(color_dist < 0.2)[0]
dist = np.array([
np.linalg.norm(
src.center - blend.sources[0].center) * HSC_pixel_scale
for src in np.array(blend.sources)[sed_ind]
])
dist_flag = (
dist < 3 * np.sqrt(cen_obj['a'] * cen_obj['b']) * HSC_pixel_scale)
point_flag = np.array([
isinstance(src, scarlet.source.PointSource)
for src in np.array(blend.sources)[sed_ind]
])
near_cen_flag = [
(segmap_conv == cen_indx_conv +
1)[int(src.center[1]), int(src.center[0])]
for src in np.array(blend.sources)[sed_ind]
]
sed_ind = sed_ind[(~point_flag) & near_cen_flag]
if 0 not in sed_ind:
# the central source must be included.
sed_ind = np.array(list(set(sed_ind).union({0})))
else:
sed_ind = np.array([0])
print(f'Components {sed_ind} are considered as the target galaxy.')
with open(f'./Models/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}, sed_ind], fp)
fp.close()
fig = kz.display.display_scarlet_model(
blend,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-fitting-less.png', bbox_inches='tight')
fig = kz.display.display_scarlet_model(
blend,
show_ind=sed_ind,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-zoomin-less.png', bbox_inches='tight')
return blend
except Exception as e:
print(e)
return blend
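# `fitting_single_comp` below follows the same detection-and-masking pipeline
# as `fitting_less_comp`, but only the central target is modelled (the loop
# that adds compact neighbours as extra sources is commented out); outputs are
# written with a '-sing' suffix.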
def fitting_single_comp(lsbg, hsc_dr, cutout_halfsize=1.0, prefix='LSBG', large_away_factor=3.0, compact_away_factor=0.4):
clear_output()
from kuaizi.utils import padding_PSF
kz.utils.set_env(project='HSC', name='HSC_LSBG')
# kz.utils.set_matplotlib(usetex=False, fontsize=15)
index = lsbg['Seq']
lsbg_coord = SkyCoord(ra=lsbg['RAJ2000'], dec=lsbg['DEJ2000'], unit='deg')
if not os.path.isdir('./Images'):
os.mkdir('./Images')
if not os.path.isdir('./PSFs'):
os.mkdir('./PSFs')
size_ang = cutout_halfsize * u.arcmin
channels = 'griz'
cutout = hsc_cutout(
lsbg_coord,
cutout_size=size_ang,
filters=channels,
mask=True,
variance=True,
archive=hsc_dr,
use_saved=True,
output_dir='./Images/',
prefix=f'{prefix}_{index:04d}_img',
save_output=True)
psf_list = hsc_psf(
lsbg_coord,
centered=True,
filters=channels,
img_type='coadd',
verbose=True,
archive=hsc_dr,
save_output=True,
use_saved=True,
prefix=f'{prefix}_{index:04d}_psf',
output_dir='./PSFs/')
channels_list = list(channels)
# Reconstruct the data
images = np.array([hdu[1].data for hdu in cutout])
w = wcs.WCS(cutout[0][1].header) # note: all bands share the same WCS here
filters = channels_list
weights = 1 / np.array([hdu[3].data for hdu in cutout])
psf_pad = padding_PSF(psf_list) # Padding PSF cutouts from HSC
psfs = scarlet.ImagePSF(np.array(psf_pad))
data = Data(images=images, weights=weights,
wcs=w, psfs=psfs, channels=channels)
_, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
w,
pixel_scale=HSC_pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=0.6)
# This vanilla detection with a very low sigma finds the central object and its footprint
obj_cat_ori, segmap_ori, bg_rms = kz.detection.makeCatalog(
[data],
lvl=1.2,
method='vanilla',
convolve=False,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.08,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx]
# print(f'# Central object is #{cen_indx}.')
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# This detection (after blurring the original images) finds the central object and its (estimated) size
obj_cat, segmap_conv, bg_rms = kz.detection.makeCatalog(
[data],
lvl=8,
method='wavelet',
convolve=False,
wavelet_lvl=5,
low_freq_lvl=3,
high_freq_lvl=0,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.01,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_conv = obj_cat[np.argsort(dist)[0]]['index']
# This step masks out HIGH FREQUENCY sources after wavelet transformation
obj_cat, segmap, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2.5,
method='wavelet',
high_freq_lvl=3,
wavelet_lvl=4,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=32,
f=3,
pixel_scale=HSC_pixel_scale,
minarea=5,
deblend_nthresh=30,
deblend_cont=0.05,
sky_subtract=True)
# The footprint of the central object: an ellipse with semi-axes 4 * a and 4 * b
footprint = np.zeros_like(segmap, dtype=bool)
sep.mask_ellipse(footprint, cen_obj['x'], cen_obj['y'],
cen_obj['a'], cen_obj['b'], cen_obj['theta'], r=4.0)
inside_flag = [footprint[item] for item in list(
zip(obj_cat['y'].astype(int), obj_cat['x'].astype(int)))]
for ind in np.where(inside_flag)[0]:
# do not mask compact sources that are near the center of the target galaxy
segmap[segmap == ind + 1] = 0
obj_cat_cpct = obj_cat[inside_flag] # catalog of compact sources
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright, large contaminants that are not well masked in the previous step
obj_cat, segmap, bg_rms = kz.detection.makeCatalog(
[data],
lvl=10,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=40,
f=3,
pixel_scale=0.168,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
arr = np.zeros_like(segmap).astype('uint8')
sep.mask_ellipse(
arr,
cen_obj['x'],
cen_obj['y'],
cen_obj['a'],
cen_obj['b'],
cen_obj['theta'],
r=large_away_factor) # don't mask the target galaxy too much
for ind, obj in enumerate(obj_cat):
if arr[int(obj['y']), int(obj['x'])] == 1:
segmap[segmap == ind + 1] = 0
smooth_radius = 5
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Remove masked objects from the compact-object catalog
catalog_c = SkyCoord(obj_cat_cpct['ra'], obj_cat_cpct['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_cpct.remove_rows(np.where(dist < 2 * u.arcsec)[0])
inside_flag = [
seg_mask_large[item] for item in list(
zip(obj_cat_cpct['y'].astype(int), obj_cat_cpct['x'].astype(int)))
]
obj_cat_cpct.remove_rows(np.where(inside_flag)[0])
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(filters))
model_frame = scarlet.Frame(
data.images.shape,
wcs=w,
psfs=model_psf,
channels=filters)
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psfs=data.psfs,
weights=data.weights,
channels=filters)
observation = observation.match(model_frame)
# Add sources
from scarlet.initialization import build_initialization_coadd
# Filtered coadd removes noise! Very useful for faint objects (but slow)
coadd, bg_cutoff = build_initialization_coadd(
observation, filtered_coadd=True)
coadd[(seg_mask_large + seg_mask + msk_star.astype(bool))] = 0.0
sources = []
src = obj_cat_ori[cen_indx]
if HSC_zeropoint - 2.5 * np.log10(src['flux']) > 26.5:
# If too faint, single component
new_source = scarlet.source.SingleExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
thresh=0.001,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
else:
new_source = scarlet.source.MultiExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
K=2, # Two components
thresh=0.01,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
sources.append(new_source)
# for k, src in enumerate(obj_cat_cpct): # compact sources
# if src['fwhm_custom'] < 5: # src['b'] / src['a'] > 0.9 and
# new_source = scarlet.source.PointSource(
# model_frame, (src['ra'], src['dec']), observation)
# else:
# try:
# new_source = scarlet.source.SingleExtendedSource(
# model_frame, (src['ra'], src['dec']), observation, coadd=coadd, coadd_rms=bg_cutoff)
# except:
# new_source = scarlet.source.SingleExtendedSource(
# model_frame, (src['ra'], src['dec']), observation)
# sources.append(new_source)
# Visualize the data, masks, and sources
if not os.path.isdir('./Figures'):
os.mkdir('./Figures/')
fig = kz.display.display_scarlet_sources(
data,
sources,
show_ind=None,
stretch=1,
Q=1,
minimum=-0.3,
show_mark=True,
scale_bar_length=10,
add_text=f'{prefix}-{index}')
plt.savefig(
f'./Figures/{prefix}-{index:04d}-img-sing.png', bbox_inches='tight')
# Start fitting!
start = time.time()
blend = scarlet.Blend(sources, observation)
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=True,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-init-sing.png', bbox_inches='tight')
try:
blend.fit(150, 1e-4)
with open(f'./Models/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': 1e-4, 'loss': blend.loss[-1]}], fp)
fp.close()
last_loss = blend.loss[-1]
print(
f'Succeed for e_rel = 1e-4 with {len(blend.loss)} iterations! Try higher accuracy!')
for i, e_rel in enumerate([5e-4, 1e-5, 5e-5, 1e-6]):
blend.fit(150, e_rel)
if len(blend.loss) > 50: # must have more than 50 iterations
recent_loss = np.mean(blend.loss[-10:])
min_loss = np.min(blend.loss[:-10])
if recent_loss < min_loss:
print(
f'Succeed for e_rel = {e_rel} with {len(blend.loss)} iterations! Try higher accuracy!')
with open(f'./Models/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
elif abs((recent_loss - min_loss) / min_loss) < 0.02:
if recent_loss < last_loss: # better than the saved model
print(
f'I am okay with relative loss difference = {abs((recent_loss - min_loss) / min_loss)}. Fitting stopped.')
with open(f'./Models/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
break
else:
print(
f'Cannot achieve a global optimization with e_rel = {e_rel}.')
print("Scarlet ran for {1} iterations to logL = {2}".format(
e_rel, len(blend.loss), -blend.loss[-1]))
end = time.time()
print(f'Elapsed time for fitting: {end - start} s')
# with open(f"./Models/{prefix}-{index:04d}-trained-model.pkl", "rb") as fp:
# blend = pickle.load(fp)[0]
# fp.close()
# Find out which components compose the target galaxy
# seds = np.array([np.copy(src.parameters[0]) for src in blend.sources])
# corr = np.corrcoef(seds)
# sed_ind = np.argsort(corr[0, :])[::-1] # np.where(corr[0, :] > 0.99)[0]#
# # dist = np.array([
# # np.linalg.norm(src.center - blend.sources[0].center) * HSC_pixel_scale
# # for src in np.array(blend.sources)[sed_ind]
# # ])
# # dist_flag = (dist < 3 * np.sqrt(cen_obj['a'] * cen_obj['b']) * HSC_pixel_scale)
# point_flag = np.array([isinstance(src, scarlet.source.PointSource) for src in np.array(blend.sources)[sed_ind]])
# near_cen_flag = [(segmap_conv == cen_indx_conv + 1)[int(src.center[1]), int(src.center[0])] for src in np.array(blend.sources)[sed_ind]]
# sed_ind = sed_ind[(~point_flag) & near_cen_flag] # & dist_flag]
# if not 0 in sed_ind:
# sed_ind.append(0) # the central source must be included.
# print(f'Components {sed_ind} are considered as the target galaxy.')
# with open(f'./Models/{prefix}-{index:04d}-trained-model.pkl', 'wb') as fp:
# pickle.dump([blend, {'e_rel': e_rel}, sed_ind], fp)
# fp.close()
with open(f'./Models/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
fig = kz.display.display_scarlet_model(
blend,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-fitting-sing.png', bbox_inches='tight')
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-zoomin-sing.png', bbox_inches='tight')
return blend
except Exception as e:
print(e)
return blend
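# `fitting_single_comp_mockgal` below applies the single-component pipeline to
# a mock galaxy (`kuaizi.mock.MockGal`) read from ./Models/MockGalModel/
# instead of a downloaded HSC cutout; trained models are saved under
# ./Models/MockGalScarlet/ and figures under ./Figures/ with a '-sing' suffix.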
def fitting_single_comp_mockgal(index=0, prefix='MockLSBG', large_away_factor=3.0, compact_away_factor=0.4, zp=HSC_zeropoint):
clear_output()
kz.utils.set_env(project='HSC', name='HSC_LSBG')
index = index
from kuaizi.mock import MockGal
mgal = MockGal.read(f'./Models/MockGalModel/{prefix}-{index:04d}.pkl')
print(f'Opening ./Models/MockGalModel/{prefix}-{index:04d}.pkl')
channels = mgal.channels
channels_list = list(channels)
filters = channels_list
lsbg_coord = SkyCoord(
ra=mgal.model.info['ra'], dec=mgal.model.info['dec'], unit='deg')
# Reconstruct the data
images = mgal.mock.images
w = mgal.mock.wcs
weights = 1 / mgal.mock.variances
psfs = scarlet.ImagePSF(np.array(mgal.mock.psfs))
data = Data(images=images, weights=weights,
wcs=w, psfs=psfs, channels=channels)
_, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
w,
pixel_scale=HSC_pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=0.6)
# This vanilla detection with a very low sigma finds the central object and its footprint
obj_cat_ori, segmap_ori, bg_rms = kz.detection.makeCatalog(
[data],
mask=msk_star,
lvl=1.2,
method='vanilla',
convolve=False,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.08,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx]
# print(f'# Central object is #{cen_indx}.')
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# This detection (after blurring the original images) finds the central object and its (estimated) size
obj_cat, segmap_conv, bg_rms = kz.detection.makeCatalog(
[data],
mask=msk_star,
lvl=8,
method='wavelet',
convolve=False,
wavelet_lvl=5,
low_freq_lvl=3,
high_freq_lvl=0,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.01,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_conv = obj_cat[np.argsort(dist)[0]]['index']
# This step masks out HIGH FREQUENCY sources after wavelet transformation
obj_cat, segmap, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2.5,
method='wavelet',
high_freq_lvl=3,
wavelet_lvl=4,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=32,
f=3,
pixel_scale=HSC_pixel_scale,
minarea=5,
deblend_nthresh=30,
deblend_cont=0.05,
sky_subtract=True)
# The footprint of the central object: an ellipse with semi-axes 4 * a and 4 * b
footprint = np.zeros_like(segmap, dtype=bool)
sep.mask_ellipse(footprint, cen_obj['x'], cen_obj['y'],
cen_obj['a'], cen_obj['b'], cen_obj['theta'], r=4.0)
inside_flag = [footprint[item] for item in list(
zip(obj_cat['y'].astype(int), obj_cat['x'].astype(int)))]
for ind in np.where(inside_flag)[0]:
# do not mask compact sources that are near the center of the target galaxy
segmap[segmap == ind + 1] = 0
obj_cat_cpct = obj_cat[inside_flag] # catalog of compact sources
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright, large contaminants that are not well masked in the previous step
obj_cat, segmap, bg_rms = kz.detection.makeCatalog(
[data],
lvl=10,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=40,
f=3,
pixel_scale=0.168,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
arr = np.zeros_like(segmap).astype('uint8')
sep.mask_ellipse(
arr,
cen_obj['x'],
cen_obj['y'],
cen_obj['a'],
cen_obj['b'],
cen_obj['theta'],
r=large_away_factor) # don't mask the target galaxy too much
for ind, obj in enumerate(obj_cat):
if arr[int(obj['y']), int(obj['x'])] == 1:
segmap[segmap == ind + 1] = 0
smooth_radius = 5
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Remove masked objects from the compact-object catalog
catalog_c = SkyCoord(obj_cat_cpct['ra'], obj_cat_cpct['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_cpct.remove_rows(np.where(dist < 2 * u.arcsec)[0])
inside_flag = [
seg_mask_large[item] for item in list(
zip(obj_cat_cpct['y'].astype(int), obj_cat_cpct['x'].astype(int)))
]
obj_cat_cpct.remove_rows(np.where(inside_flag)[0])
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(filters))
model_frame = scarlet.Frame(
data.images.shape,
wcs=w,
psfs=model_psf,
channels=filters)
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psfs=data.psfs,
weights=data.weights,
channels=filters)
observation = observation.match(model_frame)
# Add sources
from scarlet.initialization import build_initialization_coadd
# Filtered coadd removes noise! Very useful for faint objects (but slow)
coadd, bg_cutoff = build_initialization_coadd(
observation, filtered_coadd=True)
coadd[(seg_mask_large + seg_mask + msk_star.astype(bool))] = 0.0
sources = []
src = obj_cat_ori[cen_indx]
if zp - 2.5 * np.log10(src['flux']) > 26.:
# If too faint, single component
new_source = scarlet.source.SingleExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
thresh=0.001,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
else:
new_source = scarlet.source.MultiExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
K=2, # Two components
thresh=0.001,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
sources.append(new_source)
# Visualize the data, masks, and sources
if not os.path.isdir('./Figures'):
os.mkdir('./Figures/')
fig = kz.display.display_scarlet_sources(
data,
sources,
show_ind=None,
stretch=1,
Q=1,
minimum=-0.3,
show_mark=True,
scale_bar_length=10,
add_text=f'{prefix}-{index}')
plt.savefig(
f'./Figures/{prefix}-{index:04d}-img-sing.png', bbox_inches='tight')
# Start fitting!
start = time.time()
blend = scarlet.Blend(sources, observation)
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=True,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-init-sing.png', bbox_inches='tight')
try:
blend.fit(150, 1e-4)
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': 1e-4, 'loss': blend.loss[-1]}], fp)
fp.close()
last_loss = blend.loss[-1]
print(
f'Succeed for e_rel = 1e-4 with {len(blend.loss)} iterations! Try higher accuracy!')
for i, e_rel in enumerate([5e-4, 1e-5, 5e-5, 1e-6]):
blend.fit(150, e_rel)
if len(blend.loss) > 50: # must have more than 50 iterations
recent_loss = np.mean(blend.loss[-10:])
min_loss = np.min(blend.loss[:-10])
if recent_loss < min_loss:
print(
f'Succeed for e_rel = {e_rel} with {len(blend.loss)} iterations! Try higher accuracy!')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
elif abs((recent_loss - min_loss) / min_loss) < 0.02:
if recent_loss < last_loss: # better than the saved model
print(
f'I am okay with relative loss difference = {abs((recent_loss - min_loss) / min_loss)}. Fitting stopped.')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
break
else:
print(
f'Cannot achieve a global optimization with e_rel = {e_rel}.')
print("Scarlet ran for {1} iterations to logL = {2}".format(
e_rel, len(blend.loss), -blend.loss[-1]))
end = time.time()
print(f'Elapsed time for fitting: {end - start} s')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-sing.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
fig = kz.display.display_scarlet_model(
blend,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-fitting-sing.png', bbox_inches='tight')
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-zoomin-sing.png', bbox_inches='tight')
return blend
except Exception as e:
print(e)
return blend
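# `fitting_less_comp_mockgal` below is the mock-galaxy counterpart of
# `fitting_less_comp`: it fits the mock target together with nearby compact
# sources, then selects the components that belong to the galaxy by colour
# similarity (`sed_ind`), saving results with a '-less' suffix under
# ./Models/MockGalScarlet/.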
def fitting_less_comp_mockgal(index=0, prefix='MockLSBG', large_away_factor=3.0, compact_away_factor=0.4, zp=HSC_zeropoint):
clear_output()
kz.utils.set_env(project='HSC', name='HSC_LSBG')
index = index
from kuaizi.mock import MockGal
mgal = MockGal.read(f'./Models/MockGalModel/{prefix}-{index:04d}.pkl')
print(f'Opening ./Models/MockGalModel/{prefix}-{index:04d}.pkl')
channels = mgal.channels
channels_list = list(channels)
filters = channels_list
lsbg_coord = SkyCoord(
ra=mgal.model.info['ra'], dec=mgal.model.info['dec'], unit='deg')
# Reconstruct the data
images = mgal.mock.images
w = mgal.mock.wcs
weights = 1 / mgal.mock.variances
psfs = scarlet.ImagePSF(np.array(mgal.mock.psfs))
data = Data(images=images, weights=weights,
wcs=w, psfs=psfs, channels=channels)
_, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
w,
pixel_scale=HSC_pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=0.6)
# This vanilla detection with a very low sigma finds the central object and its footprint
obj_cat_ori, segmap_ori, bg_rms = kz.detection.makeCatalog(
[data],
mask=msk_star,
lvl=1.2,
method='vanilla',
convolve=False,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.08,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx]
# print(f'# Central object is #{cen_indx}.')
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
# x, y = cen_obj['x'], cen_obj['y']
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# This detection (after blurring the original images) finds the central object and its (estimated) size
obj_cat, segmap_conv, bg_rms = kz.detection.makeCatalog(
[data],
mask=msk_star,
lvl=8,
method='wavelet',
convolve=False,
wavelet_lvl=5,
low_freq_lvl=3,
high_freq_lvl=0,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=128,
f=3,
pixel_scale=0.168,
minarea=20,
deblend_nthresh=30,
deblend_cont=0.01,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_conv = obj_cat[np.argsort(dist)[0]]['index']
# This step masks out HIGH FREQUENCY sources after wavelet transformation
obj_cat, segmap, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2.5,
method='wavelet',
high_freq_lvl=3,
wavelet_lvl=4,
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=32,
f=3,
pixel_scale=HSC_pixel_scale,
minarea=5,
deblend_nthresh=30,
deblend_cont=0.05,
sky_subtract=True)
# The footprint of the central object: an ellipse with semi-axes 4 * a and 4 * b
footprint = np.zeros_like(segmap, dtype=bool)
sep.mask_ellipse(footprint, cen_obj['x'], cen_obj['y'],
cen_obj['a'], cen_obj['b'], cen_obj['theta'], r=4.0)
inside_flag = [footprint[item] for item in list(
zip(obj_cat['y'].astype(int), obj_cat['x'].astype(int)))]
for ind in np.where(inside_flag)[0]:
# do not mask compact sources that are near the center of the target galaxy
segmap[segmap == ind + 1] = 0
obj_cat_cpct = obj_cat[inside_flag] # catalog of compact sources
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright, large contaminants that are not well masked in the previous step
obj_cat, segmap, bg_rms = kz.detection.makeCatalog(
[data],
lvl=10,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=True,
visual_gaia=False,
b=40,
f=3,
pixel_scale=0.168,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.001,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
arr = np.zeros_like(segmap).astype('uint8')
sep.mask_ellipse(
arr,
cen_obj['x'],
cen_obj['y'],
cen_obj['a'],
cen_obj['b'],
cen_obj['theta'],
r=large_away_factor) # don't mask the target galaxy too much
for ind, obj in enumerate(obj_cat):
if arr[int(obj['y']), int(obj['x'])] == 1:
segmap[segmap == ind + 1] = 0
smooth_radius = 5
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Remove objects that are masked from the compact obj catalog
catalog_c = SkyCoord(obj_cat_cpct['ra'], obj_cat_cpct['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_cpct.remove_rows(np.where(dist < 2 * u.arcsec)[0])
inside_flag = [
seg_mask_large[item] for item in list(
zip(obj_cat_cpct['y'].astype(int), obj_cat_cpct['x'].astype(int)))
]
obj_cat_cpct.remove_rows(np.where(inside_flag)[0])
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(filters))
model_frame = scarlet.Frame(
data.images.shape,
wcs=w,
psfs=model_psf,
channels=filters)
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psfs=data.psfs,
weights=data.weights,
channels=filters)
observation = observation.match(model_frame)
# Add sources
from scarlet.initialization import build_initialization_coadd
# Filtered coadd removes noise! Very useful for faint objects (but slow)
coadd, bg_cutoff = build_initialization_coadd(
observation, filtered_coadd=True)
coadd[(seg_mask_large + seg_mask + msk_star.astype(bool))] = 0.0
sources = []
src = obj_cat_ori[cen_indx]
if HSC_zeropoint - 2.5 * np.log10(src['flux']) > 26.:
# If too faint, single component
new_source = scarlet.source.SingleExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
thresh=0.005,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
else:
new_source = scarlet.source.MultiExtendedSource(model_frame, (src['ra'], src['dec']),
observation,
K=2, # Two components
thresh=0.01,
shifting=False,
coadd=coadd,
coadd_rms=bg_cutoff)
sources.append(new_source)
for k, src in enumerate(obj_cat_cpct): # compact sources
if src['fwhm_custom'] < 5: # src['b'] / src['a'] > 0.9 and
new_source = scarlet.source.PointSource(
model_frame, (src['ra'], src['dec']), observation)
else:
try:
new_source = scarlet.source.SingleExtendedSource(
model_frame, (src['ra'], src['dec']), observation, coadd=coadd, coadd_rms=bg_cutoff)
except Exception:
new_source = scarlet.source.SingleExtendedSource(
model_frame, (src['ra'], src['dec']), observation)
sources.append(new_source)
# Visualize the data, masks, and sources
if not os.path.isdir('./Figures'):
os.mkdir('./Figures/')
fig = kz.display.display_scarlet_sources(
data,
sources,
show_ind=None,
stretch=1,
Q=1,
minimum=-0.3,
show_mark=True,
scale_bar_length=10,
add_text=f'{prefix}-{index}')
plt.savefig(
f'./Figures/{prefix}-{index:04d}-img-less.png', bbox_inches='tight')
# Start fitting!
start = time.time()
blend = scarlet.Blend(sources, observation)
fig = kz.display.display_scarlet_model(
blend,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=True,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-init-less.png', bbox_inches='tight')
try:
blend.fit(150, 1e-4)
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump([blend, {'e_rel': 1e-4, 'loss': blend.loss[-1]}], fp)
fp.close()
last_loss = blend.loss[-1]
print(
f'Succeed for e_rel = 1e-4 with {len(blend.loss)} iterations! Try higher accuracy!')
for i, e_rel in enumerate([5e-4, 1e-5, 5e-5, 1e-6]):
blend.fit(150, e_rel)
if len(blend.loss) > 50: # must have more than 50 iterations
recent_loss = np.mean(blend.loss[-10:])
min_loss = np.min(blend.loss[:-10])
if recent_loss < min_loss:
print(
f'Succeed for e_rel = {e_rel} with {len(blend.loss)} iterations! Try higher accuracy!')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
elif abs((recent_loss - min_loss) / min_loss) < 0.02:
if recent_loss < last_loss: # better than the saved model
print(
f'I am okay with relative loss difference = {abs((recent_loss - min_loss) / min_loss)}. Fitting stopped.')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}], fp)
fp.close()
break
else:
print(
f'Cannot achieve a global optimization with e_rel = {e_rel}.')
print("Scarlet ran for {1} iterations to logL = {2}".format(
e_rel, len(blend.loss), -blend.loss[-1]))
end = time.time()
print(f'Elapsed time for fitting: {end - start} s')
# with open(f"./Models/{prefix}-{index:04d}-trained-model.pkl", "rb") as fp:
# blend = pickle.load(fp)[0]
# fp.close()
# Find out which components compose the target galaxy
if len(blend.sources) > 1:
mag_mat = np.array(
[-2.5 * np.log10(kz.measure.flux(src)) + 27 for src in sources])
# g - r, g - i, g - z
color_mat = (- mag_mat + mag_mat[:, 0][:, np.newaxis])[:, 1:]
color_dist = np.linalg.norm(
color_mat - color_mat[0], axis=1) / np.linalg.norm(color_mat[0])
# np.argsort(color_dist)[:] #
sed_ind = np.where(color_dist < 0.2)[0]
dist = np.array([
np.linalg.norm(
src.center - blend.sources[0].center) * HSC_pixel_scale
for src in np.array(blend.sources)[sed_ind]
])
dist_flag = (
dist < 3 * np.sqrt(cen_obj['a'] * cen_obj['b']) * HSC_pixel_scale)
point_flag = np.array([
isinstance(src, scarlet.source.PointSource)
for src in np.array(blend.sources)[sed_ind]
])
near_cen_flag = [
(segmap_conv == cen_indx_conv +
1)[int(src.center[1]), int(src.center[0])]
for src in np.array(blend.sources)[sed_ind]
]
sed_ind = sed_ind[(~point_flag) & near_cen_flag]
if 0 not in sed_ind:
# the central source must be included.
sed_ind = np.array(list(set(sed_ind).union({0})))
else:
sed_ind = np.array([0])
print(f'Components {sed_ind} are considered as the target galaxy.')
with open(f'./Models/MockGalScarlet/{prefix}-{index:04d}-trained-model-less.pkl', 'wb') as fp:
pickle.dump(
[blend, {'e_rel': e_rel, 'loss': blend.loss[-1]}, sed_ind], fp)
fp.close()
fig = kz.display.display_scarlet_model(
blend,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-fitting-less.png', bbox_inches='tight')
fig = kz.display.display_scarlet_model(
blend,
show_ind=sed_ind,
zoomin_size=50,
minimum=-0.3,
stretch=1,
channels='griz',
show_loss=True,
show_mask=True,
show_mark=False,
scale_bar=False)
plt.savefig(
f'./Figures/{prefix}-{index:04d}-zoomin-less.png', bbox_inches='tight')
return blend
except Exception as e:
print(e)
return blend
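# `_fitting_wavelet` below is an internal pipeline that models the target with
# a `scarlet.StarletSource` (starlet/wavelet basis) instead of the extended
# sources used above; the initial Starlet bounding box decides which
# neighbouring detections are kept, masked, or fitted separately.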
def _fitting_wavelet(data, coord, pixel_scale=HSC_pixel_scale, zp=HSC_zeropoint, starlet_thresh=0.8, prefix='mockgal',
index=0, model_dir='./Model', figure_dir='./Figure', show_figure=True, tigress=False):
'''
This is a fitting function for internal use. It fits the galaxy using a Starlet model and applies a mask after fitting.
'''
from scarlet import Starlet
lsbg_coord = coord
# 2 whitespaces before "-", i.e., 4 whitespaces before word
print(' - Detect sources and make mask')
print(' Query GAIA stars...')
gaia_cat, msk_star = kz.utils.gaia_star_mask( # Generate a mask for GAIA bright stars
data.images.mean(axis=0), # averaged image
data.wcs,
pixel_scale=pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=1.5,
tigress=tigress)
# This vanilla detection with a very low sigma finds the central object and its footprint
obj_cat_ori, segmap_ori, bg_rms = kz.detection.makeCatalog(
[data],
lvl=1.2,
mask=msk_star,
method='vanilla',
convolve=False,
match_gaia=False,
show_fig=show_figure,
visual_gaia=False,
b=128,
f=3,
pixel_scale=pixel_scale,
minarea=20,
deblend_nthresh=48,
deblend_cont=0.07,  # deblending threshold; 0.1 was also tried
sky_subtract=True)
catalog_c = SkyCoord(obj_cat_ori['ra'], obj_cat_ori['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_ori = obj_cat_ori[np.argsort(dist)[0]]['index']
cen_obj = obj_cat_ori[cen_indx_ori]
# Better position for cen_obj
x, y, _ = sep.winpos(data.images.mean(
axis=0), cen_obj['x'], cen_obj['y'], 6)
ra, dec = data.wcs.wcs_pix2world(x, y, 0)
cen_obj['x'] = x
cen_obj['y'] = y
cen_obj['ra'] = ra
cen_obj['dec'] = dec
# We roughly guess the box size of the Starlet model
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(data.channels))
model_frame = scarlet.Frame(
data.images.shape,
wcs=data.wcs,
psf=model_psf,
channels=list(data.channels))
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psf=data.psfs,
weights=data.weights,
channels=list(data.channels))
observation = observation.match(model_frame)
cen_obj = obj_cat_ori[cen_indx_ori]
starlet_source = scarlet.StarletSource(model_frame,
(cen_obj['ra'], cen_obj['dec']),
observation,
thresh=0.001,
min_grad=-0.3,  # make the initial guess of the box size as large as possible
starlet_thresh=5e-3)
starlet_extent = kz.display.get_extent(
starlet_source.bbox) # [x1, x2, y1, y2]
# extra enlarge
starlet_extent[0] -= 5
starlet_extent[2] -= 5
starlet_extent[1] += 5
starlet_extent[3] += 5
# Show the Starlet initial box
fig = display_single(data.images.mean(axis=0))
from matplotlib.patches import Rectangle
box_kwargs = {"facecolor": "none", "edgecolor": "w", "lw": 0.5}
rect = Rectangle(
(starlet_extent[0], starlet_extent[2]),
starlet_extent[1] - starlet_extent[0],
starlet_extent[3] - starlet_extent[2],
**box_kwargs
)
ax = plt.gca()
ax.add_patch(rect)
plt.close()
if gaia_cat is not None:
star_flag = [(item[0] > starlet_extent[0]) & (item[0] < starlet_extent[1]) &
(item[1] > starlet_extent[2]) & (
item[1] < starlet_extent[3])
for item in np.asarray(
data.wcs.wcs_world2pix(gaia_cat['ra'], gaia_cat['dec'], 0), dtype=int).T]
# "star_cat" is a catalog for GAIA stars which fall in the Starlet box
star_cat = gaia_cat[star_flag]
_, msk_star = kz.utils.gaia_star_mask( # Generate GAIA mask only for stars outside of the Starlet box
data.images.mean(axis=0),
data.wcs,
gaia_stars=gaia_cat[~np.array(star_flag)],
pixel_scale=pixel_scale,
gaia_bright=19.5,
mask_a=694.7,
mask_b=3.8,
factor_b=1.0,
factor_f=0.6,
tigress=tigress)
else:
star_cat = []
# This step masks out high frequency sources by doing wavelet transformation
obj_cat, segmap_highfreq, bg_rms = kz.detection.makeCatalog([data],
mask=msk_star,
lvl=2., # 2.5
method='wavelet',
high_freq_lvl=2, # 3
wavelet_lvl=4,
match_gaia=False,
show_fig=show_figure,
visual_gaia=False,
b=24,
f=3,
pixel_scale=pixel_scale,
minarea=3,
deblend_nthresh=30,
deblend_cont=0.03,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_highfreq = obj_cat[np.argsort(dist)[0]]['index']
# Don't mask out objects that fall in the segmap of the central object and the Starlet box
segmap = segmap_highfreq.copy()
# overlap_flag is for objects which fall in the footprint of the central galaxy in the first SEP detection
overlap_flag = [(segmap_ori == (cen_indx_ori + 1))[item]
for item in list(zip(obj_cat['y'].astype(int), obj_cat['x'].astype(int)))]
# box_flag is for objects which fall in the initial Starlet box
box_flag = np.unique(
segmap[starlet_extent[2]:starlet_extent[3], starlet_extent[0]:starlet_extent[1]]) - 1
box_flag = np.delete(np.sort(box_flag), 0)
overlap_flag = np.array(overlap_flag)
overlap_flag[box_flag] = True
obj_cat_cpct = obj_cat[overlap_flag]
# Remove the source if it is the central galaxy
if dist[cen_indx_highfreq] < 1 * u.arcsec:
obj_cat_cpct.remove_rows(
np.where(obj_cat_cpct['index'] == cen_indx_highfreq)[0])
for ind in np.where(overlap_flag)[0]:
segmap[segmap == ind + 1] = 0
smooth_radius = 2
gaussian_threshold = 0.03
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask` only masks compact sources
seg_mask = (mask_conv >= gaussian_threshold)
# This step masks out bright, large contaminants that are not well masked in the previous step
obj_cat, segmap_big, bg_rms = kz.detection.makeCatalog(
[data],
lvl=4.5,  # relatively aggressive threshold
method='vanilla',
match_gaia=False,
show_fig=show_figure,
visual_gaia=False,
b=45,
f=3,
pixel_scale=pixel_scale,
minarea=20, # only want large things
deblend_nthresh=30,
deblend_cont=0.02,
sky_subtract=True)
catalog_c = SkyCoord(obj_cat['ra'], obj_cat['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
cen_indx_big = obj_cat[np.argsort(dist)[0]]['index']
# mask out big objects that are NOT identified in the high_freq step
segmap = segmap_big.copy()
box_flag = np.unique(
segmap[starlet_extent[2]:starlet_extent[3], starlet_extent[0]:starlet_extent[1]]) - 1
box_flag = np.delete(np.sort(box_flag), 0)
for ind in box_flag:
segmap[segmap == ind + 1] = 0
box_flag = np.delete(box_flag, np.where(box_flag == cen_indx_big)[
0])  # don't include the central galaxy
obj_cat_big = obj_cat[box_flag]
smooth_radius = 4
gaussian_threshold = 0.01
mask_conv = np.copy(segmap)
mask_conv[mask_conv > 0] = 1
mask_conv = convolve(mask_conv.astype(
float), Gaussian2DKernel(smooth_radius))
# This `seg_mask_large` masks large bright sources
seg_mask_large = (mask_conv >= gaussian_threshold)
# Set weights of masked pixels to zero
for layer in data.weights:
layer[msk_star.astype(bool)] = 0
layer[seg_mask.astype(bool)] = 0
layer[seg_mask_large.astype(bool)] = 0
# Remove compact objects that are too close to the central galaxy
catalog_c = SkyCoord(obj_cat_cpct['ra'], obj_cat_cpct['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_cpct.remove_rows(np.where(dist < 3 * u.arcsec)[0])
# Remove objects that are already masked!
inside_flag = [
seg_mask_large[item] for item in list(
zip(obj_cat_cpct['y'].astype(int), obj_cat_cpct['x'].astype(int)))
]
obj_cat_cpct.remove_rows(np.where(inside_flag)[0])
# Remove big objects that are too close to the target
catalog_c = SkyCoord(obj_cat_big['ra'], obj_cat_big['dec'], unit='deg')
dist = lsbg_coord.separation(catalog_c)
obj_cat_big.remove_rows(np.where(dist < 3 * u.arcsec)[0])
# Remove objects that are already masked!
inside_flag = [
(data.weights[0] == 0)[item] for item in list(
zip(obj_cat_big['y'].astype(int), obj_cat_big['x'].astype(int)))
]
obj_cat_big.remove_rows(np.where(inside_flag)[0])
# Construct `scarlet` frames and observation
from functools import partial
model_psf = scarlet.GaussianPSF(sigma=(0.8,) * len(data.channels))
model_frame = scarlet.Frame(
data.images.shape,
wcs=data.wcs,
psf=model_psf,
channels=list(data.channels))
observation = scarlet.Observation(
data.images,
wcs=data.wcs,
psf=data.psfs,
weights=data.weights,
channels=list(data.channels))
observation = observation.match(model_frame)
sources = []
# Add central Starlet source
src = obj_cat_ori[cen_indx_ori]
# Find a better box, not too large, not too small
for min_grad in np.arange(-0.3, 0.4, 0.05):
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 00:44:48 2020
@author: saagar.parikh
"""
import numpy as np
from matplotlib import pyplot as plt
import cv2
path = "8.png" # Path of original image
img_orig = cv2.imread(path) # Creates an image object
img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY)  # Convert BGR (OpenCV default) to grayscale
#Histogram equalization using OpenCV's built-in function---------------------
equ = cv2.equalizeHist(img)
#performing histogram equalization manually-----------------------------------
origfreq = np.zeros((256,),dtype=np.float16) # vector consisting of initial frequencies of gray scale values
newgrayval = np.zeros((256,),dtype=np.float16)
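# For reference, a minimal vectorized sketch of the usual manual approach
# (histogram -> CDF -> gray-level mapping); it assumes a non-constant 8-bit
# grayscale `img`, and the variable names below are illustrative only.
hist, _ = np.histogram(img.flatten(), bins=256, range=(0, 256))  # counts per gray level
cdf = hist.cumsum()  # cumulative distribution of gray levels
cdf_min = cdf[cdf > 0].min()  # smallest non-zero CDF value
mapping = np.round((cdf - cdf_min) / (img.size - cdf_min) * 255).astype(np.uint8)
equ_manual = mapping[img]  # apply the gray-level mapping pixel-wise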
import numpy as np # pip3 install numpy
import scipy # pip3 install scipy
import scipy.ndimage as snd
import reikna.fft, reikna.cluda # pip3 install pyopencl/pycuda, reikna
from PIL import Image, ImageTk, ImageDraw # pip3 install pillow
try: import tkinter as tk
except: import Tkinter as tk
from fractions import Fraction
import copy, re, itertools, json, csv
import os, sys, subprocess, datetime, time
import warnings
warnings.filterwarnings('ignore', '.*output shape of zoom.*') # suppress warning from snd.zoom()
P2, PIXEL_BORDER = 0,0 # 4,2 3,1 2,1 0,0
X2, Y2 = 9,9 # 10,9 9,8 8,8 1<<9=512
PIXEL = 1 << P2; SIZEX, SIZEY = 1 << (X2-P2), 1 << (Y2-P2)
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1280//PIXEL, 720//PIXEL # 720p HD
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1920//PIXEL, 1080//PIXEL # 1080p HD
MIDX, MIDY = int(SIZEX / 2), int(SIZEY / 2)
DEF_R = max(min(SIZEX, SIZEY) // 4 //5*5, 13)
EPSILON = 1e-10
ROUND = 10
FPS_FREQ = 20
STATUS = []
is_windows = (os.name == 'nt')
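# The `Board` class below stores one pattern/world state for this Lenia-style
# continuous cellular automaton: `names` holds code/name/common-name strings,
# `params` holds the rule parameters (R, T, b, m, s, kn, gn; conventionally
# the kernel radius, time resolution, kernel peaks, growth centre/width and
# kernel/growth type), and `cells` is a 2-D float array in [0, 1]. It also
# provides RLE (de)serialization and rotate/zoom/flip/shift transforms.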
class Board:
def __init__(self, size=[0,0]):
self.names = ['', '', '']
self.params = {'R':DEF_R, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}
self.cells = np.zeros(size)
@classmethod
def from_values(cls, names, params, cells):
self = cls()
self.names = names.copy() if names is not None else None
self.params = params.copy() if params is not None else None
self.cells = cells.copy() if cells is not None else None
return self
@classmethod
def from_data(cls, data):
self = cls()
self.names = [data.get('code',''), data.get('name',''), data.get('cname','')]
self.params = data.get('params')
if self.params:
self.params = self.params.copy()
self.params['b'] = Board.st2fracs(self.params['b'])
self.cells = data.get('cells')
if self.cells:
if type(self.cells) in [tuple, list]:
self.cells = ''.join(self.cells)
self.cells = Board.rle2arr(self.cells)
return self
def to_data(self, is_shorten=True):
rle_st = Board.arr2rle(self.cells, is_shorten)
params2 = self.params.copy()
params2['b'] = Board.fracs2st(params2['b'])
data = {'code':self.names[0], 'name':self.names[1], 'cname':self.names[2], 'params':params2, 'cells':rle_st}
return data
def params2st(self):
params2 = self.params.copy()
params2['b'] = '[' + Board.fracs2st(params2['b']) + ']'
return ','.join(['{}={}'.format(k,str(v)) for (k,v) in params2.items()])
def long_name(self):
# return ' | '.join(filter(None, self.names))
return '{0} - {1} {2}'.format(*self.names)
@staticmethod
def arr2rle(A, is_shorten=True):
''' RLE = Run-length encoding:
http://www.conwaylife.com/w/index.php?title=Run_Length_Encoded
http://golly.sourceforge.net/Help/formats.html#rle
https://www.rosettacode.org/wiki/Run-length_encoding#Python
0=b=. 1=o=A 1-24=A-X 25-48=pA-pX 49-72=qA-qX 241-255=yA-yO '''
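# Worked example (added for illustration): A = [[1.0, 1.0], [1.0, 0.0]] scales to
# values [[255, 255], [255, 0]], i.e. codes [[yO, yO], [yO, .]]; with is_shorten=True
# the trailing '.' is dropped and the resulting RLE string is "2yO$yO!".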
V = np.rint(A*255).astype(int).tolist() # [[255 255] [255 0]]
code_arr = [ [' .' if v==0 else ' '+chr(ord('A')+v-1) if v<25 else chr(ord('p')+(v-25)//24) + chr(ord('A')+(v-25)%24) for v in row] for row in V] # [[yO yO] [yO .]]
if is_shorten:
rle_groups = [ [(len(list(g)),c.strip()) for c,g in itertools.groupby(row)] for row in code_arr] # [[(2 yO)] [(1 yO) (1 .)]]
for row in rle_groups:
if row[-1][1]=='.': row.pop() # [[(2 yO)] [(1 yO)]]
st = '$'.join(''.join([(str(n) if n>1 else '')+c for n,c in row]) for row in rle_groups) + '!' # "2 yO $ 1 yO"
else:
st = '$'.join(''.join(row) for row in code_arr) + '!'
# print(sum(sum(r) for r in V))
return st
@staticmethod
def rle2arr(st):
rle_groups = re.findall('(\d*)([p-y]?[.boA-X$])', st.rstrip('!')) # [(2 yO)(1 $)(1 yO)]
code_list = sum([[c] * (1 if n=='' else int(n)) for n,c in rle_groups], []) # [yO yO $ yO]
code_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]
V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row if c!='' ] for row in code_arr] # [[255 255] [255]]
# lines = st.rstrip('!').split('$')
# rle = [re.findall('(\d*)([p-y]?[.boA-X])', row) for row in lines]
# code = [ sum([[c] * (1 if n=='' else int(n)) for n,c in row], []) for row in rle]
# V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row ] for row in code]
maxlen = len(max(V, key=len))
A = np.array([row + [0] * (maxlen - len(row)) for row in V])/255 # [[1 1] [1 0]]
# print(sum(sum(r) for r in V))
return A
@staticmethod
def fracs2st(B):
return ','.join([str(f) for f in B])
@staticmethod
def st2fracs(st):
return [Fraction(st) for st in st.split(',')]
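# Example (added): st2fracs("1,1/2,2/3") -> [Fraction(1, 1), Fraction(1, 2), Fraction(2, 3)],
# and fracs2st() turns that list back into the string "1,1/2,2/3".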
def clear(self):
self.cells.fill(0)
def add(self, part, shift=[0,0]):
# assert self.params['R'] == part.params['R']
h1, w1 = self.cells.shape
h2, w2 = part.cells.shape
h, w = min(h1, h2), min(w1, w2)
i1, j1 = (w1 - w)//2 + shift[1], (h1 - h)//2 + shift[0]
i2, j2 = (w2 - w)//2, (h2 - h)//2
# self.cells[j:j+h, i:i+w] = part.cells[0:h, 0:w]
vmin = np.amin(part.cells)
for y in range(h):
for x in range(w):
if part.cells[j2+y, i2+x] > vmin:
self.cells[(j1+y)%h1, (i1+x)%w1] = part.cells[j2+y, i2+x]
return self
def transform(self, tx, mode='RZSF', is_world=False):
if 'R' in mode and tx['rotate'] != 0:
self.cells = snd.rotate(self.cells, tx['rotate'], reshape=not is_world, order=0, mode='wrap' if is_world else 'constant')
if 'Z' in mode and tx['R'] != self.params['R']:
# print('* {} / {}'.format(tx['R'], self.params['R']))
shape_orig = self.cells.shape
self.cells = snd.zoom(self.cells, tx['R'] / self.params['R'], order=0)
if is_world:
self.cells = Board(shape_orig).add(self).cells
self.params['R'] = tx['R']
if 'F' in mode and tx['flip'] != -1:
if tx['flip'] in [0,1]: self.cells = np.flip(self.cells, axis=tx['flip'])
elif tx['flip'] == 2: self.cells[:, :-MIDX-1:-1] = self.cells[:, :MIDX]
elif tx['flip'] == 3: self.cells[:, :-MIDX-1:-1] = self.cells[::-1, :MIDX]
if 'S' in mode and tx['shift'] != [0, 0]:
self.cells = snd.shift(self.cells, tx['shift'], order=0, mode='wrap')
# self.cells = np.roll(self.cells, tx['shift'], (1, 0))
return self
def add_transformed(self, part, tx):
part = copy.deepcopy(part)
self.add(part.transform(tx, mode='RZF'), tx['shift'])
return self
def crop(self):
vmin = np.amin(self.cells)
coords = np.argwhere(self.cells > vmin)
y0, x0 = coords.min(axis=0)
y1, x1 = coords.max(axis=0) + 1
self.cells = self.cells[y0:y1, x0:x1]
return self
class Automaton:
kernel_core = {
0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)
1: lambda r: np.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)
2: lambda r, q=1/4: (r>=q)*(r<=1-q), # step (stpz1/4)
3: lambda r, q=1/4: (r>=q)*(r<=1-q) + (r<q)*0.5 # staircase (life)
}
field_func = {
0: lambda n, m, s: np.maximum(0, 1 - (n-m)**2 / (9 * s**2) )**4 * 2 - 1, # polynomial (quad4)
1: lambda n, m, s: np.exp( - (n-m)**2 / (2 * s**2) ) * 2 - 1, # exponential / gaussian (gaus)
2: lambda n, m, s: (np.abs(n-m)<=s) * 2 - 1 # step (stpz)
}
def __init__(self, world):
self.world = world
self.world_FFT = np.zeros(world.cells.shape)
self.potential_FFT = np.zeros(world.cells.shape)
self.potential = np.zeros(world.cells.shape)
self.field = np.zeros(world.cells.shape)
self.field_old = None
self.change = np.zeros(world.cells.shape)
self.X = None
self.Y = None
self.D = None
self.gen = 0
self.time = 0
self.is_multi_step = False
self.is_soft_clip = False
self.is_inverted = False
self.kn = 1
self.gn = 1
self.is_gpu = True
self.has_gpu = True
self.compile_gpu(self.world.cells)
self.calc_kernel()
def kernel_shell(self, r):
k = len(self.world.params['b'])
kr = k * r
bs = np.array([float(f) for f in self.world.params['b']])
b = bs[np.minimum(np.floor(kr).astype(int), k-1)]
kfunc = Automaton.kernel_core[(self.world.params.get('kn') or self.kn) - 1]
return (r<1) * kfunc(np.minimum(kr % 1, 1)) * b
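# Note (added): the shell is built from k = len(b) concentric rings inside radius 1;
# b[i] weights ring i and the selected kernel_core function shapes each ring's profile.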
@staticmethod
def soft_max(x, m, k):
''' Soft maximum: https://www.johndcook.com/blog/2010/01/13/soft-maximum/ '''
return np.log(np.exp(k*x) + np.exp(k*m)) / k
@staticmethod
def soft_clip(x, min, max, k):
a = np.exp(k*x)
b = np.exp(k*min)
c = np.exp(-k*max)
return np.log( 1/(a+b)+c ) / -k
# return Automaton.soft_max(Automaton.soft_max(x, min, k), max, -k)
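# Note (added): for large k, soft_clip(x, 0, 1, k) behaves like np.clip(x, 0, 1) while
# staying smooth, which is what the is_soft_clip update mode in calc_once() relies on.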
def compile_gpu(self, A):
''' Reikna: http://reikna.publicfields.net/en/latest/api/computations.html '''
self.gpu_api = self.gpu_thr = self.gpu_fft = self.gpu_fftshift = None
try:
self.gpu_api = reikna.cluda.any_api()
self.gpu_thr = self.gpu_api.Thread.create()
self.gpu_fft = reikna.fft.FFT(A.astype(np.complex64)).compile(self.gpu_thr)
self.gpu_fftshift = reikna.fft.FFTShift(A.astype(np.float32)).compile(self.gpu_thr)
except Exception as exc:
# if str(exc) == "No supported GPGPU APIs found":
self.has_gpu = False
self.is_gpu = False
print(exc)
# raise exc
def run_gpu(self, A, cpu_func, gpu_func, dtype, **kwargs):
if self.is_gpu and self.gpu_thr and gpu_func:
op_dev = self.gpu_thr.to_device(A.astype(dtype))
gpu_func(op_dev, op_dev, **kwargs)
return op_dev.get()
else:
return cpu_func(A)
# return np.roll(potential_shifted, (MIDX, MIDY), (1, 0))
def fft(self, A): return self.run_gpu(A, np.fft.fft2, self.gpu_fft, np.complex64)
def ifft(self, A): return self.run_gpu(A, np.fft.ifft2, self.gpu_fft, np.complex64, inverse=True)
def fftshift(self, A): return self.run_gpu(A, np.fft.fftshift, self.gpu_fftshift, np.float32)
def calc_once(self):
A = self.world.cells
self.world_FFT = self.fft(A)
self.potential_FFT = self.kernel_FFT * self.world_FFT
self.potential = self.fftshift(np.real(self.ifft(self.potential_FFT)))
gfunc = Automaton.field_func[(self.world.params.get('gn') or self.gn) - 1]
self.field = gfunc(self.potential, self.world.params['m'], self.world.params['s'])
dt = 1 / self.world.params['T']
if self.is_multi_step and self.field_old:
D = 1/2 * (3 * self.field - self.field_old)
self.field_old = self.field.copy()
else:
D = self.field
if not self.is_soft_clip:
A_new = np.clip(A + dt * D, 0, 1) # A_new = A + dt * np.clip(D, -A/dt, (1-A)/dt)
else:
A_new = Automaton.soft_clip(A + dt * D, 0, 1, 1/dt) # A_new = A + dt * Automaton.soft_clip(D, -A/dt, (1-A)/dt, 1)
self.change = (A_new - A) / dt
self.world.cells = A_new
self.gen += 1
self.time = round(self.time + dt, ROUND)
if self.is_gpu:
self.gpu_thr.synchronize()
def calc_kernel(self):
I, J = np.meshgrid(np.arange(SIZEX), np.arange(SIZEY))
self.X = (I - MIDX) / self.world.params['R']
self.Y = (J - MIDY) / self.world.params['R']
self.D = np.sqrt(self.X**2 + self.Y**2)
self.kernel = self.kernel_shell(self.D)
self.kernel_sum = np.sum(self.kernel)
kernel_norm = self.kernel / self.kernel_sum
self.kernel_FFT = self.fft(kernel_norm)
self.kernel_updated = False
def reset(self):
self.gen = 0
self.time = 0
self.field_old = None
class Analyzer:
STAT_NAMES = {'p_m':'Param m', 'p_s':'Param s', 'n':'Gen (#)', 't':'Time (s)',
'm':'Mass (mg)', 'g':'Growth (mg/s)', 'I':'Moment of inertia',
'd':'Mass-growth distance (mm)', 's':'Speed (mm/s)', 'w':'Angular speed (deg/s)', 'm_a':'Mass asymmetry (mg)'}
# 'a':'Semi-major axis (mm)', 'b':'Semi-minor axis (mm)', 'e':'Eccentricity', 'c':'Compactness', 'w_th':'Shape angular speed (deg/s)'}
STAT_HEADERS = ['p_m', 'p_s', 'n', 't', 'm', 'g', 'I', 'd', 's', 'w', 'm_a']
# , 'a', 'b', 'e', 'c', 'w_th']
SEGMENT_LEN = 200
def __init__(self, automaton):
self.automaton = automaton
self.world = self.automaton.world
# self.aaa = self.world.cells
self.reset()
def reset(self):
self.is_empty = False
self.is_full = False
self.mass = 0
self.growth = 0
self.inertia = 0
self.m_last_center = None
self.m_center = None
self.g_center = None
self.mg_dist = 0
self.m_shift = 0
self.m_last_angle = None
self.m_angle = 0
self.m_rotate = 0
self.mass_asym = 0
# self.shape_major_axis = 0
# self.shape_minor_axis = 0
# self.shape_eccentricity = 0
# self.shape_compactness = 0
# self.shape_last_angle = None
# self.shape_angle = 0
# self.shape_rotate = 0
self.series = []
self.last_shift_idx = np.zeros(2)
self.total_shift_idx = np.zeros(2)
self.is_clip_segment = True
def calc_stat(self):
R, T = [self.world.params[k] for k in ('R', 'T')]
A = self.world.cells
G = np.maximum(self.automaton.field, 0)
h, w = A.shape
X, Y = self.automaton.X, self.automaton.Y
m00 = self.mass = np.sum(A)
g00 = self.growth = np.sum(G)
self.is_empty = (self.mass < EPSILON)
self.is_full = (np.sum(A[0,:]) + np.sum(A[h-1,:]) + np.sum(A[:,0]) + np.sum(A[:,w-1]) > 0)
self.m_last_center = self.m_center
self.m_last_angle = self.m_angle
# self.shape_last_angle = self.shape_angle
self.inertia = 0
self.m_center = None
self.g_center = None
self.mg_dist = 0
self.m_shift = 0
self.m_angle = 0
self.m_rotate = 0
self.mass_asym = 0
# self.shape_major_axis = 0
# self.shape_minor_axis = 0
# self.shape_eccentricity = 0
# self.shape_compactness = 0
# self.shape_angle = 0
# self.shape_rotate = 0
if m00 > EPSILON:
AX, AY = A*X, A*Y
m01, m10 = np.sum(AX), np.sum(AY)
m02, m20 = np.sum(AX*X), np.sum(AY*Y)
mx, my = self.m_center = np.array([m01, m10]) / m00
mu02, mu20 = m02 - my * m01, m20 - mx * m10
self.inertia = (mu20 + mu02) / m00**2
# m11 = np.sum(AY*X)
# mu11 = m11 - mx * m01
# m1 = mu20 + mu02
# m2 = mu20 - mu02
# m3 = 2 * mu11
# t1 = m1 / 2 / m00
# t2 = np.sqrt(m2**2 + m3**2) / 2 / m00
# self.shape_major_axis = t1 + t2
# self.shape_minor_axis = t1 - t2
# self.shape_eccentricity = np.sqrt(1 - self.shape_minor_axis / self.shape_major_axis)
# self.shape_compactness = m00 / (mu20 + mu02)
# self.shape_angle = np.degrees(np.arctan2(m2, m3))
# if self.shape_last_angle is not None:
# self.shape_rotate = self.shape_angle - self.shape_last_angle
# self.shape_rotate = (self.shape_rotate + 540) % 360 - 180
if g00 > EPSILON:
g01, g10 = np.sum(G*X), np.sum(G*Y)
gx, gy = self.g_center = np.array([g01, g10]) / g00
self.mg_dist = np.linalg.norm(self.m_center - self.g_center)
if self.m_last_center is not None and self.m_last_angle is not None:
dm = self.m_center - self.m_last_center + self.last_shift_idx / R
self.m_shift = np.linalg.norm(dm)
self.m_angle = np.degrees(np.arctan2(dm[1], dm[0])) if self.m_shift >= EPSILON else 0
self.m_rotate = self.m_angle - self.m_last_angle
self.m_rotate = (self.m_rotate + 540) % 360 - 180
if self.automaton.gen <= 2:
self.m_rotate = 0
midpoint = np.array([MIDX, MIDY])
X, Y = np.meshgrid(np.arange(SIZEX), np.arange(SIZEY))
x0, y0 = self.m_last_center * R + midpoint - self.last_shift_idx
x1, y1 = self.m_center * R + midpoint
sign = (x1 - x0) * (Y - y0) - (y1 - y0) * (X - x0)
self.mass_asym = np.sum(A[sign>0]) - np.sum(A[sign<0])
# self.aaa = A.copy(); self.aaa[sign<0] = 0
def stat_name(self, i=None, x=None):
if not x: x = self.STAT_HEADERS[i]
return '{0}={1}'.format(x, self.STAT_NAMES[x])
def new_segment(self):
if self.series == [] or self.series[-1] != []:
self.series.append([])
def clear_segment(self):
if self.series != []:
if self.series[-1] == []:
self.series.pop()
if self.series != []:
self.series[-1] = []
def clear_series(self):
self.series = []
def add_stat(self):
R, T, pm, ps = [self.world.params[k] for k in ('R', 'T', 'm', 's')]
v = [pm, ps, self.automaton.gen, self.automaton.time,
self.mass/R/R, self.growth/R/R, self.inertia,
self.m_shift*T, self.m_rotate*T, self.mg_dist, self.mass_asym/R/R]
# self.shape_major_axis, self.shape_minor_axis,
# self.shape_eccentricity, self.shape_compactness, self.shape_rotate]
if self.series == []:
self.new_segment()
segment = self.series[-1]
segment.append(v)
if self.is_clip_segment:
while len(segment) > self.SEGMENT_LEN:
segment.pop(0)
def center_world(self):
if self.mass < EPSILON or self.m_center is None:
return
self.last_shift_idx = (self.m_center * self.world.params['R']).astype(int)
self.world.cells = np.roll(self.world.cells, -self.last_shift_idx, (1, 0))
self.total_shift_idx += self.last_shift_idx
def recurrence_plot(self, e=0.1, steps=10):
''' https://stackoverflow.com/questions/33650371/recurrence-plot-in-python '''
d = scipy.spatial.distance.pdist(self.series[:, None])
d = np.floor(d/e)
d[d>steps] = steps
Z = scipy.spatial.distance.squareform(d)
return Z
class Recorder:
RECORD_ROOT = 'record'
FRAME_EXT = '.png'
VIDEO_EXT = '.mov'
GIF_EXT = '.gif'
ANIM_FPS = 25
ffmpeg_cmd = ['/usr/local/bin/ffmpeg',
'-loglevel','warning', '-y', # glocal options
'-f','rawvideo', '-vcodec','rawvideo', '-pix_fmt','rgb24', # input options
'-s','{}x{}'.format(SIZEX*PIXEL, SIZEY*PIXEL), '-r',str(ANIM_FPS),
'-i','{input}', # input pipe
# '-an', '-vcodec','h264', '-pix_fmt','yuv420p', '-crf','1', # output options
'-an', '-vcodec','copy', # output options
'{output}'] # ouput file
def __init__(self, world):
self.world = world
self.is_recording = False
self.is_save_frames = False
self.record_id = None
self.record_seq = None
self.img_dir = None
self.video_path = None
self.video = None
self.gif_path = None
self.gif = None
def toggle_recording(self, is_save_frames=False):
self.is_save_frames = is_save_frames
if not self.is_recording:
self.start_record()
else:
self.finish_record()
def start_record(self):
global STATUS
''' https://trac.ffmpeg.org/wiki/Encode/H.264
https://trac.ffmpeg.org/wiki/Slideshow '''
self.is_recording = True
STATUS.append("> start " + ("saving frames" if self.is_save_frames else "recording video") + " and GIF...")
self.record_id = '{}-{}'.format(self.world.names[0].split('(')[0], datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f'))
self.record_seq = 1
self.video_path = os.path.join(self.RECORD_ROOT, self.record_id + self.VIDEO_EXT)
self.gif_path = os.path.join(self.RECORD_ROOT, self.record_id + self.GIF_EXT)
self.img_dir = os.path.join(self.RECORD_ROOT, self.record_id)
if self.is_save_frames:
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
else:
cmd = [s.replace('{input}', '-').replace('{output}', self.video_path) for s in self.ffmpeg_cmd]
try:
self.video = subprocess.Popen(cmd, stdin=subprocess.PIPE) # stderr=subprocess.PIPE
except FileNotFoundError:
self.video = None
STATUS.append("> no ffmpeg program found!")
self.gif = []
def save_image(self, img, filename=None):
self.record_id = '{}-{}'.format(self.world.names[0].split('(')[0], datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f'))
img_path = filename + self.FRAME_EXT if filename else os.path.join(self.RECORD_ROOT, self.record_id + self.FRAME_EXT)
img.save(img_path)
def record_frame(self, img):
if self.is_save_frames:
img_path = os.path.join(self.RECORD_ROOT, self.record_id, '{:03d}'.format(self.record_seq) + self.FRAME_EXT)
img.save(img_path)
else:
if self.video:
img_rgb = img.convert('RGB').tobytes()
self.video.stdin.write(img_rgb)
self.gif.append(img)
self.record_seq += 1
def finish_record(self):
global STATUS
if self.is_save_frames:
STATUS.append("> frames saved to '" + self.img_dir + "/*" + self.FRAME_EXT + "'")
cmd = [s.replace('{input}', os.path.join(self.img_dir, '%03d'+self.FRAME_EXT)).replace('{output}', self.video_path) for s in self.ffmpeg_cmd]
try:
subprocess.call(cmd)
except FileNotFoundError:
self.video = None
STATUS.append("> no ffmpeg program found!")
else:
if self.video:
self.video.stdin.close()
STATUS.append("> video saved to '" + self.video_path + "'")
self.gif[0].save(self.gif_path, format=self.GIF_EXT.lstrip('.'), save_all=True, append_images=self.gif[1:], loop=0, duration=1000//self.ANIM_FPS)
self.gif = None
STATUS.append("> GIF saved to '" + self.gif_path + "'")
self.is_recording = False
class Lenia:
MARKER_COLORS = [95,95,95,127,127,127,255,255,255]
def __init__(self):
self.is_run = True
self.is_once = False
self.is_show = True
self.is_closing = False
self.show_what = 0
self.is_show_markers = True
self.stats_mode = 0
self.stat_x = 4
self.stat_y = 5
self.is_show_fps = False
self.fps = None
self.last_time = None
self.fore = None
self.back = None
self.is_layered = False
self.is_auto_center = False
self.is_auto_load = False
self.trace_dir = 0
self.trace_small = False
''' http://hslpicker.com/ '''
self.colormaps = [
self.create_colormap(np.array([[0,0,4],[0,0,8],[0,4,8],[0,8,8],[4,8,4],[8,8,0],[8,4,0],[8,0,0],[4,0,0]])), #BCYR
self.create_colormap(np.array([[0,2,0],[0,4,0],[4,6,0],[8,8,0],[8,4,4],[8,0,8],[4,0,8],[0,0,8],[0,0,4]])), #GYPB
self.create_colormap(np.array([[4,0,2],[8,0,4],[8,0,6],[8,0,8],[4,4,4],[0,8,0],[0,6,0],[0,4,0],[0,2,0]])), #PPGG
self.create_colormap(np.array([[4,4,6],[2,2,4],[2,4,2],[4,6,4],[6,6,4],[4,2,2]])), #BGYR
self.create_colormap(np.array([[4,6,4],[2,4,2],[4,4,2],[6,6,4],[6,4,6],[2,2,4]])), #GYPB
self.create_colormap(np.array([[6,6,4],[4,4,2],[4,2,4],[6,4,6],[4,6,6],[2,4,2]])), #YPCG
self.create_colormap(np.array([[0,0,0],[3,3,3],[4,4,4],[5,5,5],[8,8,8]]))] #B/W
self.colormap_id = 0
self.set_colormap()
self.last_key = None
self.excess_key = None
self.update = None
self.clear_job = None
self.is_save_image = False
self.read_animals()
self.world = Board((SIZEY, SIZEX))
self.automaton = Automaton(self.world)
self.analyzer = Analyzer(self.automaton)
self.recorder = Recorder(self.world)
self.clear_transform()
self.create_window()
self.create_menu()
def clear_transform(self):
self.tx = {'shift':[0, 0], 'rotate':0, 'R':self.world.params['R'], 'flip':-1}
def read_animals(self):
with open('animals.json', encoding='utf-8') as file:
self.animal_data = json.load(file)
def load_animal_id(self, id, **kwargs):
self.animal_id = max(0, min(len(self.animal_data)-1, id))
self.load_part(Board.from_data(self.animal_data[self.animal_id]), **kwargs)
def load_animal_code(self, code, **kwargs):
if not code: return
id = self.get_animal_id(code)
if id: self.load_animal_id(id, **kwargs)
def get_animal_id(self, code):
code_sp = code.split(':')
n = int(code_sp[1]) if len(code_sp)==2 else 1
it = (id for (id, data) in enumerate(self.animal_data) if data["code"]==code_sp[0])
for i in range(n):
id = next(it, None)
return id
def load_part(self, part, is_replace=True, is_random=False, is_auto_load=False, repeat=1):
self.fore = part
if part.names[0].startswith('~'):
part.names[0] = part.names[0].lstrip('~')
self.world.params['R'] = part.params['R']
self.automaton.calc_kernel()
if is_replace:
self.world.names = part.names.copy()
if part.params is not None and part.cells is not None:
is_life = ((self.world.params.get('kn') or self.automaton.kn) == 4)
will_be_life = ((part.params.get('kn') or self.automaton.kn) == 4)
if not is_life and will_be_life:
self.colormap_id = len(self.colormaps) - 1
self.win.title('Conway\'s Game of Life')
elif is_life and not will_be_life:
self.colormap_id = 0
self.world.params['R'] = DEF_R
self.automaton.calc_kernel()
self.win.title('Lenia')
if self.is_layered:
self.back = copy.deepcopy(self.world)
if is_replace and not self.is_layered:
if not is_auto_load:
self.world.params = {**part.params, 'R':self.world.params['R']}
self.automaton.calc_kernel()
self.world.clear()
self.automaton.reset()
if not is_auto_load:
self.analyzer.reset()
self.clear_transform()
for i in range(repeat):
if is_random:
self.tx['rotate'] = np.random.random() * 360
h1, w1 = self.world.cells.shape
h, w = min(part.cells.shape, self.world.cells.shape)
self.tx['shift'] = [np.random.randint(d1 + d) - d1//2 for (d,d1) in [(h,h1), (w,w1)]]
self.tx['flip'] = np.random.randint(3) - 1
self.world.add_transformed(part, self.tx)
def check_auto_load(self):
if self.is_auto_load:
self.load_part(self.fore, is_auto_load=True)
def transform_world(self):
if self.is_layered:
self.world.cells = self.back.cells.copy()
self.world.params = self.back.params.copy()
self.world.transform(self.tx, mode='Z', is_world=True)
self.world.add_transformed(self.fore, self.tx)
else:
if not self.is_run:
if self.back is None:
self.back = copy.deepcopy(self.world)
else:
self.world.cells = self.back.cells.copy()
self.world.params = self.back.params.copy()
self.world.transform(self.tx, is_world=True)
self.automaton.calc_kernel()
def clear_world(self):
self.world.clear()
if self.is_layered:
self.back = copy.deepcopy(self.world)
self.automaton.reset()
self.analyzer.reset()
def random_world(self):
self.world.clear()
border = self.world.params['R']
rand = np.random.rand(SIZEY - border*2, SIZEX - border*2)
self.world.add(Board.from_values(None, None, rand))
if self.is_layered:
self.back = copy.deepcopy(self.world)
self.automaton.reset()
self.analyzer.reset()
def toggle_trace(self, dir, small):
if self.trace_dir == 0:
self.trace_dir = dir
self.trace_small = small
self.is_auto_center = True
self.is_auto_load = True
else:
self.trace_dir = 0
def stop_trace(self):
self.trace_dir = 0
def trace_params(self):
s = 's+' if self.trace_small else ''
if self.trace_dir == +1:
if self.analyzer.is_empty: self.key_press_internal(s+'w')
elif self.analyzer.is_full: self.key_press_internal(s+'q')
elif self.trace_dir == -1:
if self.analyzer.is_empty: self.key_press_internal(s+'a')
elif self.analyzer.is_full: self.key_press_internal(s+'s')
def create_window(self):
self.win = tk.Tk()
self.win.title('Lenia')
self.win.bind('<Key>', self.key_press_event)
self.frame = tk.Frame(self.win, width=SIZEX*PIXEL, height=SIZEY*PIXEL)
self.frame.pack()
self.canvas = tk.Canvas(self.frame, width=SIZEX*PIXEL, height=SIZEY*PIXEL)
self.canvas.place(x=-1, y=-1)
self.panel1 = self.create_panel(0, 0)
# self.panel2 = self.create_panel(1, 0)
# self.panel3 = self.create_panel(0, 1)
# self.panel4 = self.create_panel(1, 1)
self.info = tk.Label(self.win)
self.info.pack()
def create_panel(self, c, r):
buffer = np.uint8(np.zeros((SIZEY*PIXEL,SIZEX*PIXEL)))
img = Image.frombuffer('P', (SIZEX*PIXEL,SIZEY*PIXEL), buffer, 'raw', 'P', 0, 1)
photo = ImageTk.PhotoImage(image=img)
return self.canvas.create_image(c*SIZEY, r*SIZEX, image=photo, anchor=tk.NW)
def create_colormap(self, colors):
nval = 256 - 3
ncol = colors.shape[0]
colors = np.vstack((colors, np.array([[0,0,0]])))
v = np.repeat(range(nval), 3) # [0 0 0 1 1 1 ... 252 252 252]
i = np.array(list(range(3)) * nval) # [0 1 2 0 1 2 ... 0 1 2]
k = v / (nval-1) * (ncol-1) # interpolate between 0 .. ncol-1
k1 = k.astype(int)
c1, c2 = colors[k1,i], colors[k1+1,i]
c = (k-k1) * (c2-c1) + c1 # interpolate between c1 .. c2
return np.rint(c / 8 * 255).astype(int).tolist() + self.MARKER_COLORS
def set_colormap(self):
self.colormap_demo = np.tile(np.arange(SIZEX), (1, SIZEY)) / SIZEX
SHOW_WHAT_NUM = 7
def update_win(self):
if self.stats_mode in [0, 3]:
change_range = 1 if not self.automaton.is_soft_clip else 1.4
if self.show_what==0: self.draw_world(self.world.cells, 0, 1, is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])
# if self.show_what==0: self.draw_world(self.analyzer.aaa, 0, 1, is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])
elif self.show_what==1: self.draw_world(self.automaton.potential, 0, 2*self.world.params['m'], is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])
elif self.show_what==2: self.draw_world(self.automaton.field, -1, 1, is_shift=True, markers=['arrow','ruler','grid'])
elif self.show_what==3: self.draw_world(self.automaton.change, -change_range, change_range, is_shift=True, markers=['arrow','ruler','grid'])
elif self.show_what==4: self.draw_world(self.automaton.kernel, 0, 1, markers=['ruler','fixgrid'])
elif self.show_what==5: self.draw_world(self.automaton.fftshift(np.log(np.absolute(self.automaton.world_FFT))), 0, 5)
elif self.show_what==6: self.draw_world(self.automaton.fftshift(np.log(np.absolute(self.automaton.potential_FFT))), 0, 5)
elif self.show_what==7: self.draw_world(self.colormap_demo, 0, 1)
self.img.putpalette(self.colormaps[self.colormap_id])
elif self.stats_mode in [1, 2]:
self.draw_black()
if self.stats_mode in [1, 2, 3]:
self.draw_stats()
if self.recorder.is_recording and self.is_run:
self.recorder.record_frame(self.img)
if self.is_save_image:
self.recorder.save_image(self.img, filename='saved')
self.is_save_image = False
photo = ImageTk.PhotoImage(image=self.img)
# photo = tk.PhotoImage(width=SIZEX, height=SIZEY)
self.canvas.itemconfig(self.panel1, image=photo)
self.win.update()
def draw_world(self, A, vmin=0, vmax=1, is_shift=False, is_shift_zero=False, markers=[]):
if is_shift and not self.is_auto_center:
A = np.roll(A, self.analyzer.total_shift_idx.astype(int), (1, 0))
if is_shift_zero and self.automaton.is_soft_clip:
if vmin==0: vmin = np.amin(A)
buffer = np.uint8(np.clip((A-vmin) / (vmax-vmin), 0, 1) * 252) # .copy(order='C')
if self.is_show_markers and ('grid' in markers or 'fixgrid' in markers):
self.draw_grid(buffer, is_fixed='fixgrid' in markers)
buffer = np.repeat(np.repeat(buffer, PIXEL, axis=0), PIXEL, axis=1)
zero = np.uint8(np.clip((0-vmin) / (vmax-vmin), 0, 1) * 252)
for i in range(PIXEL_BORDER):
buffer[i::PIXEL, :] = zero; buffer[:, i::PIXEL] = zero
self.img = Image.frombuffer('P', (SIZEX*PIXEL,SIZEY*PIXEL), buffer, 'raw', 'P', 0, 1)
if self.is_show_markers and ('arrow' in markers or 'ruler' in markers):
self.draw_markers(markers)
def draw_black(self):
size = (SIZEX*PIXEL,SIZEY*PIXEL)
self.img = Image.frombuffer('L', size, np.zeros(size), 'raw', 'L', 0, 1)
def draw_grid(self, buffer, is_fixed=False):
R = self.world.params['R']
n = R // 40 if R >= 15 else -1
for i in range(-n, n+1):
sx, sy = 0, 0
if self.is_auto_center and not is_fixed:
sx, sy = (self.analyzer.total_shift_idx).astype(int)
grid = buffer[(MIDY - sy + i) % R:SIZEY:R, (MIDX - sx) % R:SIZEX:R]; grid[grid==0] = 253
grid = buffer[(MIDY - sy) % R:SIZEY:R, (MIDX - sx + i) % R:SIZEX:R]; grid[grid==0] = 253
def draw_markers(self, markers=[]):
R, T = [self.world.params[k] for k in ('R', 'T')]
midpoint = np.array([MIDX, MIDY])
draw = ImageDraw.Draw(self.img)
d2 = np.array([1, 1]) * 2
if 'arrow' in markers and self.analyzer.m_last_center is not None and self.analyzer.m_center is not None:
shift = self.analyzer.total_shift_idx if not self.is_auto_center else np.zeros(2)
m0 = self.analyzer.m_last_center * R + midpoint + shift - self.analyzer.last_shift_idx
m1 = self.analyzer.m_center * R + midpoint + shift
ms = m1 % np.array([SIZEX, SIZEY]) - m1
m2, m3 = [m0 + (m1 - m0) * n * T for n in [1,2]]
for i in range(-1, 2):
for j in range(-1, 2):
adj = np.array([i*SIZEX, j*SIZEY]) + ms
draw.line(tuple((m0+adj)*PIXEL) + tuple((m3+adj)*PIXEL), fill=254, width=1)
[draw.ellipse(tuple((m+adj-d2)*PIXEL) + tuple((m+adj+d2)*PIXEL), fill=c) for (m,c) in [(m0,254),(m1,255),(m2,255),(m3,255)]]
if 'ruler' in markers:
x0, y0 = SIZEX*PIXEL-20, SIZEY*PIXEL-20
x1, x2, x3, y1, y2 = x0-10-R*PIXEL, x0-10-R*PIXEL//2, x0-10, y0+3, y0+8
draw.text((x0, y0), 'R', fill=254)
draw.line([(x1,y1),(x1,y2),(x2,y2),(x2,y1),(x2,y2),(x3,y2),(x3,y1)], fill=254, width=1)
del draw
def draw_stats(self):
draw = ImageDraw.Draw(self.img)
series = self.analyzer.series
if series != [] and self.stats_mode in [2, 3]:
series = [series[-1]]
if series != [] and series != [[]]:
X = [[v[self.stat_x] for v in s] for s in series]
Y = [[v[self.stat_y] for v in s] for s in series]
# if self.stat_x in [0,1] or self.stat_y in [0,1]:
# X, Y = X[-100:], Y[-100:]
if self.stat_x in [2, 3]: X = [[x - min(s) for x in s] for s in X]
if self.stat_y in [2, 3]: Y = [[y - min(s) for y in s] for s in Y]
xmin, xmax = min(min(s) for s in X if s), max(max(s) for s in X if s)
ymin, ymax = min(min(s) for s in Y if s), max(max(s) for s in Y if s)
if xmax-xmin>EPSILON and ymax-ymin>EPSILON:
if self.stats_mode in [1, 2]:
C = reversed([194 // 2**i + 61 for i in range(len(X))])
else:
C = [255] * len(X)
for x, y, c in zip(X, Y, C):
xa, ya = np.array(x), np.array(y)
import numpy as np
import openmdao.api as om
from mphys import Builder
from funtofem import TransferScheme
class MeldDispXfer(om.ExplicitComponent):
"""
Component to perform displacement transfer using MELD
"""
def initialize(self):
self.options.declare('xfer_object', recordable=False)
self.options.declare('struct_ndof')
self.options.declare('struct_nnodes')
self.options.declare('aero_nnodes')
self.options.declare('check_partials')
self.meld = None
self.initialized_meld = False
self.struct_ndof = None
self.struct_nnodes = None
self.aero_nnodes = None
self.check_partials = False
def setup(self):
self.meld = self.options['xfer_object']
self.struct_ndof = self.options['struct_ndof']
self.struct_nnodes = self.options['struct_nnodes']
self.aero_nnodes = self.options['aero_nnodes']
self.check_partials= self.options['check_partials']
#self.set_check_partial_options(wrt='*',method='cs',directional=True)
# inputs
self.add_input('x_struct0', shape_by_conn=True,
distributed=True,
desc='initial structural node coordinates',
tags=['mphys_coordinates'])
self.add_input('x_aero0', shape_by_conn=True,
distributed=True,
desc='initial aero surface node coordinates',
tags=['mphys_coordinates'])
self.add_input('u_struct', shape_by_conn=True,
distributed=True,
desc='structural node displacements',
tags=['mphys_coupling'])
# outputs
self.add_output('u_aero', shape = self.aero_nnodes*3,
distributed=True,
val=np.zeros(self.aero_nnodes*3),
desc='aerodynamic surface displacements',
tags=['mphys_coupling'])
# partials
#self.declare_partials('u_aero',['x_struct0','x_aero0','u_struct'])
def compute(self, inputs, outputs):
x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)
x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)
u_a = np.array(outputs['u_aero'],dtype=TransferScheme.dtype)
u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
for i in range(3):
u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]
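# (added) u_struct carries struct_ndof values per node; the strided copy above keeps
# only the 3 translational DOFs of each node, which is what MELD expects in u_s.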
self.meld.setStructNodes(x_s0)
self.meld.setAeroNodes(x_a0)
if not self.initialized_meld:
self.meld.initialize()
self.initialized_meld = True
self.meld.transferDisps(u_s,u_a)
outputs['u_aero'] = u_a
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
"""
The explicit component is defined as:
u_a = g(u_s,x_a0,x_s0)
The MELD residual is defined as:
D = u_a - g(u_s,x_a0,x_s0)
So explicit partials below for u_a are negative partials of D
"""
if self.check_partials:
x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)
x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)
self.meld.setStructNodes(x_s0)
self.meld.setAeroNodes(x_a0)
u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
for i in range(3):
u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]
u_a = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)
self.meld.transferDisps(u_s,u_a)
if mode == 'fwd':
if 'u_aero' in d_outputs:
if 'u_struct' in d_inputs:
d_in = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
for i in range(3):
d_in[i::3] = d_inputs['u_struct'][i::self.struct_ndof]
prod = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)
self.meld.applydDduS(d_in,prod)
d_outputs['u_aero'] -= np.array(prod,dtype=float)
if 'x_aero0' in d_inputs:
if self.check_partials:
pass
else:
raise ValueError('MELD forward mode requested but not implemented')
if 'x_struct0' in d_inputs:
if self.check_partials:
pass
else:
raise ValueError('MELD forward mode requested but not implemented')
if mode == 'rev':
if 'u_aero' in d_outputs:
du_a = np.array(d_outputs['u_aero'],dtype=TransferScheme.dtype)
if 'u_struct' in d_inputs:
# du_a/du_s^T * psi = - dD/du_s^T psi
prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
self.meld.applydDduSTrans(du_a,prod)
for i in range(3):
d_inputs['u_struct'][i::self.struct_ndof] -= np.array(prod[i::3],dtype=np.float64)
# du_a/dx_a0^T * psi = - psi^T * dD/dx_a0 in F2F terminology
if 'x_aero0' in d_inputs:
prod = np.zeros(d_inputs['x_aero0'].size,dtype=TransferScheme.dtype)
self.meld.applydDdxA0(du_a,prod)
d_inputs['x_aero0'] -= np.array(prod,dtype=float)
if 'x_struct0' in d_inputs:
prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
self.meld.applydDdxS0(du_a,prod)
d_inputs['x_struct0'] -= np.array(prod,dtype=float)
class MeldLoadXfer(om.ExplicitComponent):
"""
Component to perform load transfers using MELD
"""
def initialize(self):
self.options.declare('xfer_object', recordable=False)
self.options.declare('struct_ndof')
self.options.declare('struct_nnodes')
self.options.declare('aero_nnodes')
self.options.declare('check_partials')
self.meld = None
self.initialized_meld = False
self.struct_ndof = None
self.struct_nnodes = None
self.aero_nnodes = None
self.check_partials = False
def setup(self):
# get the transfer scheme object
self.meld = self.options['xfer_object']
self.struct_ndof = self.options['struct_ndof']
self.struct_nnodes = self.options['struct_nnodes']
self.aero_nnodes = self.options['aero_nnodes']
self.check_partials= self.options['check_partials']
#self.set_check_partial_options(wrt='*',method='cs',directional=True)
struct_ndof = self.struct_ndof
struct_nnodes = self.struct_nnodes
# inputs
self.add_input('x_struct0', shape_by_conn=True,
distributed=True,
desc='initial structural node coordinates',
tags=['mphys_coordinates'])
self.add_input('x_aero0', shape_by_conn=True,
distributed=True,
desc='initial aero surface node coordinates',
tags=['mphys_coordinates'])
self.add_input('u_struct', shape_by_conn=True,
distributed=True,
desc='structural node displacements',
tags=['mphys_coupling'])
self.add_input('f_aero', shape_by_conn=True,
distributed=True,
desc='aerodynamic force vector',
tags=['mphys_coupling'])
# outputs
self.add_output('f_struct', shape = struct_nnodes*struct_ndof,
distributed=True,
desc='structural force vector',
tags=['mphys_coupling'])
# partials
#self.declare_partials('f_struct',['x_struct0','x_aero0','u_struct','f_aero'])
def compute(self, inputs, outputs):
if self.check_partials:
x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)
x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)
self.meld.setStructNodes(x_s0)
self.meld.setAeroNodes(x_a0)
f_a = np.array(inputs['f_aero'],dtype=TransferScheme.dtype)
f_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)
for i in range(3):
u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]
u_a = np.zeros(inputs['f_aero'].size,dtype=TransferScheme.dtype)
self.meld.transferDisps(u_s,u_a)
self.meld.transferLoads(f_a,f_s)
outputs['f_struct'][:] = 0.0
for i in range(3):
outputs['f_struct'][i::self.struct_ndof] = f_s[i::3]
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
"""
The explicit component is defined as:
f_s = g(f_a,u_s,x_a0,x_s0)
The MELD internal residual is defined as:
L = f_s - g(f_a,u_s,x_a0,x_s0)
So explicit partials below for f_s are negative partials of L
"""
if self.check_partials:
x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)
x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)
self.meld.setStructNodes(x_s0)
self.meld.setAeroNodes(x_a0)
f_a = np.array(inputs['f_aero'],dtype=TransferScheme.dtype)
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class pushBox(gym.Env):
RIGHT = 0
UP = 1
LEFT = 2
DOWN = 3
def __init__(self, grid_size=10, mode='vector'):
assert mode in ['vector','image'], f'mode {mode} invalid'
assert grid_size >= 8
super(pushBox,self).__init__()
self.mode = mode
self.grid_size = grid_size
self.n_steps = 0
self.max_n_steps = grid_size * 4
self.state = np.zeros((2,), dtype=int)
self.goal = np.zeros((2,), dtype=int)
self.box = np.zeros((2,), dtype=int)
self.state_color = np.array([0, 255, 0])
self.goal_color = np.array([255, 0, 0])
self.box_color = np.array([0, 0, 255])
self.action_space = spaces.Discrete(4)
if mode == 'vector':
high = np.full(4, grid_size, dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
elif mode == 'image':
self.observation_space = spaces.Box(0, 255, (grid_size * 4, grid_size * 4, 3), dtype = np.uint8)
def _get_obs(self):
if self.mode == 'vector':
return np.concatenate((np.subtract(self.state, self.box), np.subtract(self.box, self.goal)))
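# (added) i.e. the vector observation is two relative offsets: (agent - box) and (box - goal).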
else:
obs = np.zeros((self.grid_size * 4, self.grid_size * 4, 3), dtype=np.uint8)
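# Hedged sketch (added; the snippet is truncated here): the image branch would
# typically paint each entity into its 4x4 pixel block before returning, e.g.:
# x, y = self.state; obs[y*4:(y+1)*4, x*4:(x+1)*4] = self.state_color
# x, y = self.box;   obs[y*4:(y+1)*4, x*4:(x+1)*4] = self.box_color
# x, y = self.goal;  obs[y*4:(y+1)*4, x*4:(x+1)*4] = self.goal_color
# return obs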
import numpy as np
def sigmoid(X):
"""
Apply Sigmoid on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
return 1/(1 + np.exp(-X))
def tanh(X):
"""
Apply Hyperbolic Tangent on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
return np.tanh(X)
def softmax(X):
"""
Apply Softmax on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
Sum = np.sum(np.exp(X))
return np.exp(X)/Sum
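# Note (added): for numerical stability, implementations commonly subtract np.max(X)
# before exponentiating; the formula above keeps the original, unshifted form.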
def softsign(X):
"""
Apply Softsign on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
return X/(np.abs(X) + 1)
def relu(X):
"""
Apply Rectified Linear Unit on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
return np.maximum(0, X)
def leakyRelu(X):
"""
Apply Leaky Rectified Linear Unit on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
return np.maximum(0.01*X, X)
def elu(X, alpha=1.0):
"""
Apply Exponential Linear Unit on X Vector.
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
Array containing Input Values.
alpha: float
Curve Constant for Values of X less than 0.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Output Vector after Vectorised Operation.
"""
assert(alpha > 0)
return np.maximum(0, X) + np.minimum(0, alpha * (np.exp(X) - 1))
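# Example (added): with alpha=1.0, elu(np.array([-1.0, 0.0, 2.0])) is approximately
# [-0.632, 0.0, 2.0], since negative inputs follow alpha*(exp(x)-1).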
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step08 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step08&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-8).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from arpym.statistics import meancov_sp
from arpym.estimation import fit_lfm_lasso
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-parameters)
# +
# indicates which projection to continue from
# True: use copula-marginal projections
# False: use historical projections
copula_marginal = True
# parameter for lasso minimization
if copula_marginal:
lam = 98000
else:
lam = 15000
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
# Risk drivers identification
db_riskdrivers_series = pd.read_csv(path+'db_riskdrivers_series.csv',
index_col=0)
x = db_riskdrivers_series.values
riskdriver_names = np.array(db_riskdrivers_series.columns)
db_riskdrivers_tools = pd.read_csv(path+'db_riskdrivers_tools.csv')
d_ = int(db_riskdrivers_tools['d_'][0])
n_stocks = int(db_riskdrivers_tools['n_stocks'][0])
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')
# Pricing
db_holdings = pd.read_csv(path+'db_holdings.csv')
if copula_marginal:
# Projection
db_projection_tools = pd.read_csv(path+'db_projection_tools.csv')
j_ = int(db_projection_tools['j_'][0])
t_hor = np.datetime64(db_projection_tools['t_hor'][0], 'D')
m_ = np.busday_count(t_now, t_hor)
db_projection_riskdrivers = pd.read_csv(path+'db_projection_riskdrivers.csv')
x_proj = db_projection_riskdrivers.values.reshape(j_, m_+1, d_)
db_scenprob = pd.read_csv(path+'db_scenario_probs.csv')
p = db_scenprob['p'].values
# Aggregation
db_exante_perf = pd.read_csv(path+'db_exante_perf.csv')
y_h = db_exante_perf.values.squeeze()
# Ex-ante evaluation
db_quantile_and_satis = pd.read_csv(path+'db_quantile_and_satis.csv')
c_es = db_quantile_and_satis['c_es'][0]
es_yh = db_quantile_and_satis['es_yh'][0]
neg_var_yh = db_quantile_and_satis['neg_var_yh'][0]
else:
# Projection
db_projection_tools = pd.read_csv(path+'db_projection_bootstrap_tools.csv')
j_ = int(db_projection_tools['j_'][0])
t_hor = np.datetime64(db_projection_tools['t_hor'][0], 'D')
m_ = np.busday_count(t_now, t_hor)
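# (added) m_ is the number of business days between t_now and the horizon t_hor,
# i.e. the number of daily projection steps (cf. x_proj reshaped to (j_, m_+1, d_) above).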
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.layout as lay
import wisdem.drivetrainse.drive_structure as ds
from wisdem.commonse import gravity
npts = 12
class TestDirectStructure(unittest.TestCase):
def setUp(self):
self.inputs = {}
self.outputs = {}
self.discrete_inputs = {}
self.discrete_outputs = {}
self.opt = {}
self.discrete_inputs["upwind"] = True
self.inputs["L_12"] = 2.0
self.inputs["L_h1"] = 1.0
self.inputs["L_generator"] = 3.25
# self.inputs['L_2n'] = 1.5
# self.inputs['L_grs'] = 1.1
# self.inputs['L_gsn'] = 1.1
self.inputs["L_hss"] = 0.75
self.inputs["L_gearbox"] = 1.2
self.inputs["overhang"] = 6.25
self.inputs["drive_height"] = 4.875
self.inputs["tilt"] = 4.0
self.inputs["access_diameter"] = 0.9
myones = np.ones(5)
self.inputs["lss_diameter"] = 3.3 * myones
self.inputs["lss_wall_thickness"] = 0.45 * myones
self.inputs["hss_diameter"] = 1.6 * np.ones(3)
self.inputs["hss_wall_thickness"] = 0.25 * np.ones(3)
self.inputs["nose_diameter"] = 2.2 * myones
self.inputs["nose_wall_thickness"] = 0.1 * myones
self.inputs["bedplate_wall_thickness"] = 0.06 * np.ones(npts)
self.inputs["bedplate_flange_width"] = 1.5
self.inputs["bedplate_flange_thickness"] = 0.05
# self.inputs['bedplate_web_height'] = 1.0
self.inputs["bedplate_web_thickness"] = 0.05
self.inputs["D_top"] = 6.5
self.inputs["hub_diameter"] = 4.0
self.inputs["other_mass"] = 200e3
self.inputs["mb1_mass"] = 10e3
self.inputs["mb1_I"] = 10e3 * 0.5 * 2 ** 2 * np.ones(3)
self.inputs["mb2_mass"] = 10e3
self.inputs["mb2_I"] = 10e3 * 0.5 * 1.5 ** 2 * np.ones(3)
self.inputs["mb1_max_defl_ang"] = 0.008
self.inputs["mb2_max_defl_ang"] = 0.008
self.inputs["m_stator"] = 100e3
self.inputs["cm_stator"] = -0.3
self.inputs["I_stator"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_rotor_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_rotor_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_stator_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_stator_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_mass"] = 200e3
self.inputs["generator_I"] = np.array([2e6, 1e6, 1e6, 0.0, 0.0, 0.0])
self.inputs["gearbox_mass"] = 100e3
self.inputs["gearbox_I"] = np.array([1e6, 5e5, 5e5])
self.inputs["brake_mass"] = 10e3
self.inputs["brake_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["carrier_mass"] = 10e3
self.inputs["carrier_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["gear_ratio"] = 1.0
self.inputs["F_mb1"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["M_mb1"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["M_mb2"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["hub_system_mass"] = 100e3
self.inputs["hub_system_cm"] = 2.0
self.inputs["hub_system_I"] = np.array([2409.750e3, -1716.429e3, 74.3529e3, 0.0, 0.0, 0.0])
self.inputs["F_hub"] = np.array([2409.750e3, 0.0, 74.3529e2]).reshape((3, 1))
self.inputs["M_hub"] = np.array([-1.83291e4, 6171.7324e2, 5785.82946e2]).reshape((3, 1))
self.inputs["lss_E"] = self.inputs["hss_E"] = self.inputs["bedplate_E"] = 210e9
self.inputs["lss_G"] = self.inputs["hss_G"] = self.inputs["bedplate_G"] = 80.8e9
self.inputs["lss_rho"] = self.inputs["hss_rho"] = self.inputs["bedplate_rho"] = 7850.0
self.inputs["lss_Xy"] = self.inputs["hss_Xy"] = self.inputs["bedplate_Xy"] = 250e6
self.opt["gamma_f"] = 1.35
self.opt["gamma_m"] = 1.3
self.opt["gamma_n"] = 1.0
def compute_layout(self, direct=True):
myobj = lay.DirectLayout() if direct else lay.GearedLayout()
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
for k in self.outputs.keys():
self.inputs[k] = self.outputs[k]
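# (added) copy the layout results back into the inputs dict so the frame
# component under test receives a geometry consistent with DirectLayout/GearedLayout.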
def testBaseF_BaseM(self):
self.inputs["tilt"] = 0.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(
self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
def testBaseF_BaseM_Downwind(self):
self.inputs["tilt"] = 0.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt_Downwind(self):
self.inputs["tilt"] = 5.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
| npt.assert_almost_equal(self.outputs["base_M"][1], M0[1]) | numpy.testing.assert_almost_equal |
import numpy as np
import numpy.testing as npt
from dipy.data import get_data
from dipy.sims.voxel import add_noise
from dipy.segment.mrf import (ConstantObservationModel,
IteratedConditionalModes)
from dipy.segment.tissue import (TissueClassifierHMRF)
# Load a coronal slice from a T1-weighted MRI
fname = get_data('t1_coronal_slice')
single_slice = np.load(fname)
# Stack a few copies to form a 3D volume
nslices = 5
image = np.zeros(shape=single_slice.shape + (nslices,))
image[..., :nslices] = single_slice[..., None]
# Set up parameters
nclasses = 4
beta = np.float64(0.0)
max_iter = 10
background_noise = True
# Making squares
square = np.zeros((256, 256, 3), dtype=np.int16)
square[42:213, 42:213, :] = 1
square[71:185, 71:185, :] = 2
square[99:157, 99:157, :] = 3
square_gauss = np.zeros((256, 256, 3)) + 0.001
square_gauss = add_noise(square_gauss, 10000, 1, noise_type='gaussian')
square_gauss[42:213, 42:213, :] = 1
noise_1 = np.random.normal(1.001, 0.0001,
size=square_gauss[42:213, 42:213, :].shape)
square_gauss[42:213, 42:213, :] = square_gauss[42:213, 42:213, :] + noise_1
square_gauss[71:185, 71:185, :] = 2
noise_2 = np.random.normal(2.001, 0.0001,
size=square_gauss[71:185, 71:185, :].shape)
square_gauss[71:185, 71:185, :] = square_gauss[71:185, 71:185, :] + noise_2
square_gauss[99:157, 99:157, :] = 3
noise_3 = np.random.normal(3.001, 0.0001,
size=square_gauss[99:157, 99:157, :].shape)
square_gauss[99:157, 99:157, :] = square_gauss[99:157, 99:157, :] + noise_3
square_1 = np.zeros((256, 256, 3)) + 0.001
square_1 = add_noise(square_1, 10000, 1, noise_type='gaussian')
temp_1 = np.random.random_integers(20, size=(171, 171, 3))
temp_1 = np.where(temp_1 < 20, 1, 3)
square_1[42:213, 42:213, :] = temp_1
temp_2 = np.random.random_integers(20, size=(114, 114, 3))
temp_2 = np.where(temp_2 < 19, 2, 1)
square_1[71:185, 71:185, :] = temp_2
temp_3 = np.random.random_integers(20, size=(58, 58, 3))
temp_3 = np.where(temp_3 < 20, 3, 1)
square_1[99:157, 99:157, :] = temp_3
def test_greyscale_image():
com = ConstantObservationModel()
icm = IteratedConditionalModes()
mu, sigma = com.initialize_param_uniform(image, nclasses)
sigmasq = sigma ** 2
npt.assert_array_almost_equal(mu, np.array([0., 0.25, 0.5, 0.75]))
npt.assert_array_almost_equal(sigma, np.array([1.0, 1.0, 1.0, 1.0]))
npt.assert_array_almost_equal(sigmasq, np.array([1.0, 1.0, 1.0, 1.0]))
neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)
npt.assert_(neglogl[100, 100, 1, 0] != neglogl[100, 100, 1, 1])
npt.assert_(neglogl[100, 100, 1, 1] != neglogl[100, 100, 1, 2])
npt.assert_(neglogl[100, 100, 1, 2] != neglogl[100, 100, 1, 3])
npt.assert_(neglogl[100, 100, 1, 1] != neglogl[100, 100, 1, 3])
initial_segmentation = icm.initialize_maximum_likelihood(neglogl)
npt.assert_(initial_segmentation.max() == nclasses - 1)
npt.assert_(initial_segmentation.min() == 0)
PLN = icm.prob_neighborhood(initial_segmentation, beta, nclasses)
print(PLN.shape)
npt.assert_(np.all((PLN >= 0) & (PLN <= 1.0)))
if beta == 0.0:
npt.assert_almost_equal(PLN[50, 50, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 3], 0.25, True)
PLY = com.prob_image(image, nclasses, mu, sigmasq, PLN)
print(PLY)
npt.assert_(np.all((PLY >= 0) & (PLY <= 1.0)))
mu_upd, sigmasq_upd = com.update_param(image, PLY, mu, nclasses)
print(mu)
print(mu_upd)
npt.assert_(mu_upd[0] != mu[0])
npt.assert_(mu_upd[1] != mu[1])
npt.assert_(mu_upd[2] != mu[2])
npt.assert_(mu_upd[3] != mu[3])
print(sigmasq)
print(sigmasq_upd)
npt.assert_(sigmasq_upd[0] != sigmasq[0])
npt.assert_(sigmasq_upd[1] != sigmasq[1])
npt.assert_(sigmasq_upd[2] != sigmasq[2])
npt.assert_(sigmasq_upd[3] != sigmasq[3])
icm_segmentation, energy = icm.icm_ising(neglogl, beta,
initial_segmentation)
npt.assert_(np.abs(np.sum(icm_segmentation)) != 0)
npt.assert_(icm_segmentation.max() == nclasses - 1)
npt.assert_(icm_segmentation.min() == 0)
def test_greyscale_iter():
max_iter = 15
beta = np.float64(0.1)
com = ConstantObservationModel()
icm = IteratedConditionalModes()
mu, sigma = com.initialize_param_uniform(image, nclasses)
sigmasq = sigma ** 2
neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)
initial_segmentation = icm.initialize_maximum_likelihood(neglogl)
npt.assert_(initial_segmentation.max() == nclasses - 1)
npt.assert_(initial_segmentation.min() == 0)
mu, sigma = com.seg_stats(image, initial_segmentation, nclasses)
sigmasq = sigma ** 2
npt.assert_(mu[0] >= 0.0)
npt.assert_(mu[1] >= 0.0)
npt.assert_(mu[2] >= 0.0)
npt.assert_(mu[3] >= 0.0)
npt.assert_(sigmasq[0] >= 0.0)
npt.assert_(sigmasq[1] >= 0.0)
npt.assert_(sigmasq[2] >= 0.0)
npt.assert_(sigmasq[3] >= 0.0)
if background_noise:
zero = np.zeros_like(image) + 0.001
zero_noise = add_noise(zero, 10000, 1, noise_type='gaussian')
image_gauss = np.where(image == 0, zero_noise, image)
else:
image_gauss = image
final_segmentation = np.empty_like(image)
seg_init = initial_segmentation.copy()
energies = []
for i in range(max_iter):
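# One EM-style pass per iteration: neighbourhood priors (PLN), posteriors (PLY), Gaussian parameter update, then ICM relabelling.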
PLN = icm.prob_neighborhood(initial_segmentation, beta,
nclasses)
npt.assert_(np.all((PLN >= 0) & (PLN <= 1.0)))
if beta == 0.0:
npt.assert_almost_equal(PLN[50, 50, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[50, 50, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[147, 129, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[61, 152, 1, 3], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 0], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 1], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 2], 0.25, True)
npt.assert_almost_equal(PLN[100, 100, 1, 3], 0.25, True)
PLY = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)
npt.assert_(np.all((PLY >= 0) & (PLY <= 1.0)))
npt.assert_(PLY[50, 50, 1, 0] > PLY[50, 50, 1, 1])
npt.assert_(PLY[50, 50, 1, 0] > PLY[50, 50, 1, 2])
npt.assert_(PLY[50, 50, 1, 0] > PLY[50, 50, 1, 3])
npt.assert_(PLY[100, 100, 1, 3] > PLY[100, 100, 1, 0])
npt.assert_(PLY[100, 100, 1, 3] > PLY[100, 100, 1, 1])
npt.assert_(PLY[100, 100, 1, 3] > PLY[100, 100, 1, 2])
mu_upd, sigmasq_upd = com.update_param(image_gauss, PLY, mu, nclasses)
npt.assert_(mu_upd[0] >= 0.0)
npt.assert_(mu_upd[1] >= 0.0)
npt.assert_(mu_upd[2] >= 0.0)
npt.assert_(mu_upd[3] >= 0.0)
npt.assert_(sigmasq_upd[0] >= 0.0)
npt.assert_(sigmasq_upd[1] >= 0.0)
npt.assert_(sigmasq_upd[2] >= 0.0)
npt.assert_(sigmasq_upd[3] >= 0.0)
negll = com.negloglikelihood(image_gauss,
mu_upd, sigmasq_upd, nclasses)
npt.assert_(negll[50, 50, 1, 0] < negll[50, 50, 1, 1])
npt.assert_(negll[50, 50, 1, 0] < negll[50, 50, 1, 2])
npt.assert_(negll[50, 50, 1, 0] < negll[50, 50, 1, 3])
npt.assert_(negll[100, 100, 1, 3] < negll[100, 100, 1, 0])
npt.assert_(negll[100, 100, 1, 3] < negll[100, 100, 1, 1])
npt.assert_(negll[100, 100, 1, 3] < negll[100, 100, 1, 2])
final_segmentation, energy = icm.icm_ising(negll, beta,
initial_segmentation)
print(energy[energy > -np.inf].sum())
energies.append(energy[energy > -np.inf].sum())
initial_segmentation = final_segmentation.copy()
mu = mu_upd.copy()
sigmasq = sigmasq_upd.copy()
npt.assert_(energies[-1] < energies[0])
difference_map = np.abs(seg_init - final_segmentation)
npt.assert_(np.abs(np.sum(difference_map)) != 0)
def test_square_iter():
com = ConstantObservationModel()
icm = IteratedConditionalModes()
initial_segmentation = square
mu, sigma = com.seg_stats(square_gauss, initial_segmentation,
nclasses)
sigmasq = sigma ** 2
npt.assert_(mu[0] >= 0.0)
npt.assert_(mu[1] >= 0.0)
npt.assert_(mu[2] >= 0.0)
npt.assert_(mu[3] >= 0.0)
npt.assert_(sigmasq[0] >= 0.0)
npt.assert_(sigmasq[1] >= 0.0)
npt.assert_(sigmasq[2] >= 0.0)
npt.assert_(sigmasq[3] >= 0.0)
final_segmentation = np.empty_like(square_gauss)
seg_init = initial_segmentation.copy()
energies = []
for i in range(max_iter):
print('\n')
print('>> Iteration: ' + str(i))
print('\n')
PLN = icm.prob_neighborhood(initial_segmentation, beta,
nclasses)
npt.assert_(np.all((PLN >= 0) & (PLN <= 1.0)))
if beta == 0.0:
npt.assert_(PLN[25, 25, 1, 0] == 0.25)
npt.assert_(PLN[25, 25, 1, 1] == 0.25)
npt.assert_(PLN[25, 25, 1, 2] == 0.25)
| npt.assert_(PLN[25, 25, 1, 3] == 0.25) | numpy.testing.assert_ |
from typing import Iterator, NamedTuple
import numpy as np
import random as rnd
from petnet.tensor import Tensor
from typing import Callable, Tuple
Batch = NamedTuple("Batch", [("inputs", Tensor), ("targets", Tensor)])
class DataIterator:
def __call__(self) -> Iterator[Batch]:
raise NotImplementedError
def iterate(self, inputs: Tensor, targets: Tensor, starts: Tensor, batch_size: int) -> Iterator[Batch]:
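# Yield consecutive [start, start + batch_size) slices of the inputs and targets.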
for start in starts:
end = start + batch_size
batch_inputs = inputs[start:end]
batch_targets = targets[start:end]
yield Batch(batch_inputs, batch_targets)
class BatchIterator(DataIterator):
def __init__(self, inputs: Tensor, targets: Tensor, batch_size: int = 2, shuffle: bool = True) -> None:
self.batch_size = batch_size
self.shuffle = shuffle
self.inputs = inputs
self.targets = targets
def __call__(self) -> Iterator[Batch]:
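# Shuffle the batch start offsets rather than individual samples, so each batch stays contiguous.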
starts = np.arange(0, np.size(self.inputs, 0), self.batch_size)
if self.shuffle:
np.random.shuffle(starts)
return self.iterate(self.inputs, self.targets, starts, self.batch_size)
Epoch = NamedTuple("Batch", [("inputs", Tensor), ("targets", Tensor)])
FGen = Callable[[], Epoch]
class GenIterator(DataIterator):
def __init__(self, generator: FGen, batch_size: int = 2):
self.generator = generator
self.batch_size = batch_size
def __call__(self) -> Iterator[Batch]:
inputs, targets = self.generator()
starts = np.arange(0, len(inputs), self.batch_size)
return self.iterate(inputs, targets, starts, self.batch_size)
class SampleIterator(DataIterator):
def __init__(self, inputs: Tensor, targets: Tensor, epoch_size: int = 1000, batch_size: int = 2) -> None:
self.batch_size = batch_size
self.epoch_size = epoch_size
self.inputs = inputs
self.targets = targets
def __call__(self) -> Iterator[Batch]:
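# Build an epoch by sampling with replacement, then iterate over it in batches.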
epoch_inputs = []
epoch_targets = []
for _ in range(self.epoch_size):
index = rnd.randrange(0, len(self.inputs))
epoch_inputs.append(self.inputs[index])
epoch_targets.append(self.targets[index])
starts = np.arange(0, np.size(epoch_inputs, 0), self.batch_size)
return self.iterate(np.array(epoch_inputs), np.array(epoch_targets), starts, self.batch_size)
class SampleMultInputsIterator(SampleIterator):
def __call__(self) -> Iterator[Batch]:
epoch_inputs = []
epoch_targets = []
for _ in range(self.epoch_size):
index = rnd.randrange(0, | np.size(self.inputs, 0) | numpy.size |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import advanced_activations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import serialization
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.platform import test
def _ref_softmax(values):
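# Numerically stable reference softmax: subtract the max before exponentiating.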
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasActivationsTest(test.TestCase, parameterized.TestCase):
def test_serialization(self):
all_activations = [
'softmax', 'relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear',
'softplus', 'softsign', 'selu', 'gelu'
]
for name in all_activations:
fn = activations.get(name)
ref_fn = getattr(activations, name)
assert fn == ref_fn
config = activations.serialize(fn)
fn = activations.deserialize(config)
assert fn == ref_fn
def test_serialization_v2(self):
activation_map = {nn.softmax_v2: 'softmax'}
for fn_v2_key in activation_map:
fn_v2 = activations.get(fn_v2_key)
config = activations.serialize(fn_v2)
fn = activations.deserialize(config)
assert fn.__name__ == activation_map[fn_v2_key]
def test_serialization_with_layers(self):
activation = advanced_activations.LeakyReLU(alpha=0.1)
layer = core.Dense(3, activation=activation)
config = serialization.serialize(layer)
# with custom objects
deserialized_layer = serialization.deserialize(
config, custom_objects={'LeakyReLU': activation})
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
# without custom objects
deserialized_layer = serialization.deserialize(config)
self.assertEqual(deserialized_layer.__class__.__name__,
layer.__class__.__name__)
self.assertEqual(deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__)
def test_softmax(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softmax(test_values[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
x = backend.placeholder(ndim=1)
with self.assertRaises(ValueError):
activations.softmax(x)
def test_softmax_2d_axis0(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x, axis=0)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_softmax(test_values[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_softmax(self):
x = backend.placeholder(shape=(2, 2, 3))
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 2, 3)) * 10
result = f([test_values])[0]
expected = _ref_softmax(test_values[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.selu(x)])
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=backend.floatx())
result = f([positive_values])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softplus(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softplus(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softsign(self):
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softsign(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softsign(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sigmoid(self):
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + | np.exp(-x) | numpy.exp |
"""
This module contains DC related optimization algorithms
"""
import cvxpy
import numpy
import cmath
def sample_spherical(dim):
vec = numpy.random.randn(dim)
vec /= numpy.linalg.norm(vec)
return vec
def rand_a(pre_aa):
# incomplete variant: does not enforce the feasibility constraints
dim = numpy.linalg.matrix_rank(pre_aa)
sup_vec = sample_spherical(dim)
eig_values, eig_vectors = numpy.linalg.eig(pre_aa)
print(eig_values)
eig_val_mat = numpy.eye(dim)
for i in range(dim):
eig_val_mat[i][i] = numpy.sqrt(abs(eig_values[i]))
candidate_a = numpy.dot(numpy.dot(eig_vectors, eig_val_mat), sup_vec)
print('candidate_a: ' + str(numpy.dot(candidate_a.T, candidate_a)))
print('trace aa: ' + str(numpy.trace(pre_aa)))
def rand_b(pre_aa):
candidates = list()
k, d = pre_aa.shape
candidate_a = numpy.zeros((k, 1))
for i in range(k):
candidate_a[i] = numpy.sqrt(abs(pre_aa[i][i]))
candidates.append(candidate_a)
return candidates
def rand_c(pre_aa, num_candidates=100):
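# Gaussian randomization: candidates a = U * sqrt(|Lambda|) * g with g ~ N(0, I), where U, Lambda come from the eigendecomposition of pre_aa.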
candidates = list()
k, d = pre_aa.shape
# dim = numpy.linalg.matrix_rank(pre_aa)
eig_values, eig_vectors = numpy.linalg.eig(pre_aa)
dim = eig_values.shape[0]
eig_val_mat = numpy.eye(dim)
for i in range(dim):
eig_val_mat[i][i] = numpy.sqrt(abs(eig_values[i]))
mean = numpy.zeros(dim)
cov = numpy.eye(dim)
for j in range(num_candidates):
sup_vec = numpy.random.multivariate_normal(mean, cov)
candidate_a = numpy.dot(numpy.dot(eig_vectors, eig_val_mat), sup_vec)
candidates.append(candidate_a.reshape(k, 1))
return candidates
def scale_to_satisfy_constraints(a, h_mat):
scale = 1
k, m = h_mat.shape
for i in range(m):
obj = numpy.linalg.norm(numpy.dot(a.T, numpy.mat(h_mat[:, i]).T))
if obj < 1:
scale = max(scale, 1 / obj)
return numpy.multiply(scale, a)
def sdr_solver(selected_set, h_mat, cache_a=None):
k, m = h_mat.shape
# print('shape of h_mat: ' + str(h_mat.shape))
if len(selected_set) == 0:
return numpy.ones((k, 1))
h_list = list()
for i in range(m):
h_vec = numpy.matrix(h_mat[:, i]).T
h_list.append(numpy.dot(h_vec, h_vec.H))
pre_aa = numpy.random.rand(k, k)
pre_aa = numpy.dot(pre_aa, pre_aa.T)
pre_aa = numpy.add(pre_aa, pre_aa.T)
if cache_a is not None:
pre_aa = numpy.dot(cache_a, cache_a.T)
aa = cvxpy.Variable((k, k), PSD=True)
constraints = [aa >> 0]
for i in range(m):
if i in selected_set:
constraints = constraints + [cvxpy.real(cvxpy.trace(aa @ h_list[i])) >= 1]
obj = cvxpy.Minimize(cvxpy.trace(aa))
prob = cvxpy.Problem(obj, constraints)
prob.solve()
if aa.value is not None and numpy.linalg.matrix_rank(aa.value) == 1:
pre_aa = aa.value
else:
if aa.value is not None:
pre_aa = aa.value
del aa, constraints, obj, prob
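# Gaussian randomization step: draw candidates from the SDR solution, rescale each to satisfy the constraints, and keep the one with the smallest norm.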
candidates = rand_c(pre_aa, num_candidates=5)
for i in range(len(candidates)):
candidates[i] = scale_to_satisfy_constraints(candidates[i], h_mat)
best_candidate_a = min(candidates, key=lambda a: numpy.linalg.norm(a))
# check_feasibility(selected_set, h_mat, best_candidate_a)
return best_candidate_a
def dca_solver(selected_set, h_mat, cache_a=None, max_iter=100, tol=1e-10, data_info=None):
# time = 0
k, m = h_mat.shape
# print(h_mat)
if len(selected_set) == 0:
return numpy.ones((k, 1))
h_list = list()
for i in range(m):
h_vec = numpy.mat(h_mat[:, i]).T
h_list.append(numpy.dot(h_vec, h_vec.H))
# print(numpy.diag(h_list[i]))
pre_aa = numpy.random.rand(k, k)
pre_aa = numpy.dot(pre_aa, pre_aa.T)
pre_aa = numpy.add(pre_aa, pre_aa.T)
if cache_a is not None:
pre_aa = numpy.dot(cache_a, cache_a.T)
# warm_start
aa = cvxpy.Variable((k, k))
a_sub_gradient = cvxpy.Parameter((k, k))
constraints = [aa >> 0, cvxpy.trace(aa) >= 0]
for j in range(m):
if j in selected_set:
constraints = constraints + [cvxpy.real(cvxpy.trace(aa @ h_list[j])) >= 1]
obj = cvxpy.Minimize(2 * cvxpy.trace(aa) - cvxpy.trace(a_sub_gradient.T @ aa))
prob = cvxpy.Problem(obj, constraints)
for i in range(max_iter):
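# DC iteration: stop once trace(aa) matches its largest eigenvalue, i.e. aa is numerically rank one.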
if abs(numpy.trace(pre_aa) - numpy.linalg.norm(pre_aa, ord=2)) < tol:
break
u, s, vh = numpy.linalg.svd(pre_aa)
um = u[:, 0]
um = numpy.mat(um).T
a_sub_gradient.value = numpy.real(numpy.dot(um, um.H))
prob.solve(verbose=False)
# time += prob.solver_stats.solve_time
# print(
# 'Iter ' + str(i) + ': status ' + str(prob.status) + ', optimal value ' + str(
# prob.value))
if prob.status == cvxpy.INFEASIBLE:
# print(h_mat)
print('Error occurred with DCA.')
# exit()
return numpy.zeros((k, 1))
if aa.value is not None:
pre_aa = aa.value
eig_values, eig_vectors = numpy.linalg.eig(pre_aa)
idx = eig_values.argmax()
a = eig_vectors[:, idx]
# a = numpy.multiply(numpy.sqrt(abs(eig_values[0])), numpy.matrix(a).T)
a = numpy.multiply(cmath.sqrt(eig_values[idx]), numpy.matrix(a).T).reshape((k,1))
return dca_scale(a, h_mat, selected_set)
def dca_scale(a, h_mat, selected_set):
scale = 1
# k, m = h_mat.shape
for i in selected_set:
obj = numpy.linalg.norm(numpy.dot(a.T, numpy.mat(h_mat[:, i]).T))
if obj < 1:
scale = max(scale, 1 / obj)
return numpy.multiply(scale, a)
def sparse_optimization_dca(h_mat, theta, cache_a=None, max_iter=50, tol=1e-10):
# time = 0
k, m = h_mat.shape
h_list = list()
for i in range(m):
h_vec = numpy.mat(h_mat[:, i]).T
h_list.append( | numpy.dot(h_vec, h_vec.H) | numpy.dot |
import time
import numpy as np
import utils.measurement_subs as measurement_subs
import utils.socket_subs as socket_subs
from .do_fridge_sweep import do_fridge_sweep
from .do_device_sweep import do_device_sweep
def device_fridge_2d(
graph_proc, rpg, data_file,
read_inst, sweep_inst=[], set_inst=[],
set_value=[], pre_value=[], finish_value=[],
fridge_sweep="B", fridge_set=0.0,
device_start=0.0, device_stop=1.0, device_step=0.1, device_finish=0.0,
device_mid=[],
fridge_start=0.0, fridge_stop=1.0, fridge_rate=0.1,
delay=0, sample=1,
timeout=-1, wait=0.0,
comment="No comment!", network_dir="Z:\\DATA",
persist=True, x_custom=[]
):
"""2D data acquisition either by sweeping a device parameter
or by sweeping a fridge parameter.
The program decides which of these to do depending on whether
the variable "sweep_inst" is assigned.
i.e. if "sweep_inst" is assigned the device is swept and the
fridge parameter is stepped.
If the device is being swept the variable "fridge_rate" is the size
of successive steps of either T or B.
If the fridge is being swept the first set_inst is stepped by the
"device_step"
For the case of successive B sweeps the fridge will be swept
forwards and backwards
e.g. Vg = -60 V B = -9 --> +9 T
Vg = -50 V B = +9 --> -9 T
etc ...
Note that in this case the first "set_value" will be overwritten
therefore a dummy e.g. 0.0 should be written in the case that there
are additional set_inst
"""
if sweep_inst:
sweep_device = True
else:
sweep_device = False
if fridge_sweep == "B":
b_sweep = True
else:
b_sweep = False
if not finish_value:
finish_value = list(set_value)
# We step over the x variable and sweep over the y
if sweep_device:
x_vec = np.hstack((np.arange(fridge_start, fridge_stop, fridge_rate), fridge_stop))
y_start = device_start
y_stop = device_stop
y_step = device_step
else:
x_vec = np.hstack(( | np.arange(device_start, device_stop, device_step) | numpy.arange |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell
from mindspore.nn.optim import Momentum, LARS
from mindspore.ops import operations as P
class NetWithLoss(nn.Cell):
def __init__(self, network, strategy3):
super(NetWithLoss, self).__init__()
self.loss = P.SoftmaxCrossEntropyWithLogits().set_strategy(strategy3)
self.network = network
def construct(self, x, b):
predict = self.network(x)
return self.loss(predict, b)[0]
def compile_net(net, x, b):
net.set_auto_parallel()
_executor.compile(net, x, b)
def test_momentum():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2, weight):
super().__init__()
self.weight = Parameter(weight, "w1")
self.matmul = P.MatMul(transpose_a=False, transpose_b=True).set_strategy(strategy1)
self.relu = P.ReLU().set_strategy(strategy2)
def construct(self, x):
out = self.matmul(x, self.weight)
out = self.relu(out)
return out
context.set_auto_parallel_context(device_num=4, global_rank=0)
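# Shard strategies for the MatMul, ReLU and loss operators under 4-device semi-auto parallelism.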
strategy1 = ((2, 1), (2, 1))
strategy2 = ((4, 1),)
strategy3 = ((4, 1), (4, 1))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
weight = Tensor(np.ones([64, 32]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net = Net(strategy1, strategy2, weight)
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
net_with_loss = NetWithLoss(net, strategy3)
train_net = TrainOneStepCell(net_with_loss, optimizer)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
compile_net(train_net, x, b)
def test_momentum_with_loss_scale():
class Net(nn.Cell):
def __init__(self, strategy1, strategy2, weight):
super().__init__()
self.weight = Parameter(weight, "w1")
self.matmul = P.MatMul(transpose_a=False, transpose_b=True).set_strategy(strategy1)
self.relu = P.ReLU().set_strategy(strategy2)
def construct(self, x):
out = self.matmul(x, self.weight)
out = self.relu(out)
return out
context.set_auto_parallel_context(device_num=4, global_rank=0)
strategy1 = ((2, 1), (2, 1))
strategy2 = ((4, 1),)
strategy3 = ((4, 1), (4, 1))
x = Tensor(np.ones([64, 32]), dtype=ms.float32)
weight = Tensor(np.ones([64, 32]), dtype=ms.float32)
b = Tensor( | np.ones([64, 64]) | numpy.ones |
from scipy.ndimage import distance_transform_edt
import numpy as np
import matplotlib.pyplot as plt
from pyvis import colorize_segmented_image
def imshow_border_cells(ax, cell, seg, edges, colors):
ax.imshow(colorize_segmented_image(expanded, 'rgb', colors=colors), cmap='jet')
print([i for i in edges if cell in i and 0 not in i])
xs, ys = [], []
for i in | np.unique([i for i in exp_rag.edges if cell in i and 0 not in i]) | numpy.unique |
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from gym import spaces
from cyberbattle._env import cyberbattle_env
from cyberbattle._env.cyberbattle_env import EnvironmentBounds
def owned_nodes(obs: cyberbattle_env.Observation):
return np.nonzero(obs['nodes_privilegelevel'])[0]
def discovered_nodes_notowned(obs: cyberbattle_env.Observation):
return np.nonzero(obs['nodes_privilegelevel'] == 0)[0]
class Feature(spaces.MultiDiscrete, ABC):
def __init__(self, ep: EnvironmentBounds, nvec):
self.ep = ep
super().__init__(nvec)
@property
def flat_size(self):
return np.prod(self.nvec)
@abstractmethod
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
raise NotImplementedError
class ConcatFeatures(Feature):
def __init__(self, ep: EnvironmentBounds, features: List[Feature]):
self.features = features
self.dim_sizes = np.concatenate([f.nvec for f in self.features])
super().__init__(ep, self.dim_sizes)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
feature_vector = [f.get(obs) for f in self.features]
feature_vector = np.concatenate(feature_vector)
# feature_vector = np.expand_dims(feature_vector, 0)
return feature_vector
class FeatureGlobalNodesProperties(Feature):
def __init__(self, ep: EnvironmentBounds):
super(FeatureGlobalNodesProperties, self).__init__(ep, [4] * ep.property_count * ep.maximum_node_count)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
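# Discovered nodes contribute their property vector shifted by +1; undiscovered slots are filled with ones.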
features = []
for i in range(self.ep.maximum_node_count):
if i < len(obs['discovered_nodes_properties']):
features.append(np.copy(obs['discovered_nodes_properties'][i]) + 1)
else:
features.append(np.ones(self.ep.property_count))
return np.concatenate(features)
class FeatureGlobalCredentialCacheLength(Feature):
def __init__(self, ep: EnvironmentBounds):
super().__init__(ep, [ep.maximum_total_credentials])
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
return np.array([obs['credential_cache_length']])
class FeatureGlobalCredentialCache(Feature):
def __init__(self, ep: EnvironmentBounds):
super(FeatureGlobalCredentialCache, self).__init__(ep, [ep.maximum_node_count,
ep.port_count] * ep.maximum_total_credentials)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
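# One (node, port) row per credential slot; slots beyond the current cache are zero-filled.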
features = [
obs['credential_cache_matrix'][i] if i < len(obs['credential_cache_matrix']) else np.zeros(2)
for i in range(self.ep.maximum_total_credentials)
]
return np.concatenate(features)
class FeatureGlobalNodesPrivilegeLevel(Feature):
def __init__(self, ep: EnvironmentBounds, max_privilege_level: int):
self.max_privilege_level = max_privilege_level
super(FeatureGlobalNodesPrivilegeLevel, self).__init__(ep, [max_privilege_level + 1] * ep.maximum_node_count)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = np.array(obs['nodes_privilegelevel']) + 1
features.resize(self.ep.maximum_node_count, refcheck=False)
return features
class FeatureNewlyDiscoveredNodesCount(Feature):
def __init__(self, ep: EnvironmentBounds):
super().__init__(ep, [1 + ep.maximum_node_count])
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = np.array(obs['newly_discovered_nodes_count'])
return features
class FeatureLateralMove(Feature):
def __init__(self, ep: EnvironmentBounds):
super().__init__(ep, [2])
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = np.array(obs['lateral_move'])
return features
class FeatureProbeResult(Feature):
def __init__(self, ep: EnvironmentBounds):
super().__init__(ep, [3])
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = | np.array(obs['probe_result']) | numpy.array |
# -*- coding: utf-8 -*-
#
# NanchiPlot 0.1.0-dev
# License: MIT License
# Author: <NAME>
# E-mail: <EMAIL>
# Code: https://github.com/JorgeDeLosSantos/NanchiPlot
#
#~ from __future__ import absolute_import
import wx
import os
import numpy as np
# Nanchi files
try:
from initmpl import *
import setplot # Axes & Figure props
import iodata as io # Read & Write data
import uibase as ui # Main interfaces
import uiaux as aux # Auxiliar interfaces
import uitoolbar as tb # Toolbars (Toolbar, AxesToolbar, LineToolbar)
from _const_ import * # Constants
except ImportError:
from nanchi.initmpl import *
import nanchi.setplot as setplot# Axes & Figure props
import nanchi.iodata as io # Read & Write data
import nanchi.uibase as ui # Main interfaces
import nanchi.uiaux as aux # Auxiliar interfaces
import nanchi.uitoolbar as tb # Toolbars (Toolbar, AxesToolbar, LineToolbar)
from nanchi._const_ import * # Constants
class NanchiPlot(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,title=NANCHI_MAIN_CAPTION,size=(800,600))
self.initMenu()
self.initCtrls()
self.initToolBar()
self.initSizers()
self.initEvents()
# Icon
self.icon = wx.Icon(PATH_NANCHI_LOGO)
self.SetIcon(self.icon)
# Reference to main objects
self.axes = self.notebook.graphs.axes
self.figure = self.notebook.graphs.figure
self.canvas = self.notebook.graphs.canvas
self.data = self.notebook.data
# Display on center
self.Centre(True)
self.Show()
def initMenu(self):
"""
Creating menu bar
"""
m_file = wx.Menu()
save = m_file.Append(-1, "Save image... \tCtrl+S")
export_img = m_file.Append(-1, "Export data as image...")
export_txt = m_file.Append(-1, "Export data as ASCII...")
m_file.AppendSeparator()
import_data = m_file.Append(-1, "Import data... \tCtrl+I")
import_image = m_file.Append(-1, "Import image...")
m_file.AppendSeparator()
_exit = m_file.Append(-1, "Quit \tCtrl+Q")
m_help = wx.Menu()
_help = m_help.Append(-1, "Help")
about = m_help.Append(-1, "About...")
menu_bar = wx.MenuBar()
menu_bar.Append(m_file, "File")
menu_bar.Append(m_help, "Help")
self.SetMenuBar(menu_bar)
self.Bind(wx.EVT_MENU, self.OnSave, save)
self.Bind(wx.EVT_MENU, self.OnExportASCII, export_txt)
self.Bind(wx.EVT_MENU, self.OnExportImage, export_img)
self.Bind(wx.EVT_MENU, self.OnImport, import_data)
self.Bind(wx.EVT_MENU, self.OnLoadImage, import_image)
self.Bind(wx.EVT_MENU, self.OnAbout, about)
self.Bind(wx.EVT_MENU, self.OnHelp, _help)
self.Bind(wx.EVT_MENU, self.OnExit, _exit)
def initSizers(self):
"""
Initialize sizers
"""
self.mainsz = wx.BoxSizer(wx.VERTICAL)
self.panelsz = wx.BoxSizer(wx.HORIZONTAL)
self.mainsz.Add(self.toolbar, 0, wx.EXPAND)
self.panelsz.Add(self.notebook, 1, wx.EXPAND|wx.ALL, 2)
self.panelsz.Add(self.axestoolbar, 0, wx.EXPAND|wx.ALL)
self.panelsz.Add(self.linetoolbar, 0, wx.EXPAND|wx.ALL)
self.mainsz.Add(self.mainpanel, 1, wx.EXPAND)
self.mainpanel.SetSizer(self.panelsz)
self.SetSizer(self.mainsz)
def initCtrls(self):
"""
Initialize basic controls
"""
# Status bar
self.SB_FONT = wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.SB_FONT.SetFaceName(u"DejaVu Sans Mono")
self.sb = aux.StatusBar(self, -1)
self.sb.SetFont(self.SB_FONT)
self.sb.SetForegroundColour("#aa00aa")
self.SetStatusBar(self.sb)
self.sb.SetStatusText(SB_ON_INIT)
self.mainpanel = wx.Panel(self,-1)
self.notebook = ui.NanchiNoteBook(self.mainpanel)
def initToolBar(self):
"""
Initialize tool bar
"""
self.toolbar = tb.MainToolbar(self)
self.toolbar.Realize()
self.axestoolbar = tb.AxesToolbar(self.mainpanel)
self.axestoolbar.Realize()
self.linetoolbar = tb.LineToolbar(self.mainpanel)
self.linetoolbar.Realize()
def initEvents(self):
"""
Initialize events
"""
self.graphs = self.notebook.graphs
self.Bind(wx.EVT_TOOL, self.OnImport, self.toolbar.import_tool)
self.Bind(wx.EVT_TOOL, self.OnLoadImage, self.toolbar.load_image_tool)
self.Bind(wx.EVT_TOOL, self.OnFunction, self.toolbar.function_tool)
self.Bind(wx.EVT_TOOL, self.OnBivariableFunction, self.toolbar.bivariable_function_tool)
self.Bind(wx.EVT_TOOL, self.OnPlot, self.toolbar.plot_tool)
self.Bind(wx.EVT_TOOL, self.OnBar, self.toolbar.bar_tool)
self.Bind(wx.EVT_TOOL, self.OnScatter, self.toolbar.scatter_tool)
self.Bind(wx.EVT_TOOL, self.OnPie, self.toolbar.pie_tool)
self.Bind(wx.EVT_TOOL, self.OnImage, self.toolbar.image_tool)
self.Bind(wx.EVT_TOOL, self.OnContour, self.toolbar.contour_tool)
self.Bind(wx.EVT_TOOL, self.OnContourf, self.toolbar.contourf_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnZoom, self.axestoolbar.zoom_box_tool)
self.Bind(wx.EVT_TOOL, self.OnResetView, self.axestoolbar.reset_view_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnBackground, self.axestoolbar.axes_color_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnGridColor, self.axestoolbar.grid_color_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnGridStyle, self.axestoolbar.grid_style_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnXLabel, self.axestoolbar.xlabel_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnYLabel, self.axestoolbar.ylabel_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnXTicks, self.axestoolbar.xticks_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnYTicks, self.axestoolbar.yticks_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnLineColor, self.linetoolbar.line_color_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnLineWidth, self.linetoolbar.line_width_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnLineStyle, self.linetoolbar.line_style_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnLineLabel, self.linetoolbar.line_label_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnShowLegend, self.linetoolbar.show_legend_tool)
self.Bind(wx.EVT_TOOL, self.OnPieLabels, self.linetoolbar.pie_labels_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnMoveLine, self.linetoolbar.move_line_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnMoveText, self.linetoolbar.move_text_tool)
self.Bind(wx.EVT_TOOL, self.graphs.OnText, self.linetoolbar.text_tool)
def OnExit(self,event):
"""
File -> Quit
"""
self.Close(True)
def OnHelp(self,event):
"""
Help -> Help
"""
try:
os.startfile(PATH_DOCUMENTATION_HTML)
except:
"""
Help file does not exist
"""
print("Help file not found")
pass
def OnSave(self,event):
"""
File -> Save image... -> (Short-Cut) Ctrl + S
"""
wldc = ON_SAVE_WILDCARD
dlg=wx.FileDialog(self, "Save", os.getcwd(), style=wx.SAVE, wildcard=wldc)
if dlg.ShowModal() == wx.ID_OK:
self.figure.savefig(dlg.GetPath())
dlg.Destroy()
def OnExportASCII(self,event):
data = self.data.grid_data.GetArrayData()
wldc = ON_EXPORT_ASCII_WILDCARD
dlg=wx.FileDialog(self, "Save", os.getcwd(), style=wx.SAVE, wildcard=wldc)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
io.write_txt(fname, data)
dlg.Destroy()
def OnExportImage(self,event):
data = self.data.grid_data.GetArrayData()
wldc = ON_EXPORT_IMAGE_WILDCARD
dlg=wx.FileDialog(self, "Save", os.getcwd(), style=wx.SAVE, wildcard=wldc)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
io.imsave(fname, data)
dlg.Destroy()
def OnImport(self,event):
"""
Import data
"""
dlg = aux.ImportDialog(None)
if dlg.ShowModal() == wx.ID_OK:
busy_dlg = aux.BusyInfo("Wait a moment...", self)
data = dlg.GetData()
if data is None:
self.sb.SetStatusText(SB_ON_IMPORT_DATA_FAIL%(path))
del busy_dlg
else:
self.data.grid_data.SetArrayData(data)
del busy_dlg
dlg.Destroy()
def OnLoadImage(self,event):
"""
Import images
"""
path = ""
wildcard = ON_IMPORT_IMAGE_WILDCARD
dlg = wx.FileDialog(self, message="Select an image",
defaultDir=os.getcwd(), wildcard=wildcard, style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
busy_dlg = aux.BusyInfo("Wait a moment...", self)
path = dlg.GetPath()
data = io.imread(path)
self.data.grid_data.SetArrayData(data)
self.sb.SetStatusText(SB_ON_IMPORT_IMAGE%(path))
del busy_dlg
else:
self.sb.SetStatusText(SB_ON_IMPORT_IMAGE_CANCEL)
dlg.Destroy()
def OnFunction(self,event):
"""
Create data from f(x) function
"""
from numpy import (sin,cos,tan,log,exp)
dialog = aux.FunctionDialog(None)
if dialog.ShowModal() == wx.ID_OK:
fx,a,b,points = dialog.GetData()
try:
x = np.linspace(float(a), float(b), int(points))
fx = eval(fx)
self.data.grid_data.SetArrayData(np.array([x,fx]).transpose())
self.data.grid_data.SetColLabelValue(0,"x")
self.data.grid_data.SetColLabelValue(1,"f(x)")
self.sb.SetStatusText(SB_ON_CREATE_DATA_FUNCTION)
except:
self.sb.SetStatusText(SB_ERROR_ON_CREATE_DATA)
dialog.Destroy()
def OnBivariableFunction(self,event):
"""
Create data from f(x,y) function
"""
from numpy import (sin,cos,tan,log,exp)
dialog = aux.BivariableFunctionDialog(None)
if dialog.ShowModal() == wx.ID_OK:
fxy,x,y,points = dialog.GetData()
try:
x1,x2 = [float(n) for n in x]
y1,y2 = [float(n) for n in y]
xx = np.linspace(x1, x2, int(points))
yy = np.linspace(y1, y2, int(points))
x,y = np.meshgrid(xx,yy)
Z = eval(fxy)
self.data.grid_data.SetArrayData(Z)
self.sb.SetStatusText(SB_ON_CREATE_DATA_BIVARIABLE_FUNCTION)
except:
self.sb.SetStatusText(SB_ERROR_ON_CREATE_DATA)
dialog.Destroy()
def OnPlot(self,event):
"""
Line plot
"""
setplot.set_default_params(self.axes,self.figure)
busy_dlg = aux.BusyInfo("Wait a moment...", self)
X = self.data.grid_data.GetArrayData()
rows,cols = X.shape
if cols == 2: # Common case
self.axes.plot(X[:,0],X[:,1], picker=True)
elif cols == 1:
self.axes.plot(X[:,0], picker=True)
elif cols > 2:
for col in range(cols):
#clabel = self.data.grid_data.GetColLabelValue(col)
self.axes.plot(X[:,col], picker=True)
self.canvas.draw()
del busy_dlg
def OnPolar(self,event):
"""
Unavailable
Possibility: Rectangular axes -> Polar axes (temporarily)
"""
pass
def OnBar(self,event):
"""
Plot bars
"""
setplot.set_default_params(self.axes, self.figure)
X = self.data.grid_data.GetArrayData()
rows,cols = X.shape
# Reference: http://matthiaseisen.com/pp/patterns/p0178/
#
# Space between bars groups (FACTOR)
KB = 0.85
# Counter
k = 0
# For each row
for jj in range(rows):
kw = 1.0/(cols+1.5) # bar width
x = | np.linspace(k, k+KB, cols, endpoint=False) | numpy.linspace |
import configparser
import numpy
import sys
import time
import random
import math
import os
from copy import deepcopy
import json
from numpy.linalg import norm
from numpy import dot
import numpy as np
import codecs
from scipy.stats import spearmanr
import tensorflow as tf
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
if torch.cuda.is_available():
CUDA = 0
def FloatTensorWrapper(tensor, cuda=0):
if cuda >= 0:
tensor = torch.FloatTensor(tensor).cuda(cuda)
else:
tensor = torch.FloatTensor(tensor)
return tensor
def LongTensorWrapper(tensor, cuda=0):
if cuda >= 0:
tensor = torch.LongTensor(tensor).cuda(cuda)
else:
tensor = torch.LongTensor(tensor)
return tensor
def DoubleTensorWrapper(tensor, cuda=0):
if cuda >= 0:
tensor = torch.DoubleTensor(tensor).cuda(cuda)
else:
tensor = torch.DoubleTensor(tensor)
return tensor
def l2_loss(input_tensor, target_tensor):
loss_matrix = nn.functional.mse_loss(input_tensor, target_tensor, reduce=False)
return torch.sum(loss_matrix)/2
class PytorchModel(torch.nn.Module):
def __init__(self, W, attract_margin_value=1.0, repel_margin_value=0.0, l2_reg_constant=1e-9):
super(PytorchModel, self).__init__()
self.attract_margin = attract_margin_value
self.repel_margin = repel_margin_value
self.regularisation_constant = l2_reg_constant
self.init_W = nn.Embedding(W.shape[0], W.shape[1])
self.init_W.weight = nn.Parameter(torch.DoubleTensor(W), requires_grad=False)
self.dynamic_W = nn.Embedding(W.shape[0], W.shape[1])
self.dynamic_W.weight = nn.Parameter(torch.DoubleTensor(W), requires_grad=True)
def attract_cost(self, attract_examples, negative_examples_attract):
np_attract_examples = np.array(attract_examples)
np_negative_examples_attract = np.array(negative_examples_attract)
attract_examples_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_attract_examples[:,0], CUDA))))
attract_examples_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_attract_examples[:,1], CUDA))))
negative_examples_attract_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_attract[:,0], CUDA))))
negative_examples_attract_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_attract[:,1], CUDA))))
# dot product between the example pairs.
attract_similarity_between_examples = torch.sum(torch.mul(attract_examples_left, attract_examples_right), 1)
# dot product of each word in the example with its negative example.
attract_similarity_to_negatives_left = torch.sum(torch.mul(attract_examples_left, negative_examples_attract_left), 1)
attract_similarity_to_negatives_right = torch.sum(torch.mul(attract_examples_right, negative_examples_attract_right), 1)
attract_cost = nn.functional.relu(self.attract_margin + attract_similarity_to_negatives_left - attract_similarity_between_examples) + \
nn.functional.relu(self.attract_margin + attract_similarity_to_negatives_right - attract_similarity_between_examples)
original_attract_examples_left = self.init_W(LongTensorWrapper(np_attract_examples[:,0], CUDA))
original_attract_examples_right = self.init_W(LongTensorWrapper(np_attract_examples[:,1], CUDA))
original_negative_examples_attract_left = self.init_W(LongTensorWrapper(np_negative_examples_attract[:,0], CUDA))
original_negative_examples_attract_right = self.init_W(LongTensorWrapper(np_negative_examples_attract[:,1], CUDA))
# and then define the respective regularisation costs:
regularisation_cost_attract = self.regularisation_constant * (l2_loss(original_attract_examples_left, attract_examples_left) + l2_loss(original_attract_examples_right, attract_examples_right))
attract_cost += regularisation_cost_attract
return attract_cost
def repel_cost(self, repel_examples, negative_examples_repel):
np_repel_examples = np.array(repel_examples)
np_negative_examples_repel = np.array(negative_examples_repel)
repel_examples_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_repel_examples[:,0], CUDA))))
repel_examples_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_repel_examples[:,1], CUDA))))
negative_examples_repel_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_repel[:,0], CUDA))))
negative_examples_repel_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_repel[:,1], CUDA))))
# dot product between the example pairs.
repel_similarity_between_examples = torch.sum(torch.mul(repel_examples_left, repel_examples_right), 1)
# dot product of each word in the example with its negative example.
repel_similarity_to_negatives_left = torch.sum(torch.mul(repel_examples_left, negative_examples_repel_left), 1)
repel_similarity_to_negatives_right = torch.sum(torch.mul(repel_examples_right, negative_examples_repel_right), 1)
repel_cost = nn.functional.relu(self.repel_margin + repel_similarity_to_negatives_left - repel_similarity_between_examples) + \
nn.functional.relu(self.repel_margin + repel_similarity_to_negatives_right - repel_similarity_between_examples)
# load the original distributional vectors for the example pairs:
original_repel_examples_left = self.init_W(LongTensorWrapper(np_repel_examples[:,0], CUDA))
original_repel_examples_right = self.init_W(LongTensorWrapper(np_repel_examples[:,1], CUDA))
original_negative_examples_repel_left = self.init_W(LongTensorWrapper(np_negative_examples_repel[:,0], CUDA))
original_negative_examples_repel_right = self.init_W(LongTensorWrapper(np_negative_examples_repel[:,1], CUDA))
# and then define the respective regularisation costs:
regularisation_cost_repel = self.regularisation_constant * (l2_loss(original_repel_examples_left, repel_examples_left) + l2_loss(original_repel_examples_right, repel_examples_right))
repel_cost += regularisation_cost_repel
return repel_cost
class ExperimentRun:
"""
This class stores all of the data and hyperparameters required for an Attract-Repel run.
"""
def __init__(self, config_filepath):
"""
To initialise the class, we need to supply the config file, which contains the location of
the pretrained (distributional) word vectors, the locations of one or more collections
of linguistic constraints (one pair per line), as well as the
hyperparameters of the Attract-Repel procedure (as detailed in the TACL paper).
"""
self.config = configparser.RawConfigParser()
try:
self.config.read(config_filepath)
except:
print("Couldn't read config file from", config_filepath)
return None
distributional_vectors_filepath = self.config.get("data", "distributional_vectors")
try:
self.output_filepath = self.config.get("data", "output_filepath")
except:
self.output_filepath = "results/final_vectors.txt"
# load initial distributional word vectors.
distributional_vectors = load_word_vectors(distributional_vectors_filepath)
if not distributional_vectors:
return
print("SimLex score (Spearman's rho coefficient) of initial vectors is:\n")
simlex_scores(distributional_vectors)
self.vocabulary = set(distributional_vectors.keys())
# this will be used to load constraints
self.vocab_index = {}
self.inverted_index = {}
for idx, word in enumerate(self.vocabulary):
self.vocab_index[word] = idx
self.inverted_index[idx] = word
# load list of filenames for synonyms and antonyms.
synonym_list = self.config.get("data", "synonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
antonym_list = self.config.get("data", "antonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
self.synonyms = set()
self.antonyms = set()
if synonym_list != "":
# and we then have all the information to load all linguistic constraints
for syn_filepath in synonym_list:
if syn_filepath != "":
self.synonyms = self.synonyms | self.load_constraints(syn_filepath)
else:
self.synonyms = set()
if antonym_list != "":
for ant_filepath in antonym_list:
if ant_filepath != "":
self.antonyms = self.antonyms | self.load_constraints(ant_filepath)
else:
self.antonyms = set()
# finally, load the experiment hyperparameters:
self.load_experiment_hyperparameters()
self.embedding_size = random.choice(list(distributional_vectors.values())).shape[0]
self.vocabulary_size = len(self.vocabulary)
# Next, prepare the matrix of initial vectors and initialise the model.
numpy_embedding = numpy.zeros((self.vocabulary_size, self.embedding_size), dtype="float64")
for idx in range(0, self.vocabulary_size):
numpy_embedding[idx, :] = distributional_vectors[self.inverted_index[idx]]
self.model = PytorchModel(numpy_embedding,
attract_margin_value=self.attract_margin_value,
repel_margin_value=self.repel_margin_value,
l2_reg_constant=self.regularisation_constant_value)
if CUDA >= 0:
self.model.cuda(CUDA)
def load_constraints(self, constraints_filepath):
"""
This method reads a collection of constraints from the specified file, and returns a set with
all constraints whose constituent words are both in the specified vocabulary.
"""
constraints_filepath = constraints_filepath.strip()
constraints = set()
with codecs.open(constraints_filepath, "r", "utf-8") as f:
for line in f:
word_pair = line.split()
if word_pair[0] in self.vocabulary and word_pair[1] in self.vocabulary and word_pair[0] != word_pair[1]:
constraints |= {(self.vocab_index[word_pair[0]], self.vocab_index[word_pair[1]])}
return constraints
def load_experiment_hyperparameters(self):
"""
This method loads/sets the hyperparameters of the procedure as specified in the paper.
"""
self.attract_margin_value = self.config.getfloat("hyperparameters", "attract_margin")
self.repel_margin_value = self.config.getfloat("hyperparameters", "repel_margin")
self.batch_size = int(self.config.getfloat("hyperparameters", "batch_size"))
self.regularisation_constant_value = self.config.getfloat("hyperparameters", "l2_reg_constant")
self.max_iter = self.config.getfloat("hyperparameters", "max_iter")
self.log_scores_over_time = self.config.get("experiment", "log_scores_over_time")
self.print_simlex = self.config.get("experiment", "print_simlex")
if self.log_scores_over_time in ["True", "true"]:
self.log_scores_over_time = True
else:
self.log_scores_over_time = False
if self.print_simlex in ["True", "true"]:
self.print_simlex = True
else:
self.print_simlex = False
print("\nExperiment hyperparameters (attract_margin, repel_margin, batch_size, l2_reg_constant, max_iter):", \
self.attract_margin_value, self.repel_margin_value, self.batch_size, self.regularisation_constant_value, self.max_iter)
def extract_negative_examples(self, list_minibatch, attract_batch = True):
"""
For each word in each example pair of the minibatch, this method returns the closest
vector that is not part of that word's example pair.
"""
np_list_minibatch = np.array(list_minibatch)
list_of_representations = []
list_of_indices = []
lefts = Variable(LongTensorWrapper(np_list_minibatch[:, 0], CUDA))
rights = Variable(LongTensorWrapper(np_list_minibatch[:, 1], CUDA))
representations = [nn.functional.normalize(self.model.dynamic_W(lefts)).data.cpu().numpy(), nn.functional.normalize(self.model.dynamic_W(rights)).data.cpu().numpy()]
for idx, (example_left, example_right) in enumerate(list_minibatch):
list_of_representations.append(representations[0][idx])
list_of_representations.append(representations[1][idx])
list_of_indices.append(example_left)
list_of_indices.append(example_right)
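# Pairwise cosine distances between all 2 * batch_size word vectors in the mini-batch.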
condensed_distance_list = pdist(list_of_representations, 'cosine')
square_distance_list = squareform(condensed_distance_list)
if attract_batch:
default_value = 2.0 # value to set for given attract/repel pair, so that it can not be found as closest or furthest away.
else:
default_value = 0.0 # for antonyms, we want the opposite value from the synonym one. Cosine Distance is [0,2].
for i in range(len(square_distance_list)):
square_distance_list[i,i]=default_value
if i % 2 == 0:
square_distance_list[i,i+1] = default_value
else:
square_distance_list[i,i-1] = default_value
if attract_batch:
negative_example_indices = numpy.argmin(square_distance_list,axis=1) # for each of the 100 elements, finds the index which has the minimal cosine distance (i.e. most similar).
else:
negative_example_indices = numpy.argmax(square_distance_list, axis=1) # for antonyms, find the least similar one.
negative_examples = []
for idx in range(len(list_minibatch)):
negative_example_left = list_of_indices[negative_example_indices[2 * idx]]
negative_example_right = list_of_indices[negative_example_indices[2 * idx + 1]]
negative_examples.append((negative_example_left, negative_example_right))
negative_examples = mix_sampling(list_minibatch, negative_examples)
return negative_examples
def attract_repel(self):
"""
This method repeatedly applies optimisation steps to fit the word vectors to the provided linguistic constraints.
"""
current_iteration = 0
# Post-processing: remove synonym pairs which are deemed to be both synonyms and antonyms:
for antonym_pair in self.antonyms:
if antonym_pair in self.synonyms:
self.synonyms.remove(antonym_pair)
self.synonyms = list(self.synonyms)
self.antonyms = list(self.antonyms)
self.syn_count = len(self.synonyms)
self.ant_count = len(self.antonyms)
print("\nAntonym pairs:", len(self.antonyms), "Synonym pairs:", len(self.synonyms))
list_of_simlex = []
list_of_wordsim = []
syn_batches = int(self.syn_count / self.batch_size)
ant_batches = int(self.ant_count / self.batch_size)
batches_per_epoch = syn_batches + ant_batches
print("\nRunning the optimisation procedure for", self.max_iter, "iterations...")
last_time = time.time()
if self.log_scores_over_time:
fwrite_simlex = open("results/simlex_scores.txt", "w")
fwrite_wordsim = open("results/wordsim_scores.txt", "w")
# set optimizer
attract_optimizer = torch.optim.Adagrad(self.model.dynamic_W.parameters(), lr=0.01)
repel_optimizer = torch.optim.Adagrad(self.model.dynamic_W.parameters(), lr=0.01)
while current_iteration < self.max_iter:
# how many attract/repel batches we've done in this epoch so far.
antonym_counter = 0
synonym_counter = 0
order_of_synonyms = [i for i in range(0, self.syn_count)]
order_of_antonyms = [i for i in range(0, self.ant_count)]
random.shuffle(order_of_synonyms)
random.shuffle(order_of_antonyms)
# list of 0 where we run synonym batch, 1 where we run antonym batch
list_of_batch_types = [0] * batches_per_epoch
list_of_batch_types[syn_batches:] = [1] * ant_batches # all antonym batches to 1
random.shuffle(list_of_batch_types)
if current_iteration == 0:
print("\nStarting epoch:", current_iteration+1, "\n")
else:
print("\nStarting epoch:", current_iteration+1, "Last epoch took:", round(time.time() - last_time, 1), "seconds. \n")
last_time = time.time()
for batch_index in range(0, batches_per_epoch):
# we can Log SimLex / WordSim scores
if self.log_scores_over_time and (batch_index % (batches_per_epoch/20) == 0):
(simlex_score, wordsim_score) = self.create_vector_dictionary()
list_of_simlex.append(simlex_score)
list_of_wordsim.append(wordsim_score)
fwrite_simlex.write(str(len(list_of_simlex)+1) + " " + str(simlex_score) + "\n")
fwrite_wordsim.write(str(len(list_of_simlex)+1) + " " + str(wordsim_score) + "\n")
syn_or_ant_batch = list_of_batch_types[batch_index]
if syn_or_ant_batch == 0:
# do one synonymy batch:
synonymy_examples = [self.synonyms[order_of_synonyms[x]] for x in range(synonym_counter * self.batch_size, (synonym_counter+1) * self.batch_size)]
current_negatives = self.extract_negative_examples(synonymy_examples, attract_batch=True)
attract_cost = self.model.attract_cost(synonymy_examples, current_negatives)
# apply gradients
self.model.zero_grad()
torch.sum(attract_cost).backward()
self.model.dynamic_W.weight.grad.data.clamp_(-2.0, 2.0)
attract_optimizer.step()
synonym_counter += 1
else:
antonymy_examples = [self.antonyms[order_of_antonyms[x]] for x in range(antonym_counter * self.batch_size, (antonym_counter+1) * self.batch_size)]
current_negatives = self.extract_negative_examples(antonymy_examples, attract_batch=False)
repel_cost = self.model.repel_cost(antonymy_examples, current_negatives)
# apply gradients
self.model.zero_grad()
torch.sum(repel_cost).backward()
self.model.dynamic_W.weight.grad.data.clamp_(-2.0, 2.0)
repel_optimizer.step()
antonym_counter += 1
current_iteration += 1
self.create_vector_dictionary() # whether to print SimLex score at the end of each epoch
def create_vector_dictionary(self):
"""
Extracts the current word vectors from the PyTorch embedding layer and (if print_simlex=True) prints their SimLex scores.
"""
log_time = time.time()
current_vectors = self.model.dynamic_W.weight.data.cpu().numpy()
self.word_vectors = {}
for idx in range(0, self.vocabulary_size):
self.word_vectors[self.inverted_index[idx]] = normalise_vector(current_vectors[idx, :])
if self.log_scores_over_time or self.print_simlex:
(score_simlex, score_wordsim) = simlex_scores(self.word_vectors, self.print_simlex)
return (score_simlex, score_wordsim)
return (1.0, 1.0)
def random_different_from(top_range, number_to_not_repeat):
result = random.randint(0, top_range-1)
while result == number_to_not_repeat:
result = random.randint(0, top_range-1)
return result
def mix_sampling(list_of_examples, negative_examples):
"""
Converts half of the negative examples to random words from the batch (that are not in the given example pair).
"""
mixed_negative_examples = []
batch_size = len(list_of_examples)
for idx, (left_idx, right_idx) in enumerate(negative_examples):
new_left = left_idx
new_right = right_idx
if random.random() >= 0.5:
new_left = list_of_examples[random_different_from(batch_size, idx)][random.randint(0, 1)]
if random.random() >= 0.5:
new_right = list_of_examples[random_different_from(batch_size, idx)][random.randint(0, 1)]
mixed_negative_examples.append((new_left, new_right))
return mixed_negative_examples
def normalise_word_vectors(word_vectors, norm=1.0):
"""
This method normalises the collection of word vectors provided in the word_vectors dictionary.
"""
for word in word_vectors:
word_vectors[word] /= math.sqrt((word_vectors[word]**2).sum() + 1e-6)
word_vectors[word] = word_vectors[word] * norm
return word_vectors
def load_word_vectors(fname, isBinary=False):
"""
Loads word vectors either from a word2vec-style binary file or from a plain-text file (one word and its vector per line).
"""
print("Loading pretrained word vectors from", fname)
word_vecs = {}
if isBinary:
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = numpy.dtype('float64').itemsize * layer1_size
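# word2vec binary format: a space-terminated word followed by layer1_size float64 values.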
for line in range(vocab_size):
word = b""
while True:
ch = f.read(1)
if ch == b' ':
break
if ch != b'\n':
word += ch
word_vecs[word.decode()] = numpy.fromstring(f.read(binary_len), dtype='float64')
else:
f = codecs.open(fname, 'r', 'utf-8')
f.readline()
for line in f:
line = line.split(" ", 1)
key = line[0].lower()
word_vecs[key] = numpy.fromstring(line[1], dtype="float64", sep=" ")
print(len(word_vecs), "vectors loaded from", fname)
return word_vecs
def print_word_vectors(word_vectors, write_path):
"""
This function prints the collection of word vectors to file, in a plain textual format.
"""
f_write = codecs.open(write_path, 'w', 'utf-8')
for key in word_vectors:
f_write.write(key+" "+" ".join(map(str, numpy.round(word_vectors[key], decimals=6)))+"\n")
print("Printed", len(word_vectors), "word vectors to:", write_path)
def simlex_analysis(word_vectors, language="english", source="simlex"):
"""
This method computes Spearman's rho correlation between the supplied word vectors' similarity ranking and the gold SimLex/SimVerb/WordSim scores, returning (rho, coverage).
"""
pair_list = []
if source == "simlex":
fread_simlex=codecs.open("evaluation/simlex-" + language + ".txt", 'r', 'utf-8')
elif source == "simlex-old":
fread_simlex=codecs.open("evaluation/simlex-english-old.txt", 'r', 'utf-8')
elif source == "simverb":
fread_simlex=codecs.open("evaluation/simverb.txt", 'r', 'utf-8')
elif source == "wordsim":
fread_simlex=codecs.open("evaluation/ws-353/wordsim353-" + language + ".txt", 'r', 'utf-8') # specify english, english-rel, etc.
line_number = 0
for line in fread_simlex:
if line_number > 0:
tokens = line.split()
word_i = tokens[0].lower()
word_j = tokens[1].lower()
score = float(tokens[2])
if word_i in word_vectors and word_j in word_vectors:
pair_list.append( ((word_i, word_j), score) )
else:
pass
line_number += 1
if not pair_list:
return (0.0, 0)
pair_list.sort(key=lambda x: - x[1])
coverage = len(pair_list)
extracted_list = []
extracted_scores = {}
for (x,y) in pair_list:
(word_i, word_j) = x
current_distance = distance(word_vectors[word_i], word_vectors[word_j])
extracted_scores[(word_i, word_j)] = current_distance
extracted_list.append(((word_i, word_j), current_distance))
extracted_list.sort(key=lambda x: x[1])
spearman_original_list = []
spearman_target_list = []
for position_1, (word_pair, score_1) in enumerate(pair_list):
score_2 = extracted_scores[word_pair]
position_2 = extracted_list.index((word_pair, score_2))
spearman_original_list.append(position_1)
spearman_target_list.append(position_2)
spearman_rho = spearmanr(spearman_original_list, spearman_target_list)
return round(spearman_rho[0], 3), coverage
def normalise_vector(v1):
return v1 / norm(v1)
def distance(v1, v2, normalised_vectors=False):
"""
Returns the cosine distance between two vectors.
If the vectors are normalised, there is no need for the denominator, which is always one.
"""
if normalised_vectors:
return 1 - dot(v1, v2)
else:
return 1 - | dot(v1, v2) | numpy.dot |